repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
pytorch-boat | pytorch-boat-main/BOAT-Swin/lr_scheduler.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.scheduler.step_lr import StepLRScheduler
from timm.scheduler.scheduler import Scheduler
def build_scheduler(config, optimizer, n_iter_per_epoch):
    """Create the LR scheduler named by ``config.TRAIN.LR_SCHEDULER.NAME``.

    Supported names are ``'cosine'``, ``'linear'`` and ``'step'``; any other
    name yields ``None``.  Epoch-based config values are converted to
    iteration counts via ``n_iter_per_epoch`` because every scheduler here
    steps per iteration (``t_in_epochs=False``).
    """
    total_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch)
    warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch)
    decay_steps = int(config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch)

    name = config.TRAIN.LR_SCHEDULER.NAME
    scheduler = None
    if name == 'cosine':
        scheduler = CosineLRScheduler(
            optimizer,
            t_initial=total_steps,
            t_mul=1.,
            lr_min=config.TRAIN.MIN_LR,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            cycle_limit=1,
            t_in_epochs=False,
        )
    elif name == 'linear':
        scheduler = LinearLRScheduler(
            optimizer,
            t_initial=total_steps,
            lr_min_rate=0.01,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )
    elif name == 'step':
        scheduler = StepLRScheduler(
            optimizer,
            decay_t=decay_steps,
            decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE,
            warmup_lr_init=config.TRAIN.WARMUP_LR,
            warmup_t=warmup_steps,
            t_in_epochs=False,
        )
    return scheduler
class LinearLRScheduler(Scheduler):
    """Per-step linear LR decay with an optional linear warmup (timm-style).

    After ``warmup_t`` steps, each param group's LR decays linearly from its
    base value down to ``base * lr_min_rate`` at step ``t_initial``.  During
    warmup the LR rises linearly from ``warmup_lr_init`` to the base value.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 lr_min_rate: float,
                 warmup_t=0,
                 warmup_lr_init=0.,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True,
                 ) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)
        self.t_initial = t_initial
        self.lr_min_rate = lr_min_rate
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.t_in_epochs = t_in_epochs
        if not self.warmup_t:
            # no warmup: keep a harmless dummy step so _get_lr's list shapes match
            self.warmup_steps = [1 for _ in self.base_values]
        else:
            # per-group LR increment applied on each warmup step
            self.warmup_steps = [(base - warmup_lr_init) / self.warmup_t
                                 for base in self.base_values]
            super().update_groups(self.warmup_lr_init)

    def _get_lr(self, t):
        """Return the list of per-group LRs for step ``t``."""
        if t < self.warmup_t:
            return [self.warmup_lr_init + t * step for step in self.warmup_steps]
        t = t - self.warmup_t
        total_t = self.t_initial - self.warmup_t
        # linear interpolation from base down to base * lr_min_rate
        return [base - ((base - base * self.lr_min_rate) * (t / total_t))
                for base in self.base_values]

    def get_epoch_values(self, epoch: int):
        return self._get_lr(epoch) if self.t_in_epochs else None

    def get_update_values(self, num_updates: int):
        return None if self.t_in_epochs else self._get_lr(num_updates)
| 3,547 | 33.446602 | 105 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/utils.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import os
import torch
import torch.distributed as dist
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
def load_checkpoint(config, model, optimizer, lr_scheduler, logger):
    """Resume model (and optionally optimizer/scheduler/epoch/AMP) state.

    Loads the checkpoint at ``config.MODEL.RESUME`` (URL or local path) on
    CPU, restores model weights non-strictly, and — unless in EVAL_MODE —
    also restores optimizer, LR scheduler, start epoch and apex AMP state
    when those keys are present.

    Returns:
        float: the checkpoint's recorded ``max_accuracy`` if available and
        training state was restored, else 0.0.
    """
    # NOTE(review): "Resuming form" is a typo in the log message ("from").
    logger.info(f"==============> Resuming form {config.MODEL.RESUME}....................")
    if config.MODEL.RESUME.startswith('https'):
        # remote checkpoint: download (cached) via torch.hub
        checkpoint = torch.hub.load_state_dict_from_url(
            config.MODEL.RESUME, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
    # strict=False tolerates missing/unexpected keys; the mismatch report is logged
    msg = model.load_state_dict(checkpoint['model'], strict=False)
    logger.info(msg)
    max_accuracy = 0.0
    if not config.EVAL_MODE and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        # the config is a frozen yacs-style node; unfreeze to set START_EPOCH
        config.defrost()
        config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1
        config.freeze()
        # restore apex AMP loss-scale state only if both runs used mixed precision
        if 'amp' in checkpoint and config.AMP_OPT_LEVEL != "O0" and checkpoint['config'].AMP_OPT_LEVEL != "O0":
            amp.load_state_dict(checkpoint['amp'])
        logger.info(f"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})")
        if 'max_accuracy' in checkpoint:
            max_accuracy = checkpoint['max_accuracy']
    # free the CPU copy and any cached CUDA memory before training continues
    del checkpoint
    torch.cuda.empty_cache()
    return max_accuracy
def load_pretrained(config, model, logger):
    """Load pretrained weights from ``config.MODEL.PRETRAINED`` for fine-tuning.

    Resolution-dependent buffers (relative_position_index,
    relative_coords_table, attn_mask) are dropped because the model re-inits
    them; relative-position bias tables and absolute position embeddings are
    bicubically resized when the pretrained window/grid size differs; and the
    classifier head is remapped (ImageNet-22K -> 1K via data/map22kto1k.txt)
    or zero re-initialized when the class counts differ.  Weights are loaded
    with ``strict=False`` and the mismatch report is logged.
    """
    logger.info(f"==============> Loading weight {config.MODEL.PRETRAINED} for fine-tuning......")
    checkpoint = torch.load(config.MODEL.PRETRAINED, map_location='cpu')
    state_dict = checkpoint['model']
    # delete relative_position_index since we always re-init it
    relative_position_index_keys = [k for k in state_dict.keys() if "relative_position_index" in k]
    for k in relative_position_index_keys:
        del state_dict[k]
    # delete relative_coords_table since we always re-init it
    relative_position_index_keys = [k for k in state_dict.keys() if "relative_coords_table" in k]
    for k in relative_position_index_keys:
        del state_dict[k]
    # delete attn_mask since we always re-init it
    attn_mask_keys = [k for k in state_dict.keys() if "attn_mask" in k]
    for k in attn_mask_keys:
        del state_dict[k]
    # bicubic interpolate relative_position_bias_table if not match
    relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
    for k in relative_position_bias_table_keys:
        relative_position_bias_table_pretrained = state_dict[k]
        relative_position_bias_table_current = model.state_dict()[k]
        L1, nH1 = relative_position_bias_table_pretrained.size()
        L2, nH2 = relative_position_bias_table_current.size()
        if nH1 != nH2:
            # head count differs: cannot reuse this table
            logger.warning(f"Error in loading {k}, passing......")
        else:
            if L1 != L2:
                # the table is a (2*W-1)^2 x nH grid; resize it as an S1 x S1 image
                S1 = int(L1 ** 0.5)
                S2 = int(L2 ** 0.5)
                relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
                    relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2),
                    mode='bicubic')
                state_dict[k] = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)
    # bicubic interpolate absolute_pos_embed if not match
    absolute_pos_embed_keys = [k for k in state_dict.keys() if "absolute_pos_embed" in k]
    for k in absolute_pos_embed_keys:
        absolute_pos_embed_pretrained = state_dict[k]
        absolute_pos_embed_current = model.state_dict()[k]
        _, L1, C1 = absolute_pos_embed_pretrained.size()
        _, L2, C2 = absolute_pos_embed_current.size()
        # BUGFIX: the original tested `C1 != C1` (always False), so a channel
        # mismatch was never caught and fell through to the resize branch.
        if C1 != C2:
            logger.warning(f"Error in loading {k}, passing......")
        else:
            if L1 != L2:
                # reinterpret the L1 tokens as an S1 x S1 grid and resize
                S1 = int(L1 ** 0.5)
                S2 = int(L2 ** 0.5)
                absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(-1, S1, S1, C1)
                absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(0, 3, 1, 2)
                absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(
                    absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')
                absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.permute(0, 2, 3, 1)
                absolute_pos_embed_pretrained_resized = absolute_pos_embed_pretrained_resized.flatten(1, 2)
                state_dict[k] = absolute_pos_embed_pretrained_resized
    # check classifier, if not match, then re-init classifier to zero
    head_bias_pretrained = state_dict['head.bias']
    Nc1 = head_bias_pretrained.shape[0]
    Nc2 = model.head.bias.shape[0]
    if Nc1 != Nc2:
        if Nc1 == 21841 and Nc2 == 1000:
            logger.info("loading ImageNet-22K weight to ImageNet-1K ......")
            map22kto1k_path = 'data/map22kto1k.txt'
            with open(map22kto1k_path) as f:
                map22kto1k = f.readlines()
            map22kto1k = [int(id22k.strip()) for id22k in map22kto1k]
            state_dict['head.weight'] = state_dict['head.weight'][map22kto1k, :]
            state_dict['head.bias'] = state_dict['head.bias'][map22kto1k]
        else:
            torch.nn.init.constant_(model.head.bias, 0.)
            torch.nn.init.constant_(model.head.weight, 0.)
            del state_dict['head.weight']
            del state_dict['head.bias']
            logger.warning(f"Error in loading classifier head, re-init classifier head to 0")
    msg = model.load_state_dict(state_dict, strict=False)
    logger.warning(msg)
    logger.info(f"=> loaded successfully '{config.MODEL.PRETRAINED}'")
    del checkpoint
    torch.cuda.empty_cache()
def save_checkpoint(config, epoch, model, max_accuracy, optimizer, lr_scheduler, logger):
    """Serialize the full training state to ``OUTPUT/ckpt_epoch_{epoch}.pth``.

    The checkpoint bundles model/optimizer/scheduler state dicts, the best
    accuracy so far, the epoch number and the config; apex AMP state is
    included only when mixed precision is enabled.
    """
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
        'max_accuracy': max_accuracy,
        'epoch': epoch,
        'config': config,
    }
    if config.AMP_OPT_LEVEL != "O0":
        # apex AMP loss-scale state (amp is None when apex is unavailable)
        state['amp'] = amp.state_dict()
    save_path = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')
    logger.info(f"{save_path} saving......")
    torch.save(state, save_path)
    logger.info(f"{save_path} saved !!!")
def get_grad_norm(parameters, norm_type=2):
    """Compute the total ``norm_type``-norm over the gradients of ``parameters``.

    Accepts a single tensor or an iterable of tensors; parameters without a
    gradient are skipped.  Returns 0.0 when no parameter has a gradient.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad.data for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    # sum of per-tensor norms raised to norm_type, then the norm_type-th root
    accumulated = sum(g.norm(norm_type).item() ** norm_type for g in grads)
    return accumulated ** (1. / norm_type)
def auto_resume_helper(output_dir):
    """Return the most recently modified ``*.pth`` checkpoint in ``output_dir``.

    Returns None when the directory holds no checkpoint files.
    """
    names = [entry for entry in os.listdir(output_dir) if entry.endswith('pth')]
    print(f"All checkpoints founded in {output_dir}: {names}")
    if not names:
        return None
    # pick the newest file by modification time
    latest = max((os.path.join(output_dir, entry) for entry in names),
                 key=os.path.getmtime)
    print(f"The latest checkpoint founded: {latest}")
    return latest
def reduce_tensor(tensor):
    """All-reduce ``tensor`` across processes and return the mean (input untouched)."""
    averaged = tensor.clone()
    dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
    averaged /= dist.get_world_size()
    return averaged
| 8,012 | 42.786885 | 117 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/optimizer.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
from torch import optim as optim
def build_optimizer(config, model):
    """
    Build optimizer, set weight decay of normalization to 0 by default.

    Honors the model's optional ``no_weight_decay`` / ``no_weight_decay_keywords``
    hooks when forming parameter groups.  Supports 'sgd' and 'adamw'; any
    other optimizer name yields ``None``.
    """
    skip = model.no_weight_decay() if hasattr(model, 'no_weight_decay') else {}
    skip_keywords = (model.no_weight_decay_keywords()
                     if hasattr(model, 'no_weight_decay_keywords') else {})
    param_groups = set_weight_decay(model, skip, skip_keywords)

    opt_name = config.TRAIN.OPTIMIZER.NAME.lower()
    optimizer = None
    if opt_name == 'sgd':
        optimizer = optim.SGD(param_groups,
                              momentum=config.TRAIN.OPTIMIZER.MOMENTUM,
                              nesterov=True,
                              lr=config.TRAIN.BASE_LR,
                              weight_decay=config.TRAIN.WEIGHT_DECAY)
    elif opt_name == 'adamw':
        optimizer = optim.AdamW(param_groups,
                                eps=config.TRAIN.OPTIMIZER.EPS,
                                betas=config.TRAIN.OPTIMIZER.BETAS,
                                lr=config.TRAIN.BASE_LR,
                                weight_decay=config.TRAIN.WEIGHT_DECAY)
    return optimizer
def set_weight_decay(model, skip_list=(), skip_keywords=()):
    """Split trainable parameters into decay / no-decay groups.

    1-D parameters (norm weights), biases, names in ``skip_list``, and names
    containing any of ``skip_keywords`` get weight_decay=0; everything else
    uses the optimizer's default decay.
    """
    decay_params = []
    no_decay_params = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        exempt = (
            len(param.shape) == 1
            or name.endswith(".bias")
            or name in skip_list
            or any(keyword in name for keyword in skip_keywords)
        )
        (no_decay_params if exempt else decay_params).append(param)
    return [{'params': decay_params},
            {'params': no_decay_params, 'weight_decay': 0.}]
def check_keywords_in_name(name, keywords=()):
    """Return True if any of ``keywords`` occurs as a substring of ``name``.

    Replaces the original accumulator loop with ``any``, which also
    short-circuits on the first match instead of scanning every keyword.
    """
    return any(keyword in name for keyword in keywords)
| 2,013 | 33.724138 | 111 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/models/boat_swin_transformer.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import math
from einops import rearrange, repeat
class Mlp(nn.Module):
    """Two-layer feed-forward block: fc1 -> activation -> dropout -> fc2 -> dropout.

    ``hidden_features`` and ``out_features`` default to ``in_features`` when
    not provided.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        # a single Dropout module is reused after both linear layers
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """
    Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C); H and W must be divisible by window_size
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    # expose the (h-block, w-block) grid, then bring the two in-window axes together
    grid = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """
    Reassemble windows produced by window_partition into a feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    windows_per_image = (H // window_size) * (W // window_size)
    B = windows.shape[0] // windows_per_image
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    # undo the block/in-window axis split performed by window_partition
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)
class ContentAttention(nn.Module):
    r""" Content-based multi-head attention (BOAT branch).

    Tokens are recursively bisected in feature space using a soft 2-means
    assignment on the queries, producing clusters of 49 tokens.  Attention is
    computed inside each cluster, extended by a small overlap of tokens
    borrowed from the sibling cluster, and a depth-wise 3x3 convolution over
    the values adds a local enhancement term.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
        kmeans: stored on the instance but not read anywhere in this class.
    """
    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., kmeans = False):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.ws = window_size
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.kmeans = kmeans
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.softmax = nn.Softmax(dim=-1)
        # depth-wise conv on the values, used as a local positional branch
        self.get_v = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1,groups=dim)
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (B, N, C); N must be a perfect
               square and a power-of-two multiple of 49 (see ntimes below).
            mask: unused; kept for interface parity with WindowAttention.
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)# 3, B_, self.num_heads,N,D
        if True:
            # --- hierarchical balanced bisection of tokens by query content ---
            q_pre = qkv[0].reshape(B_*self.num_heads,N, C // self.num_heads).permute(0,2,1)#qkv_pre[:,0].reshape(b*self.num_heads,qkvhd//3//self.num_heads,hh*ww)
            # number of bisections needed to reach clusters of 49 tokens
            ntimes = int(math.log(N//49,2))
            # NOTE(review): hard-coded .cuda() ties this module to GPU execution
            q_idx_last = torch.arange(N).cuda().unsqueeze(0).expand(B_*self.num_heads,N)
            for i in range(ntimes):
                bh,d,n = q_pre.shape
                # initialize the two centroids from the two halves' means
                q_pre_new = q_pre.reshape(bh,d,2,n//2)
                q_avg = q_pre_new.mean(dim=-1)#.reshape(b*self.num_heads,qkvhd//3//self.num_heads,)
                q_avg = torch.nn.functional.normalize(q_avg,dim=-2)
                iters = 2
                # two soft k-means refinement steps (assignments detached)
                for i in range(iters):
                    q_scores = torch.nn.functional.normalize(q_pre.permute(0,2,1),dim=-1).bmm(q_avg)
                    soft_assign = torch.nn.functional.softmax(q_scores*100, dim=-1).detach()
                    q_avg = q_pre.bmm(soft_assign)
                    q_avg = torch.nn.functional.normalize(q_avg,dim=-2)
                # rank tokens by affinity ratio to the two centroids, then
                # split the sorted order in half -> balanced clusters
                q_scores = torch.nn.functional.normalize(q_pre.permute(0,2,1),dim=-1).bmm(q_avg).reshape(bh,n,2)#.unsqueeze(2)
                q_idx = (q_scores[:,:,0]+1)/(q_scores[:,:,1]+1)
                _,q_idx = torch.sort(q_idx,dim=-1)
                q_idx_last = q_idx_last.gather(dim=-1,index=q_idx).reshape(bh*2,n//2)
                q_idx = q_idx.unsqueeze(1).expand(q_pre.size())
                q_pre = q_pre.gather(dim=-1,index=q_idx).reshape(bh,d,2,n//2).permute(0,2,1,3).reshape(bh*2,d,n//2)
            # permutation (and its inverse) that maps tokens into cluster order
            q_idx = q_idx_last.view(B_,self.num_heads,N)
            _,q_idx_rev = torch.sort(q_idx,dim=-1)
            q_idx = q_idx.unsqueeze(0).unsqueeze(4).expand(qkv.size())
            qkv_pre = qkv.gather(dim=-2,index=q_idx)
            q, k, v = rearrange(qkv_pre, 'qkv b h (nw ws) c -> qkv (b nw) h ws c', ws=49)
            # borrow tokens from the sibling cluster as key/value overlap
            # (20 from the front of one, the last 20 of the other)
            k = k.view(B_*((N//49))//2,2,self.num_heads,49,-1)
            k_over1 = k[:,1,:,:20].unsqueeze(1)#.expand(-1,2,-1,-1,-1)
            k_over2 = k[:,0,:,29:].unsqueeze(1)#.expand(-1,2,-1,-1,-1)
            k_over = torch.cat([k_over1,k_over2],1)
            k = torch.cat([k,k_over],3).contiguous().view(B_*((N//49)),self.num_heads,49+20,-1)
            v = v.view(B_*((N//49))//2,2,self.num_heads,49,-1)
            v_over1 = v[:,1,:,:20].unsqueeze(1)#.expand(-1,2,-1,-1,-1)
            v_over2 = v[:,0,:,29:].unsqueeze(1)#.expand(-1,2,-1,-1,-1)
            v_over = torch.cat([v_over1,v_over2],1)
            v = torch.cat([v,v_over],3).contiguous().view(B_*((N//49)),self.num_heads,49+20,-1)
        # standard scaled dot-product attention within each (overlapped) cluster
        attn = (q @ k.transpose(-2, -1))*self.scale
        attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        out = attn @ v
        if True:
            # undo the cluster permutation and add the depth-wise conv branch
            out = rearrange(out, '(b nw) h ws d -> b (h d) nw ws', h=self.num_heads, b=B_)
            v = rearrange(v[:,:,:49,:], '(b nw) h ws d -> b h d (nw ws)', h=self.num_heads, b=B_)
            W = int(math.sqrt(N))
            out = out.reshape(B_,self.num_heads,C//self.num_heads,-1)
            q_idx_rev = q_idx_rev.unsqueeze(2).expand(out.size())
            x = out.gather(dim=-1,index=q_idx_rev).reshape(B_,C,N).permute(0,2,1)
            # values back in spatial order, enhanced by a 3x3 depth-wise conv
            v = v.gather(dim=-1,index=q_idx_rev).reshape(B_,C,W,W)
            v = self.get_v(v)
            v = v.reshape(B_,C,N).permute(0,2,1)
            x = x + v
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
        kmeans: stored on the instance but not read anywhere in this class.
    """
    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., kmeans = False):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.ws = window_size
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.kmeans = kmeans
        # define a parameter table of relative position bias
        if True:
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(self.window_size[0])
            coords_w = torch.arange(self.window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += self.window_size[1] - 1
            # row-major flattening of the (dh, dw) offset pair into one index
            relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
            relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            self.register_buffer("relative_position_index", relative_position_index)
            trunc_normal_(self.relative_position_bias_table, std=.02)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        #trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        # learned relative position bias, looked up per token pair, per head
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            # add the per-window shifted-window mask (0 or -100) before softmax
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        #  x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block (BOAT variant).

    Standard Swin block (W-MSA / SW-MSA + MLP); in shifted blocks
    (shift_size > 0) an additional content-based attention branch
    (``ContentAttention``) is applied after the window attention residual.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resulotion.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        # NOTE(review): kmeans=shift_size passes an int where the attention
        # classes declare a bool; the value is stored but never read.
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, kmeans=shift_size)
        if self.shift_size > 0:
            # extra content-based attention branch, only on shifted blocks
            self.attnC = ContentAttention(
                dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
                qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, kmeans=shift_size)
            self.norm4 = norm_layer(dim);
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        #self.local = nn.Conv2d(dim, dim, window_size, 1, window_size//2, groups=dim, bias=qkv_bias)
        #self.norm3 = norm_layer(dim)
        #self.norm4 = norm_layer(dim)
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            # label each of the 9 shifted regions with a distinct id
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            # pairs of tokens from different regions get -100 (softmax -> ~0)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
    def forward(self, x):
        """x: (B, H*W, C) -> (B, H*W, C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            #x = self.attn(x.view(B,H*W,C))
        else:
            shifted_x = x
        if True:
            # partition windows
            x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
            x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
            # W-MSA/SW-MSA
            attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
            # merge windows
            attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
            shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        if self.shift_size > 0:
            # BOAT: content-attention residual (note: no drop_path on this branch)
            x = x + self.attnC(self.norm4(x))
        b,n,c = x.shape
        #w = int(math.sqrt(n))
        #x = self.norm3(x)
        #x = x.view(b,w,w,c).permute(0,3,1,2)
        #x = x + self.local(x)
        #x = x.permute(0,2,3,1).reshape(b,n,c)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
    def flops(self):
        # NOTE(review): does not count the ContentAttention branch of shifted blocks
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Downsamples a (B, H*W, C) feature map by 2x in each spatial dimension:
    the four pixels of every 2x2 patch are concatenated channel-wise (4C),
    normalized, and linearly reduced to 2C.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        grid = x.view(B, H, W, C)
        # the four corners of each 2x2 patch, in (0,0),(1,0),(0,1),(1,1) order
        quadrants = [grid[:, r::2, c::2, :] for r, c in ((0, 0), (1, 0), (0, 1), (1, 1))]
        merged = torch.cat(quadrants, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        flops = H * W * self.dim
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return flops
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Stacks ``depth`` SwinTransformerBlocks (alternating non-shifted / shifted
    via ``shift_size``) followed by an optional downsampling layer.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even indices are plain W-MSA, odd indices are shifted
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x):
        """Run all blocks (optionally under activation checkpointing), then downsample."""
        for blk in self.blocks:
            if self.use_checkpoint:
                # trade compute for memory: recompute activations in backward
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding

    Projects an image into a sequence of patch tokens using a strided
    convolution, optionally followed by a normalization layer.

    Args:
        img_size (int): Image size.  Default: 224.
        patch_size (int): Patch token size.  Default: 4.
        in_chans (int): Number of input image channels.  Default: 3.
        embed_dim (int): Number of linear projection output channels.  Default: 96.
        norm_layer (nn.Module, optional): Normalization layer.  Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # non-overlapping patch projection: kernel == stride == patch size
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        tokens = self.proj(x).flatten(2).transpose(1, 2)  # B Ph*Pw C
        return tokens if self.norm is None else self.norm(tokens)

    def flops(self):
        Ho, Wo = self.patches_resolution
        flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
        if self.norm is not None:
            flops += Ho * Wo * self.embed_dim
        return flops
class SwinTransformer(nn.Module):
    r""" Swin Transformer
        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`  -
          https://arxiv.org/pdf/2103.14030

    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, **kwargs):
        super().__init__()

        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # Channel width of the final stage: embed_dim doubles at every downsample.
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # absolute position embedding (optional; attention windows already carry
        # a learned relative position bias)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth: drop-path rate grows linearly from 0 to
        # drop_path_rate across all blocks of all stages
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers (stages); resolution halves and width doubles per stage
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        self.norm = norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights; zeros/ones for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            # NOTE(review): the inner isinstance re-check is redundant (m is
            # already known to be nn.Linear here); kept as-is.
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by the optimizer builder.
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        # Any parameter whose name contains one of these keywords skips weight decay.
        return {'relative_position_bias_table'}

    def forward_features(self, x):
        """Map images (B, C, H, W) to globally pooled features (B, num_features)."""
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x = layer(x)

        x = self.norm(x)  # B L C
        x = self.avgpool(x.transpose(1, 2))  # B C 1
        x = torch.flatten(x, 1)
        return x

    def forward(self, x):
        """Full classification forward pass: backbone features then linear head."""
        x = self.forward_features(x)
        x = self.head(x)
        return x

    def flops(self):
        """Approximate total FLOPs for one input image."""
        flops = 0
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        # final norm over the last-stage tokens + classifier head
        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
        flops += self.num_features * self.num_classes
        return flops
| 30,671 | 41.074074 | 161 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/models/swin_mlp.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Two-layer feed-forward network: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Fall back to the input width when hidden/output widths are not given.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Project up, activate, drop, project down, drop."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """
    Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    num_h, num_w = H // window_size, W // window_size
    # Expose the window grid as separate axes, then bring the two
    # within-window axes next to each other before flattening.
    tiles = x.view(B, num_h, window_size, num_w, window_size, C)
    tiles = tiles.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiles.view(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """
    Inverse of window_partition: reassemble windows into the full feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    num_h, num_w = H // window_size, W // window_size
    B = int(windows.shape[0] / (num_h * num_w))
    x = windows.view(B, num_h, num_w, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)
class SwinMLPBlock(nn.Module):
    r""" Swin MLP Block.

    A window-based token-mixing block: instead of self-attention, the tokens
    inside each (optionally shifted) local window are mixed by a grouped 1x1
    convolution acting as a multi-head spatial MLP, followed by a channel MLP.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        # Shifting is realized by zero-padding the borders (rather than the
        # cyclic roll used by Swin attention blocks); padded area is cropped
        # off again after window mixing.
        self.padding = [self.window_size - self.shift_size, self.shift_size,
                        self.window_size - self.shift_size, self.shift_size]  # P_l,P_r,P_t,P_b

        self.norm1 = norm_layer(dim)
        # use group convolution to implement multi-head MLP: each head mixes its
        # own window_size**2 spatial positions independently
        self.spatial_mlp = nn.Conv1d(self.num_heads * self.window_size ** 2,
                                     self.num_heads * self.window_size ** 2,
                                     kernel_size=1,
                                     groups=self.num_heads)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        """Apply spatial (window) MLP and channel MLP, each on a residual branch.

        Args:
            x: tensor of shape (B, H*W, C) where (H, W) == self.input_resolution.
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # shift: zero-pad so the window grid is displaced by shift_size
        if self.shift_size > 0:
            P_l, P_r, P_t, P_b = self.padding
            shifted_x = F.pad(x, [0, 0, P_l, P_r, P_t, P_b], "constant", 0)
        else:
            shifted_x = x
        _, _H, _W, _ = shifted_x.shape

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # Window/Shifted-Window Spatial MLP
        x_windows_heads = x_windows.view(-1, self.window_size * self.window_size, self.num_heads, C // self.num_heads)
        x_windows_heads = x_windows_heads.transpose(1, 2)  # nW*B, nH, window_size*window_size, C//nH
        x_windows_heads = x_windows_heads.reshape(-1, self.num_heads * self.window_size * self.window_size,
                                                  C // self.num_heads)
        # Grouped 1x1 conv mixes the window_size**2 positions within each head.
        spatial_mlp_windows = self.spatial_mlp(x_windows_heads)  # nW*B, nH*window_size*window_size, C//nH
        spatial_mlp_windows = spatial_mlp_windows.view(-1, self.num_heads, self.window_size * self.window_size,
                                                       C // self.num_heads).transpose(1, 2)
        spatial_mlp_windows = spatial_mlp_windows.reshape(-1, self.window_size * self.window_size, C)

        # merge windows
        spatial_mlp_windows = spatial_mlp_windows.reshape(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(spatial_mlp_windows, self.window_size, _H, _W)  # B H' W' C

        # reverse shift: crop the padded border back off
        if self.shift_size > 0:
            P_l, P_r, P_t, P_b = self.padding
            x = shifted_x[:, P_t:-P_b, P_l:-P_r, :].contiguous()
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"

    def flops(self):
        """Approximate FLOPs; treats each window's spatial MLP as dense mixing."""
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # Window/Shifted-Window Spatial MLP
        if self.shift_size > 0:
            # padding enlarges the window grid by one in each dimension
            nW = (H / self.window_size + 1) * (W / self.window_size + 1)
        else:
            nW = H * W / self.window_size / self.window_size
        flops += nW * self.dim * (self.window_size * self.window_size) * (self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        # Each 2x2 neighbourhood is concatenated (4*dim) then projected to 2*dim.
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        grid = x.view(B, H, W, C)
        # Gather the four pixels of every 2x2 block; the order matches the
        # reference implementation: (even,even), (odd,even), (even,odd), (odd,odd).
        corners = [grid[:, r::2, c::2, :] for r, c in ((0, 0), (1, 0), (0, 1), (1, 1))]
        merged = torch.cat(corners, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        # norm over all tokens + the 4C -> 2C linear reduction
        return H * W * self.dim + (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
class BasicLayer(nn.Module):
    """ A basic Swin MLP layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., drop=0., drop_path=0.,
                 norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Per-block stochastic-depth rate: a shared scalar or one entry per block.
        rates = drop_path if isinstance(drop_path, list) else [drop_path] * depth
        # Even-indexed blocks use regular windows, odd-indexed ones shifted windows.
        self.blocks = nn.ModuleList(
            SwinMLPBlock(dim=dim, input_resolution=input_resolution,
                         num_heads=num_heads, window_size=window_size,
                         shift_size=(window_size // 2) * (i % 2),
                         mlp_ratio=mlp_ratio,
                         drop=drop,
                         drop_path=rates[i],
                         norm_layer=norm_layer)
            for i in range(depth))

        # optional patch-merging downsample at the end of the stage
        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)

    def forward(self, x):
        """Run all blocks (optionally under gradient checkpointing), then downsample."""
        for blk in self.blocks:
            x = checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)
        return x if self.downsample is None else self.downsample(x)

    def extra_repr(self) -> str:
        return "dim={}, input_resolution={}, depth={}".format(
            self.dim, self.input_resolution, self.depth)

    def flops(self):
        total = sum(blk.flops() for blk in self.blocks)
        if self.downsample is not None:
            total += self.downsample.flops()
        return total
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding

    Args:
        img_size (int): Image size.  Default: 224.
        patch_size (int): Patch token size.  Default: 4.
        in_chans (int): Number of input image channels.  Default: 3.
        embed_dim (int): Number of linear projection output channels.  Default: 96.
        norm_layer (nn.Module, optional): Normalization layer.  Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # Strided convolution: cuts the image into patches and projects each
        # patch to embed_dim channels in one shot.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = None if norm_layer is None else norm_layer(embed_dim)

    def forward(self, x):
        """Embed (B, C, H, W) images into (B, num_patches, embed_dim) tokens."""
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        out = self.proj(x)                    # B, embed_dim, Ph, Pw
        out = out.flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is not None:
            out = self.norm(out)
        return out

    def flops(self):
        """FLOPs of the patch projection (plus the optional norm)."""
        Ho, Wo = self.patches_resolution
        kernel_area = self.patch_size[0] * self.patch_size[1]
        flops = Ho * Wo * self.embed_dim * self.in_chans * kernel_area
        if self.norm is not None:
            flops += Ho * Wo * self.embed_dim
        return flops
class SwinMLP(nn.Module):
    r""" Swin MLP

    A hierarchical vision backbone in which the window self-attention of
    Swin Transformer is replaced by grouped-MLP token mixing (SwinMLPBlock).

    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin MLP layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        drop_rate (float): Dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, **kwargs):
        super().__init__()

        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # Channel width of the last stage: embed_dim doubles at every merge.
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # absolute position embedding (optional)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth: drop-path rate grows linearly across all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers (stages); resolution halves and width doubles per stage
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               drop=drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        self.norm = norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear/Conv1d (spatial MLP) weights; zeros/ones for LayerNorm."""
        if isinstance(m, (nn.Linear, nn.Conv1d)):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by the optimizer builder.
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        # NOTE(review): kept for API parity with SwinTransformer; SwinMLP itself
        # defines no relative_position_bias_table parameters.
        return {'relative_position_bias_table'}

    def forward_features(self, x):
        """Map images (B, C, H, W) to globally pooled features (B, num_features)."""
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x = layer(x)

        x = self.norm(x)  # B L C
        x = self.avgpool(x.transpose(1, 2))  # B C 1
        x = torch.flatten(x, 1)
        return x

    def forward(self, x):
        """Classification forward pass: backbone features then linear head."""
        x = self.forward_features(x)
        x = self.head(x)
        return x

    def flops(self):
        """Approximate total FLOPs for one input image."""
        flops = 0
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        # final norm over last-stage tokens + classifier head
        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
        flops += self.num_features * self.num_classes
        return flops
| 18,508 | 38.464819 | 118 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/models/swin_transformer.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Position-wise feed-forward block: two linear layers with activation and dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Unspecified widths default to the input width.
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """fc1 -> activation -> dropout -> fc2 -> dropout."""
        for layer in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = layer(x)
        return x
def window_partition(x, window_size):
    """
    Cut a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    # Split H and W into (grid, window) axis pairs, then swap the inner grid
    # axis with the row-within-window axis so each window is contiguous.
    x = x.reshape(B, H // window_size, window_size, W // window_size, window_size, C)
    x = x.transpose(2, 3).contiguous()
    return x.reshape(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """
    Stitch windows back into the full feature map (inverse of window_partition).

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.reshape(B, H // window_size, W // window_size, window_size, window_size, -1)
    # Undo the axis swap performed by window_partition.
    x = x.transpose(2, 3).contiguous()
    return x.reshape(B, H, W, -1)
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias:
        # one learnable scalar per head for every possible (dh, dw) offset
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # flatten the 2-D offset into a single index into the bias table
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # single projection produces q, k and v for all heads at once
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # add the learned relative position bias, gathered per token pair
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # broadcast the per-window mask over batch and heads; -100 entries
            # drive the corresponding attention weights to ~0 after softmax
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    Windowed multi-head self-attention (optionally shifted) followed by a
    channel MLP, each wrapped in a pre-norm residual branch with drop-path.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            # calculate attention mask for SW-MSA: tokens brought together only
            # by the cyclic shift must not attend to each other
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            # label each of the 3x3 shifted regions with a distinct id
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            # position pairs with differing region ids receive -100 (masked out)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)

    def forward(self, x):
        """(Shifted-)window attention plus MLP, each with residual + drop-path.

        Args:
            x: tensor of shape (B, H*W, C) where (H, W) == self.input_resolution.
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"

    def flops(self):
        """Approximate FLOPs of one block."""
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        # 2x2 neighbourhoods are concatenated (4*dim) and projected down to 2*dim.
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        x = x.view(B, H, W, C)
        # Stack the four members of every 2x2 block along the channel axis;
        # order matches the reference: (even,even), (odd,even), (even,odd), (odd,odd).
        x = torch.cat((x[:, 0::2, 0::2, :],
                       x[:, 1::2, 0::2, :],
                       x[:, 0::2, 1::2, :],
                       x[:, 1::2, 1::2, :]), -1).view(B, -1, 4 * C)
        x = self.norm(x)
        return self.reduction(x)

    def extra_repr(self) -> str:
        return "input_resolution={}, dim={}".format(self.input_resolution, self.dim)

    def flops(self):
        H, W = self.input_resolution
        # norm over all tokens + the 4C -> 2C linear reduction
        return H * W * self.dim + (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Per-block stochastic-depth rate: a shared scalar or one entry per block.
        rates = drop_path if isinstance(drop_path, list) else [drop_path] * depth
        # Even-indexed blocks use regular windows, odd-indexed ones shifted windows.
        self.blocks = nn.ModuleList(
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=(window_size // 2) * (i % 2),
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=rates[i],
                                 norm_layer=norm_layer)
            for i in range(depth))

        # optional patch-merging downsample at the end of the stage
        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)

    def forward(self, x):
        """Run all blocks (optionally under gradient checkpointing), then downsample."""
        for blk in self.blocks:
            x = checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)
        return x if self.downsample is None else self.downsample(x)

    def extra_repr(self) -> str:
        return "dim={}, input_resolution={}, depth={}".format(
            self.dim, self.input_resolution, self.depth)

    def flops(self):
        total = sum(blk.flops() for blk in self.blocks)
        if self.downsample is not None:
            total += self.downsample.flops()
        return total
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding
    Splits an image into non-overlapping patches with a strided convolution and
    flattens them into a token sequence.
    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # A patch_size x patch_size conv with matching stride is exactly a
        # per-patch linear projection.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None
    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x).flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is not None:
            x = self.norm(x)
        return x
    def flops(self):
        out_h, out_w = self.patches_resolution
        # Convolution cost per output token, plus the optional norm cost.
        total = out_h * out_w * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
        if self.norm is not None:
            total += out_h * out_w * self.embed_dim
        return total
class SwinTransformer(nn.Module):
    r""" Swin Transformer
    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
    https://arxiv.org/pdf/2103.14030
    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, **kwargs):
        super().__init__()
        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # Channel width after the final stage; width doubles at every stage.
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth
        # Linearly increasing per-block drop-path rates across all blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            # Each stage halves the spatial resolution (via PatchMerging) and
            # doubles the channel width, except the last stage (no downsample).
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)
        # Classifier head: final norm, global average pool over tokens, linear.
        self.norm = norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for norm layers.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by the optimizer builder.
        return {'absolute_pos_embed'}
    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}
    def forward_features(self, x):
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for layer in self.layers:
            x = layer(x)
        x = self.norm(x)  # B L C
        x = self.avgpool(x.transpose(1, 2))  # B C 1
        x = torch.flatten(x, 1)
        return x
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
    def flops(self):
        flops = 0
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        # Final norm over the last-stage token grid, then the classifier head.
        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
        flops += self.num_features * self.num_classes
        return flops
| 24,234 | 40.356655 | 119 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/data/samplers.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
class SubsetRandomSampler(torch.utils.data.Sampler):
    r"""Yield the given indices in a fresh random order on each iteration.

    Arguments:
        indices (sequence): a sequence of indices
    """
    def __init__(self, indices):
        # epoch mirrors DistributedSampler's API; it does not affect shuffling here.
        self.epoch = 0
        self.indices = indices
    def __iter__(self):
        # A new permutation is drawn from torch's global RNG every time
        # iteration starts (i.e. once per epoch).
        order = torch.randperm(len(self.indices))
        return (self.indices[position] for position in order)
    def __len__(self):
        return len(self.indices)
    def set_epoch(self, epoch):
        self.epoch = epoch
| 781 | 25.066667 | 84 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/data/build.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import os
import torch
import numpy as np
import torch.distributed as dist
from torchvision import datasets, transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import Mixup
from timm.data import create_transform
from .cached_image_folder import CachedImageFolder
from .samplers import SubsetRandomSampler
# Newer torchvision exposes InterpolationMode enums; older setups fall back to
# timm's helper which returns PIL integer constants.
try:
    from torchvision.transforms import InterpolationMode
    def _pil_interp(method):
        """Map an interpolation name ('bicubic'/'lanczos'/'hamming'/other) to a mode."""
        if method == 'bicubic':
            return InterpolationMode.BICUBIC
        elif method == 'lanczos':
            return InterpolationMode.LANCZOS
        elif method == 'hamming':
            return InterpolationMode.HAMMING
        else:
            # default bilinear, do we want to allow nearest?
            return InterpolationMode.BILINEAR
except ImportError:
    # Narrowed from a bare `except:` — only a missing/old torchvision should
    # trigger the fallback, not unrelated errors (e.g. KeyboardInterrupt).
    from timm.data.transforms import _pil_interp
def build_loader(config):
    """Build train/val datasets, distributed samplers, loaders and mixup.

    Returns:
        (dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn)
        where mixup_fn is None when neither mixup nor cutmix is enabled.
    """
    # NUM_CLASSES is discovered from the dataset, so the config is briefly unfrozen.
    config.defrost()
    dataset_train, config.MODEL.NUM_CLASSES = build_dataset(is_train=True, config=config)
    config.freeze()
    print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
    dataset_val, _ = build_dataset(is_train=False, config=config)
    print(f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
    num_tasks = dist.get_world_size()
    global_rank = dist.get_rank()
    if config.DATA.ZIP_MODE and config.DATA.CACHE_MODE == 'part':
        # Part-cache mode: each rank caches/samples a strided slice of indices.
        indices = np.arange(dist.get_rank(), len(dataset_train), dist.get_world_size())
        sampler_train = SubsetRandomSampler(indices)
    else:
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
    if config.TEST.SEQUENTIAL:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_val = torch.utils.data.distributed.DistributedSampler(
            dataset_val, shuffle=False
        )
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=config.DATA.BATCH_SIZE,
        num_workers=config.DATA.NUM_WORKERS,
        pin_memory=config.DATA.PIN_MEMORY,
        drop_last=True,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=config.DATA.BATCH_SIZE,
        shuffle=False,
        num_workers=config.DATA.NUM_WORKERS,
        pin_memory=config.DATA.PIN_MEMORY,
        drop_last=False
    )
    # setup mixup / cutmix
    mixup_fn = None
    mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,
            prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,
            label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)
    return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn
def build_dataset(is_train, config):
    """Build the ImageNet dataset for one split; returns (dataset, num_classes)."""
    transform = build_transform(is_train, config)
    dataset_name = config.DATA.DATASET
    # Only ImageNet-1k is implemented; reject everything else up front.
    if dataset_name == 'imagenet22K':
        raise NotImplementedError("Imagenet-22K will come soon.")
    if dataset_name != 'imagenet':
        raise NotImplementedError("We only support ImageNet Now.")
    split = 'train' if is_train else 'val'
    if config.DATA.ZIP_MODE:
        # Zip-backed dataset: annotation file maps members of <split>.zip to labels.
        ann_file = split + "_map.txt"
        zip_prefix = split + ".zip@/"
        dataset = CachedImageFolder(config.DATA.DATA_PATH, ann_file, zip_prefix, transform,
                                    cache_mode=config.DATA.CACHE_MODE if is_train else 'part')
    else:
        dataset = datasets.ImageFolder(os.path.join(config.DATA.DATA_PATH, split), transform=transform)
    return dataset, 1000
def build_transform(is_train, config):
    """Return the torchvision transform pipeline for training or evaluation."""
    resize_im = config.DATA.IMG_SIZE > 32
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        transform = create_transform(
            input_size=config.DATA.IMG_SIZE,
            is_training=True,
            color_jitter=config.AUG.COLOR_JITTER if config.AUG.COLOR_JITTER > 0 else None,
            auto_augment=config.AUG.AUTO_AUGMENT if config.AUG.AUTO_AUGMENT != 'none' else None,
            re_prob=config.AUG.REPROB,
            re_mode=config.AUG.REMODE,
            re_count=config.AUG.RECOUNT,
            interpolation=config.DATA.INTERPOLATION,
        )
        if not resize_im:
            # Small images (e.g. CIFAR-sized): replace the random resized crop
            # with a plain padded RandomCrop.
            transform.transforms[0] = transforms.RandomCrop(config.DATA.IMG_SIZE, padding=4)
        return transform
    # Evaluation pipeline.
    eval_ops = []
    if resize_im:
        if config.TEST.CROP:
            # Resize keeping the standard 256/224 ratio, then center-crop.
            ratio_size = int((256 / 224) * config.DATA.IMG_SIZE)
            eval_ops.append(
                transforms.Resize(ratio_size, interpolation=_pil_interp(config.DATA.INTERPOLATION)))
            eval_ops.append(transforms.CenterCrop(config.DATA.IMG_SIZE))
        else:
            eval_ops.append(
                transforms.Resize((config.DATA.IMG_SIZE, config.DATA.IMG_SIZE),
                                  interpolation=_pil_interp(config.DATA.INTERPOLATION)))
    eval_ops.append(transforms.ToTensor())
    eval_ops.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(eval_ops)
| 5,877 | 37.927152 | 113 | py |
pytorch-boat | pytorch-boat-main/BOAT-Swin/data/cached_image_folder.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import io
import os
import time
import torch.distributed as dist
import torch.utils.data as data
from PIL import Image
from .zipreader import is_zip_path, ZipReader
def has_file_allowed_extension(filename, extensions):
    """Checks if a file is an allowed extension.
    Args:
        filename (string): path to a file
    Returns:
        bool: True if the filename ends with a known image extension
    """
    lowered = filename.lower()
    for extension in extensions:
        if lowered.endswith(extension):
            return True
    return False
def find_classes(dir):
    """Map each immediate subdirectory name of *dir* to a class index.

    Indices follow the lexicographically sorted directory names, so they are
    stable across runs on the same directory tree.
    """
    classes = sorted(
        entry for entry in os.listdir(dir) if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: index for index, name in enumerate(classes)}
    return classes, class_to_idx
def make_dataset(dir, class_to_idx, extensions):
    """Collect (path, class_index) pairs for every allowed file under *dir*.

    Walks each class subdirectory recursively in sorted order, keeping only
    files whose extension is in *extensions*.
    """
    samples = []
    dir = os.path.expanduser(dir)
    for class_name in sorted(os.listdir(dir)):
        class_dir = os.path.join(dir, class_name)
        if not os.path.isdir(class_dir):
            # Skip stray files sitting next to the class directories.
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if not has_file_allowed_extension(fname, extensions):
                    continue
                samples.append((os.path.join(root, fname), class_to_idx[class_name]))
    return samples
def make_dataset_with_ann(ann_file, img_prefix, extensions):
    """Read a tab-separated annotation file into (image_path, class_index) pairs.

    Each line is '<relative_path>\\t<class_index>'; paths are joined onto
    *img_prefix* and must carry an extension from *extensions*.
    """
    samples = []
    with open(ann_file, "r") as ann:
        for line in ann.readlines():
            fields = line.split('\t')
            rel_path = fields[0]
            label = int(fields[1])
            # Reject entries whose extension is not an allowed image type.
            assert str.lower(os.path.splitext(rel_path)[-1]) in extensions
            samples.append((os.path.join(img_prefix, rel_path), label))
    return samples
class DatasetFolder(data.Dataset):
    """A generic data loader where the samples are arranged in this way: ::
        root/class_x/xxx.ext
        root/class_x/xxy.ext
        root/class_x/xxz.ext
        root/class_y/123.ext
        root/class_y/nsdf3.ext
        root/class_y/asd932_.ext
    Args:
        root (string): Root directory path.
        loader (callable): A function to load a sample given its path.
        extensions (list[string]): A list of allowed extensions.
        ann_file (string, optional): annotation file; when non-empty, samples
            are read from it (zip mode) instead of walking the directory tree.
        img_prefix (string, optional): prefix joined onto annotated paths.
        transform (callable, optional): A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.
        cache_mode (string): "no" (default), "part" (each rank caches a strided
            shard in memory) or "full" (cache every sample's bytes).
    Attributes:
        samples (list): List of (sample path, class_index) tuples
    """
    def __init__(self, root, loader, extensions, ann_file='', img_prefix='', transform=None, target_transform=None,
                 cache_mode="no"):
        # image folder mode
        if ann_file == '':
            _, class_to_idx = find_classes(root)
            samples = make_dataset(root, class_to_idx, extensions)
        # zip mode
        else:
            samples = make_dataset_with_ann(os.path.join(root, ann_file),
                                            os.path.join(root, img_prefix),
                                            extensions)
        if len(samples) == 0:
            raise (RuntimeError("Found 0 files in subfolders of: " + root + "\n" +
                                "Supported extensions are: " + ",".join(extensions)))
        self.root = root
        self.loader = loader
        self.extensions = extensions
        self.samples = samples
        self.labels = [y_1k for _, y_1k in samples]
        self.classes = list(set(self.labels))
        self.transform = transform
        self.target_transform = target_transform
        self.cache_mode = cache_mode
        if self.cache_mode != "no":
            self.init_cache()
    def init_cache(self):
        """Replace sample paths with raw bytes (requires torch.distributed init).

        In "part" mode each rank keeps bytes only for its strided shard of
        indices; other entries keep the path and are read lazily.
        """
        assert self.cache_mode in ["part", "full"]
        n_sample = len(self.samples)
        global_rank = dist.get_rank()
        world_size = dist.get_world_size()
        samples_bytes = [None for _ in range(n_sample)]
        start_time = time.time()
        for index in range(n_sample):
            # NOTE(review): n_sample // 10 is 0 for datasets with < 10 items,
            # which would raise ZeroDivisionError here — confirm datasets are large.
            if index % (n_sample // 10) == 0:
                t = time.time() - start_time
                print(f'global_rank {dist.get_rank()} cached {index}/{n_sample} takes {t:.2f}s per block')
                start_time = time.time()
            path, target = self.samples[index]
            if self.cache_mode == "full":
                samples_bytes[index] = (ZipReader.read(path), target)
            elif self.cache_mode == "part" and index % world_size == global_rank:
                samples_bytes[index] = (ZipReader.read(path), target)
            else:
                samples_bytes[index] = (path, target)
        self.samples = samples_bytes
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
    def __len__(self):
        return len(self.samples)
    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
# File extensions treated as loadable images throughout this module.
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def pil_loader(path):
    """Load an image as an RGB PIL image.

    *path* may be raw image bytes (from the in-memory cache), a zip member
    path, or a regular filesystem path.
    """
    if isinstance(path, bytes):
        img = Image.open(io.BytesIO(path))
    elif is_zip_path(path):
        img = Image.open(io.BytesIO(ZipReader.read(path)))
    else:
        # open path as file to avoid ResourceWarning
        # (https://github.com/python-pillow/Pillow/issues/835); convert while
        # the file is still open so the pixel data is actually read.
        with open(path, 'rb') as f:
            return Image.open(f).convert('RGB')
    return img.convert('RGB')
def accimage_loader(path):
    """Load *path* with the accimage backend, falling back to PIL on decode errors."""
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
def default_img_loader(path):
    """Dispatch to whichever image backend torchvision is configured with."""
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    return pil_loader(path)
class CachedImageFolder(DatasetFolder):
    """A generic data loader where the images are arranged in this way: ::
        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/xxz.png
        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png
    Args:
        root (string): Root directory path.
        ann_file (string, optional): annotation file for zip mode (see DatasetFolder).
        img_prefix (string, optional): prefix joined onto annotated paths.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
        cache_mode (string): "no", "part" or "full" in-memory caching (see DatasetFolder).
    Attributes:
        imgs (list): List of (image path, class_index) tuples
    """
    def __init__(self, root, ann_file='', img_prefix='', transform=None, target_transform=None,
                 loader=default_img_loader, cache_mode="no"):
        super(CachedImageFolder, self).__init__(root, loader, IMG_EXTENSIONS,
                                                ann_file=ann_file, img_prefix=img_prefix,
                                                transform=transform, target_transform=target_transform,
                                                cache_mode=cache_mode)
        # Alias kept for torchvision.ImageFolder API compatibility.
        self.imgs = self.samples
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        image = self.loader(path)
        if self.transform is not None:
            img = self.transform(image)
        else:
            img = image
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
| 9,026 | 34.679842 | 115 | py |
ehrdiff | ehrdiff-main/diffusion_util.py | # -----------------------------------
# Code adapted from:
# https://github.com/lucidrains/denoising-diffusion-pytorch
# -----------------------------------
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
def exists(val):
    """True when *val* carries a value; falsy values like 0 or '' still exist."""
    return not (val is None)
def default(val, d):
    """Return *val* when set, otherwise the fallback *d* (called first if callable)."""
    if val is not None:
        return val
    return d() if callable(d) else d
def log(t, eps=1e-20):
    """Numerically safe logarithm: inputs below *eps* are clamped first."""
    clamped = t.clamp(min=eps)
    return torch.log(clamped)
class Block(nn.Module):
    """Linear block with optional additive time-embedding conditioning.

    When ``time_emb_dim`` is given, the time embedding is projected into the
    input space and added to the input before the ReLU -> Linear projection.
    """
    def __init__(self, dim_in, dim_out, *, time_emb_dim=None):
        super().__init__()
        if time_emb_dim is not None:
            # Projects the time embedding so it can be added to the input.
            self.time_mlp = nn.Sequential(nn.Linear(time_emb_dim, dim_in))
        self.out_proj = nn.Sequential(nn.ReLU(), nn.Linear(dim_in, dim_out))
    def forward(self, x, time_emb=None):
        conditioned = x if time_emb is None else x + self.time_mlp(time_emb)
        return self.out_proj(conditioned)
class LinearModel(nn.Module):
    """MLP denoiser: sinusoidal time embedding plus a stack of `Block`s mapping z -> z."""
    def __init__(
        self, *,
        z_dim,
        time_dim,
        unit_dims,
    ):
        super().__init__()
        # Sinusoidal features of the noise level, refined by a small MLP.
        self.time_embedding = nn.Sequential(
            SinusoidalPositionEmbeddings(z_dim),
            nn.Linear(z_dim, time_dim),
            nn.SiLU(),
            nn.Linear(time_dim, time_dim),
        )
        # Only the input block is conditioned on the time embedding.
        self.block_in = Block(dim_in=z_dim, dim_out=unit_dims[0], time_emb_dim=time_dim)
        self.block_mid = nn.ModuleList(
            Block(dim_in=d_in, dim_out=d_out)
            for d_in, d_out in zip(unit_dims[:-1], unit_dims[1:])
        )
        self.block_out = Block(dim_in=unit_dims[-1], dim_out=z_dim)
    def forward(self, x, time_steps):
        t_emb = self.time_embedding(time_steps)
        h = self.block_in(x, t_emb)
        # Iterating an empty ModuleList is a no-op, so no length check is needed.
        for mid_block in self.block_mid:
            h = mid_block(h)
        return self.block_out(h)
class SinusoidalPositionEmbeddings(nn.Module):
    """Transformer-style sinusoidal embeddings of a 1-D tensor of timesteps."""
    def __init__(self, dim):
        super().__init__()
        self.dim = dim  # output width: first half sin terms, second half cos terms
    def forward(self, time):
        half_dim = self.dim // 2
        # Log-spaced frequencies from 1 down to 1/10000.
        scale = math.log(10000) / (half_dim - 1)
        freqs = torch.exp(-scale * torch.arange(half_dim, device=time.device))
        angles = time[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
class Diffusion(nn.Module):
    """Continuous-noise diffusion wrapper around a denoising network.

    Trains `net` to denoise data corrupted at log-normally sampled noise
    levels, and samples with a sigma schedule plus a second-order (Heun-style)
    correction step. The c_skip/c_out/c_in/c_noise preconditioning follows the
    EDM formulation (Karras et al., 2022) — judging by the formulas below.
    """
    def __init__(
        self,
        net,
        *,
        dim,
        num_sample_steps,
        sigma_min,
        sigma_max,
        sigma_data,
        rho,
        P_mean,
        P_std,
    ):
        super().__init__()
        self.net = net
        self.dim = dim
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max
        self.sigma_data = sigma_data
        self.rho = rho
        self.P_mean = P_mean
        self.P_std = P_std
        self.num_sample_steps = num_sample_steps
    @property
    def device(self):
        # Infer the device from the wrapped network's parameters.
        return next(self.net.parameters()).device
    def c_skip(self, sigma):
        """Weight of the noisy input in the denoiser's skip connection."""
        return (self.sigma_data ** 2) / (sigma ** 2 + self.sigma_data ** 2)
    def c_out(self, sigma):
        """Scale applied to the raw network output."""
        return sigma * self.sigma_data * (self.sigma_data ** 2 + sigma ** 2) ** -0.5
    def c_in(self, sigma):
        """Scale applied to the network input to normalize its variance."""
        return 1 * (sigma ** 2 + self.sigma_data ** 2) ** -0.5
    def c_noise(self, sigma):
        """Noise-level conditioning passed to the network (quarter log-sigma)."""
        return log(sigma) * 0.25
    def preconditioned_network_forward(self, noised_ehr, sigma, clamp = False):
        """Run the denoiser with input/output preconditioning; optionally clamp to [0, 1]."""
        batch, device = noised_ehr.shape[0], noised_ehr.device
        if isinstance(sigma, float):
            sigma = torch.full((batch,), sigma, device = device)
        padded_sigma = rearrange(sigma, 'b -> b 1')
        net_out = self.net(
            self.c_in(padded_sigma) * noised_ehr,
            self.c_noise(sigma),
        )
        # Denoised estimate = skip-scaled input + output-scaled prediction.
        out = self.c_skip(padded_sigma) * noised_ehr + self.c_out(padded_sigma) * net_out
        if clamp:
            out = out.clamp(0, 1)
        return out
    def sample_schedule(self, num_sample_steps = None):
        """Return the decreasing sigma schedule (rho-warped), padded with a final 0."""
        num_sample_steps = default(num_sample_steps, self.num_sample_steps)
        N = num_sample_steps
        inv_rho = 1 / self.rho
        steps = torch.arange(num_sample_steps, device = self.device, dtype = torch.float32)
        sigmas = (self.sigma_max ** inv_rho + steps / (N - 1) * (self.sigma_min ** inv_rho - self.sigma_max ** inv_rho)) ** self.rho
        sigmas = F.pad(sigmas, (0, 1), value = 0.)
        return sigmas
    @torch.no_grad()
    def sample(self, batch_size = 32, num_sample_steps = None, clamp = True):
        """Draw samples by integrating the probability-flow ODE over the sigma schedule."""
        num_sample_steps = default(num_sample_steps, self.num_sample_steps)
        shape = (batch_size, self.dim)
        sigmas = self.sample_schedule(num_sample_steps)
        sigmas_and_sigmas_next = list(zip(sigmas[:-1], sigmas[1:]))
        init_sigma = sigmas[0]
        # Start from pure noise at the largest sigma.
        ehr = init_sigma * torch.randn(shape, device = self.device)
        for sigma, sigma_next in sigmas_and_sigmas_next:
            sigma, sigma_next = map(lambda t: t.item(), (sigma, sigma_next))
            model_output = self.preconditioned_network_forward(ehr, sigma, clamp = clamp)
            # Euler step using the denoising direction d = (x - D(x)) / sigma.
            denoised_over_sigma = (ehr - model_output) / sigma
            ehr_next = ehr + (sigma_next - sigma) * denoised_over_sigma
            if sigma_next != 0:
                # Second-order (Heun-style) correction: average the slopes at
                # the current and predicted points. Skipped at the final step.
                model_output_next = self.preconditioned_network_forward(ehr_next, sigma_next, clamp = clamp)
                denoised_prime_over_sigma = (ehr_next - model_output_next) / sigma_next
                ehr_next = ehr + 0.5 * (sigma_next - sigma) * (denoised_over_sigma + denoised_prime_over_sigma)
            ehr = ehr_next
        return ehr
    def loss_weight(self, sigma):
        """Per-sample loss weighting as a function of noise level."""
        return (sigma ** 2 + self.sigma_data ** 2) * (sigma * self.sigma_data) ** -2
    def noise_distribution(self, batch_size):
        """Sample training noise levels from a log-normal distribution."""
        return (self.P_mean + self.P_std * torch.randn((batch_size,), device = self.device)).exp()
    def forward(self, ehr):
        """Compute the weighted denoising MSE loss for a batch of clean data."""
        batch_size = ehr.shape[0]
        sigmas = self.noise_distribution(batch_size)
        padded_sigmas = rearrange(sigmas, 'b -> b 1')
        noise = torch.randn_like(ehr)
        noised_ehr = ehr + padded_sigmas * noise
        denoised = self.preconditioned_network_forward(noised_ehr, sigmas)
        losses = F.mse_loss(denoised, ehr, reduction='none')
        losses = reduce(losses, 'b ... -> b', 'mean')
        losses = losses * self.loss_weight(sigmas)
        return losses.mean()
| 6,942 | 27.809129 | 132 | py |
ehrdiff | ehrdiff-main/train_util.py | import os
import time
import random
import logging
import numpy as np
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from transformers import get_cosine_schedule_with_warmup
from diffusion_util import LinearModel, Diffusion
def set_seed(seed=3407):
    """Seed every RNG in play (hash, random, numpy, torch, CUDA) for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    # GPU runs additionally need the CUDA generators seeded and cuDNN pinned
    # to deterministic kernels.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def train_diff(args):
    """Train the EHR diffusion model and checkpoint whenever eval correlation improves.

    Loads a binary-matrix dataset from args.data_file, trains LinearModel under
    the Diffusion wrapper, and every args.check_steps steps samples synthetic
    data, rounds/clips it to {0, 1}, and compares per-feature prevalence with
    the real data via plot_dim_dist. Weights are saved to weight/ on improvement.
    """
    logging.info("Loading Data...")
    raw_data = np.load(args.data_file)
    class EHRDataset(torch.utils.data.Dataset):
        # Thin wrapper exposing the loaded numpy matrix row-by-row.
        def __init__(self, data=raw_data):
            super().__init__()
            self.data = data
        def __len__(self):
            return self.data.shape[0]
        def __getitem__(self, index: int):
            return self.data[index]
    dataset = EHRDataset(raw_data)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=args.if_shuffle, drop_last=args.if_drop_last)
    device = args.device
    model = LinearModel(z_dim=args.ehr_dim, time_dim=args.time_dim, unit_dims=args.mlp_dims)
    model.to(args.device)
    optimizer = torch.optim.AdamW([{'params': model.parameters(), 'lr': args.lr, 'weight_decay': args.weight_decay} ])
    # Total scheduler steps depend on whether the last partial batch is dropped.
    if args.if_drop_last:
        scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\
                            num_training_steps=(raw_data.shape[0]//args.batch_size)*args.num_epochs)
    else:
        scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\
                            num_training_steps=(raw_data.shape[0]//args.batch_size+1)*args.num_epochs)
    diffusion = Diffusion(
        model,
        num_sample_steps = args.num_sample_steps,
        dim = args.ehr_dim,
        sigma_min = args.sigma_min,
        sigma_max = args.sigma_max,
        sigma_data = args.sigma_data,
        rho = args.rho,
        P_mean = args.p_mean,
        P_std = args.p_std,
    )
    # timestamp = time.strftime("%m_%d_%H_%M", time.localtime())
    logging.info("Training...")
    train_dm_loss = 0
    train_cnt = 0
    train_steps = 0
    best_corr = 0
    for epoch in range(args.num_epochs):
        for step, batch in enumerate(dataloader):
            optimizer.zero_grad()
            batch_size = batch.shape[0]
            batch = batch.to(device)
            loss_dm = diffusion(batch)
            train_dm_loss += loss_dm.item()
            train_cnt += batch_size
            train_steps += 1
            if train_steps % args.check_steps == 0:
                # Periodic evaluation: sample args.eval_samples synthetic rows.
                logging.info('[%d, %5d] dm_loss: %.10f' % (epoch+1, train_steps, train_dm_loss / train_cnt))
                model.eval()
                if args.eval_samples < args.batch_size:
                    syn_data = diffusion.sample(batch_size=args.eval_samples).detach().cpu().numpy()
                else:
                    # Sample in batch-sized chunks plus one remainder chunk.
                    num_iters = args.eval_samples // args.batch_size
                    num_left = args.eval_samples % args.batch_size
                    syn_data = []
                    for _ in range(num_iters):
                        syn_data.append(diffusion.sample(batch_size=args.batch_size).detach().cpu().numpy())
                    syn_data.append(diffusion.sample(batch_size=num_left).detach().cpu().numpy())
                    syn_data = np.concatenate(syn_data)
                # Binarize the continuous samples before computing statistics.
                syn_data = np.rint(np.clip(syn_data, 0, 1))
                corr, nzc, flag = plot_dim_dist(raw_data, syn_data, args.model_setting, best_corr)
                logging.info('corr: %.4f, none-zero columns: %d'%(corr, nzc))
                if flag:
                    best_corr = corr
                    # checkpoints_dirname = timestamp + '_' + args.model_setting
                    # os.makedirs(checkpoints_dirname, exist_ok=True)
                    # torch.save(model.state_dict(), checkpoints_dirname + "/model")
                    # torch.save(optimizer.state_dict(), checkpoints_dirname + "/optim")
                    torch.save(model.state_dict(), 'weight/model.pt')
                    torch.save(optimizer.state_dict(), 'weight/optim.pt')
                    logging.info("New Weight saved!")
                logging.info("**************************************")
                model.train()
            loss_dm.backward()
            optimizer.step()
            scheduler.step()
def plot_dim_dist(train_data, syn_data, model_setting, best_corr):
    """Compare per-feature prevalence of real vs. synthetic binary data.

    Scatter-plots feature means of both datasets with a fitted line and the
    identity line, saves the figure to figs/Cur_res.png, and additionally saves
    figs/Best_res.png when the correlation improves on *best_corr*.

    Args:
        train_data: real data array, shape (n_samples, n_features).
        syn_data: synthetic data array with the same feature count.
        model_setting: experiment tag (kept for interface compatibility; the
            saved file names are currently fixed).
        best_corr: best Pearson correlation seen so far.

    Returns:
        (corr, nzc, flag): Pearson correlation of the feature means, number of
        features with non-zero synthetic prevalence, and whether corr improved.
    """
    train_data_mean = np.mean(train_data, axis=0)
    temp_data_mean = np.mean(syn_data, axis=0)
    corr = pearsonr(temp_data_mean, train_data_mean)
    # Vectorized count of features the generator ever switches on
    # (was a Python-level loop over array elements).
    nzc = int((temp_data_mean > 0).sum())
    fig, ax = plt.subplots(figsize=(8, 6))
    slope, intercept = np.polyfit(train_data_mean, temp_data_mean, 1)
    fitted_values = slope * train_data_mean + intercept
    ax.plot(train_data_mean, fitted_values, 'b', alpha=0.5)
    # Identity line y = x as the ideal-match reference.
    ax.plot(train_data_mean, train_data_mean, 'r', alpha=0.5)
    ax.scatter(train_data_mean, temp_data_mean, alpha=0.3)
    ax.set_title('corr: %.4f, none-zero columns: %d, slope: %.4f' % (corr[0], nzc, slope))
    ax.set_xlabel('Feature prevalence of real data')
    ax.set_ylabel('Feature prevalence of synthetic data')
    fig.savefig('figs/{}.png'.format('Cur_res'))
    # Fixed: the original reassigned the local `best_corr`, which never
    # propagated to the caller; the caller updates its own best from `flag`.
    flag = corr[0] > best_corr
    if flag:
        fig.savefig('figs/{}.png'.format('Best_res'))
    plt.close(fig)
    return corr[0], nzc, flag
| 6,141 | 36.680982 | 119 | py |
ehrdiff | ehrdiff-main/gen_dat.py | from tqdm import tqdm
import torch
import numpy as np
from diffusion_util import LinearModel, Diffusion
# Generation script: load the trained denoiser and sample synthetic EHR rows.
device = torch.device('cuda:0')
dm = LinearModel(z_dim=1782, time_dim=384, unit_dims=[1024, 384, 384, 384, 1024])
dm.load_state_dict(torch.load("weight/model.pt"))
dm.to(device)
diffusion = Diffusion(
    dm,
    dim = 1782,
    P_mean = -1.2,
    P_std = 1.2,
    sigma_data = 0.14,
    num_sample_steps = 32,
    sigma_min = 0.02,
    sigma_max = 80,
    rho = 7,
)
out = []
dm.eval()
for b in tqdm(range(41), desc='Sampling...'):
    sampled_seq = diffusion.sample(batch_size=1000)
    out.append(sampled_seq)
out_seq = torch.cat(out)
out_seq = out_seq.detach().cpu().numpy()
# Binarize: training data is 0/1 codes, so clip to [0, 1] and round.
res = np.rint(np.clip(out_seq, 0, 1))
# Fixed: `res` was computed but the raw, un-binarized `out_seq` was saved,
# leaving the rounding dead code. Save the binarized array instead.
np.save("EHRDiff", res)
| 1,016 | 26.486486 | 81 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_TextCNN/utils.py | # utils.py
import torch
from torchtext import data
from torchtext.vocab import Vectors
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    """Wraps torchtext data loading: builds the vocabulary, pre-trained word
    embeddings, and bucketed train/validation/test iterators."""

    def __init__(self, config):
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}

    def parse_label(self, label):
        '''
        Get the actual labels from label string
        Input:
            label (string) : labels of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        return int(label.strip()[-1])

    def get_pandas_df(self, filename):
        '''
        Load the data into Pandas.DataFrame object
        This will be used to convert data to torchtext object
        '''
        with open(filename, 'r') as datafile:
            rows = [line.strip().split(',', maxsplit=1) for line in datafile]
        texts = [row[1] for row in rows]
        labels = [self.parse_label(row[0]) for row in rows]
        return pd.DataFrame({"text": texts, "label": labels})

    def load_data(self, w2v_file, train_file, test_file, val_file=None):
        '''
        Loads the data from files
        Sets up iterators for training, validation and test data
        Also create vocabulary and word embeddings based on the data

        Inputs:
            w2v_file (String): absolute path to file containing word embeddings (GloVe/Word2Vec)
            train_file (String): absolute path to training file
            test_file (String): absolute path to test file
            val_file (String): absolute path to validation file
        '''
        NLP = spacy.load('en')
        tokenizer = lambda sent: [tok.text for tok in NLP.tokenizer(sent) if tok.text != " "]

        # Fields describing how each column is processed.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True,
                          fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text", TEXT), ("label", LABEL)]

        def _to_dataset(df):
            # Convert a pandas DataFrame into a torchtext Dataset.
            examples = [data.Example.fromlist(row, datafields) for row in df.values.tolist()]
            return data.Dataset(examples, datafields)

        train_data = _to_dataset(self.get_pandas_df(train_file))
        test_data = _to_dataset(self.get_pandas_df(test_file))

        # Use the validation file when given; otherwise carve 20% off training.
        if val_file:
            val_data = _to_dataset(self.get_pandas_df(val_file))
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)

        TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
        self.word_embeddings = TEXT.vocab.vectors
        self.vocab = TEXT.vocab

        # Training batches are shuffled; val/test keep a fixed order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)

        print("Loaded {} training examples".format(len(train_data)))
        print("Loaded {} test examples".format(len(test_data)))
        print("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Return the accuracy of `model` over every batch in `iterator`.

    Predictions are shifted back to 1-based labels (+1) to match batch.label.
    """
    predictions = []
    targets = []
    for _, batch in enumerate(iterator):
        inputs = batch.text.cuda() if torch.cuda.is_available() else batch.text
        probs = model(inputs)
        batch_pred = torch.max(probs.cpu().data, 1)[1] + 1
        predictions.extend(batch_pred.numpy())
        targets.extend(batch.label.numpy())
    return accuracy_score(targets, np.array(predictions).flatten())
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_TextCNN/model.py | # model.py
import torch
from torch import nn
import numpy as np
from utils import *
class TextCNN(nn.Module):
    """Kim-style CNN for sentence classification: three parallel Conv1d
    branches over frozen pre-trained embeddings, max-pooled over time,
    concatenated, and classified with a linear layer."""

    def __init__(self, config, vocab_size, word_embeddings):
        super(TextCNN, self).__init__()
        self.config = config

        # Embedding layer initialised from pre-trained vectors and frozen.
        self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
        self.embeddings.weight = nn.Parameter(word_embeddings, requires_grad=False)

        # This stackoverflow thread clarifies how conv1d works
        # https://stackoverflow.com/questions/46503816/keras-conv1d-layer-parameters-filters-and-kernel-size/46504997
        self.conv1 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.embed_size, out_channels=self.config.num_channels, kernel_size=self.config.kernel_size[0]),
            nn.ReLU(),
            nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[0] + 1)
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.embed_size, out_channels=self.config.num_channels, kernel_size=self.config.kernel_size[1]),
            nn.ReLU(),
            nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[1] + 1)
        )
        self.conv3 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.embed_size, out_channels=self.config.num_channels, kernel_size=self.config.kernel_size[2]),
            nn.ReLU(),
            nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[2] + 1)
        )

        self.dropout = nn.Dropout(self.config.dropout_keep)

        # Fully-connected layer over the concatenated branch outputs.
        self.fc = nn.Linear(self.config.num_channels * len(self.config.kernel_size), self.config.output_size)

        # NOTE(review): forward() emits probabilities (Softmax) while train.py
        # pairs this model with nn.NLLLoss, which expects *log*-probabilities;
        # confirm whether nn.LogSoftmax was intended. dim=1 is made explicit
        # (was implicit, which is deprecated; for 2-D input it resolves to 1).
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # x.shape = (max_sen_len, batch_size)
        embedded_sent = self.embeddings(x).permute(1, 2, 0)
        # embedded_sent.shape = (batch_size, embed_size, max_sen_len)

        conv_out1 = self.conv1(embedded_sent).squeeze(2)  # (batch_size, num_channels)
        conv_out2 = self.conv2(embedded_sent).squeeze(2)
        conv_out3 = self.conv3(embedded_sent).squeeze(2)

        all_out = torch.cat((conv_out1, conv_out2, conv_out3), 1)
        final_feature_map = self.dropout(all_out)
        final_out = self.fc(final_feature_map)
        return self.softmax(final_out)

    def add_optimizer(self, optimizer):
        # Attach the optimizer used by run_epoch().
        self.optimizer = optimizer

    def add_loss_op(self, loss_op):
        # Attach the loss function used by run_epoch().
        self.loss_op = loss_op

    def reduce_lr(self):
        # Halve the learning rate of every parameter group.
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2

    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch.

        Returns:
            train_losses: mean training loss, recorded every 100 iterations
            val_accuracies: validation accuracy, recorded every 100 iterations
        """
        train_losses = []
        val_accuracies = []
        losses = []

        # Halve the LR at 1/3 and 2/3 of the training schedule.
        if (epoch == int(self.config.max_epochs / 3)) or (epoch == int(2 * self.config.max_epochs / 3)):
            self.reduce_lr()

        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                y = (batch.label - 1).type(torch.cuda.LongTensor)  # labels -> 0-based
            else:
                x = batch.text
                y = (batch.label - 1).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()

            if i % 100 == 0:
                print("Iter: {}".format(i + 1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []

                # Evaluate accuracy on the validation set.
                val_accuracy = evaluate_model(self, val_iterator)
                # Fix: val_accuracy was computed but never appended, so the
                # returned val_accuracies list was always empty.
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                self.train()

        return train_losses, val_accuracies
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_TextCNN/train.py | # train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__ == '__main__':
    config = Config()

    # Optional CLI overrides: argv[1] = train file, argv[2] = test file.
    # Fix: the original guards used `> 2` / `> 3`, so passing a single extra
    # argument (len(sys.argv) == 2) was silently ignored.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]

    w2v_file = '../data/glove.840B.300d.txt'

    dataset = Dataset(config)
    dataset.load_data(w2v_file, train_file, test_file)

    # Create Model with specified optimizer and loss function
    ##############################################################
    model = TextCNN(config, len(dataset.vocab), dataset.word_embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################

    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print("Epoch: {}".format(i))
        train_loss, val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)

    # Final accuracies on all three splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)

    print('Final Training Accuracy: {:.4f}'.format(train_acc))
    print('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print('Final Test Accuracy: {:.4f}'.format(test_acc))
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_TextCNN/old_code/model.py | # model.py
import torch
from torch import nn
from torch import Tensor
from torch.autograd import Variable
import numpy as np
from sklearn.metrics import accuracy_score
class CNNText(nn.Module):
    """CNN sentence classifier (old implementation): three parallel Conv2d
    branches over pre-computed embedding matrices, max-pooled, concatenated,
    and classified through a linear + softmax head."""

    def __init__(self, config):
        super(CNNText, self).__init__()
        self.config = config

        # Three kernels as in the original paper: (3,300), (4,300), (5,300).
        self.conv1 = nn.Conv2d(in_channels=self.config.in_channels,
                               out_channels=self.config.num_channels,
                               kernel_size=(self.config.kernel_size[0], self.config.embed_size),
                               stride=1, padding=0)
        self.activation1 = nn.ReLU()
        self.max_out1 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[0] + 1)

        self.conv2 = nn.Conv2d(in_channels=self.config.in_channels,
                               out_channels=self.config.num_channels,
                               kernel_size=(self.config.kernel_size[1], self.config.embed_size),
                               stride=1, padding=0)
        self.activation2 = nn.ReLU()
        self.max_out2 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[1] + 1)

        self.conv3 = nn.Conv2d(in_channels=self.config.in_channels,
                               out_channels=self.config.num_channels,
                               kernel_size=(self.config.kernel_size[2], self.config.embed_size),
                               stride=1, padding=0)
        self.activation3 = nn.ReLU()
        self.max_out3 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[2] + 1)

        self.dropout = nn.Dropout(self.config.dropout_keep)

        # Fully-connected layer over the concatenated branch outputs.
        self.fc = nn.Linear(self.config.num_channels * len(self.config.kernel_size), self.config.output_size)

        # Softmax non-linearity
        self.softmax = nn.Softmax()

    def forward(self, x):
        # (batch_size, max_seq_len, embed_size) -> (batch_size, 1, max_seq_len, embed_size)
        stacked = x.unsqueeze(1)

        branch_outputs = []
        for conv, act, pool in ((self.conv1, self.activation1, self.max_out1),
                                (self.conv2, self.activation2, self.max_out2),
                                (self.conv3, self.activation3, self.max_out3)):
            feat = act(conv(stacked).squeeze(3))
            branch_outputs.append(pool(feat).squeeze(2))

        combined = torch.cat(tuple(branch_outputs), 1)
        final_feature_map = self.dropout(combined)
        return self.softmax(self.fc(final_feature_map))

    def add_optimizer(self, optimizer):
        # Attach the optimizer used by run_epoch().
        self.optimizer = optimizer

    def add_loss_op(self, loss_op):
        # Attach the loss function used by run_epoch().
        self.loss_op = loss_op

    def run_epoch(self, train_data, val_data):
        """One pass over train_data; every 50 iterations records the mean
        training loss and the current validation accuracy."""
        train_x, train_y = train_data[0], train_data[1]
        val_x, val_y = val_data[0], val_data[1]
        batches = data_iterator(train_x, train_y, self.config.batch_size)

        train_losses = []
        val_accuracies = []
        running = []
        for step, (bx, by) in enumerate(batches):
            self.optimizer.zero_grad()
            inputs = Tensor(bx).cuda()
            outputs = self.__call__(inputs)
            loss = self.loss_op(outputs, torch.cuda.LongTensor(by - 1))
            loss.backward()
            running.append(loss.data.cpu().numpy())
            self.optimizer.step()

            if (step + 1) % 50 == 0:
                print("Iter: {}".format(step + 1))
                mean_loss = np.mean(running)
                train_losses.append(mean_loss)
                print("\tAverage training loss: {:.5f}".format(mean_loss))
                running = []

                # Validation accuracy at this checkpoint.
                self.eval()
                preds = []
                val_batches = data_iterator(val_x, val_y, self.config.batch_size)
                for vx, vy in val_batches:
                    val_out = self.__call__(Variable(Tensor(vx)).cuda())
                    preds.extend((torch.max(val_out.cpu().data, 1)[1] + 1).numpy())
                score = accuracy_score(val_y, np.array(preds).flatten())
                val_accuracies.append(score)
                print("\tVal Accuracy: {:.4f}".format(score))
                self.train()

        return train_losses, val_accuracies
| 4,642 | 40.088496 | 107 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_TextCNN/old_code/train.py | # train.py
from utils import *
from config import Config
from sklearn.model_selection import train_test_split
import numpy as np
from tqdm import tqdm
import sys
import torch.optim as optim
from torch import nn, Tensor
from torch.autograd import Variable
import torch
from sklearn.metrics import accuracy_score
def get_accuracy(model, test_x, test_y):
    """Accuracy of `model` over (test_x, test_y), iterated in batches.

    Predictions are shifted back to 1-based labels (+1) to match test_y.
    """
    predictions = []
    for batch_x, batch_y in data_iterator(test_x, test_y):
        logits = model(Variable(Tensor(batch_x)).cuda())
        batch_pred = torch.max(logits.cpu().data, 1)[1] + 1
        predictions.extend(batch_pred.numpy())
    return accuracy_score(test_y, np.array(predictions).flatten())
if __name__ == '__main__':
    # Optional CLI overrides: argv[1] = train path, argv[2] = test path.
    # Fix: the original guards used `> 2` / `> 3`, so passing a single extra
    # argument (len(sys.argv) == 2) was silently ignored.
    train_path = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_path = sys.argv[1]
    test_path = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_path = sys.argv[2]

    train_text, train_labels, vocab = get_data(train_path)
    train_text, val_text, train_label, val_label = train_test_split(train_text, train_labels, test_size=0.2)

    # Read Word Embeddings
    w2vfile = '../data/glove.840B.300d.txt'
    word_embeddings = get_word_embeddings(w2vfile, vocab.word_to_index, embedsize=300)

    # Get all configuration parameters
    config = Config()

    # Encode text as (num_examples, max_sen_len, embed_size) arrays.
    train_x = np.array([encode_text(text, word_embeddings, config.max_sen_len) for text in tqdm(train_text)])
    train_y = np.array(train_label)  # (num_examples,)
    val_x = np.array([encode_text(text, word_embeddings, config.max_sen_len) for text in tqdm(val_text)])
    val_y = np.array(val_label)

    # Create Model with specified optimizer and loss function
    ##############################################################
    model = CNNText(config)
    model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################

    train_data = [train_x, train_y]
    val_data = [val_x, val_y]
    for i in range(config.max_epochs):
        print("Epoch: {}".format(i))
        train_losses, val_accuracies = model.run_epoch(train_data, val_data)
        print("\tAverage training loss: {:.5f}".format(np.mean(train_losses)))
        print("\tAverage Val Accuracy (per 50 iterations): {:.4f}".format(np.mean(val_accuracies)))

        # Reduce learning rate as number of epochs increase
        if i > 0.5 * config.max_epochs:
            print("Reducing LR")
            for g in optimizer.param_groups:
                g['lr'] = 0.1
        if i > 0.75 * config.max_epochs:
            print("Reducing LR")
            for g in optimizer.param_groups:
                g['lr'] = 0.05

    # Get Accuracy of final model
    test_text, test_labels, test_vocab = get_data(test_path)
    test_x = np.array([encode_text(text, word_embeddings, config.max_sen_len) for text in tqdm(test_text)])
    test_y = np.array(test_labels)

    train_acc = get_accuracy(model, train_x, train_y)
    val_acc = get_accuracy(model, val_x, val_y)
    test_acc = get_accuracy(model, test_x, test_y)

    print('Final Training Accuracy: {:.4f}'.format(train_acc))
    print('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print('Final Test Accuracy: {:.4f}'.format(test_acc))
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_Seq2Seq_Attention/utils.py | # utils.py
import torch
from torchtext import data
from torchtext.vocab import Vectors
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    """Wraps torchtext data loading for the Seq2Seq-attention model: builds
    the vocabulary, pre-trained word embeddings, and bucketed iterators."""

    def __init__(self, config):
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}

    def parse_label(self, label):
        '''
        Get the actual labels from label string
        Input:
            label (string) : labels of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        return int(label.strip()[-1])

    def get_pandas_df(self, filename):
        '''
        Load the data into Pandas.DataFrame object
        This will be used to convert data to torchtext object
        '''
        with open(filename, 'r') as datafile:
            rows = [line.strip().split(',', maxsplit=1) for line in datafile]
        texts = [row[1] for row in rows]
        labels = [self.parse_label(row[0]) for row in rows]
        return pd.DataFrame({"text": texts, "label": labels})

    def load_data(self, w2v_file, train_file, test_file=None, val_file=None):
        '''
        Loads the data from files
        Sets up iterators for training, validation and test data
        Also create vocabulary and word embeddings based on the data

        Inputs:
            w2v_file (String): path to file containing word embeddings (GloVe/Word2Vec)
            train_file (String): path to training file
            test_file (String): path to test file
            val_file (String): path to validation file
        '''
        NLP = spacy.load('en')
        tokenizer = lambda sent: [tok.text for tok in NLP.tokenizer(sent) if tok.text != " "]

        # Fields describing how each column is processed.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True,
                          fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text", TEXT), ("label", LABEL)]

        def _to_dataset(df):
            # Convert a pandas DataFrame into a torchtext Dataset.
            examples = [data.Example.fromlist(row, datafields) for row in df.values.tolist()]
            return data.Dataset(examples, datafields)

        train_data = _to_dataset(self.get_pandas_df(train_file))
        test_data = _to_dataset(self.get_pandas_df(test_file))

        # Use the validation file when given; otherwise carve 20% off training.
        if val_file:
            val_data = _to_dataset(self.get_pandas_df(val_file))
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)

        # NOTE(review): if w2v_file is falsy, build_vocab is skipped and the
        # TEXT.vocab access below would fail — confirm the intended fallback.
        if w2v_file:
            TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
            self.word_embeddings = TEXT.vocab.vectors
        self.vocab = TEXT.vocab

        # Training batches are shuffled; val/test keep a fixed order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)

        print("Loaded {} training examples".format(len(train_data)))
        print("Loaded {} test examples".format(len(test_data)))
        print("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Return the accuracy of `model` over every batch in `iterator`.

    Predictions are shifted back to 1-based labels (+1) to match batch.label.
    """
    predictions = []
    targets = []
    for _, batch in enumerate(iterator):
        inputs = batch.text.cuda() if torch.cuda.is_available() else batch.text
        probs = model(inputs)
        batch_pred = torch.max(probs.cpu().data, 1)[1] + 1
        predictions.extend(batch_pred.numpy())
        targets.extend(batch.label.numpy())
    return accuracy_score(targets, np.array(predictions).flatten())
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_Seq2Seq_Attention/model.py | # model.py
import torch
from torch import nn
import numpy as np
from torch.nn import functional as F
from utils import *
class Seq2SeqAttention(nn.Module):
    """BiLSTM encoder with dot-product attention for text classification.

    The final hidden state attends over all encoder outputs; the attention
    context and the final hidden state are concatenated and classified.
    """

    def __init__(self, config, vocab_size, word_embeddings):
        super(Seq2SeqAttention, self).__init__()
        self.config = config

        # Embedding layer initialised from pre-trained vectors and frozen.
        self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
        self.embeddings.weight = nn.Parameter(word_embeddings, requires_grad=False)

        # Encoder RNN
        self.lstm = nn.LSTM(input_size=self.config.embed_size,
                            hidden_size=self.config.hidden_size,
                            num_layers=self.config.hidden_layers,
                            bidirectional=self.config.bidirectional)

        # Dropout Layer
        self.dropout = nn.Dropout(self.config.dropout_keep)

        # Input is [final hidden state ; attention context] — hence the factor 2.
        self.fc = nn.Linear(
            self.config.hidden_size * (1 + self.config.bidirectional) * 2,
            self.config.output_size
        )

        # NOTE(review): forward() emits probabilities (Softmax) while train.py
        # pairs this model with nn.NLLLoss, which expects *log*-probabilities;
        # confirm whether nn.LogSoftmax was intended. dim=1 is made explicit
        # (was implicit, which is deprecated; for 2-D input it resolves to 1).
        self.softmax = nn.Softmax(dim=1)

    def apply_attention(self, rnn_output, final_hidden_state):
        '''
        Apply Attention on RNN output
        Input:
            rnn_output (batch_size, seq_len, num_directions * hidden_size): tensor representing hidden state for every word in the sentence
            final_hidden_state (batch_size, num_directions * hidden_size): final hidden state of the RNN
        Returns:
            attention_output(batch_size, num_directions * hidden_size): attention output vector for the batch
        '''
        hidden_state = final_hidden_state.unsqueeze(2)
        # Dot-product score of every timestep against the final state.
        attention_scores = torch.bmm(rnn_output, hidden_state).squeeze(2)
        soft_attention_weights = F.softmax(attention_scores, 1).unsqueeze(2)  # (batch_size, seq_len, 1)
        attention_output = torch.bmm(rnn_output.permute(0, 2, 1), soft_attention_weights).squeeze(2)
        return attention_output

    def forward(self, x):
        # x.shape = (max_sen_len, batch_size)
        embedded_sent = self.embeddings(x)
        # embedded_sent.shape = (max_sen_len, batch_size, embed_size)

        ##################################### Encoder #######################################
        lstm_output, (h_n, c_n) = self.lstm(embedded_sent)
        # lstm_output.shape = (seq_len, batch_size, num_directions * hidden_size)

        # Final hidden state of last layer (num_directions, batch_size, hidden_size)
        batch_size = h_n.shape[1]
        h_n_final_layer = h_n.view(self.config.hidden_layers,
                                   self.config.bidirectional + 1,
                                   batch_size,
                                   self.config.hidden_size)[-1, :, :, :]

        ##################################### Attention #####################################
        # Convert to (batch_size, num_directions * hidden_size) for attention.
        final_hidden_state = torch.cat([h_n_final_layer[i, :, :] for i in range(h_n_final_layer.shape[0])], dim=1)
        attention_out = self.apply_attention(lstm_output.permute(1, 0, 2), final_hidden_state)
        # attention_out.shape = (batch_size, num_directions * hidden_size)

        #################################### Linear #########################################
        concatenated_vector = torch.cat([final_hidden_state, attention_out], dim=1)
        final_feature_map = self.dropout(concatenated_vector)
        final_out = self.fc(final_feature_map)
        return self.softmax(final_out)

    def add_optimizer(self, optimizer):
        # Attach the optimizer used by run_epoch().
        self.optimizer = optimizer

    def add_loss_op(self, loss_op):
        # Attach the loss function used by run_epoch().
        self.loss_op = loss_op

    def reduce_lr(self):
        # Halve the learning rate of every parameter group.
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2

    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch.

        Returns:
            train_losses: mean training loss, recorded every 100 iterations
            val_accuracies: validation accuracy, recorded every 100 iterations
        """
        train_losses = []
        val_accuracies = []
        losses = []

        # Halve the LR at 1/3 and 2/3 of the training schedule.
        if (epoch == int(self.config.max_epochs / 3)) or (epoch == int(2 * self.config.max_epochs / 3)):
            self.reduce_lr()

        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                y = (batch.label - 1).type(torch.cuda.LongTensor)  # labels -> 0-based
            else:
                x = batch.text
                y = (batch.label - 1).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()

            if i % 100 == 0:
                print("Iter: {}".format(i + 1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []

                # Evaluate accuracy on the validation set.
                val_accuracy = evaluate_model(self, val_iterator)
                # Fix: val_accuracy was computed but never appended, so the
                # returned val_accuracies list was always empty.
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                self.train()

        return train_losses, val_accuracies
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_Seq2Seq_Attention/train.py | # train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__ == '__main__':
    config = Config()

    # Optional CLI overrides: argv[1] = train file, argv[2] = test file.
    # Fix: the original guards used `> 2` / `> 3`, so passing a single extra
    # argument (len(sys.argv) == 2) was silently ignored.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]

    w2v_file = '../data/glove.840B.300d.txt'

    dataset = Dataset(config)
    dataset.load_data(w2v_file, train_file, test_file)

    # Create Model with specified optimizer and loss function
    ##############################################################
    model = Seq2SeqAttention(config, len(dataset.vocab), dataset.word_embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################

    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print("Epoch: {}".format(i))
        train_loss, val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)

    # Final accuracies on all three splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)

    print('Final Training Accuracy: {:.4f}'.format(train_acc))
    print('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print('Final Test Accuracy: {:.4f}'.format(test_acc))
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_CharCNN/utils.py | # utils.py
import torch
from torchtext import data
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
def get_embedding_matrix(vocab_chars):
    """Build one-hot character embeddings: an identity matrix with one row
    (and column) per vocabulary entry."""
    size = len(vocab_chars)
    return np.eye(size, size)
class Dataset(object):
    """Wraps torchtext data loading for the character-level CNN: builds a
    character vocabulary with one-hot vectors and bucketed iterators."""

    def __init__(self, config):
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None

    def parse_label(self, label):
        '''
        Get the actual labels from label string
        Input:
            label (string) : labels of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        # Labels are shifted to 0-based here (unlike the word-level models).
        return int(label.strip()[-1]) - 1

    def get_pandas_df(self, filename):
        '''
        Load the data into Pandas.DataFrame object
        This will be used to convert data to torchtext object
        '''
        with open(filename, 'r') as datafile:
            rows = [line.strip().split(',', maxsplit=1) for line in datafile]
        texts = [row[1] for row in rows]
        labels = [self.parse_label(row[0]) for row in rows]
        return pd.DataFrame({"text": texts, "label": labels})

    def load_data(self, train_file, test_file, val_file=None):
        '''
        Loads the data from files
        Sets up iterators for training, validation and test data
        Also create vocabulary and word embeddings based on the data

        Inputs:
            train_file (String): absolute path to training file
            test_file (String): absolute path to test file
            val_file (String): absolute path to validation file
        '''
        # Character-level tokenizer: the sentence reversed, one char per token.
        tokenizer = lambda sent: list(sent[::-1])

        # Fields describing how each column is processed.
        TEXT = data.Field(tokenize=tokenizer, lower=True, fix_length=self.config.seq_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text", TEXT), ("label", LABEL)]

        def _to_dataset(df):
            # Convert a pandas DataFrame into a torchtext Dataset.
            examples = [data.Example.fromlist(row, datafields) for row in df.values.tolist()]
            return data.Dataset(examples, datafields)

        train_data = _to_dataset(self.get_pandas_df(train_file))
        test_data = _to_dataset(self.get_pandas_df(test_file))

        # Use the validation file when given; otherwise carve 10% off training.
        if val_file:
            val_data = _to_dataset(self.get_pandas_df(val_file))
        else:
            train_data, val_data = train_data.split(split_ratio=0.9)

        # One-hot vectors over the induced character vocabulary.
        TEXT.build_vocab(train_data)
        embedding_mat = get_embedding_matrix(list(TEXT.vocab.stoi.keys()))
        TEXT.vocab.set_vectors(TEXT.vocab.stoi, torch.FloatTensor(embedding_mat), len(TEXT.vocab.stoi))
        self.vocab = TEXT.vocab
        self.embeddings = TEXT.vocab.vectors

        # Training batches are shuffled; val/test keep a fixed order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)

        print("Loaded {} training examples".format(len(train_data)))
        print("Loaded {} test examples".format(len(test_data)))
        print("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Return the accuracy of `model` over every batch in `iterator`.

    Labels are already 0-based here (parse_label subtracts 1), so the
    predicted argmax is compared directly, with no +1 shift.
    """
    predictions = []
    targets = []
    for _, batch in enumerate(iterator):
        inputs = batch.text.cuda() if torch.cuda.is_available() else batch.text
        probs = model(inputs)
        batch_pred = torch.max(probs.cpu().data, 1)[1]
        predictions.extend(batch_pred.numpy())
        targets.extend(batch.label.numpy())
    return accuracy_score(targets, np.array(predictions).flatten())
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_CharCNN/model.py | # model.py
import torch
from torch import nn
import numpy as np
from utils import *
class CharCNN(nn.Module):
    """Character-level CNN (Zhang et al., 2015): six Conv1d stages over one-hot
    character embeddings followed by three fully-connected layers."""

    def __init__(self, config, vocab_size, embeddings):
        super(CharCNN, self).__init__()
        self.config = config
        # One-hot embeddings: embedding dimension equals vocabulary size.
        embed_size = vocab_size

        # Embedding layer initialised from the one-hot matrix and frozen.
        self.embeddings = nn.Embedding(vocab_size, embed_size)
        self.embeddings.weight = nn.Parameter(embeddings, requires_grad=False)

        # This stackoverflow thread explains how conv1d works
        # https://stackoverflow.com/questions/46503816/keras-conv1d-layer-parameters-filters-and-kernel-size/46504997
        conv1 = nn.Sequential(
            nn.Conv1d(in_channels=embed_size, out_channels=self.config.num_channels, kernel_size=7),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3)
        )  # (batch_size, num_channels, (seq_len-6)/3)
        conv2 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=7),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3)
        )  # (batch_size, num_channels, (seq_len-6-18)/(3*3))
        conv3 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU()
        )  # (batch_size, num_channels, (seq_len-6-18-18)/(3*3))
        conv4 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU()
        )  # (batch_size, num_channels, (seq_len-6-18-18-18)/(3*3))
        conv5 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU()
        )  # (batch_size, num_channels, (seq_len-6-18-18-18-18)/(3*3))
        conv6 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3)
        )  # (batch_size, num_channels, (seq_len-6-18-18-18-18-18)/(3*3*3))

        # Flattened length of the conv6 output.
        conv_output_size = self.config.num_channels * ((self.config.seq_len - 96) // 27)

        linear1 = nn.Sequential(
            nn.Linear(conv_output_size, self.config.linear_size),
            nn.ReLU(),
            nn.Dropout(self.config.dropout_keep)
        )
        linear2 = nn.Sequential(
            nn.Linear(self.config.linear_size, self.config.linear_size),
            nn.ReLU(),
            nn.Dropout(self.config.dropout_keep)
        )
        linear3 = nn.Sequential(
            nn.Linear(self.config.linear_size, self.config.output_size),
            # NOTE(review): train.py pairs this model with nn.CrossEntropyLoss,
            # which applies a softmax internally — the explicit Softmax here
            # makes training a "double softmax"; confirm whether it should be
            # removed. dim=1 is made explicit (was implicit/deprecated; for
            # 2-D input it resolves to 1, so outputs are unchanged).
            nn.Softmax(dim=1)
        )

        self.convolutional_layers = nn.Sequential(conv1, conv2, conv3, conv4, conv5, conv6)
        self.linear_layers = nn.Sequential(linear1, linear2, linear3)

    def forward(self, x):
        # x.shape = (seq_len, batch_size); permute to (batch_size, embed_size, seq_len)
        embedded_sent = self.embeddings(x).permute(1, 2, 0)
        conv_out = self.convolutional_layers(embedded_sent)
        conv_out = conv_out.view(conv_out.shape[0], -1)  # flatten channels x length
        linear_output = self.linear_layers(conv_out)
        return linear_output

    def add_optimizer(self, optimizer):
        # Attach the optimizer used by run_epoch().
        self.optimizer = optimizer

    def add_loss_op(self, loss_op):
        # Attach the loss function used by run_epoch().
        self.loss_op = loss_op

    def reduce_lr(self):
        # Halve the learning rate of every parameter group.
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2

    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch.

        Returns:
            train_losses: mean training loss, recorded every 100 iterations
            val_accuracies: validation accuracy, recorded every 100 iterations
        """
        train_losses = []
        val_accuracies = []
        losses = []

        # Halve the LR every 3 epochs.
        if (epoch > 0) and (epoch % 3 == 0):
            self.reduce_lr()

        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                y = (batch.label).type(torch.cuda.LongTensor)  # labels already 0-based
            else:
                x = batch.text
                y = (batch.label).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()

            if i % 100 == 0:
                print("Iter: {}".format(i + 1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []

                # Evaluate accuracy on the validation set.
                val_accuracy = evaluate_model(self, val_iterator)
                # Fix: val_accuracy was computed but never appended, so the
                # returned val_accuracies list was always empty.
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                self.train()

        return train_losses, val_accuracies
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_CharCNN/train.py | # train.py
from utils import *
from model import *
from config import Config
import sys
import torch
import torch.optim as optim
from torch import nn
if __name__=='__main__':
    config = Config()
    # Optional CLI overrides: python train.py [train_file [test_file]]
    # Fix: the guards were off by one (`> 2`/`> 3`), so overriding the paths
    # required an extra dummy argument; argv[1]/argv[2] need only 2/3 entries.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    dataset = Dataset(config)
    dataset.load_data(train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = CharCNN(config, len(dataset.vocab), dataset.embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    loss_fn = nn.CrossEntropyLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(loss_fn)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Final accuracies on all three splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
print ('Final Test Accuracy: {:.4f}'.format(test_acc)) | 1,665 | 32.32 | 98 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_CharCNN/without_torchtext/utils.py | # utils.py
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils import data
from torch.utils.data import DataLoader
from torch.autograd import Variable
from sklearn.metrics import accuracy_score
# Used part of code to read the dataset from: https://github.com/1991viet/Character-level-cnn-pytorch/blob/master/src/dataset.py
class MyDataset(Dataset):
    """Character-level dataset: each sample is a (max_len, |vocab|) one-hot matrix."""
    def __init__(self, data_path, config):
        """Read a 'label,text' file and index its rows for __getitem__."""
        self.config = config
        self.vocabulary = list("""abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}""")
        self.identity_mat = np.identity(len(self.vocabulary))
        frame = get_pandas_df(data_path)
        self.texts = list(frame.text)
        self.labels = list(frame.label)
        self.length = len(self.labels)
    def __len__(self):
        return self.length
    def __getitem__(self, index):
        """Return (float32 one-hot matrix truncated/padded to max_len, label)."""
        text = self.texts[index]
        # One identity-matrix row per character; unknown characters are dropped.
        rows = [self.identity_mat[self.vocabulary.index(ch)] for ch in text if ch in self.vocabulary]
        encoded = np.array(rows, dtype=np.float32)
        max_len = self.config.max_len
        n_vocab = len(self.vocabulary)
        if len(encoded) > max_len:
            encoded = encoded[:max_len]
        elif 0 < len(encoded) < max_len:
            padding = np.zeros((max_len - len(encoded), n_vocab), dtype=np.float32)
            encoded = np.concatenate((encoded, padding))
        elif len(encoded) == 0:
            # Text with no in-vocabulary characters becomes an all-zero matrix.
            encoded = np.zeros((max_len, n_vocab), dtype=np.float32)
        return encoded, self.labels[index]
def parse_label(label):
    """Convert a fastText-style tag such as '__label__2' to a 0-based class id.

    Only the final character is inspected, so labels must be single-digit.
    """
    last_char = label.strip()[-1]
    return int(last_char) - 1
def get_pandas_df(filename):
    """Load a 'label,text' file into a DataFrame with 'text' and 'label' columns.

    Each line is split on the first comma only, so commas inside the text are kept.
    """
    with open(filename, 'r') as datafile:
        rows = [line.strip().split(',', maxsplit=1) for line in datafile]
    texts = [row[1] for row in rows]
    labels = [parse_label(row[0]) for row in rows]
    return pd.DataFrame({"text": texts, "label": labels})
def get_iterators(config, train_file, test_file, val_file=None):
    """Build train/test/val DataLoaders from 'label,text' files.

    When no validation file is given, 10% of the training set is randomly
    split off to serve as validation data.
    """
    train_set = MyDataset(train_file, config)
    test_set = MyDataset(test_file, config)
    if val_file:
        val_set = MyDataset(val_file, config)
    else:
        # Random 90/10 train/validation split.
        n_train = int(0.9 * len(train_set))
        n_val = len(train_set) - n_train
        train_set, val_set = data.random_split(train_set, [n_train, n_val])
    make_loader = lambda ds, shuf=False: DataLoader(ds, batch_size=config.batch_size, shuffle=shuf)
    return make_loader(train_set, True), make_loader(test_set), make_loader(val_set)
def evaluate_model(model, iterator):
    """Return classification accuracy of `model` over all (x, y) batches.

    Bug fix: the CPU branch previously also called .cuda(), which crashed on
    machines without a GPU; tensors are now moved to the GPU only when one is
    available.
    """
    all_preds = []
    all_y = []
    for idx, batch in enumerate(iterator):
        if torch.cuda.is_available():
            batch = [Variable(record).cuda() for record in batch]
        else:
            batch = [Variable(record) for record in batch]
        x, y = batch
        y_pred = model(x)
        # Predicted class = argmax over the class dimension.
        predicted = torch.max(y_pred.cpu().data, 1)[1]
        all_preds.extend(predicted.numpy())
        all_y.extend(y.cpu().numpy())
    return accuracy_score(all_y, np.array(all_preds).flatten())
return score | 3,657 | 37.505263 | 128 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_CharCNN/without_torchtext/model.py | # model.py
import torch
from torch import nn
import numpy as np
from torch.autograd import Variable
from utils import *
class CharCNN(nn.Module):
    def __init__(self, config):
        """Build the six-conv / three-linear character-level CNN.

        Expects `config` to provide: vocab_size, num_channels, max_len,
        linear_size, dropout_keep, output_size. `max_len` must be large enough
        that (max_len - 96) // 27 is positive, or conv6's output is empty.
        """
        super(CharCNN, self).__init__()
        self.config = config
        # This stackoverflow thread explains how conv1d works
        # https://stackoverflow.com/questions/46503816/keras-conv1d-layer-parameters-filters-and-kernel-size/46504997
        conv1 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.vocab_size, out_channels=self.config.num_channels, kernel_size=7),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3)
        ) # (batch_size, num_channels, (max_len-6)/3)
        conv2 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=7),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3)
        ) # (batch_size, num_channels, (max_len-6-18)/(3*3))
        conv3 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU()
        ) # (batch_size, num_channels, (max_len-6-18-18)/(3*3))
        conv4 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU()
        ) # (batch_size, num_channels, (max_len-6-18-18-18)/(3*3))
        conv5 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU()
        ) # (batch_size, num_channels, (max_len-6-18-18-18-18)/(3*3))
        conv6 = nn.Sequential(
            nn.Conv1d(in_channels=self.config.num_channels, out_channels=self.config.num_channels, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3)
        ) # (batch_size, num_channels, (max_len-6-18-18-18-18-18)/(3*3*3))
        # Length of output after conv6
        conv_output_size = self.config.num_channels * ((self.config.max_len - 96) // 27)
        # NOTE(review): nn.Dropout treats its argument as the *drop* probability,
        # but the config field is named dropout_keep — confirm the intended value.
        linear1 = nn.Sequential(
            nn.Linear(conv_output_size, self.config.linear_size),
            nn.ReLU(),
            nn.Dropout(self.config.dropout_keep)
        )
        linear2 = nn.Sequential(
            nn.Linear(self.config.linear_size, self.config.linear_size),
            nn.ReLU(),
            nn.Dropout(self.config.dropout_keep)
        )
        # NOTE(review): train.py pairs this model with nn.CrossEntropyLoss, which
        # applies log-softmax itself — the Softmax here gets applied twice.
        # Argmax predictions are unaffected, but gradients are weakened.
        linear3 = nn.Sequential(
            nn.Linear(self.config.linear_size, self.config.output_size),
            nn.Softmax()
        )
        self.convolutional_layers = nn.Sequential(conv1,conv2,conv3,conv4,conv5,conv6)
        self.linear_layers = nn.Sequential(linear1, linear2, linear3)
        # Initialize Weights
        self._create_weights(mean=0.0, std=0.05)
def _create_weights(self, mean=0.0, std=0.05):
for module in self.modules():
if isinstance(module, nn.Conv1d) or isinstance(module, nn.Linear):
module.weight.data.normal_(mean, std)
def forward(self, embedded_sent):
embedded_sent = embedded_sent.transpose(1,2)#.permute(0,2,1) # shape=(batch_size,embed_size,max_len)
conv_out = self.convolutional_layers(embedded_sent)
conv_out = conv_out.view(conv_out.shape[0], -1)
linear_output = self.linear_layers(conv_out)
return linear_output
    def add_optimizer(self, optimizer):
        # Attach the optimizer that run_epoch steps (configured in train.py).
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Attach the loss function that run_epoch minimises (configured in train.py).
        self.loss_op = loss_op
def reduce_lr(self):
print("Reducing LR")
for g in self.optimizer.param_groups:
g['lr'] = g['lr'] / 2
def run_epoch(self, train_iterator, val_iterator, epoch):
train_losses = []
val_accuracies = []
losses = []
# Reduce learning rate as number of epochs increase
if (epoch > 0 and epoch%3 == 0):
self.reduce_lr()
for i, batch in enumerate(train_iterator):
_, n_true_label = batch
if torch.cuda.is_available():
batch = [Variable(record).cuda() for record in batch]
else:
batch = [Variable(record) for record in batch]
x, y = batch
self.optimizer.zero_grad()
y_pred = self.__call__(x)
loss = self.loss_op(y_pred, y)
loss.backward()
losses.append(loss.data.cpu().numpy())
self.optimizer.step()
if i % 100 == 0:
self.eval()
print("Iter: {}".format(i+1))
avg_train_loss = np.mean(losses)
train_losses.append(avg_train_loss)
print("\tAverage training loss: {:.5f}".format(avg_train_loss))
losses = []
# Evalute Accuracy on validation set
val_accuracy = evaluate_model(self, val_iterator)
print("\tVal Accuracy: {:.4f}".format(val_accuracy))
self.train()
return train_losses, val_accuracies | 5,218 | 39.773438 | 117 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_CharCNN/without_torchtext/train.py | # train.py
from utils import *
from model import *
from config import Config
import sys
import torch
import torch.optim as optim
from torch import nn
if __name__=='__main__':
    config = Config()
    # Optional CLI overrides: python train.py [train_file [test_file]]
    # Fix: the guards were off by one (`> 2`/`> 3`); argv[1]/argv[2] only
    # require 2/3 argv entries respectively.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    train_iterator, test_iterator, val_iterator = get_iterators(config, train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = CharCNN(config)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    loss_fn = nn.CrossEntropyLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(loss_fn)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(train_iterator, val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Final accuracies on all three splits.
    train_acc = evaluate_model(model, train_iterator)
    val_acc = evaluate_model(model, val_iterator)
    test_acc = evaluate_model(model, test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
print ('Final Test Accuracy: {:.4f}'.format(test_acc)) | 1,605 | 31.77551 | 94 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_fastText/utils.py | # utils.py
import torch
from torchtext import data
from torchtext.vocab import Vectors
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    def __init__(self, config):
        # Holds configuration plus the artifacts produced by load_data:
        # iterators for each split, the torchtext vocab, and embedding vectors.
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}
def parse_label(self, label):
'''
Get the actual labels from label string
Input:
label (string) : labels of the form '__label__2'
Returns:
label (int) : integer value corresponding to label string
'''
return int(label.strip()[-1])
def get_pandas_df(self, filename):
'''
Load the data into Pandas.DataFrame object
This will be used to convert data to torchtext object
'''
with open(filename, 'r') as datafile:
data = [line.strip().split(',', maxsplit=1) for line in datafile]
data_text = list(map(lambda x: x[1], data))
data_label = list(map(lambda x: self.parse_label(x[0]), data))
full_df = pd.DataFrame({"text":data_text, "label":data_label})
return full_df
    def load_data(self, w2v_file, train_file, test_file, val_file=None):
        '''
        Loads the data from files
        Sets up iterators for training, validation and test data
        Also create vocabulary and word embeddings based on the data
        Inputs:
            w2v_file (String): absolute path to file containing word embeddings (GloVe/Word2Vec)
            train_file (String): absolute path to training file
            test_file (String): absolute path to test file
            val_file (String): absolute path to validation file
        '''
        # NOTE(review): spacy.load('en') is the legacy shortcut; modern spaCy
        # releases may require the 'en_core_web_sm' package name instead.
        NLP = spacy.load('en')
        tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != " "]
        # Creating Field for data (this uses the legacy torchtext Field API).
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text",TEXT),("label",LABEL)]
        # Load data from pd.DataFrame into torchtext.data.Dataset
        train_df = self.get_pandas_df(train_file)
        train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
        train_data = data.Dataset(train_examples, datafields)
        test_df = self.get_pandas_df(test_file)
        test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
        test_data = data.Dataset(test_examples, datafields)
        # If validation file exists, load it. Otherwise get validation data
        # from training data (random 80/20 split).
        if val_file:
            val_df = self.get_pandas_df(val_file)
            val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]
            val_data = data.Dataset(val_examples, datafields)
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)
        # Vocabulary is built from the training split only; pretrained vectors
        # are looked up from w2v_file for every in-vocabulary token.
        TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
        self.word_embeddings = TEXT.vocab.vectors
        self.vocab = TEXT.vocab
        # Training batches are shuffled; val/test keep a fixed order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)
        print ("Loaded {} training examples".format(len(train_data)))
        print ("Loaded {} test examples".format(len(test_data)))
        print ("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Return the accuracy of `model` over all batches in `iterator`.

    Predictions are argmax class indices shifted by +1 to match the 1-based
    labels produced by Dataset.parse_label.
    """
    predictions = []
    targets = []
    for batch in iterator:
        x = batch.text.cuda() if torch.cuda.is_available() else batch.text
        logits = model(x)
        # +1 because model outputs are 0-based while dataset labels are 1-based.
        batch_preds = torch.max(logits.cpu().data, 1)[1] + 1
        predictions.extend(batch_preds.numpy())
        targets.extend(batch.label.numpy())
    return accuracy_score(targets, np.array(predictions).flatten())
return score | 4,462 | 37.145299 | 97 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_fastText/model.py | # model.py
import torch
from torch import nn
import numpy as np
from utils import *
class fastText(nn.Module):
def __init__(self, config, vocab_size, word_embeddings):
super(fastText, self).__init__()
self.config = config
# Embedding Layer
self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
self.embeddings.weight = nn.Parameter(word_embeddings, requires_grad=False)
# Hidden Layer
self.fc1 = nn.Linear(self.config.embed_size, self.config.hidden_size)
# Output Layer
self.fc2 = nn.Linear(self.config.hidden_size, self.config.output_size)
# Softmax non-linearity
self.softmax = nn.Softmax()
def forward(self, x):
embedded_sent = self.embeddings(x).permute(1,0,2)
h = self.fc1(embedded_sent.mean(1))
z = self.fc2(h)
return self.softmax(z)
    def add_optimizer(self, optimizer):
        # Attach the optimizer that run_epoch steps (configured in train.py).
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Attach the loss function that run_epoch minimises (configured in train.py).
        self.loss_op = loss_op
def reduce_lr(self):
print("Reducing LR")
for g in self.optimizer.param_groups:
g['lr'] = g['lr'] / 2
def run_epoch(self, train_iterator, val_iterator, epoch):
train_losses = []
val_accuracies = []
losses = []
# Reduce learning rate as number of epochs increase
if (epoch == int(self.config.max_epochs/3)) or (epoch == int(2*self.config.max_epochs/3)):
self.reduce_lr()
for i, batch in enumerate(train_iterator):
self.optimizer.zero_grad()
if torch.cuda.is_available():
x = batch.text.cuda()
y = (batch.label - 1).type(torch.cuda.LongTensor)
else:
x = batch.text
y = (batch.label - 1).type(torch.LongTensor)
y_pred = self.__call__(x)
loss = self.loss_op(y_pred, y)
loss.backward()
losses.append(loss.data.cpu().numpy())
self.optimizer.step()
if i % 100 == 0:
print("Iter: {}".format(i+1))
avg_train_loss = np.mean(losses)
train_losses.append(avg_train_loss)
print("\tAverage training loss: {:.5f}".format(avg_train_loss))
losses = []
# Evalute Accuracy on validation set
val_accuracy = evaluate_model(self, val_iterator)
print("\tVal Accuracy: {:.4f}".format(val_accuracy))
self.train()
return train_losses, val_accuracies | 2,709 | 33.74359 | 98 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_fastText/train.py | # train.py
from utils import *
from model import *
from config import Config
import numpy as np
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__=='__main__':
    config = Config()
    # Optional CLI overrides: python train.py [train_file [test_file]]
    # Fix: the guards were off by one (`> 2`/`> 3`); argv[1]/argv[2] only
    # require 2/3 argv entries respectively.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    w2v_file = '../data/glove.840B.300d.txt'
    dataset = Dataset(config)
    dataset.load_data(w2v_file, train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = fastText(config, len(dataset.vocab), dataset.word_embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Final accuracies on all three splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
print ('Final Test Accuracy: {:.4f}'.format(test_acc)) | 1,740 | 31.849057 | 98 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_fastText/old_code/model.py | # model.py
import torch
from torch import nn
from torch import Tensor
from torch.autograd import Variable
import numpy as np
from sklearn.metrics import accuracy_score
class fastText(nn.Module):
    """Legacy fastText classifier operating on precomputed sentence vectors.

    Unlike the newer implementation, forward() takes an already-averaged
    embedding vector per sample, and the training loop is CUDA-only
    (tensors are moved with unconditional .cuda() calls).
    """
    def __init__(self, config):
        # Expects config to provide embed_size, hidden_size, output_size,
        # batch_size.
        super(fastText, self).__init__()
        self.config = config
        # Hidden Layer
        self.fc1 = nn.Linear(self.config.embed_size, self.config.hidden_size)
        # Output Layer
        self.fc2 = nn.Linear(self.config.hidden_size, self.config.output_size)
        # Softmax non-linearity
        # NOTE(review): old_code/train.py pairs this with nn.NLLLoss, which
        # expects log-probabilities — plain Softmax yields -p, not -log p.
        self.softmax = nn.Softmax()
    def forward(self, x):
        # x: (batch_size, embed_size) averaged sentence vectors.
        h = self.fc1(x)
        z = self.fc2(h)
        return self.softmax(z)
    def add_optimizer(self, optimizer):
        # Attach the optimizer stepped by run_epoch (set from train.py).
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Attach the loss function minimised by run_epoch (set from train.py).
        self.loss_op = loss_op
    def run_epoch(self, train_data, val_data):
        """One training epoch; logs loss/val accuracy every 50 iterations.

        train_data/val_data are [features, labels] pairs of numpy arrays;
        labels are 1-based and shifted with `y-1` for the loss. Requires CUDA.
        Returns (train_losses, val_accuracies) collected at each checkpoint.
        """
        train_x, train_y = train_data[0], train_data[1]
        val_x, val_y = val_data[0], val_data[1]
        iterator = data_iterator(train_x, train_y, self.config.batch_size)
        train_losses = []
        val_accuracies = []
        losses = []
        for i, (x,y) in enumerate(iterator):
            self.optimizer.zero_grad()
            x = Tensor(x).cuda()
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, torch.cuda.LongTensor(y-1))
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if (i + 1) % 50 == 0:
                print("Iter: {}".format(i+1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                # Evaluate accuracy on the validation set (Dropout-free model,
                # but eval()/train() toggling is kept for safety).
                self.eval()
                all_preds = []
                val_iterator = data_iterator(val_x, val_y, self.config.batch_size)
                for x, y in val_iterator:
                    x = Variable(Tensor(x))
                    y_pred = self.__call__(x.cuda())
                    # +1: argmax is 0-based, labels are 1-based.
                    predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
                    all_preds.extend(predicted.numpy())
                score = accuracy_score(val_y, np.array(all_preds).flatten())
                val_accuracies.append(score)
                print("\tVal Accuracy: {:.4f}".format(score))
                self.train()
        return train_losses, val_accuracies
| 2,569 | 32.815789 | 82 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_fastText/old_code/train.py | # train.py
from utils import *
from config import Config
from sklearn.model_selection import train_test_split
import numpy as np
from tqdm import tqdm
import sys
import torch.optim as optim
from torch import nn, Tensor
from torch.autograd import Variable
import torch
from sklearn.metrics import accuracy_score
def get_accuracy(model, test_x, test_y):
    """Accuracy of `model` on (test_x, test_y); requires a CUDA device.

    Predictions are argmax indices + 1 to match the 1-based labels.
    """
    preds = []
    for x, y in data_iterator(test_x, test_y):
        batch = Variable(Tensor(x))
        scores = model(batch.cuda())
        preds.extend((torch.max(scores.cpu().data, 1)[1] + 1).numpy())
    return accuracy_score(test_y, np.array(preds).flatten())
if __name__=='__main__':
    # Optional CLI overrides: python train.py [train_file [test_file]]
    # Fix: the guards were off by one (`> 2`/`> 3`); argv[1]/argv[2] only
    # require 2/3 argv entries respectively.
    train_path = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_path = sys.argv[1]
    test_path = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_path = sys.argv[2]
    train_text, train_labels, vocab = get_data(train_path)
    train_text, val_text, train_label, val_label = train_test_split(train_text, train_labels, test_size=0.2)
    # Read Word Embeddings
    w2vfile = '../data/glove.840B.300d.txt'
    word_embeddings = get_word_embeddings(w2vfile, vocab.word_to_index, embedsize=300)
    # Pre-encode every sentence as an averaged embedding vector.
    train_x = np.array([encode_text(text, word_embeddings) for text in tqdm(train_text)])
    train_y = np.array(train_label)
    val_x = np.array([encode_text(text, word_embeddings) for text in tqdm(val_text)])
    val_y = np.array(val_label)
    # Create Model with specified optimizer and loss function
    ##############################################################
    config = Config()
    model = fastText(config)
    model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_data = [train_x, train_y]
    val_data = [val_x, val_y]
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_losses,val_accuracies = model.run_epoch(train_data, val_data)
        print("\tAverage training loss: {:.5f}".format(np.mean(train_losses)))
        print("\tAverage Val Accuracy (per 50 iterations): {:.4f}".format(np.mean(val_accuracies)))
        # Reduce learning rate as number of epochs increase.
        # NOTE(review): these assignments re-run every epoch past the
        # threshold, but the value is constant so that is harmless.
        if i > 0.5 * config.max_epochs:
            print("Reducing LR")
            for g in optimizer.param_groups:
                g['lr'] = 0.25
        if i > 0.75 * config.max_epochs:
            print("Reducing LR")
            for g in optimizer.param_groups:
                g['lr'] = 0.15
    # Get Accuracy of final model
    test_text, test_labels, test_vocab = get_data(test_path)
    test_x = np.array([encode_text(text, word_embeddings) for text in tqdm(test_text)])
    test_y = np.array(test_labels)
    train_acc = get_accuracy(model, train_x, train_y)
    val_acc = get_accuracy(model, val_x, val_y)
    test_acc = get_accuracy(model, test_x, test_y)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
| 3,314 | 36.247191 | 108 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_RCNN/utils.py | # utils.py
import torch
from torchtext import data
from torchtext.vocab import Vectors
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    def __init__(self, config):
        # Holds configuration plus the artifacts produced by load_data:
        # iterators for each split, the torchtext vocab, and embedding vectors.
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}
def parse_label(self, label):
'''
Get the actual labels from label string
Input:
label (string) : labels of the form '__label__2'
Returns:
label (int) : integer value corresponding to label string
'''
return int(label.strip()[-1])
def get_pandas_df(self, filename):
'''
Load the data into Pandas.DataFrame object
This will be used to convert data to torchtext object
'''
with open(filename, 'r') as datafile:
data = [line.strip().split(',', maxsplit=1) for line in datafile]
data_text = list(map(lambda x: x[1], data))
data_label = list(map(lambda x: self.parse_label(x[0]), data))
full_df = pd.DataFrame({"text":data_text, "label":data_label})
return full_df
    def load_data(self, w2v_file, train_file, test_file, val_file=None):
        '''
        Loads the data from files
        Sets up iterators for training, validation and test data
        Also create vocabulary and word embeddings based on the data
        Inputs:
            w2v_file (String): absolute path to file containing word embeddings (GloVe/Word2Vec)
            train_file (String): absolute path to training file
            test_file (String): absolute path to test file
            val_file (String): absolute path to validation file
        '''
        # NOTE(review): spacy.load('en') is the legacy shortcut; modern spaCy
        # releases may require the 'en_core_web_sm' package name instead.
        NLP = spacy.load('en')
        tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != " "]
        # Creating Field for data. Unlike the fastText variant, every sequence
        # is padded/truncated to a fixed length (max_sen_len) via fix_length.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text",TEXT),("label",LABEL)]
        # Load data from pd.DataFrame into torchtext.data.Dataset
        train_df = self.get_pandas_df(train_file)
        train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
        train_data = data.Dataset(train_examples, datafields)
        test_df = self.get_pandas_df(test_file)
        test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
        test_data = data.Dataset(test_examples, datafields)
        # If validation file exists, load it. Otherwise get validation data
        # from training data (random 80/20 split).
        if val_file:
            val_df = self.get_pandas_df(val_file)
            val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]
            val_data = data.Dataset(val_examples, datafields)
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)
        # Vocabulary from the training split; pretrained vectors from w2v_file.
        TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
        self.word_embeddings = TEXT.vocab.vectors
        self.vocab = TEXT.vocab
        # Training batches are shuffled; val/test keep a fixed order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)
        print ("Loaded {} training examples".format(len(train_data)))
        print ("Loaded {} test examples".format(len(test_data)))
        print ("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Compute accuracy of `model` across every batch of `iterator`.

    Model outputs are argmaxed and shifted by +1 to line up with the
    1-based labels from Dataset.parse_label.
    """
    pred_list = []
    true_list = []
    for batch in iterator:
        inputs = batch.text.cuda() if torch.cuda.is_available() else batch.text
        outputs = model(inputs)
        # +1 converts 0-based argmax indices to the 1-based label space.
        pred_list.extend((torch.max(outputs.cpu().data, 1)[1] + 1).numpy())
        true_list.extend(batch.label.numpy())
    return accuracy_score(true_list, np.array(pred_list).flatten())
return score | 4,498 | 37.452991 | 110 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_RCNN/model.py | # model.py
import torch
from torch import nn
import numpy as np
from torch.nn import functional as F
from utils import *
class RCNN(nn.Module):
def __init__(self, config, vocab_size, word_embeddings):
super(RCNN, self).__init__()
self.config = config
# Embedding Layer
self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
self.embeddings.weight = nn.Parameter(word_embeddings, requires_grad=False)
# Bi-directional LSTM for RCNN
self.lstm = nn.LSTM(input_size = self.config.embed_size,
hidden_size = self.config.hidden_size,
num_layers = self.config.hidden_layers,
dropout = self.config.dropout_keep,
bidirectional = True)
self.dropout = nn.Dropout(self.config.dropout_keep)
# Linear layer to get "convolution output" to be passed to Pooling Layer
self.W = nn.Linear(
self.config.embed_size + 2*self.config.hidden_size,
self.config.hidden_size_linear
)
# Tanh non-linearity
self.tanh = nn.Tanh()
# Fully-Connected Layer
self.fc = nn.Linear(
self.config.hidden_size_linear,
self.config.output_size
)
# Softmax non-linearity
self.softmax = nn.Softmax()
def forward(self, x):
# x.shape = (seq_len, batch_size)
embedded_sent = self.embeddings(x)
# embedded_sent.shape = (seq_len, batch_size, embed_size)
lstm_out, (h_n,c_n) = self.lstm(embedded_sent)
# lstm_out.shape = (seq_len, batch_size, 2 * hidden_size)
input_features = torch.cat([lstm_out,embedded_sent], 2).permute(1,0,2)
# final_features.shape = (batch_size, seq_len, embed_size + 2*hidden_size)
linear_output = self.tanh(
self.W(input_features)
)
# linear_output.shape = (batch_size, seq_len, hidden_size_linear)
linear_output = linear_output.permute(0,2,1) # Reshaping fot max_pool
max_out_features = F.max_pool1d(linear_output, linear_output.shape[2]).squeeze(2)
# max_out_features.shape = (batch_size, hidden_size_linear)
max_out_features = self.dropout(max_out_features)
final_out = self.fc(max_out_features)
return self.softmax(final_out)
    def add_optimizer(self, optimizer):
        # Attach the optimizer that run_epoch steps (configured in train.py).
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Attach the loss function that run_epoch minimises (configured in train.py).
        self.loss_op = loss_op
def reduce_lr(self):
print("Reducing LR")
for g in self.optimizer.param_groups:
g['lr'] = g['lr'] / 2
def run_epoch(self, train_iterator, val_iterator, epoch):
train_losses = []
val_accuracies = []
losses = []
# Reduce learning rate as number of epochs increase
if (epoch == int(self.config.max_epochs/3)) or (epoch == int(2*self.config.max_epochs/3)):
self.reduce_lr()
for i, batch in enumerate(train_iterator):
self.optimizer.zero_grad()
if torch.cuda.is_available():
x = batch.text.cuda()
y = (batch.label - 1).type(torch.cuda.LongTensor)
else:
x = batch.text
y = (batch.label - 1).type(torch.LongTensor)
y_pred = self.__call__(x)
loss = self.loss_op(y_pred, y)
loss.backward()
losses.append(loss.data.cpu().numpy())
self.optimizer.step()
if i % 100 == 0:
print("Iter: {}".format(i+1))
avg_train_loss = np.mean(losses)
train_losses.append(avg_train_loss)
print("\tAverage training loss: {:.5f}".format(avg_train_loss))
losses = []
# Evalute Accuracy on validation set
val_accuracy = evaluate_model(self, val_iterator)
print("\tVal Accuracy: {:.4f}".format(val_accuracy))
self.train()
return train_losses, val_accuracies | 4,267 | 35.793103 | 98 | py |
Text-Classification-Models-Pytorch | Text-Classification-Models-Pytorch-master/Model_RCNN/train.py | # train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__=='__main__':
    config = Config()
    # argv[1] (optional): training file; argv[2] (optional): test file.
    # Fix: the previous guards (> 2 / > 3) were off by one, so supplied
    # paths were ignored unless an extra argument was appended.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    w2v_file = '../data/glove.840B.300d.txt'
    dataset = Dataset(config)
    dataset.load_data(w2v_file, train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = RCNN(config, len(dataset.vocab), dataset.word_embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Final evaluation on all three splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
# utils.py
import torch
from torchtext import data
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    # Builds torchtext Fields, vocabulary and train/val/test BucketIterators
    # from CSV-like files whose rows are "<label>,<text>".
    def __init__(self, config):
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}
    def parse_label(self, label):
        '''
        Get the actual labels from label string
        Input:
            label (string) : labels of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        # Only the final character is used, so labels must be single-digit.
        return int(label.strip()[-1])
    def get_pandas_df(self, filename):
        '''
        Load the data into Pandas.DataFrame object
        This will be used to convert data to torchtext object
        '''
        # maxsplit=1 keeps commas inside the text column intact.
        with open(filename, 'r') as datafile:
            data = [line.strip().split(',', maxsplit=1) for line in datafile]
            data_text = list(map(lambda x: x[1], data))
            data_label = list(map(lambda x: self.parse_label(x[0]), data))
        full_df = pd.DataFrame({"text":data_text, "label":data_label})
        return full_df
    def load_data(self, train_file, test_file=None, val_file=None):
        '''
        Loads the data from files
        Sets up iterators for training, validation and test data
        Also create vocabulary and word embeddings based on the data
        Inputs:
            train_file (String): path to training file
            test_file (String): path to test file
            val_file (String): path to validation file
        '''
        NLP = spacy.load('en')
        tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != " "]
        # Creating Field for data; sequences are padded/truncated to max_sen_len.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text",TEXT),("label",LABEL)]
        # Load data from pd.DataFrame into torchtext.data.Dataset
        train_df = self.get_pandas_df(train_file)
        train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
        train_data = data.Dataset(train_examples, datafields)
        test_df = self.get_pandas_df(test_file)
        test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
        test_data = data.Dataset(test_examples, datafields)
        # If validation file exists, load it. Otherwise get validation data from training data
        if val_file:
            val_df = self.get_pandas_df(val_file)
            val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]
            val_data = data.Dataset(val_examples, datafields)
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)
        # Vocabulary is built from training data only (no pretrained vectors here).
        TEXT.build_vocab(train_data)
        self.vocab = TEXT.vocab
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)
        print ("Loaded {} training examples".format(len(train_data)))
        print ("Loaded {} test examples".format(len(test_data)))
        print ("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Return classification accuracy of `model` over all batches in `iterator`.

    Predictions are shifted back to the dataset's 1-based label convention
    before being compared with batch.label.
    """
    all_preds = []
    all_y = []
    # Inference only: disable autograd bookkeeping to save memory/compute.
    with torch.no_grad():
        for idx, batch in enumerate(iterator):
            if torch.cuda.is_available():
                x = batch.text.cuda()
            else:
                x = batch.text
            y_pred = model(x)
            # argmax over the class dimension; +1 restores 1-based labels.
            predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
            all_preds.extend(predicted.numpy())
            all_y.extend(batch.label.numpy())
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score
# Model.py
import torch
import torch.nn as nn
from copy import deepcopy
from train_utils import Embeddings,PositionalEncoding
from attention import MultiHeadedAttention
from encoder import EncoderLayer, Encoder
from feed_forward import PositionwiseFeedForward
import numpy as np
from utils import *
class Transformer(nn.Module):
    """Transformer-encoder text classifier.

    Embeddings + positional encoding -> N encoder layers -> linear head
    over the last position's representation -> softmax.
    """
    def __init__(self, config, src_vocab):
        super(Transformer, self).__init__()
        self.config = config
        h, N, dropout = self.config.h, self.config.N, self.config.dropout
        d_model, d_ff = self.config.d_model, self.config.d_ff
        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)
        self.encoder = Encoder(EncoderLayer(config.d_model, deepcopy(attn), deepcopy(ff), dropout), N)
        self.src_embed = nn.Sequential(Embeddings(config.d_model, src_vocab), deepcopy(position)) #Embeddings followed by PE
        # Fully-Connected Layer
        self.fc = nn.Linear(
            self.config.d_model,
            self.config.output_size
        )
        # Softmax non-linearity
        # NOTE(review): train.py pairs this output with nn.NLLLoss, which
        # expects log-probabilities (nn.LogSoftmax) — confirm intended loss.
        self.softmax = nn.Softmax()
    def forward(self, x):
        # x: (max_sen_len, batch_size) -> permute to (batch_size, sen_len)
        embedded_sents = self.src_embed(x.permute(1,0)) # shape = (batch_size, sen_len, d_model)
        encoded_sents = self.encoder(embedded_sents)
        # Use the last position's representation as the sentence feature.
        final_feature_map = encoded_sents[:,-1,:]
        final_out = self.fc(final_feature_map)
        return self.softmax(final_out)
    def add_optimizer(self, optimizer):
        # Store the optimizer so run_epoch can drive the update steps.
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Store the loss function used by run_epoch.
        self.loss_op = loss_op
    def reduce_lr(self):
        # Step decay: halve the learning rate of every parameter group.
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2
    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch; report loss and validation accuracy every 100 iters.

        Returns:
            (train_losses, val_accuracies) collected at the 100-iteration
            checkpoints. (Fix: val_accuracies was previously never filled.)
        """
        train_losses = []
        val_accuracies = []
        losses = []
        # Reduce learning rate at 1/3 and 2/3 of the total epoch budget.
        if (epoch == int(self.config.max_epochs/3)) or (epoch == int(2*self.config.max_epochs/3)):
            self.reduce_lr()
        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                # Dataset labels are 1-based; shift to 0-based class indices.
                y = (batch.label - 1).type(torch.cuda.LongTensor)
            else:
                x = batch.text
                y = (batch.label - 1).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if i % 100 == 0:
                print("Iter: {}".format(i+1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                # Evalute Accuracy on validation set
                val_accuracy = evaluate_model(self, val_iterator)
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                # Restore train mode after evaluation.
                self.train()
        return train_losses, val_accuracies
# encoder.py
from torch import nn
from train_utils import clones
from sublayer import LayerNorm, SublayerOutput
class Encoder(nn.Module):
    '''
    Transformer Encoder: a stack of N identical layers followed by a
    final layer normalisation.
    '''
    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)
    def forward(self, x, mask=None):
        # Thread the representation through every layer in order, then normalise.
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, mask)
        return self.norm(out)
class EncoderLayer(nn.Module):
    '''
    One encoder layer: self-attention followed by a position-wise feed
    forward network, each wrapped in SublayerOutput (residual + layer norm).
    '''
    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # Two residual/norm wrappers: one per sublayer.
        self.sublayer_output = clones(SublayerOutput(size, dropout), 2)
        self.size = size
    def forward(self, x, mask=None):
        "Transformer Encoder"
        x = self.sublayer_output[0](x, lambda x: self.self_attn(x, x, x, mask)) # Encoder self-attention
        return self.sublayer_output[1](x, self.feed_forward)
# feed_forward.py
from torch import nn
import torch.nn.functional as F
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward network: Linear -> ReLU -> Dropout -> Linear.

    Applied identically at every sequence position; maps d_model -> d_ff -> d_model.
    """
    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        "Implements FFN equation."
        return self.w_2(self.dropout(F.relu(self.w_1(x))))
# sublayer.py
import torch
from torch import nn
class LayerNorm(nn.Module):
    "Layer normalisation over the last dimension with learnable gain and bias."
    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        # Gain initialised to 1, bias to 0, so the module starts as a
        # plain standardisation.
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps
    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
class SublayerOutput(nn.Module):
    '''
    Residual connection around an arbitrary sublayer: the sublayer sees the
    layer-normalised input, its output is dropped out, then added back to x.
    '''
    def __init__(self, size, dropout):
        super(SublayerOutput, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x, sublayer):
        "Apply residual connection to any sublayer with the same size."
        normed = self.norm(x)
        branch = self.dropout(sublayer(normed))
        return x + branch
# train_utils.py
import torch
from torch import nn
from torch.autograd import Variable
import copy
import math
def clones(module, N):
    "Return a ModuleList holding N independent deep copies of `module`."
    copies = (copy.deepcopy(module) for _ in range(N))
    return nn.ModuleList(copies)
class Embeddings(nn.Module):
    '''
    Token embedding lookup whose output is scaled by sqrt(d_model),
    as in "Attention Is All You Need".
    '''
    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model
    def forward(self, x):
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class PositionalEncoding(nn.Module):
    """Add sinusoidal position information to embeddings, then apply dropout.

    The (1, max_len, d_model) encoding table is computed once and stored as
    a non-trainable buffer.
    """
    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             -(math.log(10000.0) / d_model))
        # Broadcast (max_len, 1) x (d_model/2,) directly in torch instead of
        # the original's round-trip through numpy arrays.
        angles = position.float() * div_term
        pe[:, 0::2] = torch.sin(angles)
        pe[:, 1::2] = torch.cos(angles)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)
    def forward(self, x):
        # The buffer does not require grad, so no autograd wrapper
        # (the deprecated Variable API) is needed.
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)
# attention.py
import torch
from torch import nn
import math
import torch.nn.functional as F
from train_utils import clones
def attention(query, key, value, mask=None, dropout=None):
    "Scaled dot product attention: softmax(Q.K^T / sqrt(d_k)) applied to V."
    scale = math.sqrt(query.size(-1))
    scores = torch.matmul(query, key.transpose(-2, -1)) / scale
    if mask is not None:
        # Masked positions get a large negative score so softmax zeroes them.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    # Return both the attended values and the attention weights.
    return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    # Projects Q/K/V with three of four shared Linear layers, runs scaled
    # dot-product attention per head, concatenates heads, and applies the
    # fourth Linear as the output projection.
    def __init__(self, h, d_model, dropout=0.1):
        "Take in model size and number of heads."
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        # Four projections: query, key, value, and the final output layer.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None  # last attention weights, kept for inspection
        self.dropout = nn.Dropout(p=dropout)
    def forward(self, query, key, value, mask=None):
        "Implements Multi-head attention"
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        # 1) Do all the linear projections in batch from d_model => h x d_k
        query, key, value = \
            [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
             for l, x in zip(self.linears, (query, key, value))]
        # 2) Apply attention on all the projected vectors in batch.
        x, self.attn = attention(query, key, value, mask=mask,
                                 dropout=self.dropout)
        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous() \
             .view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
# train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__=='__main__':
    config = Config()
    # argv[1] (optional): training file; argv[2] (optional): test file.
    # Fix: the previous guards (> 2 / > 3) were off by one, so supplied
    # paths were ignored unless an extra argument was appended.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    dataset = Dataset(config)
    dataset.load_data(train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = Transformer(config, len(dataset.vocab))
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Final evaluation on all three splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
# utils.py
import torch
from torchtext import data
from torchtext.vocab import Vectors
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    # Builds torchtext Fields, vocabulary (with pretrained vectors) and
    # train/val/test BucketIterators from CSV-like "<label>,<text>" files.
    def __init__(self, config):
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}
    def parse_label(self, label):
        '''
        Get the actual labels from label string
        Input:
            label (string) : labels of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        # Only the final character is used, so labels must be single-digit.
        return int(label.strip()[-1])
    def get_pandas_df(self, filename):
        '''
        Load the data into Pandas.DataFrame object
        This will be used to convert data to torchtext object
        '''
        # maxsplit=1 keeps commas inside the text column intact.
        with open(filename, 'r') as datafile:
            data = [line.strip().split(',', maxsplit=1) for line in datafile]
            data_text = list(map(lambda x: x[1], data))
            data_label = list(map(lambda x: self.parse_label(x[0]), data))
        full_df = pd.DataFrame({"text":data_text, "label":data_label})
        return full_df
    def load_data(self, w2v_file, train_file, test_file, val_file=None):
        '''
        Loads the data from files
        Sets up iterators for training, validation and test data
        Also create vocabulary and word embeddings based on the data
        Inputs:
            w2v_file (String): absolute path to file containing word embeddings (GloVe/Word2Vec)
            train_file (String): absolute path to training file
            test_file (String): absolute path to test file
            val_file (String): absolute path to validation file
        '''
        NLP = spacy.load('en')
        tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != " "]
        # Creating Field for data; sequences are padded/truncated to max_sen_len.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text",TEXT),("label",LABEL)]
        # Load data from pd.DataFrame into torchtext.data.Dataset
        train_df = self.get_pandas_df(train_file)
        train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
        train_data = data.Dataset(train_examples, datafields)
        test_df = self.get_pandas_df(test_file)
        test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
        test_data = data.Dataset(test_examples, datafields)
        # If validation file exists, load it. Otherwise get validation data from training data
        if val_file:
            val_df = self.get_pandas_df(val_file)
            val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]
            val_data = data.Dataset(val_examples, datafields)
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)
        # Attach pretrained vectors for every word in the training vocabulary.
        TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
        self.word_embeddings = TEXT.vocab.vectors
        self.vocab = TEXT.vocab
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)
        print ("Loaded {} training examples".format(len(train_data)))
        print ("Loaded {} test examples".format(len(test_data)))
        print ("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Return classification accuracy of `model` over all batches in `iterator`.

    Predictions are shifted back to the dataset's 1-based label convention
    before being compared with batch.label.
    """
    all_preds = []
    all_y = []
    # Inference only: disable autograd bookkeeping to save memory/compute.
    with torch.no_grad():
        for idx, batch in enumerate(iterator):
            if torch.cuda.is_available():
                x = batch.text.cuda()
            else:
                x = batch.text
            y_pred = model(x)
            # argmax over the class dimension; +1 restores 1-based labels.
            predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
            all_preds.extend(predicted.numpy())
            all_y.extend(batch.label.numpy())
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score
# model.py
import torch
from torch import nn
import numpy as np
from utils import *
class TextRNN(nn.Module):
    """(Bi)LSTM text classifier over frozen pretrained word embeddings.

    The final hidden states of all layers/directions are concatenated and
    fed to a linear head followed by softmax.
    """
    def __init__(self, config, vocab_size, word_embeddings):
        super(TextRNN, self).__init__()
        self.config = config
        # Embedding Layer: pretrained vectors, frozen (requires_grad=False).
        self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
        self.embeddings.weight = nn.Parameter(word_embeddings, requires_grad=False)
        # NOTE(review): config.dropout_keep is passed as the dropout
        # *probability* here and in nn.Dropout below, despite the "keep"
        # name — confirm the config's semantics.
        self.lstm = nn.LSTM(input_size = self.config.embed_size,
                            hidden_size = self.config.hidden_size,
                            num_layers = self.config.hidden_layers,
                            dropout = self.config.dropout_keep,
                            bidirectional = self.config.bidirectional)
        self.dropout = nn.Dropout(self.config.dropout_keep)
        # Fully-Connected Layer: input is hidden_size per layer per direction.
        self.fc = nn.Linear(
            self.config.hidden_size * self.config.hidden_layers * (1+self.config.bidirectional),
            self.config.output_size
        )
        # Softmax non-linearity
        # NOTE(review): train.py pairs this output with nn.NLLLoss, which
        # expects log-probabilities (nn.LogSoftmax) — confirm intended loss.
        self.softmax = nn.Softmax()
    def forward(self, x):
        # x.shape = (max_sen_len, batch_size)
        embedded_sent = self.embeddings(x)
        # embedded_sent.shape = (max_sen_len, batch_size, embed_size)
        lstm_out, (h_n,c_n) = self.lstm(embedded_sent)
        final_feature_map = self.dropout(h_n) # shape=(num_layers * num_directions, batch, hidden_size)
        # Concatenate all layer/direction states -> (batch, hidden_size * layers * directions)
        final_feature_map = torch.cat([final_feature_map[i,:,:] for i in range(final_feature_map.shape[0])], dim=1)
        final_out = self.fc(final_feature_map)
        return self.softmax(final_out)
    def add_optimizer(self, optimizer):
        # Store the optimizer so run_epoch can drive the update steps.
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Store the loss function used by run_epoch.
        self.loss_op = loss_op
    def reduce_lr(self):
        # Step decay: halve the learning rate of every parameter group.
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2
    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch; report loss and validation accuracy every 100 iters.

        Returns:
            (train_losses, val_accuracies) collected at the 100-iteration
            checkpoints. (Fix: val_accuracies was previously never filled.)
        """
        train_losses = []
        val_accuracies = []
        losses = []
        # Reduce learning rate at 1/3 and 2/3 of the total epoch budget.
        if (epoch == int(self.config.max_epochs/3)) or (epoch == int(2*self.config.max_epochs/3)):
            self.reduce_lr()
        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                # Dataset labels are 1-based; shift to 0-based class indices.
                y = (batch.label - 1).type(torch.cuda.LongTensor)
            else:
                x = batch.text
                y = (batch.label - 1).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if i % 100 == 0:
                print("Iter: {}".format(i+1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                # Evalute Accuracy on validation set
                val_accuracy = evaluate_model(self, val_iterator)
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                # Restore train mode after evaluation.
                self.train()
        return train_losses, val_accuracies
# train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__=='__main__':
    config = Config()
    # argv[1] (optional): training file; argv[2] (optional): test file.
    # Fix: the previous guards (> 2 / > 3) were off by one, so supplied
    # paths were ignored unless an extra argument was appended.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    w2v_file = '../data/glove.840B.300d.txt'
    dataset = Dataset(config)
    dataset.load_data(w2v_file, train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = TextRNN(config, len(dataset.vocab), dataset.word_embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Final evaluation on all three splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
from keras import models
from keras import layers
from numpy import array
# Read one example image (a line of space-separated floats) from disk.
mnistfile = open('mnist_example', 'r')
mnistdata = mnistfile.read()
mnistdata = mnistdata.splitlines()[0].split(' ')
mnistdataf = []
for m in mnistdata:
    mnistdataf.append(float(m))
mnistdata = array(mnistdataf)
mnistdata = mnistdata.reshape((1, 784))
# NOTE(review): the 784-dim MNIST vector above is discarded here and
# replaced by a toy 5-dim input — looks like leftover debugging; confirm.
mnistdata = [1, 2, 1, 2, 1]
mnistdata = array(mnistdata)
mnistdata = mnistdata.reshape((1, 5))
# Tiny 3-layer dense network matching the 5-dim toy input.
network = models.Sequential()
network.add(layers.Dense(8, input_shape=(5,)))
network.add(layers.Dense(5, activation='relu'))
network.add(layers.Dense(2, activation='softmax'))
network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
print(network.predict(mnistdata))
# Save the (untrained) model for DecodeKerasModel.py to decode.
network.save('simple.h5')
import sys
import sys
from keras import models

# Dump a Keras Dense-layer model's weights to a plain-text file:
# one header per layer (activation name, output/input sizes) followed by
# the flattened W matrix and bias vector, one value per line.
if len(sys.argv) < 3:
    print('usage: python DecodeKerasModel.py input output')
    sys.exit(1)
# Avoid shadowing the builtins `input`; paths come from the command line.
in_path = sys.argv[1]
out_path = sys.argv[2]
print(in_path)
model = models.load_model(in_path)
weights_list = model.get_weights()
print("#################################################################")
print("# Layer Numbers: " + str(len(weights_list)) + '\n')
# `with` guarantees the file is closed even if a write fails.
with open(out_path, 'w') as outputFile:
    outputFile.write("# Layer Numbers: " + str(int(len(weights_list)/2)) + '\n')
    # Weights come in (W, b) pairs, one pair per Dense layer.
    for l in range(int(len(weights_list)/2)):
        w = weights_list[l * 2]
        b = weights_list[l * 2 + 1]
        outputFile.write("# Layer Number: {}".format(l) + '\n')
        print("# Layer Number: {}".format(l) + '\n')
        # Activation name (e.g. "relu"/"softmax") parsed from the function repr.
        outputFile.write(model.layers[l].activation.__str__().split(' ')[1] + '\n')
        outputFile.write(str(len(b)) + ' ' + str(len(w)) + '\n')
        print(str(len(b)) + ' ' + str(len(w)) + '\n')
        outputFile.write("# W" + '\n')
        print(w.shape)
        for x in w:
            for y in x:
                outputFile.write(str(y) + '\n')
        outputFile.write("# B" + '\n')
        print(b.shape)
        for x in b:
            outputFile.write(str(x) + '\n')
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch_geometric.data import DataLoader as DataLoaderpyg
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch_geometric.transforms as T
from ogb.nodeproppred import Evaluator
class DotDict(dict):
    """A dict whose items can also be read and written as attributes."""
    def __init__(self, **entries):
        super().__init__()
        self.update(entries)
        # Point the attribute namespace at the mapping itself so that
        # d.key and d['key'] stay in sync.
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.ogb_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin the visible CUDA device and return the torch.device to use."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(gpu_id))
        return torch.device("cuda:" + str(gpu_id))

    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model, print its structure, and return the total
    number of parameters."""
    model = gnn_model(MODEL_NAME, net_params)
    total_param = 0
    print("MODEL DETAILS:\n")
    print(model)
    for weights in model.parameters():
        # print(weights.data.size())
        total_param += np.prod(list(weights.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Full train/validate/test loop for OGB node classification.

    Prepares the dataset (self-loops, edge features, positional encodings),
    trains with Adam + ReduceLROnPlateau under a tqdm progress bar, logs to
    TensorBoard, and writes config/result summaries to the paths in `dirs`.
    """
    start0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    # Without real edge features, fall back to constant 1-dim edge attributes.
    if not net_params['edge_feat']:
        edge_feat_dim = 1
        dataset.dataset.data.edge_attr = torch.ones(dataset.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
    if net_params['pos_enc']:
        print("[!] Adding graph positional encoding.")
        dataset._add_positional_encodings(net_params['pos_enc_dim'],DATASET_NAME)
        print('Time PE:',time.time()-start0)
    device = net_params['device']
    # ogbn-mag split indices are keyed by node type ('paper'); others are flat.
    if DATASET_NAME == 'ogbn-mag':
        dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train']['paper'].to(device),\
                                                            dataset.split_idx['valid']['paper'].to(device), \
                                                            dataset.split_idx['test']['paper'].to(device)
    else:
        dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train'].to(device), \
                                                            dataset.split_idx['valid'].to(device), \
                                                            dataset.split_idx['test'].to(device)
    # transform = T.ToSparseTensor() To do to save memory
    # self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework='pyg') for _, g in enumerate(dataset.train)]
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    # Write network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", dataset.split_idx['train'].size(0))
    print("Validation Graphs: ", dataset.split_idx['valid'].size(0))
    print("Test Graphs: ", dataset.split_idx['test'].size(0))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    evaluator = Evaluator(name = DATASET_NAME)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    # import train functions for all other GCNs
    if DATASET_NAME == 'ogbn-arxiv': # , 'ogbn-proteins''
        from train.train_ogb_node_classification import train_epoch_arxiv as train_epoch, evaluate_network_arxiv as evaluate_network
    elif DATASET_NAME == 'ogbn-proteins':
        from train.train_ogb_node_classification import train_epoch_proteins as train_epoch, evaluate_network_proteins as evaluate_network
    # elif DATASET_NAME == 'ogbn-mag':
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs']),ncols= 0) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                # for all other models common train function
                epoch_train_loss = train_epoch(model, optimizer, device, dataset.dataset[0], dataset.split_idx['train'])
                epoch_train_acc, epoch_val_acc, epoch_test_acc, epoch_val_loss = evaluate_network(model, device, dataset, evaluator)
                # _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                # TensorBoard scalars for this epoch.
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                # the function to save the checkpoint
                # torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Prune old checkpoint files, keeping only the latest epochs.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                # it used to test the scripts
                # if epoch == 1:
                #     break
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-start0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation with the last model state (accuracies as percentages).
    train_acc, val_acc, test_acc, _ = evaluate_network(model, device, dataset, evaluator)
    train_acc, val_acc, test_acc = 100 * train_acc, 100 * val_acc, 100 * test_acc
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nval ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, val_acc,train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
def main():
    """
    Entry point: parse CLI flags, overlay them on the JSON config, load the
    dataset, create output directories and launch the train/val pipeline.

    Every CLI value, when provided, overrides the corresponding config entry.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default= None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', action='store_true', default=False, help="Please give a value for pos_enc")
    parser.add_argument('--use_node_embedding', action='store_true', default=False)
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir: CLI wins over the config file
    MODEL_NAME = args.model if args.model is not None else config['model']
    DATASET_NAME = args.dataset if args.dataset is not None else config['dataset']
    # optimization parameters
    params = config['params']
    # models whose names end in 'pyg' run on PyTorch-Geometric, the rest on DGL
    params['framework'] = 'pyg' if MODEL_NAME.endswith('pyg') else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    # store_true flag is never None: simply record its boolean value
    params['use_node_embedding'] = bool(args.use_node_embedding)
    dataset = LoadData(DATASET_NAME = DATASET_NAME, use_node_embedding = params['use_node_embedding'])
    out_dir = args.out_dir if args.out_dir is not None else config['out_dir']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    # boolean net_params arrive as the strings 'True'/'False' on the CLI
    if args.residual is not None:
        net_params['residual'] = args.residual == 'True'
    if args.edge_feat is not None:
        net_params['edge_feat'] = args.edge_feat == 'True'
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = args.gated == 'True'
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = args.layer_norm == 'True'
    if args.batch_norm is not None:
        net_params['batch_norm'] = args.batch_norm == 'True'
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = args.linkpred == 'True'
    if args.cat is not None:
        net_params['cat'] = args.cat == 'True'
    if args.self_loop is not None:
        net_params['self_loop'] = args.self_loop == 'True'
    # store_true flag is never None: simply record its boolean value
    net_params['pos_enc'] = bool(args.pos_enc)
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # input feature dimension comes from the loaded graph (arxiv / 'ogbn-mag')
    net_params['in_dim'] = dataset.dataset[0].x.size(1)
    # ogbn-proteins is multi-label (one column per task); otherwise count distinct label rows
    net_params['n_classes'] = dataset.dataset[0].y.size(1) if DATASET_NAME == 'ogbn-proteins' else torch.unique(dataset.dataset[0].y,dim=0).size(0)
    # Build the timestamp ONCE so the log/ckpt/result/config paths always match
    # (the original called time.strftime four times and could straddle a second).
    run_tag = MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_log_dir = out_dir + 'logs/' + run_tag
    root_ckpt_dir = out_dir + 'checkpoints/' + run_tag
    write_file_name = out_dir + 'results/result_' + run_tag
    write_config_file = out_dir + 'configs/config_' + run_tag
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
main()
| 19,431 | 41.060606 | 202 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/test.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
import csv
import dgl
from scipy import sparse as sp
import numpy as np
# # coding=gbk
# from tqdm import trange
# from random import random,randint
# import time
#
# with trange(100) as t:
# for i in t:
# #t.set_description("GEN111 %i" % i)
# t.set_postfix(loss=8,gen=randint(1,999),str="h",lst=[1,2],lst11=[1,2],loss11=8)
# time.sleep(0.1)
# t.close()
# import dgl
# import torch as th
# # 4 nodes, 3 edges
# # g1 = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 3])))
# def positional_encoding(g, pos_enc_dim):
# """
# Graph positional encoding v/ Laplacian eigenvectors
# """
#
# # Laplacian
# A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
# N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
# L = sp.eye(g.number_of_nodes()) - N * A * N
#
# # Eigenvectors with numpy
# EigVal, EigVec = np.linalg.eig(L.toarray())
# idx = EigVal.argsort() # increasing order from min to max order index
# EigVal, EigVec = EigVal[idx], np.real(EigVec[:, idx])
# g.ndata['pos_enc'] = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1]).float()
#
# # # Eigenvectors with scipy
# # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
# # EigVec = EigVec[:, EigVal.argsort()] # increasing order
# # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
#
# return g
#
#
# def message_func(edges):
# Bh_j = edges.src['Bh']
# e_ij = edges.data['Ce'] + edges.src['Dh'] + edges.dst['Eh'] # e_ij = Ce_ij + Dhi + Ehj
# edges.data['e'] = e_ij
# return {'Bh_j': Bh_j, 'e_ij': e_ij}
#
#
# def reduce_func(nodes):
# Ah_i = nodes.data['Ah']#这个对只有出去,没有进来的点没有。这个时候只能是0,还不如加个自循环,用messange来做
# Bh_j = nodes.mailbox['Bh_j']
# e = nodes.mailbox['e_ij']
# sigma_ij = torch.sigmoid(e.float()) # sigma_ij = sigmoid(e_ij)
# # h = Ah_i + torch.mean( sigma_ij * Bh_j, dim=1 ) # hi = Ahi + mean_j alpha_ij * Bhj
# h = Ah_i + torch.sum(sigma_ij * Bh_j, dim=1) / (torch.sum(sigma_ij,
# dim=1) + 1e-6) # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
# return {'h': h}
# g1 = dgl.DGLGraph()
# g1.add_nodes(4)
# g1.add_edges([0, 1, 2], [1, 2, 3])
# # g1.ndata['h'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Ah'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Bh'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Dh'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Eh'] = th.randn((4, 3),dtype=th.float32)
# # g1.edata['e']=th.randn((3, 3),dtype=th.float32)
# # g1.edata['Ce'] = th.randn((3, 3),dtype=th.float32)
# g1.ndata['h'] = th.reshape(th.arange(1, 13), (4, 3))
# g1.ndata['Ah'] = th.reshape(th.arange(13, 25), (4, 3))
# g1.ndata['Bh'] = th.reshape(th.arange(26, 38), (4, 3))
# g1.ndata['Dh'] = th.reshape(th.arange(39, 51), (4, 3))
# g1.ndata['Eh'] = th.reshape(th.arange(52, 64), (4, 3))
# g1.edata['e'] = th.reshape(th.arange(65, 74), (3, 3))
# g1.edata['Ce'] = th.reshape(th.arange(75, 84), (3, 3))
# positional_encoding(g1, 3)
# # 3 nodes, 4 edges
# g2 = dgl.DGLGraph()
# g2.add_nodes(3)
# g2.add_edges([0, 0, 0, 1], [0, 1, 2, 0])
# g2.ndata['h'] = th.reshape(th.arange(101, 110), (3, 3))
# g2.ndata['Ah'] = th.reshape(th.arange(113, 122), (3, 3))
# g2.ndata['Bh'] = th.reshape(th.arange(126, 135), (3, 3))
# g2.ndata['Dh'] = th.reshape(th.arange(139, 148), (3, 3))
# g2.ndata['Eh'] = th.reshape(th.arange(152, 161), (3, 3))
# g2.edata['e'] = th.reshape(th.arange(165, 177), (4, 3))
# g2.edata['Ce'] = th.reshape(th.arange(175, 187), (4, 3))
# bg = dgl.batch([g1, g2])
# bg.update_all(message_func, reduce_func)
# bg.ndata['h']
# a = 1+1
# # g3 = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 3])))
# # g4 = dgl.graph((th.tensor([0, 0, 0, 1]), th.tensor([0, 1, 2, 0])))
# # bg = dgl.batch([g3, g4], edge_attrs=None)
#
# # import dgl
# # import torch as th
# # g1 = dgl.DGLGraph()
# # g1.add_nodes(2) # Add 2 nodes
# # g1.add_edge(0, 1) # Add edge 0 -> 1
# # g1.ndata['hv'] = th.tensor([[0.], [1.]]) # Initialize node features
# # g1.edata['he'] = th.tensor([[0.]]) # Initialize edge features
# # g2 = dgl.DGLGraph()
# # g2.add_nodes(3) # Add 3 nodes
# # g2.add_edges([0, 2], [1, 1]) # Add edges 0 -> 1, 2 -> 1
# # g2.ndata['hv'] = th.tensor([[2.], [3.], [4.]]) # Initialize node features
# # g2.edata['he'] = th.tensor([[1.], [2.]]) # Initialize edge features
# # bg = dgl.batch([g1, g2], edge_attrs=None)
# import time
# try:
# while True:
# print("你好")
# time.sleep(1)
# except KeyboardInterrupt:
# print('aa')
#
# print("好!")
# import numpy as np
# import torch
# import pickle
# import time
# import os
# import matplotlib.pyplot as plt
# if not os.path.isfile('molecules.zip'):
# print('downloading..')
# !curl https://www.dropbox.com/s/feo9qle74kg48gy/molecules.zip?dl=1 -o molecules.zip -J -L -k
# !unzip molecules.zip -d ../
# # !tar -xvf molecules.zip -C ../
# else:
# print('File already downloaded')
from tqdm import tqdm
import time
# Scratch demo: render a tqdm progress bar over 10k short sleeps (~10+ s wall time).
for i in tqdm(range(10000)):
    time.sleep(0.001)
| 5,463 | 34.712418 | 145 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/main_Planetoid_node_classification.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """Dictionary with attribute-style access: d['k'] and d.k are the same entry."""

    def __init__(self, **kwds):
        super().__init__(**kwds)
        # alias the attribute namespace to the mapping itself
        self.__dict__ = self
# from configs.base import Grid, Config
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.Planetoid_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pick the compute device.

    Pins CUDA_VISIBLE_DEVICES to `gpu_id`, then returns a cuda device when
    CUDA is present and `use_gpu` is set, else the CPU device.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(gpu_id))
        return torch.device("cuda:" + str(gpu_id))
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Build `MODEL_NAME` from `net_params` and return its total parameter count."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    # accumulate the element count of every parameter tensor
    total_param = sum(np.prod(list(p.data.size())) for p in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Run the 10-split x 3-run train/val/test loop for one model.

    MODEL_NAME : key understood by gnn_model.
    dataset    : loaded dataset exposing train_idx/val_idx/test_idx per split.
    params     : optimization hyper-parameters (seed, epochs, lr schedule, ...).
    net_params : network hyper-parameters; must carry 'device' and 'total_param'.
    dirs       : (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).

    Prints per-split and final averaged accuracies and writes a result file.
    """
    avg_test_acc = []
    avg_train_acc = []
    avg_val_acc = []
    avg_convergence_epochs = []
    t0 = time.time()
    per_epoch_time = []
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(dataset.name, MODEL_NAME, params, net_params, net_params['total_param']))
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        for split_number in range(10):
            training_scores, val_scores, test_scores, epochs = [], [], [], []
            # re-seed every split so each split starts from the same RNG state
            random.seed(params['seed'])
            np.random.seed(params['seed'])
            torch.manual_seed(params['seed'])
            if device.type == 'cuda':
                torch.cuda.manual_seed(params['seed'])
            # Mitigate bad random initializations
            train_idx, val_idx, test_idx = dataset.train_idx[split_number], dataset.val_idx[split_number], \
                                           dataset.test_idx[split_number]
            print("Training Nodes: ", len(train_idx))
            print("Validation Nodes: ", len(val_idx))
            print("Test Nodes: ", len(test_idx))
            print("Number of Classes: ", net_params['n_classes'])
            # 3 independent runs per split; results are averaged below
            for run in range(3):
                t0_split = time.time()
                print("RUN NUMBER:", split_number, run)
                log_dir = os.path.join(root_log_dir, "RUN_" + str(split_number))
                writer = SummaryWriter(log_dir=log_dir)
                model = gnn_model(MODEL_NAME, net_params)
                model = model.to(device)
                optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
                # LR decays on a plateau of the validation loss (stepped each epoch below)
                scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                                 factor=params['lr_reduce_factor'],
                                                                 patience=params['lr_schedule_patience'],
                                                                 verbose=True)
                epoch_train_losses, epoch_val_losses = [], []
                epoch_train_accs, epoch_val_accs = [], []
                # import train functions for all other GCNs
                from train.train_Planetoid_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
                with tqdm(range(params['epochs']), ncols= 0) as t:
                    for epoch in t:
                        t.set_description('Epoch %d' % epoch)
                        start = time.time()
                        # for all other models common train function
                        epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, dataset, train_idx)
                        epoch_val_loss, epoch_val_acc = evaluate_network(model, device, dataset, val_idx)
                        _, epoch_test_acc = evaluate_network(model, device, dataset, test_idx)
                        epoch_train_losses.append(epoch_train_loss)
                        epoch_val_losses.append(epoch_val_loss)
                        epoch_train_accs.append(epoch_train_acc)
                        epoch_val_accs.append(epoch_val_acc)
                        writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                        writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                        writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                        writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                        writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                        writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                        t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                                      train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                                      train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                                      test_acc=epoch_test_acc)
                        per_epoch_time.append(time.time()-start)
                        # Saving checkpoint
                        # NOTE(review): the checkpoint dir is keyed by split only, so the
                        # 3 runs of a split share it -- confirm this is intended.
                        ckpt_dir = os.path.join(root_ckpt_dir, "RUN_" + str(split_number))
                        if not os.path.exists(ckpt_dir):
                            os.makedirs(ckpt_dir)
                        # checkpoint saving is disabled; re-enable the line below to persist weights
                        # torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                        # prune stale checkpoints: keep only epochs >= epoch-1
                        files = glob.glob(ckpt_dir + '/*.pkl')
                        for file in files:
                            epoch_nb = file.split('_')[-1]
                            epoch_nb = int(epoch_nb.split('.')[0])
                            if epoch_nb < epoch-1:
                                os.remove(file)
                        scheduler.step(epoch_val_loss)
                        # uncomment to smoke-test the script with a single epoch
                        # if epoch == 1:
                        #     break
                        # stop once the plateau scheduler has decayed the LR below the floor
                        if optimizer.param_groups[0]['lr'] < params['min_lr']:
                            print("\n!! LR EQUAL TO MIN LR SET.")
                            break
                        # Stop training after params['max_time'] hours
                        if time.time()-t0_split > params['max_time']*3600/10: # Dividing max_time by 10, since there are 10 runs in TUs
                            print('-' * 89)
                            print("Max_time for one train-val-test split experiment elapsed {:.3f} hours, so stopping".format(params['max_time']/10))
                            break
                # final accuracies of this run (model state after the last epoch)
                _, test_acc = evaluate_network(model, device, dataset, test_idx)
                _, val_acc = evaluate_network(model, device, dataset, val_idx)
                _, train_acc = evaluate_network(model, device, dataset, train_idx)
                training_scores.append(train_acc)
                val_scores.append(val_acc)
                test_scores.append(test_acc)
                epochs.append(epoch)
            # average the 3 runs of this split
            training_score = sum(training_scores) / 3
            val_score = sum(val_scores) / 3
            test_score = sum(test_scores) / 3
            epoch_score = sum(epochs) / 3
            avg_val_acc.append(val_score)
            avg_test_acc.append(test_score)
            avg_train_acc.append(training_score)
            avg_convergence_epochs.append(epoch_score)
            print("Test Accuracy [LAST EPOCH]: {:.4f}".format(test_score))
            print("Val Accuracy: {:.4f}".format(val_score))
            print("Train Accuracy [LAST EPOCH]: {:.4f}".format(training_score))
            print("Convergence Time (Epochs): {:.4f}".format(epoch_score))
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    print("TOTAL TIME TAKEN: {:.4f}hrs".format((time.time()-t0)/3600))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    print("AVG CONVERGENCE Time (Epochs): {:.4f}".format(np.mean(np.array(avg_convergence_epochs))))
    # Final test accuracy value averaged over 10-fold
    print("""\n\n\nFINAL RESULTS\n\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}""" .format(np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100))
    print("\nAll splits Test Accuracies:\n", avg_test_acc)
    print("""\n\n\nFINAL RESULTS\n\nVAL ACCURACY averaged: {:.4f} with s.d. {:.4f}""".format(
        np.mean(np.array(avg_val_acc)) * 100, np.std(avg_val_acc) * 100))
    print("\nAll splits Val Accuracies:\n", avg_val_acc)
    print("""\n\n\nFINAL RESULTS\n\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}""" .format(np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100))
    print("\nAll splits Train Accuracies:\n", avg_train_acc)
    # NOTE(review): only the writer of the last run is closed here -- confirm intended
    writer.close()
    """
        Write the results in out/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\nval ACCURACY averaged: {:.4f} with s.d. {:.4f}\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\n\n
    Average Convergence Time (Epochs): {:.4f} with s.d. {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\nAll Splits Test Accuracies: {}"""\
          .format(dataset.name, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,
                  np.mean(np.array(avg_val_acc))*100, np.std(avg_val_acc)*100,
                  np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100,
                  np.mean(avg_convergence_epochs), np.std(avg_convergence_epochs),
                  (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc))
def main():
    """
    Entry point: parse CLI flags, overlay them on the JSON config, load the
    Planetoid dataset, create output directories and run train_val_pipeline.
    CLI values, when provided, override the corresponding config entries.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default= None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # TODO: separate the hyper-parameter handling, e.g. via a Grid/Config pair:
    # model_configurations = Grid(config_file, dataset_name)
    # model_configuration = Config(**model_configurations[0])
    #
    # exp_path = os.path.join(result_folder, f'{model_configuration.exp_name}_assessment')
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir: CLI wins over the config file
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    # optimization parameters
    params = config['params']
    # models whose names end in 'pyg' run on PyTorch-Geometric, the rest on DGL
    params['framework'] = 'pyg' if MODEL_NAME[-3:] == 'pyg' else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    # NOTE(review): store_true defaults to False, so this guard is always true and
    # the flag always overrides any config value -- confirm intended.
    if args.use_node_embedding is not None:
        params['use_node_embedding'] = bool(args.use_node_embedding)
    dataset = LoadData(DATASET_NAME, use_node_embedding = params['use_node_embedding'],framework = params['framework'])
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters (boolean ones arrive as the strings 'True'/'False')
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc=='True' else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # Planetoid: feature dim and class count come from the loaded graph
    net_params['in_dim'] = dataset.dataset[0].x.size(1)
    net_params['n_classes'] = torch.unique(dataset.dataset[0].y,dim=0).size(0)
    if MODEL_NAME == 'DiffPool':
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
        max_num_node = max(num_nodes)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']
    if MODEL_NAME == 'RingGNN':
        # RingGNN needs the (rounded-up) average node count across graphs
        num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    # NOTE(review): strftime is evaluated separately for each path below; a second
    # tick between calls yields mismatched directory names -- consider hoisting.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
main()
| 21,251 | 43 | 188 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/main_ogb_node_classification.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch_geometric.data import DataLoader as DataLoaderpyg
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch_geometric.transforms as T
from ogb.nodeproppred import Evaluator
from torch_geometric.data import RandomNodeSampler
class DotDict(dict):
    """dict subclass whose entries are also reachable as attributes.

    After ``d = DotDict(a=1)`` both ``d['a']`` and ``d.a`` return 1, and
    attribute assignment writes through to the mapping.
    """

    def __init__(self, **entries):
        self.update(entries)
        # Aliasing __dict__ to the dict itself makes keys <-> attributes.
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.ogb_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Return the torch.device to run on.

    Pins CUDA device ordering/visibility through environment variables,
    then selects ``cuda:<gpu_id>`` when CUDA is available and requested,
    falling back to the CPU otherwise.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    cuda_ok = use_gpu and torch.cuda.is_available()
    if not cuda_ok:
        print('cuda not available')
        return torch.device("cpu")
    print('cuda available with GPU:', torch.cuda.get_device_name(gpu_id))
    return torch.device("cuda:" + str(gpu_id))
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Build the model once, print its structure, and return the total
    number of parameters (also printed for a quick sanity check)."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    print(model)
    # Total element count over every parameter tensor.
    total_param = sum(np.prod(list(w.data.size())) for w in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Full training/evaluation loop for OGB node classification.

    Prepares the dataset (optional self-loops, dummy edge features,
    positional encodings), trains ``MODEL_NAME`` with Adam +
    ReduceLROnPlateau, logs per-epoch metrics to TensorBoard, prunes old
    checkpoint files, and writes the final accuracies to the result and
    config files named by ``dirs``.

    Args:
        MODEL_NAME: model key understood by ``gnn_model``.
        dataset: LoadData wrapper exposing ``dataset``, ``split_idx``, ``name``.
        params: optimisation hyper-parameters (seed, lr, epochs, max_time, ...).
        net_params: network hyper-parameters; must contain 'device'.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    start0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    # No real edge features requested: substitute constant 1-dim features.
    if not net_params['edge_feat']:
        edge_feat_dim = 1
        if DATASET_NAME == 'ogbn-mag':
            dataset.dataset.edge_attr = torch.ones(dataset.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
        else:
            dataset.dataset.data.edge_attr = torch.ones(dataset.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
    if net_params['pos_enc']:
        print("[!] Adding graph positional encoding.")
        dataset._add_positional_encodings(net_params['pos_enc_dim'],DATASET_NAME)
        print('Time PE:',time.time()-start0)
    device = net_params['device']
    # ogbn-mag is heterogeneous: keep only the 'paper' node-type splits.
    if DATASET_NAME == 'ogbn-mag':
        dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train']['paper'],\
                                                                                           dataset.split_idx['valid']['paper'], \
                                                                                           dataset.split_idx['test']['paper']
    # else:
    #     dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train'].to(device), \
    #                                                                 dataset.split_idx['valid'].to(device), \
    #                                                                 dataset.split_idx['test'].to(device)
    # transform = T.ToSparseTensor() To do to save memory
    # self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework='pyg') for _, g in enumerate(dataset.train)]
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    # Write network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", dataset.split_idx['train'].size(0))
    print("Validation Graphs: ", dataset.split_idx['valid'].size(0))
    print("Test Graphs: ", dataset.split_idx['test'].size(0))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    evaluator = Evaluator(name = DATASET_NAME)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    # import train functions for all other GCNs
    if DATASET_NAME == 'ogbn-mag' or DATASET_NAME == 'ogbn-products':
        from train.train_ogb_node_classification import train_epoch as train_epoch, evaluate_network as evaluate_network
    elif DATASET_NAME == 'ogbn-proteins':
        from train.train_ogb_node_classification import train_epoch_proteins as train_epoch, evaluate_network_proteins as evaluate_network
    data = dataset.dataset[0]
    # Set split indices to masks.
    for split in ['train', 'valid', 'test']:
        mask = torch.zeros(data.num_nodes, dtype=torch.bool)
        mask[dataset.split_idx[split]] = True
        data[f'{split}_mask'] = mask
    # Mini-batch the big graph by random node partitions (fewer, larger
    # parts for ogbn-mag).
    num_parts = 5 if DATASET_NAME == 'ogbn-mag' else 40
    train_loader = RandomNodeSampler(data, num_parts=num_parts, shuffle=True,
                                     num_workers=0)
    test_loader = RandomNodeSampler(data, num_parts=5, num_workers=0)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs']),ncols= 0) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                # for all other models common train function
                epoch_train_loss = train_epoch(model, optimizer, device, train_loader, epoch)
                # NOTE(review): train/val/test metrics are all computed from
                # test_loader partitions using the per-split masks — confirm
                # evaluate_network separates splits internally.
                epoch_train_acc, epoch_val_acc, epoch_test_acc, epoch_val_loss = evaluate_network(model, device, test_loader, evaluator, epoch)
                # _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                # the function to save the checkpoint
                # torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Keep only the two most recent checkpoint files.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                # it used to test the scripts
                # if epoch == 1:
                #     break
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-start0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation with the last-epoch model (accuracies reported in %).
    train_acc, val_acc, test_acc, _ = evaluate_network(model, device, test_loader, evaluator, epoch)
    train_acc, val_acc, test_acc = 100 * train_acc, 100 * val_acc, 100 * test_acc
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nval ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, val_acc,train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
def main():
    """Script entry point for OGB node classification.

    Parses CLI flags, overlays them on the JSON config given via
    ``--config``, sets up the device, loads the dataset, derives input/
    output dimensions, creates the output directories, and finally calls
    :func:`train_val_pipeline`. Every flag is optional; when supplied it
    overrides the corresponding config entry.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default= None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', action='store_true', default=False, help="Please give a value for pos_enc")
    parser.add_argument('--use_node_embedding', action='store_true', default=False)
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    # parameters
    params = config['params']
    # Models whose name ends in 'pyg' run on PyTorch Geometric, others on DGL.
    params['framework'] = 'pyg' if MODEL_NAME[-3:] == 'pyg' else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    # NOTE(review): with action='store_true' args.use_node_embedding is never
    # None (defaults to False), so this branch always runs and the expression
    # reduces to params['use_node_embedding'] = args.use_node_embedding.
    if args.use_node_embedding is not None:
        params['use_node_embedding'] = True if args.use_node_embedding == True else False
    dataset = LoadData(DATASET_NAME = DATASET_NAME, use_node_embedding = params['use_node_embedding'])
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    # net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    # NOTE(review): --pos_enc is a store_true flag, so args.pos_enc is never
    # None; the config's 'pos_enc' value is always overwritten by the CLI
    # value (False unless the flag is passed) — confirm this is intended.
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc==True else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # arxiv 'ogbn-mag'
    # Derive input feature dim and class count from the loaded graph.
    net_params['in_dim'] = dataset.dataset[0].x.size(1)
    # dataset.dataset[0]
    # net_params['n_classes'] = torch.unique(dataset.dataset[0].y,dim=0).size(0)
    net_params['n_classes'] = dataset.dataset[0].y.size(1) if DATASET_NAME == 'ogbn-proteins' else torch.unique(dataset.dataset[0].y,dim=0).size(0)
    # Timestamped output locations for logs/checkpoints/results/configs.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# Guard the entry point so importing this module does not immediately
# parse CLI args and start training; behavior as a script is unchanged.
if __name__ == '__main__':
    main()
| 20,051 | 41.303797 | 202 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/main_SBMs_node_classification.py |
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch_geometric.data import DataLoader as DataLoaderpyg
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch_geometric.transforms as T
class DotDict(dict):
    """A dict whose items can also be read and written as attributes."""

    def __init__(self, **kwargs):
        self.update(kwargs)
        # Point __dict__ at the mapping so keys and attributes coincide.
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.SBMs_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pick and return the torch.device for this run (GPU or CPU fallback)."""
    # Make CUDA enumerate devices in PCI order and expose only `gpu_id`.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if use_gpu and torch.cuda.is_available():
        print('cuda available with GPU:', torch.cuda.get_device_name(gpu_id))
        return torch.device("cuda:" + str(gpu_id))
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate MODEL_NAME from net_params, print and return its
    total parameter count."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    total_param = 0
    # Accumulate the element count of every parameter tensor.
    for weight in model.parameters():
        total_param = total_param + np.prod(list(weight.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train/validate/test ``MODEL_NAME`` on an SBM node-classification set.

    Handles both dense WL-GNNs (RingGNN/3WLGNN, custom dense collate) and
    message-passing GNNs (DGL or PyG loaders depending on
    ``params['framework']``), logs per-epoch metrics to TensorBoard,
    prunes old checkpoints, and writes the final accuracies to the result
    and config files named by ``dirs``.

    Args:
        MODEL_NAME: model key understood by ``gnn_model``.
        dataset: LoadData wrapper exposing ``train``/``val``/``test`` splits.
        params: optimisation hyper-parameters (seed, lr, epochs, max_time, ...).
        net_params: network hyper-parameters; must contain 'device'.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    start0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    if MODEL_NAME in ['GatedGCN_pyg','ResGatedGCN_pyg']:
        if net_params['pos_enc']:
            print("[!] Adding graph positional encoding.")
            dataset._add_positional_encodings(net_params['pos_enc_dim'])
            print('Time PE:',time.time()-start0)
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    # transform = T.ToSparseTensor() To do to save memory
    # self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework='pyg') for _, g in enumerate(dataset.train)]
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    if MODEL_NAME in ['RingGNN', '3WLGNN']:
        # import train functions specific for WL-GNNs
        from train.train_SBMs_node_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
        train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
        val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
        test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
    else:
        # import train functions for all other GCNs
        from train.train_SBMs_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        # train_loader = DataLoaderpyg(trainset, batch_size=2, shuffle=False)
        # NOTE(review): in the DGL branch the val/test loaders use
        # shuffle=True while the PyG branch uses shuffle=False — metrics are
        # order-independent, but this asymmetry looks unintended.
        train_loader = DataLoaderpyg(trainset, batch_size=params['batch_size'], shuffle=True) if params['framework'] == 'pyg' else DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
        val_loader = DataLoaderpyg(valset, batch_size=params['batch_size'], shuffle=False) if params['framework'] == 'pyg' else DataLoader(valset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
        test_loader = DataLoaderpyg(testset, batch_size=params['batch_size'], shuffle=False) if params['framework'] == 'pyg' else DataLoader(testset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs']),ncols= 0) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
                else:   # for all other models common train function
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['framework'])
                epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch, params['framework'])
                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch, params['framework'])
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                # the function to save the checkpoint
                # torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Keep only the two most recent checkpoint files.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                # it used to test the scripts
                # if epoch == 1:
                #     break
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-start0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation with the last-epoch model.
    _, test_acc = evaluate_network(model, device, test_loader, epoch, params['framework'])
    _, val_acc = evaluate_network(model, device, val_loader, epoch, params['framework'])
    _, train_acc = evaluate_network(model, device, train_loader, epoch, params['framework'])
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nval ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, val_acc,train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
def main():
    """Script entry point for SBM node classification.

    Parses CLI flags, overlays them on the JSON config given via
    ``--config``, sets up the device, loads the dataset for the chosen
    framework (DGL or PyG), derives input/output dimensions, creates
    the output directories, and finally calls :func:`train_val_pipeline`.
    Every flag is optional; when supplied it overrides the config entry.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default= None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    # parameters
    params = config['params']
    # Models whose name ends in 'pyg' run on PyTorch Geometric, others on DGL.
    params['framework'] = 'pyg' if MODEL_NAME[-3:] == 'pyg' else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    dataset = LoadData(DATASET_NAME, framework = params['framework'])
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc=='True' else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # SBM
    # net_params['in_dim'] = torch.unique(dataset.train[0][0].ndata['feat'],dim=0).size(0) # node_dim (feat is an integer)
    # net_params['n_classes'] = torch.unique(dataset.train[0][1],dim=0).size(0)
    # Node features are integer labels: input dim / class count = number of
    # distinct values in the training split, accessed per framework.
    net_params['in_dim'] = torch.unique(dataset.train[0].x,dim=0).size(0) if 'pyg' == params['framework'] else torch.unique(dataset.train[0][0].ndata['feat'],dim=0).size(0)
    net_params['n_classes'] = torch.unique(dataset.train[0].y,dim=0).size(0) if 'pyg' == params['framework'] else torch.unique(dataset.train[0][1], dim=0).size(0)
    # RingGNN needs the average node count to size its dense tensors.
    if MODEL_NAME == 'RingGNN':
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    # Timestamped output locations for logs/checkpoints/results/configs.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# Guard the entry point so importing this module does not immediately
# parse CLI args and start training; behavior as a script is unchanged.
if __name__ == '__main__':
    main()
| 19,999 | 41.643923 | 226 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/ogb_node_classification/gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from torch_geometric.typing import OptPairTensor
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GATConv
class GATNet_pyg(nn.Module):
    """GAT node classifier (PyG backend) for OGB node classification.

    Stack of ``L`` GATConv layers: the first ``L-1`` use ``n_heads`` heads
    whose outputs are concatenated (width ``hidden_dim * n_heads``), the
    last uses a single head projecting to ``out_dim``.  A linear head maps
    the final node representation to class logits.
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim * num_heads)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GATConv(hidden_dim * num_heads, hidden_dim, num_heads,
                                             dropout=dropout) for _ in range(self.n_layers - 1)])
        # BUG FIX: ``dropout`` was previously passed as the 4th positional
        # argument of GATConv, which is ``concat`` (not ``dropout``), so the
        # last layer ran with concat=bool(dropout) and attention dropout 0.
        # Passing it by keyword applies dropout as intended; with heads=1 the
        # output width is out_dim either way, so the interface is unchanged.
        self.layers.append(GATConv(hidden_dim * num_heads, out_dim, heads=1, dropout=dropout))
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim * num_heads)
                                              for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = nn.Linear(out_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node class logits.

        h: node features; edge_index: COO connectivity; e: edge features
        (unused here); h_pos_enc: optional positional encoding, added to the
        embedded node features when ``pos_enc`` is enabled.
        """
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        for i in range(self.n_layers):
            h_in = h
            # pass a (src, dst) pair so GATConv concatenates rather than adds
            h: OptPairTensor = (h, h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            h = F.elu(h)
            if self.residual:
                h = h_in + h  # residual connection
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label multi-class targets."""
        criterion = nn.CrossEntropyLoss()
        return criterion(pred, label)

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        return criterion(pred, label.to(torch.float))
| 3,492 | 38.247191 | 120 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/ogb_node_classification/graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import SAGEConv
class GraphSageNet(nn.Module):
    """GraphSAGE node classifier (DGL backend).

    Embeds integer node features, applies ``L`` GraphSageLayer blocks
    (hidden_dim for the first L-1, out_dim for the last), and scores each
    node with an MLP readout.
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        num_node_types = net_params['in_dim']  # node feat is an integer id
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        depth = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(num_node_types, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        stack = [GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                aggregator_type, batch_norm, residual)
                 for _ in range(depth - 1)]
        stack.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout,
                                    aggregator_type, batch_norm, residual))
        self.layers = nn.ModuleList(stack)
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node logits for graph ``g`` (``e`` is unused)."""
        h = self.in_feat_dropout(self.embedding_h(h))
        for layer in self.layers:
            h = layer(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain (unweighted) cross-entropy over node classes."""
        return nn.CrossEntropyLoss()(pred, label)
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
# the implementation of dgl and pyg are different
"""
class GraphSageNet_pyg(nn.Module):
    """GraphSAGE node classifier (PyG backend).

    Counterpart of :class:`GraphSageNet` built on ``torch_geometric``'s
    SAGEConv.  Per layer: dropout -> SAGEConv -> optional BatchNorm ->
    optional residual add.  No inter-layer nonlinearity (the ReLU was
    deliberately left disabled in the original).
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        self.n_layers = net_params['L']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = nn.Dropout(p=dropout)
        if self.batch_norm:
            norms = [nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)]
            norms.append(nn.BatchNorm1d(out_dim))
            self.batchnorm_h = nn.ModuleList(norms)
        convs = [SAGEConv(hidden_dim, hidden_dim) for _ in range(self.n_layers - 1)]
        convs.append(SAGEConv(hidden_dim, out_dim))
        self.layers = nn.ModuleList(convs)
        self.MLP_layer = nn.Linear(out_dim, n_classes, bias=True)
        # NOTE(review): self.aggr is recorded but never forwarded to SAGEConv,
        # which therefore uses its own default aggregation — confirm intent.
        if aggregator_type == 'maxpool':
            self.aggr = 'max'
        elif aggregator_type == 'mean':
            self.aggr = 'mean'

    def forward(self, h, edge_index, e):
        """Return per-node logits (``e`` is unused)."""
        h = self.in_feat_dropout(self.embedding_h(h))
        for idx, conv in enumerate(self.layers):
            skip = h
            h = conv(self.dropout(h), edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[idx](h)
            if self.residual:
                h = skip + h  # residual connection
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label targets."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/ogb_node_classification/gin_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
# import torch_geometric as tg
from torch_geometric.nn import GINConv
class GINNet(nn.Module):
    """GIN node classifier (DGL backend).

    Jumping-knowledge style readout: the node representation after every
    GIN layer (input embedding included) is scored by its own linear head
    and the per-depth logits are summed.
    """

    def __init__(self, net_params):
        """Build the GIN stack from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']          # GIN
        learn_eps = net_params['learn_eps_GIN']          # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # graph_pooling_type (unused here)
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.ginlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        for _ in range(self.n_layers):
            update_fn = ApplyNodeFunc(MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim))
            self.ginlayers.append(GINLayer(update_fn, neighbor_aggr_type,
                                           dropout, batch_norm, residual, 0, learn_eps))
        # one prediction head per depth (n_layers + 1, counting the input)
        self.linears_prediction = torch.nn.ModuleList(
            [nn.Linear(hidden_dim, n_classes) for _ in range(self.n_layers + 1)])

    def forward(self, g, h, e):
        """Return summed per-depth logits for the nodes of ``g``."""
        h = self.embedding_h(h)
        per_depth = [h]
        for layer in self.ginlayers:
            h = layer(g, h)
            per_depth.append(h)
        logits = 0
        for head, rep in zip(self.linears_prediction, per_depth):
            logits = logits + head(rep)
        return logits

    def loss(self, pred, label):
        """Unweighted cross-entropy."""
        return nn.CrossEntropyLoss()(pred, label)
class GINNet_pyg(nn.Module):
    """GIN node classifier (PyG backend).

    Same jumping-knowledge readout as the DGL :class:`GINNet`: every depth
    (input embedding included) gets its own linear head and the per-depth
    logits are summed.  Per layer: GINConv -> optional BatchNorm -> ReLU ->
    optional residual -> dropout.
    """

    def __init__(self, net_params):
        """Build the GIN stack from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']           # GIN
        learn_eps = net_params['learn_eps_GIN']          # GIN
        # NOTE(review): neighbor_aggr_GIN is read but not forwarded to
        # GINConv, so it has no effect here — confirm intent.
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']
        readout = net_params['readout']  # graph_pooling_type (unused here)
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.ginlayers = torch.nn.ModuleList()
        self.normlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        for _ in range(self.n_layers):
            update_fn = ApplyNodeFunc(MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim))
            # positional args map to (nn, eps=0, train_eps=learn_eps)
            self.ginlayers.append(GINConv(update_fn, 0, learn_eps))
            if batch_norm:
                self.normlayers.append(nn.BatchNorm1d(hidden_dim))
        # one prediction head per depth (n_layers + 1, counting the input)
        self.linears_prediction = torch.nn.ModuleList(
            [nn.Linear(hidden_dim, n_classes) for _ in range(self.n_layers + 1)])

    def forward(self, h, edge_index, e):
        """Return summed per-depth logits (``e`` is unused)."""
        h = self.embedding_h(h)
        per_depth = [h]
        for i, conv in enumerate(self.ginlayers):
            skip = h
            h = conv(h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)   # batch normalization
            h = F.relu(h)                   # non-linear activation
            if self.residual:
                h = skip + h                # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
            per_depth.append(h)
        logits = 0
        for head, rep in zip(self.linears_prediction, per_depth):
            logits = logits + head(rep)
        return logits

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label targets."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/ogb_node_classification/gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
class GCNNet_pyg(nn.Module):
    """GCN node classifier (PyG backend).

    Linear embedding -> optional positional encoding -> ``L`` GCNConv
    layers (each: conv -> optional BatchNorm -> ReLU -> optional residual
    -> dropout) -> linear readout to class logits.
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']     # read for config validation; width stays hidden_dim
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = dropout
        self.layers = nn.ModuleList([GCNConv(hidden_dim, hidden_dim, improved=False)
                                     for _ in range(self.n_layers)])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)
                                             for _ in range(self.n_layers)])
        self.MLP_layer = nn.Linear(hidden_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node logits (``e`` is unused)."""
        h = self.in_feat_dropout(self.embedding_h(h))
        if self.pos_enc:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        for idx, conv in enumerate(self.layers):
            skip = h
            h = conv(h, edge_index)
            if self.batch_norm:
                h = self.normlayers[idx](h)  # batch normalization
            h = F.relu(h)                    # non-linear activation
            if self.residual:
                h = skip + h                 # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label targets."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
| 3,608 | 33.701923 | 110 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/ogb_node_classification/gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer, ResGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GatedGraphConv
class GatedGCNNet(nn.Module):
    """Residual Gated GCN node classifier (DGL backend).

    Embeds integer node features and scalar edge features, runs ``L``
    GatedGCNLayer blocks that update node and edge representations jointly,
    then scores each node with an MLP readout.
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feat is an integer
        in_dim_edge = 1                     # edge feat is a float
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList(
            [GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                           self.batch_norm, self.residual)
             for _ in range(n_layers)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, g, h, e, h_pos_enc=None):
        """Return per-node logits; node and edge features evolve together."""
        h = self.embedding_h(h)
        if self.pos_enc:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        e = self.embedding_e(e)
        for layer in self.layers:
            h, e = layer(g, h, e)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy."""
        return nn.CrossEntropyLoss()(pred, label)
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class ResGatedGCNNet_pyg(nn.Module):
    """Residual Gated Graph ConvNet node classifier (PyG backend).

    An Experimental Study of Neural Networks for Variable Graphs
    (Bresson & Laurent, ICLR 2018), https://arxiv.org/pdf/1711.07553v2.pdf.
    Node and edge features are updated jointly by each ResGatedGCNLayer;
    BatchNorm, residual, and dropout are applied to both streams per layer.
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        in_dim_edge = 1                     # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.edge_feat = net_params['edge_feat']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)  # node feat is an integer
        # DEAD-CODE FIX: the original conditionally built an nn.Embedding here
        # when ``edge_feat`` was set, then unconditionally overwrote
        # ``self.embedding_e`` with the Linear below, so the Embedding branch
        # never had any effect (attribute reassignment replaces the registered
        # submodule).  Keeping only the Linear preserves the exact behavior
        # and state dict of the original in every configuration.
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        self.layers = nn.ModuleList([ResGatedGCNLayer(hidden_dim, hidden_dim, self.dropout,
                                                      self.batch_norm, self.residual)
                                     for _ in range(self.n_layers)])
        if self.batch_norm:
            self.normlayers_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
            self.normlayers_e = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
        self.MLP_layer = nn.Linear(hidden_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node logits; ``h``/``e`` are node/edge input features."""
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        for i in range(self.n_layers):
            h_in = h
            e_in = e
            h, e = self.layers[i](h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers_h[i](h)
                e = self.normlayers_e[i](e)  # batch normalization
            if self.residual:
                h = h_in + h  # residual connection (both streams)
                e = e_in + e
            h = F.dropout(h, self.dropout, training=self.training)
            e = F.dropout(e, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label targets."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
"""
Gated Graph Sequence Neural Networks
An Experimental Study of Neural Networks for Variable Graphs
Li Y, Tarlow D, Brockschmidt M, et al. Gated graph sequence neural networks[J]. arXiv preprint arXiv:1511.05493, 2015.
https://arxiv.org/abs/1511.05493
Note that the pyg and dgl of the gatedGCN are different models.
"""
class GatedGCNNet_pyg(nn.Module):
    """Gated Graph Sequence NN node classifier (PyG backend).

    Gated Graph Sequence Neural Networks (Li et al., 2015),
    https://arxiv.org/abs/1511.05493.  NOTE: this is a different model from
    the DGL ``GatedGCNNet`` above — it wraps a single PyG GatedGraphConv
    that internally unrolls ``L`` GRU propagation steps.
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        in_dim_edge = 1                     # edge feat is a float (embedding disabled)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        # one conv module; n_layers is its internal number of propagation steps
        self.layers = nn.ModuleList([GatedGraphConv(hidden_dim, n_layers, aggr='add')])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)])
        self.MLP_layer = nn.Linear(hidden_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node logits; ``e`` is forwarded to the conv (third positional arg)."""
        h = self.embedding_h(h)
        if self.pos_enc:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        for conv in self.layers:
            skip = h
            h = conv(h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers[0](h)
            if self.residual:
                h = skip + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label targets."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/ogb_node_classification/mlp_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from layers.mlp_readout_layer import MLPReadout
class MLPNet(nn.Module):
    """Graph-agnostic MLP baseline (DGL signature).

    Ignores graph structure entirely: embeds integer node features, applies
    a stack of Linear/ReLU/Dropout blocks (optionally gated by a learned
    sigmoid), and reads out per-node class logits.
    """

    def __init__(self, net_params):
        """Build the MLP stack from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feat is an integer
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        blocks = []
        for _ in range(n_layers):
            blocks += [nn.Linear(hidden_dim, hidden_dim, bias=True),
                       nn.ReLU(),
                       nn.Dropout(dropout)]
        self.feat_mlp = nn.Sequential(*blocks)
        if self.gated:
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node logits; ``g`` and ``e`` are accepted but unused."""
        h = self.in_feat_dropout(self.embedding_h(h))
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy."""
        return nn.CrossEntropyLoss()(pred, label)
class MLPNet_pyg(nn.Module):
    """Graph-agnostic MLP baseline (PyG signature).

    Same idea as :class:`MLPNet` but with the PyG call convention
    ``(h, edge_index, e)``; ``edge_index`` and ``e`` are ignored.  Supports
    an optional positional-encoding input added after the embedding.
    """

    def __init__(self, net_params):
        """Build the MLP stack from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimension
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        blocks = []
        for _ in range(n_layers):
            blocks += [nn.Linear(hidden_dim, hidden_dim, bias=True),
                       nn.ReLU(),
                       nn.Dropout(dropout)]
        self.feat_mlp = nn.Sequential(*blocks)
        if self.gated:
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = nn.Linear(hidden_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node logits; ``edge_index`` and ``e`` are unused."""
        h = self.in_feat_dropout(self.embedding_h(h))
        if self.pos_enc:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label targets."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
| 4,176 | 29.268116 | 93 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/ogb_node_classification/mo_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_add
import dgl
import numpy as np
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
from layers.gmm_layer import GMMLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GMMConv
class MoNet(nn.Module):
    """MoNet / Gaussian-mixture-convolution node classifier (DGL backend).

    Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs
    (Monti et al., CVPR 2017).  Edge "pseudo-coordinates" are derived from
    the endpoint in-degrees and projected per layer before each GMMLayer.
    """
    def __init__(self, net_params):
        """Build the GMMLayer stack from the ``net_params`` config dict."""
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel'] # for MoNet
        dim = net_params['pseudo_dim_MoNet'] # for MoNet
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        aggr_type = "sum" # default for MoNet
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        # Hidden layer: each GMMLayer is paired with its own 2 -> dim
        # pseudo-coordinate projection (Linear + Tanh).
        for _ in range(n_layers-1):
            self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                                        dropout, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
                                    dropout, batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node class logits for graph ``g`` (``e`` is unused).

        Side effect: writes 'deg' into g.ndata and 'pseudo' into g.edata.
        """
        h = self.embedding_h(h)
        # computing the 'pseudo' named tensor which depends on node degrees
        g.ndata['deg'] = g.in_degrees()
        g.apply_edges(self.compute_pseudo)
        pseudo = g.edata['pseudo'].to(self.device).float()
        for i in range(len(self.layers)):
            h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
        return self.MLP_layer(h)

    def compute_pseudo(self, edges):
        """DGL edge-UDF: per-edge pseudo-coordinates
        (1/sqrt(deg_src+1), 1/sqrt(deg_dst+1)), returned under key 'pseudo'."""
        # compute pseudo edge features for MoNet
        # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
        srcs = 1/np.sqrt(edges.src['deg']+1)
        dsts = 1/np.sqrt(edges.dst['deg']+1)
        pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
        return {'pseudo': pseudo}

    def loss(self, pred, label):
        """Unweighted cross-entropy."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class MoNetNet_pyg(nn.Module):
    """MoNet / Gaussian-mixture-convolution node classifier (PyG backend).

    Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs
    (Monti et al., CVPR 2017).  Pseudo-coordinates are computed from node
    degrees inside ``forward`` and projected per layer before each GMMConv.
    """
    def __init__(self, net_params):
        """Build the GMMConv stack from the ``net_params`` config dict."""
        super().__init__()
        self.name = 'MoNet'
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel'] # for MoNet
        dim = net_params['pseudo_dim_MoNet'] # for MoNet
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        self.dim = dim
        # aggr_type = "sum" # default for MoNet
        aggr_type = "mean"
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
        # self.embedding_e = nn.Linear(1, dim) # edge feat is a float
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        self.batchnorm_h = nn.ModuleList()
        # Hidden layer: each GMMConv is paired with its own 2 -> dim
        # pseudo-coordinate projection (Linear + Tanh).
        for _ in range(self.n_layers - 1):
            self.layers.append(GMMConv(hidden_dim, hidden_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                       root_weight = True, bias = True))
            if self.batch_norm:
                self.batchnorm_h.append(nn.BatchNorm1d(hidden_dim))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMConv(hidden_dim, out_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                   root_weight = True, bias = True))
        if self.batch_norm:
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # self.MLP_layer = MLPReadout(out_dim, n_classes)
        self.MLP_layer = nn.Linear(out_dim, n_classes, bias=True)
    # to do
    def forward(self, h, edge_index, e):
        """Return per-node class logits (``e`` is unused).

        NOTE(review): the residual add on the last layer assumes
        hidden_dim == out_dim; confirm configs satisfy this.
        """
        h = self.embedding_h(h)
        # unweighted degree per node: count of outgoing entries in edge_index
        edge_weight = torch.ones((edge_index.size(1),),
                                 device = edge_index.device)
        row, col = edge_index[0], edge_index[1]
        deg = scatter_add(edge_weight, row, dim=0, dim_size=h.size(0))
        # in-place: deg itself becomes deg^(-1/2); isolated nodes (deg=0)
        # produce inf, which is zeroed out on the next line
        deg_inv_sqrt = deg.pow_(-0.5)
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        # per-edge pseudo-coordinates: (deg_src^-1/2, deg_dst^-1/2)
        pseudo = torch.cat((deg_inv_sqrt[row].unsqueeze(-1), deg_inv_sqrt[col].unsqueeze(-1)), dim=1)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index, self.pseudo_proj[i](pseudo))
            if self.batch_norm:
                h = self.batchnorm_h[i](h) # batch normalization
            h = F.relu(h) # non-linear activation
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Unweighted cross-entropy for single-label targets."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss

    def loss_proteins(self, pred, label):
        """BCE-with-logits for multi-label targets (e.g. ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from torch_geometric.typing import OptPairTensor
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GATConv
class GATNet(nn.Module):
    """GAT node classifier for SBM node-classification tasks (DGL backend).

    ``L`` GATLayer blocks (multi-head for the first L-1, single-head for the
    last) over an integer-feature embedding, with an MLP readout and a
    class-size-weighted cross-entropy loss for unbalanced clusters.
    """

    def __init__(self, net_params):
        """Build the model from the ``net_params`` configuration dict."""
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feat is an integer
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        stack = [GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
                          dropout, self.batch_norm, self.residual)
                 for _ in range(n_layers - 1)]
        stack.append(GATLayer(hidden_dim * num_heads, out_dim, 1,
                              dropout, self.batch_norm, self.residual))
        self.layers = nn.ModuleList(stack)
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node logits for graph ``g`` (``e`` is unused)."""
        h = self.in_feat_dropout(self.embedding_h(h))
        for layer in self.layers:
            h = layer(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cross-entropy weighted inversely to cluster size.

        Classes absent from this batch get zero weight so they do not
        contribute to the loss.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class GATNet_pyg(nn.Module):
    """Graph Attention Network (PyG backend) for SBM node classification.

    Mirrors the DGL ``GATNet``: embedded integer node features pass through
    ``L`` ``GATConv`` layers (multi-head with concatenated outputs) with
    optional batch norm and residual connections, then an MLP readout
    produces per-node class scores.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feat is an integer
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # hidden layers: `num_heads` heads whose outputs are concatenated, so
        # each layer maps (hidden_dim * num_heads) -> (hidden_dim * num_heads)
        self.layers = nn.ModuleList([GATConv(hidden_dim * num_heads, hidden_dim, num_heads,
                                             dropout=dropout) for _ in range(self.n_layers - 1)])
        # Final single-head layer. BUG FIX: `dropout` must be passed by
        # keyword -- GATConv's 4th positional parameter is `concat`, so the
        # attention dropout was silently discarded on this layer before.
        self.layers.append(GATConv(hidden_dim * num_heads, out_dim, 1, dropout=dropout))
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim * num_heads)
                                              for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class scores; edge features ``e`` are unused by GAT."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            # pass a (src, dst) pair so propagation concatenates rather than adds
            h: OptPairTensor = (h, h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            h = F.elu(h)
            if self.residual:
                h = h_in + h  # residual connection (dims must match on the last layer)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy for unbalanced SBM classes."""
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
| 5,639 | 35.862745 | 120 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/ring_gnn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import time
"""
Ring-GNN
On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
https://arxiv.org/pdf/1905.12560v1.pdf
"""
from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
from layers.mlp_readout_layer import MLPReadout
class RingGNNNet(nn.Module):
    """Ring-GNN (Chen et al., 2019) for SBM node classification.

    Operates on a dense tensor that stacks the adjacency matrix with the
    node features (presumably shaped [1 x (1+num_node_type) x n x n] -- see
    forward()); every equivariant layer's pooled output is concatenated and
    fed to the MLP prediction head.
    """

    def __init__(self, net_params):
        super().__init__()
        self.num_node_type = net_params['in_dim']  # 'num_node_type' is 'nodeclasses' as in RingGNN original repo
        # node_classes = net_params['node_classes']
        avg_node_num = net_params['avg_node_num']
        radius = net_params['radius']
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.n_classes = net_params['n_classes']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        # channel widths per stage: input has 1 adjacency + num_node_type feature channels
        self.depth = [torch.LongTensor([1+self.num_node_type])] + [torch.LongTensor([hidden_dim])] * n_layers
        self.equi_modulelist = nn.ModuleList([RingGNNEquivLayer(self.device, m, n,
                                                                layer_norm=self.layer_norm,
                                                                residual=self.residual,
                                                                dropout=dropout,
                                                                radius=radius,
                                                                k2_init=0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
        # prediction head consumes the concatenation of every stage's pooled output
        self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(), self.n_classes)

    def forward(self, x_with_node_feat):
        """
            CODE ADPATED FROM https://github.com/leichen2018/Ring-GNN/
            : preparing input to the model in form new_adj
        """
        x = x_with_node_feat
        # this x is the tensor with all info available => adj, node feat
        x_list = [x]
        for layer in self.equi_modulelist:
            x = layer(x)
            x_list.append(x)
        # readout: pool each stage's output over one node axis, then concat channels
        x_list = [torch.sum(x, dim=2) for x in x_list]
        x_list = torch.cat(x_list, dim=1)
        # reshaping in form of [n x d_out]
        x_out = x_list.squeeze().permute(1,0)
        x_out = self.prediction(x_out)
        return x_out

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy (same scheme as the other SBM nets)."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
| 3,202 | 38.060976 | 141 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/three_wl_gnn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import time
"""
3WLGNN / ThreeWLGNN
Provably Powerful Graph Networks (Maron et al., 2019)
https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
"""
from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
from layers.mlp_readout_layer import MLPReadout
class ThreeWLGNNNet(nn.Module):
    """3WL-GNN (Maron et al., 2019) for SBM node classification.

    Works on a dense tensor stacking adjacency and node features (presumably
    [1 x (num_node_type+1) x n x n] -- see the per-block comments in
    forward()). With ``gin_like_readout`` (hard-coded True) each
    RegularBlock's pooled output is mapped to class scores and the scores
    are summed over blocks; otherwise all block outputs are concatenated
    and fed to a single MLP.
    """

    def __init__(self, net_params):
        super().__init__()
        self.num_node_type = net_params['in_dim']
        depth_of_mlp = net_params['depth_of_mlp']
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.n_classes = net_params['n_classes']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.gin_like_readout = True  # if True, uses GIN-like readout, but without diag pool, since node task
        block_features = [hidden_dim] * n_layers  # L here is the block number
        original_features_num = self.num_node_type + 1  # Number of features of the input
        # sequential mlp blocks
        last_layer_features = original_features_num
        self.reg_blocks = nn.ModuleList()
        for layer, next_layer_features in enumerate(block_features):
            mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
            self.reg_blocks.append(mlp_block)
            last_layer_features = next_layer_features
        if self.gin_like_readout:
            self.fc_layers = nn.ModuleList()
            for output_features in block_features:
                # each block's output will be pooled (thus have 2*output_features), and pass through a fully connected
                fc = FullyConnected(output_features, self.n_classes, activation_fn=None)
                self.fc_layers.append(fc)
        else:
            self.mlp_prediction = MLPReadout(sum(block_features)+original_features_num, self.n_classes)

    def forward(self, x_with_node_feat):
        """Return per-node class scores from the dense adjacency+feature tensor."""
        x = x_with_node_feat
        # this x is the tensor with all info available => adj, node feat
        if self.gin_like_readout:
            # scalar accumulator for the summed per-block scores
            scores = torch.tensor(0, device=self.device, dtype=x.dtype)
        else:
            x_list = [x]
        for i, block in enumerate(self.reg_blocks):
            x = block(x)
            if self.gin_like_readout:
                x_out = torch.sum(x, dim=2)  # from [1 x d_out x n x n] to [1 x d_out x n]
                x_out = x_out.squeeze().permute(1,0)  # reshaping in form of [n x d_out]
                scores = self.fc_layers[i](x_out) + scores
            else:
                x_list.append(x)
        if self.gin_like_readout:
            return scores
        else:
            # readout
            x_list = [torch.sum(x, dim=2) for x in x_list]
            x_list = torch.cat(x_list, dim=1)
            # reshaping in form of [n x d_out]
            x_out = x_list.squeeze().permute(1,0)
            x_out = self.mlp_prediction(x_out)
            return x_out

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy (same scheme as the other SBM nets)."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
| 4,050 | 36.859813 | 118 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import SAGEConv
class GraphSageNet(nn.Module):
    """GraphSAGE (DGL backend): embedding -> stacked GraphSageLayer -> MLP readout."""

    def __init__(self, net_params):
        super().__init__()
        hidden = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        drop = net_params['dropout']
        aggregator = net_params['sage_aggregator']
        depth = net_params['L']
        use_bn = net_params['batch_norm']
        use_res = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        # node feat is an integer -> embedding lookup
        self.embedding_h = nn.Embedding(net_params['in_dim'], hidden)
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        sage_stack = [GraphSageLayer(hidden, hidden, F.relu, drop, aggregator, use_bn, use_res)
                      for _ in range(depth - 1)]
        # final layer maps to `out_dim` for the readout
        sage_stack.append(GraphSageLayer(hidden, out_dim, F.relu, drop, aggregator, use_bn, use_res))
        self.layers = nn.ModuleList(sage_stack)
        self.MLP_layer = MLPReadout(out_dim, self.n_classes)

    def forward(self, g, h, e):
        """Return per-node class scores; edge features ``e`` are unused by GraphSAGE."""
        h = self.in_feat_dropout(self.embedding_h(h))
        for sage in self.layers:
            h = sage(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cross-entropy weighted inversely to cluster size (unbalanced classes)."""
        n_nodes = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes, dtype=torch.long).to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (n_nodes - cluster_sizes).float() / n_nodes
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
# the implementation of dgl and pyg are different
"""
class GraphSageNet_pyg(nn.Module):
    """GraphSAGE (PyG backend) with multiple SAGEConv layers.

    FIX: the configured ``sage_aggregator`` ('mean' / 'maxpool') was computed
    but never handed to the convolutions (the "# to do" in the original), so
    every layer silently used PyG's default mean aggregation. The mapped
    aggregation is now passed to each ``SAGEConv``; unknown values fall back
    to 'mean', the previous effective behavior.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feat is an integer
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        self.n_layers = net_params['L']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        # map the benchmark's aggregator names onto PyG aggregation strings
        self.aggr = 'max' if aggregator_type == 'maxpool' else 'mean'
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = nn.Dropout(p=dropout)
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.layers = nn.ModuleList([SAGEConv(hidden_dim, hidden_dim, aggr=self.aggr)
                                     for _ in range(self.n_layers - 1)])
        self.layers.append(SAGEConv(hidden_dim, out_dim, aggr=self.aggr))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class scores; edge features ``e`` are unused by GraphSAGE."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            h = self.dropout(h)  # input dropout, matching the DGL GraphSageLayer
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            # NOTE: no ReLU here, kept from the original implementation
            if self.residual:
                h = h_in + h  # residual connection (last layer needs out_dim == hidden_dim)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy for unbalanced SBM classes."""
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
| 6,149 | 36.048193 | 122 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gin_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
# import torch_geometric as tg
from torch_geometric.nn import GINConv
class GINNet(nn.Module):
    """GIN (DGL backend): stacked GINLayers with a jumping-knowledge-style
    sum of per-layer linear predictions (input embedding included)."""

    def __init__(self, net_params):
        super().__init__()
        hidden = net_params['hidden_dim']
        drop = net_params['dropout']
        self.n_layers = net_params['L']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        n_mlp_layers = net_params['n_mlp_GIN']       # depth of each GIN update MLP
        learn_eps = net_params['learn_eps_GIN']      # learnable epsilon flag
        aggr_type = net_params['neighbor_aggr_GIN']  # neighbour aggregation
        readout = net_params['readout']              # graph pooling type (unused for node task)
        use_bn = net_params['batch_norm']
        use_res = net_params['residual']
        self.embedding_h = nn.Embedding(net_params['in_dim'], hidden)
        # one GIN layer per depth level, each with its own update MLP
        self.ginlayers = torch.nn.ModuleList()
        for _ in range(self.n_layers):
            update_fn = ApplyNodeFunc(MLP(n_mlp_layers, hidden, hidden, hidden))
            self.ginlayers.append(GINLayer(update_fn, aggr_type, drop, use_bn,
                                           use_res, 0, learn_eps))
        # one linear predictor per representation (input + each layer's output)
        self.linears_prediction = torch.nn.ModuleList(
            nn.Linear(hidden, self.n_classes) for _ in range(self.n_layers + 1))

    def forward(self, g, h, e):
        """Sum the per-layer linear predictions over all hidden representations."""
        h = self.embedding_h(h)
        reps = [h]
        for gin in self.ginlayers:
            h = gin(g, h)
            reps.append(h)
        score = self.linears_prediction[0](reps[0])
        for lin, rep in zip(self.linears_prediction[1:], reps[1:]):
            score = score + lin(rep)
        return score

    def loss(self, pred, label):
        """Cross-entropy weighted inversely to cluster size (unbalanced classes)."""
        n_nodes = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes, dtype=torch.long).to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (n_nodes - cluster_sizes).float() / n_nodes
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class GINNet_pyg(nn.Module):
    """GIN (PyG backend): per-layer ``GINConv`` with batch norm / ReLU /
    residual / dropout applied here in forward() (PyG's GINConv has none of
    these built in), plus a jumping-knowledge-style sum of per-layer linear
    predictions including the input embedding."""

    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN: unused here, see note below
        readout = net_params['readout']  # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.normlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            # GINConv(nn, eps, train_eps); normalization/activation happen in forward()
            self.ginlayers.append(GINConv(ApplyNodeFunc(mlp), 0, learn_eps))
            # note that neighbor_aggr_type can not work because the father
            # self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
            #                                dropout, batch_norm, residual, 0, learn_eps))
            if batch_norm:
                self.normlayers.append(nn.BatchNorm1d(hidden_dim))
        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers + 1):
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))

    def forward(self, h, edge_index, e):
        """Return per-node class scores; edge features ``e`` are unused by GIN."""
        h = self.embedding_h(h)
        # list of hidden representation at each layer (including input)
        hidden_rep = [h]
        for i in range(self.n_layers):
            h_in = h
            h = self.ginlayers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
            hidden_rep.append(h)
        # sum the per-layer linear predictions over all representations
        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            score_over_layer += self.linears_prediction[i](h)
        return score_over_layer

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy (same scheme as the other SBM nets)."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
return loss | 6,582 | 36.19209 | 113 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout
class GCNNet(nn.Module):
    """GCN (DGL backend): embedding -> stacked GCNLayer -> MLP readout.

    Note: GCNLayer differs slightly from DGL's builtin GraphConv -- it mean-
    reduces the received messages rather than using the symmetric c_ij
    normalization from the original paper.
    """

    def __init__(self, net_params):
        super().__init__()
        hidden = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        drop = net_params['dropout']
        depth = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        # node feat is an integer -> embedding lookup
        self.embedding_h = nn.Embedding(net_params['in_dim'], hidden)
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        gcn_stack = [GCNLayer(hidden, hidden, F.relu, drop, self.batch_norm, self.residual)
                     for _ in range(depth - 1)]
        # final layer maps to `out_dim` for the readout
        gcn_stack.append(GCNLayer(hidden, out_dim, F.relu, drop, self.batch_norm, self.residual))
        self.layers = nn.ModuleList(gcn_stack)
        self.MLP_layer = MLPReadout(out_dim, self.n_classes)

    def forward(self, g, h, e):
        """Return per-node class scores; edge features ``e`` are unused by GCN."""
        h = self.in_feat_dropout(self.embedding_h(h))
        for gcn in self.layers:
            h = gcn(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cross-entropy weighted inversely to cluster size (unbalanced classes)."""
        n_nodes = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes, dtype=torch.long).to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (n_nodes - cluster_sizes).float() / n_nodes
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class GCNNet_pyg(nn.Module):
    """GCN (PyG backend) for SBM node classification.

    Embedded integer node features pass through ``L`` ``GCNConv`` layers with
    optional batch norm, ReLU, residual connections and dropout, then an MLP
    readout produces per-node class scores.

    FIX: previously every GCNConv (and batch norm) stayed at ``hidden_dim``
    while the readout was built for ``out_dim``, which crashes whenever
    hidden_dim != out_dim. The last layer now maps hidden_dim -> out_dim,
    mirroring the DGL ``GCNNet`` and ``GraphSageNet_pyg`` in this repo.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feat is an integer
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = dropout
        self.layers = nn.ModuleList([GCNConv(hidden_dim, hidden_dim, improved=False)
                                     for _ in range(self.n_layers - 1)])
        self.layers.append(GCNConv(hidden_dim, out_dim, improved=False))
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)
                                             for _ in range(self.n_layers - 1)])
            self.normlayers.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class scores; edge features ``e`` are unused by GCN."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_in + h  # residual connection (last layer needs out_dim == hidden_dim)
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy for unbalanced SBM classes."""
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
| 5,901 | 34.769697 | 110 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer, ResGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GatedGraphConv
class GatedGCNNet(nn.Module):
    """Residual Gated GCN (DGL backend); node and edge features are updated
    jointly, with an optional Laplacian positional encoding on nodes."""

    def __init__(self, net_params):
        super().__init__()
        hidden = net_params['hidden_dim']
        drop = net_params['dropout']
        depth = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            self.embedding_pos_enc = nn.Linear(net_params['pos_enc_dim'], hidden)
        self.embedding_h = nn.Embedding(net_params['in_dim'], hidden)  # integer node feat
        self.embedding_e = nn.Linear(1, hidden)                        # scalar edge feat
        self.layers = nn.ModuleList(GatedGCNLayer(hidden, hidden, drop,
                                                  self.batch_norm, self.residual)
                                    for _ in range(depth))
        self.MLP_layer = MLPReadout(hidden, self.n_classes)

    def forward(self, g, h, e, h_pos_enc=None):
        """Return per-node class scores; `h_pos_enc` is the optional positional encoding."""
        h = self.embedding_h(h)
        if self.pos_enc:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        e = self.embedding_e(e)
        for gated in self.layers:
            h, e = gated(g, h, e)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cross-entropy weighted inversely to cluster size (unbalanced classes)."""
        n_nodes = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes, dtype=torch.long).to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (n_nodes - cluster_sizes).float() / n_nodes
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class ResGatedGCNNet_pyg(nn.Module):
    """Residual Gated GCN (PyG backend).

    Node and edge embeddings are updated jointly by ``ResGatedGCNLayer``;
    batch norm, residual connections and dropout are applied here in
    forward() rather than inside the layer. Optionally adds a Laplacian
    positional encoding to the node features.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        in_dim_edge = 1  # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)  # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        self.layers = nn.ModuleList([ResGatedGCNLayer(hidden_dim, hidden_dim, self.dropout,
                                                      self.batch_norm, self.residual) for _ in range(self.n_layers)])
        # self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
        #                                            self.batch_norm, self.residual) for _ in range(n_layers)])
        if self.batch_norm:
            # separate batch norms for node and edge representations, one pair per layer
            self.normlayers_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
            self.normlayers_e = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node class scores; `h_pos_enc` is the optional positional encoding."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets
        for i in range(self.n_layers):
            h_in = h
            e_in = e
            h, e = self.layers[i](h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers_h[i](h)
                e = self.normlayers_e[i](e)  # batch normalization
            # NOTE(review): unlike the DGL GatedGCNLayer path (see the
            # commented block below), no ReLU is applied here -- confirm
            # whether that is intentional.
            if self.residual:
                h = h_in + h  # residual connection
                e = e_in + e
            h = F.dropout(h, self.dropout, training=self.training)
            e = F.dropout(e, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out
        # if self.batch_norm:
        #     h = self.bn_node_h(h)  # batch normalization
        #     e = self.bn_node_e(e)  # batch normalization
        #
        # h = F.relu(h)  # non-linear activation
        # e = F.relu(e)  # non-linear activation
        #
        # if self.residual:
        #     h = h_in + h  # residual connection
        #     e = e_in + e  # residual connection
        #
        # h = F.dropout(h, self.dropout, training=self.training)
        # e = F.dropout(e, self.dropout, training=self.training)

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy (same scheme as the other SBM nets)."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
"""
Gated Graph Sequence Neural Networks
An Experimental Study of Neural Networks for Variable Graphs
Li Y, Tarlow D, Brockschmidt M, et al. Gated graph sequence neural networks[J]. arXiv preprint arXiv:1511.05493, 2015.
https://arxiv.org/abs/1511.05493
Note that the pyg and dgl of the gatedGCN are different models.
"""
class GatedGCNNet_pyg(nn.Module):
    """Gated Graph Sequence NN (Li et al., 2015) via PyG ``GatedGraphConv``.

    As noted in the module comment, this is a *different* model from the DGL
    GatedGCN above: a single GatedGraphConv runs ``L`` GRU-style propagation
    steps, and edge features are only consumed as scalar edge weights.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        in_dim_edge = 1  # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)  # node feat is an integer
        # self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        # single GatedGraphConv(out_channels, num_layers): one module, L propagation steps
        self.layers = nn.ModuleList([GatedGraphConv(hidden_dim, n_layers, aggr = 'add')])
        # self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
        #                                            self.batch_norm, self.residual) for _ in range(n_layers)])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node class scores; `h_pos_enc` is the optional positional encoding."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        # e = self.embedding_e(e)
        # res gated convnets
        for conv in self.layers:
            h_in = h
            # NOTE(review): `e` is forwarded as GatedGraphConv's edge_weight,
            # which expects a [num_edges] tensor -- confirm the caller's shape.
            h = conv(h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers[0](h)
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Cluster-size-weighted cross-entropy (same scheme as the other SBM nets)."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/mlp_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from layers.mlp_readout_layer import MLPReadout
class MLPNet(nn.Module):
    """Structure-agnostic MLP baseline (DGL interface) for SBM node classification.

    The graph `g` is accepted only for API parity with the GNN models and is
    never used: every node embedding is transformed independently.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers blocks of Linear -> ReLU -> Dropout (first block here, rest in the loop)
        feat_mlp_modules = [
            nn.Linear(hidden_dim, hidden_dim, bias=True),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        for _ in range(n_layers-1):
            feat_mlp_modules.append(nn.Linear(hidden_dim, hidden_dim, bias=True))
            feat_mlp_modules.append(nn.ReLU())
            feat_mlp_modules.append(nn.Dropout(dropout))
        self.feat_mlp = nn.Sequential(*feat_mlp_modules)
        if self.gated:
            # elementwise sigmoid gate applied to the MLP output
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e):
        """Return per-node class logits; `g` and `e` are unused."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # MLP
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        # output
        h_out = self.readout_mlp(h)
        return h_out
    def loss(self, pred, label):
        """Class-weighted cross-entropy; weights favour under-represented clusters."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
class MLPNet_pyg(nn.Module):
    """Structure-agnostic MLP baseline (PyG interface).

    ``edge_index`` and ``e`` are accepted only for API parity with the GNN
    models; each node embedding is transformed independently.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers blocks of Linear -> ReLU -> Dropout
        blocks = []
        for _ in range(n_layers):
            blocks += [
                nn.Linear(hidden_dim, hidden_dim, bias=True),
                nn.ReLU(),
                nn.Dropout(dropout),
            ]
        self.feat_mlp = nn.Sequential(*blocks)
        if self.gated:
            # elementwise sigmoid gate applied to the MLP output
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class logits; the graph structure is ignored."""
        h = self.in_feat_dropout(self.embedding_h(h))
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced SBM clusters."""
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 4,619 | 29.8 | 91 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/SBMs_node_classification/mo_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch_scatter import scatter_add
# from num_nodes import maybe_num_nodes
import dgl
from torch_geometric.nn.conv import MessagePassing
import numpy as np
import torch.nn as nn
from torch import Tensor
# from torch_geometric.utils import degree
from torch_scatter import scatter_add
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
from layers.gmm_layer import GMMLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GMMConv
class MoNet(nn.Module):
    """MoNet / Gaussian-mixture convolution network (DGL) for node classification.

    Monti et al., Geometric Deep Learning on Graphs and Manifolds using
    Mixture Model CNNs (CVPR 2017). https://arxiv.org/pdf/1611.08402.pdf
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']          # number of Gaussian kernels
        dim = net_params['pseudo_dim_MoNet']   # pseudo-coordinate dimension
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        aggr_type = "sum"  # default for MoNet
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        # Hidden layers: each GMM layer gets its own 2 -> dim pseudo projection
        for _ in range(n_layers - 1):
            self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                                        dropout, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
                                    dropout, batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        h = self.embedding_h(h)
        # per-edge pseudo-coordinates derived from endpoint in-degrees
        g.ndata['deg'] = g.in_degrees()
        g.apply_edges(self.compute_pseudo)
        pseudo = g.edata['pseudo'].to(self.device).float()
        for i in range(len(self.layers)):
            h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
        return self.MLP_layer(h)

    def compute_pseudo(self, edges):
        """Per-edge pseudo-coordinates (1/sqrt(deg_u + 1), 1/sqrt(deg_v + 1)).

        The +1 avoids division by zero for nodes with in-degree 0.
        BUGFIX: the destination coordinate previously read ``edges.src['deg']``,
        duplicating the source degree; it now uses ``edges.dst['deg']``.
        """
        srcs = 1 / (edges.src['deg'] + 1).float().sqrt()
        dsts = 1 / (edges.dst['deg'] + 1).float().sqrt()
        pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
        return {'pseudo': pseudo}

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced SBM clusters."""
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        criterion = nn.CrossEntropyLoss(weight=weight)
        return criterion(pred, label)
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class MoNetNet_pyg(MessagePassing):
    """MoNet / GMM convolution network (PyG ``GMMConv``) for node classification.

    Pseudo-coordinates per edge are (deg_src^-1/2, deg_dst^-1/2), each projected
    through a per-layer Linear+Tanh before being fed to GMMConv.
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel'] # for MoNet
        dim = net_params['pseudo_dim_MoNet'] # for MoNet
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        self.dim = dim
        # aggr_type = "sum" # default for MoNet
        aggr_type = "mean"
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        self.batchnorm_h = nn.ModuleList()
        # Hidden layer
        for _ in range(self.n_layers - 1):
            self.layers.append(GMMConv(hidden_dim, hidden_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                       root_weight = True, bias = True))
            if self.batch_norm:
                self.batchnorm_h.append(nn.BatchNorm1d(hidden_dim))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMConv(hidden_dim, out_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                   root_weight = True, bias = True))
        if self.batch_norm:
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
    # to do
    def forward(self, h, edge_index, e):
        h = self.embedding_h(h)
        # out-degree per source node, computed by scattering unit edge weights
        edge_weight = torch.ones((edge_index.size(1),),
                                 device = edge_index.device)
        row, col = edge_index[0], edge_index[1]
        deg = scatter_add(edge_weight, row, dim=0, dim_size=h.size(0))
        # NOTE: pow_ mutates `deg` in place; deg_inv_sqrt aliases the same tensor
        deg_inv_sqrt = deg.pow_(-0.5)
        # isolated nodes produce 0^-0.5 == inf -> zero them out
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        pseudo = torch.cat((deg_inv_sqrt[row].unsqueeze(-1), deg_inv_sqrt[col].unsqueeze(-1)), dim=1)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index, self.pseudo_proj[i](pseudo))
            if self.batch_norm:
                h = self.batchnorm_h[i](h) # batch normalization
            h = F.relu(h) # non-linear activation
            # NOTE(review): on the last layer the residual assumes
            # out_dim == hidden_dim, otherwise shapes mismatch — confirm configs
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)
    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced SBM clusters."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gat_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from torch_geometric.typing import OptPairTensor
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GATConv
class GATNet(nn.Module):
    """Graph Attention Network (DGL GATLayer stack).

    Veličković et al., Graph Attention Networks (ICLR 2018).
    https://arxiv.org/abs/1710.10903
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        # hidden layers concatenate their heads, hence the hidden_dim * num_heads width
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
                                              dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
        # single-head output layer
        self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GAT layers
        for conv in self.layers:
            h = conv(g, h)
        # per-node class scores
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain cross-entropy (a duplicated, unreachable `return loss` was removed)."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
class GATNet_pyg(nn.Module):
    """Graph Attention Network (PyG ``GATConv`` stack).

    Hidden layers use `num_heads` concatenated heads; the output layer uses a
    single head of width `out_dim`.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimensionality
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GATConv(hidden_dim * num_heads, hidden_dim, num_heads,
                                             dropout=dropout) for _ in range(self.n_layers - 1)])
        # BUGFIX: `dropout` was previously passed positionally into GATConv's
        # 4th parameter (`concat`), so the final layer's attention dropout was
        # silently ignored; it is now passed by keyword.
        self.layers.append(GATConv(hidden_dim * num_heads, out_dim, heads=1, dropout=dropout))
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim * num_heads) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            # pass the bipartite (src, dst) form so heads are concatenated, not added
            h: OptPairTensor = (h, h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            h = F.elu(h)
            # NOTE(review): residual on the last layer assumes
            # out_dim == hidden_dim * num_heads — confirm configs
            if self.residual:
                h = h_in + h  # residual connection
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain cross-entropy over class logits."""
        criterion = nn.CrossEntropyLoss()
        return criterion(pred, label)
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/graphsage_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import SAGEConv
class GraphSageNet(nn.Module):
    """GraphSAGE network (DGL GraphSageLayer stack).

    William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation
    Learning on Large Graphs (NeurIPS 2017).
    https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
    """

    def __init__(self, net_params):
        super().__init__()
        num_node_types = net_params['in_dim']  # node feat is an integer id
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        dropout = net_params['dropout']
        aggregator = net_params['sage_aggregator']
        depth = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(num_node_types, hidden_dim)
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        # depth-1 hidden layers plus one out_dim-wide output layer
        convs = [GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                aggregator, batch_norm, residual) for _ in range(depth - 1)]
        convs.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout,
                                    aggregator, batch_norm, residual))
        self.layers = nn.ModuleList(convs)
        self.MLP_layer = MLPReadout(out_dim, self.n_classes)

    def forward(self, g, h, e):
        """Return per-node class logits; `e` is unused."""
        h = self.in_feat_dropout(self.embedding_h(h))
        for sage in self.layers:
            h = sage(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain cross-entropy over class logits."""
        return nn.CrossEntropyLoss()(pred, label)
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
# the implementation of dgl and pyg are different
"""
class GraphSageNet_pyg(nn.Module):
    """GraphSAGE network (PyG ``SAGEConv`` stack).

    William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation
    Learning on Large Graphs (NeurIPS 2017).
    https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimensionality
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        self.n_layers = net_params['L']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        # BUGFIX: the configured aggregator used to be computed but never handed
        # to SAGEConv, so every layer silently used the default ('mean').
        # DGL's 'maxpool' maps to PyG's 'max'; anything else falls back to 'mean'.
        self.aggr = 'max' if aggregator_type == 'maxpool' else 'mean'
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = nn.Dropout(p=dropout)
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.layers = nn.ModuleList([SAGEConv(hidden_dim, hidden_dim, aggr=self.aggr)
                                     for _ in range(self.n_layers - 1)])
        self.layers.append(SAGEConv(hidden_dim, out_dim, aggr=self.aggr))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class logits; `e` is unused."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            h = self.dropout(h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            # no explicit nonlinearity between layers (kept as in the original)
            # NOTE(review): residual on the last layer assumes
            # out_dim == hidden_dim — confirm configs
            if self.residual:
                h = h_in + h  # residual connection
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain cross-entropy over class logits."""
        criterion = nn.CrossEntropyLoss()
        return criterion(pred, label)
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gin_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
# import torch_geometric as tg
from torch_geometric.nn import GINConv
class GINNet(nn.Module):
    """GIN (DGL) node classifier.

    Xu et al., How Powerful Are Graph Neural Networks? (ICLR 2019).
    Sums a linear prediction from the hidden representation at every depth
    (input included), jumping-knowledge style.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN'] # GIN
        learn_eps = net_params['learn_eps_GIN'] # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
        readout = net_params['readout'] # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
                                           dropout, batch_norm, residual, 0, learn_eps))
        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers+1):
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))
    def forward(self, g, h, e):
        """Return per-node class logits; `e` is unused."""
        h = self.embedding_h(h)
        # list of hidden representation at each layer (including input)
        hidden_rep = [h]
        for i in range(self.n_layers):
            h = self.ginlayers[i](g, h)
            hidden_rep.append(h)
        # sum the per-depth linear predictions
        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            score_over_layer += self.linears_prediction[i](h)
        return score_over_layer
    def loss(self, pred, label):
        """Plain (unweighted) cross-entropy."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
class GINNet_pyg(nn.Module):
    """GIN (PyG ``GINConv``) node classifier.

    Sums a linear prediction from the hidden representation at every depth
    (input included), jumping-knowledge style.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # depth of each GIN update MLP
        learn_eps = net_params['learn_eps_GIN']  # learn the (1 + eps) self-weight
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # unused: PyG GINConv always sums
        readout = net_params['readout']  # graph pooling type (unused for node tasks)
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)  # dense node features
        self.ginlayers = torch.nn.ModuleList()
        self.normlayers = torch.nn.ModuleList()
        for _ in range(self.n_layers):
            update_fn = ApplyNodeFunc(MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim))
            self.ginlayers.append(GINConv(update_fn, 0, learn_eps))
            if batch_norm:
                self.normlayers.append(nn.BatchNorm1d(hidden_dim))
        # one linear readout per representation depth (input included)
        self.linears_prediction = torch.nn.ModuleList(
            nn.Linear(hidden_dim, n_classes) for _ in range(self.n_layers + 1)
        )

    def forward(self, h, edge_index, e):
        """Return per-node class logits; `e` is unused."""
        h = self.embedding_h(h)
        hidden_rep = [h]
        for depth, gin in enumerate(self.ginlayers):
            h_prev = h
            h = gin(h, edge_index)
            if self.batch_norm:
                h = self.normlayers[depth](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_prev + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
            hidden_rep.append(h)
        score = 0
        for readout_layer, rep in zip(self.linears_prediction, hidden_rep):
            score = score + readout_layer(rep)
        return score

    def loss(self, pred, label):
        """Plain (unweighted) cross-entropy."""
        return nn.CrossEntropyLoss()(pred, label)
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout
class GCNNet(nn.Module):
    """GCN (DGL GCNLayer stack) for node classification.

    Kipf & Welling, Semi-Supervised Classification with Graph Convolutional
    Networks (ICLR 2017). http://arxiv.org/abs/1609.02907
    """

    def __init__(self, net_params):
        super().__init__()
        num_node_types = net_params['in_dim']  # node feat is an integer id
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        dropout = net_params['dropout']
        depth = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(num_node_types, hidden_dim)
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        # note: GCNLayer mean-reduces received messages rather than using the
        # 1/sqrt(d_i d_j) normalization of the paper
        convs = [GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                          self.batch_norm, self.residual) for _ in range(depth - 1)]
        convs.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout,
                              self.batch_norm, self.residual))
        self.layers = nn.ModuleList(convs)
        self.MLP_layer = MLPReadout(out_dim, self.n_classes)

    def forward(self, g, h, e):
        """Return per-node class logits; `e` is unused."""
        h = self.in_feat_dropout(self.embedding_h(h))
        for gcn in self.layers:
            h = gcn(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain cross-entropy over class logits."""
        return nn.CrossEntropyLoss()(pred, label)
class GCNNet_pyg(nn.Module):
    """GCN (PyG ``GCNConv`` stack) for node classification.

    All conv layers are hidden_dim -> hidden_dim, so the readout consumes a
    hidden_dim-wide representation.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimensionality
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = dropout
        self.layers = nn.ModuleList([GCNConv(hidden_dim, hidden_dim, improved=False)
                                     for _ in range(self.n_layers)])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)
                                             for _ in range(self.n_layers)])
        # BUGFIX: the readout used to be built on net_params['out_dim'], but
        # every conv above emits hidden_dim features, so any out_dim !=
        # hidden_dim caused a shape mismatch. The readout now matches the
        # conv output width.
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node class logits; `e` is unused."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain cross-entropy over class logits."""
        criterion = nn.CrossEntropyLoss()
        return criterion(pred, label)

    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins style targets)."""
        criterion = nn.BCEWithLogitsLoss()
        return criterion(pred, label.to(torch.float))
| 5,125 | 33.635135 | 110 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gated_gcn_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer, ResGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GatedGraphConv
class GatedGCNNet(nn.Module):
    """Residual Gated Graph ConvNet (DGL) for node classification.

    Bresson & Laurent, An Experimental Study of Neural Networks for Variable
    Graphs (ICLR 2018). https://arxiv.org/pdf/1711.07553v2.pdf
    Updates node AND edge features at every layer.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) for _ in range(n_layers) ])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return per-node logits; optionally adds a positional encoding to h."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets: each layer transforms both node and edge features
        for conv in self.layers:
            h, e = conv(g, h, e)
        # output
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Plain cross-entropy over class logits."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class ResGatedGCNNet_pyg(nn.Module):
    """Residual Gated Graph ConvNet (PyG) for node classification.

    Bresson & Laurent, https://arxiv.org/pdf/1711.07553v2.pdf
    Updates node AND edge features at every layer.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        in_dim_edge = 1  # edge_dim (feat is a float)
        num_bond_type = 3
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.edge_feat = net_params['edge_feat']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        # BUGFIX: this conditional used to be immediately shadowed by an
        # unconditional `nn.Linear(in_dim_edge, hidden_dim)` assignment, which
        # made the `edge_feat` flag a silent no-op. The overwrite is removed so
        # integer bond-type edge features use an Embedding as intended.
        if self.edge_feat:
            self.embedding_e = nn.Embedding(num_bond_type, hidden_dim)
        else:
            self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList([ResGatedGCNLayer(hidden_dim, hidden_dim, self.dropout,
                                                      self.batch_norm, self.residual) for _ in range(self.n_layers)])
        if self.batch_norm:
            self.normlayers_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
            self.normlayers_e = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node logits; optionally adds a positional encoding to h."""
        # input embeddings
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets: each layer updates node and edge features
        for i in range(self.n_layers):
            h_in = h
            e_in = e
            h, e = self.layers[i](h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers_h[i](h)
                e = self.normlayers_e[i](e)  # batch normalization
            if self.residual:
                h = h_in + h  # residual connection
                e = e_in + e
            h = F.dropout(h, self.dropout, training=self.training)
            e = F.dropout(e, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Plain cross-entropy over class logits."""
        criterion = nn.CrossEntropyLoss()
        return criterion(pred, label)

    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins style targets)."""
        criterion = nn.BCEWithLogitsLoss()
        return criterion(pred, label.to(torch.float))
"""
Gated Graph Sequence Neural Networks
An Experimental Study of Neural Networks for Variable Graphs
Li Y, Tarlow D, Brockschmidt M, et al. Gated graph sequence neural networks[J]. arXiv preprint arXiv:1511.05493, 2015.
https://arxiv.org/abs/1511.05493
Note that the PyG and DGL implementations of the GatedGCN in this repository are different models.
"""
class GatedGCNNet_pyg(nn.Module):
    """Node-classification net built on torch_geometric's GatedGraphConv
    (Li et al., 2015).

    NOTE: this is the GGNN-style gated layer, a different model from the
    DGL GatedGCN used elsewhere in this repository.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
        # self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        # A single GatedGraphConv module performs n_layers internal
        # propagation steps, so self.layers holds exactly one conv.
        self.layers = nn.ModuleList([GatedGraphConv(hidden_dim, n_layers, aggr = 'add')])
        # self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
        #                                             self.batch_norm, self.residual) for _ in range(n_layers)])
        if self.batch_norm:
            # one BN because there is only one conv in self.layers
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        # e = self.embedding_e(e)
        # res gated convnets
        for conv in self.layers:
            h_in = h  # for residual connection
            # e is forwarded as the conv's third (edge weight) argument
            h = conv(h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers[0](h)
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        # dataset.dataset[0].y.view(-1).size()
return loss | 8,352 | 37.493088 | 122 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/mlp_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from layers.mlp_readout_layer import MLPReadout
class MLPNet(nn.Module):
    """Graph-agnostic MLP baseline (DGL interface).

    The graph `g` and edge features `e` are deliberately ignored in
    forward(): each node is classified from its own embedded feature alone.
    """

    def __init__(self, net_params):
        super().__init__()
        num_node_types = net_params['in_dim']   # node feat is an integer id
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(num_node_types, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers identical (Linear -> ReLU -> Dropout) stages.
        stages = []
        for _ in range(n_layers):
            stages.extend([nn.Linear(hidden_dim, hidden_dim, bias=True),
                           nn.ReLU(),
                           nn.Dropout(dropout)])
        self.feat_mlp = nn.Sequential(*stages)
        if self.gated:
            # optional sigmoid self-gate on the MLP output
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, n_classes)

    def forward(self, g, h, e):
        """Per-node MLP; graph structure is unused."""
        h = self.in_feat_dropout(self.embedding_h(h))
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Cross-entropy over class logits."""
        return nn.CrossEntropyLoss()(pred, label)
class MLPNet_pyg(nn.Module):
    """Graph-agnostic MLP baseline (PyG interface).

    `edge_index` and `e` are deliberately ignored in forward(): each node
    is classified from its own projected feature alone.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        # Dense projection of the raw node features (floats here, unlike
        # the Embedding used by the DGL variant).
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers identical (Linear -> ReLU -> Dropout) stages.
        stages = []
        for _ in range(n_layers):
            stages.extend([nn.Linear(hidden_dim, hidden_dim, bias=True),
                           nn.ReLU(),
                           nn.Dropout(dropout)])
        self.feat_mlp = nn.Sequential(*stages)
        if self.gated:
            # optional sigmoid self-gate on the MLP output
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Per-node MLP; graph structure is unused."""
        h = self.in_feat_dropout(self.embedding_h(h))
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Cross-entropy over class logits."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits; targets cast to float."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
| 3,772 | 27.583333 | 93 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/mo_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_add
import dgl
import numpy as np
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
from layers.gmm_layer import GMMLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GMMConv
class MoNet(nn.Module):
    """MoNet / GMM-convolution net (Monti et al., CVPR 2017), DGL version.

    Pseudo-coordinates per edge are (1/sqrt(deg_src+1), 1/sqrt(deg_dst+1)),
    projected to `pseudo_dim_MoNet` dims by a per-layer Linear+Tanh.
    """

    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']                       # for MoNet
        dim = net_params['pseudo_dim_MoNet']                # for MoNet
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        aggr_type = "sum"                                    # default for MoNet
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)  # node feat is an integer id
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        # Hidden layer(s): hidden_dim -> hidden_dim
        for _ in range(n_layers-1):
            self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                                        dropout, batch_norm, residual))
            # 2-D degree pseudo-coords -> `dim`-D learned coords
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer: hidden_dim -> out_dim
        self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
                                    dropout, batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        h = self.embedding_h(h)
        # computing the 'pseudo' named tensor which depends on node degrees
        g.ndata['deg'] = g.in_degrees()
        g.apply_edges(self.compute_pseudo)
        pseudo = g.edata['pseudo'].to(self.device).float()
        for i in range(len(self.layers)):
            h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
        return self.MLP_layer(h)

    def compute_pseudo(self, edges):
        # compute pseudo edge features for MoNet
        # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
        srcs = 1/np.sqrt(edges.src['deg']+1)
        dsts = 1/np.sqrt(edges.dst['deg']+1)
        pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
        return {'pseudo': pseudo}

    def loss(self, pred, label):
        """Cross-entropy over class logits."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class MoNetNet_pyg(nn.Module):
    """MoNet / GMM-convolution net (Monti et al., CVPR 2017), PyG version
    using torch_geometric's GMMConv.

    Edge pseudo-coordinates (deg_i^-1/2, deg_j^-1/2) are computed on the
    fly in forward() from the edge index.
    """

    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']  # for MoNet
        dim = net_params['pseudo_dim_MoNet']  # for MoNet
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        self.dim = dim
        # aggr_type = "sum" # default for MoNet
        aggr_type = "mean"
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
        # self.embedding_e = nn.Linear(1, dim) # edge feat is a float
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        self.batchnorm_h = nn.ModuleList()
        # Hidden layer(s): hidden_dim -> hidden_dim
        for _ in range(self.n_layers - 1):
            self.layers.append(GMMConv(hidden_dim, hidden_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                       root_weight = True, bias = True))
            if self.batch_norm:
                self.batchnorm_h.append(nn.BatchNorm1d(hidden_dim))
            # 2-D degree pseudo-coords -> `dim`-D learned coords
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer: hidden_dim -> out_dim
        self.layers.append(GMMConv(hidden_dim, out_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                   root_weight = True, bias = True))
        if self.batch_norm:
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    # to do
    def forward(self, h, edge_index, e):
        h = self.embedding_h(h)
        # Degree of every target node (unit weight per edge).
        edge_weight = torch.ones((edge_index.size(1),),
                                 device = edge_index.device)
        row, col = edge_index[0], edge_index[1]
        deg = scatter_add(edge_weight, row, dim=0, dim_size=h.size(0))
        deg_inv_sqrt = deg.pow_(-0.5)   # NOTE: in-place; `deg` now holds deg^-1/2
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)  # guard deg == 0
        # Per-edge pseudo-coordinates: (deg_src^-1/2, deg_dst^-1/2).
        pseudo = torch.cat((deg_inv_sqrt[row].unsqueeze(-1), deg_inv_sqrt[col].unsqueeze(-1)), dim=1)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index, self.pseudo_proj[i](pseudo))
            if self.batch_norm:
                h = self.batchnorm_h[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Cross-entropy over class logits."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
return loss | 6,420 | 39.639241 | 121 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/graphsage_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import SAGEConv
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
class GraphSageLayer(nn.Module):
    """GraphSage layer (Hamilton et al., NeurIPS 2017), DGL version.

    Either composes a custom aggregator + NodeApply pair
    (dgl_builtin=False) or wraps dgl's builtin SAGEConv, with optional
    batch-norm and residual connection.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.aggregator_type = aggregator_type
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin

        # residual add requires matching widths
        if in_feats != out_feats:
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)

        if dgl_builtin == False:
            self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                       bias=bias)
            if aggregator_type == "maxpool":
                self.aggregator = MaxPoolAggregator(in_feats, in_feats,
                                                    activation, bias)
            elif aggregator_type == "lstm":
                self.aggregator = LSTMAggregator(in_feats, in_feats)
            else:
                # default: mean aggregation
                self.aggregator = MeanAggregator()
        else:
            self.sageconv = SAGEConv(in_feats, out_feats, aggregator_type,
                                     dropout, activation=activation)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def forward(self, g, h):
        h_in = h              # for residual connection

        if self.dgl_builtin == False:
            h = self.dropout(h)
            g.ndata['h'] = h
            # copy node feats along edges, reduce with the aggregator,
            # then combine self + neighbourhood features in NodeApply
            g.update_all(fn.copy_src(src='h', out='m'),
                         self.aggregator,
                         self.nodeapply)
            h = g.ndata['h']
        else:
            h = self.sageconv(g, h)

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.residual:
            h = h_in + h       # residual connection

        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, aggregator={}, residual={})'.format(self.__class__.__name__,
                                                                                        self.in_channels,
                                                                                        self.out_channels, self.aggregator_type, self.residual)
"""
Aggregators for GraphSage
"""
class Aggregator(nn.Module):
    """Base class for GraphSage neighbourhood aggregators.

    Subclasses implement ``aggre``, which reduces the (N, num_neigh, F)
    mailbox tensor to an (N, F) summary returned under key 'c'.
    """

    def __init__(self):
        super().__init__()

    def forward(self, node):
        # DGL reduce-function protocol: read the mailbox, emit the summary.
        messages = node.mailbox['m']
        return {"c": self.aggre(messages)}

    def aggre(self, neighbour):
        # N x F -- must be provided by subclasses.
        raise NotImplementedError
class MeanAggregator(Aggregator):
    """GraphSage mean aggregator: average the incoming neighbour messages."""

    def __init__(self):
        super().__init__()

    def aggre(self, neighbour):
        # (N, num_neigh, F) -> (N, F)
        return neighbour.mean(dim=1)
class MaxPoolAggregator(Aggregator):
    """GraphSage max-pool aggregator: linear projection (plus optional
    activation) of each neighbour message, then element-wise max."""

    def __init__(self, in_feats, out_feats, activation, bias):
        super().__init__()
        self.linear = nn.Linear(in_feats, out_feats, bias=bias)
        self.activation = activation

    def aggre(self, neighbour):
        transformed = self.linear(neighbour)
        if self.activation:
            transformed = self.activation(transformed)
        # max over the neighbour dimension, discarding the argmax indices
        return torch.max(transformed, dim=1).values
class LSTMAggregator(Aggregator):
    """GraphSage LSTM aggregator: run an LSTM over a random permutation of
    the neighbour messages and keep the last output step.
    """

    def __init__(self, in_feats, hidden_feats):
        super().__init__()
        self.lstm = nn.LSTM(in_feats, hidden_feats, batch_first=True)
        self.hidden_dim = hidden_feats
        self.hidden = self.init_hidden()

        # BUGFIX: nn.LSTM has no `.weight` attribute (its parameters are
        # weight_ih_l0, weight_hh_l0, ...), so the original call
        # `nn.init.xavier_uniform_(self.lstm.weight, ...)` raised
        # AttributeError. Initialize each weight matrix individually.
        gain = nn.init.calculate_gain('relu')
        for name, param in self.lstm.named_parameters():
            if name.startswith('weight'):
                nn.init.xavier_uniform_(param, gain=gain)

    def init_hidden(self):
        """
        Defaulted to initialite all zero
        """
        return (torch.zeros(1, 1, self.hidden_dim),
                torch.zeros(1, 1, self.hidden_dim))

    def aggre(self, neighbours):
        """
        aggregation function: (N, num_neigh, F) -> (N, hidden_dim)
        """
        # N X F -- shuffle neighbour order so the aggregation does not
        # depend on any particular ordering (Hamilton et al., 2017).
        rand_order = torch.randperm(neighbours.size()[1])
        neighbours = neighbours[:, rand_order, :]

        (lstm_out, self.hidden) = self.lstm(neighbours.view(neighbours.size()[0], neighbours.size()[1], -1))
        # last time-step output is the neighbourhood summary
        return lstm_out[:, -1, :]

    def forward(self, node):
        neighbour = node.mailbox['m']
        c = self.aggre(neighbour)
        return {"c": c}
class NodeApply(nn.Module):
    """DGL node-apply step for GraphSage: concatenate a node's own features
    with its aggregated neighbourhood summary, project, L2-normalize each
    row, then apply the optional activation.
    """

    def __init__(self, in_feats, out_feats, activation, dropout, bias=True):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        # input is [h || c], hence in_feats * 2
        self.linear = nn.Linear(in_feats * 2, out_feats, bias)
        self.activation = activation

    def concat(self, h, aggre_result):
        combined = torch.cat((h, aggre_result), 1)
        return self.linear(combined)

    def forward(self, node):
        bundle = self.concat(node.data['h'], node.data['c'])
        bundle = F.normalize(bundle, p=2, dim=1)   # row-wise L2 normalization
        if self.activation:
            bundle = self.activation(bundle)
        return {"h": bundle}
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GraphSageLayerEdgeFeat(nn.Module):
    """GraphSage variant with edge-gated anisotropic max-pool aggregation.

    For every edge, the gate e_ij = Bh_i + Bh_j modulates the source
    message Ah_j through a sigmoid before the max-pool reduce; NodeApply
    then combines self and aggregated features.  Edge features are derived
    internally, not taken as input.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual

        # residual add requires matching widths
        if in_feats != out_feats:
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)

        self.activation = activation

        self.A = nn.Linear(in_feats, out_feats, bias=bias)   # message projection
        self.B = nn.Linear(in_feats, out_feats, bias=bias)   # gate projection

        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        Ah_j = edges.src['Ah']
        e_ij = edges.src['Bh'] + edges.dst['Bh'] # e_ij = Bhi + Bhj
        edges.data['e'] = e_ij
        return {'Ah_j' : Ah_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        # Anisotropic MaxPool aggregation
        Ah_j = nodes.mailbox['Ah_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)

        Ah_j = sigma_ij * Ah_j
        if self.activation:
            Ah_j = self.activation(Ah_j)

        c = torch.max(Ah_j, dim=1)[0]
        return {'c' : c}

    def forward(self, g, h):
        h_in = h # for residual connection

        h = self.dropout(h)

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.update_all(self.message_func,
                     self.reduce_func,
                     self.nodeapply)
        h = g.ndata['h']

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.residual:
            h = h_in + h # residual connection

        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
##############################################################
class GraphSageLayerEdgeReprFeat(nn.Module):
    """GraphSage variant that maintains explicit edge representations.

    The gate e_ij = Ce_ij + Bh_i + Bh_j (edge feature plus endpoint
    projections) modulates the source message Ah_j before the max-pool
    reduce; both node and edge tensors are updated and returned.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual

        # residual add requires matching widths
        if in_feats != out_feats:
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)

        self.activation = activation

        self.A = nn.Linear(in_feats, out_feats, bias=bias)   # message projection
        self.B = nn.Linear(in_feats, out_feats, bias=bias)   # node gate projection
        self.C = nn.Linear(in_feats, out_feats, bias=bias)   # edge feature projection

        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)
            self.batchnorm_e = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        Ah_j = edges.src['Ah']
        e_ij = edges.data['Ce'] + edges.src['Bh'] + edges.dst['Bh'] # e_ij = Ce_ij + Bhi + Bhj
        edges.data['e'] = e_ij
        return {'Ah_j' : Ah_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        # Anisotropic MaxPool aggregation
        Ah_j = nodes.mailbox['Ah_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)

        Ah_j = sigma_ij * Ah_j
        if self.activation:
            Ah_j = self.activation(Ah_j)

        c = torch.max(Ah_j, dim=1)[0]
        return {'c' : c}

    def forward(self, g, h, e):
        h_in = h # for residual connection
        e_in = e

        h = self.dropout(h)

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.edata['e']  = e
        g.edata['Ce'] = self.C(e)
        g.update_all(self.message_func,
                     self.reduce_func,
                     self.nodeapply)
        h = g.ndata['h']
        e = g.edata['e']

        if self.activation:
            e = self.activation(e) # non-linear activation

        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)

        if self.residual:
            h = h_in + h # residual connection
            e = e_in + e # residual connection

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
self.residual) | 10,938 | 29.386111 | 114 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/mlp_readout_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
"""
MLP Layer used after graph vector representation
"""
class MLPReadout(nn.Module):
    """Prediction head: L hidden Linear+ReLU stages, each halving the
    width, followed by a final Linear to ``output_dim``."""

    def __init__(self, input_dim, output_dim, L=2):  # L = nb hidden layers
        super().__init__()
        fc_layers = []
        width = input_dim
        for _ in range(L):
            # hidden stage: width -> width // 2
            fc_layers.append(nn.Linear(width, width // 2, bias=True))
            width //= 2
        # final projection to the class scores
        fc_layers.append(nn.Linear(width, output_dim, bias=True))
        self.FC_layers = nn.ModuleList(fc_layers)
        self.L = L

    def forward(self, x):
        y = x
        for hidden in self.FC_layers[:-1]:
            y = F.relu(hidden(y))
        y = self.FC_layers[-1](y)
        return y
# class MLPReadout(nn.Module):
#
# def __init__(self, input_dim, output_dim): # L=nb_hidden_layers
# super().__init__()
# FC_layers = nn.Linear(input_dim, output_dim, bias=True)
#
#
# def forward(self, x):
# y = x
# y = self.FC_layers(y)
# return y | 1,026 | 26.756757 | 109 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/gated_gcn_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch_geometric.typing import OptTensor
from torch_scatter import scatter
from torch_geometric.nn.conv import MessagePassing
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class GatedGCNLayer(nn.Module):
    """ResGatedGCN layer (Bresson & Laurent, 2018), DGL version.

    Updates both node and edge features:
        e_ij' = C e_ij + D h_i + E h_j
        h_i'  = A h_i + sum_j sigmoid(e_ij') * B h_j
                        / (sum_j sigmoid(e_ij') + 1e-6)
    followed by optional batch-norm, ReLU, residual add and dropout.
    Param: []
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        # residual add requires matching widths
        if input_dim != output_dim:
            self.residual = False

        # A..E are the five linear maps of the gated update above.
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)

    def message_func(self, edges):
        Bh_j = edges.src['Bh']
        e_ij = edges.data['Ce'] + edges.src['Dh'] + edges.dst['Eh'] # e_ij = Ce_ij + Dhi + Ehj
        edges.data['e'] = e_ij
        return {'Bh_j' : Bh_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        Ah_i = nodes.data['Ah']
        Bh_j = nodes.mailbox['Bh_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)
        #h = Ah_i + torch.mean( sigma_ij * Bh_j, dim=1 ) # hi = Ahi + mean_j alpha_ij * Bhj
        h = Ah_i + torch.sum( sigma_ij * Bh_j, dim=1 ) / ( torch.sum( sigma_ij, dim=1 ) + 1e-6 ) # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
        return {'h' : h}

    def forward(self, g, h, e):

        h_in = h # for residual connection
        e_in = e # for residual connection

        # stash projections on the graph, then run one round of
        # message passing (message_func + reduce_func)
        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        g.edata['e']  = e
        g.edata['Ce'] = self.C(e)

        g.update_all(self.message_func,self.reduce_func)

        h = g.ndata['h'] # result of graph convolution
        e = g.edata['e'] # result of graph convolution

        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
            e = self.bn_node_e(e) # batch normalization

        h = F.relu(h) # non-linear activation
        e = F.relu(e) # non-linear activation

        if self.residual:
            h = h_in + h # residual connection
            e = e_in + e # residual connection

        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
"""
ResGatedGCN: Residual Gated Graph ConvNets — a custom PyG implementation written for this repository
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class ResGatedGCNLayer(MessagePassing):
    """ResGatedGCN layer (Bresson & Laurent, 2018) ported to
    torch_geometric's MessagePassing.

    e_ij' = C e_ij + D h_i + E h_j
    h_i'  = A h_i + sum_j sigmoid(e_ij') * B h_j
                    / (sum_j sigmoid(e_ij') + 1e-6)

    NOTE(review): message()/aggregate() pass Python *lists* instead of
    Tensors through the MessagePassing machinery and aggregate() unpacks
    them manually; this relies on implementation details of the installed
    PyG version — confirm on upgrade.  BN/ReLU/residual/dropout are
    applied by the caller, not inside this layer.
    Param: []
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        if input_dim != output_dim:
            self.residual = False

        # A..E are the five linear maps of the gated update above.
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)

    def message(self, x_j: Tensor, alpha_j: Tensor, alpha_i: Tensor, Ah: Tensor ,edge_weight: OptTensor):
        # edge_weight carries Ce, alpha_j/alpha_i carry Dh_i / Eh_j
        e_ij = edge_weight + alpha_j + alpha_i
        # e_ij = edges.data['Ce'] + edges.src['Dh'] + edges.dst['Eh'] # e_ij = Ce_ij + Dhi + Ehj
        return [x_j, e_ij, Ah]

    def aggregate(self, inputs, index, ptr=None, dim_size=None):
        # inputs = [Bh_j, e_ij, Ah] as emitted by message()
        Ah_i = inputs[2]
        Bh_j = inputs[0]
        sigma_ij = torch.sigmoid(inputs[1])
        e = inputs[1]
        # aa=scatter(sigma_ij * Bh_j, index, dim=self.node_dim, dim_size=dim_size,
        #                    reduce='add')
        # dense-attention normalization: sum_j sigma*Bh / (sum_j sigma + eps)
        h = Ah_i + scatter(sigma_ij*Bh_j, index, dim= self.node_dim, dim_size=dim_size,
                           reduce='add') / (scatter(sigma_ij, index, dim=self.node_dim, dim_size=dim_size, reduce='sum') + 1e-6)
        return [h, e]
        # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention

    def forward(self, h, edge_index, edge_weight):
        # h = conv(h, edge_index, e)g, h, e
        h_in = h # for residual connection (applied by the caller, not here)
        e_in = edge_weight # for residual connection (applied by the caller)

        Ah = self.A(h)
        Bh = self.B(h)
        Dh = self.D(h)
        Eh = self.E(h)
        Ce = self.C(edge_weight)

        # g.update_all(self.message_func, self.reduce_func)
        m = self.propagate(edge_index, x=(Bh,Bh), alpha=(Dh,Eh), Ah=Ah, edge_weight=Ce,
                           size=None)

        h = m[0] # result of graph convolution
        e = m[1] # result of graph convolution

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GatedGCNLayerEdgeFeatOnly(nn.Module):
    """GatedGCN variant whose edge gates use node features only
    (e_ij = D h_i + E h_j); the incoming edge tensor `e` is passed
    through unchanged.
    Param: []
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        # residual add requires matching widths
        if input_dim != output_dim:
            self.residual = False

        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)

    def message_func(self, edges):
        Bh_j = edges.src['Bh']
        e_ij = edges.src['Dh'] + edges.dst['Eh'] # e_ij = Dhi + Ehj
        edges.data['e'] = e_ij
        return {'Bh_j' : Bh_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        Ah_i = nodes.data['Ah']
        Bh_j = nodes.mailbox['Bh_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)
        h = Ah_i + torch.sum( sigma_ij * Bh_j, dim=1 ) / ( torch.sum( sigma_ij, dim=1 ) + 1e-6 ) # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
        return {'h' : h}

    def forward(self, g, h, e):

        h_in = h # for residual connection

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)

        g.update_all(self.message_func,self.reduce_func)

        h = g.ndata['h'] # result of graph convolution

        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization

        h = F.relu(h) # non-linear activation

        if self.residual:
            h = h_in + h # residual connection

        h = F.dropout(h, self.dropout, training=self.training)

        # e is returned untouched in this variant
        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
##############################################################
class GatedGCNLayerIsotropic(nn.Module):
    """Isotropic (ungated) ablation of the GatedGCN layer:
    h_i' = A h_i + sum_j B h_j, with optional BN/ReLU/residual/dropout.
    The edge tensor `e` is passed through unchanged.
    Param: []
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        # residual add requires matching widths
        if input_dim != output_dim:
            self.residual = False

        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)

    def message_func(self, edges):
        Bh_j = edges.src['Bh']
        return {'Bh_j' : Bh_j}

    def reduce_func(self, nodes):
        Ah_i = nodes.data['Ah']
        Bh_j = nodes.mailbox['Bh_j']
        h = Ah_i + torch.sum( Bh_j, dim=1 ) # hi = Ahi + sum_j Bhj
        return {'h' : h}

    def forward(self, g, h, e):

        h_in = h # for residual connection

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)

        g.update_all(self.message_func,self.reduce_func)

        h = g.ndata['h'] # result of graph convolution

        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization

        h = F.relu(h) # non-linear activation

        if self.residual:
            h = h_in + h # residual connection

        h = F.dropout(h, self.dropout, training=self.training)

        # e is returned untouched in this variant
        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
                                                            self.in_channels,
                                                            self.out_channels)
| 10,484 | 35.40625 | 170 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/gat_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
class GATLayer(nn.Module):
    """
    Multi-head GAT layer (Graph Attention Networks, Veličković et al.,
    ICLR 2018, https://arxiv.org/abs/1710.10903) wrapping dgl's builtin
    GATConv, with optional batch-norm, residual connection and activation.

    Parameters
    ----------
    in_dim :
        Number of input features.
    out_dim :
        Number of output features per head (heads are concatenated).
    num_heads : int
        Number of heads in Multi-Head Attention.
    dropout :
        Dropout rate used for both attention and features in GATConv.
    batch_norm :
        Boolean flag for a BatchNorm1d over the concatenated heads.
    residual :
        If True, add the input back after the convolution. Default: ``False``.
    activation : callable activation function/layer or None, optional.
        Applied to the updated node features (default: F.elu).
    """

    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=False, activation=F.elu):
        super().__init__()
        self.residual = residual
        self.activation = activation
        self.batch_norm = batch_norm

        # The residual add is only shape-compatible when the input width
        # matches the concatenated head width.
        if in_dim != (out_dim * num_heads):
            self.residual = False

        self.gatconv = GATConv(in_dim, out_dim, num_heads, dropout, dropout)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_dim * num_heads)

    def forward(self, g, h):
        identity = h  # for residual connection

        # (N, num_heads, out_dim) -> (N, num_heads * out_dim)
        h = self.gatconv(g, h).flatten(1)

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.activation:
            h = self.activation(h)

        if self.residual:
            h = identity + h  # residual connection

        return h
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class CustomGATHeadLayer(nn.Module):
    """Single GAT attention head built from DGL user-defined functions:
    edge logit = LeakyReLU(a^T [z_i || z_j]), softmax over each node's
    in-edges, attention-weighted sum, then optional BN, ELU and dropout."""

    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm

        self.fc = nn.Linear(in_dim, out_dim, bias=False)       # W projection
        self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)   # attention vector a
        self.batchnorm_h = nn.BatchNorm1d(out_dim)

    def edge_attention(self, edges):
        # unnormalized attention logit per edge
        z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
        a = self.attn_fc(z2)
        return {'e': F.leaky_relu(a)}

    def message_func(self, edges):
        return {'z': edges.src['z'], 'e': edges.data['e']}

    def reduce_func(self, nodes):
        # softmax over in-edges, then attention-weighted sum of messages
        alpha = F.softmax(nodes.mailbox['e'], dim=1)
        alpha = F.dropout(alpha, self.dropout, training=self.training)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'h': h}

    def forward(self, g, h):
        z = self.fc(h)
        g.ndata['z'] = z
        g.apply_edges(self.edge_attention)
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']

        if self.batch_norm:
            h = self.batchnorm_h(h)

        h = F.elu(h)

        h = F.dropout(h, self.dropout, training=self.training)

        return h
class CustomGATLayer(nn.Module):
    """Multi-head wrapper around CustomGATHeadLayer: runs num_heads
    independent attention heads and merges them (concatenation by
    default), with an optional residual connection.
    Param: [in_dim, out_dim, n_heads]
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()

        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual

        # residual add requires the input width to match the
        # concatenated head width
        if in_dim != (out_dim*num_heads):
            self.residual = False

        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(CustomGATHeadLayer(in_dim, out_dim, dropout, batch_norm))
        self.merge = 'cat'

    def forward(self, g, h, e):
        h_in = h # for residual connection

        head_outs = [attn_head(g, h) for attn_head in self.heads]

        if self.merge == 'cat':
            h = torch.cat(head_outs, dim=1)
        else:
            # BUGFIX: torch.mean(torch.stack(head_outs)) collapsed all heads
            # AND features to a single scalar; average element-wise over
            # the head dimension instead.
            h = torch.mean(torch.stack(head_outs), dim=0)

        if self.residual:
            h = h_in + h # residual connection

        # edge features are passed through unchanged
        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                                                                   self.in_channels,
                                                                                   self.out_channels, self.num_heads, self.residual)
##############################################################
class CustomGATHeadLayerEdgeReprFeat(nn.Module):
    # One GAT head that maintains explicit edge representations: both nodes and
    # edges are projected, and each layer produces a new edge feature from
    # [edge, src, dst] alongside the attention-aggregated node feature.
    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.fc_h = nn.Linear(in_dim, out_dim, bias=False)   # node projection
        self.fc_e = nn.Linear(in_dim, out_dim, bias=False)   # edge projection
        self.fc_proj = nn.Linear(3* out_dim, out_dim)        # new edge repr from [e, src, dst]
        self.attn_fc = nn.Linear(3* out_dim, 1, bias=False)  # attention logit from [e, src, dst]
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.batchnorm_e = nn.BatchNorm1d(out_dim)
    def edge_attention(self, edges):
        # DGL edge UDF: combine edge feature with both endpoint features to get
        # the attention logit and the updated edge representation.
        z = torch.cat([edges.data['z_e'], edges.src['z_h'], edges.dst['z_h']], dim=1)
        e_proj = self.fc_proj(z)
        attn = F.leaky_relu(self.attn_fc(z))
        return {'attn': attn, 'e_proj': e_proj}
    def message_func(self, edges):
        # DGL message UDF: send projected source features and attention logits.
        return {'z': edges.src['z_h'], 'attn': edges.data['attn']}
    def reduce_func(self, nodes):
        # DGL reduce UDF: softmax over each node's incoming edges, then aggregate.
        alpha = F.softmax(nodes.mailbox['attn'], dim=1)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'h': h}
    def forward(self, g, h, e):
        # Returns updated (node, edge) features; both go through optional
        # batch-norm, ELU and dropout. Mutates g.ndata / g.edata in place.
        z_h = self.fc_h(h)
        z_e = self.fc_e(e)
        g.ndata['z_h'] = z_h
        g.edata['z_e'] = z_e
        g.apply_edges(self.edge_attention)
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']
        e = g.edata['e_proj']
        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)
        h = F.elu(h)
        e = F.elu(e)
        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)
        return h, e
class CustomGATLayerEdgeReprFeat(nn.Module):
    """Multi-head GAT layer that updates node AND edge representations.

    Param: [in_dim, out_dim, n_heads]
    Only merge='cat' is implemented; outputs have width out_dim * num_heads.
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        # Residual adds require matching input/output widths.
        self.residual = False if in_dim != out_dim * num_heads else residual
        self.heads = nn.ModuleList(
            CustomGATHeadLayerEdgeReprFeat(in_dim, out_dim, dropout, batch_norm)
            for _ in range(num_heads)
        )
        self.merge = 'cat'
    def forward(self, g, h, e):
        h_in, e_in = h, e  # saved for the residual connections
        node_outs, edge_outs = [], []
        for head in self.heads:
            h_head, e_head = head(g, h, e)
            node_outs.append(h_head)
            edge_outs.append(e_head)
        if self.merge != 'cat':
            raise NotImplementedError
        h = torch.cat(node_outs, dim=1)
        e = torch.cat(edge_outs, dim=1)
        if self.residual:
            h = h + h_in
            e = e + e_in
        return h, e
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(
            self.__class__.__name__, self.in_channels,
            self.out_channels, self.num_heads, self.residual)
##############################################################
class CustomGATHeadLayerIsotropic(nn.Module):
    # Ablation head: same interface as a GAT head but WITHOUT attention —
    # neighbour features are simply sum-aggregated after a linear projection.
    def __init__(self, in_dim, out_dim, dropout, batch_norm):
        super().__init__()
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.fc = nn.Linear(in_dim, out_dim, bias=False)
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
    def message_func(self, edges):
        # DGL message UDF: forward projected source features unchanged.
        return {'z': edges.src['z']}
    def reduce_func(self, nodes):
        # DGL reduce UDF: plain sum over the mailbox (no attention weighting).
        h = torch.sum(nodes.mailbox['z'], dim=1)
        return {'h': h}
    def forward(self, g, h):
        # Project, sum-aggregate neighbours, then BN -> ELU -> dropout.
        z = self.fc(h)
        g.ndata['z'] = z
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']
        if self.batch_norm:
            h = self.batchnorm_h(h)
        h = F.elu(h)
        h = F.dropout(h, self.dropout, training=self.training)
        return h
class CustomGATLayerIsotropic(nn.Module):
    """Multi-head wrapper around the isotropic (attention-free) GAT head.

    Param: [in_dim, out_dim, n_heads]
    Heads are concatenated; output width is out_dim * num_heads.
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        # Disable the residual add whenever shapes cannot match.
        self.residual = False if in_dim != out_dim * num_heads else residual
        self.heads = nn.ModuleList(
            CustomGATHeadLayerIsotropic(in_dim, out_dim, dropout, batch_norm)
            for _ in range(num_heads)
        )
        self.merge = 'cat'
    def forward(self, g, h, e):
        h_in = h  # saved for the residual connection
        per_head = [head(g, h) for head in self.heads]
        if self.merge == 'cat':
            h = torch.cat(per_head, dim=1)
        else:
            h = torch.mean(torch.stack(per_head))
        if self.residual:
            h = h + h_in
        return h, e
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(
            self.__class__.__name__, self.in_channels,
            self.out_channels, self.num_heads, self.residual)
| 10,303 | 29.850299 | 107 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/gin_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
class GINLayer(nn.Module):
    """
    [!] code adapted from dgl implementation of GINConv
    Parameters
    ----------
    apply_func : callable activation function/layer or None
        If not None, apply this function to the updated node feature,
        the :math:`f_\Theta` in the formula.
    aggr_type :
        Aggregator type to use (``sum``, ``max`` or ``mean``).
    out_dim :
        Rquired for batch norm layer; should match out_dim of apply_func if not None.
    dropout :
        Required for dropout of output features.
    batch_norm :
        boolean flag for batch_norm layer.
    residual :
        boolean flag for using residual connection.
    init_eps : optional
        Initial :math:`\epsilon` value, default: ``0``.
    learn_eps : bool, optional
        If True, :math:`\epsilon` will be a learnable parameter.
    """
    def __init__(self, apply_func, aggr_type, dropout, batch_norm, residual=False, init_eps=0, learn_eps=False):
        super().__init__()
        self.apply_func = apply_func
        # Map the aggregator name onto a dgl built-in reducer factory.
        if aggr_type == 'sum':
            self._reducer = fn.sum
        elif aggr_type == 'max':
            self._reducer = fn.max
        elif aggr_type == 'mean':
            self._reducer = fn.mean
        else:
            raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))
        self.batch_norm = batch_norm
        self.residual = residual
        self.dropout = dropout
        # NOTE(review): despite the docstring saying apply_func may be None,
        # these two lines dereference apply_func.mlp — None would raise here.
        in_dim = apply_func.mlp.input_dim
        out_dim = apply_func.mlp.output_dim
        if in_dim != out_dim:
            self.residual = False
        # to specify whether eps is trainable or not.
        if learn_eps:
            self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
        else:
            self.register_buffer('eps', torch.FloatTensor([init_eps]))
        self.bn_node_h = nn.BatchNorm1d(out_dim)
    def forward(self, g, h):
        h_in = h # for residual connection
        g = g.local_var()
        g.ndata['h'] = h
        # GIN update: h = (1 + eps) * h + AGG(neighbours), then MLP.
        g.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
        h = (1 + self.eps) * h + g.ndata['neigh']
        if self.apply_func is not None:
            h = self.apply_func(h)
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
        h = F.relu(h) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
        h = F.dropout(h, self.dropout, training=self.training)
        return h
class ApplyNodeFunc(nn.Module):
    """
    This class is used in class GINNet
    Update the node feature hv with MLP
    """
    def __init__(self, mlp):
        super().__init__()
        self.mlp = mlp
    def forward(self, h):
        # Thin delegation: the wrapped MLP does all the work.
        return self.mlp(h)
class MLP(nn.Module):
    """MLP with linear output (no activation after the final layer).

    num_layers == 1 degenerates to a single nn.Linear; otherwise each hidden
    layer is Linear -> BatchNorm1d -> ReLU and the last Linear stays bare.
    """
    def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.linear_or_not = True  # True => plain linear model
        self.num_layers = num_layers
        self.output_dim = output_dim
        self.input_dim = input_dim
        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        if num_layers == 1:
            self.linear = nn.Linear(input_dim, output_dim)
        else:
            self.linear_or_not = False
            self.linears = torch.nn.ModuleList()
            self.batch_norms = torch.nn.ModuleList()
            # Layer widths: input -> hidden x (num_layers-1) -> output.
            dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
            for d_in, d_out in zip(dims[:-1], dims[1:]):
                self.linears.append(nn.Linear(d_in, d_out))
            for _ in range(num_layers - 1):
                self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
    def forward(self, x):
        if self.linear_or_not:
            return self.linear(x)
        h = x
        # All but the last layer get batch-norm and ReLU.
        for linear, bn in zip(self.linears[:-1], self.batch_norms):
            h = F.relu(bn(linear(h)))
        return self.linears[-1](h)
| 4,598 | 30.9375 | 113 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/gmm_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import dgl.function as fn
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class GMMLayer(nn.Module):
    """
    [!] code adapted from dgl implementation of GMMConv
    Parameters
    ----------
    in_dim :
        Number of input features.
    out_dim :
        Number of output features.
    dim :
        Dimensionality of pseudo-coordinte.
    kernel :
        Number of kernels :math:`K`.
    aggr_type :
        Aggregator type (``sum``, ``mean``, ``max``).
    dropout :
        Required for dropout of output features.
    batch_norm :
        boolean flag for batch_norm layer.
    residual :
        If True, use residual connection inside this layer. Default: ``False``.
    bias :
        If True, adds a learnable bias to the output. Default: ``True``.
    """
    def __init__(self, in_dim, out_dim, dim, kernel, aggr_type, dropout,
                 batch_norm, residual=False, bias=True):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.dim = dim
        self.kernel = kernel
        self.batch_norm = batch_norm
        self.residual = residual
        self.dropout = dropout
        # Map the aggregator name onto a dgl built-in reducer factory.
        if aggr_type == 'sum':
            self._reducer = fn.sum
        elif aggr_type == 'mean':
            self._reducer = fn.mean
        elif aggr_type == 'max':
            self._reducer = fn.max
        else:
            raise KeyError("Aggregator type {} not recognized.".format(aggr_type))
        # Per-kernel Gaussian parameters: means and inverse std-devs over the
        # pseudo-coordinate space.
        self.mu = nn.Parameter(torch.Tensor(kernel, dim))
        self.inv_sigma = nn.Parameter(torch.Tensor(kernel, dim))
        self.fc = nn.Linear(in_dim, kernel * out_dim, bias=False)
        self.bn_node_h = nn.BatchNorm1d(out_dim)
        if in_dim != out_dim:
            self.residual = False
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_dim))
        else:
            self.register_buffer('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = init.calculate_gain('relu')
        init.xavier_normal_(self.fc.weight, gain=gain)
        init.normal_(self.mu.data, 0, 0.1)
        init.constant_(self.inv_sigma.data, 1)
        if self.bias is not None:
            init.zeros_(self.bias.data)
    def forward(self, g, h, pseudo):
        # pseudo: per-edge pseudo-coordinates, reshaped below to (E, 1, dim).
        h_in = h # for residual connection
        g = g.local_var()
        # Project each node into `kernel` separate out_dim-sized channels.
        g.ndata['h'] = self.fc(h).view(-1, self.kernel, self.out_dim)
        E = g.number_of_edges()
        # compute gaussian weight
        gaussian = -0.5 * ((pseudo.view(E, 1, self.dim) -
                            self.mu.view(1, self.kernel, self.dim)) ** 2)
        gaussian = gaussian * (self.inv_sigma.view(1, self.kernel, self.dim) ** 2)
        gaussian = torch.exp(gaussian.sum(dim=-1, keepdim=True)) # (E, K, 1)
        g.edata['w'] = gaussian
        # Weight source features by the Gaussian edge weights, then reduce.
        g.update_all(fn.u_mul_e('h', 'w', 'm'), self._reducer('m', 'h'))
        # Sum over the kernel dimension to get (N, out_dim).
        h = g.ndata['h'].sum(1)
        if self.batch_norm:
            h = self.bn_node_h(h) # batch normalization
        h = F.relu(h) # non-linear activation
        if self.residual:
            h = h_in + h # residual connection
        if self.bias is not None:
            h = h + self.bias
        h = F.dropout(h, self.dropout, training=self.training)
        return h
| 3,680 | 31.289474 | 111 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/gcn_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import GraphConv
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
# Sends a message of node feature h
# Equivalent to => return {'m': edges.src['h']}
msg = fn.copy_src(src='h', out='m')
def reduce(nodes):
    # DGL reduce UDF: mean-aggregate neighbour messages (isotropic GCN update).
    accum = torch.mean(nodes.mailbox['m'], 1)
    return {'h': accum}
class NodeApplyModule(nn.Module):
    """Node-apply UDF: update node feature h_v with a learned affine map (Wh_v + b)."""
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim)
    def forward(self, node):
        # `node` is a DGL node batch exposing .data['h'].
        return {'h': self.linear(node.data['h'])}
class GCNLayer(nn.Module):
    """
    Param: [in_dim, out_dim]

    One GCN layer: mean-aggregate neighbours, affine transform, optional
    batch-norm / activation / residual, then dropout. When dgl_builtin is
    True, dgl's GraphConv is used instead of the hand-written msg/reduce UDFs.
    """
    def __init__(self, in_dim, out_dim, activation, dropout, batch_norm, residual=False, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin
        # Residual add is only shape-valid when in_dim == out_dim.
        if in_dim != out_dim:
            self.residual = False
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        if self.dgl_builtin == False:
            self.apply_mod = NodeApplyModule(in_dim, out_dim)
        else:
            self.conv = GraphConv(in_dim, out_dim)
    def forward(self, g, feature):
        h_in = feature # to be used for residual connection
        if self.dgl_builtin == False:
            # Manual message passing: mean of neighbours, then Wh + b.
            g.ndata['h'] = feature
            g.update_all(msg, reduce)
            g.apply_nodes(func=self.apply_mod)
            h = g.ndata['h'] # result of graph convolution
        else:
            h = self.conv(g, feature)
        if self.batch_norm:
            h = self.batchnorm_h(h) # batch normalization
        if self.activation:
            h = self.activation(h)
        if self.residual:
            h = h_in + h # residual connection
        h = self.dropout(h)
        return h
    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(self.__class__.__name__,
                                                                         self.in_channels,
                                                                         self.out_channels, self.residual)
| 2,561 | 29.86747 | 109 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/ring_gnn_equiv_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Ring-GNN equi 2 to 2 layer file
On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
https://arxiv.org/pdf/1905.12560v1.pdf
CODE ADPATED FROM https://github.com/leichen2018/Ring-GNN/
"""
class RingGNNEquivLayer(nn.Module):
    # Ring-GNN equivariant 2->2 layer: combines the 15 linear-equivariant basis
    # operations (ops_2_to_2) with learned coefficients, raised to matrix powers
    # up to `radius`, mixed by the learnable `switch` weights.
    # NOTE(review): input_dim/output_dim appear to be 0-d tensors, not plain
    # ints — `.float()` and `.item()` are called on them below; confirm at the
    # call site.
    def __init__(self, device, input_dim, output_dim, layer_norm, residual, dropout,
                 normalization='inf', normalization_val=1.0, radius=2, k2_init = 0.1):
        super().__init__()
        self.device = device
        basis_dimension = 15  # number of equivariant basis ops from ops_2_to_2
        self.radius = radius
        self.layer_norm = layer_norm
        self.residual = residual
        self.dropout = dropout
        # He-style init for the per-basis coefficient tensors.
        coeffs_values = lambda i, j, k: torch.randn([i, j, k]) * torch.sqrt(2. / (i + j).float())
        self.diag_bias_list = nn.ParameterList([])
        for i in range(radius):
            for j in range(i+1):
                self.diag_bias_list.append(nn.Parameter(torch.zeros(1, output_dim, 1, 1)))
        self.all_bias = nn.Parameter(torch.zeros(1, output_dim, 1, 1))
        self.coeffs_list = nn.ParameterList([])
        for i in range(radius):
            for j in range(i+1):
                self.coeffs_list.append(nn.Parameter(coeffs_values(input_dim, output_dim, basis_dimension)))
        # Learnable mixing weights for the radius-1 and radius-2 terms.
        self.switch = nn.ParameterList([nn.Parameter(torch.FloatTensor([1])), nn.Parameter(torch.FloatTensor([k2_init]))])
        self.output_dim = output_dim
        self.normalization = normalization
        self.normalization_val = normalization_val
        if self.layer_norm:
            self.ln_x = LayerNorm(output_dim.item())
        if self.residual:
            self.res_x = nn.Linear(input_dim.item(), output_dim.item())
    def forward(self, inputs):
        # inputs: 1 x D x m x m (batch of one dense graph tensor).
        m = inputs.size()[3]
        ops_out = ops_2_to_2(inputs, m, normalization=self.normalization)
        ops_out = torch.stack(ops_out, dim = 2)  # 1 x D x 15 x m x m
        output_list = []
        for i in range(self.radius):
            for j in range(i+1):
                # Contract input channels and basis dim into output channels.
                output_i = torch.einsum('dsb,ndbij->nsij', self.coeffs_list[i*(i+1)//2 + j], ops_out)
                mat_diag_bias = torch.eye(inputs.size()[3]).unsqueeze(0).unsqueeze(0).to(self.device) * self.diag_bias_list[i*(i+1)//2 + j]
                # mat_diag_bias = torch.eye(inputs.size()[3]).to('cuda:0').unsqueeze(0).unsqueeze(0) * self.diag_bias_list[i*(i+1)//2 + j]
                if j == 0:
                    output = output_i + mat_diag_bias
                else:
                    # Matrix-multiply chain implements the power-`i` term.
                    output = torch.einsum('abcd,abde->abce', output_i, output)
            output_list.append(output)
        output = 0
        for i in range(self.radius):
            output += output_list[i] * self.switch[i]
        output = output + self.all_bias
        if self.layer_norm:
            # Now, changing shapes from [1xdxnxn] to [nxnxd] for BN
            output = output.permute(3,2,1,0).squeeze()
            # output = self.bn_x(output.reshape(m*m, self.output_dim.item())) # batch normalization
            output = self.ln_x(output) # layer normalization
            # Returning output back to original shape
            output = output.reshape(m, m, self.output_dim.item())
            output = output.permute(2,1,0).unsqueeze(0)
        output = F.relu(output) # non-linear activation
        if self.residual:
            # Now, changing shapes from [1xdxnxn] to [nxnxd] for Linear() layer
            inputs, output = inputs.permute(3,2,1,0).squeeze(), output.permute(3,2,1,0).squeeze()
            residual_ = self.res_x(inputs)
            output = residual_ + output # residual connection
            # Returning output back to original shape
            output = output.permute(2,1,0).unsqueeze(0)
        output = F.dropout(output, self.dropout, training=self.training)
        return output
def ops_2_to_2(inputs, dim, normalization='inf', normalization_val=1.0): # N x D x m x m
    """Compute the 15 linear-equivariant basis operations for 2D -> 2D tensors.

    Args:
        inputs: tensor of shape N x D x m x m.
        dim: m, the side length of the square feature maps.
        normalization: 'inf' divides the sum-based ops by m (or m**2) so their
            magnitudes are comparable to the identity-like ops; None disables it.
        normalization_val: kept for interface compatibility; unused here.

    Returns:
        A list of 15 tensors, each of shape N x D x m x m.
    """
    diag_part = torch.diagonal(inputs, dim1 = 2, dim2 = 3) # N x D x m
    sum_diag_part = torch.sum(diag_part, dim=2, keepdim = True) # N x D x 1
    sum_of_rows = torch.sum(inputs, dim=3) # N x D x m
    sum_of_cols = torch.sum(inputs, dim=2) # N x D x m
    sum_all = torch.sum(sum_of_rows, dim=2) # N x D
    # op1 - (1234) - extract diag
    op1 = torch.diag_embed(diag_part) # N x D x m x m
    # op2 - (1234) + (12)(34) - place sum of diag on diag
    op2 = torch.diag_embed(sum_diag_part.repeat(1, 1, dim))
    # op3 - (1234) + (123)(4) - place sum of row i on diag ii
    op3 = torch.diag_embed(sum_of_rows)
    # op4 - (1234) + (124)(3) - place sum of col i on diag ii
    op4 = torch.diag_embed(sum_of_cols)
    # op5 - (1234) + (124)(3) + (123)(4) + (12)(34) + (12)(3)(4) - place sum of all entries on diag
    op5 = torch.diag_embed(sum_all.unsqueeze(2).repeat(1, 1, dim))
    # op6 - (14)(23) + (13)(24) + (24)(1)(3) + (124)(3) + (1234) - place sum of col i on row i
    op6 = sum_of_cols.unsqueeze(3).repeat(1, 1, 1, dim)
    # op7 - (14)(23) + (23)(1)(4) + (234)(1) + (123)(4) + (1234) - place sum of row i on row i
    op7 = sum_of_rows.unsqueeze(3).repeat(1, 1, 1, dim)
    # op8 - (14)(2)(3) + (134)(2) + (14)(23) + (124)(3) + (1234) - place sum of col i on col i
    op8 = sum_of_cols.unsqueeze(2).repeat(1, 1, dim, 1)
    # op9 - (13)(24) + (13)(2)(4) + (134)(2) + (123)(4) + (1234) - place sum of row i on col i
    op9 = sum_of_rows.unsqueeze(2).repeat(1, 1, dim, 1)
    # op10 - (1234) + (14)(23) - identity
    op10 = inputs
    # op11 - (1234) + (13)(24) - transpose
    op11 = torch.transpose(inputs, -2, -1)
    # op12 - (1234) + (234)(1) - place ii element in row i
    op12 = diag_part.unsqueeze(3).repeat(1, 1, 1, dim)
    # op13 - (1234) + (134)(2) - place ii element in col i
    op13 = diag_part.unsqueeze(2).repeat(1, 1, dim, 1)
    # op14 - (34)(1)(2) + (234)(1) + (134)(2) + (1234) + (12)(34) - place sum of diag in all entries
    op14 = sum_diag_part.unsqueeze(3).repeat(1, 1, dim, dim)
    # op15 - sum of all ops - place sum of all entries in all entries
    op15 = sum_all.unsqueeze(2).unsqueeze(3).repeat(1, 1, dim, dim)
    if normalization is not None:
        float_dim = float(dim)
        # BUG FIX: this previously used `normalization is 'inf'`, comparing
        # strings by identity — correct only by accident of CPython interning
        # and a SyntaxWarning on Python >= 3.8. Use equality instead.
        if normalization == 'inf':
            op2 = torch.div(op2, float_dim)
            op3 = torch.div(op3, float_dim)
            op4 = torch.div(op4, float_dim)
            op5 = torch.div(op5, float_dim**2)
            op6 = torch.div(op6, float_dim)
            op7 = torch.div(op7, float_dim)
            op8 = torch.div(op8, float_dim)
            op9 = torch.div(op9, float_dim)
            op14 = torch.div(op14, float_dim)
            op15 = torch.div(op15, float_dim**2)
    return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15]
class LayerNorm(nn.Module):
    """Layer normalisation over the leading two (n x n) axes of an n x n x d
    tensor, with learnable per-channel scale `a` and shift `b`."""
    def __init__(self, d):
        super().__init__()
        self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0))   # scale, 1 x 1 x d
        self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0))  # shift, 1 x 1 x d
    def forward(self, x):
        # Per-channel statistics across both spatial axes.
        mu = x.mean(dim=(0, 1), keepdim=True)
        sigma2 = x.var(dim=(0, 1), keepdim=True, unbiased=False)
        return self.a * (x - mu) / torch.sqrt(sigma2 + 1e-6) + self.b
| 8,076 | 39.385 | 139 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/layers/three_wl_gnn_layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Layers used for
3WLGNN
Provably Powerful Graph Networks (Maron et al., 2019)
https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
"""
class RegularBlock(nn.Module):
    """
    Imputs: N x input_depth x m x m
    Take the input through 2 parallel MLP routes, multiply the result, and add a skip-connection at the end.
    At the skip-connection, reduce the dimension back to output_depth
    """
    def __init__(self, depth_of_mlp, in_features, out_features, residual=False):
        super().__init__()
        self.residual = residual
        # Two parallel 1x1-conv MLP branches whose outputs are matrix-multiplied.
        self.mlp1 = MlpBlock(in_features, out_features, depth_of_mlp)
        self.mlp2 = MlpBlock(in_features, out_features, depth_of_mlp)
        # Concats input with the product and projects back to out_features.
        self.skip = SkipConnection(in_features+out_features, out_features)
        if self.residual:
            self.res_x = nn.Linear(in_features, out_features)
    def forward(self, inputs):
        mlp1 = self.mlp1(inputs)
        mlp2 = self.mlp2(inputs)
        # Batched matrix product over the trailing m x m dims.
        mult = torch.matmul(mlp1, mlp2)
        out = self.skip(in1=inputs, in2=mult)
        if self.residual:
            # Now, changing shapes from [1xdxnxn] to [nxnxd] for Linear() layer
            inputs, out = inputs.permute(3,2,1,0).squeeze(), out.permute(3,2,1,0).squeeze()
            residual_ = self.res_x(inputs)
            out = residual_ + out # residual connection
            # Returning output back to original shape
            out = out.permute(2,1,0).unsqueeze(0)
        return out
class MlpBlock(nn.Module):
    """
    Block of MLP layers with activation function after each (1x1 conv layers).
    """
    def __init__(self, in_features, out_features, depth_of_mlp, activation_fn=nn.functional.relu):
        super().__init__()
        self.activation = activation_fn
        self.convs = nn.ModuleList()
        width = in_features
        for _ in range(depth_of_mlp):
            conv = nn.Conv2d(width, out_features, kernel_size=1, padding=0, bias=True)
            _init_weights(conv)
            self.convs.append(conv)
            width = out_features  # subsequent layers map out_features -> out_features
    def forward(self, inputs):
        h = inputs
        for conv in self.convs:
            h = self.activation(conv(h))
        return h
class SkipConnection(nn.Module):
    """
    Connects the two given inputs with concatenation
    :param in1: earlier input tensor of shape N x d1 x m x m
    :param in2: later input tensor of shape N x d2 x m x m
    :param in_features: d1+d2
    :param out_features: output num of features
    :return: Tensor of shape N x output_depth x m x m
    """
    def __init__(self, in_features, out_features):
        super().__init__()
        self.conv = nn.Conv2d(in_features, out_features, kernel_size=1, padding=0, bias=True)
        _init_weights(self.conv)
    def forward(self, in1, in2):
        # Channel-wise concat, then a 1x1 conv projects back to out_features.
        stacked = torch.cat((in1, in2), dim=1)
        return self.conv(stacked)
class FullyConnected(nn.Module):
    """Linear layer with optional activation (default ReLU); the weight is
    Xavier-initialised via the module-level `_init_weights` helper."""
    def __init__(self, in_features, out_features, activation_fn=nn.functional.relu):
        super().__init__()
        self.fc = nn.Linear(in_features, out_features)
        _init_weights(self.fc)
        self.activation = activation_fn
    def forward(self, input):
        result = self.fc(input)
        return result if self.activation is None else self.activation(result)
def diag_offdiag_maxpool(input):
    """Max-pool diagonal and off-diagonal entries separately.

    input: B x S x N x N. Returns B x 2S — per feature map, the max over the
    diagonal concatenated with the max over the off-diagonal entries.
    """
    N = input.shape[-1]
    diag_vals = torch.diagonal(input, dim1=-2, dim2=-1)  # B x S x N
    max_diag = torch.max(diag_vals, dim=2)[0]            # B x S
    # Shift the diagonal below the global minimum so it cannot win the
    # off-diagonal max, then pool over both spatial dims.
    offset = torch.abs(torch.add(torch.max(max_diag), torch.max(-1 * input)))
    mask = torch.mul(offset, torch.eye(N, device=input.device)).view(1, 1, N, N)
    max_offdiag = torch.max(torch.max(input - mask, dim=3)[0], dim=2)[0]  # B x S
    return torch.cat((max_diag, max_offdiag), dim=1)  # B x 2S
def _init_weights(layer):
"""
Init weights of the layer
:param layer:
:return:
"""
nn.init.xavier_uniform_(layer.weight)
# nn.init.xavier_normal_(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
class LayerNorm(nn.Module):
    """LayerNorm across the leading two axes of an (n, n, d) tensor with a
    learnable channel-wise affine transform."""
    def __init__(self, d):
        super().__init__()
        # Affine parameters broadcast over the two spatial axes: 1 x 1 x d.
        self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0))
        self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0))
    def forward(self, x):
        mean = x.mean(dim=(0, 1), keepdim=True)
        var = x.var(dim=(0, 1), keepdim=True, unbiased=False)
        normed = self.a * (x - mean) / torch.sqrt(var + 1e-6)
        return normed + self.b
| 4,983 | 31.154839 | 108 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/train/train_Planetoid_node_classification.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
from train.metrics import accuracy_TU as accuracy
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, dataset, train_idx):
    """One full-batch training step on a single-graph (Planetoid-style) dataset.

    Returns (epoch_loss, epoch_train_acc, optimizer).
    NOTE(review): `dataset` is expected to expose `dataset.dataset[0]` with
    x / y / edge_index plus a top-level `edge_attr` — confirm against the
    loader that constructs it.
    """
    model.train()
    # These accumulators are overwritten below; kept from the mini-batch
    # template this function was adapted from.
    epoch_loss = 0
    epoch_train_acc = 0
    nb_data = 0
    gpu_mem = 0
    # for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
    batch_x = dataset.dataset[0].x.to(device)
    batch_e = dataset.edge_attr.to(device)
    batch_labels = dataset.dataset[0].y.long().to(device)
    edge_index = dataset.dataset[0].edge_index.long().to(device)
    train_idx = train_idx.to(device)
    optimizer.zero_grad()
    # Score all nodes, then restrict loss/accuracy to the training split.
    batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
    loss = model.loss(batch_scores, batch_labels[train_idx]).to(torch.float)
    loss.backward()
    optimizer.step()
    epoch_loss = loss.detach().item()
    epoch_train_acc = accuracy(batch_scores, batch_labels[train_idx]) / train_idx.size(0)
    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, dataset, val_idx):
    """Full-batch evaluation on the given index split (validation or test).

    Returns (epoch_test_loss, epoch_test_acc). Mirrors train_epoch_sparse but
    under torch.no_grad() and without the optimiser step.
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    nb_data = 0
    with torch.no_grad():
        batch_x = dataset.dataset[0].x.to(device)
        batch_e = dataset.edge_attr.to(device)
        batch_labels = dataset.dataset[0].y.long().to(device)
        edge_index = dataset.dataset[0].edge_index.long().to(device)
        val_idx = val_idx.to(device)
        # Score all nodes, restrict to the evaluation split.
        batch_scores = model.forward(batch_x, edge_index, batch_e)[val_idx]
        loss = model.loss(batch_scores, batch_labels[val_idx]).to(torch.float)
        epoch_test_loss = loss.detach().item()
        epoch_test_acc = accuracy(batch_scores, batch_labels[val_idx]) / val_idx.size(0)
    return epoch_test_loss, epoch_test_acc
# """
# For WL-GNNs
# """
# def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
# model.train()
# epoch_loss = 0
# epoch_train_acc = 0
# nb_data = 0
# gpu_mem = 0
# optimizer.zero_grad()
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# loss.backward()
#
# if not (iter%batch_size):
# optimizer.step()
# optimizer.zero_grad()
#
# epoch_loss += loss.detach().item()
# epoch_train_acc += accuracy(scores, labels)
# nb_data += labels.size(0)
# epoch_loss /= (iter + 1)
# epoch_train_acc /= nb_data
#
# return epoch_loss, epoch_train_acc, optimizer
#
# def evaluate_network_dense(model, device, data_loader, epoch):
# model.eval()
# epoch_test_loss = 0
# epoch_test_acc = 0
# nb_data = 0
# with torch.no_grad():
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# epoch_test_loss += loss.detach().item()
# epoch_test_acc += accuracy(scores, labels)
# nb_data += labels.size(0)
# epoch_test_loss /= (iter + 1)
# epoch_test_acc /= nb_data
#
# return epoch_test_loss, epoch_test_acc
def check_patience(all_losses, best_loss, best_epoch, curr_loss, curr_epoch, counter):
    """Early-stopping bookkeeping.

    On improvement (strictly lower loss) the counter resets and the best
    loss/epoch are updated; otherwise the counter is incremented.
    `all_losses` is accepted for interface compatibility but unused here.
    Returns (best_loss, best_epoch, counter).
    """
    if curr_loss < best_loss:
        return curr_loss, curr_epoch, 0
    return best_loss, best_epoch, counter + 1
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/train/metrics.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import numpy as np
def MAE(scores, targets):
    """Mean absolute error between predictions and targets, as a Python float."""
    return F.l1_loss(scores, targets).detach().item()
# Counts correct argmax predictions; matches the convention used by the ogb
# evaluator, so the same computation is reused there.
def accuracy_TU(scores, targets):
    """Return the NUMBER of correct predictions (not the fraction) as a float."""
    preds = scores.detach().argmax(dim=1)
    return (preds == targets).float().sum().item()
def accuracy_MNIST_CIFAR(scores, targets):
    """Count of samples whose predicted class (argmax over logits) matches the target."""
    predicted = torch.argmax(scores.detach(), dim=1)
    correct = (predicted == targets).float().sum()
    return correct.item()
def accuracy_CITATION_GRAPH(scores, targets):
    """Fraction of correct argmax predictions (mean accuracy in [0, 1])."""
    hits = (scores.detach().argmax(dim=1) == targets).float().sum().item()
    return hits / len(targets)
# it takes into account the case of each class, is the sum of the accuracy of each class(the right divide the total in each class) then
# divided by the total number of classes
def accuracy_SBM(scores, targets):
    """Class-balanced accuracy (percentage): mean of per-class recalls.

    Uses sklearn's confusion matrix; classes absent from `targets` contribute
    zero to the average but still count in the denominator.
    """
    S = targets.cpu().numpy()
    C = np.argmax( torch.nn.Softmax(dim=1)(scores).cpu().detach().numpy() , axis=1 )
    CM = confusion_matrix(S,C).astype(np.float32)
    nb_classes = CM.shape[0]
    targets = targets.cpu().detach().numpy()
    nb_non_empty_classes = 0
    pr_classes = np.zeros(nb_classes)
    for r in range(nb_classes):
        cluster = np.where(targets==r)[0]
        if cluster.shape[0] != 0:
            # Per-class recall: correct predictions over true class size.
            pr_classes[r] = CM[r,r]/ float(cluster.shape[0])
            if CM[r,r]>0:
                nb_non_empty_classes += 1
        else:
            pr_classes[r] = 0.0
    acc = 100.* np.sum(pr_classes)/ float(nb_classes)
    return acc
def accuracy_ogb(y_pred, y_true):
    """Count of correct argmax predictions, as a float.

    Same convention as accuracy_TU: returns the NUMBER of hits, not the
    fraction. (Cleaned up: removed the dead `acc_list` local and the
    commented-out per-task OGB loop, which were never used.)
    """
    y_pred = y_pred.detach().argmax(dim=1)
    acc = (y_pred == y_true).float().sum().item()
    return acc
def binary_f1_score(scores, targets):
    """Computes the F1 score using scikit-learn for binary class labels.
    Returns the F1 score for the positive class, i.e. labelled '1'.
    """
    y_true = targets.cpu().numpy()
    y_pred = scores.argmax(dim=1).cpu().numpy()
    return f1_score(y_true, y_pred, average='binary')
def accuracy_VOC(scores, targets):
    """Weighted F1 score between argmax predictions and targets.

    NOTE(review): sklearn's f1_score signature is (y_true, y_pred); here the
    predictions are passed first and the targets second. Weighted-average F1
    is NOT symmetric in its arguments — confirm whether this ordering is
    intentional.
    """
    scores = scores.detach().argmax(dim=1).cpu()
    targets = targets.cpu().detach().numpy()
    acc = f1_score(scores, targets, average='weighted')
    return acc
| 2,754 | 31.797619 | 135 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/train/train_ogb_node_classification.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from tqdm import tqdm
from train.metrics import accuracy_SBM as accuracy
from train.metrics import accuracy_ogb
from ogb.nodeproppred import Evaluator
"""
For GCNs
"""
def train_epoch(model, optimizer, device, train_loader, epoch=None):
    """One training epoch over mini-batches; loss restricted to train_mask nodes.

    Returns the train-mask-weighted mean loss over the epoch.
    """
    model.train()
    # pbar = tqdm(total=len(train_loader))
    # pbar.set_description(f'Training epoch: {epoch:04d}')
    total_loss = total_examples = 0
    for data in train_loader:
        optimizer.zero_grad()
        data = data.to(device)
        # If the batch carries Laplacian positional encodings, randomly flip
        # their signs (eigenvector sign ambiguity) and pass them to the model.
        # NOTE(review): the bare `except:` falls back to the 3-argument forward
        # when `data.pos_enc` is missing, but it also silently masks ANY error
        # raised inside the 4-argument forward — consider hasattr() instead.
        try:
            batch_pos_enc = data.pos_enc.to(device)
            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
            sign_flip[sign_flip >= 0.5] = 1.0
            sign_flip[sign_flip < 0.5] = -1.0
            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
            batch_scores = model.forward(data.x, data.edge_index, data.edge_attr,batch_pos_enc)
        except:
            batch_scores = model(data.x, data.edge_index, data.edge_attr)
        loss = model.loss(batch_scores[data.train_mask], data.y.view(-1)[data.train_mask]).to(torch.float)
        loss.backward()
        optimizer.step()
        # Weight each batch's loss by its number of training nodes.
        total_loss += float(loss) * int(data.train_mask.sum())
        total_examples += int(data.train_mask.sum())
    #     pbar.update(1)
    #
    # pbar.close()
    return total_loss / total_examples
    # model.train()
    #
    # # for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
    # batch_x = dataset.x.to(device)
    # batch_e = dataset.edge_attr.to(device)
    # # batch_e = dataset.edge_attr
    # batch_labels = dataset.y.long().to(device)
    # edge_index = dataset.edge_index.long().to(device)
    # train_idx = train_idx.to(device)
    #
    # optimizer.zero_grad()
    # batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
    # loss = model.loss(batch_scores, batch_labels.view(-1)[train_idx]).to(torch.float)
    # loss.backward()
    # optimizer.step()
    # epoch_loss = loss.detach().item()
    #
    # return epoch_loss
def train_epoch_arxiv(model, optimizer, device, dataset, train_idx):
    """One full-batch training step on ogbn-arxiv.

    Args:
        model: GNN exposing ``forward(x, edge_index, edge_attr[, pos_enc])``
            and ``loss(scores, labels)``.
        optimizer: optimizer stepping the model parameters.
        device: target device.
        dataset: single-graph object exposing ``x``, ``edge_attr``, ``y`` and
            ``edge_index`` (and optionally ``pos_enc``).
        train_idx: index tensor of training nodes.

    Returns:
        Scalar training loss of this step.
    """
    model.train()
    batch_x = dataset.x.to(device)
    batch_e = dataset.edge_attr.to(device)
    batch_labels = dataset.y.long().to(device)
    edge_index = dataset.edge_index.long().to(device)
    train_idx = train_idx.to(device)
    optimizer.zero_grad()
    # Random sign flip of Laplacian eigenvectors (augmentation). Fall back to
    # the plain forward only when pos_enc is absent (AttributeError) or the
    # model does not take it (TypeError); the bare ``except`` previously used
    # here also masked unrelated runtime errors.
    try:
        batch_pos_enc = dataset.pos_enc.to(device)
        sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
        sign_flip[sign_flip >= 0.5] = 1.0
        sign_flip[sign_flip < 0.5] = -1.0
        batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
        batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)[train_idx]
    except (AttributeError, TypeError):
        batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
    loss = model.loss(batch_scores, batch_labels.view(-1)[train_idx]).to(torch.float)
    loss.backward()
    optimizer.step()
    return loss.detach().item()
def train_epoch_proteins(model, optimizer, device, train_loader, epoch=None):
    """One training epoch for ogbn-proteins (multi-label targets).

    Uses ``model.loss_proteins`` (raw logits vs. float multi-label targets).

    Args:
        model: GNN exposing ``forward(x, edge_index, edge_attr[, pos_enc])``
            and ``loss_proteins(scores, targets)``.
        optimizer: optimizer stepping the model parameters.
        device: target device for batch data.
        train_loader: iterable of PyG batches carrying ``train_mask``.
        epoch: unused; kept for signature compatibility.

    Returns:
        Mean training loss per supervised node over the epoch.
    """
    model.train()
    total_loss = total_examples = 0
    for data in train_loader:
        optimizer.zero_grad()
        data = data.to(device)
        # Optional Laplacian positional encodings with random sign flips; fall
        # back only when pos_enc is missing (AttributeError) or the model does
        # not accept it (TypeError) instead of swallowing every exception.
        try:
            batch_pos_enc = data.pos_enc.to(device)
            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
            sign_flip[sign_flip >= 0.5] = 1.0
            sign_flip[sign_flip < 0.5] = -1.0
            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
            out = model.forward(data.x, data.edge_index, data.edge_attr, batch_pos_enc)
        except (AttributeError, TypeError):
            out = model(data.x, data.edge_index, data.edge_attr)
        loss = model.loss_proteins(out[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()
        # Weight each batch by its number of supervised nodes.
        total_loss += float(loss) * int(data.train_mask.sum())
        total_examples += int(data.train_mask.sum())
    return total_loss / total_examples
@torch.no_grad()
def evaluate_network(model, device, test_loader, evaluator, epoch):
    """Evaluate an OGB node classifier over the train/valid/test masks.

    Args:
        model: GNN exposing ``forward(x, edge_index, edge_attr[, pos_enc])``
            and ``loss(scores, labels)``.
        device: target device for batch data.
        test_loader: iterable of PyG batches carrying the three split masks.
        evaluator: OGB-style evaluator whose ``eval`` returns ``{'acc': ...}``.
        epoch: unused; kept for signature compatibility.

    Returns:
        (train_acc, valid_acc, test_acc, mean validation loss per valid node)
    """
    model.eval()
    y_true = {'train': [], 'valid': [], 'test': []}
    y_pred = {'train': [], 'valid': [], 'test': []}
    total_loss = total_examples = 0
    for data in test_loader:
        data = data.to(device)
        # Use positional encodings when present; fall back only when pos_enc is
        # missing (AttributeError) or the model rejects it (TypeError) -- the
        # previous bare ``except`` hid genuine evaluation errors.
        try:
            batch_pos_enc = data.pos_enc.to(device)
            out = model.forward(data.x, data.edge_index.long(), data.edge_attr, batch_pos_enc)
        except (AttributeError, TypeError):
            out = model.forward(data.x, data.edge_index.long(), data.edge_attr)
        # Collect hard (argmax) predictions per split for the evaluator.
        for split in y_true.keys():
            mask = data[f'{split}_mask']
            y_true[split].append(data.y[mask].cpu())
            y_pred[split].append(out[mask].argmax(dim=-1, keepdim=True).cpu())
        # The reported loss is computed on the validation split, weighted by
        # the number of validation nodes in the batch.
        loss = model.loss(out[data.valid_mask], data.y.view(-1)[data.valid_mask])
        total_loss += float(loss) * int(data.valid_mask.sum())
        total_examples += int(data.valid_mask.sum())
    train_acc = evaluator.eval({
        'y_true': torch.cat(y_true['train'], dim=0),
        'y_pred': torch.cat(y_pred['train'], dim=0),
    })['acc']
    valid_acc = evaluator.eval({
        'y_true': torch.cat(y_true['valid'], dim=0),
        'y_pred': torch.cat(y_pred['valid'], dim=0),
    })['acc']
    test_acc = evaluator.eval({
        'y_true': torch.cat(y_true['test'], dim=0),
        'y_pred': torch.cat(y_pred['test'], dim=0),
    })['acc']
    return train_acc, valid_acc, test_acc, total_loss / total_examples
# model.train()
#
# # for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
# batch_x = dataset.x.to(device)
# batch_e = dataset.edge_attr.to(device)
# batch_labels = dataset.y.long().to(device)
# edge_index = dataset.edge_index.long().to(device)
# train_idx = train_idx.to(device)
#
# optimizer.zero_grad()
# batch_scores = model.forward(batch_x, edge_index, batch_e)[train_idx]
# loss = model.loss_proteins(batch_scores, batch_labels[train_idx]).to(torch.float)
# loss.backward()
# optimizer.step()
# epoch_loss = loss.detach().item()
#
# return epoch_loss
@torch.no_grad()
def evaluate_network_arxiv(model, device, dataset, evaluator):
    """Full-batch evaluation on ogbn-arxiv.

    Args:
        model: GNN exposing ``forward(x, edge_index, edge_attr[, pos_enc])``
            and ``loss(scores, labels)``.
        device: target device.
        dataset: wrapper exposing ``dataset.dataset[0]`` (the single graph)
            and ``dataset.split_idx`` (dict of train/valid/test index tensors).
        evaluator: OGB-style evaluator whose ``eval`` returns ``{'acc': ...}``.

    Returns:
        (train_acc, valid_acc, test_acc, validation loss)
    """
    model.eval()
    batch_x = dataset.dataset[0].x.to(device)
    y_true = dataset.dataset[0].y.long().to(device)
    split_idx = dataset.split_idx
    batch_e = dataset.dataset[0].edge_attr.to(device)
    edge_index = dataset.dataset[0].edge_index.long().to(device)
    # Positional encodings are optional; fall back only when absent
    # (AttributeError) or unsupported by the model (TypeError) rather than
    # swallowing every exception as the previous bare ``except`` did.
    try:
        batch_pos_enc = dataset.dataset[0].pos_enc.to(device)
        batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
    except (AttributeError, TypeError):
        batch_scores = model.forward(batch_x, edge_index, batch_e)
    loss = model.loss(batch_scores[split_idx['valid']], y_true.view(-1)[split_idx['valid']]).to(torch.float)
    epoch_valid_loss = loss.detach().item()
    # Hard class predictions, shaped (N, 1) as the OGB evaluator expects.
    y_pred = batch_scores.argmax(dim=-1, keepdim=True)
    y_true = y_true.view(-1, 1)
    train_acc = evaluator.eval({
        'y_true': y_true[split_idx['train']],
        'y_pred': y_pred[split_idx['train']],
    })['acc']
    valid_acc = evaluator.eval({
        'y_true': y_true[split_idx['valid']],
        'y_pred': y_pred[split_idx['valid']],
    })['acc']
    test_acc = evaluator.eval({
        'y_true': y_true[split_idx['test']],
        'y_pred': y_pred[split_idx['test']],
    })['acc']
    return train_acc, valid_acc, test_acc, epoch_valid_loss
@torch.no_grad()
def evaluate_network_proteins(model, device, test_loader, evaluator, epoch=None):
    """Evaluate a multi-label classifier on ogbn-proteins (ROC-AUC per split).

    Args:
        model: GNN exposing ``forward(x, edge_index, edge_attr[, pos_enc])``
            and ``loss_proteins(scores, targets)``.
        device: target device for batch data.
        test_loader: iterable of PyG batches carrying the three split masks.
        evaluator: OGB-style evaluator whose ``eval`` returns ``{'rocauc': ...}``.
        epoch: unused; kept for signature compatibility.

    Returns:
        (train_rocauc, valid_rocauc, test_rocauc, mean valid loss per node)
    """
    model.eval()
    y_true = {'train': [], 'valid': [], 'test': []}
    y_pred = {'train': [], 'valid': [], 'test': []}
    total_loss = total_examples = 0
    for data in test_loader:
        data = data.to(device)
        # Optional positional encodings; fall back only on AttributeError
        # (missing pos_enc) or TypeError (model rejects the extra argument).
        try:
            batch_pos_enc = data.pos_enc.to(device)
            out = model.forward(data.x, data.edge_index, data.edge_attr, batch_pos_enc)
        except (AttributeError, TypeError):
            out = model.forward(data.x, data.edge_index, data.edge_attr)
        # ROC-AUC needs continuous scores, so keep the raw outputs (no argmax).
        for split in y_true.keys():
            mask = data[f'{split}_mask']
            y_true[split].append(data.y[mask].cpu())
            y_pred[split].append(out[mask].cpu())
        # Reported loss is over the validation split, weighted by node count.
        loss = model.loss_proteins(out[data.valid_mask], data.y[data.valid_mask])
        total_loss += float(loss) * int(data.valid_mask.sum())
        total_examples += int(data.valid_mask.sum())
    train_rocauc = evaluator.eval({
        'y_true': torch.cat(y_true['train'], dim=0),
        'y_pred': torch.cat(y_pred['train'], dim=0),
    })['rocauc']
    valid_rocauc = evaluator.eval({
        'y_true': torch.cat(y_true['valid'], dim=0),
        'y_pred': torch.cat(y_pred['valid'], dim=0),
    })['rocauc']
    test_rocauc = evaluator.eval({
        'y_true': torch.cat(y_true['test'], dim=0),
        'y_pred': torch.cat(y_pred['test'], dim=0),
    })['rocauc']
    return train_rocauc, valid_rocauc, test_rocauc, total_loss / total_examples
#
# model.eval()
# batch_x = dataset.dataset[0].x.to(device)
# y_true = dataset.dataset[0].y.long().to(device)
# split_idx = dataset.split_idx
# batch_e = dataset.dataset[0].edge_attr.to(device)
# edge_index = dataset.dataset[0].edge_index.long().to(device)
#
# batch_scores = model.forward(batch_x, edge_index, batch_e)
# loss = model.loss_proteins(batch_scores[split_idx['valid']], y_true[split_idx['valid']]).to(torch.float)
# epoch_valid_loss = loss.detach().item()
# y_pred = batch_scores
# # y_pred = batch_scores.argmax(dim=-1, keepdim=True)
# # y_true = y_true.view(-1, 1)
# train_acc = evaluator.eval({
# 'y_true': y_true[split_idx['train']],
# 'y_pred': y_pred[split_idx['train']],
# })['rocauc']
# valid_acc = evaluator.eval({
# 'y_true': y_true[split_idx['valid']],
# 'y_pred': y_pred[split_idx['valid']],
# })['rocauc']
# test_acc = evaluator.eval({
# 'y_true': y_true[split_idx['test']],
# 'y_pred': y_pred[split_idx['test']],
# })['rocauc']
#
# return train_acc, valid_acc, test_acc, epoch_valid_loss
| 11,004 | 35.440397 | 110 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/train/train_SBMs_node_classification.py | """
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
import dgl
from train.metrics import accuracy_SBM as accuracy
from train.metrics import accuracy_ogb
"""
For GCNs
"""
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, framework='pyg'):
    """Train ``model`` for one epoch on SBM node classification.

    Supports mini-batches from a PyTorch Geometric loader
    (``framework='pyg'``) or a DGL loader (``framework='dgl'``).

    Args:
        model: GNN exposing ``forward`` and ``loss``.
        optimizer: optimizer stepping the model parameters.
        device: device to move each batch to.
        data_loader: iterable of batches; format depends on ``framework``.
        epoch: unused; kept for signature compatibility.
        framework: 'pyg' or 'dgl'; any other value returns ``None``.

    Returns:
        (mean batch loss, mean batch accuracy, optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    if framework == 'pyg':
        num_batches = 0
        for batch_graphs in data_loader:
            batch_x = batch_graphs.x.to(device)  # num x feat
            edge_index = batch_graphs.edge_index.to(device)
            batch_e = batch_graphs.edge_attr.to(device)
            batch_labels = batch_graphs.y.long().to(device)
            optimizer.zero_grad()
            # Random sign flip of Laplacian eigenvectors (augmentation); fall
            # back to the plain forward only when pos_enc is missing
            # (AttributeError) or the model does not take it (TypeError) --
            # the previous bare ``except`` also silenced unrelated errors.
            try:
                batch_pos_enc = batch_graphs.pos_enc.to(device)
                sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
                sign_flip[sign_flip >= 0.5] = 1.0
                sign_flip[sign_flip < 0.5] = -1.0
                batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
                batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
            except (AttributeError, TypeError):
                batch_scores = model.forward(batch_x, edge_index, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_train_acc += accuracy(batch_scores, batch_labels)
            num_batches += 1
        epoch_loss /= num_batches
        epoch_train_acc /= num_batches
        return epoch_loss, epoch_train_acc, optimizer
    elif framework == 'dgl':
        num_batches = 0
        for batch_graphs, batch_labels in data_loader:
            batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)
            batch_graphs = batch_graphs.to(device)  # newer DGL requires moving the graph itself
            optimizer.zero_grad()
            # KeyError: ndata has no 'pos_enc'; TypeError: model rejects it.
            try:
                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
                sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
                sign_flip[sign_flip >= 0.5] = 1.0
                sign_flip[sign_flip < 0.5] = -1.0
                batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
                batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
            except (KeyError, TypeError):
                batch_scores = model.forward(batch_graphs, batch_x, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_train_acc += accuracy(batch_scores, batch_labels)
            num_batches += 1
        epoch_loss /= num_batches
        epoch_train_acc /= num_batches
        return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch, framework='pyg'):
    """Evaluate ``model`` on SBM node classification without gradients.

    Args:
        model: GNN exposing ``forward`` and ``loss``.
        device: device to move each batch to.
        data_loader: iterable of batches; format depends on ``framework``.
        epoch: unused; kept for signature compatibility.
        framework: 'pyg' or 'dgl'; any other value returns ``None``.

    Returns:
        (mean batch loss, mean batch accuracy)
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    if framework == 'pyg':
        num_batches = 0
        with torch.no_grad():
            for batch_graphs in data_loader:
                batch_x = batch_graphs.x.to(device)  # num x feat
                edge_index = batch_graphs.edge_index.to(device)
                batch_e = batch_graphs.edge_attr.to(device)
                batch_labels = batch_graphs.y.long().to(device)
                # Fall back to the plain forward only when pos_enc is missing
                # (AttributeError) or rejected by the model (TypeError).
                try:
                    batch_pos_enc = batch_graphs.pos_enc.to(device)
                    batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
                except (AttributeError, TypeError):
                    batch_scores = model.forward(batch_x, edge_index, batch_e)
                loss = model.loss(batch_scores, batch_labels)
                epoch_test_loss += loss.detach().item()
                epoch_test_acc += accuracy(batch_scores, batch_labels)
                num_batches += 1
        epoch_test_loss /= num_batches
        epoch_test_acc /= num_batches
        return epoch_test_loss, epoch_test_acc
    elif framework == 'dgl':
        num_batches = 0
        with torch.no_grad():
            for batch_graphs, batch_labels in data_loader:
                batch_x = batch_graphs.ndata['feat'].to(device)
                batch_e = batch_graphs.edata['feat'].to(device)
                batch_labels = batch_labels.to(device)
                batch_graphs = batch_graphs.to(device)  # newer DGL requires moving the graph itself
                # KeyError: ndata has no 'pos_enc'; TypeError: model rejects it.
                try:
                    batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
                    batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
                except (KeyError, TypeError):
                    batch_scores = model.forward(batch_graphs, batch_x, batch_e)
                loss = model.loss(batch_scores, batch_labels)
                epoch_test_loss += loss.detach().item()
                epoch_test_acc += accuracy(batch_scores, batch_labels)
                num_batches += 1
        epoch_test_loss /= num_batches
        epoch_test_acc /= num_batches
        return epoch_test_loss, epoch_test_acc
"""
For WL-GNNs
"""
# def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
#
# model.train()
# epoch_loss = 0
# epoch_train_acc = 0
# nb_data = 0
# gpu_mem = 0
# optimizer.zero_grad()
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# loss.backward()
#
# if not (iter%batch_size):
# optimizer.step()
# optimizer.zero_grad()
#
# epoch_loss += loss.detach().item()
# epoch_train_acc += accuracy(scores, labels)
# epoch_loss /= (iter + 1)
# epoch_train_acc /= (iter + 1)
#
# return epoch_loss, epoch_train_acc, optimizer
#
#
#
# def evaluate_network_dense(model, device, data_loader, epoch):
#
# model.eval()
# epoch_test_loss = 0
# epoch_test_acc = 0
# nb_data = 0
# with torch.no_grad():
# for iter, (x_with_node_feat, labels) in enumerate(data_loader):
# x_with_node_feat = x_with_node_feat.to(device)
# labels = labels.to(device)
#
# scores = model.forward(x_with_node_feat)
# loss = model.loss(scores, labels)
# epoch_test_loss += loss.detach().item()
# epoch_test_acc += accuracy(scores, labels)
# epoch_test_loss /= (iter + 1)
# epoch_test_acc /= (iter + 1)
#
# return epoch_test_loss, epoch_test_acc
| 7,186 | 38.489011 | 113 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/ogbn.py |
import time
import os
import pickle
import numpy as np
import os.path as osp
import dgl
import torch
from torch_scatter import scatter
from scipy import sparse as sp
import numpy as np
from tqdm import tqdm
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data
from scipy.sparse import csr_matrix
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from torch_geometric.utils import get_laplacian
# to_undirected
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_sparse import coalesce
class load_SBMsDataSetDGL(torch.utils.data.Dataset):
    """Loads one split of a pickled SBM dataset and converts every sample into
    a DGL graph with long node features and constant scalar edge features."""

    def __init__(self,
                 data_dir,
                 name,
                 split):
        # split selects the pickle file to read: '<name>_<split>.pkl'.
        self.split = split
        self.is_test = split.lower() in ['test', 'val']
        with open(os.path.join(data_dir, name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        self.node_labels = []
        self.graph_lists = []
        self.n_samples = len(self.dataset)
        self._prepare()

    def _prepare(self):
        # Convert each pickled sample (dense adjacency W + node_feat +
        # node_label) into a DGLGraph; edges are added one at a time in the
        # (src, dst) order produced by nonzero(), which fixes their edge ids.
        print("preparing %d graphs for the %s set..." % (self.n_samples, self.split.upper()))
        for data in self.dataset:
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero()  # converting adj matrix to edge_list
            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(node_features.size(0))
            g.ndata['feat'] = node_features.long()
            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())
            # adding edge features for Residual Gated ConvNet
            #edge_feat_dim = g.ndata['feat'].size(1) # dim same as node feature dim
            edge_feat_dim = 1 # dim same as node feature dim
            g.edata['feat'] = torch.ones(g.number_of_edges(), edge_feat_dim)
            self.graph_lists.append(g)
            self.node_labels.append(data.node_label)

    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """
            Get the idx^th sample.
            Parameters
            ---------
            idx : int
                The sample index.
            Returns
            -------
            (dgl.DGLGraph, int)
                DGLGraph with node feature stored in `feat` field
                And its label.
        """
        return self.graph_lists[idx], self.node_labels[idx]
class PygNodeSBMsDataset(InMemoryDataset):
    """PyTorch Geometric version of the SBM node-classification datasets.

    Reads a pickled split from ``data_dir`` on first use, converts each sample
    into a PyG ``Data`` object and caches the collated tensors under
    ``processed_paths[0]`` so later runs load instantly.
    """

    def __init__(self,
                 data_dir,
                 name,
                 split,
                 transform = None,
                 pre_transform = None,
                 meta_dict = None
                 ):
        self.split = split
        self.root = data_dir
        self.name = name
        self.is_test = split.lower() in ['test', 'val']
        self.node_labels = []
        self.graph_lists = []
        # InMemoryDataset.__init__ invokes process() when the processed cache
        # file does not exist yet.
        super(PygNodeSBMsDataset, self).__init__(self.root, transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_dir(self):
        return osp.join(self.root)

    @property
    def num_classes(self):
        # NOTE(review): __num_classes__ is never assigned in this class, so
        # accessing this property raises AttributeError -- confirm whether the
        # base-class num_classes was intended instead.
        return self.__num_classes__

    @property
    def raw_file_names(self):
        return [os.path.join(self.name + '_%s.pkl' % self.split)]

    @property
    def processed_file_names(self):
        # Cache file is keyed by dataset name and split.
        return 'geometric_data_processed' + self.name + self.split + '.pt'

    def download(self):
        r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
        raise NotImplementedError

    def process(self):
        """Convert the pickled split into PyG graphs and save the collated tensors."""
        with open(os.path.join(self.root, self.name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        # self.n_samples = len(self.dataset)
        print("preparing graphs for the %s set..." % (self.split.upper()))
        print('Converting graphs into PyG objects...')
        pyg_graph_list = []
        for data in tqdm(self.dataset):
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero()  # converting adj matrix to edge_list
            g = Data()
            g.__num_nodes__ = node_features.size(0)
            g.edge_index = edge_list.T
            #g.edge_index = torch.from_numpy(edge_list)
            g.x = node_features.long()
            # adding edge features for Residual Gated ConvNet
            edge_feat_dim = 1
            g.edge_attr = torch.ones(g.num_edges, edge_feat_dim)
            g.y = data.node_label.to(torch.float32)
            pyg_graph_list.append(g)
        # Free the raw pickle before collation to reduce peak memory.
        del self.dataset
        data, slices = self.collate(pyg_graph_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
# def size_repr(key, item, indent=0):
# indent_str = ' ' * indent
# if torch.is_tensor(item) and item.dim() == 0:
# out = item.item()
# elif torch.is_tensor(item):
# out = str(list(item.size()))
# elif isinstance(item, list) or isinstance(item, tuple):
# out = str([len(item)])
# elif isinstance(item, dict):
# lines = [indent_str + size_repr(k, v, 2) for k, v in item.items()]
# out = '{\n' + ',\n'.join(lines) + '\n' + indent_str + '}'
# elif isinstance(item, str):
# out = f'"{item}"'
# else:
# out = str(item)
#
# return f'{indent_str}{key}={out}'
#
# class PygNodeSBMsDataset(InMemoryDataset):
#
# def __init__(self,
# data_dir,
# name,
# split,
# transform = None,
# pre_transform = None,
# meta_dict = None
# ):
#
# self.split = split
# self.root = data_dir
# self.name = name
# self.is_test = split.lower() in ['test', 'val']
#
# self.node_labels = []
# self.graph_lists = []
# super(PygNodeSBMsDataset, self).__init__(self.root, transform)
# self.data, self.slices = torch.load(self.processed_paths[0])
#
#
# @property
# def raw_dir(self):
# return osp.join(self.root)
#
# @property
# def num_classes(self):
# return self.__num_classes__
#
# @property
# def raw_file_names(self):
# return [os.path.join(self.name + '_%s.pkl' % self.split)]
#
# @property
# def processed_file_names(self):
# return 'geometric_data_processed' + self.name + self.split + '.pt'
#
# def download(self):
# r"""Downloads the dataset to the :obj:`self.raw_dir` folder."""
# raise NotImplementedError
#
# def process(self):
# with open(os.path.join(self.root, self.name + '_%s.pkl' % self.split), 'rb') as f:
# self.dataset = pickle.load(f)
# # self.n_samples = len(self.dataset)
# print("preparing graphs for the %s set..." % (self.split.upper()))
# print('Converting graphs into PyG objects...')
# pyg_graph_list = []
# for data in tqdm(self.dataset):
# node_features = data.node_feat
# edge_list = (data.W != 0).nonzero() # converting adj matrix to edge_list
# g = Data()
# g.__num_nodes__ = node_features.size(0)
# g.edge_index = edge_list.T
# #g.edge_index = torch.from_numpy(edge_list)
# g.x = node_features.long()
# # adding edge features for Residual Gated ConvNet
# edge_feat_dim = 1
# g.edge_attr = torch.ones(g.num_edges, edge_feat_dim)
# g.y = data.node_label.to(torch.float32)
# pyg_graph_list.append(g)
# del self.dataset
# data, slices = self.collate(pyg_graph_list)
# print('Saving...')
# torch.save((data, slices), self.processed_paths[0])
#
# def __repr__(self):
# return '{}()'.format(self.__class__.__name__)
class Data_idx(object):
    """Minimal single-graph container that mimics a dataset of length one.

    Used for ogbn-mag, where only the paper<->paper subgraph is kept: stores
    the raw tensors and materialises a PyG ``Data`` object on indexing.
    """

    def __init__(self, x=None, edge_index=None, edge_attr=None, y=None, pos_enc=None,
                 **kwargs):
        self.x = x
        self.edge_index = edge_index
        self.y = y
        self.edge_attr = edge_attr
        self.pos_enc = pos_enc

    def __len__(self):
        r"""The number of examples in the dataset (always a single graph)."""
        return 1

    def __getitem__(self, idx):
        """Return the graph as a PyG ``Data`` object; ``idx`` is ignored.

        Bug fix: the original used ``self.pos_enc == None``, which performs an
        elementwise comparison when ``pos_enc`` is a tensor instead of testing
        for the missing-encoding case; ``is None`` is the correct identity test.
        """
        if self.pos_enc is None:
            return Data(x=self.x,
                        edge_index=self.edge_index,
                        y=self.y,
                        edge_attr=self.edge_attr)
        return Data(x=self.x,
                    edge_index=self.edge_index,
                    y=self.y,
                    edge_attr=self.edge_attr,
                    pos_enc=self.pos_enc)
#
# def __repr__(self):
# cls = str(self.__class__.__name__)
# has_dict = any([isinstance(item, dict) for _, item in self])
#
# if not has_dict:
# info = [size_repr(key, item) for key, item in self]
# return '{}({})'.format(cls, ', '.join(info))
# else:
# info = [size_repr(key, item, indent=2) for key, item in self]
# return '{}(\n{}\n)'.format(cls, ',\n'.join(info))
def to_undirected(edge_index, num_nodes=None):
    r"""Converts the graph given by :attr:`edge_index` to an undirected graph,
    so that :math:`(j,i) \in \mathcal{E}` for every edge :math:`(i,j) \in
    \mathcal{E}`, attaching a float edge attribute (fill values 2 for original
    copies, 1 for mirrored copies; duplicates are merged by ``coalesce``).

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """
    n = maybe_num_nodes(edge_index, num_nodes)
    src, dst = edge_index
    # Original-direction copies are filled with 2, mirrored copies with 1.
    fwd_weight = torch.full([src.size(0)], 2)
    rev_weight = torch.full([dst.size(0)], 1)
    sym_src = torch.cat([src, dst], dim=0)
    sym_dst = torch.cat([dst, src], dim=0)
    weights = torch.cat([fwd_weight, rev_weight], dim=0)
    sym_index = torch.stack([sym_src, sym_dst], dim=0)
    sym_index, weights = coalesce(sym_index, weights, n, n)
    return sym_index, weights.view(-1, 1).type(torch.float32)
class ogbnDatasetpyg(InMemoryDataset):
    """Wrapper that loads an OGB node-property dataset (``ogbn-*``) and applies
    per-dataset preprocessing: symmetrised edges for arxiv/mag, aggregated node
    features for proteins, dummy edge features, and optional pretrained node
    embeddings concatenated onto ``x``.
    """

    def __init__(self, name, use_node_embedding = False):
        """Load dataset ``name`` from ``data/ogbn`` and normalise its tensors.

        name: one of 'ogbn-arxiv', 'ogbn-proteins', 'ogbn-mag', 'ogbn-products'.
        use_node_embedding: if True, concatenate a precomputed embedding loaded
            from ``data/ogbn/embedding_<name-suffix>.pt`` onto the node features.
        """
        start = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/ogbn'
        # edge_index = self.dataset[0].edge_index
        # dataset = PygNodePropPredDataset(name=name, root=data_dir)
        if name == 'ogbn-arxiv':
            # arxiv is directed: symmetrise it via the local to_undirected(),
            # which also tags edge directions in edge_attr, then rebuild the
            # slice index so the single graph spans all new edges.
            self.dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = self.dataset.get_idx_split()
            self.dataset.data.edge_index, self.dataset.data.edge_attr = to_undirected(self.dataset[0].edge_index, self.dataset[0].num_nodes)
            self.dataset.data.y = self.dataset.data.y.squeeze(1)
            self.dataset.slices['edge_index'] = torch.tensor([0,self.dataset.data.edge_index.size(1)],dtype=torch.long)
            self.dataset.slices['edge_attr'] = torch.tensor([0, self.dataset.data.edge_index.size(1)],
                                                            dtype=torch.long)
        if name == 'ogbn-proteins':
            # proteins has no raw node features: synthesise them by averaging
            # the incident edge features per source node.
            self.dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = self.dataset.get_idx_split()
            self.dataset.data.x = scatter(self.dataset[0].edge_attr, self.dataset[0].edge_index[0], dim=0,
                                          dim_size=self.dataset[0].num_nodes, reduce='mean').to('cpu')
            self.dataset.slices['x'] = torch.tensor([0, self.dataset.data.y.size(0)],
                                                    dtype=torch.long)
            # self.edge_attr = self.dataset[0].edge_attr
            edge_feat_dim = 1
            self.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
        if name == 'ogbn-mag':
            # Keep only the paper<->paper citation relation of the heterograph
            # and wrap it in the single-graph Data_idx container.
            dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = dataset.get_idx_split()
            rel_data = dataset[0]
            edge_index, self.edge_attr = to_undirected(rel_data.edge_index_dict[('paper', 'cites', 'paper')],
                                                       rel_data.num_nodes['paper'])
            # We are only interested in paper <-> paper relations.
            self.dataset = Data_idx(
                x=rel_data.x_dict['paper'],
                edge_index=edge_index,
                y=rel_data.y_dict['paper'],
                edge_attr = self.edge_attr)
        if name == 'ogbn-products':
            # products ships no edge features: attach constant ones and add a
            # matching slice entry.
            self.dataset = PygNodePropPredDataset(name=name, root=data_dir)
            self.split_idx = self.dataset.get_idx_split()
            self.dataset.slices['edge_attr'] = torch.tensor([0, self.dataset.data.edge_index.size(1)],
                                                            dtype=torch.long)
            edge_feat_dim = 1
            self.dataset.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
        if use_node_embedding:
            print("use_node_embedding ...")
            # name[5:] strips the leading 'ogbn-' to build the file suffix.
            embedding = torch.load('data/ogbn/embedding_' + name[5:] + '.pt', map_location='cpu')
            self.dataset.data.x = torch.cat([self.dataset.data.x, embedding], dim=-1)
            # self.dataset.data.embedding = embedding#torch.cat([self.dataset.data.x, embedding], dim=-1)
            # self.dataset.slices['embedding'] = torch.tensor([0, self.dataset.data.x.size(0)],
            #                                                 dtype=torch.long)
        # edge_attr.type =
        # edge_feat_dim = 1
        # self.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim)
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    def _add_positional_encodings(self, pos_enc_dim, DATASET_NAME = None):
        # Graph positional encoding v/ Laplacian eigenvectors
        # self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        # iter(self.train)
        # Computes (or loads cached) eigenvectors for the single graph and
        # re-collates the dataset so pos_enc is sliced like the other tensors.
        self.graph_lists = [positional_encoding(self.dataset[0], pos_enc_dim, DATASET_NAME = DATASET_NAME)]
        self.dataset.data, self.dataset.slices = self.collate(self.graph_lists)

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class SBMsDatasetDGL(torch.utils.data.Dataset):
    """Bundles the train/test/val splits of an SBM dataset as DGL graph lists."""

    def __init__(self, name):
        """Load all three splits of dataset ``name`` from ``data/SBMs``."""
        t0 = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/SBMs'
        # Load the splits in the same order as before: train, test, val.
        for split in ('train', 'test', 'val'):
            setattr(self, split, load_SBMsDataSetDGL(data_dir, name, split=split))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time() - t0))
def self_loop(g):
    """
        Utility function only, to be used only when necessary as per user self_loop flag
        : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
        This function is called inside a function in SBMsDataset class.
    """
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']
    # Copy all existing edges except self-loops, then append exactly one
    # self-loop per node at the end (edge-id order: copied edges first).
    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)
    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
def positional_encoding(g, pos_enc_dim, DATASET_NAME = None):
    """
        Graph positional encoding v/ Laplacian eigenvectors

        Attaches ``g.pos_enc`` (N x pos_enc_dim float tensor) to the PyG graph.
        The eigenvector matrix is cached under
        ``data/ogbn/laplacian_<suffix>.pt`` where <suffix> is DATASET_NAME[5:];
        a cached matrix of the wrong width is recomputed and overwritten.
        NOTE(review): the bare ``except`` below falls through to recomputation
        for ANY failure (missing cache file, DATASET_NAME=None, scipy errors)
        and thus may mask real bugs -- consider narrowing it; also
        DATASET_NAME is effectively required since both branches index it.
    """
    # Laplacian,for the pyg
    try:
        # Fast path: reuse the eigenvectors saved by a previous run.
        g.pos_enc = torch.load('data/ogbn/laplacian_'+DATASET_NAME[5:]+'.pt', map_location='cpu')
        if g.pos_enc.size(1) != pos_enc_dim:
            # Cached width mismatch: drop the cache and recompute.
            os.remove('data/ogbn/laplacian_'+DATASET_NAME[5:]+'.pt')
            L = get_laplacian(g.edge_index, normalization='sym', dtype=torch.float64)
            L = csr_matrix((L[1], (L[0][0], L[0][1])), shape=(g.num_nodes, g.num_nodes))
            # Eigenvectors with scipy
            # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
            EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)  # for 40 PEs
            EigVec = EigVec[:, EigVal.argsort()]  # increasing order
            # Drop the first (trivial) eigenvector; keep the next pos_enc_dim.
            g.pos_enc = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
            torch.save(g.pos_enc.cpu(), 'data/ogbn/laplacian_' + DATASET_NAME[5:] + '.pt')
    except:
        # Slow path: compute the symmetric-normalised Laplacian eigenvectors
        # from scratch and populate the cache.
        L = get_laplacian(g.edge_index,normalization='sym',dtype = torch.float64)
        L = csr_matrix((L[1], (L[0][0], L[0][1])), shape=(g.num_nodes, g.num_nodes))
        # Eigenvectors with scipy
        # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)  # for 40 PEs
        EigVec = EigVec[:, EigVal.argsort()]  # increasing order
        g.pos_enc = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        torch.save(g.pos_enc.cpu(), 'data/ogbn/laplacian_'+DATASET_NAME[5:]+'.pt')
        # add astype to discards the imaginary part to satisfy the version change pytorch1.5.0
    # # Eigenvectors with numpy
    # EigVal, EigVec = np.linalg.eig(L.toarray())
    # idx = EigVal.argsort() # increasing order
    # EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
    # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
    return g
class SBMsDataset(torch.utils.data.Dataset):
    """DGL-based SBM dataset (PATTERN / CLUSTER) with collate helpers for both
    message-passing GNNs (batched sparse graphs) and dense WL-GNNs."""

    def __init__(self, name):
        """
            Loading SBM datasets
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/SBMs/'
        # The pickle stores the three splits as a (train, val, test) tuple.
        with open(data_dir+name+'.pkl',"rb") as f:
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs which use; such as RingGNN and 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)
        # Dense collation handles a single graph per call (graphs[0]).
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        if self.name == 'SBM_CLUSTER':
            self.num_node_type = 7
        elif self.name == 'SBM_PATTERN':
            self.num_node_type = 3
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(self.num_node_type)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        # One-hot encode each node's type on the diagonal of its channel
        # (channel 0 holds the normalised adjacency).
        for node, node_label in enumerate(g.ndata['feat']):
            adj_node_feat[node_label.item()+1][node][node] = 1
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        # Symmetric normalisation D^{-1/2} A D^{-1/2}; zero-degree nodes are
        # mapped to zero instead of dividing by zero.
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        # NOTE(review): positional_encoding() defined in this module does not
        # accept a ``framework`` keyword, so these calls raise TypeError as
        # written -- confirm which variant of positional_encoding was intended.
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.test.graph_lists]
| 22,737 | 38.407279 | 140 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/molecules.py | import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
import csv
import dgl
from scipy import sparse as sp
import numpy as np
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset
from tqdm import tqdm
# *NOTE
# The dataset pickle and index files are in ./zinc_molecules/ dir
# [<split>.pickle and <split>.index; for split 'train', 'val' and 'test']
class MoleculeDGL(torch.utils.data.Dataset):
    """DGL-backed ZINC molecule dataset for one split ('train'/'val'/'test').

    Reads <data_dir>/<split>.pickle (a list of molecule dicts) plus
    <data_dir>/<split>.index (pre-sampled indices), then converts each
    molecule into a dgl.DGLGraph with node features ndata['feat'] (atom
    types) and edge features edata['feat'] (bond types).
    """
    def __init__(self, data_dir, split, num_graphs):
        self.data_dir = data_dir
        self.split = split
        self.num_graphs = num_graphs
        with open(data_dir + "/%s.pickle" % self.split,"rb") as f:
            self.data = pickle.load(f)
        # loading the sampled indices from file ./zinc_molecules/<split>.index
        with open(data_dir + "/%s.index" % self.split,"r") as f:
            data_idx = [list(map(int, idx)) for idx in csv.reader(f)]
            # keep only the pre-sampled subset (first row of the index file)
            self.data = [ self.data[i] for i in data_idx[0] ]
        assert len(self.data)==num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k"
        """
        data is a list of Molecule dict objects with following attributes
          molecule = data[idx]
        ; molecule['num_atom'] : nb of atoms, an integer (N)
        ; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type
        ; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type
        ; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable
        """
        self.graph_lists = []
        self.graph_labels = []
        self.n_samples = len(self.data)
        self._prepare()
    def _prepare(self):
        # Convert each molecule dict into a DGLGraph (see class docstring).
        print("preparing %d graphs for the %s set..." % (self.num_graphs, self.split.upper()))
        for molecule in self.data:
            node_features = molecule['atom_type'].long()
            adj = molecule['bond_type']
            edge_list = (adj != 0).nonzero() # converting adj matrix to edge_list
            edge_idxs_in_adj = edge_list.split(1, dim=1)
            # bond type of every present edge, in edge_list order
            edge_features = adj[edge_idxs_in_adj].reshape(-1).long()
            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(molecule['num_atom'])
            g.ndata['feat'] = node_features
            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())
            g.edata['feat'] = edge_features
            self.graph_lists.append(g)
            self.graph_labels.append(molecule['logP_SA_cycle_normalized'])
    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples
    def __getitem__(self, idx):
        """
        Get the idx^th sample.
        Parameters
        ---------
        idx : int
            The sample index.
        Returns
        -------
        (dgl.DGLGraph, int)
            DGLGraph with node feature stored in `feat` field
            And its label.
        """
        return self.graph_lists[idx], self.graph_labels[idx]
class Moleculepyg(InMemoryDataset):
    """PyG InMemoryDataset version of one ZINC split.

    Loads the same pickle/index pair as MoleculeDGL, then relies on the
    InMemoryDataset machinery: process() converts the molecule dicts into
    torch_geometric Data objects and caches them at processed_paths[0];
    __init__ finally loads that cache into (self.data, self.slices).
    """
    def __init__(self, data_dir, split, num_graphs, name, root = 'dataset', transform=None, pre_transform=None, meta_dict = None):
        self.data_dir = data_dir
        self.split = split
        self.num_graphs = num_graphs
        self.root = root
        self.name = name
        # NOTE(review): self.data first holds the raw pickle list (consumed by
        # process() during super().__init__), then is overwritten below with
        # the collated processed tensors.
        with open(data_dir + "/%s.pickle" % self.split, "rb") as f:
            self.data = pickle.load(f)
        # loading the sampled indices from file ./zinc_molecules/<split>.index
        with open(data_dir + "/%s.index" % self.split, "r") as f:
            data_idx = [list(map(int, idx)) for idx in csv.reader(f)]
            # keep only the pre-sampled subset (first row of the index file)
            self.data = [self.data[i] for i in data_idx[0]]
        assert len(self.data) == num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k"
        """
        data is a list of Molecule dict objects with following attributes
          molecule = data[idx]
        ; molecule['num_atom'] : nb of atoms, an integer (N)
        ; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type
        ; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type
        ; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable
        """
        self.n_samples = len(self.data)
        super(Moleculepyg, self).__init__(self.root, transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
        #self.process()
    @property
    def processed_file_names(self):
        # Per-dataset, per-split cache file name.
        return 'geometric_data_processed' + self.name + self.split + '.pt'
    def process(self):
        """Convert the raw molecule dicts into PyG Data objects and save them."""
        print("preparing %d graphs for the %s set..." % (self.num_graphs, self.split.upper()))
        print('Converting graphs into PyG objects...')
        pyg_graph_list = []
        graph_labels = []
        for graph in tqdm(self.data):
            node_features = graph['atom_type'].long()
            adj = graph['bond_type']
            edge_list = (adj != 0).nonzero() # converting adj matrix to edge_list
            edge_idxs_in_adj = edge_list.split(1, dim=1)
            # bond type of every present edge, in edge_list order
            edge_features = adj[edge_idxs_in_adj].reshape(-1).long()
            g = Data()
            g.__num_nodes__ = graph['num_atom']
            g.edge_index = edge_list.T
            #g.edge_index = torch.from_numpy(edge_list)
            g.edge_attr = edge_features
            # NOTE(review): the dels below mutate the loaded dicts in place
            # (frees memory, but the raw entries lose these keys afterwards).
            del graph['bond_type']
            if graph['atom_type'] is not None:
                g.x = graph['atom_type'].long()
            del graph['atom_type']
            graph_labels.append(graph['logP_SA_cycle_normalized'])
            pyg_graph_list.append(g)
        for i, g in enumerate(pyg_graph_list):
            # if 'classification' in self.task_type:
            # if has_nan:
            g.y = graph_labels[i].to(torch.float32)
            # else:
            # g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.long)
            # else:
            # g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
        data, slices = self.collate(pyg_graph_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])
class MoleculeDatasetDGL(torch.utils.data.Dataset):
    """Container bundling the DGL train/val/test splits of the ZINC dataset.

    NOTE(review): the `framework` parameter is accepted but never used here.
    """
    def __init__(self, name='Zinc', framework = 'pyg'):
        t0 = time.time()
        self.name = name
        self.num_atom_type = 28 # known meta-info about the zinc dataset; can be calculated as well
        self.num_bond_type = 4 # known meta-info about the zinc dataset; can be calculated as well
        data_dir='./data/molecules'
        # Split sizes are fixed by the pre-sampled index files (10k/1k/1k).
        self.train = MoleculeDGL(data_dir, 'train', num_graphs=10000)
        self.val = MoleculeDGL(data_dir, 'val', num_graphs=1000)
        self.test = MoleculeDGL(data_dir, 'test', num_graphs=1000)
        print("Time taken: {:.4f}s".format(time.time()-t0))
class MoleculeDatasetpyg(InMemoryDataset):
    """Container bundling the PyG train/val/test splits of the ZINC dataset.

    NOTE(review): subclasses InMemoryDataset but never calls super().__init__;
    it is used purely as a container for the three Moleculepyg splits.
    """
    def __init__(self, name='ZINC'):
        t0 = time.time()
        self.name = name
        self.num_atom_type = 28 # known meta-info about the zinc dataset; can be calculated as well
        self.num_bond_type = 4 # known meta-info about the zinc dataset; can be calculated as well
        data_dir = './data/molecules'
        # Split sizes are fixed by the pre-sampled index files (10k/1k/1k).
        self.train = Moleculepyg(data_dir, 'train', num_graphs=10000, name='ZINC')
        self.val = Moleculepyg(data_dir, 'val', num_graphs=1000, name='ZINC')
        self.test = Moleculepyg(data_dir, 'test', num_graphs=1000, name='ZINC')
        print("Time taken: {:.4f}s".format(time.time() - t0))
def self_loop(g):
    """
    Utility function only, to be used only when necessary as per user self_loop flag
    : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
    This function is called inside a function in MoleculeDataset class.

    Returns a NEW graph with the same nodes/node features, the original
    non-self edges, and exactly one self-loop per node; edge features are
    reset to zeros.
    """
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']
    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    # Drop any pre-existing self-loops, then add one self-loop per node.
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)
    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
def positional_encoding(g, pos_enc_dim):
    """
    Graph positional encoding v/ Laplacian eigenvectors

    Stores the first `pos_enc_dim` non-trivial eigenvectors of the
    symmetric-normalized Laplacian in g.ndata['pos_enc'] and returns g.
    """
    # Laplacian
    A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    # D^{-1/2} with degrees clipped to >= 1 to avoid division by zero.
    N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    L = sp.eye(g.number_of_nodes()) - N * A * N
    # Eigenvectors with numpy
    EigVal, EigVec = np.linalg.eig(L.toarray())
    idx = EigVal.argsort() # increasing order
    EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
    # Skip the first (trivial, eigenvalue ~0) eigenvector.
    g.ndata['pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float()
    # # Eigenvectors with scipy
    # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
    # EigVec = EigVec[:, EigVal.argsort()] # increasing order
    # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
    return g
class MoleculeDataset(torch.utils.data.Dataset):
    """ZINC molecule dataset wrapper.

    Loads a pre-pickled (train, val, test, num_atom_type, num_bond_type)
    tuple from data/molecules/<name>.pkl and exposes collate helpers for
    batched DGL training and for dense-tensor GNNs (RingGNN, 3WLGNN).
    """
    def __init__(self, name):
        """Load the pickled splits for dataset `name`."""
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/molecules/'
        with open(data_dir+name+'.pkl',"rb") as f:
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
            self.num_atom_type = f[3]
            self.num_bond_type = f[4]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        """Batch (DGLGraph, label) pairs into one batched graph and a
        (batch, 1) label tensor."""
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels)).unsqueeze(1)
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples, edge_feat):
        """Dense collate (assumes batch size 1).

        Adapted from https://github.com/leichen2018/Ring-GNN/
        Assigning node and edge feats::
        we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
        Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
        The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
        The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).

        Returns (x_no_edge_feat, x_with_edge_feat, labels) with exactly one
        of the two tensors set, depending on `edge_feat`.
        """
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels)).unsqueeze(1)
        g = graphs[0]  # dense collate handles a single graph per batch
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        zero_adj = torch.zeros_like(adj)
        if edge_feat:
            # use edge feats also to prepare adj
            adj_with_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type + self.num_bond_type)])
            adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)
            us, vs = g.edges()
            # one-hot bond type on the (u, v) off-diagonal entries
            for idx, edge_label in enumerate(g.edata['feat']):
                adj_with_edge_feat[edge_label.item()+1+self.num_atom_type][us[idx]][vs[idx]] = 1
            # one-hot atom type on the diagonal
            for node, node_label in enumerate(g.ndata['feat']):
                adj_with_edge_feat[node_label.item()+1][node][node] = 1
            x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
            return None, x_with_edge_feat, labels
        else:
            # use only node feats to prepare adj
            adj_no_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type)])
            adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)
            for node, node_label in enumerate(g.ndata['feat']):
                adj_no_edge_feat[node_label.item()+1][node][node] = 1
            x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
            return x_no_edge_feat, None, labels

    def _sym_normalize_adj(self, adj):
        """Symmetric normalization D^{-1/2} A D^{-1/2} of a dense adjacency."""
        deg = torch.sum(adj, dim=0)
        # zeros_like keeps dtype/device consistent with `adj` (the old
        # torch.zeros(deg.size()) broke on CUDA/float64 inputs); isolated
        # nodes (deg == 0) get inverse degree 0 instead of inf/NaN.
        deg_inv = torch.where(deg > 0, 1./torch.sqrt(deg), torch.zeros_like(deg))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
| 14,989 | 39.404313 | 130 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/node2vec_citeseer.py | import argparse
import torch
from torch_geometric.nn import Node2Vec
from torch_geometric.utils import to_undirected
import torch_geometric as pyg
from ogb.nodeproppred import PygNodePropPredDataset
import os.path as osp
def save_embedding(model):
    """Persist the learned Node2Vec embedding matrix as a CPU tensor on disk."""
    weights = model.embedding.weight.data.cpu()
    torch.save(weights, 'data/planetoid/embedding_Citeseer.pt')
def main():
    """Train Node2Vec embeddings on the Planetoid/Citeseer graph and
    periodically save the embedding matrix to disk."""
    parser = argparse.ArgumentParser(description='OGBN-citeseer (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=256)
    parser.add_argument('--walk_length', type=int, default=80)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()
    # Fall back to CPU when no CUDA device is available.
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)
    # root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'arxiv')
    data_dir = 'planetoid'
    dataset = pyg.datasets.Planetoid(name='Citeseer',root = data_dir)
    data = dataset[0]
    # Random walks need an undirected edge set.
    data.edge_index = to_undirected(data.edge_index, data.num_nodes)
    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)
    # The loader yields (positive, negative) random-walk batches.
    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4)
    # SparseAdam is required because the embedding is created sparse=True.
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)
    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()
            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')
            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        # Save once more at the end of every epoch.
        save_embedding(model)
# Entry point: train Node2Vec on Citeseer when run as a script.
if __name__ == "__main__":
    main()
| 2,380 | 36.793651 | 89 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/SBMs.py |
import time
import os
import pickle
import numpy as np
import os.path as osp
import dgl
import torch
from ogb.utils.url import decide_download, download_url, extract_zip
from scipy import sparse as sp
import numpy as np
from tqdm import tqdm
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data
from scipy.sparse import csr_matrix
from torch_geometric.utils import get_laplacian
class load_SBMsDataSetDGL(torch.utils.data.Dataset):
    """DGL-backed SBM node-classification dataset for one split.

    Reads <data_dir>/<name>_<split>.pkl (objects with .node_feat, .W
    adjacency and .node_label attributes) and converts each sample into a
    dgl.DGLGraph with node features ndata['feat'] and constant edge
    features edata['feat'].
    """
    def __init__(self,
                 data_dir,
                 name,
                 split):
        self.split = split
        # NOTE(review): is_test is set but not used in this class — verify
        # whether downstream code reads it.
        self.is_test = split.lower() in ['test', 'val']
        with open(os.path.join(data_dir, name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        self.node_labels = []
        self.graph_lists = []
        self.n_samples = len(self.dataset)
        self._prepare()
    def _prepare(self):
        # Convert each raw sample into a DGLGraph (see class docstring).
        print("preparing %d graphs for the %s set..." % (self.n_samples, self.split.upper()))
        for data in self.dataset:
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero() # converting adj matrix to edge_list
            # Create the DGL Graph
            g = dgl.DGLGraph()
            g.add_nodes(node_features.size(0))
            g.ndata['feat'] = node_features.long()
            for src, dst in edge_list:
                g.add_edges(src.item(), dst.item())
            # adding edge features for Residual Gated ConvNet
            #edge_feat_dim = g.ndata['feat'].size(1) # dim same as node feature dim
            edge_feat_dim = 1 # dim same as node feature dim
            g.edata['feat'] = torch.ones(g.number_of_edges(), edge_feat_dim)
            self.graph_lists.append(g)
            self.node_labels.append(data.node_label)
    def __len__(self):
        """Return the number of graphs in the dataset."""
        return self.n_samples
    def __getitem__(self, idx):
        """
        Get the idx^th sample.
        Parameters
        ---------
        idx : int
            The sample index.
        Returns
        -------
        (dgl.DGLGraph, int)
            DGLGraph with node feature stored in `feat` field
            And its label.
        """
        return self.graph_lists[idx], self.node_labels[idx]
class PygNodeSBMsDataset(InMemoryDataset):
    """PyG InMemoryDataset for one split of an SBM node-classification set.

    process() converts the raw pickled samples (objects with .node_feat,
    .W adjacency and .node_label attributes) into torch_geometric Data
    objects and caches them at processed_paths[0]; __init__ then loads
    that cache into (self.data, self.slices).
    """
    def __init__(self,
                 data_dir,
                 name,
                 split,
                 transform = None,
                 pre_transform = None,
                 meta_dict = None
                 ):
        self.url = ''
        self.split = split
        self.root = data_dir
        self.original_root = data_dir
        self.name = name
        # NOTE(review): is_test / node_labels / graph_lists are initialized
        # but not used by the visible methods — verify downstream usage.
        self.is_test = split.lower() in ['test', 'val']
        self.node_labels = []
        self.graph_lists = []
        super(PygNodeSBMsDataset, self).__init__(self.root, transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_dir(self):
        return osp.join(self.root)
    @property
    def num_classes(self):
        # NOTE(review): __num_classes__ is name-mangled and never assigned in
        # this class, so accessing this property would raise AttributeError —
        # confirm whether callers actually use it.
        return self.__num_classes__
    @property
    def raw_file_names(self):
        return [os.path.join(self.name + '_%s.pkl' % self.split)]
    @property
    def processed_file_names(self):
        # Per-dataset, per-split cache file name.
        return 'geometric_data_processed' + self.name + self.split + '.pt'
    def download(self):
        """Download and unpack the raw archive (self.url is empty here, so in
        practice the raw pickles are expected to exist locally)."""
        # `shutil` is not imported at module level in this file; import it
        # locally so the cleanup calls below don't raise NameError.
        import shutil
        url = self.url
        if decide_download(url):
            path = download_url(url, self.original_root)
            extract_zip(path, self.original_root)
            os.unlink(path)
            shutil.rmtree(self.root)
            # NOTE(review): self.download_name is never set anywhere in this
            # class — this line would raise AttributeError if reached.
            shutil.move(osp.join(self.original_root, self.download_name), self.root)
        else:
            print('Stop downloading.')
            shutil.rmtree(self.root)
            exit(-1)
    def process(self):
        """Convert the raw pickled samples into PyG Data objects and save them."""
        with open(os.path.join(self.root, self.name + '_%s.pkl' % self.split), 'rb') as f:
            self.dataset = pickle.load(f)
        print("preparing graphs for the %s set..." % (self.split.upper()))
        print('Converting graphs into PyG objects...')
        pyg_graph_list = []
        for data in tqdm(self.dataset):
            node_features = data.node_feat
            edge_list = (data.W != 0).nonzero() # converting adj matrix to edge_list
            g = Data()
            g.__num_nodes__ = node_features.size(0)
            g.edge_index = edge_list.T
            g.x = node_features.long()
            # adding edge features for Residual Gated ConvNet
            edge_feat_dim = 1
            g.edge_attr = torch.ones(g.num_edges, edge_feat_dim)
            g.y = data.node_label.to(torch.float32)
            pyg_graph_list.append(g)
        del self.dataset  # free the raw samples before collating
        data, slices = self.collate(pyg_graph_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])
    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class SBMsDatasetpyg(InMemoryDataset):
    """Container bundling the PyG train/test/val splits of an SBM dataset.

    NOTE(review): subclasses InMemoryDataset but never calls super().__init__;
    it is used purely as a container for three PygNodeSBMsDataset instances.
    """
    def __init__(self, name):
        """
        Load the three PyG splits for SBM dataset `name` from data/SBMs.
        """
        start = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/SBMs'
        self.train = PygNodeSBMsDataset(data_dir, name, split='train')
        self.test = PygNodeSBMsDataset(data_dir, name, split='test')
        self.val = PygNodeSBMsDataset(data_dir, name, split='val')
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))
    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        # self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
        # iter(self.train)
        # Attach a pos_enc attribute to every graph of each split.
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'pyg') for _, g in enumerate(self.train)]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'pyg') for _, g in enumerate(self.val)]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'pyg') for _, g in enumerate(self.test)]
        # self.train.data.pos_enc = [torch.cat(g.pos_enc,dim=0) for _, g in enumerate(self.train.graph_lists)]
        # Re-collate each split so the new pos_enc field is included in the
        # packed (data, slices) tensors.
        self.train.data, self.train.slices = self.collate(self.train.graph_lists)
        self.val.data, self.val.slices = self.collate(self.val.graph_lists)
        self.test.data, self.test.slices = self.collate(self.test.graph_lists)
class SBMsDatasetDGL(torch.utils.data.Dataset):
    """Container bundling the DGL train/test/val splits of an SBM dataset."""
    def __init__(self, name):
        """Load the three pickled splits for SBM dataset `name` from data/SBMs."""
        start = time.time()
        print("[I] Loading data ...")
        self.name = name
        data_dir = 'data/SBMs'
        # Load each split in the same order as before: train, test, val.
        for split in ('train', 'test', 'val'):
            setattr(self, split, load_SBMsDataSetDGL(data_dir, name, split=split))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))
def self_loop(g):
    """
    Utility function only, to be used only when necessary as per user self_loop flag
    : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
    This function is called inside a function in SBMsDataset class.

    Returns a NEW graph with the same nodes/node features, the original
    non-self edges, and exactly one self-loop per node; edge features are
    reset to zeros.
    """
    new_g = dgl.DGLGraph()
    new_g.add_nodes(g.number_of_nodes())
    new_g.ndata['feat'] = g.ndata['feat']
    src, dst = g.all_edges(order="eid")
    src = dgl.backend.zerocopy_to_numpy(src)
    dst = dgl.backend.zerocopy_to_numpy(dst)
    # Drop any pre-existing self-loops, then add one self-loop per node.
    non_self_edges_idx = src != dst
    nodes = np.arange(g.number_of_nodes())
    new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
    new_g.add_edges(nodes, nodes)
    # This new edata is not used since this function gets called only for GCN, GAT
    # However, we need this for the generic requirement of ndata and edata
    new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
    return new_g
def positional_encoding(g, pos_enc_dim, framework = 'dgl'):
    """
    Graph positional encoding v/ Laplacian eigenvectors

    Computes the first `pos_enc_dim` non-trivial eigenvectors of the
    symmetric-normalized Laplacian and attaches them to the graph:
    as g.pos_enc for PyG graphs, or g.ndata['pos_enc'] for DGL graphs.
    """
    # Laplacian,for the pyg
    if framework == 'pyg':
        # Build the sym-normalized Laplacian as a scipy CSR matrix from the
        # (edge_index, edge_weight) pair returned by get_laplacian.
        L = get_laplacian(g.edge_index,normalization='sym',dtype = torch.float64)
        L = csr_matrix((L[1], (L[0][0], L[0][1])), shape=(g.num_nodes, g.num_nodes))
        # Eigenvectors with scipy
        # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2) # for 40 PEs
        EigVec = EigVec[:, EigVal.argsort()] # increasing order
        # Skip the first (trivial) eigenvector; keep the next pos_enc_dim.
        g.pos_enc = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        # add astype to discards the imaginary part to satisfy the version change pytorch1.5.0
    elif framework == 'dgl':
        A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
        # D^{-1/2} with degrees clipped to >= 1 to avoid division by zero.
        N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
        L = sp.eye(g.number_of_nodes()) - N * A * N
        # Eigenvectors with scipy
        # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2) # for 40 PEs
        EigVec = EigVec[:, EigVal.argsort()] # increasing order
        g.ndata['pos_enc'] = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        # add astype to discards the imaginary part to satisfy the version change pytorch1.5.0
    # # Eigenvectors with numpy
    # EigVal, EigVec = np.linalg.eig(L.toarray())
    # idx = EigVal.argsort() # increasing order
    # EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
    # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
    return g
class SBMsDataset(torch.utils.data.Dataset):
    """SBM node-classification dataset wrapper.

    Loads a pre-pickled (train, val, test) tuple from data/SBMs/<name>.pkl
    and exposes collate helpers for batched DGL training and for
    dense-tensor GNNs (RingGNN, 3WLGNN).
    """
    def __init__(self, name):
        """
        Loading SBM datasets
        """
        start = time.time()
        print("[I] Loading dataset %s..." % (name))
        self.name = name
        data_dir = 'data/SBMs/'
        with open(data_dir+name+'.pkl',"rb") as f:
            f = pickle.load(f)
            self.train = f[0]
            self.val = f[1]
            self.test = f[2]
        print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
        print("[I] Finished loading.")
        print("[I] Data load time: {:.4f}s".format(time.time()-start))

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        """Batch (DGLGraph, node_labels) pairs into one batched graph and a
        flat long tensor of node labels."""
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs which use; such as RingGNN and 3WLGNN
    def collate_dense_gnn(self, samples):
        """Dense collate (assumes batch size 1).

        Adapted from https://github.com/leichen2018/Ring-GNN/
        Assigning node and edge feats::
        we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
        Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
        The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
        The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        graphs, labels = map(list, zip(*samples))
        labels = torch.cat(labels).long()
        g = graphs[0]  # dense collate handles a single graph per batch
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        zero_adj = torch.zeros_like(adj)
        # Number of node classes is fixed per dataset name.
        if self.name == 'SBM_CLUSTER':
            self.num_node_type = 7
        elif self.name == 'SBM_PATTERN':
            self.num_node_type = 3
        # use node feats to prepare adj: one-hot node type on the diagonal
        adj_node_feat = torch.stack([zero_adj for j in range(self.num_node_type)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        for node, node_label in enumerate(g.ndata['feat']):
            adj_node_feat[node_label.item()+1][node][node] = 1
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        """Symmetric normalization D^{-1/2} A D^{-1/2} of a dense adjacency."""
        deg = torch.sum(adj, dim=0)
        # zeros_like keeps dtype/device consistent with `adj` (the old
        # torch.zeros(deg.size()) broke on CUDA/float64 inputs); isolated
        # nodes (deg == 0) get inverse degree 0 instead of inf/NaN.
        deg_inv = torch.where(deg > 0, 1./torch.sqrt(deg), torch.zeros_like(deg))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
        self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
        self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors
        self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.train.graph_lists]
        self.val.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.val.graph_lists]
        self.test.graph_lists = [positional_encoding(g, pos_enc_dim, framework = 'dgl') for g in self.test.graph_lists]
| 14,591 | 37.70557 | 127 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/node2vec_proteins.py | import argparse
import torch
from torch_geometric.nn import Node2Vec
from ogb.nodeproppred import PygNodePropPredDataset
def save_embedding(model):
    """Persist the learned Node2Vec embedding matrix as a CPU tensor on disk."""
    weights = model.embedding.weight.data.cpu()
    torch.save(weights, 'ogbn/embedding_proteins.pt')
def main():
    """Train Node2Vec embeddings on the ogbn-proteins graph and periodically
    save the embedding matrix to disk."""
    parser = argparse.ArgumentParser(description='OGBN-Proteins (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=80)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()
    # Fall back to CPU when no CUDA device is available.
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)
    dataset = PygNodePropPredDataset(name='ogbn-proteins',root = 'ogbn')
    data = dataset[0]
    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)
    # The loader yields (positive, negative) random-walk batches.
    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4)
    # SparseAdam is required because the embedding is created sparse=True.
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)
    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()
            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')
            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        # Save once more at the end of every epoch.
        save_embedding(model)
# Entry point: train Node2Vec on ogbn-proteins when run as a script.
if __name__ == "__main__":
    main()
| 2,096 | 34.542373 | 79 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/CSL.py | import numpy as np, time, pickle, random, csv
import torch
from torch.utils.data import DataLoader, Dataset
import os
import pickle
import numpy as np
import dgl
from sklearn.model_selection import StratifiedKFold, train_test_split
random.seed(42)
from scipy import sparse as sp
class DGLFormDataset(torch.utils.data.Dataset):
    """
    DGLFormDataset wrapping graph list and label list as per pytorch Dataset.
    *lists (list): the 'graphs' list followed by the 'labels' list; all
    lists must have the same len().
    """
    def __init__(self, *lists):
        expected_len = len(lists[0])
        assert all(len(li) == expected_len for li in lists)
        self.lists = lists
        self.graph_lists = lists[0]
        self.graph_labels = lists[1]

    def __getitem__(self, index):
        # A sample is the tuple of index-th entries across all wrapped lists.
        return tuple([li[index] for li in self.lists])

    def __len__(self):
        return len(self.graph_lists)
def format_dataset(dataset):
    """
    Utility function to recover data,
    INTO-> dgl/pytorch compatible format

    Takes an iterable of (graph, label) pairs, normalizes node features to
    float and ensures every graph has edge features (mutating the graphs
    in place), then wraps everything in a DGLFormDataset.
    """
    graphs = [data[0] for data in dataset]
    labels = [data[1] for data in dataset]
    for graph in graphs:
        #graph.ndata['feat'] = torch.FloatTensor(graph.ndata['feat'])
        graph.ndata['feat'] = graph.ndata['feat'].float() # dgl 4.0
        # adding edge features for Residual Gated ConvNet, if not there
        if 'feat' not in graph.edata.keys():
            edge_feat_dim = graph.ndata['feat'].shape[1] # dim same as node feature dim
            graph.edata['feat'] = torch.ones(graph.number_of_edges(), edge_feat_dim)
    return DGLFormDataset(graphs, labels)
def get_all_split_idx(dataset):
    """
    - Split total number of graphs into 3 (train, val and test) in 3:1:1
    - Stratified split proportionate to original distribution of data with respect to classes
    - Using sklearn to perform the split and then save the indexes
    - Preparing 5 such combinations of indexes split to be used in Graph NNs
    - As with KFold, each of the 5 fold have unique test set.

    Returns:
        dict mapping 'train'/'val'/'test' to a list (one row per fold) of
        index lists into `dataset`.
    """
    root_idx_dir = './data/CSL/'
    if not os.path.exists(root_idx_dir):
        os.makedirs(root_idx_dir)
    all_idx = {}
    # If there are no idx files, do the split and store the files
    if not (os.path.exists(root_idx_dir + dataset.name + '_train.index')):
        print("[!] Splitting the data into train/val/test ...")
        # Using 5-fold cross val as used in RP-GNN paper
        k_splits = 5
        cross_val_fold = StratifiedKFold(n_splits=k_splits, shuffle=True)
        # Tag every graph with its original position so the index can be
        # recovered after sklearn shuffles/splits the samples.
        for i in range(len(dataset.graph_lists)):
            dataset[i][0].a = lambda: None
            setattr(dataset[i][0].a, 'index', i)
        for indexes in cross_val_fold.split(dataset.graph_lists, dataset.graph_labels):
            remain_index, test_index = indexes[0], indexes[1]
            remain_set = format_dataset([dataset[index] for index in remain_index])
            # Gets final 'train' and 'val'
            train, val, _, __ = train_test_split(remain_set,
                                                    range(len(remain_set.graph_lists)),
                                                    test_size=0.25,
                                                    stratify=remain_set.graph_labels)
            train, val = format_dataset(train), format_dataset(val)
            test = format_dataset([dataset[index] for index in test_index])
            # Extracting only idxs
            idx_train = [item[0].a.index for item in train]
            idx_val = [item[0].a.index for item in val]
            idx_test = [item[0].a.index for item in test]
            # Append this fold's rows; the context managers guarantee the
            # handles are flushed and closed before the read-back below
            # (the previous code leaked the open() handles and relied on GC
            # to flush them to disk).
            with open(root_idx_dir + dataset.name + '_train.index', 'a+') as f_train, \
                    open(root_idx_dir + dataset.name + '_val.index', 'a+') as f_val, \
                    open(root_idx_dir + dataset.name + '_test.index', 'a+') as f_test:
                csv.writer(f_train).writerow(idx_train)
                csv.writer(f_val).writerow(idx_val)
                csv.writer(f_test).writerow(idx_test)
        print("[!] Splitting done!")
    # reading idx from the files
    for section in ['train', 'val', 'test']:
        with open(root_idx_dir + dataset.name + '_'+ section + '.index', 'r') as f:
            reader = csv.reader(f)
            all_idx[section] = [list(map(int, idx)) for idx in reader]
    return all_idx
class CSL(torch.utils.data.Dataset):
    """Circular Skip Link graph dataset.

    Source: https://github.com/PurdueMINDS/RelationalPooling/
    Loads pickled adjacency matrices plus labels from ``path`` and builds
    one DGL graph (with placeholder integer node/edge features) per sample.
    """

    def __init__(self, path="data/CSL/"):
        self.name = "CSL"
        adj_file = os.path.join(path, 'graphs_Kary_Deterministic_Graphs.pkl')
        label_file = os.path.join(path, 'y_Kary_Deterministic_Graphs.pt')
        self.adj_list = pickle.load(open(adj_file, 'rb'))
        self.graph_labels = torch.load(label_file)
        self.graph_lists = []
        self.n_samples = len(self.graph_labels)
        # provisional values; both are overwritten at the end of _prepare()
        self.num_node_type = 1  # 41
        self.num_edge_type = 1  # 164
        self._prepare()

    def _prepare(self):
        t0 = time.time()
        print("[I] Preparing Circular Skip Link Graphs v4 ...")
        for adj in self.adj_list:
            raw_g = dgl.DGLGraph()
            raw_g.from_scipy_sparse_matrix(adj)
            g = dgl.transform.remove_self_loop(raw_g)
            # all-zero integer features as generic placeholders; earlier
            # variants used arange / randperm / ones instead
            g.ndata['feat'] = torch.zeros(g.number_of_nodes()).long()
            g.edata['feat'] = torch.zeros(g.number_of_edges()).long()
            self.graph_lists.append(g)
        # NOTE(review): these are set to feat.size(0), i.e. the node/edge
        # COUNT of the first graph, not the number of distinct feature
        # values — matches the "#41 / #164" hints above, confirm intended.
        self.num_node_type = self.graph_lists[0].ndata['feat'].size(0)
        self.num_edge_type = self.graph_lists[0].edata['feat'].size(0)
        print("[I] Finished preparation after {:.4f}s".format(time.time()-t0))

    def __len__(self):
        """Number of graphs in the dataset."""
        return self.n_samples

    def __getitem__(self, idx):
        """Return the (graph, label) pair at position ``idx``."""
        return self.graph_lists[idx], self.graph_labels[idx]
def self_loop(g):
    """Rebuild ``g`` with exactly one self-loop per node.

    Replacement for dgl.transform.add_self_loop() that also carries over
    ndata['feat'] and attaches an edata['feat'] entry, which the stock DGL
    helper would drop. Called from inside the TUsDataset class when the
    user's self_loop flag is set.
    """
    num_nodes = g.number_of_nodes()
    out = dgl.DGLGraph()
    out.add_nodes(num_nodes)
    out.ndata['feat'] = g.ndata['feat']

    # copy every edge that is not already a self-loop ...
    u, v = g.all_edges(order="eid")
    u = dgl.backend.zerocopy_to_numpy(u)
    v = dgl.backend.zerocopy_to_numpy(v)
    keep = u != v
    out.add_edges(u[keep], v[keep])
    # ... then append one loop per node
    node_ids = np.arange(num_nodes)
    out.add_edges(node_ids, node_ids)

    # Dummy edge features: only GCN/GAT reach this path and they ignore
    # edata, but the generic pipeline expects the key to exist.
    out.edata['feat'] = torch.zeros(out.number_of_edges())
    return out
def positional_encoding(g, pos_enc_dim):
    """Attach Laplacian-eigenvector positional encodings to ``g``.

    Stores the first ``pos_enc_dim`` non-trivial eigenvectors of the
    symmetric normalized Laplacian in g.ndata['pos_enc'] and returns ``g``.
    """
    num_nodes = g.number_of_nodes()
    # symmetric normalized Laplacian: L = I - D^-1/2 A D^-1/2
    adj = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    deg_inv_sqrt = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
    lap = sp.eye(num_nodes) - deg_inv_sqrt * adj * deg_inv_sqrt
    # dense eigendecomposition, columns sorted by ascending eigenvalue
    eig_val, eig_vec = np.linalg.eig(lap.toarray())
    order = eig_val.argsort()
    eig_vec = np.real(eig_vec[:, order])
    # skip the constant eigenvector, keep the next pos_enc_dim columns
    g.ndata['pos_enc'] = torch.from_numpy(eig_vec[:, 1:pos_enc_dim + 1]).float()
    return g
class CSLDataset(torch.utils.data.Dataset):
    """5-fold cross-validation wrapper around the CSL graph dataset.

    Loads the raw CSL graphs, obtains cached stratified split indices via
    get_all_split_idx(), and exposes per-fold train/val/test splits plus
    collate helpers for sparse (DGL-batched) and dense (RingGNN / 3WLGNN)
    models.
    """

    def __init__(self, name='CSL'):
        t0 = time.time()
        self.name = name
        dataset = CSL()
        print("[!] Dataset: ", self.name)

        # this function splits data into train/val/test and returns the indices
        self.all_idx = get_all_split_idx(dataset)

        self.all = dataset
        # materialize the 5 folds as DGLFormDataset objects
        self.train = [self.format_dataset([dataset[idx] for idx in self.all_idx['train'][split_num]]) for split_num in range(5)]
        self.val = [self.format_dataset([dataset[idx] for idx in self.all_idx['val'][split_num]]) for split_num in range(5)]
        self.test = [self.format_dataset([dataset[idx] for idx in self.all_idx['test'][split_num]]) for split_num in range(5)]

        print("Time taken: {:.4f}s".format(time.time()-t0))

    def format_dataset(self, dataset):
        """
            Utility function to recover data,
            INTO-> dgl/pytorch compatible format
        """
        graphs = [data[0] for data in dataset]
        labels = [data[1] for data in dataset]
        return DGLFormDataset(graphs, labels)

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples, pos_enc):
        # The input samples is a list of pairs (graph, label).
        # NOTE(review): only graphs[0] is densified below — this collate
        # appears to assume batch_size == 1; confirm against the caller.
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        g = graphs[0]
        # symmetric-normalized dense adjacency of the single graph
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        if pos_enc:
            in_dim = g.ndata['pos_enc'].shape[1]
            # use node feats to prepare adj
            adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
            adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
            # write each node's positional encoding on diagonal planes 1..in_dim
            for node, node_feat in enumerate(g.ndata['pos_enc']):
                adj_node_feat[1:, node, node] = node_feat
            x_node_feat = adj_node_feat.unsqueeze(0)
            return x_node_feat, labels
        else: # no node features here
            in_dim = 1
            # use node feats to prepare adj
            adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
            adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
            for node, node_feat in enumerate(g.ndata['feat']):
                adj_node_feat[1:, node, node] = node_feat
            x_no_node_feat = adj_node_feat.unsqueeze(0)
            return x_no_node_feat, labels

    def _sym_normalize_adj(self, adj):
        """Return D^-1/2 A D^-1/2, mapping zero-degree nodes to 0 (not inf)."""
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        for split_num in range(5):
            self.train[split_num].graph_lists = [self_loop(g) for g in self.train[split_num].graph_lists]
            self.val[split_num].graph_lists = [self_loop(g) for g in self.val[split_num].graph_lists]
            self.test[split_num].graph_lists = [self_loop(g) for g in self.test[split_num].graph_lists]
        # rebuild the datasets so graph lists and labels stay paired
        for split_num in range(5):
            self.train[split_num] = DGLFormDataset(self.train[split_num].graph_lists, self.train[split_num].graph_labels)
            self.val[split_num] = DGLFormDataset(self.val[split_num].graph_lists, self.val[split_num].graph_labels)
            self.test[split_num] = DGLFormDataset(self.test[split_num].graph_lists, self.test[split_num].graph_labels)

    def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding v/ Laplacian eigenvectors (in place per graph)
        for split_num in range(5):
            self.train[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train[split_num].graph_lists]
            self.val[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val[split_num].graph_lists]
            self.test[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test[split_num].graph_lists]
| 13,727 | 40.101796 | 128 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/node2vec-products.py | import argparse
import torch
from torch_geometric.nn import Node2Vec
from ogb.nodeproppred import PygNodePropPredDataset
def save_embedding(model):
    """Snapshot the Node2Vec embedding matrix to ogbn/embedding_products.pt."""
    weights = model.embedding.weight.data.cpu()
    torch.save(weights, 'ogbn/embedding_products.pt')
def main():
    """Train Node2Vec on ogbn-products and periodically checkpoint the embeddings."""
    parser = argparse.ArgumentParser(description='OGBN-Products (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=40)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    # downloads/caches the dataset under ./ogbn on first run
    dataset = PygNodePropPredDataset(name='ogbn-products',root='ogbn')
    data = dataset[0]

    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)

    # the loader yields (positive walk, negative walk) batches
    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4) # change num_workers to 0 for convenient debugging
    # SparseAdam is required because the embedding uses sparse gradients
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()

            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')

            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        save_embedding(model)
if __name__ == "__main__":
    # entry point when run as a script
    main()
| 2,133 | 35.169492 | 79 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/planetoids.py | import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
from torch_geometric.utils import get_laplacian
import csv
from scipy import sparse as sp
import dgl
from dgl.data import TUDataset
from dgl.data import LegacyTUDataset
import torch_geometric as pyg
from scipy.sparse import csr_matrix
import random
random.seed(42)
from sklearn.model_selection import StratifiedKFold, train_test_split
from torch_geometric.data import InMemoryDataset
import csv
import json
class pygFormDataset(torch.utils.data.Dataset):
    """PyTorch Dataset zipping equal-length parallel lists.

    *lists (list): lists of 'graphs' and 'labels' with same len();
    indexing yields a tuple with the index-th element of every list.
    """
    def __init__(self, *lists):
        reference = len(lists[0])
        assert all(reference == len(other) for other in lists)
        self.lists = lists
        self.node_lists = lists[0]
        self.node_labels = lists[1]

    def __getitem__(self, index):
        items = [entries[index] for entries in self.lists]
        return tuple(items)

    def __len__(self):
        return len(self.lists[0])
def format_dataset(dataset):
    """Repackage an iterable of (node, label) pairs as a pygFormDataset."""
    nodes, labels = [], []
    for sample in dataset:
        nodes.append(sample[0])
        labels.append(sample[1])
    return pygFormDataset(nodes, labels)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as plain nested lists."""

    def default(self, obj):
        # numpy arrays are not JSON-serializable out of the box
        if not isinstance(obj, np.ndarray):
            return super().default(obj)
        return obj.tolist()
def get_all_split_idx(dataset):
    """Create (or load cached) stratified 10-fold node splits for ``dataset``.

    - Splits the nodes of ``dataset[0]`` into train/val/test in roughly 80:10:10.
    - Stratified proportionate to the class distribution: StratifiedKFold
      carves out each unique test fold, then a stratified train_test_split
      separates train from val within the remainder.
    - The index lists are cached as JSON under ./data/planetoid/ and simply
      reloaded on subsequent calls.

    Returns:
        dict with keys 'train', 'val', 'test', each mapping to a list of
        10 per-fold index lists.
    """
    root_idx_dir = './data/planetoid/'
    os.makedirs(root_idx_dir, exist_ok=True)  # race-safe, no exists() pre-check
    filename = root_idx_dir + f"{dataset.name}_splits.json"

    # If there is no cached split file, do the split and store it
    if not os.path.exists(filename):
        print("[!] Splitting the data into train/val/test ...")
        all_idxs = np.arange(dataset[0].num_nodes)

        # Using 10-fold cross val to compare with benchmark papers
        k_splits = 10
        cross_val_fold = StratifiedKFold(n_splits=k_splits, shuffle=True)
        split = {"train": [], "val": [], "test": []}

        for train_ok_split, test_ok_split in cross_val_fold.split(X=all_idxs, y=dataset[0].y):
            train_ok_targets = dataset[0].y[train_ok_split]
            # Gets final 'train' and 'val' (0.111 of the 90% remainder ~= 10% overall)
            train_i_split, val_i_split = train_test_split(train_ok_split,
                                                          test_size=0.111,
                                                          stratify=train_ok_targets)
            split['train'].append(train_i_split)
            split['val'].append(val_i_split)
            split['test'].append(all_idxs[test_ok_split])

        with open(filename, "w") as f:
            # NumpyEncoder converts the ndarray index lists to plain lists
            json.dump(split, f, cls=NumpyEncoder)
        print("[!] Splitting done!")

    # reading idx from the (possibly pre-existing) file
    with open(filename, "r") as fp:
        all_idx = json.load(fp)
    return all_idx
class DGLFormDataset(torch.utils.data.Dataset):
    """Dataset view over parallel lists of graphs and labels.

    *lists (list): lists of 'graphs' and 'labels' with same len().
    """
    def __init__(self, *lists):
        expected = len(lists[0])
        for li in lists:
            assert len(li) == expected
        self.lists = lists
        self.graph_lists = lists[0]
        self.graph_labels = lists[1]

    def __getitem__(self, index):
        return tuple(li[index] for li in self.lists)

    def __len__(self):
        return len(self.graph_lists)
def self_loop(g):
    """Return a copy of ``g`` whose only self-loops are one per node.

    Stands in for dgl.transform.add_self_loop(), which would drop the
    ndata['feat'] / edata['feat'] entries this pipeline relies on; used
    from within the TUsDataset class when the user's self_loop flag is set.
    """
    result = dgl.DGLGraph()
    n = g.number_of_nodes()
    result.add_nodes(n)
    result.ndata['feat'] = g.ndata['feat']

    srcs, dsts = g.all_edges(order="eid")
    srcs = dgl.backend.zerocopy_to_numpy(srcs)
    dsts = dgl.backend.zerocopy_to_numpy(dsts)
    mask = srcs != dsts
    # copy every non-self edge, then append exactly one loop per node
    result.add_edges(srcs[mask], dsts[mask])
    all_nodes = np.arange(n)
    result.add_edges(all_nodes, all_nodes)

    # Placeholder edge features: only GCN/GAT take this path and they do
    # not read edata, but downstream code expects the key to be present.
    result.edata['feat'] = torch.zeros(result.number_of_edges())
    return result
def positional_encoding(g, pos_enc_dim, framework='pyg'):
    """Graph positional encoding via Laplacian eigenvectors.

    Computes eigenvectors of the symmetric normalized Laplacian and keeps
    the ``pos_enc_dim`` non-trivial ones (the constant eigenvector is skipped).

    Args:
        g: a torch_geometric ``Data`` object (framework='pyg') or a DGL
           graph (framework='dgl').
        pos_enc_dim: number of eigenvector columns to keep.
        framework: 'pyg' or 'dgl'.

    Returns:
        framework='pyg': float tensor of shape (num_nodes, pos_enc_dim).
        framework='dgl': ``g`` with the encoding stored in g.ndata['pos_enc']
        (previously this branch implicitly returned None; returning ``g``
        matches the companion helper in the CSL loader and is harmless to
        callers that ignore the return value).

    Raises:
        ValueError: for an unknown ``framework`` (previously fell through
        silently and returned None).
    """
    if framework == 'pyg':
        # symmetric normalized Laplacian as a scipy CSR matrix
        L = get_laplacian(g.edge_index, normalization='sym', dtype=torch.float64)
        L = csr_matrix((L[1], (L[0][0], L[0][1])), shape=(g.num_nodes, g.num_nodes))
        # Eigenvectors with scipy; loose tolerance for speed (for 40 PEs)
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
        EigVec = EigVec[:, EigVal.argsort()]  # increasing order
        # astype discards the imaginary part (eigs returns complex arrays)
        pos_enc = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        return pos_enc
    elif framework == 'dgl':
        # L = I - D^-1/2 A D^-1/2 built from the DGL adjacency
        A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
        N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
        L = sp.eye(g.number_of_nodes()) - N * A * N
        # Eigenvectors with scipy; loose tolerance for speed (for 40 PEs)
        EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
        EigVec = EigVec[:, EigVal.argsort()]  # increasing order
        g.ndata['pos_enc'] = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1].astype(np.float32)).float()
        return g
    else:
        raise ValueError(f"unknown framework: {framework!r}; expected 'pyg' or 'dgl'")
class PlanetoidDataset(InMemoryDataset):
    """Planetoid (Cora/Citeseer/Pubmed) node-classification wrapper with
    cached 10-fold stratified index splits and optional precomputed
    node2vec features concatenated onto the raw features.
    """

    def __init__(self, name, use_node_embedding = False):
        t0 = time.time()
        self.name = name
        data_dir = 'data/planetoid'
        #dataset = TUDataset(self.name, hidden_size=1)
        # dataset = LegacyTUDataset(self.name, hidden_size=1) # dgl 4.0
        self.dataset = pyg.datasets.Planetoid(root=data_dir, name= name ,split = 'full')
        print("[!] Dataset: ", self.name)
        if use_node_embedding:
            # concatenate a precomputed node2vec embedding onto the raw features
            embedding = torch.load(data_dir + '/embedding_'+name + '.pt', map_location='cpu')
            # self.dataset.data.x = embedding
            # self.laplacian = positional_encoding(self.dataset[0], 200, framework = 'pyg')
            self.dataset.data.x = torch.cat([self.dataset.data.x, embedding], dim=-1)

        # this function splits data into train/val/test and returns the indices
        self.all_idx = get_all_split_idx(self.dataset)
        edge_feat_dim = 1
        # constant dummy edge features (Planetoid graphs carry none of their own)
        self.edge_attr = torch.ones(self.dataset[0].num_edges, edge_feat_dim)
        # self.all = dataset
        # dataset.train[split_number]
        self.train_idx = [torch.tensor(self.all_idx['train'][split_num], dtype=torch.long) for split_num in range(10)]
        self.val_idx = [torch.tensor(self.all_idx['val'][split_num], dtype=torch.long) for split_num in range(10)]
        self.test_idx = [torch.tensor(self.all_idx['test'][split_num], dtype=torch.long) for split_num in range(10)]
        # self.train = [self.format_dataset([dataset[idx] for idx in self.all_idx['train'][split_num]]) for split_num in range(10)]
        # self.val = [self.format_dataset([dataset[idx] for idx in self.all_idx['val'][split_num]]) for split_num in range(10)]
        # self.test = [self.format_dataset([dataset[idx] for idx in self.all_idx['test'][split_num]]) for split_num in range(10)]
        print("Time taken: {:.4f}s".format(time.time()-t0))

    def format_dataset(self, dataset):
        """
            Utility function to recover data,
            INTO-> dgl/pytorch compatible format
        """
        # NOTE(review): this method (and the collate helpers below) operate on
        # DGL graphs (ndata/edata), while __init__ builds a pyg dataset —
        # they look like leftovers from the DGL code path; confirm before use.
        graphs = [data[0] for data in dataset]
        labels = [data[1] for data in dataset]
        for graph in graphs:
            #graph.ndata['feat'] = torch.FloatTensor(graph.ndata['feat'])
            graph.ndata['feat'] = graph.ndata['feat'].float() # dgl 4.0
            # adding edge features for Residual Gated ConvNet, if not there
            if 'feat' not in graph.edata.keys():
                edge_feat_dim = graph.ndata['feat'].shape[1] # dim same as node feature dim
                graph.edata['feat'] = torch.ones(graph.number_of_edges(), edge_feat_dim)
        return DGLFormDataset(graphs, labels)

    # form a mini batch from a given list of samples = [(graph, label) pairs]
    def collate(self, samples):
        # The input samples is a list of pairs (graph, label).
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = torch.cat(tab_snorm_n).sqrt()
        #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
        #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
        #snorm_e = torch.cat(tab_snorm_e).sqrt()
        batched_graph = dgl.batch(graphs)
        return batched_graph, labels

    # prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
    def collate_dense_gnn(self, samples):
        # The input samples is a list of pairs (graph, label).
        # NOTE(review): only graphs[0] is densified — appears to assume
        # batch_size == 1; confirm against the caller.
        graphs, labels = map(list, zip(*samples))
        labels = torch.tensor(np.array(labels))
        #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
        #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
        #snorm_n = tab_snorm_n[0][0].sqrt()
        #batched_graph = dgl.batch(graphs)
        g = graphs[0]
        adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
        """
            Adapted from https://github.com/leichen2018/Ring-GNN/
            Assigning node and edge feats::
            we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
            Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
            The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
            The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
        """
        zero_adj = torch.zeros_like(adj)
        in_dim = g.ndata['feat'].shape[1]
        # use node feats to prepare adj
        adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
        adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
        # write each node's feature vector on diagonal planes 1..in_dim
        for node, node_feat in enumerate(g.ndata['feat']):
            adj_node_feat[1:, node, node] = node_feat
        x_node_feat = adj_node_feat.unsqueeze(0)
        return x_node_feat, labels

    def _sym_normalize_adj(self, adj):
        """Return D^-1/2 A D^-1/2, mapping zero-degree nodes to 0 (not inf)."""
        deg = torch.sum(adj, dim = 0)#.squeeze()
        deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
        deg_inv = torch.diag(deg_inv)
        return torch.mm(deg_inv, torch.mm(adj, deg_inv))

    def _add_self_loops(self):
        # function for adding self loops
        # this function will be called only if self_loop flag is True
        # NOTE(review): __init__ only defines train_idx/val_idx/test_idx;
        # self.train / self.val / self.test are never assigned (those lines
        # are commented out above), so calling this method would raise
        # AttributeError — confirm whether it is dead code.
        for split_num in range(10):
            self.train[split_num].graph_lists = [self_loop(g) for g in self.train[split_num].graph_lists]
            self.val[split_num].graph_lists = [self_loop(g) for g in self.val[split_num].graph_lists]
            self.test[split_num].graph_lists = [self_loop(g) for g in self.test[split_num].graph_lists]
        # rebuild the datasets so graph lists and labels stay paired
        for split_num in range(10):
            self.train[split_num] = DGLFormDataset(self.train[split_num].graph_lists, self.train[split_num].graph_labels)
            self.val[split_num] = DGLFormDataset(self.val[split_num].graph_lists, self.val[split_num].graph_labels)
            self.test[split_num] = DGLFormDataset(self.test[split_num].graph_lists, self.test[split_num].graph_labels)
| 13,158 | 43.60678 | 131 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/node2vec_arxiv.py | import argparse
import torch
from torch_geometric.nn import Node2Vec
from torch_geometric.utils import to_undirected
from ogb.nodeproppred import PygNodePropPredDataset
import os.path as osp
def save_embedding(model):
    """Snapshot the Node2Vec embedding matrix to ogbn/embedding_arxiv.pt."""
    weights = model.embedding.weight.data.cpu()
    torch.save(weights, 'ogbn/embedding_arxiv.pt')
def main():
    """Train Node2Vec on ogbn-arxiv and periodically checkpoint the embeddings."""
    parser = argparse.ArgumentParser(description='OGBN-Arxiv (Node2Vec)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--embedding_dim', type=int, default=128)
    parser.add_argument('--walk_length', type=int, default=80)
    parser.add_argument('--context_size', type=int, default=20)
    parser.add_argument('--walks_per_node', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--log_steps', type=int, default=1)
    args = parser.parse_args()

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    # root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'arxiv')
    # downloads/caches the dataset under ./ogbn on first run
    dataset = PygNodePropPredDataset(name='ogbn-arxiv',root = 'ogbn')
    data = dataset[0]
    # arxiv edges are directed; random walks need both directions
    data.edge_index = to_undirected(data.edge_index, data.num_nodes)

    model = Node2Vec(data.edge_index, args.embedding_dim, args.walk_length,
                     args.context_size, args.walks_per_node,
                     sparse=True).to(device)

    # the loader yields (positive walk, negative walk) batches
    loader = model.loader(batch_size=args.batch_size, shuffle=True,
                          num_workers=4)
    # SparseAdam is required because the embedding uses sparse gradients
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(1, args.epochs + 1):
        for i, (pos_rw, neg_rw) in enumerate(loader):
            optimizer.zero_grad()
            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
            loss.backward()
            optimizer.step()

            if (i + 1) % args.log_steps == 0:
                print(f'Epoch: {epoch:02d}, Step: {i+1:03d}/{len(loader)}, '
                      f'Loss: {loss:.4f}')

            if (i + 1) % 100 == 0:  # Save model every 100 steps.
                save_embedding(model)
        save_embedding(model)
if __name__ == "__main__":
    # entry point when run as a script
    main()
| 2,306 | 36.819672 | 81 | py |
benchmarking-gnns-pyg | benchmarking-gnns-pyg-master/data/molecules/prepare_molecules.py | #!/usr/bin/env python
# coding: utf-8
# # Notebook for preparing and saving MOLECULAR graphs
# In[1]:
import numpy as np
import torch
import pickle
import time
import os
from IPython import get_ipython
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# In[2]:
print(torch.__version__)
# # Download ZINC dataset
# In[1]:
#!unzip molecules.zip -d ../
# In[3]:
# Fetch the ZINC molecules archive once and unpack it into the parent dir.
if not os.path.isfile('molecules.zip'):
    print('downloading..')
    os.system('curl https://www.dropbox.com/s/feo9qle74kg48gy/molecules.zip?dl=1 -o molecules.zip -J -L -k')
    os.system('unzip molecules.zip -d ../')
    # !tar -xvf molecules.zip -C ../
else:
    print('File already downloaded')
# # Convert to DGL format and save with pickle
# In[4]:
import os
os.chdir('../../') # go to root folder of the project
print(os.getcwd())
# In[5]:
import pickle
# get_ipython().run_line_magic('load_ext', 'autoreload')
# get_ipython().run_line_magic('autoreload', '2')
from data.molecules import MoleculeDatasetDGL ,MoleculeDatasetpyg
from data.data import LoadData
from torch.utils.data import DataLoader
from data.molecules import MoleculeDataset
# In[6]:
# choose the graph framework used for dataset preparation: 'pyg' or 'dgl'
framwork = 'pyg'
DATASET_NAME = 'ZINC'
# builds the train/val/test molecule datasets for the chosen framework
dataset = MoleculeDatasetDGL(DATASET_NAME) if 'dgl' == framwork else MoleculeDatasetpyg(DATASET_NAME)
# In[7]:
def plot_histo_graphs(dataset, title):
    """Plot a histogram of graph sizes (node counts) and print the min/max.

    Reads ``num_nodes`` from pyg Data objects, or ``number_of_nodes()`` from
    the DGL (graph, label) pairs, depending on the module-level ``framwork``
    flag.
    """
    # histogram of graph sizes
    graph_sizes = []
    for graph in dataset:
        # use an explicit branch instead of a conditional expression
        # evaluated for its side effects
        if framwork == 'pyg':
            graph_sizes.append(graph.num_nodes)
        else:
            graph_sizes.append(graph[0].number_of_nodes())
    plt.figure(1)
    plt.hist(graph_sizes, bins=20)
    plt.title(title)
    plt.show()
    graph_sizes = torch.Tensor(graph_sizes)
    print('min/max :',graph_sizes.min().long().item(),graph_sizes.max().long().item())
# Sanity-check the prepared splits: size histograms plus a few sample prints.
#plot_histo_graphs(dataset.train,'trainset')
plot_histo_graphs(dataset.val,'valset')
plot_histo_graphs(dataset.test,'testset')
# In[8]:
#print(len(dataset.train))
print(len(dataset.val))
print(len(dataset.test))
#print(dataset.train[0])
print(dataset.val[0])
print(dataset.test[0])
# In[9]:
# ZINC vocabulary sizes: distinct atom and bond types
num_atom_type = 28
num_bond_type = 4
# In[10]:
# start = time.time()
#
# with open('data/molecules/ZINC_dgl.pkl','wb') as f:
#         pickle.dump([dataset.train,dataset.val,dataset.test,num_atom_type,num_bond_type],f)
# print('Time (sec):',time.time() - start)
# # Test load function
# In[11]:
# DATASET_NAME = 'ZINC'
# dataset = LoadData(DATASET_NAME, framwork)
# trainset, valset, testset = dataset.train, dataset.val, dataset.test
# In[12]:
# smoke-test batching of the validation split with the pyg DataLoader
from torch_geometric.data import DataLoader
loader = DataLoader(dataset.val, batch_size=32, shuffle=True)
for batch in loader:
    print(batch)
    print(batch.y)
    print(len(batch.y))
# batch_size = 10
# collate = MoleculeDataset.collate
# print(MoleculeDataset)
# train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, collate_fn=collate)
# In[ ]:
# In[ ]:
| 2,951 | 17.110429 | 116 | py |
SleePyCo | SleePyCo-main/train_mtcl.py | import os
import json
import argparse
import warnings
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from utils import *
from loader import EEGDataLoader
from models.main_model import MainModel
class OneFoldTrainer:
    """Train and evaluate the SleePyCo MainModel on one cross-validation fold.

    Builds the model (optionally loading pretrained weights for finetuning),
    prepares train/val/test loaders, runs training with periodic validation
    and early stopping, and evaluates the best checkpoint on the test split.
    """

    def __init__(self, args, fold, config):
        self.args = args
        self.fold = fold

        self.cfg = config
        self.ds_cfg = config['dataset']
        self.fp_cfg = config['feature_pyramid']
        self.tp_cfg = config['training_params']
        self.es_cfg = self.tp_cfg['early_stopping']

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('[INFO] Config name: {}'.format(config['name']))

        self.train_iter = 0
        self.model = self.build_model()
        self.loader_dict = self.build_dataloader()

        self.criterion = nn.CrossEntropyLoss()
        # freeze layers first so the optimizer only receives trainable params
        self.activate_train_mode()
        self.optimizer = optim.Adam([p for p in self.model.parameters() if p.requires_grad], lr=self.tp_cfg['lr'], weight_decay=self.tp_cfg['weight_decay'])

        self.ckpt_path = os.path.join('checkpoints', config['name'])
        self.ckpt_name = 'ckpt_fold-{0:02d}.pth'.format(self.fold)
        self.early_stopping = EarlyStopping(patience=self.es_cfg['patience'], verbose=True, ckpt_path=self.ckpt_path, ckpt_name=self.ckpt_name, mode=self.es_cfg['mode'])

    def build_model(self):
        """Build the DataParallel MainModel; unless training from scratch,
        load the matching pretrain checkpoint for this fold."""
        model = MainModel(self.cfg)
        print('[INFO] Number of params of model: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
        model = torch.nn.DataParallel(model, device_ids=list(range(len(self.args.gpu.split(",")))))
        if self.tp_cfg['mode'] != 'scratch':
            print('[INFO] Model loaded for finetune')
            # derive the pretrain checkpoint name from this config's name:
            # pretrain runs used seq_len 1, a single scale, and mode 'pretrain'
            load_name = self.cfg['name'].replace('SL-{:02d}'.format(self.ds_cfg['seq_len']), 'SL-01')
            load_name = load_name.replace('numScales-{}'.format(self.fp_cfg['num_scales']), 'numScales-1')
            load_name = load_name.replace(self.tp_cfg['mode'], 'pretrain')
            load_path = os.path.join('checkpoints', load_name, 'ckpt_fold-{0:02d}.pth'.format(self.fold))
            # strict=False: the classifier head is not part of the pretrain ckpt
            model.load_state_dict(torch.load(load_path), strict=False)
        model.to(self.device)
        print('[INFO] Model prepared, Device used: {} GPU:{}'.format(self.device, self.args.gpu))

        return model

    def build_dataloader(self):
        """Create train/val/test DataLoaders (4 workers per visible GPU)."""
        train_dataset = EEGDataLoader(self.cfg, self.fold, set='train')
        train_loader = DataLoader(dataset=train_dataset, batch_size=self.tp_cfg['batch_size'], shuffle=True, num_workers=4*len(self.args.gpu.split(",")), pin_memory=True)
        val_dataset = EEGDataLoader(self.cfg, self.fold, set='val')
        val_loader = DataLoader(dataset=val_dataset, batch_size=self.tp_cfg['batch_size'], shuffle=False, num_workers=4*len(self.args.gpu.split(",")), pin_memory=True)
        test_dataset = EEGDataLoader(self.cfg, self.fold, set='test')
        test_loader = DataLoader(dataset=test_dataset, batch_size=self.tp_cfg['batch_size'], shuffle=False, num_workers=4*len(self.args.gpu.split(",")), pin_memory=True)
        print('[INFO] Dataloader prepared')

        return {'train': train_loader, 'val': val_loader, 'test': test_loader}

    def activate_train_mode(self):
        """Switch the model to train mode; in 'freezefinetune' mode freeze
        the backbone except its top conv blocks (conv_c5, plus conv_c4 /
        conv_c3 depending on the number of feature-pyramid scales)."""
        self.model.train()
        if self.tp_cfg['mode'] == 'freezefinetune':
            print('[INFO] Freeze backone')
            self.model.module.feature.train(False)
            for p in self.model.module.feature.parameters():
                p.requires_grad = False
            print('[INFO] Unfreeze conv_c5')
            self.model.module.feature.conv_c5.train(True)
            for p in self.model.module.feature.conv_c5.parameters(): p.requires_grad = True
            if self.fp_cfg['num_scales'] > 1:
                print('[INFO] Unfreeze conv_c4')
                self.model.module.feature.conv_c4.train(True)
                for p in self.model.module.feature.conv_c4.parameters(): p.requires_grad = True
            if self.fp_cfg['num_scales'] > 2:
                print('[INFO] Unfreeze conv_c3')
                self.model.module.feature.conv_c3.train(True)
                for p in self.model.module.feature.conv_c3.parameters(): p.requires_grad = True

    def train_one_epoch(self, epoch):
        """Run one training epoch; validate every val_period iterations and
        feed the result to early stopping. ``epoch`` is currently unused
        inside the loop."""
        correct, total, train_loss = 0, 0, 0

        for i, (inputs, labels) in enumerate(self.loader_dict['train']):
            loss = 0
            total += labels.size(0)
            inputs = inputs.to(self.device)
            labels = labels.view(-1).to(self.device)

            # the model returns a list of logit tensors (presumably one per
            # feature-pyramid scale); losses are summed and logits pooled
            outputs = self.model(inputs)
            outputs_sum = torch.zeros_like(outputs[0])

            for j in range(len(outputs)):
                loss += self.criterion(outputs[j], labels)
                outputs_sum += outputs[j]

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            train_loss += loss.item()
            predicted = torch.argmax(outputs_sum, 1)
            correct += predicted.eq(labels).sum().item()

            self.train_iter += 1
            progress_bar(i, len(self.loader_dict['train']), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (train_loss / (i + 1), 100. * correct / total, correct, total))

            if self.train_iter % self.tp_cfg['val_period'] == 0:
                print('')
                val_acc, val_loss = self.evaluate(mode='val')
                self.early_stopping(val_acc, val_loss, self.model)
                # evaluate() switched to eval mode; restore train/freeze state
                self.activate_train_mode()
                if self.early_stopping.early_stop:
                    break

    @torch.no_grad()
    def evaluate(self, mode):
        """Evaluate on the given split.

        Returns:
            mode='val': (accuracy_percent, summed loss).
            mode='test': (y_true labels, y_pred summed logits per class).
        """
        self.model.eval()
        correct, total, eval_loss = 0, 0, 0
        y_true = np.zeros(0)
        y_pred = np.zeros((0, self.cfg['classifier']['num_classes']))

        for i, (inputs, labels) in enumerate(self.loader_dict[mode]):
            loss = 0
            total += labels.size(0)
            inputs = inputs.to(self.device)
            labels = labels.view(-1).to(self.device)

            outputs = self.model(inputs)
            outputs_sum = torch.zeros_like(outputs[0])

            for j in range(len(outputs)):
                loss += self.criterion(outputs[j], labels)
                outputs_sum += outputs[j]

            eval_loss += loss.item()
            predicted = torch.argmax(outputs_sum, 1)
            correct += predicted.eq(labels).sum().item()

            y_true = np.concatenate([y_true, labels.cpu().numpy()])
            y_pred = np.concatenate([y_pred, outputs_sum.cpu().numpy()])

            progress_bar(i, len(self.loader_dict[mode]), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (eval_loss / (i + 1), 100. * correct / total, correct, total))

        if mode == 'val':
            return 100. * correct / total, eval_loss
        elif mode == 'test':
            return y_true, y_pred
        else:
            raise NotImplementedError

    def run(self):
        """Train until early stopping or max_epochs, reload the best
        checkpoint, and return test-set (y_true, y_pred)."""
        for epoch in range(self.tp_cfg['max_epochs']):
            print('\n[INFO] Fold: {}, Epoch: {}'.format(self.fold, epoch))
            self.train_one_epoch(epoch)
            if self.early_stopping.early_stop:
                break

        # reload the checkpoint saved by early stopping (best val score)
        self.model.load_state_dict(torch.load(os.path.join(self.ckpt_path, self.ckpt_name)))
        y_true, y_pred = self.evaluate(mode='test')
        print('')

        return y_true, y_pred
def main():
    """Entry point: train all cross-validation folds and summarize results.

    Parses CLI arguments, pins the visible GPUs, seeds the RNGs for
    reproducibility, then trains one fold at a time, accumulating test
    labels/predictions across folds for the running summary.
    """
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning)

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--gpu', type=str, default="0", help='gpu id')
    parser.add_argument('--config', type=str, help='config file path')
    args = parser.parse_args()

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # For reproducibility
    set_random_seed(args.seed, use_cuda=True)

    with open(args.config) as config_file:
        config = json.load(config_file)
    # The experiment name is derived from the config filename.
    config['name'] = os.path.basename(args.config).replace('.json', '')

    Y_true = np.zeros(0)
    Y_pred = np.zeros((0, config['classifier']['num_classes']))
    for fold in range(1, config['dataset']['num_splits'] + 1):
        fold_trainer = OneFoldTrainer(args, fold, config)
        fold_true, fold_pred = fold_trainer.run()
        Y_true = np.concatenate([Y_true, fold_true])
        Y_pred = np.concatenate([Y_pred, fold_pred])
        summarize_result(config, fold, Y_true, Y_pred)


if __name__ == "__main__":
    main()
| 8,872 | 40.853774 | 170 | py |
SleePyCo | SleePyCo-main/test.py | import os
import json
import argparse
import warnings
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from utils import *
from loader import EEGDataLoader
from train_mtcl import OneFoldTrainer
from models.main_model import MainModel
class OneFoldEvaluator(OneFoldTrainer):
    """Evaluate one cross-validation fold from a saved checkpoint.

    Reuses ``OneFoldTrainer.evaluate`` but skips all training setup: only the
    test dataloader, the model, the loss criterion, and the checkpoint path
    are prepared.
    """

    def __init__(self, args, fold, config):
        # NOTE: deliberately does not call super().__init__(), so no
        # training-only state (optimizer, early stopping, ...) is created.
        self.args = args
        self.fold = fold

        self.cfg = config
        self.ds_cfg = config['dataset']
        self.tp_cfg = config['training_params']

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('[INFO] Config name: {}'.format(config['name']))

        self.model = self.build_model()
        self.loader_dict = self.build_dataloader()
        self.criterion = nn.CrossEntropyLoss()

        self.ckpt_path = os.path.join('checkpoints', config['name'])
        self.ckpt_name = 'ckpt_fold-{0:02d}.pth'.format(self.fold)

    def build_model(self):
        """Build the model, wrapped in DataParallel over the requested GPUs."""
        model = MainModel(self.cfg)
        n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print('[INFO] Number of params of model: ', n_params)
        gpu_ids = list(range(len(self.args.gpu.split(","))))
        model = torch.nn.DataParallel(model, device_ids=gpu_ids)
        model.to(self.device)
        print('[INFO] Model prepared, Device used: {} GPU:{}'.format(self.device, self.args.gpu))
        return model

    def build_dataloader(self):
        """Create the test-split dataloader, keyed as {'test': loader}."""
        dataset = EEGDataLoader(self.cfg, self.fold, set='test')
        n_workers = 4 * len(self.args.gpu.split(","))
        loader = DataLoader(dataset=dataset, batch_size=self.tp_cfg['batch_size'],
                            shuffle=False, num_workers=n_workers, pin_memory=True)
        print('[INFO] Dataloader prepared')
        return {'test': loader}

    def run(self):
        """Load this fold's checkpoint and return (y_true, y_pred) on the test set."""
        print('\n[INFO] Fold: {}'.format(self.fold))
        self.model.load_state_dict(torch.load(os.path.join(self.ckpt_path, self.ckpt_name)))
        y_true, y_pred = self.evaluate(mode='test')
        print('')
        return y_true, y_pred
def main():
    """Entry point: evaluate saved checkpoints for every fold and summarize.

    Mirrors the training script's entry point but instantiates
    ``OneFoldEvaluator`` (checkpoint-only) instead of ``OneFoldTrainer``,
    accumulating test labels/predictions across folds for the summary.
    """
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=UserWarning)

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--gpu', type=str, default="0", help='gpu id')
    parser.add_argument('--config', type=str, help='config file path')
    args = parser.parse_args()

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # Fix: --seed was parsed but never applied. Seed the RNGs for
    # reproducibility, consistent with the training entry point.
    set_random_seed(args.seed, use_cuda=True)

    with open(args.config) as config_file:
        config = json.load(config_file)
    # The experiment name is derived from the config filename.
    config['name'] = os.path.basename(args.config).replace('.json', '')

    Y_true = np.zeros(0)
    Y_pred = np.zeros((0, config['classifier']['num_classes']))
    for fold in range(1, config['dataset']['num_splits'] + 1):
        evaluator = OneFoldEvaluator(args, fold, config)
        y_true, y_pred = evaluator.run()
        Y_true = np.concatenate([Y_true, y_true])
        Y_pred = np.concatenate([Y_pred, y_pred])
        summarize_result(config, fold, Y_true, Y_pred)


if __name__ == "__main__":
    main()
| 3,233 | 34.933333 | 169 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.