repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
MST | MST-main/real/test_code/dataset.py | import torch.utils.data as tud
import random
import torch
import numpy as np
import scipy.io as sio
class dataset(tud.Dataset):
    """Real-CASSI dataset: crops hyperspectral cubes and simulates noisy
    coded measurements with a real coded-aperture mask.

    Training items draw a random scene from CAVE or KAIST, take a random
    spatial crop, apply flip/rotation augmentation, then synthesize the
    dispersed, shot-noise-corrupted 2-D measurement. Each item returns
    (input, label, mask_3d, mask_3d_shift, mask_3d_shift_s).
    """

    def __init__(self, opt, CAVE, KAIST):
        super(dataset, self).__init__()
        self.isTrain = opt.isTrain
        self.size = opt.size  # spatial crop size in pixels
        # self.path = opt.data_path
        if self.isTrain == True:
            self.num = opt.trainset_num
        else:
            self.num = opt.testset_num
        self.CAVE = CAVE    # hyperspectral cube stack, indexed [:, :, :, scene]
        self.KAIST = KAIST  # hyperspectral cube stack, indexed [:, :, :, scene]
        ## load mask
        data = sio.loadmat(opt.mask_path)
        self.mask = data['mask']
        # Replicate the 2-D coded aperture across the 28 spectral bands.
        self.mask_3d = np.tile(self.mask[:, :, np.newaxis], (1, 1, 28))

    def __getitem__(self, index):
        if self.isTrain == True:
            # index1 = 0
            index1 = random.randint(0, 29)  # random scene id (assumes >= 30 scenes)
            d = random.randint(0, 1)        # randomly pick the source dataset
            if d == 0:
                hsi = self.CAVE[:,:,:,index1]
            else:
                hsi = self.KAIST[:, :, :, index1]
        else:
            index1 = index
            # NOTE(review): self.HSI is never assigned in __init__, so this
            # test-mode branch raises AttributeError — confirm which cube
            # attribute was intended here.
            hsi = self.HSI[:, :, :, index1]
        shape = np.shape(hsi)
        # Random spatial crop of the ground-truth cube.
        px = random.randint(0, shape[0] - self.size)
        py = random.randint(0, shape[1] - self.size)
        label = hsi[px:px + self.size:1, py:py + self.size:1, :]
        # while np.max(label)==0:
        #     px = random.randint(0, shape[0] - self.size)
        #     py = random.randint(0, shape[1] - self.size)
        #     label = hsi[px:px + self.size:1, py:py + self.size:1, :]
        # print(np.min(), np.max())
        # Random crop of the real mask (assumes the mask is at least 660x660).
        pxm = random.randint(0, 660 - self.size)
        pym = random.randint(0, 660 - self.size)
        mask_3d = self.mask_3d[pxm:pxm + self.size:1, pym:pym + self.size:1, :]
        # Disperse the mask: band t is rolled right by 2*t pixels.
        mask_3d_shift = np.zeros((self.size, self.size + (28 - 1) * 2, 28))
        mask_3d_shift[:, 0:self.size, :] = mask_3d
        for t in range(28):
            mask_3d_shift[:, :, t] = np.roll(mask_3d_shift[:, :, t], 2 * t, axis=1)
        mask_3d_shift_s = np.sum(mask_3d_shift ** 2, axis=2, keepdims=False)
        mask_3d_shift_s[mask_3d_shift_s == 0] = 1  # avoid divide-by-zero downstream
        if self.isTrain == True:
            rotTimes = random.randint(0, 3)
            vFlip = random.randint(0, 1)
            hFlip = random.randint(0, 1)
            # Random rotation
            for j in range(rotTimes):
                label = np.rot90(label)
            # Random vertical Flip
            for j in range(vFlip):
                label = label[:, ::-1, :].copy()
            # Random horizontal Flip
            for j in range(hFlip):
                label = label[::-1, :, :].copy()
        # Apply the mask, shift each band by its dispersion, and sum bands
        # to form the 2-D coded measurement.
        temp = mask_3d * label
        temp_shift = np.zeros((self.size, self.size + (28 - 1) * 2, 28))
        temp_shift[:, 0:self.size, :] = temp
        for t in range(28):
            temp_shift[:, :, t] = np.roll(temp_shift[:, :, t], 2 * t, axis=1)
        meas = np.sum(temp_shift, axis=2)
        input = meas / 28 * 2 * 1.2  # NOTE: shadows the builtin `input`
        # Shot-noise simulation: QE = quantum efficiency, bit = full-well count.
        QE, bit = 0.4, 2048
        input = np.random.binomial((input * bit / QE).astype(int), QE)
        input = np.float32(input) / np.float32(bit)
        label = torch.FloatTensor(label.copy()).permute(2,0,1)
        input = torch.FloatTensor(input.copy())
        mask_3d_shift = torch.FloatTensor(mask_3d_shift.copy()).permute(2,0,1)
        mask_3d_shift_s = torch.FloatTensor(mask_3d_shift_s.copy())
        return input, label, mask_3d, mask_3d_shift, mask_3d_shift_s

    def __len__(self):
        return self.num
| 3,450 | 34.57732 | 83 | py |
MST | MST-main/real/test_code/architecture/MST_Plus_Plus.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    # Public wrapper: in-place truncated-normal init (timm-style), returns tensor.
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` in place with variance scaling (He/Glorot family).

    Args:
        tensor: weight tensor to initialize (modified in place).
        scale: variance multiplier.
        mode: 'fan_in', 'fan_out' or 'fan_avg' — which fan normalizes the variance.
        distribution: 'truncated_normal', 'normal' or 'uniform'.

    Raises:
        ValueError: if `mode` or `distribution` is not one of the values above.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Bug fix: an unrecognized mode previously left `denom` unbound and
        # crashed later with NameError; fail fast with a clear message.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # Constant rescales std so the [-2, 2]-truncated normal has unit variance.
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    # LeCun-normal init: truncated normal with variance 1/fan_in (in place).
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Wrapper that LayerNorm-normalizes the input before the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        # Normalize over the last `dim` features, then delegate.
        normed = self.norm(x)
        return self.fn(normed, *args, **kwargs)
class GELU(nn.Module):
    """Module wrapper around the functional GELU activation."""

    def forward(self, inp):
        return F.gelu(inp)
def conv(in_channels, out_channels, kernel_size, bias=False, padding = 1, stride = 1):
    # Build a 2-D conv with "same" padding for odd kernels.
    # NOTE(review): the `padding` parameter is ignored — padding is always
    # kernel_size//2; confirm no caller relies on an explicit padding value.
    return nn.Conv2d(
        in_channels, out_channels, kernel_size,
        padding=(kernel_size//2), bias=bias, stride=stride)
def shift_back(inputs, step=2):  # input [bs,28,256,310] output [bs, 28, 256, 256]
    """Undo the per-band dispersion shift in place and crop to square.

    Band i is moved left by int(step*i) pixels; the effective step is scaled
    down for spatially downsampled inputs (relative to a 256-row reference).
    Mutates `inputs` and returns a view cropped to the first `row` columns.
    """
    _, n_bands, height, _ = inputs.shape
    shrink = 256 // height
    stride = float(step) / float(shrink * shrink)
    out_col = height
    for band in range(n_bands):
        offset = int(stride * band)
        inputs[:, band, :, :out_col] = \
            inputs[:, band, :, offset:offset + out_col]
    return inputs[:, :, :, :out_col]
class MS_MSA(nn.Module):
    """Spectral-wise Multi-head Self-Attention.

    Attention is computed across the channel (spectral) axis rather than the
    spatial one, plus a depthwise-conv positional branch applied to V.
    """

    def __init__(
            self,
            dim,
            dim_head,
            heads,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learnable per-head temperature applied to the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        # Depthwise 3x3 conv branch serving as a positional embedding on V.
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        self.dim = dim

    def forward(self, x_in):
        """
        x_in: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        # Flatten spatial dims: tokens are pixels, features are channels.
        x = x_in.reshape(b,h*w,c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                      (q_inp, k_inp, v_inp))
        v = v  # no-op (kept from original)
        # q: b,heads,hw,c
        # Transpose so attention runs over the channel axis (spectral-wise).
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        # L2-normalize along the spatial axis so logits are cosine similarities.
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1)) # A = K^T*Q
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v # b,heads,d,hw
        x = x.permute(0, 3, 1, 2) # Transpose
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        # Positional branch operates on the unsplit V in channels-first layout.
        out_p = self.pos_emb(v_inp.reshape(b,h,w,c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck FFN: 1x1 expand -> depthwise 3x3 -> 1x1 project."""

    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        # Convs expect channels-first; permute in and back out.
        out = self.net(x.permute(0, 3, 1, 2))
        return out.permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """Stack of `num_blocks` residual (MS_MSA -> pre-norm FeedForward) pairs."""

    def __init__(
            self,
            dim,
            dim_head,
            heads,
            num_blocks,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            self.blocks.append(nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim))
            ]))

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        x = x.permute(0, 2, 3, 1)   # to channels-last for attention/FFN
        for (attn, ff) in self.blocks:
            x = attn(x) + x
            x = ff(x) + x
        out = x.permute(0, 3, 1, 2) # back to channels-first
        return out
class MST(nn.Module):
    """U-shaped Mask-guided Spectral Transformer: encoder, bottleneck, decoder
    built from MSAB blocks, with a global input-to-output residual."""

    def __init__(self, in_dim=28, out_dim=28, dim=28, stage=2, num_blocks=[2,4,4]):
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage
        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)
        # Encoder: MSAB then strided-conv 2x downsampling at each scale;
        # channel width doubles per scale.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])
        # Decoder: transposed-conv upsampling, 1x1 fusion with the encoder skip,
        # then MSAB; channel width halves per scale.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)
        #### activation function
        # NOTE(review): self.lrelu is defined but not used in forward().
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights; unit affine for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        # Embedding
        fea = self.embedding(x)
        # Encoder (loop variable intentionally shadows the MSAB class name).
        fea_encoder = []
        for (MSAB, FeaDownSample) in self.encoder_layers:
            fea = MSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
        # Bottleneck
        fea = self.bottleneck(fea)
        # Decoder
        for i, (FeaUpSample, Fution, LeWinBlcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            # Fuse the upsampled features with the matching-scale encoder skip.
            fea = Fution(torch.cat([fea, fea_encoder[self.stage-1-i]], dim=1))
            fea = LeWinBlcok(fea)
        # Mapping + global residual
        out = self.mapping(fea) + x
        return out
class MST_Plus_Plus(nn.Module):
    """Cascade of `stage` MST sub-networks with a global residual, operating
    on a 28-band cube recovered from the 2-D coded measurement."""

    def __init__(self, in_channels=28, out_channels=28, n_feat=28, stage=3):
        super(MST_Plus_Plus, self).__init__()
        self.stage = stage
        self.conv_in = nn.Conv2d(in_channels, n_feat, kernel_size=3, padding=(3 - 1) // 2,bias=False)
        modules_body = [MST(dim=n_feat, stage=2, num_blocks=[1,1,1]) for _ in range(stage)]
        # 1x1 conv fusing the band-split measurement into the initial estimate.
        self.fution = nn.Conv2d(28, 28, 1, padding=0, bias=True)
        self.body = nn.Sequential(*modules_body)
        self.conv_out = nn.Conv2d(n_feat, out_channels, kernel_size=3, padding=(3 - 1) // 2,bias=False)

    def initial_x(self, y):
        """Split the 2-D measurement into a 28-band square cube by undoing the
        per-band 2-pixel dispersion, then fuse with a 1x1 conv.

        :param y: [b,256,310] coded measurement
        :return: x: [b,28,256,256]
        """
        nC, step = 28, 2
        bs, row, col = y.shape
        # Bug fix: was torch.zeros(...).cuda().float(), which crashed on
        # CPU-only runs; allocate on the same device as the input instead.
        x = torch.zeros(bs, nC, row, row, device=y.device, dtype=torch.float32)
        for i in range(nC):
            # Band i reads a row x row window offset by step*i columns.
            x[:, i, :, :] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        x = self.fution(x)
        return x

    def forward(self, y, input_mask=None):
        """
        y: [b,256,310] measurement; input_mask is unused.
        return out:[b,c,h,w]
        """
        x = self.initial_x(y)
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad so both spatial dims are multiples of 8 (U-Net depth).
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        x = self.conv_in(x)
        h = self.body(x)
        h = self.conv_out(h)
        h += x  # global residual
        return h[:, :, :h_inp, :w_inp]
| 10,153 | 30.534161 | 116 | py |
MST | MST-main/real/test_code/architecture/DGSMP.py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class Resblock(nn.Module):
    """Two stacked residual units, each conv-ReLU-conv with a skip connection."""

    def __init__(self, HBW):
        super(Resblock, self).__init__()
        self.block1 = nn.Sequential(nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1),
                                    nn.ReLU(),
                                    nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1))
        self.block2 = nn.Sequential(nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1),
                                    nn.ReLU(),
                                    nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1))

    def forward(self, x):
        # First residual unit.
        out = x + self.block1(x)
        # Second residual unit.
        out = out + self.block2(out)
        return out
class Encoding(nn.Module):
    """U-Net encoder: five double-conv stages (32->32->64->64->128->128
    channels) separated by 2x average pooling; returns all scale features."""

    def __init__(self):
        super(Encoding, self).__init__()
        self.E1 = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.E2 = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.E3 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.E4 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.E5 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )

    def forward(self, x):
        ## encoding blocks: each stage runs on a 2x-downsampled input.
        E1 = self.E1(x)
        E2 = self.E2(F.avg_pool2d(E1, kernel_size=2, stride=2))
        E3 = self.E3(F.avg_pool2d(E2, kernel_size=2, stride=2))
        E4 = self.E4(F.avg_pool2d(E3, kernel_size=2, stride=2))
        E5 = self.E5(F.avg_pool2d(E4, kernel_size=2, stride=2))
        return E1, E2, E3, E4, E5
class Decoding(nn.Module):
    """U-Net decoder that predicts the regularization weights `w` and three
    separable 3-D filter banks (f1, f2, f3) from the encoder features."""

    def __init__(self, Ch=28, kernel_size=[7,7,7]):
        super(Decoding, self).__init__()
        self.upMode = 'bilinear'
        self.Ch = Ch
        # One filter tap set per band per kernel position.
        out_channel1 = Ch * kernel_size[0]
        out_channel2 = Ch * kernel_size[1]
        out_channel3 = Ch * kernel_size[2]
        self.D1 = nn.Sequential(nn.Conv2d(in_channels=128+128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D2 = nn.Sequential(nn.Conv2d(in_channels=128+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D3 = nn.Sequential(nn.Conv2d(in_channels=64+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D4 = nn.Sequential(nn.Conv2d(in_channels=64+32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        # Head predicting the per-pixel, per-band regularization weights.
        self.w_generator = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=32, out_channels=self.Ch, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=self.Ch, out_channels=self.Ch, kernel_size=1, stride=1, padding=0)
                                         )
        # Heads predicting the three directional filter banks.
        self.filter_g_1 = nn.Sequential(nn.Conv2d(64 + 32, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, 1, 1, 0)
                                        )
        self.filter_g_2 = nn.Sequential(nn.Conv2d(64 + 32, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, 1, 1, 0)
                                        )
        self.filter_g_3 = nn.Sequential(nn.Conv2d(64 + 32, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, 1, 1, 0)
                                        )

    def forward(self, E1, E2, E3, E4, E5):
        ## decoding blocks: upsample and fuse with the matching encoder feature.
        D1 = self.D1(torch.cat([E4, F.interpolate(E5, scale_factor=2, mode=self.upMode)], dim=1))
        D2 = self.D2(torch.cat([E3, F.interpolate(D1, scale_factor=2, mode=self.upMode)], dim=1))
        D3 = self.D3(torch.cat([E2, F.interpolate(D2, scale_factor=2, mode=self.upMode)], dim=1))
        D4 = self.D4(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        ## estimating the regularization parameters w
        w = self.w_generator(D4)
        ## generate 3D filters (all three heads share the same fused input).
        f1 = self.filter_g_1(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f2 = self.filter_g_2(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f3 = self.filter_g_3(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        return w, f1, f2, f3
class HSI_CS(nn.Module):
    """DGSMP unrolled CASSI reconstruction.

    Each of `stages` iterations computes a data-fidelity gradient with a
    learned measurement operator (A / AT), predicts per-pixel weights and
    three separable 3-D filters with an encoder-decoder, estimates local
    means by directional filtering, and takes a gradient step.
    """

    def __init__(self, Ch, stages):
        super(HSI_CS, self).__init__()
        self.Ch = Ch    # number of spectral bands
        self.s = stages # number of unrolled iterations (dense convs cover 4)
        self.filter_size = [7,7,7] ## 3D filter size
        ## The modules for learning the measurement matrix A and A^T
        self.AT = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                                Resblock(64), Resblock(64),
                                nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        self.A = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                               Resblock(64), Resblock(64),
                               nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        ## Encoding blocks
        self.Encoding = Encoding()
        ## Decoding blocks
        self.Decoding = Decoding(Ch=self.Ch, kernel_size=self.filter_size)
        ## Dense connection: 1x1 convs fuse the features of all prior stages.
        self.conv = nn.Conv2d(Ch, 32, kernel_size=3, stride=1, padding=1)
        self.Den_con1 = nn.Conv2d(32 , 32, kernel_size=1, stride=1, padding=0)
        self.Den_con2 = nn.Conv2d(32 * 2, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con3 = nn.Conv2d(32 * 3, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con4 = nn.Conv2d(32 * 4, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con5 = nn.Conv2d(32 * 5, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con6 = nn.Conv2d(32 * 6, 32, kernel_size=1, stride=1, padding=0)
        # Learnable per-stage step sizes.
        self.delta_0 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_1 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_2 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_3 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_4 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_5 = Parameter(torch.ones(1), requires_grad=True)
        self._initialize_weights()
        torch.nn.init.normal_(self.delta_0, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_1, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_2, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_3, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_4, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_5, mean=0.1, std=0.01)

    def _initialize_weights(self):
        # Xavier-normal weights, zero biases for all conv/linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)

    def Filtering_1(self, cube, core):
        # Horizontal (width-axis) filtering with per-pixel 1-D kernels.
        # NOTE(review): core.mul_ mutates the predicted filter tensor in place.
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [self.filter_size[0] // 2, self.filter_size[0] // 2, 0, 0], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[0]):
            img_stack.append(cube_pad[:, :, :, i:i + width])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out

    def Filtering_2(self, cube, core):
        # Vertical (height-axis) filtering with per-pixel 1-D kernels.
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [0, 0, self.filter_size[1] // 2, self.filter_size[1] // 2], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[1]):
            img_stack.append(cube_pad[:, :, i:i + height, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out

    def Filtering_3(self, cube, core):
        # Spectral (band-axis) filtering with per-pixel 1-D kernels;
        # zero-pad along the band dimension.
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube.unsqueeze(0).unsqueeze(0), pad=(0, 0, 0, 0, self.filter_size[2] // 2, self.filter_size[2] // 2)).squeeze(0).squeeze(0)
        img_stack = []
        for i in range(self.filter_size[2]):
            img_stack.append(cube_pad[:, i:i + bandwidth, :, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out

    def recon(self, res1, res2, Xt, i):
        """Gradient step with the stage-i learnable step size delta_i."""
        if i == 0 :
            delta = self.delta_0
        elif i == 1:
            delta = self.delta_1
        elif i == 2:
            delta = self.delta_2
        elif i == 3:
            delta = self.delta_3
        # elif i == 4:
        #     delta = self.delta_4
        # elif i == 5:
        #     delta = self.delta_5
        Xt = Xt - 2 * delta * (res1 + res2)
        return Xt

    def y2x(self, y):
        ## Spilt operator: undo dispersion by reading a shifted window per band.
        # NOTE(review): allocates on .cuda() — hard-codes GPU execution.
        sz = y.size()
        if len(sz) == 3:
            y = y.unsqueeze(1)
            bs = sz[0]
        sz = y.size()
        x = torch.zeros([bs, 28, sz[2], sz[2]]).cuda()
        for t in range(28):
            temp = y[:, :, :, 0 + 2 * t : sz[2] + 2 * t]
            x[:, t, :, :] = temp.squeeze(1)
        return x

    def x2y(self, x):
        ## Shift and Sum operator: forward CASSI projection of the cube.
        # NOTE(review): allocates on .cuda() — hard-codes GPU execution.
        sz = x.size()
        if len(sz) == 3:
            x = x.unsqueeze(0).unsqueeze(0)
            bs = 1
        else:
            bs = sz[0]
        sz = x.size()
        y = torch.zeros([bs, sz[2], sz[2]+2*27]).cuda()
        for t in range(28):
            y[:, :, 0 + 2 * t : sz[2] + 2 * t] = x[:, t, :, :] + y[:, :, 0 + 2 * t : sz[2] + 2 * t]
        return y

    def forward(self, y, input_mask=None):
        ## The measurements y is split into a 3D data cube of size H × W × L to initialize x.
        y = y / 28 * 2  # match the training-time measurement scaling
        Xt = self.y2x(y)
        feature_list = []
        for i in range(0, self.s):
            AXt = self.x2y(self.A(Xt)) # y = Ax
            Res1 = self.AT(self.y2x(AXt - y)) # A^T * (Ax − y)
            fea = self.conv(Xt)
            # Dense fusion of the features from all stages so far.
            if i == 0:
                feature_list.append(fea)
                fufea = self.Den_con1(fea)
            elif i == 1:
                feature_list.append(fea)
                fufea = self.Den_con2(torch.cat(feature_list, 1))
            elif i == 2:
                feature_list.append(fea)
                fufea = self.Den_con3(torch.cat(feature_list, 1))
            elif i == 3:
                feature_list.append(fea)
                fufea = self.Den_con4(torch.cat(feature_list, 1))
            # elif i == 4:
            #     feature_list.append(fea)
            #     fufea = self.Den_con5(torch.cat(feature_list, 1))
            # elif i == 5:
            #     feature_list.append(fea)
            #     fufea = self.Den_con6(torch.cat(feature_list, 1))
            E1, E2, E3, E4, E5 = self.Encoding(fufea)
            W, f1, f2, f3 = self.Decoding(E1, E2, E3, E4, E5)
            # Normalize each predicted 1-D filter over its tap dimension.
            batch_size, p, height, width = f1.size()
            f1 = F.normalize(f1.view(batch_size, self.filter_size[0], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f2.size()
            f2 = F.normalize(f2.view(batch_size, self.filter_size[1], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f3.size()
            f3 = F.normalize(f3.view(batch_size, self.filter_size[2], self.Ch, height, width),dim=1)
            ## Estimating the local means U by separable 3-D filtering
            u1 = self.Filtering_1(Xt, f1)
            u2 = self.Filtering_2(u1, f2)
            U = self.Filtering_3(u2, f3)
            ## w * (x − u)
            Res2 = (Xt - U).mul(W)
            ## Reconstructing HSIs
            Xt = self.recon(Res1, Res2, Xt, i)
        return Xt
| 15,283 | 46.318885 | 148 | py |
MST | MST-main/real/test_code/architecture/DAUHST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch import einsum
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    # Public wrapper: in-place truncated-normal init (timm-style), returns tensor.
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class PreNorm(nn.Module):
    """Pre-normalization wrapper: LayerNorm the input, then call `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """nn.Module adapter for the functional GELU activation."""

    def forward(self, tensor):
        return F.gelu(tensor)
class HS_MSA(nn.Module):
    """Half-Shuffle Multi-head Self-Attention.

    Either pure window attention (`only_local_branch`), or the heads are
    split in half: one half attends within 8x8 windows (local), the other
    attends across windows at the same in-window position (non-local).
    """

    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=28,
            heads=8,
            only_local_branch=False
    ):
        super().__init__()
        self.dim = dim
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.window_size = window_size
        self.only_local_branch = only_local_branch

        # position embedding
        if only_local_branch:
            seq_l = window_size[0] * window_size[1]
            self.pos_emb = nn.Parameter(torch.Tensor(1, heads, seq_l, seq_l))
            trunc_normal_(self.pos_emb)
        else:
            seq_l1 = window_size[0] * window_size[1]
            self.pos_emb1 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l1, seq_l1))
            # NOTE(review): the non-local embedding size is derived from a
            # hard-coded 256 x 320 feature map divided by `heads` — inputs of
            # other sizes will not match pos_emb2.
            h,w = 256//self.heads,320//self.heads
            seq_l2 = h*w//seq_l1
            self.pos_emb2 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l2, seq_l2))
            trunc_normal_(self.pos_emb1)
            trunc_normal_(self.pos_emb2)

        inner_dim = dim_head * heads
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim)

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x.shape
        w_size = self.window_size
        assert h % w_size[0] == 0 and w % w_size[1] == 0, 'fmap dimensions must be divisible by the window size'
        if self.only_local_branch:
            # Plain window attention: each 8x8 window is a token sequence.
            x_inp = rearrange(x, 'b (h b0) (w b1) c -> (b h w) (b0 b1) c', b0=w_size[0], b1=w_size[1])
            q = self.to_q(x_inp)
            k, v = self.to_kv(x_inp).chunk(2, dim=-1)
            q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), (q, k, v))
            q *= self.scale
            sim = einsum('b h i d, b h j d -> b h i j', q, k)
            sim = sim + self.pos_emb
            attn = sim.softmax(dim=-1)
            out = einsum('b h i j, b h j d -> b h i d', attn, v)
            out = rearrange(out, 'b h n d -> b n (h d)')
            out = self.to_out(out)
            out = rearrange(out, '(b h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        else:
            q = self.to_q(x)
            k, v = self.to_kv(x).chunk(2, dim=-1)
            # Split channels between the local and non-local branches.
            q1, q2 = q[:,:,:,:c//2], q[:,:,:,c//2:]
            k1, k2 = k[:,:,:,:c//2], k[:,:,:,c//2:]
            v1, v2 = v[:,:,:,:c//2], v[:,:,:,c//2:]

            # local branch: attention within each window.
            q1, k1, v1 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                                 b0=w_size[0], b1=w_size[1]), (q1, k1, v1))
            q1, k1, v1 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q1, k1, v1))
            q1 *= self.scale
            sim1 = einsum('b n h i d, b n h j d -> b n h i j', q1, k1)
            sim1 = sim1 + self.pos_emb1
            attn1 = sim1.softmax(dim=-1)
            out1 = einsum('b n h i j, b n h j d -> b n h i d', attn1, v1)
            out1 = rearrange(out1, 'b n h mm d -> b n mm (h d)')

            # non-local branch: swap window/position axes so attention runs
            # across windows at the same in-window position.
            q2, k2, v2 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                                 b0=w_size[0], b1=w_size[1]), (q2, k2, v2))
            q2, k2, v2 = map(lambda t: t.permute(0, 2, 1, 3), (q2.clone(), k2.clone(), v2.clone()))
            q2, k2, v2 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q2, k2, v2))
            q2 *= self.scale
            sim2 = einsum('b n h i d, b n h j d -> b n h i j', q2, k2)
            sim2 = sim2 + self.pos_emb2
            attn2 = sim2.softmax(dim=-1)
            out2 = einsum('b n h i j, b n h j d -> b n h i d', attn2, v2)
            out2 = rearrange(out2, 'b n h mm d -> b n mm (h d)')
            out2 = out2.permute(0, 2, 1, 3)

            # Concatenate the two half-head outputs and project back to dim.
            out = torch.cat([out1,out2],dim=-1).contiguous()
            out = self.to_out(out)
            out = rearrange(out, 'b (h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        return out
class HSAB(nn.Module):
    """Stack of `num_blocks` residual (pre-norm HS_MSA -> pre-norm FFN) pairs.
    Window-only attention is used when heads == 1."""

    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            self.blocks.append(nn.ModuleList([
                PreNorm(dim, HS_MSA(dim=dim, window_size=window_size, dim_head=dim_head, heads=heads, only_local_branch=(heads==1))),
                PreNorm(dim, FeedForward(dim=dim))
            ]))

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        x = x.permute(0, 2, 3, 1)   # to channels-last for attention/FFN
        for (attn, ff) in self.blocks:
            x = attn(x) + x
            x = ff(x) + x
        out = x.permute(0, 3, 1, 2) # back to channels-first
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck FFN: 1x1 expand -> depthwise 3x3 -> 1x1 project."""

    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        # Convs expect channels-first; permute in and back out.
        out = self.net(x.permute(0, 3, 1, 2))
        return out.permute(0, 2, 3, 1)
class HST(nn.Module):
    """U-shaped Half-Shuffle Transformer denoiser used inside DAUHST.

    Encoder/decoder built from HSAB blocks; the global residual adds only the
    first 28 input channels (the extra channel carries the noise-level map).
    """

    def __init__(self, in_dim=28, out_dim=28, dim=28, num_blocks=[1,1,1]):
        super(HST, self).__init__()
        self.dim = dim
        self.scales = len(num_blocks)

        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)

        # Encoder: HSAB then strided-conv 2x downsampling per scale.
        self.encoder_layers = nn.ModuleList([])
        dim_scale = dim
        for i in range(self.scales-1):
            self.encoder_layers.append(nn.ModuleList([
                HSAB(dim=dim_scale, num_blocks=num_blocks[i], dim_head=dim, heads=dim_scale // dim),
                nn.Conv2d(dim_scale, dim_scale * 2, 4, 2, 1, bias=False),
            ]))
            dim_scale *= 2

        # Bottleneck
        self.bottleneck = HSAB(dim=dim_scale, dim_head=dim, heads=dim_scale // dim, num_blocks=num_blocks[-1])

        # Decoder: transposed-conv upsampling, 1x1 skip fusion, HSAB.
        self.decoder_layers = nn.ModuleList([])
        for i in range(self.scales-1):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_scale, dim_scale // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_scale, dim_scale // 2, 1, 1, bias=False),
                HSAB(dim=dim_scale // 2, num_blocks=num_blocks[self.scales - 2 - i], dim_head=dim,
                     heads=(dim_scale // 2) // dim),
            ]))
            dim_scale //= 2

        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)

        #### activation function
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights; unit affine for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad both spatial dims to multiples of 16.
        hb, wb = 16, 16
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')

        # Embedding
        fea = self.embedding(x)
        # Keep only the 28 image bands for the global residual (drops the
        # appended noise-level channel).
        x = x[:,:28,:,:]

        # Encoder (loop variable intentionally shadows the HSAB class name).
        fea_encoder = []
        for (HSAB, FeaDownSample) in self.encoder_layers:
            fea = HSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)

        # Bottleneck
        fea = self.bottleneck(fea)

        # Decoder
        for i, (FeaUpSample, Fution, HSAB) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.scales-2-i]], dim=1))
            fea = HSAB(fea)

        # Mapping + global residual, cropped back to the input size.
        out = self.mapping(fea) + x
        return out[:, :, :h_inp, :w_inp]
def A(x, Phi):
    """Forward CASSI operator: mask-multiply the cube and sum over bands."""
    return (x * Phi).sum(dim=1)
def At(y, Phi):
    """Adjoint CASSI operator: broadcast y across bands and mask-multiply."""
    expanded = torch.unsqueeze(y, 1).repeat(1, Phi.shape[1], 1, 1)
    return expanded * Phi
def shift_3d(inputs, step=2):
    """Roll band i right by step*i pixels along the last axis (in place)."""
    [bs, nC, row, col] = inputs.shape
    for band in range(nC):
        shift = step * band
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=shift, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """Roll band i left by step*i pixels along the last axis (in place);
    inverse of shift_3d."""
    [bs, nC, row, col] = inputs.shape
    for band in range(nC):
        shift = (-1) * step * band
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=shift, dims=2)
    return inputs
class HyPaNet(nn.Module):
    """Hyper-parameter network: predicts per-iteration (alpha, beta) maps
    from the fused input via conv downsampling, global pooling and an MLP."""

    def __init__(self, in_nc=29, out_nc=8, channel=64):
        super(HyPaNet, self).__init__()
        self.fution = nn.Conv2d(in_nc, channel, 1, 1, 0, bias=True)
        self.down_sample = nn.Conv2d(channel, channel, 3, 2, 1, bias=True)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Softplus keeps the predicted parameters positive.
        self.mlp = nn.Sequential(
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, out_nc, 1, padding=0, bias=True),
            nn.Softplus())
        self.relu = nn.ReLU(inplace=True)
        self.out_nc = out_nc

    def forward(self, x):
        """Return (first half, second half) of the out_nc predicted 1x1 maps,
        i.e. the alpha and beta parameter stacks."""
        x = self.down_sample(self.relu(self.fution(x)))
        x = self.avg_pool(x)
        x = self.mlp(x) + 1e-6  # epsilon keeps parameters strictly positive
        return x[:,:self.out_nc//2,:,:], x[:,self.out_nc//2:,:,:]
class DAUHST(nn.Module):
    """Degradation-Aware Unfolding Half-Shuffle Transformer.

    Unrolls `num_iterations` half-quadratic-splitting steps: a closed-form
    data step using the mask Phi and Phi_s = diag(Phi Phi^T), followed by an
    HST denoiser conditioned on the predicted noise level beta.
    """

    def __init__(self, num_iterations=1):
        super(DAUHST, self).__init__()
        self.para_estimator = HyPaNet(in_nc=28, out_nc=num_iterations*2)
        self.fution = nn.Conv2d(56, 28, 1, padding=0, bias=True)
        self.num_iterations = num_iterations
        self.denoisers = nn.ModuleList([])
        for _ in range(num_iterations):
            self.denoisers.append(
                HST(in_dim=29, out_dim=28, dim=28, num_blocks=[1,1,1]),
            )

    def initial(self, y, Phi):
        """
        :param y: [b,256,310]
        :param Phi: [b,28,256,310]
        :return: temp: [b,28,256,310]; alpha: [b, num_iterations]; beta: [b, num_iterations]
        """
        nC, step = 28, 2
        y = y / nC * 2
        bs,row,col = y.shape
        # NOTE(review): allocates on .cuda() — hard-codes GPU execution.
        y_shift = torch.zeros(bs, nC, row, col).cuda().float()
        for i in range(nC):
            y_shift[:, i, :, step * i:step * i + col - (nC - 1) * step] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        # NOTE(review): self.fution is evaluated twice on the same input here;
        # the first result could be reused for the parameter estimator.
        z = self.fution(torch.cat([y_shift, Phi], dim=1))
        alpha, beta = self.para_estimator(self.fution(torch.cat([y_shift, Phi], dim=1)))
        return z, alpha, beta

    def forward(self, y, input_mask=None):
        """
        :param y: [b,256,310]
        :param input_mask: (Phi [b,28,256,310], Phi_s [b,256,310])
        :return: z_crop: [b,28,256,256]
        """
        Phi, Phi_s = input_mask
        z, alphas, betas = self.initial(y, Phi)
        for i in range(self.num_iterations):
            alpha, beta = alphas[:,i,:,:], betas[:,i:i+1,:,:]
            # Data step: closed-form solution using the Sherman-Morrison-style
            # division by (alpha + Phi_s).
            Phi_z = A(z, Phi)
            x = z + At(torch.div(y-Phi_z,alpha+Phi_s), Phi)
            x = shift_back_3d(x)
            # Prior step: HST denoiser conditioned on the noise map beta.
            beta_repeat = beta.repeat(1,1,x.shape[2], x.shape[3])
            z = self.denoisers[i](torch.cat([x, beta_repeat],dim=1))
            if i<self.num_iterations-1:
                z = shift_3d(z)
        return z[:, :, :, 0:256]
| 13,343 | 35.26087 | 133 | py |
MST | MST-main/real/test_code/architecture/CST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
from collections import defaultdict, Counter
import numpy as np
from tqdm import tqdm
import random
def uniform(a, b, shape, device='cuda'):
    """Sample a tensor of `shape` uniformly from the half-open interval [a, b)."""
    span = b - a
    return span * torch.rand(shape, device=device) + a
class AsymmetricTransform:
    """Interface for asymmetric query/key transforms applied before hashing.

    Subclasses override Q (query side) and K (key side).
    """

    def Q(self, *args, **kwargs):
        """Query-side transform; must be overridden."""
        raise NotImplementedError('Query transform not implemented')

    def K(self, *args, **kwargs):
        """Key-side transform; must be overridden."""
        raise NotImplementedError('Key transform not implemented')
class LSH:
    """Base locality-sensitive-hashing scheme."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError('LSH scheme not implemented')

    def compute_hash_agreement(self, q_hash, k_hash):
        """Count, per row, the positions where every hash coordinate agrees."""
        all_coords_match = (q_hash == k_hash).min(dim=-1)[0]
        return all_coords_match.sum(dim=-1)
class XBOXPLUS(AsymmetricTransform):
    """XBOX+ asymmetric transform: pads every vector with an extension
    coordinate so all vectors reach the same norm before hashing."""

    def set_norms(self, x):
        # Per-vector L2 norms and their maximum over the sequence axis;
        # cached for use in X().
        self.x_norms = x.norm(p=2, dim=-1, keepdim=True)
        self.MX = torch.amax(self.x_norms, dim=-2, keepdim=True)

    def X(self, x):
        device = x.device
        # Extension coordinate sqrt(MX^2 - ||x||^2) equalises vector norms.
        ext = torch.sqrt((self.MX**2).to(device) - (self.x_norms**2).to(device))
        # NOTE(review): `.repeat(x.shape[:-1], 1)` passes a torch.Size plus an
        # int to Tensor.repeat — the conventional spelling is
        # `.repeat(*x.shape[:-1], 1)`; verify this runs on the targeted torch
        # version before relying on it.
        zero = torch.tensor(0.0, device=x.device).repeat(x.shape[:-1], 1).unsqueeze(-1)
        return torch.cat((x, ext, zero), -1)
def lsh_clustering(x, n_rounds, r=1):
    """Hash `x` with SALSH and return, per round, the sequence positions
    sorted by hash value (the argsort used to form attention buckets)."""
    hasher = SALSH(n_rounds=n_rounds, dim=x.shape[-1], r=r, device=x.device)
    hashed = hasher(x).reshape((n_rounds,) + x.shape[:-1])
    return hashed.argsort(dim=-1)
class SALSH(LSH):
    """Random-projection LSH: project onto `n_rounds` Gaussian directions,
    add a random offset in [0, r), and rescale by the bucket width `r`."""

    def __init__(self, n_rounds, dim, r, device='cuda'):
        super(SALSH, self).__init__()
        # Projection directions and per-round offsets are drawn once.
        self.alpha = torch.normal(0, 1, (dim, n_rounds), device=device)
        self.beta = uniform(0, r, shape=(1, n_rounds), device=device)
        self.dim = dim
        self.r = r

    def __call__(self, vecs):
        """Map [b, t, dim] vectors to per-round hash values [n_rounds, b, t]."""
        shifted = vecs @ self.alpha + self.beta
        rescaled = shifted / self.r
        return rescaled.permute(2, 0, 1)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """In-place truncated-normal initialisation on [a, b]; returns the tensor."""
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` in place with fan-scaled variance.

    Args:
        tensor: tensor to fill (modified in place).
        scale: variance scale factor.
        mode: 'fan_in', 'fan_out' or 'fan_avg' — which fan to normalize by.
        distribution: 'truncated_normal', 'normal' or 'uniform'.

    Raises:
        ValueError: if `mode` or `distribution` is not one of the above.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Fix: an unknown mode previously fell through and crashed later with
        # an UnboundLocalError on `denom`; fail fast with a clear message.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # 0.8796... is the std of a standard normal truncated to [-2, 2].
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: fan_in variance scaling with a truncated normal."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Applies LayerNorm to the input before delegating to the wrapped `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, *args, **kwargs)
class GELU(nn.Module):
    """Stateless module wrapper around the functional GELU activation."""

    def forward(self, x):
        return F.gelu(x)
def batch_scatter(output, src, dim, index):
    """
    Scatter `src` rows into `output` along the token axis, per batch.

    :param output: [b,n,c] destination values
    :param src: [b,k,c] values to write
    :param dim: scatter dimension after flattening (the token axis, i.e. 1)
    :param index: [b,k] token positions: result[b, index[b,k], c] = src[b,k,c]
    :return: output: [b,n,c]

    Note: the flattening creates a copy, so the caller's `output` tensor is
    typically left unmodified — use the return value.
    """
    b, k, c = src.shape
    # Plain-torch equivalent of the former einops 'b k c -> (b c) k'
    # round-trip: fold channels into the batch so scatter_ runs on 2-D views.
    index = index[:, :, None].expand(-1, -1, c)
    out2d = output.permute(0, 2, 1).reshape(b * c, -1)
    src2d = src.permute(0, 2, 1).reshape(b * c, k)
    idx2d = index.permute(0, 2, 1).reshape(b * c, k)
    out2d.scatter_(dim, idx2d, src2d)
    output = out2d.reshape(b, c, -1).permute(0, 2, 1)
    return output
def batch_gather(x, index, dim):
    """
    Gather token rows from `x` along the token axis, per batch.

    :param x: [b,n,c]
    :param index: [b,k] token positions to pick
    :param dim: gather dimension after flattening (the token axis, i.e. 1)
    :return: output: [b,k,c] with output[b,j,c] = x[b, index[b,j], c]
    """
    b, n, c = x.shape
    k = index.shape[1]
    # Plain-torch equivalent of the former einops 'b n c -> (b c) n'
    # round-trip: fold channels into the batch so gather runs on 2-D views.
    idx = index[:, :, None].expand(-1, -1, c)
    x2d = x.permute(0, 2, 1).reshape(b * c, n)
    idx2d = idx.permute(0, 2, 1).reshape(b * c, k)
    picked = torch.gather(x2d, dim, idx2d)
    return picked.reshape(b, c, k).permute(0, 2, 1)
class FeedForward(nn.Module):
    """Inverted-bottleneck conv FFN: 1x1 expand -> depthwise 3x3 -> 1x1 project."""

    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        # Convs expect channels-first; convert, run the net, convert back.
        channels_first = x.permute(0, 3, 1, 2)
        out = self.net(channels_first)
        return out.permute(0, 2, 3, 1)
class SAH_MSA(nn.Module):
    """Hash-bucketed multi-head self-attention.

    Tokens are grouped by locality-sensitive hashing (the XBOX+ transform
    followed by SALSH random projections) over `n_rounds` independent rounds;
    full attention is computed only within each bucket of
    patch_size[0]*patch_size[1] tokens, and the rounds are recombined with
    weights derived from each round's softmax normalizer.
    """
    def __init__(self, heads=4, n_rounds=2, channels=64, patch_size=144,
                 r=1):
        super(SAH_MSA, self).__init__()
        self.heads = heads
        self.n_rounds = n_rounds
        inner_dim = channels*3
        self.to_q = nn.Linear(channels, inner_dim, bias=False)
        self.to_k = nn.Linear(channels, inner_dim, bias=False)
        self.to_v = nn.Linear(channels, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, channels, bias=False)
        self.xbox_plus = XBOXPLUS()
        self.clustering_params = {
            'r': r,
            'n_rounds': self.n_rounds
        }
        # Tokens per attention bucket (query and key side use the same size).
        # NOTE(review): despite the scalar default, patch_size is indexed —
        # callers must pass a 2-tuple.
        self.q_attn_size = patch_size[0] * patch_size[1]
        self.k_attn_size = patch_size[0] * patch_size[1]

    def forward(self, input):
        """
        :param input: [b,n,c]
        :return: output: [b,n,c]
        """
        B, N, C_inp = input.shape
        query = self.to_q(input)
        key = self.to_k(input)
        value = self.to_v(input)
        # Bucket assignment is computed from the raw per-head features,
        # with gradients disabled.
        input_hash = input.view(B, N, self.heads, C_inp//self.heads)
        x_hash = rearrange(input_hash, 'b t h e -> (b h) t e')
        bs, x_seqlen, dim = x_hash.shape
        with torch.no_grad():
            self.xbox_plus.set_norms(x_hash)
            Xs = self.xbox_plus.X(x_hash)
            # x_positions: for each round, token indices sorted by hash value.
            x_positions = lsh_clustering(Xs, **self.clustering_params)
            x_positions = x_positions.reshape(self.n_rounds, bs, -1)
        del Xs
        C = query.shape[-1]
        query = query.view(B, N, self.heads, C // self.heads)
        key = key.view(B, N, self.heads, C // self.heads)
        value = value.view(B, N, self.heads, C // self.heads)
        query = rearrange(query, 'b t h e -> (b h) t e')  # [bs, q_seqlen,c]
        key = rearrange(key, 'b t h e -> (b h) t e')
        value = rearrange(value, 'b s h d -> (b h) s d')
        bs, q_seqlen, dim = query.shape
        bs, k_seqlen, dim = key.shape
        v_dim = value.shape[-1]
        # Inverse permutation, used later to undo the hash-order sort.
        x_rev_positions = torch.argsort(x_positions, dim=-1)
        x_offset = torch.arange(bs, device=query.device).unsqueeze(-1) * x_seqlen
        x_flat = (x_positions + x_offset).reshape(-1)
        # Gather q/k/v into hash order and split into fixed-size buckets.
        s_queries = query.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.q_attn_size, dim)
        s_keys = key.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, dim)
        s_values = value.reshape(-1, v_dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, v_dim)
        # Per-bucket attention logits (no sqrt(d) scaling: norm_factor is 1).
        inner = s_queries @ s_keys.transpose(2, 1)
        norm_factor = 1
        inner = inner / norm_factor
        # free memory
        del x_positions
        # softmax denominator
        dots_logsumexp = torch.logsumexp(inner, dim=-1, keepdim=True)
        # softmax
        dots = torch.exp(inner - dots_logsumexp)
        # dropout
        # n_rounds outs
        bo = (dots @ s_values).reshape(self.n_rounds, bs, q_seqlen, -1)
        # undo sort
        x_offset = torch.arange(bs * self.n_rounds, device=query.device).unsqueeze(-1) * x_seqlen
        x_rev_flat = (x_rev_positions.reshape(-1, x_seqlen) + x_offset).reshape(-1)
        o = bo.reshape(-1, v_dim).index_select(0, x_rev_flat).reshape(self.n_rounds, bs, q_seqlen, -1)
        slogits = dots_logsumexp.reshape(self.n_rounds, bs, -1)
        logits = torch.gather(slogits, 2, x_rev_positions)
        # free memory
        del x_rev_positions
        # weighted sum multi-round attention: each round is weighted by its
        # share of the total softmax mass across rounds.
        probs = torch.exp(logits - torch.logsumexp(logits, dim=0, keepdim=True))
        out = torch.sum(o * probs.unsqueeze(-1), dim=0)
        out = rearrange(out, '(b h) t d -> b t h d', h=self.heads)
        out = out.reshape(B, N, -1)
        out = self.to_out(out)
        return out
class SAHAB(nn.Module):
    """Hash-attention block: (optionally shifted) large-patch SAH-MSA plus a
    pre-norm feed-forward, both residual.

    With `sparse` enabled, only the half of the patches with the highest mean
    mask value go through attention; the rest pass through unchanged.
    """
    def __init__(
            self,
            dim,
            patch_size=(16, 16),
            heads=8,
            shift_size=0,
            sparse=False
    ):
        super().__init__()
        # NOTE(review): self.blocks is never used inside this class.
        self.blocks = nn.ModuleList([])
        self.attn = PreNorm(dim, SAH_MSA(heads=heads, n_rounds=2, r=1, channels=dim, patch_size=patch_size))
        self.ffn = PreNorm(dim, FeedForward(dim=dim))
        self.shift_size = shift_size
        self.patch_size = patch_size
        self.sparse = sparse

    def forward(self, x, mask=None):
        """
        x: [b,h,w,c]
        mask: [b,h,w]
        return out: [b,h,w,c]
        """
        b,h,w,c = x.shape
        # Cyclic shift (Swin-style) so alternating blocks see offset patches.
        if self.shift_size > 0:
            x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            mask = torch.roll(mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        w_size = self.patch_size
        # Split into large patches (twice the window size along each axis).
        x = rearrange(x, 'b (nh hh) (nw ww) c-> b (nh nw) (hh ww c)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        mask = rearrange(mask, 'b (nh hh) (nw ww) -> b (nh nw) (hh ww)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        N = x.shape[1]
        mask = torch.mean(mask,dim=2,keepdim=False)  # [b,nh*nw] mean mask value per patch
        if self.sparse:
            # Attend only within the top-half most salient patches, then
            # scatter the attended patches back into place.
            mask_select = mask.topk(mask.shape[1] // 2, dim=1)[1]  # [b,nh*nw//2]
            x_select = batch_gather(x, mask_select, 1)  # [b,nh*nw//2,hh*ww*c]
            x_select = x_select.reshape(b*N//2,-1,c)
            x_select = self.attn(x_select)+x_select
            x_select = x_select.view(b,N//2,-1)
            x = batch_scatter(x.clone(), x_select, 1, mask_select)
        else:
            # Dense path: every patch goes through attention.
            x = x.view(b*N,-1,c)
            x = self.attn(x) + x
            x = x.view(b, N, -1)
        x = rearrange(x, 'b (nh nw) (hh ww c) -> b (nh hh) (nw ww) c', nh=h//(w_size[0] * 2), hh=w_size[0] * 2, ww=w_size[1] * 2)
        # Undo the cyclic shift.
        if self.shift_size > 0:
            x = torch.roll(x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        x = self.ffn(x) + x
        return x
class SAHABs(nn.Module):
    """A stack of SAHAB blocks; every other block uses a shifted window."""

    def __init__(
            self,
            dim,
            patch_size=(8, 8),
            heads=8,
            num_blocks=2,
            sparse=False
    ):
        super().__init__()
        blocks = []
        for idx in range(num_blocks):
            # Even-indexed blocks are unshifted; odd ones shift by the patch size.
            shift = 0 if idx % 2 == 0 else patch_size[0]
            blocks.append(
                SAHAB(heads=heads, dim=dim, patch_size=patch_size, sparse=sparse,
                      shift_size=shift))
        self.blocks = nn.Sequential(*blocks)

    def forward(self, x, mask=None):
        """
        x: [b,c,h,w]
        mask: [b,1,h,w]
        return x: [b,c,h,w]
        """
        feat = x.permute(0, 2, 3, 1)  # blocks run channels-last
        mask2d = mask.squeeze(1)
        for block in self.blocks:
            feat = block(feat, mask2d)
        return feat.permute(0, 3, 1, 2)
class ASPPConv(nn.Sequential):
    """3x3 atrous convolution (padding == dilation preserves spatial size) + ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        super(ASPPConv, self).__init__(
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
            nn.ReLU())
class ASPPPooling(nn.Sequential):
    """Global-context branch: pool to 1x1, 1x1 conv, ReLU, then bilinearly
    upsample back to the input resolution."""

    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.ReLU())

    def forward(self, x):
        spatial = x.shape[-2:]
        out = x
        for mod in self:
            out = mod(out)
        return F.interpolate(out, size=spatial, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: parallel atrous branches plus a global
    pooling branch, concatenated and projected back to `out_channels`."""

    def __init__(self, in_channels, atrous_rates, out_channels):
        super(ASPP, self).__init__()
        branches = [ASPPConv(in_channels, out_channels, rate)
                    for rate in tuple(atrous_rates)]
        branches.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(branches)
        self.project = nn.Sequential(
            nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
            nn.ReLU(),
            nn.Dropout(0.5))

    def forward(self, x):
        branch_outs = [branch(x) for branch in self.convs]
        return self.project(torch.cat(branch_outs, dim=1))
class Sparsity_Estimator(nn.Module):
    """U-shaped sparsity estimator: a 2-level conv encoder, ASPP bottleneck,
    and 2-level transposed-conv decoder. With `sparse` set, one extra output
    channel is produced and returned separately as an error map."""

    def __init__(self, dim=28, expand=2, sparse=False):
        super(Sparsity_Estimator, self).__init__()
        self.dim = dim
        self.stage = 2
        self.sparse = sparse
        # Input projection
        self.in_proj = nn.Conv2d(28, dim, 1, 1, 0, bias=False)
        # Encoder: each level is a 1x1 expand / strided depthwise / 1x1 triple.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for _ in range(2):
            self.encoder_layers.append(nn.ModuleList([
                nn.Conv2d(dim_stage, dim_stage * expand, 1, 1, 0, bias=False),
                nn.Conv2d(dim_stage * expand, dim_stage * expand, 3, 2, 1, bias=False, groups=dim_stage * expand),
                nn.Conv2d(dim_stage * expand, dim_stage * expand, 1, 1, 0, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = ASPP(dim_stage, [3, 6], dim_stage)
        # Decoder: transposed-conv upsample followed by a conv triple.
        self.decoder_layers = nn.ModuleList([])
        for _ in range(2):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage // 2, dim_stage, 1, 1, 0, bias=False),
                nn.Conv2d(dim_stage, dim_stage, 3, 1, 1, bias=False, groups=dim_stage),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, 0, bias=False),
            ]))
            dim_stage //= 2
        # Output projection (extra channel carries the error map when sparse).
        if sparse:
            self.out_conv2 = nn.Conv2d(self.dim, self.dim + 1, 3, 1, 1, bias=False)
        else:
            self.out_conv2 = nn.Conv2d(self.dim, self.dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]  (plus a [b,1,h,w] error map when sparse)
        """
        feat = self.lrelu(self.in_proj(x))
        # Encoder, recording a skip feature before each down-sampling level.
        skips = []
        for expand_conv, down_dw, squeeze_conv in self.encoder_layers:
            skips.append(feat)
            feat = squeeze_conv(self.lrelu(down_dw(self.lrelu(expand_conv(feat)))))
        # Residual bottleneck.
        feat = self.bottleneck(feat) + feat
        # Decoder, adding the matching encoder skip after each level.
        for level, (upsample, expand_conv, dw_conv, squeeze_conv) in enumerate(self.decoder_layers):
            feat = upsample(feat)
            feat = squeeze_conv(self.lrelu(dw_conv(self.lrelu(expand_conv(feat)))))
            feat = feat + skips[self.stage - 1 - level]
        out = self.out_conv2(feat)
        if self.sparse:
            return out[:, :-1], out[:, -1:, :, :]
        return out
class CST(nn.Module):
    """Coarse-to-fine Sparse Transformer for spectral reconstruction.

    Pipeline: fold the shifted 2-D measurement into a 28-band cube, run the
    sparsity estimator (optionally producing a sparsity mask), then a
    U-shaped encoder / bottleneck / decoder built from SAHAB stacks.
    """

    def __init__(self, dim=28, stage=2, num_blocks=[2, 2, 2], sparse=False):
        super(CST, self).__init__()
        self.dim = dim
        self.stage = stage
        self.sparse = sparse
        # Fution physical mask and shifted measurement
        self.fution = nn.Conv2d(28, 28, 1, 1, 0, bias=False)
        # Sparsity Estimator (the large config stacks two of them)
        if num_blocks == [2, 4, 6]:
            self.fe = nn.Sequential(Sparsity_Estimator(dim=28, expand=2, sparse=False),
                                    Sparsity_Estimator(dim=28, expand=2, sparse=sparse))
        else:
            self.fe = Sparsity_Estimator(dim=28, expand=2, sparse=sparse)
        # Encoder
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                SAHABs(dim=dim_stage, num_blocks=num_blocks[i], heads=dim_stage // dim, sparse=sparse),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
                nn.AvgPool2d(kernel_size=2, stride=2),  # down-samples the mask
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = SAHABs(
            dim=dim_stage, heads=dim_stage // dim, num_blocks=num_blocks[-1], sparse=sparse)
        # Decoder
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                SAHABs(dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i],
                       heads=(dim_stage // 2) // dim, sparse=sparse),
            ]))
            dim_stage //= 2
        # Output projection
        self.out_proj = nn.Conv2d(self.dim, dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def initial_x(self, y):
        """
        :param y: [b,1,256,310] shifted measurement
        :return: x: [b,28,256,256] per-band crop, fused by a 1x1 conv
        """
        nC, step = 28, 2
        bs, row, col = y.shape
        x = torch.zeros(bs, nC, row, row).cuda().float()
        for i in range(nC):
            x[:, i, :, :] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        x = self.fution(x)
        return x

    def forward(self, x, input_mask=None):
        """
        x: [b,h,w]
        return out:[b,c,h,w]
        """
        x = self.initial_x(x)
        # Fix: b/h/w were previously undefined on the dense (sparse=False)
        # path below, crashing with a NameError; derive them from x.
        b, _, h, w = x.shape

        # Feature Extraction
        if self.sparse:
            fea, mask = self.fe(x)
        else:
            fea = self.fe(x)
            mask = torch.randn((b, 1, h, w)).cuda()

        # Encoder
        fea_encoder = []
        masks = []
        for (block, fea_downsample, mask_downsample) in self.encoder_layers:
            fea = block(fea, mask)
            masks.append(mask)
            fea_encoder.append(fea)
            fea = fea_downsample(fea)
            mask = mask_downsample(mask)

        # Bottleneck
        fea = self.bottleneck(fea, mask)

        # Decoder (skip connections and the matching-resolution mask)
        for i, (fea_upsample, block) in enumerate(self.decoder_layers):
            fea = fea_upsample(fea)
            fea = fea + fea_encoder[self.stage - 1 - i]
            mask = masks[self.stage - 1 - i]
            fea = block(fea, mask)

        # Output projection (residual over the fused input cube)
        out = self.out_proj(fea) + x
        return out
| 20,008 | 32.404007 | 129 | py |
MST | MST-main/real/test_code/architecture/MST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """In-place truncated-normal initialisation on [a, b]; returns the tensor."""
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` in place with fan-scaled variance.

    Args:
        tensor: tensor to fill (modified in place).
        scale: variance scale factor.
        mode: 'fan_in', 'fan_out' or 'fan_avg' — which fan to normalize by.
        distribution: 'truncated_normal', 'normal' or 'uniform'.

    Raises:
        ValueError: if `mode` or `distribution` is not one of the above.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Fix: an unknown mode previously fell through and crashed later with
        # an UnboundLocalError on `denom`; fail fast with a clear message.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # 0.8796... is the std of a standard normal truncated to [-2, 2].
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: fan_in variance scaling with a truncated normal."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Applies LayerNorm to the input before delegating to the wrapped `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, *args, **kwargs)
class GELU(nn.Module):
    """Stateless module wrapper around the functional GELU activation."""

    def forward(self, x):
        return F.gelu(x)
def conv(in_channels, out_channels, kernel_size, bias=False, padding=1, stride=1):
    """Build a 2-D convolution with 'same'-style padding (kernel_size // 2).

    Note: the `padding` argument is accepted for signature compatibility but
    is ignored — the padding actually applied is always kernel_size // 2.
    """
    return nn.Conv2d(
        in_channels, out_channels, kernel_size,
        padding=kernel_size // 2, bias=bias, stride=stride)
def shift_back(inputs, step=2):
    """Undo the dispersion shift in place and crop to a square cube.

    input [bs, 28, 256, 310] -> output [bs, 28, 256, 256]. The effective
    step is scaled down when the input is spatially down-sampled relative
    to 256. The input tensor is mutated; a cropped view is returned.
    """
    bs, nC, row, col = inputs.shape
    scale = 256 // row
    eff_step = float(step) / float(scale * scale)
    out_col = row
    for ch in range(nC):
        offset = int(eff_step * ch)
        inputs[:, ch, :, :out_col] = inputs[:, ch, :, offset:offset + out_col]
    return inputs[:, :, :, :out_col]
class MS_MSA(nn.Module):
    """Multi-head Spectral-wise Self-Attention.

    Q/K/V are transposed so attention runs across the channel (spectral)
    axis: the attention matrix is (dim_head x dim_head) per head rather than
    (hw x hw). A depthwise-conv branch on V supplies a positional term.
    """
    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learned per-head temperature applied to the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        self.dim = dim

    def forward(self, x_in):
        """
        x_in: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        x = x_in.reshape(b,h*w,c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                                (q_inp, k_inp, v_inp))
        # q: b,heads,hw,c
        # Transpose so tokens are channels (spectral-wise attention).
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        # L2-normalise so logits are cosine similarities (scaled by rescale).
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1))   # A = K^T*Q
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v   # b,heads,d,hw
        x = x.permute(0, 3, 1, 2)    # Transpose back: [b, hw, heads, d]
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        # Positional branch computed from V in image layout.
        out_p = self.pos_emb(v_inp.reshape(b,h,w,c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck conv FFN: 1x1 expand -> depthwise 3x3 -> 1x1 project."""

    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        # Convs expect channels-first; convert, run the net, convert back.
        channels_first = x.permute(0, 3, 1, 2)
        out = self.net(channels_first)
        return out.permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """Stack of residual (MS-MSA attention, pre-norm feed-forward) pairs."""

    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            self.blocks.append(nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim))
            ]))

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        feat = x.permute(0, 2, 3, 1)  # attention blocks run channels-last
        for attn, ff in self.blocks:
            feat = attn(feat) + feat
            feat = ff(feat) + feat
        return feat.permute(0, 3, 1, 2)
class MST(nn.Module):
    """Mask-guided Spectral-wise Transformer: a U-shaped encoder /
    bottleneck / decoder built from MSAB stacks, operating on the 28-band
    cube recovered from the shifted 2-D measurement."""

    def __init__(self, dim=28, stage=3, num_blocks=[2,2,2]):
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage

        # Input projection
        self.embedding = nn.Conv2d(28, self.dim, 3, 1, 1, bias=False)
        self.fution = nn.Conv2d(28, 28, 3, 1, 1, bias=False)

        # Encoder
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
            ]))
            dim_stage *= 2

        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])

        # Decoder
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2

        # Output projection
        self.mapping = nn.Conv2d(self.dim, 28, 3, 1, 1, bias=False)

        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def initial_x(self, y):
        """
        :param y: [b,256,310] shifted 2-D measurement
        :return: x: [b,28,256,256] per-band crop, fused by a 3x3 conv
        """
        nC, step = 28, 2
        bs, row, col = y.shape
        x = torch.zeros(bs, nC, row, row).cuda().float()
        for i in range(nC):
            x[:, i, :, :] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        x = self.fution(x)
        return x

    def forward(self, x, input_mask=None):
        """
        x: [b,h,w]
        return out:[b,c,h,w]
        """
        x = self.initial_x(x)

        # Embedding
        fea = self.lrelu(self.embedding(x))

        # Encoder
        fea_encoder = []
        # Fix: the loop variable previously shadowed the module-level MSAB
        # class name; renamed for clarity (behavior unchanged).
        for (attn_block, FeaDownSample) in self.encoder_layers:
            fea = attn_block(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)

        # Bottleneck
        fea = self.bottleneck(fea)

        # Decoder with skip connections fused by a 1x1 conv
        for i, (FeaUpSample, Fution, attn_block) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.stage-1-i]], dim=1))
            fea = attn_block(fea)

        # Mapping (residual over the fused input cube)
        out = self.mapping(fea) + x
        return out
| 8,814 | 28.881356 | 116 | py |
MST | MST-main/real/test_code/architecture/BIRNAT.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class self_attention(nn.Module):
    """SAGAN-style self-attention with a learned residual gate.

    `gamma1` is initialised to zero, so the module starts as an identity
    mapping and blends in the attention output as training progresses.
    """

    def __init__(self, ch):
        super(self_attention, self).__init__()
        self.conv1 = nn.Conv2d(ch, ch // 8, 1)
        self.conv2 = nn.Conv2d(ch, ch // 8, 1)
        self.conv3 = nn.Conv2d(ch, ch, 1)
        self.conv4 = nn.Conv2d(ch, ch, 1)
        self.gamma1 = torch.nn.Parameter(torch.Tensor([0]))
        self.ch = ch

    def forward(self, x):
        bs = x.shape[0]
        f = self.conv1(x)  # query projection (ch//8)
        g = self.conv2(x)  # key projection (ch//8)
        h = self.conv3(x)  # value projection (ch)
        ht = h.reshape([bs, self.ch, -1])
        ft = f.reshape([bs, self.ch // 8, -1])
        gt = g.reshape([bs, self.ch // 8, -1])
        logits = torch.matmul(ft.permute([0, 2, 1]), gt)
        beta = F.softmax(logits, dim=1)
        o = torch.matmul(ht, beta).reshape(x.shape)  # [bs, C, h, w]
        o = self.conv4(o)
        return self.gamma1 * o + x
class res_part(nn.Module):
    """Three stacked residual conv units: x <- x + unit_i(x).

    Each unit is 3x3 -> LeakyReLU -> 1x1 (to out_ch) -> LeakyReLU -> 3x3;
    as used in this file, in_ch == out_ch.
    """

    def __init__(self, in_ch, out_ch):
        super(res_part, self).__init__()

        def make_unit():
            return nn.Sequential(
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, out_ch, 1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
            )

        self.conv1 = make_unit()
        self.conv2 = make_unit()
        self.conv3 = make_unit()

    def forward(self, x):
        for unit in (self.conv1, self.conv2, self.conv3):
            x = unit(x) + x
        return x
class down_feature(nn.Module):
    """Feature extractor widening `in_ch` to `out_ch` at full resolution
    (despite the name, no spatial down-sampling is performed)."""

    def __init__(self, in_ch, out_ch):
        super(down_feature, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, 20, 5, stride=1, padding=2),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 40, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(40, out_ch, 3, stride=1, padding=1),
        )

    def forward(self, x):
        return self.conv(x)
class up_feature(nn.Module):
    """Maps the hidden feature back to `out_ch` channels at full resolution
    (despite the name, no spatial up-sampling is performed)."""

    def __init__(self, in_ch, out_ch):
        super(up_feature, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, 40, 3, stride=1, padding=1),
            nn.Conv2d(40, 30, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(30, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
            nn.Conv2d(20, out_ch, 1),
        )

    def forward(self, x):
        return self.conv(x)
class cnn1(nn.Module):
    """First-frame reconstruction CNN.

    Input is the normalized measurement concatenated with the B mask/PhiTy
    channels; the network down-samples once, applies three residual stages,
    up-samples back, and emits a single-channel frame.
    """

    def __init__(self, B):
        super(cnn1, self).__init__()
        self.conv1 = nn.Conv2d(B + 1, 32, kernel_size=5, stride=1, padding=2)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=1, stride=1)
        self.relu3 = nn.LeakyReLU(inplace=True)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.relu4 = nn.LeakyReLU(inplace=True)
        self.conv5 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.relu5 = nn.LeakyReLU(inplace=True)
        self.conv51 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.relu51 = nn.LeakyReLU(inplace=True)
        self.conv52 = nn.Conv2d(32, 16, kernel_size=1, stride=1)
        self.relu52 = nn.LeakyReLU(inplace=True)
        self.conv6 = nn.Conv2d(16, 1, kernel_size=3, stride=1, padding=1)
        self.res_part1 = res_part(128, 128)
        self.res_part2 = res_part(128, 128)
        self.res_part3 = res_part(128, 128)
        self.conv7 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu7 = nn.LeakyReLU(inplace=True)
        self.conv8 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        self.conv9 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu9 = nn.LeakyReLU(inplace=True)
        self.conv10 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        # Instantiated for checkpoint compatibility; not used in forward.
        self.att1 = self_attention(128)

    def forward(self, meas=None, nor_meas=None, PhiTy=None):
        """Return a [bs,1,h,w] first-frame estimate from the measurement cue."""
        data = torch.cat([torch.unsqueeze(nor_meas, dim=1), PhiTy], dim=1)
        # Head: project and down-sample once.
        out = self.relu1(self.conv1(data))
        out = self.relu2(self.conv2(out))
        out = self.relu3(self.conv3(out))
        out = self.relu4(self.conv4(out))
        # Three residual stages with conv mixing between them.
        out = self.res_part1(out)
        out = self.conv8(self.relu7(self.conv7(out)))
        out = self.res_part2(out)
        out = self.conv10(self.relu9(self.conv9(out)))
        out = self.res_part3(out)
        # Tail: up-sample and project to one channel.
        out = self.relu5(self.conv5(out))
        out = self.relu51(self.conv51(out))
        out = self.relu52(self.conv52(out))
        return self.conv6(out)
class forward_rnn(nn.Module):
    """Forward recurrence of BIRNAT: reconstructs the spectral frames one by
    one in ascending band order, conditioning each step on the hidden state,
    the normalized measurement, and the residual of the measurement after
    removing already-reconstructed bands."""
    def __init__(self):
        super(forward_rnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        # Encodes the normalized measurement (computed once per sequence).
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Encodes the per-step measurement residual.
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Projects the 60-channel joint feature down to the 20-channel hidden state.
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)

    def forward(self, xt1, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        # xt1: first reconstructed frame [bs,1,row,col]; h: hidden state.
        # Returns (out [bs,cs_rate,row,col], final hidden state).
        ht = h
        xt = xt1
        step = 2
        [bs, nC, row, col] = xt1.shape
        out = xt1
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # d1: measurement contribution of frames reconstructed so far;
            # d2: remaining frames approximated by the normalized measurement.
            # (meas - d1 - d2) is the residual cue fed to conv_x2.
            d1 = torch.zeros(bs, row, col).cuda()
            d2 = torch.zeros(bs, row, col).cuda()
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, ii, :, :], out[:, ii, :, :])
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, ii, :, :], torch.squeeze(nor_meas))
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            # Fuse hidden state with the three feature streams (20+10+10+20 = 60 ch).
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out = torch.cat([out, xt], dim=1)
        return out, ht
class backrnn(nn.Module):
    """Backward recurrence of BIRNAT: refines the frames in descending band
    order, starting from the forward pass's last frame and using the forward
    reconstruction `xt8` for the not-yet-refined bands."""
    def __init__(self):
        super(backrnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        # Encodes the normalized measurement (computed once per sequence).
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Encodes the per-step measurement residual.
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Projects the 60-channel joint feature down to the 20-channel hidden state.
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)

    def forward(self, xt8, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        # xt8: forward-pass reconstruction [bs,cs_rate,row,col]; h: hidden state.
        # Returns the refined cube [bs,cs_rate,row,col].
        ht = h
        step = 2
        [bs, nC, row, col] = xt8.shape
        # Start from the last frame of the forward reconstruction.
        xt = torch.unsqueeze(xt8[:, cs_rate - 1, :, :], 1)
        out = torch.zeros(bs, cs_rate, row, col).cuda()
        out[:, cs_rate - 1, :, :] = xt[:, 0, :, :]
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # d1: contribution of the frames already refined (high bands);
            # d2: remaining frames taken from the forward reconstruction.
            # (meas - d1 - d2) is the residual cue fed to conv_x2.
            d1 = torch.zeros(bs, row, col).cuda()
            d2 = torch.zeros(bs, row, col).cuda()
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], out[:, cs_rate - 1 - ii, :, :].clone())
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], xt8[:, cs_rate - 1 - ii, :, :].clone())
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            # Fuse hidden state with the three feature streams (60 channels total).
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out[:, cs_rate - 2 - i, :, :] = xt[:, 0, :, :]
        return out
def shift_gt_back(inputs, step=2):  # input [bs,256,310] output [bs, 28, 256, 256]
    """Undo the per-band dispersion shift by cropping each band's window.

    Band i of the output is inputs[:, i, :, step*i : step*i + W_out] where
    W_out = col - (nC - 1) * step.

    Fix: allocate the output on the input's device instead of calling
    .cuda() unconditionally, so CPU tensors work too (consistent with the
    device handling in `shift`).
    """
    [bs, nC, row, col] = inputs.shape
    output = torch.zeros(bs, nC, row, col - (nC - 1) * step,
                         device=inputs.device).float()
    for i in range(nC):
        output[:, i, :, :] = inputs[:, i, :, step * i:step * i + col - (nC - 1) * step]
    return output
def shift(inputs, step=2):
    """Apply the per-band dispersion shift: band i is placed `step * i`
    columns to the right inside a zero-padded wider canvas.

    inputs: [bs, nC, row, col] -> returns [bs, nC, row, col + (nC-1)*step].
    """
    [bs, nC, row, col] = inputs.shape
    padded_cols = col + (nC - 1) * step
    output = torch.zeros(bs, nC, row, padded_cols).float()
    if inputs.is_cuda:
        output = output.cuda()
    for band in range(nC):
        offset = step * band
        output[:, band, :, offset:offset + col] = inputs[:, band, :, :]
    return output
class BIRNAT(nn.Module):
    """BIRNAT: bidirectional recurrent reconstruction for CASSI measurements.

    Pipeline: CNN for the first band -> forward RNN over bands ->
    backward RNN refinement -> shift-back crop to [bs, 28, 256, 256].
    """
    def __init__(self):
        super(BIRNAT, self).__init__()
        self.cs_rate = 28  # number of spectral bands
        self.first_frame_net = cnn1(self.cs_rate).cuda()
        self.rnn1 = forward_rnn().cuda()
        self.rnn2 = backrnn().cuda()
    def gen_meas_torch(self, meas, shift_mask):
        """Normalize the measurement by the summed mask and re-modulate.

        Returns (nor_meas [bs, H, W_shift], PhiTy [bs, 28, H, W_shift]).
        """
        batch_size, H = meas.shape[0:2]
        mask_s = torch.sum(shift_mask, 1)  # per-pixel mask energy
        nor_meas = torch.div(meas, mask_s)
        temp = torch.mul(torch.unsqueeze(nor_meas, dim=1).expand([batch_size, 28, H, shift_mask.shape[3]]), shift_mask)
        return nor_meas, temp
    def forward(self, meas, shift_mask=None):
        """meas: [bs, 256, 310] coded measurement -> [bs, 28, 256, 256] cube."""
        if shift_mask==None:
            # Placeholder mask (e.g. for profiling); real callers pass one in.
            shift_mask = torch.zeros(1, 28, 256, 310).cuda()
        H, W = meas.shape[-2:]
        nor_meas, PhiTy = self.gen_meas_torch(meas, shift_mask)
        h0 = torch.zeros(meas.shape[0], 20, H, W).cuda()  # initial hidden state
        xt1 = self.first_frame_net(meas, nor_meas, PhiTy)
        model_out1, h1 = self.rnn1(xt1, meas, nor_meas, PhiTy, shift_mask, h0, self.cs_rate)
        model_out2 = self.rnn2(model_out1, meas, nor_meas, PhiTy, shift_mask, h1, self.cs_rate)
        model_out2 = shift_gt_back(model_out2)
        return model_out2
| 13,326 | 35.412568 | 119 | py |
MST | MST-main/real/test_code/architecture/GAP_Net.py | import torch.nn.functional as F
import torch
import torch.nn as nn
def A(x, Phi):
    """CASSI forward operator: mask-modulate the cube and sum over bands.

    x, Phi: [bs, nC, H, W] -> y: [bs, H, W].
    """
    return torch.sum(x * Phi, 1)
def At(y, Phi):
    """Adjoint of A: broadcast y across bands and re-modulate by the mask.

    y: [bs, H, W], Phi: [bs, nC, H, W] -> x: [bs, nC, H, W].
    """
    expanded = torch.unsqueeze(y, 1).repeat(1, Phi.shape[1], 1, 1)
    return expanded * Phi
def shift_3d(inputs, step=2):
    """Shear the cube in place: roll band i right by step*i columns.

    Mutates `inputs` and returns it.
    """
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """Undo `shift_3d` in place: roll band i left by step*i columns.

    Mutates `inputs` and returns it.
    """
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=-step * band, dims=2)
    return inputs
class double_conv(nn.Module):
    """Two (Conv3x3 -> ReLU) stages; spatial size is preserved."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        stages = []
        for cin in (in_channels, out_channels):
            stages.append(nn.Conv2d(cin, out_channels, 3, padding=1))
            stages.append(nn.ReLU(inplace=True))
        self.d_conv = nn.Sequential(*stages)
    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 3-level U-Net denoiser with a global residual connection.

    Pads H/W up to multiples of 8 (reflect) so the two max-pools divide
    evenly, and crops the padding back off before returning.
    """
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            # nn.Conv2d(64, 64, (1,2), padding=(0,1)),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()
    def forward(self, x):
        b, c, h_inp, w_inp = x.shape
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        inputs = x  # kept for the global residual below
        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.upsample2(conv3)
        x = torch.cat([x, conv2], dim=1)  # skip connection, level 2
        x = self.dconv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)  # skip connection, level 1
        x = self.dconv_up1(x)
        x = self.conv_last(x)
        x = self.afn_last(x)  # Tanh: the net predicts a bounded residual
        out = x + inputs
        return out[:, :, :h_inp, :w_inp]  # crop padding back off
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""
    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        hidden = mid_channels or out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, hidden, kernel_size=3, padding=1),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        return self.double_conv(x)
class GAP_net(nn.Module):
    """Nine-stage GAP (Generalized Alternating Projection) unfolding network.

    Each stage performs the projection step
        x <- x + A^T((y - A x) / Phi_s)
    followed by a U-Net denoiser applied in the un-sheared domain.
    """
    def __init__(self):
        super(GAP_net, self).__init__()
        # Nine independent (non-weight-shared) denoisers, one per stage.
        self.unet1 = Unet(28, 28)
        self.unet2 = Unet(28, 28)
        self.unet3 = Unet(28, 28)
        self.unet4 = Unet(28, 28)
        self.unet5 = Unet(28, 28)
        self.unet6 = Unet(28, 28)
        self.unet7 = Unet(28, 28)
        self.unet8 = Unet(28, 28)
        self.unet9 = Unet(28, 28)

    def forward(self, y, input_mask=None):
        """Reconstruct a 28-band cube from measurement y [bs, 256, 310].

        input_mask: (Phi, Phi_s); random CUDA placeholders are used when absent
        (e.g. for profiling). Returns [bs, 28, 256, 256].
        """
        if input_mask is None:
            Phi = torch.rand((1, 28, 256, 310)).cuda()
            Phi_s = torch.rand((1, 256, 310)).cuda()
        else:
            Phi, Phi_s = input_mask
        x = At(y, Phi)  # initialization: x0 = Phi^T y
        # The nine stages were previously unrolled by hand; this loop is
        # behaviorally identical (the old x_list intermediates were unused).
        for denoiser in (self.unet1, self.unet2, self.unet3,
                         self.unet4, self.unet5, self.unet6,
                         self.unet7, self.unet8, self.unet9):
            yb = A(x, Phi)
            x = x + At(torch.div(y - yb, Phi_s), Phi)  # projection step
            x = shift_back_3d(x)  # undo dispersion shear before denoising
            x = denoiser(x)
            x = shift_3d(x)       # re-shear for the next data step
        return x[:, :, :, 0:256]
MST | MST-main/real/test_code/architecture/Lambda_Net.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
class LambdaNetAttention(nn.Module):
    """Global (non-local) spatial self-attention over all H*W positions.

    Queries/keys are projected to dim//8 channels; the attended output is
    blended with the input via a learned scalar `gamma` (residual form).
    """
    def __init__(
            self,
            dim,
    ):
        super().__init__()
        self.dim = dim
        self.to_q = nn.Linear(dim, dim//8, bias=False)
        self.to_k = nn.Linear(dim, dim//8, bias=False)
        self.to_v = nn.Linear(dim, dim, bias=False)
        self.rescale = (dim//8)**-0.5  # 1/sqrt(d_k) attention scaling
        self.gamma = nn.Parameter(torch.ones(1))
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        x = x.permute(0,2,3,1)
        b, h, w, c = x.shape
        # Reshape to (B,N,C), where N = window_size[0]*window_size[1] is the length of sentence
        x_inp = rearrange(x, 'b h w c -> b (h w) c')
        # produce query, key and value
        q = self.to_q(x_inp)
        k = self.to_k(x_inp)
        v = self.to_v(x_inp)
        # attention
        sim = einsum('b i d, b j d -> b i j', q, k)*self.rescale
        attn = sim.softmax(dim=-1)
        # aggregate
        out = einsum('b i j, b j d -> b i d', attn, v)
        # merge blocks back to original feature map
        out = rearrange(out, 'b (h w) c -> b h w c', h=h, w=w)
        out = self.gamma*out + x  # learned residual blend
        return out.permute(0,3,1,2)
class triple_conv(nn.Module):
    """Three stacked 3x3 convolutions; ReLU after the first two only."""
    def __init__(self, in_channels, out_channels):
        super(triple_conv, self).__init__()
        layers = [nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU()]
        layers += [nn.Conv2d(out_channels, out_channels, 3, padding=1), nn.ReLU()]
        layers += [nn.Conv2d(out_channels, out_channels, 3, padding=1)]
        self.t_conv = nn.Sequential(*layers)
    def forward(self, x):
        return self.t_conv(x)
class double_conv(nn.Module):
    """Two 3x3 convolutions with a ReLU between them (no final activation)."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        layers = [nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU()]
        layers += [nn.Conv2d(out_channels, out_channels, 3, padding=1)]
        self.d_conv = nn.Sequential(*layers)
    def forward(self, x):
        return self.d_conv(x)
def shift_back_3d(inputs, step=2):
    """Undo the dispersion shear in place: roll band i left by step*i columns."""
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=-step * band, dims=2)
    return inputs
class Lambda_Net(nn.Module):
    """Lambda-Net: deep U-Net reconstruction with a global-attention bottleneck.

    Takes the coded measurement (plus the mask) and produces the 28-band
    cube; the dispersion shear is undone at the very end.
    """
    def __init__(self, out_ch=28):
        super(Lambda_Net, self).__init__()
        # Fuse the (scaled) measurement with the 28-band mask.
        self.conv_in = nn.Conv2d(1+28, 28, 3, padding=1)
        # encoder
        self.conv_down1 = triple_conv(28, 32)
        self.conv_down2 = triple_conv(32, 64)
        self.conv_down3 = triple_conv(64, 128)
        self.conv_down4 = triple_conv(128, 256)
        self.conv_down5 = double_conv(256, 512)
        self.conv_down6 = double_conv(512, 1024)
        self.maxpool = nn.MaxPool2d(2)
        # decoder
        self.upsample5 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
        self.upsample4 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.upsample3 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.upsample2 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.upsample1 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
        self.conv_up1 = triple_conv(32+32, 32)
        self.conv_up2 = triple_conv(64+64, 64)
        self.conv_up3 = triple_conv(128+128, 128)
        self.conv_up4 = triple_conv(256+256, 256)
        self.conv_up5 = double_conv(512+512, 512)
        # attention
        self.attention = LambdaNetAttention(dim=128)
        # Output refinement: two dense-style residual hops, then 1x1 projection.
        self.conv_last1 = nn.Conv2d(32, 6, 3,1,1)
        self.conv_last2 = nn.Conv2d(38, 32, 3,1,1)
        self.conv_last3 = nn.Conv2d(32, 12, 3,1,1)
        self.conv_last4 = nn.Conv2d(44, 32, 3,1,1)
        self.conv_last5 = nn.Conv2d(32, out_ch, 1)
        self.act = nn.ReLU()
    def forward(self, x, input_mask=None):
        """x: [bs, 256, 310] measurement -> [bs, 28, 256, 256] cube."""
        if input_mask == None:
            # Placeholder mask (e.g. for profiling); real callers pass one in.
            input_mask = torch.zeros((1,28,256,310)).cuda()
        x = x/28*2  # rescale measurement; presumably calibrated to mask energy — TODO confirm
        x = self.conv_in(torch.cat([x.unsqueeze(1), input_mask], dim=1))
        b, c, h_inp, w_inp = x.shape
        # Pad to multiples of 32 so five max-pools divide evenly.
        hb, wb = 32, 32
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        res0 = x  # global residual
        conv1 = self.conv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.conv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.conv_down3(x)
        x = self.maxpool(conv3)
        conv4 = self.conv_down4(x)
        x = self.maxpool(conv4)
        conv5 = self.conv_down5(x)
        x = self.maxpool(conv5)
        conv6 = self.conv_down6(x)
        x = self.upsample5(conv6)
        x = torch.cat([x, conv5], dim=1)
        x = self.conv_up5(x)
        x = self.upsample4(x)
        x = torch.cat([x, conv4], dim=1)
        x = self.conv_up4(x)
        x = self.upsample3(x)
        x = torch.cat([x, conv3], dim=1)
        x = self.conv_up3(x)
        x = self.attention(x)  # global attention at 1/4 resolution
        x = self.upsample2(x)
        x = torch.cat([x, conv2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)
        x = self.conv_up1(x)
        res1 = x
        out1 = self.act(self.conv_last1(x))
        x = self.conv_last2(torch.cat([res1,out1],dim=1))  # dense re-injection
        res2 = x
        out2 = self.act(self.conv_last3(x))
        out3 = self.conv_last4(torch.cat([res2, out2], dim=1))
        out = self.conv_last5(out3)+res0
        out = out[:, :, :h_inp, :w_inp]  # crop padding back off
        return shift_back_3d(out)[:, :, :, :256]
| 5,680 | 30.38674 | 95 | py |
MST | MST-main/real/test_code/architecture/ADMM_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def A(x, Phi):
    """Forward CASSI operator: elementwise mask then sum across bands."""
    modulated = x * Phi
    return torch.sum(modulated, 1)
def At(y, Phi):
    """Adjoint operator: replicate y per band, then mask-modulate."""
    replicated = torch.unsqueeze(y, 1).repeat(1, Phi.shape[1], 1, 1)
    return replicated * Phi
class double_conv(nn.Module):
    """Conv3x3 -> ReLU -> Conv3x3 -> ReLU (spatial size preserved)."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        first = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        second = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.d_conv = nn.Sequential(first, nn.ReLU(inplace=True),
                                    second, nn.ReLU(inplace=True))
    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 3-level U-Net denoiser with a global residual connection.

    Pads H/W to multiples of 8 (reflect) so the two max-pools divide
    evenly, and crops the padding back off before returning.
    """
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()
    def forward(self, x):
        b, c, h_inp, w_inp = x.shape
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        inputs = x  # kept for the global residual below
        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.upsample2(conv3)
        x = torch.cat([x, conv2], dim=1)  # skip connection, level 2
        x = self.dconv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)  # skip connection, level 1
        x = self.dconv_up1(x)
        x = self.conv_last(x)
        x = self.afn_last(x)  # Tanh: bounded residual prediction
        out = x + inputs
        return out[:, :, :h_inp, :w_inp]  # crop padding back off
def shift_3d(inputs, step=2):
    """Shear the cube in place: roll band i right by step*i columns."""
    num_bands = inputs.shape[1]
    for band in range(num_bands):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """Undo the shear in place: roll band i left by step*i columns."""
    num_bands = inputs.shape[1]
    for band in range(num_bands):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=-step * band, dims=2)
    return inputs
class ADMM_net(nn.Module):
    """Nine-stage ADMM unfolding network for CASSI reconstruction.

    Each stage: closed-form x-update on the data term (with a learnable
    regularization scalar gamma_k), learned U-Net prior as theta-update in
    the un-sheared domain, then dual-variable update.
    """
    def __init__(self):
        super(ADMM_net, self).__init__()
        # Nine per-stage denoisers (no weight sharing).
        self.unet1 = Unet(28, 28)
        self.unet2 = Unet(28, 28)
        self.unet3 = Unet(28, 28)
        self.unet4 = Unet(28, 28)
        self.unet5 = Unet(28, 28)
        self.unet6 = Unet(28, 28)
        self.unet7 = Unet(28, 28)
        self.unet8 = Unet(28, 28)
        self.unet9 = Unet(28, 28)
        # Per-stage learnable regularization scalars, initialized at 0.
        self.gamma1 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma2 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma3 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma4 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma5 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma6 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma7 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma8 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma9 = torch.nn.Parameter(torch.Tensor([0]))

    def forward(self, y, input_mask=None):
        """y: [bs, 256, 310] measurement -> [bs, 28, 256, 256] cube.

        input_mask: (Phi, Phi_s); random CUDA placeholders when absent.
        """
        if input_mask is None:
            Phi = torch.rand((1, 28, 256, 310)).cuda()
            Phi_s = torch.rand((1, 256, 310)).cuda()
        else:
            Phi, Phi_s = input_mask
        theta = At(y, Phi)          # initial estimate
        b = torch.zeros_like(Phi)   # scaled dual variable
        stages = ((self.unet1, self.gamma1), (self.unet2, self.gamma2),
                  (self.unet3, self.gamma3), (self.unet4, self.gamma4),
                  (self.unet5, self.gamma5), (self.unet6, self.gamma6),
                  (self.unet7, self.gamma7), (self.unet8, self.gamma8),
                  (self.unet9, self.gamma9))
        # Loop replaces the previous hand-unrolled nine stages; the returned
        # value is identical (the old x_list intermediates were unused, and
        # the trailing dual update after the last stage does not affect theta).
        for unet, gamma in stages:
            yb = A(theta + b, Phi)
            # x-update: closed-form solve of the quadratic data term.
            x = theta + b + At(torch.div(y - yb, Phi_s + gamma), Phi)
            x1 = shift_back_3d(x - b)      # un-shear before denoising
            theta = shift_3d(unet(x1))     # theta-update: learned prior
            b = b - (x - theta)            # dual update
        return theta[:, :, :, 0:256]
| 6,191 | 29.653465 | 81 | py |
MST | MST-main/real/test_code/architecture/TSA_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
_NORM_BONE = False
def conv_block(in_planes, out_planes, the_kernel=3, the_stride=1, the_padding=1, flag_norm=False, flag_norm_act=True):
    """Conv2d followed by ReLU, with an optional BatchNorm whose position
    (before or after the activation) is chosen by `flag_norm_act`."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=the_kernel, stride=the_stride, padding=the_padding)
    act = nn.ReLU(inplace=True)
    if not flag_norm:
        return nn.Sequential(conv, act)
    norm = nn.BatchNorm2d(out_planes)
    layers = (conv, norm, act) if flag_norm_act else (conv, act, norm)
    return nn.Sequential(*layers)
def conv1x1_block(in_planes, out_planes, flag_norm=False):
    """Bias-free 1x1 convolution, optionally followed by BatchNorm."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
    if flag_norm:
        return nn.Sequential(conv, nn.BatchNorm2d(out_planes))
    return conv
def fully_block(in_dim, out_dim, flag_norm=False, flag_norm_act=True):
    """Linear layer followed by ReLU, with optional BatchNorm.

    NOTE(review): the norm branch uses BatchNorm2d on a Linear output, which
    only makes sense for 4-D activations; it appears unused here (_NORM_FC is
    False in this file) — confirm before enabling flag_norm.
    """
    fc = nn.Linear(in_dim, out_dim)
    act = nn.ReLU(inplace=True)
    if not flag_norm:
        return nn.Sequential(fc, act)
    norm = nn.BatchNorm2d(out_dim)
    return nn.Sequential(fc, norm, act) if flag_norm_act else nn.Sequential(fc, act, norm)
class Res2Net(nn.Module):
    """Res2Net-style block: split channels into `scale` groups and process
    them hierarchically, so later groups see the accumulated features of
    earlier ones (multi-scale receptive field within one block).
    """
    def __init__(self, inChannel, uPlane, scale=4):
        super(Res2Net, self).__init__()
        self.uPlane = uPlane
        self.scale = scale
        self.conv_init = nn.Conv2d(inChannel, uPlane * scale, kernel_size=1, bias=False)
        self.bn_init = nn.BatchNorm2d(uPlane * scale)
        # One 3x3 conv + BN per group except the last (identity pass-through).
        convs = []
        bns = []
        for i in range(self.scale - 1):
            convs.append(nn.Conv2d(self.uPlane, self.uPlane, kernel_size=3, stride=1, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(self.uPlane))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv_end = nn.Conv2d(uPlane * scale, inChannel, kernel_size=1, bias=False)
        self.bn_end = nn.BatchNorm2d(inChannel)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.conv_init(x)
        out = self.bn_init(out)
        out = self.relu(out)
        # Split into `scale` groups of uPlane channels each.
        spx = torch.split(out, self.uPlane, 1)
        for i in range(self.scale - 1):
            if i == 0:
                sp = spx[i]
            else:
                sp = sp + spx[i]  # accumulate previous group's output
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # Last group bypasses the conv hierarchy unchanged.
        out = torch.cat((out, spx[self.scale - 1]), 1)
        out = self.conv_end(out)
        out = self.bn_end(out)
        return out
_NORM_ATTN = True
_NORM_FC = False
class TSA_Transform(nn.Module):
    """ Spectral-Spatial Self-Attention """
    def __init__(self, uSpace, inChannel, outChannel, nHead, uAttn, mode=[0, 1], flag_mask=False, gamma_learn=False):
        super(TSA_Transform, self).__init__()
        ''' ------------------------------------------
        uSpace:
            uHeight: the [-2] dim of the 3D tensor
            uWidth: the [-1] dim of the 3D tensor
        inChannel:
            the number of Channel of the input tensor
        outChannel:
            the number of Channel of the output tensor
        nHead:
            the number of Head of the input tensor
        uAttn:
            uSpatial: the dim of the spatial features
            uSpectral: the dim of the spectral features
        mask:
            The Spectral Smoothness Mask
        {mode} and {gamma_learn} is just for variable selection
        ------------------------------------------ '''
        self.nHead = nHead
        self.uAttn = uAttn
        self.outChannel = outChannel
        # Fixed (non-trainable) scaling denominators for the attention logits.
        self.uSpatial = nn.Parameter(torch.tensor(float(uAttn[0])), requires_grad=False)
        self.uSpectral = nn.Parameter(torch.tensor(float(uAttn[1])), requires_grad=False)
        self.mask = nn.Parameter(Spectral_Mask(outChannel), requires_grad=False) if flag_mask else None
        self.attn_scale = nn.Parameter(torch.tensor(1.1), requires_grad=False) if flag_mask else None
        self.gamma = nn.Parameter(torch.tensor(1.0), requires_grad=gamma_learn)
        # Optional strided down-/up-sampling around the value aggregation,
        # one 2x step per non-zero entry of `mode`.
        if sum(mode) > 0:
            down_sample = []
            scale = 1
            cur_channel = outChannel
            for i in range(sum(mode)):
                scale *= 2
                down_sample.append(conv_block(cur_channel, 2 * cur_channel, 3, 2, 1, _NORM_ATTN))
                cur_channel = 2 * cur_channel
            self.cur_channel = cur_channel
            self.down_sample = nn.Sequential(*down_sample)
            self.up_sample = nn.ConvTranspose2d(outChannel * scale, outChannel, scale, scale)
        else:
            self.down_sample = None
            self.up_sample = None
        spec_dim = int(uSpace[0] / 4 - 3) * int(uSpace[1] / 4 - 3)
        self.preproc = conv1x1_block(inChannel, outChannel, _NORM_ATTN)
        # Separate query/key extractors per attention axis (x, y, lambda).
        self.query_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.query_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.query_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.key_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.key_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.key_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.value = conv1x1_block(outChannel, nHead * outChannel, _NORM_ATTN)
        self.aggregation = nn.Linear(nHead * outChannel, outChannel)
    def forward(self, image):
        feat = self.preproc(image)
        feat_qx = self.query_x(feat, 'X')
        feat_qy = self.query_y(feat, 'Y')
        feat_qlambda = self.query_lambda(feat)
        feat_kx = self.key_x(feat, 'X')
        feat_ky = self.key_y(feat, 'Y')
        feat_klambda = self.key_lambda(feat)
        feat_value = self.value(feat)
        # Fold the head dimension into the batch dimension for bmm.
        feat_qx = torch.cat(torch.split(feat_qx, 1, dim=1)).squeeze(dim=1)
        feat_qy = torch.cat(torch.split(feat_qy, 1, dim=1)).squeeze(dim=1)
        feat_kx = torch.cat(torch.split(feat_kx, 1, dim=1)).squeeze(dim=1)
        feat_ky = torch.cat(torch.split(feat_ky, 1, dim=1)).squeeze(dim=1)
        feat_qlambda = torch.cat(torch.split(feat_qlambda, self.uAttn[1], dim=-1))
        feat_klambda = torch.cat(torch.split(feat_klambda, self.uAttn[1], dim=-1))
        feat_value = torch.cat(torch.split(feat_value, self.outChannel, dim=1))
        # Scaled dot-product attention maps along each axis.
        energy_x = torch.bmm(feat_qx, feat_kx.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_y = torch.bmm(feat_qy, feat_ky.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_lambda = torch.bmm(feat_qlambda, feat_klambda.permute(0, 2, 1)) / torch.sqrt(self.uSpectral)
        attn_x = F.softmax(energy_x, dim=-1)
        attn_y = F.softmax(energy_y, dim=-1)
        attn_lambda = F.softmax(energy_lambda, dim=-1)
        if self.mask is not None:
            # Blend in the fixed spectral-smoothness prior.
            attn_lambda = (attn_lambda + self.mask) / torch.sqrt(self.attn_scale)
        pro_feat = feat_value if self.down_sample is None else self.down_sample(feat_value)
        batchhead, dim_c, dim_x, dim_y = pro_feat.size()
        attn_x_repeat = attn_x.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_x, dim_x)
        attn_y_repeat = attn_y.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_y, dim_y)
        # Apply the y- then x-attention to every channel.
        pro_feat = pro_feat.view(-1, dim_x, dim_y)
        pro_feat = torch.bmm(pro_feat, attn_y_repeat.permute(0, 2, 1))
        pro_feat = torch.bmm(pro_feat.permute(0, 2, 1), attn_x_repeat.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, dim_c, dim_x, dim_y)
        if self.up_sample is not None:
            pro_feat = self.up_sample(pro_feat)
            _, _, dim_x, dim_y = pro_feat.size()
        # Apply the spectral attention across channels.
        pro_feat = pro_feat.contiguous().view(batchhead, self.outChannel, -1).permute(0, 2, 1)
        pro_feat = torch.bmm(pro_feat, attn_lambda.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, self.outChannel, dim_x, dim_y)
        # Un-fold heads from the batch dimension and aggregate them.
        pro_feat = torch.cat(torch.split(pro_feat, int(batchhead / self.nHead), dim=0), dim=1).permute(0, 2, 3, 1)
        pro_feat = self.aggregation(pro_feat).permute(0, 3, 1, 2)
        out = self.gamma * pro_feat + feat  # residual blend
        return out, (attn_x, attn_y, attn_lambda)
class Feature_Spatial(nn.Module):
    """ Spatial Feature Generation Component

    Two strided convolutions (kernel/stride/padding selected per-axis via
    `mode`) followed by a fully-connected projection. For direction 'Y' the
    input is transposed so the same pipeline serves both spatial axes.
    """
    def __init__(self, inChannel, nHead, shiftDim, outDim, mode):
        super(Feature_Spatial, self).__init__()
        kernel = [(1, 5), (3, 5)]
        stride = [(1, 2), (2, 2)]
        padding = [(0, 2), (1, 2)]
        self.conv1 = conv_block(inChannel, nHead, kernel[mode[0]], stride[mode[0]], padding[mode[0]], _NORM_ATTN)
        self.conv2 = conv_block(nHead, nHead, kernel[mode[1]], stride[mode[1]], padding[mode[1]], _NORM_ATTN)
        self.fully = fully_block(shiftDim, outDim, _NORM_FC)
    def forward(self, image, direction):
        # direction: 'X' or 'Y' — transpose H/W for the Y axis.
        if direction == 'Y':
            image = image.permute(0, 1, 3, 2)
        feat = self.conv1(image)
        feat = self.conv2(feat)
        feat = self.fully(feat)
        return feat
class Feature_Spectral(nn.Module):
    """ Spectral Feature Generation Component

    Two channel-preserving strided convolutions, flatten per channel, then a
    fully-connected projection to nHead * outDim spectral features.
    """
    def __init__(self, inChannel, nHead, viewDim, outDim):
        super(Feature_Spectral, self).__init__()
        self.inChannel = inChannel
        self.conv1 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.conv2 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.fully = fully_block(viewDim, int(nHead * outDim), _NORM_FC)
    def forward(self, image):
        bs = image.size(0)
        feat = self.conv1(image)
        feat = self.conv2(feat)
        feat = feat.view(bs, self.inChannel, -1)  # flatten spatial dims per channel
        feat = self.fully(feat)
        return feat
def Spectral_Mask(dim_lambda):
    '''After put the available data into the model, we use this mask to avoid outputting the estimation of itself.

    Builds a [1, dim_lambda, dim_lambda] Toeplitz matrix from a raised-cosine
    profile: 1.0 on the diagonal, decaying to 0.0 at the farthest band.
    '''
    profile = (np.cos(np.pi * np.linspace(-1, 1, num=2 * dim_lambda - 1)) + 1.0) / 2.0
    rows = [profile[dim_lambda - 1 - i:2 * dim_lambda - 1 - i] for i in range(dim_lambda)]
    att = np.stack(rows, axis=0)
    return torch.from_numpy(att.astype(np.float32)).unsqueeze(0)
class TSA_Net(nn.Module):
    """TSA-Net: U-shaped encoder/decoder with spectral-spatial self-attention
    (TSA_Transform) inserted at three decoder scales.

    Input/output are 28-band cubes; the final Sigmoid bounds outputs to (0, 1).
    """
    def __init__(self, in_ch=28, out_ch=28):
        super(TSA_Net, self).__init__()
        self.tconv_down1 = Encoder_Triblock(in_ch, 64, False)
        self.tconv_down2 = Encoder_Triblock(64, 128, False)
        self.tconv_down3 = Encoder_Triblock(128, 256)
        self.tconv_down4 = Encoder_Triblock(256, 512)
        self.bottom1 = conv_block(512, 1024)
        self.bottom2 = conv_block(1024, 1024)
        self.tconv_up4 = Decoder_Triblock(1024, 512)
        self.tconv_up3 = Decoder_Triblock(512, 256)
        self.transform3 = TSA_Transform((64, 64), 256, 256, 8, (64, 80), [0, 0])
        self.tconv_up2 = Decoder_Triblock(256, 128)
        self.transform2 = TSA_Transform((128, 128), 128, 128, 8, (64, 40), [1, 0])
        self.tconv_up1 = Decoder_Triblock(128, 64)
        # Final transform also projects 64 -> 28 channels and enables the
        # spectral-smoothness mask.
        self.transform1 = TSA_Transform((256, 256), 64, 28, 8, (48, 30), [1, 1], True)
        self.conv_last = nn.Conv2d(out_ch, out_ch, 1)
        self.afn_last = nn.Sigmoid()
    def forward(self, x, input_mask=None):
        # input_mask is accepted for interface uniformity but unused here.
        enc1, enc1_pre = self.tconv_down1(x)
        enc2, enc2_pre = self.tconv_down2(enc1)
        enc3, enc3_pre = self.tconv_down3(enc2)
        enc4, enc4_pre = self.tconv_down4(enc3)
        # enc5,enc5_pre = self.tconv_down5(enc4)
        bottom = self.bottom1(enc4)
        bottom = self.bottom2(bottom)
        # dec5 = self.tconv_up5(bottom,enc5_pre)
        dec4 = self.tconv_up4(bottom, enc4_pre)
        dec3 = self.tconv_up3(dec4, enc3_pre)
        dec3, _ = self.transform3(dec3)  # attention maps are discarded
        dec2 = self.tconv_up2(dec3, enc2_pre)
        dec2, _ = self.transform2(dec2)
        dec1 = self.tconv_up1(dec2, enc1_pre)
        dec1, _ = self.transform1(dec1)
        dec1 = self.conv_last(dec1)
        output = self.afn_last(dec1)
        return output
class Encoder_Triblock(nn.Module):
    """Encoder stage: conv -> (Res2Net | conv) -> optional 2x max-pool.

    Returns (pooled_features, pre_pool_features); the latter feeds the
    decoder's skip connection.
    """
    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Encoder_Triblock, self).__init__()
        self.layer1 = conv_block(inChannel, outChannel, nKernal, flag_norm=_NORM_BONE)
        self.layer2 = (Res2Net(outChannel, int(outChannel / 4)) if flag_res
                       else conv_block(outChannel, outChannel, nKernal, flag_norm=_NORM_BONE))
        self.pool = nn.MaxPool2d(nPool) if flag_Pool else None
    def forward(self, x):
        feat = self.layer2(self.layer1(x))
        pooled = feat if self.pool is None else self.pool(feat)
        return pooled, feat
class Decoder_Triblock(nn.Module):
    """Decoder stage: 2x transposed-conv upsampling, concat with the encoder
    skip feature, then (Res2Net | conv) and a channel-reducing conv."""
    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Decoder_Triblock, self).__init__()
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(inChannel, outChannel, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        if flag_res:
            self.layer2 = Res2Net(int(outChannel * 2), int(outChannel / 2))
        else:
            self.layer2 = conv_block(outChannel * 2, outChannel * 2, nKernal, flag_norm=_NORM_BONE)
        self.layer3 = conv_block(outChannel * 2, outChannel, nKernal, flag_norm=_NORM_BONE)
    def forward(self, feat_dec, feat_enc):
        feat_dec = self.layer1(feat_dec)
        # Pad the upsampled decoder feature if its size drifted from the
        # encoder skip feature (odd input sizes).
        diffY = feat_enc.size()[2] - feat_dec.size()[2]
        diffX = feat_enc.size()[3] - feat_dec.size()[3]
        if diffY != 0 or diffX != 0:
            print('Padding for size mismatch ( Enc:', feat_enc.size(), 'Dec:', feat_dec.size(), ')')
            feat_dec = F.pad(feat_dec, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
        feat = torch.cat([feat_dec, feat_enc], dim=1)
        feat = self.layer2(feat)
        feat = self.layer3(feat)
        return feat
MST | MST-main/real/test_code/architecture/__init__.py | import torch
from .MST import MST
from .GAP_Net import GAP_net
from .ADMM_Net import ADMM_net
from .TSA_Net import TSA_Net
from .HDNet import HDNet, FDL
from .DGSMP import HSI_CS
from .BIRNAT import BIRNAT
from .MST_Plus_Plus import MST_Plus_Plus
from .Lambda_Net import Lambda_Net
from .CST import CST
from .DAUHST import DAUHST
def model_generator(method, pretrained_model_path=None):
    """Build a CASSI reconstruction model by name and optionally load weights.

    method: one of the supported method keys ('mst_s', 'gap_net', ...,
    'dauhst_<k>stg', ...). pretrained_model_path: checkpoint to load
    (module.-prefixed keys are stripped).
    Returns the model; for 'hdnet' returns (model, fdl_loss).
    Raises ValueError for an unknown method.
    """
    if method == 'mst_s':
        model = MST(dim=28, stage=2, num_blocks=[2, 2, 2]).cuda()
    elif method == 'mst_m':
        model = MST(dim=28, stage=2, num_blocks=[2, 4, 4]).cuda()
    elif method == 'mst_l':
        model = MST(dim=28, stage=2, num_blocks=[4, 7, 5]).cuda()
    elif method == 'gap_net':
        model = GAP_net().cuda()
    elif method == 'admm_net':
        model = ADMM_net().cuda()
    elif method == 'tsa_net':
        model = TSA_Net().cuda()
    elif method == 'hdnet':
        model = HDNet().cuda()
        fdl_loss = FDL(loss_weight=0.7,
                       alpha=2.0,
                       patch_factor=4,
                       ave_spectrum=True,
                       log_matrix=True,
                       batch_matrix=True,
                       ).cuda()
    elif method == 'dgsmp':
        model = HSI_CS(Ch=28, stages=4).cuda()
    elif method == 'birnat':
        model = BIRNAT().cuda()
    elif method == 'mst_plus_plus':
        model = MST_Plus_Plus(in_channels=28, out_channels=28, n_feat=28, stage=3).cuda()
    elif method == 'lambda_net':
        model = Lambda_Net(out_ch=28).cuda()
    elif method == 'cst_s':
        model = CST(num_blocks=[1, 1, 2], sparse=True).cuda()
    elif method == 'cst_m':
        model = CST(num_blocks=[2, 2, 2], sparse=True).cuda()
    elif method == 'cst_l':
        model = CST(num_blocks=[2, 4, 6], sparse=True).cuda()
    elif method == 'cst_l_plus':
        model = CST(num_blocks=[2, 4, 6], sparse=False).cuda()
    elif 'dauhst' in method:
        num_iterations = int(method.split('_')[1][0])
        model = DAUHST(num_iterations=num_iterations).cuda()
    else:
        # Fail fast. Previously this only printed a message and then crashed
        # with NameError on the unbound `model` below.
        raise ValueError(f'Method {method} is not defined !!!!')
    if pretrained_model_path is not None:
        print(f'load model from {pretrained_model_path}')
        checkpoint = torch.load(pretrained_model_path)
        model.load_state_dict({k.replace('module.', ''): v for k, v in checkpoint.items()},
                              strict=True)
    if method == 'hdnet':
        return model, fdl_loss
    return model
MST | MST-main/real/test_code/architecture/HDNet.py | import torch
import torch.nn as nn
import math
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Same-padding 2-D convolution (padding = kernel_size // 2)."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, bias=bias)
class MeanShift(nn.Conv2d):
    """Frozen 1x1 conv that subtracts (sign=-1) or adds (sign=+1) the
    per-channel RGB mean, after scaling by 1/std."""
    def __init__(
            self, rgb_range,
            rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std_t = torch.Tensor(rgb_std)
        # Diagonal weight implements a per-channel divide by std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std_t.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std_t
        # Fixed normalization layer: never trained.
        self.requires_grad_(False)
class BasicBlock(nn.Sequential):
    """Conv (via the supplied `conv` factory) -> optional BatchNorm -> optional act.

    NOTE(review): `stride` is accepted but never forwarded to `conv` — looks
    unused; confirm before relying on it.
    """
    def __init__(
            self, conv, in_channels, out_channels, kernel_size, stride=1, bias=False,
            bn=True, act=nn.ReLU(True)):
        layers = [conv(in_channels, out_channels, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """Residual block: two convs (activation after the first only), the
    residual scaled by `res_scale` before the skip addition."""
    def __init__(
            self, conv, n_feats, kernel_size,
            bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for idx in range(2):
            layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feats))
            if idx == 0:
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        # res_scale damps the residual branch (common trick to stabilize
        # very deep EDSR-style networks).
        residual = self.body(x).mul(self.res_scale)
        return residual + x
class Upsampler(nn.Sequential):
    """PixelShuffle upsampler supporting scale 2^n or 3 (others unsupported)."""

    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        def stage(factor):
            # One channel-expanding conv followed by a pixel shuffle.
            part = [conv(n_feats, factor * factor * n_feats, 3, bias),
                    nn.PixelShuffle(factor)]
            if bn:
                part.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                part.append(nn.ReLU(True))
            elif act == 'prelu':
                part.append(nn.PReLU(n_feats))
            return part

        modules = []
        if (scale & (scale - 1)) == 0:  # power of two: chain x2 stages
            for _ in range(int(math.log(scale, 2))):
                modules += stage(2)
        elif scale == 3:
            modules += stage(3)
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*modules)
_NORM_BONE = False
def constant_init(module, val, bias=0):
    """Fill module.weight with `val` and module.bias with `bias`, if present."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialise module.weight (uniform or normal) and set the bias.

    :param distribution: 'uniform' or 'normal' (asserted).
    """
    assert distribution in ['uniform', 'normal']
    init_fn = (nn.init.kaiming_uniform_ if distribution == 'uniform'
               else nn.init.kaiming_normal_)
    init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
# depthwise-separable convolution (DSC)
class DSC(nn.Module):
    """Depthwise-separable attention branch.

    Builds a single-channel spatial map (depthwise 1x1 -> BN/ReLU -> maxpool
    -> pointwise 1x1 -> BN/ReLU), softmax-normalises it over all spatial
    positions, then uses it to re-weight the input with a residual add.
    """
    def __init__(self, nin: int) -> None:
        super(DSC, self).__init__()
        # Depthwise 1x1: per-channel scaling (groups=nin).
        self.conv_dws = nn.Conv2d(
            nin, nin, kernel_size=1, stride=1, padding=0, groups=nin
        )
        self.bn_dws = nn.BatchNorm2d(nin, momentum=0.9)
        self.relu_dws = nn.ReLU(inplace=False)
        # 3x3 max-pool with padding 1 keeps the spatial size.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # Pointwise 1x1: collapse all channels into one attention map.
        self.conv_point = nn.Conv2d(
            nin, 1, kernel_size=1, stride=1, padding=0, groups=1
        )
        self.bn_point = nn.BatchNorm2d(1, momentum=0.9)
        self.relu_point = nn.ReLU(inplace=False)
        # Softmax over the flattened spatial dimension (dim=2 after view).
        self.softmax = nn.Softmax(dim=2)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.conv_dws(x)
        out = self.bn_dws(out)
        out = self.relu_dws(out)
        out = self.maxpool(out)
        out = self.conv_point(out)
        out = self.bn_point(out)
        out = self.relu_point(out)
        m, n, p, q = out.shape  # n == 1 after the pointwise conv
        out = self.softmax(out.view(m, n, -1))  # normalise over all positions
        out = out.view(m, n, p, q)
        # Broadcast the single-channel map across every input channel.
        out = out.expand(x.shape[0], x.shape[1], x.shape[2], x.shape[3])
        out = torch.mul(out, x)
        # Residual connection keeps the original signal.
        out = out + x
        return out
# Efficient Feature Fusion(EFF)
class EFF(nn.Module):
    """Efficient Feature Fusion: split channels into groups, apply one DSC
    attention branch per group, then concatenate the results back."""

    def __init__(self, nin: int, nout: int, num_splits: int) -> None:
        super(EFF, self).__init__()
        assert nin % num_splits == 0
        self.nin = nin
        self.nout = nout
        self.num_splits = num_splits
        split_width = int(self.nin / self.num_splits)
        self.subspaces = nn.ModuleList(
            [DSC(split_width) for _ in range(self.num_splits)]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        chunks = torch.chunk(x, self.num_splits, dim=1)
        fused = [branch(chunk) for branch, chunk in zip(self.subspaces, chunks)]
        return torch.cat(fused, dim=1)
# spatial-spectral domain attention learning(SDL)
class SDL_attention(nn.Module):
    """Spatial-spectral domain attention (PSA-style, parallel arrangement).

    Two branches run in parallel and are summed:
    - spatial_attention: builds a per-channel sigmoid mask from a
      softmax-pooled spatial context (conv_q_right / conv_v_right / conv_up);
    - spectral_attention: builds a per-position sigmoid mask from a
      channel-pooled context (conv_q_left / conv_v_left).
    """
    def __init__(self, inplanes, planes, kernel_size=1, stride=1):
        super(SDL_attention, self).__init__()
        self.inplanes = inplanes
        self.inter_planes = planes // 2  # reduced channel width for attention
        self.planes = planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = (kernel_size-1)//2
        self.conv_q_right = nn.Conv2d(self.inplanes, 1, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_v_right = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_up = nn.Conv2d(self.inter_planes, self.planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.softmax_right = nn.Softmax(dim=2)
        self.sigmoid = nn.Sigmoid()
        self.conv_q_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #g
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_v_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #theta
        self.softmax_left = nn.Softmax(dim=2)
        self.reset_parameters()
    def reset_parameters(self):
        # fan_in Kaiming init for all four projection convs.
        kaiming_init(self.conv_q_right, mode='fan_in')
        kaiming_init(self.conv_v_right, mode='fan_in')
        kaiming_init(self.conv_q_left, mode='fan_in')
        kaiming_init(self.conv_v_left, mode='fan_in')
        self.conv_q_right.inited = True
        self.conv_v_right.inited = True
        self.conv_q_left.inited = True
        self.conv_v_left.inited = True
    # HR spatial attention
    def spatial_attention(self, x):
        """Channel re-weighting from a softmax-pooled spatial context."""
        input_x = self.conv_v_right(x)
        batch, channel, height, width = input_x.size()
        input_x = input_x.view(batch, channel, height * width)
        context_mask = self.conv_q_right(x)  # one spatial score map
        context_mask = context_mask.view(batch, 1, height * width)
        context_mask = self.softmax_right(context_mask)
        # Weighted sum over positions -> one context vector per image.
        context = torch.matmul(input_x, context_mask.transpose(1,2))
        context = context.unsqueeze(-1)
        context = self.conv_up(context)  # project back to `planes` channels
        mask_ch = self.sigmoid(context)
        out = x * mask_ch
        return out
    # HR spectral attention
    def spectral_attention(self, x):
        """Per-position re-weighting from a channel-pooled context."""
        g_x = self.conv_q_left(x)
        batch, channel, height, width = g_x.size()
        avg_x = self.avg_pool(g_x)  # global average over space
        batch, channel, avg_x_h, avg_x_w = avg_x.size()
        avg_x = avg_x.view(batch, channel, avg_x_h * avg_x_w).permute(0, 2, 1)
        theta_x = self.conv_v_left(x).view(batch, self.inter_planes, height * width)
        # (1 x C) @ (C x HW) -> one score per position.
        context = torch.matmul(avg_x, theta_x)
        context = self.softmax_left(context)
        context = context.view(batch, 1, height, width)
        mask_sp = self.sigmoid(context)
        out = x * mask_sp
        return out
    def forward(self, x):
        # Parallel branches, fused by addition.
        context_spectral = self.spectral_attention(x)
        context_spatial = self.spatial_attention(x)
        out = context_spatial + context_spectral
        return out
class HDNet(nn.Module):
    """HDNet backbone: EDSR-style residual trunk with SDL attention and EFF
    fusion inserted in the middle of the body, plus a global skip connection.

    :param in_ch: input spectral bands (28 for simulated CASSI)
    :param out_ch: output spectral bands
    :param conv: conv layer factory (defaults to the 'same'-padded default_conv)
    """
    def __init__(self, in_ch=28, out_ch=28, conv=default_conv):
        super(HDNet, self).__init__()
        n_resblocks = 16
        n_feats = 64
        kernel_size = 3
        act = nn.ReLU(True)
        # define head module
        m_head = [conv(in_ch, n_feats, kernel_size)]
        # define body module: 16 residual blocks, then attention + fusion,
        # then 15 more residual blocks and a closing conv.
        m_body = [
            ResBlock(
                conv, n_feats, kernel_size, act=act, res_scale= 1
            ) for _ in range(n_resblocks)
        ]
        m_body.append(SDL_attention(inplanes = n_feats, planes = n_feats))
        m_body.append(EFF(nin=n_feats, nout=n_feats, num_splits=4))
        for i in range(1, n_resblocks):
            m_body.append(ResBlock(
                conv, n_feats, kernel_size, act=act, res_scale= 1
            ))
        m_body.append(conv(n_feats, n_feats, kernel_size))
        m_tail = [conv(n_feats, out_ch, kernel_size)]
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
    def forward(self, x, input_mask=None):
        # `input_mask` is accepted for interface parity with other models
        # in this repo but is not used by HDNet.
        x = self.head(x)
        res = self.body(x)
        res += x  # global residual connection
        x = self.tail(res)
        return x
# frequency domain learning(FDL)
class FDL(nn.Module):
    """Frequency-domain loss: weighted squared error on the 2-D FFT of patches.

    The per-frequency squared error is re-weighted by a detached matrix built
    from the error magnitude itself (focal-frequency style), so hard
    frequencies contribute more.

    :param loss_weight: scalar multiplier on the final loss
    :param alpha: exponent applied to the error magnitude for the weight matrix
    :param patch_factor: split H and W into this many patches before the FFT
    :param ave_spectrum: average spectra over the batch before the loss
    :param log_matrix: log-compress the weight matrix
    :param batch_matrix: normalise the weight matrix by the global max instead
        of a per-image per-patch max
    """

    def __init__(self, loss_weight=1.0, alpha=1.0, patch_factor=1, ave_spectrum=False, log_matrix=False, batch_matrix=False):
        super(FDL, self).__init__()
        self.loss_weight = loss_weight
        self.alpha = alpha
        self.patch_factor = patch_factor
        self.ave_spectrum = ave_spectrum
        self.log_matrix = log_matrix
        self.batch_matrix = batch_matrix

    def tensor2freq(self, x):
        """Split x into patch_factor^2 patches; return their 2-D FFT as a
        [..., 2] tensor holding (real, imag)."""
        patch_factor = self.patch_factor
        _, _, h, w = x.shape
        assert h % patch_factor == 0 and w % patch_factor == 0, (
            'Patch factor should be divisible by image height and width')
        patch_list = []
        patch_h = h // patch_factor
        patch_w = w // patch_factor
        for i in range(patch_factor):
            for j in range(patch_factor):
                patch_list.append(x[:, :, i * patch_h:(i + 1) * patch_h, j * patch_w:(j + 1) * patch_w])
        y = torch.stack(patch_list, 1)
        # Bug fix: torch.rfft was removed in PyTorch 1.8. Use the documented
        # equivalent (orthonormal 2-D FFT viewed as real pairs) when the new
        # torch.fft module is available; fall back to torch.rfft on old builds
        # (on <1.8 torch.fft is a function, so getattr(..., 'fft2') is None).
        if hasattr(torch, 'fft') and callable(getattr(torch.fft, 'fft2', None)):
            return torch.view_as_real(torch.fft.fft2(y, dim=(-2, -1), norm='ortho'))
        return torch.rfft(y, 2, onesided=False, normalized=True)

    def loss_formulation(self, recon_freq, real_freq, matrix=None):
        """Weighted spectral MSE between reconstructed and real spectra.

        :param matrix: optional externally supplied weight matrix; when None,
            it is derived from the (detached) error magnitude.
        """
        if matrix is not None:
            weight_matrix = matrix.detach()
        else:
            matrix_tmp = (recon_freq - real_freq) ** 2
            # |error| ** alpha, combining real and imaginary parts.
            matrix_tmp = torch.sqrt(matrix_tmp[..., 0] + matrix_tmp[..., 1]) ** self.alpha
            if self.log_matrix:
                matrix_tmp = torch.log(matrix_tmp + 1.0)
            if self.batch_matrix:
                matrix_tmp = matrix_tmp / matrix_tmp.max()
            else:
                matrix_tmp = matrix_tmp / matrix_tmp.max(-1).values.max(-1).values[:, :, :, None, None]
            # 0/0 divisions above produce NaNs; treat them as zero weight.
            matrix_tmp[torch.isnan(matrix_tmp)] = 0.0
            matrix_tmp = torch.clamp(matrix_tmp, min=0.0, max=1.0)
            weight_matrix = matrix_tmp.clone().detach()
        assert weight_matrix.min().item() >= 0 and weight_matrix.max().item() <= 1, (
            'The values of spectrum weight matrix should be in the range [0, 1], '
            'but got Min: %.10f Max: %.10f' % (weight_matrix.min().item(), weight_matrix.max().item()))
        tmp = (recon_freq - real_freq) ** 2
        freq_distance = tmp[..., 0] + tmp[..., 1]
        loss = weight_matrix * freq_distance
        return torch.mean(loss)

    def forward(self, pred, target, matrix=None, **kwargs):
        """Return the weighted frequency loss between `pred` and `target`."""
        pred_freq = self.tensor2freq(pred)
        target_freq = self.tensor2freq(target)
        if self.ave_spectrum:
            pred_freq = torch.mean(pred_freq, 0, keepdim=True)
            target_freq = torch.mean(target_freq, 0, keepdim=True)
        return self.loss_formulation(pred_freq, target_freq, matrix) * self.loss_weight
| 12,665 | 33.048387 | 132 | py |
MST | MST-main/simulation/train_code/ssim_torch.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length `window_size`, normalised to sum 1."""
    center = window_size // 2
    values = [exp(-((x - center) ** 2) / float(2 * sigma ** 2)) for x in range(window_size)]
    kernel = torch.Tensor(values)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, ws, ws) separable Gaussian window for grouped conv."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """SSIM metric as a module; lazily rebuilds and caches the Gaussian
    window whenever the channel count or dtype of the input changes."""
    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        # Cache: window built for one channel initially, replaced on demand.
        self.channel = 1
        self.window = create_window(window_size, self.channel)
    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            # Rebuild the window to match the input's channels/device/dtype.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM between two (N, C, H, W) batches."""
    channel = img1.size(1)
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
| 2,621 | 32.615385 | 114 | py |
MST | MST-main/simulation/train_code/utils.py | import scipy.io as sio
import os
import numpy as np
import torch
import logging
import random
from ssim_torch import ssim
def generate_masks(mask_path, batch_size):
    """Load the 2-D coded mask and expand it to a (B, 28, H, W) CUDA batch."""
    mask = sio.loadmat(mask_path + '/mask.mat')['mask']
    mask3d = np.tile(mask[:, :, np.newaxis], (1, 1, 28))  # replicate over 28 bands
    mask3d = torch.from_numpy(np.transpose(mask3d, [2, 0, 1]))
    nC, H, W = mask3d.shape
    return mask3d.expand([batch_size, nC, H, W]).cuda().float()
def generate_shift_masks(mask_path, batch_size):
    """Load the pre-shifted 3-D mask; return (Phi, sum(Phi^2)) CUDA batches."""
    shift_mask = sio.loadmat(mask_path + '/mask_3d_shift.mat')['mask_3d_shift']
    shift_mask = torch.from_numpy(np.transpose(shift_mask, [2, 0, 1]))
    nC, H, W = shift_mask.shape
    Phi_batch = shift_mask.expand([batch_size, nC, H, W]).cuda().float()
    Phi_s_batch = torch.sum(Phi_batch ** 2, 1)
    Phi_s_batch[Phi_s_batch == 0] = 1  # avoid division by zero downstream
    return Phi_batch, Phi_s_batch
def LoadTraining(path):
    """Load training HSI cubes (scene number <= 205) from .mat files.

    Each file stores an HxWx28 cube under key 'img_expand' or 'img' in
    16-bit range; values are scaled to [0, 1) float32.

    :param path: directory with sceneXXX.mat files (joined by plain string
        concatenation, so it should end with '/')
    :return: list of float32 numpy arrays
    """
    imgs = []
    scene_list = os.listdir(path)
    scene_list.sort()
    print('training sences:', len(scene_list))
    for i in range(len(scene_list)):
        scene_path = path + scene_list[i]
        scene_num = int(scene_list[i].split('.')[0][5:])
        if scene_num <= 205:
            if 'mat' not in scene_path:
                continue
            img_dict = sio.loadmat(scene_path)
            if "img_expand" in img_dict:
                img = img_dict['img_expand'] / 65536.
            elif "img" in img_dict:
                img = img_dict['img'] / 65536.
            else:
                # Bug fix: a file with neither key previously reused the
                # previous iteration's `img` (duplicating a scene) or raised
                # NameError on the first file. Skip it explicitly instead.
                print('Scene {} skipped: no img/img_expand key.'.format(scene_list[i]))
                continue
            img = img.astype(np.float32)
            imgs.append(img)
            print('Sence {} is loaded. {}'.format(i, scene_list[i]))
    return imgs
def LoadTest(path_test):
    """Stack every test scene under `path_test` into a (N, 28, 256, 256) tensor."""
    scene_list = sorted(os.listdir(path_test))
    cube = np.zeros((len(scene_list), 256, 256, 28))
    for idx, name in enumerate(scene_list):
        cube[idx, :, :, :] = sio.loadmat(path_test + name)['img']
    return torch.from_numpy(np.transpose(cube, (0, 3, 1, 2)))
def LoadMeasurement(path_test_meas):
    """Load a simulated test measurement ('simulation_test') as a tensor."""
    meas = sio.loadmat(path_test_meas)['simulation_test']
    return torch.from_numpy(meas)
# We find that this calculation method is more close to DGSMP's.
def torch_psnr(img, ref):  # input [28,256,256]
    """Band-averaged PSNR after quantising both cubes to 0..256 levels.

    (Quantisation matches DGSMP's evaluation, per the original comment.)
    """
    img = (img * 256).round()
    ref = (ref * 256).round()
    nC = img.shape[0]
    total = 0
    for band in range(nC):
        mse = torch.mean((img[band, :, :] - ref[band, :, :]) ** 2)
        total += 10 * torch.log10((255 * 255) / mse)
    return total / nC
def torch_ssim(img, ref):  # input [28,256,256]
    """SSIM between two spectral cubes (adds the batch dim `ssim` expects)."""
    return ssim(img.unsqueeze(0), ref.unsqueeze(0))
def time2file_name(time):
    """Turn 'YYYY-MM-DD hh:mm:ss...' into a filesystem-safe 'YYYY_MM_DD_hh_mm_ss'."""
    pieces = (time[0:4], time[5:7], time[8:10], time[11:13], time[14:16], time[17:19])
    return '_'.join(pieces)
def shuffle_crop(train_data, batch_size, crop_size=256, argument=True):
    """Sample a training batch of random crops, optionally augmented.

    When `argument` is True, half the batch is plain random crops passed
    through flip/rotation augmentation, and the other half is spliced from
    four quarter-size crops drawn from random scenes.

    :param train_data: list of HxWx28 numpy arrays (sizes may differ per scene)
    :param batch_size: number of samples in the batch
    :param crop_size: spatial size of each returned sample
    :param argument: enable augmentation + splicing (returns a CUDA tensor);
        when False returns plain CPU crops (original behaviour preserved)
    :return: float tensor [batch_size, 28, crop_size, crop_size]
    """
    if argument:
        gt_batch = []
        # First half: plain random crops + flip/rotation augmentation.
        index = np.random.choice(range(len(train_data)), batch_size // 2)
        processed_data = np.zeros((batch_size // 2, crop_size, crop_size, 28), dtype=np.float32)
        for i in range(batch_size // 2):
            img = train_data[index[i]]
            h, w, _ = img.shape
            x_index = np.random.randint(0, h - crop_size)
            y_index = np.random.randint(0, w - crop_size)
            processed_data[i, :, :, :] = img[x_index:x_index + crop_size, y_index:y_index + crop_size, :]
        processed_data = torch.from_numpy(np.transpose(processed_data, (0, 3, 1, 2))).cuda().float()
        for i in range(processed_data.shape[0]):
            gt_batch.append(arguement_1(processed_data[i]))

        # Second half: splice four random quarter-size crops into one sample.
        processed_data = np.zeros((4, 128, 128, 28), dtype=np.float32)
        for i in range(batch_size - batch_size // 2):
            sample_list = np.random.randint(0, len(train_data), 4)
            for j in range(4):
                # Bug fix: use the sampled scene's own H/W. The original kept
                # h/w from the last image of the first loop, which crops out
                # of range (or crashes) when scenes differ in size.
                h, w, _ = train_data[sample_list[j]].shape
                x_index = np.random.randint(0, h - crop_size // 2)
                y_index = np.random.randint(0, w - crop_size // 2)
                processed_data[j] = train_data[sample_list[j]][x_index:x_index + crop_size // 2,
                                                              y_index:y_index + crop_size // 2, :]
            gt_batch_2 = torch.from_numpy(np.transpose(processed_data, (0, 3, 1, 2))).cuda()  # [4,28,128,128]
            gt_batch.append(arguement_2(gt_batch_2))

        gt_batch = torch.stack(gt_batch, dim=0)
        return gt_batch
    else:
        index = np.random.choice(range(len(train_data)), batch_size)
        processed_data = np.zeros((batch_size, crop_size, crop_size, 28), dtype=np.float32)
        for i in range(batch_size):
            h, w, _ = train_data[index[i]].shape
            x_index = np.random.randint(0, h - crop_size)
            y_index = np.random.randint(0, w - crop_size)
            processed_data[i, :, :, :] = train_data[index[i]][x_index:x_index + crop_size, y_index:y_index + crop_size, :]
        gt_batch = torch.from_numpy(np.transpose(processed_data, (0, 3, 1, 2)))
        return gt_batch
def arguement_1(x):
    """Randomly rotate (0-3 quarter turns) then randomly flip a cube.

    :param x: tensor [c, h, w]
    :return: augmented tensor [c, h, w]
    """
    # Draw all augmentation choices first (keeps the RNG call order stable).
    rotTimes = random.randint(0, 3)
    vFlip = random.randint(0, 1)
    hFlip = random.randint(0, 1)
    for _ in range(rotTimes):
        x = torch.rot90(x, dims=(1, 2))
    if vFlip:
        x = torch.flip(x, dims=(2,))
    if hFlip:
        x = torch.flip(x, dims=(1,))
    return x
def arguement_2(generate_gt):
    """Splice four (c, 128, 128) crops into one (c, 256, 256) cube (2x2 grid).

    :param generate_gt: tensor [4, c, 128, 128]
    :return: CUDA tensor [c, 256, 256]
    """
    c = generate_gt.shape[1]
    h = w = 256
    mid_h = mid_w = 128
    output_img = torch.zeros(c, h, w).cuda()
    output_img[:, :mid_h, :mid_w] = generate_gt[0]
    output_img[:, :mid_h, mid_w:] = generate_gt[1]
    output_img[:, mid_h:, :mid_w] = generate_gt[2]
    output_img[:, mid_h:, mid_w:] = generate_gt[3]
    return output_img
def gen_meas_torch(data_batch, mask3d_batch, Y2H=True, mul_mask=False):
    """Simulate a CASSI measurement from a ground-truth batch and its mask.

    :param Y2H: if True, convert the 2-D measurement back into a 3-D cube
    :param mul_mask: with Y2H, additionally modulate the cube by the mask
    """
    nC = data_batch.shape[1]
    meas = torch.sum(shift(mask3d_batch * data_batch, 2), 1)
    if not Y2H:
        return meas
    meas = meas / nC * 2  # energy normalisation
    H = shift_back(meas)
    if mul_mask:
        return torch.mul(H, mask3d_batch)
    return H
def shift(inputs, step=2):
    """Simulate dispersion: shift band i right by step*i, widening the output."""
    bs, nC, row, col = inputs.shape
    output = torch.zeros(bs, nC, row, col + (nC - 1) * step).cuda().float()
    for band in range(nC):
        output[:, band, :, step * band:step * band + col] = inputs[:, band, :, :]
    return output
def shift_back(inputs, step=2):  # input [bs,256,310]  output [bs, 28, 256, 256]
    """Undo dispersion: crop band i starting at offset step*i into a 28-band cube."""
    bs, row, col = inputs.shape
    nC = 28
    out_col = col - (nC - 1) * step
    output = torch.zeros(bs, nC, row, out_col).cuda().float()
    for band in range(nC):
        output[:, band, :, :] = inputs[:, :, step * band:step * band + out_col]
    return output
def gen_log(model_path):
    """Create the root logger writing to `model_path`/log.txt and to stdout.

    Bug fix: the original attached new file/stream handlers on every call, so
    calling it more than once duplicated every log line. Handlers are now only
    attached when the root logger has none.

    :param model_path: existing directory for log.txt
    :return: the configured root logger
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        formatter = logging.Formatter("%(asctime)s - %(levelname)s: %(message)s")
        log_file = model_path + '/log.txt'
        fh = logging.FileHandler(log_file, mode='a')
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(fh)
        logger.addHandler(ch)
    return logger
def init_mask(mask_path, mask_type, batch_size):
    """Build the 3-D mask batch plus the model-specific input mask.

    :param mask_type: 'Phi', 'Phi_PhiPhiT', 'Mask', or None
    :return: (mask3d_batch, input_mask)
    :raises ValueError: for an unrecognised mask_type (previously this fell
        through silently and crashed later with UnboundLocalError)
    """
    mask3d_batch = generate_masks(mask_path, batch_size)
    if mask_type == 'Phi':
        input_mask = shift(mask3d_batch)  # shifted sensing matrix
    elif mask_type == 'Phi_PhiPhiT':
        Phi_batch, Phi_s_batch = generate_shift_masks(mask_path, batch_size)
        input_mask = (Phi_batch, Phi_s_batch)
    elif mask_type == 'Mask':
        input_mask = mask3d_batch
    elif mask_type is None:  # fix: identity check instead of '== None'
        input_mask = None
    else:
        raise ValueError('Unknown mask_type: {}'.format(mask_type))
    return mask3d_batch, input_mask
def init_meas(gt, mask, input_setting):
    """Generate the network input measurement according to `input_setting`.

    :param input_setting: 'H' (shift-back cube), 'HM' (masked cube), or
        'Y' (raw 2-D measurement)
    :raises ValueError: for an unrecognised setting (previously this fell
        through silently and crashed with UnboundLocalError)
    """
    if input_setting == 'H':
        return gen_meas_torch(gt, mask, Y2H=True, mul_mask=False)
    if input_setting == 'HM':
        return gen_meas_torch(gt, mask, Y2H=True, mul_mask=True)
    if input_setting == 'Y':
        return gen_meas_torch(gt, mask, Y2H=False, mul_mask=True)
    raise ValueError('Unknown input_setting: {}'.format(input_setting))
def checkpoint(model, epoch, model_path, logger):
    """Save the model's state_dict for `epoch` under `model_path` and log it."""
    out_path = "{}/model_epoch_{}.pth".format(model_path, epoch)
    torch.save(model.state_dict(), out_path)
    logger.info("Checkpoint saved to {}".format(out_path))
| 8,889 | 36.510549 | 123 | py |
MST | MST-main/simulation/train_code/train.py | from architecture import *
from utils import *
import torch
import scipy.io as scio
import time
import os
import numpy as np
from torch.autograd import Variable
import datetime
from option import opt
import torch.nn.functional as F
# ---- script setup: GPU selection, data, output dirs, model, optimizer ----
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
if not torch.cuda.is_available():
    raise Exception('NO GPU!')

# init mask
mask3d_batch_train, input_mask_train = init_mask(opt.mask_path, opt.input_mask, opt.batch_size)
mask3d_batch_test, input_mask_test = init_mask(opt.mask_path, opt.input_mask, 10)

# dataset
train_set = LoadTraining(opt.data_path)
test_data = LoadTest(opt.test_path)

# saving path (timestamped run directory)
date_time = str(datetime.datetime.now())
date_time = time2file_name(date_time)
result_path = opt.outf + date_time + '/result/'
model_path = opt.outf + date_time + '/model/'
if not os.path.exists(result_path):
    os.makedirs(result_path)
if not os.path.exists(model_path):
    os.makedirs(model_path)

# model
if opt.method == 'hdnet':
    # Bug fix: for hdnet, model_generator returns a (model, loss) tuple and a
    # tuple has no .cuda(); the original `model_generator(...).cuda()` raised
    # AttributeError. The model is already moved to GPU inside
    # model_generator, so no extra .cuda() is needed here.
    model, FDL_loss = model_generator(opt.method, opt.pretrained_model_path)
else:
    model = model_generator(opt.method, opt.pretrained_model_path).cuda()

# optimizing
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate, betas=(0.9, 0.999))
if opt.scheduler == 'MultiStepLR':
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)
elif opt.scheduler == 'CosineAnnealingLR':
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, opt.max_epoch, eta_min=1e-6)
mse = torch.nn.MSELoss().cuda()
def train(epoch, logger):
    """Run one training epoch over `epoch_sam_num` randomly sampled batches.

    Uses module-level globals: model, optimizer, mse, train_set, opt,
    mask3d_batch_train, input_mask_train (and FDL_loss for hdnet).
    """
    epoch_loss = 0
    begin = time.time()
    batch_num = int(np.floor(opt.epoch_sam_num / opt.batch_size))
    for i in range(batch_num):
        gt_batch = shuffle_crop(train_set, opt.batch_size)
        gt = Variable(gt_batch).cuda().float()
        # Simulate the CASSI measurement that feeds the network.
        input_meas = init_meas(gt, mask3d_batch_train, opt.input_setting)
        optimizer.zero_grad()
        if opt.method in ['cst_s', 'cst_m', 'cst_l']:
            # CST variants also predict a sparsity map supervised by the
            # per-pixel reconstruction error.
            model_out, diff_pred = model(input_meas, input_mask_train)
            loss = torch.sqrt(mse(model_out, gt))
            diff_gt = torch.mean(torch.abs(model_out.detach() - gt),dim=1, keepdim=True)  # [b,1,h,w]
            loss_sparsity = F.mse_loss(diff_gt, diff_pred)
            loss = loss + 2 * loss_sparsity
        else:
            model_out = model(input_meas, input_mask_train)
            loss = torch.sqrt(mse(model_out, gt))  # RMSE reconstruction loss
            if opt.method=='hdnet':
                # HDNet adds a weighted frequency-domain loss term.
                fdl_loss = FDL_loss(model_out, gt)
                loss = loss + 0.7 * fdl_loss
        epoch_loss += loss.data
        loss.backward()
        optimizer.step()
    end = time.time()
    logger.info("===> Epoch {} Complete: Avg. Loss: {:.6f} time: {:.2f}".
                format(epoch, epoch_loss / batch_num, (end - begin)))
    return 0
def test(epoch, logger):
    """Evaluate the model on the 10-scene test set; return preds and metrics.

    Uses module-level globals: model, test_data, mask3d_batch_test,
    input_mask_test, opt.

    :return: (pred, truth, psnr_list, ssim_list, psnr_mean, ssim_mean)
    """
    psnr_list, ssim_list = [], []
    test_gt = test_data.cuda().float()
    input_meas = init_meas(test_gt, mask3d_batch_test, opt.input_setting)
    model.eval()
    begin = time.time()
    with torch.no_grad():
        if opt.method in ['cst_s', 'cst_m', 'cst_l']:
            # CST variants also return a sparsity map, discarded at test time.
            model_out, _ = model(input_meas, input_mask_test)
        else:
            model_out = model(input_meas, input_mask_test)
    end = time.time()
    # Per-scene metrics.
    for k in range(test_gt.shape[0]):
        psnr_val = torch_psnr(model_out[k, :, :, :], test_gt[k, :, :, :])
        ssim_val = torch_ssim(model_out[k, :, :, :], test_gt[k, :, :, :])
        psnr_list.append(psnr_val.detach().cpu().numpy())
        ssim_list.append(ssim_val.detach().cpu().numpy())
    # Convert to HxWxC numpy for saving to .mat.
    pred = np.transpose(model_out.detach().cpu().numpy(), (0, 2, 3, 1)).astype(np.float32)
    truth = np.transpose(test_gt.cpu().numpy(), (0, 2, 3, 1)).astype(np.float32)
    psnr_mean = np.mean(np.asarray(psnr_list))
    ssim_mean = np.mean(np.asarray(ssim_list))
    logger.info('===> Epoch {}: testing psnr = {:.2f}, ssim = {:.3f}, time: {:.2f}'
                .format(epoch, psnr_mean, ssim_mean,(end - begin)))
    model.train()
    return pred, truth, psnr_list, ssim_list, psnr_mean, ssim_mean
def main():
    """Full training loop: train + evaluate every epoch; save best results.

    Saves a .mat dump of predictions and a model checkpoint whenever the test
    PSNR reaches a new maximum above 28 dB.
    """
    logger = gen_log(model_path)
    logger.info("Learning rate:{}, batch_size:{}.\n".format(opt.learning_rate, opt.batch_size))
    psnr_max = 0
    for epoch in range(1, opt.max_epoch + 1):
        train(epoch, logger)
        (pred, truth, psnr_all, ssim_all, psnr_mean, ssim_mean) = test(epoch, logger)
        scheduler.step()
        if psnr_mean > psnr_max:
            psnr_max = psnr_mean
            # Only persist once the model is past a minimal quality bar.
            if psnr_mean > 28:
                name = result_path + '/' + 'Test_{}_{:.2f}_{:.3f}'.format(epoch, psnr_max, ssim_mean) + '.mat'
                scio.savemat(name, {'truth': truth, 'pred': pred, 'psnr_list': psnr_all, 'ssim_list': ssim_all})
                checkpoint(model, epoch, model_path, logger)
if __name__ == '__main__':
    # cudnn autotune: profitable here because input sizes are fixed.
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    main()
| 5,046 | 37.526718 | 112 | py |
MST | MST-main/simulation/train_code/architecture/MST_Plus_Plus.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Public wrapper around the no-grad truncated-normal initialiser."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Variance-scaling initialiser (JAX-style) over fan_in/fan_out/fan_avg."""
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    var = scale / denom
    if distribution == "truncated_normal":
        # Constant corrects the std of a [-2, 2]-truncated standard normal.
        trunc_normal_(tensor, std=math.sqrt(var) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(var))
    elif distribution == "uniform":
        limit = math.sqrt(3 * var)
        tensor.uniform_(-limit, limit)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: fan-in-scaled truncated normal."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Apply LayerNorm over the last dim before calling the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Module wrapper around the functional GELU activation."""
    def forward(self, x):
        return F.gelu(x)
def conv(in_channels, out_channels, kernel_size, bias=False, padding = 1, stride = 1):
    """'Same'-padded Conv2d helper.

    NOTE(review): the `padding` argument is accepted but ignored — the pad is
    always kernel_size//2, matching the original behaviour.
    """
    pad = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=pad, bias=bias, stride=stride)
def shift_back(inputs, step=2):  # input [bs,28,256,310]  output [bs, 28, 256, 256]
    """Undo spectral dispersion in-place, then crop to a square cube.

    The shift amount is rescaled when the input is spatially downsampled
    (step shrinks with 1/down_sample^2, as in the original).
    """
    bs, nC, row, col = inputs.shape
    down_sample = 256 // row
    step = float(step) / float(down_sample * down_sample)
    out_col = row
    for band in range(nC):
        inputs[:, band, :, :out_col] = \
            inputs[:, band, :, int(step * band):int(step * band) + out_col]
    return inputs[:, :, :, :out_col]
class MS_MSA(nn.Module):
    """Spectral-wise multi-head self-attention.

    Attention is computed as K^T @ Q over the channel dimension (a
    dim_head x dim_head matrix per head), so cost scales with channels rather
    than spatial size. A depthwise-conv branch on V supplies positional
    information.
    """
    def __init__(
            self,
            dim,
            dim_head,
            heads,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learnable per-head temperature for the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        # Depthwise-conv positional embedding applied to V.
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        self.dim = dim
    def forward(self, x_in):
        """
        x_in: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        x = x_in.reshape(b,h*w,c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                                (q_inp, k_inp, v_inp))
        v = v  # no masking/scaling applied to v
        # q: b,heads,hw,c
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        # L2-normalise along the spatial axis so logits are cosine-like.
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1))   # A = K^T*Q  (channel-wise attention)
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v   # b,heads,d,hw
        x = x.permute(0, 3, 1, 2)    # Transpose
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        # Positional branch: depthwise convs on V in channels-first layout.
        out_p = self.pos_emb(v_inp.reshape(b,h,w,c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck FFN: 1x1 expand -> depthwise 3x3 -> 1x1 project."""

    def __init__(self, dim, mult=4):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=False, groups=hidden),
            GELU(),
            nn.Conv2d(hidden, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """x: [b,h,w,c] -> [b,h,w,c]; convs run in channels-first layout."""
        return self.net(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """Stack of `num_blocks` (attention + pre-norm FFN) residual pairs.

    Input/output are channels-first; attention/FFN operate channels-last.
    """
    def __init__(
            self,
            dim,
            dim_head,
            heads,
            num_blocks,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            self.blocks.append(nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim))
            ]))
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        x = x.permute(0, 2, 3, 1)  # to channels-last for attention
        for (attn, ff) in self.blocks:
            x = attn(x) + x
            x = ff(x) + x
        out = x.permute(0, 3, 1, 2)  # back to channels-first
        return out
class MST(nn.Module):
    """Mask-guided Spectral-wise Transformer: U-shaped encoder/decoder built
    from MSAB blocks, with skip connections fused by 1x1 convs and a global
    input residual.

    :param in_dim/out_dim: spectral bands in/out (28 for CASSI)
    :param dim: base feature width (doubles at each encoder stage)
    :param stage: number of down/up-sampling stages
    :param num_blocks: MSAB depth per stage (last entry = bottleneck)
    """
    def __init__(self, in_dim=28, out_dim=28, dim=28, stage=2, num_blocks=[2,4,4]):
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage
        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)
        # Encoder: MSAB then strided-conv downsample; width doubles per stage.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])
        # Decoder: transpose-conv upsample, 1x1 fusion of the skip, then MSAB.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal for linear weights, standard LayerNorm init.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        # Embedding
        fea = self.embedding(x)
        # Encoder
        fea_encoder = []
        for (MSAB, FeaDownSample) in self.encoder_layers:
            fea = MSAB(fea)
            fea_encoder.append(fea)  # keep for the decoder skip connection
            fea = FeaDownSample(fea)
        # Bottleneck
        fea = self.bottleneck(fea)
        # Decoder
        for i, (FeaUpSample, Fution, LeWinBlcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            # Concatenate the matching encoder feature and fuse with a 1x1 conv.
            fea = Fution(torch.cat([fea, fea_encoder[self.stage-1-i]], dim=1))
            fea = LeWinBlcok(fea)
        # Mapping (global input residual)
        out = self.mapping(fea) + x
        return out
class MST_Plus_Plus(nn.Module):
    """Cascade of `stage` single-scale MST blocks with (y, Phi) input fusion.

    :param in_channels: channels of the fused network input
    :param out_channels: output spectral bands
    :param n_feat: feature width inside the body
    :param stage: number of cascaded MST blocks
    """
    def __init__(self, in_channels=3, out_channels=28, n_feat=28, stage=3):
        super(MST_Plus_Plus, self).__init__()
        self.stage = stage
        self.conv_in = nn.Conv2d(in_channels, n_feat, kernel_size=3, padding=(3 - 1) // 2,bias=False)
        modules_body = [MST(dim=n_feat, stage=2, num_blocks=[1,1,1]) for _ in range(stage)]
        # Fuses the 28-band shifted-back measurement with the 28-band mask.
        self.fution = nn.Conv2d(56, 28, 1, padding=0, bias=True)
        self.body = nn.Sequential(*modules_body)
        self.conv_out = nn.Conv2d(n_feat, out_channels, kernel_size=3, padding=(3 - 1) // 2,bias=False)
    def initial_x(self, y, Phi):
        """
        :param y: [b,256,310]
        :param Phi: [b,28,256,256]
        :return: z: [b,28,256,256]
        """
        x = self.fution(torch.cat([y, Phi], dim=1))
        return x
    def forward(self, y, Phi=None):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        # Fix: 'Phi == None' invokes tensor elementwise-comparison semantics
        # when Phi is a tensor; an identity check is the intended test.
        if Phi is None:
            Phi = torch.rand((1,28,256,256)).cuda()
        x = self.initial_x(y, Phi)
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad so H and W are multiples of 8 (U-net downsampling).
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        x = self.conv_in(x)
        h = self.body(x)
        h = self.conv_out(h)
        h += x  # global residual
        return h[:, :, :h_inp, :w_inp]
| 10,068 | 30.367601 | 116 | py |
MST | MST-main/simulation/train_code/architecture/DGSMP.py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class Resblock(nn.Module):
    """Two stacked conv-ReLU-conv residual units, each with an identity skip."""

    def __init__(self, HBW):
        super(Resblock, self).__init__()
        def unit():
            # One conv-ReLU-conv sub-block at constant width HBW.
            return nn.Sequential(
                nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1))
        self.block1 = unit()
        self.block2 = unit()

    def forward(self, x):
        out = x + self.block1(x)
        out = out + self.block2(out)
        return out
class Encoding(nn.Module):
    """U-Net encoder: five double-conv stages with 2x average pooling between them.

    Channel progression: 32 -> 32 -> 64 -> 64 -> 128 -> 128.
    Returns the feature map of every stage for the decoder's skip connections.
    """

    def __init__(self):
        super(Encoding, self).__init__()

        def _double_conv(c_in, c_out):
            # conv(c_in->c_out) + ReLU + conv(c_out->c_out) + ReLU
            return nn.Sequential(
                nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.Conv2d(in_channels=c_out, out_channels=c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU())

        self.E1 = _double_conv(32, 32)
        self.E2 = _double_conv(32, 64)
        self.E3 = _double_conv(64, 64)
        self.E4 = _double_conv(64, 128)
        self.E5 = _double_conv(128, 128)

    def forward(self, x):
        """Return the five encoder features (E1 full-res, E5 at 1/16 resolution)."""
        feats = [self.E1(x)]
        for stage in (self.E2, self.E3, self.E4, self.E5):
            feats.append(stage(F.avg_pool2d(feats[-1], kernel_size=2, stride=2)))
        return tuple(feats)
class Decoding(nn.Module):
    """U-Net decoder for DGSMP.

    Consumes the five encoder features and produces:
      * w  — per-pixel, per-band regularization weights (Ch channels), and
      * f1, f2, f3 — three separable 3D filter banks (Ch * kernel_size[i]
        channels each) used by HSI_CS.Filtering_1/2/3.
    """
    def __init__(self, Ch=28, kernel_size=[7,7,7]):
        # NOTE(review): mutable default argument; harmless here because it is
        # only read, never mutated.
        super(Decoding, self).__init__()
        self.upMode = 'bilinear'        # upsampling mode for all skip fusions
        self.Ch = Ch                    # number of spectral bands
        # One filter bank per spatial/spectral axis; Ch filters of length k each.
        out_channel1 = Ch * kernel_size[0]
        out_channel2 = Ch * kernel_size[1]
        out_channel3 = Ch * kernel_size[2]
        # D1..D4: decoding stages; each fuses an encoder skip with the
        # upsampled previous stage (hence the summed in_channels).
        self.D1 = nn.Sequential(nn.Conv2d(in_channels=128+128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D2 = nn.Sequential(nn.Conv2d(in_channels=128+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D3 = nn.Sequential(nn.Conv2d(in_channels=64+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D4 = nn.Sequential(nn.Conv2d(in_channels=64+32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        # Head producing the regularization weight map w (Ch channels).
        self.w_generator = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=32, out_channels=self.Ch, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=self.Ch, out_channels=self.Ch, kernel_size=1, stride=1, padding=0)
                                         )
        # Heads producing the three filter banks; each takes E1 (32 ch)
        # concatenated with the upsampled D3 (64 ch).
        self.filter_g_1 = nn.Sequential(nn.Conv2d(64 + 32, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, 1, 1, 0)
                                        )
        self.filter_g_2 = nn.Sequential(nn.Conv2d(64 + 32, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, 1, 1, 0)
                                        )
        self.filter_g_3 = nn.Sequential(nn.Conv2d(64 + 32, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, 1, 1, 0)
                                        )

    def forward(self, E1, E2, E3, E4, E5):
        """Decode encoder features into (w, f1, f2, f3).

        :param E1..E5: encoder features from Encoding (E1 full-res ... E5 1/16)
        :return: w [b,Ch,H,W]; f1/f2/f3 [b,Ch*k_i,H,W] filter banks
        """
        ## decoding blocks
        D1 = self.D1(torch.cat([E4, F.interpolate(E5, scale_factor=2, mode=self.upMode)], dim=1))
        D2 = self.D2(torch.cat([E3, F.interpolate(D1, scale_factor=2, mode=self.upMode)], dim=1))
        D3 = self.D3(torch.cat([E2, F.interpolate(D2, scale_factor=2, mode=self.upMode)], dim=1))
        D4 = self.D4(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))

        ## estimating the regularization parameters w
        w = self.w_generator(D4)

        ## generate 3D filters
        # NOTE(review): all three heads read the same [E1, up(D3)] fusion
        # (not D4) — this matches the module in_channels and appears intentional.
        f1 = self.filter_g_1(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f2 = self.filter_g_2(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f3 = self.filter_g_3(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        return w, f1, f2, f3
class HSI_CS(nn.Module):
    """DGSMP: deep unrolled MAP estimation for CASSI HSI reconstruction.

    Each of the `stages` iterations:
      1. applies the learned sensing operator A and its adjoint AT to form the
         data-fidelity gradient A^T(Ax - y),
      2. runs a U-Net (Encoding/Decoding) on densely-connected features to
         estimate per-pixel weights W and three separable 3D filter banks,
      3. filters the current estimate to get local means U and takes a
         gradient step with a learned step size delta_i.
    """
    def __init__(self, Ch, stages):
        super(HSI_CS, self).__init__()
        self.Ch = Ch          # number of spectral bands (28 for CAVE/KAIST)
        self.s = stages       # number of unrolled iterations (max 4 wired below)
        self.filter_size = [7,7,7] ## 3D filter size
        ## The modules for learning the measurement matrix A and A^T
        self.AT = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                                Resblock(64), Resblock(64),
                                nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        self.A = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                               Resblock(64), Resblock(64),
                               nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        ## Encoding blocks
        self.Encoding = Encoding()
        ## Decoding blocks
        self.Decoding = Decoding(Ch=self.Ch, kernel_size=self.filter_size)
        ## Dense connection: Den_con{i} fuses the features of all previous
        ## iterations (hence the growing in_channels).
        self.conv = nn.Conv2d(Ch, 32, kernel_size=3, stride=1, padding=1)
        self.Den_con1 = nn.Conv2d(32 , 32, kernel_size=1, stride=1, padding=0)
        self.Den_con2 = nn.Conv2d(32 * 2, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con3 = nn.Conv2d(32 * 3, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con4 = nn.Conv2d(32 * 4, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con5 = nn.Conv2d(32 * 5, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con6 = nn.Conv2d(32 * 6, 32, kernel_size=1, stride=1, padding=0)

        # Per-iteration learned step sizes, initialized near 0.1 below.
        self.delta_0 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_1 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_2 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_3 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_4 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_5 = Parameter(torch.ones(1), requires_grad=True)
        self._initialize_weights()
        torch.nn.init.normal_(self.delta_0, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_1, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_2, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_3, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_4, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_5, mean=0.1, std=0.01)

    def _initialize_weights(self):
        # Xavier-normal weights, zero biases for every conv/linear layer.
        # NOTE(review): assumes every Conv2d/Linear has a bias tensor — true
        # for the modules constructed above (all use the default bias=True).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)

    def Filtering_1(self, cube, core):
        """Filter `cube` along the width axis with per-pixel kernels `core`.

        core: [b, k, Ch, h, w] normalized kernels; WARNING: mutated in place
        by `core.mul_` (each filter bank is used exactly once per iteration).
        """
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [self.filter_size[0] // 2, self.filter_size[0] // 2, 0, 0], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[0]):
            img_stack.append(cube_pad[:, :, :, i:i + width])  # k shifted copies
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out

    def Filtering_2(self, cube, core):
        """Filter `cube` along the height axis (same scheme as Filtering_1)."""
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [0, 0, self.filter_size[1] // 2, self.filter_size[1] // 2], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[1]):
            img_stack.append(cube_pad[:, :, i:i + height, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out

    def Filtering_3(self, cube, core):
        """Filter `cube` along the spectral axis (zero-padded at band ends)."""
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube.unsqueeze(0).unsqueeze(0), pad=(0, 0, 0, 0, self.filter_size[2] // 2, self.filter_size[2] // 2)).squeeze(0).squeeze(0)
        img_stack = []
        for i in range(self.filter_size[2]):
            img_stack.append(cube_pad[:, i:i + bandwidth, :, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out

    def recon(self, res1, res2, Xt, i):
        """Gradient step: Xt - 2*delta_i * (data term + prior term)."""
        if i == 0 :
            delta = self.delta_0
        elif i == 1:
            delta = self.delta_1
        elif i == 2:
            delta = self.delta_2
        elif i == 3:
            delta = self.delta_3
        # elif i == 4:
        #     delta = self.delta_4
        # elif i == 5:
        #     delta = self.delta_5
        Xt = Xt - 2 * delta * (res1 + res2)
        return Xt

    def y2x(self, y):
        ## Spilt operator: undo the dispersive shift, turning the 2D
        ## measurement [b, h, h+2*27] into a cube [b, 28, h, h].
        # NOTE(review): uses sz[2] for both spatial dims, i.e. assumes a
        # square h x h reconstruction; hard-codes CUDA for the output buffer.
        sz = y.size()
        if len(sz) == 3:
            y = y.unsqueeze(1)
            bs = sz[0]
        sz = y.size()
        x = torch.zeros([bs, 28, sz[2], sz[2]]).cuda()
        for t in range(28):
            temp = y[:, :, :, 0 + 2 * t : sz[2] + 2 * t]
            x[:, t, :, :] = temp.squeeze(1)
        return x

    def x2y(self, x):
        ## Shift and Sum operator: disperse each band by 2*t columns and sum,
        ## producing the measurement [b, h, h+2*27]. Inverse pairing of y2x.
        sz = x.size()
        if len(sz) == 3:
            x = x.unsqueeze(0).unsqueeze(0)
            bs = 1
        else:
            bs = sz[0]
        sz = x.size()
        y = torch.zeros([bs, sz[2], sz[2]+2*27]).cuda()
        for t in range(28):
            y[:, :, 0 + 2 * t : sz[2] + 2 * t] = x[:, t, :, :] + y[:, :, 0 + 2 * t : sz[2] + 2 * t]
        return y

    def forward(self, y, input_mask=None):
        """Reconstruct a [b,28,h,h] HSI cube from measurement y [b,h,h+54]."""
        ## The measurements y is split into a 3D data cube of size H × W × L to initialize x.
        y = y / 28 * 2          # energy normalization before the split
        Xt = self.y2x(y)
        feature_list = []       # densely-connected per-iteration features
        for i in range(0, self.s):
            AXt = self.x2y(self.A(Xt))  # y = Ax
            Res1 = self.AT(self.y2x(AXt - y))  # A^T * (Ax − y)
            fea = self.conv(Xt)
            # Fuse features of all iterations so far (dense connection).
            if i == 0:
                feature_list.append(fea)
                fufea = self.Den_con1(fea)
            elif i == 1:
                feature_list.append(fea)
                fufea = self.Den_con2(torch.cat(feature_list, 1))
            elif i == 2:
                feature_list.append(fea)
                fufea = self.Den_con3(torch.cat(feature_list, 1))
            elif i == 3:
                feature_list.append(fea)
                fufea = self.Den_con4(torch.cat(feature_list, 1))
            # elif i == 4:
            #     feature_list.append(fea)
            #     fufea = self.Den_con5(torch.cat(feature_list, 1))
            # elif i == 5:
            #     feature_list.append(fea)
            #     fufea = self.Den_con6(torch.cat(feature_list, 1))
            E1, E2, E3, E4, E5 = self.Encoding(fufea)
            W, f1, f2, f3 = self.Decoding(E1, E2, E3, E4, E5)
            # Reshape each filter bank to [b, k, Ch, h, w] and L2-normalize
            # over the kernel taps.
            batch_size, p, height, width = f1.size()
            f1 = F.normalize(f1.view(batch_size, self.filter_size[0], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f2.size()
            f2 = F.normalize(f2.view(batch_size, self.filter_size[1], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f3.size()
            f3 = F.normalize(f3.view(batch_size, self.filter_size[2], self.Ch, height, width),dim=1)
            ## Estimating the local means U (separable 3D filtering: w, h, band)
            u1 = self.Filtering_1(Xt, f1)
            u2 = self.Filtering_2(u1, f2)
            U = self.Filtering_3(u2, f3)
            ## w * (x − u)
            Res2 = (Xt - U).mul(W)
            ## Reconstructing HSIs
            Xt = self.recon(Res1, Res2, Xt, i)
        return Xt
| 15,284 | 46.175926 | 148 | py |
MST | MST-main/simulation/train_code/architecture/DAUHST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch import einsum
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """In-place truncated-normal initializer; thin wrapper over the no-grad impl."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class PreNorm(nn.Module):
    """Apply LayerNorm over the last `dim` features before running `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        # Normalize first, then delegate every extra argument to the wrapped module.
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Stateless module wrapper around the functional GELU activation."""

    def forward(self, x):
        return F.gelu(x)
class HS_MSA(nn.Module):
    """Half-Shuffle Multi-head Self-Attention (DAUHST).

    With only_local_branch=True, plain window attention inside
    window_size patches. Otherwise the heads are split in half: one half
    attends within each window (local), the other attends across windows at
    the same intra-window position (non-local), and the outputs are
    concatenated channel-wise.
    """
    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=28,
            heads=8,
            only_local_branch=False
    ):
        super().__init__()

        self.dim = dim
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.window_size = window_size
        self.only_local_branch = only_local_branch

        # position embedding
        if only_local_branch:
            seq_l = window_size[0] * window_size[1]
            self.pos_emb = nn.Parameter(torch.Tensor(1, heads, seq_l, seq_l))
            trunc_normal_(self.pos_emb)
        else:
            seq_l1 = window_size[0] * window_size[1]
            self.pos_emb1 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l1, seq_l1))
            # NOTE(review): the non-local embedding size is derived from a
            # fixed 256x320 feature map divided by `heads` — inputs of other
            # sizes at this branch would not match pos_emb2. Confirm against
            # the training resolution before reuse.
            h,w = 256//self.heads,320//self.heads
            seq_l2 = h*w//seq_l1
            self.pos_emb2 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l2, seq_l2))
            trunc_normal_(self.pos_emb1)
            trunc_normal_(self.pos_emb2)

        inner_dim = dim_head * heads
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim)

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x.shape
        w_size = self.window_size
        assert h % w_size[0] == 0 and w % w_size[1] == 0, 'fmap dimensions must be divisible by the window size'
        if self.only_local_branch:
            # Partition into windows, attend within each window.
            x_inp = rearrange(x, 'b (h b0) (w b1) c -> (b h w) (b0 b1) c', b0=w_size[0], b1=w_size[1])
            q = self.to_q(x_inp)
            k, v = self.to_kv(x_inp).chunk(2, dim=-1)
            q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), (q, k, v))
            q *= self.scale
            sim = einsum('b h i d, b h j d -> b h i j', q, k)
            sim = sim + self.pos_emb  # learned relative position bias
            attn = sim.softmax(dim=-1)
            out = einsum('b h i j, b h j d -> b h i d', attn, v)
            out = rearrange(out, 'b h n d -> b n (h d)')
            out = self.to_out(out)
            out = rearrange(out, '(b h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        else:
            q = self.to_q(x)
            k, v = self.to_kv(x).chunk(2, dim=-1)
            # Split channels in half: first half -> local branch, second -> non-local.
            q1, q2 = q[:,:,:,:c//2], q[:,:,:,c//2:]
            k1, k2 = k[:,:,:,:c//2], k[:,:,:,c//2:]
            v1, v2 = v[:,:,:,:c//2], v[:,:,:,c//2:]

            # local branch: attention over positions inside each window
            q1, k1, v1 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                              b0=w_size[0], b1=w_size[1]), (q1, k1, v1))
            q1, k1, v1 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q1, k1, v1))
            q1 *= self.scale
            sim1 = einsum('b n h i d, b n h j d -> b n h i j', q1, k1)
            sim1 = sim1 + self.pos_emb1
            attn1 = sim1.softmax(dim=-1)
            out1 = einsum('b n h i j, b n h j d -> b n h i d', attn1, v1)
            out1 = rearrange(out1, 'b n h mm d -> b n mm (h d)')

            # non-local branch: swap window/position axes so attention runs
            # across windows at the same intra-window offset
            q2, k2, v2 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                              b0=w_size[0], b1=w_size[1]), (q2, k2, v2))
            q2, k2, v2 = map(lambda t: t.permute(0, 2, 1, 3), (q2.clone(), k2.clone(), v2.clone()))
            q2, k2, v2 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q2, k2, v2))
            q2 *= self.scale
            sim2 = einsum('b n h i d, b n h j d -> b n h i j', q2, k2)
            sim2 = sim2 + self.pos_emb2
            attn2 = sim2.softmax(dim=-1)
            out2 = einsum('b n h i j, b n h j d -> b n h i d', attn2, v2)
            out2 = rearrange(out2, 'b n h mm d -> b n mm (h d)')
            out2 = out2.permute(0, 2, 1, 3)  # undo the axis swap

            out = torch.cat([out1,out2],dim=-1).contiguous()
            out = self.to_out(out)
            out = rearrange(out, 'b (h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        return out
class HSAB(nn.Module):
    """Half-Shuffle Attention Block: `num_blocks` x (HS-MSA + FFN), both
    pre-normalized and residual. Heads==1 falls back to local-only attention."""

    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([
            nn.ModuleList([
                PreNorm(dim, HS_MSA(dim=dim, window_size=window_size, dim_head=dim_head,
                                    heads=heads, only_local_branch=(heads == 1))),
                PreNorm(dim, FeedForward(dim=dim)),
            ])
            for _ in range(num_blocks)
        ])

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        # Attention operates channels-last; convert at the boundaries only.
        feat = x.permute(0, 2, 3, 1)
        for attn, ffn in self.blocks:
            feat = attn(feat) + feat
            feat = ffn(feat) + feat
        return feat.permute(0, 3, 1, 2)
class FeedForward(nn.Module):
    """Inverted-bottleneck conv FFN: 1x1 expand -> 3x3 depthwise -> 1x1 project,
    with GELU after each of the first two convs. Operates on channels-last input."""

    def __init__(self, dim, mult=4):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=False, groups=hidden),  # depthwise
            GELU(),
            nn.Conv2d(hidden, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        nchw = x.permute(0, 3, 1, 2)
        return self.net(nchw).permute(0, 2, 3, 1)
class HST(nn.Module):
    """Half-Shuffle Transformer: U-shaped denoiser used inside DAUHST.

    Encoder/decoder of HSAB stages with strided-conv downsampling and
    transposed-conv upsampling, skip fusion by 1x1 convs, and a residual
    connection from the (first 28 channels of the) input to the output.
    """
    def __init__(self, in_dim=28, out_dim=28, dim=28, num_blocks=[1,1,1]):
        super(HST, self).__init__()
        self.dim = dim
        self.scales = len(num_blocks)

        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)

        # Encoder: each level doubles the channel count and halves resolution.
        self.encoder_layers = nn.ModuleList([])
        dim_scale = dim
        for i in range(self.scales-1):
            self.encoder_layers.append(nn.ModuleList([
                HSAB(dim=dim_scale, num_blocks=num_blocks[i], dim_head=dim, heads=dim_scale // dim),
                nn.Conv2d(dim_scale, dim_scale * 2, 4, 2, 1, bias=False),
            ]))
            dim_scale *= 2

        # Bottleneck
        self.bottleneck = HSAB(dim=dim_scale, dim_head=dim, heads=dim_scale // dim, num_blocks=num_blocks[-1])

        # Decoder: transposed conv upsampling, 1x1 fusion of the skip, HSAB.
        self.decoder_layers = nn.ModuleList([])
        for i in range(self.scales-1):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_scale, dim_scale // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_scale, dim_scale // 2, 1, 1, bias=False),
                HSAB(dim=dim_scale // 2, num_blocks=num_blocks[self.scales - 2 - i], dim_head=dim,
                     heads=(dim_scale // 2) // dim),
            ]))
            dim_scale //= 2

        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)

        #### activation function
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal weights for Linear, unit-affine for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """
        x: [b,c,h,w]  (c == in_dim; first 28 channels form the residual path)
        return out:[b,c,h,w]
        """
        b, c, h_inp, w_inp = x.shape
        # Pad spatial dims to a multiple of 16 so both down-scales divide evenly.
        hb, wb = 16, 16
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')

        # Embedding
        fea = self.embedding(x)
        x = x[:,:28,:,:]  # keep only the image channels for the residual (drops the beta map)

        # Encoder (note: loop variables shadow the HSAB class name locally)
        fea_encoder = []
        for (HSAB, FeaDownSample) in self.encoder_layers:
            fea = HSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)

        # Bottleneck
        fea = self.bottleneck(fea)

        # Decoder
        for i, (FeaUpSample, Fution, HSAB) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.scales-2-i]], dim=1))
            fea = HSAB(fea)

        # Mapping
        out = self.mapping(fea) + x
        return out[:, :, :h_inp, :w_inp]  # crop the padding back off
def A(x, Phi):
    """Forward CASSI operator: mask the cube with Phi and sum over bands.

    :param x: [b,nC,h,w] HSI cube
    :param Phi: [b,nC,h,w] sensing mask
    :return: y: [b,h,w] measurement
    """
    return (x * Phi).sum(dim=1)
def At(y, Phi):
    """Adjoint CASSI operator: broadcast y over the bands and re-mask with Phi.

    :param y: [b,h,w] measurement
    :param Phi: [b,nC,h,w] sensing mask
    :return: x: [b,nC,h,w]
    """
    expanded = y.unsqueeze(1).repeat(1, Phi.shape[1], 1, 1)
    return expanded * Phi
def shift_3d(inputs, step=2):
    """Disperse the cube in place: roll band i by `step*i` columns.

    Mutates and returns `inputs` [bs,nC,row,col]; inverse of shift_back_3d.
    """
    bs, n_band, rows, cols = inputs.shape
    for band in range(n_band):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """Undo the dispersive shift in place: roll band i back by `step*i` columns.

    Mutates and returns `inputs` [bs,nC,row,col]; inverse of shift_3d.
    """
    bs, n_band, rows, cols = inputs.shape
    for band in range(n_band):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=-step * band, dims=2)
    return inputs
class HyPaNet(nn.Module):
    """Hyper-parameter estimator: maps the fused input to `out_nc` positive
    scalars per sample (split into alpha and beta halves)."""

    def __init__(self, in_nc=29, out_nc=8, channel=64):
        super(HyPaNet, self).__init__()
        self.fution = nn.Conv2d(in_nc, channel, 1, 1, 0, bias=True)
        self.down_sample = nn.Conv2d(channel, channel, 3, 2, 1, bias=True)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Softplus keeps the estimated parameters strictly positive.
        self.mlp = nn.Sequential(
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, out_nc, 1, padding=0, bias=True),
            nn.Softplus())
        self.relu = nn.ReLU(inplace=True)
        self.out_nc = out_nc

    def forward(self, x):
        """Return (first half, second half) of the [b,out_nc,1,1] estimate."""
        fea = self.down_sample(self.relu(self.fution(x)))
        pooled = self.avg_pool(fea)
        params = self.mlp(pooled) + 1e-6  # epsilon guards later divisions
        half = self.out_nc // 2
        return params[:, :half, :, :], params[:, half:, :, :]
class DAUHST(nn.Module):
    """Degradation-Aware Unfolding Half-shuffle Transformer.

    Unrolls `num_iterations` half-quadratic-splitting steps: each step takes a
    closed-form data-fidelity update with estimated alpha, then denoises with
    an HST conditioned on the estimated beta (as an extra input channel).
    """
    def __init__(self, num_iterations=1):
        super(DAUHST, self).__init__()
        # Estimates (alpha_i, beta_i) for every iteration from the fused input.
        self.para_estimator = HyPaNet(in_nc=28, out_nc=num_iterations*2)
        # 56 = 28 shifted-measurement channels + 28 Phi channels.
        self.fution = nn.Conv2d(56, 28, 1, padding=0, bias=True)
        self.num_iterations = num_iterations
        self.denoisers = nn.ModuleList([])
        for _ in range(num_iterations):
            self.denoisers.append(
                HST(in_dim=29, out_dim=28, dim=28, num_blocks=[1,1,1]),  # +1 channel for beta
            )

    def initial(self, y, Phi):
        """
        :param y: [b,256,310]
        :param Phi: [b,28,256,310]
        :return: temp: [b,28,256,310]; alpha: [b, num_iterations]; beta: [b, num_iterations]
        """
        nC, step = 28, 2
        y = y / nC * 2  # energy normalization
        bs,row,col = y.shape
        # Replicate the 2D measurement into a shifted 28-band cube.
        # NOTE(review): buffer is hard-coded to CUDA.
        y_shift = torch.zeros(bs, nC, row, col).cuda().float()
        for i in range(nC):
            y_shift[:, i, :, step * i:step * i + col - (nC - 1) * step] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        z = self.fution(torch.cat([y_shift, Phi], dim=1))
        # NOTE(review): the fusion conv is evaluated a second time here with
        # the same input; `z` could be reused instead.
        alpha, beta = self.para_estimator(self.fution(torch.cat([y_shift, Phi], dim=1)))
        return z, alpha, beta

    def forward(self, y, input_mask=None):
        """
        :param y: [b,256,310]
        :param Phi: [b,28,256,310]
        :param Phi_PhiT: [b,256,310]
        :return: z_crop: [b,28,256,256]
        """
        Phi, Phi_s = input_mask
        z, alphas, betas = self.initial(y, Phi)
        for i in range(self.num_iterations):
            alpha, beta = alphas[:,i,:,:], betas[:,i:i+1,:,:]
            # Data-fidelity step: z + A^T((y - Az) / (alpha + Phi_s)).
            Phi_z = A(z, Phi)
            x = z + At(torch.div(y-Phi_z,alpha+Phi_s), Phi)
            x = shift_back_3d(x)  # undo dispersion before spatial denoising
            # Broadcast the scalar beta over the spatial grid as a 29th channel.
            beta_repeat = beta.repeat(1,1,x.shape[2], x.shape[3])
            z = self.denoisers[i](torch.cat([x, beta_repeat],dim=1))
            if i<self.num_iterations-1:
                z = shift_3d(z)  # back to the shifted domain for the next step
        # Crop the dispersion margin; output width hard-coded to 256.
        return z[:, :, :, 0:256]
| 13,343 | 35.26087 | 133 | py |
MST | MST-main/simulation/train_code/architecture/CST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
from collections import defaultdict, Counter
import numpy as np
from tqdm import tqdm
import random
def uniform(a, b, shape, device='cuda'):
    """Sample a tensor of `shape` uniformly from [a, b) on `device`."""
    span = b - a
    return span * torch.rand(shape, device=device) + a
class AsymmetricTransform:
    """Abstract base for asymmetric query/key transforms used by LSH attention."""

    def Q(self, *args, **kwargs):
        """Transform applied to queries; subclasses must override."""
        raise NotImplementedError('Query transform not implemented')

    def K(self, *args, **kwargs):
        """Transform applied to keys; subclasses must override."""
        raise NotImplementedError('Key transform not implemented')
class LSH:
    """Abstract locality-sensitive hashing scheme."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError('LSH scheme not implemented')

    def compute_hash_agreement(self, q_hash, k_hash):
        """Count hash positions where query and key agree on every component."""
        full_match = (q_hash == k_hash).min(dim=-1)[0]  # logical AND over last dim
        return full_match.sum(dim=-1)
class XBOXPLUS(AsymmetricTransform):
    """XBOX+ asymmetric transform: augments vectors so inner products emulate
    maximum-inner-product search under an LSH scheme."""

    def set_norms(self, x):
        """Cache per-vector L2 norms and the per-sequence max norm of `x`."""
        self.x_norms = x.norm(p=2, dim=-1, keepdim=True)
        self.MX = torch.amax(self.x_norms, dim=-2, keepdim=True)

    def X(self, x):
        """Append the norm-completion column sqrt(MX^2 - |x|^2) and a zero column.

        Bug fix: the original built the zero column with
        `torch.tensor(0.0).repeat(x.shape[:-1], 1).unsqueeze(-1)` — `repeat`
        does not accept a Size mixed with an int, and the extra unsqueeze
        would produce a rank-mismatched tensor for `torch.cat`. Allocate the
        [..., 1] zeros directly instead.
        """
        device = x.device
        ext = torch.sqrt((self.MX ** 2).to(device) - (self.x_norms ** 2).to(device))
        zero = torch.zeros(x.shape[:-1] + (1,), device=device, dtype=x.dtype)
        return torch.cat((x, ext, zero), -1)
def lsh_clustering(x, n_rounds, r=1):
    """Hash `x` with SALSH over `n_rounds` rounds and return, per round, the
    argsort ordering that groups similarly-hashed positions together."""
    hasher = SALSH(n_rounds=n_rounds, dim=x.shape[-1], r=r, device=x.device)
    hashes = hasher(x).reshape((n_rounds,) + x.shape[:-1])
    return hashes.argsort(dim=-1)
class SALSH(LSH):
    """Shift-and-rescale LSH: random projections with uniform offsets, bucket width r."""

    def __init__(self, n_rounds, dim, r, device='cuda'):
        super(SALSH, self).__init__()
        # One Gaussian projection direction and one uniform shift per round.
        self.alpha = torch.normal(0, 1, (dim, n_rounds), device=device)
        self.beta = uniform(0, r, shape=(1, n_rounds), device=device)
        self.dim = dim
        self.r = r

    def __call__(self, vecs):
        """Project, shift, rescale; return hashes as [n_rounds, ..., seq]."""
        shifted = vecs @ self.alpha + self.beta
        return (shifted / self.r).permute(2, 0, 1)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """Public wrapper: truncated-normal init on [a, b], in place."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Variance-scaling initializer (in place).

    Variance is `scale / denom` where denom follows `mode`
    ('fan_in' | 'fan_out' | 'fan_avg'); the sample distribution is
    'truncated_normal', 'normal', or 'uniform'.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2

    variance = scale / denom
    std = math.sqrt(variance)

    if distribution == "truncated_normal":
        # constant rescales std so the truncated distribution has unit variance
        trunc_normal_(tensor, std=std / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=std)
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: fan-in variance scaling with a truncated normal."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Pre-normalization wrapper: LayerNorm the input, then run `fn` on it."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        normalized = self.norm(x)
        return self.fn(normalized, *args, **kwargs)
class GELU(nn.Module):
    """Parameter-free GELU activation as an nn.Module."""

    def forward(self, x):
        return F.gelu(x)
def batch_scatter(output, src, dim, index):
    """
    Scatter `src` rows into `output` per batch-and-channel row.

    :param output: [b,n,c] destination
    :param src: [b,k,c] values to place
    :param dim: scatter dimension on the flattened [(b c), n] layout (1 in practice)
    :param index: [b,k] destination positions along n
    :return: [b,n,c] tensor with src written at the indexed positions
    """
    b, k, c = src.shape
    expanded_idx = index.unsqueeze(-1).expand(-1, -1, c)
    # Flatten to [(b*c), length]: channels become independent rows.
    flat_out = output.permute(0, 2, 1).reshape(b * c, -1)
    flat_src = src.permute(0, 2, 1).reshape(b * c, k)
    flat_idx = expanded_idx.permute(0, 2, 1).reshape(b * c, k)
    flat_out.scatter_(dim, flat_idx, flat_src)
    # Restore the [b,n,c] layout.
    return flat_out.reshape(b, c, -1).permute(0, 2, 1)
def batch_gather(x, index, dim):
    """
    Gather rows of `x` per batch-and-channel row.

    :param x: [b,n,c] source
    :param index: [b,k] positions along n to select
    :param dim: gather dimension on the flattened [(b c), n] layout (1 in practice)
    :return: [b,k,c] selected rows
    """
    b, n, c = x.shape
    expanded_idx = index.unsqueeze(-1).expand(-1, -1, c)
    # Flatten to [(b*c), n]: each channel is gathered independently.
    flat_x = x.permute(0, 2, 1).reshape(b * c, n)
    flat_idx = expanded_idx.permute(0, 2, 1).reshape(b * c, -1)
    picked = torch.gather(flat_x, dim, flat_idx)
    # Restore the [b,k,c] layout.
    return picked.reshape(b, c, -1).permute(0, 2, 1)
class FeedForward(nn.Module):
    """Conv FFN (channels-last I/O): 1x1 expand by `mult`, 3x3 depthwise, 1x1 project,
    with GELU between the convolutions."""

    def __init__(self, dim, mult=4):
        super().__init__()
        expanded = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, expanded, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(expanded, expanded, 3, 1, 1, bias=False, groups=expanded),  # depthwise
            GELU(),
            nn.Conv2d(expanded, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        as_nchw = x.permute(0, 3, 1, 2)
        out = self.net(as_nchw)
        return out.permute(0, 2, 3, 1)
class SAH_MSA(nn.Module):
    """Spectra-Aware Hashing Multi-head Self-Attention (CST).

    Buckets sequence positions with multi-round XBOX+ LSH hashing, runs dense
    attention inside each fixed-size bucket, then merges the rounds with a
    softmax over each round's log-normalizers.
    """
    def __init__(self, heads=4, n_rounds=2, channels=64, patch_size=144,
                 r=1):
        # NOTE(review): `patch_size` is indexed as a pair below, so callers
        # must pass a (h, w) tuple; the scalar default is not usable as-is.
        super(SAH_MSA, self).__init__()
        self.heads = heads
        self.n_rounds = n_rounds
        inner_dim = channels*3
        self.to_q = nn.Linear(channels, inner_dim, bias=False)
        self.to_k = nn.Linear(channels, inner_dim, bias=False)
        self.to_v = nn.Linear(channels, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, channels, bias=False)

        self.xbox_plus = XBOXPLUS()
        self.clustering_params = {
            'r': r,
            'n_rounds': self.n_rounds
        }
        # Bucket size = number of positions attended to together.
        self.q_attn_size = patch_size[0] * patch_size[1]
        self.k_attn_size = patch_size[0] * patch_size[1]

    def forward(self, input):
        """
        :param input: [b,n,c]
        :return: output: [b,n,c]
        """
        B, N, C_inp = input.shape
        query = self.to_q(input)
        key = self.to_k(input)
        value = self.to_v(input)
        # Hash the raw (pre-projection) features, per head.
        input_hash = input.view(B, N, self.heads, C_inp//self.heads)
        x_hash = rearrange(input_hash, 'b t h e -> (b h) t e')
        bs, x_seqlen, dim = x_hash.shape
        with torch.no_grad():
            self.xbox_plus.set_norms(x_hash)
            Xs = self.xbox_plus.X(x_hash)
            # x_positions: per round, the permutation that sorts by hash bucket.
            x_positions = lsh_clustering(Xs, **self.clustering_params)
            x_positions = x_positions.reshape(self.n_rounds, bs, -1)
        del Xs

        C = query.shape[-1]
        query = query.view(B, N, self.heads, C // self.heads)
        key = key.view(B, N, self.heads, C // self.heads)
        value = value.view(B, N, self.heads, C // self.heads)

        query = rearrange(query, 'b t h e -> (b h) t e')  # [bs, q_seqlen,c]
        key = rearrange(key, 'b t h e -> (b h) t e')
        value = rearrange(value, 'b s h d -> (b h) s d')

        bs, q_seqlen, dim = query.shape
        bs, k_seqlen, dim = key.shape
        v_dim = value.shape[-1]

        x_rev_positions = torch.argsort(x_positions, dim=-1)
        # Gather q/k/v into hash order and chop into attention-size buckets.
        x_offset = torch.arange(bs, device=query.device).unsqueeze(-1) * x_seqlen
        x_flat = (x_positions + x_offset).reshape(-1)

        s_queries = query.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.q_attn_size, dim)
        s_keys = key.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, dim)
        s_values = value.reshape(-1, v_dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, v_dim)

        inner = s_queries @ s_keys.transpose(2, 1)
        norm_factor = 1
        inner = inner / norm_factor

        # free memory
        del x_positions

        # softmax denominator
        dots_logsumexp = torch.logsumexp(inner, dim=-1, keepdim=True)
        # softmax
        dots = torch.exp(inner - dots_logsumexp)
        # dropout

        # n_rounds outs
        bo = (dots @ s_values).reshape(self.n_rounds, bs, q_seqlen, -1)

        # undo sort: scatter bucket outputs back to original positions
        x_offset = torch.arange(bs * self.n_rounds, device=query.device).unsqueeze(-1) * x_seqlen
        x_rev_flat = (x_rev_positions.reshape(-1, x_seqlen) + x_offset).reshape(-1)
        o = bo.reshape(-1, v_dim).index_select(0, x_rev_flat).reshape(self.n_rounds, bs, q_seqlen, -1)

        slogits = dots_logsumexp.reshape(self.n_rounds, bs, -1)
        logits = torch.gather(slogits, 2, x_rev_positions)

        # free memory
        del x_rev_positions

        # weighted sum multi-round attention: rounds weighted by their
        # softmax normalizers (larger mass -> more reliable bucket).
        probs = torch.exp(logits - torch.logsumexp(logits, dim=0, keepdim=True))
        out = torch.sum(o * probs.unsqueeze(-1), dim=0)

        out = rearrange(out, '(b h) t d -> b t h d', h=self.heads)
        out = out.reshape(B, N, -1)
        out = self.to_out(out)
        return out
class SAHAB(nn.Module):
    """Spectra-Aware Hashing Attention Block (CST).

    Optionally shifts the feature map (Swin-style), partitions it into large
    patches, and applies SAH_MSA — either to all patches, or (sparse mode)
    only to the half with the largest mean mask response — followed by a
    residual FFN.
    """
    def __init__(
            self,
            dim,
            patch_size=(16, 16),
            heads=8,
            shift_size=0,
            sparse=False
    ):
        super().__init__()
        # NOTE(review): self.blocks is created but never populated or used.
        self.blocks = nn.ModuleList([])
        self.attn = PreNorm(dim, SAH_MSA(heads=heads, n_rounds=2, r=1, channels=dim, patch_size=patch_size))
        self.ffn = PreNorm(dim, FeedForward(dim=dim))
        self.shift_size = shift_size
        self.patch_size = patch_size
        self.sparse = sparse

    def forward(self, x, mask=None):
        """
        x: [b,h,w,c]
        mask: [b,h,w]  sparsity/importance map used to rank patches
        return out: [b,h,w,c]
        """
        b,h,w,c = x.shape
        # Cyclic shift (and shift the mask identically) for cross-patch mixing.
        if self.shift_size > 0:
            x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            mask = torch.roll(mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        w_size = self.patch_size

        # Split into large patches (2x the attention patch per side)
        x = rearrange(x, 'b (nh hh) (nw ww) c-> b (nh nw) (hh ww c)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        mask = rearrange(mask, 'b (nh hh) (nw ww) -> b (nh nw) (hh ww)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        N = x.shape[1]
        mask = torch.mean(mask,dim=2,keepdim=False) # [b,nh*nw]

        if self.sparse:
            # Attend only in the top half of patches ranked by mean mask value;
            # the rest pass through unchanged via scatter into a clone.
            mask_select = mask.topk(mask.shape[1] // 2, dim=1)[1] # [b,nh*nw//2]
            x_select = batch_gather(x, mask_select, 1) # [b,nh*nw//2,hh*ww*c]
            x_select = x_select.reshape(b*N//2,-1,c)
            x_select = self.attn(x_select)+x_select
            x_select = x_select.view(b,N//2,-1)
            x = batch_scatter(x.clone(), x_select, 1, mask_select)
        else:
            x = x.view(b*N,-1,c)
            x = self.attn(x) + x
            x = x.view(b, N, -1)

        # Reassemble the spatial map and undo the cyclic shift.
        x = rearrange(x, 'b (nh nw) (hh ww c) -> b (nh hh) (nw ww) c', nh=h//(w_size[0] * 2), hh=w_size[0] * 2, ww=w_size[1] * 2)
        if self.shift_size > 0:
            x = torch.roll(x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        x = self.ffn(x) + x
        return x
class SAHABs(nn.Module):
    """Stack of SAHAB blocks; odd-indexed blocks use a shifted partition
    (Swin-style alternation)."""

    def __init__(
            self,
            dim,
            patch_size=(8, 8),
            heads=8,
            num_blocks=2,
            sparse=False
    ):
        super().__init__()
        self.blocks = nn.Sequential(*[
            SAHAB(heads=heads, dim=dim, patch_size=patch_size, sparse=sparse,
                  shift_size=0 if idx % 2 == 0 else patch_size[0])
            for idx in range(num_blocks)
        ])

    def forward(self, x, mask=None):
        """
        x: [b,c,h,w]
        mask: [b,1,h,w]
        return x: [b,c,h,w]
        """
        # Blocks operate channels-last with a 2D mask.
        feat = x.permute(0, 2, 3, 1)
        mask2d = mask.squeeze(1)
        for block in self.blocks:
            feat = block(feat, mask2d)
        return feat.permute(0, 3, 1, 2)
class ASPPConv(nn.Sequential):
    """One ASPP branch: 3x3 dilated conv (padding == dilation keeps spatial size) + ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        super(ASPPConv, self).__init__(
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation,
                      dilation=dilation, bias=False),
            nn.ReLU(),
        )
class ASPPPooling(nn.Sequential):
    """ASPP image-level branch: global average pool -> 1x1 conv -> ReLU,
    bilinearly upsampled back to the input resolution."""

    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.ReLU())

    def forward(self, x):
        spatial = x.shape[-2:]
        pooled = x
        for layer in self:
            pooled = layer(pooled)
        # Broadcast the 1x1 global descriptor back over the spatial grid.
        return F.interpolate(pooled, size=spatial, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: parallel dilated-conv branches plus a
    global-pooling branch, concatenated and projected back to `out_channels`."""

    def __init__(self, in_channels, atrous_rates, out_channels):
        super(ASPP, self).__init__()
        branches = [ASPPConv(in_channels, out_channels, rate)
                    for rate in tuple(atrous_rates)]
        branches.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(branches)
        self.project = nn.Sequential(
            nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
            nn.ReLU(),
            nn.Dropout(0.5))

    def forward(self, x):
        branch_outs = [branch(x) for branch in self.convs]
        return self.project(torch.cat(branch_outs, dim=1))
class Sparsity_Estimator(nn.Module):
    """U-shaped CNN that extracts features and, when `sparse`, also predicts a
    one-channel sparsity/error map as the last output channel."""
    def __init__(self, dim=28, expand=2, sparse=False):
        super(Sparsity_Estimator, self).__init__()
        self.dim = dim
        self.stage = 2
        self.sparse = sparse
        # Input projection
        self.in_proj = nn.Conv2d(28, dim, 1, 1, 0, bias=False)
        # Encoder: two stride-2 stages of (pointwise, depthwise, pointwise).
        self.encoder_layers = nn.ModuleList([])
        cur = dim
        for _ in range(2):
            self.encoder_layers.append(nn.ModuleList([
                nn.Conv2d(cur, cur * expand, 1, 1, 0, bias=False),
                nn.Conv2d(cur * expand, cur * expand, 3, 2, 1, bias=False,
                          groups=cur * expand),
                nn.Conv2d(cur * expand, cur * expand, 1, 1, 0, bias=False),
            ]))
            cur *= 2
        # Bottleneck
        self.bottleneck = ASPP(cur, [3, 6], cur)
        # Decoder: transpose-conv upsampling then a separable refinement.
        self.decoder_layers = nn.ModuleList([])
        for _ in range(2):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(cur, cur // 2, stride=2, kernel_size=2,
                                   padding=0, output_padding=0),
                nn.Conv2d(cur // 2, cur, 1, 1, 0, bias=False),
                nn.Conv2d(cur, cur, 3, 1, 1, bias=False, groups=cur),
                nn.Conv2d(cur, cur // 2, 1, 1, 0, bias=False),
            ]))
            cur //= 2
        # Output projection: one extra channel for the error map when sparse.
        out_ch = self.dim + 1 if sparse else self.dim
        self.out_conv2 = nn.Conv2d(self.dim, out_ch, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,dim,h,w] (plus [b,1,h,w] error map when sparse)
        """
        feat = self.lrelu(self.in_proj(x))
        skips = []
        for (pw1, dw, pw2) in self.encoder_layers:
            skips.append(feat)
            feat = pw2(self.lrelu(dw(self.lrelu(pw1(feat)))))
        # Residual bottleneck
        feat = self.bottleneck(feat) + feat
        for i, (up, pw1, dw, pw2) in enumerate(self.decoder_layers):
            feat = up(feat)
            feat = pw2(self.lrelu(dw(self.lrelu(pw1(feat)))))
            feat = feat + skips[self.stage - 1 - i]
        out = self.out_conv2(feat)
        if self.sparse:
            # Last channel is the error map; the rest are features.
            return out[:, :-1], out[:, -1:, :, :]
        return out
class CST(nn.Module):
    """Coarse-to-fine Sparse Transformer for CASSI reconstruction.

    Fuses the shifted measurement with the physical mask, runs a sparsity
    estimator (optionally predicting a sparsity map that gates attention),
    then reconstructs through a U-shaped stack of SAHABs.

    Args:
        dim: base channel width.
        stage: number of encoder/decoder scales.
        num_blocks: SAHAB counts per scale; [2,4,6] selects the deeper
            two-stage sparsity estimator.
        sparse: if True, the estimated sparsity mask is used and also returned.
    """
    def __init__(self, dim=28, stage=2, num_blocks=[2, 2, 2], sparse=False):
        super(CST, self).__init__()
        self.dim = dim
        self.stage = stage
        self.sparse = sparse
        # Fuse physical mask and shifted measurement (28 + 28 channels).
        self.fution = nn.Conv2d(56, 28, 1, 1, 0, bias=False)
        # Sparsity Estimator
        if num_blocks == [2, 4, 6]:
            self.fe = nn.Sequential(Sparsity_Estimator(dim=28, expand=2, sparse=False),
                                    Sparsity_Estimator(dim=28, expand=2, sparse=sparse))
        else:
            self.fe = Sparsity_Estimator(dim=28, expand=2, sparse=sparse)
        # Encoder: SAHABs + strided conv for features, avg-pool for the mask.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                SAHABs(dim=dim_stage, num_blocks=num_blocks[i], heads=dim_stage // dim, sparse=sparse),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
                nn.AvgPool2d(kernel_size=2, stride=2),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = SAHABs(
            dim=dim_stage, heads=dim_stage // dim, num_blocks=num_blocks[-1], sparse=sparse)
        # Decoder: transpose-conv upsampling + SAHABs.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                SAHABs(dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i],
                       heads=(dim_stage // 2) // dim, sparse=sparse),
            ]))
            dim_stage //= 2
        # Output projection
        self.out_proj = nn.Conv2d(self.dim, dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Standard transformer-style init for Linear and LayerNorm layers.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, mask=None):
        """
        x: [b,c,h,w] shifted measurement features
        mask: [b,c,h,w] physical mask
        return out: [b,c,h,w] (plus estimated sparsity mask when self.sparse)
        """
        b, c, h, w = x.shape
        # Fusion
        x = self.fution(torch.cat([x, mask], dim=1))
        # Feature extraction (and sparsity-mask estimation when enabled)
        if self.sparse:
            fea, mask = self.fe(x)
        else:
            fea = self.fe(x)
            # Fix: allocate the placeholder mask on x's device; the original
            # hard-coded .cuda(), which broke CPU execution.
            mask = torch.randn((b, 1, h, w), device=x.device)
        # Encoder
        fea_encoder = []
        masks = []
        for (block, fea_down, mask_down) in self.encoder_layers:
            fea = block(fea, mask)
            masks.append(mask)
            fea_encoder.append(fea)
            fea = fea_down(fea)
            mask = mask_down(mask)
        # Bottleneck
        fea = self.bottleneck(fea, mask)
        # Decoder
        for i, (fea_up, block) in enumerate(self.decoder_layers):
            fea = fea_up(fea)
            fea = fea + fea_encoder[self.stage - 1 - i]
            mask = masks[self.stage - 1 - i]
            fea = block(fea, mask)
        # Output projection with a global residual.
        out = self.out_proj(fea) + x
        if self.sparse:
            return out, mask
        else:
            return out
| 19,782 | 32.41723 | 129 | py |
MST | MST-main/simulation/train_code/architecture/MST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """In-place truncated-normal initialisation; values are confined to [a, b]."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """In-place variance-scaling initialisation (as in JAX/TF initializers).

    Args:
        tensor: tensor to initialise in place (must be at least 2-D).
        scale: multiplicative scale of the variance.
        mode: 'fan_in' | 'fan_out' | 'fan_avg' — which fan normalises the variance.
        distribution: 'truncated_normal' | 'normal' | 'uniform'.

    Raises:
        ValueError: on an unknown *mode* or *distribution*.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Fix: an unknown mode used to fall through and crash later with a
        # NameError on `denom`; fail fast with a clear message instead.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # Constant is the std of a standard normal truncated to (-2, 2).
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: truncated normal with variance scaled by 1/fan_in."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """LayerNorm the input, then run the wrapped callable on the result."""
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Module wrapper around the functional GELU activation."""
    def forward(self, x):
        return F.gelu(x)
def conv(in_channels, out_channels, kernel_size, bias=False, padding = 1, stride = 1):
    """'Same'-padded Conv2d factory.

    NOTE: the *padding* argument is ignored (kept for signature compatibility);
    padding is always kernel_size//2 so stride-1 convs preserve spatial size.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     stride=stride, padding=(kernel_size // 2), bias=bias)
def shift_back(inputs, step=2):
    """Undo the dispersion shift: crop each band back to a square frame.

    inputs: [bs, nC, row, col] (e.g. [bs,28,256,310]) -> [bs, nC, row, row].
    Mutates `inputs` in place before returning the cropped view.
    """
    bs, nC, row, col = inputs.shape
    down_sample = 256 // row
    # The per-band column offset shrinks with the square of the downsampling.
    offset = float(step) / float(down_sample * down_sample)
    out_col = row
    for band in range(nC):
        start = int(offset * band)
        inputs[:, band, :, :out_col] = inputs[:, band, :, start:start + out_col]
    return inputs[:, :, :, :out_col]
class MaskGuidedMechanism(nn.Module):
    """Turn the shifted physical mask into an attention embedding aligned
    with the (de-shifted) HSI cube."""
    def __init__(
            self, n_feat):
        super(MaskGuidedMechanism, self).__init__()
        self.conv1 = nn.Conv2d(n_feat, n_feat, kernel_size=1, bias=True)
        self.conv2 = nn.Conv2d(n_feat, n_feat, kernel_size=1, bias=True)
        self.depth_conv = nn.Conv2d(n_feat, n_feat, kernel_size=5, padding=2,
                                    bias=True, groups=n_feat)

    def forward(self, mask_shift):
        """mask_shift: [bs,nC,row,col] shifted mask -> de-shifted embedding."""
        projected = self.conv1(mask_shift)
        # A sigmoid gate from a depthwise conv acts as spatial attention.
        gate = torch.sigmoid(self.depth_conv(self.conv2(projected)))
        modulated = projected * gate + projected
        return shift_back(modulated)
class MS_MSA(nn.Module):
    """Mask-guided Spectral-wise Multi-head Self-Attention.

    Attention is taken along the channel (spectral) axis: the attention map is
    (dim_head x dim_head) per head rather than (HW x HW), which keeps cost
    linear in spatial size.
    """
    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        # Channel-wise Q/K/V projections.
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learned per-head temperature applied to the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        # Depthwise-conv positional branch computed from V in image layout.
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        # Mask-guided mechanism: derives an attention map from the shifted mask.
        self.mm = MaskGuidedMechanism(dim)
        self.dim = dim
    def forward(self, x_in, mask=None):
        """
        x_in: [b,h,w,c]
        mask: [1,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        x = x_in.reshape(b,h*w,c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        # Mask attention from the mask-guided mechanism (kept channels-last).
        mask_attn = self.mm(mask.permute(0,3,1,2)).permute(0,2,3,1)
        if b != 0:
            # Broadcast the single mask over the batch.
            # NOTE(review): this guard is always true for non-empty batches —
            # it looks like it was meant to be `b != 1`; confirm upstream.
            mask_attn = (mask_attn[0, :, :, :]).expand([b, h, w, c])
        q, k, v, mask_attn = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                                (q_inp, k_inp, v_inp, mask_attn.flatten(1, 2)))
        v = v * mask_attn
        # q: b,heads,hw,c
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        # L2-normalise along tokens so logits are cosine similarities.
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1)) # A = K^T*Q
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v # b,heads,d,hw
        x = x.permute(0, 3, 1, 2) # Transpose
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        # Positional branch computed convolutionally from V.
        out_p = self.pos_emb(v_inp.reshape(b,h,w,c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck convolutional FFN for channels-last inputs."""
    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        channels_first = x.permute(0, 3, 1, 2)
        return self.net(channels_first).permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """Spectral attention block: num_blocks x [(MS_MSA + residual),
    (pre-norm FFN + residual)]."""
    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([
            nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim)),
            ])
            for _ in range(num_blocks)
        ])

    def forward(self, x, mask):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        feat = x.permute(0, 2, 3, 1)
        mask_last = mask.permute(0, 2, 3, 1)   # channels-last, same for every block
        for attn, ff in self.blocks:
            feat = attn(feat, mask=mask_last) + feat
            feat = ff(feat) + feat
        return feat.permute(0, 3, 1, 2)
class MST(nn.Module):
    """Mask-guided Spectral-wise Transformer (U-shaped) for CASSI reconstruction.

    Args:
        dim: base channel width.
        stage: number of encoder/decoder scales.
        num_blocks: MSAB counts per scale (last entry is the bottleneck).
    """
    def __init__(self, dim=28, stage=3, num_blocks=[2,2,2]):
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage
        # Input projection
        self.embedding = nn.Conv2d(28, self.dim, 3, 1, 1, bias=False)
        # Encoder: at each scale an MSAB plus strided convs that downsample
        # both the features and the mask.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False)
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])
        # Decoder: upsample, fuse the skip connection, then MSAB.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, 28, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x, mask=None):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        # Fix: `is None` instead of `mask == None` (== on a tensor is
        # ambiguous), and the fallback mask follows x's device instead of
        # hard-coding .cuda() (which broke CPU execution).
        if mask is None:
            mask = torch.zeros((1, 28, 256, 310), device=x.device)
        # Embedding
        fea = self.lrelu(self.embedding(x))
        # Encoder (loop variable renamed: it used to shadow the MSAB class)
        fea_encoder = []
        masks = []
        for (blk, fea_down, mask_down) in self.encoder_layers:
            fea = blk(fea, mask)
            masks.append(mask)
            fea_encoder.append(fea)
            fea = fea_down(fea)
            mask = mask_down(mask)
        # Bottleneck
        fea = self.bottleneck(fea, mask)
        # Decoder
        for i, (fea_up, fusion, blk) in enumerate(self.decoder_layers):
            fea = fea_up(fea)
            fea = fusion(torch.cat([fea, fea_encoder[self.stage - 1 - i]], dim=1))
            mask = masks[self.stage - 1 - i]
            fea = blk(fea, mask)
        # Mapping: residual output
        out = self.mapping(fea) + x
        return out
| 9,703 | 30.102564 | 116 | py |
MST | MST-main/simulation/train_code/architecture/BIRNAT.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class self_attention(nn.Module):
    """SAGAN-style spatial self-attention; gamma1 starts at 0, so the block is
    the identity at initialisation."""
    def __init__(self, ch):
        super(self_attention, self).__init__()
        self.conv1 = nn.Conv2d(ch, ch // 8, 1)
        self.conv2 = nn.Conv2d(ch, ch // 8, 1)
        self.conv3 = nn.Conv2d(ch, ch, 1)
        self.conv4 = nn.Conv2d(ch, ch, 1)
        self.gamma1 = torch.nn.Parameter(torch.Tensor([0]))
        self.ch = ch

    def forward(self, x):
        bs = x.shape[0]
        query = self.conv1(x)
        key = self.conv2(x)
        value = self.conv3(x)
        value_flat = value.reshape([bs, self.ch, -1])
        query_flat = query.reshape([bs, self.ch // 8, -1])
        key_flat = key.reshape([bs, self.ch // 8, -1])
        scores = torch.matmul(query_flat.permute([0, 2, 1]), key_flat)
        # Normalised over dim 1, exactly as in the original implementation.
        attn = F.softmax(scores, dim=1)
        ctx = torch.matmul(value_flat, attn).reshape(x.shape)  # [bs, C, h, w]
        ctx = self.conv4(ctx)
        return self.gamma1 * ctx + x
class res_part(nn.Module):
    """Three stacked residual branches, each (3x3 conv -> 1x1 conv -> 3x3 conv)
    with LeakyReLU between the first two convs."""
    def __init__(self, in_ch, out_ch):
        super(res_part, self).__init__()

        def branch():
            # NOTE: the trailing 3x3 expects in_ch inputs, so out_ch must equal
            # in_ch (true at every call site in this file).
            return nn.Sequential(
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, out_ch, 1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
            )

        self.conv1 = branch()
        self.conv2 = branch()
        self.conv3 = branch()

    def forward(self, x):
        x = x + self.conv1(x)
        x = x + self.conv2(x)
        x = x + self.conv3(x)
        return x
class down_feature(nn.Module):
    """Full-resolution feature extractor: in_ch -> 20 -> 40 -> out_ch channels."""
    def __init__(self, in_ch, out_ch):
        super(down_feature, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, 20, 5, stride=1, padding=2),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 40, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(40, out_ch, 3, stride=1, padding=1),
        )

    def forward(self, x):
        return self.conv(x)
class up_feature(nn.Module):
    """Feature-to-image head: in_ch -> 40 -> 30 -> 20 -> out_ch channels."""
    def __init__(self, in_ch, out_ch):
        super(up_feature, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, 40, 3, stride=1, padding=1),
            nn.Conv2d(40, 30, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(30, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
            nn.Conv2d(20, out_ch, 1),
        )

    def forward(self, x):
        return self.conv(x)
class cnn1(nn.Module):
    """First-frame reconstruction CNN.

    Input: the normalised measurement concatenated with PhiTy (B+1 channels);
    output: a one-channel estimate of the first spectral frame. Uses one
    stride-2 downsample, three residual stages, then a transpose-conv upsample.
    """
    def __init__(self, B):
        super(cnn1, self).__init__()
        self.conv1 = nn.Conv2d(B + 1, 32, kernel_size=5, stride=1, padding=2)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=1, stride=1)
        self.relu3 = nn.LeakyReLU(inplace=True)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.relu4 = nn.LeakyReLU(inplace=True)
        self.conv5 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.relu5 = nn.LeakyReLU(inplace=True)
        self.conv51 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.relu51 = nn.LeakyReLU(inplace=True)
        self.conv52 = nn.Conv2d(32, 16, kernel_size=1, stride=1)
        self.relu52 = nn.LeakyReLU(inplace=True)
        self.conv6 = nn.Conv2d(16, 1, kernel_size=3, stride=1, padding=1)
        self.res_part1 = res_part(128, 128)
        self.res_part2 = res_part(128, 128)
        self.res_part3 = res_part(128, 128)
        self.conv7 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu7 = nn.LeakyReLU(inplace=True)
        self.conv8 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        self.conv9 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu9 = nn.LeakyReLU(inplace=True)
        self.conv10 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        self.att1 = self_attention(128)

    def forward(self, meas=None, nor_meas=None, PhiTy=None):
        stacked = torch.cat([torch.unsqueeze(nor_meas, dim=1), PhiTy], dim=1)
        feat = self.relu1(self.conv1(stacked))
        feat = self.relu2(self.conv2(feat))
        feat = self.relu3(self.conv3(feat))
        feat = self.relu4(self.conv4(feat))          # stride-2 downsample
        feat = self.res_part1(feat)
        feat = self.conv8(self.relu7(self.conv7(feat)))
        feat = self.res_part2(feat)
        feat = self.conv10(self.relu9(self.conv9(feat)))
        feat = self.res_part3(feat)
        # self.att1 is kept for checkpoint compatibility but intentionally not
        # applied, matching the original implementation.
        feat = self.relu5(self.conv5(feat))          # transpose-conv upsample
        feat = self.relu51(self.conv51(feat))
        feat = self.relu52(self.conv52(feat))
        return self.conv6(feat)
class forward_rnn(nn.Module):
    """Forward-direction recurrence: reconstructs frames 2..cs_rate one by one,
    conditioning each step on the measurement residual and a hidden state."""
    def __init__(self):
        super(forward_rnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)

    def forward(self, xt1, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        """
        xt1: first-frame estimate [bs,1,row,col].
        Returns (all frames [bs,cs_rate,row,col], final hidden state).
        """
        ht = h
        xt = xt1
        [bs, nC, row, col] = xt1.shape
        # Fix: allocate temporaries on the input's device; the original
        # hard-coded .cuda(), which broke CPU execution.
        device = xt1.device
        out = xt1
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # Measurement residual: subtract contributions of reconstructed
            # frames (d1) and a crude estimate for the remaining ones (d2).
            d1 = torch.zeros(bs, row, col, device=device)
            d2 = torch.zeros(bs, row, col, device=device)
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, ii, :, :], out[:, ii, :, :])
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, ii, :, :], torch.squeeze(nor_meas))
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out = torch.cat([out, xt], dim=1)
        return out, ht
class backrnn(nn.Module):
    """Backward-direction recurrence: refines frames cs_rate-1 .. 1 in reverse
    order, starting from the forward pass's last frame."""
    def __init__(self):
        super(backrnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)

    def forward(self, xt8, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        """
        xt8: forward-pass output [bs,cs_rate,row,col].
        Returns the refined frames [bs,cs_rate,row,col].
        """
        ht = h
        [bs, nC, row, col] = xt8.shape
        # Fix: allocate on the input's device; the original hard-coded .cuda(),
        # which broke CPU execution.
        device = xt8.device
        xt = torch.unsqueeze(xt8[:, cs_rate - 1, :, :], 1)
        out = torch.zeros(bs, cs_rate, row, col, device=device)
        out[:, cs_rate - 1, :, :] = xt[:, 0, :, :]
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # Measurement residual: refined frames (d1) plus forward estimates
            # for frames not yet refined (d2).
            d1 = torch.zeros(bs, row, col, device=device)
            d2 = torch.zeros(bs, row, col, device=device)
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], out[:, cs_rate - 1 - ii, :, :].clone())
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], xt8[:, cs_rate - 1 - ii, :, :].clone())
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out[:, cs_rate - 2 - i, :, :] = xt[:, 0, :, :]
        return out
def shift_gt_back(inputs, step=2):
    """Undo the spectral shift: [bs,nC,row,col] -> [bs,nC,row,col-(nC-1)*step].

    Band i is cropped starting at column step*i so that all bands align.
    Fix: the output is allocated on inputs.device instead of an unconditional
    .cuda() call, so CPU tensors work — consistent with the sibling shift().
    """
    [bs, nC, row, col] = inputs.shape
    output = torch.zeros(bs, nC, row, col - (nC - 1) * step,
                         device=inputs.device, dtype=torch.float32)
    for i in range(nC):
        output[:, i, :, :] = inputs[:, i, :, step * i:step * i + col - (nC - 1) * step]
    return output
def shift(inputs, step=2):
    """Apply the dispersion shift: band i moves right by step*i columns.

    [bs,nC,row,col] -> [bs,nC,row,col+(nC-1)*step], zero-padded elsewhere.
    """
    bs, nC, row, col = inputs.shape
    wide = col + (nC - 1) * step
    output = torch.zeros(bs, nC, row, wide, dtype=torch.float32)
    if inputs.is_cuda:
        output = output.cuda()
    for band in range(nC):
        output[:, band, :, step * band:step * band + col] = inputs[:, band, :, :]
    return output
class BIRNAT(nn.Module):
    """Bidirectional RNN reconstruction: a CNN estimates the first frame, then
    forward and backward recurrences reconstruct the remaining frames."""
    def __init__(self):
        super(BIRNAT, self).__init__()
        self.cs_rate = 28
        # NOTE: submodules are moved to CUDA at construction, as in the
        # original release; the model as a whole therefore expects CUDA inputs.
        self.first_frame_net = cnn1(self.cs_rate).cuda()
        self.rnn1 = forward_rnn().cuda()
        self.rnn2 = backrnn().cuda()

    def gen_meas_torch(self, meas, shift_mask):
        """Normalise the measurement by the summed mask and spread it across
        the 28 spectral bands via the shifted mask."""
        batch_size, H = meas.shape[0:2]
        mask_s = torch.sum(shift_mask, 1)
        nor_meas = torch.div(meas, mask_s)
        temp = torch.mul(torch.unsqueeze(nor_meas, dim=1).expand(
            [batch_size, 28, H, shift_mask.shape[3]]), shift_mask)
        return nor_meas, temp

    def forward(self, meas, shift_mask=None):
        # Fix: `is None` instead of `== None` (comparing a tensor with `==` is
        # ambiguous), and tensors are allocated on the measurement's device.
        if shift_mask is None:
            shift_mask = torch.zeros(1, 28, 256, 310, device=meas.device)
        H, W = meas.shape[-2:]
        nor_meas, PhiTy = self.gen_meas_torch(meas, shift_mask)
        h0 = torch.zeros(meas.shape[0], 20, H, W, device=meas.device)
        xt1 = self.first_frame_net(meas, nor_meas, PhiTy)
        model_out1, h1 = self.rnn1(xt1, meas, nor_meas, PhiTy, shift_mask, h0, self.cs_rate)
        model_out2 = self.rnn2(model_out1, meas, nor_meas, PhiTy, shift_mask, h1, self.cs_rate)
        model_out2 = shift_gt_back(model_out2)
        return model_out2
| 13,326 | 35.412568 | 119 | py |
MST | MST-main/simulation/train_code/architecture/GAP_Net.py | import torch.nn.functional as F
import torch
import torch.nn as nn
def A(x, Phi):
    """Forward CASSI operator: mask-multiply, then sum over the spectral axis."""
    return torch.sum(x * Phi, 1)
def At(y, Phi):
    """Adjoint operator: replicate y across the spectral axis, then mask-multiply."""
    expanded = torch.unsqueeze(y, 1).repeat(1, Phi.shape[1], 1, 1)
    return expanded * Phi
def shift_3d(inputs, step=2):
    """In-place dispersion shift: roll band i right by step*i columns."""
    n_bands = inputs.shape[1]
    for band in range(n_bands):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """In-place inverse dispersion shift: roll band i left by step*i columns."""
    n_bands = inputs.shape[1]
    for band in range(n_bands):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=(-1) * step * band, dims=2)
    return inputs
class double_conv(nn.Module):
    """Two 3x3 convs, each followed by an in-place ReLU."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        self.d_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 3-level U-Net with a Tanh residual output head."""
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()

    def forward(self, x):
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad so both 2x downsamplings divide evenly.
        pad_h = (8 - h_inp % 8) % 8
        pad_w = (8 - w_inp % 8) % 8
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        residual = x
        skip1 = self.dconv_down1(x)
        skip2 = self.dconv_down2(self.maxpool(skip1))
        bottom = self.dconv_down3(self.maxpool(skip2))
        up = self.dconv_up2(torch.cat([self.upsample2(bottom), skip2], dim=1))
        up = self.dconv_up1(torch.cat([self.upsample1(up), skip1], dim=1))
        out = self.afn_last(self.conv_last(up)) + residual
        return out[:, :, :h_inp, :w_inp]
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""
    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        mid = mid_channels if mid_channels else out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)
class GAP_net(nn.Module):
    """GAP-Net: 9 unrolled GAP (generalized alternating projection) iterations,
    each a data-consistency step followed by a U-Net denoiser in the
    de-shifted domain.

    Fixes relative to the original: the 9 copy-pasted stages are run in a
    loop (behaviour is identical — every stage was the same), the unused
    intermediate x_list is removed, the None check uses `is None`, and the
    fallback masks follow y's device instead of hard-coding .cuda().
    """
    def __init__(self):
        super(GAP_net, self).__init__()
        # Individual attributes (not a ModuleList) preserve checkpoint keys.
        self.unet1 = Unet(28, 28)
        self.unet2 = Unet(28, 28)
        self.unet3 = Unet(28, 28)
        self.unet4 = Unet(28, 28)
        self.unet5 = Unet(28, 28)
        self.unet6 = Unet(28, 28)
        self.unet7 = Unet(28, 28)
        self.unet8 = Unet(28, 28)
        self.unet9 = Unet(28, 28)

    def forward(self, y, input_mask=None):
        """
        y: measurement [b,256,310]; input_mask: (Phi, Phi_s) tuple or None.
        Returns the reconstruction [b,28,256,256].
        """
        if input_mask is None:
            Phi = torch.rand((1, 28, 256, 310), device=y.device)
            Phi_s = torch.rand((1, 256, 310), device=y.device)
        else:
            Phi, Phi_s = input_mask
        x = At(y, Phi)  # v0 = H^T y
        denoisers = [self.unet1, self.unet2, self.unet3, self.unet4, self.unet5,
                     self.unet6, self.unet7, self.unet8, self.unet9]
        for denoiser in denoisers:
            # GAP data-consistency step: x <- x + Phi^T((y - Phi x) / Phi_s)
            yb = A(x, Phi)
            x = x + At(torch.div(y - yb, Phi_s), Phi)
            # Denoise in the de-shifted domain, then re-shift.
            x = shift_back_3d(x)
            x = denoiser(x)
            x = shift_3d(x)
        return x[:, :, :, 0:256]
| 5,525 | 28.084211 | 81 | py |
MST | MST-main/simulation/train_code/architecture/Lambda_Net.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
class LambdaNetAttention(nn.Module):
    """Global self-attention over all spatial positions with a learned
    residual gate (gamma, initialised to 1)."""
    def __init__(
            self,
            dim,
    ):
        super().__init__()
        self.dim = dim
        self.to_q = nn.Linear(dim, dim // 8, bias=False)
        self.to_k = nn.Linear(dim, dim // 8, bias=False)
        self.to_v = nn.Linear(dim, dim, bias=False)
        self.rescale = (dim // 8) ** -0.5
        self.gamma = nn.Parameter(torch.ones(1))

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        feats = x.permute(0, 2, 3, 1)
        b, h, w, c = feats.shape
        # Flatten spatial positions into tokens: [b, h*w, c].
        tokens = feats.reshape(b, h * w, c)
        q = self.to_q(tokens)
        k = self.to_k(tokens)
        v = self.to_v(tokens)
        # Scaled dot-product attention over all positions.
        sim = einsum('b i d, b j d -> b i j', q, k) * self.rescale
        attn = sim.softmax(dim=-1)
        agg = einsum('b i j, b j d -> b i d', attn, v)
        agg = agg.reshape(b, h, w, c)
        out = self.gamma * agg + feats
        return out.permute(0, 3, 1, 2)
class triple_conv(nn.Module):
    """Three 3x3 convs with ReLU after the first two (no final activation)."""
    def __init__(self, in_channels, out_channels):
        super(triple_conv, self).__init__()
        self.t_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
        )

    def forward(self, x):
        return self.t_conv(x)
class double_conv(nn.Module):
    """Two 3x3 convs with a ReLU in between (no final activation)."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        self.d_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
        )

    def forward(self, x):
        return self.d_conv(x)
def shift_back_3d(inputs, step=2):
    """In-place inverse dispersion shift: roll band i left by step*i columns."""
    n_bands = inputs.shape[1]
    for idx in range(n_bands):
        inputs[:, idx, :, :] = torch.roll(inputs[:, idx, :, :], shifts=(-1) * step * idx, dims=2)
    return inputs
class Lambda_Net(nn.Module):
    """λ-Net: a deep (5-level) U-Net with a self-attention stage at 1/4
    resolution, followed by a multi-step refinement head.

    Fixes relative to the original: `is None` instead of `input_mask == None`,
    and the fallback mask follows x's device instead of hard-coding .cuda().
    """
    def __init__(self, out_ch=28):
        super(Lambda_Net, self).__init__()
        self.conv_in = nn.Conv2d(1 + 28, 28, 3, padding=1)
        # encoder
        self.conv_down1 = triple_conv(28, 32)
        self.conv_down2 = triple_conv(32, 64)
        self.conv_down3 = triple_conv(64, 128)
        self.conv_down4 = triple_conv(128, 256)
        self.conv_down5 = double_conv(256, 512)
        self.conv_down6 = double_conv(512, 1024)
        self.maxpool = nn.MaxPool2d(2)
        # decoder
        self.upsample5 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
        self.upsample4 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.upsample3 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.upsample2 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.upsample1 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
        self.conv_up1 = triple_conv(32 + 32, 32)
        self.conv_up2 = triple_conv(64 + 64, 64)
        self.conv_up3 = triple_conv(128 + 128, 128)
        self.conv_up4 = triple_conv(256 + 256, 256)
        self.conv_up5 = double_conv(512 + 512, 512)
        # attention at the 1/4-resolution decoder stage
        self.attention = LambdaNetAttention(dim=128)
        self.conv_last1 = nn.Conv2d(32, 6, 3, 1, 1)
        self.conv_last2 = nn.Conv2d(38, 32, 3, 1, 1)
        self.conv_last3 = nn.Conv2d(32, 12, 3, 1, 1)
        self.conv_last4 = nn.Conv2d(44, 32, 3, 1, 1)
        self.conv_last5 = nn.Conv2d(32, out_ch, 1)
        self.act = nn.ReLU()

    def forward(self, x, input_mask=None):
        """
        x: [b,h,w] measurement; input_mask: [b,28,h,w] (zeros if None).
        Returns the de-shifted reconstruction [b,28,h,256].
        """
        if input_mask is None:
            input_mask = torch.zeros((1, 28, 256, 310), device=x.device)
        x = x/28*2
        x = self.conv_in(torch.cat([x.unsqueeze(1), input_mask], dim=1))
        b, c, h_inp, w_inp = x.shape
        hb, wb = 32, 32
        # Reflect-pad so the five 2x downsamplings divide evenly.
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        res0 = x
        conv1 = self.conv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.conv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.conv_down3(x)
        x = self.maxpool(conv3)
        conv4 = self.conv_down4(x)
        x = self.maxpool(conv4)
        conv5 = self.conv_down5(x)
        x = self.maxpool(conv5)
        conv6 = self.conv_down6(x)
        x = self.upsample5(conv6)
        x = torch.cat([x, conv5], dim=1)
        x = self.conv_up5(x)
        x = self.upsample4(x)
        x = torch.cat([x, conv4], dim=1)
        x = self.conv_up4(x)
        x = self.upsample3(x)
        x = torch.cat([x, conv3], dim=1)
        x = self.conv_up3(x)
        x = self.attention(x)
        x = self.upsample2(x)
        x = torch.cat([x, conv2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)
        x = self.conv_up1(x)
        # Refinement head: two dense stages that re-use earlier features.
        res1 = x
        out1 = self.act(self.conv_last1(x))
        x = self.conv_last2(torch.cat([res1, out1], dim=1))
        res2 = x
        out2 = self.act(self.conv_last3(x))
        out3 = self.conv_last4(torch.cat([res2, out2], dim=1))
        out = self.conv_last5(out3) + res0
        out = out[:, :, :h_inp, :w_inp]
        return shift_back_3d(out)[:, :, :, :256]
| 5,679 | 30.555556 | 95 | py |
MST | MST-main/simulation/train_code/architecture/ADMM_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def A(x, Phi):
    """Forward CASSI operator: mask the cube with Phi and integrate over bands (dim 1)."""
    return torch.sum(x * Phi, dim=1)
def At(y, Phi):
    """Adjoint of A: replicate the 2-D measurement across all bands and re-mask with Phi."""
    replicated = torch.unsqueeze(y, 1).repeat(1, Phi.shape[1], 1, 1)
    return replicated * Phi
class double_conv(nn.Module):
    """Two 3x3 conv + ReLU layers; padding=1 keeps the spatial size unchanged."""

    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        self.d_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 3-level U-Net with Tanh residual output, used as a learned
    denoiser prior inside the unfolded ADMM iterations.

    Input/output: (B, in_ch, H, W) -> (B, out_ch, H, W); the final Tanh
    output is added to the (padded) input, so out_ch must equal in_ch
    for the residual addition to broadcast.
    """
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        # Encoder: channel widths 32 -> 64 -> 128.
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        # Decoder: transposed-conv upsampling back to the input resolution.
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        # Up-path convs take [upsampled, skip] concatenation, hence doubled in-channels.
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()
    def forward(self, x):
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad so H and W are multiples of 8 (two 2x poolings plus slack).
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        inputs = x  # kept for the residual connection at the end
        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.upsample2(conv3)
        x = torch.cat([x, conv2], dim=1)
        x = self.dconv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)
        x = self.dconv_up1(x)
        x = self.conv_last(x)
        x = self.afn_last(x)  # Tanh bounds the residual to [-1, 1]
        out = x + inputs
        # Crop away the padding to restore the original spatial size.
        return out[:, :, :h_inp, :w_inp]
def shift_3d(inputs, step=2):
    """Apply the CASSI dispersion shift: roll band i of a (B, C, H, W) cube by
    step*i columns. Modifies `inputs` in place and returns it."""
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """Inverse of shift_3d: roll band i of a (B, C, H, W) cube back by step*i
    columns. Modifies `inputs` in place and returns it."""
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=(-1) * step * band, dims=2)
    return inputs
class ADMM_net(nn.Module):
    """9-stage deep-unfolded ADMM for CASSI reconstruction.

    Each stage performs: a data-consistency update using the sensing operators
    A / At with a learnable regularization offset gamma_k, then a U-Net
    denoising step on the un-shifted cube, then a dual-variable update.
    The per-stage modules are named unet1..unet9 / gamma1..gamma9 so that
    existing checkpoints keep loading by name.
    """
    def __init__(self):
        super(ADMM_net, self).__init__()
        # One independent U-Net denoiser prior per ADMM stage.
        self.unet1 = Unet(28, 28)
        self.unet2 = Unet(28, 28)
        self.unet3 = Unet(28, 28)
        self.unet4 = Unet(28, 28)
        self.unet5 = Unet(28, 28)
        self.unet6 = Unet(28, 28)
        self.unet7 = Unet(28, 28)
        self.unet8 = Unet(28, 28)
        self.unet9 = Unet(28, 28)
        # Learnable offsets added to Phi_s in each data-consistency division.
        self.gamma1 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma2 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma3 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma4 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma5 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma6 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma7 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma8 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma9 = torch.nn.Parameter(torch.Tensor([0]))
    def forward(self, y, input_mask=None):
        """Args:
            y: (B, 256, 310) measurement.
            input_mask: (Phi, Phi_s) tuple of shifted mask and its squared
                band-sum; random CUDA placeholders are used when omitted.
        Returns:
            (B, 28, 256, 256) reconstructed cube (shifted cube cropped to 256).
        """
        # NOTE(review): "input_mask == None" works for the tuple/None cases
        # used here, but "is None" would be the safer idiom.
        if input_mask == None:
            Phi = torch.rand((1, 28, 256, 310)).cuda()
            Phi_s = torch.rand((1, 256, 310)).cuda()
        else:
            Phi, Phi_s = input_mask
        x_list = []
        theta = At(y,Phi)  # back-projected initialization
        b = torch.zeros_like(Phi)  # ADMM dual variable
        ### stages 1-3
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma1),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)  # un-shift before denoising in the image domain
        theta = self.unet1(x1)
        theta = shift_3d(theta)  # re-shift to the measurement-aligned domain
        b = b- (x-theta)  # dual update
        x_list.append(theta)
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma2),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet2(x1)
        theta = shift_3d(theta)
        b = b- (x-theta)
        x_list.append(theta)
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma3),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet3(x1)
        theta = shift_3d(theta)
        b = b- (x-theta)
        x_list.append(theta)
        ### stages 4-6
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma4),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet4(x1)
        theta = shift_3d(theta)
        b = b- (x-theta)
        x_list.append(theta)
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma5),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet5(x1)
        theta = shift_3d(theta)
        b = b- (x-theta)
        x_list.append(theta)
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma6),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet6(x1)
        theta = shift_3d(theta)
        b = b- (x-theta)
        x_list.append(theta)
        ### stages 7-9
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma7),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet7(x1)
        theta = shift_3d(theta)
        b = b- (x-theta)
        x_list.append(theta)
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma8),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet8(x1)
        theta = shift_3d(theta)
        b = b- (x-theta)
        x_list.append(theta)
        # Final stage: no dual update needed; return the cropped estimate.
        yb = A(theta+b,Phi)
        x = theta+b + At(torch.div(y-yb,Phi_s+self.gamma9),Phi)
        x1 = x-b
        x1 = shift_back_3d(x1)
        theta = self.unet9(x1)
        theta = shift_3d(theta)
        return theta[:, :, :, 0:256]
| 6,191 | 29.653465 | 81 | py |
MST | MST-main/simulation/train_code/architecture/TSA_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
_NORM_BONE = False
def conv_block(in_planes, out_planes, the_kernel=3, the_stride=1, the_padding=1, flag_norm=False, flag_norm_act=True):
    """Conv2d + ReLU; with flag_norm, a BatchNorm is inserted before the ReLU
    (flag_norm_act=True) or appended after it (flag_norm_act=False)."""
    layers = [nn.Conv2d(in_planes, out_planes, kernel_size=the_kernel,
                        stride=the_stride, padding=the_padding)]
    if flag_norm:
        bn = nn.BatchNorm2d(out_planes)
        relu = nn.ReLU(inplace=True)
        layers += [bn, relu] if flag_norm_act else [relu, bn]
    else:
        layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def conv1x1_block(in_planes, out_planes, flag_norm=False):
    """1x1 Conv2d without bias; optionally followed by BatchNorm when flag_norm."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
    if flag_norm:
        return nn.Sequential(conv, nn.BatchNorm2d(out_planes))
    return conv
def fully_block(in_dim, out_dim, flag_norm=False, flag_norm_act=True):
    """Linear + ReLU; optional BatchNorm placed before/after the activation.

    NOTE(review): the norm is BatchNorm2d applied to a Linear output, which
    only works on 4-D inputs; every in-file caller passes flag_norm=False.
    """
    fc = nn.Linear(in_dim, out_dim)
    relu = nn.ReLU(inplace=True)
    if not flag_norm:
        return nn.Sequential(fc, relu)
    bn = nn.BatchNorm2d(out_dim)
    return nn.Sequential(fc, bn, relu) if flag_norm_act else nn.Sequential(fc, relu, bn)
class Res2Net(nn.Module):
    """Res2Net-style multi-scale block: project to uPlane*scale channels,
    process `scale-1` channel groups with cascaded 3x3 convs (each group
    also receives the previous group's output), pass the last group through
    untouched, then project back to inChannel.

    Input/output: (B, inChannel, H, W) -> (B, inChannel, H, W).
    """
    def __init__(self, inChannel, uPlane, scale=4):
        super(Res2Net, self).__init__()
        self.uPlane = uPlane
        self.scale = scale
        self.conv_init = nn.Conv2d(inChannel, uPlane * scale, kernel_size=1, bias=False)
        self.bn_init = nn.BatchNorm2d(uPlane * scale)
        convs = []
        bns = []
        # One 3x3 conv + BN per processed group (the last group is identity).
        for i in range(self.scale - 1):
            convs.append(nn.Conv2d(self.uPlane, self.uPlane, kernel_size=3, stride=1, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(self.uPlane))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv_end = nn.Conv2d(uPlane * scale, inChannel, kernel_size=1, bias=False)
        self.bn_end = nn.BatchNorm2d(inChannel)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.conv_init(x)
        out = self.bn_init(out)
        out = self.relu(out)
        # Split the widened feature map into `scale` groups of uPlane channels.
        spx = torch.split(out, self.uPlane, 1)
        for i in range(self.scale - 1):
            if i == 0:
                sp = spx[i]
            else:
                # Hierarchical connection: add the previous group's processed output.
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp  # rebind: `out` now accumulates the processed groups
            else:
                out = torch.cat((out, sp), 1)
        # The final group bypasses the convs entirely.
        out = torch.cat((out, spx[self.scale - 1]), 1)
        out = self.conv_end(out)
        out = self.bn_end(out)
        return out
_NORM_ATTN = True
_NORM_FC = False
class TSA_Transform(nn.Module):
    """ Spectral-Spatial Self-Attention """
    def __init__(self, uSpace, inChannel, outChannel, nHead, uAttn, mode=[0, 1], flag_mask=False, gamma_learn=False):
        super(TSA_Transform, self).__init__()
        ''' ------------------------------------------
        uSpace:
            uHeight: the [-2] dim of the 3D tensor
            uWidth: the [-1] dim of the 3D tensor
        inChannel:
            the number of Channel of the input tensor
        outChannel:
            the number of Channel of the output tensor
        nHead:
            the number of Head of the input tensor
        uAttn:
            uSpatial: the dim of the spatial features
            uSpectral: the dim of the spectral features
        mask:
            The Spectral Smoothness Mask
        {mode} and {gamma_learn} is just for variable selection
        ------------------------------------------ '''
        self.nHead = nHead
        self.uAttn = uAttn
        self.outChannel = outChannel
        # Fixed (non-learnable) scaling factors for the attention logits.
        self.uSpatial = nn.Parameter(torch.tensor(float(uAttn[0])), requires_grad=False)
        self.uSpectral = nn.Parameter(torch.tensor(float(uAttn[1])), requires_grad=False)
        # Optional spectral-smoothness prior added to the spectral attention map.
        self.mask = nn.Parameter(Spectral_Mask(outChannel), requires_grad=False) if flag_mask else None
        self.attn_scale = nn.Parameter(torch.tensor(1.1), requires_grad=False) if flag_mask else None
        self.gamma = nn.Parameter(torch.tensor(1.0), requires_grad=gamma_learn)
        if sum(mode) > 0:
            # Build a strided-conv pyramid that shrinks the value map before
            # attention and a transposed conv that restores it afterwards.
            down_sample = []
            scale = 1
            cur_channel = outChannel
            for i in range(sum(mode)):
                scale *= 2
                down_sample.append(conv_block(cur_channel, 2 * cur_channel, 3, 2, 1, _NORM_ATTN))
                cur_channel = 2 * cur_channel
            self.cur_channel = cur_channel
            self.down_sample = nn.Sequential(*down_sample)
            self.up_sample = nn.ConvTranspose2d(outChannel * scale, outChannel, scale, scale)
        else:
            self.down_sample = None
            self.up_sample = None
        # Flattened size of Feature_Spectral's conv output (two stride-2 5x5 convs).
        spec_dim = int(uSpace[0] / 4 - 3) * int(uSpace[1] / 4 - 3)
        self.preproc = conv1x1_block(inChannel, outChannel, _NORM_ATTN)
        # Separate query/key extractors per axis (X, Y, lambda) plus a value map.
        self.query_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.query_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.query_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.key_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.key_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.key_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.value = conv1x1_block(outChannel, nHead * outChannel, _NORM_ATTN)
        self.aggregation = nn.Linear(nHead * outChannel, outChannel)
    def forward(self, image):
        feat = self.preproc(image)
        # Queries/keys along width (X), height (Y), and spectrum (lambda).
        feat_qx = self.query_x(feat, 'X')
        feat_qy = self.query_y(feat, 'Y')
        feat_qlambda = self.query_lambda(feat)
        feat_kx = self.key_x(feat, 'X')
        feat_ky = self.key_y(feat, 'Y')
        feat_klambda = self.key_lambda(feat)
        feat_value = self.value(feat)
        # Fold the head dimension into the batch dimension for bmm.
        feat_qx = torch.cat(torch.split(feat_qx, 1, dim=1)).squeeze(dim=1)
        feat_qy = torch.cat(torch.split(feat_qy, 1, dim=1)).squeeze(dim=1)
        feat_kx = torch.cat(torch.split(feat_kx, 1, dim=1)).squeeze(dim=1)
        feat_ky = torch.cat(torch.split(feat_ky, 1, dim=1)).squeeze(dim=1)
        feat_qlambda = torch.cat(torch.split(feat_qlambda, self.uAttn[1], dim=-1))
        feat_klambda = torch.cat(torch.split(feat_klambda, self.uAttn[1], dim=-1))
        feat_value = torch.cat(torch.split(feat_value, self.outChannel, dim=1))
        # Scaled dot-product attention logits per axis.
        energy_x = torch.bmm(feat_qx, feat_kx.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_y = torch.bmm(feat_qy, feat_ky.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_lambda = torch.bmm(feat_qlambda, feat_klambda.permute(0, 2, 1)) / torch.sqrt(self.uSpectral)
        attn_x = F.softmax(energy_x, dim=-1)
        attn_y = F.softmax(energy_y, dim=-1)
        attn_lambda = F.softmax(energy_lambda, dim=-1)
        if self.mask is not None:
            # Blend in the fixed spectral-smoothness prior and renormalize.
            attn_lambda = (attn_lambda + self.mask) / torch.sqrt(self.attn_scale)
        pro_feat = feat_value if self.down_sample is None else self.down_sample(feat_value)
        batchhead, dim_c, dim_x, dim_y = pro_feat.size()
        # Broadcast the spatial attention maps over the channel dimension.
        attn_x_repeat = attn_x.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_x, dim_x)
        attn_y_repeat = attn_y.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_y, dim_y)
        # Apply Y attention, then X attention, to every channel slice.
        pro_feat = pro_feat.view(-1, dim_x, dim_y)
        pro_feat = torch.bmm(pro_feat, attn_y_repeat.permute(0, 2, 1))
        pro_feat = torch.bmm(pro_feat.permute(0, 2, 1), attn_x_repeat.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, dim_c, dim_x, dim_y)
        if self.up_sample is not None:
            pro_feat = self.up_sample(pro_feat)
            _, _, dim_x, dim_y = pro_feat.size()
        # Apply spectral attention across the channel dimension.
        pro_feat = pro_feat.contiguous().view(batchhead, self.outChannel, -1).permute(0, 2, 1)
        pro_feat = torch.bmm(pro_feat, attn_lambda.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, self.outChannel, dim_x, dim_y)
        # Un-fold heads back out of the batch dim and aggregate them linearly.
        pro_feat = torch.cat(torch.split(pro_feat, int(batchhead / self.nHead), dim=0), dim=1).permute(0, 2, 3, 1)
        pro_feat = self.aggregation(pro_feat).permute(0, 3, 1, 2)
        # Residual connection weighted by gamma (fixed unless gamma_learn=True).
        out = self.gamma * pro_feat + feat
        return out, (attn_x, attn_y, attn_lambda)
class Feature_Spatial(nn.Module):
    """Spatial Feature Generation Component: two strided conv blocks followed
    by a fully-connected projection; the 'Y' direction reuses the same convs
    on the transposed spatial axes."""

    def __init__(self, inChannel, nHead, shiftDim, outDim, mode):
        super(Feature_Spatial, self).__init__()
        kernels = [(1, 5), (3, 5)]
        strides = [(1, 2), (2, 2)]
        paddings = [(0, 2), (1, 2)]
        m0, m1 = mode
        self.conv1 = conv_block(inChannel, nHead, kernels[m0], strides[m0], paddings[m0], _NORM_ATTN)
        self.conv2 = conv_block(nHead, nHead, kernels[m1], strides[m1], paddings[m1], _NORM_ATTN)
        self.fully = fully_block(shiftDim, outDim, _NORM_FC)

    def forward(self, image, direction):
        # For the Y branch, swap H and W so the same convs scan the other axis.
        feat = image.permute(0, 1, 3, 2) if direction == 'Y' else image
        feat = self.conv1(feat)
        feat = self.conv2(feat)
        return self.fully(feat)
class Feature_Spectral(nn.Module):
    """Spectral Feature Generation Component: two stride-2 5x5 conv blocks,
    flatten the spatial dims per channel, then project to nHead*outDim."""

    def __init__(self, inChannel, nHead, viewDim, outDim):
        super(Feature_Spectral, self).__init__()
        self.inChannel = inChannel
        self.conv1 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.conv2 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.fully = fully_block(viewDim, int(nHead * outDim), _NORM_FC)

    def forward(self, image):
        feat = self.conv2(self.conv1(image))
        flattened = feat.view(image.size(0), self.inChannel, -1)
        return self.fully(flattened)
def Spectral_Mask(dim_lambda):
    """Build the (1, dim_lambda, dim_lambda) spectral-smoothness prior.

    Each row is a window of a raised-cosine curve centered on the diagonal,
    so attention between nearby bands is boosted and a band's weight on
    itself peaks at 1.0.
    """
    curve = (np.cos(np.linspace(-1, 1, num=2 * dim_lambda - 1) * np.pi) + 1.0) / 2.0
    rows = [curve[dim_lambda - 1 - i: 2 * dim_lambda - 1 - i] for i in range(dim_lambda)]
    att = np.stack(rows, axis=0)
    return torch.from_numpy(att.astype(np.float32)).unsqueeze(0)
class TSA_Net(nn.Module):
    """TSA-Net: a 4-level encoder-decoder with TSA_Transform attention blocks
    inserted on the three finest decoder scales; Sigmoid-bounded output.

    Input/output: (B, in_ch, 256, 256) -> (B, out_ch, 256, 256); the
    TSA_Transform spatial sizes (64/128/256) are hard-coded for 256x256 input.
    """
    def __init__(self, in_ch=28, out_ch=28):
        super(TSA_Net, self).__init__()
        # Encoder: first two stages use plain convs, deeper ones Res2Net blocks.
        self.tconv_down1 = Encoder_Triblock(in_ch, 64, False)
        self.tconv_down2 = Encoder_Triblock(64, 128, False)
        self.tconv_down3 = Encoder_Triblock(128, 256)
        self.tconv_down4 = Encoder_Triblock(256, 512)
        self.bottom1 = conv_block(512, 1024)
        self.bottom2 = conv_block(1024, 1024)
        # Decoder with spectral-spatial attention at each upsampled scale;
        # the last transform also applies the spectral-smoothness mask.
        self.tconv_up4 = Decoder_Triblock(1024, 512)
        self.tconv_up3 = Decoder_Triblock(512, 256)
        self.transform3 = TSA_Transform((64, 64), 256, 256, 8, (64, 80), [0, 0])
        self.tconv_up2 = Decoder_Triblock(256, 128)
        self.transform2 = TSA_Transform((128, 128), 128, 128, 8, (64, 40), [1, 0])
        self.tconv_up1 = Decoder_Triblock(128, 64)
        self.transform1 = TSA_Transform((256, 256), 64, 28, 8, (48, 30), [1, 1], True)
        self.conv_last = nn.Conv2d(out_ch, out_ch, 1)
        self.afn_last = nn.Sigmoid()
    def forward(self, x, input_mask=None):
        # input_mask is accepted for interface compatibility but unused here.
        enc1, enc1_pre = self.tconv_down1(x)
        enc2, enc2_pre = self.tconv_down2(enc1)
        enc3, enc3_pre = self.tconv_down3(enc2)
        enc4, enc4_pre = self.tconv_down4(enc3)
        # enc5,enc5_pre = self.tconv_down5(enc4)
        bottom = self.bottom1(enc4)
        bottom = self.bottom2(bottom)
        # dec5 = self.tconv_up5(bottom,enc5_pre)
        dec4 = self.tconv_up4(bottom, enc4_pre)
        dec3 = self.tconv_up3(dec4, enc3_pre)
        dec3, _ = self.transform3(dec3)  # attention maps are discarded
        dec2 = self.tconv_up2(dec3, enc2_pre)
        dec2, _ = self.transform2(dec2)
        dec1 = self.tconv_up1(dec2, enc1_pre)
        dec1, _ = self.transform1(dec1)
        dec1 = self.conv_last(dec1)
        output = self.afn_last(dec1)  # Sigmoid bounds the output to (0, 1)
        return output
class Encoder_Triblock(nn.Module):
    """Encoder stage: conv block, then Res2Net (flag_res) or a second conv
    block, then optional max-pooling. Returns (pooled, pre-pool) features so
    the decoder can use the pre-pool map as a skip connection."""

    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Encoder_Triblock, self).__init__()
        self.layer1 = conv_block(inChannel, outChannel, nKernal, flag_norm=_NORM_BONE)
        self.layer2 = (Res2Net(outChannel, int(outChannel / 4)) if flag_res
                       else conv_block(outChannel, outChannel, nKernal, flag_norm=_NORM_BONE))
        self.pool = nn.MaxPool2d(nPool) if flag_Pool else None

    def forward(self, x):
        feat = self.layer2(self.layer1(x))
        if self.pool is None:
            return feat, feat
        return self.pool(feat), feat
class Decoder_Triblock(nn.Module):
    """Decoder stage: 2x transposed-conv upsample, concat the encoder skip
    (padding on mismatch), then Res2Net/conv fusion and a channel-reducing conv."""

    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Decoder_Triblock, self).__init__()
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(inChannel, outChannel, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        if flag_res:
            self.layer2 = Res2Net(int(outChannel * 2), int(outChannel / 2))
        else:
            self.layer2 = conv_block(outChannel * 2, outChannel * 2, nKernal, flag_norm=_NORM_BONE)
        self.layer3 = conv_block(outChannel * 2, outChannel, nKernal, flag_norm=_NORM_BONE)

    def forward(self, feat_dec, feat_enc):
        feat_dec = self.layer1(feat_dec)
        dy = feat_enc.size()[2] - feat_dec.size()[2]
        dx = feat_enc.size()[3] - feat_dec.size()[3]
        if dy != 0 or dx != 0:
            # Off-by-one sizes can occur when the encoder input was odd-sized.
            print('Padding for size mismatch ( Enc:', feat_enc.size(), 'Dec:', feat_dec.size(), ')')
            feat_dec = F.pad(feat_dec, [dx // 2, dx - dx // 2, dy // 2, dy - dy // 2])
        merged = torch.cat([feat_dec, feat_enc], dim=1)
        return self.layer3(self.layer2(merged))
MST | MST-main/simulation/train_code/architecture/__init__.py | import torch
from .MST import MST
from .GAP_Net import GAP_net
from .ADMM_Net import ADMM_net
from .TSA_Net import TSA_Net
from .HDNet import HDNet, FDL
from .DGSMP import HSI_CS
from .BIRNAT import BIRNAT
from .MST_Plus_Plus import MST_Plus_Plus
from .Lambda_Net import Lambda_Net
from .CST import CST
from .DAUHST import DAUHST
def model_generator(method, pretrained_model_path=None):
    """Build the requested CASSI reconstruction model on the GPU and
    optionally load pretrained weights.

    Args:
        method: model identifier, e.g. 'mst_s', 'gap_net', 'cst_l', 'dauhst_9stg'.
        pretrained_model_path: optional checkpoint path; 'module.' prefixes
            from DataParallel checkpoints are stripped before loading.

    Returns:
        The model; for 'hdnet' a (model, fdl_loss) tuple, matching the
        training code's expectation.

    Raises:
        ValueError: if `method` is unknown. (Previously this only printed a
        message and then crashed with a NameError on the undefined `model`.)
    """
    if method == 'mst_s':
        model = MST(dim=28, stage=2, num_blocks=[2, 2, 2]).cuda()
    elif method == 'mst_m':
        model = MST(dim=28, stage=2, num_blocks=[2, 4, 4]).cuda()
    elif method == 'mst_l':
        model = MST(dim=28, stage=2, num_blocks=[4, 7, 5]).cuda()
    elif method == 'gap_net':
        model = GAP_net().cuda()
    elif method == 'admm_net':
        model = ADMM_net().cuda()
    elif method == 'tsa_net':
        model = TSA_Net().cuda()
    elif method == 'hdnet':
        model = HDNet().cuda()
        fdl_loss = FDL(loss_weight=0.7,
                       alpha=2.0,
                       patch_factor=4,
                       ave_spectrum=True,
                       log_matrix=True,
                       batch_matrix=True,
                       ).cuda()
    elif method == 'dgsmp':
        model = HSI_CS(Ch=28, stages=4).cuda()
    elif method == 'birnat':
        model = BIRNAT().cuda()
    elif method == 'mst_plus_plus':
        model = MST_Plus_Plus(in_channels=28, out_channels=28, n_feat=28, stage=3).cuda()
    elif method == 'lambda_net':
        model = Lambda_Net(out_ch=28).cuda()
    elif method == 'cst_s':
        model = CST(num_blocks=[1, 1, 2], sparse=True).cuda()
    elif method == 'cst_m':
        model = CST(num_blocks=[2, 2, 2], sparse=True).cuda()
    elif method == 'cst_l':
        model = CST(num_blocks=[2, 4, 6], sparse=True).cuda()
    elif method == 'cst_l_plus':
        model = CST(num_blocks=[2, 4, 6], sparse=False).cuda()
    elif 'dauhst' in method:
        # e.g. 'dauhst_9stg' -> 9 unfolding iterations (single-digit stage counts).
        num_iterations = int(method.split('_')[1][0])
        model = DAUHST(num_iterations=num_iterations).cuda()
    else:
        raise ValueError(f'Method {method} is not defined !!!!')
    if pretrained_model_path is not None:
        print(f'load model from {pretrained_model_path}')
        checkpoint = torch.load(pretrained_model_path)
        # Strip DataParallel's 'module.' prefix so weights load into a bare model.
        model.load_state_dict({k.replace('module.', ''): v for k, v in checkpoint.items()},
                              strict=True)
    if method == 'hdnet':
        return model, fdl_loss
    return model
MST | MST-main/simulation/train_code/architecture/HDNet.py | import torch
import torch.nn as nn
import math
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """'Same' convolution: odd kernel_size with padding = kernel_size // 2
    keeps the spatial dimensions unchanged."""
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=kernel_size // 2, bias=bias)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that subtracts (sign=-1) or re-adds (sign=+1) the
    per-channel RGB mean, scaled by rgb_range/rgb_std; parameters are frozen."""

    def __init__(
            self, rgb_range,
            rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        mean = torch.Tensor(rgb_mean)
        # Identity weighting divided by std; bias shifts by the scaled mean.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * mean / std
        for param in self.parameters():
            param.requires_grad = False
class BasicBlock(nn.Sequential):
    """conv -> optional BatchNorm -> optional activation, as one Sequential.

    NOTE(review): `stride` is accepted but never forwarded to `conv`;
    preserved as-is since callers may rely on the current behavior.
    """

    def __init__(
            self, conv, in_channels, out_channels, kernel_size, stride=1, bias=False,
            bn=True, act=nn.ReLU(True)):
        layers = [conv(in_channels, out_channels, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """EDSR-style residual block: conv-(bn)-act-conv-(bn), the branch output
    scaled by res_scale, plus the identity shortcut."""

    def __init__(
            self, conv, n_feats, kernel_size,
            bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for i in range(2):
            layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feats))
            if i == 0:
                layers.append(act)  # activation only between the two convs
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        # res_scale damps the residual branch before the shortcut addition.
        return self.body(x).mul(self.res_scale) + x
class Upsampler(nn.Sequential):
    """Sub-pixel upsampler: conv + PixelShuffle stages for power-of-two
    scales (one 2x stage per factor of 2) or a single 3x stage.

    Raises NotImplementedError for any scale that is neither a power of two
    nor exactly 3. `act` selects the activation by string ('relu'/'prelu');
    any other value (including the default False) adds none.
    """
    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:  # power-of-two check via bit trick
            for _ in range(int(math.log(scale, 2))):
                # Each stage quadruples channels, then PixelShuffle(2) trades
                # them for a 2x spatial upscale.
                m.append(conv(n_feats, 4 * n_feats, 3, bias))
                m.append(nn.PixelShuffle(2))
                if bn:
                    m.append(nn.BatchNorm2d(n_feats))
                if act == 'relu':
                    m.append(nn.ReLU(True))
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))
        elif scale == 3:
            m.append(conv(n_feats, 9 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(3))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*m)
_NORM_BONE = False
def constant_init(module, val, bias=0):
    """Fill module.weight with `val` and module.bias with `bias`, skipping
    whichever attribute is absent or None."""
    if getattr(module, 'weight', None) is not None:
        nn.init.constant_(module.weight, val)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialize module.weight (normal or uniform variant) and
    constant-fill module.bias when it exists."""
    assert distribution in ['uniform', 'normal']
    init_fn = (nn.init.kaiming_uniform_ if distribution == 'uniform'
               else nn.init.kaiming_normal_)
    init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
# depthwise-separable convolution (DSC)
# depthwise-separable convolution (DSC)
class DSC(nn.Module):
    """Depthwise-separable attention unit: a depthwise 1x1 path produces a
    single-channel spatial softmax map that re-weights the input, with a
    residual connection (out = x * map + x).

    Input/output: (B, nin, H, W) -> (B, nin, H, W).
    """
    def __init__(self, nin: int) -> None:
        super(DSC, self).__init__()
        # Depthwise 1x1 conv (groups=nin): per-channel scaling.
        self.conv_dws = nn.Conv2d(
            nin, nin, kernel_size=1, stride=1, padding=0, groups=nin
        )
        self.bn_dws = nn.BatchNorm2d(nin, momentum=0.9)
        self.relu_dws = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # Pointwise conv collapses all channels into one attention channel.
        self.conv_point = nn.Conv2d(
            nin, 1, kernel_size=1, stride=1, padding=0, groups=1
        )
        self.bn_point = nn.BatchNorm2d(1, momentum=0.9)
        self.relu_point = nn.ReLU(inplace=False)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.conv_dws(x)
        out = self.bn_dws(out)
        out = self.relu_dws(out)
        out = self.maxpool(out)
        out = self.conv_point(out)
        out = self.bn_point(out)
        out = self.relu_point(out)
        m, n, p, q = out.shape
        # Softmax over the flattened spatial positions of the 1-channel map.
        out = self.softmax(out.view(m, n, -1))
        out = out.view(m, n, p, q)
        # Broadcast the spatial map across all input channels.
        out = out.expand(x.shape[0], x.shape[1], x.shape[2], x.shape[3])
        out = torch.mul(out, x)
        out = out + x  # residual connection
        return out
# Efficient Feature Fusion(EFF)
# Efficient Feature Fusion(EFF)
class EFF(nn.Module):
    """Efficient Feature Fusion: split the channels into num_splits groups,
    run an independent DSC attention unit on each group, and concatenate."""

    def __init__(self, nin: int, nout: int, num_splits: int) -> None:
        super(EFF, self).__init__()
        assert nin % num_splits == 0
        self.nin = nin
        self.nout = nout  # kept for interface compatibility; not used below
        self.num_splits = num_splits
        group_width = self.nin // self.num_splits
        self.subspaces = nn.ModuleList(
            [DSC(group_width) for _ in range(self.num_splits)]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        groups = torch.chunk(x, self.num_splits, dim=1)
        fused = [branch(group) for branch, group in zip(self.subspaces, groups)]
        return torch.cat(fused, dim=1)
# spatial-spectral domain attention learning(SDL)
# spatial-spectral domain attention learning(SDL)
class SDL_attention(nn.Module):
    """Spatial-spectral domain attention: a channel-gating branch driven by a
    spatial-softmax context ("spatial_attention") plus a spatial-gating branch
    driven by pooled channel context ("spectral_attention"); their outputs
    are summed.

    Input/output: (B, inplanes, H, W) -> (B, inplanes, H, W).
    """
    def __init__(self, inplanes, planes, kernel_size=1, stride=1):
        super(SDL_attention, self).__init__()
        self.inplanes = inplanes
        self.inter_planes = planes // 2
        self.planes = planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = (kernel_size-1)//2
        # "right" branch: spatial context -> channel mask.
        self.conv_q_right = nn.Conv2d(self.inplanes, 1, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_v_right = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_up = nn.Conv2d(self.inter_planes, self.planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.softmax_right = nn.Softmax(dim=2)
        self.sigmoid = nn.Sigmoid()
        # "left" branch: pooled channel context -> spatial mask.
        self.conv_q_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #g
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_v_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #theta
        self.softmax_left = nn.Softmax(dim=2)
        self.reset_parameters()
    def reset_parameters(self):
        # Kaiming init for all four projection convs; mark them as initialized.
        kaiming_init(self.conv_q_right, mode='fan_in')
        kaiming_init(self.conv_v_right, mode='fan_in')
        kaiming_init(self.conv_q_left, mode='fan_in')
        kaiming_init(self.conv_v_left, mode='fan_in')
        self.conv_q_right.inited = True
        self.conv_v_right.inited = True
        self.conv_q_left.inited = True
        self.conv_v_left.inited = True
    # HR spatial attention: softmax over positions weights the value features,
    # producing a per-channel sigmoid gate applied to x.
    def spatial_attention(self, x):
        input_x = self.conv_v_right(x)
        batch, channel, height, width = input_x.size()
        input_x = input_x.view(batch, channel, height * width)
        context_mask = self.conv_q_right(x)
        context_mask = context_mask.view(batch, 1, height * width)
        context_mask = self.softmax_right(context_mask)
        # (B, C', HW) x (B, HW, 1) -> (B, C', 1): softmax-weighted spatial pooling.
        context = torch.matmul(input_x, context_mask.transpose(1,2))
        context = context.unsqueeze(-1)
        context = self.conv_up(context)
        mask_ch = self.sigmoid(context)
        out = x * mask_ch
        return out
    # HR spectral attention: average-pooled channel descriptor attends over
    # positions, producing a per-pixel sigmoid gate applied to x.
    def spectral_attention(self, x):
        g_x = self.conv_q_left(x)
        batch, channel, height, width = g_x.size()
        avg_x = self.avg_pool(g_x)
        batch, channel, avg_x_h, avg_x_w = avg_x.size()
        avg_x = avg_x.view(batch, channel, avg_x_h * avg_x_w).permute(0, 2, 1)
        theta_x = self.conv_v_left(x).view(batch, self.inter_planes, height * width)
        # (B, 1, C') x (B, C', HW) -> (B, 1, HW) spatial attention logits.
        context = torch.matmul(avg_x, theta_x)
        context = self.softmax_left(context)
        context = context.view(batch, 1, height, width)
        mask_sp = self.sigmoid(context)
        out = x * mask_sp
        return out
    def forward(self, x):
        context_spectral = self.spectral_attention(x)
        context_spatial = self.spatial_attention(x)
        out = context_spatial + context_spectral
        return out
class HDNet(nn.Module):
    """HDNet backbone: head conv -> 16 residual blocks with SDL attention and
    EFF fusion inserted after the first block -> tail conv, with a global
    residual connection around the body.

    Input/output: (B, in_ch, H, W) -> (B, out_ch, H, W).
    """
    def __init__(self, in_ch=28, out_ch=28, conv=default_conv):
        super(HDNet, self).__init__()
        n_resblocks = 16
        n_feats = 64
        kernel_size = 3
        act = nn.ReLU(True)
        # define head module
        m_head = [conv(in_ch, n_feats, kernel_size)]
        # define body module: one ResBlock, then SDL attention + EFF fusion,
        # then the remaining n_resblocks-1 ResBlocks and a closing conv.
        m_body = [
            ResBlock(
                conv, n_feats, kernel_size, act=act, res_scale= 1
            ) for _ in range(n_resblocks)
        ]
        m_body.append(SDL_attention(inplanes = n_feats, planes = n_feats))
        m_body.append(EFF(nin=n_feats, nout=n_feats, num_splits=4))
        for i in range(1, n_resblocks):
            m_body.append(ResBlock(
                conv, n_feats, kernel_size, act=act, res_scale= 1
            ))
        m_body.append(conv(n_feats, n_feats, kernel_size))
        m_tail = [conv(n_feats, out_ch, kernel_size)]
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
    def forward(self, x, input_mask=None):
        # input_mask is accepted for interface compatibility but unused here.
        x = self.head(x)
        res = self.body(x)
        res += x  # global residual around the body
        x = self.tail(res)
        return x
# frequency domain learning(FDL)
# frequency domain learning(FDL)
class FDL(nn.Module):
    """Focal frequency-domain loss: compares the 2-D DFT spectra of prediction
    and target (optionally per-patch), weighting each frequency by its own
    error magnitude raised to `alpha`.

    Args:
        loss_weight: final scaling of the loss.
        alpha: exponent for the self-derived spectrum weight matrix.
        patch_factor: split each image into patch_factor^2 patches before FFT.
        ave_spectrum: average spectra over the batch before comparing.
        log_matrix: apply log(1 + w) to the weight matrix.
        batch_matrix: normalize the weight matrix by its global max instead of
            per-(batch, patch, channel) maxima.
    """
    def __init__(self, loss_weight=1.0, alpha=1.0, patch_factor=1, ave_spectrum=False, log_matrix=False, batch_matrix=False):
        super(FDL, self).__init__()
        self.loss_weight = loss_weight
        self.alpha = alpha
        self.patch_factor = patch_factor
        self.ave_spectrum = ave_spectrum
        self.log_matrix = log_matrix
        self.batch_matrix = batch_matrix

    def tensor2freq(self, x):
        """Split x (B, C, H, W) into patches and return their orthonormal 2-D
        DFTs as a real tensor with a trailing (real, imag) dim of size 2."""
        patch_factor = self.patch_factor
        _, _, h, w = x.shape
        assert h % patch_factor == 0 and w % patch_factor == 0, (
            'Patch factor should be divisible by image height and width')
        patch_list = []
        patch_h = h // patch_factor
        patch_w = w // patch_factor
        for i in range(patch_factor):
            for j in range(patch_factor):
                patch_list.append(x[:, :, i * patch_h:(i + 1) * patch_h, j * patch_w:(j + 1) * patch_w])
        y = torch.stack(patch_list, 1)
        if hasattr(torch, 'rfft'):
            # PyTorch < 1.8 path, kept so old environments behave unchanged.
            return torch.rfft(y, 2, onesided=False, normalized=True)
        # torch.rfft was removed in PyTorch 1.8; view_as_real(fft2(norm='ortho'))
        # reproduces the old (..., 2) real/imag layout and normalization.
        return torch.view_as_real(torch.fft.fft2(y, dim=(-2, -1), norm='ortho'))

    def loss_formulation(self, recon_freq, real_freq, matrix=None):
        """Weighted squared distance between spectra; the weight matrix is
        either supplied or derived from the (detached) error magnitudes."""
        if matrix is not None:
            weight_matrix = matrix.detach()
        else:
            matrix_tmp = (recon_freq - real_freq) ** 2
            # |error| ** alpha per frequency bin (real^2 + imag^2 under sqrt).
            matrix_tmp = torch.sqrt(matrix_tmp[..., 0] + matrix_tmp[..., 1]) ** self.alpha
            if self.log_matrix:
                matrix_tmp = torch.log(matrix_tmp + 1.0)
            if self.batch_matrix:
                matrix_tmp = matrix_tmp / matrix_tmp.max()
            else:
                matrix_tmp = matrix_tmp / matrix_tmp.max(-1).values.max(-1).values[:, :, :, None, None]
            # Zero spectra produce 0/0 = NaN; treat those bins as weight 0.
            matrix_tmp[torch.isnan(matrix_tmp)] = 0.0
            matrix_tmp = torch.clamp(matrix_tmp, min=0.0, max=1.0)
            weight_matrix = matrix_tmp.clone().detach()
        assert weight_matrix.min().item() >= 0 and weight_matrix.max().item() <= 1, (
            'The values of spectrum weight matrix should be in the range [0, 1], '
            'but got Min: %.10f Max: %.10f' % (weight_matrix.min().item(), weight_matrix.max().item()))
        tmp = (recon_freq - real_freq) ** 2
        freq_distance = tmp[..., 0] + tmp[..., 1]
        loss = weight_matrix * freq_distance
        return torch.mean(loss)

    def forward(self, pred, target, matrix=None, **kwargs):
        """Return the weighted frequency loss between pred and target."""
        pred_freq = self.tensor2freq(pred)
        target_freq = self.tensor2freq(target)
        if self.ave_spectrum:
            pred_freq = torch.mean(pred_freq, 0, keepdim=True)
            target_freq = torch.mean(target_freq, 0, keepdim=True)
        return self.loss_formulation(pred_freq, target_freq, matrix) * self.loss_weight
| 12,665 | 33.048387 | 132 | py |
MST | MST-main/simulation/test_code/test.py | from architecture import *
from utils import *
import scipy.io as scio
import torch
import os
import numpy as np
from option import opt
# Pin GPU enumeration to PCI bus order and select the device(s) from the CLI.
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
# cuDNN autotuning is safe here: input sizes are fixed across the test set.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
if not torch.cuda.is_available():
    raise Exception('NO GPU!')
# Initialize the coded-aperture mask; 10 presumably matches the number of
# test scenes -- TODO confirm against the test dataset.
mask3d_batch, input_mask = init_mask(opt.mask_path, opt.input_mask, 10)
if not os.path.exists(opt.outf):
    os.makedirs(opt.outf)
def test(model):
    """Run `model` on the full test set and return (pred, truth).

    Uses the module-level globals opt / mask3d_batch / input_mask.
    Returns both arrays as (N, H, W, 28) float32 numpy arrays in HWC order.
    """
    test_data = LoadTest(opt.test_path)
    test_gt = test_data.cuda().float()
    # Simulate the CASSI measurement from the ground truth and the mask.
    input_meas = init_meas(test_gt, mask3d_batch, opt.input_setting)
    model.eval()
    with torch.no_grad():
        model_out = model(input_meas, input_mask)
    pred = np.transpose(model_out.detach().cpu().numpy(), (0, 2, 3, 1)).astype(np.float32)
    truth = np.transpose(test_gt.cpu().numpy(), (0, 2, 3, 1)).astype(np.float32)
    model.train()  # restore training mode for callers that keep using the model
    return pred, truth
def main():
    """Build the model from the CLI options, run inference on the test set,
    and save predictions plus ground truth to a single .mat file."""
    # model
    if opt.method == 'hdnet':
        # hdnet's generator returns (model, fdl_loss); the loss is unused at test time.
        model, FDL_loss = model_generator(opt.method, opt.pretrained_model_path)
        model = model.cuda()
    else:
        model = model_generator(opt.method, opt.pretrained_model_path).cuda()
    pred, truth = test(model)
    name = opt.outf + 'Test_result.mat'
    print(f'Save reconstructed HSIs as {name}.')
    scio.savemat(name, {'truth': truth, 'pred': pred})
if __name__ == '__main__':
main() | 1,458 | 30.042553 | 90 | py |
MST | MST-main/simulation/test_code/utils.py | import scipy.io as sio
import os
import numpy as np
import torch
import logging
from fvcore.nn import FlopCountAnalysis
def generate_masks(mask_path, batch_size):
    """Load the 2-D coded mask from <mask_path>/mask.mat and tile it into a
    (batch_size, 28, H, W) float CUDA tensor (same mask for every band)."""
    mask2d = sio.loadmat(mask_path + '/mask.mat')['mask']
    mask3d = np.tile(mask2d[:, :, np.newaxis], (1, 1, 28))
    mask3d = torch.from_numpy(np.transpose(mask3d, [2, 0, 1]))
    nC, H, W = mask3d.shape
    return mask3d.expand([batch_size, nC, H, W]).cuda().float()
def generate_shift_masks(mask_path, batch_size):
    """Load the pre-shifted 3-D mask and return (Phi, Phi_s) CUDA batches.

    Phi_s is the band-wise sum of Phi**2 with zeros replaced by 1 so later
    divisions by Phi_s cannot divide by zero.
    """
    mat = sio.loadmat(mask_path + '/mask_3d_shift.mat')
    mask_3d_shift = np.transpose(mat['mask_3d_shift'], [2, 0, 1])
    mask_3d_shift = torch.from_numpy(mask_3d_shift)
    nC, H, W = mask_3d_shift.shape
    Phi_batch = mask_3d_shift.expand([batch_size, nC, H, W]).cuda().float()
    Phi_s_batch = torch.sum(Phi_batch ** 2, 1)
    Phi_s_batch[Phi_s_batch == 0] = 1
    return Phi_batch, Phi_s_batch
def LoadTest(path_test):
    """Load every .mat scene under path_test (sorted by filename) into a
    (N, 28, 256, 256) float64 tensor; each file must hold a 256x256x28 'img'."""
    scene_list = sorted(os.listdir(path_test))
    test_data = np.zeros((len(scene_list), 256, 256, 28))
    for idx, scene in enumerate(scene_list):
        test_data[idx, :, :, :] = sio.loadmat(path_test + scene)['img']
    return torch.from_numpy(np.transpose(test_data, (0, 3, 1, 2)))
def LoadMeasurement(path_test_meas):
    """Load the pre-simulated measurement cube stored under the
    'simulation_test' key of a .mat file and return it as a torch tensor."""
    measurement = sio.loadmat(path_test_meas)['simulation_test']
    return torch.from_numpy(measurement)
def time2file_name(time):
    """Convert a timestamp string 'YYYY-MM-DD HH:MM:SS...' into a
    filesystem-safe name 'YYYY_MM_DD_HH_MM_SS' (fixed character positions)."""
    fields = (time[0:4],    # year
              time[5:7],    # month
              time[8:10],   # day
              time[11:13],  # hour
              time[14:16],  # minute
              time[17:19])  # second
    return '_'.join(fields)
def shuffle_crop(train_data, batch_size, crop_size=256):
    """Sample `batch_size` random scenes (with replacement) and cut a random
    `crop_size` x `crop_size` patch from each; returns [B, 28, H, W] float32.

    The RNG call order (one `choice`, then per-sample x/y `randint`) is kept
    so results match the original for a fixed numpy seed.
    """
    picks = np.random.choice(range(len(train_data)), batch_size)
    batch = np.zeros((batch_size, crop_size, crop_size, 28), dtype=np.float32)
    for slot, scene_idx in enumerate(picks):
        scene = train_data[scene_idx]
        h, w, _ = scene.shape
        top = np.random.randint(0, h - crop_size)
        left = np.random.randint(0, w - crop_size)
        batch[slot, :, :, :] = scene[top:top + crop_size, left:left + crop_size, :]
    return torch.from_numpy(np.transpose(batch, (0, 3, 1, 2)))
def gen_meas_torch(data_batch, mask3d_batch, Y2H=True, mul_mask=False):
    """Simulate the CASSI measurement of a batch of HSIs.

    Y2H=False returns the raw 2D measurement [b, H, W+2*(nC-1)] (mul_mask is
    ignored in that case, matching the original); Y2H=True normalizes it,
    shifts it back into a 28-band cube, and optionally re-applies the mask.
    """
    batch_size, nC, H, W = data_batch.shape
    mask = (mask3d_batch[0, :, :, :]).expand([batch_size, nC, H, W]).cuda().float()  # [b,28,256,256]
    meas = torch.sum(shift(mask * data_batch, 2), 1)
    if not Y2H:
        return meas
    meas = meas / nC * 2
    cube = shift_back(meas)
    if mul_mask:
        return torch.mul(cube, mask)
    return cube
def shift(inputs, step=2):
    """Disperse band i of [bs, nC, row, col] by step*i columns into a widened
    zero-padded tensor of width col + (nC-1)*step (GPU, float)."""
    bs, nC, row, col = inputs.shape
    shifted = torch.zeros(bs, nC, row, col + (nC - 1) * step).cuda().float()
    for band in range(nC):
        offset = step * band
        shifted[:, band, :, offset:offset + col] = inputs[:, band, :, :]
    return shifted
def shift_back(inputs, step=2):
    """Inverse of `shift` for a 2D measurement: slice band i's window of
    width col-(nC-1)*step starting at step*i. [bs,256,310] -> [bs,28,256,256]."""
    bs, row, col = inputs.shape
    nC = 28
    out_col = col - (nC - 1) * step
    output = torch.zeros(bs, nC, row, out_col).cuda().float()
    for band in range(nC):
        offset = step * band
        output[:, band, :, :] = inputs[:, :, offset:offset + out_col]
    return output
def gen_log(model_path):
    """Configure the root logger at INFO level to write both to
    `<model_path>/log.txt` (append mode) and to the console; returns it.

    NOTE(review): repeated calls keep adding handlers to the root logger,
    exactly as the original did.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s: %(message)s")
    file_handler = logging.FileHandler(model_path + '/log.txt', mode='a')
    stream_handler = logging.StreamHandler()
    for handler in (file_handler, stream_handler):
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def init_mask(mask_path, mask_type, batch_size):
    """Build the mask tensor(s) required by the chosen model input convention.

    :param mask_path: directory containing mask.mat / mask_3d_shift.mat
    :param mask_type: 'Phi' (shifted mask), 'Phi_PhiPhiT' (Phi plus its
        energy map), 'Mask' (plain tiled mask), or None (no extra input)
    :return: (mask3d_batch, input_mask) where input_mask matches mask_type
    """
    mask3d_batch = generate_masks(mask_path, batch_size)
    if mask_type == 'Phi':
        shift_mask3d_batch = shift(mask3d_batch)
        input_mask = shift_mask3d_batch
    elif mask_type == 'Phi_PhiPhiT':
        Phi_batch, Phi_s_batch = generate_shift_masks(mask_path, batch_size)
        input_mask = (Phi_batch, Phi_s_batch)
    elif mask_type == 'Mask':
        input_mask = mask3d_batch
    # Bug fix: comparison to None should use identity, not equality (PEP 8);
    # `== None` invokes __eq__ and can misbehave for arbitrary objects.
    elif mask_type is None:
        input_mask = None
    return mask3d_batch, input_mask
def init_meas(gt, mask, input_setting):
    """Simulate measurements from ground truth under one of three input
    conventions: 'H' (shift-back cube), 'HM' (cube times mask), 'Y' (raw 2D
    measurement)."""
    if input_setting == 'H':
        input_meas = gen_meas_torch(gt, mask, Y2H=True, mul_mask=False)
    elif input_setting == 'HM':
        input_meas = gen_meas_torch(gt, mask, Y2H=True, mul_mask=True)
    elif input_setting == 'Y':
        # mul_mask has no effect when Y2H is False (raw measurement returned).
        input_meas = gen_meas_torch(gt, mask, Y2H=False, mul_mask=True)
    return input_meas
def my_summary(test_model, H=256, W=256, C=28, N=1):
    """Print the model architecture, its FLOPs on an [N,C,H,W] random input
    (via fvcore's FlopCountAnalysis), and the total parameter count."""
    model = test_model.cuda()
    print(model)
    dummy = torch.randn((N, C, H, W)).cuda()
    flops = FlopCountAnalysis(model, dummy)
    param_count = sum(p.nelement() for p in model.parameters())
    print(f'GMac:{flops.total()/(1024*1024*1024)}')
    print(f'Params:{param_count}')
MST | MST-main/simulation/test_code/architecture/MST_Plus_Plus.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Public wrapper: fill `tensor` in place with truncated-normal values
    (see `_no_grad_trunc_normal_`); returns `tensor`."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """In-place variance-scaling initialization (as in JAX/TF initializers).

    :param mode: 'fan_in' | 'fan_out' | 'fan_avg' — which fan to normalize by
    :param distribution: 'truncated_normal' | 'normal' | 'uniform'
    :raises ValueError: for an unknown distribution
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    variance = scale / denom
    std = math.sqrt(variance)
    if distribution == "truncated_normal":
        # constant rescales std because samples are clipped to +/- 2 std
        trunc_normal_(tensor, std=std / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=std)
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: truncated normal with variance 1/fan_in, in place."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Applies LayerNorm over the last dimension before calling the wrapped
    callable `fn`; extra positional/keyword args are forwarded to `fn`."""
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Thin module wrapper around the functional GELU activation so it can
    be placed inside nn.Sequential."""
    def forward(self, x):
        return F.gelu(x)
def conv(in_channels, out_channels, kernel_size, bias=False, padding=1, stride=1):
    """Conv2d with 'same'-style padding derived from the kernel size.

    NOTE(review): the `padding` argument is accepted for interface
    compatibility but ignored — padding is always kernel_size // 2, exactly
    as in the original.
    """
    half_kernel = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=half_kernel, bias=bias, stride=stride)
def shift_back(inputs, step=2):
    """Undo the spectral dispersion of a 4D cube (in place) and crop to a
    square: [bs, 28, 256, 310] -> [bs, 28, 256, 256].

    The effective step is rescaled for down-sampled inputs (row < 256).
    """
    bs, nC, row, col = inputs.shape
    down_sample = 256 // row
    step = float(step) / float(down_sample * down_sample)
    out_col = row
    for band in range(nC):
        src = int(step * band)
        inputs[:, band, :, :out_col] = inputs[:, band, :, src:src + out_col]
    return inputs[:, :, :, :out_col]
class MS_MSA(nn.Module):
    """Spectral-wise Multi-head Self-Attention: q/k/v are transposed so the
    attention map is computed across the channel (spectral) axis, plus a
    depth-wise convolutional positional branch on v."""
    def __init__(
            self,
            dim,
            dim_head,
            heads,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learnable per-head temperature for the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        # Depth-wise conv branch supplying positional information.
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        self.dim = dim

    def forward(self, x_in):
        """
        x_in: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        x = x_in.reshape(b, h * w, c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        # (Removed the original's dead no-op statement `v = v`.)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                      (q_inp, k_inp, v_inp))
        # q: b,heads,hw,d — transpose so attention runs over the spectral axis
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1))  # A = K^T*Q
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v  # b,heads,d,hw
        x = x.permute(0, 3, 1, 2)  # transpose back to b,hw,heads,d
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        out_p = self.pos_emb(v_inp.reshape(b, h, w, c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck convolutional FFN (1x1 expand -> depth-wise 3x3
    -> 1x1 project) operating on channel-last input."""
    def __init__(self, dim, mult=4):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=False, groups=hidden),
            GELU(),
            nn.Conv2d(hidden, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        y = self.net(x.permute(0, 3, 1, 2))
        return y.permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """A stack of `num_blocks` (spectral attention, pre-norm FFN) pairs,
    each applied with a residual connection, on channel-first input."""
    def __init__(
            self,
            dim,
            dim_head,
            heads,
            num_blocks,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([
            nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim)),
            ])
            for _ in range(num_blocks)
        ])

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        feat = x.permute(0, 2, 3, 1)
        for attn, ff in self.blocks:
            feat = attn(feat) + feat
            feat = ff(feat) + feat
        return feat.permute(0, 3, 1, 2)
class MST(nn.Module):
    """Single-stage Mask-guided Spectral Transformer: a U-shaped
    encoder/bottleneck/decoder built from MSAB blocks, with a residual
    connection from input to output."""
    def __init__(self, in_dim=28, out_dim=28, dim=28, stage=2, num_blocks=[2,4,4]):
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage
        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)
        # Encoder: each level doubles the channels and halves the resolution.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])
        # Decoder: mirrors the encoder with transposed-conv upsampling and a
        # 1x1 conv to fuse the skip connection.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)
        #### activation function
        # NOTE(review): self.lrelu is defined but never used in forward().
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for Linear layers; unit/zero init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        # Embedding
        fea = self.embedding(x)
        # Encoder (the loop variable intentionally shadows the MSAB class name)
        fea_encoder = []
        for (MSAB, FeaDownSample) in self.encoder_layers:
            fea = MSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
        # Bottleneck
        fea = self.bottleneck(fea)
        # Decoder: upsample, fuse with the matching encoder feature, attend.
        for i, (FeaUpSample, Fution, LeWinBlcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.stage-1-i]], dim=1))
            fea = LeWinBlcok(fea)
        # Mapping
        out = self.mapping(fea) + x
        return out
class MST_Plus_Plus(nn.Module):
    """MST++: fuses the stacked measurement with the mask, then runs a
    cascade of `stage` small MST sub-networks with a global residual."""
    def __init__(self, in_channels=3, out_channels=28, n_feat=28, stage=3):
        super(MST_Plus_Plus, self).__init__()
        self.stage = stage
        self.conv_in = nn.Conv2d(in_channels, n_feat, kernel_size=3, padding=(3 - 1) // 2, bias=False)
        modules_body = [MST(dim=n_feat, stage=2, num_blocks=[1, 1, 1]) for _ in range(stage)]
        # 1x1 conv fusing measurement (28 ch) with mask (28 ch) -> 28 ch.
        self.fution = nn.Conv2d(56, 28, 1, padding=0, bias=True)
        self.body = nn.Sequential(*modules_body)
        self.conv_out = nn.Conv2d(n_feat, out_channels, kernel_size=3, padding=(3 - 1) // 2, bias=False)

    def initial_x(self, y, Phi):
        """
        :param y: [b,28,*,*] measurement stacked to 28 bands (concatenated
            with Phi along dim 1, so it must have 28 channels)
        :param Phi: [b,28,256,256]
        :return: x: [b,28,256,256]
        """
        x = self.fution(torch.cat([y, Phi], dim=1))
        return x

    def forward(self, y, Phi=None):
        """
        y: [b,c,h,w]
        return out: [b,c,h,w]
        """
        # Bug fix: the original used `Phi == None`, which invokes tensor
        # equality instead of an identity test; use `is None`.
        if Phi is None:
            Phi = torch.rand((1, 28, 256, 256)).cuda()
        x = self.initial_x(y, Phi)
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad so the spatial size is divisible by 8 (two 2x downsamples).
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        x = self.conv_in(x)
        h = self.body(x)
        h = self.conv_out(h)
        h += x
        return h[:, :, :h_inp, :w_inp]
| 10,068 | 30.367601 | 116 | py |
MST | MST-main/simulation/test_code/architecture/DGSMP.py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class Resblock(nn.Module):
    """Two stacked residual conv branches (conv-ReLU-conv each), both with a
    skip connection, at constant channel width `HBW`."""
    def __init__(self, HBW):
        super(Resblock, self).__init__()
        def _branch():
            return nn.Sequential(
                nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1))
        self.block1 = _branch()
        self.block2 = _branch()

    def forward(self, x):
        x = x + self.block1(x)
        return x + self.block2(x)
class Encoding(nn.Module):
    """Five-stage convolutional encoder (channels 32->32, 32->64, 64->64,
    64->128, 128->128); forward 2x average-pools between stages and returns
    all five feature maps."""
    def __init__(self):
        super(Encoding, self).__init__()
        stage_channels = [(32, 32), (32, 64), (64, 64), (64, 128), (128, 128)]
        for idx, (c_in, c_out) in enumerate(stage_channels, start=1):
            stage = nn.Sequential(
                nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.Conv2d(in_channels=c_out, out_channels=c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU()
            )
            # Attribute names E1..E5 are kept for checkpoint compatibility.
            setattr(self, 'E%d' % idx, stage)

    def forward(self, x):
        ## encoding blocks: E1 at full resolution, each later stage on a
        ## 2x average-pooled version of the previous output.
        feats = [self.E1(x)]
        for idx in range(2, 6):
            pooled = F.avg_pool2d(feats[-1], kernel_size=2, stride=2)
            feats.append(getattr(self, 'E%d' % idx)(pooled))
        return tuple(feats)
class Decoding(nn.Module):
    """Decoder of the DGSMP U-net: upsamples the encoder features (D1..D4),
    estimates the regularization weights `w`, and generates three separable
    3D filter banks (f1: width, f2: height, f3: spectral)."""
    def __init__(self, Ch=28, kernel_size=[7,7,7]):
        super(Decoding, self).__init__()
        self.upMode = 'bilinear'
        self.Ch = Ch
        # Each filter generator emits Ch * kernel_size[i] channels (one
        # 1D kernel of length kernel_size[i] per output band).
        out_channel1 = Ch * kernel_size[0]
        out_channel2 = Ch * kernel_size[1]
        out_channel3 = Ch * kernel_size[2]
        self.D1 = nn.Sequential(nn.Conv2d(in_channels=128+128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D2 = nn.Sequential(nn.Conv2d(in_channels=128+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D3 = nn.Sequential(nn.Conv2d(in_channels=64+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D4 = nn.Sequential(nn.Conv2d(in_channels=64+32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.w_generator = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=32, out_channels=self.Ch, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=self.Ch, out_channels=self.Ch, kernel_size=1, stride=1, padding=0)
                                         )
        self.filter_g_1 = nn.Sequential(nn.Conv2d(64 + 32, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, 1, 1, 0)
                                        )
        self.filter_g_2 = nn.Sequential(nn.Conv2d(64 + 32, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, 1, 1, 0)
                                        )
        self.filter_g_3 = nn.Sequential(nn.Conv2d(64 + 32, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, 1, 1, 0)
                                        )
    def forward(self, E1, E2, E3, E4, E5):
        """Takes the five encoder feature maps; returns (w, f1, f2, f3)."""
        ## decoding blocks: upsample and concatenate with the skip feature.
        D1 = self.D1(torch.cat([E4, F.interpolate(E5, scale_factor=2, mode=self.upMode)], dim=1))
        D2 = self.D2(torch.cat([E3, F.interpolate(D1, scale_factor=2, mode=self.upMode)], dim=1))
        D3 = self.D3(torch.cat([E2, F.interpolate(D2, scale_factor=2, mode=self.upMode)], dim=1))
        D4 = self.D4(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        ## estimating the regularization parameters w
        w = self.w_generator(D4)
        ## generate 3D filters (all three heads read the same fused feature)
        f1 = self.filter_g_1(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f2 = self.filter_g_2(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f3 = self.filter_g_3(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        return w, f1, f2, f3
class HSI_CS(nn.Module):
    """DGSMP: deep Gaussian-scale-mixture prior unfolding network for CASSI.

    Runs `stages` gradient-descent-like iterations; each iteration computes a
    data-fidelity residual with learned A/A^T operators and a prior residual
    from learned separable 3D filtering, then updates the estimate Xt.
    """
    def __init__(self, Ch, stages):
        super(HSI_CS, self).__init__()
        self.Ch = Ch
        self.s = stages
        self.filter_size = [7,7,7] ## 3D filter size
        ## The modules for learning the measurement matrix A and A^T
        self.AT = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                                Resblock(64), Resblock(64),
                                nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        self.A = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                               Resblock(64), Resblock(64),
                               nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        ## Encoding blocks
        self.Encoding = Encoding()
        ## Decoding blocks
        self.Decoding = Decoding(Ch=self.Ch, kernel_size=self.filter_size)
        ## Dense connection: Den_conK fuses the features of the first K stages.
        self.conv = nn.Conv2d(Ch, 32, kernel_size=3, stride=1, padding=1)
        self.Den_con1 = nn.Conv2d(32 , 32, kernel_size=1, stride=1, padding=0)
        self.Den_con2 = nn.Conv2d(32 * 2, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con3 = nn.Conv2d(32 * 3, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con4 = nn.Conv2d(32 * 4, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con5 = nn.Conv2d(32 * 5, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con6 = nn.Conv2d(32 * 6, 32, kernel_size=1, stride=1, padding=0)
        # Per-stage learnable step sizes (supports up to 4 stages as written).
        self.delta_0 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_1 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_2 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_3 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_4 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_5 = Parameter(torch.ones(1), requires_grad=True)
        self._initialize_weights()
        torch.nn.init.normal_(self.delta_0, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_1, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_2, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_3, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_4, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_5, mean=0.1, std=0.01)
    def _initialize_weights(self):
        # Xavier-normal weights and zero biases for all conv/linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)
    def Filtering_1(self, cube, core):
        """Apply the width-direction 1D filter bank `core` to `cube`.
        NOTE(review): `core.mul_` modifies `core` in place."""
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [self.filter_size[0] // 2, self.filter_size[0] // 2, 0, 0], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[0]):
            img_stack.append(cube_pad[:, :, :, i:i + width])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out
    def Filtering_2(self, cube, core):
        """Apply the height-direction 1D filter bank `core` (in-place on core)."""
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [0, 0, self.filter_size[1] // 2, self.filter_size[1] // 2], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[1]):
            img_stack.append(cube_pad[:, :, i:i + height, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out
    def Filtering_3(self, cube, core):
        """Apply the spectral-direction 1D filter bank `core` (zero padding
        along the band axis; in-place on core)."""
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube.unsqueeze(0).unsqueeze(0), pad=(0, 0, 0, 0, self.filter_size[2] // 2, self.filter_size[2] // 2)).squeeze(0).squeeze(0)
        img_stack = []
        for i in range(self.filter_size[2]):
            img_stack.append(cube_pad[:, i:i + bandwidth, :, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out
    def recon(self, res1, res2, Xt, i):
        """Gradient-style update of Xt using the stage-i learned step size."""
        if i == 0 :
            delta = self.delta_0
        elif i == 1:
            delta = self.delta_1
        elif i == 2:
            delta = self.delta_2
        elif i == 3:
            delta = self.delta_3
        # elif i == 4:
        #     delta = self.delta_4
        # elif i == 5:
        #     delta = self.delta_5
        Xt = Xt - 2 * delta * (res1 + res2)
        return Xt
    def y2x(self, y):
        ## Spilt operator: slice the 2D measurement into 28 shifted bands.
        # NOTE(review): `bs` is only assigned when the input is 3D; a 4D
        # input would raise NameError here — confirm callers always pass 3D.
        sz = y.size()
        if len(sz) == 3:
            y = y.unsqueeze(1)
            bs = sz[0]
        sz = y.size()
        x = torch.zeros([bs, 28, sz[2], sz[2]]).cuda()
        for t in range(28):
            temp = y[:, :, :, 0 + 2 * t : sz[2] + 2 * t]
            x[:, t, :, :] = temp.squeeze(1)
        return x
    def x2y(self, x):
        ## Shift and Sum operator: accumulate each band into its dispersed
        ## position of the 2D measurement (width + 2*27 columns).
        sz = x.size()
        if len(sz) == 3:
            x = x.unsqueeze(0).unsqueeze(0)
            bs = 1
        else:
            bs = sz[0]
        sz = x.size()
        y = torch.zeros([bs, sz[2], sz[2]+2*27]).cuda()
        for t in range(28):
            y[:, :, 0 + 2 * t : sz[2] + 2 * t] = x[:, t, :, :] + y[:, :, 0 + 2 * t : sz[2] + 2 * t]
        return y
    def forward(self, y, input_mask=None):
        """Reconstruct a 28-band HSI cube from the 2D measurement `y`."""
        ## The measurements y is split into a 3D data cube of size H × W × L to initialize x.
        y = y / 28 * 2
        Xt = self.y2x(y)
        feature_list = []
        for i in range(0, self.s):
            AXt = self.x2y(self.A(Xt)) # y = Ax
            Res1 = self.AT(self.y2x(AXt - y)) # A^T * (Ax − y)
            fea = self.conv(Xt)
            # Dense feature fusion over all stages seen so far.
            if i == 0:
                feature_list.append(fea)
                fufea = self.Den_con1(fea)
            elif i == 1:
                feature_list.append(fea)
                fufea = self.Den_con2(torch.cat(feature_list, 1))
            elif i == 2:
                feature_list.append(fea)
                fufea = self.Den_con3(torch.cat(feature_list, 1))
            elif i == 3:
                feature_list.append(fea)
                fufea = self.Den_con4(torch.cat(feature_list, 1))
            # elif i == 4:
            #     feature_list.append(fea)
            #     fufea = self.Den_con5(torch.cat(feature_list, 1))
            # elif i == 5:
            #     feature_list.append(fea)
            #     fufea = self.Den_con6(torch.cat(feature_list, 1))
            E1, E2, E3, E4, E5 = self.Encoding(fufea)
            W, f1, f2, f3 = self.Decoding(E1, E2, E3, E4, E5)
            # Normalize each generated 1D kernel along its tap dimension.
            batch_size, p, height, width = f1.size()
            f1 = F.normalize(f1.view(batch_size, self.filter_size[0], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f2.size()
            f2 = F.normalize(f2.view(batch_size, self.filter_size[1], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f3.size()
            f3 = F.normalize(f3.view(batch_size, self.filter_size[2], self.Ch, height, width),dim=1)
            ## Estimating the local means U
            u1 = self.Filtering_1(Xt, f1)
            u2 = self.Filtering_2(u1, f2)
            U = self.Filtering_3(u2, f3)
            ## w * (x − u)
            Res2 = (Xt - U).mul(W)
            ## Reconstructing HSIs
            Xt = self.recon(Res1, Res2, Xt, i)
        return Xt
| 15,284 | 46.175926 | 148 | py |
MST | MST-main/simulation/test_code/architecture/DAUHST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch import einsum
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Public wrapper: fill `tensor` in place with truncated-normal values
    (see `_no_grad_trunc_normal_`); returns `tensor`."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class PreNorm(nn.Module):
    """Applies LayerNorm over the last dimension before calling the wrapped
    callable `fn`; extra positional/keyword args are forwarded to `fn`."""
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Thin module wrapper around the functional GELU activation so it can
    be placed inside nn.Sequential."""
    def forward(self, x):
        return F.gelu(x)
class HS_MSA(nn.Module):
    """Half-Shuffle Multi-head Self-Attention.

    With only_local_branch=True all heads attend within local windows.
    Otherwise the channels are split in half: the first half runs local
    (windowed) attention and the second half runs non-local attention across
    windows (tokens at the same in-window position).
    """
    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=28,
            heads=8,
            only_local_branch=False
    ):
        super().__init__()
        self.dim = dim
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.window_size = window_size
        self.only_local_branch = only_local_branch
        # position embedding
        if only_local_branch:
            seq_l = window_size[0] * window_size[1]
            self.pos_emb = nn.Parameter(torch.Tensor(1, heads, seq_l, seq_l))
            trunc_normal_(self.pos_emb)
        else:
            seq_l1 = window_size[0] * window_size[1]
            self.pos_emb1 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l1, seq_l1))
            # NOTE(review): the non-local position embedding is sized from a
            # hard-coded 256x320 feature map divided by the head count —
            # other input sizes would not match; confirm against callers.
            h,w = 256//self.heads,320//self.heads
            seq_l2 = h*w//seq_l1
            self.pos_emb2 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l2, seq_l2))
            trunc_normal_(self.pos_emb1)
            trunc_normal_(self.pos_emb2)
        inner_dim = dim_head * heads
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim)
    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x.shape
        w_size = self.window_size
        assert h % w_size[0] == 0 and w % w_size[1] == 0, 'fmap dimensions must be divisible by the window size'
        if self.only_local_branch:
            # Fold each window into the batch axis and attend inside it.
            x_inp = rearrange(x, 'b (h b0) (w b1) c -> (b h w) (b0 b1) c', b0=w_size[0], b1=w_size[1])
            q = self.to_q(x_inp)
            k, v = self.to_kv(x_inp).chunk(2, dim=-1)
            q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), (q, k, v))
            q *= self.scale
            sim = einsum('b h i d, b h j d -> b h i j', q, k)
            sim = sim + self.pos_emb
            attn = sim.softmax(dim=-1)
            out = einsum('b h i j, b h j d -> b h i d', attn, v)
            out = rearrange(out, 'b h n d -> b n (h d)')
            out = self.to_out(out)
            out = rearrange(out, '(b h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        else:
            q = self.to_q(x)
            k, v = self.to_kv(x).chunk(2, dim=-1)
            # Split channels: first half -> local branch, second -> non-local.
            q1, q2 = q[:,:,:,:c//2], q[:,:,:,c//2:]
            k1, k2 = k[:,:,:,:c//2], k[:,:,:,c//2:]
            v1, v2 = v[:,:,:,:c//2], v[:,:,:,c//2:]
            # local branch
            q1, k1, v1 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                                 b0=w_size[0], b1=w_size[1]), (q1, k1, v1))
            q1, k1, v1 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q1, k1, v1))
            q1 *= self.scale
            sim1 = einsum('b n h i d, b n h j d -> b n h i j', q1, k1)
            sim1 = sim1 + self.pos_emb1
            attn1 = sim1.softmax(dim=-1)
            out1 = einsum('b n h i j, b n h j d -> b n h i d', attn1, v1)
            out1 = rearrange(out1, 'b n h mm d -> b n mm (h d)')
            # non-local branch: permute so attention runs across windows.
            q2, k2, v2 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                                 b0=w_size[0], b1=w_size[1]), (q2, k2, v2))
            q2, k2, v2 = map(lambda t: t.permute(0, 2, 1, 3), (q2.clone(), k2.clone(), v2.clone()))
            q2, k2, v2 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q2, k2, v2))
            q2 *= self.scale
            sim2 = einsum('b n h i d, b n h j d -> b n h i j', q2, k2)
            sim2 = sim2 + self.pos_emb2
            attn2 = sim2.softmax(dim=-1)
            out2 = einsum('b n h i j, b n h j d -> b n h i d', attn2, v2)
            out2 = rearrange(out2, 'b n h mm d -> b n mm (h d)')
            out2 = out2.permute(0, 2, 1, 3)
            out = torch.cat([out1,out2],dim=-1).contiguous()
            out = self.to_out(out)
            out = rearrange(out, 'b (h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        return out
class HSAB(nn.Module):
    """A stack of `num_blocks` (pre-norm HS_MSA, pre-norm FFN) pairs with
    residual connections; heads == 1 selects the local-only attention."""
    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([
            nn.ModuleList([
                PreNorm(dim, HS_MSA(dim=dim, window_size=window_size, dim_head=dim_head,
                                    heads=heads, only_local_branch=(heads == 1))),
                PreNorm(dim, FeedForward(dim=dim)),
            ])
            for _ in range(num_blocks)
        ])

    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        feat = x.permute(0, 2, 3, 1)
        for attn, ff in self.blocks:
            feat = attn(feat) + feat
            feat = ff(feat) + feat
        return feat.permute(0, 3, 1, 2)
class FeedForward(nn.Module):
    """Inverted-bottleneck convolutional FFN (1x1 expand -> depth-wise 3x3
    -> 1x1 project) operating on channel-last input."""
    def __init__(self, dim, mult=4):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=False, groups=hidden),
            GELU(),
            nn.Conv2d(hidden, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        y = self.net(x.permute(0, 3, 1, 2))
        return y.permute(0, 2, 3, 1)
class HST(nn.Module):
    """Half-Shuffle Transformer denoiser: a U-shaped network of HSAB blocks
    with a residual connection from the first 28 input channels."""
    def __init__(self, in_dim=28, out_dim=28, dim=28, num_blocks=[1,1,1]):
        super(HST, self).__init__()
        self.dim = dim
        self.scales = len(num_blocks)
        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)
        # Encoder: each level doubles the channels and halves the resolution.
        self.encoder_layers = nn.ModuleList([])
        dim_scale = dim
        for i in range(self.scales-1):
            self.encoder_layers.append(nn.ModuleList([
                HSAB(dim=dim_scale, num_blocks=num_blocks[i], dim_head=dim, heads=dim_scale // dim),
                nn.Conv2d(dim_scale, dim_scale * 2, 4, 2, 1, bias=False),
            ]))
            dim_scale *= 2
        # Bottleneck
        self.bottleneck = HSAB(dim=dim_scale, dim_head=dim, heads=dim_scale // dim, num_blocks=num_blocks[-1])
        # Decoder: transposed-conv upsampling plus 1x1 fusion of the skip.
        self.decoder_layers = nn.ModuleList([])
        for i in range(self.scales-1):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_scale, dim_scale // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_scale, dim_scale // 2, 1, 1, bias=False),
                HSAB(dim=dim_scale // 2, num_blocks=num_blocks[self.scales - 2 - i], dim_head=dim,
                     heads=(dim_scale // 2) // dim),
            ]))
            dim_scale //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)
        #### activation function
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for Linear layers; unit/zero init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad so the spatial size is divisible by 16.
        hb, wb = 16, 16
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        # Embedding
        fea = self.embedding(x)
        # Keep only the first 28 channels for the global residual (the extra
        # input channel, e.g. the beta map, is dropped).
        x = x[:,:28,:,:]
        # Encoder (the loop variable intentionally shadows the HSAB class name)
        fea_encoder = []
        for (HSAB, FeaDownSample) in self.encoder_layers:
            fea = HSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
        # Bottleneck
        fea = self.bottleneck(fea)
        # Decoder
        for i, (FeaUpSample, Fution, HSAB) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.scales-2-i]], dim=1))
            fea = HSAB(fea)
        # Mapping
        out = self.mapping(fea) + x
        return out[:, :, :h_inp, :w_inp]
def A(x, Phi):
    """Forward CASSI operator: mask the cube with Phi and sum over the
    spectral axis. [b,nC,h,w] -> [b,h,w]."""
    return torch.sum(x * Phi, 1)
def At(y, Phi):
    """Adjoint CASSI operator: broadcast the 2D measurement across all
    spectral bands and mask with Phi. [b,h,w] -> [b,nC,h,w]."""
    replicated = torch.unsqueeze(y, 1).repeat(1, Phi.shape[1], 1, 1)
    return replicated * Phi
def shift_3d(inputs, step=2):
    """In-place spectral dispersion: roll band i right by step*i columns.
    Returns the (mutated) input tensor."""
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """In-place inverse of `shift_3d`: roll band i left by step*i columns.
    Returns the (mutated) input tensor."""
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=(-1) * step * band, dims=2)
    return inputs
class HyPaNet(nn.Module):
    """Hyper-parameter estimator: maps the fused input to `out_nc` strictly
    positive scalars per sample, returned as two halves (alpha, beta)."""
    def __init__(self, in_nc=29, out_nc=8, channel=64):
        super(HyPaNet, self).__init__()
        self.fution = nn.Conv2d(in_nc, channel, 1, 1, 0, bias=True)
        self.down_sample = nn.Conv2d(channel, channel, 3, 2, 1, bias=True)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.mlp = nn.Sequential(
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, out_nc, 1, padding=0, bias=True),
            nn.Softplus())
        self.relu = nn.ReLU(inplace=True)
        self.out_nc = out_nc

    def forward(self, x):
        feat = self.fution(x)
        feat = self.down_sample(self.relu(feat))
        pooled = self.avg_pool(feat)
        params = self.mlp(pooled) + 1e-6  # Softplus + eps keeps values > 0
        half = self.out_nc // 2
        return params[:, :half, :, :], params[:, half:, :, :]
class DAUHST(nn.Module):
    """Degradation-Aware Unfolding Half-Shuffle Transformer: unrolls
    `num_iterations` data-fidelity + HST-denoiser steps for CASSI
    reconstruction."""
    def __init__(self, num_iterations=1):
        super(DAUHST, self).__init__()
        self.para_estimator = HyPaNet(in_nc=28, out_nc=num_iterations*2)
        self.fution = nn.Conv2d(56, 28, 1, padding=0, bias=True)
        self.num_iterations = num_iterations
        self.denoisers = nn.ModuleList([])
        for _ in range(num_iterations):
            self.denoisers.append(
                HST(in_dim=29, out_dim=28, dim=28, num_blocks=[1, 1, 1]),
            )

    def initial(self, y, Phi):
        """
        :param y: [b,256,310]
        :param Phi: [b,28,256,310]
        :return: z: [b,28,256,310]; alpha: [b, num_iterations]; beta: [b, num_iterations]
        """
        nC, step = 28, 2
        y = y / nC * 2
        bs, row, col = y.shape
        # Stack the 2D measurement into 28 dispersed bands.
        y_shift = torch.zeros(bs, nC, row, col).cuda().float()
        for i in range(nC):
            y_shift[:, i, :, step * i:step * i + col - (nC - 1) * step] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        # Fuse the shifted measurement with the mask once and reuse the
        # result (the original recomputed self.fution(...) on the identical
        # input just to feed the parameter estimator — same output, twice
        # the work).
        z = self.fution(torch.cat([y_shift, Phi], dim=1))
        alpha, beta = self.para_estimator(z)
        return z, alpha, beta

    def forward(self, y, input_mask=None):
        """
        :param y: [b,256,310]
        :param input_mask: (Phi [b,28,256,310], Phi_s [b,256,310]); must not
            be None despite the default (it is unpacked unconditionally).
        :return: z_crop: [b,28,256,256]
        """
        Phi, Phi_s = input_mask
        z, alphas, betas = self.initial(y, Phi)
        for i in range(self.num_iterations):
            alpha, beta = alphas[:, i, :, :], betas[:, i:i+1, :, :]
            # Gradient-descent-like data-fidelity step.
            Phi_z = A(z, Phi)
            x = z + At(torch.div(y - Phi_z, alpha + Phi_s), Phi)
            x = shift_back_3d(x)
            # Feed the denoiser the estimate plus the broadcast beta map.
            beta_repeat = beta.repeat(1, 1, x.shape[2], x.shape[3])
            z = self.denoisers[i](torch.cat([x, beta_repeat], dim=1))
            if i < self.num_iterations - 1:
                z = shift_3d(z)
        return z[:, :, :, 0:256]
| 13,343 | 35.26087 | 133 | py |
MST | MST-main/simulation/test_code/architecture/CST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
from collections import defaultdict, Counter
import numpy as np
from tqdm import tqdm
import random
def uniform(a, b, shape, device='cuda'):
    """Draw a tensor of the given `shape` from U[a, b) on `device`."""
    sample = torch.rand(shape, device=device)
    return sample * (b - a) + a
class AsymmetricTransform:
    """Interface for asymmetric query/key transforms used by LSH schemes."""

    def Q(self, *args, **kwargs):
        # Subclasses must provide the query-side transform.
        raise NotImplementedError('Query transform not implemented')

    def K(self, *args, **kwargs):
        # Subclasses must provide the key-side transform.
        raise NotImplementedError('Key transform not implemented')
class LSH:
    """Base class for locality-sensitive hashing schemes."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError('LSH scheme not implemented')

    def compute_hash_agreement(self, q_hash, k_hash):
        """Count, per row, the keys whose hash fully matches the query hash."""
        exact_match = (q_hash == k_hash).min(dim=-1)[0]
        return exact_match.sum(dim=-1)
class XBOXPLUS(AsymmetricTransform):
    """XBOX+ asymmetric transform: augments each vector with an extension
    coordinate so all vectors share the batch-maximum norm before hashing."""

    def set_norms(self, x):
        # Per-vector L2 norms and their per-batch maximum, cached for X().
        self.x_norms = x.norm(p=2, dim=-1, keepdim=True)
        self.MX = torch.amax(self.x_norms, dim=-2, keepdim=True)

    def X(self, x):
        device = x.device
        # Extension coordinate that equalizes every vector's norm to MX.
        ext = torch.sqrt((self.MX**2).to(device) - (self.x_norms**2).to(device))
        # Bugfix: the original built `zero` via
        # torch.tensor(0.0).repeat(x.shape[:-1], 1), passing a mixed
        # tuple/int to Tensor.repeat, which raises a TypeError. All that is
        # needed is a zero column matching `ext`'s shape/device/dtype.
        zero = torch.zeros_like(ext)
        return torch.cat((x, ext, zero), -1)
def lsh_clustering(x, n_rounds, r=1):
    """Hash `x` with SALSH over `n_rounds` rounds and return, per round,
    the token ordering induced by sorting the hash values."""
    hasher = SALSH(n_rounds=n_rounds, dim=x.shape[-1], r=r, device=x.device)
    hashed = hasher(x).reshape((n_rounds,) + x.shape[:-1])
    return hashed.argsort(dim=-1)
class SALSH(LSH):
    """Shifted random-projection LSH: h(v) = (v @ alpha + beta) / r."""

    def __init__(self, n_rounds, dim, r, device='cuda'):
        super(SALSH, self).__init__()
        # One Gaussian projection column and one uniform shift per round.
        self.alpha = torch.normal(0, 1, (dim, n_rounds), device=device)
        self.beta = uniform(0, r, shape=(1, n_rounds), device=device)
        self.dim = dim
        self.r = r

    def __call__(self, vecs):
        # [bs, t, dim] -> [bs, t, n_rounds], rounds moved to the front.
        shifted = vecs @ self.alpha + self.beta
        return (shifted / self.r).permute(2, 0, 1)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """Truncated-normal initializer on [a, b]; see `_no_grad_trunc_normal_`."""
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` in place with fan-scaled variance.

    :param scale: variance multiplier.
    :param mode: 'fan_in' | 'fan_out' | 'fan_avg'.
    :param distribution: 'truncated_normal' | 'normal' | 'uniform'.
    :raises ValueError: on an unknown mode or distribution.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Bugfix: an unknown mode previously fell through and crashed later
        # with a confusing NameError on `denom`; fail fast instead.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # The constant rescales std because truncation to [-2, 2] shrinks it.
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun normal init: fan_in scaling with a truncated normal."""
    variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, *args, **kwargs)
class GELU(nn.Module):
    """Module wrapper around the functional GELU activation."""

    def forward(self, x):
        return nn.functional.gelu(x)
def batch_scatter(output, src, dim, index):
    """
    Write `src` into `output` at per-batch positions `index`.

    :param output: [b,n,c]
    :param src: [b,k,c]
    :param dim: int (scatter dimension of the channel-folded view, i.e. 1)
    :param index: [b,k]
    :return: output: [b,n,c]
    """
    b, k, c = src.shape
    idx = index[:, :, None].expand(-1, -1, c)
    # Fold channels into the batch axis: [b,*,c] -> [(b c), *].
    out_f = output.permute(0, 2, 1).reshape(b * c, -1)
    src_f = src.permute(0, 2, 1).reshape(b * c, k)
    idx_f = idx.permute(0, 2, 1).reshape(b * c, k)
    out_f.scatter_(dim, idx_f, src_f)
    # Unfold back to [b,n,c].
    return out_f.reshape(b, c, -1).permute(0, 2, 1)
def batch_gather(x, index, dim):
    """
    Gather rows of `x` at per-batch positions `index`.

    :param x: [b,n,c]
    :param index: [b,k]
    :param dim: int (gather dimension of the channel-folded view, i.e. 1)
    :return: output: [b,k,c]
    """
    b, n, c = x.shape
    idx = index[:, :, None].expand(-1, -1, c)
    # Fold channels into the batch axis, gather, then unfold.
    x_f = x.permute(0, 2, 1).reshape(b * c, n)
    idx_f = idx.permute(0, 2, 1).reshape(b * c, -1)
    picked = torch.gather(x_f, dim, idx_f)
    return picked.reshape(b, c, -1).permute(0, 2, 1)
class FeedForward(nn.Module):
    """Inverted-bottleneck convolutional FFN on channel-last tensors:
    1x1 expand -> GELU -> depthwise 3x3 -> GELU -> 1x1 project."""

    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        channel_first = x.permute(0, 3, 1, 2)
        mapped = self.net(channel_first)
        return mapped.permute(0, 2, 3, 1)
class SAH_MSA(nn.Module):
    """Sparsity-Aware Hashing Multi-head Self-Attention.

    Tokens are bucketed by LSH (SALSH over XBOX+-augmented inputs) so that
    attention is only computed inside fixed-size hash buckets, over
    `n_rounds` independent hashing rounds whose outputs are fused with
    per-token softmax weights derived from each round's normalizer.
    """
    def __init__(self, heads=4, n_rounds=2, channels=64, patch_size=144,
                 r=1):
        super(SAH_MSA, self).__init__()
        self.heads = heads
        self.n_rounds = n_rounds
        inner_dim = channels*3
        # Q/K/V projections expand channels 3x; to_out projects back.
        self.to_q = nn.Linear(channels, inner_dim, bias=False)
        self.to_k = nn.Linear(channels, inner_dim, bias=False)
        self.to_v = nn.Linear(channels, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, channels, bias=False)
        self.xbox_plus = XBOXPLUS()
        self.clustering_params = {
            'r': r,
            'n_rounds': self.n_rounds
        }
        # Bucket size: number of tokens attended to jointly.
        self.q_attn_size = patch_size[0] * patch_size[1]
        self.k_attn_size = patch_size[0] * patch_size[1]
    def forward(self, input):
        """
        :param input: [b,n,c] token sequence
        :return: output: [b,n,c]
        """
        B, N, C_inp = input.shape
        query = self.to_q(input)
        key = self.to_k(input)
        value = self.to_v(input)
        # Bucket membership is decided from the raw input (per head),
        # not from the projected Q/K, and without gradients.
        input_hash = input.view(B, N, self.heads, C_inp//self.heads)
        x_hash = rearrange(input_hash, 'b t h e -> (b h) t e')
        bs, x_seqlen, dim = x_hash.shape
        with torch.no_grad():
            self.xbox_plus.set_norms(x_hash)
            Xs = self.xbox_plus.X(x_hash)
            # x_positions: token order after sorting by hash value,
            # one permutation per round: [n_rounds, bs, n].
            x_positions = lsh_clustering(Xs, **self.clustering_params)
            x_positions = x_positions.reshape(self.n_rounds, bs, -1)
        del Xs
        C = query.shape[-1]
        query = query.view(B, N, self.heads, C // self.heads)
        key = key.view(B, N, self.heads, C // self.heads)
        value = value.view(B, N, self.heads, C // self.heads)
        query = rearrange(query, 'b t h e -> (b h) t e')  # [bs, q_seqlen,c]
        key = rearrange(key, 'b t h e -> (b h) t e')
        value = rearrange(value, 'b s h d -> (b h) s d')
        bs, q_seqlen, dim = query.shape
        bs, k_seqlen, dim = key.shape
        v_dim = value.shape[-1]
        # Inverse permutation, used later to undo the hash sort.
        x_rev_positions = torch.argsort(x_positions, dim=-1)
        x_offset = torch.arange(bs, device=query.device).unsqueeze(-1) * x_seqlen
        x_flat = (x_positions + x_offset).reshape(-1)
        # Gather tokens in hash order and chop into fixed-size buckets.
        s_queries = query.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.q_attn_size, dim)
        s_keys = key.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, dim)
        s_values = value.reshape(-1, v_dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, v_dim)
        # Per-bucket attention scores (no 1/sqrt(d) scaling here).
        inner = s_queries @ s_keys.transpose(2, 1)
        norm_factor = 1
        inner = inner / norm_factor
        # free memory
        del x_positions
        # softmax denominator (log-sum-exp, kept for multi-round fusion)
        dots_logsumexp = torch.logsumexp(inner, dim=-1, keepdim=True)
        # softmax
        dots = torch.exp(inner - dots_logsumexp)
        # dropout
        # n_rounds outs
        bo = (dots @ s_values).reshape(self.n_rounds, bs, q_seqlen, -1)
        # undo sort
        x_offset = torch.arange(bs * self.n_rounds, device=query.device).unsqueeze(-1) * x_seqlen
        x_rev_flat = (x_rev_positions.reshape(-1, x_seqlen) + x_offset).reshape(-1)
        o = bo.reshape(-1, v_dim).index_select(0, x_rev_flat).reshape(self.n_rounds, bs, q_seqlen, -1)
        slogits = dots_logsumexp.reshape(self.n_rounds, bs, -1)
        logits = torch.gather(slogits, 2, x_rev_positions)
        # free memory
        del x_rev_positions
        # weighted sum multi-round attention: each round is weighted by its
        # softmax normalizer relative to the other rounds.
        probs = torch.exp(logits - torch.logsumexp(logits, dim=0, keepdim=True))
        out = torch.sum(o * probs.unsqueeze(-1), dim=0)
        out = rearrange(out, '(b h) t d -> b t h d', h=self.heads)
        out = out.reshape(B, N, -1)
        out = self.to_out(out)
        return out
class SAHAB(nn.Module):
    """Sparsity-Aware Hashing Attention Block.

    Partitions the (optionally cyclically shifted) feature map into
    doubled windows, runs SAH_MSA either on all windows or — when
    `sparse` — only on the half of the windows with the highest mean
    mask value, then applies a convolutional FFN. Residual connections
    around both the attention and the FFN.
    """
    def __init__(
            self,
            dim,
            patch_size=(16, 16),
            heads=8,
            shift_size=0,
            sparse=False
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        self.attn = PreNorm(dim, SAH_MSA(heads=heads, n_rounds=2, r=1, channels=dim, patch_size=patch_size))
        self.ffn = PreNorm(dim, FeedForward(dim=dim))
        self.shift_size = shift_size
        self.patch_size = patch_size
        self.sparse = sparse
    def forward(self, x, mask=None):
        """
        x: [b,h,w,c]
        mask: [b,h,w]
        return out: [b,h,w,c]
        """
        b,h,w,c = x.shape
        # Swin-style cyclic shift, applied to the mask as well.
        if self.shift_size > 0:
            x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            mask = torch.roll(mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        w_size = self.patch_size
        # Split into large patches (2x the nominal window in each dim).
        x = rearrange(x, 'b (nh hh) (nw ww) c-> b (nh nw) (hh ww c)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        mask = rearrange(mask, 'b (nh hh) (nw ww) -> b (nh nw) (hh ww)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        N = x.shape[1]
        # Mean mask response per window: [b,nh*nw].
        mask = torch.mean(mask,dim=2,keepdim=False) # [b,nh*nw]
        if self.sparse:
            # Attend only to the top half of windows by mask response;
            # the remaining windows pass through unchanged.
            mask_select = mask.topk(mask.shape[1] // 2, dim=1)[1] # [b,nh*nw//2]
            x_select = batch_gather(x, mask_select, 1) # [b,nh*nw//2,hh*ww*c]
            x_select = x_select.reshape(b*N//2,-1,c)
            x_select = self.attn(x_select)+x_select
            x_select = x_select.view(b,N//2,-1)
            x = batch_scatter(x.clone(), x_select, 1, mask_select)
        else:
            x = x.view(b*N,-1,c)
            x = self.attn(x) + x
            x = x.view(b, N, -1)
        # Merge windows back into the spatial layout.
        x = rearrange(x, 'b (nh nw) (hh ww c) -> b (nh hh) (nw ww) c', nh=h//(w_size[0] * 2), hh=w_size[0] * 2, ww=w_size[1] * 2)
        if self.shift_size > 0:
            x = torch.roll(x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        x = self.ffn(x) + x
        return x
class SAHABs(nn.Module):
    """Stack of SAHAB blocks; even-indexed blocks are unshifted, odd ones
    are cyclically shifted by `patch_size[0]`."""

    def __init__(
            self,
            dim,
            patch_size=(8, 8),
            heads=8,
            num_blocks=2,
            sparse=False
    ):
        super().__init__()
        self.blocks = nn.Sequential(*[
            SAHAB(heads=heads, dim=dim, patch_size=patch_size, sparse=sparse,
                  shift_size=0 if idx % 2 == 0 else patch_size[0])
            for idx in range(num_blocks)
        ])

    def forward(self, x, mask=None):
        """
        x: [b,c,h,w]
        mask: [b,1,h,w]
        return x: [b,c,h,w]
        """
        feat = x.permute(0, 2, 3, 1)
        squeezed_mask = mask.squeeze(1)
        for blk in self.blocks:
            feat = blk(feat, squeezed_mask)
        return feat.permute(0, 3, 1, 2)
class ASPPConv(nn.Sequential):
    """One ASPP branch: 3x3 dilated conv followed by ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        layers = [
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
            nn.ReLU()
        ]
        super(ASPPConv, self).__init__(*layers)
class ASPPPooling(nn.Sequential):
    """Global-average-pooling ASPP branch, bilinearly upsampled back
    to the input's spatial size."""

    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.ReLU())

    def forward(self, x):
        spatial = x.shape[-2:]
        pooled = x
        for stage in self:
            pooled = stage(pooled)
        return F.interpolate(pooled, size=spatial, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous spatial pyramid pooling: parallel dilated branches plus a
    global-pooling branch, fused by a 1x1 projection with dropout."""

    def __init__(self, in_channels, atrous_rates, out_channels):
        super(ASPP, self).__init__()
        branches = [ASPPConv(in_channels, out_channels, rate)
                    for rate in tuple(atrous_rates)]
        branches.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(branches)
        self.project = nn.Sequential(
            nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
            nn.ReLU(),
            nn.Dropout(0.5))

    def forward(self, x):
        branch_outs = [branch(x) for branch in self.convs]
        return self.project(torch.cat(branch_outs, dim=1))
class Sparsity_Estimator(nn.Module):
    """Small U-Net that produces the initial feature cube and — when
    `sparse` — an extra per-pixel error/sparsity map as the last output
    channel (used to guide sparse attention downstream)."""
    def __init__(self, dim=28, expand=2, sparse=False):
        super(Sparsity_Estimator, self).__init__()
        self.dim = dim
        self.stage = 2
        self.sparse = sparse
        # Input projection
        self.in_proj = nn.Conv2d(28, dim, 1, 1, 0, bias=False)
        # Encoder: two stages of 1x1 expand -> strided depthwise 3x3 -> 1x1.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(2):
            self.encoder_layers.append(nn.ModuleList([
                nn.Conv2d(dim_stage, dim_stage * expand, 1, 1, 0, bias=False),
                nn.Conv2d(dim_stage * expand, dim_stage * expand, 3, 2, 1, bias=False, groups=dim_stage * expand),
                nn.Conv2d(dim_stage * expand, dim_stage*expand, 1, 1, 0, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck: ASPP (used with a residual connection in forward).
        self.bottleneck = ASPP(dim_stage, [3,6], dim_stage)
        # Decoder: transposed-conv upsample, then 1x1 / depthwise 3x3 / 1x1.
        self.decoder_layers = nn.ModuleList([])
        for i in range(2):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage // 2, dim_stage, 1, 1, 0, bias=False),
                nn.Conv2d(dim_stage, dim_stage, 3, 1, 1, bias=False, groups=dim_stage),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, 0, bias=False),
            ]))
            dim_stage //= 2
        # Output projection: one extra channel carries the error map.
        if sparse:
            self.out_conv2 = nn.Conv2d(self.dim, self.dim+1, 3, 1, 1, bias=False)
        else:
            self.out_conv2 = nn.Conv2d(self.dim, self.dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w] (plus error_map [b,1,h,w] when sparse)
        """
        # Input projection
        fea = self.lrelu(self.in_proj(x))
        # Encoder (features saved before each downsample for the skips)
        fea_encoder = []  # [c 2c 4c 8c]
        for (Conv1, Conv2, Conv3) in self.encoder_layers:
            fea_encoder.append(fea)
            fea = Conv3(self.lrelu(Conv2(self.lrelu(Conv1(fea)))))
        # Bottleneck
        fea = self.bottleneck(fea)+fea
        # Decoder with skip connections from the encoder
        for i, (FeaUpSample, Conv1, Conv2, Conv3) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Conv3(self.lrelu(Conv2(self.lrelu(Conv1(fea)))))
            fea = fea + fea_encoder[self.stage-1-i]
        # Output projection
        out = self.out_conv2(fea)
        if self.sparse:
            # Last channel is the predicted error map; the rest are features.
            error_map = out[:,-1:,:,:]
            return out[:,:-1], error_map
        return out
class CST(nn.Module):
    """Coarse-to-fine Sparse Transformer for CASSI reconstruction.

    Pipeline: fuse shifted measurement + physical mask (1x1 conv) ->
    sparsity estimator -> U-shaped SAHAB encoder / bottleneck / decoder
    guided by the (down-sampled) sparsity mask -> residual output
    projection.
    """
    def __init__(self, dim=28, stage=2, num_blocks=[2, 2, 2], sparse=False):
        super(CST, self).__init__()
        self.dim = dim
        self.stage = stage
        self.sparse = sparse
        # Fution physical mask and shifted measurement
        self.fution = nn.Conv2d(56, 28, 1, 1, 0, bias=False)
        # Sparsity Estimator (stacked twice for the largest configuration)
        if num_blocks==[2,4,6]:
            self.fe = nn.Sequential(Sparsity_Estimator(dim=28,expand=2,sparse=False),
                                    Sparsity_Estimator(dim=28, expand=2, sparse=sparse))
        else:
            self.fe = Sparsity_Estimator(dim=28, expand=2, sparse=sparse)
        # Encoder: SAHAB stage, feature downsample, mask downsample (avg pool)
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                SAHABs(dim=dim_stage, num_blocks=num_blocks[i], heads=dim_stage // dim, sparse=sparse),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
                nn.AvgPool2d(kernel_size=2, stride=2),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = SAHABs(
            dim=dim_stage, heads=dim_stage // dim, num_blocks=num_blocks[-1], sparse=sparse)
        # Decoder: transposed-conv upsample + SAHAB stage
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                SAHABs(dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i],
                       heads=(dim_stage // 2) // dim, sparse=sparse),
            ]))
            dim_stage //= 2
        # Output projection
        self.out_proj = nn.Conv2d(self.dim, dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for linear layers; unit LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x, mask=None):
        """
        x: [b,c,h,w]
        mask: [b,c,h,w]
        return out:[b,c,h,w]
        """
        b, c, h, w = x.shape
        # Fution
        x = self.fution(torch.cat([x,mask],dim=1))
        # Feature Extraction
        if self.sparse:
            fea,mask = self.fe(x)
        else:
            fea = self.fe(x)
            # NOTE(review): with sparse=False a random map stands in for the
            # guidance mask (attention is dense in that case) — confirm intent.
            mask = torch.randn((b,1,h,w)).cuda()
        # Encoder (mask is pooled alongside the features; both are saved
        # per scale for the decoder)
        fea_encoder = []
        masks = []
        for (Blcok, FeaDownSample, MaskDownSample) in self.encoder_layers:
            fea = Blcok(fea, mask)
            masks.append(mask)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
            mask = MaskDownSample(mask)
        # Bottleneck
        fea = self.bottleneck(fea, mask)
        # Decoder with encoder skip connections and matching-scale masks
        for i, (FeaUpSample, Blcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = fea + fea_encoder[self.stage - 1 - i]
            mask = masks[self.stage - 1 - i]
            fea = Blcok(fea, mask)
        # Output projection (residual over the fused input)
        out = self.out_proj(fea) + x
        return out
| 19,711 | 32.466893 | 129 | py |
MST | MST-main/simulation/test_code/architecture/MST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Public wrapper: truncated-normal initialization on [a, b]."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` in place with fan-scaled variance.

    :param scale: variance multiplier.
    :param mode: 'fan_in' | 'fan_out' | 'fan_avg'.
    :param distribution: 'truncated_normal' | 'normal' | 'uniform'.
    :raises ValueError: on an unknown mode or distribution.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Bugfix: an unknown mode previously fell through and crashed later
        # with a confusing NameError on `denom`; fail fast instead.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # The constant rescales std because truncation to [-2, 2] shrinks it.
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun normal initialization (fan_in + truncated normal)."""
    variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Pre-normalization wrapper: LayerNorm the input, then call `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Module form of the functional GELU activation."""

    def forward(self, x):
        return nn.functional.gelu(x)
def conv(in_channels, out_channels, kernel_size, bias=False, padding=1, stride=1):
    """Build a 'same'-padded Conv2d.

    NOTE(review): the `padding` argument is ignored — padding is always
    kernel_size // 2; the parameter is kept for signature compatibility.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=kernel_size // 2, bias=bias, stride=stride)
def shift_back(inputs, step=2):  # input [bs,28,256,310]  output [bs, 28, 256, 256]
    """Undo the dispersion shift in place: channel i's columns are moved
    left by int(step'*i), then the tensor is cropped to a square."""
    bs, nC, row, col = inputs.shape
    down_sample = 256 // row
    # Shrink the per-channel step to match any spatial down-sampling.
    eff_step = float(step) / float(down_sample * down_sample)
    out_col = row
    for band in range(nC):
        start = int(eff_step * band)
        inputs[:, band, :, :out_col] = inputs[:, band, :, start:start + out_col]
    return inputs[:, :, :, :out_col]
class MaskGuidedMechanism(nn.Module):
    """Modulates the mask with a learned spatial attention map, then
    shifts it back to the un-dispersed frame via `shift_back`."""

    def __init__(
            self, n_feat):
        super(MaskGuidedMechanism, self).__init__()
        self.conv1 = nn.Conv2d(n_feat, n_feat, kernel_size=1, bias=True)
        self.conv2 = nn.Conv2d(n_feat, n_feat, kernel_size=1, bias=True)
        self.depth_conv = nn.Conv2d(n_feat, n_feat, kernel_size=5, padding=2, bias=True, groups=n_feat)

    def forward(self, mask_shift):
        # mask_shift: [bs, nC, row, col]
        projected = self.conv1(mask_shift)
        attn_map = torch.sigmoid(self.depth_conv(self.conv2(projected)))
        modulated = projected + projected * attn_map
        return shift_back(modulated)
class MS_MSA(nn.Module):
    """Mask-guided Spectral-wise Multi-head Self-Attention.

    Attention is computed across the channel (spectral) dimension rather
    than over spatial tokens; V is modulated by the mask attention
    produced by MaskGuidedMechanism, and a depthwise-conv positional
    branch is added to the output.
    """
    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learnable per-head temperature for the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        # Positional embedding branch: two depthwise 3x3 convs with GELU.
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        self.mm = MaskGuidedMechanism(dim)
        self.dim = dim
    def forward(self, x_in, mask=None):
        """
        x_in: [b,h,w,c]
        mask: [1,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        x = x_in.reshape(b,h*w,c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        # Mask attention from the (shifted) mask, back in [b,h,w,c] layout.
        mask_attn = self.mm(mask.permute(0,3,1,2)).permute(0,2,3,1)
        if b != 0:
            # Broadcast the single mask over the batch.
            mask_attn = (mask_attn[0, :, :, :]).expand([b, h, w, c])
        q, k, v, mask_attn = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                                 (q_inp, k_inp, v_inp, mask_attn.flatten(1, 2)))
        v = v * mask_attn
        # q: b,heads,hw,c
        # Transpose so attention runs over the channel dimension.
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1))   # A = K^T*Q
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v   # b,heads,d,hw
        x = x.permute(0, 3, 1, 2)    # Transpose
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        # Positional branch computed from V in channel-first layout.
        out_p = self.pos_emb(v_inp.reshape(b,h,w,c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck convolutional FFN on channel-last tensors:
    1x1 expand -> GELU -> depthwise 3x3 -> GELU -> 1x1 project."""

    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        channel_first = x.permute(0, 3, 1, 2)
        mapped = self.net(channel_first)
        return mapped.permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """Spectral attention block: (MS_MSA -> FFN) x num_blocks, each with
    a residual connection, operating channel-last internally."""

    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            self.blocks.append(nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim))
            ]))

    def forward(self, x, mask):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        feat = x.permute(0, 2, 3, 1)
        mask_last = mask.permute(0, 2, 3, 1)
        for (attn, ff) in self.blocks:
            feat = attn(feat, mask=mask_last) + feat
            feat = ff(feat) + feat
        return feat.permute(0, 3, 1, 2)
class MST(nn.Module):
    """Mask-guided Spectral-wise Transformer: a U-shaped
    encoder/bottleneck/decoder of MSAB blocks, with the mask propagated
    (and down-sampled) alongside the features at every scale."""

    def __init__(self, dim=28, stage=3, num_blocks=[2,2,2]):
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage

        # Input projection: 28-band cube -> feature space.
        self.embedding = nn.Conv2d(28, self.dim, 3, 1, 1, bias=False)

        # Encoder: MSAB stage + strided conv for features and for the mask.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False)
            ]))
            dim_stage *= 2

        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])

        # Decoder: transposed-conv upsample, 1x1 skip fusion, MSAB stage.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2

        # Output projection back to 28 bands.
        self.mapping = nn.Conv2d(self.dim, 28, 3, 1, 1, bias=False)

        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x, mask=None):
        """
        x: [b,c,h,w]
        mask: [b,c,h,w]; defaults to CUDA zeros of shape [1,28,256,310]
        return out: [b,c,h,w]
        """
        # Bugfix: use an identity check. `mask == None` on a Tensor relies
        # on Tensor.__eq__ handling None, which is fragile across versions;
        # `is None` is the correct and equivalent test.
        if mask is None:
            mask = torch.zeros((1,28,256,310)).cuda()

        # Embedding
        fea = self.lrelu(self.embedding(x))

        # Encoder (features and masks saved per scale for the decoder)
        fea_encoder = []
        masks = []
        for (MSAB, FeaDownSample, MaskDownSample) in self.encoder_layers:
            fea = MSAB(fea, mask)
            masks.append(mask)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
            mask = MaskDownSample(mask)

        # Bottleneck
        fea = self.bottleneck(fea, mask)

        # Decoder with encoder skip connections and matching-scale masks
        for i, (FeaUpSample, Fution, LeWinBlcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.stage-1-i]], dim=1))
            mask = masks[self.stage - 1 - i]
            fea = LeWinBlcok(fea, mask)

        # Mapping: residual over the input cube.
        out = self.mapping(fea) + x
        return out
| 9,703 | 30.102564 | 116 | py |
MST | MST-main/simulation/test_code/architecture/BIRNAT.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class self_attention(nn.Module):
    """SAGAN-style spatial self-attention with a learned residual gate
    `gamma1` (initialized to 0, so the block starts as the identity)."""

    def __init__(self, ch):
        super(self_attention, self).__init__()
        self.conv1 = nn.Conv2d(ch, ch // 8, 1)
        self.conv2 = nn.Conv2d(ch, ch // 8, 1)
        self.conv3 = nn.Conv2d(ch, ch, 1)
        self.conv4 = nn.Conv2d(ch, ch, 1)
        self.gamma1 = torch.nn.Parameter(torch.Tensor([0]))
        self.ch = ch

    def forward(self, x):
        bs = x.shape[0]
        query = self.conv1(x)
        key = self.conv2(x)
        value = self.conv3(x)
        value_flat = value.reshape([bs, self.ch, -1])
        query_flat = query.reshape([bs, self.ch // 8, -1])
        key_flat = key.reshape([bs, self.ch // 8, -1])
        scores = torch.matmul(query_flat.permute([0, 2, 1]), key_flat)
        # Softmax over dim=1 (query positions), as in the original code.
        weights = F.softmax(scores, dim=1)
        attended = torch.matmul(value_flat, weights).reshape(x.shape)  # [bs, C, h, w]
        gated = self.conv4(attended)
        return self.gamma1 * gated + x
class res_part(nn.Module):
    """Three stacked residual conv blocks (3x3 -> 1x1 -> 3x3 with
    LeakyReLU), each added back onto its input."""

    def __init__(self, in_ch, out_ch):
        super(res_part, self).__init__()

        def make_block():
            # NOTE(review): the third conv consumes in_ch inputs, so the
            # block only composes when in_ch == out_ch (true for all callers).
            return nn.Sequential(
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, out_ch, 1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
            )

        self.conv1 = make_block()
        self.conv2 = make_block()
        self.conv3 = make_block()

    def forward(self, x):
        for block in (self.conv1, self.conv2, self.conv3):
            x = block(x) + x
        return x
class down_feature(nn.Module):
    """Feature extractor: 5x5 stem followed by interleaved 1x1/3x3 convs
    with LeakyReLU (stride 1 throughout, despite the name)."""

    def __init__(self, in_ch, out_ch):
        super(down_feature, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, 20, 5, stride=1, padding=2),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 40, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(40, out_ch, 3, stride=1, padding=1),
        )

    def forward(self, x):
        return self.conv(x)
class up_feature(nn.Module):
    """Feature-to-image head: shrinks `in_ch` channels down to `out_ch`
    with alternating 3x3 / 1x1 convs (stride 1 throughout)."""

    def __init__(self, in_ch, out_ch):
        super(up_feature, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, 40, 3, stride=1, padding=1),
            nn.Conv2d(40, 30, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(30, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
            nn.Conv2d(20, out_ch, 1),
        )

    def forward(self, x):
        return self.conv(x)
class cnn1(nn.Module):
    # Input: the (normalized) measurement concatenated with PhiTy.
    # Uses one stride-2 downsampling stage with residual/attention blocks
    # in between, then upsamples back to predict the first frame.
    def __init__(self, B):
        super(cnn1, self).__init__()
        # Encoder head: (B+1) input planes -> 128 at half resolution.
        self.conv1 = nn.Conv2d(B + 1, 32, kernel_size=5, stride=1, padding=2)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=1, stride=1)
        self.relu3 = nn.LeakyReLU(inplace=True)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.relu4 = nn.LeakyReLU(inplace=True)
        # Decoder head: upsample back and reduce to a single output plane.
        self.conv5 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.relu5 = nn.LeakyReLU(inplace=True)
        self.conv51 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.relu51 = nn.LeakyReLU(inplace=True)
        self.conv52 = nn.Conv2d(32, 16, kernel_size=1, stride=1)
        self.relu52 = nn.LeakyReLU(inplace=True)
        self.conv6 = nn.Conv2d(16, 1, kernel_size=3, stride=1, padding=1)
        # Residual trunk at the bottleneck resolution.
        self.res_part1 = res_part(128, 128)
        self.res_part2 = res_part(128, 128)
        self.res_part3 = res_part(128, 128)
        self.conv7 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu7 = nn.LeakyReLU(inplace=True)
        self.conv8 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        self.conv9 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu9 = nn.LeakyReLU(inplace=True)
        self.conv10 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        # Self-attention module (constructed but unused in forward).
        self.att1 = self_attention(128)
    def forward(self, meas=None, nor_meas=None, PhiTy=None):
        """Predict the first spectral frame from the normalized measurement
        (added as an extra channel) and PhiTy."""
        data = torch.cat([torch.unsqueeze(nor_meas, dim=1), PhiTy], dim=1)
        out = self.conv1(data)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.relu2(out)
        out = self.conv3(out)
        out = self.relu3(out)
        out = self.conv4(out)
        out = self.relu4(out)
        # Bottleneck: residual blocks interleaved with conv refinements.
        out = self.res_part1(out)
        out = self.conv7(out)
        out = self.relu7(out)
        out = self.conv8(out)
        out = self.res_part2(out)
        out = self.conv9(out)
        out = self.relu9(out)
        out = self.conv10(out)
        out = self.res_part3(out)
        # out = self.att1(out)
        out = self.conv5(out)
        out = self.relu5(out)
        out = self.conv51(out)
        out = self.relu51(out)
        out = self.conv52(out)
        out = self.relu52(out)
        out = self.conv6(out)
        return out
class forward_rnn(nn.Module):
    """Forward recurrence of BIRNAT: starting from the first frame,
    reconstructs the remaining `cs_rate - 1` spectral frames one at a
    time, conditioning each step on the measurement residual and a
    hidden state `ht`."""
    def __init__(self):
        super(forward_rnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        # Feature branch for the normalized measurement (computed once).
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Feature branch for the per-step measurement residual.
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Hidden-state update: 60 fused channels -> 20-channel state.
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)
    def forward(self, xt1, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        """
        :param xt1: [bs,1,row,col] first-frame estimate
        :param meas: [bs,row,col] measurement
        :param nor_meas: measurement normalized by the mask sum
        :param mask3d_batch: [bs,cs_rate,row,col] sensing masks
        :param h: [bs,20,row,col] initial hidden state
        :return: (out [bs,cs_rate,row,col], ht final hidden state)
        """
        ht = h
        xt = xt1
        step = 2
        [bs, nC, row, col] = xt1.shape
        out = xt1
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # Residual: subtract contributions of already-reconstructed
            # frames (d1) and of the not-yet-reconstructed frames,
            # approximated by the normalized measurement (d2).
            d1 = torch.zeros(bs, row, col).cuda()
            d2 = torch.zeros(bs, row, col).cuda()
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, ii, :, :], out[:, ii, :, :])
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, ii, :, :], torch.squeeze(nor_meas))
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            # Fuse hidden state + measurement features + residual + frame.
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out = torch.cat([out, xt], dim=1)
        return out, ht
class backrnn(nn.Module):
    """Backward recurrence of BIRNAT: starting from the last frame of the
    forward pass, re-reconstructs frames cs_rate-2 .. 0 in reverse
    order, conditioning on measurement residuals and a hidden state."""
    def __init__(self):
        super(backrnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        # Feature branch for the normalized measurement (computed once).
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Feature branch for the per-step measurement residual.
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Hidden-state update: 60 fused channels -> 20-channel state.
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)
    def forward(self, xt8, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        """
        :param xt8: [bs,cs_rate,row,col] forward-pass reconstruction
        :param meas: [bs,row,col] measurement
        :param nor_meas: measurement normalized by the mask sum
        :param mask3d_batch: [bs,cs_rate,row,col] sensing masks
        :param h: [bs,20,row,col] initial hidden state
        :return: out [bs,cs_rate,row,col] refined reconstruction
        """
        ht = h
        step = 2
        [bs, nC, row, col] = xt8.shape
        # Seed the recursion with the forward pass's last frame.
        xt = torch.unsqueeze(xt8[:, cs_rate - 1, :, :], 1)
        out = torch.zeros(bs, cs_rate, row, col).cuda()
        out[:, cs_rate - 1, :, :] = xt[:, 0, :, :]
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # Residual: d1 uses frames already refined by this backward
            # pass; d2 uses the forward pass's frames not yet refined.
            d1 = torch.zeros(bs, row, col).cuda()
            d2 = torch.zeros(bs, row, col).cuda()
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], out[:, cs_rate - 1 - ii, :, :].clone())
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], xt8[:, cs_rate - 1 - ii, :, :].clone())
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            # Fuse hidden state + measurement features + residual + frame.
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out[:, cs_rate - 2 - i, :, :] = xt[:, 0, :, :]
        return out
def shift_gt_back(inputs, step=2):  # input [bs,28,256,310] output [bs, 28, 256, 256]
    """Undo the CASSI dispersion shift: crop band i at column offset step*i.

    Fix: allocate the output on `inputs.device` instead of unconditionally
    calling `.cuda()`, so the function also works on CPU tensors and is
    consistent with the sibling `shift` helper, which is device-aware.

    Args:
        inputs: [bs, nC, row, col] shifted cube.
        step: per-band column shift (default 2).

    Returns:
        [bs, nC, row, col - (nC - 1) * step] float32 de-shifted cube.
    """
    [bs, nC, row, col] = inputs.shape
    out_col = col - (nC - 1) * step
    output = torch.zeros(bs, nC, row, out_col, dtype=torch.float32, device=inputs.device)
    for i in range(nC):
        output[:, i, :, :] = inputs[:, i, :, step * i:step * i + out_col]
    return output
def shift(inputs, step=2):
    """Apply the CASSI dispersion shift: band i is placed at column offset
    step*i in a zero-padded, wider output cube (float32, same device)."""
    bs, nC, row, col = inputs.shape
    device = inputs.device if inputs.is_cuda else None
    shifted = torch.zeros(bs, nC, row, col + (nC - 1) * step,
                          dtype=torch.float32, device=device)
    for band in range(nC):
        offset = step * band
        shifted[:, band, :, offset:offset + col] = inputs[:, band, :, :]
    return shifted
class BIRNAT(nn.Module):
    """Bidirectional RNN reconstruction network (BIRNAT) for CASSI.

    Pipeline: cnn1 predicts the first band from the measurement, the
    forward RNN predicts the remaining bands, and the backward RNN
    refines them in reverse order.
    """
    def __init__(self):
        super(BIRNAT, self).__init__()
        self.cs_rate = 28  # number of spectral bands
        self.first_frame_net = cnn1(self.cs_rate).cuda()
        self.rnn1 = forward_rnn().cuda()
        self.rnn2 = backrnn().cuda()
    def gen_meas_torch(self, meas, shift_mask):
        """Normalize the measurement by the mask column-sum and expand it
        band-wise.

        Returns:
            nor_meas: meas divided by sum(shift_mask) over the band dim.
            temp: nor_meas replicated over 28 bands and re-masked.
        """
        batch_size, H = meas.shape[0:2]
        mask_s = torch.sum(shift_mask, 1)
        nor_meas = torch.div(meas, mask_s)
        temp = torch.mul(torch.unsqueeze(nor_meas, dim=1).expand([batch_size, 28, H, shift_mask.shape[3]]), shift_mask)
        return nor_meas, temp
    def forward(self, meas, shift_mask=None):
        # NOTE(review): the all-zero fallback mask makes gen_meas_torch divide
        # by zero; real callers should always pass shift_mask.
        if shift_mask==None:
            shift_mask = torch.zeros(1, 28, 256, 310).cuda()
        H, W = meas.shape[-2:]
        nor_meas, PhiTy = self.gen_meas_torch(meas, shift_mask)
        h0 = torch.zeros(meas.shape[0], 20, H, W).cuda()  # initial hidden state
        xt1 = self.first_frame_net(meas, nor_meas, PhiTy)  # first-band estimate
        model_out1, h1 = self.rnn1(xt1, meas, nor_meas, PhiTy, shift_mask, h0, self.cs_rate)
        model_out2 = self.rnn2(model_out1, meas, nor_meas, PhiTy, shift_mask, h1, self.cs_rate)
        model_out2 = shift_gt_back(model_out2)  # undo the dispersion shift
        return model_out2
| 13,326 | 35.412568 | 119 | py |
MST | MST-main/simulation/test_code/architecture/GAP_Net.py | import torch.nn.functional as F
import torch
import torch.nn as nn
def A(x, Phi):
    """Forward CASSI operator: mask the cube and integrate over bands.

    x, Phi: [bs, nC, H, W]; returns y: [bs, H, W].
    """
    return (x * Phi).sum(dim=1)
def At(y, Phi):
    """Adjoint CASSI operator: replicate y per band and re-mask.

    y: [bs, H, W]; Phi: [bs, nC, H, W]; returns x: [bs, nC, H, W].
    """
    replicated = y.unsqueeze(1).repeat(1, Phi.shape[1], 1, 1)
    return replicated * Phi
def shift_3d(inputs, step=2):
    """In-place dispersion shift: roll band i right by step*i columns.

    Returns the same tensor for chaining.
    """
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """In-place inverse dispersion shift: roll band i left by step*i columns.

    Returns the same tensor for chaining.
    """
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=-step * band, dims=2)
    return inputs
class double_conv(nn.Module):
    """Two 3x3 conv layers, each followed by an in-place ReLU."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        stages = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        ]
        # Attribute name kept so pretrained state dicts still load.
        self.d_conv = nn.Sequential(*stages)
    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 2-level U-Net denoiser used as the prior inside GAP-Net stages."""
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            # nn.Conv2d(64, 64, (1,2), padding=(0,1)),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()
    def forward(self, x):
        """Denoise x; output keeps the input's spatial size.

        The input is reflect-padded to a multiple of 8 so the two 2x
        poolings divide evenly, and cropped back at the end. The Tanh
        output is added to the padded input, i.e. the net predicts a
        bounded residual.
        """
        b, c, h_inp, w_inp = x.shape
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        inputs = x
        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.upsample2(conv3)
        x = torch.cat([x, conv2], dim=1)  # skip connection, level 2
        x = self.dconv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)  # skip connection, level 1
        x = self.dconv_up1(x)
        x = self.conv_last(x)
        x = self.afn_last(x)
        out = x + inputs  # residual connection
        return out[:, :, :h_inp, :w_inp]  # crop the padding back off
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""
    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        mid = mid_channels if mid_channels else out_channels
        # Attribute name kept so pretrained state dicts still load.
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )
    def forward(self, x):
        return self.double_conv(x)
class GAP_net(nn.Module):
    """GAP-Net for CASSI: nine unrolled GAP (generalized alternating
    projection) iterations, each followed by a small U-Net prior applied
    in the shifted-back (aligned) domain.

    Refactor: the nine identical copy-pasted stages are collapsed into a
    loop and the dead `x_list` bookkeeping is removed; outputs are
    unchanged. The nine U-Nets stay as individually named attributes so
    pretrained state dicts with keys 'unet1.*' ... 'unet9.*' still load.
    """
    def __init__(self):
        super(GAP_net, self).__init__()
        self.unet1 = Unet(28, 28)
        self.unet2 = Unet(28, 28)
        self.unet3 = Unet(28, 28)
        self.unet4 = Unet(28, 28)
        self.unet5 = Unet(28, 28)
        self.unet6 = Unet(28, 28)
        self.unet7 = Unet(28, 28)
        self.unet8 = Unet(28, 28)
        self.unet9 = Unet(28, 28)
    def forward(self, y, input_mask=None):
        """Reconstruct from measurement y.

        Args:
            y: [bs, H, W'] coded measurement.
            input_mask: (Phi, Phi_s) sensing mask and its band-sum;
                when None, a random GPU mask is used (debug fallback,
                matching the original behavior).

        Returns:
            [bs, 28, H, 256] reconstruction (cropped to 256 columns).
        """
        if input_mask is None:
            Phi = torch.rand((1, 28, 256, 310)).cuda()
            Phi_s = torch.rand((1, 256, 310)).cuda()
        else:
            Phi, Phi_s = input_mask
        x = At(y, Phi)  # initialization: x0 = Phi^T y
        stages = (self.unet1, self.unet2, self.unet3, self.unet4, self.unet5,
                  self.unet6, self.unet7, self.unet8, self.unet9)
        for unet in stages:
            # GAP Euclidean projection toward {x : A x = y}.
            yb = A(x, Phi)
            x = x + At(torch.div(y - yb, Phi_s), Phi)
            # Denoising prior in the aligned (shifted-back) domain.
            x = shift_back_3d(x)
            x = unet(x)
            x = shift_3d(x)
        return x[:, :, :, 0:256]
| 5,525 | 28.084211 | 81 | py |
MST | MST-main/simulation/test_code/architecture/Lambda_Net.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
class LambdaNetAttention(nn.Module):
    """Global single-head self-attention over all spatial positions, with a
    learnable residual gate `gamma` (initialized to 1)."""
    def __init__(
            self,
            dim,
    ):
        super().__init__()
        self.dim = dim
        self.to_q = nn.Linear(dim, dim // 8, bias=False)
        self.to_k = nn.Linear(dim, dim // 8, bias=False)
        self.to_v = nn.Linear(dim, dim, bias=False)
        self.rescale = (dim // 8) ** -0.5
        self.gamma = nn.Parameter(torch.ones(1))
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        x = x.permute(0, 2, 3, 1)
        b, h, w, c = x.shape
        # Flatten spatial positions into a token sequence: [b, h*w, c].
        tokens = x.reshape(b, h * w, c)
        q = self.to_q(tokens)
        k = self.to_k(tokens)
        v = self.to_v(tokens)
        # Scaled dot-product attention (bmm is the einsum 'bid,bjd->bij').
        scores = torch.bmm(q, k.transpose(1, 2)) * self.rescale
        attn = scores.softmax(dim=-1)
        agg = torch.bmm(attn, v)
        # Restore the spatial layout and apply the gated residual.
        agg = agg.reshape(b, h, w, c)
        out = self.gamma * agg + x
        return out.permute(0, 3, 1, 2)
class triple_conv(nn.Module):
    """Three stacked 3x3 convs; ReLU after the first two only."""
    def __init__(self, in_channels, out_channels):
        super(triple_conv, self).__init__()
        stages = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
        ]
        # Attribute name kept so pretrained state dicts still load.
        self.t_conv = nn.Sequential(*stages)
    def forward(self, x):
        return self.t_conv(x)
class double_conv(nn.Module):
    """Two 3x3 convs with a ReLU in between (note: no ReLU after the
    second conv, unlike the GAP-Net variant of the same name)."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        stages = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
        ]
        # Attribute name kept so pretrained state dicts still load.
        self.d_conv = nn.Sequential(*stages)
    def forward(self, x):
        return self.d_conv(x)
def shift_back_3d(inputs, step=2):
    """In-place inverse dispersion shift: roll band i left by step*i
    columns. Returns the same tensor for chaining."""
    num_bands = inputs.shape[1]
    for band in range(num_bands):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=-step * band, dims=2)
    return inputs
class Lambda_Net(nn.Module):
    """Lambda-Net: a deep 5-level U-Net with a self-attention block at the
    128-channel decoder level, reconstructing `out_ch` spectral bands from
    a coded measurement plus its shifted mask."""
    def __init__(self, out_ch=28):
        super(Lambda_Net, self).__init__()
        # Fuse the raw measurement (1 ch) with the 28-band mask.
        self.conv_in = nn.Conv2d(1+28, 28, 3, padding=1)
        # encoder
        self.conv_down1 = triple_conv(28, 32)
        self.conv_down2 = triple_conv(32, 64)
        self.conv_down3 = triple_conv(64, 128)
        self.conv_down4 = triple_conv(128, 256)
        self.conv_down5 = double_conv(256, 512)
        self.conv_down6 = double_conv(512, 1024)
        self.maxpool = nn.MaxPool2d(2)
        # decoder
        self.upsample5 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
        self.upsample4 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.upsample3 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.upsample2 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.upsample1 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
        self.conv_up1 = triple_conv(32+32, 32)
        self.conv_up2 = triple_conv(64+64, 64)
        self.conv_up3 = triple_conv(128+128, 128)
        self.conv_up4 = triple_conv(256+256, 256)
        self.conv_up5 = double_conv(512+512, 512)
        # attention
        self.attention = LambdaNetAttention(dim=128)
        # Progressive refinement head: two dense steps, then 1x1 projection.
        self.conv_last1 = nn.Conv2d(32, 6, 3,1,1)
        self.conv_last2 = nn.Conv2d(38, 32, 3,1,1)
        self.conv_last3 = nn.Conv2d(32, 12, 3,1,1)
        self.conv_last4 = nn.Conv2d(44, 32, 3,1,1)
        self.conv_last5 = nn.Conv2d(32, out_ch, 1)
        self.act = nn.ReLU()
    def forward(self, x, input_mask=None):
        """x: [bs, H, W] measurement; input_mask: [bs, 28, H, W] shifted
        mask. Returns the de-shifted cube cropped to the first 256 cols."""
        if input_mask == None:
            input_mask = torch.zeros((1,28,256,310)).cuda()
        x = x/28*2  # rough measurement normalization
        x = self.conv_in(torch.cat([x.unsqueeze(1), input_mask], dim=1))
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad to a multiple of 32 so the five 2x poolings divide evenly.
        hb, wb = 32, 32
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        res0 = x
        conv1 = self.conv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.conv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.conv_down3(x)
        x = self.maxpool(conv3)
        conv4 = self.conv_down4(x)
        x = self.maxpool(conv4)
        conv5 = self.conv_down5(x)
        x = self.maxpool(conv5)
        conv6 = self.conv_down6(x)
        x = self.upsample5(conv6)
        x = torch.cat([x, conv5], dim=1)
        x = self.conv_up5(x)
        x = self.upsample4(x)
        x = torch.cat([x, conv4], dim=1)
        x = self.conv_up4(x)
        x = self.upsample3(x)
        x = torch.cat([x, conv3], dim=1)
        x = self.conv_up3(x)
        x = self.attention(x)  # global self-attention at 128 channels
        x = self.upsample2(x)
        x = torch.cat([x, conv2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)
        x = self.conv_up1(x)
        res1 = x
        out1 = self.act(self.conv_last1(x))
        x = self.conv_last2(torch.cat([res1,out1],dim=1))
        res2 = x
        out2 = self.act(self.conv_last3(x))
        out3 = self.conv_last4(torch.cat([res2, out2], dim=1))
        out = self.conv_last5(out3)+res0  # global residual
        out = out[:, :, :h_inp, :w_inp]  # crop the padding back off
        return shift_back_3d(out)[:, :, :, :256]
| 5,679 | 30.555556 | 95 | py |
MST | MST-main/simulation/test_code/architecture/ADMM_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def A(x, Phi):
    """Forward CASSI operator: mask the cube and integrate over bands.

    x, Phi: [bs, nC, H, W]; returns y: [bs, H, W].
    """
    masked = x * Phi
    return torch.sum(masked, dim=1)
def At(y, Phi):
    """Adjoint CASSI operator: replicate y per band and re-mask.

    y: [bs, H, W]; Phi: [bs, nC, H, W]; returns x: [bs, nC, H, W].
    """
    stacked = y.unsqueeze(1).repeat(1, Phi.shape[1], 1, 1)
    return stacked * Phi
class double_conv(nn.Module):
    """Two 3x3 conv layers, each followed by an in-place ReLU."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        stages = [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        ]
        # Attribute name kept so pretrained state dicts still load.
        self.d_conv = nn.Sequential(*stages)
    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 2-level U-Net denoiser used as the prior inside ADMM-Net stages."""
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()
    def forward(self, x):
        """Denoise x; output keeps the input's spatial size.

        Reflect-pads to a multiple of 8 so the two 2x poolings divide
        evenly, then crops back. The Tanh output is added to the padded
        input, i.e. the net predicts a bounded residual.
        """
        b, c, h_inp, w_inp = x.shape
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        inputs = x
        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.upsample2(conv3)
        x = torch.cat([x, conv2], dim=1)  # skip connection, level 2
        x = self.dconv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)  # skip connection, level 1
        x = self.dconv_up1(x)
        x = self.conv_last(x)
        x = self.afn_last(x)
        out = x + inputs  # residual connection
        return out[:, :, :h_inp, :w_inp]  # crop the padding back off
def shift_3d(inputs, step=2):
    """In-place dispersion shift: roll band i right by step*i columns.

    Returns the same tensor for chaining."""
    num_bands = inputs.shape[1]
    for band in range(num_bands):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """In-place inverse dispersion shift: roll band i left by step*i
    columns. Returns the same tensor for chaining."""
    num_bands = inputs.shape[1]
    for band in range(num_bands):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=-step * band, dims=2)
    return inputs
class ADMM_net(nn.Module):
    """ADMM-Net for CASSI: nine unrolled ADMM iterations with a learned
    per-stage regularization offset (gamma) and a U-Net denoising prior.

    Refactor: the nine identical copy-pasted stages are collapsed into a
    loop and the dead `x_list` bookkeeping is removed; outputs are
    unchanged (the final dual update is computed but unused, exactly as
    the per-stage dual updates were before the last stage). Attribute
    names are preserved so pretrained state dicts with keys
    'unet1.*'..'unet9.*' and 'gamma1'..'gamma9' still load.
    """
    def __init__(self):
        super(ADMM_net, self).__init__()
        self.unet1 = Unet(28, 28)
        self.unet2 = Unet(28, 28)
        self.unet3 = Unet(28, 28)
        self.unet4 = Unet(28, 28)
        self.unet5 = Unet(28, 28)
        self.unet6 = Unet(28, 28)
        self.unet7 = Unet(28, 28)
        self.unet8 = Unet(28, 28)
        self.unet9 = Unet(28, 28)
        self.gamma1 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma2 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma3 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma4 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma5 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma6 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma7 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma8 = torch.nn.Parameter(torch.Tensor([0]))
        self.gamma9 = torch.nn.Parameter(torch.Tensor([0]))
    def forward(self, y, input_mask=None):
        """Reconstruct from measurement y.

        Args:
            y: [bs, H, W'] coded measurement.
            input_mask: (Phi, Phi_s) sensing mask and its band-sum;
                when None, a random GPU mask is used (debug fallback,
                matching the original behavior).

        Returns:
            [bs, 28, H, 256] reconstruction (cropped to 256 columns).
        """
        if input_mask is None:
            Phi = torch.rand((1, 28, 256, 310)).cuda()
            Phi_s = torch.rand((1, 256, 310)).cuda()
        else:
            Phi, Phi_s = input_mask
        theta = At(y, Phi)         # initialization: Phi^T y
        b = torch.zeros_like(Phi)  # scaled dual variable
        unets = (self.unet1, self.unet2, self.unet3, self.unet4, self.unet5,
                 self.unet6, self.unet7, self.unet8, self.unet9)
        gammas = (self.gamma1, self.gamma2, self.gamma3, self.gamma4, self.gamma5,
                  self.gamma6, self.gamma7, self.gamma8, self.gamma9)
        for unet, gamma in zip(unets, gammas):
            # x-update: closed-form quadratic solve with learned offset gamma.
            yb = A(theta + b, Phi)
            x = theta + b + At(torch.div(y - yb, Phi_s + gamma), Phi)
            # theta-update: U-Net denoising prior in the aligned domain.
            x1 = shift_back_3d(x - b)
            theta = shift_3d(unet(x1))
            # Dual update (has no effect on the final stage's output).
            b = b - (x - theta)
        return theta[:, :, :, 0:256]
| 6,191 | 29.653465 | 81 | py |
MST | MST-main/simulation/test_code/architecture/TSA_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
_NORM_BONE = False
def conv_block(in_planes, out_planes, the_kernel=3, the_stride=1, the_padding=1, flag_norm=False, flag_norm_act=True):
    """Conv2d + ReLU block, with optional BatchNorm.

    flag_norm_act chooses conv-norm-act (True) vs conv-act-norm (False);
    without flag_norm no BatchNorm is constructed at all.
    """
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=the_kernel, stride=the_stride, padding=the_padding)
    act = nn.ReLU(inplace=True)
    if not flag_norm:
        return nn.Sequential(conv, act)
    norm = nn.BatchNorm2d(out_planes)
    layers = (conv, norm, act) if flag_norm_act else (conv, act, norm)
    return nn.Sequential(*layers)
def conv1x1_block(in_planes, out_planes, flag_norm=False):
    """Bias-free 1x1 conv, optionally followed by BatchNorm."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
    if flag_norm:
        return nn.Sequential(conv, nn.BatchNorm2d(out_planes))
    return conv
def fully_block(in_dim, out_dim, flag_norm=False, flag_norm_act=True):
    """Linear + ReLU block, with optional BatchNorm2d.

    NOTE(review): the norm here is BatchNorm2d applied after a Linear
    layer — this only works for 4-D activations; confirm intended use
    before enabling flag_norm.
    """
    fc = nn.Linear(in_dim, out_dim)
    act = nn.ReLU(inplace=True)
    if not flag_norm:
        return nn.Sequential(fc, act)
    norm = nn.BatchNorm2d(out_dim)
    layers = (fc, norm, act) if flag_norm_act else (fc, act, norm)
    return nn.Sequential(*layers)
class Res2Net(nn.Module):
    """Res2Net-style block: 1x1 expand to uPlane*scale channels,
    hierarchical 3x3 convs over channel splits, then 1x1 reduce back to
    inChannel. (No residual add inside this block.)"""
    def __init__(self, inChannel, uPlane, scale=4):
        super(Res2Net, self).__init__()
        self.uPlane = uPlane
        self.scale = scale
        self.conv_init = nn.Conv2d(inChannel, uPlane * scale, kernel_size=1, bias=False)
        self.bn_init = nn.BatchNorm2d(uPlane * scale)
        convs = []
        bns = []
        # One 3x3 conv per split except the last (which passes through untouched).
        for i in range(self.scale - 1):
            convs.append(nn.Conv2d(self.uPlane, self.uPlane, kernel_size=3, stride=1, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(self.uPlane))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv_end = nn.Conv2d(uPlane * scale, inChannel, kernel_size=1, bias=False)
        self.bn_end = nn.BatchNorm2d(inChannel)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.conv_init(x)
        out = self.bn_init(out)
        out = self.relu(out)
        # Split the expanded channels into `scale` groups of uPlane each.
        spx = torch.split(out, self.uPlane, 1)
        for i in range(self.scale - 1):
            # Each group is summed with the previous group's conv output
            # before its own conv (hierarchical connectivity).
            if i == 0:
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        out = torch.cat((out, spx[self.scale - 1]), 1)  # last split is identity
        out = self.conv_end(out)
        out = self.bn_end(out)
        return out
_NORM_ATTN = True
_NORM_FC = False
class TSA_Transform(nn.Module):
    """ Spectral-Spatial Self-Attention """
    def __init__(self, uSpace, inChannel, outChannel, nHead, uAttn, mode=[0, 1], flag_mask=False, gamma_learn=False):
        super(TSA_Transform, self).__init__()
        ''' ------------------------------------------
        uSpace:
            uHeight: the [-2] dim of the 3D tensor
            uWidth: the [-1] dim of the 3D tensor
        inChannel:
            the number of Channel of the input tensor
        outChannel:
            the number of Channel of the output tensor
        nHead:
            the number of Head of the input tensor
        uAttn:
            uSpatial: the dim of the spatial features
            uSpectral: the dim of the spectral features
        mask:
            The Spectral Smoothness Mask
        {mode} and {gamma_learn} is just for variable selection
        ------------------------------------------ '''
        self.nHead = nHead
        self.uAttn = uAttn
        self.outChannel = outChannel
        # Fixed (non-learned) attention temperatures for the two domains.
        self.uSpatial = nn.Parameter(torch.tensor(float(uAttn[0])), requires_grad=False)
        self.uSpectral = nn.Parameter(torch.tensor(float(uAttn[1])), requires_grad=False)
        # Optional spectral-smoothness prior added to the lambda attention.
        self.mask = nn.Parameter(Spectral_Mask(outChannel), requires_grad=False) if flag_mask else None
        self.attn_scale = nn.Parameter(torch.tensor(1.1), requires_grad=False) if flag_mask else None
        self.gamma = nn.Parameter(torch.tensor(1.0), requires_grad=gamma_learn)
        # When mode requests it, values are down-sampled before attention
        # and transposed-conv'd back afterwards (memory saving).
        if sum(mode) > 0:
            down_sample = []
            scale = 1
            cur_channel = outChannel
            for i in range(sum(mode)):
                scale *= 2
                down_sample.append(conv_block(cur_channel, 2 * cur_channel, 3, 2, 1, _NORM_ATTN))
                cur_channel = 2 * cur_channel
            self.cur_channel = cur_channel
            self.down_sample = nn.Sequential(*down_sample)
            self.up_sample = nn.ConvTranspose2d(outChannel * scale, outChannel, scale, scale)
        else:
            self.down_sample = None
            self.up_sample = None
        spec_dim = int(uSpace[0] / 4 - 3) * int(uSpace[1] / 4 - 3)
        self.preproc = conv1x1_block(inChannel, outChannel, _NORM_ATTN)
        # Separate query/key feature extractors per attention axis (X, Y, lambda).
        self.query_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.query_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.query_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.key_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.key_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.key_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.value = conv1x1_block(outChannel, nHead * outChannel, _NORM_ATTN)
        self.aggregation = nn.Linear(nHead * outChannel, outChannel)
    def forward(self, image):
        """Apply spatial (X, Y) and spectral (lambda) attention to `image`.

        Returns (out, (attn_x, attn_y, attn_lambda)); out is a gamma-gated
        residual over the pre-processed features.
        """
        feat = self.preproc(image)
        feat_qx = self.query_x(feat, 'X')
        feat_qy = self.query_y(feat, 'Y')
        feat_qlambda = self.query_lambda(feat)
        feat_kx = self.key_x(feat, 'X')
        feat_ky = self.key_y(feat, 'Y')
        feat_klambda = self.key_lambda(feat)
        feat_value = self.value(feat)
        # Fold the head dimension into the batch dimension for bmm.
        feat_qx = torch.cat(torch.split(feat_qx, 1, dim=1)).squeeze(dim=1)
        feat_qy = torch.cat(torch.split(feat_qy, 1, dim=1)).squeeze(dim=1)
        feat_kx = torch.cat(torch.split(feat_kx, 1, dim=1)).squeeze(dim=1)
        feat_ky = torch.cat(torch.split(feat_ky, 1, dim=1)).squeeze(dim=1)
        feat_qlambda = torch.cat(torch.split(feat_qlambda, self.uAttn[1], dim=-1))
        feat_klambda = torch.cat(torch.split(feat_klambda, self.uAttn[1], dim=-1))
        feat_value = torch.cat(torch.split(feat_value, self.outChannel, dim=1))
        # Scaled dot-product energies per attention axis.
        energy_x = torch.bmm(feat_qx, feat_kx.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_y = torch.bmm(feat_qy, feat_ky.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_lambda = torch.bmm(feat_qlambda, feat_klambda.permute(0, 2, 1)) / torch.sqrt(self.uSpectral)
        attn_x = F.softmax(energy_x, dim=-1)
        attn_y = F.softmax(energy_y, dim=-1)
        attn_lambda = F.softmax(energy_lambda, dim=-1)
        if self.mask is not None:
            # Blend in the fixed spectral-smoothness prior.
            attn_lambda = (attn_lambda + self.mask) / torch.sqrt(self.attn_scale)
        pro_feat = feat_value if self.down_sample is None else self.down_sample(feat_value)
        batchhead, dim_c, dim_x, dim_y = pro_feat.size()
        # Apply the Y then X spatial attentions to every channel of the values.
        attn_x_repeat = attn_x.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_x, dim_x)
        attn_y_repeat = attn_y.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_y, dim_y)
        pro_feat = pro_feat.view(-1, dim_x, dim_y)
        pro_feat = torch.bmm(pro_feat, attn_y_repeat.permute(0, 2, 1))
        pro_feat = torch.bmm(pro_feat.permute(0, 2, 1), attn_x_repeat.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, dim_c, dim_x, dim_y)
        if self.up_sample is not None:
            pro_feat = self.up_sample(pro_feat)
            _, _, dim_x, dim_y = pro_feat.size()
        # Apply the spectral (channel) attention.
        pro_feat = pro_feat.contiguous().view(batchhead, self.outChannel, -1).permute(0, 2, 1)
        pro_feat = torch.bmm(pro_feat, attn_lambda.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, self.outChannel, dim_x, dim_y)
        # Un-fold heads from the batch dim and aggregate them linearly.
        pro_feat = torch.cat(torch.split(pro_feat, int(batchhead / self.nHead), dim=0), dim=1).permute(0, 2, 3, 1)
        pro_feat = self.aggregation(pro_feat).permute(0, 3, 1, 2)
        out = self.gamma * pro_feat + feat
        return out, (attn_x, attn_y, attn_lambda)
class Feature_Spatial(nn.Module):
    """ Spatial Feature Generation Component """
    def __init__(self, inChannel, nHead, shiftDim, outDim, mode):
        super(Feature_Spatial, self).__init__()
        kernels = [(1, 5), (3, 5)]
        strides = [(1, 2), (2, 2)]
        paddings = [(0, 2), (1, 2)]
        m0, m1 = mode[0], mode[1]
        self.conv1 = conv_block(inChannel, nHead, kernels[m0], strides[m0], paddings[m0], _NORM_ATTN)
        self.conv2 = conv_block(nHead, nHead, kernels[m1], strides[m1], paddings[m1], _NORM_ATTN)
        self.fully = fully_block(shiftDim, outDim, _NORM_FC)
    def forward(self, image, direction):
        # 'Y' attends along the other spatial axis: swap H and W first.
        if direction == 'Y':
            image = image.permute(0, 1, 3, 2)
        return self.fully(self.conv2(self.conv1(image)))
class Feature_Spectral(nn.Module):
    """ Spectral Feature Generation Component """
    def __init__(self, inChannel, nHead, viewDim, outDim):
        super(Feature_Spectral, self).__init__()
        self.inChannel = inChannel
        self.conv1 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.conv2 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.fully = fully_block(viewDim, int(nHead * outDim), _NORM_FC)
    def forward(self, image):
        batch = image.size(0)
        feat = self.conv2(self.conv1(image))
        # Flatten spatial dims so each channel becomes a feature vector.
        flat = feat.view(batch, self.inChannel, -1)
        return self.fully(flat)
def Spectral_Mask(dim_lambda):
    """Build a [1, dim_lambda, dim_lambda] float32 band-proximity mask.

    Row i is a raised-cosine profile centered on band i (value 1.0 on the
    diagonal, falling to 0.0 at the farthest band), used to discourage a
    band from attending only to itself.
    """
    profile = (np.cos(np.pi * np.linspace(-1, 1, num=2 * dim_lambda - 1)) + 1.0) / 2.0
    rows = [profile[dim_lambda - 1 - i: 2 * dim_lambda - 1 - i] for i in range(dim_lambda)]
    att = np.stack(rows).astype(np.float32)
    return torch.from_numpy(att).unsqueeze(0)
class TSA_Net(nn.Module):
    """TSA-Net: U-shaped encoder/decoder with spectral-spatial self-attention
    (TSA_Transform) inserted at three decoder scales."""
    def __init__(self, in_ch=28, out_ch=28):
        super(TSA_Net, self).__init__()
        self.tconv_down1 = Encoder_Triblock(in_ch, 64, False)
        self.tconv_down2 = Encoder_Triblock(64, 128, False)
        self.tconv_down3 = Encoder_Triblock(128, 256)
        self.tconv_down4 = Encoder_Triblock(256, 512)
        self.bottom1 = conv_block(512, 1024)
        self.bottom2 = conv_block(1024, 1024)
        self.tconv_up4 = Decoder_Triblock(1024, 512)
        self.tconv_up3 = Decoder_Triblock(512, 256)
        self.transform3 = TSA_Transform((64, 64), 256, 256, 8, (64, 80), [0, 0])
        self.tconv_up2 = Decoder_Triblock(256, 128)
        self.transform2 = TSA_Transform((128, 128), 128, 128, 8, (64, 40), [1, 0])
        self.tconv_up1 = Decoder_Triblock(128, 64)
        # Final transform also applies the spectral-smoothness mask (True).
        self.transform1 = TSA_Transform((256, 256), 64, 28, 8, (48, 30), [1, 1], True)
        self.conv_last = nn.Conv2d(out_ch, out_ch, 1)
        self.afn_last = nn.Sigmoid()
    def forward(self, x, input_mask=None):
        """input_mask is unused; kept for a uniform model interface.
        Returns a sigmoid-bounded reconstruction."""
        enc1, enc1_pre = self.tconv_down1(x)
        enc2, enc2_pre = self.tconv_down2(enc1)
        enc3, enc3_pre = self.tconv_down3(enc2)
        enc4, enc4_pre = self.tconv_down4(enc3)
        # enc5,enc5_pre = self.tconv_down5(enc4)
        bottom = self.bottom1(enc4)
        bottom = self.bottom2(bottom)
        # dec5 = self.tconv_up5(bottom,enc5_pre)
        dec4 = self.tconv_up4(bottom, enc4_pre)
        dec3 = self.tconv_up3(dec4, enc3_pre)
        dec3, _ = self.transform3(dec3)  # attention at 1/4 scale
        dec2 = self.tconv_up2(dec3, enc2_pre)
        dec2, _ = self.transform2(dec2)  # attention at 1/2 scale
        dec1 = self.tconv_up1(dec2, enc1_pre)
        dec1, _ = self.transform1(dec1)  # attention at full scale (masked)
        dec1 = self.conv_last(dec1)
        output = self.afn_last(dec1)
        return output
class Encoder_Triblock(nn.Module):
    """Encoder stage: conv -> (Res2Net | conv) -> optional max-pool.

    forward returns (pooled, pre_pool) so the un-pooled map can feed the
    decoder skip connection.
    """
    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Encoder_Triblock, self).__init__()
        self.layer1 = conv_block(inChannel, outChannel, nKernal, flag_norm=_NORM_BONE)
        self.layer2 = (Res2Net(outChannel, int(outChannel / 4)) if flag_res
                       else conv_block(outChannel, outChannel, nKernal, flag_norm=_NORM_BONE))
        self.pool = nn.MaxPool2d(nPool) if flag_Pool else None
    def forward(self, x):
        feat = self.layer2(self.layer1(x))
        if self.pool is None:
            return feat, feat
        return self.pool(feat), feat
class Decoder_Triblock(nn.Module):
    """Decoder stage: transpose-conv upsample, concat the encoder skip,
    then two conv layers (Res2Net or plain conv)."""
    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Decoder_Triblock, self).__init__()
        # NOTE(review): nPool / flag_Pool are accepted but never used here.
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(inChannel, outChannel, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        if flag_res:
            self.layer2 = Res2Net(int(outChannel * 2), int(outChannel / 2))
        else:
            self.layer2 = conv_block(outChannel * 2, outChannel * 2, nKernal, flag_norm=_NORM_BONE)
        self.layer3 = conv_block(outChannel * 2, outChannel, nKernal, flag_norm=_NORM_BONE)
    def forward(self, feat_dec, feat_enc):
        feat_dec = self.layer1(feat_dec)
        # Pad the upsampled decoder map if it is smaller than the encoder
        # skip (can happen for odd input sizes); logs when it triggers.
        diffY = feat_enc.size()[2] - feat_dec.size()[2]
        diffX = feat_enc.size()[3] - feat_dec.size()[3]
        if diffY != 0 or diffX != 0:
            print('Padding for size mismatch ( Enc:', feat_enc.size(), 'Dec:', feat_dec.size(), ')')
            feat_dec = F.pad(feat_dec, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
        feat = torch.cat([feat_dec, feat_enc], dim=1)
        feat = self.layer2(feat)
        feat = self.layer3(feat)
        return feat
MST | MST-main/simulation/test_code/architecture/__init__.py | import torch
from .MST import MST
from .GAP_Net import GAP_net
from .ADMM_Net import ADMM_net
from .TSA_Net import TSA_Net
from .HDNet import HDNet, FDL
from .DGSMP import HSI_CS
from .BIRNAT import BIRNAT
from .MST_Plus_Plus import MST_Plus_Plus
from .Lambda_Net import Lambda_Net
from .CST import CST
from .DAUHST import DAUHST
def model_generator(method, pretrained_model_path=None):
    """Build a CASSI reconstruction model by name, optionally loading weights.

    Args:
        method: model identifier, e.g. 'mst_s', 'gap_net', 'dauhst_9stg'.
        pretrained_model_path: optional checkpoint path; 'module.' prefixes
            (left by DataParallel) are stripped before loading.

    Returns:
        The model on CUDA; for 'hdnet', a (model, fdl_loss) tuple.

    Raises:
        ValueError: if `method` is unknown. (Fix: previously the error was
        only printed, leading to a confusing NameError on `model` below.)
    """
    if method == 'mst_s':
        model = MST(dim=28, stage=2, num_blocks=[2, 2, 2]).cuda()
    elif method == 'mst_m':
        model = MST(dim=28, stage=2, num_blocks=[2, 4, 4]).cuda()
    elif method == 'mst_l':
        model = MST(dim=28, stage=2, num_blocks=[4, 7, 5]).cuda()
    elif method == 'gap_net':
        model = GAP_net().cuda()
    elif method == 'admm_net':
        model = ADMM_net().cuda()
    elif method == 'tsa_net':
        model = TSA_Net().cuda()
    elif method == 'hdnet':
        model = HDNet().cuda()
        # HDNet trains with an auxiliary frequency-domain loss.
        fdl_loss = FDL(loss_weight=0.7,
                       alpha=2.0,
                       patch_factor=4,
                       ave_spectrum=True,
                       log_matrix=True,
                       batch_matrix=True,
                       ).cuda()
    elif method == 'dgsmp':
        model = HSI_CS(Ch=28, stages=4).cuda()
    elif method == 'birnat':
        model = BIRNAT().cuda()
    elif method == 'mst_plus_plus':
        model = MST_Plus_Plus(in_channels=28, out_channels=28, n_feat=28, stage=3).cuda()
    elif method == 'lambda_net':
        model = Lambda_Net(out_ch=28).cuda()
    elif method == 'cst_s':
        model = CST(num_blocks=[1, 1, 2], sparse=True).cuda()
    elif method == 'cst_m':
        model = CST(num_blocks=[2, 2, 2], sparse=True).cuda()
    elif method == 'cst_l':
        model = CST(num_blocks=[2, 4, 6], sparse=True).cuda()
    elif method == 'cst_l_plus':
        model = CST(num_blocks=[2, 4, 6], sparse=False).cuda()
    elif 'dauhst' in method:
        # e.g. 'dauhst_3stg' -> 3 unrolled iterations.
        num_iterations = int(method.split('_')[1][0])
        model = DAUHST(num_iterations=num_iterations).cuda()
    else:
        raise ValueError(f'Method {method} is not defined !!!!')
    if pretrained_model_path is not None:
        print(f'load model from {pretrained_model_path}')
        checkpoint = torch.load(pretrained_model_path)
        model.load_state_dict({k.replace('module.', ''): v for k, v in checkpoint.items()},
                              strict=True)
    if method == 'hdnet':
        return model, fdl_loss
    return model
MST | MST-main/simulation/test_code/architecture/HDNet.py | import torch
import torch.nn as nn
import math
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """'Same'-padded Conv2d factory used as the default conv throughout HDNet."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, bias=bias)
class MeanShift(nn.Conv2d):
    """Frozen 1x1 conv that subtracts (sign=-1) or adds back (sign=+1) the
    per-channel RGB mean, scaled by rgb_range and normalized by rgb_std."""
    def __init__(
        self, rgb_range,
        rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        mean = torch.Tensor(rgb_mean)
        # Identity mapping per channel, divided by the channel std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * mean / std
        # This layer is a fixed transform: freeze all parameters.
        self.requires_grad_(False)
class BasicBlock(nn.Sequential):
    """conv (+ optional BatchNorm) (+ optional activation) as nn.Sequential.

    NOTE(review): `stride` is accepted but never forwarded to `conv`.
    """
    def __init__(
        self, conv, in_channels, out_channels, kernel_size, stride=1, bias=False,
        bn=True, act=nn.ReLU(True)):
        layers = [conv(in_channels, out_channels, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """Residual block: x + res_scale * (conv [+BN] -> act -> conv [+BN])(x)."""
    def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for idx in range(2):
            layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feats))
            if idx == 0:
                layers.append(act)  # activation only between the two convs
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale
    def forward(self, x):
        # Scale the residual branch, then add the identity shortcut.
        residual = self.body(x) * self.res_scale
        return residual + x
class Upsampler(nn.Sequential):
    """Pixel-shuffle upsampler; supports power-of-two scales (stacked x2 stages)
    and x3, raises NotImplementedError otherwise."""

    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        def stage(shuffle, expand):
            # one conv + PixelShuffle stage, with optional BN / activation
            parts = [conv(n_feats, expand * n_feats, 3, bias),
                     nn.PixelShuffle(shuffle)]
            if bn:
                parts.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                parts.append(nn.ReLU(True))
            elif act == 'prelu':
                parts.append(nn.PReLU(n_feats))
            return parts

        layers = []
        if (scale & (scale - 1)) == 0:  # power of two
            for _ in range(int(math.log(scale, 2))):
                layers += stage(2, 4)
        elif scale == 3:
            layers += stage(3, 9)
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*layers)
_NORM_BONE = False
def constant_init(module, val, bias=0):
    """Fill module.weight with `val` and module.bias with `bias`, when present."""
    if getattr(module, 'weight', None) is not None:
        nn.init.constant_(module.weight, val)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialise module.weight (uniform or normal flavour) and set
    module.bias (if any) to the constant `bias`."""
    assert distribution in ['uniform', 'normal']
    initializer = (nn.init.kaiming_uniform_ if distribution == 'uniform'
                   else nn.init.kaiming_normal_)
    initializer(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
# depthwise-separable convolution (DSC)
class DSC(nn.Module):
    """Spatial attention branch built from a depthwise 1x1 conv, max pooling and
    a pointwise conv; produces a single-channel spatial softmax map that
    re-weights the input, with a residual (identity) addition."""

    def __init__(self, nin: int) -> None:
        super(DSC, self).__init__()
        self.conv_dws = nn.Conv2d(
            nin, nin, kernel_size=1, stride=1, padding=0, groups=nin
        )
        self.bn_dws = nn.BatchNorm2d(nin, momentum=0.9)
        self.relu_dws = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.conv_point = nn.Conv2d(
            nin, 1, kernel_size=1, stride=1, padding=0, groups=1
        )
        self.bn_point = nn.BatchNorm2d(1, momentum=0.9)
        self.relu_point = nn.ReLU(inplace=False)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # depthwise 1x1 -> BN -> ReLU -> 3x3 max pool (spatial size preserved)
        attn = self.relu_dws(self.bn_dws(self.conv_dws(x)))
        attn = self.maxpool(attn)
        # pointwise reduction to a single attention channel
        attn = self.relu_point(self.bn_point(self.conv_point(attn)))
        # softmax over all spatial positions of the single-channel map
        b, c, h, w = attn.shape
        attn = self.softmax(attn.view(b, c, -1)).view(b, c, h, w)
        # broadcast the map across channels, modulate x, keep a residual path
        attn = attn.expand(x.shape[0], x.shape[1], x.shape[2], x.shape[3])
        return torch.mul(attn, x) + x
# Efficient Feature Fusion(EFF)
class EFF(nn.Module):
    """Split channels into `num_splits` groups, apply an independent DSC
    attention branch to each group, and concatenate the results back."""

    def __init__(self, nin: int, nout: int, num_splits: int) -> None:
        super(EFF, self).__init__()
        assert nin % num_splits == 0
        self.nin = nin
        self.nout = nout  # NOTE(review): stored but never read in forward — confirm intended
        self.num_splits = num_splits
        self.subspaces = nn.ModuleList(
            [DSC(int(self.nin / self.num_splits)) for _ in range(self.num_splits)]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        chunks = torch.chunk(x, self.num_splits, dim=1)
        fused = [branch(chunk) for branch, chunk in zip(self.subspaces, chunks)]
        return torch.cat(fused, dim=1)
# spatial-spectral domain attention learning(SDL)
class SDL_attention(nn.Module):
    """Parallel spatial + spectral attention over a feature map.

    The "right" path (spatial_attention) builds a per-channel gate from a
    spatial-softmax context; the "left" path (spectral_attention) builds a
    per-position gate from a channel-softmax context. forward() sums the two
    gated copies of the input.
    """

    def __init__(self, inplanes, planes, kernel_size=1, stride=1):
        super(SDL_attention, self).__init__()
        self.inplanes = inplanes
        self.inter_planes = planes // 2  # bottleneck width used by both paths
        self.planes = planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = (kernel_size-1)//2
        # spatial ("right") path: value projection + 1-channel query + channel up-projection
        self.conv_q_right = nn.Conv2d(self.inplanes, 1, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_v_right = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_up = nn.Conv2d(self.inter_planes, self.planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.softmax_right = nn.Softmax(dim=2)
        self.sigmoid = nn.Sigmoid()
        # spectral ("left") path
        self.conv_q_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #g
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_v_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #theta
        self.softmax_left = nn.Softmax(dim=2)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-initialise the four projection convs and flag them as inited."""
        kaiming_init(self.conv_q_right, mode='fan_in')
        kaiming_init(self.conv_v_right, mode='fan_in')
        kaiming_init(self.conv_q_left, mode='fan_in')
        kaiming_init(self.conv_v_left, mode='fan_in')
        self.conv_q_right.inited = True
        self.conv_v_right.inited = True
        self.conv_q_left.inited = True
        self.conv_v_left.inited = True

    # HR spatial attention
    def spatial_attention(self, x):
        """Gate x channel-wise using a context pooled with a spatial softmax."""
        input_x = self.conv_v_right(x)
        batch, channel, height, width = input_x.size()
        # (B, C', H*W) value matrix
        input_x = input_x.view(batch, channel, height * width)
        context_mask = self.conv_q_right(x)
        context_mask = context_mask.view(batch, 1, height * width)
        # softmax over spatial positions
        context_mask = self.softmax_right(context_mask)
        # (B, C', 1): spatially weighted average of the values
        context = torch.matmul(input_x, context_mask.transpose(1,2))
        context = context.unsqueeze(-1)
        # up-project to `planes` channels, then sigmoid -> per-channel gate
        context = self.conv_up(context)
        mask_ch = self.sigmoid(context)
        out = x * mask_ch
        return out

    # HR spectral attention
    def spectral_attention(self, x):
        """Gate x position-wise using a channel-averaged query against theta keys."""
        g_x = self.conv_q_left(x)
        batch, channel, height, width = g_x.size()
        # global average pool of g -> (B, 1, C') query
        avg_x = self.avg_pool(g_x)
        batch, channel, avg_x_h, avg_x_w = avg_x.size()
        avg_x = avg_x.view(batch, channel, avg_x_h * avg_x_w).permute(0, 2, 1)
        theta_x = self.conv_v_left(x).view(batch, self.inter_planes, height * width)
        # (B, 1, H*W): similarity of each position to the pooled query
        context = torch.matmul(avg_x, theta_x)
        # softmax over spatial positions, then sigmoid -> per-position gate
        context = self.softmax_left(context)
        context = context.view(batch, 1, height, width)
        mask_sp = self.sigmoid(context)
        out = x * mask_sp
        return out

    def forward(self, x):
        """Sum of the spectrally gated and spatially gated copies of x."""
        context_spectral = self.spectral_attention(x)
        context_spatial = self.spatial_attention(x)
        out = context_spatial + context_spectral
        return out
class HDNet(nn.Module):
    """HDNet reconstruction backbone: head conv -> residual body (16 ResBlocks,
    then SDL attention + EFF fusion, then 15 more ResBlocks and a conv) with a
    global skip connection -> tail conv back to the output spectrum."""

    def __init__(self, in_ch=28, out_ch=28, conv=default_conv):
        super(HDNet, self).__init__()
        n_resblocks = 16
        n_feats = 64
        kernel_size = 3
        act = nn.ReLU(True)

        # head: lift the input spectrum into the feature space
        self.head = nn.Sequential(conv(in_ch, n_feats, kernel_size))

        # body: residual blocks with attention/fusion inserted in the middle
        body = [ResBlock(conv, n_feats, kernel_size, act=act, res_scale=1)
                for _ in range(n_resblocks)]
        body.append(SDL_attention(inplanes=n_feats, planes=n_feats))
        body.append(EFF(nin=n_feats, nout=n_feats, num_splits=4))
        body.extend(ResBlock(conv, n_feats, kernel_size, act=act, res_scale=1)
                    for _ in range(1, n_resblocks))
        body.append(conv(n_feats, n_feats, kernel_size))
        self.body = nn.Sequential(*body)

        # tail: project features back to the output channel count
        self.tail = nn.Sequential(conv(n_feats, out_ch, kernel_size))

    def forward(self, x, input_mask=None):
        # input_mask is accepted for interface compatibility but unused here
        feats = self.head(x)
        res = self.body(feats)
        res += feats  # global residual connection
        return self.tail(res)
# frequency domain learning(FDL)
class FDL(nn.Module):
    """Focal-frequency-style loss between the 2D FFT spectra of prediction and target.

    Args:
        loss_weight: scalar multiplier applied to the final loss.
        alpha: exponent sharpening the spectrum weight matrix.
        patch_factor: split each image into patch_factor**2 patches before the FFT.
        ave_spectrum: average spectra over the batch dimension first.
        log_matrix: apply log(1 + w) to the weight matrix.
        batch_matrix: normalise the weight matrix by its global max instead of
            a per-image max.
    """

    def __init__(self, loss_weight=1.0, alpha=1.0, patch_factor=1, ave_spectrum=False, log_matrix=False, batch_matrix=False):
        super(FDL, self).__init__()
        self.loss_weight = loss_weight
        self.alpha = alpha
        self.patch_factor = patch_factor
        self.ave_spectrum = ave_spectrum
        self.log_matrix = log_matrix
        self.batch_matrix = batch_matrix

    def tensor2freq(self, x):
        """Return per-patch 2D FFTs of x as a real tensor (..., H, W, 2)."""
        patch_factor = self.patch_factor
        _, _, h, w = x.shape
        assert h % patch_factor == 0 and w % patch_factor == 0, (
            'Patch factor should be divisible by image height and width')
        patch_list = []
        patch_h = h // patch_factor
        patch_w = w // patch_factor
        for i in range(patch_factor):
            for j in range(patch_factor):
                patch_list.append(x[:, :, i * patch_h:(i + 1) * patch_h, j * patch_w:(j + 1) * patch_w])
        y = torch.stack(patch_list, 1)
        # BUGFIX: torch.rfft was removed in PyTorch 1.8. Keep the legacy call on
        # old installs, otherwise use the torch.fft API: norm='ortho' matches
        # normalized=True, and stacking real/imag reproduces the old
        # onesided=False layout with a trailing dimension of size 2.
        if hasattr(torch, 'rfft'):
            return torch.rfft(y, 2, onesided=False, normalized=True)
        freq = torch.fft.fft2(y, norm='ortho')
        return torch.stack([freq.real, freq.imag], -1)

    def loss_formulation(self, recon_freq, real_freq, matrix=None):
        """Weighted squared distance between spectra; the weight matrix is either
        supplied or derived from the (normalised, clamped) spectrum error."""
        if matrix is not None:
            weight_matrix = matrix.detach()
        else:
            matrix_tmp = (recon_freq - real_freq) ** 2
            # magnitude of the complex error, raised to alpha
            matrix_tmp = torch.sqrt(matrix_tmp[..., 0] + matrix_tmp[..., 1]) ** self.alpha
            if self.log_matrix:
                matrix_tmp = torch.log(matrix_tmp + 1.0)
            if self.batch_matrix:
                matrix_tmp = matrix_tmp / matrix_tmp.max()
            else:
                matrix_tmp = matrix_tmp / matrix_tmp.max(-1).values.max(-1).values[:, :, :, None, None]
            # 0/0 divisions above produce NaN; treat them as zero weight
            matrix_tmp[torch.isnan(matrix_tmp)] = 0.0
            matrix_tmp = torch.clamp(matrix_tmp, min=0.0, max=1.0)
            weight_matrix = matrix_tmp.clone().detach()
        assert weight_matrix.min().item() >= 0 and weight_matrix.max().item() <= 1, (
            'The values of spectrum weight matrix should be in the range [0, 1], '
            'but got Min: %.10f Max: %.10f' % (weight_matrix.min().item(), weight_matrix.max().item()))
        tmp = (recon_freq - real_freq) ** 2
        freq_distance = tmp[..., 0] + tmp[..., 1]
        loss = weight_matrix * freq_distance
        return torch.mean(loss)

    def forward(self, pred, target, matrix=None, **kwargs):
        """Compute the frequency-domain loss between `pred` and `target` (N, C, H, W)."""
        pred_freq = self.tensor2freq(pred)
        target_freq = self.tensor2freq(target)
        if self.ave_spectrum:
            pred_freq = torch.mean(pred_freq, 0, keepdim=True)
            target_freq = torch.mean(target_freq, 0, keepdim=True)
        return self.loss_formulation(pred_freq, target_freq, matrix) * self.loss_weight
| 12,665 | 33.048387 | 132 | py |
USCL | USCL-main/train_USCL/simclr.py | import torch
from models.resnet_simclr import ResNetSimCLR
# from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from loss.nt_xent import NTXentLoss
import os
import shutil
import sys
import time
import torch.nn as nn
apex_support = False
try:
sys.path.append('./apex')
from apex import amp
print("Apex on, run on mixed precision.")
apex_support = True
except:
print("Please install apex for mixed precision training from: https://github.com/NVIDIA/apex")
apex_support = False
import numpy as np
torch.manual_seed(2)
def _save_config_file(model_checkpoints_folder):
if not os.path.exists(model_checkpoints_folder):
os.makedirs(model_checkpoints_folder)
shutil.copy('./config.yaml', os.path.join(model_checkpoints_folder, 'config.yaml'))
def FindNotX(list, x):
    """Return the indices of all entries in `list` that differ from `x`.

    Used by the trainer to select labelled samples (x=9999 marks "no label").
    NOTE(review): the parameter name shadows the builtin ``list``; kept as-is
    for interface compatibility.
    """
    return [i for i, item in enumerate(list) if item != x]
class SimCLR(object):
    """Semi-supervised SimCLR trainer.

    Combines the contrastive NT-Xent loss on two augmented views with
    cross-entropy terms (weighted by `lumbda`) computed only on the labelled
    samples of each branch (label 9999 marks an unlabelled sample).
    """

    def __init__(self, dataset, config, lumbda, Checkpoint_Num):
        # lumbda weights the two supervised CE terms added to the contrastive loss
        self.lumbda1 = lumbda
        self.lumbda2 = lumbda
        self.config = config
        self.Checkpoint_Num = Checkpoint_Num
        print('\nThe configurations of this model are in the following:\n', config)
        self.device = self._get_device()
        # self.writer = SummaryWriter()
        self.dataset = dataset
        self.nt_xent_criterion = NTXentLoss(self.device, config['batch_size'], **config['loss'])

    def _get_device(self):
        """Pick cuda when available (printing device info), else cpu."""
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("\nRunning on:", device)
        if device == 'cuda':
            device_name = torch.cuda.get_device_name()
            print("The device name is:", device_name)
            cap = torch.cuda.get_device_capability(device=None)
            print("The capability of this device is:", cap, '\n')
        return device

    def _step(self, model, xis, xjs):
        """Contrastive NT-Xent loss between the projections of the two views."""
        # get the representations and the projections
        ris, zis, labelis = model(xis)  # [N,C]
        # get the representations and the projections
        rjs, zjs, labeljs = model(xjs)  # [N,C]
        # normalize projection feature vectors
        zis = F.normalize(zis, dim=1)
        zjs = F.normalize(zjs, dim=1)
        loss = self.nt_xent_criterion(zis, zjs)
        return loss

    def train(self):
        """Full training loop: semi-supervised loss, periodic validation,
        best-model checkpointing, cosine LR schedule after a 10-epoch warmup."""
        train_loader, valid_loader = self.dataset.get_data_loaders()
        model = ResNetSimCLR(**self.config["model"]).to(self.device)
        model = self._load_pre_trained_weights(model)
        criterion = nn.CrossEntropyLoss()  # loss function
        # NOTE(review): eval() on a config string is risky if the config is not
        # trusted; consider float(self.config['weight_decay']) instead.
        optimizer = torch.optim.Adam(model.parameters(), 3e-4, weight_decay=eval(self.config['weight_decay']))
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.config['epochs'], eta_min=0,
                                                               last_epoch=-1)
        if apex_support and self.config['fp16_precision']:
            model, optimizer = amp.initialize(model, optimizer,
                                              opt_level='O2',
                                              keep_batchnorm_fp32=True)
        model_checkpoints_folder = os.path.join(
            '/home/zhangchunhui/MedicalAI/USCL/checkpoints_multi_aug',
            'checkpoint_' + str(self.Checkpoint_Num))
        # save config file
        _save_config_file(model_checkpoints_folder)
        start_time = time.time()
        end_time = time.time()
        valid_n_iter = 0
        best_valid_loss = np.inf
        for epoch in range(self.config['epochs']):
            for i, data in enumerate(train_loader, 1):
                # forward
                # mixupimg1, label1, mixupimg2, label2, original img1, original img2
                xis, labelis, xjs, labeljs, imgis, imgjs = data  # N samples of left branch, N samples of right branch
                xis = xis.to(self.device)
                xjs = xjs.to(self.device)
                ####### 1-Semi-supervised
                hi, xi, outputis = model(xis)
                hj, xj, outputjs = model(xjs)
                # cross-entropy is computed only on samples that carry a label
                labelindexi, labelindexj = FindNotX(labelis.tolist(), 9999), FindNotX(labeljs.tolist(), 9999)  # X=9999=no label
                lossi = criterion(outputis[labelindexi], labelis.to(self.device)[labelindexi])
                lossj = criterion(outputjs[labelindexj], labeljs.to(self.device)[labelindexj])
                # lumbda1=lumbda2   # small value is better
                lumbda1, lumbda2 = self.lumbda1, self.lumbda2  # small value is better
                loss = self._step(model, xis, xjs) + lumbda1 * lossi + lumbda2 * lossj
                ########################################################################################################
                ####### 2-Self-supervised
                # loss = self._step(model, xis, xjs)
                ########################################################################################################
                # backward
                optimizer.zero_grad()
                if apex_support and self.config['fp16_precision']:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                # update weights
                optimizer.step()
                if i % self.config['log_every_n_steps'] == 0:
                    # self.writer.add_scalar('train_loss', loss, global_step=i)
                    start_time, end_time = end_time, time.time()
                    print("\nTraining:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Time: {:.2f}s".format(
                        epoch + 1, self.config['epochs'], i, len(train_loader), loss, end_time - start_time))
            # validate the model if requested
            if epoch % self.config['eval_every_n_epochs'] == 0:
                start_time = time.time()
                valid_loss = self._validate(model, valid_loader)
                end_time = time.time()
                if valid_loss < best_valid_loss:
                    # save the model weights
                    best_valid_loss = valid_loss
                    torch.save(model.state_dict(), os.path.join(model_checkpoints_folder, 'best_model.pth'))
                print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Time: {:.2f}s".format(
                    epoch + 1, self.config['epochs'], len(valid_loader), len(valid_loader), valid_loss,
                    end_time - start_time))
                # self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
                valid_n_iter += 1
            print('Learning rate this epoch:', scheduler.get_last_lr()[0])  # python >=3.7
            # print('Learning rate this epoch:', scheduler.base_lrs[0])   # python 3.6
            # warmup for the first 10 epochs
            if epoch >= 10:
                scheduler.step()
                # self.writer.add_scalar('cosine_lr_decay', scheduler.get_lr()[0], global_step=i)

    def _load_pre_trained_weights(self, model):
        """Try to warm-start from ./runs/<fine_tune_from>/checkpoints/best_model.pth;
        fall back to training from scratch when the file is missing."""
        try:
            checkpoints_folder = os.path.join('./runs', self.config['fine_tune_from'], 'checkpoints')
            state_dict = torch.load(os.path.join(checkpoints_folder, 'best_model.pth'))
            model.load_state_dict(state_dict)
            print("Loaded pre-trained model with success.")
        except FileNotFoundError:
            print("Pre-trained weights not found. Training from scratch.")
        return model

    def _validate(self, model, valid_loader):
        """Average the contrastive loss over the validation loader using the
        ORIGINAL (non-augmented) image pairs; restores train mode afterwards."""
        # validation steps
        with torch.no_grad():
            model.eval()
            valid_loss = 0.0
            counter = 0
            for xis, labelis, xjs, labeljs, imgis, imgjs in valid_loader:
                ## 1. original images
                xis = imgis.to(self.device)
                xjs = imgjs.to(self.device)
                loss = self._step(model, xis, xjs)
                valid_loss += loss.item()
                counter += 1
                ## 2. augmented images
                # xis = xis.to(self.device)
                # xjs = xjs.to(self.device)
                #
                # loss = self._step(model, xis, xjs)
                # valid_loss += loss.item()
                # counter += 1
            valid_loss /= (counter + 1e-6)  # in case 0
            model.train()
        return valid_loss
| 8,478 | 37.716895 | 127 | py |
USCL | USCL-main/train_USCL/linear_eval.py | import os
import yaml
import pickle
import torch
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
import importlib.util
##################################### Settings ####################################
# NOTE(review): all paths below are hard-coded absolute paths to one user's
# machine — consider making them CLI arguments.
fold = 1
self_supervised_pretrained = True
model_path = '/home/zhangchunhui/WorkSpace/SSL/checkpoints_multi_aug/checkpoint_resnet18/best_model.pth'
out_dim = 256
base_model = 'resnet18'
pretrained = False  # whether to load ImageNet-pretrained weights at model init
batch_size = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using device:", device)
checkpoints_folder = '/home/zhangchunhui/WorkSpace/SSL/checkpoints_multi_aug/checkpoint_resnet18/'
# NOTE(review): yaml.Loader can construct arbitrary objects; yaml.safe_load is
# preferable if the config file is not fully trusted.
config = yaml.load(open(os.path.join(checkpoints_folder, "config.yaml"), "r"), Loader=yaml.Loader)
print("\nConfig:\n", config)
###############################################################################
# load covid ultrasound data
with open('/home/zhangchunhui/WorkSpace/SSL/covid_5_fold/covid_data{}.pkl'.format(fold), 'rb') as f:
    X_train, y_train, X_test, y_test = pickle.load(f)
def linear_model_eval(X_train, y_train, X_test, y_test):
    """Fit a logistic-regression and a 10-NN classifier on frozen features and
    print train/test accuracies for both."""
    logreg = LogisticRegression(random_state=0, max_iter=1200, solver='lbfgs', C=1.0)
    logreg.fit(X_train, y_train)
    print("\nLogistic Regression feature eval")
    print("Train score:", logreg.score(X_train, y_train))
    print("Test score:", logreg.score(X_test, y_test))

    print("-------------------------------")
    knn = KNeighborsClassifier(n_neighbors=10)
    knn.fit(X_train, y_train)
    print("KNN feature eval")
    print("Train score:", knn.score(X_train, y_train))
    print("Test score:", knn.score(X_test, y_test))
def next_batch(X, y, batch_size, dtype):
    """Yield (X_batch, y_batch) tensor pairs on the module-level `device`,
    scaling image values from [0, 255] to [0, 1]."""
    total = X.shape[0]
    for start in range(0, total, batch_size):
        stop = start + batch_size
        # must convert data type to type of weights
        xb = torch.tensor(X[start:stop], dtype=dtype) / 255.
        yb = torch.tensor(y[start:stop])
        yield xb.to(device), yb.to(device)
################################ Define model & parameters #################################
# Load the neural net module
# NOTE(review): '/models/resnet_simclr_copy.py' is an absolute path rooted at
# filesystem root — this looks like it should be a path relative to the repo.
spec = importlib.util.spec_from_file_location("model", '/models/resnet_simclr_copy.py')
resnet_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(resnet_module)
model = resnet_module.ResNetSimCLR(base_model, out_dim, pretrained)
model.eval()
state_dict = torch.load(model_path, map_location=torch.device('cpu'))
# infer the parameter dtype from the first tensor in the checkpoint
weight_dtype = state_dict[list(state_dict.keys())[0]].dtype
if self_supervised_pretrained:
    print('Load self-supervised model parameters.')
    model.load_state_dict(state_dict)
model = model.to(device)
################################ Training-set features ##################################
X_train_feature = []
# NOTE(review): the 2-tuple unpack assumes the copied module's forward returns
# (features, logits); the ResNetSimCLR in this repo returns 3 values — confirm.
for batch_x, batch_y in next_batch(X_train, y_train, batch_size=batch_size, dtype=weight_dtype):
    features, _ = model(batch_x)  # keep only the feature vector, not the projection-head logits
    X_train_feature.extend(features.cpu().detach().numpy())
X_train_feature = np.array(X_train_feature)
print("Train features")
print(X_train_feature.shape)
################################ Test-set features ##################################
X_test_feature = []
for batch_x, batch_y in next_batch(X_test, y_test, batch_size=batch_size, dtype=weight_dtype):
    features, _ = model(batch_x)
    X_test_feature.extend(features.cpu().detach().numpy())
X_test_feature = np.array(X_test_feature)
print("Test features")
print(X_test_feature.shape)
################################## Evaluate features ###################################
scaler = preprocessing.StandardScaler()
scaler.fit(X_train_feature)  # fit the mean/variance used to standardise the features
linear_model_eval(scaler.transform(X_train_feature), y_train, scaler.transform(X_test_feature), y_test)
del X_train_feature
del X_test_feature
################################# 计算总准确率 #################################
def cal_total_acc(acc_list, weights=(476, 369, 487, 360, 424)):
    """Return the sample-weighted mean accuracy over cross-validation folds.

    Args:
        acc_list: per-fold accuracies.
        weights: number of samples in each fold; the defaults are the fold
            sizes of the 5-fold COVID ultrasound split (2116 samples total),
            matching the previously hard-coded constants.
    """
    assert len(acc_list) == len(weights), 'one weight per fold is required'
    return sum(a * w for a, w in zip(acc_list, weights)) / sum(weights)
| 4,071 | 26.890411 | 104 | py |
USCL | USCL-main/train_USCL/NMI_loss.py | # -*- coding:utf-8 -*-
'''
Created on 2017年10月28日
@summary: 利用Python实现NMI计算
@author: dreamhome
'''
import math
import numpy as np
from sklearn import metrics
import time
import random
import torch
def MILoss(TensorA=None, TensorB=None):
    """Mean normalized mutual information between the channel-wise, spatially
    averaged activations of two 4-D tensors (batch, channels, H, W).

    Returns the average as a 0-d torch tensor.
    """
    a = np.array(TensorA)
    b = np.array(TensorB)
    # collapse both spatial axes -> (batch, channels)
    a_mean = np.mean(np.mean(a, axis=2), axis=2)
    b_mean = np.mean(np.mean(b, axis=2), axis=2)
    n_channels = a_mean.shape[1]
    total = 0
    for c in range(n_channels):
        total += metrics.normalized_mutual_info_score(a_mean[:, c], b_mean[:, c])
    out = total / n_channels
    return torch.from_numpy(np.asarray(out))
def NMI(A,B):
    """Normalized mutual information 2*I(A;B)/(H(A)+H(B)) of two label arrays."""
    # number of samples
    total = len(A)
    labels_a = set(A)
    labels_b = set(B)
    eps = 1.4e-45  # guards log(0)
    # mutual information
    MI = 0
    for la in labels_a:
        occ_a = np.where(A == la)
        px = 1.0 * len(occ_a[0]) / total
        for lb in labels_b:
            occ_b = np.where(B == lb)
            joint = np.intersect1d(occ_a, occ_b)
            py = 1.0 * len(occ_b[0]) / total
            pxy = 1.0 * len(joint) / total
            MI = MI + pxy * math.log(pxy / (px * py) + eps, 2)

    def entropy(labels, ids):
        # Shannon entropy of the label distribution, in bits
        h = 0
        for l in ids:
            p = 1.0 * len(np.where(labels == l)[0]) / total
            h = h - p * math.log(p + eps, 2)
        return h

    # normalize by the mean of the two entropies
    return 2.0 * MI / (entropy(A, labels_a) + entropy(B, labels_b))
if __name__ == '__main__':
    # quick sanity check: hand-rolled NMI vs sklearn's implementation
    A = np.array([1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3])
    B = np.array([1,2,1,1,1,1,1,2,2,2,2,3,1,1,3,3,3])
    print(NMI(A,B))
    print(metrics.normalized_mutual_info_score(A,B))
    # NOTE(review): MILoss() with no arguments wraps None in np.array (a 0-d
    # array), so the np.mean(..., axis=2) inside will raise — this demo call
    # cannot succeed as written.
    result = MILoss()
    print(result)
USCL | USCL-main/train_USCL/models/model_resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.nn import init
from .cbam import *
from .bam import *
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18_cbam', 'resnet34_cbam', 'resnet50_cbam', 'resnet101_cbam',
'resnet152_cbam']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution, padding 1, no bias (BatchNorm follows in the blocks)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Standard two-conv residual block with an optional CBAM attention module
    applied to the residual branch before the skip addition."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_cbam=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.cbam = CBAM(planes, 16) if use_cbam else None

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.cbam is not None:
            out = self.cbam(out)
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (channel expansion 4) with an
    optional CBAM attention module on the residual branch."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_cbam=False):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.cbam = CBAM(planes * 4, 16) if use_cbam else None

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.cbam is not None:
            out = self.cbam(out)
        return self.relu(out + identity)
####################################################################
## network_type = "ImageNet", num_classes = 2, att_type=None (CBAM)
class ResNet(nn.Module):
    """ResNet backbone with optional attention: BAM modules between stages
    (att_type='BAM') or CBAM inside each residual block (att_type='CBAM').

    network_type selects the ImageNet stem (7x7 conv, maxpool, 7x7 avgpool)
    or the CIFAR stem (3x3 conv, 4x4 avg pool in forward).
    """

    def __init__(self, block, layers, network_type="ImageNet", num_classes=2, att_type="CBAM"):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.network_type = network_type
        # different model config between ImageNet and CIFAR
        if network_type == "ImageNet":
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.avgpool = nn.AvgPool2d(7)
        else:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # BAM attention sits between residual stages; unused slots stay None
        if att_type=='BAM':
            self.bam1 = BAM(64*block.expansion)
            self.bam2 = BAM(128*block.expansion)
            self.bam3 = BAM(256*block.expansion)
        else:
            self.bam1, self.bam2, self.bam3 = None, None, None
        self.layer1 = self._make_layer(block, 64, layers[0], att_type=att_type)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, att_type=att_type)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, att_type=att_type)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, att_type=att_type)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        init.kaiming_normal_(self.fc.weight)
        # Re-initialise tensors by name: Kaiming for conv weights, BN weights to 1
        # ("SpatialGate" BN weights are zeroed instead — presumably so the BAM
        # gate starts closed; TODO confirm), all biases to 0.
        # NOTE(review): writing through self.state_dict()[key][...] mutates the
        # underlying parameters in place.
        for key in self.state_dict():
            if key.split('.')[-1]=="weight":
                if "conv" in key:
                    init.kaiming_normal_(self.state_dict()[key], mode='fan_out')
                if "bn" in key:
                    if "SpatialGate" in key:
                        self.state_dict()[key][...] = 0
                    else:
                        self.state_dict()[key][...] = 1
            elif key.split(".")[-1]=='bias':
                self.state_dict()[key][...] = 0

    def _make_layer(self, block, planes, blocks, stride=1, att_type=None):
        """Stack `blocks` residual blocks for one stage, adding a 1x1-conv+BN
        downsample path when the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, use_cbam=att_type=='CBAM'))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, use_cbam=att_type=='CBAM'))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> 4 stages (BAM applied between stages when enabled) -> pool -> fc."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if self.network_type == "ImageNet":
            x = self.maxpool(x)
        x = self.layer1(x)
        if not self.bam1 is None:
            x = self.bam1(x)
        x = self.layer2(x)
        if not self.bam2 is None:
            x = self.bam2(x)
        x = self.layer3(x)
        if not self.bam3 is None:
            x = self.bam3(x)
        x = self.layer4(x)
        if self.network_type == "ImageNet":
            x = self.avgpool(x)
        else:
            x = F.avg_pool2d(x, 4)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def ResidualNet(network_type, depth, num_classes, att_type):
    """Build a ResNet-{18,34,50,101} with the requested attention type."""
    assert network_type in ["ImageNet", "CIFAR10", "CIFAR100"], "network type should be ImageNet or CIFAR10 / CIFAR100"
    assert depth in [18, 34, 50, 101], 'network depth should be 18, 34, 50 or 101'

    # (block type, per-stage block counts) for each supported depth
    configs = {
        18: (BasicBlock, [2, 2, 2, 2]),
        34: (BasicBlock, [3, 4, 6, 3]),
        50: (Bottleneck, [3, 4, 6, 3]),
        101: (Bottleneck, [3, 4, 23, 3]),
    }
    block, layers = configs[depth]
    return ResNet(block, layers, network_type, num_classes, att_type)
def _load_imagenet_weights(model, arch):
    """Overlay ImageNet-pretrained weights for `arch` onto `model`.

    Shared helper for the five resnet*_cbam factories below (they previously
    repeated this loading code verbatim).
    NOTE(review): the pretrained dict also contains the 1000-way fc weights, so
    strict loading will fail whenever the model's num_classes differs from
    1000 — confirm callers only pass pretrained=True with num_classes=1000.
    """
    pretrained_state_dict = model_zoo.load_url(model_urls[arch])
    now_state_dict = model.state_dict()
    now_state_dict.update(pretrained_state_dict)
    model.load_state_dict(now_state_dict)
    return model

def resnet18_cbam(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model = _load_imagenet_weights(model, 'resnet18')
    return model

def resnet34_cbam(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model = _load_imagenet_weights(model, 'resnet34')
    return model

def resnet50_cbam(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model = _load_imagenet_weights(model, 'resnet50')
    return model

def resnet101_cbam(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model = _load_imagenet_weights(model, 'resnet101')
    return model

def resnet152_cbam(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model = _load_imagenet_weights(model, 'resnet152')
    return model
USCL | USCL-main/train_USCL/models/resnet_simclr.py | import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from .model_resnet import resnet18_cbam, resnet50_cbam
class ResNetSimCLR(nn.Module):
    ''' The ResNet feature extractor + projection head for SimCLR '''

    def __init__(self, base_model, out_dim, pretrained=False):
        super(ResNetSimCLR, self).__init__()
        use_CBAM = False  # use CBAM or not, att_type="CBAM" or None
        # Map each supported backbone name to a zero-argument constructor so
        # only the requested network is built. (The previous dict instantiated
        # BOTH resnet18 and resnet50 eagerly — downloading two sets of
        # ImageNet weights whenever pretrained=True.)
        if use_CBAM:
            self.resnet_dict = {"resnet18": lambda: resnet18_cbam(pretrained=pretrained),
                                "resnet50": lambda: resnet50_cbam(pretrained=pretrained)}
        else:
            self.resnet_dict = {"resnet18": lambda: models.resnet18(pretrained=pretrained),
                                "resnet50": lambda: models.resnet50(pretrained=pretrained)}
        if pretrained:
            print('\nImageNet pretrained parameters loaded.\n')
        else:
            print('\nRandom initialize model parameters.\n')
        resnet = self._get_basemodel(base_model)
        num_ftrs = resnet.fc.in_features
        self.features = nn.Sequential(*list(resnet.children())[:-1])  # discard the last fc layer
        # projection MLP
        self.l1 = nn.Linear(num_ftrs, num_ftrs)
        self.l2 = nn.Linear(num_ftrs, out_dim)
        #########################################################
        num_classes = 2
        # classification head applied to the projection output
        self.fc = nn.Linear(out_dim, num_classes)
        ## Mixup
        self.fc1 = nn.Linear(num_ftrs, num_ftrs)
        self.fc2 = nn.Linear(num_ftrs, num_classes)

    def _get_basemodel(self, model_name):
        """Build and return the requested backbone.

        Raises:
            ValueError: if `model_name` is not a key of resnet_dict. (The
            original code did ``raise ("...")`` which raises a *str* — that is
            itself a TypeError at runtime — and masked the real error behind a
            bare ``except``.)
        """
        try:
            builder = self.resnet_dict[model_name]
        except KeyError:
            raise ValueError("Invalid model name. Check the config file and pass one of: resnet18 or resnet50")
        print("Feature extractor:", model_name)
        return builder()

    def forward(self, x):
        """Return (backbone feature h1, projection output, classification logits)."""
        h = self.features(x)
        # NOTE(review): squeeze() also removes a batch dimension of size 1 —
        # confirm all callers guarantee batch_size > 1.
        h1 = h.squeeze()  # feature before project g()=h1
        x = self.l1(h1)
        x = F.relu(x)
        x = self.l2(x)
        ## classification: feature is the output of project g()
        c = x
        c = c.view(c.size(0), -1)
        c = self.fc(c)
        return h1, x, c  # the feature vector, the output
| 2,664 | 31.108434 | 101 | py |
USCL | USCL-main/train_USCL/models/bam.py | import torch
import math
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
    """Flatten every dimension after the batch axis: (N, C, H, W) -> (N, C*H*W)."""
    def forward(self, x):
        batch_size = x.size(0)
        flattened = x.view(batch_size, -1)
        return flattened
class ChannelGate(nn.Module):
    """Channel attention branch of BAM.

    Global-average-pools the input to per-channel statistics, passes them
    through a small MLP (C -> C//reduction_ratio [x num_layers] -> C with
    BatchNorm1d + ReLU between layers), and broadcasts the per-channel gate
    back to the input's spatial size.
    """
    def __init__(self, gate_channel, reduction_ratio=16, num_layers=1):
        super(ChannelGate, self).__init__()
        # BUG FIX: the original assigned `self.gate_activation = gate_activation`,
        # but no such name exists anywhere in scope, so constructing a
        # ChannelGate always raised NameError. The attribute was never used;
        # the line is removed.
        self.gate_c = nn.Sequential()
        self.gate_c.add_module( 'flatten', Flatten() )
        gate_channels = [gate_channel]
        gate_channels += [gate_channel // reduction_ratio] * num_layers
        gate_channels += [gate_channel]
        for i in range( len(gate_channels) - 2 ):
            self.gate_c.add_module( 'gate_c_fc_%d'%i, nn.Linear(gate_channels[i], gate_channels[i+1]) )
            self.gate_c.add_module( 'gate_c_bn_%d'%(i+1), nn.BatchNorm1d(gate_channels[i+1]) )
            self.gate_c.add_module( 'gate_c_relu_%d'%(i+1), nn.ReLU() )
        self.gate_c.add_module( 'gate_c_fc_final', nn.Linear(gate_channels[-2], gate_channels[-1]) )
    def forward(self, in_tensor):
        # squeeze spatial dims via global average pooling, gate, then expand back
        avg_pool = F.avg_pool2d( in_tensor, in_tensor.size(2), stride=in_tensor.size(2) )
        return self.gate_c( avg_pool ).unsqueeze(2).unsqueeze(3).expand_as(in_tensor)
class SpatialGate(nn.Module):
    """Spatial attention branch of BAM.

    Reduces channels with a 1x1 conv, applies a stack of dilated 3x3 convs
    (spatial size is preserved: padding == dilation), collapses to a single
    attention map, and expands it back over all input channels.
    """
    def __init__(self, gate_channel, reduction_ratio=16, dilation_conv_num=2, dilation_val=4):
        super(SpatialGate, self).__init__()
        reduced = gate_channel // reduction_ratio
        self.gate_s = nn.Sequential()
        self.gate_s.add_module('gate_s_conv_reduce0', nn.Conv2d(gate_channel, reduced, kernel_size=1))
        self.gate_s.add_module('gate_s_bn_reduce0', nn.BatchNorm2d(reduced))
        self.gate_s.add_module('gate_s_relu_reduce0', nn.ReLU())
        for idx in range(dilation_conv_num):
            self.gate_s.add_module('gate_s_conv_di_%d' % idx,
                                   nn.Conv2d(reduced, reduced, kernel_size=3,
                                             padding=dilation_val, dilation=dilation_val))
            self.gate_s.add_module('gate_s_bn_di_%d' % idx, nn.BatchNorm2d(reduced))
            self.gate_s.add_module('gate_s_relu_di_%d' % idx, nn.ReLU())
        self.gate_s.add_module('gate_s_conv_final', nn.Conv2d(reduced, 1, kernel_size=1))
    def forward(self, in_tensor):
        attention = self.gate_s(in_tensor)
        return attention.expand_as(in_tensor)
class BAM(nn.Module):
    """Bottleneck Attention Module: refine features with a joint
    channel + spatial attention map, applied residually (1 + sigmoid)."""
    def __init__(self, gate_channel):
        super(BAM, self).__init__()
        self.channel_att = ChannelGate(gate_channel)
        self.spatial_att = SpatialGate(gate_channel)
    def forward(self, in_tensor):
        combined = self.channel_att(in_tensor) * self.spatial_att(in_tensor)
        att = 1 + torch.sigmoid(combined)
        return att * in_tensor
| 2,729 | 53.6 | 147 | py |
USCL | USCL-main/train_USCL/models/cbam.py | import torch
import math
import torch.nn as nn
import torch.nn.functional as F
class BasicConv(nn.Module):
    """Conv2d optionally followed by BatchNorm2d and ReLU."""
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, groups=groups, bias=bias)
        # bn/relu are None when disabled so forward can skip them cheaply
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None
    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Flatten(nn.Module):
    """Collapse all non-batch dimensions into one: (N, ...) -> (N, -1)."""
    def forward(self, x):
        n = x.size(0)
        return x.view(n, -1)
class ChannelGate(nn.Module):
    """Channel attention of CBAM.

    Pools the input spatially (avg/max/lp/lse as selected by pool_types),
    runs each pooled descriptor through a shared MLP, sums the results,
    and rescales the input channel-wise by the sigmoid of that sum.
    """
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        hidden = gate_channels // reduction_ratio
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, gate_channels)
        )
        self.pool_types = pool_types
    def forward(self, x):
        spatial = (x.size(2), x.size(3))
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                pooled = F.avg_pool2d(x, spatial, stride=spatial)
            elif pool_type == 'max':
                pooled = F.max_pool2d(x, spatial, stride=spatial)
            elif pool_type == 'lp':
                pooled = F.lp_pool2d(x, 2, spatial, stride=spatial)
            elif pool_type == 'lse':
                # LSE pool only
                pooled = logsumexp_2d(x)
            raw = self.mlp(pooled)
            channel_att_sum = raw if channel_att_sum is None else channel_att_sum + raw
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over the flattened spatial dims.

    Input (N, C, H, W) -> output (N, C, 1): the max is subtracted before
    exponentiation to avoid overflow.
    """
    flat = tensor.view(tensor.size(0), tensor.size(1), -1)
    peak = flat.max(dim=2, keepdim=True)[0]
    return peak + (flat - peak).exp().sum(dim=2, keepdim=True).log()
class ChannelPool(nn.Module):
    """Stack channel-wise max and mean maps: (N, C, H, W) -> (N, 2, H, W)."""
    def forward(self, x):
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        mean_map = torch.mean(x, dim=1, keepdim=True)
        return torch.cat((max_map, mean_map), dim=1)
class SpatialGate(nn.Module):
    """Spatial attention of CBAM: 7x7 conv over the channel-pooled map,
    sigmoid-gated and broadcast back over all channels."""
    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1,
                                 padding=(kernel_size - 1) // 2, relu=False)
    def forward(self, x):
        pooled = self.compress(x)
        attention_map = self.spatial(pooled)
        scale = torch.sigmoid(attention_map)  # broadcasts over channels
        return x * scale
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by
    (optional) spatial attention."""
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max'], no_spatial=False):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGate()
    def forward(self, x):
        out = self.ChannelGate(x)
        if self.no_spatial:
            return out
        return self.SpatialGate(out)
| 4,038 | 37.836538 | 154 | py |
USCL | USCL-main/train_USCL/models/baseline_encoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class Encoder(nn.Module):
    """Small CNN backbone: four conv-relu-pool stages, global average
    pooling, and a 2-layer projection head.

    forward() returns (h, z): the pooled 64-d feature and its out_dim
    projection.
    """
    def __init__(self, out_dim=64):
        super(Encoder, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # projection MLP
        self.l1 = nn.Linear(64, 64)
        self.l2 = nn.Linear(64, out_dim)
    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = self.pool(F.relu(conv(x)))
        h = torch.mean(x, dim=[2, 3])  # global average pooling
        z = self.l2(F.relu(self.l1(h)))
        return h, z
| 1,184 | 24.76087 | 83 | py |
USCL | USCL-main/train_USCL/loss/nt_xent.py | import torch
import numpy as np
class NTXentLoss(torch.nn.Module):
    """Normalized-temperature cross entropy (NT-Xent) loss from SimCLR.

    Given two batches of projections zis / zjs (each [N, C]) where row k of
    one batch is the positive pair of row k in the other, every other pairing
    among the 2N concatenated samples is treated as a negative.
    """
    def __init__(self, device, batch_size, temperature, use_cosine_similarity):
        super(NTXentLoss, self).__init__()
        self.batch_size = batch_size
        self.temperature = temperature  # logits are divided by this before CE
        self.device = device
        self.softmax = torch.nn.Softmax(dim=-1)  # NOTE(review): never used in forward — candidate for removal
        self.similarity_function = self._get_similarity_function(use_cosine_similarity)
        self.criterion = torch.nn.CrossEntropyLoss(reduction="sum") # sum all 2N terms of loss instead of getting mean val
    def _get_similarity_function(self, use_cosine_similarity):
        ''' Cosine similarity or dot similarity for computing loss '''
        if use_cosine_similarity:
            self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
            return self._cosine_simililarity
        else:
            return self._dot_simililarity
    def _get_correlated_mask(self):
        """Boolean [2N, 2N] mask that is False on the main diagonal and on the
        +/-N off-diagonals (self- and positive-pair similarities), True for
        all negative-pair positions."""
        diag = np.eye(2 * self.batch_size) # I(2Nx2N), identity matrix
        l1 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=-self.batch_size) # lower diagonal matrix, N non-zero elements
        l2 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=self.batch_size) # upper diagonal matrix, N non-zero elements
        mask = torch.from_numpy((diag + l1 + l2)) # [2N, 2N], with 4N elements are non-zero
        mask = (1 - mask).type(torch.bool) # [2N, 2N], with 4(N^2 - N) elements are "True"
        return mask.to(self.device)
    @staticmethod
    def _dot_simililarity(x, y):
        v = torch.tensordot(x.unsqueeze(1), y.T.unsqueeze(0), dims=2) # extend the dimensions before calculating similarity
        # x shape: (N, 1, C)
        # y shape: (1, C, 2N)
        # v shape: (N, 2N)
        return v
    def _cosine_simililarity(self, x, y):
        # x shape: (N, 1, C), N input samples
        # y shape: (1, 2N, C), 2N output representations
        # v shape: (N, 2N)
        v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0)) # extend the dimensions before calculating similarity
        return v
    def forward(self, zis, zjs):
        """Compute the NT-Xent loss for projection batches zis, zjs ([N, C])."""
        if self.batch_size != zis.shape[0]:
            self.batch_size = zis.shape[0] # the last batch may not have the same batch size
        # mask is rebuilt every call so it tracks the (possibly updated) batch size
        self.mask_samples_from_same_repr = self._get_correlated_mask().type(torch.bool)
        representations = torch.cat([zjs, zis], dim=0) # [N, C] => [2N, C]
        similarity_matrix = self.similarity_function(representations, representations) # [2N, 2N]
        # filter out the scores from the positive samples
        l_pos = torch.diag(similarity_matrix, self.batch_size) # upper diagonal, N x [left, right] positive sample pairs
        r_pos = torch.diag(similarity_matrix, -self.batch_size) # lower diagonal, N x [right, left] positive sample pairs
        positives = torch.cat([l_pos, r_pos]).view(2 * self.batch_size, 1) # similarity of positive pairs, [2N, 1]
        negatives = similarity_matrix[self.mask_samples_from_same_repr].view(2 * self.batch_size, -1) # [2N, 2N-2] negatives per row
        logits = torch.cat((positives, negatives), dim=1) # [2N, 2N-1], the elements of one row are used for one loss term
        logits /= self.temperature
        # labels are all 0, meaning the first value of each vector is the nominator term of CELoss
        # each denominator contains 2N+1-2 = 2N-1 terms, corresponding to all similarities between the sample and other samples.
        labels = torch.zeros(2 * self.batch_size).to(self.device).long()
        loss = self.criterion(logits, labels)
        return loss / (2 * self.batch_size) # criterion uses reduction='sum', so divide by 2N to get the per-sample mean
| 3,708 | 41.147727 | 130 | py |
USCL | USCL-main/train_USCL/data_aug/outpainting.py | import torch
import numpy as np
import random
class Outpainting(object):
    """Out-painting corruption: replace the whole image with uniform noise,
    then copy n_holes random rectangles of the ORIGINAL image back in (i.e.
    only the patches survive — the opposite of Cutout).

    Args:
        n_holes (int): Number of original-image patches to keep.
    """
    def __init__(self, n_holes=5):
        self.n_holes = n_holes
    def __call__(self, img):
        """
        Args:
            img (Tensor): Tensor image of size (C, H, W).
        Returns:
            Tensor: float32 image of the same size; noise everywhere except
            the n_holes preserved rectangles.
        """
        c, h, w = img.shape
        new_img = np.random.rand(c, h, w) * 1.0
        for n in range(self.n_holes):
            # length of edges: roughly 3/7 to 4/7 of each image dimension
            block_noise_size_x = w - random.randint(3*w//7, 4*w//7)
            block_noise_size_y = h - random.randint(3*h//7, 4*h//7)
            # lower left corner, kept at least 3 px away from the border
            noise_x = random.randint(3, w-block_noise_size_x-3)
            noise_y = random.randint(3, h-block_noise_size_y-3)
            # copy the original image content into the patch
            new_img[:, noise_y:noise_y+block_noise_size_y,
                    noise_x:noise_x+block_noise_size_x] = img[:, noise_y:noise_y+block_noise_size_y,
                                                                 noise_x:noise_x+block_noise_size_x]
        new_img = torch.tensor(new_img)
        new_img = new_img.type(torch.FloatTensor)
        return new_img
        # return torch.tensor(new_img)
| 1,491 | 33.697674 | 100 | py |
USCL | USCL-main/train_USCL/data_aug/sharpen.py | import torch
import numpy as np
from PIL import Image
from PIL import ImageFilter
class Sharpen(object):
    """ Sharpen an image before inputing it to networks
    Args:
        degree (int): The sharpen intensity, from -1 to 5.
            0 represents original image.
    """
    def __init__(self, degree=0):
        self.degree = degree
    def __call__(self, img):
        """
        Args:
            img (PIL image): input image
        Returns:
            img (PIL image): sharpened output image
        Raises:
            ValueError: if degree is not an integer in [-1, 5].
        """
        if self.degree == -1:
            img = img.filter(ImageFilter.Kernel((3,3),(-1, -1/2, -1,
                                                       -1/2, 3, -1/2,
                                                       -1, -1/2, -1))) # blurrier than the original
        elif self.degree == 0: # original image, unchanged
            pass
        elif self.degree == 1:
            img = img.filter(ImageFilter.Kernel((3,3),(1, -2, 1,
                                                       -2, 5, -2,
                                                       1, -2, 1))) # very weak, almost no sharpening
        elif self.degree == 2:
            img = img.filter(ImageFilter.Kernel((3,3),(0, -2/7, 0,
                                                       -2/7, 19/7, -2/7,
                                                       0, -2/7, 0))) # slightly visible sharpening
        elif self.degree == 3:
            img = img.filter(ImageFilter.Kernel((3,3),(0, -1, 0,
                                                       -1, 5, -1,
                                                       0, -1, 0))) # noticeable sharpening
        elif self.degree == 4:
            img = img.filter(ImageFilter.Kernel((3,3),(-1, -1, -1,
                                                       -1, 9, -1,
                                                       -1, -1, -1))) # strong sharpening
        elif self.degree == 5:
            img = img.filter(ImageFilter.Kernel((3,3),(-1, -4, -1,
                                                       -4, 21, -4,
                                                       -1, -4, -1))) # strongest
        else:
            raise ValueError('The degree must be integer between -1 and 5')
        return img
| 2,260 | 34.888889 | 80 | py |
USCL | USCL-main/train_USCL/data_aug/dataset_wrapper_Ultrasound_Video_Mixup.py | import os
import random
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision.transforms as transforms
from data_aug.gaussian_blur import GaussianBlur
from data_aug.cutout import Cutout
from data_aug.outpainting import Outpainting
from data_aug.nonlin_trans import NonlinearTrans
from data_aug.sharpen import Sharpen
np.random.seed(0)
class USDataset_video(Dataset):
    def __init__(self, data_dir, transform=None, LabelList=None, DataList=None, Checkpoint_Num=None):
        """
        Ultrasound self-supervised training Dataset: samples 2-3 frames from
        one video per item and (for >=3 frames) mixes them up into a positive pair.
        :param data_dir: str, root directory; each sub-directory is one video
        :param transform: torch.transform applied to the tuple of frames
        :param LabelList: indices of videos whose frames carry class labels
        :param DataList: indices of videos allowed to be sampled
        :param Checkpoint_Num: stored but never read in this class
        """
        # self.label_name = {"Rb1": 0, "Rb2": 1, "Rb3": 2, "Rb4": 3, "Rb5": 4, "F0_": 5, "F1_": 6, "F2_": 7, "F3_": 8, "F4_": 9,
        #                    "Reg": 10, "Cov": 11, "Ali": 10, "Bli": 11, "Ple": 11, "Oth": 11}   # US-4
        # self.label_name = {"Rb1": 0, "Rb2": 1, "Rb3": 2, "Rb4": 3, "Rb5": 4}   # CLUST
        # self.label_name = {"F0_": 0, "F1_": 1, "F2_": 2, "F3_": 3, "F4_": 4}  # Liver Forbrosis
        self.label_name = {"Reg": 0, "Cov": 1}  # Butterfly
        # self.label_name = {"Ali": 0, "Bli": 1, "Ple": 2, "Oth": 3}  # COVID19-LUSMS
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform
        self.LabelList = LabelList
        self.DataList = DataList
        self.Checkpoint_Num = Checkpoint_Num
    def __getitem__(self, index):
        # ## Different data rate: indices outside DataList are re-sampled from it
        if index not in self.DataList:
            index = random.sample(self.DataList, 1)[0]  # index in data set
        path_imgs = self.data_info[index]
        if len(path_imgs) >= 3:   # more than 3 images in one video
            path_img = random.sample(path_imgs, 3)  # random choose 3 images
            img1 = Image.open(path_img[0]).convert('RGB')     # 0~255
            img2 = Image.open(path_img[1]).convert('RGB')     # 0~255
            img3 = Image.open(path_img[2]).convert('RGB')     # 0~255
            if index in self.LabelList:
                # path_imgs[0]: '/home/zhangchunhui/MedicalAI/Butte/Cov-Cardiomyopathy_mp4/Cov-Cardiomyopathy_mp4_frame0.jpg'
                # path_imgs[0][35:38]: 'Cov'
                # NOTE(review): the class key is a fixed character slice of the
                # absolute path — this silently breaks for any other data root.
                label1 = self.label_name[path_imgs[0][35:38]]
                label2 = self.label_name[path_imgs[1][35:38]]
                label3 = self.label_name[path_imgs[2][35:38]]  # NOTE(review): label3 is computed but never returned
            else:
                label1, label2, label3 = 9999, 9999, 9999  # unlabel data = 9999
            if self.transform is not None:
                img1, img2, img3 = self.transform((img1, img2, img3))   # transform
            ##########################################################################
            ### frame mixup: blend the two outer frames with the anchor frame img2
            # alpha, beta = 2, 5
            alpha, beta = 0.5, 0.5
            lam = np.random.beta(alpha, beta)
            # img2 as anchor
            mixupimg1 = lam * img1 + (1.0 - lam) * img2
            mixupimg2 = lam * img3 + (1.0 - lam) * img2
            return mixupimg1, label1, mixupimg2, label2, img1, img2
        elif len(path_imgs) == 2:
            path_img = random.sample(path_imgs, 2)  # random choose 3 images
            img1 = Image.open(path_img[0]).convert('RGB')     # 0~255
            img2 = Image.open(path_img[1]).convert('RGB')     # 0~255
            if index in self.LabelList:
                label1 = self.label_name[path_imgs[0][35:38]]
                label2 = self.label_name[path_imgs[1][35:38]]
            else:
                label1, label2 = 9999, 9999  # unlabel data = 9999
            if self.transform is not None:
                img1, img2 = self.transform((img1, img2))   # transform
            return img1, label1, img2, label2, img1, img2
        else:   # one image in the video, using augmentation to obtain two positive samples
            img1 = Image.open(path_imgs[0]).convert('RGB')     # 0~255
            img2 = Image.open(path_imgs[0]).convert('RGB')     # 0~255
            if index in self.LabelList:
                label1 = self.label_name[path_imgs[0][35:38]]
                label2 = self.label_name[path_imgs[0][35:38]]
            else:
                label1, label2 = 9999, 9999  # unlabel data = 9999
            if self.transform is not None:
                img1, img2 = self.transform((img1, img2))   # transform
            return img1, label1, img2, label2, img1, img2
        # if self.transform is not None:
        #     img1, img2 = self.transform((img1, img2))   # transform
        # return img1, label1, img2, label2
    def __len__(self):  # number of videos, not frames
        return len(self.data_info)
    @staticmethod
    def get_img_info(data_dir):
        """Walk data_dir and return a list of per-video lists of frame paths."""
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            for sub_dir in dirs:  # one video as one class
                img_names = os.listdir(os.path.join(root, sub_dir))
                img_names = list(filter(lambda x: x.endswith('.jpg') or x.endswith('.png'), img_names))
                path_imgs = []  # list
                for i in range(len(img_names)):
                    img_name = img_names[i]
                    path_img = os.path.join(root, sub_dir, img_name)
                    path_imgs.append(path_img)
                data_info.append(path_imgs)
        return data_info
class USDataset_image(Dataset):
    def __init__(self, data_dir, transform=None, LabelList=None, DataList=None):
        """
        Ultrasound self-supervised training Dataset, only choose one image from a video
        (the positive pair is the same frame augmented twice).
        :param data_dir: str, root directory; each sub-directory is one video
        :param transform: torch.transform
        :param LabelList: stored but never read in this class
        :param DataList: stored but never read in this class
        """
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform
        self.LabelList = LabelList
        self.DataList = DataList
    def __getitem__(self, index):
        path_imgs = self.data_info[index]  # list
        path_img = random.sample(path_imgs, 1)  # random choose one image
        img1 = Image.open(path_img[0]).convert('RGB')     # 0~255
        img2 = Image.open(path_img[0]).convert('RGB')     # 0~255
        # NOTE(review): the label is derived from a fixed character offset [64:]
        # of the absolute path (0=covid, 1=pneumonia, 2=other) — it silently
        # produces wrong labels for any other data root; verify against caller.
        label1 = 0 if path_img[0].lower()[64:].find("cov") > -1 else (1 if path_img[0].lower()[64:].find("pneu") > -1 else 2)
        if self.transform is not None:
            img1, img2 = self.transform((img1, img2))   # transform
        return img1, label1, img2, label1
    def __len__(self):  # number of videos, not frames
        return len(self.data_info)
    @staticmethod
    def get_img_info(data_dir):
        """Walk data_dir and return a list of per-video lists of frame paths."""
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            for sub_dir in dirs:  # one video as one class
                img_names = os.listdir(os.path.join(root, sub_dir))
                img_names = list(filter(lambda x: x.endswith('.jpg') or x.endswith('.png'), img_names))
                path_imgs = []
                for i in range(len(img_names)):
                    img_name = img_names[i]
                    path_img = os.path.join(root, sub_dir, img_name)
                    path_imgs.append(path_img)
                data_info.append(path_imgs)
        return data_info
class DataSetWrapper(object):
    """Builds the SimCLR-style augmentation pipeline and the train/validation
    DataLoaders over the (hard-coded) ultrasound dataset directories."""
    def __init__(self, batch_size, LabelList, DataList, Checkpoint_Num, num_workers, valid_size, input_shape, s):
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.valid_size = valid_size  # leave out ratio, e.g. 0.20
        self.s = s  # color-jitter strength multiplier
        # NOTE(review): eval() on a config string executes arbitrary code if
        # the config is untrusted; ast.literal_eval would be safer.
        self.input_shape = eval(input_shape)  # (H, W, C) shape of input image
        self.LabelList = LabelList
        self.DataList = DataList
        self.Checkpoint_Num = Checkpoint_Num  # stored but never read here
    def get_data_loaders(self):
        ''' Get dataloader for target dataset, this function will be called before the training process '''
        data_augment = self._get_simclr_pipeline_transform()
        print('\nData augmentation:')
        print(data_augment)
        use_video = True  # hard-coded switch between video- and image-level sampling
        if use_video:
            print('\nUse video augmentation!')
            # US-4
            # train_dataset = USDataset_video("/home/zhangchunhui/WorkSpace/SSL/Ultrasound_Datasets_train/Video/",
            #                  transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList)  # augmented from 2 images
            # 1 video-CLUST
            # train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Ultrasound_Datasets_train/CLUST/",
            #                  transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList)  # augmented from 2 images
            # 1 video-Liver
            # train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Ultrasound_Datasets_train/Liver/",
            #                  transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList)  # augmented from 2 images
            # 1 video-COVID
            # train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Ultrasound_Datasets_train/COVID/",
            #                  transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList)  # augmented from 2 images
            # 1 video-Butte
            # NOTE(review): dataset root is an absolute, machine-specific path.
            train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Butte/",
                             transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList)  # augmented from 2 images
        else:
            print('\nDo not use video augmentation!')
            # Images
            train_dataset = USDataset_image("/home/zhangchunhui/MedicalAI/Butte/",
                             transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList)  # augmented from 1 image
        train_loader, valid_loader = self.get_train_validation_data_loaders(train_dataset)
        # train_loader = self.get_train_validation_data_loaders(train_dataset)
        return train_loader, valid_loader
        # return train_loader
    def __len__(self):  # NOTE(review): returns the batch size, not a dataset length
        return self.batch_size
    def _get_simclr_pipeline_transform(self):
        '''
        Get a set of data augmentation transformations as described in the SimCLR paper.
        Random Crop (resize to original size) + Random color distortion + Gaussian Blur
        '''
        color_jitter = transforms.ColorJitter(0.8 * self.s, 0.8 * self.s, 0.8 * self.s, 0.2 * self.s)
        data_transforms = transforms.Compose([Sharpen(degree=0),
                                              transforms.Resize((self.input_shape[0], self.input_shape[1])),
                                              transforms.RandomResizedCrop(size=self.input_shape[0], scale=(0.8, 1.0), ratio=(0.8, 1.25)),
                                              transforms.RandomHorizontalFlip(),
                                              # transforms.RandomRotation(10),
                                              color_jitter,
                                              # transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
                                              # GaussianBlur(kernel_size=int(0.05 * self.input_shape[0])),
                                              transforms.ToTensor(),
                                              # NonlinearTrans(prob=0.9),  # 0-1
                                              # transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.25,0.25,0.25]),
                                              # Cutout(n_holes=3, length=32),
                                              # Outpainting(n_holes=5),
                                              transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25]),
                                              ])
        return data_transforms
    def get_train_validation_data_loaders(self, train_dataset):
        # obtain indices that will be used for training / validation
        num_train = len(train_dataset)
        indices = list(range(num_train))
        np.random.shuffle(indices)
        split = int(np.floor(self.valid_size * num_train))
        train_idx, valid_idx = indices[split:], indices[:split]
        train_idx= indices[split:]  # NOTE(review): redundant — train_idx was just assigned the same slice
        # define samplers for obtaining training and validation batches
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        # data loaders for training and validation, drop_last should be False to avoid data shortage of valid_loader
        train_loader = DataLoader(train_dataset, batch_size=self.batch_size, sampler=train_sampler,
                                  num_workers=self.num_workers, drop_last=False, shuffle=False)
        valid_loader = DataLoader(train_dataset, batch_size=self.batch_size, sampler=valid_sampler,
                                  num_workers=self.num_workers, drop_last=False)
        return train_loader, valid_loader
        # return train_loader
class SimCLRDataTransform(object):
    """Apply the same augmentation pipeline independently to each image of a
    2- or 3-tuple, producing the positive views for contrastive training."""
    def __init__(self, transform):
        self.transform = transform
    def __call__(self, sample):
        views = [self.transform(sample[0]), self.transform(sample[1])]
        if len(sample) > 2:
            views.append(self.transform(sample[2]))
        return tuple(views)
| 13,707 | 45.310811 | 167 | py |
USCL | USCL-main/train_USCL/data_aug/cutout.py | import torch
import numpy as np
class Cutout(object):
    """Randomly mask out one or more patches from an image.
    Args:
        n_holes (int): Number of patches to cut out of each image.
        length (int): The length (in pixels) of each square patch.
    """
    def __init__(self, n_holes, length):
        self.n_holes = n_holes
        self.length = length
    def __call__(self, img):
        """
        Args:
            img (Tensor): Tensor image of size (C, H, W).
        Returns:
            Tensor: Image with n_holes of dimension (length x length) cut out of it.
        """
        h = img.size(1)
        w = img.size(2)
        mask = np.ones((h, w), np.float32)
        for n in range(self.n_holes):
            # center of the square patch (patches may overlap or be clipped at borders)
            y = np.random.randint(h)
            x = np.random.randint(w)
            # edges, clipped to the image bounds
            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)
            mask[y1: y2, x1: x2] = 0.
        mask = torch.from_numpy(mask)
        mask = mask.expand_as(img)  # broadcast the (H, W) mask over all channels
        img = img * mask
return img | 1,213 | 26.590909 | 84 | py |
USCL | USCL-main/train_USCL/data_aug/nonlin_trans.py | from __future__ import print_function
import random
import numpy as np
import torch
try: # SciPy >= 0.19
from scipy.special import comb
except ImportError:
from scipy.misc import comb
def bernstein_poly(i, n, t):
    """Evaluate the i-th Bernstein basis polynomial of degree n at t.

    Note the exponent convention used throughout this module:
    B(i, n, t) = C(n, i) * t**(n - i) * (1 - t)**i.
    """
    coeff = comb(n, i)
    return coeff * t ** (n - i) * (1 - t) ** i
def bezier_curve(points, nTimes=1000):
    """Sample the Bezier curve defined by the given control points.

    Control points should be a list of [x, y] pairs such as
    [[1, 1], [2, 3], [4, 5], ..., [Xn, Yn]]; nTimes is the number of
    sample steps (default 1000). Returns (xvals, yvals) arrays.

    See http://processingjs.nihongoresources.com/bezierinfo/
    """
    n_points = len(points)
    control = np.asarray(points, dtype=float)            # (n_points, 2)
    t = np.linspace(0.0, 1.0, nTimes)
    # Bernstein basis evaluated at every t: (n_points, nTimes)
    basis = np.array([bernstein_poly(i, n_points - 1, t) for i in range(n_points)])
    xvals, yvals = control.T @ basis
    return xvals, yvals
def nonlinear_transformation(x, prob=0.5):
    """With probability `prob`, remap the intensities of x through a random
    Bezier curve anchored at (0, 0) and (1, 1); otherwise return x unchanged."""
    if random.random() >= prob:
        return x
    points = [[0, 0], [random.random(), random.random()], [random.random(), random.random()], [1, 1]]
    xvals, yvals = bezier_curve(points, nTimes=1000)
    if random.random() < 0.5:
        # half chance: monotonic x only -> the mapping may be intensity-flipped
        xvals = np.sort(xvals)
    else:
        xvals, yvals = np.sort(xvals), np.sort(yvals)
    # interpolate x onto the curve [xvals, yvals]
    return np.interp(x, xvals, yvals)
class NonlinearTrans(object):
    """Randomly apply a Bezier-curve intensity remapping to an image tensor.

    Args:
        prob (float): Probability to do the transformation.
    """
    def __init__(self, prob=0.9):
        self.prob = prob
    def __call__(self, img):
        """
        Args:
            img (Tensor): Tensor image of size (C, H, W), values are between 0 and 1
        Returns:
            img (Tensor): float32 tensor image of size (C, H, W) after transformation
        """
        out_img = nonlinear_transformation(img, prob=self.prob)
        # nonlinear_transformation returns an np.ndarray when the curve was
        # applied (np.interp), or the untouched input tensor when it skipped.
        # BUG FIX: the original wrapped torch.from_numpy in a bare `except:`
        # that silently swallowed *every* error; test the type explicitly.
        if not torch.is_tensor(out_img):
            out_img = torch.from_numpy(out_img)
        return out_img.type(torch.FloatTensor)
if __name__ == '__main__':
    # Visual sanity check: plot several random Bezier remappings of [0, 1].
    import matplotlib.pyplot as plt
    x = np.linspace(0, 1, 100)
    for i in range(5):
        y = nonlinear_transformation(x, prob=1.0)  # prob=1.0 forces the transform
        plt.plot(x, y)
    plt.show()
| 2,817 | 25.584906 | 101 | py |
USCL | USCL-main/eval_pretrained_model/eval_pretrained_model.py | import os
import sys
import time
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torch.optim as optim
from tools.my_dataset import COVIDDataset
from resnet_uscl import ResNetUSCL
# Try to enable NVIDIA Apex mixed-precision training; fall back gracefully.
apex_support = False
try:
    sys.path.append('./apex')
    from apex import amp
    print("Apex on, run on mixed precision.")
    apex_support = True
except:  # NOTE(review): bare except also hides unrelated errors inside apex — prefer `except ImportError:`
    print("Please install apex for mixed precision training from: https://github.com/NVIDIA/apex")
    apex_support = False
# Pick the compute device and report GPU details when available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("\nRunning on:", device)
if device == 'cuda':
    device_name = torch.cuda.get_device_name()
    print("The device name is:", device_name)
    cap = torch.cuda.get_device_capability(device=None)
    print("The capability of this device is:", cap, '\n')
def set_seed(seed=1):
    """Seed every RNG used in this script (python, numpy, torch CPU/CUDA)
    for reproducible runs.

    Args:
        seed (int): the seed value, default 1.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Robustness: also seed every GPU in multi-GPU setups; a no-op without CUDA.
    torch.cuda.manual_seed_all(seed)
def main():
# ============================ step 1/5 data ============================
# transforms
train_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.RandomResizedCrop(size=224, scale=(0.8, 1.0), ratio=(0.8, 1.25)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.25,0.25,0.25])
])
valid_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.25,0.25,0.25])
])
# MyDataset
train_data = COVIDDataset(data_dir=data_dir, train=True, transform=train_transform)
valid_data = COVIDDataset(data_dir=data_dir, train=False, transform=valid_transform)
# DataLoder
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)
# ============================ step 2/5 model ============================
net = ResNetUSCL(base_model='resnet18', out_dim=256, pretrained=pretrained)
if pretrained:
print('\nThe ImageNet pretrained parameters are loaded.')
else:
print('\nThe ImageNet pretrained parameters are not loaded.')
if selfsup: # import pretrained model weights
state_dict = torch.load(state_dict_path)
new_dict = {k: state_dict[k] for k in list(state_dict.keys())
if not (k.startswith('l')
| k.startswith('fc'))} # # discard MLP and fc
model_dict = net.state_dict()
model_dict.update(new_dict)
net.load_state_dict(model_dict)
print('\nThe self-supervised trained parameters are loaded.\n')
else:
print('\nThe self-supervised trained parameters are not loaded.\n')
# frozen all convolutional layers
# for param in net.parameters():
# param.requires_grad = False
# fine-tune last 3 layers
for name, param in net.named_parameters():
if not name.startswith('features.7.1'):
param.requires_grad = False
# add a classifier for linear evaluation
num_ftrs = net.linear.in_features
net.linear = nn.Linear(num_ftrs, 3)
net.fc = nn.Linear(3, 3)
for name, param in net.named_parameters():
print(name, '\t', 'requires_grad=', param.requires_grad)
net.to(device)
# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss() # choose loss function
# ============================ step 4/5 optimizer ============================
optimizer = optim.Adam(net.parameters(), lr=LR, weight_decay=weight_decay) # choose optimizer
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
T_max=MAX_EPOCH,
eta_min=0,
last_epoch=-1) # set learning rate decay strategy
# ============================ step 5/5 training ============================
print('\nTraining start!\n')
start = time.time()
train_curve = list()
valid_curve = list()
max_acc = 0.
reached = 0 # which epoch reached the max accuracy
# the statistics of classification result: classification_results[true][pred]
classification_results = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
best_classification_results = None
if apex_support and fp16_precision:
net, optimizer = amp.initialize(net, optimizer,
opt_level='O2',
keep_batchnorm_fp32=True)
for epoch in range(MAX_EPOCH):
loss_mean = 0.
correct = 0.
total = 0.
net.train()
for i, data in enumerate(train_loader):
# forward
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
outputs = net(inputs)
# backward
optimizer.zero_grad()
loss = criterion(outputs, labels)
if apex_support and fp16_precision:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# update weights
optimizer.step()
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).cpu().squeeze().sum().numpy()
# print training information
loss_mean += loss.item()
train_curve.append(loss.item())
if (i+1) % log_interval == 0:
loss_mean = loss_mean / log_interval
print("\nTraining:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
loss_mean = 0.
print('Learning rate this epoch:', scheduler.get_last_lr()[0])
scheduler.step() # updata learning rate
# validate the model
if (epoch+1) % val_interval == 0:
correct_val = 0.
total_val = 0.
loss_val = 0.
net.eval()
with torch.no_grad():
for j, data in enumerate(valid_loader):
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
_, predicted = torch.max(outputs.data, 1)
total_val += labels.size(0)
correct_val += (predicted == labels).cpu().squeeze().sum().numpy()
for k in range(len(predicted)):
classification_results[labels[k]][predicted[k]] += 1 # "label" is regarded as "predicted"
loss_val += loss.item()
acc = correct_val / total_val
if acc > max_acc: # record best accuracy
max_acc = acc
reached = epoch
best_classification_results = classification_results
classification_results = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
valid_curve.append(loss_val/valid_loader.__len__())
print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, acc))
print('\nTraining finish, the time consumption of {} epochs is {}s\n'.format(MAX_EPOCH, round(time.time() - start)))
print('The max validation accuracy is: {:.2%}, reached at epoch {}.\n'.format(max_acc, reached))
print('\nThe best prediction results of the dataset:')
print('Class 0 predicted as class 0:', best_classification_results[0][0])
print('Class 0 predicted as class 1:', best_classification_results[0][1])
print('Class 0 predicted as class 2:', best_classification_results[0][2])
print('Class 1 predicted as class 0:', best_classification_results[1][0])
print('Class 1 predicted as class 1:', best_classification_results[1][1])
print('Class 1 predicted as class 2:', best_classification_results[1][2])
print('Class 2 predicted as class 0:', best_classification_results[2][0])
print('Class 2 predicted as class 1:', best_classification_results[2][1])
print('Class 2 predicted as class 2:', best_classification_results[2][2])
acc0 = best_classification_results[0][0] / sum(best_classification_results[i][0] for i in range(3))
recall0 = best_classification_results[0][0] / sum(best_classification_results[0])
print('\nClass 0 accuracy:', acc0)
print('Class 0 recall:', recall0)
print('Class 0 F1:', 2 * acc0 * recall0 / (acc0 + recall0))
acc1 = best_classification_results[1][1] / sum(best_classification_results[i][1] for i in range(3))
recall1 = best_classification_results[1][1] / sum(best_classification_results[1])
print('\nClass 1 accuracy:', acc1)
print('Class 1 recall:', recall1)
print('Class 1 F1:', 2 * acc1 * recall1 / (acc1 + recall1))
acc2 = best_classification_results[2][2] / sum(best_classification_results[i][2] for i in range(3))
recall2 = best_classification_results[2][2] / sum(best_classification_results[2])
print('\nClass 2 accuracy:', acc2)
print('Class 2 recall:', recall2)
print('Class 2 F1:', 2 * acc2 * recall2 / (acc2 + recall2))
return best_classification_results
if __name__ == '__main__':
    # Command line: -p/--path selects the checkpoint sub-folder under base_path.
    parser = argparse.ArgumentParser(description='linear evaluation')
    parser.add_argument('-p', '--path', default='checkpoint', help='folder of ckpt')
    args = parser.parse_args()
    set_seed(1) # random seed
    # parameters
    MAX_EPOCH = 100 # default = 100
    BATCH_SIZE = 32 # default = 32
    LR = 0.01 # default = 0.01
    weight_decay = 1e-4 # default = 1e-4
    log_interval = 10
    val_interval = 1
    base_path = "./eval_pretrained_model/"
    state_dict_path = os.path.join(base_path, args.path, "best_model.pth")
    print('State dict path:', state_dict_path)
    fp16_precision = True
    pretrained = False
    selfsup = True
    # save result
    save_dir = os.path.join('result')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    resultfile = save_dir + '/my_result.txt'
    # Run 5-fold cross-validation only when a checkpoint exists and no result
    # file has been written yet (so finished runs are not repeated).
    if (not (os.path.exists(resultfile))) and (os.path.exists(state_dict_path)):
        # Accumulated 3x3 confusion matrix over folds (rows: true, cols: predicted).
        confusion_matrix = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
        for i in range(1, 6):
            print('\n' + '='*20 + 'The training of fold {} start.'.format(i) + '='*20)
            # NOTE(review): data_dir and the hyper-parameters above appear to be
            # module globals consumed by main() — confirm against main()'s body.
            data_dir = "./covid_5_fold/covid_data{}.pkl".format(i)
            best_classification_results = main()
            confusion_matrix = confusion_matrix + np.array(best_classification_results)
        print('\nThe confusion matrix is:')
        print(confusion_matrix)
        # Precision = diagonal over column sum; recall = diagonal over row sum.
        print('\nThe precision of class 0 is:', confusion_matrix[0,0] / sum(confusion_matrix[:,0]))
        print('The precision of class 1 is:', confusion_matrix[1,1] / sum(confusion_matrix[:,1]))
        print('The precision of class 2 is:', confusion_matrix[2,2] / sum(confusion_matrix[:,2]))
        print('\nThe recall of class 0 is:', confusion_matrix[0,0] / sum(confusion_matrix[0]))
        print('The recall of class 1 is:', confusion_matrix[1,1] / sum(confusion_matrix[1]))
        print('The recall of class 2 is:', confusion_matrix[2,2] / sum(confusion_matrix[2]))
        print('\nTotal acc is:', (confusion_matrix[0,0]+confusion_matrix[1,1]+confusion_matrix[2,2])/confusion_matrix.sum())
        # Persist the same metrics; '\r\n' keeps Windows-style line endings.
        file_handle = open(save_dir + '/my_result.txt', mode='w+')
        file_handle.write("precision 0: "+str(confusion_matrix[0,0] / sum(confusion_matrix[:,0]))); file_handle.write('\r\n')
        file_handle.write("precision 1: "+str(confusion_matrix[1,1] / sum(confusion_matrix[:,1]))); file_handle.write('\r\n')
        file_handle.write("precision 2: "+str(confusion_matrix[2,2] / sum(confusion_matrix[:,2]))); file_handle.write('\r\n')
        file_handle.write("recall 0: "+str(confusion_matrix[0,0] / sum(confusion_matrix[0]))); file_handle.write('\r\n')
        file_handle.write("recall 1: "+str(confusion_matrix[1,1] / sum(confusion_matrix[1]))); file_handle.write('\r\n')
        file_handle.write("recall 2: "+str(confusion_matrix[2,2] / sum(confusion_matrix[2]))); file_handle.write('\r\n')
        file_handle.write("Total acc: "+str((confusion_matrix[0,0]+confusion_matrix[1,1]+confusion_matrix[2,2])/confusion_matrix.sum()))
file_handle.close() | 12,878 | 42.218121 | 136 | py |
USCL | USCL-main/eval_pretrained_model/resnet_uscl.py | import torch.nn as nn
import torchvision.models as models
class ResNetUSCL(nn.Module):
    """ResNet feature extractor + projection head + classifier for USCL.

    base_model : str, one of "resnet18" / "resnet50"
    out_dim    : int, dimensionality of the projection-head output
    pretrained : bool, load torchvision ImageNet weights when True
    """
    def __init__(self, base_model, out_dim, pretrained=False):
        super(ResNetUSCL, self).__init__()
        self.resnet_dict = {"resnet18": models.resnet18(pretrained=pretrained),
                            "resnet50": models.resnet50(pretrained=pretrained)}
        if pretrained:
            print('\nModel parameters loaded.\n')
        else:
            print('\nRandom initialize model parameters.\n')
        resnet = self._get_basemodel(base_model)
        num_ftrs = resnet.fc.in_features
        self.features = nn.Sequential(*list(resnet.children())[:-1])  # discard the last fc layer
        # projection MLP
        self.linear = nn.Linear(num_ftrs, out_dim)
        # classifier
        num_classes = 12
        self.fc = nn.Linear(out_dim, num_classes)

    def _get_basemodel(self, model_name):
        """Return the backbone registered under *model_name*.

        Raises ValueError for unknown names.
        """
        try:
            model = self.resnet_dict[model_name]
            print("Feature extractor:", model_name)
            return model
        except KeyError:
            # BUG FIX: the original `raise ("...")` raised a *string*, which is a
            # TypeError in Python 3 (exceptions must derive from BaseException);
            # the bare `except:` also swallowed unrelated errors.
            raise ValueError(
                "Invalid model name. Check the config file and pass one of: resnet18 or resnet50")

    def forward(self, x):
        h = self.features(x)
        # NOTE(review): squeeze() removes ALL size-1 dims, including the batch
        # dim when batch_size == 1 — confirm callers never use batch 1.
        h = h.squeeze()
        x = self.linear(h)
        return x
| 1,383 | 30.454545 | 101 | py |
USCL | USCL-main/eval_pretrained_model/tools/my_dataset.py | import os
import random
import pickle
from PIL import Image
from torch.utils.data import Dataset
random.seed(1)
class COVIDDataset(Dataset):
    # Lung-ultrasound (POCUS) dataset backed by a pickled
    # (X_train, y_train, X_test, y_test) tuple.
    def __init__(self, data_dir, train=True, transform=None):
        """
        POCUS Dataset
        param data_dir: str, path to a pickle file holding
            (X_train, y_train, X_test, y_test)
        param train: bool, pick the train split (True) or test split (False)
        param transform: torch.transform applied to each PIL image
        """
        # class-name -> integer label mapping used by this dataset
        self.label_name = {"covid19": 0, "pneumonia": 1, "regular": 2}
        with open(data_dir, 'rb') as f:
            X_train, y_train, X_test, y_test = pickle.load(f)
        if train:
            self.X, self.y = X_train, y_train # [N, C, H, W], [N]
        else:
            self.X, self.y = X_test, y_test # [N, C, H, W], [N]
        self.transform = transform
    def __getitem__(self, index):
        # Convert the stored CHW array to an RGB PIL image so torchvision
        # transforms can be applied.
        img_arr = self.X[index].transpose(1,2,0) # CHW => HWC
        img = Image.fromarray(img_arr.astype('uint8')).convert('RGB') # 0~255
        label = self.y[index]
        if self.transform is not None:
            img = self.transform(img)
        return img, label
def __len__(self):
return len(self.y) | 1,079 | 29 | 77 | py |
real-robot-challenge | real-robot-challenge-main/python/pybullet_planning/utils/transformations.py | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006, Christoph Gohlke
# Copyright (c) 2006-2009, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Authors:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 20090418
Requirements
------------
* `Python 2.6 <http://www.python.org>`__
* `Numpy 1.3 <http://numpy.scipy.org>`__
* `transformations.c 20090418 <http://www.lfd.uci.edu/~gohlke/>`__
(optional implementation of some functions in C)
Notes
-----
Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using
numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using
numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively
numpy.dot(v, M.T) for shape (\*, 4) "array of points".
Calculations are carried out with numpy.float64 precision.
This Python implementation is not optimized for speed.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions ix+jy+kz+w are represented as [x, y, z, w].
Use the transpose of transformation matrices for OpenGL glMultMatrixd().
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix((1, 2, 3))
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, (1, 2, 3))
True
>>> numpy.allclose(shear, (0, math.tan(beta), 0))
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
"""
from __future__ import division
import warnings
import math
import numpy
import random
# Documentation in HTML format can be generated with Epydoc
__docformat__ = "restructuredtext en"
def identity_matrix():
    """Return the 4x4 homogeneous identity transform.

    >>> I = identity_matrix()
    >>> numpy.allclose(I, numpy.dot(I, I))
    True
    >>> numpy.sum(I), numpy.trace(I)
    (4.0, 4.0)
    >>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64))
    True

    """
    # The identity leaves every homogeneous point unchanged.
    return numpy.eye(4, dtype=numpy.float64)
def translation_matrix(direction):
    """Return matrix that translates homogeneous points by *direction*.

    >>> v = numpy.random.random(3) - 0.5
    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
    True

    """
    # Start from the identity and write the offset into the last column.
    T = numpy.identity(4)
    T[0, 3], T[1, 3], T[2, 3] = direction[0], direction[1], direction[2]
    return T
def translation_from_matrix(matrix):
    """Return the translation vector stored in a transformation matrix.

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = translation_from_matrix(translation_matrix(v0))
    >>> numpy.allclose(v0, v1)
    True

    """
    # The offset lives in rows 0-2 of the last column; return a copy so the
    # caller cannot mutate the input matrix through the result.
    as_array = numpy.array(matrix, copy=False)
    return as_array[:3, 3].copy()
def reflection_matrix(point, normal):
    """Return matrix that mirrors at the plane through *point* with *normal*.

    >>> v0 = numpy.random.random(4) - 0.5
    >>> v0[3] = 1.0
    >>> v1 = numpy.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> numpy.allclose(2., numpy.trace(R))
    True
    >>> numpy.allclose(v0, numpy.dot(R, v0))
    True

    """
    unit_n = unit_vector(normal[:3])
    # Householder reflection I - 2 n n^T about the plane through the origin...
    mirror = numpy.identity(4)
    mirror[:3, :3] -= 2.0 * numpy.outer(unit_n, unit_n)
    # ...shifted so the mirror plane passes through `point`.
    mirror[:3, 3] = (2.0 * numpy.dot(point[:3], unit_n)) * unit_n
    return mirror
def reflection_from_matrix(matrix):
    """Return mirror plane point and normal vector from a reflection matrix.

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = numpy.random.random(3) - 0.5
    >>> M0 = reflection_matrix(v0, v1)
    >>> point, normal = reflection_from_matrix(M0)
    >>> M1 = reflection_matrix(point, normal)
    >>> is_same_transform(M0, M1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    # The plane normal is flipped by the mirror, so it is the (real) unit
    # eigenvector of the upper-left 3x3 block for eigenvalue -1.
    eigvals, eigvecs = numpy.linalg.eig(M[:3, :3])
    hits = numpy.where(abs(numpy.real(eigvals) + 1.0) < 1e-8)[0]
    if not len(hits):
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = numpy.real(eigvecs[:, hits[0]]).squeeze()
    # Any fixed point of the full transform lies on the mirror plane: take an
    # eigenvector for eigenvalue 1 and dehomogenize it.
    eigvals, eigvecs = numpy.linalg.eig(M)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if not len(hits):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    point /= point[3]
    return point, normal
def rotation_matrix(angle, direction, point=None):
    """Return matrix rotating by *angle* about the axis through *point*
    along *direction*.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
    >>> is_same_transform(R0, R1)
    True
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(-angle, -direc, point)
    >>> is_same_transform(R0, R1)
    True
    >>> I = numpy.identity(4, numpy.float64)
    >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
    True

    """
    sin_a = math.sin(angle)
    cos_a = math.cos(angle)
    axis = unit_vector(direction[:3])
    # Rodrigues' formula: R = cos*I + (1-cos) * a a^T + sin * [a]_x
    rot = numpy.identity(3, dtype=numpy.float64) * cos_a
    rot += (1.0 - cos_a) * numpy.outer(axis, axis)
    ax, ay, az = axis * sin_a
    rot += numpy.array(((0.0, -az, ay),
                        (az, 0.0, -ax),
                        (-ay, ax, 0.0)), dtype=numpy.float64)
    M = numpy.identity(4)
    M[:3, :3] = rot
    if point is not None:
        # Rotation about an axis not through the origin: add the translation
        # that keeps `point` fixed.
        fixed = numpy.array(point[:3], dtype=numpy.float64, copy=False)
        M[:3, 3] = fixed - numpy.dot(rot, fixed)
    return M
def rotation_from_matrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    Inverse of `rotation_matrix`: recovers (angle, direction, point) such
    that rotation_matrix(angle, direction, point) reproduces `matrix`.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotation_from_matrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True

    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = R[:3, :3]
    # direction: unit eigenvector of R33 corresponding to eigenvalue of 1
    # (the axis is unchanged by the rotation, so R33 @ d = d).
    l, W = numpy.linalg.eig(R33.T)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: unit eigenvector of R33 corresponding to eigenvalue of 1
    l, Q = numpy.linalg.eig(R)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(Q[:, i[-1]]).squeeze()
    # dehomogenize the fixed point
    point /= point[3]
    # rotation angle depending on direction
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    # Recover sin(angle) from an off-diagonal term; pick the formula whose
    # denominator (a component of the axis) is safely non-zero.
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
    """Return matrix scaling by *factor* around *origin*, optionally only
    along *direction*.

    Use factor -1 for point symmetry.

    >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v[3] = 1.0
    >>> S = scale_matrix(-1.234)
    >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
    True
    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S = scale_matrix(factor, origin)
    >>> S = scale_matrix(factor, origin, direct)

    """
    if direction is None:
        # Uniform scaling: factor on the diagonal; the translation part keeps
        # `origin` fixed.
        M = numpy.diag((factor, factor, factor, 1.0))
        if origin is not None:
            M[:3, 3] = numpy.array(origin[:3], dtype=numpy.float64) * (1.0 - factor)
    else:
        # Non-uniform scaling along a single direction.
        direction = unit_vector(direction[:3])
        shrink = 1.0 - factor
        M = numpy.identity(4)
        M[:3, :3] -= shrink * numpy.outer(direction, direction)
        if origin is not None:
            M[:3, 3] = (shrink * numpy.dot(origin[:3], direction)) * direction
    return M
def scale_from_matrix(matrix):
    """Return scaling factor, origin and direction from scaling matrix.

    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S0 = scale_matrix(factor, origin)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True
    >>> S0 = scale_matrix(factor, origin, direct)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # Candidate factor assuming directional scaling (trace == 2 + factor);
    # corrected below if the scaling turns out to be uniform.
    factor = numpy.trace(M33) - 2.0
    try:
        # direction: unit eigenvector corresponding to eigenvalue factor
        l, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0]
        direction = numpy.real(V[:, i]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # uniform scaling: no eigenvalue matched; trace is 3*factor here
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: any eigenvector corresponding to eigenvalue 1
    l, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(V[:, i[-1]]).squeeze()
    # dehomogenize the fixed point
    origin /= origin[3]
    return factor, origin, direction
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.

    Using either perspective point, projection direction, or none of both.
    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).

    >>> P = projection_matrix((0, 0, 0), (1, 0, 0))
    >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
    True
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> P1 = projection_matrix(point, normal, direction=direct)
    >>> P2 = projection_matrix(point, normal, perspective=persp)
    >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> is_same_transform(P2, numpy.dot(P0, P3))
    True
    >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
    >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(P, v0)
    >>> numpy.allclose(v1[1], v0[1])
    True
    >>> numpy.allclose(v1[0], 3.0-v1[1])
    True

    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection: rays converge at `perspective`; the result
        # must be dehomogenized (divided by w) after application
        perspective = numpy.array(perspective[:3], dtype=numpy.float64,
                                  copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
        M[:3, :3] -= numpy.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * perspective
        # bottom row makes w proportional to the distance from the plane
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(perspective, normal)
    elif direction is not None:
        # parallel projection along `direction` (oblique when direction is
        # not parallel to normal)
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection: drop the component along the plane normal
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.

    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.

    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    l, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    # An eigenvalue-1 eigenvector (a fixed point) exists for parallel and
    # orthogonal projections; perspective matrices are handled in the else.
    if not pseudo and len(i):
        # point: any eigenvector corresponding to eigenvalue 1
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        # (the projection collapses this direction)
        l, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        l, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        i = numpy.where(abs(numpy.real(l)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # bottom row of the matrix holds -normal (see projection_matrix)
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix mapping the given frustrum to normalized device
    coordinates.

    The frustrum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far). Coordinates inside the frustrum map
    into [-1, 1].

    If perspective is True the frustrum is a truncated pyramid with the
    perspective point at origin and direction along z axis, otherwise an
    orthographic canonical view volume (a box). Homogeneous coordinates
    transformed by the perspective clip matrix need to be dehomogenized
    (divided by the w coordinate).

    >>> frustrum = numpy.random.rand(6)
    >>> frustrum[1] += frustrum[0]
    >>> frustrum[3] += frustrum[2]
    >>> frustrum[5] += frustrum[4]
    >>> M = clip_matrix(*frustrum, perspective=False)
    >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
    array([-1., -1., -1.,  1.])
    >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0])
    array([1., 1., 1., 1.])

    """
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustrum")
    # spans along each axis
    w = right - left
    h = top - bottom
    d = far - near
    if perspective:
        if near <= _EPS:
            raise ValueError("invalid frustrum: near <= 0")
        t = 2.0 * near
        rows = ((-t / w, 0.0, (right + left) / w, 0.0),
                (0.0, -t / h, (top + bottom) / h, 0.0),
                (0.0, 0.0, -(far + near) / d, t * far / d),
                (0.0, 0.0, -1.0, 0.0))
    else:
        rows = ((2.0 / w, 0.0, 0.0, (right + left) / (left - right)),
                (0.0, 2.0 / h, 0.0, (top + bottom) / (bottom - top)),
                (0.0, 0.0, 2.0 / d, (far + near) / (near - far)),
                (0.0, 0.0, 0.0, 1.0))
    return numpy.array(rows, dtype=numpy.float64)
def shear_matrix(angle, direction, point, normal):
    """Return matrix shearing by *angle* along *direction* over the plane
    defined by *point* and *normal*.

    The direction vector must be orthogonal to the plane's normal vector.
    A point P maps to P" such that P-P" is parallel to the direction and its
    extent is given by the angle of P-P'-P", where P' is the orthogonal
    projection of P onto the shear plane.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S = shear_matrix(angle, direct, point, normal)
    >>> numpy.allclose(1.0, numpy.linalg.det(S))
    True

    """
    unit_n = unit_vector(normal[:3])
    unit_d = unit_vector(direction[:3])
    # A shear only makes sense when the slide direction lies in the plane.
    if abs(numpy.dot(unit_n, unit_d)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    slope = math.tan(angle)
    M = numpy.identity(4)
    M[:3, :3] += slope * numpy.outer(unit_d, unit_n)
    # Offset so that points already on the shear plane stay fixed.
    M[:3, 3] = -slope * numpy.dot(point[:3], unit_n) * unit_d
    return M
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    # (vectors in the shear plane are unchanged, so their cross product gives
    # the plane normal)
    l, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("No two linear independent eigenvectors found %s" % l)
    V = numpy.real(V[:, i]).squeeze().T
    lenorm = -1.0
    # pick the eigenvector pair with the largest cross product, i.e. the most
    # linearly independent pair, for a numerically stable normal
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        l = vector_norm(n)
        if l > lenorm:
            lenorm = l
            normal = n
    normal /= lenorm
    # direction and angle
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector corresponding to eigenvalue 1
    l, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    # dehomogenize the fixed point
    point /= point[3]
    return angle, direction, point, normal
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.
    matrix : array_like
        Non-degenerative homogeneous transformation matrix
    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerative.
    >>> T0 = translation_matrix((1, 2, 3))
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True
    """
    # work on the transpose, i.e. in row-vector convention internally
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    M /= M[3, 3]
    # P is M with the perspective partition removed
    P = M.copy()
    P[:, 3] = 0, 0, 0, 1
    if not numpy.linalg.det(P):
        raise ValueError("Matrix is singular")
    scale = numpy.zeros((3, ), dtype=numpy.float64)
    shear = [0, 0, 0]
    angles = [0, 0, 0]
    # extract and clear the perspective partition, if present
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0, 0, 0, 1
    else:
        perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
    translate = M[3, :3].copy()
    M[3, :3] = 0
    # Gram-Schmidt on the rows extracts scale and shear, leaving rotation
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # NOTE: shear is a plain list, but scale[2] is a numpy scalar, so the
    # reflected division coerces the slice to an ndarray before re-assignment
    shear[1:] /= scale[2]
    # ensure a right-handed coordinate system; flip scale and rows otherwise
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        scale *= -1
        row *= -1
    # recover static-frame xyz Euler angles; handle the gimbal-lock branch
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    This is the inverse of the decompose_matrix function.  The optional
    parts are applied in the fixed order: perspective, translation,
    rotation (static xyz Euler angles), shear, scale.

    scale : vector of 3 scaling factors
    shear : list of shear factors for x-y, x-z, y-z axes
    angles : list of Euler angles about static x, y, z axes
    translate : translation vector along x, y, z axes
    perspective : perspective partition of matrix
    """
    factors = []
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        factors.append(P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        factors.append(T)
    if angles is not None:
        factors.append(euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        Z[1, 2] = shear[2]
        Z[0, 2] = shear[1]
        Z[0, 1] = shear[0]
        factors.append(Z)
    if scale is not None:
        S = numpy.identity(4)
        S[0, 0] = scale[0]
        S[1, 1] = scale[1]
        S[2, 2] = scale[2]
        factors.append(S)
    M = numpy.identity(4)
    for factor in factors:
        M = numpy.dot(M, factor)
    M /= M[3, 3]  # normalize the homogeneous part
    return M
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.

    lengths are the cell edge lengths (a, b, c); angles are the cell
    angles in degrees.  The de-orthogonalization matrix is the inverse.
    """
    a, b, c = lengths
    alpha, beta, gamma = numpy.radians(angles)
    sina = math.sin(alpha)
    sinb = math.sin(beta)
    cosa = math.cos(alpha)
    cosb = math.cos(beta)
    cosg = math.cos(gamma)
    co = (cosa * cosb - cosg) / (sina * sinb)
    return numpy.array((
        (a * sinb * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0),
        (-a * sinb * co, b * sina, 0.0, 0.0),
        (a * cosb, b * cosa, c, 0.0),
        (0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
    """Return matrix to transform given vector set into second vector set.
    v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors.
    If usesvd is True, the weighted sum of squared deviations (RMSD) is
    minimized according to the algorithm by W. Kabsch [8]. Otherwise the
    quaternion based algorithm by B. Horn [9] is used (slower when using
    this Python implementation).
    The returned matrix performs rotation, translation and uniform scaling
    (if specified).
    >>> v0 = numpy.random.rand(3, 10)
    >>> M = superimposition_matrix(v0, v0)
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1))
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> S = scale_matrix(random.random())
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> M = concatenate_matrices(T, R, S)
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1)
    >>> M = superimposition_matrix(v0, v1, scaling=True)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v = numpy.empty((4, 100, 3), dtype=numpy.float64)
    >>> v[:, :, 0] = v0
    >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
    True
    """
    # drop a homogeneous 4th row, if present
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
    if v0.shape != v1.shape or v0.shape[1] < 3:
        raise ValueError("Vector sets are of wrong shape or type.")
    # move centroids to origin
    t0 = numpy.mean(v0, axis=1)
    t1 = numpy.mean(v1, axis=1)
    v0 = v0 - t0.reshape(3, 1)
    v1 = v1 - t1.reshape(3, 1)
    if usesvd:
        # Singular Value Decomposition of covariance matrix
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system
            R -= numpy.outer(u[:, 2], vh[2, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(4)
        M[:3, :3] = R
    else:
        # compute symmetric matrix N
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx),
             (yz-zy, xx-yy-zz, xy+yx, zx+xz),
             (zx-xz, xy+yx, -xx+yy-zz, yz+zy),
             (xy-yx, zx+xz, yz+zy, -xx-yy+zz))
        # quaternion: eigenvector corresponding to most positive eigenvalue
        l, V = numpy.linalg.eig(N)
        q = V[:, numpy.argmax(l)]
        q /= vector_norm(q) # unit quaternion
        q = numpy.roll(q, -1) # move w component to end
        # homogeneous transformation matrix
        M = quaternion_matrix(q)
    # scale: ratio of rms deviations from centroid
    if scaling:
        # note: squares the (already centroid-shifted) working copies in place
        v0 *= v0
        v1 *= v1
        M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
    # translation: map centroid t0 onto centroid t1
    M[:3, 3] = t1
    T = numpy.identity(4)
    T[:3, 3] = -t0
    M = numpy.dot(M, T)
    return M
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.
    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple
    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    """
    # decode the axis sequence; strings go through _AXES2TUPLE, tuples are
    # validated by a lookup in _TUPLE2AXES and then used directly
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
    except (AttributeError, KeyError):
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    # i, j, k are the three rotation axes in application order
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    if frame:
        # rotating frame: swap first and last angle
        ai, ak = ak, ai
    if parity:
        # odd axis permutation: negate all angles
        ai, aj, ak = -ai, -aj, -ak
    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk
    M = numpy.identity(4)
    if repetition:
        # sequences like xyx/zxz where first and last axes coincide
        M[i, i] = cj
        M[i, j] = sj*si
        M[i, k] = sj*ci
        M[j, i] = sj*sk
        M[j, j] = -cj*ss+cc
        M[j, k] = -cj*cs-sc
        M[k, i] = -sj*ck
        M[k, j] = cj*sc+cs
        M[k, k] = cj*cc-ss
    else:
        M[i, i] = cj*ck
        M[i, j] = sj*sc-cs
        M[i, k] = sj*cc+ss
        M[j, i] = cj*sk
        M[j, j] = sj*ss+cc
        M[j, k] = sj*cs-sc
        M[k, i] = -sj
        M[k, j] = cj*si
        M[k, k] = cj*ci
    return M
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.
    axes : One of 24 axis sequences as string or encoded tuple
    Note that many Euler angle triplets can describe one matrix.
    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print(axes, "failed")
    """
    # decode the axis sequence (see euler_matrix)
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j],  M[i, k])
            ay = math.atan2( sy,       M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            # gimbal lock: middle angle 0 or pi; third angle is arbitrary
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2( sy,       M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j],  M[k, k])
            ay = math.atan2(-M[k, i],  cy)
            az = math.atan2( M[j, i],  M[i, i])
        else:
            # gimbal lock for the non-repeating sequences
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2(-M[k, i],  cy)
            az = 0.0
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence.

    The quaternion is first converted to a homogeneous rotation matrix,
    from which the angles for the given axis convention are extracted.

    >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(angles, [0.123, 0, 0])
    True
    """
    rotation = quaternion_matrix(quaternion)
    return euler_from_matrix(rotation, axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.
    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple
    Quaternion is returned in (x, y, z, w) order.
    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
    True
    """
    # decode the axis sequence (see euler_matrix)
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    if frame:
        # rotating frame: swap first and last angle
        ai, ak = ak, ai
    if parity:
        aj = -aj
    # quaternion formulas use half angles
    ai /= 2.0
    aj /= 2.0
    ak /= 2.0
    ci = math.cos(ai)
    si = math.sin(ai)
    cj = math.cos(aj)
    sj = math.sin(aj)
    ck = math.cos(ak)
    sk = math.sin(ak)
    cc = ci*ck
    cs = ci*sk
    sc = si*ck
    ss = si*sk
    quaternion = numpy.empty((4, ), dtype=numpy.float64)
    if repetition:
        quaternion[i] = cj*(cs + sc)
        quaternion[j] = sj*(cc + ss)
        quaternion[k] = sj*(cs - sc)
        quaternion[3] = cj*(cc - ss)
    else:
        quaternion[i] = cj*sc - sj*cs
        quaternion[j] = cj*ss + sj*cc
        quaternion[k] = cj*cs - sj*sc
        quaternion[3] = cj*cc + sj*ss
    if parity:
        quaternion[j] *= -1
    return quaternion
def quaternion_about_axis(angle, axis):
    """Return quaternion (x, y, z, w) for rotation of *angle* about *axis*.

    >>> q = quaternion_about_axis(0.123, (1, 0, 0))
    >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
    True
    """
    q = numpy.zeros((4, ), dtype=numpy.float64)
    q[:3] = axis[:3]
    length = vector_norm(q)
    if length > _EPS:
        # scale the (normalized) axis by sin of the half angle
        q *= math.sin(angle/2.0) / length
    q[3] = math.cos(angle/2.0)
    return q
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion (x, y, z, w).

    >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
    True
    """
    q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
    norm_sq = numpy.dot(q, q)
    if norm_sq < _EPS:
        # degenerate (near-zero) quaternion: no rotation
        return numpy.identity(4)
    q *= math.sqrt(2.0 / norm_sq)
    outer = numpy.outer(q, q)
    M = numpy.identity(4)
    M[0, 0] = 1.0 - outer[1, 1] - outer[2, 2]
    M[0, 1] = outer[0, 1] - outer[2, 3]
    M[0, 2] = outer[0, 2] + outer[1, 3]
    M[1, 0] = outer[0, 1] + outer[2, 3]
    M[1, 1] = 1.0 - outer[0, 0] - outer[2, 2]
    M[1, 2] = outer[1, 2] - outer[0, 3]
    M[2, 0] = outer[0, 2] - outer[1, 3]
    M[2, 1] = outer[1, 2] + outer[0, 3]
    M[2, 2] = 1.0 - outer[0, 0] - outer[1, 1]
    return M
def quaternion_from_matrix(matrix):
    """Return quaternion (x, y, z, w) from homogeneous rotation matrix.

    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095])
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    q = numpy.empty((4, ), dtype=numpy.float64)
    trace = numpy.trace(M)
    if trace > M[3, 3]:
        # w is the dominant component
        q[3] = trace
        q[2] = M[1, 0] - M[0, 1]
        q[1] = M[0, 2] - M[2, 0]
        q[0] = M[2, 1] - M[1, 2]
    else:
        # pick the largest diagonal element for numerical stability
        i, j, k = 0, 1, 2
        if M[1, 1] > M[0, 0]:
            i, j, k = 1, 2, 0
        if M[2, 2] > M[i, i]:
            i, j, k = 2, 0, 1
        trace = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
        q[i] = trace
        q[j] = M[i, j] + M[j, i]
        q[k] = M[k, i] + M[i, k]
        q[3] = M[k, j] - M[j, k]
    q *= 0.5 / math.sqrt(trace * M[3, 3])
    return q
def quaternion_multiply(quaternion1, quaternion0):
    """Return the Hamilton product quaternion1 * quaternion0 (x, y, z, w).

    >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
    >>> numpy.allclose(q, [-44, -14, 48, 28])
    True
    """
    x0, y0, z0, w0 = quaternion0
    x1, y1, z1, w1 = quaternion1
    x = w1*x0 + x1*w0 + y1*z0 - z1*y0
    y = w1*y0 - x1*z0 + y1*w0 + z1*x0
    z = w1*z0 + x1*y0 - y1*x0 + z1*w0
    w = w1*w0 - x1*x0 - y1*y0 - z1*z0
    return numpy.array((x, y, z, w), dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion: vector part negated, scalar kept.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
    True
    """
    x, y, z, w = quaternion[0], quaternion[1], quaternion[2], quaternion[3]
    return numpy.array((-x, -y, -z, w), dtype=numpy.float64)
def quaternion_inverse(quaternion):
    """Return multiplicative inverse of quaternion (x, y, z, w).

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1])
    True
    """
    norm_sq = numpy.dot(quaternion, quaternion)
    return quaternion_conjugate(quaternion) / norm_sq
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.
    >>> q0 = random_quaternion()
    >>> q1 = random_quaternion()
    >>> q = quaternion_slerp(q0, q1, 0.0)
    >>> numpy.allclose(q, q0)
    True
    >>> q = quaternion_slerp(q0, q1, 1.0, 1)
    >>> numpy.allclose(q, q1)
    True
    >>> q = quaternion_slerp(q0, q1, 0.5)
    >>> angle = math.acos(numpy.dot(q0, q))
    >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
        numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
    True
    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    # trivial endpoints: avoid the trig entirely
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # quaternions are (anti)parallel; nothing to interpolate
        return q0
    if shortestpath and d < 0.0:
        # invert rotation
        d = -d
        q1 *= -1.0
    # spin adds extra full half-turns to the interpolation arc
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    isin = 1.0 / math.sin(angle)
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def random_quaternion(rand=None):
    """Return uniform random unit quaternion (x, y, z, w).

    rand : sequence of three floats in [0, 1), or None to draw them from
    numpy's global random state.

    >>> q = random_quaternion()
    >>> numpy.allclose(1.0, vector_norm(q))
    True
    >>> q = random_quaternion(numpy.random.random(3))
    >>> q.shape
    (4,)
    """
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    # Shoemake's subgroup algorithm for uniform sampling on S^3
    r1 = numpy.sqrt(1.0 - rand[0])
    r2 = numpy.sqrt(rand[0])
    t1 = 2.0 * math.pi * rand[1]
    t2 = 2.0 * math.pi * rand[2]
    return numpy.array((numpy.sin(t1) * r1,
                        numpy.cos(t1) * r1,
                        numpy.sin(t2) * r2,
                        numpy.cos(t2) * r2), dtype=numpy.float64)
def random_rotation_matrix(rand=None):
    """Return uniform random homogeneous rotation matrix.

    rand : optional sequence of three uniform [0, 1) variates used to
    derive the underlying random unit quaternion.

    >>> R = random_rotation_matrix()
    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
    True
    """
    quaternion = random_quaternion(rand)
    return quaternion_matrix(quaternion)
class Arcball(object):
    """Virtual Trackball Control.
    >>> ball = Arcball()
    >>> ball = Arcball(initial=numpy.identity(4))
    >>> ball.place([320, 320], 320)
    >>> ball.down([500, 250])
    >>> ball.drag([475, 275])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 3.90583455)
    True
    >>> ball = Arcball(initial=[0, 0, 0, 1])
    >>> ball.place([320, 320], 320)
    >>> ball.setaxes([1,1,0], [-1, 1, 0])
    >>> ball.setconstrain(True)
    >>> ball.down([400, 200])
    >>> ball.drag([200, 400])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 0.2055924)
    True
    >>> ball.next()
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.
        initial : quaternion or rotation matrix
        """
        self._axis = None        # currently active constraint axis, if any
        self._axes = None        # candidate constraint axes set by setaxes()
        self._radius = 1.0       # trackball radius in window coordinates
        self._center = [0.0, 0.0]  # trackball center in window coordinates
        self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
        self._constrain = False
        if initial is None:
            # identity rotation (x, y, z, w)
            self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
        else:
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix.")
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.
        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set axes to constrain rotations."""
        if axes is None:
            self._axes = None
        else:
            self._axes = [unit_vector(axis) for axis in axes]
    def setconstrain(self, constrain):
        """Set state of constrain to axis mode."""
        self._constrain = constrain == True
    def getconstrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            # lock this drag to the axis whose arc is nearest the click
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # rotation from vdown to vnow, expressed as a quaternion
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # cursor has not moved appreciably; keep the down rotation
            self._qnow = self._qdown
        else:
            q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)]
            self._qnow = quaternion_multiply(q, self._qdown)
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # extrapolate past qnow by slerping with fraction > 1
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Project window coordinates onto the unit trackball sphere.

    Points outside the sphere are mapped onto its silhouette circle.
    """
    vx = (point[0] - center[0]) / radius
    vy = (center[1] - point[1]) / radius
    v = numpy.array((vx, vy, 0.0), dtype=numpy.float64)
    n = vx * vx + vy * vy
    if n > 1.0:
        # position outside of sphere: normalize onto the rim
        v /= math.sqrt(n)
    else:
        v[2] = math.sqrt(1.0 - n)
    return v
def arcball_constrain_to_axis(point, axis):
    """Return sphere point on the arc perpendicular to *axis*."""
    v = numpy.array(point, dtype=numpy.float64, copy=True)
    a = numpy.array(axis, dtype=numpy.float64, copy=True)
    # project the point onto the plane whose normal is the axis
    v -= a * numpy.dot(a, v)
    length = vector_norm(v)
    if length > _EPS:
        if v[2] < 0.0:
            # keep the result on the visible (front) hemisphere
            v *= -1.0
        v /= length
        return v
    # degenerate: point is parallel to the axis; pick any perpendicular
    if a[2] == 1.0:
        return numpy.array([1, 0, 0], dtype=numpy.float64)
    return unit_vector([-a[1], a[0], 0])
def arcball_nearest_axis(point, axes):
    """Return the axis whose constraint arc lies nearest to *point*."""
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    best_axis = None
    best_dot = -1.0
    for candidate in axes:
        # dot of the constrained point with the original measures closeness
        t = numpy.dot(arcball_constrain_to_axis(point, candidate), point)
        if t > best_dot:
            best_axis = candidate
            best_dot = t
    return best_axis
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
# lookup table for the "next" rotation axis given index and parity
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
# 's' prefix = static (extrinsic) frame, 'r' prefix = rotating (intrinsic)
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# inverse mapping, used to validate tuple-encoded axis sequences
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
# helper functions
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    When *out* is given, the result is written into it in place and None
    is returned (matching numpy ufunc conventions).

    >>> abs(vector_norm([3.0, 4.0]) - 5.0) < 1e-12
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1.0])
    1.0
    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is not None:
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out)
        return None
    if data.ndim == 1:
        # fast path for plain vectors
        return math.sqrt(numpy.dot(data, data))
    data *= data
    result = numpy.atleast_1d(numpy.sum(data, axis=axis))
    numpy.sqrt(result, result)
    return result
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by Euclidean norm along axis.

    When *out* is given, normalization happens in place in *out* and
    None is returned.

    >>> numpy.allclose(unit_vector([3.0, 4.0]), [0.6, 0.8])
    True
    >>> list(unit_vector([]))
    []
    >>> list(unit_vector([1.0]))
    [1.0]
    """
    if out is None:
        data = numpy.array(data, dtype=numpy.float64, copy=True)
        if data.ndim == 1:
            # plain vector: divide by its length and return immediately
            data /= math.sqrt(numpy.dot(data, data))
            return data
    else:
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        data = out
    length = numpy.atleast_1d(numpy.sum(data * data, axis))
    numpy.sqrt(length, length)
    if axis is not None:
        # align the lengths for broadcasting against the original shape
        length = numpy.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
    return None
def random_vector(size):
    """Return array of *size* random doubles in the half-open interval
    [0.0, 1.0), drawn from numpy's global random state.

    >>> v = random_vector(10000)
    >>> numpy.all(v >= 0.0) and numpy.all(v < 1.0)
    True
    """
    return numpy.random.random(size)
def inverse_matrix(matrix):
    """Return inverse of a square transformation matrix.

    Thin wrapper around numpy.linalg.inv; raises numpy.linalg.LinAlgError
    for singular input.

    >>> M0 = random_rotation_matrix()
    >>> numpy.allclose(inverse_matrix(M0.T), numpy.linalg.inv(M0.T))
    True
    """
    return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
    """Return the product of a series of 4x4 transformation matrices.

    With no arguments the 4x4 identity is returned.

    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    """
    result = numpy.identity(4)
    for matrix in matrices:
        result = numpy.dot(result, matrix)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two homogeneous matrices perform the same transform.

    Both matrices are normalized by their [3, 3] element first, so inputs
    that differ only by an overall scale compare equal.

    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    """
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
"""Try import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
try:
module = __import__(module_name)
except ImportError:
if warn:
warnings.warn("Failed to import module " + module_name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("No Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True
# https://github.com/ros/geometry/blob/hydro-devel/tf/src/tf/transformations.py
def apply_transform(pos, ori, points):
    # Transform an array of 3-D (or homogeneous 4-D) points by the rigid
    # transform given by translation `pos` and quaternion `ori`
    # (presumably pybullet's (x, y, z, w) convention — TODO confirm).
    import numpy as np
    import pybullet as p
    # build a 4x4 homogeneous transform from the quaternion and translation
    T = np.eye(4)
    T[:3, :3] = np.array(p.getMatrixFromQuaternion(ori)).reshape((3, 3))
    T[:3, -1] = pos
    if len(points.shape) == 1:
        points = points[None]  # promote a single point to shape (1, d)
    homogeneous = points.shape[-1] == 4
    if not homogeneous:
        # append a homogeneous coordinate of 1 to each point
        points_homo = np.ones((points.shape[0], 4))
        points_homo[:, :3] = points
        points = points_homo
    points = T.dot(points.T).T
    if not homogeneous:
        points = points[:, :3]  # drop the homogeneous coordinate again
    return points
def assign_positions_to_fingers(tips, goal_tips):
    """Match three goal fingertip positions to three current fingertips.

    Tries every permutation of the three goal rows and keeps the one with
    the smallest Euclidean (Frobenius) distance to *tips*.

    Parameters:
        tips: (3, d) array of current fingertip positions.
        goal_tips: (3, d) array of goal fingertip positions.

    Returns:
        (opt_tips, opt_inds): the goal rows reordered to best match
        *tips*, and the permutation (tuple of row indices) used.
    """
    import numpy as np
    import itertools
    # bug fix: was min_cost = 1000000 — an arbitrary constant that made the
    # function return the empty default when all costs exceeded it; use
    # infinity so the first permutation always initializes the optimum
    min_cost = float('inf')
    opt_tips = goal_tips
    opt_inds = (0, 1, 2)
    for perm in itertools.permutations([0, 1, 2]):
        candidate = goal_tips[perm, :]
        cost = np.linalg.norm(candidate - tips)
        if cost < min_cost:
            min_cost = cost
            opt_tips = candidate
            opt_inds = perm
    return opt_tips, opt_inds
| 58,641 | 35.355859 | 79 | py |
pyparrot | pyparrot-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pyparrot documentation build configuration file, created by
# sphinx-quickstart on Tue May 29 13:55:14 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# the mock stuff was borrowed from hagelslag to help make things work on readthedocs
from mock import Mock as MagicMock
class Mock(MagicMock):
    # Stand-in for heavy/unavailable dependencies so Sphinx autodoc can
    # import the package on Read the Docs: any attribute access returns
    # another Mock instead of raising ImportError/AttributeError.
    @classmethod
    def __getattr__(cls, name):
        return Mock()
# Register mocks for all compiled/optional dependencies before autodoc
# imports the package, so the doc build needs none of them installed.
MOCK_MODULES = ['numpy', 'scipy', 'zeroconf', 'cv2', 'untangle', 'bluepy', 'bluepy.btle',
                'ipaddress', 'queue', 'http.server', 'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui',
                'PyQt5.QtWidgets']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# make the package root importable so autodoc can find the modules
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyparrot'
copyright = '2018, Amy McGovern'
author = 'Amy McGovern'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyparrotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pyparrot.tex', 'pyparrot Documentation',
     'Amy McGovern', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pyparrot', 'pyparrot Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'pyparrot', 'pyparrot Documentation',
     author, 'pyparrot', 'One line description of project.',
     'Miscellaneous'),
]
| 5,536 | 29.761111 | 92 | py |
ocropy2 | ocropy2-master/ocropy2/ocrnet.py | #def _patched_view_4d(*tensors):
# output = []
# for t in tensors:
# assert t.dim() == 3
# size = list(t.size())
# size.insert(2, 1)
# output += [t.contiguous().view(*size)]
# return output
#
#import torch.nn._functions.conv
#
#torch.nn._functions.conv._view4d = _patched_view_4d
from pylab import *
import os
import glob
import random as pyr
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import ocrcodecs
import cctc
import lineest
import inputs
class Ignore(Exception):
    """Control-flow exception caught by SimpleOCR.train_batch to skip a batch.

    NOTE(review): nothing in this file raises it; presumably raised by
    callees (cctc / codec helpers) -- confirm.
    """
    pass
# In[8]:
def asnd(x, torch_axes=None):
    """Convert a torch tensor (or Variable) to a numpy array.

    Numpy arrays pass through untouched; CUDA tensors are moved to the CPU
    first.  If torch_axes is given, the result is transposed accordingly.
    """
    if isinstance(x, np.ndarray):
        return x
    if isinstance(x, Variable):
        x = x.data
    if isinstance(x, (torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.IntTensor)):
        x = x.cpu()
    assert isinstance(x, torch.Tensor)
    result = x.numpy()
    if torch_axes is None:
        return result
    return result.transpose(torch_axes)
# In[11]:
def novar(x):
    """Strip the autograd Variable wrapper, if any; pass other values through."""
    return x.data if isinstance(x, Variable) else x
def astorch(x, axes=None, single=True):
    """Convert a numpy array to a torch tensor; pass non-arrays through.

    axes: optional transpose applied before conversion.
    single: when True (default), float64 input is downcast to float32.

    Raises ValueError for unsupported dtypes.  (The original called an
    undefined name ``error(...)`` here, which raised NameError instead.)
    """
    if isinstance(x, np.ndarray):
        if axes is not None:
            x = x.transpose(axes)
        if x.dtype == np.dtype("f"):
            return torch.FloatTensor(x)
        elif x.dtype == np.dtype("d"):
            if single:
                return torch.FloatTensor(x)
            else:
                return torch.DoubleTensor(x)
        elif x.dtype == np.dtype("i"):
            return torch.IntTensor(x)
        else:
            raise ValueError("unknown dtype: %s" % x.dtype)
    return x
# In[13]:
def typeas(x, y):
    """Make x the same type as y, for numpy, torch, torch.cuda."""
    assert not isinstance(x, Variable)
    if isinstance(y, Variable):
        y = y.data
    if isinstance(y, np.ndarray):
        return asnd(x)
    if isinstance(x, np.ndarray):
        wants_float = isinstance(y, (torch.FloatTensor, torch.cuda.FloatTensor))
        x = torch.FloatTensor(x) if wants_float else torch.DoubleTensor(x)
    return x.type_as(y)
# In[16]:
def one_sequence_softmax(x):
    """Compute softmax over a sequence; shape is (l, d)"""
    y = asnd(x)
    assert y.ndim==2, "%s: input should be (length, depth)" % y.shape
    l, d = y.shape
    # NOTE(review): this computes amax - y rather than y - amax, i.e. the
    # logits are negated before exponentiation.  A network trained through
    # this is self-consistent (it learns negated scores), but confirm the
    # inversion is intentional before reusing this function elsewhere.
    y = amax(y, axis=1)[:, newaxis] -y
    y = clip(y, -80, 80)  # keep exp() from overflowing
    y = exp(y)
    y = y / sum(y, axis=1)[:, newaxis]  # normalize each row to sum to 1
    return typeas(y, x)
def sequence_softmax(x):
    """Compute softmax over a batch of sequences; shape is (b, l, d)."""
    batch = asnd(x)
    assert batch.ndim == 3, "%s: input should be (batch, length, depth)" % (batch.shape,)
    for row in range(batch.shape[0]):
        batch[row] = one_sequence_softmax(batch[row])
    return typeas(batch, x)
def is_normalized(x, eps=1e-3):
    """Check whether a batch of sequences (b, l, d) sums to one along d."""
    assert x.dim() == 3
    totals = x.sum(2)
    return (totals - 1.0).abs().lt(eps).all()
def ctc_align(prob, target):
    """Perform CTC alignment on torch sequence batches (using ocrolstm)"""
    # the cctc extension works on CPU tensors
    prob_ = prob.cpu()
    target = target.cpu()
    b, l, d = prob.size()
    bt, lt, dt = target.size()
    # batch and depth must agree; sequence lengths may differ
    assert bt==b, (bt, b)
    assert dt==d, (dt, d)
    # both inputs must already be probability distributions along d
    assert is_normalized(prob), prob
    assert is_normalized(target), target
    # placeholder output; presumably resized/filled in place by cctc -- confirm
    result = torch.rand(1)
    cctc.ctc_align_targets_batch(result, prob_, target)
    return typeas(result, prob)
class Textline2Img(nn.Module):
    """Insert a singleton channel axis: (b, l, d) -> (b, 1, l, d)."""
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, seq):
        batch, length, depth = seq.size()
        return seq.view(batch, 1, length, depth)
class Img2Seq(nn.Module):
    """Flatten BDWH images to BLD sequences by merging depth and height."""
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, img):
        batch, depth, width, height = img.size()
        bwdh = img.permute(0, 2, 1, 3).contiguous()
        return bwdh.view(batch, width, depth * height)
class ImgMaxSeq(nn.Module):
    """Collapse BDWH images to BWD sequences by taking the max over H."""
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, img):
        # BDWH -> BDW -> BWD.  The original chained .squeeze(3) after
        # max(3)[0]; since torch 0.4, max(dim) already drops the reduced
        # axis, so the extra squeeze raised IndexError on the 3-d result.
        return img.max(3)[0].permute(0, 2, 1).contiguous()
class ImgSumSeq(nn.Module):
    """Collapse BDWH images to BWD sequences by summing over H."""
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, img):
        # BDWH -> BDW -> BWD.  The original wrote img.sum(3)[0]... -- but
        # unlike max, sum returns a plain tensor, so [0] selected batch
        # element 0 and the following squeeze(3) then failed.
        return img.sum(3).permute(0, 2, 1).contiguous()
class Lstm2Dto1D(nn.Module):
    """An LSTM that summarizes one dimension."""
    def __init__(self, ninput=None, noutput=None):
        nn.Module.__init__(self)
        self.ninput = ninput
        self.noutput = noutput
        self.lstm = nn.LSTM(ninput, noutput, 1, bidirectional=False)
    def forward(self, img, volatile=False):
        """Run the LSTM along H for each (batch, column); keep the final step (BWD)."""
        # BDWH -> HBWD -> HBsD
        b, d, w, h = img.size()
        seq = img.permute(3, 0, 2, 1).contiguous().view(h, b*w, d)
        bs = b*w
        # NOTE(review): Variable(..., volatile=...) is pre-0.4 torch API
        h0 = Variable(typeas(torch.zeros(1, bs, self.noutput), img), volatile=volatile)
        c0 = Variable(typeas(torch.zeros(1, bs, self.noutput), img), volatile=volatile)
        # HBsD -> HBsD
        post_lstm, _ = self.lstm(seq, (h0, c0))
        # HBsD -> BsD -> BWD; select the last time step over H
        final = post_lstm.select(0, h-1).view(b, w, self.noutput)
        return final
class RowwiseLSTM(nn.Module):
    """Run a (bi)directional LSTM along W for every row of a BDHW image."""
    def __init__(self, ninput=None, noutput=None, ndir=2):
        nn.Module.__init__(self)
        self.ndir = ndir
        self.ninput = ninput
        self.noutput = noutput
        # bidirectional flag: ndir-1 maps 2 -> 1 (True), 1 -> 0 (False)
        self.lstm = nn.LSTM(ninput, noutput, 1, bidirectional=self.ndir-1)
    def forward(self, img):
        # legacy pre-0.4 autograd: propagate the volatile flag from the input
        volatile = not isinstance(img, Variable) or img.volatile
        b, d, h, w = img.size()
        # BDHW -> WHBD -> WLD
        seq = img.permute(3, 2, 0, 1).contiguous().view(w, h*b, d)
        # WLD
        h0 = typeas(torch.zeros(self.ndir, h*b, self.noutput), img)
        c0 = typeas(torch.zeros(self.ndir, h*b, self.noutput), img)
        h0 = Variable(h0, volatile=volatile)
        c0 = Variable(c0, volatile=volatile)
        seqresult, _ = self.lstm(seq, (h0, c0))
        # WLD' -> BD'HW (depth doubles when bidirectional)
        result = seqresult.view(w, h, b, self.noutput*self.ndir).permute(2, 3, 1, 0)
        return result
class Lstm2D(nn.Module):
    """A 2D LSTM module."""
    def __init__(self, ninput=None, noutput=None, npre=None, nhidden=None, ndir=2, ksize=3):
        nn.Module.__init__(self)
        self.ndir = ndir
        # npre/nhidden default to noutput when not given
        npre = npre or noutput
        nhidden = nhidden or noutput
        self.sizes = (ninput, npre, nhidden, noutput)
        assert ksize%2==1  # odd kernel so padding preserves spatial size
        padding = (ksize-1)//2
        self.conv = nn.Conv2d(ninput, npre, kernel_size=ksize, padding=padding)
        self.hlstm = RowwiseLSTM(npre, nhidden, ndir=ndir)
        # vertical pass consumes the (possibly doubled) horizontal output depth
        self.vlstm = RowwiseLSTM(self.ndir*nhidden, noutput, ndir)
    def forward(self, img, volatile=False):
        """Conv, then a horizontal LSTM pass, then a vertical one via transpose."""
        ninput, npre, nhidden, noutput = self.sizes
        # BDHW
        filtered = self.conv(img)
        horiz = self.hlstm(filtered)
        horizT = horiz.permute(0, 1, 3, 2).contiguous()
        vert = self.vlstm(horizT)
        vertT = vert.permute(0, 1, 3, 2).contiguous()
        return vertT
class LstmLin(nn.Module):
    """A simple bidirectional LSTM with linear output mapping."""
    def __init__(self, ninput=None, nhidden=None, noutput=None, ndir=2):
        nn.Module.__init__(self)
        assert ninput is not None
        assert nhidden is not None
        assert noutput is not None
        # NOTE(review): self.ndir is hardwired to 2, ignoring the ndir
        # argument (which is still used for the conv below) -- verify intent
        self.ndir = 2
        self.ninput = ninput
        self.nhidden = nhidden
        self.nooutput = noutput  # NOTE(review): misspelled attribute, apparently unused
        self.layers = []
        self.lstm = nn.LSTM(ninput, nhidden, 1, bidirectional=self.ndir-1)
        # 1x1 conv = per-timestep linear map to the output classes
        self.conv = nn.Conv1d(ndir*nhidden, noutput, 1)
    def forward(self, seq, volatile=False):
        # BLD -> LBD
        bs, l, d = seq.size()
        assert d==self.ninput, seq.size()
        seq = seq.permute(1, 0, 2).contiguous()
        # legacy pre-0.4 autograd API (volatile)
        h0 = Variable(torch.zeros(self.ndir, bs, self.nhidden).type_as(novar(seq)),
                      volatile=volatile)
        c0 = Variable(torch.zeros(self.ndir, bs, self.nhidden).type_as(novar(seq)),
                      volatile=volatile)
        post_lstm, _ = self.lstm(seq, (h0, c0))
        assert post_lstm is not None
        # LBD -> BDL
        post_conv = self.conv(post_lstm.permute(1, 2, 0).contiguous())
        # BDL -> BLD
        return post_conv.permute(0, 2, 1).contiguous()
def conv_layers(sizes=[64, 64], k=3, mp=(1,2), mpk=2, relu=1e-3, bn=None, fmp=False):
    """Construct a set of conv layers programmatically based on some parameters.

    A negative entry in `sizes` inserts an Lstm2D of that (absolute) width
    instead of a conv/relu/pool group.
    """
    if isinstance(mp, (int, float)):
        mp = (1, mp)
    result = []
    nchan = 1
    for size in sizes:
        if size < 0:
            size = -size
            result.append(Lstm2D(nchan, size))
        else:
            result.append(nn.Conv2d(nchan, size, k, padding=(k-1)//2))
            result.append(nn.LeakyReLU(relu) if relu > 0 else nn.ReLU())
            if bn is not None:
                assert isinstance(bn, float)
                result.append(nn.BatchNorm2d(size, momentum=bn))
            if fmp:
                result.append(nn.FractionalMaxPool2d(kernel_size=mpk, output_ratio=(1.0/mp[0], 1.0/mp[1])))
            else:
                result.append(nn.MaxPool2d(kernel_size=mpk, stride=mp))
        nchan = size
    return result
def make_projector(project):
    """Given the name of a projection method for the H dimension, turns a BDWH image
    into a BLD sequence, with DH combined into the new depth. Projection methods
    include concat, max, sum, and lstm:n, with n being the new depth."""
    if project is None or project in ("concat", "cocnat"):
        # "cocnat" is the original misspelling, kept for backward
        # compatibility; the documented name "concat" previously fell
        # through to the "unknown projection" exception.
        return Img2Seq()
    elif project == "max":
        return ImgMaxSeq()
    elif project == "sum":
        return ImgSumSeq()
    elif project.startswith("lstm:"):
        _, n = project.split(":")
        return Lstm2Dto1D(int(n))
    else:
        raise Exception("unknown projection: "+project)
def wrapup(layers):
    """Wrap a list of layers in nn.Sequential unless it is a single layer."""
    assert isinstance(layers, list)
    return layers[0] if len(layers) == 1 else nn.Sequential(*layers)
def size_tester(model, shape):
    """Given a layer or a list of layers, runs a random input of the given shape
    through it and returns the output shape."""
    if isinstance(model, list):
        model = nn.Sequential(*model)
    test = torch.rand(*shape)
    # volatile=True: legacy (pre-0.4) way of disabling autograd for the probe
    test_output = model(Variable(test, volatile=True))
    # Python 2 print statement
    print "# preproc", test.size(), "->", test_output.size()
    return test_output.size()
def make_lstm(ninput=48, nhidden=100, noutput=None, project=None, **kw):
    """Builds an LSTM-based recognizer, possibly with convolutional preprocessing."""
    layers = []
    d = ninput
    if "sizes" in kw:
        # convolutional front end: image conversion, conv stack, H projection
        layers += [Textline2Img()]
        layers += conv_layers(**kw)
        layers += [make_projector(project)]
        # probe with a dummy input to discover the depth feeding the LSTM
        b, l, d = size_tester(layers, (1, 400, d))
        assert b==1, (b, l, d)
    layers += [LstmLin(ninput=d, nhidden=nhidden, noutput=noutput)]
    return wrapup(layers)
# In[26]:
def ctc_loss(logits, target):
    """A CTC loss function for BLD sequence training."""
    assert logits.is_contiguous()
    assert target.is_contiguous()
    probs = sequence_softmax(logits)
    aligned = ctc_align(probs, target)
    assert aligned.size()==probs.size(), (aligned.size(), probs.size())
    # NOTE(review): deltas = aligned - probs is the negative of the usual
    # CTC gradient (probs - aligned); confirm the intended sign convention
    # together with one_sequence_softmax's inverted logits.
    deltas = aligned - probs
    # inject the gradient directly; caller is expected to step the optimizer
    logits.backward(deltas.contiguous())
    return deltas, aligned
def mock_ctc_loss(logits, target):
    """Debugging stand-in for ctc_loss: backprop tiny random deltas (requires CUDA)."""
    deltas = torch.randn(*logits.size()) * 0.001
    logits.backward(deltas.cuda())
# In[30]:
# Registry of output codecs by name; maketarget/transcribe accept either a
# name from this dict or a codec object directly.
codecs = dict(
    ascii=ocrcodecs.ascii_codec()
)
def maketarget(s, codec="ascii"):
    """Turn a string into an LD target."""
    assert isinstance(s, (str, unicode))  # Python 2: accept both string types
    codec = codecs.get(codec, codec)  # resolve names; pass codec objects through
    codes = codec.encode(s)
    n = codec.size()
    target = astorch(ocrcodecs.make_target(codes, n))
    return target
def transcribe(probs, codec="ascii"):
    """Decode an LD probability sequence into a string using the given codec."""
    codes = ocrcodecs.translate_back(asnd(probs))
    codec = codecs.get(codec, codec)  # resolve names; pass codec objects through
    s = codec.decode(codes)
    return "".join(s)
def makeseq(image):
    """Turn an image into an LD sequence (transposed; RGB averaged to gray)."""
    assert isinstance(image, np.ndarray), type(image)
    gray = image
    if gray.ndim == 3 and gray.shape[2] == 3:
        gray = np.mean(gray, 2)
    assert gray.ndim == 2
    return astorch(gray.T)
def makebatch(images, for_target=False):
    """Given a list of LD sequences, make a BLD batch tensor.

    Shorter sequences are zero-padded; with for_target=True, depth 0 (the
    epsilon class) defaults to 1 everywhere before the targets are copied in.
    """
    assert isinstance(images, list), type(images)
    assert isinstance(images[0], torch.FloatTensor), images
    assert images[0].dim() == 2, images[0].dim()
    sizes = array([list(seq.size()) for seq in images], 'i')
    maxl, maxd = amax(sizes, axis=0)
    batch = torch.zeros(len(images), int(maxl), int(maxd))
    if for_target:
        batch[:, :, 0] = 1.0
    for index, seq in enumerate(images):
        nl, nd = seq.size()
        batch[index, :nl, :nd] = seq
    return batch
#import memory_profiler
class SimpleOCR(object):
    """Perform simple OCRopus-like 1D LSTM OCR.

    Wraps a torch model with a codec, optimizer, and line normalizer;
    provides train/predict/recognize entry points.  Python 2 / legacy torch.
    """
    def __init__(self, model, lr=1e-4, momentum=0.9, ninput=48, builder=None,
                 cuda=True, codec=None, mname=None):
        # NOTE(review): ninput, builder, and mname are accepted but unused here
        if codec is None: codec = ocrcodecs.ascii_codec()
        self.codec = codec
        self.noutput = self.codec.size()
        self.model = model
        self.lr = lr
        self.momentum = momentum
        self.cuda = cuda
        self.normalizer = lineest.CenterNormalizer()
        self.setup_model()
    def setup_model(self):
        """(Re)create the optimizer and training counter for self.model."""
        if self.model is None: return
        if self.cuda: self.model.cuda()
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=self.momentum)
        # META travels with the saved model; ntrain counts training samples
        if not hasattr(self.model, "META"):
            self.model.META = dict(ntrain=0)
        self.ntrain = self.model.META["ntrain"]
    def gpu(self):
        """Move the model to CUDA and rebuild the optimizer."""
        self.cuda = True
        self.setup_model()
    def cpu(self):
        # NOTE(review): setup_model only ever calls model.cuda(); this does
        # not actually move an already-CUDA model back to the CPU -- verify
        self.cuda = False
        self.setup_model()
    def set_lr(self, lr, momentum=0.9):
        """Change the learning rate/momentum by rebuilding the optimizer."""
        self.lr = lr
        self.momentum = momentum
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=self.momentum)
    def save(self, fname):
        """Save this model."""
        # "%0..." patterns in fname are filled with the training counter
        if "%0" in fname:
            fname = fname % self.ntrain
        print "# saving", fname
        if not hasattr(self.model, "META"):
            self.model.META = {}
        self.model.META["ntrain"] = self.ntrain
        torch.save(self.model, fname)
    def load(self, fname):
        """Load this model."""
        self.model = torch.load(fname)
        if not hasattr(self.model, "META"):
            self.model.META = {}
        self.ntrain = self.model.META.get("ntrain", 0)
        self.setup_model()
    def C(self, x):
        """Convert to cuda if required."""
        if self.cuda: return x.cuda()
        return x
    def pad_length(self, input, r=20):
        """Zero-pad a (b, 1, l, d) batch by r extra time steps on the right."""
        b, f, l, d = input.shape
        assert f==1, "must have #features == 1"
        result = np.zeros((b, f, l+r, d))
        result[:, :, :l, :] = input
        return result
    def train_batch(self, input, target):
        """Train a BLD input batch against a BLD target batch."""
        assert input.shape[0] == target.shape[0]
        input = self.pad_length(input)
        input = astorch(input)
        target = astorch(target)
        # reset the debug attributes so a failure leaves consistent state
        self.input = torch.FloatTensor()
        self.logits = torch.FloatTensor()
        self.aligned = torch.FloatTensor()
        self.target = torch.FloatTensor()
        try:
            self.ntrain += input.size(0)
            self.input = Variable(self.C(input))
            self.target = self.C(target)
            # print input.size(), input.min(), input.max()
            # print target.size(), target.min(), target.max()
            output = self.model.forward(self.input)
            # model emits BDL; training code works in BLD
            output = output.permute(0, 2, 1).contiguous()
            self.logits = output
            self.probs = sequence_softmax(self.logits)
            self.optimizer.zero_grad()
            # ctc_loss calls logits.backward() internally
            _, self.aligned = ctc_loss(self.logits, self.target)
            self.optimizer.step()
        except Ignore:
            # skip the batch but dump sizes for debugging
            print "input", input.size()
            print "logits", self.logits.size()
            print "aligned", self.aligned.size()
            print "target", self.target.size()
    def train(self, image, transcript):
        """Train a single image and its transcript using LSTM+CTC."""
        input = makeseq(image).unsqueeze(0)
        target = maketarget(transcript).unsqueeze(0)
        self.train_batch(input, target)
    def train_multi(self, images, transcripts):
        """Train a list of images and their transcripts using LSTM+CTC."""
        images = makebatch([makeseq(x) for x in images])
        targets = makebatch([maketarget(x) for x in transcripts], for_target=True)
        self.train_batch(images, targets)
    def predict_batch(self, input):
        """Predict transcripts for a (b, 1, l, d) input batch."""
        input = self.pad_length(input)
        input = astorch(input)
        # volatile=True: legacy no-grad inference
        input = Variable(self.C(input), volatile=True)
        output = self.model.forward(input)
        output = output.permute(0, 2, 1).contiguous()
        probs = novar(sequence_softmax(output))
        result = [transcribe(probs[i]) for i in range(len(probs))]
        return result
    def recognize(self, lines):
        """Recognize a list of raw line images; returns transcripts in input order."""
        data = inputs.itlines(lines)
        data = inputs.itmapper(data, input=self.normalizer.measure_and_normalize)
        data = inputs.itimbatched(data, 20)
        data = inputs.itlinebatcher(data)
        result = []
        for batch in data:
            input = batch["input"]
            if input.ndim==3:
                input = np.expand_dims(input, 1)
            try:
                predicted = self.predict_batch(input)
            except RuntimeError:
                # best effort: emit placeholders rather than aborting the page
                predicted = ["_"] * len(input)
            for i in range(len(predicted)):
                result.append((batch["id"][i], predicted[i]))
        # batching reordered the lines; restore original order by id
        return [transcript for i, transcript in sorted(result)]
| 17,908 | 34.53373 | 100 | py |
ocropy2 | ocropy2-master/ocropy2/layers.py | import sys
import numpy as np
import torch
from torch import nn
from torch.legacy import nn as legnn
from torch.autograd import Variable
# Register this module under the bare name "layers" as well.
# NOTE(review): this reads sys.modules["ocroseg.layers"], which raises
# KeyError unless ocroseg.layers was already imported -- verify.
sys.modules["layers"] = sys.modules["ocroseg.layers"]
# Axis-order tags used by the layer classes below
# (B=batch, D=depth, L=length, W=width, H=height).
BD = "BD"
LBD = "LBD"
LDB = "LDB"
BDL = "BDL"
BLD = "BLD"
BWHD = "BWHD"
BDWH = "BDWH"
BWH = "BWH"
def lbd2bdl(x):
    """Reorder a rank-3 tensor from LBD to BDL layout."""
    assert x.dim() == 3
    return x.permute(1, 2, 0).contiguous()
def bdl2lbd(x):
    """Reorder a rank-3 tensor from BDL to LBD layout."""
    assert x.dim() == 3
    return x.permute(2, 0, 1).contiguous()
def typeas(x, y):
    """Make x the same type as y, for numpy, torch, torch.cuda.

    Fix: the numpy branch originally called ``asnd(x)``, a helper that is
    not defined anywhere in this module, so converting toward a numpy
    target always raised NameError.  The conversion is done inline now.
    """
    assert not isinstance(x, Variable)
    if isinstance(y, Variable):
        y = y.data
    if isinstance(y, np.ndarray):
        # convert x to numpy (x cannot be a Variable per the assert above)
        if isinstance(x, np.ndarray):
            return x
        return x.cpu().numpy()
    if isinstance(x, np.ndarray):
        if isinstance(y, (torch.FloatTensor, torch.cuda.FloatTensor)):
            x = torch.FloatTensor(x)
        else:
            x = torch.DoubleTensor(x)
    return x.type_as(y)
class Viewer(nn.Module):
    """Reshape inputs to the fixed shape supplied at construction time."""
    def __init__(self, *args):
        nn.Module.__init__(self)
        self.shape = args
    def forward(self, x):
        return x.view(*self.shape)
    def __repr__(self):
        return "Viewer %s" % (self.shape, )
class Textline2Img(nn.Module):
    """Insert a singleton depth axis: (b, w, h) -> (b, 1, w, h)."""
    iorder = BWH
    oorder = BDWH
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, seq):
        batch, width, height = seq.size()
        return seq.view(batch, 1, width, height)
class Img2Seq(nn.Module):
    """Merge depth and height of a BDWH image into a BDL sequence."""
    iorder = BDWH
    oorder = BDL
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, img):
        batch, depth, width, height = img.size()
        bdhw = img.permute(0, 1, 3, 2).contiguous()
        return bdhw.view(batch, depth * height, width)
class ImgMaxSeq(nn.Module):
    """Collapse BDWH to BDL (= BDW) by taking the maximum over H."""
    iorder = BDWH
    oorder = BDL
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, img):
        # BDWH -> BDW.  The original chained .squeeze(3) after max(3)[0];
        # since torch 0.4, max(dim) already drops the reduced axis, so the
        # extra squeeze raised IndexError on the 3-d result.
        return img.max(3)[0]
class ImgSumSeq(nn.Module):
    """Collapse BDWH to BDL (= BDW) by summing over H."""
    iorder = BDWH
    oorder = BDL
    def __init__(self):
        nn.Module.__init__(self)
    def forward(self, img):
        # BDWH -> BDW.  The original `img.sum(3)[0]...` indexed batch
        # element 0 (sum, unlike max, returns a tensor rather than a
        # tuple) and then crashed in squeeze; its trailing permute also
        # disagreed with the declared oorder (BDL), which sibling
        # ImgMaxSeq honors.  Keep BDW to match the declared contract.
        return img.sum(3)
class Lstm1(nn.Module):
    """A simple bidirectional LSTM."""
    iorder = BDL
    oorder = BDL
    def __init__(self, ninput=None, noutput=None, ndir=2):
        nn.Module.__init__(self)
        assert ninput is not None
        assert noutput is not None
        self.ndir = ndir
        self.ninput = ninput
        self.noutput = noutput
        # bidirectional flag: ndir-1 maps 2 -> 1 (True), 1 -> 0 (False)
        self.lstm = nn.LSTM(ninput, noutput, 1, bidirectional=self.ndir - 1)
    def forward(self, seq, volatile=False):
        """BDL in, BDL out; output depth doubles when bidirectional."""
        seq = bdl2lbd(seq)
        l, bs, d = seq.size()
        assert d == self.ninput, seq.size()
        # legacy pre-0.4 autograd API: volatile Variables disable grad tracking
        h0 = Variable(typeas(torch.zeros(self.ndir, bs, self.noutput), seq), volatile=volatile)
        c0 = Variable(typeas(torch.zeros(self.ndir, bs, self.noutput), seq), volatile=volatile)
        post_lstm, _ = self.lstm(seq, (h0, c0))
        return lbd2bdl(post_lstm)
class Lstm2to1(nn.Module):
    """An LSTM that summarizes one dimension."""
    iorder = BDWH
    oorder = BDL
    def __init__(self, ninput=None, noutput=None):
        nn.Module.__init__(self)
        self.ninput = ninput
        self.noutput = noutput
        self.lstm = nn.LSTM(ninput, noutput, 1, bidirectional=False)
    def forward(self, img, volatile=False):
        """Run the LSTM along H per (batch, column); keep the final step (BDW)."""
        # BDWH -> HBWD -> HBsD
        b, d, w, h = img.size()
        seq = img.permute(3, 0, 2, 1).contiguous().view(h, b * w, d)
        bs = b * w
        # legacy pre-0.4 autograd API (volatile)
        h0 = Variable(typeas(torch.zeros(1, bs, self.noutput), img), volatile=volatile)
        c0 = Variable(typeas(torch.zeros(1, bs, self.noutput), img), volatile=volatile)
        # HBsD -> HBsD
        assert seq.size() == (h, b * w, d), (seq.size(), (h, b * w, d))
        post_lstm, _ = self.lstm(seq, (h0, c0))
        assert post_lstm.size() == (h, b * w, self.noutput), (post_lstm.size(), (h, b * w, self.noutput))
        # HBsD -> BsD -> BWD; select the last time step over H
        final = post_lstm.select(0, h - 1).view(b, w, self.noutput)
        assert final.size() == (b, w, self.noutput), (final.size(), (b, w, self.noutput))
        # BWD -> BDW
        final = final.permute(0, 2, 1).contiguous()
        # NOTE(review): the message tuple below repeats noutput where w is meant
        assert final.size() == (b, self.noutput, w), (final.size(), (b, self.noutput, self.noutput))
        return final
class Lstm1to0(nn.Module):
    """An LSTM that summarizes one dimension."""
    iorder = BDL
    oorder = BD
    def __init__(self, ninput=None, noutput=None):
        nn.Module.__init__(self)
        self.ninput = ninput
        self.noutput = noutput
        self.lstm = nn.LSTM(ninput, noutput, 1, bidirectional=False)
    def forward(self, seq):
        """Run the LSTM along L; return only the final time step (BD)."""
        # legacy pre-0.4 autograd: propagate the volatile flag from the input
        volatile = not isinstance(seq, Variable) or seq.volatile
        seq = bdl2lbd(seq)
        l, b, d = seq.size()
        assert d == self.ninput, (d, self.ninput)
        h0 = Variable(typeas(torch.zeros(1, b, self.noutput), seq), volatile=volatile)
        c0 = Variable(typeas(torch.zeros(1, b, self.noutput), seq), volatile=volatile)
        assert seq.size() == (l, b, d)
        post_lstm, _ = self.lstm(seq, (h0, c0))
        assert post_lstm.size() == (l, b, self.noutput)
        # keep only the last time step
        final = post_lstm.select(0, l - 1).view(b, self.noutput)
        return final
class RowwiseLSTM(nn.Module):
    """Run a (bi)directional LSTM along W for every row of a BDHW image."""
    def __init__(self, ninput=None, noutput=None, ndir=2):
        nn.Module.__init__(self)
        self.ndir = ndir
        self.ninput = ninput
        self.noutput = noutput
        # bidirectional flag: ndir-1 maps 2 -> 1 (True), 1 -> 0 (False)
        self.lstm = nn.LSTM(ninput, noutput, 1, bidirectional=self.ndir - 1)
    def forward(self, img):
        # legacy pre-0.4 autograd: propagate the volatile flag from the input
        volatile = not isinstance(img, Variable) or img.volatile
        b, d, h, w = img.size()
        # BDHW -> WHBD -> WB'D
        seq = img.permute(3, 2, 0, 1).contiguous().view(w, h * b, d)
        # WB'D
        h0 = typeas(torch.zeros(self.ndir, h * b, self.noutput), img)
        c0 = typeas(torch.zeros(self.ndir, h * b, self.noutput), img)
        h0 = Variable(h0, volatile=volatile)
        c0 = Variable(c0, volatile=volatile)
        seqresult, _ = self.lstm(seq, (h0, c0))
        # WB'D' -> BD'HW (depth doubles when bidirectional)
        result = seqresult.view(w, h, b, self.noutput * self.ndir).permute(2, 3, 1, 0)
        return result
class Lstm2(nn.Module):
    """A 2D LSTM module."""
    def __init__(self, ninput=None, noutput=None, nhidden=None, ndir=2):
        nn.Module.__init__(self)
        assert ndir in [1, 2]
        # hidden width defaults to the output width
        nhidden = nhidden or noutput
        self.hlstm = RowwiseLSTM(ninput, nhidden, ndir=ndir)
        # vertical pass consumes the (possibly doubled) horizontal output depth
        self.vlstm = RowwiseLSTM(nhidden * ndir, noutput, ndir=ndir)
    def forward(self, img):
        """Horizontal LSTM pass, then vertical via transpose; BDHW -> BDHW."""
        horiz = self.hlstm(img)
        horizT = horiz.permute(0, 1, 3, 2).contiguous()
        vert = self.vlstm(horizT)
        vertT = vert.permute(0, 1, 3, 2).contiguous()
        return vertT
| 6,762 | 28.792952 | 105 | py |
ocropy2 | ocropy2-master/ocropy2/inputs.py | import os
import sqlite3
import math
import numpy as np
import ocrnet
from PIL import Image
from StringIO import StringIO
import glob
import os.path
import codecs
import random as pyr
import re
import pylab
import ocrcodecs
import scipy.ndimage as ndi
import lineest
verbose = True  # when True, itsqlite prints a progress line per epoch
def image(x, normalize=True, gray=False):
    """Convert an encoded image string (PNG/JPEG bytes) to a float array.

    Commonly used as a decoder.  Output is always rank 3: HxWx3 for color,
    HxWx1 when gray=True.  With normalize=True values are divided by 255
    (assumes 8-bit input -- TODO confirm for 16-bit sources).
    """
    image = np.array(Image.open(StringIO(x)))
    assert isinstance(image, np.ndarray)
    if normalize: image = image / 255.0
    if gray:
        if image.ndim == 3:
            image = np.mean(image, 2)
        # re-attach a singleton channel axis
        image = image.reshape(image.shape + (1,))
        assert image.ndim == 3 and image.shape[2] == 1
        return image
    else:
        if image.ndim == 2:
            # replicate gray into three channels
            image = np.stack([image, image, image], axis=2)
        if image.shape[2] > 3:
            # drop the alpha channel
            image = image[...,:3]
        assert image.ndim == 3 and image.shape[2] == 3
        return image
def isimage(v):
    """Heuristic test for encoded image data: PNG or JPEG magic bytes."""
    magic = ("\x89PNG", "\377\330", "IHDR\r\n")
    return any(v.startswith(m) for m in magic)
def auto_decode(sample, normalize=True, gray=False):
    """Decode in place: transcripts to str, image-looking blobs to arrays.

    Mutates and returns `sample`.  Python 2 only (uses `buffer`).
    """
    for k, v in sample.items():
        if k == "transcript":
            sample[k] = str(v)
        elif isinstance(v, buffer):
            # NOTE(review): the converted str is never written back to
            # sample[k], so buffer values stay buffers -- verify intent
            v = str(v)
        elif isinstance(v, str) and isimage(v):
            try:
                sample[k] = image(v, normalize=normalize, gray=gray)
            except ValueError:
                # leave undecodable blobs untouched
                pass
    return sample
def fiximage(image, transcript, minlen=4, maxheight=48):
    """The UW3 dataset contains some vertical text lines; rotate these
    to horizontal. Also, fix lines that are too tall"""
    if image.ndim == 3:
        image = np.mean(image, 2)
    image = lineest.autocrop(image)
    height, width = image.shape
    if height > width and len(transcript) > minlen:
        # taller than wide with a non-trivial transcript: rotate 90 degrees
        image = image[::-1].T
        height, width = image.shape
    if height > maxheight:
        factor = maxheight * 1.0 / height
        image = ndi.zoom(image, [factor, factor], order=1)
    return image
def itfix(data):
    """Apply fiximage to the input field of every sample in the stream."""
    for item in data:
        item["input"] = fiximage(item["input"], item["transcript"])
        yield item
# Canonical column names: itsqlite maps raw database column names onto the
# field names the training code expects ("id", "input", "target").
default_renames = {
    "rowid": "id",
    "index": "id",
    "inx": "id",
    "image": "input",
    "images": "input",
    "inputs": "input",
    "output": "target",
    "outputs": "target",
    "cls" : "target",
    "targets": "target",
}
def itsqlite(dbfile, table="train", nepochs=1, cols="*",
             decoder=auto_decode, fields=None, renames=default_renames, extra=""):
    """Iterate samples from an sqlite table, nepochs times (Python 2 only).

    `dbfile` may be "path::table".  Row values are renamed via `renames`
    (or forced onto explicit `fields`), then passed through `decoder`.
    """
    assert "," not in table
    if "::" in dbfile:
        dbfile, table = dbfile.rsplit("::", 1)
    assert os.path.exists(dbfile)
    db = sqlite3.connect(dbfile)
    if fields is not None:
        fields = fields.split(",")
    for epoch in xrange(nepochs):
        if verbose:
            print "# epoch", epoch, "of", nepochs, "from", dbfile, table
        count = 0
        c = db.cursor()
        # NOTE(review): cols/table/extra are interpolated directly into the
        # SQL string; callers must not pass untrusted values here
        sql = "select %s from %s %s" % (cols, table, extra)
        for row in c.execute(sql):
            sample = {}
            if fields is not None:
                # explicit field names: positional mapping
                assert len(fields) == len(row)
                for i, r in enumerate(row):
                    sample[fields[i]] = r
            else:
                # use the cursor description, applying the rename table
                cs = [x[0] for x in c.description]
                for i, col in enumerate(cs):
                    value = row[i]
                    if isinstance(value, buffer):
                        value = str(value)
                    ocol = renames.get(col, col)
                    assert ocol not in sample
                    sample[ocol] = value
            if decoder: sample = decoder(sample)
            if "transcript" in sample:
                assert isinstance(sample["transcript"], (str, unicode)), decoder
            count = count+1
            yield sample
        c.close()
        del c
def itbook(dname, epochs=1000):
    """Iterate over OCRopus book-style ground truth (dname/????/??????.gt.txt).

    Yields dict(input=<inverted, normalized gray image>, transcript=<text>)
    for every line that has a matching .dew.png, repeated `epochs` times.
    """
    fnames = glob.glob(dname + "/????/??????.gt.txt")
    for _ in range(epochs):
        fnames.sort()
        for fname in fnames:
            base = re.sub(".gt.txt$", "", fname)
            pngname = base + ".dew.png"
            if not os.path.exists(pngname):
                continue
            page = pylab.imread(pngname)
            if page.ndim == 3:
                page = np.mean(page, 2)
            # normalize to [0, 1], then invert (ink becomes bright)
            page -= np.amin(page)
            page /= np.amax(page)
            page = 1.0 - page
            with codecs.open(fname, "r", "utf-8") as stream:
                transcript = stream.read().strip()
            yield dict(input=page, transcript=transcript)
def itinfinite(sample):
    """Repeat the same sample over and over again (for testing)."""
    while 1:
        yield sample
def itmaxsize(data, h, w):
    """Yield only samples whose input is strictly smaller than h x w.

    Fix: the original signature named the stream parameter `sample` while
    the loop iterated over an undefined name `data`, so calling the
    generator always raised NameError; the parameter is renamed to match.
    """
    for sample in data:
        shape = sample["input"].shape
        if shape[0] >= h: continue
        if shape[1] >= w: continue
        yield sample
def image2seq(image):
    """Turn a WH or WH3 image into an LD sequence."""
    gray = image
    if gray.ndim == 3 and gray.shape[2] == 3:
        gray = np.mean(gray, 2)
    assert gray.ndim == 2
    return ocrnet.astorch(gray.T)
def itmapper(data, **keys):
    """Map the fields in each sample using name=f keyword arguments.

    Each yielded sample is a shallow copy; the source dicts are untouched.
    """
    for sample in data:
        updated = sample.copy()
        for field, fn in keys.items():
            updated[field] = fn(updated[field])
        yield updated
def itimbatched(data, batchsize=5, scale=1.8, seqkey="input"):
    """List-batch input samples into similar sized batches."""
    # buckets maps a log-scale length class -> partially filled batch dict
    buckets = {}
    for sample in data:
        seq = sample[seqkey]
        d, l = seq.shape[:2]
        # geometric bucketing so members of a batch have similar length l
        r = int(math.floor(math.log(l) / math.log(scale)))
        batched = buckets.get(r, {})
        # append every field of the sample to the bucket's parallel lists
        for k, v in sample.items():
            if k in batched:
                batched[k].append(v)
            else:
                batched[k] = [v]
        if len(batched[seqkey]) >= batchsize:
            batched["_bucket"] = r
            yield batched
            batched = {}
        buckets[r] = batched
    # flush the remaining partial batches at end of stream
    for r, batched in buckets.items():
        if batched == {}: continue
        batched["_bucket"] = r
        yield batched
def makeseq(image):
    """Turn an image into an LD sequence (transposed; RGB averaged to gray)."""
    assert isinstance(image, np.ndarray), type(image)
    gray = image
    if gray.ndim == 3 and gray.shape[2] == 3:
        gray = np.mean(gray, 2)
    assert gray.ndim == 2
    return gray.T
def makebatch(images, for_target=False, l_pad=0, d_pad=0):
    """Given a list of LD sequences, make a BLD batch tensor.

    Shorter sequences are zero-padded; with for_target=True, depth 0 (the
    epsilon class) defaults to 1 everywhere before targets are copied in.
    """
    assert isinstance(images, list), type(images)
    assert isinstance(images[0], np.ndarray), images
    assert images[0].ndim==2, images[0].ndim
    maxl, maxd = np.amax(np.array([img.shape for img in images], 'i'), axis=0)
    maxl += l_pad
    maxd += d_pad
    batch = np.zeros([len(images), int(maxl), int(maxd)])
    if for_target:
        batch[:, :, 0] = 1.0
    for index, img in enumerate(images):
        nl, nd = img.shape
        batch[index, :nl, :nd] = img
    return batch
def images2batch(images):
    """Convert a list of line images into one padded BLD batch array."""
    return makebatch([makeseq(img) for img in images])
def maketarget(s, codec="ascii"):
    """Turn a string into an LD target."""
    assert isinstance(s, (str, unicode)), (type(s), s)  # Python 2 string types
    codec = ocrnet.codecs.get(codec, codec)  # resolve names; pass codecs through
    codes = codec.encode(s)
    n = codec.size()
    return ocrcodecs.make_target(codes, n)
def transcripts2batch(transcripts, codec="ascii"):
    """Encode a list of transcripts into one padded BLD target batch.

    Fix: the `codec` argument is now forwarded to maketarget; previously it
    was accepted but silently ignored (ascii was always used).
    """
    targets = [maketarget(s, codec) for s in transcripts]
    return makebatch(targets, for_target=True)
def itlinebatcher(data):
    """Convert per-sample image/transcript lists into padded batch arrays."""
    for sample in data:
        out = sample.copy()
        out["input"] = images2batch(out["input"])
        out["target"] = transcripts2batch(out["transcript"])
        yield out
def itshuffle(data, bufsize=1000):
    """Shuffle the data in the stream.

    This uses a buffer of size `bufsize`. Shuffling at
    startup is less random; this is traded off against
    yielding samples quickly."""
    buf = []
    for sample in data:
        if len(buf) < bufsize:
            # NOTE(review): pulls an *extra* element via data.next()
            # (Python 2 iterator API) to grow the buffer; any samples still
            # in buf when the stream ends are never yielded -- confirm this
            # loss is acceptable.
            buf.append(data.next())
        # swap the current sample with a random buffered one
        k = pyr.randint(0, len(buf)-1)
        sample, buf[k] = buf[k], sample
        yield sample
def itlines(images):
    """Wrap raw line images into sample dicts with sequential ids."""
    for idx, img in enumerate(images):
        if img.ndim == 3:
            # average the first three channels down to gray
            img = np.mean(img[:, :, :3], 2)
        yield dict(input=img, key=idx, id=idx, transcript="")
| 8,389 | 30.541353 | 82 | py |
ocropy2 | ocropy2-master/ocropy2/psegutils.py | from __future__ import print_function
import os
# import sl,morph
import torch
import scipy.ndimage as ndi
from pylab import *
from scipy.ndimage import filters, morphology, interpolation
from torch.autograd import Variable
def sl_width(s):
    """Length of a 1D slice."""
    return s.stop - s.start
def sl_area(s):
    """Area of a 2D slice pair (rows, cols)."""
    return sl_width(s[0]) * sl_width(s[1])
def sl_dim0(s):
    """Height (number of rows) of a 2D slice pair."""
    return sl_width(s[0])
def sl_dim1(s):
    """Width (number of columns) of a 2D slice pair."""
    return sl_width(s[1])
def sl_tuple(s):
    """Flatten a 2D slice pair into (row0, row1, col0, col1)."""
    return s[0].start, s[0].stop, s[1].start, s[1].stop
def B(a):
    """Return a as an unsigned-byte ('B') array, converting only when needed."""
    return a if a.dtype == dtype('B') else array(a, 'B')
class record:
    """A simple bag of attributes initialized from keyword arguments."""
    def __init__(self, **kw):
        for key, value in kw.items():
            setattr(self, key, value)
def find_on_path(fname, path, separator=":"):
    """Search a separator-delimited directory list for fname.

    Returns the first existing full path, or None when nothing matches.
    """
    for directory in path.split(separator):
        candidate = os.path.join(directory, fname)
        if os.path.exists(candidate):
            return candidate
    return None
def spread_labels(labels, maxdist=9999999):
    """Propagate nonzero labels onto the background.

    Each background pixel takes the label of the nearest labeled pixel,
    but only within maxdist (Euclidean); everything farther stays 0.
    """
    dist, nearest = morphology.distance_transform_edt(
        labels == 0, return_distances=1, return_indices=1)
    flat_index = nearest[0] * labels.shape[1] + nearest[1]
    result = labels.ravel()[flat_index.ravel()].reshape(*labels.shape)
    result *= (dist < maxdist)
    return result
def correspondences(labels1, labels2):
    """Given two labeled images, compute an array giving the correspondences
    between labels in the two images."""
    q = 100000
    assert amin(labels1) >= 0 and amin(labels2) >= 0
    assert amax(labels2) < q
    # pack each (label1, label2) pair into a single integer, then unpack
    pairs = unique(labels1 * q + labels2)
    return array([pairs // q, pairs % q])
def blackout_images(image, ticlass):
    """Takes a page image and a ticlass text/image classification image and replaces
    all regions tagged as 'image' with rectangles in the page image. The page image
    is modified in place. All images are iulib arrays.

    NOTE(review): `ocropy` is not imported anywhere in this module, so this
    legacy function raises NameError if called -- dead code from the old
    iulib-based OCRopus; verify before use.
    """
    rgb = ocropy.intarray()
    ticlass.textImageProbabilities(rgb, image)
    r = ocropy.bytearray()
    g = ocropy.bytearray()
    b = ocropy.bytearray()
    ocropy.unpack_rgb(r, g, b, rgb)
    components = ocropy.intarray()
    components.copy(g)
    n = ocropy.label_components(components)
    print("[note] number of image regions", n)
    tirects = ocropy.rectarray()
    ocropy.bounding_boxes(tirects, components)
    for i in range(1, tirects.length()):
        r = tirects.at(i)
        ocropy.fill_rect(image, r, 0)
        r.pad_by(-5, -5)
        ocropy.fill_rect(image, r, 255)
def binary_objects(binary):
    """Label connected components and return their bounding-box slices."""
    labels, _ = ndi.label(binary)
    return ndi.find_objects(labels)
def estimate_scale(binary):
    """Estimate the typical glyph scale from connected-component areas.

    Paints sqrt(area) of each component (smallest first, skipping already
    painted regions) and returns the median over plausible values (3..100).
    """
    candidates = sorted(binary_objects(binary), key=sl_area)
    scalemap = zeros(binary.shape)
    for obj in candidates:
        if amax(scalemap[obj]) > 0:
            continue
        scalemap[obj] = sl_area(obj) ** 0.5
    return median(scalemap[(scalemap > 3) & (scalemap < 100)])
def compute_boxmap(binary, lo=10, hi=5000, dtype='i'):
    """Mark components whose sqrt-area lies in [lo, hi] with 1 in a box map."""
    boxmap = zeros(binary.shape, dtype)
    for obj in sorted(binary_objects(binary), key=sl_area):
        size = sl_area(obj) ** 0.5
        if size < lo or size > hi:
            continue
        boxmap[obj] = 1
    return boxmap
def compute_lines(segmentation, scale):
    """Given a line segmentation map, computes a list
    of tuples consisting of 2D slices and masked images."""
    lines = []
    for index, bounds in enumerate(ndi.find_objects(segmentation)):
        if bounds is None:
            continue
        # discard boxes that are too small relative to the page scale
        if sl_dim1(bounds) < 2 * scale or sl_dim0(bounds) < scale:
            continue
        mask = (segmentation[bounds] == index + 1)
        if amax(mask) == 0:
            continue
        line = record()
        line.label = index + 1
        line.bounds = bounds
        line.mask = mask
        lines.append(line)
    return lines
def pad_image(image, d, cval=inf):
    """Pad image by d on all sides; fill with cval (the image max when inf)."""
    fill = amax(image) if cval == inf else cval
    padded = ones(array(image.shape) + 2 * d)
    padded[:, :] = fill
    padded[d:-d, d:-d] = image
    return padded
def extract(image, y0, x0, y1, x1, mode='nearest', cval=0):
    """Extract the rectangle [y0:y1, x0:x1] from image, shifting and padding
    so that out-of-bounds requests succeed instead of failing."""
    h, w = image.shape
    ch, cw = y1 - y0, x1 - x0
    # clamp the crop origin so the initial slice stays inside the image
    y, x = clip(y0, 0, max(h - ch, 0)), clip(x0, 0, max(w - cw, 0))
    sub = image[y:y + ch, x:x + cw]
    # print("extract", image.dtype, image.shape)
    try:
        # shift the crop so it lines up with the requested origin
        r = interpolation.shift(sub, (y - y0, x - x0), mode=mode, cval=cval, order=0)
        if cw > w or ch > h:
            # requested window is larger than the whole image: pad to (ch, cw)
            pady0, padx0 = max(-y0, 0), max(-x0, 0)
            r = interpolation.affine_transform(r, eye(2), offset=(pady0, padx0), cval=1, output_shape=(ch, cw))
        return r
    except RuntimeError:
        # workaround for platform differences between 32bit and 64bit
        # scipy.ndimage
        dtype = sub.dtype
        sub = array(sub, dtype='float64')
        sub = interpolation.shift(sub, (y - y0, x - x0), mode=mode, cval=cval, order=0)
        sub = array(sub, dtype=dtype)
        return sub
def extract_masked(image, linedesc, pad=5, expand=0, background=None):
    """Extract a subimage from `image` using the line descriptor
    `linedesc` (bounds + mask); pixels outside the (optionally padded
    and dilated) mask are replaced with `background`."""
    assert amin(image) >= 0 and amax(image) <= 1
    if background is None:
        background = amin(image)
    bounds = linedesc.bounds
    y0 = int(bounds[0].start)
    x0 = int(bounds[1].start)
    y1 = int(bounds[0].stop)
    x1 = int(bounds[1].stop)
    mask = pad_image(linedesc.mask, pad, cval=0) if pad > 0 else linedesc.mask
    line = extract(image, y0 - pad, x0 - pad, y1 + pad, x1 + pad)
    if expand > 0:
        # Dilate the mask so strokes near the boundary are preserved.
        mask = filters.maximum_filter(mask, (expand, expand))
    return where(mask, line, background)
def reading_order(lines, highlight=None, debug=0):
    """Given the list of lines (a list of 2D slices), computes
    the partial reading order. The output is a binary 2D array
    such that order[i,j] is true if line i comes before line j
    in reading order."""
    order = zeros((len(lines), len(lines)), 'B')
    def x_overlaps(u, v):
        # Horizontal extents of u and v intersect.
        return u[1].start < v[1].stop and u[1].stop > v[1].start
    def above(u, v):
        # u starts higher on the page than v.
        return u[0].start < v[0].start
    def left_of(u, v):
        # u ends strictly to the left of v's start.
        return u[1].stop < v[1].start
    def separates(w, u, v):
        # 1 if w lies vertically between/across u and v and spans the
        # horizontal gap between them; falls through to None (falsy)
        # otherwise.
        if w[0].stop < min(u[0].start, v[0].start): return 0
        if w[0].start > max(u[0].stop, v[0].stop): return 0
        if w[1].start < u[1].stop and w[1].stop > v[1].start: return 1
    # NOTE(review): debug-only branch — `binary` is not defined in this
    # scope, so imshow(binary) raises NameError whenever highlight is
    # set; confirm before relying on the highlight option.
    if highlight is not None:
        clf()
        title("highlight")
        imshow(binary)
        ginput(1, debug)
    for i, u in enumerate(lines):
        for j, v in enumerate(lines):
            # u precedes v if it is above an x-overlapping v, or if it is
            # left of v and no third line separates the pair.
            if x_overlaps(u, v):
                if above(u, v):
                    order[i, j] = 1
            else:
                if [w for w in lines if separates(w, u, v)] == []:
                    if left_of(u, v): order[i, j] = 1
            if j == highlight and order[i, j]:
                print((i, j), end=' ')
                y0, x0 = sl.center(lines[i])
                y1, x1 = sl.center(lines[j])
                plot([x0, x1 + 200], [y0, y1])
    if highlight is not None:
        print()
        ginput(1, debug)
    return order
def topsort(order):
    """Given a binary array defining a partial order (o[i,j]==True means i<j),
    compute a topological sort. This is a quick and dirty implementation
    that works for up to a few thousand elements.

    Fixed to use numpy's nonzero() instead of pylab's find(), which was
    removed from matplotlib (matplotlib.mlab.find is gone since 3.1).
    """
    n = len(order)
    visited = zeros(n)
    L = []
    def visit(k):
        # Depth-first: emit all predecessors of k before k itself.
        if visited[k]: return
        visited[k] = 1
        for l in nonzero(order[:, k])[0]:
            visit(l)
        L.append(k)
    for k in range(n):
        visit(k)
    return L  # [::-1]
def show_lines(image, lines, lsort):
    """Overlays the computed lines on top of the image, for debugging
    purposes."""
    centers_y = []
    centers_x = []
    clf()
    cla()
    imshow(image)
    for rank in range(len(lines)):
        line = lines[lsort[rank]]
        cy, cx = sl.center(line.bounds)
        centers_x.append(cx)
        centers_y.append(cy)
        bounds = line.bounds
        rect = matplotlib.patches.Rectangle((bounds[1].start, bounds[0].start),
                                            edgecolor='r',
                                            fill=0,
                                            width=sl_dim1(bounds),
                                            height=sl_dim0(bounds))
        gca().add_patch(rect)
    h, w = image.shape
    # Flip the y axis so row 0 is drawn at the top, as in the image.
    ylim(h, 0)
    xlim(0, w)
    plot(centers_x, centers_y)
def propagate_labels(image, labels, conflict=0):
    """Given an image and a set of labels, apply the labels
    to all the regions in the image that overlap a label.
    Assign the value `conflict` to any labels that have a conflict."""
    rlabels, _ = ndi.label(image)
    cors = correspondences(rlabels, labels)
    outputs = zeros(amax(rlabels) + 1, 'i')
    CLASH = -(1 << 30)  # sentinel for regions touched by more than one label
    for region, lab in cors.T:
        outputs[region] = CLASH if outputs[region] != 0 else lab
    outputs[outputs == CLASH] = conflict
    outputs[0] = 0  # background stays background
    return outputs[rlabels]
def remove_noise(line, minsize=8):
    """Remove components smaller than `minsize` pixels from an image."""
    if minsize == 0:
        return line
    mask = (line > 0.5 * amax(line))
    labels, n = ndi.label(mask)
    # Per-pixel size of the component each pixel belongs to.
    sizes = measurements.sum(mask, labels, range(n + 1))[labels]
    keep = 1 - (sizes > 0) * (sizes < minsize)
    return minimum(mask, keep)
def remove_big(image, max_h=100, max_w=100):
    """Remove components taller than max_h or wider than max_w."""
    assert image.ndim == 2
    mask = (image > 0.5 * amax(image))
    labels, n = ndi.label(mask)
    keep = ones(n + 1, 'i')
    keep[0] = 0  # background
    for idx, (rows, cols) in enumerate(ndi.find_objects(labels)):
        too_big = (rows.stop - rows.start >= max_h) or (cols.stop - cols.start >= max_w)
        if too_big:
            keep[idx + 1] = 0
    return keep[labels]
def hysteresis_threshold(image, lo, hi):
    """Binary hysteresis threshold: keep every connected component of
    (image > lo) containing at least one pixel with image > hi."""
    lablo, n = ndi.label(image > lo)
    n += 1
    markers = zeros(n, 'i')
    # Labels of low-threshold components that touch a high pixel.
    for lab in set((lablo * (image > hi)).flat):
        if lab != 0:
            markers[lab] = 1
    return markers[lablo]
class LineSegmenter(object):
    """Neural text-line segmenter: runs a saved torch model over a page
    image to obtain per-pixel line probabilities, then grows line labels
    over the page's connected components.

    NOTE(review): written against a pre-0.4 PyTorch API —
    `Variable(..., volatile=True)` and in-place `.data.resize_()` fail
    on modern PyTorch; `torch.load(mname)` without map_location and the
    `.cuda()` calls require a CUDA device. Confirm the runtime
    environment before use.
    """
    def __init__(self, mname, invert=False, docthreshold=0.5, hiprob=0.5, loprob=None):
        # Hysteresis thresholds for the probability map; loprob defaults
        # to hiprob (plain thresholding) when not given.
        self.hi = hiprob
        self.lo = loprob or hiprob
        self.basic_size = 10
        self.model = torch.load(mname)
        self.invert = invert
        self.model.cuda()
        # Reusable CUDA input buffer, resized per page in line_probs().
        self.cuinput = Variable(torch.randn(1, 1, 100, 100).cuda(), volatile=True)
        self.docthreshold = docthreshold
    def line_probs(self, pimage):
        """Return a per-pixel line-probability map the size of `pimage`."""
        if pimage.ndim == 3: pimage = mean(pimage, 2)
        ih, iw = pimage.shape
        # Normalize to [0, 1]; optionally invert for white-on-black input.
        pimage = pimage - amin(pimage)
        pimage /= amax(pimage)
        if self.invert:
            pimage = amax(pimage) - pimage
        self.cuinput.data.resize_(1, 1, *pimage.shape).copy_(torch.FloatTensor(pimage))
        cuoutput = self.model(self.cuinput)
        poutput = cuoutput.data.cpu().numpy()[0, 0]
        oh, ow = poutput.shape
        # Rescale the (possibly downsampled) network output back to the
        # input resolution.
        scale = oh * 1.0 / ih
        poutput = ndi.affine_transform(poutput, eye(2) * scale, output_shape=pimage.shape, order=1)
        self.probs = poutput
        return poutput
    def line_seeds(self, pimage):
        """Hysteresis-threshold the probability map into labeled seeds."""
        poutput = self.line_probs(pimage)
        binoutput = hysteresis_threshold(poutput, self.lo, self.hi)
        self.lines = binoutput
        seeds, _ = ndi.label(binoutput)
        return seeds
    def lineseg(self, pimage, max_size=(300, 300)):
        """Segment `pimage` into text lines; returns a labeled image."""
        self.image = pimage
        self.binary = pimage > self.docthreshold
        if max_size is not None:
            # Drop oversized components (figures, rules) before labeling.
            self.binary = remove_big(self.binary, *max_size)
        self.boxmap = compute_boxmap(self.binary, dtype="B")
        self.seeds = self.line_seeds(pimage)
        self.llabels = propagate_labels(self.boxmap, self.seeds, conflict=0)
        # Spread seed labels to nearby still-unlabeled foreground pixels.
        self.spread = spread_labels(self.seeds, maxdist=self.basic_size)
        self.llabels = where(self.llabels > 0, self.llabels, self.spread * self.binary)
        self.segmentation = self.llabels * self.binary
        return self.segmentation
def reordered_lines(lseg):
    """Sort the lines of segmentation `lseg` into reading order and
    renumber the segmentation accordingly (labels offset by 0x010000)."""
    lines = compute_lines(lseg, 20)
    lsort = topsort(reading_order([l.bounds for l in lines]))
    renumber = zeros(amax(lseg) + 1, 'i')
    for rank, v in enumerate(lsort):
        renumber[lines[v].label] = 0x010000 + (rank + 1)
    sorted_lines = [lines[i] for i in lsort]
    return sorted_lines, renumber[lseg]
def extract_textlines(lseg, image, pad=5, expand=3):
    """Yield (grayscale line image, bounds tuple) pairs in reading order."""
    lines, _ = reordered_lines(lseg)
    for line in lines:
        yield extract_masked(image, line, pad=pad, expand=expand), sl_tuple(line.bounds)
| 12,723 | 30.730673 | 111 | py |
ImageNetV2 | ImageNetV2-master/code/train_imagenet_dataset_discriminator.py | import json
import pathlib
import concurrent.futures as fs
import os
import time
import math
import argparse
import random
import click
import numpy as np
import torchvision.models as models
import torchvision.transforms as transforms
import torch.optim as optim
from torch.optim import lr_scheduler
from tqdm import tqdm
import torch.nn as nn
from timeit import default_timer as timer
import pickle
import utils
import pywren
import imageio
import torch
import scipy.linalg
import sklearn.metrics as metrics
from numba import jit
import candidate_data
import image_loader
import imagenet
from eval_utils import ImageLoaderDataset
from collections import defaultdict
CONTROL_NAME = "imagenet-validation-original"
class CBImageLoaderDatasetPair(torch.utils.data.Dataset):
    """Pair two ImageLoaderDatasets into class-balanced train/test splits.

    For each of the 1000 classes, `num_per_cls` images from each source
    dataset go into the train split and another `num_per_cls` into the
    test split. The label of every item is which source dataset the
    image came from (0 or 1).
    """
    def __init__(self, dataset0, dataset1, num_per_cls=5):
        self.dataset0 = dataset0
        self.dataset1 = dataset1
        self.mode = 'train'
        assert isinstance(dataset0, ImageLoaderDataset)
        assert isinstance(dataset1, ImageLoaderDataset)
        assert len(set(dataset0.class_ids)) == 1000
        assert len(set(dataset1.class_ids)) == 1000
        # Bucket each dataset's indices by ImageNet class id.
        self.ds0_locs_by_class = defaultdict(list)
        self.ds1_locs_by_class = defaultdict(list)
        for pos, cid in enumerate(dataset0.class_ids):
            self.ds0_locs_by_class[cid].append(pos)
        for pos, cid in enumerate(dataset1.class_ids):
            self.ds1_locs_by_class[cid].append(pos)
        self.train_dataset = []
        self.test_dataset = []
        for cid in range(1000):
            for source, buckets, label in ((self.dataset0, self.ds0_locs_by_class, 0),
                                           (self.dataset1, self.ds1_locs_by_class, 1)):
                bucket = buckets[cid]
                assert len(bucket) >= num_per_cls
                for _ in range(num_per_cls):
                    self.train_dataset.append((source, bucket.pop(0), label))
                assert len(bucket) >= num_per_cls
                for _ in range(num_per_cls):
                    self.test_dataset.append((source, bucket.pop(0), label))
        random.shuffle(self.train_dataset)
        random.shuffle(self.test_dataset)
    def train(self):
        """Switch __len__/__getitem__ to the train split."""
        self.mode = 'train'
    def test(self):
        """Switch __len__/__getitem__ to the test split."""
        self.mode = 'test'
    def _active_split(self):
        # Internal: return the list backing the current mode.
        if self.mode == 'train':
            return self.train_dataset
        if self.mode == 'test':
            return self.test_dataset
        raise Exception("Unsupported mode")
    def __len__(self):
        return len(self._active_split())
    def __getitem__(self, index):
        source, pos, label = self._active_split()[index]
        return source[pos][0], label
def finetune_model(dataset, model, epochs=10, initial_lr=1e-4, decay_factor=0.5, thresh=1e-2, batch_size=32):
    """Fine-tune `model` on `dataset` with SGD + ReduceLROnPlateau.

    Args:
        dataset: torch Dataset yielding (input, label) pairs.
        model: torch module; trained in place and returned.
        epochs: number of passes over the dataset.
        initial_lr: SGD learning rate.
        decay_factor: LR multiplier applied when the loss plateaus.
            Bug fix: this parameter (and `thresh`) was accepted but
            silently ignored; the previously hard-coded value 0.5 is
            now the default, so default behavior is unchanged.
        thresh: plateau threshold for the scheduler (was also ignored).
        batch_size: minibatch size.

    Returns:
        The trained model.
    """
    since = time.time()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=initial_lr, momentum=0.9)
    # Bug fix: pass decay_factor/thresh through instead of hard-coding.
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=decay_factor, threshold=thresh)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        model = model.cuda()
    for epoch in range(epochs):
        epoch_str = 'Epoch {}/{}'.format(epoch, epochs - 1)
        pbar = tqdm(total=len(dataset), desc=epoch_str)
        model.train()  # training mode (enables dropout/batchnorm updates)
        running_loss = 0.0
        running_corrects = 0
        for inputs, labels in dataloader:
            pbar.update(inputs.size(0))
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
        pbar.close()
        epoch_loss = running_loss / len(dataset)
        epoch_acc = running_corrects.double() / len(dataset)
        # Scheduler reacts to the epoch's mean training loss.
        scheduler.step(epoch_loss)
        print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    return model
def eval_model(dataset, model, batch_size=32):
    """Return (mean cross-entropy loss, accuracy) of `model` on `dataset`."""
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
    criterion = nn.CrossEntropyLoss()
    total_loss = 0.0
    total_correct = 0
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    pbar = tqdm(total=len(dataset), desc=f"eval {dataset.mode}")
    for inputs, labels in dataloader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        pbar.update(inputs.size(0))
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        total_loss += criterion(outputs, labels).item() * inputs.size(0)
        total_correct += torch.sum(preds == labels.data)
    pbar.close()
    return total_loss / len(dataset), total_correct.double() / len(dataset)
if __name__ == "__main__":
    # Train a binary discriminator that tries to tell the original
    # ImageNet validation set apart from a new candidate dataset.
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset')
    parser.add_argument('--pretrained', action="store_const", const=True, default=False)
    parser.add_argument('--model', default="resnet18")
    parser.add_argument('--epochs', default=10, type=int)
    parser.add_argument('--initial_lr', default=1e-4, type=float)
    args = parser.parse_args()
    if (args.model == "resnet18"):
        model_ft = models.resnet18(pretrained=args.pretrained)
    elif (args.model == "resnet152"):
        model_ft = models.resnet152(pretrained=args.pretrained)
    else:
        raise Exception("Unsupported model")
    # Bug fix: the replacement head must match the backbone's feature
    # width (512 for resnet18, 2048 for resnet152); the previous
    # hard-coded sizes (2048 and 8192) were wrong for both models and
    # crashed at the first forward pass.
    model_ft.fc = nn.Linear(model_ft.fc.in_features, 2)
    dataset_filename = args.dataset
    dataset_filepath = pathlib.Path(__file__).parent / '../data/datasets' / (dataset_filename + '.json')
    with open(dataset_filepath, 'r') as f:
        dataset = json.load(f)
    imgs = [x[0] for x in dataset['image_filenames']]
    print('Reading dataset from {} ...'.format(dataset_filepath))
    imgnet = imagenet.ImageNetData()
    cds = candidate_data.CandidateData(load_metadata_from_s3=False, exclude_blacklisted_candidates=False)
    loader = image_loader.ImageLoader(imgnet, cds)
    pbar = tqdm(total=len(imgs), desc='New Dataset download')
    img_data = loader.load_image_bytes_batch(imgs, size='scaled_256', verbose=False, download_callback=lambda x: pbar.update(x))
    pbar.close()
    torch_dataset = ImageLoaderDataset(imgs, imgnet, cds, 'scaled_256', transform=transforms.ToTensor())
    control_dataset_filename = CONTROL_NAME
    control_dataset_filepath = pathlib.Path(__file__).parent / '../data/datasets' / (control_dataset_filename + '.json')
    with open(control_dataset_filepath, 'r') as f:
        control_dataset = json.load(f)
    # Bug fix: the control image list was read from `dataset` instead of
    # `control_dataset`, making the "control" split identical to the new
    # dataset (a discriminator trained on that data is meaningless).
    control_imgs = [x[0] for x in control_dataset['image_filenames']]
    print('Reading dataset from {} ...'.format(control_dataset_filepath))
    control_torch_dataset = ImageLoaderDataset(control_imgs, imgnet, cds, 'scaled_256', transform=transforms.ToTensor())
    pbar = tqdm(total=len(control_imgs), desc='Control Dataset download')
    img_data = loader.load_image_bytes_batch(control_imgs, size='scaled_256', verbose=False, download_callback=lambda x: pbar.update(x))
    pbar.close()
    dataset = CBImageLoaderDatasetPair(control_torch_dataset, torch_dataset)
    finetune_model(dataset, model_ft, epochs=args.epochs, initial_lr=args.initial_lr)
    dataset.test()
    loss, acc = eval_model(dataset, model_ft)
    print(f"Test loss is {loss}, Test Accuracy is {acc}")
| 8,705 | 36.852174 | 135 | py |
ImageNetV2 | ImageNetV2-master/code/make_imagenet_folders.py | import json
import pathlib
import concurrent.futures as fs
import os
import time
import math
import argparse
import random
import click
import numpy as np
import torchvision.models as models
import torchvision.transforms as transforms
import torch.optim as optim
from torch.optim import lr_scheduler
from tqdm import tqdm
import torch.nn as nn
from timeit import default_timer as timer
import pickle
import utils
import pywren
import imageio
import torch
import scipy.linalg
import sklearn.metrics as metrics
from numba import jit
import candidate_data
import image_loader
import imagenet
from eval_utils import ImageLoaderDataset
from collections import defaultdict
if __name__ == "__main__":
    # CLI: materialize a dataset JSON as an on-disk folder tree
    # out_folder/<class_id>/<index>.jpeg (torchvision ImageFolder layout).
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset')
    parser.add_argument('out_folder')
    args = parser.parse_args()
    out_path_root = pathlib.Path(args.out_folder)
    dataset_filename = args.dataset
    dataset_filepath = pathlib.Path(__file__).parent / '../data/datasets' / (dataset_filename + '.json')
    if (out_path_root.exists()):
        assert out_path_root.is_dir()
    else:
        out_path_root.mkdir()
    with open(dataset_filepath, 'r') as f:
        dataset = json.load(f)
    imgs = [x[0] for x in dataset['image_filenames']]
    wnids = [x[1] for x in dataset['image_filenames']]
    # Class ids are indices into the sorted list of wnids present in
    # THIS dataset (not necessarily the ILSVRC class numbering).
    all_wnids = sorted(list(set(wnids)))
    class_ids = [all_wnids.index(x) for x in wnids]
    print('Reading dataset from {} ...'.format(dataset_filepath))
    imgnet = imagenet.ImageNetData()
    cds = candidate_data.CandidateData(load_metadata_from_s3=False, exclude_blacklisted_candidates=False)
    loader = image_loader.ImageLoader(imgnet, cds)
    pbar = tqdm(total=len(imgs), desc='New Dataset download')
    # Batch download first so the per-image loads below hit the cache.
    img_data = loader.load_image_bytes_batch(imgs, size='scaled_500', verbose=False, download_callback=lambda x:pbar.update(x))
    pbar.close()
    pbar = tqdm(total=len(imgs), desc='Saving Dataset')
    print("wnids", len(wnids))
    for img, label in zip(imgs, class_ids):
        pbar.update(1)
        cls_path = out_path_root / pathlib.Path(str(label))
        if (cls_path.exists()):
            assert cls_path.is_dir()
        else:
            cls_path.mkdir()
        # Next free index within this class directory.
        cls_idx = len([x for x in cls_path.iterdir()])
        inst_path = cls_path / pathlib.Path(str(cls_idx) + ".jpeg")
        img_bytes = loader.load_image_bytes(img, size='scaled_500')
        with inst_path.open(mode="wb+") as f:
            f.write(img_bytes)
    pbar.close()
| 2,511 | 27.545455 | 127 | py |
ImageNetV2 | ImageNetV2-master/code/featurize_candidates.py | import argparse
import io
import os
import pickle
import tarfile
import time
from timeit import default_timer as timer
import json
import boto3
import numpy as np
import skimage.transform
import torch
import torchvision.models as models
from torch.autograd import Variable
from torch import nn
import candidate_data
import imagenet
import featurize
import mturk_data
import utils
FEATURIZE_SIZE = (224, 224)
def featurize_candidates(bucket, prefix, batch_size, source_filename):
    """Featurize candidate images listed in fc7_candidates.json with
    VGG16 and upload one fc7 feature object per image to
    s3://bucket/prefix/<candidate>.npy.

    Only candidates without an existing feature object are processed.
    `source_filename` is accepted for CLI compatibility but unused.
    """
    imgnt = imagenet.ImageNetData()
    cds = candidate_data.CandidateData(verbose=False)
    # HIT result files excluded from the MTurk data load.
    filenames_to_ignore = [
        '2018-08-06_17:33_vaishaal.json',
        '2018-08-17_17:24_vaishaal.json',
        'vaishaal_hits_submitted_2018-08-17-18:28:33-PDT.json',
        'vaishaal_hits_submitted_2018-08-17-18:50:38-PDT.json',
        'vaishaal_hits_submitted_2018-08-17-19:28:24-PDT.json',
        'vaishaal_hits_submitted_2018-08-17-19:56:28-PDT.json',
        'vaishaal_hits_submitted_2018-08-25-09:47:26-PDT.json']
    mturk = mturk_data.MTurkData(live=True, load_assignments=True, source_filenames_to_ignore=filenames_to_ignore, verbose=False)
    to_featurize = []
    to_featurize_keys = []
    start = timer()
    with open('../data/metadata/fc7_candidates.json', 'r') as f:
        candidate_list = json.load(f)
    for i, k in enumerate(candidate_list, 1):
        key_name = os.path.join(prefix, str(k) + ".npy")
        if not utils.key_exists(bucket, key_name):
            img = cds.load_image(k, size='original', verbose=False)
            img = skimage.transform.resize(img, FEATURIZE_SIZE, preserve_range=True)
            to_featurize.append(img)
            to_featurize_keys.append(k)
        print('Got candidate {}'.format(i))
    end = timer()
    print(f"Took {end-start} seconds to get remaining candidates.")
    print('Beginning featurization of {} items'.format(len(to_featurize_keys)))
    if len(to_featurize) > 0:
        feat_start = timer()
        to_featurize = np.stack(to_featurize, axis=0)
        print(f"input shape {to_featurize.shape}")
        batch_size = min(len(to_featurize), batch_size)
        features = featurize.vgg16_features(to_featurize, batch_size=batch_size)
        print(f"features shape {features.shape}")
        for i, f in enumerate(features):
            key_name = os.path.join(prefix, to_featurize_keys[i] + ".npy")
            bio = io.BytesIO()
            np.save(bio, f)
            print("writing key {0}".format(key_name))
            utils.put_s3_object_bytes_with_backoff(bio.getvalue(), key_name)
        # Bug fix: the closing message previously reused the stale
        # candidate-download timer and duplicated that message; report
        # the featurize+upload time instead.
        print(f"Took {timer()-feat_start} seconds to featurize and upload.")
if __name__ == "__main__":
    # CLI entry point; --source_filename is accepted but currently
    # unused by featurize_candidates.
    parser = argparse.ArgumentParser("featurize candidate images if not exist")
    parser.add_argument("--bucket", default="imagenet2datav2")
    parser.add_argument("--prefix", default="imagenet2candidates_featurized")
    parser.add_argument("--batch_size", default=32, type=int)
    parser.add_argument("--source_filename", type=str)
    args = parser.parse_args()
    featurize_candidates(args.bucket, args.prefix, args.batch_size, args.source_filename)
| 3,328 | 39.108434 | 129 | py |
ImageNetV2 | ImageNetV2-master/code/featurize_test.py | import argparse
import io
import os
import pickle
import tarfile
import time
from timeit import default_timer as timer

import boto3
import numpy as np
import skimage.transform
import torch
import torchvision.models as models
from torch.autograd import Variable
from torch import nn

import candidate_data
import featurize
import imagenet
import utils
def featurize_and_upload_batch(to_featurize, to_featurize_keys, batch_size, bucket, prefix, client):
    """Featurize one batch of images with VGG16 and upload the fc7
    features to S3, one .npy object per image."""
    t0 = timer()
    batch = np.stack(to_featurize, axis=0)
    print(f"input shape {batch.shape}")
    effective_batch = min(len(batch), batch_size)
    print('Stacking took ', timer() - t0)
    t0 = timer()
    features = featurize.vgg16_features(batch, batch_size=effective_batch, use_gpu=True)
    print('Featurization took ', timer() - t0)
    print(f"features shape {features.shape}")
    t0 = timer()
    for idx, feat in enumerate(features):
        key_name = os.path.join(prefix, f"{to_featurize_keys[idx]}.npy")
        buf = io.BytesIO()
        np.save(buf, feat)
        client.put_object(Key=key_name, Bucket=bucket, Body=buf.getvalue(), ACL="bucket-owner-full-control")
    print('Uploading took ', timer() - t0)
FEATURIZE_SIZE = (224, 224)
def featurize_test_images(bucket, prefix, batch_size):
    """Featurize every ImageNet test image that does not yet have a
    feature object under `prefix` in `bucket`, in batches of
    `batch_size`."""
    imgnt = imagenet.ImageNetData()
    client = utils.get_s3_client()
    pending_images = []
    pending_keys = []
    batch_start = timer()
    num_batches = 0
    for fname in imgnt.test_filenames:
        key_name = os.path.join(prefix, f"{fname}.npy")
        if utils.key_exists(bucket, key_name):
            continue
        img = imgnt.load_image(fname, size='scaled_256', force_rgb=True, verbose=False)
        img = skimage.transform.resize(img, FEATURIZE_SIZE, preserve_range=True)
        pending_images.append(img)
        pending_keys.append(fname)
        if len(pending_images) >= batch_size:
            num_batches += 1
            featurize_and_upload_batch(pending_images, pending_keys, batch_size, bucket, prefix, client)
            print('processing bach {} (size {}) took {} seconds'.format(num_batches, len(pending_images), timer() - batch_start))
            batch_start = timer()
            pending_images = []
            pending_keys = []
    # Flush the final partial batch, if any.
    if pending_images:
        featurize_and_upload_batch(pending_images, pending_keys, batch_size, bucket, prefix, client)
if __name__ == "__main__":
    # CLI entry point: featurize ImageNet test images missing from the
    # given S3 prefix.
    parser = argparse.ArgumentParser("featurize test images if not exist")
    parser.add_argument("--bucket", default="imagenet2datav2")
    parser.add_argument("--prefix", default="imagenet-test-featurized")
    parser.add_argument("--batch_size", default=64, type=int)
    args = parser.parse_args()
    featurize_test_images(args.bucket, args.prefix, args.batch_size)
| 2,873 | 35.379747 | 119 | py |
ImageNetV2 | ImageNetV2-master/code/featurize.py | import io
import pickle
import sys
import tarfile
import time
import boto3
import imageio
import numpy as np
import skimage.transform
import torch
from torch.autograd import Variable
from torch import nn
import torchvision.models as models
import utils
def vgg16_features(images, batch_size=60, use_gpu=True):
    """Compute VGG16 fc7 features (4096-d) for a batch of HWC images.

    Args:
        images: float array of shape (N, H, W, 3); a single HWC image
            is also accepted.
        batch_size: chunk size for the forward passes.
        use_gpu: run the model on CUDA.

    Returns:
        (N, 4096) numpy array of fc7 activations.
    """
    model = models.vgg16(pretrained=True)
    model.eval()
    # Performance fix (loop-invariant hoist): the classifier head minus
    # the final fc8 layer is identical for every chunk, but was rebuilt
    # (and re-moved to the GPU) on each iteration before.
    fc7_net = torch.nn.Sequential(*list(model.classifier)[:-1])
    if use_gpu:
        model = model.cuda()
        fc7_net = fc7_net.cuda()
    results = []
    for images_chunk in utils.chunks(images, batch_size):
        if len(images_chunk.shape) < 4:
            # NOTE(review): preserves original behavior — promotes the
            # full `images` array, not the chunk; confirm intent.
            images_chunk = images[np.newaxis, :, :, :]
        # HWC float32 -> NCHW for torch.
        images_chunk = images_chunk.astype('float32').transpose(0, 3, 1, 2)
        images_torch = Variable(torch.from_numpy(images_chunk))
        if use_gpu:
            images_torch = images_torch.cuda()
        x = model.features(images_torch)
        x = x.view(x.size(0), -1)
        x = fc7_net(x)
        x = x.cpu().data.numpy()
        results.append(x)
        if use_gpu:
            torch.cuda.empty_cache()
    return np.concatenate(results, axis=0)
def featurize_test(test_keys, batch_size=64):
    """Download the test images in `test_keys` from S3, featurize them
    with VGG16, upload the features, and return them."""
    images = []
    for img_name in test_keys:
        try:
            file_bytes = utils.get_s3_file_bytes(img_name, verbose=False)
            image = imageio.imread(file_bytes)
            if len(image.shape) == 3:
                if image.shape[2] == 4:
                    # Bug fix: this print referenced an undefined `name`
                    # (NameError); use the loop variable.
                    print('Removing alpha channel for image', img_name)
                    image = image[:, :, :3]
            elif len(image.shape) == 2:
                # Grayscale: replicate to three channels.
                image = np.stack((image, image, image), axis=2)
            if image.size != 196608:  # 256 * 256 * 3
                print(img_name)
                # Bug fix: a bare `raise` here had no active exception and
                # produced an unhelpful "No active exception" RuntimeError.
                raise ValueError(f'unexpected image size {image.size} for {img_name}')
            image = skimage.transform.resize(image, (224, 224), preserve_range=True)
        except Exception:
            # Log-and-reraise boundary so the failing key is visible.
            print('Exception: ' + str(img_name) + str(sys.exc_info()[0]))
            raise
        images.append(image)
    images = np.stack(images, axis=0)
    print('Beginning featurization')
    features = vgg16_features(images, batch_size=batch_size)
    write_test_output(test_keys, features)
    return features
def write_test_output(test_keys, features, bucket="imagenet2datav2"):
    """Upload one .npy feature vector per test image to S3 under the
    'imagenet-test-featurized-2/' prefix."""
    client = utils.get_s3_client()
    for idx in range(features.shape[0]):
        stem = test_keys[idx].split('.')[0].split('/')[1]
        s3_key = 'imagenet-test-featurized-2/' + stem + '.npy'
        buf = io.BytesIO()
        np.save(buf, features[idx])
        client.put_object(Bucket=bucket, Key=s3_key, Body=buf.getvalue(), ACL="bucket-owner-full-control")
    print('Done writing features')
def featurize_s3_tarball(tarball_key, bucket="imagenet2datav2", batch_size=32):
    """Download a tarball of images from S3, featurize every member
    with VGG16, upload the features, and return (features, tarball_key)."""
    client = utils.get_s3_client()
    payload = client.get_object(Key=tarball_key, Bucket=bucket)["Body"].read()
    tarball = tarfile.open(fileobj=io.BytesIO(payload))
    images = []
    image_filenames = []
    for member in tarball.getmembers():
        handle = tarball.extractfile(member)
        if handle is None:
            # Directory entries and other non-file members.
            continue
        im = skimage.transform.resize(imageio.imread(handle), (224, 224), preserve_range=True)
        if len(im.shape) == 2:
            # Grayscale: replicate to three channels.
            im = np.stack((im, im, im), axis=2)
        image_filenames.append(member.name)
        images.append(im)
    images = np.stack(images, axis=0)
    features = vgg16_features(images, batch_size=batch_size)
    write_output(tarball_key, features, image_filenames)
    return features, tarball_key
def write_output(tarball_key, features, image_filenames, bucket="imagenet2datav2",):
    """Pickle a {image stem: feature vector} dict for one tarball and
    upload it to the sibling '<dir>-featurized/' prefix on S3."""
    client = utils.get_s3_client()
    dir_name, file_key = tarball_key.split('/')
    file_key = file_key.replace('-scaled.tar', '-fc7.pkl' )
    key = dir_name + '-featurized/' + file_key
    results = {image_filenames[i].split('.')[0].split('/')[1]: features[i]
               for i in range(features.shape[0])}
    payload = pickle.dumps(results)
    print('Uploading {} to s3 '.format(key))
    client.put_object(Bucket=bucket, Key=key, Body=payload, ACL="bucket-owner-full-control")
def featurize_all(keys):
    """Featurize every tarball key in `keys`, logging per-tarball timing."""
    for img_class in keys:
        begin = time.time()
        featurize_s3_tarball(img_class)
        print('Took {} seconds to upload features'.format(time.time() - begin))
| 4,394 | 35.932773 | 96 | py |
ImageNetV2 | ImageNetV2-master/code/eval.py | import json
import pathlib
import click
import numpy as np
import torchvision.models
from tqdm import tqdm
import candidate_data
import eval_utils
import image_loader
import imagenet
import pretrainedmodels
import pretrainedmodels.utils as pretrained_utils
import torch
import os
import time
torch.backends.cudnn.deterministic = True
# Canonical list of torchvision model names this script can evaluate.
all_models = ['alexnet',
              'densenet121',
              'densenet161',
              'densenet169',
              'densenet201',
              'inception_v3',
              'resnet101',
              'resnet152',
              'resnet18',
              'resnet34',
              'resnet50',
              'squeezenet1_0',
              'squeezenet1_1',
              'vgg11',
              'vgg11_bn',
              'vgg13',
              'vgg13_bn',
              'vgg16',
              'vgg16_bn',
              'vgg19',
              'vgg19_bn']
# Models available from the `pretrainedmodels` package but not from
# torchvision; these take the TransformImage preprocessing path in eval().
extra_models = []
for m in pretrainedmodels.model_names:
    if m not in all_models:
        all_models.append(m)
        extra_models.append(m)
@click.command()
@click.option('--dataset', required=True, type=str)
@click.option('--models', required=True, type=str)
@click.option('--batch_size', default=32, type=str)
def eval(dataset, models, batch_size):
    """Evaluate the requested models on a dataset JSON and save each
    model's top-5 predictions to data/predictions/<dataset>/<model>.npy.

    Torchvision models and `pretrainedmodels` models take two largely
    parallel branches that differ only in preprocessing.  NOTE(review):
    --batch_size is declared type=str, so non-default CLI values reach
    the DataLoader as strings — confirm.  (This `eval` also shadows the
    builtin of the same name.)
    """
    dataset_filename = dataset
    if models == 'all':
        models = all_models
    else:
        models = models.split(',')
    for model in models:
        assert model in all_models
    dataset_filepath = pathlib.Path(__file__).parent / '../data/datasets' / (dataset_filename + '.json')
    print('Reading dataset from {} ...'.format(dataset_filepath))
    with open(dataset_filepath, 'r') as f:
        dataset = json.load(f)
    cur_imgs = [x[0] for x in dataset['image_filenames']]
    imgnet = imagenet.ImageNetData()
    cds = candidate_data.CandidateData(load_metadata_from_s3=False, exclude_blacklisted_candidates=False)
    loader = image_loader.ImageLoader(imgnet, cds)
    pbar = tqdm(total=len(cur_imgs), desc='Dataset download')
    # Batch download so the per-image loads during evaluation hit cache.
    img_data = loader.load_image_bytes_batch(cur_imgs, size='scaled_500', verbose=False, download_callback=lambda x:pbar.update(x))
    pbar.close()
    for model in tqdm(models, desc='Model evaluations'):
        if (model not in extra_models):
            # Torchvision branch: standard resize/center-crop transform.
            tqdm.write('Evaluating {}'.format(model))
            resize_size = 256
            center_crop_size = 224
            if model == 'inception_v3':
                # Inception v3 expects 299x299 inputs.
                resize_size = 299
                center_crop_size = 299
            data_loader = eval_utils.get_data_loader(cur_imgs,
                                                     imgnet,
                                                     cds,
                                                     image_size='scaled_500',
                                                     resize_size=resize_size,
                                                     center_crop_size=center_crop_size,
                                                     batch_size=batch_size)
            pt_model = getattr(torchvision.models, model)(pretrained=True)
            if (torch.cuda.is_available()):
                pt_model = pt_model.cuda()
            pt_model.eval()
            tqdm.write(' Number of trainable parameters: {}'.format(sum(p.numel() for p in pt_model.parameters() if p.requires_grad)))
            predictions, top1_acc, top5_acc, total_time, num_images = eval_utils.evaluate_model(
                pt_model, data_loader, show_progress_bar=True)
            tqdm.write(' Evaluated {} images'.format(num_images))
            tqdm.write(' Top-1 accuracy: {:.2f}'.format(100.0 * top1_acc))
            tqdm.write(' Top-5 accuracy: {:.2f}'.format(100.0 * top5_acc))
            tqdm.write(' Total time: {:.1f} (average time per image: {:.2f} ms)'.format(total_time, 1000.0 * total_time / num_images))
            npy_out_filepath = pathlib.Path(__file__).parent / '../data/predictions' / dataset_filename / (model + '.npy')
            npy_out_filepath = npy_out_filepath.resolve()
            directory = os.path.dirname(npy_out_filepath)
            if not os.path.exists(directory):
                os.makedirs(directory)
            if (os.path.exists(npy_out_filepath)):
                # Back up existing predictions (timestamp suffix), then
                # compare to detect nondeterminism between runs.
                old_preds = np.load(npy_out_filepath)
                np.save(f'{npy_out_filepath}.{int(time.time())}', old_preds)
                print('checking old preds is same as new preds')
                if not np.allclose(old_preds, predictions):
                    diffs = np.round(old_preds - predictions, 4)
                    print('old preds != new preds')
                else:
                    print('old preds == new_preds!')
            np.save(npy_out_filepath, predictions)
            tqdm.write(' Saved predictions to {}'.format(npy_out_filepath))
        else:
            # pretrainedmodels branch: use the package's own transform.
            tqdm.write('Evaluating extra model {}'.format(model))
            if (model in {"dpn68b", "dpn92", "dpn107"}):
                # These DPN variants ship only imagenet+5k weights.
                pt_model = pretrainedmodels.__dict__[model](num_classes=1000, pretrained='imagenet+5k')
            else:
                pt_model = pretrainedmodels.__dict__[model](num_classes=1000, pretrained='imagenet')
            tf_img = pretrained_utils.TransformImage(pt_model)
            load_img = pretrained_utils.LoadImage()
            tqdm.write(' Number of trainable parameters: {}'.format(sum(p.numel() for p in pt_model.parameters() if p.requires_grad)))
            #print(pt_model)
            #print(load_img)
            dataset = eval_utils.ImageLoaderDataset(cur_imgs, imgnet, cds,
                                                    'scaled_500', transform=tf_img)
            data_loader = torch.utils.data.DataLoader(dataset,
                                                      batch_size=batch_size, shuffle=False,
                                                      num_workers=0, pin_memory=True)
            if (torch.cuda.is_available()):
                pt_model = pt_model.cuda()
            pt_model.eval()
            predictions, top1_acc, top5_acc, total_time, num_images = eval_utils.evaluate_model(
                pt_model, data_loader, show_progress_bar=True)
            tqdm.write(' Evaluated {} images'.format(num_images))
            tqdm.write(' Top-1 accuracy: {:.2f}'.format(100.0 * top1_acc))
            tqdm.write(' Top-5 accuracy: {:.2f}'.format(100.0 * top5_acc))
            tqdm.write(' Total time: {:.1f} (average time per image: {:.2f} ms)'.format(total_time, 1000.0 * total_time / num_images))
            npy_out_filepath = pathlib.Path(__file__).parent / '../data/predictions' / dataset_filename / (model + '.npy')
            npy_out_filepath = npy_out_filepath.resolve()
            directory = os.path.dirname(npy_out_filepath)
            if not os.path.exists(directory):
                os.makedirs(directory)
            if (os.path.exists(npy_out_filepath)):
                # Same backup-and-compare behavior as the torchvision branch.
                old_preds = np.load(npy_out_filepath)
                np.save(f'{npy_out_filepath}.{int(time.time())}', old_preds)
                print('checking old preds is same as new preds')
                if not np.allclose(old_preds, predictions):
                    diffs = np.round(old_preds - predictions, 4)
                    print('old preds != new preds')
                else:
                    print('old preds == new_preds!')
            np.save(npy_out_filepath, predictions)
            tqdm.write(' Saved predictions to {}'.format(npy_out_filepath))
if __name__ == '__main__':
    # click handles argv parsing and invokes the decorated command.
    eval()
| 7,435 | 41.735632 | 138 | py |
ImageNetV2 | ImageNetV2-master/code/eval_utils.py | import math
from timeit import default_timer as timer
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import tqdm
import image_loader
class ImageLoaderDataset(torch.utils.data.Dataset):
    """torch Dataset over ImageNet/candidate image filenames.

    Each filename's wnid is resolved to a 0-999 class id up front;
    images are loaded on demand through an ImageLoader.
    """
    def __init__(self, filenames, imgnet, cds, size, verbose=False, transform=None):
        self.imgnet = imgnet
        self.cds = cds
        self.loader = image_loader.ImageLoader(imgnet, cds)
        self.size = size
        self.verbose = verbose
        self.filenames = list(filenames)
        self.transform = transform
        self.wnids = [self.loader.get_wnid_of_image(fn) for fn in self.filenames]
        self.class_ids = [self.imgnet.class_info_by_wnid[w].cid for w in self.wnids]
        for cid in self.class_ids:
            # Every class id must be a valid ILSVRC-2012 class.
            assert 0 <= cid < 1000
    def __len__(self):
        return len(self.filenames)
    def __getitem__(self, index):
        filename = self.filenames[index]
        img = self.loader.load_image(filename, size=self.size, verbose=self.verbose, loader='pillow', force_rgb=True)
        if self.transform is not None:
            img = self.transform(img)
        return (img, self.class_ids[index])
def get_data_loader(imgs, imgnet, cds, image_size='scaled_500', batch_size=128, num_workers=0, resize_size=256, center_crop_size=224):
    """Build a DataLoader over ``imgs`` with standard ImageNet preprocessing.

    Pipeline: resize shorter side to ``resize_size``, center-crop to
    ``center_crop_size``, convert to tensor, normalize with the usual
    ImageNet channel statistics.  Order is preserved (shuffle=False).
    """
    preprocess = transforms.Compose([
        transforms.Resize(resize_size),
        transforms.CenterCrop(center_crop_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    dataset = ImageLoaderDataset(imgs, imgnet, cds, image_size, transform=preprocess)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)
def evaluate_model(model, data_loader, show_progress_bar=False, notebook_progress_bar=False):
    """Run ``model`` over ``data_loader`` on the GPU and score top-1/top-5.

    Returns ``(predictions, top1_acc, top5_acc, seconds, num_images)`` where
    ``predictions`` is an (N, 5) array of the top-5 class indices per image.
    The caller is expected to have put ``model`` in eval mode; inputs are
    moved to CUDA, so a GPU is required.
    """
    cudnn.benchmark = True
    num_images = 0
    num_top1_correct = 0
    num_top5_correct = 0
    batch_predictions = []
    start = timer()
    with torch.no_grad():
        batches = enumerate(data_loader)
        if show_progress_bar:
            total_batches = int(math.ceil(len(data_loader.dataset) / data_loader.batch_size))
            progress = tqdm.tqdm_notebook if notebook_progress_bar else tqdm.tqdm
            batches = progress(batches, total=total_batches, desc='Batch')
        for _, (img_input, target) in batches:
            img_input = img_input.cuda(non_blocking=True)
            # Top-5 predicted class indices, most likely first.
            _, top5_idx = model(img_input).topk(k=5, dim=1, largest=True, sorted=True)
            top5_idx = top5_idx.cpu().numpy()
            batch_predictions.append(top5_idx)
            labels = target.cpu().numpy()
            num_top1_correct += int(np.sum(labels == top5_idx[:, 0]))
            num_top5_correct += sum(label in top5_idx[row] for row, label in enumerate(labels))
            num_images += len(target)
    end = timer()
    predictions = np.vstack(batch_predictions)
    assert predictions.shape == (num_images, 5)
    return predictions, num_top1_correct / num_images, num_top5_correct / num_images, end - start, num_images
| 3,351 | 39.878049 | 134 | py |
COV19D_3rd | COV19D_3rd-main/Seg-Exct-Classif-Pipeline-Hybrid Method.py | # -*- KENAN MORANI - IZMIR DEMOCRACY UNIVERSITY -*-
#### COV19-CT DB Database #####
### part of IEEE ICASSP 2023: AI-enabled Medical Image Analysis Workshop and Covid-19 Diagnosis Competition (AI-MIA-COV19D)
### at https://mlearn.lincoln.ac.uk/icassp-2023-ai-mia/
#### B. 3rd COV19D Competition ---- I. Covid-19 Detection Challenge
#### kenan.morani@gmail.com
# Importing Libraries
import os, glob
import numpy as np
import matplotlib.pyplot as plt
import cv2
import nibabel as nib
import tensorflow as tf
from tensorflow import keras
from skimage.feature import canny
#from scipy import ndimage as ndi
from skimage import io
from skimage.exposure import histogram
from PIL import Image as im
import skimage
from skimage import data,morphology
from skimage.color import rgb2gray
#import scipy.ndimage as nd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, Dropout, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
#from tensorflow.keras.applications import VGG16
from keras.callbacks import ModelCheckpoint
from skimage import color, filters
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import layers, models
from tensorflow.keras import activations
from tensorflow.keras.models import Sequential
from termcolor import colored
#import visualkeras
from collections import defaultdict
from PIL import ImageFont
from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img
import cv2
import csv
from sklearn.utils import class_weight
from collections import Counter
from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing
from skimage.measure import label,regionprops, perimeter
from skimage.morphology import binary_dilation, binary_opening
from skimage.filters import roberts, sobel
from skimage import measure, feature
from skimage.segmentation import clear_border
#from skimage.util.montage import montage2d
from scipy import ndimage as ndi
#from mpl_toolkits.mplot3d.art3d import Poly3DCollection
#import dicom
import scipy.misc
#from tensorflow.keras.models import model_from_json
#import visualkeras
from tensorflow.python.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, ZeroPadding2D
#from collections import defaultdict
#from PIL import ImageFont
####################################################################################################
####################################################################################################
####################### Processing : Slicing and Saving Exctracted Slices from 3D Images [If needed] #########
###############################################################################
"""# Slicing and Saving"""
## The path here include the image volumes (308MB)
## and the lung masks (1MB) were extracted from the COVID-19 CT segmentation dataset
dataInputPath = '/home/idu/Desktop/COV19D/segmentation/volumes'
imagePathInput = os.path.join(dataInputPath, 'img/') ## Image volumes were exctracted to this subfolder
maskPathInput = os.path.join(dataInputPath, 'mask/') ## lung masks were exctracted to this subfolder
# Preparing the outputpath for slicing the CT volume from the above data++++
dataOutputPath = '/home/idu/Desktop/COV19D/segmentation/slices/'
imageSliceOutput = os.path.join(dataOutputPath, 'img/') ## Image volume slices will be placed here
maskSliceOutput = os.path.join(dataOutputPath, 'mask/') ## Annotated masks slices will be placed here
# Slicing only in Z direction
# Slices in Z direction shows the required lung area
SLICE_X = False
SLICE_Y = False
SLICE_Z = True
# Zero-padding width for the slice index in output filenames (e.g. slice007).
SLICE_DECIMATE_IDENTIFIER = 3
# Choosing normalization boundaries suitable from the chosen images
# (Hounsfield-unit window used by normalizeImageIntensityRange below).
HOUNSFIELD_MIN = -1020
HOUNSFIELD_MAX = 2995
HOUNSFIELD_RANGE = HOUNSFIELD_MAX - HOUNSFIELD_MIN
def normalizeImageIntensityRange (img):
    """Window ``img`` to [HOUNSFIELD_MIN, HOUNSFIELD_MAX] and rescale to [0, 1].

    Returns a new array; unlike the previous version (which assigned through
    boolean masks), the caller's array is no longer mutated in place —
    ``np.clip`` allocates the clipped copy, same return value as before.
    """
    clipped = np.clip(img, HOUNSFIELD_MIN, HOUNSFIELD_MAX)
    return (clipped - HOUNSFIELD_MIN) / HOUNSFIELD_RANGE
#nImg = normalizeImageIntensityRange(img)
#np.min(nImg), np.max(nImg), nImg.shape, type(nImg)
# Reading image or mask volume
def readImageVolume(imgPath, normalize=True):
    """Load a NIfTI volume from ``imgPath`` as a float array.

    When ``normalize`` is true the Hounsfield window/rescale from
    normalizeImageIntensityRange is applied (used for image volumes);
    masks are loaded raw with ``normalize=False``.
    """
    volume = nib.load(imgPath).get_fdata()
    return normalizeImageIntensityRange(volume) if normalize else volume
#readImageVolume(imgPath, normalize=False)
#readImageVolume(maskPath, normalize=False)
# Slicing image and saving
def sliceAndSaveVolumeImage(vol, fname, path):
    """Slice a 3D volume along the axes enabled by SLICE_X/SLICE_Y/SLICE_Z.

    Each slice is written via saveSlice as
    ``{fname}-slice{zero-padded index}_{axis}`` under ``path``.
    Returns the total number of slices written.
    """
    (dimx, dimy, dimz) = vol.shape
    print(dimx, dimy, dimz)
    cnt = 0
    # (enabled-flag, extent, axis label, slice extractor, filename suffix)
    axis_specs = [
        (SLICE_X, dimx, 'X', lambda i: vol[i, :, :], '_x'),
        (SLICE_Y, dimy, 'Y', lambda i: vol[:, i, :], '_y'),
        (SLICE_Z, dimz, 'Z', lambda i: vol[:, :, i], '_z'),
    ]
    for enabled, extent, axis_label, extract, suffix in axis_specs:
        if not enabled:
            continue
        cnt += extent
        print(f'Slicing {axis_label}: ')
        for i in range(extent):
            saveSlice(extract(i), fname + f'-slice{str(i).zfill(SLICE_DECIMATE_IDENTIFIER)}{suffix}', path)
    return cnt
# Saving volume slices to file
def saveSlice (img, fname, path):
    """Write one slice (values assumed in [0, 1]) as an 8-bit PNG under ``path``."""
    out_path = os.path.join(path, f'{fname}.png')
    cv2.imwrite(out_path, np.uint8(img * 255))
    print(f'[+] Slice saved: {out_path}', end='\r')
# Reading and processing image volumes for TEST images
for index, filename in enumerate(sorted(glob.iglob(imagePathInput+'*.nii.gz'))):
    img = readImageVolume(filename, True)
    print(filename, img.shape, np.sum(img.shape), np.min(img), np.max(img))
    numOfSlices = sliceAndSaveVolumeImage(img, 't'+str(index), imageSliceOutput)
    print(f'\n(unknown), {numOfSlices} slices created \n')
# Reading and processing image mask volumes for TEST masks
# (normalize=False: masks keep their raw label values).
for index, filename in enumerate(sorted(glob.iglob(maskPathInput+'*.nii.gz'))):
    img = readImageVolume(filename, False)
    print(filename, img.shape, np.sum(img.shape), np.min(img), np.max(img))
    numOfSlices = sliceAndSaveVolumeImage(img, 't'+str(index), maskSliceOutput)
    print(f'\n(unknown), {numOfSlices} slices created \n')
# Exploring the data
imgPath = os.path.join(imagePathInput, '1.nii.gz')
img = nib.load(imgPath).get_fdata()
# NOTE(review): bare expression below has no effect outside a REPL/notebook.
np.min(img), np.max(img), img.shape, type(img)
maskPath = os.path.join(maskPathInput, '1.nii.gz')
mask = nib.load(maskPath).get_fdata()
np.min(mask), np.max(mask), mask.shape, type(mask)
# Showing Mask slice
imgSlice = mask[:,:,20]
plt.imshow(imgSlice, cmap='gray')
plt.show()
# Showing Corresponding Image slice
imgSlice = img[:,:,20]
plt.imshow(imgSlice, cmap='gray')
plt.show()
"""# Training and testing Generator"""
# Define constants
#SEED = 42
### Setting the training and testing dataset for validation of the proposed VGG16-UNET model
### All t0&t1 Z-sliced slices (images and masks) were used for testing
#BATCH_SIZE_TRAIN = 32
#BATCH_SIZE_TEST = 32
IMAGE_HEIGHT = 224
IMAGE_WIDTH = 224
SIZE = IMAGE_HEIGHT = IMAGE_HEIGHT
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)
#### Splitting the data into training and test sets happen manually
### t0 volumes and masks were chosen as test sets
data_dir = '/home/idu/Desktop/COV19D/segmentation/slices/'
data_dir_train = os.path.join(data_dir, 'training')
# The images should be stored under: "data/slices/training/img/img"
data_dir_train_image = os.path.join(data_dir_train, 'img')
# The images should be stored under: "data/slices/training/mask/img"
data_dir_train_mask = os.path.join(data_dir_train, 'mask')
data_dir_test = os.path.join(data_dir, 'test')
# The images should be stored under: "data/slices/test/img/img"
data_dir_test_image = os.path.join(data_dir_test, 'img')
# The images should be stored under: "data/slices/test/mask/img"
data_dir_test_mask = os.path.join(data_dir_test, 'mask')
BATCH_SIZE = 64
def orthogonal_rot(image):
    """Rotate ``image`` by a random quarter turn: -90, 0, or +90 degrees."""
    quarter_turns = np.random.choice([-1, 0, 1])
    return np.rot90(image, quarter_turns)
def create_segmentation_generator_train(img_path, msk_path, BATCH_SIZE):
    """Paired (image, mask) training generator with augmentation.

    The same ImageDataGenerator (and the shared module-level SEED) is used
    for both directories so random augmentations stay aligned between an
    image and its mask.  Relies on module globals IMG_SIZE and SEED.

    NOTE(review): featurewise_center/featurewise_std_normalization require
    datagen.fit(...) on sample data, which is never called here — Keras
    only warns and these options have no effect; confirm intent.
    """
    data_gen_args = dict(rescale=1./255,
                     featurewise_center=True,
                     featurewise_std_normalization=True,
                     rotation_range=90,
                     preprocessing_function=orthogonal_rot,
                     width_shift_range=0.2,
                     height_shift_range=0.2,
                     zoom_range=0.3
                     )
    datagen = ImageDataGenerator(**data_gen_args)
    img_generator = datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    msk_generator = datagen.flow_from_directory(msk_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, msk_generator)
# Remember not to perform any image augmentation in the test generator!
def create_segmentation_generator_test(img_path, msk_path, BATCH_SIZE):
    """Paired (image, mask) evaluation generator: rescale only, no augmentation."""
    datagen = ImageDataGenerator(rescale=1./255)

    def flow(directory):
        # Shared SEED keeps image and mask streams aligned.
        return datagen.flow_from_directory(directory, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)

    return zip(flow(img_path), flow(msk_path))
BATCH_SIZE_TRAIN = BATCH_SIZE_TEST = BATCH_SIZE
# SEED must be set before the generators below are built (they read it).
SEED = 44
train_generator = create_segmentation_generator_train(data_dir_train_image, data_dir_train_mask, BATCH_SIZE_TRAIN)
test_generator = create_segmentation_generator_test(data_dir_test_image, data_dir_test_mask, BATCH_SIZE_TEST)
# Dataset sizes used later to derive steps-per-epoch.
NUM_TRAIN = 2*745
NUM_TEST = 2*84
## Choosing number of training epoches
NUM_OF_EPOCHS = 20
def display(display_list):
    """Show up to three panels side by side: input image, true mask, predicted mask."""
    plt.figure(figsize=(15,15))
    titles = ['Input Image', 'True Mask', 'Predicted Mask']
    n_panels = len(display_list)
    for position, panel in enumerate(display_list):
        plt.subplot(1, n_panels, position + 1)
        plt.title(titles[position])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(panel), cmap='gray')
    plt.show()
def show_dataset(datagen, num=1):
    """Pull ``num`` batches from ``datagen`` and display the first (image, mask) pair of each."""
    for _ in range(num):
        image, mask = next(datagen)
        display([image[0], mask[0]])
show_dataset(test_generator, 2)
#############################################################
######################## Method 1
##################################################################################
##################################################################################
#############################################################
######################## Image Processing and CNN modelling
##################################################################################
### Manual Cropping of the image; r
img = cv2.imread('/home/idu/Desktop/COV19D/val-seg1 (copy)/non-covid/ct_scan210/1.jpg')
img = skimage.color.rgb2gray(img)
r = cv2.selectROI(img)
# Plotting hostogram
# find frequency of pixels in range 0-255
# NOTE(review): `Image` is undefined here — PIL was imported as `im`
# (from PIL import Image as im); running this line raises NameError.
img = Image.open('/home/idu/Desktop/COV19D/val-seg1 (copy)/non-covid/ct_scan210/166.jpg')
img = skimage.color.rgb2gray(img)
# NOTE(review): `t` is used here but only assigned further below (t = 0.45);
# top-to-bottom execution raises NameError — this section presumably runs
# interactively after t is set.
img = img < t
img = np.array(img)
#img = skimage.filters.gaussian(img, sigma=1.0)
# Flatten the image array to 1D
img = img.ravel()
# Plot the histogram
plt.hist(img)#, bins=256, range=(0, 256), color='red', alpha=0.4)
plt.xlabel("Pixel value")
#plt.xlim(0, 50)
plt.ylabel("Counts")
plt.title("Image histogram")
plt.show()
histr = cv2.calcHist([img],[0],None,[256],[0,256])
# show the plotting graph of an image
plt.plot(histr)
plt.show()
t = 0.45 #Histogram Threshold
#### Cropping right-lung as an ROI and removing upper and lowermost of the slices
count = []
folder_path = '/home/idu/Desktop/COV19D/val-seg1 (copy)/covid1'
#Change this directory to the directory where you need to do preprocessing for images
#Inside the directory must folder(s), which have the images inside them
for fldr in os.listdir(folder_path):
    sub_folder_path = os.path.join(folder_path, fldr)
    for filee in os.listdir(sub_folder_path):
        file_path = os.path.join(sub_folder_path, filee)
        img = cv2.imread(file_path)
        #Grayscale images
        img = skimage.color.rgb2gray(img)
        # First cropping an image
        #%r = cv2.selectROI(im)
        #Select ROI from images before you start the code
        #Reference: https://learnopencv.com/how-to-select-a-bounding-box-roi-in-opencv-cpp-python/
        #{Last access 15th of Dec, 2021}
        # Crop image using r
        #img_cropped = img[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
        img_cropped=img
        # Thresholding and binarizing images
        # Reference: https://datacarpentry.org/image-processing/07-thresholding/
        #{Last access 15th of Dec, 2021}
        # Gussian Filtering
        #img = skimage.filters.gaussian(img_cropped, sigma=1.0)
        # Binarizing the image
        #print (img)
        img = img < t
        # Number of below-threshold (dark) pixels decides whether the slice
        # is kept; slices with too few are deleted as non-representative.
        count = np.count_nonzero(img)
        #print(count)
        if count > 260000: ## Threshold to be selected
            #print(count)
            img_cropped = np.expand_dims(img_cropped, axis=2)
            img_cropped = array_to_img (img_cropped)
            # Replace images with the image that includes ROI
            img_cropped.save(str(file_path), 'JPEG')
            #print('saved')
        else:
            #print(count)
            # Remove non-representative slices
            os.remove(str(file_path))
            print('removed')
            print(str(sub_folder_path))
    # Check that there is at least one slice left
    if not os.listdir(str(sub_folder_path)):
        print(str(sub_folder_path), "Directory is empty")
count = []
train_datagen = ImageDataGenerator(rescale=1./255,
vertical_flip=True,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
'/home/idu/Desktop/COV19D/train-Preprocessed/', ## COV19-CT-DB Training set
target_size=(h, w),
batch_size=batch_size,
color_mode='grayscale',
classes = ['covid','non-covid'],
class_mode='binary')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
'/home/idu/Desktop/COV19D/validation-Preprocessed/', ## COV19-CT-DB Validation set
target_size=(h, w),
batch_size=batch_size,
color_mode='grayscale',
classes = ['covid','non-covid'],
class_mode='binary')
#### The CNN model
def make_model():
    """Binary CNN classifier for COVID vs non-COVID CT slices.

    Four Conv(3x3)-BatchNorm-ReLU-MaxPool stages with 16/32/64/128 filters,
    then Flatten -> Dense(256)-BN-ReLU-Dropout(0.1) -> Dense(1, sigmoid).
    Input shape is (h, w, 1) from the module-level globals.
    """
    model = models.Sequential()
    # Convolutional stages; filter count doubles each stage.
    for stage, n_filters in enumerate((16, 32, 64, 128)):
        if stage == 0:
            model.add(layers.Conv2D(n_filters, (3, 3), input_shape=(h, w, 1), padding="same"))
        else:
            model.add(layers.Conv2D(n_filters, (3, 3), padding="same"))
        model.add(layers.BatchNormalization())
        model.add(layers.ReLU())
        model.add(layers.MaxPooling2D((2, 2)))
    # Fully connected head.
    model.add(layers.Flatten())
    model.add(layers.Dense(256))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Dropout(0.1))  # Try 0.1 or 0.3
    # Sigmoid output for binary classification.
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
model = make_model()
## Load model weights after saving
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/imagepreprocesscnnclass.h5')
n_epochs= 100
# Compiling the model using SGD optimizer with a learning rate schedualer
# NOTE(review): despite the comment above, the optimizer actually used is
# 'adam', and the lr_schedule built below is never passed to compile().
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall(),
                       'accuracy'])
early_stopping_cb = keras.callbacks.EarlyStopping(monitor="val_accuracy", patience=7)
checkpoint = ModelCheckpoint('/home/idu/Desktop/COV19D/ChatGPT-saved-models/imagepreprocesscnnclass.h5', save_best_only=True, save_weights_only=True)
###Learning Rate decay
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=100000,
    decay_rate=0.96,
    staircase=True)
def decayed_learning_rate(step, decay_rate=0.96, decay_steps=100000):
    """Continuous exponential learning-rate decay: lr0 * decay_rate**(step/decay_steps).

    Fixes two defects in the original: ``^`` (bitwise XOR) was used where
    exponentiation ``**`` was intended, and ``decay_rate``/``decay_steps``
    were undefined free names.  They are now keyword parameters whose
    defaults mirror the ExponentialDecay schedule configured above, so
    existing single-argument calls keep working.  Still reads the
    module-level ``initial_learning_rate``.
    """
    return initial_learning_rate * decay_rate ** (step / decay_steps)
## Class weight
# Inverse-frequency weights so the minority class counts more in the loss.
counter = Counter(train_generator.classes)
max_val = float(max(counter.values()))
class_weights = {class_id : max_val/num_images for class_id, num_images in counter.items()}
# NOTE(review): bare expression — only shows the dict in a REPL/notebook.
class_weights
# NOTE(review): each pair of assignments overwrites the first value; only
# the "Without Slice reduction" counts take effect.
training_steps = 2*269309 // batch_size
training_steps = 434726 // batch_size ## Without Slice reduction
val_steps = 67285 // batch_size
val_steps = 106378 // batch_size ## Without Slice reduction
history=model.fit(train_generator,
                  steps_per_epoch=training_steps,
                  validation_data=val_generator,
                  validation_steps=val_steps,
                  verbose=2,
                  epochs=n_epochs,
                  callbacks=[early_stopping_cb, checkpoint],
                  class_weight=class_weights)
## saving the model
model.save('/home/idu/Desktop/COV19D/ChatGPT-saved-models/imagepreprocesscnnclass.h5')
model = keras.models.load_model('/home/idu/Desktop/COV19D/ChatGPT-saved-models/imagepreprocesscnnclass.h5')
# Evaluatin the model
model.evaluate(val_generator, batch_size=128)
#############################################################
######################## Method 2 & 3
##################################################################################
##################################################################################
#############################################################
######################## Stage 1 : SEGMENTAITON
##################################################################################
############ K-Means Clustering Based Segmetnation [Optional]
def extract_lungs(mask):
    """Smooth a binary lung mask: morphological closing then opening with a 5x5 kernel."""
    structuring_element = np.ones((5, 5), np.uint8)
    closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, structuring_element)
    opened = cv2.morphologyEx(closed, cv2.MORPH_OPEN, structuring_element)
    return opened
def kmeans_segmentation(image):
    """Quantize ``image`` into two intensity clusters with OpenCV k-means.

    Every pixel is replaced by its cluster's center intensity; the result
    has the same shape as the input.
    """
    samples = np.float32(image.reshape((-1, 1)))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    # K=2: foreground/background split on intensity alone.
    _, labels, centers = cv2.kmeans(samples, 2, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    centers = np.uint8(centers)
    quantized = centers[labels.flatten()]
    return quantized.reshape(image.shape)
def segment_and_extract_lungs(image):
    """Segment ``image`` with 2-cluster k-means and keep only the darker cluster.

    The minimum-intensity cluster (lungs are dark on CT) becomes a binary
    mask, which is cleaned by extract_lungs; the original pixel values are
    copied through wherever the mask is set, zero elsewhere.
    """
    quantized = kmeans_segmentation(image)
    lung_mask = np.zeros(image.shape, dtype=np.uint8)
    lung_mask[quantized == quantized.min()] = 1
    lung_mask = extract_lungs(lung_mask)
    result = np.zeros(image.shape, dtype=np.uint8)
    result[lung_mask == 1] = image[lung_mask == 1]
    return result
# Modify here to run the code on all the required slices
# Walks input_folder, segments every .jpg with segment_and_extract_lungs,
# and mirrors the per-scan subfolder layout under output_folder.
input_folder = "/home/idu/Desktop/COV19D/test"
output_folder = "/home/idu/Desktop/COV19D/test-seg1"
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
for subdir, dirs, files in os.walk(input_folder):
    for file in files:
        if not file.endswith('.jpg'):  # Check if the file is a JPEG image
            continue
        image_path = os.path.join(subdir, file)
        try:
            image = cv2.imread(image_path, 0)  # Read the input image
            lung_extracted_image = segment_and_extract_lungs(image)
        except Exception as e:
            # Best-effort batch job: log the failing file and keep going.
            print(f"Error processing {image_path}: {e}")
            continue
        subfolder_name = os.path.basename(subdir)
        subfolder_path = os.path.join(output_folder, subfolder_name)
        if not os.path.exists(subfolder_path):
            os.makedirs(subfolder_path)
        output_path = os.path.join(subfolder_path, file)
        cv2.imwrite(output_path, lung_extracted_image)
################# Remove Non-representative Slices [optional]
def check_valid_image(image):
    """Return True if the segmented slice has more than 1764 non-zero pixels."""
    nonzero_pixels = cv2.countNonZero(image)
    return nonzero_pixels > 1764  # Choose a threshold for removal
input_folder = "/home/idu/Desktop/COV19D/train-seg/non-covid"
output_folder = "/home/idu/Desktop/COV19D/train-seg-sliceremove/non-covid"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for subdir, dirs, files in os.walk(input_folder):
subfolder_name = subdir.split('/')[-1]
subfolder_path = os.path.join(output_folder, subfolder_name)
if not os.path.exists(subfolder_path):
os.makedirs(subfolder_path)
count = 0
for file in files:
image_path = os.path.join(subdir, file)
if '.jpg' in image_path:
image = cv2.imread(image_path, 0)
if check_valid_image(image):
count += 1
output_path = os.path.join(subfolder_path, file)
cv2.imwrite(output_path, image)
if count == 0:
print(f"No valid images were found in subfolder: {subfolder_name}")
# calculating average, min and max dice coeffecient on the test set
GT_path = '/home/idu/Desktop/COV19D/segmentation/slices/test/mask/img'
pred_path = '/home/idu/Desktop/COV19D/segmentation/slices/test/img/img'
### Mean IoU & Dice COeffecient Measures
# specify the img directory path
#path = "path/to/img/folder/"
# list files in img directory
files = os.listdir(GT_path)
files2 = os.listdir(pred_path)
print(files)
mean_iou = []
dicee = []
dim = (224,224)
num_classes = 2
for file in files:
# make sure file is an image
for filee in files2:
if str(filee) == str(file):
## Ground Truth Mask
p1 = os.path.join(GT_path, file)
print(p1)
img = cv2.imread(p1 , 0)
img = cv2.resize(img, dim)
edges = canny(img)
img = nd.binary_fill_holes(edges)
elevation_map = sobel(img)
# Since, the contrast difference is not much. Anyways we will perform it
markers = np.zeros_like(img)
markers[img < 0.1171875] = 1 # 30/255
markers[img > 0.5859375] = 2 # 150/255
segmentation = morphology.watershed(elevation_map, markers)
#img = img / 255.0
#imgg = img
#img = numpy.bool(img)
#img = np.asarray(img).astype(np.bool)
## Predicted mask
#p2 = os.path.join(pred_path, filee)
#img2 = cv2.imread(p2, 0)
#img2= cv2.resize(img2, dim)
#img2 = img2 / 255.0
#img2 = img2[None]
#img2 = np.expand_dims(img2, axis=-1)
#img2 = UNet_model.predict(img2) > 0.5
#imgg2 = UNet_model.predict(img2) #> 0.5
#imgg2 = imgg2.astype(numpy.float64)
#img2 = np.squeeze(img2)
#imgg2 = np.squeeze(imgg2)
#d = dtype(image)
#print(d)
#img2 = np.asarray(img2).astype(np.bool)
IOU_keras = MeanIoU(num_classes=num_classes)
IOU_keras.update_state(img, segmentation)
print("Mean IoU =", IOU_keras.result().numpy())
mean_iou.append(IOU_keras.result().numpy())
value = dice_coef(img, segmentation)
print("Dice coeffecient value is", value, "\n")
dicee.append(value)
############ UNET Based Segemtnation
# Building 2D-UNET model
def unet(n_levels, initial_features=64, n_blocks=2, kernel_size=5, pooling_size=2, in_channels=1, out_channels=1):
    """Build a 2D U-Net with ``n_levels`` resolution levels.

    Encoder: per level, ``n_blocks`` Conv2D+BatchNorm blocks with
    ``initial_features * 2**level`` filters; all but the deepest level
    record a skip connection and downsample with MaxPool.  Decoder:
    Conv2DTranspose upsampling, concatenation with the matching skip,
    then ``n_blocks`` Conv2D blocks (no BatchNorm on the upstream path).
    Output: 1x1 Conv with sigmoid (binary) or softmax (multi-class).
    Input size comes from the module-level IMAGE_HEIGHT/IMAGE_WIDTH.
    """
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs
    # Shared conv hyperparameters for every Conv2D/Conv2DTranspose below.
    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')
    #downstream
    skips = {}
    for level in range(n_levels):
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
            x = BatchNormalization()(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)
    # upstream
    for level in reversed(range(n_levels-1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
    # output
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    # NOTE(review): BatchNorm directly before the final activation conv is
    # unusual but deliberate-looking here; confirm before changing.
    x = BatchNormalization()(x)
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)
    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')
## Choosing UNet depth
#UNet_model = unet(2) # 2-level depth UNet model
UNet_model = unet(3) # 3-level depth UNet model
#UNet_model = unet(4)# # 4-level depth UNet model
UNet_model.summary()
# Hyperparameters tuning
# NOTE(review): mid-file imports; conventionally these belong at the top.
from tensorflow.keras.metrics import MeanIoU
import math
initial_learning_rate = 0.1
def lr_exp_decay(epoch, lr):
    """LearningRateScheduler callback: lr = initial_learning_rate * exp(-epoch).

    The ``lr`` argument Keras passes in is ignored; the schedule depends
    only on the epoch index and the module-level initial_learning_rate.
    """
    decay_constant = 1
    return initial_learning_rate * math.exp(-decay_constant * epoch)
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss",
    patience=3,
    #verbose=1,
    #mode="auto",
    #baseline=None,
    #restore_best_weights=False,
)
EPOCH_STEP_TRAIN = 2*12*NUM_TRAIN // BATCH_SIZE_TRAIN
EPOCH_STEP_TEST = 2*NUM_TEST // BATCH_SIZE_TEST
UNet_model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall(),
                       #tf.keras.metrics.MeanIoU(num_classes = 2),
                       'accuracy'])
NUM_OF_EPOCHS = 20
# NOTE(review): Model.fit_generator is deprecated/removed in recent TF;
# Model.fit accepts generators directly.
UNet_model.fit_generator(generator=train_generator,
                    steps_per_epoch=EPOCH_STEP_TRAIN,
                    validation_data=test_generator,
                    validation_steps=EPOCH_STEP_TEST,
                    epochs=NUM_OF_EPOCHS,
                    callbacks=[early_stopping, tf.keras.callbacks.LearningRateScheduler(lr_exp_decay, verbose=1)]
                    )
#Evaluating the UNet models on the test partition
UNet_model.evaluate(test_generator, batch_size=128, steps=EPOCH_STEP_TEST)
#Saving the UNet model models with different depth levels and batch norm()
UNet_model.save('/home/idu/Desktop/COV19D/ChatGPT-saved-models/Segmentation models/UNet_model-3L-BatchNorm.h5')
#Loading saved models
UNet_model = keras.models.load_model('/home/idu/Desktop/COV19D/ChatGPT-saved-models/Segmentation models/UNet_model-3L-BatchNorm.h5')
# Displaying predicted slices against ground truth
def display(display_list):
    """Show panels side by side (re-definition: no figure-size setup this time)."""
    titles = ['Input Image', 'True Mask', 'Predicted Mask']
    for position, panel in enumerate(display_list):
        plt.subplot(1, len(display_list), position + 1)
        plt.title(titles[position])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(panel), cmap='gray')
    plt.show()
def show_dataset(datagen, num=1):
    """Display the first (image, mask) pair from each of ``num`` batches."""
    for _ in range(num):
        batch_images, batch_masks = next(datagen)
        display((batch_images[0], batch_masks[0]))
# Output directory for segmentation figures; presumably consumed manually
# (not referenced by the function defined next) — verify.
path = '/home/idu/Desktop/COV19D/segmentation/Segmentation Results/'
def show_prediction(datagen, num=1):
    """For ``num`` batches: predict a mask for the first image, display it
    next to the ground truth, and print the per-sample MeanIoU plus its
    2x2 confusion matrix."""
    for _ in range(0, num):
        image, mask = next(datagen)
        pred_mask = UNet_model.predict(image)[0] > 0.5
        display([image[0], mask[0], pred_mask])
        iou_metric = MeanIoU(num_classes=2)
        iou_metric.update_state(mask[0], pred_mask)
        print("Mean IoU =", iou_metric.result().numpy())
        confusion = np.array(iou_metric.get_weights()).reshape(2, 2)
        print(confusion)
show_prediction(test_generator, 12)
results = UNet_model.evaluate(test_generator, steps = EPOCH_STEP_TEST)
print("test loss, test acc:", results)
# Pair each metric name with its evaluated value.
for name, value in zip(UNet_model.metrics_names, results):
    print(name, ':', value)
import cv2 as cv
## Segmenting images based on k-means clustering and exctracting lung regions and saving them
#in the same directory as the original images (in .png format)
path = '/home/idu/Desktop/COV19D/segmentation/Segmentation Results/'
kernel = np.ones((5,5),np.float32)
def show_prediction(datagen, num=1):
    """Experimental re-definition: predict, print the mask dtype (three
    times, as in the original debug session), and display the triple.

    The MeanIoU bookkeeping of the earlier definition was commented out
    in the original and is dropped here.
    """
    for _ in range(0, num):
        image, mask = next(datagen)
        pred_mask = UNet_model.predict(image)[0] > 0.5
        # Debug output: the original printed the dtype three times.
        for _ in range(3):
            print(pred_mask.dtype)
        display([image[0], mask[0], pred_mask])
show_prediction(test_generator, 2)
# calculating average, min and max dice coeffecient on the test set
GT_path = '/home/idu/Desktop/COV19D/segmentation/slices/test/mask/img'
pred_path = '/home/idu/Desktop/COV19D/segmentation/slices/test/img/img'
# Mean IoU & Dice COeffecient Measures
# specify the img directory path
#path = "path/to/img/folder/"
# list files in img directory
files = os.listdir(GT_path)
files2 = os.listdir(pred_path)
print(files)
# NOTE(review): mid-file import; K is not obviously used below — verify.
from tensorflow.keras import backend as K
def dice_coef(img, img2):
    """Dice coefficient between two equally-shaped arrays.

    A pixel counts as intersecting when ALL of its components are equal
    (mirrors the original per-pixel np.array_equal check), so grayscale
    and multi-channel inputs both work.  Vectorized with numpy instead of
    the original O(H*W) Python double loop; identical result.

    Raises ValueError when the shapes differ.
    """
    if img.shape != img2.shape:
        raise ValueError("Shape mismatch: img and img2 must have to be of the same shape.")
    eq = np.asarray(img) == np.asarray(img2)
    # Collapse any trailing (channel) axes so a pixel matches only if every
    # component matches.
    matches = eq.reshape(eq.shape[0], eq.shape[1], -1).all(axis=2)
    lenIntersection = int(matches.sum())
    lenimg = img.shape[0] * img.shape[1]
    lenimg2 = img2.shape[0] * img2.shape[1]
    return 2. * lenIntersection / (lenimg + lenimg2)
mean_iou = []
dicee = []
dim = (224,224)
num_classes = 2
# NOTE(review): the nested loop is an O(n*m) filename match; a set
# intersection of files/files2 would be equivalent and faster.
for file in files:
    # make sure file is an image
    for filee in files2:
        if str(filee) == str(file):
            ## Ground Truth Mask
            p1 = os.path.join(GT_path, file)
            print(p1)
            img = cv2.imread(p1 , 0)
            img = cv2.resize(img, dim)
            img = img / 255.0
            #imgg = img
            #img = img > 0.5
            #img = numpy.bool(img)
            #img = np.asarray(img).astype(np.bool)
            ## Predicted mask
            p2 = os.path.join(pred_path, filee)
            img2 = cv2.imread(p2, 0)
            img2= cv2.resize(img2, dim)
            img2 = img2 / 255.0
            # Add batch and channel axes, predict, threshold at 0.5.
            img2 = img2[None]
            img2 = np.expand_dims(img2, axis=-1)
            img2 = UNet_model.predict(img2) > 0.5
            #imgg2 = UNet_model.predict(img2) #> 0.5
            #imgg2 = imgg2.astype(numpy.float64)
            img2 = np.squeeze(img2)
            #imgg2 = np.squeeze(imgg2)
            #d = dtype(image)
            #print(d)
            #img2 = np.asarray(img2).astype(np.bool)
            IOU_keras = MeanIoU(num_classes=num_classes)
            IOU_keras.update_state(img, img2)
            print("Mean IoU =", IOU_keras.result().numpy())
            mean_iou.append(IOU_keras.result().numpy())
            value = dice_coef(img, img2)
            print("Dice coeffecient value is", value, "\n")
            dicee.append(value)
UNet_model.summary()
#print (img)
#print(img2)
dicee = np.array(dicee)
L = len(mean_iou)
print("Number of Values is", L)
# Taking average of dice values
av=np.mean(mean_iou)
avv=np.mean(dicee)
print ("average value is", av)
print ("average value is", avv)
# Taking maximuim and minimuim of dice values
mx=np.max(mean_iou)
mxx=np.max(dicee)
print ("maximuim value is", mx)
print ("maximuim value is", mxx)
mn=np.min(mean_iou)
mnn=np.min(dicee)
print ("minimuim value is", mn)
print ("minimuim value is", mnn)
md=np.median(mean_iou)
mdd=np.median(dicee)
print ("median value is", md)
print ("median value is", mdd)
#############################################################
######################## Stage 2 : LUNG EXCTRACTION
##################################################################################
# Load the trained 3-level batch-normalised U-Net used to predict lung masks.
UNet_model = tf.keras.models.load_model('/home/idu/Desktop/COV19D/segmentation/UNet_model-3L-BatchNorm.h5')
## Comparing the results of predicted masks between public dataset and COV19-CT database
## Ectracting with one example
file_path1 = '/home/idu/Desktop/COV19D/im/156.jpg'
file_path2 = '/home/idu/Desktop/COV19D/segmentation/slices/img/t2-slice140_z.png'
# Grayscale reads: n1/n2 keep the raw slices; image1/image2 get preprocessed.
n1 = cv2.imread(file_path1, 0)
n2 = image2=cv2.imread(file_path2, 0)
image1=cv2.imread(file_path1, 0)
image2=cv2.imread(file_path2, 0)
#cv2.imwrite('/home/idu/Desktop/COV19D/im/156.png', image1)
#file_path11 = '/home/idu/Desktop/COV19D/im/156.png'
#image1=cv2.imread(file_path11, 0)
dim = (224, 224)
n1 = cv2.resize(n1, dim)
n2 = cv2.resize(n2, dim)
image1 = cv2.resize(image1, dim)
image2 = cv2.resize(image2, dim)
# Squeeze intensities of the COV19-CT slice into [0, 100] so its histogram
# resembles the public dataset the U-Net was trained on.
n1= n1 * 100.0 / 255.0
image1 = image1 * 100.0 / 255.0
hist,bins = np.histogram(image1.flatten(),256,[0,256])
plt.plot(hist, color = 'b')
hist,bins = np.histogram(image2.flatten(),256,[0,256])
plt.plot(hist, color = 'b')
#image2 = cv2.equalizeHist(image2) ### Histogram equalization
#image = image.expand_dims(segmented_data, axis=-1)
image1 = image1 / 255.0
image2 = image2 / 255.0
# Add a leading batch axis so the tensors match the model's 4-D input.
image1 = image1[None]
image2 = image2[None]
pred_mask1 = UNet_model.predict(image1) > 0.5
pred_mask2 = UNet_model.predict(image2) > 0.5
pred_mask1 = np.squeeze(pred_mask1)
pred_mask2 = np.squeeze(pred_mask2)
plt.imshow(pred_mask1)
plt.imshow(pred_mask2)
pred_mask1 = np.asarray(pred_mask1, dtype="uint8")
kernel = np.ones((5, 5), np.uint8)
# NOTE(review): the erosion result is discarded — the dilate on the next
# line is applied to pred_mask1 (not pred_mask11), and pred_mask11 is never
# used afterwards; the morphology below continues from the raw pred_mask1.
pred_mask11 = cv2.erode(pred_mask1, kernel, iterations=2)
pred_mask11 = cv2.dilate(pred_mask1, kernel, iterations=2)
plt.imshow(pred_mask1)
# Clear Image border
cleared = clear_border(pred_mask1)
plt.imshow(cleared)
# Label iameg
label_image = label(cleared)
plt.imshow(label_image)
#Keep the labels with 2 largest areas.
areas = [r.area for r in regionprops(label_image)]
areas.sort()
if len(areas) > 2:
for region in regionprops(label_image):
if region.area < areas[-2]:
for coordinates in region.coords:
label_image[coordinates[0], coordinates[1]] = 0
binary = label_image > 0
plt.imshow(binary)
# Erosion
selem = disk(2)
binary = binary_erosion(binary, selem)
plt.imshow(binary)
# Closure operation with a disk of radius 10. This operation is to keep nodules attached to the lung wall.
selem = disk(10)
binary = binary_closing(binary, selem)
plt.imshow(binary)
# Fill in the small holes inside the binary mask of lungs
edges = roberts(binary)
binary = ndi.binary_fill_holes(edges)
plt.imshow(binary)
# Superimposing the binary image on the original image
#binary = int(binary)
binary=binary.astype(np.uint8)
# NOTE(review): n1 is float64 at this point (scaled above); confirm
# cv2.bitwise_and accepts it here, otherwise convert back to uint8 first.
final = cv2.bitwise_and(n1, n1, mask=binary)
plt.imshow(final)
#h = 255
#w = 298
dim = (224, 224)
# NOTE(review): the next line reads h and w before the `h=w=224` assignment
# below it — it only works if h and w were already defined earlier in the file.
dim = (h, w)
h=w=224
#kernel = np.ones((5, 5), np.uint8)
### Exctracting for all CT image in COV19-CT-DB
# Apply the U-Net + morphology pipeline above to every slice of every CT scan
# under folder_path, writing lung-extracted slices to `directory` with the
# same sub-folder layout.
folder_path = '/home/idu/Desktop/COV19D/test/4' # Changoe this directory to loop over all training, validation and testing images
directory = '/home/idu/Desktop/COV19D/test-seg/4' # Changoe this directory to save the lung segmented images in the appropriate bath syncronizing with line above
for fldr in os.listdir(folder_path):
sub_folder_path = os.path.join(folder_path, fldr)
dir = os.path.join(directory, fldr)
os.mkdir(dir)
for filee in os.listdir(sub_folder_path):
file_path = os.path.join(sub_folder_path, filee)
#cv2.imwrite('/home/idu/Desktop/COV19D/im/156.png', image1)
#file_path11 = '/home/idu/Desktop/COV19D/im/156.png'
#image1=cv2.imread(file_path11, 0)
## Using "try" to avoid problems with input image slices such format problems or other issues
# NOTE(review): cv2.imread does not raise on unreadable files — it returns
# None — so this first handler rarely fires; the resize try/except below is
# what actually catches bad files. The string checks for "name 'file_path'
# is not defined" / "TypeError: ..." can never appear in a cv2.error message,
# and if no branch matches, execution falls through without `continue`.
try:
n = cv2.imread(file_path, 0)
image = cv2.imread(file_path, 0)
except cv2.error as e:
# If the resize operation fails, print the error message and continue to the next image
if "(-215:Assertion failed) !ssize.empty()" in str(e):
print(f"Skipped {file_path}: {str(e)}")
continue
elif "name 'file_path' is not defined" in str(e):
print(f"Skipped image: {str(e)}")
continue
elif "TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'" in str(e):
print(f"Skipped {file_path}: {str(e)}")
continue
try:
n = cv2.resize(n, dim)
image = cv2.resize(image, dim)
except cv2.error as e:
# If the resize operation fails, print the error message and continue to the next image
if "(-215:Assertion failed) !ssize.empty()" in str(e):
print(f"Skipped {file_path}: {str(e)}")
continue
elif "name 'file_path' is not defined" in str(e):
print(f"Skipped image: {str(e)}")
continue
elif "TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'" in str(e):
print(f"Skipped {file_path}: {str(e)}")
continue
image = image * 100.0 / 255.0 ## Squeezing the histogram bins to values intensity between 0 and 100 to make it similar to the histograms in the public dataset
image = image / 255.0
image = image[None]
print('predicting')
pred_mask = UNet_model.predict(image) > 0.5
pred_mask = np.squeeze(pred_mask)
#plt.imshow(pred_mask1)
pred_mask = np.asarray(pred_mask, dtype="uint8")
#plt.imshow(pred_mask1)
# Clear Image border
cleared = clear_border(pred_mask)
#plt.imshow(cleared)
# Label iameg
label_image = label(cleared)
#plt.imshow(label_image)
#Keep the labels with 2 largest areas.
areas = [r.area for r in regionprops(label_image)]
areas.sort()
if len(areas) > 2:
for region in regionprops(label_image):
if region.area < areas[-2]:
for coordinates in region.coords:
label_image[coordinates[0], coordinates[1]] = 0
binary = label_image > 0
#plt.imshow(binary)
# Erosion
selem = disk(2)
binary = binary_erosion(binary, selem)
#plt.imshow(binary)
# Closure operation with a disk of radius 10. This operation is to keep nodules attached to the lung wall.
selem = disk(10)
binary = binary_closing(binary, selem)
#plt.imshow(binary)
#Fill in the small holes inside the binary mask of lungs
edges = roberts(binary)
binary = ndi.binary_fill_holes(edges)
#plt.imshow(binary)
## Superimposing the binary image on the original image
binary=binary.astype(np.uint8)
final = cv2.bitwise_and(n, n, mask=binary)
#plt.imshow(final)
file_name, file_ext = os.path.splitext(filee)
print(fldr)
dirr = os.path.join(directory, fldr)
name=dirr+'/'+str(file_name)+'.jpg'
print(name)
print(file_path)
#final = im.fromarray(final)
#directory = sub_folder_path
#name=directory+'/'+str(file_name)+'.png'
#print(os.path.join(directory, file_name))
#final.save(name)
#print(os.path.join(directory, file_name))
#pth=
#dir = os.path.join(directory, fldr)
cv2. imwrite(name, final)
#final.save('{}.png'.format(file_name))
#print (directory,'',file_name)
# Release the per-slice arrays before the next iteration.
n = []
image = []
################### Slice Removal After Lung Exctraction (Optional)
# Exploration: compare non-zero pixel counts of representative vs
# non-representative (uppermost/lowermost) lung-extracted slices, to pick a
# threshold for discarding near-empty slices.
file_path1 = '/home/idu/Desktop/COV19D/train-seg/non-covid/ct_scan931/0.jpg'
file_path2 = '/home/idu/Desktop/COV19D/train-seg/non-covid/ct_scan931/40.jpg'
file_path3 = '/home/idu/Desktop/COV19D/train-seg/non-covid/ct_scan931/380.jpg'
file_path4 = '/home/idu/Desktop/COV19D/train-seg/non-covid/ct_scan931/420.jpg'
file_path5 = '/home/idu/Desktop/COV19D//train-seg/non-covid/ct_scan165/185.jpg'
n1 = cv2.imread(file_path1, 0)
n11 = n1.astype(float)
n11 /= 255.0 # Normallization
n_zeros = np.count_nonzero(n11==0)
n_zeros
n2 = cv2.imread(file_path2, 0)
# NOTE(review): n22 and n33 below are computed from n1, not n2/n3 — this
# looks like a copy-paste slip; the later counts for n2/n3 use the raw reads.
n22 = n1.astype(float)
n22 /= 255.0 # Normallization
n_zeros = np.count_nonzero(n22==0)
n_zeros
n3 = cv2.imread(file_path3, 0)
n33 = n1.astype(float)
n33 /= 255.0 # Normallization
n_zeros = np.count_nonzero(n33)
n_zeros
n4 = cv2.imread(file_path4, 0)
# NOTE(review): in-place float division on a uint8 array raises a numpy
# casting TypeError; n11/n22/n33 avoid this by converting with astype first.
n4 /= 255.0 # Normallization
n5 = cv2.imread(file_path5, 0)
n5 /= 255.0 # Normallization
dim = (224, 224)
#n1 = cv2.resize(n1, dim)
#n2 = cv2.resize(n2, dim)
#n3 = cv2.resize(n3, dim)
#n4 = cv2.resize(n4, dim)
#n5 = cv2.resize(n5, dim)
#n1 = cv.equalizeHist(n1)
#n2 = cv.equalizeHist(n2)
#n3 = cv.equalizeHist(n3)
#n1 = n1 * 10000.0
#n2 = n2 * 10000.0
#n3 = n3 * 10000.0
# NOTE(review): cv2.calcHist expects uint8/float32 input; n33 is float64 here.
histr = cv2.calcHist([n33],[0],None,[256],[0,256])
histr = np.histogram(n33)
plt.plot(histr)
plt.show()
hist,bins = np.histogram(n1.ravel(),256,[0,256])
plt.hist(n1.ravel(),256,[0,256])
plt.show()
plt.plot(hist, color = 'b')
hist,bins = np.histogram(n2.flatten(),256,[0,256])
plt.plot(hist, color = 'b')
hist,bins = np.histogram(n3.flatten(),256,[0,256])
plt.plot(hist, color = 'b')
#image2 = cv2.equalizeHist(image2) ### Histogram equalization
# None-representative
## [Uppermost]
count1 = np.count_nonzero(n11)
print(count1)
count4 = np.count_nonzero(n4)
print(count4)
## [Lowermost]
count3 = np.count_nonzero(n3)
print(count3)
count5 = np.count_nonzero(n5)
print(count5)
# Representative
count2 = np.count_nonzero(n2)
print(count2)
# NOTE(review): output_path and lung_extracted_image are not defined at this
# point in the script — this line belongs to the loop further below.
cv2.imwrite(output_path, lung_extracted_image)
################# Slice Removal Using ChatGPT [optional-Recommended]
def check_valid_image(image, threshold):
    """Return True when `image` has strictly more than `threshold` non-zero pixels.

    Used to discard lung-segmented slices that are mostly black, i.e. slices
    where little or no lung tissue was extracted.

    Parameters
    ----------
    image : numpy.ndarray
        Grayscale (or any) image array; zeros count as background.
    threshold : int
        Minimum number of non-zero pixels for the slice to be kept.
    """
    # np.count_nonzero matches cv2.countNonZero for single-channel images
    # while also accepting multi-channel/non-uint8 arrays, removing cv2's
    # single-channel assertion as a failure mode.
    return np.count_nonzero(image) > threshold
# Walk the segmented dataset and keep only slices with enough lung pixels.
input_folder = "/home/idu/Desktop/COV19D/test-seg"
output_folder = "/home/idu/Desktop/COV19D/test-seg-sliceremove"
## We use the initial threshold value of 1764, the first fallback threshold value of 1000, and the second fallback threshold value of 500.
## If no valid slices are found with the initial threshold value, the code retries with the first fallback threshold value.
## If no valid slices are found even with the first fallback threshold value, the code retries with the second fallback threshold value.
## If no valid slices are found even with the second fallback threshold value, we keep the all slices in the folder/CT; i.e. the CT scan does not change.
initial_threshold = 1764
first_fallback_threshold = 1000
second_fallback_threshold = 500
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for subdir, dirs, files in os.walk(input_folder):
subfolder_name = subdir.split('/')[-1]
subfolder_path = os.path.join(output_folder, subfolder_name)
if not os.path.exists(subfolder_path):
os.makedirs(subfolder_path)
count = 0
threshold = initial_threshold
# NOTE(review): the fallback does not re-visit files skipped before the
# threshold was lowered, and the final `elif count == 0` branch writes only
# the current file, not all slices — the "keep all slices" intent described
# above is only approximated.
for file in files:
image_path = os.path.join(subdir, file)
if '.jpg' not in image_path:
continue
image = cv2.imread(image_path, 0)
if check_valid_image(image, threshold):
count += 1
output_path = os.path.join(subfolder_path, file)
cv2.imwrite(output_path, image)
elif count == 0 and threshold == initial_threshold: # Try again with first fallback threshold
threshold = first_fallback_threshold
elif count == 0 and threshold == first_fallback_threshold: # Try again with second fallback threshold
threshold = second_fallback_threshold
elif count == 0: # Keep all images if no valid image has been found
output_path = os.path.join(subfolder_path, file)
cv2.imwrite(output_path, image)
if count == 0:
print(f"No valid images were found in subfolder: {subfolder_name}. All images in this subfolder will be kept.")
################################# Slice Cropping [optional]
# Manually select one ROI on a sample slice, then crop every slice in the
# dataset to that same rectangle, overwriting the files in place.
img = cv2.imread('/home/idu/Desktop/COV19D/train-seg-removal/non-covid/ct_scan882/117.jpg')
img = skimage.color.rgb2gray(img)
# r = (x, y, width, height) chosen interactively once, reused for all slices.
r = cv2.selectROI(img)
count = []
folder_path = '/home/idu/Desktop/COV19D/train-seg-removal-crop/non-covid'
#Change this directory to the directory where you need to do preprocessing for images
#Inside the directory must folder(s), which have the images inside them
for fldr in os.listdir(folder_path):
sub_folder_path = os.path.join(folder_path, fldr)
for filee in os.listdir(sub_folder_path):
file_path = os.path.join(sub_folder_path, filee)
img = cv2.imread(file_path)
#Grayscale images
img = skimage.color.rgb2gray(img)
# First cropping an image
#%r = cv2.selectROI(im)
#Select ROI from images before you start the code
#Reference: https://learnopencv.com/how-to-select-a-bounding-box-roi-in-opencv-cpp-python/
#{Last access 15th of Dec, 2021}
# Crop image using r
img_cropped = img[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
# Thresholding and binarizing images
# Reference: https://datacarpentry.org/image-processing/07-thresholding/
#{Last access 15th of Dec, 2021}
# Gussian Filtering
#img = skimage.filters.gaussian(img_cropped, sigma=1.0)
# Binarizing the image
# Replace images with the image that includes ROI
img_cropped = np.expand_dims(img_cropped, axis=2)
img_cropped = array_to_img (img_cropped)
img_cropped.save(str(file_path), 'JPEG')
#print('saved')
#############################################################
######################## Stage 3 : CLASSIFICAIOTN
##################################################################################
# Using imagedatagenerator
# Stream the segmented slices from disk with light flip augmentation for
# training; validation is rescale-only.
batch_size = 128
#h= 224
#w=224
#w = 152 # After cropping
#h = 104 # After cropping
train_datagen = ImageDataGenerator(rescale=1./255,
vertical_flip=True,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
'/home/idu/Desktop/COV19D/train-seg/', ## COV19-CT-DB Training set
target_size=(h, w),
batch_size=batch_size,
color_mode='grayscale',
classes = ['covid','non-covid'],
class_mode='binary')
val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
'/home/idu/Desktop/COV19D/val-seg/', ## COV19-CT-DB Validation set
target_size=(h, w),
batch_size=batch_size,
color_mode='grayscale',
classes = ['covid','non-covid'],
class_mode='binary')
#################### Transfer Learnign Classificaiton Approach [optional]
# Images must be 3 w
# NOTE(review): Xception expects 3-channel input while the generators above
# yield grayscale batches — this branch presumably needs color_mode='rgb'
# generators; confirm before use.
Model_Xcep = tf.keras.applications.xception.Xception(include_top=False, weights='imagenet', input_shape=(h, w, 3))
for layer in Model_Xcep.layers:
layer.trainable = False
model = tf.keras.Sequential([
Model_Xcep,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
# Image size used by the CNN classifier below (last assignment wins: 512).
h = 224
w = 224
h=w=512
##################### CNN model Classidier Approach
#from tensorflow.keras import models, layers, regularizers
def make_model():
    """Assemble the CNN slice classifier (COVID vs non-COVID).

    Architecture: four Conv(3x3)-BatchNorm-ReLU-MaxPool stages with
    16/32/64/128 filters, then Flatten -> Dense(256)-BatchNorm-ReLU ->
    Dropout(0.3) -> Dense(1, sigmoid). Input is one grayscale channel of
    the global size h x w.
    """
    net = tf.keras.models.Sequential()
    # Convolutional feature extractor: each stage halves spatial resolution.
    for stage, filters in enumerate((16, 32, 64, 128)):
        if stage == 0:
            # Only the first layer declares the input shape.
            net.add(layers.Conv2D(filters, (3, 3), input_shape=(h, w, 1), padding="same"))
        else:
            net.add(layers.Conv2D(filters, (3, 3), padding="same"))
        net.add(layers.BatchNormalization())
        net.add(layers.ReLU())
        net.add(layers.MaxPooling2D((2, 2)))
    # Fully connected classification head.
    net.add(layers.Flatten())
    net.add(layers.Dense(256))
    net.add(layers.BatchNormalization())
    net.add(layers.ReLU())
    net.add(layers.Dropout(0.3))
    net.add(layers.Dense(1, activation='sigmoid'))
    return net
# Build the classifier and restore previously trained weights (architecture
# must match the checkpoint being loaded; the last load_weights wins).
model = make_model()
### K-means Clustering Segmetnation + CNN - No slice removal
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/kmeans-cluster-seg-cnn-classif.h5')
### UNet Seg + CNN - No slice removal
## Same as the Previous Architecture
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/UNet-BatchNorm-CNN-model.h5')
### UNet Seg + CNN - with slice removal
def make_model():
    """Build the deeper CNN classifier used with slice removal.

    Five Conv(3x3)-BatchNorm-ReLU-MaxPool stages (16/32/64/128/256 filters)
    followed by an L2-regularised 256-unit dense head with Dropout(0.3) and
    a single sigmoid output. Input: grayscale h x w (globals).
    """
    net = models.Sequential()
    # Stage 1 declares the input shape; the rest only grow the filter count.
    net.add(layers.Conv2D(16, (3, 3), input_shape=(h, w, 1), padding="same"))
    net.add(layers.BatchNormalization())
    net.add(layers.ReLU())
    net.add(layers.MaxPooling2D((2, 2)))
    for filters in (32, 64, 128, 256):
        net.add(layers.Conv2D(filters, (3, 3), padding="same"))
        net.add(layers.BatchNormalization())
        net.add(layers.ReLU())
        net.add(layers.MaxPooling2D((2, 2)))
    # Classification head with weight decay on the dense layer.
    net.add(layers.Flatten())
    net.add(layers.Dense(256, kernel_regularizer=regularizers.l2(0.001)))
    net.add(layers.BatchNormalization())
    net.add(layers.ReLU())
    net.add(layers.Dropout(0.3))
    net.add(layers.Dense(1, activation='sigmoid'))
    return net
# Instantiate the 5-stage model and restore its trained checkpoint.
model = make_model()
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/UNet-seg-sliceremove-cnn-class.h5')
## Choosing number of epoches
n_epochs= 100
###Learning Rate decay
def decayed_learning_rate(step):
    """Exponentially decayed learning rate: lr0 * decay_rate**(step/decay_steps).

    Mirrors tf.keras.optimizers.schedules.ExponentialDecay. Relies on the
    globals ``initial_learning_rate``, ``decay_rate`` and ``decay_steps``
    being defined before the first call.
    """
    # BUGFIX: the original used '^', which is bitwise XOR in Python and
    # raises TypeError on floats; exponentiation is '**'.
    return initial_learning_rate * decay_rate ** (step / decay_steps)
# Compiling the model
# Binary cross-entropy with precision/recall tracked alongside accuracy.
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall(),
'accuracy'])
# Base learning rate consumed by the decay helpers below.
initial_learning_rate = 0.1
def lr_exp_decay(epoch, lr):
    """LearningRateScheduler hook: initial_learning_rate * exp(-k * epoch), k = 1.

    The ``lr`` argument is required by the Keras scheduler signature but is
    not used; the decay always restarts from the global initial_learning_rate.
    """
    decay_constant = 1
    scale = math.exp(-decay_constant * epoch)
    return initial_learning_rate * scale
# early stopping
early_stopping_cb = keras.callbacks.EarlyStopping(monitor="val_loss", patience=3)
initial_learning_rate = 0.1
# Optimizer-style exponential decay schedule (see NOTE at model.fit below).
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
# saving weights
checkpoint = ModelCheckpoint('/home/idu/Desktop/COV19D/ChatGPT-saved-models/UNet-seg-5Layer-cnn-class.h5', save_best_only=True, save_weights_only=True)
# Class weight
# Inverse-frequency weights so the minority class contributes equally.
counter = Counter(train_generator.classes)
max_val = float(max(counter.values()))
class_weights = {class_id : max_val/num_images for class_id, num_images in counter.items()}
class_weights
#training_steps = 2*269309 // batch_size
#training_steps = 434726 // batch_size ## Without Slice reduction
#val_steps = 67285 // batch_size
#val_steps = 106378 // batch_size ## Without Slice reduction
# NOTE(review): lr_schedule is an ExponentialDecay schedule, not a Keras
# Callback — passing it in `callbacks` is expected to fail; it presumably
# belongs in the optimizer (Adam(learning_rate=lr_schedule)). Confirm.
history=model.fit(train_generator,
#steps_per_epoch=training_steps,
validation_data=val_generator,
#validation_steps=val_steps,
verbose=2,
epochs=100,
callbacks=[early_stopping_cb, checkpoint, lr_schedule],#,
class_weight=class_weights)
model.evaluate(val_generator, batch_size=128)
##Evaluating the CNN model
print (history.history.keys())
Train_accuracy = history.history['accuracy']
print(Train_accuracy)
print(np.mean(Train_accuracy))
val_accuracy = history.history['val_accuracy']
print(val_accuracy)
print( np.mean(val_accuracy))
val_loss = history.history['val_loss']
print(val_loss)
print( np.mean(val_loss))
# Accuracy curves over epochs.
epochs = range(1, len(Train_accuracy)+1)
plt.figure(figsize=(12,6))
plt.plot(epochs, Train_accuracy, 'g', label='Training acc')
plt.plot(epochs, val_accuracy, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('accuracy')
plt.ylim(0.7,1)
plt.xlim(0,50)
plt.legend()
plt.show()
# Metric key suffix (_1) depends on how many Precision/Recall instances
# were created in this session.
val_recall = history.history['val_recall_1']
print(val_recall)
avg_recall = np.mean(val_recall)
avg_recall
val_precision = history.history['val_precision_1']
avg_precision = np.mean(val_precision)
avg_precision
epochs = range(1, len(Train_accuracy)+1)
plt.figure(figsize=(12,6))
plt.plot(epochs, val_recall, 'g', label='Validation Recall')
plt.plot(epochs, val_precision, 'b', label='Validation Prcision')
plt.title('Validation recall and Validation Percision')
plt.xlabel('Epochs')
plt.ylabel('Recall and Precision')
plt.legend()
plt.ylim(0.5,1)
plt.show()
# Macro F1 from the averaged precision/recall.
Macro_F1score = (2*avg_precision*avg_recall)/ (avg_precision + avg_recall)
Macro_F1score
## Making diagnosis predicitons at patient level using the trained CNN model classifier
## Use this for validation set and test set of COV19-DT-DB or other datasets you wish to test the model on
#############################################################################
###############################Making Prediciotns
## Input images shouldbe resized to 224x224 if any error
## Choosing the directory where the test/validation data is at
# For each CT-scan folder, classify every slice at seven class-probability
# thresholds; extensionsN lists count per-slice votes (odd index = class 1
# "non-covid", even index = class 0 "covid") and the majority decides.
folder_path = '/home/idu/Desktop/COV19D/val-seg/non-covid'
extensions0 = []
extensions1 = []
extensions2 = []
extensions3 = []
extensions4 = []
extensions5 = []
extensions6 = []
extensions7 = []
extensions8 = []
extensions9 = []
extensions10 = []
extensions11 = []
extensions12 = []
extensions13 = []
covidd = []
noncovidd = []
coviddd = []
noncoviddd = []
covidddd = []
noncovidddd = []
coviddddd = []
noncoviddddd = []
covidd6 = []
noncovidd6 = []
covidd7 = []
noncovidd7 = []
covidd8 = []
noncovidd8 = []
# Sentinel appended once per vote so len(extensionsN) counts slices.
results =1
for fldr in os.listdir(folder_path):
if fldr.startswith("ct"):
# NOTE(review): the per-folder path assignment is commented out, so
# sub_folder_path keeps whatever value it had from earlier code — every
# scan is read from the same stale folder. Re-enable the next line.
#sub_folder_path = os.path.join(folder_path, fldr)
for filee in os.listdir(sub_folder_path):
file_path = os.path.join(sub_folder_path, filee)
c = cv2.imread(file_path, 0)
c = cv2.resize (c, (224, 224))
c = c / 255.0
#c=img_to_array(c)
c = np.expand_dims(c, axis=-1)
c = c[None]
#result = model.predict_proba(c) #Probability of 1 (non-covid)
result = model.predict(c)
if result > 0.97: # Class probability threshod is 0.97
extensions1.append(results)
else:
extensions0.append(results)
if result > 0.90: # Class probability threshod is 0.90
extensions3.append(results)
else:
extensions2.append(results)
if result > 0.70: # Class probability threshod is 0.70
extensions5.append(results)
else:
extensions4.append(results)
if result > 0.40: # Class probability threshod is 0.40
extensions7.append(results)
else:
extensions6.append(results)
if result > 0.50: # Class probability threshod is 0.50
extensions9.append(results)
else:
extensions8.append(results)
if result > 0.15: # Class probability threshod is 0.15
extensions11.append(results)
else:
extensions10.append(results)
if result > 0.05: # Class probability threshod is 0.05
extensions13.append(results)
else:
extensions12.append(results)
#print(sub_folder_path, end="\r \n")
## The majority voting at Patient's level
if len(extensions1) > len(extensions0):
print(fldr, colored("NON-COVID", 'red'), len(extensions1), "to", len(extensions0))
noncovidd.append(fldr)
else:
print (fldr, colored("COVID", 'blue'), len(extensions0), "to", len(extensions1))
covidd.append(fldr)
if len(extensions3) > len(extensions2):
print (fldr, colored("NON-COVID", 'red'), len(extensions3), "to", len(extensions2))
noncoviddd.append(fldr)
else:
print (fldr, colored("COVID", 'blue'), len(extensions2), "to", len(extensions3))
coviddd.append(fldr)
if len(extensions5) > len(extensions4):
print (fldr, colored("NON-COVID", 'red'), len(extensions5), "to", len(extensions4))
noncovidddd.append(fldr)
else:
print (fldr, colored("COVID", 'blue'), len(extensions5), "to", len(extensions4))
covidddd.append(fldr)
if len(extensions7) > len(extensions6):
print (fldr, colored("NON-COVID", 'red'), len(extensions7), "to", len(extensions6))
noncoviddddd.append(fldr)
else:
print (fldr, colored("COVID", 'blue'), len(extensions6), "to", len(extensions7))
coviddddd.append(fldr)
if len(extensions9) > len(extensions8):
print (fldr, colored("NON-COVID", 'red'), len(extensions9), "to", len(extensions8))
noncovidd6.append(fldr)
else:
print (fldr, colored("COVID", 'blue'), len(extensions8), "to", len(extensions9))
covidd6.append(fldr)
if len(extensions11) > len(extensions10):
print (fldr, colored("NON-COVID", 'red'), len(extensions11), "to", len(extensions10))
noncovidd7.append(fldr)
else:
print (fldr, colored("COVID", 'blue'), len(extensions10), "to", len(extensions11))
covidd7.append(fldr)
if len(extensions13) > len(extensions12):
print (fldr, colored("NON-COVID", 'red'), len(extensions13), "to", len(extensions12))
noncovidd8.append(fldr)
else:
print (fldr, colored("COVID", 'blue'), len(extensions12), "to", len(extensions13))
covidd8.append(fldr)
# Reset the per-scan vote lists before the next CT folder.
extensions0=[]
extensions1=[]
extensions2=[]
extensions3=[]
extensions4=[]
extensions5=[]
extensions6=[]
extensions7=[]
extensions8=[]
extensions9=[]
extensions10=[]
extensions11=[]
extensions12=[]
extensions13=[]
#Checking the results
# Per-threshold counts of scans voted COVID / NON-COVID.
print(len(covidd))
print(len(coviddd))
print(len(covidddd))
print(len(coviddddd)) # 0.4 Threshold
print(len(covidd6)) # 0.5 Threshold
print(len(covidd7))
print(len(covidd8))
print(len(noncovidd))
print(len(noncoviddd))
print(len(noncovidddd))
print(len(noncoviddddd))
print(len(noncovidd6))
print(len(noncovidd7))
print(len(noncovidd8))
print(len(covidd+noncovidd))
print(len(coviddd+noncoviddd))
print(len(covidddd+noncovidddd))
print(len(coviddddd+noncoviddddd))
print(len(covidd6+noncovidd6))
print(len(covidd7+noncovidd7))
print(len(covidd8+noncovidd8))
####################################################################
######### Using a Hybrid method [optional]
####################################################
#Actuall Covid
# NOTE(review): fulllist/fulllistt are built from list1/listt1 etc. which are
# assigned only below — as a linear script these two lines fail; they were
# presumably run interactively after the assignments. Confirm intended order.
fulllist = list1 + list1n
#Actual nonCovid
fulllistt = listt1 + listt1n
####### Actual Covid Loop [In the validation set]
list1 = covidd6
list1n = noncovidd6
list2 = covidd6 # For model 2
list2n = noncovidd6
list3 = covidd6 # For model 3 [unET sEG + cnn]
list3n = noncovidd6
listt3 = covidd
listt3n = noncovidd
listtt3 = covidd8
listtt3n = noncovidd8
list4 = covidd6 # For model 4 [UNet Seg+ SliceRemoval+cnn]
list4n = noncovidd6
list4a = coviddddd # For model 4
list4an = noncoviddddd
covid = []
noncovid = []
#### Making the decision for each CT scan
# If two models decide the CT scan is covid, then it will be considerd covid. Else the CT scan is non-covid
for item in listt3:
if item in list2 or item in list3 or item in list1:
covid.append(item)
else:
noncovid.append(item)
for item in listt3n:
if item in list2n or item in list3n or item in list1n:
noncovid.append(item)
else:
covid.append(item)
for item in fulllist:
if item in listt3 and item in listtt3:
covid.append(item)
elif item in listt3n and item in listtt3n:
noncovid.append(item)
elif item in list3n and item in list2n:
noncovid.append(item)
else:
covid.append(item)
# Correctly Classified
print(covid)
print(len(covid))
# misclassified
print(noncovid)
print(len(noncovid))
print(len(noncovid)+len(covid))
print(len(fulllist))
#print(len(covid+noncovid))
# Optionally reload a previously exported decision list.
import csv
csv_filename = '/home/idu/Desktop/s/listt11.csv'
with open(csv_filename) as f:
reader = csv.reader(f)
listt11 = list(reader)
fulllistn = list11 + list11n
######### Actual non-Covid Loop [In the validation set]
list11 = covidd6
list11n = noncovidd6
list22 = covidd6 # For model 2
list22n = noncovidd6
list33 = covidd6 # For model 3 [UNet Seg + CNN]
list33n = noncovidd6
listt33 = covidd
listt33n = noncovidd
listtt33 = covidd8
listtt33n = noncovidd8
list44 = covidd6 # For model 4 [UNet seg - sliceremova+cnn]
list44n = noncovidd6
list44a = coviddddd # For model 4
list44an = noncoviddddd
covid2 = []
noncovid2 = []
## If two models decide the CT scan is covid, then it will be considerd covid. Else the CT scan is non-covid
for item in listt33:
if item in list22 or item in list33 or item in list11:
covid2.append(item)
else:
noncovid2.append(item)
for item in listt33n:
if item in list22n or item in list33n or item in list11n:
noncovid2.append(item)
else:
covid2.append(item)
fulllistt = listt33 + listt33n
for item in fulllistt:
if item in listt33 and item in listtt33:
covid2.append(item)
elif item in listt33n and item in listtt33n:
noncovid2.append(item)
elif item in list33n and item in list22n:
noncovid2.append(item)
else:
covid2.append(item)
# Misclassified
print(covid2)
print(len(covid2))
# Correctly Classified
print(noncovid2)
print(len(noncovid2))
print(len(covid2)+len(noncovid2))
#####################################################################################
######### Saving to csv files format to report the results
###############################################
## Using Majority Voting for each CT scan
####0.5 slice level class probability
# delimiter="\n" makes writerow emit one scan id per line.
with open('/home/idu/Desktop/s/listt11.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(listt11)
with open('/home/idu/Desktop/covid.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(coviddddd)
####0.9 Slice level class probability
with open('/home/idu/Desktop/noncovid.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(noncoviddd)
with open('/home/idu/Desktop/ncovid.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(coviddd)
####0.15 Slice level class probability
with open('/home/idu/Desktop/noncovid.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(noncovidd7)
with open('/home/idu/Desktop/covid.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(covidd7)
############## 0.4 Slice level class probability
with open('/home/idu/Desktop/noncovid.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(noncoviddddd)
with open('/home/idu/Desktop/covid.csv', 'w') as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(coviddddd)
### KENAN MORANI - END OF THE CODE
##### github.com/kenanmorani | 65,932 | 32.215617 | 171 | py |
COV19D_3rd | COV19D_3rd-main/loading_models/Loading-Models.py |
## Image Process + CNN Model - no slcie removal
# Global input size shared by all model builders in this file.
h=w=224
def make_model():
    """CNN classifier: image-preprocess pipeline, no slice removal.

    Four Conv(3x3)-BatchNorm-ReLU-MaxPool stages (16/32/64/128 filters),
    then Flatten -> Dense(256)-BatchNorm-ReLU -> Dropout(0.1) ->
    Dense(1, sigmoid). Input: grayscale h x w (globals).
    """
    net = models.Sequential()
    for stage, filters in enumerate((16, 32, 64, 128)):
        if stage == 0:
            # First layer fixes the input shape.
            net.add(layers.Conv2D(filters, (3, 3), input_shape=(h, w, 1), padding="same"))
        else:
            net.add(layers.Conv2D(filters, (3, 3), padding="same"))
        net.add(layers.BatchNormalization())
        net.add(layers.ReLU())
        net.add(layers.MaxPooling2D((2, 2)))
    # Classification head (light dropout: 0.1).
    net.add(layers.Flatten())
    net.add(layers.Dense(256))
    net.add(layers.BatchNormalization())
    net.add(layers.ReLU())
    net.add(layers.Dropout(0.1))
    net.add(layers.Dense(1,activation='sigmoid'))
    return net
# Restore the checkpoint for the image-preprocess + CNN variant.
model = make_model()
## Load models weight
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/imagepreprocesscnnclass.h5')
### K-means Clustering Seg + CNN - No slice removal
def make_model():
    """CNN classifier used with k-means / U-Net segmentation (no slice removal).

    Four Conv(3x3)-BatchNorm-ReLU-MaxPool stages (16/32/64/128 filters),
    then Flatten -> Dense(256)-BatchNorm-ReLU -> Dropout(0.3) ->
    Dense(1, sigmoid). Input: grayscale h x w (globals).
    """
    net = tf.keras.models.Sequential()
    for stage, filters in enumerate((16, 32, 64, 128)):
        if stage == 0:
            # First layer fixes the input shape.
            net.add(layers.Conv2D(filters, (3, 3), input_shape=(h, w, 1), padding="same"))
        else:
            net.add(layers.Conv2D(filters, (3, 3), padding="same"))
        net.add(layers.BatchNormalization())
        net.add(layers.ReLU())
        net.add(layers.MaxPooling2D((2, 2)))
    # Classification head.
    net.add(layers.Flatten())
    net.add(layers.Dense(256))
    net.add(layers.BatchNormalization())
    net.add(layers.ReLU())
    net.add(layers.Dropout(0.3))
    net.add(layers.Dense(1, activation='sigmoid'))
    return net
# Restore checkpoints for the segmentation + CNN variants (the second
# load_weights overwrites the first; both share this architecture).
model = make_model()
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/kmeans-cluster-seg-cnn-classif.h5')
### UNet Seg + CNN - No slice removal
## Same as the Previous Architecture
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/UNet-BatchNorm-CNN-model.h5')
### UNet Seg + CNN - with slice removal
def make_model():
    """Deeper CNN classifier: U-Net segmentation + slice removal variant.

    Five Conv(3x3)-BatchNorm-ReLU-MaxPool stages (16/32/64/128/256 filters),
    then an L2-regularised Dense(256) head with Dropout(0.3) and a sigmoid
    output. Input: grayscale h x w (globals).
    """
    net = models.Sequential()
    # Stage 1 declares the input shape.
    net.add(layers.Conv2D(16, (3, 3), input_shape=(h, w, 1), padding="same"))
    net.add(layers.BatchNormalization())
    net.add(layers.ReLU())
    net.add(layers.MaxPooling2D((2, 2)))
    for filters in (32, 64, 128, 256):
        net.add(layers.Conv2D(filters, (3, 3), padding="same"))
        net.add(layers.BatchNormalization())
        net.add(layers.ReLU())
        net.add(layers.MaxPooling2D((2, 2)))
    # Classification head with weight decay on the dense layer.
    net.add(layers.Flatten())
    net.add(layers.Dense(256, kernel_regularizer=regularizers.l2(0.001)))
    net.add(layers.BatchNormalization())
    net.add(layers.ReLU())
    net.add(layers.Dropout(0.3))
    net.add(layers.Dense(1, activation='sigmoid'))
    return net
# Restore the checkpoint for the U-Net-seg + slice-removal variant.
model = make_model()
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/UNet-seg-sliceremove-cnn-class.h5')
### CNN model with slice removal
def make_model():
  """Build the slice-removal CNN classifier (four conv stages, 128-unit head)."""
  model = models.Sequential()
  # Four Conv-BN-ReLU-MaxPool stages with doubling filter counts.
  for stage, filters in enumerate((16, 32, 64, 128)):
    if stage == 0:
      # First layer pins the grayscale h x w input shape.
      model.add(layers.Conv2D(filters, (3, 3), input_shape=(h, w, 1), padding="same"))
    else:
      model.add(layers.Conv2D(filters, (3, 3), padding="same"))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.MaxPooling2D((2, 2)))
  # A fifth 256-filter stage existed in earlier experiments and was disabled.
  # Classification head.
  model.add(layers.Flatten())
  model.add(layers.Dense(128))
  model.add(layers.BatchNormalization())
  model.add(layers.ReLU())
  model.add(layers.Dropout(0.1))
  model.add(layers.Dense(1, activation='sigmoid'))
  return model
# Instantiate and restore the image-preprocessing + slice-removal classifier weights.
model = make_model()
model.load_weights('/home/idu/Desktop/COV19D/ChatGPT-saved-models/Image-Preprocess-sliceremove-cnn-class.h5')
| 6,156 | 28.743961 | 109 | py |
MetaSAug | MetaSAug-main/MetaSAug_LDAM_train.py | import os
import time
import argparse
import random
import copy
import torch
import torchvision
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
from data_utils import *
from resnet import *
import shutil
from loss import *
parser = argparse.ArgumentParser(description='Imbalanced Example')
parser.add_argument('--dataset', default='cifar100', type=str,
help='dataset (cifar10 or cifar100[default])')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--num_classes', type=int, default=100)
parser.add_argument('--num_meta', type=int, default=10,
help='The number of meta data for each class.')
parser.add_argument('--imb_factor', type=float, default=0.005)
parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
help='input batch size for testing (default: 100)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train')
parser.add_argument('--lr', '--learning-rate', default=1e-1, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
help='weight decay (default: 5e-4)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--split', type=int, default=1000)
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
help='print frequency (default: 10)')
parser.add_argument('--lam', default=0.25, type=float, help='[0.25, 0.5, 0.75, 1.0]')
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--meta_lr', default=0.1, type=float)
parser.add_argument('--save_name', default='name', type=str)
parser.add_argument('--idx', default='0', type=str)
args = parser.parse_args()
for arg in vars(args):
print("{}={}".format(arg, getattr(args, arg)))
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu)
kwargs = {'num_workers': 1, 'pin_memory': False}
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_data_meta, train_data, test_dataset = build_dataset(args.dataset, args.num_meta)
print(f'length of meta dataset:{len(train_data_meta)}')
print(f'length of train dataset: {len(train_data)}')
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size, shuffle=True, **kwargs)
np.random.seed(42)
random.seed(42)
torch.manual_seed(args.seed)
classe_labels = range(args.num_classes)
data_list = {}
for j in range(args.num_classes):
data_list[j] = [i for i, label in enumerate(train_loader.dataset.targets) if label == j]
img_num_list = get_img_num_per_cls(args.dataset, args.imb_factor, args.num_meta*args.num_classes)
print(img_num_list)
print(sum(img_num_list))
im_data = {}
idx_to_del = []
for cls_idx, img_id_list in data_list.items():
random.shuffle(img_id_list)
img_num = img_num_list[int(cls_idx)]
im_data[cls_idx] = img_id_list[img_num:]
idx_to_del.extend(img_id_list[img_num:])
print(len(idx_to_del))
imbalanced_train_dataset = copy.deepcopy(train_data)
imbalanced_train_dataset.targets = np.delete(train_loader.dataset.targets, idx_to_del, axis=0)
imbalanced_train_dataset.data = np.delete(train_loader.dataset.data, idx_to_del, axis=0)
print(len(imbalanced_train_dataset))
imbalanced_train_loader = torch.utils.data.DataLoader(
imbalanced_train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
validation_loader = torch.utils.data.DataLoader(
train_data_meta, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
best_prec1 = 0
beta = 0.9999
effective_num = 1.0 - np.power(beta, img_num_list)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(img_num_list)
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
weights = torch.tensor(per_cls_weights).float()
def main():
  """Full training loop: plain CE warm-up for 160 epochs, then MetaSAug."""
  global args, best_prec1
  args = parser.parse_args()
  model = build_model()
  optimizer_a = torch.optim.SGD(model.params(), args.lr,
                                momentum=args.momentum, nesterov=args.nesterov,
                                weight_decay=args.weight_decay)
  cudnn.benchmark = True
  # LDAM + MetaSAug loss over the 64-d ResNet32 features.
  criterion = LDAM_meta(64, args.dataset == "cifar10" and 10 or 100, cls_num_list=img_num_list,
                        max_m=0.5, s=30)
  for epoch in range(args.epochs):
    adjust_learning_rate(optimizer_a, epoch + 1)
    # Annealing ratio of the semantic-augmentation strength.
    ratio = args.lam * float(epoch) / float(args.epochs)
    if epoch < 160:
      train(imbalanced_train_loader, model, optimizer_a, epoch)
    else:
      train_MetaSAug(imbalanced_train_loader, validation_loader, model, optimizer_a, epoch, criterion, ratio)
    prec1, preds, gt_labels = validate(test_loader, model, nn.CrossEntropyLoss().cuda(), epoch)
    is_best = prec1 > best_prec1
    best_prec1 = max(prec1, best_prec1)
    # save_checkpoint(args, {
    #   'epoch': epoch + 1,
    #   'state_dict': model.state_dict(),
    #   'best_acc1': best_prec1,
    #   'optimizer': optimizer_a.state_dict(),
    # }, is_best)
  print('Best accuracy: ', best_prec1)
def train(train_loader, model, optimizer_a, epoch):
  """One epoch of plain cross-entropy training (used for epochs < 160).

  Args:
    train_loader: loader over the imbalanced training split.
    model: meta ResNet32 whose forward returns (features, logits).
    optimizer_a: SGD optimizer over model.params().
    epoch: current epoch index (used for logging only).
  """
  losses = AverageMeter()
  top1 = AverageMeter()
  model.train()
  for i, (input, target) in enumerate(train_loader):
    input_var = to_var(input, requires_grad=False)
    target_var = to_var(target, requires_grad=False)
    # The model returns (features, logits); the features are not needed here.
    _, y_f = model(input_var)
    del _
    # `reduce=False` was deprecated and later removed from F.cross_entropy;
    # `reduction='none'` is the supported equivalent.
    cost_w = F.cross_entropy(y_f, target_var, reduction='none')
    l_f = torch.mean(cost_w)
    prec_train = accuracy(y_f.data, target_var.data, topk=(1,))[0]
    losses.update(l_f.item(), input.size(0))
    top1.update(prec_train.item(), input.size(0))
    optimizer_a.zero_grad()
    l_f.backward()
    optimizer_a.step()
    if i % args.print_freq == 0:
      print('Epoch: [{0}][{1}/{2}]\t'
            'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
            'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
              epoch, i, len(train_loader),
              loss=losses,top1=top1))
def train_MetaSAug(train_loader, validation_loader, model,optimizer_a, epoch, criterion, ratio):
  """One epoch of MetaSAug meta-training (epochs >= 160).

  Each step: clone the model, take a virtual SGD step under the current
  covariance, take one gradient step on the covariance w.r.t. the meta
  (validation) loss of the virtually-updated clone, then train the real
  model with the refreshed covariance.
  """
  losses = AverageMeter()
  top1 = AverageMeter()
  model.train()
  for i, (input, target) in enumerate(train_loader):
    input_var = to_var(input, requires_grad=False)
    target_var = to_var(target, requires_grad=False)
    # Current per-class covariance, wrapped so it is differentiable.
    cv = criterion.get_cv()
    cv_var = to_var(cv)
    # Differentiable clone of the model for the virtual update.
    meta_model = ResNet32(args.dataset == 'cifar10' and 10 or 100)
    meta_model.load_state_dict(model.state_dict())
    meta_model.cuda()
    feat_hat, y_f_hat = meta_model(input_var)
    cls_loss_meta = criterion(meta_model.linear, feat_hat, y_f_hat, target_var, ratio,
                              weights, cv_var, "none")
    meta_model.zero_grad()
    # Virtual SGD step; create_graph keeps it differentiable w.r.t. cv_var.
    grads = torch.autograd.grad(cls_loss_meta, (meta_model.params()), create_graph=True)
    meta_lr = args.lr * ((0.01 ** int(epoch >= 160)) * (0.01 ** int(epoch >= 180)))
    meta_model.update_params(meta_lr, source_params=grads)
    # Meta objective: plain CE of the updated clone on one meta batch.
    input_val, target_val = next(iter(validation_loader))
    input_val_var = to_var(input_val, requires_grad=False)
    target_val_var = to_var(target_val, requires_grad=False)
    _, y_val = meta_model(input_val_var)
    cls_meta = F.cross_entropy(y_val, target_val_var)
    # One gradient-descent step on the covariance itself.
    grad_cv = torch.autograd.grad(cls_meta, cv_var, only_inputs=True)[0]
    new_cv = cv_var - args.meta_lr * grad_cv
    del grad_cv, grads
    #model.train()
    # Real update under the refreshed covariance; "update" also folds this
    # batch into the covariance estimator.
    features, predicts = model(input_var)
    cls_loss = criterion(model.linear, features, predicts, target_var, ratio, weights, new_cv, "update")
    prec_train = accuracy(predicts.data, target_var.data, topk=(1,))[0]
    losses.update(cls_loss.item(), input.size(0))
    top1.update(prec_train.item(), input.size(0))
    optimizer_a.zero_grad()
    cls_loss.backward()
    optimizer_a.step()
    if i % args.print_freq == 0:
      print('Epoch: [{0}][{1}/{2}]\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
            epoch, i, len(train_loader),
            loss=losses,top1=top1))
def validate(val_loader, model, criterion, epoch):
  """Evaluate top-1 accuracy on *val_loader*.

  Returns (top1_average, predictions, true_labels).
  NOTE(review): `criterion` and `epoch` are unused, and `losses` is never
  updated, so the printed loss is always 0.
  """
  batch_time = AverageMeter()
  losses = AverageMeter()
  top1 = AverageMeter()
  model.eval()
  true_labels = []
  preds = []
  end = time.time()
  for i, (input, target) in enumerate(val_loader):
    target = target.cuda()
    input = input.cuda()
    input_var = torch.autograd.Variable(input)
    target_var = torch.autograd.Variable(target)
    with torch.no_grad():
      _, output = model(input_var)
    # Collect argmax predictions and ground truth for the caller.
    output_numpy = output.data.cpu().numpy()
    preds_output = list(output_numpy.argmax(axis=1))
    true_labels += list(target_var.data.cpu().numpy())
    preds += preds_output
    prec1 = accuracy(output.data, target, topk=(1,))[0]
    top1.update(prec1.item(), input.size(0))
    batch_time.update(time.time() - end)
    end = time.time()
    if i % args.print_freq == 0:
      print('Test: [{0}/{1}]\t'
          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
            i, len(val_loader), batch_time=batch_time, loss=losses,
            top1=top1))
  print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
  return top1.avg, preds, true_labels
def build_model():
  """Construct a ResNet32 sized for the configured dataset, on GPU if available."""
  num_classes = 10 if args.dataset == 'cifar10' else 100
  net = ResNet32(num_classes)
  if torch.cuda.is_available():
    net.cuda()
    torch.backends.cudnn.benchmark = True
  return net
def to_var(x, requires_grad=True):
  """Wrap *x* in an autograd Variable, moving it to the GPU when one is available."""
  tensor = x.cuda() if torch.cuda.is_available() else x
  return Variable(tensor, requires_grad=requires_grad)
class AverageMeter(object):
  """Tracks the most recent value and the running average of a scalar stream."""
  def __init__(self):
    self.reset()
  def reset(self):
    """Clear every accumulated statistic."""
    self.val, self.avg, self.sum, self.count = 0, 0, 0, 0
  def update(self, val, n=1):
    """Record *val* observed *n* times and refresh the running average."""
    self.val = val
    self.count += n
    self.sum += val * n
    self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
  """Step schedule: multiply the base LR by 0.01 at epoch 160 and again at 180."""
  decay = (0.01 ** int(epoch >= 160)) * (0.01 ** int(epoch >= 180))
  new_lr = args.lr * decay
  for group in optimizer.param_groups:
    group['lr'] = new_lr
def accuracy(output, target, topk=(1,)):
  """Return precision@k (percent) for each cutoff in *topk*.

  Args:
    output: (batch, classes) score tensor.
    target: (batch,) tensor of ground-truth class indices.
    topk: iterable of cutoffs; one percentage tensor is returned per cutoff.
  """
  maxk = max(topk)
  batch_size = target.size(0)
  # Class indices ranked by score, one row per rank position.
  _, ranked = output.topk(maxk, 1, True, True)
  ranked = ranked.t()
  hits = ranked.eq(target.view(1, -1).expand_as(ranked))
  scale = 100.0 / batch_size
  return [hits[:k].reshape(-1).float().sum(0).mul_(scale) for k in topk]
def save_checkpoint(args, state, is_best):
  """Persist *state* to checkpoint/ours/<idx>/<save_name>_ckpt.pth.tar when best.

  The target directory is always created; the file is only written when
  *is_best* is True.
  """
  directory = os.path.join('checkpoint', 'ours', args.idx)
  if not os.path.exists(directory):
    os.makedirs(directory)
  if is_best:
    torch.save(state, os.path.join(directory, args.save_name + '_ckpt.pth.tar'))
if __name__ == '__main__':
  # Script entry point: run the full training loop.
  main()
| 12,114 | 32.559557 | 115 | py |
MetaSAug | MetaSAug-main/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
import torch.nn.init as init
def to_var(x, requires_grad=True):
    """Return *x* as an autograd Variable, placed on the GPU when CUDA is present."""
    if torch.cuda.is_available():
        return Variable(x.cuda(), requires_grad=requires_grad)
    return Variable(x, requires_grad=requires_grad)
class MetaModule(nn.Module):
    """nn.Module variant whose parameters can be rebound to differentiable
    expressions, enabling functional (MAML-style) inner-loop updates.

    Subclasses store tensors as buffers and expose them via named_leaves().
    """
    # adopted from: Adrien Ecoffet https://github.com/AdrienLE
    def params(self):
        """Yield all leaf tensors collected by named_params()."""
        for name, param in self.named_params(self):
            yield param
    def named_leaves(self):
        # Overridden by subclasses to expose their buffer "parameters".
        return []
    def named_submodules(self):
        return []
    def named_params(self, curr_module=None, memo=None, prefix=''):
        """Recursively yield (dotted_name, tensor) pairs, de-duplicated via *memo*."""
        if memo is None:
            memo = set()
        if hasattr(curr_module, 'named_leaves'):
            for name, p in curr_module.named_leaves():
                if p is not None and p not in memo:
                    memo.add(p)
                    yield prefix + ('.' if prefix else '') + name, p
        else:
            # Plain nn.Module children fall back to their _parameters dict.
            for name, p in curr_module._parameters.items():
                if p is not None and p not in memo:
                    memo.add(p)
                    yield prefix + ('.' if prefix else '') + name, p
        for mname, module in curr_module.named_children():
            submodule_prefix = prefix + ('.' if prefix else '') + mname
            for name, p in self.named_params(module, memo, submodule_prefix):
                yield name, p
    def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):
        """Take one SGD step, rebinding each parameter to `p - lr_inner * grad`.

        With *source_params* the gradients are supplied externally (e.g. from
        torch.autograd.grad), which keeps the update differentiable unless
        *first_order* detaches them.
        """
        if source_params is not None:
            for tgt, src in zip(self.named_params(self), source_params):
                name_t, param_t = tgt
                grad = src
                if first_order:
                    grad = to_var(grad.detach().data)
                tmp = param_t - lr_inner * grad
                self.set_param(self, name_t, tmp)
        else:
            for name, param in self.named_params(self):
                if not detach:
                    grad = param.grad
                    if first_order:
                        grad = to_var(grad.detach().data)
                    tmp = param - lr_inner * grad
                    self.set_param(self, name, tmp)
                else:
                    param = param.detach_()
                    self.set_param(self, name, param)
    def set_param(self, curr_mod, name, param):
        """Assign *param* at the dotted path *name* below *curr_mod*."""
        if '.' in name:
            n = name.split('.')
            module_name = n[0]
            rest = '.'.join(n[1:])
            for name, mod in curr_mod.named_children():
                if module_name == name:
                    self.set_param(mod, rest, param)
                    break
        else:
            setattr(curr_mod, name, param)
    def detach_params(self):
        """Detach every parameter from the autograd graph in place."""
        for name, param in self.named_params(self):
            self.set_param(self, name, param.detach())
    def copy(self, other, same_var=False):
        """Copy parameters from *other* (cloned unless *same_var*).

        NOTE(review): set_param is called here with only (name, param) but is
        defined as (curr_mod, name, param); this would raise a TypeError if
        copy() were ever invoked -- confirm it is unused before relying on it.
        """
        for name, param in other.named_params():
            if not same_var:
                param = to_var(param.data.clone(), requires_grad=True)
            self.set_param(name, param)
class MetaLinear(MetaModule):
    """Fully connected layer whose weight/bias are grad-enabled buffers.

    Storing the tensors as buffers instead of nn.Parameters lets
    MetaModule.update_params rebind them to differentiable expressions
    during the inner meta-update.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
        # Borrow nn.Linear's shape handling and default initialisation.
        template = nn.Linear(*args, **kwargs)
        self.register_buffer('weight', to_var(template.weight.data, requires_grad=True))
        self.register_buffer('bias', to_var(template.bias.data, requires_grad=True))
    def forward(self, x):
        return F.linear(x, self.weight, self.bias)
    def named_leaves(self):
        # Leaf tensors exposed to MetaModule.named_params.
        return [('weight', self.weight), ('bias', self.bias)]
class MetaLinear_Norm(MetaModule):
    """Cosine-similarity classifier head: normalised features x normalised weights."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        temp = nn.Linear(*args, **kwargs)
        temp.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        # Stored transposed relative to nn.Linear so forward can use x @ weight.
        self.register_buffer('weight', to_var(temp.weight.data.t(), requires_grad=True))
        # NOTE(review): this re-randomises the buffer, discarding the renorm
        # done two lines above -- presumably redundant; confirm before changing.
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
    def forward(self, x):
        # Cosine similarity: L2-normalise feature rows and weight columns.
        out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
        return out
    def named_leaves(self):
        return [('weight', self.weight)]
class MetaConv2d(MetaModule):
    """2-D convolution whose parameters are grad-enabled buffers for meta-updates."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        # Borrow nn.Conv2d's argument parsing and initialisation.
        template = nn.Conv2d(*args, **kwargs)
        # Mirror the hyper-parameters of the reference layer.
        for attr in ('in_channels', 'out_channels', 'stride', 'padding',
                     'dilation', 'groups', 'kernel_size'):
            setattr(self, attr, getattr(template, attr))
        self.register_buffer('weight', to_var(template.weight.data, requires_grad=True))
        if template.bias is None:
            self.register_buffer('bias', None)
        else:
            self.register_buffer('bias', to_var(template.bias.data, requires_grad=True))
    def forward(self, x):
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
    def named_leaves(self):
        # Leaf tensors exposed to MetaModule.named_params.
        return [('weight', self.weight), ('bias', self.bias)]
class MetaConvTranspose2d(MetaModule):
    """Transposed convolution whose parameters are buffers for meta-updates."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        ignore = nn.ConvTranspose2d(*args, **kwargs)
        self.stride = ignore.stride
        self.padding = ignore.padding
        self.dilation = ignore.dilation
        self.groups = ignore.groups
        self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
        if ignore.bias is not None:
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        else:
            self.register_buffer('bias', None)
    def forward(self, x, output_size=None):
        # NOTE(review): `_output_padding` is a method of nn.ConvTranspose2d and
        # is not defined on MetaModule; this line would raise AttributeError if
        # the layer were ever used -- confirm this class is actually exercised.
        output_padding = self._output_padding(x, output_size)
        return F.conv_transpose2d(x, self.weight, self.bias, self.stride, self.padding,
                                  output_padding, self.groups, self.dilation)
    def named_leaves(self):
        return [('weight', self.weight), ('bias', self.bias)]
class MetaBatchNorm2d(MetaModule):
    """BatchNorm2d whose affine parameters are buffers for meta-updates."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        ignore = nn.BatchNorm2d(*args, **kwargs)
        self.num_features = ignore.num_features
        self.eps = ignore.eps
        self.momentum = ignore.momentum
        self.affine = ignore.affine
        self.track_running_stats = ignore.track_running_stats
        if self.affine:
            self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        if self.track_running_stats:
            # Running statistics are plain (non-differentiable) buffers.
            self.register_buffer('running_mean', torch.zeros(self.num_features))
            self.register_buffer('running_var', torch.ones(self.num_features))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
    def forward(self, x):
        # Batch statistics are used in training mode or when running stats are off.
        return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                            self.training or not self.track_running_stats, self.momentum, self.eps)
    def named_leaves(self):
        return [('weight', self.weight), ('bias', self.bias)]
def _weights_init(m):
    """Kaiming-initialise Meta linear/conv layers (applied via `module.apply`)."""
    if isinstance(m, (MetaLinear, MetaConv2d)):
        # `init.kaiming_normal` was deprecated and later removed from PyTorch;
        # `kaiming_normal_` is the supported in-place spelling.
        init.kaiming_normal_(m.weight)
class LambdaLayer(MetaModule):
    """Wrap an arbitrary callable as a layer (used for parameter-free shortcuts)."""
    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd
    def forward(self, x):
        return self.lambd(x)
class BasicBlock(MetaModule):
    """CIFAR ResNet basic block (two 3x3 convs) built from Meta layers."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = MetaConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = MetaBatchNorm2d(planes)
        self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = MetaBatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # Option A (parameter-free): subsample spatially by striding and
                # zero-pad the extra channels.
                self.shortcut = LambdaLayer(lambda x:
                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                # Option B: learned 1x1 projection.
                self.shortcut = nn.Sequential(
                     MetaConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     MetaBatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet32(MetaModule):
    """32-layer CIFAR ResNet (3 stages x 5 BasicBlocks) returning (features, logits)."""
    def __init__(self, num_classes, block=BasicBlock, num_blocks=[5, 5, 5]):
        super(ResNet32, self).__init__()
        self.in_planes = 16
        self.conv1 = MetaConv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = MetaBatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = MetaLinear(64, num_classes)
        self.apply(_weights_init)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool down to a 64-d feature vector.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        y = self.linear(out)
        # Return both penultimate features and logits.
        return out, y
| 10,031 | 34.828571 | 120 | py |
MetaSAug | MetaSAug-main/loss.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import pdb
class EstimatorCV():
    """Online estimator of per-class feature mean and covariance."""
    def __init__(self, feature_num, class_num):
        super(EstimatorCV, self).__init__()
        self.class_num = class_num
        # Running covariance (C x A x A), mean (C x A) and sample counts (C).
        self.CoVariance = torch.zeros(class_num, feature_num, feature_num).cuda()
        self.Ave = torch.zeros(class_num, feature_num).cuda()
        self.Amount = torch.zeros(class_num).cuda()
    def update_CV(self, features, labels):
        """Fold one batch of (features, labels) into the running per-class stats."""
        N = features.size(0)
        C = self.class_num
        A = features.size(1)
        NxCxFeatures = features.view(N, 1, A).expand(N, C, A)
        onehot = torch.zeros(N, C).cuda()
        onehot.scatter_(1, labels.view(-1, 1), 1)
        NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
        # Zero out features that do not belong to each class slot.
        features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
        Amount_CxA = NxCxA_onehot.sum(0)
        Amount_CxA[Amount_CxA == 0] = 1
        # Per-class batch mean.
        ave_CxA = features_by_sort.sum(0) / Amount_CxA
        var_temp = features_by_sort - ave_CxA.expand(N, C, A).mul(NxCxA_onehot)
        # Per-class batch covariance.
        var_temp = torch.bmm(var_temp.permute(1, 2, 0), var_temp.permute(1, 0, 2)).div(Amount_CxA.view(C, A, 1).expand(C, A, A))
        sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
        sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
        # Blend weights between the running stats and this batch's stats;
        # the self-inequality writes clear NaNs from 0/0 divisions.
        weight_CV = sum_weight_CV.div(sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A))
        weight_CV[weight_CV != weight_CV] = 0
        weight_AV = sum_weight_AV.div(sum_weight_AV + self.Amount.view(C, 1).expand(C, A))
        weight_AV[weight_AV != weight_AV] = 0
        # Correction term for the shift between the old and new means.
        additional_CV = weight_CV.mul(1 - weight_CV).mul(
            torch.bmm(
                (self.Ave - ave_CxA).view(C, A, 1),
                (self.Ave - ave_CxA).view(C, 1, A)
            )
        )
        self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp.mul(weight_CV)).detach() + additional_CV.detach()
        self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
        self.Amount += onehot.sum(0)
class LDAM_meta(nn.Module):
    """LDAM margin loss augmented with MetaSAug implicit semantic augmentation.

    Keeps per-class feature covariance in an EstimatorCV and perturbs the
    logits with a class-conditional variance term before the LDAM margin and
    a class-weighted cross-entropy are applied.
    """
    def __init__(self, feature_num, class_num, cls_num_list, max_m=0.5, s=30):
        """
        Args:
            feature_num: dimensionality of the penultimate features.
            class_num: number of classes.
            cls_num_list: per-class sample counts of the training set.
            max_m: largest LDAM margin, assigned to the rarest class.
            s: logit scale (stored but not applied in this implementation).
        """
        super(LDAM_meta, self).__init__()
        self.estimator = EstimatorCV(feature_num, class_num)
        self.class_num = class_num
        self.cross_entropy = nn.CrossEntropyLoss()
        # LDAM margins: m_j proportional to n_j^(-1/4), rescaled so the
        # largest margin equals max_m.
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
        m_list = m_list * (max_m / np.max(m_list))
        m_list = torch.cuda.FloatTensor(m_list)
        self.m_list = m_list
        assert s > 0
        self.s = s
    def MetaSAug(self, fc, features, y_s, labels_s, s_cv_matrix, ratio):
        """Return logits augmented by the variance term and the LDAM margin."""
        N = features.size(0)
        C = self.class_num
        A = features.size(1)
        # Classifier weight matrix broadcast per sample; w_k is each sample's
        # ground-truth class row.
        weight_m = list(fc.named_leaves())[0][1]
        NxW_ij = weight_m.expand(N, C, A)
        NxW_kj = torch.gather(NxW_ij, 1, labels_s.view(N, 1, 1).expand(N, C, A))
        s_CV_temp = s_cv_matrix[labels_s]
        # (w_j - w_k) Sigma_k (w_j - w_k)^T scaled by the annealing ratio;
        # only the diagonal (per-class variance) is kept.
        sigma2 = ratio * torch.bmm(torch.bmm(NxW_ij - NxW_kj, s_CV_temp), (NxW_ij - NxW_kj).permute(0, 2, 1))
        sigma2 = sigma2.mul(torch.eye(C).cuda().expand(N, C, C)).sum(2).view(N, C)
        aug_result = y_s + 0.5 * sigma2
        # Boolean one-hot mask of the ground-truth class. (A torch.uint8 mask
        # for torch.where is deprecated and removed in modern PyTorch; bool is
        # the supported dtype.)
        index = torch.zeros_like(y_s, dtype=torch.bool)
        index.scatter_(1, labels_s.data.view(-1, 1), True)
        index_float = index.type(torch.cuda.FloatTensor)
        # Per-sample LDAM margin of the true class, subtracted from the
        # true-class logit only.
        batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0, 1))
        batch_m = batch_m.view((-1, 1))
        aug_result_m = aug_result - batch_m
        output = torch.where(index, aug_result_m, aug_result)
        return output
    def forward(self, fc, features, y_s, labels, ratio, weights, cv, manner):
        """Class-weighted cross-entropy over the augmented logits.

        When manner == "update" the covariance estimator is also refreshed
        with the (detached) batch features; any other value leaves it as-is.
        """
        aug_y = self.MetaSAug(fc, features, y_s, labels, cv, ratio)
        if manner == "update":
            self.estimator.update_CV(features.detach(), labels)
        loss = F.cross_entropy(aug_y, labels, weight=weights)
        return loss
    def get_cv(self):
        """Expose the current per-class covariance tensor."""
        return self.estimator.CoVariance
    def update_cv(self, cv):
        """Overwrite the stored per-class covariance (used by the meta step)."""
        self.estimator.CoVariance = cv
| 4,318 | 34.401639 | 128 | py |
MetaSAug | MetaSAug-main/data_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision
import numpy as np
import copy
np.random.seed(6)
def build_dataset(dataset, num_meta):
    """Build meta/train/test splits for CIFAR-10/100.

    A balanced meta set of *num_meta* images per class is carved out of the
    training set; everything else becomes the training split.

    Args:
        dataset: 'cifar10' or 'cifar100'.
        num_meta: number of meta (validation) images reserved per class.

    Returns:
        (train_data_meta, train_data, test_dataset) torchvision datasets.

    Raises:
        ValueError: if *dataset* is not a supported CIFAR variant (the
            original code fell through with undefined locals instead).
    """
    if dataset not in ('cifar10', 'cifar100'):
        raise ValueError('dataset must be cifar10 or cifar100, got %r' % (dataset,))
    normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
    # Train-time augmentation: reflect-pad by 4, random 32x32 crop, h-flip.
    transform_train = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
                                          (4, 4, 4, 4), mode='reflect').squeeze()),
        transforms.ToPILImage(),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
    if dataset == 'cifar10':
        train_dataset = torchvision.datasets.CIFAR10(root='../cifar-10', train=True, download=False, transform=transform_train)
        test_dataset = torchvision.datasets.CIFAR10('../cifar-10', train=False, transform=transform_test)
        img_num_list = [num_meta] * 10
        num_classes = 10
    if dataset == 'cifar100':
        train_dataset = torchvision.datasets.CIFAR100(root='../cifar-100', train=True, download=True, transform=transform_train)
        test_dataset = torchvision.datasets.CIFAR100('../cifar-100', train=False, transform=transform_test)
        img_num_list = [num_meta] * 100
        num_classes = 100
    # Collect the training sample indices of every class.
    data_list_val = {}
    for j in range(num_classes):
        data_list_val[j] = [i for i, label in enumerate(train_dataset.targets) if label == j]
    idx_to_meta = []
    idx_to_train = []
    print(img_num_list)
    # Randomly reserve num_meta samples per class for the meta split.
    for cls_idx, img_id_list in data_list_val.items():
        np.random.shuffle(img_id_list)
        img_num = img_num_list[int(cls_idx)]
        idx_to_meta.extend(img_id_list[:img_num])
        idx_to_train.extend(img_id_list[img_num:])
    train_data = copy.deepcopy(train_dataset)
    train_data_meta = copy.deepcopy(train_dataset)
    train_data_meta.data = np.delete(train_dataset.data, idx_to_train, axis=0)
    train_data_meta.targets = np.delete(train_dataset.targets, idx_to_train, axis=0)
    train_data.data = np.delete(train_dataset.data, idx_to_meta, axis=0)
    train_data.targets = np.delete(train_dataset.targets, idx_to_meta, axis=0)
    return train_data_meta, train_data, test_dataset
def get_img_num_per_cls(dataset, imb_factor=None, num_meta=None):
    """Compute per-class image counts for an exponentially imbalanced split.

    Args:
        dataset: 'cifar10' or 'cifar100'.
        imb_factor: ratio between the rarest and the most frequent class
            (e.g. 0.01 for 100:1 imbalance); None keeps the split balanced.
        num_meta: total number of images already reserved for the meta set.

    Returns:
        List of integer image counts, one per class, decaying exponentially
        from the head class to the tail class.

    Raises:
        ValueError: if *dataset* is not a supported CIFAR variant (the
            original code crashed with an UnboundLocalError instead).
    """
    if dataset == 'cifar10':
        img_max = (50000 - num_meta) / 10
        cls_num = 10
    elif dataset == 'cifar100':
        img_max = (50000 - num_meta) / 100
        cls_num = 100
    else:
        raise ValueError('dataset must be cifar10 or cifar100, got %r' % (dataset,))
    if imb_factor is None:
        # Balanced split. Return ints for consistency with the imbalanced
        # branch (the original returned floats here).
        return [int(img_max)] * cls_num
    img_num_per_cls = []
    for cls_idx in range(cls_num):
        num = img_max * (imb_factor ** (cls_idx / (cls_num - 1.0)))
        img_num_per_cls.append(int(num))
    return img_num_per_cls
| 3,070 | 33.897727 | 128 | py |
MetaSAug | MetaSAug-main/MetaSAug_test.py | import os
import time
import argparse
import random
import copy
import torch
import torchvision
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import torchvision.transforms as transforms
from data_utils import *
from resnet import *
import shutil
import gc
# ---- Command-line configuration and test-data loading -----------------------
parser = argparse.ArgumentParser(description='Imbalanced Example')
parser.add_argument('--checkpoint_path', default='path.pth.tar', type=str,
                    help='the path of checkpoint')
parser.add_argument('--dataset', default='cifar10', type=str)
# Imbalance ratio of the checkpoint being evaluated (type=float coerces the
# string default).
parser.add_argument('--imb_factor', default='0.1', type=float)
args = parser.parse_args()
print('checkpoint_path:', args.checkpoint_path)
# NOTE(review): `params` is never used afterwards.
params = args.checkpoint_path.split('_')
dataset = args.dataset
imb_factor = args.imb_factor
kwargs = {'num_workers': 4, 'pin_memory': False}
use_cuda = torch.cuda.is_available()
torch.manual_seed(42)
print('start loading test data')
# Only the test split is used below; the two train splits are discarded.
train_data_meta, train_data, test_dataset = build_dataset(dataset, 10)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=100, shuffle=False, **kwargs)
print('load test data successfully')
best_prec1 = 0
def main():
    """Load the checkpoint, evaluate it on the test set and print a summary."""
    global args, best_prec1
    args = parser.parse_args()
    model = build_model()
    net_dict = torch.load(args.checkpoint_path)
    model.load_state_dict(net_dict['state_dict'])
    prec1, preds, gt_labels = validate(
        test_loader, model, nn.CrossEntropyLoss().cuda(), 0)
    print('Test result:\n'
          'Dataset: {0}\t'
          'Imb_factor: {1}\t'
          'Accuracy: {2:.2f} \t'
          'Error: {3:.2f} \n'.format(
              dataset, int(1 / imb_factor), prec1,100 - prec1))
def validate(val_loader, model, criterion, epoch):
    """Run *model* over *val_loader* and return (top1_avg, preds, true_labels).

    NOTE(review): `criterion` and `epoch` are unused, and `losses` is never
    updated, so the printed loss is always 0.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.eval()
    true_labels = []
    preds = []
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        target = target.cuda()
        input = input.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        with torch.no_grad():
            _, output = model(input_var)
        # Collect argmax predictions and ground truth for the caller.
        output_numpy = output.data.cpu().numpy()
        preds_output = list(output_numpy.argmax(axis=1))
        true_labels += list(target_var.data.cpu().numpy())
        preds += preds_output
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        top1.update(prec1.item(), input.size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if i % 100 == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses,
                      top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg, preds, true_labels
def build_model():
    """Create a ResNet32 sized for the test dataset, moved to the GPU if possible."""
    num_classes = 10 if dataset == 'cifar10' else 100
    net = ResNet32(num_classes)
    if torch.cuda.is_available():
        net.cuda()
        torch.backends.cudnn.benchmark = True
    return net
class AverageMeter(object):
    """Running-average bookkeeping for scalar metrics (loss, accuracy, timing)."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Zero every statistic."""
        for field in ('val', 'avg', 'sum', 'count'):
            setattr(self, field, 0)
    def update(self, val, n=1):
        """Fold in *val*, weighted by the batch size *n*."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (percent) of score matrix *output* against labels *target*."""
    maxk = max(topk)
    batch_size = target.size(0)
    # Class indices ranked by descending score; transpose to (rank, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    percentages = []
    for k in topk:
        num_hits = correct[:k].reshape(-1).float().sum(0)
        percentages.append(num_hits.mul_(100.0 / batch_size))
    return percentages
if __name__ == '__main__':
    # Script entry point: evaluate the checkpoint given on the command line.
    main()
| 4,032 | 23.295181 | 77 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/ResNet.py | from resnet_meta import *
from utils import *
from os import path
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
    """Build a meta ResNet-50 feature extractor or its FC classifier head.

    Args:
        use_selfatt: unused in this implementation.
        use_fc: when True return the 2048->1000 classifier head instead of
            the convolutional feature extractor.
        dropout: NOTE(review) ignored -- FeatureMeta is always built with
            dropout=None regardless of this argument; confirm intent.
        stage1_weights: load stage-1 pretrained weights (requires *dataset*).
        dataset: dataset name used to locate the stage-1 weights.
        log_dir: directory holding the weights; falls back to
            ./logs/<dataset>/stage1 when None.
        test: when True skip weight loading entirely.
    """
    print('Loading Scratch ResNet 50 Feature Model.')
    if not use_fc:
        resnet50 = FeatureMeta(BottleneckMeta, [3, 4, 6, 3], dropout=None)
    else:
        resnet50 = FCMeta(2048, 1000)
    if not test:
        if stage1_weights:
            assert dataset
            print('Loading %s Stage 1 ResNet 10 Weights.' % dataset)
            if log_dir is not None:
                # subdir = log_dir.strip('/').split('/')[-1]
                # subdir = subdir.replace('stage2', 'stage1')
                # weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), subdir)
                #weight_dir = path.join('/'.join(log_dir.split('/')[:-1]), 'stage1')
                weight_dir = log_dir
            else:
                weight_dir = './logs/%s/stage1' % dataset
            print('==> Loading weights from %s' % weight_dir)
            if not use_fc:
                resnet50 = init_weights(model=resnet50,
                                        weights_path=weight_dir)
            else:
                resnet50 = init_weights(model=resnet50, weights_path=weight_dir, classifier=True)
            #resnet50.load_state_dict(torch.load(weight_dir))
        else:
            print('No Pretrained Weights For Feature Model.')
    return resnet50
| 1,473 | 39.944444 | 133 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/test.py |
import os
import time
import argparse
import random
import copy
import torch
import torchvision
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
from data_utils import *
from dataloader import load_data_distributed
import shutil
from ResNet import *
import resnet_meta
import multiprocessing
import torch.nn.parallel
import torch.nn as nn
from collections import Counter
import time
# Command-line interface for evaluation: dataset selection, data paths,
# loader/batch sizes, optimizer hyper-parameters, and checkpoint location.
parser = argparse.ArgumentParser(description='Imbalanced Example')
parser.add_argument('--dataset', default='iNaturalist18', type=str,
                    help='dataset')
parser.add_argument('--data_root', default='/data1/TL/data/iNaturalist18', type=str)
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--num_classes', type=int, default=8142)
parser.add_argument('--num_meta', type=int, default=10,
                    help='The number of meta data for each class.')
parser.add_argument('--test_batch_size', type=int, default=512, metavar='N',
                    help='input batch size for testing (default: 100)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
                    help='number of epochs to train')
parser.add_argument('--lr', '--learning-rate', default=1e-1, type=float,
                    help='initial learning rate')
parser.add_argument('--workers', default=16, type=int)
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                    help='weight decay (default: 5e-4)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--split', type=int, default=1000)
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 42)')
parser.add_argument('--print-freq', '-p', default=1000, type=int,
                    help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int)
parser.add_argument('--lam', default=0.25, type=float)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--meta_lr', default=0.1, type=float)
parser.add_argument('--loading_path', default=None, type=str)
args = parser.parse_args()
# print(args)
# Echo the full configuration so each run is self-describing in the logs.
for arg in vars(args):
    print("{}={}".format(arg, getattr(args, arg)))
# Environment, reproducibility, and evaluation data pipeline setup.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
kwargs = {'num_workers': 1, 'pin_memory': True}
use_cuda = not args.no_cuda and torch.cuda.is_available()
cudnn.benchmark = True
cudnn.enabled = True
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
# ImageNet_LT is evaluated on its 'test' split; other datasets (e.g.
# iNaturalist18) are evaluated on 'val'.
if args.dataset == 'ImageNet_LT':
    val_set = load_data_distributed(data_root=args.data_root, dataset=args.dataset, phase="test", batch_size=args.test_batch_size,
                                 num_workers=args.workers, test_open=False, shuffle=False)
else:
    val_set = load_data_distributed(data_root=args.data_root, dataset=args.dataset, phase="val", batch_size=args.test_batch_size,
                                 num_workers=args.workers, test_open=False, shuffle=False)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.test_batch_size, shuffle=False, num_workers=0, pin_memory=True)
# Fix the RNGs so evaluation is deterministic.
np.random.seed(42)
random.seed(42)
torch.manual_seed(args.seed)
data_list = {}
data_list_num = []
best_prec1 = 0
# Stage-1 pretrained checkpoint path per dataset.
model_dict = {"ImageNet_LT": "models/resnet50_uniform_e90.pth",
              "iNaturalist18": "models/iNat18/resnet50_uniform_e200.pth"}
def main():
    """Evaluate a trained classifier + feature extractor checkpoint.

    Loads classifier weights from --loading_path and the feature extractor
    from the per-dataset stage-1 checkpoint (model_dict), then runs
    validate() on the global val_loader and prints top-1 accuracy.
    """
    global args, best_prec1
    args = parser.parse_args()
    cudnn.benchmark = True
    print(torch.cuda.is_available())
    print(torch.cuda.device_count())
    model = FCModel(2048, args.num_classes)
    model = model.cuda()
    loading_path = args.loading_path
    weights = torch.load(loading_path, map_location=torch.device("cpu"))
    weights_c = weights['state_dict']['classifier']
    # Keep only the keys the classifier model actually declares.
    weights_c = {k: weights_c[k] for k in model.state_dict()}
    for k in model.state_dict():
        # NOTE(review): this checks the top-level checkpoint dict, not
        # weights_c, so it probably never fires — confirm intent.
        if k not in weights:
            print("Loading Weights Warning.")
    model.load_state_dict(weights_c)
    feature_extractor = create_model(stage1_weights=True, dataset=args.dataset, log_dir=model_dict[args.dataset])
    weight_f = weights['state_dict']['feature']
    feature_extractor.load_state_dict(weight_f)
    feature_extractor = feature_extractor.cuda()
    feature_extractor.eval()
    prec1, preds, gt_labels = validate(val_loader, model, feature_extractor, nn.CrossEntropyLoss())
    print('Accuracy: ', prec1)
def validate(val_loader, model, feature_extractor, criterion):
    """Run one pass over the validation set.

    Returns:
        (top1_avg, preds, true_labels): average top-1 accuracy (percent),
        plus the flat lists of predicted and ground-truth class indices.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    true_labels = []
    preds = []
    torch.cuda.empty_cache()
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output (no gradients: evaluation only)
        with torch.no_grad():
            feature = feature_extractor(input)
            output = model(feature)
        loss = criterion(output, target)
        # Collect per-sample argmax predictions for later analysis.
        output_numpy = output.data.cpu().numpy()
        preds_output = list(output_numpy.argmax(axis=1))
        true_labels += list(target.data.cpu().numpy())
        preds += preds_output
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                i, len(val_loader), batch_time=batch_time, loss=losses,
                top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    # log to TensorBoard
    # import pdb; pdb.set_trace()
    return top1.avg, preds, true_labels
def to_var(x, requires_grad=True):
    """Move *x* to the GPU when one is available and wrap it as an
    autograd Variable with the requested gradient flag."""
    device_x = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_x, requires_grad=requires_grad)
class AverageMeter(object):
    """Track the most recent value and a weighted running average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Zero out every accumulated statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0
    def update(self, val, n=1):
        """Fold in *val*, observed *n* times, and refresh the mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (in percent) for each k in *topk*.

    Args:
        output: (N, C) score/logit tensor.
        target: (N,) ground-truth class indices.
        topk: iterable of k values to evaluate.
    Returns:
        List of scalar tensors, one per k, each in [0, 100].
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Use reshape instead of view: the sliced comparison tensor can be
        # non-contiguous in recent PyTorch versions, where .view(-1) raises
        # "view size is not compatible with input tensor's size and stride".
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def save_checkpoint(args, state, is_best, epoch):
    """Serialize *state* to a run-specific checkpoint path.

    The path encodes dataset, lr, batch size, meta lr and epoch, e.g.
    checkpoint/train_<dataset>/<lr>_<bs>_<meta_lr>epoch<E>_ckpt.pth.tar.
    *is_best* is currently unused and kept only for interface compatibility.
    """
    filename = 'checkpoint/' + 'train_' + str(args.dataset) + '/' + str(args.lr) + '_' + str(args.batch_size) + '_' + str(args.meta_lr) + 'epoch' + str(epoch) + '_ckpt.pth.tar'
    file_root, _ = os.path.split(filename)
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern (safe when
    # several workers checkpoint concurrently).
    os.makedirs(file_root, exist_ok=True)
    torch.save(state, filename)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 7,842 | 33.70354 | 177 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/dataloader.py | import numpy as np
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
import os
from PIL import Image
import json
# Per-channel RGB normalization statistics (mean/std). iNaturalist18 uses
# its own dataset statistics; every other dataset falls back to the
# standard ImageNet values under the 'default' key.
RGB_statistics = {
    'iNaturalist18': {
        'mean': [0.466, 0.471, 0.380],
        'std': [0.195, 0.194, 0.192]
    },
    'default': {
        'mean': [0.485, 0.456, 0.406],
        'std': [0.229, 0.224, 0.225]
    }
}
def get_data_transform(split, rgb_mean, rbg_std, key='default'):
    """Return the torchvision transform pipeline for *split*.

    'train' applies random resized crops and horizontal flips (plus color
    jitter for every dataset except iNaturalist18); 'val' and 'test' use a
    deterministic resize + center crop. Raises KeyError for other splits.
    """
    normalize = transforms.Normalize(rgb_mean, rbg_std)
    if key == 'iNaturalist18':
        train_ops = [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    else:
        train_ops = [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
            transforms.ToTensor(),
            normalize,
        ]
    eval_ops = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]
    data_transforms = {
        'train': transforms.Compose(train_ops),
        'val': transforms.Compose(eval_ops),
        'test': transforms.Compose(eval_ops),
    }
    return data_transforms[split]
class LT_Dataset(Dataset):
    """Long-tailed dataset indexed by a '<relative_path> <label>' text file."""
    def __init__(self, root, txt, transform=None):
        self.img_path = []
        self.labels = []
        self.transform = transform
        print("--------------------------------------------")
        print(root)
        with open(txt) as f:
            for line in f:
                rel_path, label = line.split()[:2]
                self.img_path.append(os.path.join(root, rel_path))
                self.labels.append(int(label))
    def __len__(self):
        """Number of samples listed in the index file."""
        return len(self.labels)
    def __getitem__(self, index):
        """Load the image at *index* as RGB and apply the transform, if any."""
        label = self.labels[index]
        with open(self.img_path[index], 'rb') as f:
            sample = Image.open(f).convert('RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, label
class LT_Dataset_iNat17(Dataset):
    """iNaturalist17 dataset whose index lives in a JSON annotation file.

    The JSON must carry parallel 'images' (file_name) and 'annotations'
    (category_id) lists.
    """
    def __init__(self, root, txt, transform=None):
        self.img_path = []
        self.labels = []
        self.transform = transform
        with open(txt, 'r', encoding='utf8') as fp:
            json_data = json.load(fp)
        images = json_data["images"]
        annotations = json_data["annotations"]
        for i, img_rec in enumerate(images):
            self.img_path.append(os.path.join(root, img_rec["file_name"]))
            self.labels.append(int(annotations[i]["category_id"]))
    def __len__(self):
        """Number of annotated samples."""
        return len(self.labels)
    def __getitem__(self, index):
        """Load the image at *index* as RGB and apply the transform, if any."""
        label = self.labels[index]
        with open(self.img_path[index], 'rb') as f:
            sample = Image.open(f).convert('RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, label
def load_data_distributed(data_root, dataset, phase, batch_size, sampler_dic=None, num_workers=4, test_open=False, shuffle=True):
    """Build and return the dataset object for *dataset*/*phase*.

    Despite its name, no DataLoader or sampler is constructed here; the
    extra loader arguments are accepted only for interface compatibility.
    The index file is looked up under data/<dataset>_<phase>.{json,txt}.
    """
    if dataset == "iNaturalist17":
        txt = 'data/%s_%s.json' % (dataset, phase)
    else:
        txt = 'data/%s_%s.txt' % (dataset, phase)
    print('Loading data from %s' % txt)
    # Both iNaturalist releases share the iNaturalist18 statistics.
    if dataset == 'iNaturalist18':
        print('===> Loading iNaturalist18 statistics')
        key = 'iNaturalist18'
    elif dataset == 'iNaturalist17':
        print('===> Loading iNaturalist17 statistics')
        key = 'iNaturalist18'
    else:
        key = 'default'
    stats = RGB_statistics[key]
    rgb_mean, rgb_std = stats['mean'], stats['std']
    # Anything outside train/val evaluates with the deterministic pipeline.
    split = phase if phase in ['train', 'val'] else 'test'
    transform = get_data_transform(split, rgb_mean, rgb_std, key)
    print('Use data transformation:', transform)
    dataset_cls = LT_Dataset_iNat17 if dataset == "iNaturalist17" else LT_Dataset
    set_ = dataset_cls(data_root, txt, transform)
    return set_
| 4,682 | 28.828025 | 129 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/loss.py | # -*- coding: utf-8 -*
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import pdb
def MI(outputs_target):
    """Mutual-information terms of a batch of logits.

    Returns (entropy of the mean prediction, mean per-sample entropy);
    their difference is the mutual information between inputs and
    predicted classes.
    """
    n = float(outputs_target.size(0))
    probs = nn.Softmax(dim=1)(outputs_target)
    marginal = torch.sum(probs, dim=0) / n
    item1 = -torch.sum(marginal * torch.log(marginal))
    item2 = -torch.sum(probs * torch.log(probs)) / n
    return item1, item2
class EstimatorMean():
    """Running per-class feature-mean estimator (on GPU).

    Ave holds the (class_num, feature_num) running means and Amount the
    per-class sample counts; each update_Mean call folds a batch in with
    a weight proportional to the batch's share of the total count.
    """
    def __init__(self, feature_num, class_num):
        super(EstimatorMean, self).__init__()
        self.class_num = class_num
        self.Ave = torch.zeros(class_num, feature_num).cuda()
        self.Amount = torch.zeros(class_num).cuda()
    def update_Mean(self, features, labels):
        """Fold a batch of (features (N, A), labels (N,)) into the means."""
        N = features.size(0)
        C = self.class_num
        A = features.size(1)
        NxCxFeatures = features.view(N, 1, A).expand(N, C, A)
        onehot = torch.zeros(N, C).cuda()
        onehot.scatter_(1, labels.view(-1, 1), 1)
        NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
        # Zero out each sample's features for every class but its own.
        features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
        Amount_CxA = NxCxA_onehot.sum(0)
        Amount_CxA[Amount_CxA == 0] = 1
        # Per-class mean of this batch (classes absent in the batch get 0).
        ave_CxA = features_by_sort.sum(0) / Amount_CxA
        sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
        sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
        weight_CV = sum_weight_CV.div(sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A))
        # x != x selects NaNs (0/0 for classes never seen) and zeroes them.
        weight_CV[weight_CV != weight_CV] = 0
        weight_AV = sum_weight_AV.div(sum_weight_AV + self.Amount.view(C, 1).expand(C, A))
        weight_AV[weight_AV != weight_AV] = 0
        # Convex combination of the old mean and the batch mean.
        self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
        self.Amount += onehot.sum(0)
# Running estimator of the per-class (diagonal) feature covariance matrix.
class EstimatorCV():
    """Tracks CoVariance (class_num, feature_num) diagonal variances,
    Ave (class_num, feature_num) means and Amount (class_num) counts,
    all on GPU, updated incrementally batch by batch."""
    def __init__(self, feature_num, class_num):
        super(EstimatorCV, self).__init__()
        self.class_num = class_num
        self.CoVariance = torch.zeros(class_num, feature_num).cuda()
        self.Ave = torch.zeros(class_num, feature_num).cuda()
        self.Amount = torch.zeros(class_num).cuda()
    def update_CV(self, features, labels):
        """Fold a batch of (features (N, A), labels (N,)) into the running
        per-class diagonal covariances and means."""
        N = features.size(0)
        C = self.class_num
        A = features.size(1)
        NxCxFeatures = features.view(N, 1, A).expand(N, C, A)
        # onehot = torch.zeros(N, C).cuda()
        onehot = torch.zeros(N, C).cuda()
        onehot.scatter_(1, labels.view(-1, 1), 1)
        NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
        # Zero out each sample's features for every class but its own.
        features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
        Amount_CxA = NxCxA_onehot.sum(0)
        Amount_CxA[Amount_CxA == 0] = 1
        # Per-class mean of this batch.
        ave_CxA = features_by_sort.sum(0) / Amount_CxA
        var_temp = features_by_sort - ave_CxA.expand(N, C, A).mul(NxCxA_onehot)
        # var_temp = torch.bmm(var_temp.permute(1, 2, 0), var_temp.permute(1, 0, 2)).div(Amount_CxA.view(C, A, 1).expand(C, A, A))
        # Diagonal-only variance of this batch (full covariance commented out above).
        var_temp = torch.mul(var_temp.permute(1, 2, 0), var_temp.permute(1, 2, 0)).sum(2).div(Amount_CxA.view(C, A))
        # sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
        sum_weight_CV = onehot.sum(0).view(C, 1).expand(C, A)
        sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
        # weight_CV = sum_weight_CV.div(sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A))
        weight_CV = sum_weight_CV.div(sum_weight_CV + self.Amount.view(C, 1).expand(C, A))
        # x != x selects NaNs (0/0 for classes never seen) and zeroes them.
        weight_CV[weight_CV != weight_CV] = 0
        weight_AV = sum_weight_AV.div(sum_weight_AV + self.Amount.view(C, 1).expand(C, A))
        weight_AV[weight_AV != weight_AV] = 0
        # Cross term correcting for the shift between old and batch means.
        additional_CV = weight_CV.mul(1 - weight_CV).mul(
            torch.mul(
                (self.Ave - ave_CxA).view(C, A),
                (self.Ave - ave_CxA).view(C, A)
            )
        )
        # (self.Ave - ave_CxA).pow(2)
        self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp.mul(
            weight_CV)).detach() + additional_CV.detach()
        self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
        self.Amount += onehot.sum(0)
class Loss_meta(nn.Module):
    """MetaSAug loss: weighted cross-entropy over logits augmented with
    per-class feature covariances (implicit semantic augmentation)."""
    def __init__(self, feature_num, class_num):
        super(Loss_meta, self).__init__()
        self.source_estimator = EstimatorCV(feature_num, class_num)
        self.class_num = class_num
        self.cross_entropy = nn.CrossEntropyLoss()
    def MetaSAug(self, fc, features, y_s, labels_s, s_cv_matrix, ratio):
        """Return y_s + 0.5 * ratio * sum_a (w_ja - w_ya)^2 * Cov[y]_a for
        every class j — the closed-form augmentation correction term."""
        N = features.size(0)
        C = self.class_num
        A = features.size(1)
        weight_m = fc
        NxW_ij = weight_m.expand(N, C, A)
        # Classifier weight row of each sample's ground-truth class.
        NxW_kj = torch.gather(NxW_ij, 1, labels_s.view(N, 1, 1).expand(N, C, A))
        # Per-sample covariance row, selected by ground-truth label.
        CV_temp = s_cv_matrix[labels_s]
        sigma2 = ratio * (weight_m - NxW_kj).pow(2).mul(CV_temp.view(N, 1, A).expand(N, C, A)).sum(2)
        aug_result = y_s + 0.5 * sigma2
        return aug_result
    def forward(self, fc, features_source, y_s, labels_source, ratio, weights, cv, mode):
        """Weighted CE over augmented logits; mode == "update" also folds
        the (detached) batch into the running covariance estimate."""
        aug_y = self.MetaSAug(fc, features_source, y_s, labels_source, cv, \
                                                  ratio)
        if mode == "update":
            self.source_estimator.update_CV(features_source.detach(), labels_source)
            loss = F.cross_entropy(aug_y, labels_source, weight=weights)
        else:
            loss = F.cross_entropy(aug_y, labels_source, weight=weights)
        return loss
    def get_cv(self):
        # Current running covariance (class_num, feature_num).
        return self.source_estimator.CoVariance
    def update_cv(self, cv):
        # Overwrite the running covariance (used after the meta update step).
        self.source_estimator.CoVariance = cv
| 5,772 | 36.245161 | 130 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/utils.py | import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
import importlib
def source_import(file_path):
    """Load and execute *file_path* as an anonymous module via importlib."""
    module_spec = importlib.util.spec_from_file_location('', file_path)
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
def batch_show(inp, title=None):
    """Display a (C, H, W) image tensor with matplotlib.

    Undoes the ImageNet mean/std normalization, clips to [0, 1] and shows
    the result in a 20x20-inch figure. Display-only: nothing is returned.
    """
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.figure(figsize=(20,20))
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
def print_write(print_str, log_file):
    """Echo *print_str* (an iterable of values) to stdout and append the
    same line to *log_file*; a None log file means console-only."""
    print(*print_str)
    if log_file is not None:
        with open(log_file, 'a') as f:
            print(*print_str, file=f)
def init_weights(model, weights_path, caffe=False, classifier=False):
    """Initialize *model* from a stage-1 checkpoint.

    The checkpoint holds state_dict_best -> {'feat_model', 'classifier'}
    with keys prefixed by 'module.' (DataParallel). *classifier* selects
    which sub-dict to load; *caffe* keeps unmatched keys at their current
    values instead of requiring an exact match.
    """
    print('Pretrained %s weights path: %s' % ('classifier' if classifier else 'feature model', weights_path))
    weights = torch.load(weights_path, map_location=torch.device("cpu"))
    weights_c = weights['state_dict_best']['classifier']
    weights_f = weights['state_dict_best']['feat_model']
    if not classifier:
        if caffe:
            weights = {k: weights[k] if k in weights else model.state_dict()[k]
                       for k in model.state_dict()}
        else:
            weights = weights['state_dict_best']['feat_model']
            # Strip the DataParallel 'module.' prefix for every model key.
            weights = {k: weights['module.' + k] for k in model.state_dict()}
    else:
        weights = weights['state_dict_best']['classifier']
        # Prefer the prefixed checkpoint entry, otherwise keep current value.
        weights = {k: weights['module.' + k] if 'module.' + k in weights else model.state_dict()[k]
                   for k in model.state_dict()}
    model.load_state_dict(weights)
    return model
def shot_acc(preds, labels, train_data, many_shot_thr=100, low_shot_thr=20, acc_per_cls=False):
    """Accuracy split by training-frequency regime.

    Classes with more than *many_shot_thr* training samples are
    'many-shot', fewer than *low_shot_thr* are 'low-shot', the rest are
    'median-shot'. Returns the mean accuracy of each regime (empty
    regimes count as 0), plus per-class accuracies when *acc_per_cls*.
    """
    if isinstance(train_data, np.ndarray):
        training_labels = np.array(train_data).astype(int)
    else:
        training_labels = np.array(train_data.dataset.labels).astype(int)
    if isinstance(preds, torch.Tensor):
        preds = preds.detach().cpu().numpy()
        labels = labels.detach().cpu().numpy()
    elif not isinstance(preds, np.ndarray):
        raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
    train_class_count, test_class_count, class_correct = [], [], []
    for cls in np.unique(labels):
        mask = labels == cls
        train_class_count.append(int((training_labels == cls).sum()))
        test_class_count.append(int(mask.sum()))
        class_correct.append((preds[mask] == labels[mask]).sum())
    many_shot, median_shot, low_shot = [], [], []
    for n_train, n_test, n_correct in zip(train_class_count, test_class_count, class_correct):
        acc = n_correct / n_test
        if n_train > many_shot_thr:
            many_shot.append(acc)
        elif n_train < low_shot_thr:
            low_shot.append(acc)
        else:
            median_shot.append(acc)
    # An empty regime contributes 0 rather than a NaN mean.
    for bucket in (many_shot, median_shot, low_shot):
        if not bucket:
            bucket.append(0)
    if acc_per_cls:
        class_accs = [c / cnt for c, cnt in zip(class_correct, test_class_count)]
        return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot), class_accs
    return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def weighted_shot_acc(preds, labels, ws, train_data, many_shot_thr=100, low_shot_thr=20):
    """Sample-weighted variant of shot accuracy.

    Same regime split as shot_acc, but each test sample contributes its
    weight in *ws* instead of counting 1. Empty regimes yield NaN means
    (matching the unguarded np.mean behavior).
    """
    training_labels = np.array(train_data.dataset.labels).astype(int)
    if isinstance(preds, torch.Tensor):
        preds = preds.detach().cpu().numpy()
        labels = labels.detach().cpu().numpy()
    elif not isinstance(preds, np.ndarray):
        raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
    train_class_count, test_class_count, class_correct = [], [], []
    for cls in np.unique(labels):
        mask = labels == cls
        train_class_count.append(int((training_labels == cls).sum()))
        test_class_count.append(ws[mask].sum())
        class_correct.append(((preds[mask] == labels[mask]) * ws[mask]).sum())
    many_shot, median_shot, low_shot = [], [], []
    for n_train, w_test, w_correct in zip(train_class_count, test_class_count, class_correct):
        acc = w_correct / w_test
        if n_train > many_shot_thr:
            many_shot.append(acc)
        elif n_train < low_shot_thr:
            low_shot.append(acc)
        else:
            median_shot.append(acc)
    return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
def F_measure(preds, labels, openset=False, theta=None):
    """F1 score.

    openset=True: manual F1 where label -1 marks the 'unknown' class;
    otherwise the macro-averaged sklearn f1_score over tensors.
    *theta* is accepted for interface compatibility but unused.
    """
    if openset:
        # f1 score for openset evaluation
        true_pos = 0.
        false_pos = 0.
        false_neg = 0.
        for i in range(len(labels)):
            true_pos += 1 if preds[i] == labels[i] and labels[i] != -1 else 0
            false_pos += 1 if preds[i] != labels[i] and labels[i] != -1 and preds[i] != -1 else 0
            false_neg += 1 if preds[i] != labels[i] and labels[i] == -1 else 0
        # Guard the denominators: the original raised ZeroDivisionError
        # when there were no positive predictions/targets; an F1 of 0 is
        # the conventional value for that degenerate case.
        precision = true_pos / (true_pos + false_pos + 1e-12)
        recall = true_pos / (true_pos + false_neg + 1e-12)
        return 2 * ((precision * recall) / (precision + recall + 1e-12))
    else:
        # Regular f1 score
        return f1_score(labels.detach().cpu().numpy(), preds.detach().cpu().numpy(), average='macro')
def mic_acc_cal(preds, labels):
    """Micro (overall) top-1 accuracy.

    *labels* may be a plain tensor, or a mixup triple
    (targets_a, targets_b, lam) whose accuracies are blended by lam.
    """
    if isinstance(labels, tuple):
        assert len(labels) == 3
        targets_a, targets_b, lam = labels
        hits_a = preds.eq(targets_a.data).cpu().sum().float()
        hits_b = preds.eq(targets_b.data).cpu().sum().float()
        return (lam * hits_a + (1 - lam) * hits_b) / len(preds)
    return (preds == labels).sum().item() / len(labels)
def weighted_mic_acc_cal(preds, labels, ws):
    """Sample-weighted micro accuracy: weight mass of correct predictions
    divided by total weight mass."""
    correct_weight = ws[preds == labels].sum()
    return correct_weight / ws.sum()
def class_count(data):
    """Number of samples per class, ordered by ascending label value."""
    labels = np.array(data.dataset.labels)
    return [int((labels == cls).sum()) for cls in np.unique(labels)]
def torch2numpy(x):
    """Recursively convert tensors (and list/tuple containers of them) to
    numpy arrays; anything else passes through unchanged. Containers are
    always returned as tuples."""
    if isinstance(x, torch.Tensor):
        return x.detach().cpu().numpy()
    if isinstance(x, (list, tuple)):
        return tuple(torch2numpy(item) for item in x)
    return x
def logits2score(logits, labels):
    """Softmax probability each sample assigns to its ground-truth label,
    returned as a numpy array."""
    probs = F.softmax(logits, dim=1)
    picked = probs.gather(1, labels.view(-1, 1))
    return picked.squeeze().cpu().numpy()
def logits2entropy(logits):
    """Shannon entropy of each sample's softmax distribution (numpy).

    A tiny epsilon keeps log() finite for zero probabilities.
    """
    probs = F.softmax(logits, dim=1).cpu().numpy() + 1e-30
    return np.sum(-probs * np.log(probs), 1)
def logits2CE(logits, labels):
    """Per-sample cross-entropy (-log p_true) of softmax predictions,
    returned as a numpy array; epsilon guards log(0)."""
    probs = F.softmax(logits, dim=1)
    picked = probs.gather(1, labels.view(-1, 1)).squeeze().cpu().numpy() + 1e-30
    return -np.log(picked)
def get_priority(ptype, logits, labels):
    """Per-sample priority weights under scheme *ptype*:
    'score' -> 1 - p_true, 'entropy' -> prediction entropy,
    'CE' -> per-sample cross-entropy. Other values raise (ws unbound)."""
    if ptype == 'score':
        ws = 1 - logits2score(logits, labels)
    elif ptype == 'entropy':
        ws = logits2entropy(logits)
    elif ptype == 'CE':
        ws = logits2CE(logits, labels)
    return ws
def get_value(oldv, newv):
    """Prefer *newv* when it is not None, otherwise keep *oldv*."""
    return oldv if newv is None else newv
| 7,719 | 32.859649 | 109 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/data_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision
import numpy as np
import copy
np.random.seed(6)
#random.seed(2)
def build_dataset(dataset,num_meta,num_classes):
    """Split *dataset* into a meta set (num_meta samples per class) and a
    training set holding the remainder.

    Per-class indices are shuffled with np.random, so the split depends on
    the global NumPy RNG state. Returns (meta_data, train_data), both deep
    copies of *dataset* with img_path/labels filtered via np.delete.
    """
    img_num_list = [num_meta] * num_classes
    # print(train_dataset.targets)
    data_list_val = {}
    # Collect the sample indices belonging to each class.
    for j in range(num_classes):
        data_list_val[j] = [i for i, label in enumerate(dataset.labels) if label == j]
    idx_to_meta = []
    idx_to_train = []
    #print(img_num_list)
    for cls_idx, img_id_list in data_list_val.items():
        np.random.shuffle(img_id_list)
        img_num = img_num_list[int(cls_idx)]
        idx_to_meta.extend(img_id_list[:img_num])
        idx_to_train.extend(img_id_list[img_num:])
    train_data = copy.deepcopy(dataset)
    train_data_meta = copy.deepcopy(dataset)
    # Deleting the training indices leaves the meta samples, and vice versa.
    train_data_meta.img_path = np.delete(dataset.img_path,idx_to_train,axis=0)
    train_data_meta.labels = np.delete(dataset.labels, idx_to_train, axis=0)
    train_data.img_path = np.delete(dataset.img_path, idx_to_meta, axis=0)
    train_data.labels = np.delete(dataset.labels, idx_to_meta, axis=0)
    return train_data_meta, train_data
def get_img_num_per_cls(dataset, imb_factor=None, num_meta=None):
    """Per-class image counts for an exponentially imbalanced CIFAR split.

    Class c (0-based) receives ``img_max * imb_factor**(c / (cls_num - 1))``
    images, so class 0 keeps img_max samples and the last class keeps
    ``img_max * imb_factor``.

    Args:
        dataset: 'cifar10' or 'cifar100'.
        imb_factor: float imbalance factor img_min/img_max; None returns
            the balanced (uniform) counts.
        num_meta: number of samples reserved overall for the meta set.
    Returns:
        List of per-class image counts (ints; the balanced case keeps the
        original float value for backward compatibility).
    Raises:
        ValueError: for an unrecognized dataset name (previously this fell
            through to a NameError on img_max).
    """
    if dataset == 'cifar10':
        img_max = (50000-num_meta)/10
        cls_num = 10
    elif dataset == 'cifar100':
        img_max = (50000-num_meta)/100
        cls_num = 100
    else:
        raise ValueError('Unsupported dataset: {!r}'.format(dataset))
    if imb_factor is None:
        return [img_max] * cls_num
    img_num_per_cls = []
    for cls_idx in range(cls_num):
        num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
        img_num_per_cls.append(int(num))
    return img_num_per_cls
# This function is used to generate imbalanced test set
'''
def get_img_num_per_cls_test(dataset,imb_factor=None,num_meta=None):
"""
Get a list of image numbers for each class, given cifar version
Num of imgs follows emponential distribution
img max: 5000 / 500 * e^(-lambda * 0);
img min: 5000 / 500 * e^(-lambda * int(cifar_version - 1))
exp(-lambda * (int(cifar_version) - 1)) = img_max / img_min
args:
cifar_version: str, '10', '100', '20'
imb_factor: float, imbalance factor: img_min/img_max,
None if geting default cifar data number
output:
img_num_per_cls: a list of number of images per class
"""
if dataset == 'cifar10':
img_max = (10000-num_meta)/10
cls_num = 10
if dataset == 'cifar100':
img_max = (10000-num_meta)/100
cls_num = 100
if imb_factor is None:
return [img_max] * cls_num
img_num_per_cls = []
for cls_idx in range(cls_num):
num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
return img_num_per_cls
'''
| 3,467 | 31.111111 | 86 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/train.py |
import os
import time
import argparse
import random
import copy
import torch
import torchvision
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
from data_utils import *
# import resnet
from dataloader import load_data_distributed
import shutil
from ResNet import *
import loss
import multiprocessing
import torch.nn.parallel
import torch.nn as nn
from collections import Counter
import time
# Command-line interface for training: dataset selection, data paths,
# loader/batch sizes, optimizer and meta-learning hyper-parameters.
parser = argparse.ArgumentParser(description='Imbalanced Example')
parser.add_argument('--dataset', default='iNaturalist18', type=str,
                    help='dataset')
parser.add_argument('--data_root', default='/data1/TL/data/iNaturalist18', type=str)
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--num_classes', type=int, default=8142)
parser.add_argument('--num_meta', type=int, default=10,
                    help='The number of meta data for each class.')
parser.add_argument('--test_batch_size', type=int, default=512, metavar='N',
                    help='input batch size for testing (default: 100)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
                    help='number of epochs to train')
parser.add_argument('--lr', '--learning-rate', default=1e-1, type=float,
                    help='initial learning rate')
parser.add_argument('--workers', default=16, type=int)
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                    help='weight decay (default: 5e-4)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--split', type=int, default=1000)
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 42)')
parser.add_argument('--print-freq', '-p', default=1000, type=int,
                    help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int)
parser.add_argument('--lam', default=0.25, type=float)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--meta_lr', default=0.1, type=float)
args = parser.parse_args()
# print(args)
# Echo the full configuration so each run is self-describing in the logs.
for arg in vars(args):
    print("{}={}".format(arg, getattr(args, arg)))
# Environment, distributed setup, data pipelines and class-balanced weights.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
kwargs = {'num_workers': 1, 'pin_memory': True}
use_cuda = not args.no_cuda and torch.cuda.is_available()
cudnn.benchmark = True
cudnn.enabled = True
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
# One process per GPU under torch.distributed; WORLD_SIZE is set by the launcher.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
print(f'num_gpus: {num_gpus}')
args.distributed = num_gpus > 1
# Fixed: the original print was a plain string with a typo
# ("ditributed: {args.distributed}") and printed the braces literally.
print(f'distributed: {args.distributed}')
if args.distributed:
    torch.cuda.set_device(args.local_rank)
    #torch.distributed.init_process_group(backend="nccl", init_method="env://")
    torch.distributed.init_process_group(backend="nccl")
    # Split the global batch size evenly across processes.
    args.batch_size = int(args.batch_size / num_gpus)
######### ImageNet dataset
splits = ["train", "val", "test"]
# ImageNet_LT: meta set comes from its 'val' split and evaluation from
# 'test'; other datasets carve the meta set out of the training split.
if args.dataset == 'ImageNet_LT':
    train_set = load_data_distributed(data_root=args.data_root, dataset=args.dataset, phase="train", batch_size=args.batch_size,
                                 num_workers=args.workers, test_open=False, shuffle=False)
    val_set = load_data_distributed(data_root=args.data_root, dataset=args.dataset, phase="test", batch_size=args.test_batch_size,
                                 num_workers=args.workers, test_open=False, shuffle=False)
    meta_set = load_data_distributed(data_root=args.data_root, dataset=args.dataset, phase="val", batch_size=args.batch_size, num_workers=args.workers, test_open=False, shuffle=False)
else:
    train_set = load_data_distributed(data_root=args.data_root, dataset=args.dataset, phase="train", batch_size=args.batch_size,
                                 num_workers=args.workers, test_open=False, shuffle=False)
    val_set = load_data_distributed(data_root=args.data_root, dataset=args.dataset, phase="val", batch_size=args.test_batch_size,
                                 num_workers=args.workers, test_open=False, shuffle=False)
    meta_set = train_set
# Per-class meta sample counts differ by dataset size.
if args.dataset == 'iNaturalist17':
    meta_set, _ = build_dataset(meta_set, 5, args.num_classes)
elif args.dataset == 'iNaturalist18':
    meta_set, _ = build_dataset(meta_set, 2, args.num_classes)
else:
    meta_set, _ = build_dataset(meta_set, 10, args.num_classes)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=0,
                                           pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.test_batch_size, shuffle=False, num_workers=0, pin_memory=True)
meta_sampler = torch.utils.data.distributed.DistributedSampler(meta_set)
meta_loader = torch.utils.data.DataLoader(meta_set, batch_size=args.batch_size, shuffle=(meta_sampler is None), num_workers=0,
                                          pin_memory=True, sampler=meta_sampler)
np.random.seed(42)
random.seed(42)
torch.manual_seed(args.seed)
classe_labels = range(args.num_classes)
data_list = {}
data_list_num = []
# Per-class training sample counts.
num = Counter(train_loader.dataset.labels)
data_list_num = [0] * args.num_classes
for key in num:
    data_list_num[key] = num[key]
# Class-balanced weights proportional to (1 - beta) / (1 - beta**n_c),
# normalized to sum to the number of classes.
beta = 0.9999
effective_num = 1.0 - np.power(beta, data_list_num)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(data_list_num)
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
# Stage-1 pretrained checkpoint path per dataset.
model_dict = {"ImageNet_LT": "models/resnet50_uniform_e90.pth",
              "iNaturalist18": "models/iNat18/resnet50_uniform_e200.pth"}
def main():
    """Stage-2 training entry point (distributed).

    Wraps the FC classifier in DistributedDataParallel, loads pretrained
    classifier and (frozen) feature-extractor weights, then runs
    train_meta for each epoch with a linearly increasing augmentation
    ratio; rank 0 checkpoints after every epoch.
    """
    global args
    args = parser.parse_args()
    cudnn.benchmark = True
    print(torch.cuda.is_available())
    print(torch.cuda.device_count())
    print(f'local_rank: {args.local_rank}')
    model = FCModel(2048, args.num_classes)
    model = model.cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    weights = torch.load(model_dict[args.dataset], map_location=torch.device("cpu"))
    weights = weights['state_dict_best']['classifier']
    # Strip the DataParallel 'module.' prefix from checkpoint keys.
    weights = {k: weights['module.' + k] for k in model.module.state_dict()}
    for k in model.module.state_dict():
        # NOTE(review): weights was just rebuilt over exactly these keys,
        # so this warning can never fire — confirm intent.
        if k not in weights:
            print("Pretrained Weights Warning.")
    model.module.load_state_dict(weights)
    feature_extractor = create_model(stage1_weights=True, dataset=args.dataset, log_dir=model_dict[args.dataset])
    feature_extractor = feature_extractor.cuda()
    feature_extractor.eval()
    torch.autograd.set_detect_anomaly(True)
    torch.distributed.barrier()
    optimizer_a = torch.optim.SGD(model.module.parameters(), args.lr,
                                  momentum=args.momentum, nesterov=args.nesterov,
                                  weight_decay=args.weight_decay)
    criterion = loss.Loss_meta(2048, args.num_classes)
    for epoch in range(args.epochs):
        # Augmentation strength ramps linearly from 0 to args.lam.
        ratio = args.lam * float(epoch) / float(args.epochs)
        train_meta(train_loader, model, feature_extractor, optimizer_a, epoch, criterion, ratio)
        if args.local_rank == 0:
            save_checkpoint(args, {
                'epoch': epoch + 1,
                'state_dict': {'feature': feature_extractor.state_dict(), 'classifier': model.module.state_dict()},
                'optimizer' : optimizer_a.state_dict(),
            }, False, epoch)
def train_meta(train_loader, model, feature_extractor, optimizer_a, epoch, criterion, ratio):
    """One epoch of stage-2 meta-training.

    Per batch: (1) clone the classifier into a differentiable FCMeta copy,
    (2) take one virtual SGD step on the meta loss (graph kept for
    second-order gradients), (3) evaluate the stepped clone on a batch from
    `meta_loader` and backprop to the covariance `cv_var`, (4) update the
    covariance and train the real classifier with it.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    meta_losses = AverageMeter()
    top1 = AverageMeter()
    meta_top1 = AverageMeter()
    model.train()
    # NOTE(review): per_cls_weights is already a module-level CUDA tensor;
    # torch.tensor(tensor) copies and detaches it (clone().detach() is the
    # recommended spelling) — confirm the detach is intended.
    weights = torch.tensor(per_cls_weights).float()
    for i, (input, target) in enumerate(train_loader):
        input_var = input.cuda(non_blocking=True)
        target_var = target.cuda(non_blocking=True)
        # Current per-class covariance held by the meta criterion.
        cv = criterion.get_cv()
        cv_var = to_var(cv)  # differentiable copy used in the virtual step
        # Differentiable clone of the classifier head.
        meta_model = FCMeta(2048, args.num_classes)
        meta_model.load_state_dict(model.module.state_dict())
        meta_model.cuda()
        with torch.no_grad():
            feat_hat = feature_extractor(input_var)  # frozen features
        y_f_hat = meta_model(feat_hat)
        # named_leaves()[0][1] is the clone's linear weight tensor.
        cls_loss_meta = criterion(list(meta_model.fc.named_leaves())[0][1], feat_hat, y_f_hat, target_var, ratio,
                                  weights, cv_var, "none")
        meta_model.zero_grad()
        # create_graph=True keeps the graph so grads of grads flow to cv_var.
        grads = torch.autograd.grad(cls_loss_meta, (meta_model.params()), create_graph=True)
        meta_lr = args.lr
        # Virtual SGD step: the clone's weights become functions of cv_var.
        meta_model.fc.update_params(meta_lr, source_params=grads)
        # NOTE(review): meta_loader is a module-level global defined
        # elsewhere; next(iter(...)) re-creates the iterator each batch.
        input_val, target_val = next(iter(meta_loader))
        input_val_var = input_val.cuda(non_blocking=True)
        target_val_var = target_val.cuda(non_blocking=True)
        with torch.no_grad():
            feature_val = feature_extractor(input_val_var)
        y_val = meta_model(feature_val)
        cls_meta = F.cross_entropy(y_val, target_val_var)
        # Meta-gradient of the validation loss w.r.t. the covariance.
        grad_cv = torch.autograd.grad(cls_meta, cv_var, only_inputs=True)[0]
        new_cv = cv - args.meta_lr * grad_cv
        del grad_cv, grads, meta_model  # free the second-order graph early
        with torch.no_grad():
            features = feature_extractor(input_var)
        predicts = model(features)
        # "update" mode presumably makes the criterion store new_cv —
        # confirm in loss.Loss_meta.
        cls_loss = criterion(list(model.module.fc.parameters())[0], features, predicts, target_var, ratio, weights, new_cv.detach(), "update")
        prec_train = accuracy(predicts.data, target_var.data, topk=(1,))[0]
        losses.update(cls_loss.item(), input.size(0))
        top1.update(prec_train.item(), input.size(0))
        optimizer_a.zero_grad()
        cls_loss.backward()
        optimizer_a.step()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                epoch, i, len(train_loader),
                loss=losses,top1=top1))
def validate(val_loader, model, feature_extractor, criterion, epoch, local_rank, distributed):
    """Perform validation on the validation set.

    Returns (average top-1 accuracy, predicted labels, true labels).
    `criterion` is called as criterion(output, target), i.e. a plain loss
    such as cross-entropy (unlike the meta criterion used in training).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to evaluate mode
    true_labels = []
    preds = []
    if distributed:
        # Unwrap DistributedDataParallel so attributes are accessed directly.
        model = model.module
    torch.cuda.empty_cache()
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output (loss included under no_grad — no backward here)
        with torch.no_grad():
            feature = feature_extractor(input)
            output = model(feature)
            loss = criterion(output, target)
        output_numpy = output.data.cpu().numpy()
        preds_output = list(output_numpy.argmax(axis=1))
        true_labels += list(target.data.cpu().numpy())
        preds += preds_output
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0 and local_rank==0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                i, len(val_loader), batch_time=batch_time, loss=losses,
                top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg, preds, true_labels
def to_var(x, requires_grad=True):
    """Move *x* to the GPU when one is available and wrap it in a Variable."""
    target = x.cuda() if torch.cuda.is_available() else x
    return Variable(target, requires_grad=requires_grad)
class AverageMeter(object):
    """Tracks the latest value plus a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of 0-dim tensors, one per k: the percentage of samples whose
        true label appears among the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: use reshape, not view — the slice of a transposed tensor
        # is non-contiguous and .view(-1) raises on non-contiguous input.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def save_checkpoint(args, state, is_best, epoch):
    """Serialize *state* under checkpoint/train_<dataset>/, creating the
    directory on demand. (*is_best* is accepted but not used.)"""
    filename = (f"checkpoint/train_{args.dataset}/"
                f"{args.lr}_{args.batch_size}_{args.meta_lr}epoch{epoch}_ckpt.pth.tar")
    parent = os.path.dirname(filename)
    os.makedirs(parent, exist_ok=True)
    torch.save(state, filename)
# Standard entry-point guard: run training only when executed as a script.
if __name__ == '__main__':
    main()
| 13,612 | 38.005731 | 184 | py |
MetaSAug | MetaSAug-main/ImageNet_iNat/resnet_meta.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
import torch.nn.init as init
def to_var(x, requires_grad=True):
    """Return *x* as an autograd Variable, first moved to CUDA if available."""
    device_x = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_x, requires_grad=requires_grad)
class MetaModule(nn.Module):
    """Module whose learnable tensors are exposed via named_leaves() instead
    of nn.Parameter, so update_params() can rebind them to graph-connected
    tensors (needed for second-order meta-gradients)."""
    # adopted from: Adrien Ecoffet https://github.com/AdrienLE
    def params(self):
        # Flat iterator over all leaf tensors.
        for name, param in self.named_params(self):
            yield param
    def named_leaves(self):
        # Overridden by subclasses to list their (name, tensor) pairs.
        return []
    def named_submodules(self):
        return []
    def named_params(self, curr_module=None, memo=None, prefix=''):
        """Recursively yield (dotted_name, tensor) for every leaf, with
        `memo` deduplicating shared tensors."""
        if memo is None:
            memo = set()
        if hasattr(curr_module, 'named_leaves'):
            for name, p in curr_module.named_leaves():
                if p is not None and p not in memo:
                    memo.add(p)
                    yield prefix + ('.' if prefix else '') + name, p
        else:
            # Plain nn.Module child: fall back to its _parameters dict.
            for name, p in curr_module._parameters.items():
                if p is not None and p not in memo:
                    memo.add(p)
                    yield prefix + ('.' if prefix else '') + name, p
        for mname, module in curr_module.named_children():
            submodule_prefix = prefix + ('.' if prefix else '') + mname
            for name, p in self.named_params(module, memo, submodule_prefix):
                yield name, p
    def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):
        """In-place SGD step leaf <- leaf - lr_inner * grad.

        With `source_params`, gradients come from that iterable (typically
        torch.autograd.grad over self.params()); otherwise each leaf's own
        .grad is used. `first_order` detaches grads, cutting the
        second-order term; `detach` freezes leaves instead of updating.
        """
        if source_params is not None:
            for tgt, src in zip(self.named_params(self), source_params):
                name_t, param_t = tgt
                # name_s, param_s = src
                # grad = param_s.grad
                # name_s, param_s = src
                grad = src
                if first_order:
                    grad = to_var(grad.detach().data)
                tmp = param_t - lr_inner * grad
                self.set_param(self, name_t, tmp)
        else:
            for name, param in self.named_params(self):
                if not detach:
                    grad = param.grad
                    if first_order:
                        grad = to_var(grad.detach().data)
                    tmp = param - lr_inner * grad
                    self.set_param(self, name, tmp)
                else:
                    param = param.detach_()  # https://blog.csdn.net/qq_39709535/article/details/81866686
                    self.set_param(self, name, param)
    def set_param(self, curr_mod, name, param):
        """Walk the dotted `name` to the owning submodule and setattr there."""
        if '.' in name:
            n = name.split('.')
            module_name = n[0]
            rest = '.'.join(n[1:])
            for name, mod in curr_mod.named_children():
                if module_name == name:
                    self.set_param(mod, rest, param)
                    break
        else:
            setattr(curr_mod, name, param)
    def detach_params(self):
        # Replace every leaf with a history-free detached copy.
        for name, param in self.named_params(self):
            self.set_param(self, name, param.detach())
    def copy(self, other, same_var=False):
        """Copy leaves from `other`, cloning into fresh Variables unless same_var."""
        for name, param in other.named_params():
            if not same_var:
                param = to_var(param.data.clone(), requires_grad=True)
            # NOTE(review): set_param expects (curr_mod, name, param); this
            # 2-argument call would raise TypeError if copy() is ever used —
            # confirm whether copy() is dead code.
            self.set_param(name, param)
class MetaLinear(MetaModule):
    """Linear layer whose weight/bias live in buffers (not Parameters) so a
    meta-learner can rebind them to differentiable tensors."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Throwaway nn.Linear supplies the default initialization.
        template = nn.Linear(*args, **kwargs)
        self.register_buffer('weight', to_var(template.weight.data, requires_grad=True))
        self.register_buffer('bias', to_var(template.bias.data, requires_grad=True))

    def forward(self, x):
        return F.linear(x, self.weight, self.bias)

    def named_leaves(self):
        return [('weight', self.weight), ('bias', self.bias)]
# This layer will be used when the loss function is LDAM
class MetaLinear_Norm(MetaModule):
    """Cosine classifier: forward() returns normalize(x) @ normalize(W)
    with no bias, as used with LDAM-style losses."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        temp = nn.Linear(*args, **kwargs)
        # import pdb; pdb.set_trace()
        temp.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        # Stored transposed: shape (in_features, out_features).
        self.register_buffer('weight', to_var(temp.weight.data.t(), requires_grad=True))
        # NOTE(review): this second renorm operates on dim 1 of the
        # *transposed* weight, unlike the line above — confirm both
        # renormalizations are intended.
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        #self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
    def forward(self, x):
        out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
        return out
        # return F.linear(x, self.weight, self.bias)
    def named_leaves(self):
        return [('weight', self.weight)]#, ('bias', self.bias)]
class MetaConv2d(MetaModule):
    """Conv2d with weight/bias held as buffers so they can be swapped for
    graph-connected tensors during meta-updates."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        # Throwaway nn.Conv2d supplies hyper-parameters and default init.
        ignore = nn.Conv2d(*args, **kwargs)
        self.in_channels = ignore.in_channels
        self.out_channels = ignore.out_channels
        self.stride = ignore.stride
        self.padding = ignore.padding
        self.dilation = ignore.dilation
        self.groups = ignore.groups
        self.kernel_size = ignore.kernel_size
        self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
        if ignore.bias is not None:
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        else:
            self.register_buffer('bias', None)
    def forward(self, x):
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
    def named_leaves(self):
        return [('weight', self.weight), ('bias', self.bias)]
class MetaConvTranspose2d(MetaModule):
    """ConvTranspose2d analogue of MetaConv2d (buffers instead of Parameters)."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        ignore = nn.ConvTranspose2d(*args, **kwargs)
        self.stride = ignore.stride
        self.padding = ignore.padding
        self.dilation = ignore.dilation
        self.groups = ignore.groups
        self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
        if ignore.bias is not None:
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        else:
            self.register_buffer('bias', None)
    def forward(self, x, output_size=None):
        # NOTE(review): _output_padding is a method of the nn.ConvTranspose
        # classes, not of nn.Module/MetaModule, and its signature has changed
        # across PyTorch versions — confirm this forward actually runs on
        # the pinned torch version.
        output_padding = self._output_padding(x, output_size)
        return F.conv_transpose2d(x, self.weight, self.bias, self.stride, self.padding,
                                  output_padding, self.groups, self.dilation)
    def named_leaves(self):
        return [('weight', self.weight), ('bias', self.bias)]
class MetaBatchNorm1d(MetaModule):
    """BatchNorm1d whose affine parameters are buffers so a meta-learner can
    rebind them to differentiable tensors; mirrors nn._BatchNorm otherwise."""

    def __init__(self, *args, **kwargs):
        super(MetaBatchNorm1d, self).__init__()
        # Throwaway nn.BatchNorm1d supplies hyper-parameters and init.
        ignore = nn.BatchNorm1d(*args, **kwargs)
        self.num_features = ignore.num_features
        self.eps = ignore.eps
        self.momentum = ignore.momentum
        self.affine = ignore.affine
        self.track_running_stats = ignore.track_running_stats
        if self.affine:
            self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(self.num_features))
            self.register_buffer('running_var', torch.ones(self.num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)

    def reset_running_stats(self):
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))

    def forward(self, x):
        self._check_input_dim(x)
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:  # use cumulative moving average
                exponential_average_factor = 1.0 / self.num_batches_tracked.item()
            else:  # use exponential moving average
                exponential_average_factor = self.momentum
        # BUG FIX: self.momentum was previously passed here, leaving the
        # computed exponential_average_factor unused and breaking the
        # cumulative-average mode (momentum=None); mirror nn._BatchNorm.
        return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                            self.training or not self.track_running_stats,
                            exponential_average_factor, self.eps)

    def named_leaves(self):
        return [('weight', self.weight), ('bias', self.bias)]

    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
               'track_running_stats={track_running_stats}'.format(**self.__dict__)

    def _load_from_state_dict(self, state_dict, prefix, metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = metadata.get('version', None)
        if (version is None or version < 2) and self.track_running_stats:
            # at version 2: added num_batches_tracked buffer
            # this should have a default value of 0
            num_batches_tracked_key = prefix + 'num_batches_tracked'
            if num_batches_tracked_key not in state_dict:
                state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
        super(MetaBatchNorm1d, self)._load_from_state_dict(
            state_dict, prefix, metadata, strict,
            missing_keys, unexpected_keys, error_msgs)
class MetaBatchNorm2d(MetaModule):
    """BatchNorm2d with affine parameters in buffers. Unlike MetaBatchNorm1d
    above, this class keeps no num_batches_tracked buffer and does no
    input-dimension check."""
    def __init__(self, *args, **kwargs):
        super().__init__()
        ignore = nn.BatchNorm2d(*args, **kwargs)
        self.num_features = ignore.num_features
        self.eps = ignore.eps
        self.momentum = ignore.momentum
        self.affine = ignore.affine
        self.track_running_stats = ignore.track_running_stats
        if self.affine:
            self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(self.num_features))
            self.register_buffer('running_var', torch.ones(self.num_features))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
    def forward(self, x):
        return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                            self.training or not self.track_running_stats, self.momentum, self.eps)
    def named_leaves(self):
        return [('weight', self.weight), ('bias', self.bias)]
def _weights_init(m):
    """Kaiming-normal init for Meta linear/conv weights (used via .apply)."""
    if isinstance(m, MetaLinear) or isinstance(m, MetaConv2d):
        # init.kaiming_normal is a long-deprecated alias (removed in recent
        # PyTorch); kaiming_normal_ is the supported in-place spelling and
        # is numerically identical.
        init.kaiming_normal_(m.weight)
class LambdaLayer(MetaModule):
    """Wraps an arbitrary callable so it can sit inside a module graph
    (used for the parameter-free option-'A' ResNet shortcut)."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Simply delegate to the stored callable.
        return self.lambd(x)
class BasicBlock(MetaModule):
    """CIFAR ResNet basic block (two 3x3 convs) built from Meta layers.

    Option 'A' uses a parameter-free shortcut (spatial subsample + channel
    zero-padding, as in the original ResNet paper for CIFAR); option 'B'
    uses a 1x1 conv + BN shortcut.
    """
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = MetaConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = MetaBatchNorm2d(planes)
        self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = MetaBatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # ::2 subsamples H and W; the pad adds planes//4 zero
                # channels on each side of the channel dimension.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     MetaConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     MetaBatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)  # residual connection
        out = F.relu(out)
        return out
class BottleneckMeta(MetaModule):
    """Standard ResNet bottleneck (1x1 -> 3x3 -> 1x1, 4x channel expansion)
    built from Meta layers; `downsample` adapts the skip path if needed."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super(BottleneckMeta, self).__init__()
        self.conv1 = MetaConv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = MetaBatchNorm2d(planes)
        self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = MetaBatchNorm2d(planes)
        self.conv3 = MetaConv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = MetaBatchNorm2d(planes * self.expansion)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = F.relu(self.bn1(out))
        out = self.conv2(out)
        out = F.relu(self.bn2(out))
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Reshape the identity so shapes match for the addition.
            residual = self.downsample(x)
        out += residual
        out = F.relu(out)
        return out
class ResNet32(MetaModule):
    """CIFAR-style ResNet-32 (three stages of 5 BasicBlocks) in Meta layers.

    forward returns a tuple: (pooled 64-d features, classifier logits).
    """
    def __init__(self, num_classes, block=BasicBlock, num_blocks=[5, 5, 5]):
        super(ResNet32, self).__init__()
        self.in_planes = 16
        self.conv1 = MetaConv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = MetaBatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = MetaLinear(64, num_classes)  # MetaLinear_Norm(64,num_classes,bias=False) #
        self.apply(_weights_init)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool over the full spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        y = self.linear(out)
        return out, y
class FeatureMeta(MetaModule):
    """ImageNet-style ResNet backbone built from Meta layers.

    forward returns the pooled, flattened feature vector (the optional
    classifier fc is commented out; use_fc currently has no effect).
    """
    def __init__(self, block, num_blocks, use_fc=False, dropout=None):
        super(FeatureMeta, self).__init__()
        self.inplanes = 64
        self.conv1 = MetaConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = MetaBatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # self.avgpool = nn.AvgPool2d(7, stride=1)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.use_fc = use_fc
        self.use_dropout = True if dropout else False
        #if self.use_fc:
            #print('Using fc.')
            #self.fc = MetaLinear(512*block.expansion, 8142)
        if self.use_dropout:
            print('Using dropout')
            self.dropout = nn.Dropout(p=dropout)
        # He-style init for convs; BN weights to 1, biases to 0.
        for m in self.modules():
            if isinstance(m, MetaConv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, MetaBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, num_blocks, stride):
        # 1x1-conv + BN downsample shortcut whenever the shape changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                MetaConv2d(self.inplanes, planes * block.expansion,
                           kernel_size=1, stride=stride, bias=False),
                MetaBatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, num_blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        #if self.use_fc:
            #y = F.relu(self.fc(x))
        if self.use_dropout:
            x = self.dropout(x)
        return x
class FCMeta(MetaModule):
    """Single meta-learnable fully connected classifier head."""

    def __init__(self, feature_dim=2048, output_dim=1000):
        super(FCMeta, self).__init__()
        self.fc = MetaLinear(feature_dim, output_dim)

    def forward(self, x):
        # Pure pass-through to the meta-linear layer.
        return self.fc(x)
class FCModel(nn.Module):
    """Plain (non-meta) fully connected classifier head."""

    def __init__(self, feature_dim, output_dim=1000):
        super(FCModel, self).__init__()
        self.fc = nn.Linear(feature_dim, output_dim)

    def forward(self, x):
        # Pure pass-through to the linear layer.
        return self.fc(x)
| 18,152 | 35.306 | 120 | py |
coocmap | coocmap-main/match.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
from collections import Counter
import numpy as np
import embeddings
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
MAX_SVD_DIM = 5000 # maximum SVD to avoid long compute time
### initialization methods ###
def vecmap_unsup(x, z, norm_proc=None):
    """Unsupervised initial similarity (VecMap-style).

    Each side is replaced by its self-similarity U S U^T; sorting each row
    turns it into an order-free signature comparable across languages.
    After normalization, the cross dot product is the initial similarity.

    Args:
        x, z: embedding matrices (rows = words).
        norm_proc: pipeline for embeddings.normalize;
            defaults to ['unit', 'center', 'unit'].
    """
    # BUG FIX: norm_proc used to be unconditionally reassigned below, so the
    # parameter was silently ignored (it was also a mutable default).
    if norm_proc is None:
        norm_proc = ['unit', 'center', 'unit']
    print('maxdim', MAX_SVD_DIM)
    u, s, vt = np.linalg.svd(x, full_matrices=False)
    xsim = (u*s).dot(u.T)
    u, s, vt = np.linalg.svd(z, full_matrices=False)
    zsim = (u*s).dot(u.T)
    del u, s, vt
    # Sorted rows are permutation-invariant signatures.
    xsim.sort(axis=1)
    zsim.sort(axis=1)
    embeddings.normalize(xsim, norm_proc)
    embeddings.normalize(zsim, norm_proc)
    sim = xsim.dot(zsim.T)
    return sim
def match_sim(xsim, zsim, sort=True, metric='cosine', norm_proc=None):
    """Cosine similarity between (optionally sorted) row signatures.

    Truncates both matrices to the smaller column count, optionally sorts
    each row (permutation-invariant signatures), normalizes both with
    embeddings.normalize, and returns xsim @ zsim.T.

    Note: `metric` is currently unused; cosine (dot product after unit
    normalization) is always computed.
    """
    # BUG FIX: use a None sentinel instead of a mutable default list.
    if norm_proc is None:
        norm_proc = ['unit', 'center', 'unit']
    sim_size = min(xsim.shape[1], zsim.shape[1])
    xsim = np.array(xsim[:, :sim_size])
    zsim = np.array(zsim[:, :sim_size])
    if sort:
        xsim.sort(axis=1)
        zsim.sort(axis=1)
    embeddings.normalize(xsim, norm_proc)
    embeddings.normalize(zsim, norm_proc)
    sim = xsim @ zsim.T
    return sim
### main search loops ###
def vecmap(x: np.ndarray, z: np.ndarray, args, sim_init=None, evalf=None):
    """VecMap-style self-learning loop over embeddings x and z.

    Alternates (a) fitting a linear map on the current matched pairs
    (orthogonal Procrustes or least squares, per args.method) and
    (b) re-matching under the mapped similarities, until the objective
    stops improving. Returns (inds1, inds2, sim).

    NOTE(review): keep_prob is annealed and only gates termination here —
    it is not used to drop dictionary entries as in original VecMap.
    """
    print('running vecmap', x.shape)
    keep_prob = args.stochastic_initial
    best_objective = float('-inf')
    last_improvement = 0
    end = False
    inds1, inds2 = 0, 0
    for it in range(args.maxiter):
        if it - last_improvement > args.stochastic_interval:
            # maxswaps = max(1, maxswaps - 1)
            if keep_prob == 1:
                end = True
            keep_prob = min(1.0, args.stochastic_multiplier * keep_prob)
            last_improvement = it
        if it == 0:
            if sim_init is not None:
                sim = sim_init
            else:
                sim = vecmap_unsup(x, z, norm_proc=['unit', 'center', 'unit'])
        else:
            # rotation
            if args.method == 'orthogonal':
                # Procrustes: closest orthogonal map for the current matches.
                u, s, vt = np.linalg.svd(x[inds1].T @ z[inds2])
                w = u @ vt
            elif args.method == 'lstsq':
                w, r, r, s = np.linalg.lstsq(x[inds1], z[inds2], rcond=1e-5)
            sim = x @ w @ z.T
        #
        if args.csls:
            sim = most_diff_match(sim, 10)
        inds1, inds2, evalsim = match(sim, args.match)
        if evalf is not None:
            evalf(evalsim)
        # Objective: mean of the best forward/backward similarities.
        objf = np.mean(sim.max(axis=1))
        objb = np.mean(sim.max(axis=0))
        objective = (objf + objb) / 2
        print(f'{it} {keep_prob} \t{objf:.4f}\t{objective:.4f}\t{best_objective:.4f}')
        if objective >= best_objective + args.threshold:
            last_improvement = it
            if it != 0:
                best_objective = objective
        if end:
            break
    return inds1, inds2, sim
def coocmapt(Cp1: np.ndarray, Cp2: np.ndarray, args, normproc=['unit'], sim_init=None, evalf=None):
    """
    basic coocmap using just numpy but only works for cosine distance

    Self-learning on co-occurrence matrices: repeatedly match rows of Cp1
    to rows of Cp2, restrict both to the matched columns, normalize, and
    rebuild similarity as a dot product. Stops args.stochastic_interval
    iterations after the last objective improvement.
    NOTE(review): normproc is a mutable default argument; harmless only if
    embeddings.normalize never mutates the list — confirm.
    """
    best_objective = float('-inf')
    last_improvement = 0
    end = False
    inds1, inds2 = 0, 0
    simd = 0
    for it in range(args.maxiter):
        if it - last_improvement > args.stochastic_interval:
            end = True
        if it == 0:
            if sim_init is not None:
                sim = sim_init
            else:
                # Initial similarity from sorted-row signatures.
                sim = match_sim(Cp1, Cp2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
            sim_init = sim
            # sim = vecmap_unsup(Cp1, Cp2)
        if args.csls:
            sim = most_diff_match(sim, 10)
        inds1, inds2, evalsim = match(sim, args.match)
        if evalf is not None:
            evalf(evalsim)
        if end:
            break
        uniqf2 = uniqb1 = len(inds1)
        # Restrict columns to the currently matched index pairs so both
        # sides are described in a shared "vocabulary".
        Cp1f = Cp1[:, inds1]
        Cp2f = Cp2[:, inds2]
        embeddings.normalize(Cp1f, normproc)
        embeddings.normalize(Cp2f, normproc)
        # maybe these matches
        sim = Cp1f @ Cp2f.T
        # X = torch.from_numpy(Cp1f)
        # Y = torch.from_numpy(Cp2f)
        # sim = -torch.cdist(X, Y, p=2).numpy()
        objf = np.mean(np.max(sim, axis=1))
        objb = np.mean(np.max(sim, axis=0))
        objective = 0.5 * (objf + objb)
        if objective > best_objective:
            last_improvement = it
            if it > 0: # the initial round use a different matrix and should not be compared
                best_objective = objective
        print(f'objective {it} \t{objf:.5f} \t{objective:.5f} \t {best_objective:.5f} \t {uniqf2} \t {uniqb1}')
    return inds1, inds2, sim
def coocmapl1(Cp1: np.ndarray, Cp2: np.ndarray, args, normproc=['unit'], sim_init=None, evalf=None):
    """
    duplicated code using cdistance from torch, mainly to test l1 distance

    Identical to coocmapt except the inner similarity is the negated L1
    (cityblock) distance computed with torch.cdist (on GPU when available)
    instead of a dot product.
    """
    best_objective = float('-inf')
    last_improvement = 0
    end = False
    inds1, inds2 = 0, 0
    simd = 0
    for it in range(args.maxiter):
        if it - last_improvement > args.stochastic_interval:
            end = True
        if it == 0:
            if sim_init is not None:
                sim = sim_init
            else:
                # Initial similarity from sorted-row signatures (cosine).
                sim = match_sim(Cp1, Cp2, sort=True, metric='cosine', norm_proc=['unit', 'center', 'unit'])
            sim_init = sim
            # sim = vecmap_unsup(Cp1, Cp2)
        if args.csls:
            sim = most_diff_match(sim, 10)
        inds1, inds2, evalsim = match(sim, args.match)
        if evalf is not None:
            evalf(evalsim)
        if end:
            break
        uniqf2 = uniqb1 = len(inds1)
        # Restrict columns to the currently matched index pairs.
        Cp1f = Cp1[:, inds1]
        Cp2f = Cp2[:, inds2]
        embeddings.normalize(Cp1f, normproc)
        embeddings.normalize(Cp2f, normproc)
        # maybe these matches
        # sim = Cp1f @ Cp2f.T
        import torch
        if torch.cuda.is_available():
            X = torch.from_numpy(Cp1f).cuda()
            Y = torch.from_numpy(Cp2f).cuda()
            sim = -torch.cdist(X, Y, p=1).cpu().numpy()
        else:
            X = torch.from_numpy(Cp1f)
            Y = torch.from_numpy(Cp2f)
            sim = -torch.cdist(X, Y, p=1).numpy()
        # this is only approximately a greedy method, as this objective is not guaranteed to increase
        objf = np.mean(np.max(sim, axis=1))
        objb = np.mean(np.max(sim, axis=0))
        objective = 0.5 * (objf + objb)
        if objective > best_objective:
            last_improvement = it
            if it > 0: # the initial round use a different matrix and should not be compared
                best_objective = objective
        print(f'objective {it} \t{objf:.5f} \t{objective:.5f} \t {best_objective:.5f} \t {uniqf2} \t {uniqb1}')
    return inds1, inds2, sim
def svd_power(X, beta=1, drop=None, dim=None, symmetric=False):
    """Return X rebuilt with transformed singular values.

    The SVD X = U S V^T is taken, then S is (1) truncated to the top *dim*
    values, (2) raised elementwise to the power *beta*, and (3) zeroed at
    the indices given by *drop* (either an index array or an int prefix
    length). With symmetric=True the result is U S U^T instead of U S V^T.
    """
    u, s, vt = np.linalg.svd(X, full_matrices=False)
    print('np.power(s)', np.power(s, 1).sum())
    if dim is not None:
        # s = np.sqrt(np.maximum(0, s**2 - s[dim]**2))
        # s = np.maximum(0, s - s[dim])
        s[dim:] = 0
    print('np.power(s_dim)', np.power(s, 1).sum())
    # BUG FIX: the power used to be guarded by a duplicated
    # `if dim is not None`, so beta was silently ignored unless dim was
    # also given; apply it unconditionally.
    s = np.power(s, beta)
    if drop is not None:
        if isinstance(drop, np.ndarray):
            s[list(drop)] = 0
        elif isinstance(drop, int):
            s[:drop] = 0
        print('np.power(s_drop)', np.power(s, 1).sum())
    if symmetric:
        res = (u * s) @ u.T
    else:
        res = (u * s) @ vt
    norm = np.linalg.norm(res - X, ord='fro')
    normX = np.linalg.norm(X, ord='fro')
    print(f'diff {norm:.2e} / {normX:.2e}')
    return res
def sim_vecs(Co, dim, alpha=0.5, beta=1):
    """Low-rank word vectors: top-*dim* left singular vectors of Co**alpha,
    each scaled by its singular value raised to *beta*."""
    capped = min(Co.shape[1], 10000)  # cap columns to bound SVD cost
    trimmed = Co[:, :capped]
    u, s, _ = np.linalg.svd(np.power(trimmed, alpha), full_matrices=False)
    return u[:, :dim] * np.power(s[:dim], beta)
### matching methods ###
def greedy_match(sim0, iters=10):
    """Iterative mutual-argmax matching.

    Each round keeps the mutually-best pairs (row argmax and column argmax
    agree), then freezes them: their row/column is set to NaN and the pair
    cell to +inf so later rounds only extend the matching. Returns
    (matched column indices, their row assignments, the modified sim).
    """
    sim = sim0.copy()
    for i in range(iters):
        # if sim is n by m, am1 is size m, am0 is size n
        am1 = np.nanargmax(sim, axis=0)
        am0 = np.nanargmax(sim, axis=1)
        bi0 = am0[am1] == np.arange(sim.shape[1])  # columns whose best row agrees
        bi1 = am1[am0] == np.arange(sim.shape[0])  # rows whose best column agrees
        assert bi0.sum() == bi1.sum()
        bimatches = bi0.sum()
        uniques = len(np.unique(am0)), len(np.unique(am1))
        # Mean hit count of the 3 most-targeted indices (hubness diagnostic).
        hubs = np.mean([c for _, c in Counter(am0).most_common(3)])
        # NOTE(review): take_along_axis with am1 (length = #columns) along
        # axis 1 of sim0 (#rows) implicitly assumes sim is square — confirm.
        value = np.take_along_axis(sim0, am1[:, None], axis=1).mean()
        stats = {'bimatches': bimatches, 'uniques': uniques, 'hubs': hubs, 'value': value}
        print(stats)
        if bimatches > 0.98 * min(*sim.shape):
            break
        # NOTE(review): this inner loop shadows the outer loop variable `i`.
        for i in range(sim.shape[0]):
            if bi1[i]:
                sim[i] = float('nan')
                sim[:, am0[i]] = float('nan')
                sim[i, am0[i]] = float('inf')
    return np.arange(sim.shape[1])[bi0], am0[bi0], sim
def most_diff_match(sim0, k):
    """CSLS-style rescaling: subtract from each entry half the sum of the
    mean top-k similarity of its column and of its row (hub penalty)."""
    sim = sim0.copy()
    # np.partition with kth=k places the k largest of each column/row
    # (after negation) in the first k slots, unordered.
    col_topk = -np.partition(-sim, kth=k, axis=0)[:k, :]
    row_topk = -np.partition(-sim, kth=k, axis=1)[:, :k]
    col_mean = col_topk.mean(axis=0, keepdims=True)
    row_mean = row_topk.mean(axis=1, keepdims=True)
    return sim - 0.5 * (col_mean + row_mean)
def forward_backward_match(sim):
    """Union of forward (each row -> best column) and backward (each column
    -> best row) nearest-neighbor matches; also prints a hub diagnostic."""
    n_rows, n_cols = sim.shape
    fwd_cols = np.argmax(sim, axis=1)   # best column per row
    bwd_rows = np.argmax(sim, axis=0)   # best row per column
    inds1 = np.concatenate((np.arange(n_rows), bwd_rows))
    inds2 = np.concatenate((fwd_cols, np.arange(n_cols)))
    print('hubs', Counter(fwd_cols).most_common(3), Counter(bwd_rows).most_common(3))
    return inds1, inds2, sim
def match(sim, method):
    """Dispatch to a matching strategy: 'vecmap' (forward/backward union)
    or 'coocmap' (iterative greedy mutual matching)."""
    if method == 'coocmap':
        return greedy_match(sim, iters=10)
    if method == 'vecmap':
        return forward_backward_match(sim)
### clipping ###
def clipthres(A, p1, p2):
    """Two-stage percentile threshold: the p2-th percentile of the per-row
    p1-th percentiles of A. Also prints the fraction of entries above it."""
    row_stats = np.percentile(A, p1, axis=1, keepdims=True)
    threshold = np.percentile(row_stats, p2)
    print('percent greater \t', np.sum((A > threshold) * 1) / A.size)
    return threshold
def clipBoth(A, r1, r2):
    """Return (lower, upper) clipping bounds from the symmetric percentile
    pairs (r1, r2) and (100-r1, 100-r2)."""
    upper = clipthres(A, r1, r2)
    lower = clipthres(A, 100 - r1, 100 - r2)
    print('clipped', lower, upper)
    return lower, upper
def clip(A, r1=99, r2=99):
    """Clamp *A* in place to the [lb, ub] range chosen by clipBoth."""
    lb, ub = clipBoth(A, r1, r2)
    np.clip(A, lb, ub, out=A)
| 10,281 | 32.061093 | 111 | py |
MateriAppsInstaller | MateriAppsInstaller-master/docs/sphinx/en/source/conf.py | # -*- coding: utf-8 -*-
#
# MateriApps-Installer documentation build configuration file, created by
# sphinx-quickstart on Sun May 1 14:29:22 2020.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this manual.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# Static assets shared across language builds (note: an '_static' variant
# is commented out later in the HTML options section).
html_static_path = ["../../_static"]
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MateriApps Installer'
copyright = u'2013-, the University of Tokyo'
author = u'MateriApps Installer Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.1'
# The full version, including alpha/beta/rc tags.
release = u'1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# BUG FIX: this option was misspelled "html_log", which Sphinx silently
# ignores; "html_logo" is the option that actually sets the project logo.
html_logo = "logo.png"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'logo': 'logo.png',
    'font_family': 'Helvetica',
    'sidebar_search_button': 'pink_1'
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Number figures, tables and code blocks so :numref: works.
numfig = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MAInstallerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MAInstaller.tex', u'MateriApps Installer Documentation',
u'MateriApps Installer Development Team', 'manual', 'True'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'MateriApps Installer', u'MateriApps Installer Documentation',
[author], 1)
]
latex_logo = "../../_static/logo.png"
#latex_docclass = {'manual': 'jsbook'}
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MateriApps Installer', u'MateriApps Installer Documentation',
author, 'MateriApps Installer', 'One line description of project.',
'Miscellaneous'),
]
# NOTE(review): this re-assignment is an exact duplicate of the
# html_sidebars dict defined earlier in this file; it is redundant and the
# later assignment simply wins. Safe to delete one of the two.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}
| 5,684 | 28.455959 | 79 | py |
MateriAppsInstaller | MateriAppsInstaller-master/docs/sphinx/ja/source/conf.py | # -*- coding: utf-8 -*-
#
# MateriApps-Installer documentation build configuration file, created by
# sphinx-quickstart on Sun May 1 14:29:22 2020.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
html_static_path = ["../../_static"]
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MateriApps Installer'
copyright = u'2013-, the University of Tokyo'
author = u'MateriApps Installer Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.1'
# The full version, including alpha/beta/rc tags.
release = u'1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ja'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Fixed typo: this was `html_log`, which is not a recognized Sphinx option
# and was silently ignored, so the assignment had no effect. `html_logo`
# is the real core option (the alabaster theme also receives the logo via
# the 'logo' entry in html_theme_options below).
html_logo = "logo.png"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'logo.png',
'font_family': 'Helvetica',
'sidebar_search_button': 'pink_1'
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
numfig = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MAInstallerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Grouping the document tree into LaTeX files. The 6th field is
# toctree_only, which Sphinx documents as a boolean. The original passed
# the string 'True', which only worked because non-empty strings are
# truthy; use the actual boolean.
latex_documents = [
    (master_doc, 'MAInstaller_ja.tex', u'MateriApps Installer Documentation',
     u'MateriApps Installer Development Team', 'manual', True),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'MateriApps Installer', u'MateriApps Installer Documentation',
[author], 1)
]
latex_docclass = {'manual': 'jsbook'}
latex_logo = "../../_static/logo.png"
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MateriApps Installer', u'MateriApps Installer Documentation',
author, 'MateriApps Installer', 'One line description of project.',
'Miscellaneous'),
]
# NOTE(review): this re-assignment is an exact duplicate of the
# html_sidebars dict defined earlier in this file; it is redundant and the
# later assignment simply wins. Safe to delete one of the two.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}
| 5,686 | 28.466321 | 79 | py |
harmonic | harmonic-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'Harmonic'
copyright = '2021, Jason D. McEwen, Christopher G. R. Wallis, Matthew A. Price, Matthew M. Docherty'
author = 'Jason D. McEwen, Christopher G. R. Wallis, Matthew A. Price, Matthew M. Docherty'
# The short X.Y version
version = '1.1.1'
# The full version, including alpha/beta/rc tags
release = '1.1.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'nbsphinx_link',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx_rtd_theme',
'sphinx_rtd_dark_mode',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
'sphinx_tabs.tabs',
'sphinx_git',
'sphinxcontrib.bibtex',
'sphinxcontrib.texfigure',
'sphinx.ext.autosectionlabel',
]
bibtex_bibfiles = ['assets/refs.bib']
bibtex_default_style = 'unsrt'
nbsphinx_execute = 'never'
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_numpy_docstring = False
#autosummary_generate = True
#autoclass_content = "class"
#autodoc_default_flags = ["members", "no-special-members"]
#always_document_param_types = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.ipynb']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5.0 warns on `language = None` and falls back to
# 'en'; consider setting language = 'en' explicitly.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
default_dark_mode = False
sphinx_tabs_disable_css_loading = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_logo = "assets/harm_badge.png"
html_logo = "assets/harm_badge_simple.svg"
# html_logo = "assets/harm_logo.png"
html_theme_options = {
'logo_only': True,
'display_version': True,
# 'style_nav_header_background': '#C48EDC',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
#html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/custom.css',
'css/custom_tabs.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Harmonicdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Harmonic.tex', 'Harmonic Documentation',
'Jason D. McEwen, Christopher G. R. Wallis, Matthew A. Price, Matthew M. Docherty', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'harmonic', 'Harmonic Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Harmonic', 'Harmonic Documentation',
author, 'Harmonic', 'Learnt harmonic mean estimator',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
suppress_warnings = [ 'autosectionlabel.*', 'autodoc','autodoc.import_object']
# -- Extension configuration -------------------------------------------------
| 6,728 | 29.586364 | 100 | py |
DeepGAR | DeepGAR-main/test.py | from common import utils
from collections import defaultdict
from datetime import datetime
from sklearn.metrics import roc_auc_score, confusion_matrix
from sklearn.metrics import precision_recall_curve, average_precision_score
import torch
USE_ORCA_FEATS = False # whether to use orca motif counts along with embeddings
MAX_MARGIN_SCORE = 1e9 # a very large margin score to given orca constraints
def validation(args, model, test_pts, logger, batch_n, epoch, verbose=False):
    """Evaluate the model on held-out (query, target) batch pairs.

    Computes graph-level subgraph classification metrics (accuracy,
    precision, recall, AUROC, average precision, confusion matrix) and the
    analogous node-alignment metrics, prints both, and — unless
    ``args.test`` is set — logs the graph-level metrics to TensorBoard and
    checkpoints the model to ``args.model_path``.

    args: command-line namespace (uses method_type, test, model_path)
    model: matching model exposing emb_model / predict / clf_model(_nodes)
    test_pts: list of (pos_a, pos_b, neg_a, neg_b) batches; pos_a may be falsy
    logger: TensorBoard SummaryWriter
    batch_n, epoch: step counters used only for logging/printing
    verbose: also save a PR-curve plot and collect confusion-matrix examples

    NOTE(review): the node-level bookkeeping assumes
    ``args.method_type == "order"``; for "ensemble"/"mlp" the variables
    pred_nodes/raw_pred_nodes are never assigned and the appends below would
    raise NameError — confirm callers only use "order".
    """
    # test on new motifs
    model.eval()
    all_raw_preds, all_preds, all_labels = [], [], []
    all_raw_preds_nodes, all_preds_nodes, all_labels_nodes = [], [], []
    for pos_a, pos_b, neg_a, neg_b in test_pts:
        if pos_a:
            pos_a = pos_a.to(utils.get_device())
            pos_b = pos_b.to(utils.get_device())
        neg_a = neg_a.to(utils.get_device())
        neg_b = neg_b.to(utils.get_device())
        # Graph-level labels: 1 for positive (subgraph) pairs, 0 for negatives.
        labels = torch.tensor([1]*(pos_a.num_graphs if pos_a else 0) +
            [0]*neg_a.num_graphs).to(utils.get_device())
        # Ground-truth alignment matrix for each positive pair:
        # entry (i, j) = 1 iff target node i maps to query node j.
        align_mat_batch = []
        for sample_idx in range(len(pos_b.node_label)):
            align_mat = torch.zeros(len(pos_a.node_label[sample_idx]), len(pos_b.node_label[sample_idx]))
            for i, a_n in enumerate(pos_a.node_label[sample_idx]):
                if a_n in pos_b.alignment[sample_idx]:
                    align_mat[i][pos_b.alignment[sample_idx].index(a_n)] = 1
            align_mat_batch.append(align_mat)
        align_mat_batch_all = []
        for i, align_mat in enumerate(align_mat_batch):
            align_mat_batch_all.append(align_mat.flatten())
        labels_nodes = torch.cat(align_mat_batch_all, dim=-1)
        with torch.no_grad():
            # emb_model returns (graph embedding, per-node embeddings).
            (emb_neg_a, emb_neg_a_nodes), (emb_neg_b, emb_neg_b_nodes) = (model.emb_model(neg_a),
                model.emb_model(neg_b))
            if pos_a:
                (emb_pos_a, emb_pos_a_nodes), (emb_pos_b, emb_pos_b_nodes) = (model.emb_model(pos_a),
                    model.emb_model(pos_b))
                emb_as = torch.cat((emb_pos_a, emb_neg_a), dim=0)
                emb_bs = torch.cat((emb_pos_b, emb_neg_b), dim=0)
            else:
                emb_as, emb_bs = emb_neg_a, emb_neg_b
            pred = model(emb_as, emb_bs, emb_pos_a_nodes, emb_pos_b_nodes)
            raw_pred, raw_pred_nodes = model.predict(pred)
            if USE_ORCA_FEATS:
                import orca
                import matplotlib.pyplot as plt
                def make_feats(g):
                    counts5 = np.array(orca.orbit_counts("node", 5, g))
                    for v, n in zip(counts5, g.nodes):
                        if g.nodes[n]["node_feature"][0] > 0:
                            anchor_v = v
                            break
                    v5 = np.sum(counts5, axis=0)
                    return v5, anchor_v
                for i, (ga, gb) in enumerate(zip(neg_a.G, neg_b.G)):
                    (va, na), (vb, nb) = make_feats(ga), make_feats(gb)
                    if (va < vb).any() or (na < nb).any():
                        # Orbit counts rule out subgraph-ness: force a huge
                        # margin so this negative is scored as a negative.
                        raw_pred[pos_a.num_graphs + i] = MAX_MARGIN_SCORE
            if args.method_type == "order":
                pred = model.clf_model(raw_pred.unsqueeze(1)).argmax(dim=-1)
                pred_nodes = model.clf_model_nodes(raw_pred_nodes.unsqueeze(1)).argmax(dim=-1)
                # Negate so larger raw scores mean "more likely positive"
                # for the AUROC/AP computations below.
                raw_pred *= -1
                raw_pred_nodes *=-1
            elif args.method_type == "ensemble":
                pred = torch.stack([m.clf_model(
                    raw_pred.unsqueeze(1)).argmax(dim=-1) for m in model.models])
                for i in range(pred.shape[1]):
                    print(pred[:,i])
                # A pair is positive only if every ensemble member agrees.
                pred = torch.min(pred, dim=0)[0]
                raw_pred *= -1
            elif args.method_type == "mlp":
                raw_pred = raw_pred[:,1]
                pred = pred.argmax(dim=-1)
        all_raw_preds.append(raw_pred)
        all_preds.append(pred)
        all_labels.append(labels)
        # Nodes
        all_raw_preds_nodes.append(raw_pred_nodes)
        all_preds_nodes.append(pred_nodes)
        all_labels_nodes.append(labels_nodes)
    # ---- graph-level metrics over all batches ----
    pred = torch.cat(all_preds, dim=-1)
    labels = torch.cat(all_labels, dim=-1)
    raw_pred = torch.cat(all_raw_preds, dim=-1)
    acc = torch.mean((pred == labels).type(torch.float))
    prec = (torch.sum(pred * labels).item() / torch.sum(pred).item() if
        torch.sum(pred) > 0 else float("NaN"))
    recall = (torch.sum(pred * labels).item() /
        torch.sum(labels).item() if torch.sum(labels) > 0 else
        float("NaN"))
    labels = labels.detach().cpu().numpy()
    raw_pred = raw_pred.detach().cpu().numpy()
    pred = pred.detach().cpu().numpy()
    # NOTE(review): this converts only the *last batch's* pred_nodes and is
    # immediately overwritten by the torch.cat below — dead statement.
    pred_nodes = pred_nodes.detach().cpu().numpy()
    auroc = roc_auc_score(labels, raw_pred)
    avg_prec = average_precision_score(labels, raw_pred)
    tn, fp, fn, tp = confusion_matrix(labels, pred).ravel()
    # ---- node-level (alignment) metrics over all batches ----
    # Node Level
    pred_nodes = torch.cat(all_preds_nodes, dim=-1)
    labels_nodes = torch.cat(all_labels_nodes, dim=-1)
    raw_pred_nodes = torch.cat(all_raw_preds_nodes, dim=-1)
    acc_nodes = torch.mean((pred_nodes == labels_nodes).type(torch.float))
    prec_nodes = (torch.sum(pred_nodes * labels_nodes).item() / torch.sum(pred_nodes).item() if
        torch.sum(pred_nodes) > 0 else float("NaN"))
    recall_nodes = (torch.sum(pred_nodes * labels_nodes).item() /
        torch.sum(labels_nodes).item() if torch.sum(labels_nodes) > 0 else
        float("NaN"))
    labels_nodes = labels_nodes.detach().cpu().numpy()
    raw_pred_nodes = raw_pred_nodes.detach().cpu().numpy()
    pred_nodes = pred_nodes.detach().cpu().numpy()
    auroc_nodes = roc_auc_score(labels_nodes, raw_pred_nodes)
    avg_prec_nodes = average_precision_score(labels_nodes, raw_pred_nodes)
    tn_nd, fp_nd, fn_nd, tp_nd = confusion_matrix(labels_nodes, pred_nodes).ravel()
    if verbose:
        import matplotlib.pyplot as plt
        precs, recalls, threshs = precision_recall_curve(labels, raw_pred)
        plt.plot(recalls, precs)
        plt.xlabel("Recall")
        plt.ylabel("Precision")
        plt.savefig("plots/precision-recall-curve.png")
        print("Saved PR curve plot in plots/precision-recall-curve.png")
    print("\n{}".format(str(datetime.now())))
    print("Validation. Epoch {}. Acc: {:.4f}. "
        "P: {:.4f}. R: {:.4f}. AUROC: {:.4f}. AP: {:.4f}.\n     "
        "TN: {}. FP: {}. FN: {}. TP: {}".format(epoch,
            acc, prec, recall, auroc, avg_prec,
            tn, fp, fn, tp))
    print("Validation Node Level. Epoch {}. Acc: {:.4f}. "
        "P: {:.4f}. R: {:.4f}. AUROC: {:.4f}. AP: {:.4f}.\n     "
        "TN: {}. FP: {}. FN: {}. TP: {}".format(epoch,
            acc_nodes, prec_nodes, recall_nodes, auroc_nodes, avg_prec_nodes,
            tn_nd, fp_nd, fn_nd, tp_nd))
    if not args.test:
        # Only log + checkpoint during training-time validation.
        logger.add_scalar("Accuracy/test", acc, batch_n)
        logger.add_scalar("Precision/test", prec, batch_n)
        logger.add_scalar("Recall/test", recall, batch_n)
        logger.add_scalar("AUROC/test", auroc, batch_n)
        logger.add_scalar("AvgPrec/test", avg_prec, batch_n)
        logger.add_scalar("TP/test", tp, batch_n)
        logger.add_scalar("TN/test", tn, batch_n)
        logger.add_scalar("FP/test", fp, batch_n)
        logger.add_scalar("FN/test", fn, batch_n)
        print("Saving {}".format(args.model_path))
        torch.save(model.state_dict(), args.model_path)
    if verbose:
        # Bucket (correctness, predicted label) -> example graph pairs,
        # e.g. for inspecting false positives/negatives.
        conf_mat_examples = defaultdict(list)
        idx = 0
        for pos_a, pos_b, neg_a, neg_b in test_pts:
            if pos_a:
                pos_a = pos_a.to(utils.get_device())
                pos_b = pos_b.to(utils.get_device())
            neg_a = neg_a.to(utils.get_device())
            neg_b = neg_b.to(utils.get_device())
            for list_a, list_b in [(pos_a, pos_b), (neg_a, neg_b)]:
                if not list_a: continue
                for a, b in zip(list_a.G, list_b.G):
                    correct = pred[idx] == labels[idx]
                    conf_mat_examples[correct, pred[idx]].append((a, b))
                    idx += 1
# Allow running this module directly: delegate to the training entry point
# with force_test=True so only evaluation is performed.
if __name__ == "__main__":
    from subgraph_matching.train import main
    main(force_test=True)
| 8,369 | 46.828571 | 115 | py |
DeepGAR | DeepGAR-main/deepgar.py | HYPERPARAM_SEARCH = False
HYPERPARAM_SEARCH_N_TRIALS = None # how many grid search trials to run
# (set to None for exhaustive search)
import argparse
from itertools import permutations
import pickle
from queue import PriorityQueue
import os
import random
import time
import networkx as nx
import numpy as np
from sklearn.manifold import TSNE
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import roc_auc_score
import pickle
from common import data
from common import models
from common import utils
if HYPERPARAM_SEARCH:
from test_tube import HyperOptArgumentParser
from hyp_search import parse_encoder
else:
from config import parse_encoder
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def build_model(args):
    """Construct the matching model selected by ``args.method_type``.

    Args:
        args: parsed command-line namespace; uses ``method_type``,
            ``hidden_dim``, ``test`` and ``model_path``.

    Returns:
        The model moved to the default device. When ``args.test`` is set and
        ``args.model_path`` is non-empty, weights are loaded from the
        checkpoint at ``args.model_path``.

    Raises:
        ValueError: if ``args.method_type`` is neither "order" nor "mlp".
    """
    if args.method_type == "order":
        model = models.OrderEmbedder(2, args.hidden_dim, args)
    elif args.method_type == "mlp":
        model = models.BaselineMLP(2, args.hidden_dim, args)
    else:
        # Previously an unrecognized method_type fell through and raised an
        # unrelated NameError on `model` below; fail fast with a clear message.
        raise ValueError("unknown method_type: {!r}".format(args.method_type))
    model.to(utils.get_device())
    if args.test and args.model_path:
        model.load_state_dict(torch.load(args.model_path,
            map_location=utils.get_device()))
    return model
def make_data_source(args):
    """Instantiate the data source named by ``args.dataset``.

    ``args.dataset`` has the form ``"<name>"`` or ``"<name>-<balance>"``
    where balance is "balanced" (the default) or "imbalanced". The name
    "syn" selects on-the-fly synthetic data; any other name selects
    disk-backed data for that dataset.

    Raises:
        Exception: if the balance suffix is not recognized.
    """
    toks = args.dataset.split("-")
    name = toks[0]
    balance = toks[1] if len(toks) > 1 else "balanced"
    if name == "syn":
        if balance == "balanced":
            return data.OTFSynDataSource(node_anchored=args.node_anchored)
        if balance == "imbalanced":
            return data.OTFSynImbalancedDataSource(node_anchored=args.node_anchored)
    else:
        if balance == "balanced":
            return data.DiskDataSource(name, node_anchored=args.node_anchored)
        if balance == "imbalanced":
            return data.DiskImbalancedDataSource(name, node_anchored=args.node_anchored)
    raise Exception("Error: unrecognized dataset")
def train(args, model, logger, in_queue, out_queue):
    """Train the order embedding model.
    args: Commandline arguments
    logger: logger for logging progress
    in_queue: input queue to an intersection computation worker
    out_queue: output queue to an intersection computation worker

    NOTE(review): in the "order" path, `pred_scores` is read below but its
    computation (the mlp_model_nodes forward pass) is commented out, so the
    roc_auc_score line raises NameError at runtime — restore the forward
    pass or drop the node-alignment metric. Confirm intended behavior.
    """
    # print("Running train")
    scheduler, opt = utils.build_optimizer(args, model.parameters())
    if args.method_type == "order":
        # Separate optimizers: one for the graph-level classifier head,
        # one for the node-alignment MLP head.
        clf_opt = optim.Adam(model.clf_model.parameters(), lr=args.lr)
        # clf_opt_nodes = optim.Adam(model.clf_model_nodes.parameters(), lr=args.lr)
        mlp_model_nodes_opt = optim.Adam(model.mlp_model_nodes.parameters(), lr=args.lr)
    done = False
    k = 0  # epoch counter, used only in the progress print below
    print(k)
    while not done:
        data_source = make_data_source(args)
        loaders = data_source.gen_data_loaders(args.eval_interval *
            args.batch_size, args.batch_size, train=True)
        for batch_target, batch_neg_target, batch_neg_query in zip(*loaders):
            # The driver signals one "step" per batch and "done" to stop.
            msg, _ = in_queue.get()
            if msg == "done":
                done = True
                break
            # train
            model.train()
            model.zero_grad()
            pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
                batch_neg_target, batch_neg_query, True)
            # emb_pos_a, emb_pos_b = model.emb_model(pos_a), model.emb_model(pos_b)
            # emb_neg_a, emb_neg_b = model.emb_model(neg_a), model.emb_model(neg_b)
            # Added by TC
            # emb_model returns (graph embedding, per-node embeddings).
            emb_pos_a, emb_pos_a_nodes = model.emb_model(pos_a)
            emb_pos_b, emb_pos_b_nodes = model.emb_model(pos_b)
            emb_neg_a, emb_neg_a_nodes = model.emb_model(neg_a)
            emb_neg_b, emb_neg_b_nodes = model.emb_model(neg_b)
            # print(emb_pos_a.shape, emb_neg_a.shape, emb_neg_b.shape)
            emb_as = torch.cat((emb_pos_a, emb_neg_a), dim=0)
            emb_bs = torch.cat((emb_pos_b, emb_neg_b), dim=0)
            labels = torch.tensor([1]*pos_a.num_graphs + [0]*neg_a.num_graphs).to(
                utils.get_device())
            # Added by TC
            # Ground-truth alignment matrix per positive pair:
            # entry (i, j) = 1 iff target node i maps to query node j.
            align_mat_batch = []
            for sample_idx in range(len(pos_b.node_label)):
                align_mat = torch.zeros(len(pos_a.node_label[sample_idx]), len(pos_b.node_label[sample_idx]))
                for i, a_n in enumerate(pos_a.node_label[sample_idx]):
                    if a_n in pos_b.alignment[sample_idx]:
                        align_mat[i][pos_b.alignment[sample_idx].index(a_n)] = 1
                align_mat_batch.append(align_mat)
            align_mat_batch_all = []
            for i, align_mat in enumerate(align_mat_batch):
                align_mat_batch_all.append(align_mat.flatten())
            labels_nodes = torch.cat(align_mat_batch_all, dim=-1)
            intersect_embs = None
            # pred = model(emb_as, emb_bs)
            # loss = model.criterion(pred, intersect_embs, labels)
            pred = model(emb_as, emb_bs, emb_pos_a_nodes, emb_pos_b_nodes)
            loss = model.criterion(pred, intersect_embs, labels, align_mat_batch)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            opt.step()
            if scheduler:
                scheduler.step()
            if args.method_type == "order":
                with torch.no_grad():
                    pred, pred_nodes = model.predict(pred)
                model.clf_model.zero_grad()
                pred = model.clf_model(pred.unsqueeze(1))
                criterion = nn.NLLLoss()
                clf_loss = criterion(pred, labels)
                clf_loss.backward()
                clf_opt.step()
                # model.clf_model_nodes.zero_grad()
                # pred_nodes = model.clf_model(pred_nodes.unsqueeze(1))
                # model.mlp_model_nodes.zero_grad()
                # pred_scores = model.mlp_model_nodes(emb_pos_a_nodes, emb_pos_b_nodes, align_mat_batch)
                # # pred_scores = torch.cat(pred_scores_batch_all, dim=-1)
                # node_alignment_loss = F.binary_cross_entropy_with_logits(pred_scores, labels_nodes)
                # node_alignment_loss.backward()
                # mlp_model_nodes_opt.step()
                # criterion_nodes = nn.BCEWithLogitsLoss()
                # clf_loss_nodes = criterion_nodes(pred_scores, labels_nodes)
                # pred_scores_batch_all = []
                # for i in range(len(emb_pos_a_nodes)):
                #     align_mat_score = model.mlp_model_nodes(emb_pos_a_nodes[i], emb_pos_b_nodes[i])
                #     pred_scores_batch_all.append(align_mat_score.flatten())
                # pred_scores = torch.cat(pred_scores_batch_all, dim=-1)
                # clf_loss_nodes.backward()
                # mlp_model_nodes_opt.step()
                # print("Node Alignment Loss {}; AUC {}".format(node_alignment_loss, auc))
                # criterion_nodes = nn.NLLLoss()
                # clf_loss_nodes = criterion_nodes(pred_nodes, labels_nodes.long())
                # clf_loss_nodes.backward()
                # clf_opt_nodes.step()
            pred = pred.argmax(dim=-1)
            # pred_nodes = pred_nodes.argmax(dim=-1)
            acc = torch.mean((pred == labels).type(torch.float))
            # acc_nodes = torch.mean((pred_nodes == labels_nodes).type(torch.float))
            # NOTE(review): `pred_scores` is never assigned (computation is
            # commented out above) — this line raises NameError at runtime.
            acc_nodes = roc_auc_score(labels_nodes, pred_scores.detach().numpy())
            train_loss = loss.item()
            train_acc = (acc.item() + acc_nodes.item())/2
            # out_queue.put(("step", (loss.item(), acc, acc_nodes)))
            print('Epoch: {}, loss: {}, Accuracy: {}'.format(k+1, loss.item(), acc))
            k += 1
def train_loop(args):
    """Build the model and data loaders, then run validation (``args.test``)
    or the training loop.

    NOTE(review): the worker-spawning block below is commented out, so the
    non-test branch blocks forever on ``out_queue.get()`` (nothing produces
    results) and the final ``for worker in workers`` raises NameError. The
    queue-based flow appears half-migrated to the direct ``train(...)`` call
    — confirm intended control flow before relying on this path.
    """
    if not os.path.exists(os.path.dirname(args.model_path)):
        os.makedirs(os.path.dirname(args.model_path))
    if not os.path.exists("plots/"):
        os.makedirs("plots/")
    print("Starting {} workers".format(args.n_workers))
    in_queue, out_queue = mp.Queue(), mp.Queue()
    print("Using dataset {}".format(args.dataset))
    # Keys used to build a run-identifying string (currently unused since
    # the SummaryWriter comment is commented out below).
    record_keys = ["conv_type", "n_layers", "hidden_dim",
        "margin", "dataset", "max_graph_size", "skip"]
    args_str = ".".join(["{}={}".format(k, v)
        for k, v in sorted(vars(args).items()) if k in record_keys])
    # logger = SummaryWriter(comment=args_str)
    logger = SummaryWriter()
    model = build_model(args).to(device)
    model.share_memory()
    if args.method_type == "order":
        clf_opt = optim.Adam(model.clf_model.parameters(), lr=args.lr)
    else:
        clf_opt = None
    data_source = make_data_source(args)
    # print(type(data_source))
    # loaders = data_source.gen_data_loaders(args.val_size, args.batch_size,
    #     train=False, use_distributed_sampling=False)
    if args.test:
        print('Test Data')
        loaders = data_source.gen_data_loaders(args.val_size, args.batch_size,
            train=False, use_distributed_sampling=False)
    else:
        print('Train Data')
        loaders = data_source.gen_data_loaders(args.val_size, args.batch_size,
            train=True, use_distributed_sampling=False)
    test_pts = []
    # count = 0
    for batch_target, batch_neg_target, batch_neg_query in zip(*loaders):
        # print(count)
        # count += 1
        # print(batch_target, batch_neg_target, batch_neg_query)
        if args.test:
            pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
                batch_neg_target, batch_neg_query, train = False)
        else:
            pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
                batch_neg_target, batch_neg_query, train = True)
        # print(type(pos_a))
        if pos_a:
            pos_a = pos_a.to(device)
            pos_b = pos_b.to(device)
        neg_a = neg_a.to(device)
        neg_b = neg_b.to(device)
        test_pts.append((pos_a, pos_b, neg_a, neg_b))
        # print(len(test_pts[len(test_pts)-1][0])) # 12
    # file = open('test_pts.obj','wb')
    # pickle.dump(test_pts,file)
    # print('done!')
    print('Data Load Finished!')
    train(args, model, logger, in_queue, out_queue)
    # workers = []
    # for i in range(args.n_workers):
    #     worker = mp.Process(target=train, args=(args, model, data_source,
    #         in_queue, out_queue))
    #     worker.start()
    #     workers.append(worker)
    if args.test:
        # pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
        #     batch_neg_target, batch_neg_query, train = False)
        # if pos_a:
        #     pos_a = pos_a.to(torch.device("cpu"))
        #     pos_b = pos_b.to(torch.device("cpu"))
        #     neg_a = neg_a.to(torch.device("cpu"))
        #     neg_b = neg_b.to(torch.device("cpu"))
        #     test_pts.append((pos_a, pos_b, neg_a, neg_b))
        validation(args, model, test_pts, logger, 0, 0, verbose=True)
    else:
        batch_n = 0
        # for epoch in range(args.n_batches // args.eval_interval):
        for epoch in range(54):
            for i in range(args.eval_interval):
                in_queue.put(("step", None))
            for i in range(args.eval_interval):
                # print(args.eval_interval)
                msg, params = out_queue.get()
                train_loss, train_acc, train_acc_nodes = params
                print("Batch {}. Loss: {:.4f}. Subgraph acc: {:.4f} Node Alignment acc: {:.4f}".format(
                    batch_n, train_loss, train_acc, train_acc_nodes), end="               \r")
                logger.add_scalar("Loss/train", train_loss, batch_n)
                logger.add_scalar("Accuracy/train", train_acc, batch_n)
                batch_n += 1
            validation(args, model, test_pts, logger, batch_n, epoch)
    for i in range(args.n_workers):
        in_queue.put(("done", None))
    for worker in workers:
        worker.join()
def main(force_test=False):
    """Entry point: parse CLI options and launch training (or test) runs.

    force_test -- when True, force test mode regardless of parsed flags.
    """
    mp.set_start_method("spawn", force=True)
    # Grid-search mode swaps in the hyperopt parser; otherwise plain argparse.
    if HYPERPARAM_SEARCH:
        parser = HyperOptArgumentParser(strategy='grid_search')
    else:
        parser = argparse.ArgumentParser(description='Order embedding arguments')
    utils.parse_optimizer(parser)
    parse_encoder(parser)
    # NOTE: parse_args([]) deliberately ignores sys.argv; defaults only.
    args = parser.parse_args([])
    if force_test:
        args.test = True
    # Currently due to parallelism in multi-gpu training, this code performs
    # sequential hyperparameter tuning: all GPUs are used for every trial.
    if not HYPERPARAM_SEARCH:
        train_loop(args)
        return
    for trial_idx, hparam_trial in enumerate(args.trials(HYPERPARAM_SEARCH_N_TRIALS)):
        print("Running hyperparameter search trial", trial_idx)
        print(hparam_trial)
        train_loop(hparam_trial)
# ---------------------------------------------------------------------------
# Script-style driver (largely duplicating main()/train_loop above): builds
# the model, materializes one fixed list of batches, then trains inline.
# NOTE(review): leading indentation was reconstructed during review; confirm
# nesting against version control if anything looks off.
# ---------------------------------------------------------------------------
mp.set_start_method("spawn", force=True)
# Plain argparse normally; hyperopt grid-search parser when sweeping.
parser = (argparse.ArgumentParser(description='Order embedding arguments')
    if not HYPERPARAM_SEARCH else
    HyperOptArgumentParser(strategy='grid_search'))
utils.parse_optimizer(parser)
parse_encoder(parser)
# parse_args([]) ignores sys.argv: only declared defaults are used.
args = parser.parse_args([])
if not os.path.exists(os.path.dirname(args.model_path)):
    os.makedirs(os.path.dirname(args.model_path))
if not os.path.exists("plots/"):
    os.makedirs("plots/")
print("Starting {} workers".format(args.n_workers))
in_queue, out_queue = mp.Queue(), mp.Queue()
print("Using dataset {}".format(args.dataset))
# Hyperparameters worth recording in the run-identifier string (args_str is
# currently unused since the SummaryWriter comment was dropped).
record_keys = ["conv_type", "n_layers", "hidden_dim",
    "margin", "dataset", "max_graph_size", "skip"]
args_str = ".".join(["{}={}".format(k, v)
    for k, v in sorted(vars(args).items()) if k in record_keys])
logger = SummaryWriter()
model = build_model(args).to(device)
model.share_memory()
if args.method_type == "order":
    clf_opt = optim.Adam(model.clf_model.parameters(), lr=args.lr)
else:
    clf_opt = None
data_source = make_data_source(args)
print(data_source)
# Materialize a fixed list of (pos_a, pos_b, neg_a, neg_b) batches up front.
if args.test:
    print('Test Data')
    loaders = data_source.gen_data_loaders(args.val_size, args.batch_size,
        train=False, use_distributed_sampling=False)
else:
    print('Train Data')
    loaders = data_source.gen_data_loaders(args.val_size, args.batch_size,
        train=True, use_distributed_sampling=False)
test_pts = []
for batch_target, batch_neg_target, batch_neg_query in zip(*loaders):
    if args.test:
        pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
            batch_neg_target, batch_neg_query, train = False)
    else:
        pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
            batch_neg_target, batch_neg_query, train = True)
    # pos_a may be falsy when a batch carries no positive pairs.
    if pos_a:
        pos_a = pos_a.to(device)
        pos_b = pos_b.to(device)
        neg_a = neg_a.to(device)
        neg_b = neg_b.to(device)
    test_pts.append((pos_a, pos_b, neg_a, neg_b))
print('Data Load Finished!')
# Rebuild the model from scratch (discards the instance created above).
model = build_model(args).to(device)
model.share_memory()
scheduler, opt = utils.build_optimizer(args, model.parameters())
if args.method_type == "order":
    clf_opt = optim.Adam(model.clf_model.parameters(), lr=1e-3)
mlp_model_nodes_opt = optim.Adam(model.mlp_model_nodes.parameters(), lr=args.lr)
print('Setting Finished!')
done = False
k = 0
for k in range(30):
    # Fresh data source and loaders for each of the 30 epochs.
    data_source = make_data_source(args)
    loaders = data_source.gen_data_loaders(args.eval_interval *
        args.batch_size, args.batch_size, train=True)
    step = 0
    for batch_target, batch_neg_target, batch_neg_query in zip(*loaders):
        # train
        model.train()
        model.zero_grad()
        pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
            batch_neg_target, batch_neg_query, True)
        # Graph-level and per-node embeddings for all four batch parts.
        emb_pos_a, emb_pos_a_nodes = model.emb_model(pos_a.to(device))
        emb_pos_b, emb_pos_b_nodes = model.emb_model(pos_b.to(device))
        emb_neg_a, emb_neg_a_nodes = model.emb_model(neg_a.to(device))
        emb_neg_b, emb_neg_b_nodes = model.emb_model(neg_b.to(device))
        emb_as = torch.cat((emb_pos_a, emb_neg_a), dim=0)
        emb_bs = torch.cat((emb_pos_b, emb_neg_b), dim=0)
        labels = torch.tensor([1]*pos_a.num_graphs + [0]*neg_a.num_graphs).to(
            utils.get_device())
        # Ground-truth node-alignment matrix per positive pair:
        # align_mat[i, j] = 1 iff node i of graph A aligns to node j of B.
        align_mat_batch = []
        for sample_idx in range(len(pos_b.node_label)):
            align_mat = torch.zeros(len(pos_a.node_label[sample_idx]), len(pos_b.node_label[sample_idx]))
            for i, a_n in enumerate(pos_a.node_label[sample_idx]):
                if a_n in pos_b.alignment[sample_idx]:
                    align_mat[i][pos_b.alignment[sample_idx].index(a_n)] = 1
            align_mat_batch.append(align_mat)
        align_mat_batch_all = []
        for i, align_mat in enumerate(align_mat_batch):
            align_mat_batch_all.append(align_mat.flatten())
        labels_nodes = torch.cat(align_mat_batch_all, dim=-1)
        intersect_embs = None
        pred = model(emb_as, emb_bs, emb_pos_a_nodes, emb_pos_b_nodes)
        loss = model.criterion(pred, intersect_embs, labels, align_mat_batch)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        opt.step()
        if scheduler:
            scheduler.step()
        if args.method_type == "order":
            # Skip classifier training on divergent batches.
            if loss.item() > 10:
                acc = 0
            else:
                # predict() is inference-only; the classifier below still
                # needs gradients, so only the predict call is under no_grad.
                with torch.no_grad():
                    pred, pred_nodes = model.predict(pred)
                model.clf_model.zero_grad()
                pred = model.clf_model(pred.unsqueeze(1))
                criterion = nn.NLLLoss()
                clf_loss = criterion(pred, labels)
                clf_loss.backward()
                clf_opt.step()
                pred = pred.argmax(dim=-1)
                acc = torch.mean((pred == labels).type(torch.float))
        train_loss = loss.item()
        print('Epoch: {}, loss: {}, Accuracy: {}'.format(k, loss.item(), acc))
    # NOTE(review): k is the loop variable; this increment has no effect.
    k += 1
model.eval()
def admm_opt(emb_a, emb_b, adj_pair, true_matrix, initial_align, epochs=50, p=1):
    """Refine a soft node-alignment matrix with an ADMM-style splitting scheme.

    emb_a, emb_b  -- node embeddings of the two graphs; currently unused in the
                     live code (only referenced by the commented-out H setup).
    adj_pair      -- [adj_A, adj_B] adjacency matrices as torch tensors.
    true_matrix   -- ground-truth alignment (A-nodes x B-nodes); used only for
                     the reported ROC-AUC, not for optimization.
    initial_align -- warm-start alignment (A-nodes x B-nodes).
    epochs        -- number of ADMM iterations.
    p             -- ADMM penalty parameter (the usual "rho").

    Returns (P, auc): the orthogonality-projected variable from the last
    iteration and the last computed AUC.
    """
    # All iterates live in the transposed space: X is (B-nodes x A-nodes).
    initial_X = initial_align.detach().cpu().T
    initial_Y = torch.mm(initial_X, adj_pair[0].float())
    # H was meant to encode order-embedding violations max(emb_b - emb_a);
    # with that loop commented out it stays all-zero, which also zeroes Z.
    H = torch.zeros((initial_X.shape[0], initial_X.shape[1]))
    initial_Z = initial_X*H
    initial_P = initial_X.clone()
    # Scaled dual variables, one per split constraint (Y, Z, P).
    initial_U1, initial_U2, initial_U3 = torch.zeros(initial_X.shape), torch.zeros(initial_X.shape), torch.zeros(initial_X.shape)
    # ADMM Algorithm
    for epoch in range(epochs):
        # P-update: project X - U3/p toward an orthogonal matrix by replacing
        # the singular values with ones (rectangular identity in the middle).
        u, d, v = torch.linalg.svd(initial_X - initial_U3/p)
        initial_P = torch.mm(torch.mm(u, torch.eye(initial_X.shape[0], initial_X.shape[1])), v)
        # X-update: closed-form least-squares solve X = X_1 @ X_2^{-1}.
        X_1 = torch.mm(adj_pair[1].float().T, initial_Y) + p*torch.mm(initial_Y, adj_pair[0].float().T)\
            + torch.mm(initial_U1, adj_pair[0].float().T) + (p*initial_Z+initial_U2)*H\
            + p*initial_P + initial_U3
        X_2 = torch.mm(initial_Y.T, initial_Y) + torch.mm(adj_pair[0].float(), adj_pair[0].float().T)\
            + p*torch.mm(H.T, H) + p*torch.eye(initial_X.shape[1])
        X_2_inv = torch.linalg.inv(X_2)
        initial_X = torch.mm(X_1, X_2_inv)
        # Y-update (pinv guards against a singular normal matrix).
        # NOTE(review): initial_Y1 is assigned but never read.
        initial_Y1 = initial_Y.clone()
        Y_1 = torch.mm(adj_pair[1].float(), initial_X) + p*torch.mm(initial_X, adj_pair[0].float())\
            - initial_U1
        Y_2 = 2*torch.mm(initial_X.T, initial_X) + p*torch.eye(initial_X.shape[1])
        Y_2_inv = torch.linalg.pinv(Y_2)
        initial_Y = torch.mm(Y_1, Y_2_inv)
        # Z-update: elementwise max of the two candidate minimizers.
        Z_1 = (p*initial_X*H - initial_U2) / (p+2)
        Z_2 = torch.min(torch.zeros(initial_U2.shape), initial_X*H-initial_U2/p)
        initial_Z = torch.max(Z_1, Z_2)
        # Primal residuals for the three constraints.
        R1 = initial_Y - torch.mm(initial_X, adj_pair[0].float())
        R2 = initial_Z - initial_X*H
        R3 = initial_P - initial_X
        # Dual ascent on the scaled multipliers.
        initial_U1 += p*R1
        initial_U2 += p*R2
        initial_U3 += p*R3
        # Progress report; the discretized matrix is computed but AUC is
        # evaluated on the soft iterate X. NOTE(review): align_matrix unused.
        align_matrix = ortho_align(initial_X)
        auc = roc_auc_score(true_matrix.T.flatten(), initial_X.flatten())
        print('Epoch: {}, AUC: {}'.format(epoch+1, auc))
    return initial_P, auc
def depth_count(adj_t):
    """Per-node depth in a DAG given as an adjacency matrix.

    A node's depth is the maximum shortest-path distance from any source
    (in-degree-0) node that reaches it; unreachable nodes stay at 0.
    Returns a numpy array of length n_nodes.
    """
    dag = nx.from_numpy_matrix(adj_t, create_using=nx.DiGraph)
    n_nodes = len(dag.nodes)
    sources = [node for node, deg in dict(dag.in_degree).items() if deg == 0]
    depth = np.zeros(n_nodes)
    for src in sources:
        # Distances from this source; nodes it cannot reach remain 0.
        reach = np.zeros(n_nodes)
        for node, dist in nx.shortest_path_length(dag, src).items():
            reach[node] = dist
        # Keep the elementwise maximum over all sources.
        depth = np.maximum(depth, reach)
    return depth
def ortho_align(align_mat):
    """Convert a soft alignment score matrix into a hard 0/1 assignment.

    Scores are ranked globally (rank 0 = largest); cells are then visited in
    rank order and greedily selected whenever both their row and column are
    still free, stopping once align_mat.shape[0] ones have been placed.
    """
    hard = torch.zeros(align_mat.shape)
    flat_scores = align_mat.flatten()
    ranked = torch.sort(flat_scores, descending=True).values
    # rank[i, j] = position of align_mat[i, j] in the descending ordering.
    rank = torch.zeros(align_mat.shape)
    for score in ranked:
        rank[align_mat == score] = torch.where(ranked == score)[0].float()
    taken_rows, taken_cols = [], []
    for order in range(flat_scores.shape[0]):
        # Once one entry per row-budget is placed, nothing more can be added.
        if torch.sum(hard) >= hard.shape[0]:
            break
        row, col = torch.where(rank == order)
        if row not in taken_rows and col not in taken_cols:
            hard[row, col] = 1
            taken_rows.append(row)
            taken_cols.append(col)
    return hard
def alignment(emb_a, emb_b, adj_pair, true_matrix, epoch=200):
    """Learn a soft node-alignment matrix between graphs A and B by gradient
    descent, then discretize it with ortho_align.

    emb_a, emb_b -- per-node embeddings of graphs A and B.
    adj_pair     -- [adj_A, adj_B] adjacency matrices (torch tensors).
    true_matrix  -- ground-truth alignment (A-nodes x B-nodes); used only to
                    report per-epoch AUC, never for training.
    epoch        -- number of optimization steps.

    Returns the final hard 0/1 alignment matrix.
    """
    tr = 50  # sigmoid temperature: sigmoid(tr * x) approximates a 0/1 gate
    # NOTE: torch.autograd.Variable is a deprecated no-op wrapper; kept as-is.
    initial_align = torch.autograd.Variable(-1e-3 * torch.ones(true_matrix.shape).float()).to(emb_a.device)
    initial_align = torch.autograd.Variable(initial_align).to(emb_a.device)
    hardsigmoid = nn.Hardsigmoid()
    # Per-node depth of graph A, used as a depth penalty in loss term 4.
    avg_t = torch.Tensor(depth_count(np.array(adj_pair[0]))).to(emb_a.device)
    initial_align.requires_grad = True
    align_opt = optim.Adam([initial_align], lr=5e-3)
    for i in range(epoch):
        align_opt.zero_grad()
        # BUG FIX: the original expression had unbalanced parentheses (a
        # SyntaxError). Reconstructed as the structural-consistency term
        # || sigma(tr*X)^T A_a sigma(tr*X) - A_b ||_F^2, the only reading
        # consistent with X being (A-nodes x B-nodes).
        soft_align = torch.sigmoid(tr * initial_align)
        align_loss_1 = torch.sum(
            (torch.mm(torch.mm(soft_align.T, adj_pair[0].float().to(emb_a.device)), soft_align)
             - adj_pair[1].float().to(emb_a.device)) ** 2)
        # Order-embedding violation: aligned pairs should satisfy emb_b <= emb_a.
        align_loss_2 = 0
        for j in range(initial_align.shape[0]):
            for k in range(initial_align.shape[1]):
                align_loss_2 += torch.sum((torch.max(torch.zeros(emb_a[j].shape).to(emb_a.device), initial_align[j][k] * (emb_b[k] - emb_a[j]))) ** 2)
        # Soft orthogonality: push sigma(trX)^T sigma(trX) toward the identity.
        align_loss_3 = torch.sum((torch.mm(torch.sigmoid(tr * initial_align.T), torch.sigmoid(tr * initial_align)) - torch.eye(initial_align.shape[1]).to(emb_a.device)) ** 2)
        # Depth regularizer: discourage aligning deep A-nodes.
        align_loss_4 = 0
        for j in range(initial_align.shape[0]):
            for k in range(initial_align.shape[1]):
                align_loss_4 += (torch.sigmoid(tr * initial_align[j][k]) * avg_t[j]) ** 2
        align_loss = align_loss_1 + align_loss_2 + 1e-5 * align_loss_3 + 1e-3 * align_loss_4
        align_loss.backward()
        align_opt.step()
        # Per-epoch discretization and AUC report (diagnostic only).
        align_matrix = ortho_align(initial_align.detach().cpu())
        auc = roc_auc_score(true_matrix.flatten(), align_matrix.flatten())
        print('Epoch: {}, Loss: {}, AUC: {}'.format(i, align_loss.item(), auc))
    return align_matrix
# ---------------------------------------------------------------------------
# Inline evaluation over the pre-built test_pts batches: subgraph-level
# classification metrics plus per-pair node alignment (gradient-descent warm
# start followed by ADMM refinement). Indentation reconstructed in review.
# ---------------------------------------------------------------------------
USE_ORCA_FEATS = False  # whether to apply orbit-count heuristics to negatives
model.eval()
all_raw_preds, all_preds, all_labels = [], [], []
all_raw_preds_nodes, all_preds_nodes, all_labels_nodes = [], [], []
all_test_pairs = []
for pos_a, pos_b, neg_a, neg_b in test_pts:
    if pos_a:
        pos_a = pos_a.to(utils.get_device())
        pos_b = pos_b.to(utils.get_device())
        neg_a = neg_a.to(utils.get_device())
        neg_b = neg_b.to(utils.get_device())
    labels = torch.tensor([1]*(pos_a.num_graphs if pos_a else 0) +
        [0]*neg_a.num_graphs).to(utils.get_device())
    # Ground-truth alignment matrix and adjacency pair per positive sample.
    align_mat_batch = []
    adj_pair = []
    for sample_idx in range(len(pos_b.node_label)):
        align_mat = torch.zeros(len(pos_a.node_label[sample_idx]), len(pos_b.node_label[sample_idx]))
        adj_pair.append([torch.tensor(nx.adjacency_matrix(pos_a.G[sample_idx]).todense()), torch.tensor(nx.adjacency_matrix(pos_b.G[sample_idx]).todense())])
        for i, a_n in enumerate(pos_a.node_label[sample_idx]):
            if a_n in pos_b.alignment[sample_idx]:
                align_mat[i][pos_b.alignment[sample_idx].index(a_n)] = 1
        align_mat_batch.append(align_mat)
    align_mat_batch_all = []
    for i, align_mat in enumerate(align_mat_batch):
        align_mat_batch_all.append(align_mat.flatten())
    labels_nodes = torch.cat(align_mat_batch_all, dim=-1)
    with torch.no_grad():
        (emb_neg_a, emb_neg_a_nodes), (emb_neg_b, emb_neg_b_nodes) = (model.emb_model(neg_a),
            model.emb_model(neg_b))
        if pos_a:
            (emb_pos_a, emb_pos_a_nodes), (emb_pos_b, emb_pos_b_nodes) = (model.emb_model(pos_a),
                model.emb_model(pos_b))
            emb_as = torch.cat((emb_pos_a, emb_neg_a), dim=0)
            emb_bs = torch.cat((emb_pos_b, emb_neg_b), dim=0)
        else:
            emb_as, emb_bs = emb_neg_a, emb_neg_b
        pred = model(emb_as, emb_bs, emb_pos_a_nodes, emb_pos_b_nodes)
        raw_pred, raw_pred_nodes = model.predict(pred)
        if USE_ORCA_FEATS:
            import orca
            import matplotlib.pyplot as plt
            def make_feats(g):
                # 5-node orbit counts; the anchor row comes from the node
                # whose feature marks it as the anchor.
                counts5 = np.array(orca.orbit_counts("node", 5, g))
                for v, n in zip(counts5, g.nodes):
                    if g.nodes[n]["node_feature"][0] > 0:
                        anchor_v = v
                        break
                v5 = np.sum(counts5, axis=0)
                return v5, anchor_v
            for i, (ga, gb) in enumerate(zip(neg_a.G, neg_b.G)):
                (va, na), (vb, nb) = make_feats(ga), make_feats(gb)
                if (va < vb).any() or (na < nb).any():
                    raw_pred[pos_a.num_graphs + i] = MAX_MARGIN_SCORE
        if args.method_type == "order":
            pred = model.clf_model(raw_pred.unsqueeze(1)).argmax(dim=-1)
            pred_nodes = model.clf_model_nodes(raw_pred_nodes.unsqueeze(1)).argmax(dim=-1)
            raw_pred *= -1
            raw_pred_nodes *= -1
        elif args.method_type == "ensemble":
            pred = torch.stack([m.clf_model(
                raw_pred.unsqueeze(1)).argmax(dim=-1) for m in model.models])
            for i in range(pred.shape[1]):
                print(pred[:,i])
            pred = torch.min(pred, dim=0)[0]
            raw_pred *= -1
        elif args.method_type == "mlp":
            raw_pred = raw_pred[:,1]
            pred = pred.argmax(dim=-1)
    all_raw_preds.append(raw_pred)
    all_preds.append(pred)
    all_labels.append(labels)
    # Nodes: per-pair alignment, outside torch.no_grad() because alignment()
    # trains its soft alignment matrix with autograd.
    pred_nodes = []
    avg_auc = 0
    for idx in range(len(pos_b.node_label)):
        print('Graph pair {} start!'.format(idx))
        initial_alignment = alignment(emb_pos_a_nodes[idx],
            emb_pos_b_nodes[idx],
            adj_pair[idx],
            align_mat_batch[idx],
            epoch=200)
        node_align_matrix, auc = admm_opt(emb_pos_a_nodes[idx],
            emb_pos_b_nodes[idx],
            adj_pair[idx],
            align_mat_batch[idx],
            initial_alignment,
            epochs=100,
            p=0.5)
        avg_auc += auc
    # BUG FIX: original read `.format*(auc/len(...))` -- a TypeError (the
    # bound method multiplied by a float) -- and used only the last pair's
    # auc; report the true average over all pairs instead.
    print('Average Test AUC {}'.format(avg_auc/len(pos_b.node_label)))
| 31,768 | 39.31599 | 205 | py |
DeepGAR | DeepGAR-main/common/utils.py | from collections import defaultdict, Counter
from deepsnap.graph import Graph as DSGraph
from deepsnap.batch import Batch
from deepsnap.dataset import GraphDataset
import torch
import torch.optim as optim
import torch_geometric.utils as pyg_utils
from torch_geometric.data import DataLoader
import networkx as nx
import numpy as np
import random
import scipy.stats as stats
from tqdm import tqdm
import pickle as pkl
from common import feature_preprocess
def sample_neigh(graphs, size):
    """Sample a connected `size`-node neighborhood from a random graph.

    A graph is chosen with probability proportional to its node count; a
    neighborhood then grows by random frontier expansion from a random start
    node. If the frontier dies out before reaching `size`, the whole attempt
    restarts (new graph, new start node).

    Returns (graph, node_list) where node_list has exactly `size` nodes.
    """
    # BUG FIX: `dtype=np.float` was removed in NumPy 1.24; the builtin
    # `float` is the documented replacement (same float64 result).
    ps = np.array([len(g) for g in graphs], dtype=float)
    ps /= np.sum(ps)
    # Size-weighted categorical distribution over graph indices.
    dist = stats.rv_discrete(values=(np.arange(len(graphs)), ps))
    while True:
        idx = dist.rvs()
        graph = graphs[idx]
        start_node = random.choice(list(graph.nodes))
        neigh = [start_node]
        frontier = list(set(graph.neighbors(start_node)) - set(neigh))
        visited = set([start_node])
        while len(neigh) < size and frontier:
            new_node = random.choice(list(frontier))
            assert new_node not in neigh
            neigh.append(new_node)
            visited.add(new_node)
            frontier += list(graph.neighbors(new_node))
            frontier = [x for x in frontier if x not in visited]
        if len(neigh) == size:
            return graph, neigh
# Lazily-initialized random bitmasks shared by every vec_hash call; the first
# call fixes both the RNG seed and the mask-vector length.
cached_masks = None
def vec_hash(v):
    """Hash each component of v against a fixed per-position 32-bit mask.

    The masks are generated once (seed 2019) with the length of the first
    vector ever hashed, so later vectors are expected to be at least as long.
    Returns a list of ints, one per mask position.
    """
    global cached_masks
    if cached_masks is None:
        random.seed(2019)
        cached_masks = [random.getrandbits(32) for _ in range(len(v))]
    return [hash(v[pos]) ^ mask for pos, mask in enumerate(cached_masks)]
def wl_hash(g, dim=64, node_anchored=False):
    """Weisfeiler-Lehman-style hash of a graph.

    Runs len(g) rounds of neighborhood aggregation (self + neighbors, folded
    through vec_hash) over dim-wide integer node vectors, then returns the
    column sums as a hashable tuple, so isomorphic graphs collide.

    node_anchored -- when True, the node with attribute anchor == 1 starts
    from an all-ones vector, distinguishing anchored from unanchored hashes.
    """
    g = nx.convert_node_labels_to_integers(g)
    # BUG FIX: `dtype=np.int` was removed in NumPy 1.24; the builtin `int`
    # is the documented replacement and yields the same default int dtype.
    vecs = np.zeros((len(g), dim), dtype=int)
    if node_anchored:
        for v in g.nodes:
            if g.nodes[v]["anchor"] == 1:
                vecs[v] = 1
                break
    for i in range(len(g)):
        newvecs = np.zeros((len(g), dim), dtype=int)
        for n in g.nodes:
            # Aggregate each node's closed neighborhood and re-hash it.
            newvecs[n] = vec_hash(np.sum(vecs[list(g.neighbors(n)) + [n]],
                axis=0))
        vecs = newvecs
    return tuple(np.sum(vecs, axis=0))
def gen_baseline_queries_rand_esu(queries, targets, node_anchored=False):
    """Baseline query generator: mine target subgraphs with randomized ESU
    and, for each query size, keep one sample from each of the most frequent
    WL-hash classes.
    """
    sizes = Counter(len(g) for g in queries)
    max_size = max(sizes)
    # all_subgraphs[size][wl_hash] -> list of sampled subgraphs of that class
    all_subgraphs = defaultdict(lambda: defaultdict(list))
    total_n_max_subgraphs, total_n_subgraphs = 0, 0
    for target in tqdm(targets):
        found = enumerate_subgraph(target, k=max_size,
            progress_bar=len(targets) < 10, node_anchored=node_anchored)
        for (size, key), graphs in found.items():
            all_subgraphs[size][key] += graphs
            if size == max_size:
                total_n_max_subgraphs += len(graphs)
            total_n_subgraphs += len(graphs)
    print(total_n_subgraphs, "subgraphs explored")
    print(total_n_max_subgraphs, "max-size subgraphs explored")
    out = []
    for size, count in sizes.items():
        by_class = all_subgraphs[size]
        # Most frequent isomorphism classes first; take the top `count`.
        most_common = sorted(by_class.items(), key=lambda kv: len(kv[1]),
            reverse=True)[:count]
        for _, neighs in most_common:
            print(len(neighs))
            out.append(random.choice(neighs))
    return out
def enumerate_subgraph(G, k=3, progress_bar=False, node_anchored=False):
    """Randomized ESU-style enumeration of connected subgraphs of G up to size k.

    ps[d] is the probability of keeping each candidate extension at recursion
    depth d (decaying with depth, sharpened by the ** 1.5).
    Returns a dict keyed by (subgraph_size, wl_hash) mapping to the list of
    sampled subgraphs in that class.
    """
    ps = np.arange(1.0, 0.0, -1.0/(k+1)) ** 1.5
    motif_counts = defaultdict(list)
    for node in tqdm(G.nodes) if progress_bar else G.nodes:
        sg = set()
        sg.add(node)
        v_ext = set()
        # Only higher-numbered neighbors, so each subgraph is enumerated from
        # exactly one canonical root (standard ESU trick).
        neighbors = [nbr for nbr in list(G[node].keys()) if nbr > node]
        # Keep a ps[1] fraction of candidates; the random term rounds the
        # fractional count up or down stochastically.
        n_frac = len(neighbors) * ps[1]
        n_samples = int(n_frac) + (1 if random.random() < n_frac - int(n_frac)
            else 0)
        neighbors = random.sample(neighbors, n_samples)
        for nbr in neighbors:
            v_ext.add(nbr)
        extend_subgraph(G, k, sg, v_ext, node, motif_counts, ps, node_anchored)
    return motif_counts
def extend_subgraph(G, k, sg, v_ext, node_id, motif_counts, ps, node_anchored):
    """Recursive ESU step: record the current subgraph, then grow it via v_ext.

    sg           -- node set of the current subgraph (mutated, then restored).
    v_ext        -- exclusive extension candidates for this branch.
    node_id      -- canonical root; only nodes > node_id may ever be added.
    motif_counts -- output dict keyed by (size, wl_hash); appended in place.
    ps           -- per-depth keep probabilities (see enumerate_subgraph).
    """
    # Base case
    sg_G = G.subgraph(sg)
    if node_anchored:
        # Copy before annotating so the anchor attribute does not leak into G.
        sg_G = sg_G.copy()
        nx.set_node_attributes(sg_G, 0, name="anchor")
        sg_G.nodes[node_id]["anchor"] = 1
    motif_counts[len(sg), wl_hash(sg_G,
        node_anchored=node_anchored)].append(sg_G)
    if len(sg) == k:
        return
    # Recursive step:
    old_v_ext = v_ext.copy()
    while len(v_ext) > 0:
        w = v_ext.pop()
        new_v_ext = v_ext.copy()
        # Exclusive new neighbors of w: beyond the root, outside the current
        # subgraph, and not already candidates at this level.
        neighbors = [nbr for nbr in list(G[w].keys()) if nbr > node_id and nbr
            not in sg and nbr not in old_v_ext]
        # Subsample candidates with the depth-dependent probability.
        n_frac = len(neighbors) * ps[len(sg) + 1]
        n_samples = int(n_frac) + (1 if random.random() < n_frac - int(n_frac)
            else 0)
        neighbors = random.sample(neighbors, n_samples)
        for nbr in neighbors:
            new_v_ext.add(nbr)
        sg.add(w)
        extend_subgraph(G, k, sg, new_v_ext, node_id, motif_counts, ps,
            node_anchored)
        sg.remove(w)
def gen_baseline_queries_mfinder(queries, targets, n_samples=10000,
        node_anchored=False):
    """mfinder-style baseline: sample random anchored neighborhoods from the
    targets and, per query size, keep one sample from each of the most
    frequent WL-hash classes.
    """
    sizes = Counter(len(g) for g in queries)
    out = []
    for size, count in tqdm(sizes.items()):
        print(size)
        by_class = defaultdict(list)
        for _ in tqdm(range(n_samples)):
            graph, neigh = sample_neigh(targets, size)
            anchor = neigh[0]
            neigh = graph.subgraph(neigh).copy()
            # Mark the sampled anchor and drop self loops before hashing.
            nx.set_node_attributes(neigh, 0, name="anchor")
            neigh.nodes[anchor]["anchor"] = 1
            neigh.remove_edges_from(nx.selfloop_edges(neigh))
            by_class[wl_hash(neigh, node_anchored=node_anchored)].append(neigh)
        # Most frequent isomorphism classes first; take the top `count`.
        ranked = sorted(by_class.items(), key=lambda kv: len(kv[1]),
            reverse=True)[:count]
        for _, neighs in ranked:
            print(len(neighs))
            out.append(random.choice(neighs))
    return out
# Memoized torch.device so the CUDA-availability probe runs only once.
device_cache = None
def get_device():
    """Return the shared compute device (CUDA when available, else CPU)."""
    global device_cache
    if device_cache is None:
        if torch.cuda.is_available():
            device_cache = torch.device("cuda")
        else:
            device_cache = torch.device("cpu")
    return device_cache
def parse_optimizer(parser):
    """Register the shared optimizer/scheduler command-line flags on `parser`.

    Flags are added to a dedicated argument group; parsing behavior is
    identical to spelling out each add_argument call by hand.
    """
    group = parser.add_argument_group()
    specs = [
        ('--opt', 'opt', str, 'Type of optimizer'),
        ('--opt-scheduler', 'opt_scheduler', str,
         'Type of optimizer scheduler. By default none'),
        ('--opt-restart', 'opt_restart', int,
         'Number of epochs before restart (by default set to 0 which means no restart)'),
        ('--opt-decay-step', 'opt_decay_step', int,
         'Number of epochs before decay'),
        ('--opt-decay-rate', 'opt_decay_rate', float,
         'Learning rate decay ratio'),
        ('--lr', 'lr', float, 'Learning rate.'),
        ('--clip', 'clip', float, 'Gradient clipping.'),
    ]
    for flag, dest, arg_type, help_text in specs:
        group.add_argument(flag, dest=dest, type=arg_type, help=help_text)
    # No explicit dest here; argparse derives "weight_decay" from the flag.
    group.add_argument('--weight_decay', type=float,
        help='Optimizer weight decay.')
def build_optimizer(args, params):
    """Build (scheduler, optimizer) over the trainable subset of `params`.

    args must provide: opt, lr, weight_decay, opt_scheduler, and (depending
    on the scheduler) opt_decay_step/opt_decay_rate or opt_restart.

    Returns (scheduler_or_None, optimizer).
    Raises ValueError for unknown optimizer/scheduler names — previously an
    unknown name surfaced later as an UnboundLocalError.
    """
    weight_decay = args.weight_decay
    # Frozen parameters are excluded from optimization.
    filter_fn = filter(lambda p: p.requires_grad, params)
    if args.opt == 'adam':
        optimizer = optim.Adam(filter_fn, lr=args.lr, weight_decay=weight_decay)
    elif args.opt == 'sgd':
        optimizer = optim.SGD(filter_fn, lr=args.lr, momentum=0.95,
            weight_decay=weight_decay)
    elif args.opt == 'rmsprop':
        optimizer = optim.RMSprop(filter_fn, lr=args.lr, weight_decay=weight_decay)
    elif args.opt == 'adagrad':
        optimizer = optim.Adagrad(filter_fn, lr=args.lr, weight_decay=weight_decay)
    else:
        raise ValueError('Unknown optimizer: {}'.format(args.opt))
    if args.opt_scheduler == 'none':
        return None, optimizer
    elif args.opt_scheduler == 'step':
        scheduler = optim.lr_scheduler.StepLR(optimizer,
            step_size=args.opt_decay_step, gamma=args.opt_decay_rate)
    elif args.opt_scheduler == 'cos':
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
            T_max=args.opt_restart)
    else:
        raise ValueError('Unknown scheduler: {}'.format(args.opt_scheduler))
    return scheduler, optimizer
def batch_nx_graphs_original(graphs, anchors=None):
    """Pack networkx graphs into an augmented DeepSNAP Batch on the shared device.

    When `anchors` is given, each graph gets a one-hot "node_feature" marking
    its anchor node. Mutates the input graphs' node attributes in place.
    """
    augmenter = feature_preprocess.FeatureAugment()
    if anchors is not None:
        for anchor_node, graph in zip(anchors, graphs):
            for node in graph.nodes:
                is_anchor = float(node == anchor_node)
                graph.nodes[node]["node_feature"] = torch.tensor([is_anchor])
    ds_graphs = [DSGraph(graph) for graph in graphs]
    batch = augmenter.augment(Batch.from_data_list(ds_graphs))
    return batch.to(get_device())
def batch_nx_graphs(graphs, align1=None, align2=None, anchors=None, ht_encoder=None):
    """Pack networkx graphs into an augmented DeepSNAP Batch with node-level
    alignment bookkeeping.

    graphs     -- list of networkx graphs; mutated in place (node attributes
                  node_feature / node_label / graph_label / alignment are set).
    align1/2   -- parallel per-graph alignment lists; when both are non-empty,
                  node nd of graph i gets align1[i][align2[i].index(nd)] if nd
                  appears in align2[i], else None.
    anchors    -- optional per-graph anchor node; sets a one-hot node_feature.
    ht_encoder -- optional path to a pickled label encoder; when set, the
                  encoded node label is appended to node_feature.

    Returns the augmented Batch (left on CPU, as in the original code where
    the .to(get_device()) call was commented out).
    """
    # Avoid the mutable-default pitfall; the original used align1=[]/align2=[]
    # (never mutated, so this is behavior-preserving).
    align1 = [] if align1 is None else align1
    align2 = [] if align2 is None else align2
    augmenter = feature_preprocess.FeatureAugment()
    enc = None
    if ht_encoder is not None:
        # Fix: open the pickle via a context manager so the file is closed.
        with open(ht_encoder, 'rb') as f:
            enc = pkl.load(f)
    if anchors is not None:
        for anchor, g in zip(anchors, graphs):
            for v in g.nodes:
                if enc is not None:
                    g.nodes[v]["node_feature"] = torch.tensor(
                        [float(v == anchor), enc.transform([g.nodes[v]['label']])])
                else:
                    g.nodes[v]["node_feature"] = torch.tensor([float(v == anchor)])
    # BUG FIX: the labeling loops below originally iterated
    # zip(anchors, graphs), which raises TypeError when anchors is None; the
    # anchor value was never used, so iterate the graphs directly.
    if len(align1) != 0 and len(align2) != 0:
        for i, g in enumerate(graphs):
            for nd in g.nodes:
                g.nodes[nd]["node_label"] = nd
                g.nodes[nd]["graph_label"] = i
                if nd in align2[i]:
                    g.nodes[nd]['alignment'] = align1[i][align2[i].index(nd)]
                else:
                    g.nodes[nd]['alignment'] = None
    else:
        for i, g in enumerate(graphs):
            for nd in g.nodes:
                g.nodes[nd]["node_label"] = nd
                g.nodes[nd]["graph_label"] = i
                g.nodes[nd]['alignment'] = None
    batch = Batch.from_data_list([DSGraph(g) for g in graphs])
    batch = augmenter.augment(batch)
    return batch
| 11,535 | 39.477193 | 120 | py |
DeepGAR | DeepGAR-main/common/data.py | import os
import pickle
import random
from deepsnap.graph import Graph as DSGraph
from deepsnap.batch import Batch
from deepsnap.dataset import GraphDataset, Generator
import networkx as nx
import numpy as np
from sklearn.manifold import TSNE
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.optim as optim
from torch_geometric.data import DataLoader
from torch.utils.data import DataLoader as TorchDataLoader
from torch_geometric.datasets import TUDataset, PPI, QM9
import torch_geometric.utils as pyg_utils
import torch_geometric.nn as pyg_nn
from tqdm import tqdm
import queue
import scipy.stats as stats
from common import combined_syn
from common import feature_preprocess
from common import utils
from sklearn import preprocessing
import pickle as pkl
import scipy.stats as stats
def load_dataset(name):
    """Load real-world datasets, available in PyTorch Geometric.
    Used as a helper for DiskDataSource.

    For the custom "amn" dataset, loads pickled (target, base) graph pairs
    plus their alignments and flattens them into
    [[target_graph, base_graph], [alignment1, alignment2]] samples.
    Returns (train, test, task) with an 80/20 in-order split.
    """
    task = "graph"
    # TUDataset collections share a loading pattern; dispatch via a table.
    tu_datasets = {
        "enzymes": ("/tmp/ENZYMES", "ENZYMES"),
        "proteins": ("/tmp/PROTEINS", "PROTEINS"),
        "cox2": ("/tmp/cox2", "COX2"),
        "aids": ("/tmp/AIDS", "AIDS"),
        "reddit-binary": ("/tmp/REDDIT-BINARY", "REDDIT-BINARY"),
        "imdb-binary": ("/tmp/IMDB-BINARY", "IMDB-BINARY"),
        "firstmm_db": ("/tmp/FIRSTMM_DB", "FIRSTMM_DB"),
        "dblp": ("/tmp/DBLP_v1", "DBLP_v1"),
    }
    if name in tu_datasets:
        root, tu_name = tu_datasets[name]
        dataset = TUDataset(root=root, name=tu_name)
    elif name == "ppi":
        dataset = PPI(root="/tmp/PPI")
    elif name == "qm9":
        dataset = QM9(root="/tmp/QM9")
    elif name == "atlas":
        dataset = [g for g in nx.graph_atlas_g()[1:] if nx.is_connected(g)]
    elif name == "amn":
        with open('data/all_g_pair.pkl', 'rb') as f:
            dataset_pre = pkl.load(f)
        pos_a_list, pos_b_list = [], []
        pos_a_align_list, pos_b_align_list = [], []
        for data_dict in tqdm(dataset_pre):
            for target_graph in data_dict['target']:
                pos_a_list.append(target_graph)
                pos_a_align_list.append(data_dict['alignment1'])
            for base_graph in data_dict['base']:
                pos_b_list.append(base_graph)
                pos_b_align_list.append(data_dict['alignment2'])
        # Pair target/base index-wise into flattened samples.
        dataset = []
        for i in tqdm(range(len(pos_a_list))):
            dataset.append([[pos_a_list[i], pos_b_list[i]],
                [pos_a_align_list[i], pos_b_align_list[i]]])
    if task == "graph":
        dataset = list(dataset)
        # In-order 80/20 split (no shuffling, matching existing behavior).
        train_len = int(0.8 * len(dataset))
        train, test = dataset[:train_len], dataset[train_len:]
    return train, test, task
class DataSource:
    """Abstract interface for the batch generators used in training/eval.

    Subclasses implement gen_batch, returning a
    (pos_target, pos_query, neg_target, neg_query) tuple of batches.
    """
    def gen_batch(self, batch_target, batch_neg_target, batch_neg_query, train):
        # BUG FIX: the original signature omitted `self`, so calling the
        # method on an instance raised TypeError instead of the intended
        # NotImplementedError.
        raise NotImplementedError
class OTFSynDataSource(DataSource):
""" On-the-fly generated synthetic data for training the subgraph model.
At every iteration, new batch of graphs (positive and negative) are generated
with a pre-defined generator (see combined_syn.py).
DeepSNAP transforms are used to generate the positive and negative examples.
"""
    def __init__(self, max_size=29, min_size=5, n_workers=4,
            max_queue_size=256, node_anchored=False):
        """Configure the on-the-fly synthetic graph generator.

        Graph sizes are drawn from min_size + 1 .. max_size (inclusive); see
        combined_syn.get_generator. n_workers and max_queue_size are accepted
        for interface compatibility but not stored on the instance.
        """
        self.closed = False  # lifecycle flag; True once the source is closed
        self.max_size = max_size
        self.min_size = min_size
        self.node_anchored = node_anchored
        # Shared synthetic-graph generator over the allowed size range.
        self.generator = combined_syn.get_generator(np.arange(
            self.min_size + 1, self.max_size + 1))
def gen_data_loaders(self, size, batch_size, train=True,
use_distributed_sampling=False):
loaders = []
for i in range(2):
dataset = combined_syn.get_dataset("graph", size // 2,
np.arange(self.min_size + 1, self.max_size + 1))
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=hvd.size(), rank=hvd.rank()) if \
use_distributed_sampling else None
loaders.append(TorchDataLoader(dataset,
collate_fn=Batch.collate([]), batch_size=batch_size // 2 if i
== 0 else batch_size // 2,
sampler=sampler, shuffle=False))
loaders.append([None]*(size // batch_size))
return loaders
    def gen_batch(self, batch_target, batch_neg_target, batch_neg_query,
        train):
        """Produce a training batch of (pos_target, pos_query, neg_target,
        neg_query) by sampling connected subgraphs from the given batches.

        Positive queries are sampled subgraphs of their targets; negative
        queries come from the generator or from perturbed ("hard negative")
        copies of the target subgraphs.
        """
        def sample_subgraph(graph, offset=0, use_precomp_sizes=False,
            filter_negs=False, supersample_small_graphs=False, neg_target=None,
            hard_neg_idxs=None):
            """Sample one connected subgraph of `graph` via random BFS-style
            frontier expansion; optionally perturb it into a hard negative."""
            if neg_target is not None: graph_idx = graph.G.graph["idx"]
            use_hard_neg = (hard_neg_idxs is not None and graph.G.graph["idx"]
                in hard_neg_idxs)
            done = False
            n_tries = 0
            while not done:
                # Choose the subgraph size for this attempt.
                if use_precomp_sizes:
                    size = graph.G.graph["subgraph_size"]
                else:
                    if train and supersample_small_graphs:
                        # Bias sampling toward smaller sizes via a power-law.
                        sizes = np.arange(self.min_size + offset,
                            len(graph.G) + offset)
                        ps = (sizes - self.min_size + 2) ** (-1.1)
                        ps /= ps.sum()
                        size = stats.rv_discrete(values=(sizes, ps)).rvs()
                    else:
                        d = 1 if train else 0
                        size = random.randint(self.min_size + offset - d,
                            len(graph.G) - 1 + offset)
                # Grow a connected node set from a random start node.
                start_node = random.choice(list(graph.G.nodes))
                neigh = [start_node]
                frontier = list(set(graph.G.neighbors(start_node)) - set(neigh))
                visited = set([start_node])
                while len(neigh) < size:
                    new_node = random.choice(list(frontier))
                    assert new_node not in neigh
                    neigh.append(new_node)
                    visited.add(new_node)
                    frontier += list(graph.G.neighbors(new_node))
                    frontier = [x for x in frontier if x not in visited]
                if self.node_anchored:
                    # Mark the first sampled node as the anchor feature.
                    anchor = neigh[0]
                    for v in graph.G.nodes:
                        graph.G.nodes[v]["node_feature"] = (torch.ones(1) if
                            anchor == v else torch.zeros(1))
                        #print(v, graph.G.nodes[v]["node_feature"])
                neigh = graph.G.subgraph(neigh)
                if use_hard_neg and train:
                    neigh = neigh.copy()
                    # With the current constant this branch always adds edges.
                    if random.random() < 1.0 or not self.node_anchored: # add edges
                        non_edges = list(nx.non_edges(neigh))
                        if len(non_edges) > 0:
                            for u, v in random.sample(non_edges, random.randint(1,
                                min(len(non_edges), 5))):
                                neigh.add_edge(u, v)
                    else: # perturb anchor
                        anchor = random.choice(list(neigh.nodes))
                        for v in neigh.nodes:
                            neigh.nodes[v]["node_feature"] = (torch.ones(1) if
                                anchor == v else torch.zeros(1))
                # Optionally reject easy negatives (small subgraphs that are
                # actually isomorphic to a subgraph of the negative target).
                if (filter_negs and train and len(neigh) <= 6 and neg_target is
                    not None):
                    matcher = nx.algorithms.isomorphism.GraphMatcher(
                        neg_target[graph_idx], neigh)
                    if not matcher.subgraph_is_isomorphic(): done = True
                else:
                    done = True
            return graph, DSGraph(neigh)

        augmenter = feature_preprocess.FeatureAugment()

        pos_target = batch_target
        pos_target, pos_query = pos_target.apply_transform_multi(sample_subgraph)
        neg_target = batch_neg_target
        # TODO: use hard negs
        # Half of the negatives are "hard": perturbed target subgraphs
        # instead of freshly generated graphs.
        hard_neg_idxs = set(random.sample(range(len(neg_target.G)),
            int(len(neg_target.G) * 1/2)))
        #hard_neg_idxs = set()
        batch_neg_query = Batch.from_data_list(
            [DSGraph(self.generator.generate(size=len(g))
                if i not in hard_neg_idxs else g)
                for i, g in enumerate(neg_target.G)])
        for i, g in enumerate(batch_neg_query.G):
            g.graph["idx"] = i
        _, neg_query = batch_neg_query.apply_transform_multi(sample_subgraph,
            hard_neg_idxs=hard_neg_idxs)
        if self.node_anchored:
            def add_anchor(g, anchors=None):
                # Set anchor features only where sampling has not already.
                if anchors is not None:
                    anchor = anchors[g.G.graph["idx"]]
                else:
                    anchor = random.choice(list(g.G.nodes))
                for v in g.G.nodes:
                    if "node_feature" not in g.G.nodes[v]:
                        g.G.nodes[v]["node_feature"] = (torch.ones(1) if anchor == v
                            else torch.zeros(1))
                return g
            neg_target = neg_target.apply_transform(add_anchor)
        pos_target = augmenter.augment(pos_target).to(utils.get_device())
        pos_query = augmenter.augment(pos_query).to(utils.get_device())
        neg_target = augmenter.augment(neg_target).to(utils.get_device())
        neg_query = augmenter.augment(neg_query).to(utils.get_device())
        #print(len(pos_target.G[0]), len(pos_query.G[0]))
        return pos_target, pos_query, neg_target, neg_query
class OTFSynImbalancedDataSource(OTFSynDataSource):
    """ Imbalanced on-the-fly synthetic data.

    Unlike the balanced dataset, this data source does not use 1:1 ratio for
    positive and negative examples. Instead, it randomly samples 2 graphs from
    the on-the-fly generator, and records the groundtruth label for the pair (subgraph or not).
    As a result, the data is imbalanced (subgraph relationships are rarer).
    This setting is a challenging model inference scenario.
    """
    def __init__(self, max_size=29, min_size=5, n_workers=4,
        max_queue_size=256, node_anchored=False):
        super().__init__(max_size=max_size, min_size=min_size,
            n_workers=n_workers, node_anchored=node_anchored)
        # Counter used to key the per-batch on-disk cache files.
        self.batch_idx = 0

    def gen_batch(self, graphs_a, graphs_b, _, train):
        """Label each (a, b) pair by an exact subgraph-isomorphism check and
        cache the split per batch index under data/cache/."""
        def add_anchor(g):
            # Anchor one random node; if anchoring is disabled, every node
            # gets the all-ones feature.
            anchor = random.choice(list(g.G.nodes))
            for v in g.G.nodes:
                g.G.nodes[v]["node_feature"] = (torch.ones(1) if anchor == v
                    or not self.node_anchored else torch.zeros(1))
            return g
        pos_a, pos_b, neg_a, neg_b = [], [], [], []
        fn = "data/cache/imbalanced-{}-{}".format(str(self.node_anchored),
            self.batch_idx)
        if not os.path.exists(fn):
            graphs_a = graphs_a.apply_transform(add_anchor)
            graphs_b = graphs_b.apply_transform(add_anchor)
            for graph_a, graph_b in tqdm(list(zip(graphs_a.G, graphs_b.G))):
                # With anchoring, node_match forces anchors to map to anchors.
                matcher = nx.algorithms.isomorphism.GraphMatcher(graph_a, graph_b,
                    node_match=(lambda a, b: (a["node_feature"][0] > 0.5) ==
                        (b["node_feature"][0] > 0.5)) if self.node_anchored else None)
                if matcher.subgraph_is_isomorphic():
                    pos_a.append(graph_a)
                    pos_b.append(graph_b)
                else:
                    neg_a.append(graph_a)
                    neg_b.append(graph_b)
            if not os.path.exists("data/cache"):
                os.makedirs("data/cache")
            with open(fn, "wb") as f:
                pickle.dump((pos_a, pos_b, neg_a, neg_b), f)
                print("saved", fn)
        else:
            with open(fn, "rb") as f:
                print("loaded", fn)
                pos_a, pos_b, neg_a, neg_b = pickle.load(f)
        print(len(pos_a), len(neg_a))
        # Positives can legitimately be empty in an imbalanced batch.
        if pos_a:
            pos_a = utils.batch_nx_graphs(pos_a)
            pos_b = utils.batch_nx_graphs(pos_b)
        neg_a = utils.batch_nx_graphs(neg_a)
        neg_b = utils.batch_nx_graphs(neg_b)
        self.batch_idx += 1
        return pos_a, pos_b, neg_a, neg_b
class DiskDataSource(DataSource):
    """ Uses a set of graphs saved in a dataset file to train the subgraph model.

    At every iteration, new batch of graphs (positive and negative) are generated
    by sampling subgraphs from a given dataset.

    See the load_dataset function for supported datasets.
    """
    def __init__(self, dataset_name, node_anchored=False, min_size=5,
        max_size=29):
        self.node_anchored = node_anchored
        self.dataset = load_dataset(dataset_name)
        self.min_size = min_size
        self.max_size = max_size
        train, test, task = self.dataset
        # Keep the first graph of each aligned pair for neighborhood sampling.
        self.train_set_gr = [pair[0][0] for pair in train]
        self.test_set_gr = [pair[0][0] for pair in test]

    def gen_data_loaders(self, size, batch_size, train=True,
        use_distributed_sampling=False):
        # Three parallel "loaders" that only carry the batch size per step;
        # gen_batch does the actual sampling.
        return [[batch_size] * (size // batch_size) for _ in range(3)]

    def gen_batch(self, a, b, c, train, max_size=15, min_size=5, seed=None,
        filter_negs=False, sample_method="tree-pair"):
        """Sample a half-positive / half-negative batch.

        a: the batch size (b and c are unused placeholders from the loader
        API). Positives are pre-aligned graph pairs drawn from the dataset
        with probability proportional to graph size; negatives are random
        sampled neighborhoods where the second graph is smaller than the
        first.
        Returns (pos_a, pos_b, neg_a, neg_b) as batched graph objects.
        """
        ht_encoder_file = '/root/NeuralAnology/subgraph_matching/ckpt/hot_encoder.pt'
        batch_size = a
        train_set, test_set, task = self.dataset
        if seed is not None:
            random.seed(seed)

        graphs = self.train_set_gr if train else self.test_set_gr
        graphs_full = train_set if train else test_set

        pos_a, pos_b = [], []
        pos_a_anchors, pos_b_anchors = [], []
        pos_a_align, pos_b_align = [], []
        for _ in range(batch_size // 2):
            # Sample a pair index with probability proportional to graph size.
            # Bug fix: np.float was removed in NumPy >= 1.24; use builtin float.
            ps = np.array([len(g) for g in graphs], dtype=float)
            ps /= np.sum(ps)
            dist = stats.rv_discrete(values=(np.arange(len(graphs)), ps))
            idx = dist.rvs()
            pos_a.append(graphs_full[idx][0][0])
            pos_b.append(graphs_full[idx][0][1])
            pos_a_align.append(graphs_full[idx][1][0])
            pos_b_align.append(graphs_full[idx][1][1])
            if self.node_anchored:
                # NOTE(review): anchors are always the first node, mirroring
                # the original implementation; other anchor choices untested.
                pos_a_anchors.append(list(graphs_full[idx][0][0].nodes)[0])
                pos_b_anchors.append(list(graphs_full[idx][0][1].nodes)[0])

        neg_a, neg_b = [], []
        neg_a_anchors, neg_b_anchors = [], []
        while len(neg_a) < batch_size // 2:
            if sample_method == "tree-pair":
                size = random.randint(min_size + 1, max_size)
                graph_a, nodes_a = utils.sample_neigh(graphs, size)
                graph_b, nodes_b = utils.sample_neigh(graphs,
                    random.randint(min_size, size - 1))
            elif sample_method == "subgraph-tree":
                graph_a = None
                while graph_a is None or len(graph_a) < min_size + 1:
                    graph_a = random.choice(graphs)
                nodes_a = graph_a.nodes
                graph_b, nodes_b = utils.sample_neigh(graphs,
                    random.randint(min_size, len(graph_a) - 1))
            if self.node_anchored:
                neg_a_anchors.append(list(graph_a.nodes)[0])
                neg_b_anchors.append(list(graph_b.nodes)[0])
            neigh_a, neigh_b = graph_a.subgraph(nodes_a), graph_b.subgraph(nodes_b)
            if filter_negs:
                # Reject accidental positives (b actually subgraph of a).
                matcher = nx.algorithms.isomorphism.GraphMatcher(neigh_a, neigh_b)
                if matcher.subgraph_is_isomorphic():
                    continue
            neg_a.append(neigh_a)
            neg_b.append(neigh_b)

        pos_a = utils.batch_nx_graphs(pos_a, pos_a_align, pos_b_align,
            ht_encoder=ht_encoder_file,
            anchors=pos_a_anchors if self.node_anchored else None)
        pos_b = utils.batch_nx_graphs(pos_b, pos_b_align, pos_a_align,
            ht_encoder=ht_encoder_file,
            anchors=pos_b_anchors if self.node_anchored else None)
        neg_a = utils.batch_nx_graphs(neg_a, ht_encoder=ht_encoder_file,
            anchors=neg_a_anchors if self.node_anchored else None)
        neg_b = utils.batch_nx_graphs(neg_b, ht_encoder=ht_encoder_file,
            anchors=neg_b_anchors if self.node_anchored else None)
        return pos_a, pos_b, neg_a, neg_b
class DiskImbalancedDataSource(OTFSynDataSource):
    """ Imbalanced on-the-fly real data.

    Unlike the balanced dataset, this data source does not use 1:1 ratio for
    positive and negative examples. Instead, it randomly samples 2 graphs from
    the on-the-fly generator, and records the groundtruth label for the pair (subgraph or not).
    As a result, the data is imbalanced (subgraph relationships are rarer).
    This setting is a challenging model inference scenario.
    """
    def __init__(self, dataset_name, max_size=29, min_size=5, n_workers=4,
        max_queue_size=256, node_anchored=False):
        super().__init__(max_size=max_size, min_size=min_size,
            n_workers=n_workers, node_anchored=node_anchored)
        # Counter used to key the per-batch on-disk cache files.
        self.batch_idx = 0
        self.dataset = load_dataset(dataset_name)
        self.train_set, self.test_set, _ = self.dataset
        self.dataset_name = dataset_name

    def gen_data_loaders(self, size, batch_size, train=True,
        use_distributed_sampling=False):
        """Build two loaders of randomly sampled neighborhoods from the real
        dataset, plus a placeholder list driving the batch count."""
        loaders = []
        for i in range(2):
            neighs = []
            for j in range(size // 2):
                graph, neigh = utils.sample_neigh(self.train_set if train else
                    self.test_set, random.randint(self.min_size, self.max_size))
                neighs.append(graph.subgraph(neigh))
            dataset = GraphDataset(neighs)
            loaders.append(TorchDataLoader(dataset,
                collate_fn=Batch.collate([]), batch_size=batch_size // 2 if i
                == 0 else batch_size // 2,
                sampler=None, shuffle=False))
        loaders.append([None]*(size // batch_size))
        return loaders

    def gen_batch(self, graphs_a, graphs_b, _, train):
        """Label each (a, b) pair by an exact subgraph-isomorphism check and
        cache the split (keyed by dataset name and batch index)."""
        def add_anchor(g):
            # Anchor one random node; without anchoring all nodes get ones.
            anchor = random.choice(list(g.G.nodes))
            for v in g.G.nodes:
                g.G.nodes[v]["node_feature"] = (torch.ones(1) if anchor == v
                    or not self.node_anchored else torch.zeros(1))
            return g
        pos_a, pos_b, neg_a, neg_b = [], [], [], []
        fn = "data/cache/imbalanced-{}-{}-{}".format(self.dataset_name.lower(),
            str(self.node_anchored), self.batch_idx)
        if not os.path.exists(fn):
            graphs_a = graphs_a.apply_transform(add_anchor)
            graphs_b = graphs_b.apply_transform(add_anchor)
            for graph_a, graph_b in tqdm(list(zip(graphs_a.G, graphs_b.G))):
                # With anchoring, node_match forces anchors to map to anchors.
                matcher = nx.algorithms.isomorphism.GraphMatcher(graph_a, graph_b,
                    node_match=(lambda a, b: (a["node_feature"][0] > 0.5) ==
                        (b["node_feature"][0] > 0.5)) if self.node_anchored else None)
                if matcher.subgraph_is_isomorphic():
                    pos_a.append(graph_a)
                    pos_b.append(graph_b)
                else:
                    neg_a.append(graph_a)
                    neg_b.append(graph_b)
            if not os.path.exists("data/cache"):
                os.makedirs("data/cache")
            with open(fn, "wb") as f:
                pickle.dump((pos_a, pos_b, neg_a, neg_b), f)
                print("saved", fn)
        else:
            with open(fn, "rb") as f:
                print("loaded", fn)
                pos_a, pos_b, neg_a, neg_b = pickle.load(f)
        # print(len(pos_a), len(neg_a))
        # Positives can legitimately be empty in an imbalanced batch.
        if pos_a:
            pos_a = utils.batch_nx_graphs(pos_a)
            pos_b = utils.batch_nx_graphs(pos_b)
        neg_a = utils.batch_nx_graphs(neg_a)
        neg_b = utils.batch_nx_graphs(neg_b)
        self.batch_idx += 1
        return pos_a, pos_b, neg_a, neg_b
def messed_samples(data_trial):
    """Report pairs whose first graph has fewer nodes than its second graph.

    Prints how many such pairs were found and returns one
    "idx: n_first, n_second" string per offending pair.
    """
    flagged = []
    for idx, sample in enumerate(data_trial):
        n_first = len(list(sample[0][0].nodes))
        n_second = len(list(sample[0][1].nodes))
        if n_first < n_second:
            flagged.append("{}: {}, {}".format(idx, n_first, n_second))
    print(len(flagged))
    return flagged
def hot_encoder(dataname = "amn", file_name = 'hot_encoder.pt' ):
    """Fit a LabelEncoder over every node label in a dataset and pickle it.

    Scans both the train and test splits (train first, preserving the
    original label-discovery order) and writes the fitted encoder to
    `file_name`.

    Bug fix: the encoder file was opened with a bare open() and never
    closed; use a context manager so the handle is released.
    """
    train, test, task = load_dataset(dataname)
    label_type_list = []
    # Collect distinct labels in first-seen order (train split, then test).
    for split in (train, test):
        for data_pair in split:
            for data in data_pair[0]:
                for node in list(data.nodes(data=True)):
                    if node[1]['label'] not in label_type_list:
                        label_type_list.append(node[1]['label'])
    enc = preprocessing.LabelEncoder()
    feature_array = np.array(label_type_list)
    enc.fit(feature_array)
    with open(file_name, 'wb') as f:
        pkl.dump(enc, f)
if __name__ == "__main__":
    # Quick diagnostic: scatter-plot clustering coefficient vs. average
    # shortest-path length for sampled size-11 neighborhoods of each dataset.
    import matplotlib.pyplot as plt
    plt.rcParams.update({"font.size": 14})
    # for name in ["enzymes", "reddit-binary", "cox2"]:
    for name in ["amn"]:
        data_source = DiskDataSource(name)
        train, test, _ = data_source.dataset
        graphs = []
        for graph in train:
            graphs.append(graph[0][0])
        i = 11
        # Sample 10 neighborhoods of i nodes each.
        neighs = [utils.sample_neigh(graphs, i) for j in range(10)]
        clustering = [nx.average_clustering(graph.subgraph(nodes)) for graph,
            nodes in neighs]
        path_length = [nx.average_shortest_path_length(graph.subgraph(nodes))
            for graph, nodes in neighs]
        #plt.subplot(1, 2, i-9)
        plt.scatter(clustering, path_length, s=10, label=name)
    plt.legend()
    plt.savefig("clustering-vs-path-length.png")
| 24,005 | 44.20904 | 159 | py |
DeepGAR | DeepGAR-main/common/models.py | """Defines all graph embedding models"""
from functools import reduce
import random
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as pyg_nn
import torch_geometric.utils as pyg_utils
from common import utils
from common import feature_preprocess
from sklearn.metrics import roc_auc_score
import sklearn.metrics
# GNN -> concat -> MLP graph classification baseline
class BaselineMLP(nn.Module):
    """Baseline graph classifier: embed two graphs, concatenate, score with an MLP."""

    def __init__(self, input_dim, hidden_dim, args):
        super(BaselineMLP, self).__init__()
        self.emb_model = SkipLastGNN(input_dim, hidden_dim, hidden_dim, args)
        self.mlp = nn.Sequential(
            nn.Linear(2 * hidden_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 2))

    def forward(self, emb_motif, emb_motif_mod):
        """Return per-pair log-probabilities over the two classes."""
        joint = torch.cat((emb_motif, emb_motif_mod), dim=1)
        return F.log_softmax(self.mlp(joint), dim=1)

    def predict(self, pred):
        # Scores are returned unchanged; argmax is left to the caller.
        return pred

    def criterion(self, pred, _, label):
        """Negative log-likelihood on the log-softmax scores."""
        return F.nll_loss(pred, label)
class MLP_Align_Predictor(nn.Module):
    """Scores node-alignment candidates with a 2-layer MLP over concatenated
    node embeddings."""
    def __init__(self, h_feats):
        super().__init__()
        self.W1 = nn.Linear(h_feats * 2, h_feats)
        self.W2 = nn.Linear(h_feats, 1)

    def apply_edges(self, src, dst):
        # Concatenate the two 1-D node embeddings and score the pair.
        h = torch.cat([src, dst], 0)
        score = self.W2(F.relu(self.W1(h)))
        return score

    def predict(self, pred, align_mat_batch):
        """Predict if b is a subgraph of a (batched), where emb_as, emb_bs = pred.

        pred: list (emb_as, emb_bs) of embeddings of graph pairs
        Returns: list of bools (whether a is subgraph of b in the pair)

        NOTE(review): this method references self.mlp_model_nodes, which is
        never defined on this class (it exists on OrderEmbedder); calling
        predict here raises AttributeError. It looks copied from
        OrderEmbedder — presumably it should call self.forward — confirm
        before use.
        """
        emb_as, emb_bs, emb_as_nodes, emb_bs_nodes = pred
        # Graph-level order violation: positive part of (emb_b - emb_a).
        e = torch.sum(torch.max(torch.zeros_like(emb_as,
            device=emb_as.device), emb_bs - emb_as) ** 2, dim=1)

        # Pairwise squared node-embedding distances, flattened per pair.
        e2_all = []
        for i in range(len(emb_as_nodes)):
            e2 = torch.sum((emb_bs_nodes[i].unsqueeze(0).repeat(emb_as_nodes[i].size(0), 1, 1) -
                emb_as_nodes[i].unsqueeze(1).repeat(1, emb_bs_nodes[i].size(0), 1)) ** 2, dim=2).flatten()
            e2_all.append(e2)
        e2_all_2 = torch.cat(e2_all, dim=-1)

        # Node alignment
        align_mat_batch_scores = []
        for i, align_mat in enumerate(align_mat_batch):
            # MLP Predictor
            align_mat_scores = self.mlp_model_nodes(emb_as_nodes[i], emb_bs_nodes[i])
            align_mat_batch_scores.append(align_mat_scores)

        return e, e2_all_2, align_mat_batch_scores

    def forward(self, emb_as_nodes, emb_bs_nodes, align_mat_batch):
        """Score every (node_a, node_b) pair per graph pair with the MLP and
        return the flattened, concatenated score vector."""
        # Similarity Matrix
        # euclid_dist=sklearn.metrics.pairwise.euclidean_distances(emb_pos_a_nodes.detach().numpy(), emb_pos_b_nodes.detach().numpy())
        # cos_dist = sklearn.metrics.pairwise.cosine_similarity(emb_pos_a_nodes.detach().numpy(), emb_pos_b_nodes.detach().numpy())
        align_mat_batch_scores = []
        for i, align_mat in enumerate(align_mat_batch):
            # Score Matrix: one MLP score per (node in a, node in b) pair.
            align_mat_score = torch.zeros(len(emb_as_nodes[i]), len(emb_bs_nodes[i]))
            # for i, a_n in enumerate(pos_a.node_label[sample_idx]):
            # print(align_mat_score.shape,emb_as_nodes[i].shape, emb_bs_nodes[i].shape)
            for j, a_n in enumerate(emb_as_nodes[i]):
                for k, b_n in enumerate(emb_bs_nodes[i]):
                    align_mat_score[j][k]=self.apply_edges(a_n, b_n)
            align_mat_batch_scores.append(align_mat_score.flatten())
        pred_scores = torch.cat(align_mat_batch_scores, dim=-1)
        return pred_scores
# Order embedder model -- contains a graph embedding model `emb_model`
class OrderEmbedder(nn.Module):
    """Order-embedding model for subgraph prediction.

    Wraps a SkipLastGNN encoder (`emb_model`). Graphs are embedded so that
    "b is a subgraph of a" corresponds to emb_b <= emb_a coordinate-wise;
    violations are penalized with a squared hinge.
    """

    def __init__(self, input_dim, hidden_dim, args):
        super(OrderEmbedder, self).__init__()
        self.emb_model = SkipLastGNN(input_dim, hidden_dim, hidden_dim, args)
        self.margin = args.margin  # hinge margin required of negative pairs
        self.use_intersection = False

        # Small heads mapping a violation score to a 2-way log-prob decision.
        self.clf_model = nn.Sequential(nn.Linear(1, 2), nn.LogSoftmax(dim=-1))
        self.clf_model_nodes = nn.Sequential(nn.Linear(1, 2), nn.LogSoftmax(dim=-1))
        self.mlp_model_nodes = MLP_Align_Predictor(hidden_dim)

    def forward(self, emb_as, emb_bs, emb_as_nodes, emb_bs_nodes):
        # Embeddings are produced by emb_model upstream; forward just passes
        # them through so predict/criterion can consume a single tuple.
        return emb_as, emb_bs, emb_as_nodes, emb_bs_nodes

    def predict(self, pred):
        """Predict if b is a subgraph of a (batched), where emb_as, emb_bs = pred.

        pred: tuple (emb_as, emb_bs, emb_as_nodes, emb_bs_nodes)
        Returns:
            e: per-pair graph-level order violation (0 => likely subgraph)
            e2_all_2: flattened pairwise node-embedding squared distances
        """
        emb_as, emb_bs, emb_as_nodes, emb_bs_nodes = pred
        e = torch.sum(torch.max(torch.zeros_like(emb_as,
            device=emb_as.device), emb_bs - emb_as)**2, dim=1)

        e2_all = []
        for i in range(len(emb_as_nodes)):
            # All (node in a) x (node in b) squared distances, row-major.
            e2 = torch.sum((emb_bs_nodes[i].unsqueeze(0).repeat(emb_as_nodes[i].size(0), 1, 1) -
                emb_as_nodes[i].unsqueeze(1).repeat(1, emb_bs_nodes[i].size(0), 1)) ** 2, dim=2).flatten()
            e2_all.append(e2)
        e2_all_2 = torch.cat(e2_all, dim=-1)
        return e, e2_all_2

    def criterion(self, pred, intersect_embs, labels, align_mat_batch):
        """Loss function for order emb: graph-level hinge + node alignment.

        For positive pairs the order violation e is minimized toward 0; for
        negative pairs it must exceed self.margin. The same scheme is applied
        per node pair, guided by the 0/1 alignment matrices.

        pred: tuple of embeddings as returned by forward
        intersect_embs: not used
        labels: 1/0 subgraph labels per pair
        align_mat_batch: per-pair (n_a, n_b) node-alignment matrices

        Bug fix: the node-alignment block referenced loop variables (i,
        align_mat) and an accumulator (node_align_loss) whose definitions
        were commented out, raising NameError; the loop is restored. The
        accumulator is combined with torch.stack (torch.tensor on a list of
        tensors would detach the graph and break autograd).
        """
        emb_as, emb_bs, emb_as_nodes, emb_bs_nodes = pred

        # Graph-level order violation.
        e1 = torch.sum(torch.max(torch.zeros_like(emb_as,
            device=utils.get_device()), emb_bs - emb_as)**2, dim=1)
        margin = self.margin
        # Negative pairs (label 0) are pushed to violate by at least margin.
        e1[labels == 0] = torch.max(torch.tensor(0.0,
            device=utils.get_device()), margin - e1)[labels == 0]

        # Node-level alignment: aligned node pairs are pulled together,
        # unaligned pairs pushed at least `margin` apart.
        node_align_loss = []
        for i, align_mat in enumerate(align_mat_batch):
            e2 = torch.sum((emb_bs_nodes[i].unsqueeze(0).repeat(emb_as_nodes[i].size(0), 1, 1) -
                emb_as_nodes[i].unsqueeze(1).repeat(1, emb_bs_nodes[i].size(0), 1))**2, dim=2)
            e2[align_mat == 0] = torch.max(torch.tensor(0.0,
                device=utils.get_device()), margin - e2)[align_mat == 0]
            node_align_loss.append(torch.sum(e2))

        relation_loss = torch.sum(e1)
        if node_align_loss:  # guard: batch may carry no alignment matrices
            relation_loss = relation_loss + torch.stack(node_align_loss).sum()
        return relation_loss
class SkipLastGNN(nn.Module):
    """Multi-layer GNN with configurable skip connections ('all' or
    'learnable'), returning both a pooled graph embedding and per-graph node
    embedding matrices."""
    def __init__(self, input_dim, hidden_dim, output_dim, args):
        super(SkipLastGNN, self).__init__()
        self.dropout = args.dropout
        self.n_layers = args.n_layers

        # Optional feature-augmentation preprocessor may change input_dim.
        if len(feature_preprocess.FEATURE_AUGMENT) > 0:
            self.feat_preprocess = feature_preprocess.Preprocess(input_dim)
            input_dim = self.feat_preprocess.dim_out
        else:
            self.feat_preprocess = None

        # PNA concatenates sum/mean/max branches, hence the 3x width.
        self.pre_mp = nn.Sequential(nn.Linear(input_dim, 3*hidden_dim if
            args.conv_type == "PNA" else hidden_dim))

        conv_model = self.build_conv_model(args.conv_type, 1)
        if args.conv_type == "PNA":
            self.convs_sum = nn.ModuleList()
            self.convs_mean = nn.ModuleList()
            self.convs_max = nn.ModuleList()
        else:
            self.convs = nn.ModuleList()

        if args.skip == 'learnable':
            # learnable_skip[i, j]: gate from layer j's output into layer i.
            self.learnable_skip = nn.Parameter(torch.ones(self.n_layers,
                self.n_layers))

        for l in range(args.n_layers):
            # With skip connections, layer l consumes all previous outputs.
            if args.skip == 'all' or args.skip == 'learnable':
                hidden_input_dim = hidden_dim * (l + 1)
            else:
                hidden_input_dim = hidden_dim
            if args.conv_type == "PNA":
                self.convs_sum.append(conv_model(3*hidden_input_dim, hidden_dim))
                self.convs_mean.append(conv_model(3*hidden_input_dim, hidden_dim))
                self.convs_max.append(conv_model(3*hidden_input_dim, hidden_dim))
            else:
                self.convs.append(conv_model(hidden_input_dim, hidden_dim))

        post_input_dim = hidden_dim * (args.n_layers + 1)
        if args.conv_type == "PNA":
            post_input_dim *= 3
        # NOTE(review): nn.Linear(hidden_dim, output_dim) is followed by
        # nn.Linear(hidden_dim, 256); this only lines up when
        # output_dim == hidden_dim (true at the call sites in this file).
        self.post_mp = nn.Sequential(
            nn.Linear(post_input_dim, hidden_dim), nn.Dropout(args.dropout),
            nn.LeakyReLU(0.1),
            nn.Linear(hidden_dim, output_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 256), nn.ReLU(),
            nn.Linear(256, hidden_dim))
        #self.batch_norm = nn.BatchNorm1d(output_dim, eps=1e-5, momentum=0.1)

        self.skip = args.skip
        self.conv_type = args.conv_type

    def build_conv_model(self, model_type, n_inner_layers):
        """Return a constructor (in_dim, out_dim) -> conv layer for the
        requested conv type."""
        if model_type == "GCN":
            return pyg_nn.GCNConv
        elif model_type == "GIN":
            #return lambda i, h: pyg_nn.GINConv(nn.Sequential(
            #    nn.Linear(i, h), nn.ReLU()))
            return lambda i, h: GINConv(nn.Sequential(
                nn.Linear(i, h), nn.ReLU(), nn.Linear(h, h)
                ))
        elif model_type == "SAGE":
            return SAGEConv
        elif model_type == "graph":
            return pyg_nn.GraphConv
        elif model_type == "GAT":
            return pyg_nn.GATConv
        elif model_type == "gated":
            return lambda i, h: pyg_nn.GatedGraphConv(h, n_inner_layers)
        elif model_type == "PNA":
            return SAGEConv
        else:
            print("unrecognized model type")

    def forward(self, data):
        """Return (graph_embedding, node_emb_mat) where node_emb_mat holds one
        (n_nodes_i, dim) matrix per graph in the batch."""
        # if data.x is None:
        #     data.x = torch.ones((data.num_nodes, 1), device=utils.get_device())
        # x = self.pre_mp(x)
        if self.feat_preprocess is not None:
            if not hasattr(data, "preprocessed"):
                data = self.feat_preprocess(data)
                data.preprocessed = True
        x, edge_index, batch, node_label = data.node_feature, data.edge_index, data.batch, data.node_label
        x = self.pre_mp(x)

        all_emb = x.unsqueeze(1)
        emb = x
        for i in range(len(self.convs_sum) if self.conv_type == "PNA" else
            len(self.convs)):
            if self.skip == 'learnable':
                # Gate each previous layer's output before this layer's conv.
                skip_vals = self.learnable_skip[i,
                    :i + 1].unsqueeze(0).unsqueeze(-1)
                curr_emb = all_emb * torch.sigmoid(skip_vals)
                curr_emb = curr_emb.view(x.size(0), -1)
                if self.conv_type == "PNA":
                    x = torch.cat((self.convs_sum[i](curr_emb, edge_index),
                        self.convs_mean[i](curr_emb, edge_index),
                        self.convs_max[i](curr_emb, edge_index)), dim=-1)
                else:
                    x = self.convs[i](curr_emb, edge_index)
            elif self.skip == 'all':
                if self.conv_type == "PNA":
                    x = torch.cat((self.convs_sum[i](emb, edge_index),
                        self.convs_mean[i](emb, edge_index),
                        self.convs_max[i](emb, edge_index)), dim=-1)
                else:
                    x = self.convs[i](emb, edge_index)
            else:
                x = self.convs[i](x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            emb = torch.cat((emb, x), 1)
            if self.skip == 'learnable':
                all_emb = torch.cat((all_emb, x.unsqueeze(1)), 1)

        # x = pyg_nn.global_mean_pool(x, batch)
        emb = pyg_nn.global_add_pool(emb, batch)
        emb = self.post_mp(emb)

        # Unpack per-node embeddings back into one padded tensor per graph.
        # NOTE(review): the node embeddings come from all_emb[..., -1, :],
        # which only accumulates layer outputs when skip == 'learnable';
        # with other skip modes it stays the pre_mp output — confirm intended.
        dim_0, dim_1 = torch.unique(batch, return_counts=True)
        dim_0, dim_1 = dim_0.to(data.node_feature.device), dim_1.to(data.node_feature.device)
        node_embedding = torch.zeros(len(dim_0), torch.max(dim_1), all_emb.shape[-1]).to(data.node_feature.device)
        node_count = 0
        for d0 in range(len(dim_0)):
            node_embedding[d0, 0:dim_1[d0], :] = all_emb[node_count:node_count + dim_1[d0], -1, :]
            node_count += dim_1[d0]
        node_emb_mat = []
        for i, nd_lbl in enumerate(node_label):
            node_emb_mat.append(node_embedding[i][0:len(nd_lbl), :])

        # emb = self.batch_norm(emb)  # TODO: test
        # out = F.log_softmax(emb, dim=1)
        return emb, node_emb_mat

    def loss(self, pred, label):
        """Negative log-likelihood over log-softmax predictions."""
        return F.nll_loss(pred, label)
class SAGEConv(pyg_nn.MessagePassing):
    """GraphSAGE-style convolution: aggregate linearly-transformed neighbor
    features, concatenate with the central node, then project.

    NOTE(review): the default aggregation is "add", not the mean typical of
    GraphSAGE — confirm that is intentional for this model."""
    def __init__(self, in_channels, out_channels, aggr="add"):
        super(SAGEConv, self).__init__(aggr=aggr)

        self.in_channels = in_channels
        self.out_channels = out_channels

        # Transform applied to neighbor messages.
        self.lin = nn.Linear(in_channels, out_channels)
        # Combines aggregated messages with the central node's own features.
        self.lin_update = nn.Linear(out_channels + in_channels,
            out_channels)

    def forward(self, x, edge_index, edge_weight=None, size=None,
                res_n_id=None):
        """
        Args:
            res_n_id (Tensor, optional): Residual node indices coming from
                :obj:`DataFlow` generated by :obj:`NeighborSampler` are used to
                select central node features in :obj:`x`.
                Required if operating in a bipartite graph and :obj:`concat` is
                :obj:`True`. (default: :obj:`None`)
        """
        #edge_index, edge_weight = add_remaining_self_loops(
        #    edge_index, edge_weight, 1, x.size(self.node_dim))
        # Self-loops are removed; self information re-enters via lin_update.
        edge_index, _ = pyg_utils.remove_self_loops(edge_index)

        return self.propagate(edge_index, size=size, x=x,
                              edge_weight=edge_weight, res_n_id=res_n_id)

    def message(self, x_j, edge_weight):
        # Neighbor features are linearly transformed (edge weights unused).
        #return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
        return self.lin(x_j)

    def update(self, aggr_out, x, res_n_id):
        # Concatenate aggregated neighborhood with the node's own features.
        aggr_out = torch.cat([aggr_out, x], dim=-1)

        aggr_out = self.lin_update(aggr_out)
        #aggr_out = torch.matmul(aggr_out, self.weight)
        #if self.bias is not None:
        #    aggr_out = aggr_out + self.bias
        #if self.normalize:
        #    aggr_out = F.normalize(aggr_out, p=2, dim=-1)
        return aggr_out

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
                                   self.out_channels)
# pytorch geom GINConv + weighted edges
class GINConv(pyg_nn.MessagePassing):
    """GIN convolution extended with optional per-edge weights: messages are
    neighbor features scaled by edge_weight before sum aggregation."""
    def __init__(self, nn, eps=0, train_eps=False, **kwargs):
        super(GINConv, self).__init__(aggr='add', **kwargs)
        self.nn = nn  # MLP applied to (1 + eps) * x + aggregated messages
        self.initial_eps = eps
        if train_eps:
            self.eps = torch.nn.Parameter(torch.Tensor([eps]))
        else:
            self.register_buffer('eps', torch.Tensor([eps]))
        self.reset_parameters()

    def reset_parameters(self):
        # NOTE(review): resetting the inner MLP is disabled; only eps is reset.
        #reset(self.nn)
        self.eps.data.fill_(self.initial_eps)

    def forward(self, x, edge_index, edge_weight=None):
        """"""
        x = x.unsqueeze(-1) if x.dim() == 1 else x
        # Drop self-loops; self contribution is the explicit (1 + eps) * x term.
        edge_index, edge_weight = pyg_utils.remove_self_loops(edge_index,
            edge_weight)
        out = self.nn((1 + self.eps) * x + self.propagate(edge_index, x=x,
            edge_weight=edge_weight))
        return out

    def message(self, x_j, edge_weight):
        # Weight each neighbor's features if edge weights are provided.
        return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j

    def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
| 16,350 | 41.250646 | 191 | py |
nepali-ner | nepali-ner-master/app.py | """
Needs code structuring
Date - 08/14/2020
"""
import torch
import logging
import sys
from flask import Flask, render_template, request
from utils.dataloader2 import Dataloader
from models.models import LSTMTagger
from config.config import Configuration
app = Flask(__name__)
def get_logger():
    """Return the shared app logger, configuring root output exactly once.

    Bug fix: every call previously attached a fresh StreamHandler to the
    root logger, so each invocation duplicated every subsequent log line.
    The handler is now only added if an equivalent stdout handler is not
    already installed.
    """
    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format="%(message)s", level=logging.INFO)
    root = logging.getLogger()
    # Skip installation if our stdout handler is already attached.
    already_installed = any(
        isinstance(h, logging.StreamHandler) and
        getattr(h, "stream", None) is sys.stdout
        for h in root.handlers)
    if not already_installed:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(
            "%(levelname)s:%(message)s"
        ))
        root.addHandler(handler)
    return logger
def get_config():
    """Build the Configuration used for single-sentence web inference.

    Paths and flags are hard-coded: CPU device, saved LSTM weights, no POS
    features, inference mode on.
    """
    logger = get_logger()
    config = Configuration(config_file="./config/config.ini", logger=logger)

    config.model_file = './saved_models/lstm_1.pth'
    config.vocab_file = './vocab/vocab.pkl'
    config.label_file = './vocab/labels.pkl'

    config.device = 'cpu'
    config.verbose = False
    config.eval = False
    config.use_pos = False
    config.infer = True
    return config
def pred_to_tag(dataloader, predictions):
    """Map numeric label ids back to tag strings via the label vocabulary."""
    itos = dataloader.label_field.vocab.itos
    return ' '.join(itos[idx] for idx in predictions).split()
def infer(config, dataloader, model):
    """Tag the pre-tokenized sentence held in config.txt with `model`."""
    tokens = config.txt
    # Map tokens to vocabulary indices and add a batch dimension.
    token_ids = [dataloader.txt_field.vocab.stoi[tok] for tok in tokens]
    batch = torch.LongTensor(token_ids).to(config.device).unsqueeze(0)
    # POS input is None in inference mode.
    scores = model(batch, None)
    predicted_ids = torch.max(scores, 1)[1].cpu().data.numpy().tolist()
    return pred_to_tag(dataloader, predicted_ids)
# Inference section
def inference(config):
    """Load the trained tagger onto config.device and tag config.txt."""
    dataloader = Dataloader(config, '1')
    model = LSTMTagger(config, dataloader).to(config.device)
    # Checkpoints were saved on GPU; map_location remaps tensors onto the
    # currently requested device.
    checkpoint = torch.load(config.model_file, map_location=config.device)
    model.load_state_dict(checkpoint['state_dict'])
    return infer(config, dataloader, model)
@app.route('/')
def hello():
    """Serve the landing page with the input form."""
    page = render_template('index.html')
    return page
@app.route('/post', methods=['GET', 'POST'])
def post():
    """Handle the tagging form: GET renders the page, POST runs inference.

    Bug fix: the original read request.form['input'] and ran the model
    BEFORE checking the request method, so a plain GET (which the route
    allows) aborted with a 400 instead of rendering the page. The method
    check now happens first, and inference only runs for POST.
    """
    if request.method == "GET":
        return render_template('index.html')

    config = get_config()
    errors = []
    text = request.form['input']
    config.txt = text.split()
    res = inference(config)
    # Pair each token with its predicted tag for the template.
    results = zip(config.txt, res)
    return render_template('index.html', errors=errors, results=results)
if __name__ == "__main__":
    # Start the Flask development server (not intended for production).
    app.run()
| 2,497 | 25.294737 | 98 | py |
nepali-ner | nepali-ner-master/train.py | #!/usr/bin/env python3
'''
Trainer
Author: Oyesh Mann Singh
'''
import os
from utils.eval import Evaluator
from tqdm import tqdm, tqdm_notebook, tnrange
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import accuracy_score
# Fix the RNG seed so training runs are reproducible.
torch.manual_seed(163)
# Enable tqdm progress bars for pandas operations.
tqdm.pandas(desc='Progress')
# Decay functions to be used with lr_scheduler
def lr_decay_noam(config):
    """Noam schedule: linear warmup followed by inverse-square-root decay,
    scaled by model size (hidden_dim ** -0.5)."""
    def decay(t):
        step = t + 1
        return (10.0 * config.hidden_dim ** -0.5 *
                min(step * config.learning_rate_warmup_steps ** -1.5,
                    step ** -0.5))
    return decay
def lr_decay_exp(config):
    """Exponential schedule: multiply by the falloff factor once per unit t."""
    def decay(t):
        return config.learning_rate_falloff ** t
    return decay
# Map names to lr decay functions. Keys correspond to the scheme prefix in
# config.lr_rate_decay (e.g. "noam_step", "exp_epoch"); see Trainer.__init__.
lr_decay_map = {
    'noam': lr_decay_noam,
    'exp': lr_decay_exp
}
class Trainer:
    def __init__(self, config, logger, dataloader, model, k):
        """Set up data loaders, optimizer, optional LR decay, and checkpoint
        bookkeeping for fold `k` of cross-validation.

        config: run configuration (hyperparameters, paths, flags)
        logger: logger for progress messages
        dataloader: provides load_data() -> (train, val, test) iterators
        model: the network to train
        k: fold identifier string, appended to the model name
        """
        self.config = config
        self.logger = logger
        self.dataloader = dataloader
        self.verbose = config.verbose
        self.use_pos = config.use_pos
        self.train_dl, self.val_dl, self.test_dl = dataloader.load_data(batch_size=config.batch_size)

        ### DO NOT DELETE
        ### DEBUGGING PURPOSE
        # sample = next(iter(self.train_dl))
        # print(sample.TEXT)
        # print(sample.LABEL)
        # print(sample.POS)

        self.train_dlen = len(self.train_dl)
        self.val_dlen = len(self.val_dl)
        self.test_dlen = len(self.test_dl)

        self.model = model
        self.epochs = config.epochs

        # NLLLoss pairs with the model's log-softmax output.
        self.loss_fn = nn.NLLLoss()
        # Only optimize parameters that require gradients (frozen embeddings
        # are excluded).
        self.opt = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
                              lr=config.learning_rate,
                              weight_decay=config.weight_decay)

        self.lr_scheduler_step = self.lr_scheduler_epoch = None
        # Set up learning rate decay scheme; config.lr_rate_decay is
        # "<scheme>_<range>", e.g. "noam_step" or "exp_epoch".
        if config.use_lr_decay:
            if '_' not in config.lr_rate_decay:
                raise ValueError("Malformed learning_rate_decay")
            lrd_scheme, lrd_range = config.lr_rate_decay.split('_')
            if lrd_scheme not in lr_decay_map:
                raise ValueError("Unknown lr decay scheme {}".format(lrd_scheme))
            lrd_func = lr_decay_map[lrd_scheme]
            lr_scheduler = optim.lr_scheduler.LambdaLR(
                self.opt,
                lrd_func(config),
                last_epoch=-1
            )
            # For each scheme, decay can happen every step or every epoch
            if lrd_range == 'epoch':
                self.lr_scheduler_epoch = lr_scheduler
            elif lrd_range == 'step':
                self.lr_scheduler_step = lr_scheduler
            else:
                raise ValueError("Unknown lr decay range {}".format(lrd_range))

        self.k = k
        self.model_name = config.model_name + self.k
        self.file_name = self.model_name + '.pth'
        self.model_file = os.path.join(config.output_dir, self.file_name)

        # Per-epoch training history, persisted with checkpoints.
        self.total_train_loss = []
        self.total_train_acc = []
        self.total_val_loss = []
        self.total_val_acc = []

        self.early_max_patience = config.early_max_patience
    def load_checkpoint(self):
        """Restore model/optimizer state and training history from model_file.

        NOTE(review): the checkpoint stores the optimizer object itself, so
        torch.load relies on full pickle deserialization; on torch >= 2.6
        this requires weights_only=False — confirm against the deployed
        torch version.
        """
        checkpoint = torch.load(self.model_file)
        self.model.load_state_dict(checkpoint['state_dict'])
        # The optimizer object is replaced wholesale, then its state restored.
        self.opt = checkpoint['opt']
        self.opt.load_state_dict(checkpoint['opt_state'])
        self.total_train_loss = checkpoint['train_loss']
        self.total_train_acc = checkpoint['train_acc']
        self.total_val_loss = checkpoint['val_loss']
        self.total_val_acc = checkpoint['val_acc']
        self.epochs = checkpoint['epochs']
def save_checkpoint(self):
save_parameters = {'state_dict': self.model.state_dict(),
'opt': self.opt,
'opt_state': self.opt.state_dict(),
'train_loss': self.total_train_loss,
'train_acc': self.total_train_acc,
'val_loss': self.total_val_loss,
'val_acc': self.total_val_acc,
'epochs': self.epochs}
torch.save(save_parameters, self.model_file)
def fit(self):
prev_lstm_val_acc = 0.0
prev_val_loss = 100.0
counter = 0
patience_limit = 10
for epoch in tnrange(0, self.epochs):
y_true_train = list()
y_pred_train = list()
total_loss_train = 0
t = tqdm(iter(self.train_dl), leave=False, total=self.train_dlen)
for (k, v) in t:
t.set_description(f'Epoch {epoch + 1}')
self.model.train()
self.opt.zero_grad()
if self.use_pos:
(X, p, y) = k
pred = self.model(X, p)
else:
(X, y) = k
pred = self.model(X, None)
y = y.view(-1)
loss = self.loss_fn(pred, y)
loss.backward()
self.opt.step()
if self.lr_scheduler_step:
self.lr_scheduler_step.step()
t.set_postfix(loss=loss.item())
pred_idx = torch.max(pred, dim=1)[1]
y_true_train += list(y.cpu().data.numpy())
y_pred_train += list(pred_idx.cpu().data.numpy())
total_loss_train += loss.item()
train_acc = accuracy_score(y_true_train, y_pred_train)
train_loss = total_loss_train / self.train_dlen
self.total_train_loss.append(train_loss)
self.total_train_acc.append(train_acc)
if self.val_dl:
y_true_val = list()
y_pred_val = list()
total_loss_val = 0
v = tqdm(iter(self.val_dl), leave=False)
for (k, v) in v:
if self.use_pos:
(X, p, y) = k
pred = self.model(X, p)
else:
(X, y) = k
pred = self.model(X, None)
y = y.view(-1)
loss = self.loss_fn(pred, y)
pred_idx = torch.max(pred, 1)[1]
y_true_val += list(y.cpu().data.numpy())
y_pred_val += list(pred_idx.cpu().data.numpy())
total_loss_val += loss.item()
valacc = accuracy_score(y_true_val, y_pred_val)
valloss = total_loss_val / self.val_dlen
self.logger.info(
f'Epoch {epoch + 1}: train_loss: {train_loss:.4f} train_acc: {train_acc:.4f} | val_loss: {valloss:.4f} val_acc: {valacc:.4f}')
else:
self.logger.info(f'Epoch {epoch + 1}: train_loss: {train_loss:.4f} train_acc: {train_acc:.4f}')
self.total_val_loss.append(valloss)
self.total_val_acc.append(valacc)
if self.lr_scheduler_epoch:
self.lr_scheduler_epoch.step()
if valloss < prev_val_loss:
self.save_checkpoint()
prev_val_loss = valloss
counter = 0
self.logger.info("Best model saved!!!")
else:
counter += 1
if counter >= self.early_max_patience:
self.logger.info("Training stopped because maximum tolerance reached!!!")
break
# Predict
def predict(self):
self.model.eval()
evaluate = Evaluator(self.config, self.logger, self.model, self.dataloader, self.model_name)
self.logger.info("Writing results")
evaluate.write_results()
self.logger.info("Evaluate results")
acc, prec, rec, f1 = evaluate.conll_eval()
return (acc, prec, rec, f1)
# Infer
def infer(self, sent):
"""
Prints the result
"""
evaluate = Evaluator(self.config, self.logger, self.model, self.dataloader, self.model_name)
return evaluate.infer(sent)
| 8,092 | 34.034632 | 146 | py |
nepali-ner | nepali-ner-master/models/models.py | '''
Models
Author: Oyesh Mann Singh
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from uniseg.graphemecluster import grapheme_clusters
tqdm.pandas(desc='Progress')
class LSTMTagger(nn.Module):
    """Word-level (Bi)LSTM sequence tagger with optional one-hot POS features.

    forward() returns per-token log-probabilities flattened to
    (seq_len * batch, tagset_size).
    """

    def __init__(self, config, dataloader):
        super(LSTMTagger, self).__init__()
        self.bidirectional = config.bidirection
        self.num_layers = config.num_layers
        self.batch_size = config.batch_size
        self.hidden_dim = config.hidden_dim
        self.vocab_size = dataloader.vocab_size
        self.tagset_size = dataloader.tagset_size
        self.embedding_dim = config.embedding_dim
        self.device = config.device
        self.use_pos = config.use_pos

        # Pre-trained vectors are only wired in at train/eval time.
        use_pretrained = config.pretrained and not config.infer
        if use_pretrained:
            self.word_embeddings = nn.Embedding.from_pretrained(dataloader.weights)
        else:
            self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)

        if self.use_pos:
            # Widen the LSTM input by a fixed one-hot POS embedding.
            self.pos_size = dataloader.pos_size
            self.embedding_dim = config.embedding_dim + self.pos_size
            identity = torch.from_numpy(np.eye(self.pos_size)).float()
            self.one_hot_embeddings = nn.Embedding(self.pos_size, self.pos_size, _weight=identity)

        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim,
                            bidirectional=self.bidirectional,
                            num_layers=self.num_layers)

        num_directions = 2 if self.bidirectional else 1
        self.hidden2tag = nn.Linear(self.hidden_dim * num_directions, self.tagset_size)
        self.dropout = nn.Dropout(config.dropout)

    def init_hidden(self, tensor_size):
        """Build zeroed (h0, c0) hidden states for a batch of tensor_size[1]."""
        num_directions = 2 if self.bidirectional else 1
        shape = (num_directions * self.num_layers, tensor_size[1], self.hidden_dim)
        h0 = torch.zeros(shape)
        c0 = torch.zeros(shape)
        if self.device:
            h0 = h0.to(self.device)
            c0 = c0.to(self.device)
        return h0, c0

    def forward(self, X, y):
        """Tag a batch of token-index tensors.

        ``y`` carries POS indices when ``use_pos`` is enabled and is
        ignored otherwise.
        """
        features = self.word_embeddings(X)
        if self.use_pos:
            features = torch.cat((features, self.one_hot_embeddings(y)), dim=-1)
        lstm_out, _ = self.lstm(self.dropout(features))
        logits = self.hidden2tag(lstm_out.view(-1, lstm_out.shape[2]))
        return F.log_softmax(logits, dim=1)
class CharLSTMTagger(nn.Module):
    """(Bi)LSTM tagger whose word embeddings are concatenated with
    CNN-pooled character (or grapheme) embeddings, and optionally with a
    one-hot POS embedding, before the LSTM.

    Bug fixes vs. the original:
    * In the non-pretrained branch the word embedding table was built with
      ``self.embedding_dim`` (already widened by the char/POS feature
      widths), so the concatenated LSTM input no longer matched the LSTM's
      declared ``input_size`` and forward() crashed.  It now uses the base
      ``config.embedding_dim``.
    * ``hidden2tag`` used ``hidden_dim * 2`` in *both* branches of the
      bidirectional check; the unidirectional branch now correctly uses
      ``hidden_dim``.
    """

    def __init__(self, config, dataloader):
        super(CharLSTMTagger, self).__init__()
        self.dataloader = dataloader
        self.bidirectional = config.bidirection
        self.num_layers = config.num_layers
        self.batch_size = config.batch_size
        self.hidden_dim = config.hidden_dim
        self.vocab_size = dataloader.vocab_size
        self.tagset_size = dataloader.tagset_size
        self.device = config.device

        self.char_embed_num = dataloader.char_vocab_size
        self.graph_embed_num = dataloader.graph_vocab_size
        self.char_dim = config.char_dim
        # LSTM input width: base word embedding + CNN character feature.
        self.embedding_dim = config.embedding_dim + config.char_dim

        if config.char_pretrained:
            self.char_embeddings = nn.Embedding.from_pretrained(dataloader.char_weights)
            self.graph_embeddings = nn.Embedding.from_pretrained(dataloader.graph_weights)
        else:
            self.char_embeddings = nn.Embedding(self.char_embed_num, self.char_dim, padding_idx=0)
            self.graph_embeddings = nn.Embedding(self.graph_embed_num, self.char_dim, padding_idx=0)
            nn.init.xavier_uniform_(self.char_embeddings.weight)
            nn.init.xavier_uniform_(self.graph_embeddings.weight)

        # True -> character level; False -> grapheme-cluster level.
        self.char_level = config.use_char
        self.use_pos = config.use_pos
        if self.use_pos:
            self.pos_size = dataloader.pos_size
            self.embedding_dim = self.embedding_dim + self.pos_size
            pos_one_hot = np.eye(self.pos_size)
            one_hot_weight = torch.from_numpy(pos_one_hot).float()
            self.one_hot_embeddings = nn.Embedding(self.pos_size, self.pos_size, _weight=one_hot_weight)

        if config.pretrained:
            self.word_embeddings = nn.Embedding.from_pretrained(dataloader.weights)
        else:
            # BUG FIX: base word-embedding width, not the widened LSTM input
            # width (see class docstring).
            self.word_embeddings = nn.Embedding(self.vocab_size, config.embedding_dim)

        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim,
                            bidirectional=self.bidirectional,
                            num_layers=self.num_layers)

        # BUG FIX: the unidirectional branch previously also used
        # hidden_dim * 2, mismatching the LSTM output width.
        lstm_out_dim = self.hidden_dim * 2 if self.bidirectional else self.hidden_dim
        self.hidden2tag = nn.Linear(lstm_out_dim, self.tagset_size)

        self.dropout = nn.Dropout(config.dropout)
        self.dropout_embed = nn.Dropout(config.dropout_embed)

        # ------------CNN over character/grapheme embeddings
        self.conv_filter_sizes = [3, 4]
        self.conv_filter_nums = self.char_dim  # one feature map set per filter size
        self.convs = nn.ModuleList([
            nn.Conv3d(in_channels=1,
                      out_channels=self.conv_filter_nums,
                      kernel_size=(1, fs, self.char_dim))
            for fs in self.conv_filter_sizes
        ])
        self.fc = nn.Linear(len(self.conv_filter_sizes) * self.conv_filter_nums, self.char_dim)

    def tensortosent(self, tense):
        """Map a 1-D tensor of token indices back to a space-joined sentence
        using the dataloader's text vocabulary."""
        return ' '.join([self.dataloader.txt_field.vocab.itos[i] for i in tense.cpu().data.numpy()])

    def get_char_tensor(self, X):
        """Build a character (or grapheme) index tensor for a batch.

        Args:
            X: 2-D tensor of token indices — assumed [batch, max_len];
               TODO confirm against the caller's batching layout.
        Returns:
            3-D long tensor [batch, max_len, max_len_char], zero-padded so
            every word is at least max(conv_filter_sizes) units long.
        """
        word_int = []
        length = 0
        # Go through each tensor in each batch
        for b in range(0, X.shape[0]):
            each_X = X[b]
            char_int = []
            w = []
            # For character-level
            if self.char_level:
                # Split each token of the detokenized sentence into characters
                w += (list(x) for x in self.tensortosent(each_X).split())
                # Map characters to their vocabulary indices
                char_int += ([self.dataloader.char_field.vocab.stoi[c] for c in each] for each in w)
            # For grapheme-level
            else:
                w += (list(grapheme_clusters(x)) for x in self.tensortosent(each_X).split())
                char_int += ([self.dataloader.graph_field.vocab.stoi[c] for c in each] for each in w)
            # Track the longest word seen so far across the whole batch
            if length < max(map(len, char_int)):
                length = max(map(len, char_int))
            word_int.append(char_int)
        # Pad so the convolution kernels (sizes up to max(filter_sizes))
        # always fit, even when every word is shorter.
        if length < max(self.conv_filter_sizes):
            length += max(self.conv_filter_sizes) - length
        # Zero-pad every word to the common length
        X_char = np.array([[xi + [0] * (length - len(xi)) for xi in each] for each in word_int])
        # Convert to tensor from numpy array
        X_char = torch.from_numpy(X_char)
        return X_char

    # ---------------------------------------CHARACTER FORWARD
    def _char_forward(self, inputs):
        """
        Args:
            inputs: 3D tensor, [bs, max_len, max_len_char]
        Returns:
            char feature tensor, [bs, max_len, char_dim]
        """
        max_len, max_len_char = inputs.size(1), inputs.size(2)
        inputs = inputs.view(-1, max_len * max_len_char)  # [bs, max_len * max_len_char]
        inputs = inputs.to(self.device)
        if self.char_level:
            input_embed = self.char_embeddings(inputs)  # [bs, ml*ml_c, feature_dim]
        else:
            input_embed = self.graph_embeddings(inputs)
        # Conv3d needs a 5-D input: [bs, 1, max_len, max_len_char, feature_dim]
        input_embed = input_embed.view(-1, 1, max_len, max_len_char, self.char_dim)
        # Convolution: one output per filter size, [bs, nf, max_len, ml_c-fs+1, 1]
        conved = [F.relu(conv(input_embed)) for conv in self.convs]
        # Max-pool over the per-word character positions -> [bs, nf, max_len]
        pooled = [torch.squeeze(torch.max(conv, -2)[0], -1) for conv in conved]
        cat = self.dropout(torch.cat(pooled, dim=1))
        cat = cat.permute(0, 2, 1)
        return self.fc(cat)

    def init_hidden(self, tensor_size):
        """Build zeroed (h0, c0) hidden states for a batch of tensor_size[1]."""
        if self.bidirectional:
            h0 = torch.zeros(2 * self.num_layers, tensor_size[1], self.hidden_dim)
            c0 = torch.zeros(2 * self.num_layers, tensor_size[1], self.hidden_dim)
        else:
            h0 = torch.zeros(self.num_layers, tensor_size[1], self.hidden_dim)
            c0 = torch.zeros(self.num_layers, tensor_size[1], self.hidden_dim)
        if self.device:
            h0 = h0.to(self.device)
            c0 = c0.to(self.device)
        return (h0, c0)

    def forward(self, X, y):
        """Tag a batch: word embeddings + CNN char features (+ optional POS).

        ``y`` carries POS indices when ``use_pos`` is enabled and is ignored
        otherwise.  Returns log-probabilities flattened over all tokens.
        """
        X_char = self.get_char_tensor(X)
        X = self.word_embeddings(X)
        char_conv = self._char_forward(X_char)
        X = torch.cat((X, char_conv), dim=-1)
        # Concatenate POS-embedding here
        if self.use_pos:
            POS = self.one_hot_embeddings(y)
            X = torch.cat((X, POS), dim=-1)
        X, _ = self.lstm(self.dropout(X))
        tag_space = self.hidden2tag(X.view(-1, X.shape[2]))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
| 9,650 | 37.146245 | 104 | py |
nepali-ner | nepali-ner-master/utils/dataloader2.py | #!/usr/bin/env python3
'''
NER Dataloader
Author: Oyesh Mann Singh
Date: 10/14/2019
Data format:
<WORD> <NER-tag>
'''
import os
import pickle
from torchtext import data, vocab
from torchtext.datasets import SequenceTaggingDataset
class Dataloader():
    """Load pre-built (pickled) torchtext TEXT/LABEL fields for NER tagging.

    Note: the original class also defined accessor methods named
    ``txt_field``, ``label_field``, ``vocab_size``, ``tagset_size`` and
    ``weights``.  They were dead code — the identically named instance
    attributes assigned in ``__init__`` shadow them, so calling e.g.
    ``self.vocab_size()`` on an instance was never possible.  They have
    been removed; read the attributes directly.
    """

    def __init__(self, config, k):
        """
        Args:
            config: configuration with ``device``, ``use_pos``,
                ``vocab_file`` and ``label_file`` attributes.
            k: fold identifier (kept for interface compatibility; unused
                here).
        """
        self.device = config.device
        self.use_pos = config.use_pos
        # Context managers close the pickle files deterministically
        # (the original left the handles open).
        with open(config.vocab_file, 'rb') as vocab_fp:
            self.txt_field = pickle.load(vocab_fp)
        with open(config.label_file, 'rb') as label_fp:
            self.label_field = pickle.load(label_fp)

        # To (re)generate the pickles for future reference:
        #   pickle.dump(self.txt_field, open('vocab.pkl', 'wb'))
        #   pickle.dump(self.label_field, open('labels.pkl', 'wb'))

        self.vocab_size = len(self.txt_field.vocab)
        self.tagset_size = len(self.label_field.vocab)
        # Pre-trained embedding matrix attached to the text vocabulary.
        self.weights = self.txt_field.vocab.vectors

    def tokenizer(self, x):
        """Whitespace tokenizer used when (re)building the TEXT field."""
        return x.split()

    def print_stat(self):
        """Print basic vocabulary statistics."""
        print('Length of text vocab (unique words in dataset) = ', self.vocab_size)
        print('Length of label vocab (unique tags in labels) = ', self.tagset_size)
| 2,467 | 27.367816 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.