repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/models/hrnet/pose_hrnet.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Minor Modifications made for ActiveLearningForHumanPose code base
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import torch
import torch.nn as nn
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution that keeps spatial size (padding=1)."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """Standard two-conv residual block: conv-bn-relu-conv-bn + skip, relu."""
    # Output channels = planes * expansion.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """
        :param inplanes: input channel count.
        :param planes: output channel count (expansion is 1).
        :param stride: stride of the first convolution.
        :param downsample: optional module projecting the skip connection.
        """
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Residual forward pass; applies downsample to the skip path if set."""
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += identity
        return self.relu(y)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4) + skip."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """
        :param inplanes: input channel count.
        :param planes: bottleneck width; output is planes * 4.
        :param stride: stride of the middle 3x3 convolution.
        :param downsample: optional module projecting the skip connection.
        """
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Residual forward pass; applies downsample to the skip path if set."""
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += identity
        return self.relu(y)
class HighResolutionModule(nn.Module):
    """One HRNet stage module: parallel multi-resolution branches followed by
    a fuse step that exchanges information across resolutions.

    Branch i operates at 1/2**i of the module's base resolution. After the
    branches run, each retained output is the sum of all branch outputs
    resampled (1x1 conv + nearest upsample, or chained stride-2 3x3 convs)
    to that branch's resolution.
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        # blocks: residual block class (BasicBlock or Bottleneck).
        # num_blocks / num_inchannels / num_channels: per-branch lists.
        # multi_scale_output: if False, only the highest-resolution fused
        # output is produced (used by the network's last stage).
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(True)
    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        """Validate that each per-branch config list has num_branches entries.

        Raises ValueError (and logs) on any mismatch.
        """
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Stack num_blocks[branch_index] residual blocks for one branch.

        A 1x1 conv + BN downsample is added to the first block's skip path
        when the stride or channel count changes.
        """
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.num_inchannels[branch_index],
                    num_channels[branch_index] * block.expansion,
                    kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(
                    num_channels[branch_index] * block.expansion,
                    momentum=BN_MOMENTUM
                ),
            )
        layers = []
        layers.append(
            block(
                self.num_inchannels[branch_index],
                num_channels[branch_index],
                stride,
                downsample
            )
        )
        # Record the branch's new channel count for later blocks/fuse layers.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.num_inchannels[branch_index],
                    num_channels[branch_index]
                )
            )
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches as a ModuleList."""
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels)
            )
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        """Build the cross-resolution fuse layers.

        fuse_layers[i][j] resamples branch j's output to branch i's
        resolution: j > i uses 1x1 conv + nearest upsample; j == i is the
        identity (stored as None); j < i chains (i-j) stride-2 3x3 convs,
        with the channel change applied only on the last conv.
        Returns None for a single-branch module.
        """
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv2d(
                                num_inchannels[j],
                                num_inchannels[i],
                                1, 1, 0, bias=False
                            ),
                            nn.BatchNorm2d(num_inchannels[i]),
                            nn.Upsample(scale_factor=2**(j-i), mode='nearest')
                        )
                    )
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            # Last downsampling step switches to branch i's channels
                            # and has no ReLU (applied after summation in forward).
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        3, 2, 1, bias=False
                                    ),
                                    nn.BatchNorm2d(num_outchannels_conv3x3)
                                )
                            )
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        3, 2, 1, bias=False
                                    ),
                                    nn.BatchNorm2d(num_outchannels_conv3x3),
                                    nn.ReLU(True)
                                )
                            )
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        """Return per-branch output channel counts (updated by branch creation)."""
        return self.num_inchannels
    def forward(self, x):
        """Run branches then fuse.

        :param x: list of per-branch tensors (finest resolution first).
            Note: the input list is mutated in place with branch outputs.
        :return: list of fused tensors (length 1 if multi_scale_output=False).
        """
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            # fuse_layers[i][i] is None, so branch i's own output is added raw.
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
# Maps the config 'BLOCK' string to the residual block class used by a stage.
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class PoseHighResolutionNet(nn.Module):
    """HRNet pose estimator, model-parallel across up to two GPUs.

    Stages 1-2 (and transition1/2) are placed on the first CUDA device;
    stages 3-4, the heatmap head and the global average pool on the last
    one.  Besides the joint heatmaps, ``forward`` returns a dict of
    detached intermediate features consumed by the auxiliary
    (active-learning) head.
    """
    def __init__(self, arch, auxnet, intermediate_features):
        """
        :param arch: HRNet configuration dict with 'STAGE2'/'STAGE3'/'STAGE4'
                     sub-configs plus 'num_hm', 'FINAL_CONV_KERNEL' and
                     'PRETRAINED_LAYERS'.
        :param auxnet: truthy when stage-3 feature maps should be exported
                       for the auxiliary network.
        :param intermediate_features: 'conv' selects convolutional feature
                                      export in forward().
        """
        # nn.Module.__init__ must run before submodules are assigned.
        super(PoseHighResolutionNet, self).__init__()
        self.auxnet = auxnet
        self.inplanes = 64
        self.intermediate_features = intermediate_features
        if torch.cuda.device_count() > 1:
            # Use the first and last available GPU (simple model parallelism)
            self.cuda_devices = [torch.device('cuda:0'), torch.device('cuda:1')]
        else:
            self.cuda_devices = [torch.device('cuda:0'), torch.device('cuda:0')]
        # Stem: two stride-2 3x3 convs (input -> 1/4 resolution) + 4 bottlenecks.
        # NOTE(review): there is no ReLU between bn1 and conv2, unlike the
        # reference HRNet stem — kept as-is to preserve trained behavior.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(Bottleneck, 64, 4)
        self.stage1 = nn.Sequential(
            self.conv1, self.bn1, self.conv2, self.bn2, self.relu, self.layer1).cuda(self.cuda_devices[0])
        # Stage 2: two parallel resolutions, first device.
        self.stage2_cfg = arch['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer([256], num_channels).cuda(self.cuda_devices[0])
        self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)
        self.stage2.cuda(self.cuda_devices[0])
        # Stage 3: three parallel resolutions; the stage itself moves to the
        # last device (transition2 stays on the first — see forward()).
        self.stage3_cfg = arch['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels).cuda(self.cuda_devices[0])
        self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)
        self.stage3.cuda(self.cuda_devices[-1])
        # Stage 4: four parallel resolutions; only the finest output is kept.
        self.stage4_cfg = arch['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels).cuda(self.cuda_devices[-1])
        self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=False)
        self.stage4.cuda(self.cuda_devices[-1])
        # Head: one heatmap per joint from the finest stage-4 branch.
        self.final_layer = nn.Conv2d(
            in_channels=pre_stage_channels[0],
            out_channels=arch['num_hm'],
            kernel_size=arch['FINAL_CONV_KERNEL'],
            stride=1,
            padding=1 if arch['FINAL_CONV_KERNEL'] == 3 else 0
        ).cuda(self.cuda_devices[-1])
        self.pretrained_layers = arch['PRETRAINED_LAYERS']
        # Megh addition: pooled 'penultimate' feature export.
        # NOTE(review): the fixed (64, 64) kernel assumes 64x64 stage-4 maps
        # (i.e. a 256x256 network input) — confirm against the dataloader.
        self.global_avg_pool = nn.AvgPool2d(kernel_size=(64, 64), stride=1).cuda(self.cuda_devices[-1])
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch adapters between two consecutive stages.

        Existing branches get a 3x3 conv+BN+ReLU only when their channel
        count changes (otherwise None = identity); each newly created,
        coarser branch is derived from the coarsest previous branch through
        chained stride-2 3x3 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            nn.Conv2d(
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                3, 1, 1, bias=False
                            ),
                            nn.BatchNorm2d(num_channels_cur_layer[i]),
                            nn.ReLU(inplace=True)
                        )
                    )
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    # Only the final downsampling conv changes the channel count.
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(
                        nn.Sequential(
                            nn.Conv2d(
                                inchannels, outchannels, 3, 2, 1, bias=False
                            ),
                            nn.BatchNorm2d(outchannels),
                            nn.ReLU(inplace=True)
                        )
                    )
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the skip path if needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes, planes * block.expansion,
                    kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build a stage as a chain of HighResolutionModules.

        :return: (stage as nn.Sequential, per-branch output channel counts).
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # multi_scale_output only matters for the stage's last module.
            reset_multi_scale_output = multi_scale_output or i != num_modules - 1
            modules.append(
                HighResolutionModule(
                    num_branches,
                    block,
                    num_blocks,
                    num_inchannels,
                    num_channels,
                    fuse_method,
                    reset_multi_scale_output
                )
            )
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels
    def forward(self, x):
        """Run the network.

        :param x: image batch in NHWC layout (permuted to NCHW internally).
        :return: (heatmaps with a dummy stack dim, shape (N, 1, num_hm, H, W);
                  dict of detached auxiliary features).
        """
        # STAGE 1: stem + bottlenecks on the first device. -------------------
        x = x.permute(0, 3, 1, 2).cuda(self.cuda_devices[0])
        x = self.stage1(x)
        # STAGE 2 ------------------------------------------------------------
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        # STAGE 3: tensors migrate to the last device here. ------------------
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                x_list.append(self.transition2[i](y_list[-1]).cuda(self.cuda_devices[-1]))
            else:
                x_list.append(y_list[i].cuda(self.cuda_devices[-1]))
        y_list = self.stage3(x_list)
        # Export detached stage-3 feature maps for the auxiliary network,
        # coarsest resolution first (feature_1 = coarsest branch).
        hrnet_dict = dict()
        if self.intermediate_features == 'conv' and self.auxnet:
            for i in range(len(self.stage3_cfg['NUM_CHANNELS'])):
                hrnet_dict['feature_{}'.format(i + 1)] = y_list[-(i + 1)].clone().detach().to(
                    'cuda:{}'.format(torch.cuda.device_count() - 1))
        # STAGE 4 + penultimate feature extraction ---------------------------
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)
        hrnet_dict['penultimate'] = self.global_avg_pool(y_list[0]).clone().detach().to(
            'cuda:{}'.format(torch.cuda.device_count() - 1)).reshape(y_list[0].shape[0], -1)
        x = self.final_layer(y_list[0])
        # Dummy stack dimension to stay compatible with Hourglass outputs.
        x.unsqueeze_(1)
        return x, hrnet_dict
    def init_weights(self, pretrained=''):
        """Initialize weights from a normal distribution and optionally load
        pretrained parameters from `pretrained`.

        The print/exit guard below is a deliberate tripwire left by the
        authors: this method is not expected to be reached in this code base.
        """
        print('WHY IS INIT_WEIGHTS BEING CALLED SOS SOS SOS LOOK INTO IT.')
        exit()
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.normal_(m.weight, std=0.001)
                for name, _ in m.named_parameters():
                    if name in ['bias']:
                        nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.001)
                for name, _ in m.named_parameters():
                    if name in ['bias']:
                        nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_state_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))
            need_init_state_dict = {}
            for name, m in pretrained_state_dict.items():
                # BUG FIX: compare strings with '==', not 'is' — identity of
                # string literals is a CPython interning detail.
                if name.split('.')[0] in self.pretrained_layers \
                        or self.pretrained_layers[0] == '*':
                    need_init_state_dict[name] = m
            self.load_state_dict(need_init_state_dict, strict=False)
        elif pretrained:
            logger.error('=> please download pre-trained models first!')
            raise ValueError('{} is not exist!'.format(pretrained))
def get_pose_net(cfg, is_train, **kwargs):
    """Construct a PoseHighResolutionNet from `cfg`.

    When training and cfg['MODEL']['INIT_WEIGHTS'] is set, weights are
    initialized/loaded via init_weights with cfg['MODEL']['PRETRAINED'].
    """
    net = PoseHighResolutionNet(cfg, **kwargs)
    wants_init = is_train and cfg['MODEL']['INIT_WEIGHTS']
    if wants_init:
        net.init_weights(cfg['MODEL']['PRETRAINED'])
    return net
| 20,055 | 37.274809 | 120 | py |
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/models/auxiliary/AuxiliaryNet.py | import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn.parameter import Parameter
class AuxNet(nn.Module):
    """Auxiliary head: optional convolutional feature extractor + an MLP.

    When arch['conv_or_avg_pooling'] == 'conv', the flattened heatmap
    pyramid is first rebuilt and passed through a ConvolutionFeatureExtractor;
    otherwise the input is fed straight into the fully-connected layers.
    """
    def __init__(self, arch):
        """
        :param arch: configuration dict with keys 'fc' (hidden/output widths),
                     'conv_or_avg_pooling', 'spatial_dim' and 'channels'
                     (spatial sizes / channel counts of the pose estimator's
                     intermediate features).
        """
        super(AuxNet, self).__init__()
        # From configuration
        self.fc_arch = arch['fc']
        self.is_conv = arch['conv_or_avg_pooling'] == 'conv'
        # Derived from Hourglass / HRNet
        self.conv_arch_spatial = arch['spatial_dim']
        self.conv_arch_channels = arch['channels']
        modules = []
        if self.is_conv:
            modules.append(ConvolutionFeatureExtractor(
                channels=self.conv_arch_channels, spatial=self.conv_arch_spatial))
        # Chain the fully-connected layers input -> output.
        fan_in = arch['channels'][-1]
        for fan_out in self.fc_arch:
            modules.append(nn.Linear(in_features=fan_in, out_features=fan_out))
            modules.append(nn.ReLU())
            fan_in = fan_out
        # Drop the trailing ReLU so the output layer stays linear.
        self.pytorch_layers = nn.ModuleList(modules[:-1])
    def forward(self, x):
        """Run the auxiliary head.

        :param x: flattened feature tensor; for the conv path it holds the
                  concatenated, flattened heatmap pyramid along dim 2.
        :return: network output tensor.
        """
        if self.is_conv:
            # Rebuild the per-resolution heatmap pyramid from the flat input.
            with torch.no_grad():
                pyramid = []
                offset = 0
                for side in self.conv_arch_spatial:
                    pyramid.append(
                        x[:, :, offset: offset + (side ** 2)].reshape(
                            x.shape[0], x.shape[1], side, side))
                    offset += (side ** 2)
            out = self.pytorch_layers[0](pyramid)
            # [1:] skips the ConvolutionFeatureExtractor layer.
            for layer in self.pytorch_layers[1:]:
                out = layer(out)
            return out
        # GAP path: plain MLP on the pooled features.
        for layer in self.pytorch_layers:
            x = layer(x)
        return x
class ConvolutionFeatureExtractor(nn.Module):
    """Fuses a pyramid of feature maps into one flat feature vector.

    Repeatedly halves the spatial resolution with stride-2 convolutions,
    adding the matching pyramid level after each step, then collapses the
    final map to 1x1 with a full-size convolution.
    """
    def __init__(self, channels, spatial):
        """
        :param channels: per-level channel counts, finest level first.
        :param spatial: per-level spatial sizes; spatial[-1] is the size of
                        the coarsest map collapsed by the final convolution.
        """
        super(ConvolutionFeatureExtractor, self).__init__()
        self.hg_conv_feat_extract = []
        self.depth = len(channels)
        # Downsampling stages, e.g. 32 --> 16, 16 --> 8, 8 --> 4
        for i in range(self.depth - 1):
            self.hg_conv_feat_extract.append(torch.nn.Conv2d(in_channels=channels[i], out_channels=channels[i + 1],
                                                             kernel_size=(2, 2), stride=2, padding=0))
            self.hg_conv_feat_extract.append(torch.nn.ReLU())
        # Final stage: collapse the remaining spatial extent to 1x1.
        self.hg_conv_feat_extract.append(torch.nn.Conv2d(in_channels=channels[-1],
                                                         out_channels=channels[-1],
                                                         kernel_size=(spatial[-1], spatial[-1]), stride=1, padding=0))
        self.hg_conv_feat_extract.append(torch.nn.ReLU())
        self.hg_conv_feat_extract = nn.ModuleList(self.hg_conv_feat_extract)
    def forward(self, x):
        """
        :param x: list of self.depth feature maps, finest first; level i+1
                  must match the shape produced by downsampling level i.
        :return: tensor of shape (batch, channels[-1]).
        """
        x_ = x[0]
        for i in range(self.depth - 1):
            x_ = self.hg_conv_feat_extract[2 * i](x_)
            x_ = self.hg_conv_feat_extract[(2 * i) + 1](x_)
            x_ = x[i + 1] + x_
        # BUG FIX: squeeze only the two spatial dims — a bare .squeeze()
        # also dropped the batch dimension whenever the batch size was 1.
        out = self.hg_conv_feat_extract[-2](x_).squeeze(-1).squeeze(-1)
        out = self.hg_conv_feat_extract[-1](out)
        return out
| 3,943 | 33.295652 | 133 | py |
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/models/learning_loss/LearningLoss.py | import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn.parameter import Parameter
class LearnLossActive(nn.Module):
    """Learning-loss head predicting a scalar loss estimate from features.

    With original=True the input is fed straight into an MLP; otherwise a
    ConvolutionHourglassFeatureExtractor first fuses the flattened heatmap
    pyramid into a feature vector.
    """
    def __init__(self, num_feat, hg_feat, hg_depth, original=False, hg_feat_shape=(64, 32, 16, 8, 4)):
        """
        :param num_feat: hidden widths followed by the output width.
        :param hg_feat: channel count of the hourglass features fed in.
        :param hg_depth: unused; kept for interface compatibility.
        :param original: True -> plain MLP on pooled features; False ->
                         prepend the convolutional extractor.
        :param hg_feat_shape: spatial sizes of the flattened heatmap levels.
        """
        super(LearnLossActive, self).__init__()
        self.original = original
        self.hg_feat_shape = hg_feat_shape
        out_width = num_feat[-1]  # out = 1
        hidden = num_feat[:-1]
        # Both configurations expect the same MLP trunk and start from hg_feat.
        assert hidden == [128, 64, 32, 16, 8]
        modules = []
        if not original:
            modules.append(ConvolutionHourglassFeatureExtractor(4 + 1, hg_feat))
        width = hg_feat
        for next_width in hidden:
            modules.append(nn.Linear(in_features=width, out_features=next_width))
            modules.append(nn.ReLU())
            width = next_width
        modules.append(nn.Linear(in_features=width, out_features=out_width))
        self.fc_layers = nn.ModuleList(modules)
    def forward(self, x):
        """Return (prediction, detached encodings of the last hidden layer).

        :param x: pooled features (original=True) or flattened heatmap
                  pyramid along dim 2 (original=False).
        """
        encodings = None
        if self.original:
            # Plain MLP on the pooled features.
            for layer in self.fc_layers:
                x = layer(x)
                if x.shape[-1] != 1:
                    encodings = x.clone().detach()
            return x, encodings
        # Conv path: first rebuild the heatmap pyramid from the flat input.
        with torch.no_grad():
            pyramid = []
            offset = 0
            for side in self.hg_feat_shape:
                pyramid.append(x[:, :, offset: offset + (side ** 2)].reshape(
                    x.shape[0], x.shape[1], side, side))
                offset += (side ** 2)
        x = self.fc_layers[0](pyramid)
        # [1:] skips the ConvFeatExtract layer
        for layer in self.fc_layers[1:]:
            x = layer(x)
            if x.shape[-1] != 1 and isinstance(layer, torch.nn.ReLU):
                encodings = x.clone().detach()
        return x, encodings
class ConvolutionHourglassFeatureExtractor(nn.Module):
    """Collapse a pyramid of hourglass feature maps into one flat vector.

    Constant channel count throughout; each stride-2 convolution halves the
    resolution and the matching pyramid level is added afterwards, then a
    4x4 convolution collapses the final map to 1x1.
    """
    def __init__(self, depth, hg_feat):
        """
        :param depth: number of pyramid levels (downsampling stages = depth-1).
        :param hg_feat: channel count shared by every level.
        """
        super(ConvolutionHourglassFeatureExtractor, self).__init__()
        self.hg_conv_feat_extract = []
        self.depth = depth
        # Downsampling stages, e.g. 32 --> 16, 16 --> 8, 8 --> 4
        for i in range(1, depth):
            self.hg_conv_feat_extract.append(torch.nn.Conv2d(in_channels=hg_feat, out_channels=hg_feat,
                                                             kernel_size=(2, 2), stride=2, padding=0))
            self.hg_conv_feat_extract.append(torch.nn.ReLU())
        # Final stage: 4x4 map down to 1x1.
        self.hg_conv_feat_extract.append(torch.nn.Conv2d(in_channels=hg_feat,
                                                         out_channels=hg_feat,
                                                         kernel_size=(4, 4), stride=1, padding=0))
        self.hg_conv_feat_extract.append(torch.nn.ReLU())
        self.hg_conv_feat_extract = nn.ModuleList(self.hg_conv_feat_extract)
    def forward(self, x):
        """
        :param x: list of self.depth feature maps, finest first; the last
                  level is expected to be 4x4 (matching the final kernel).
        :return: tensor of shape (batch, hg_feat).
        """
        x_ = x[0]
        for i in range(self.depth - 1):
            x_ = self.hg_conv_feat_extract[2 * i](x_)
            x_ = self.hg_conv_feat_extract[(2 * i) + 1](x_)
            x_ = x[i + 1] + x_
        # BUG FIX: squeeze only the two spatial dims — a bare .squeeze()
        # also dropped the batch dimension whenever the batch size was 1.
        out = self.hg_conv_feat_extract[-2](x_).squeeze(-1).squeeze(-1)
        out = self.hg_conv_feat_extract[-1](out)
        return out
| 3,801 | 31.775862 | 114 | py |
assimp | assimp-master/port/PyAssimp/scripts/transformations.py | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006, Christoph Gohlke
# Copyright (c) 2006-2009, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Authors:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 20090418
Requirements
------------
* `Python 2.6 <http://www.python.org>`__
* `Numpy 1.3 <http://numpy.scipy.org>`__
* `transformations.c 20090418 <http://www.lfd.uci.edu/~gohlke/>`__
(optional implementation of some functions in C)
Notes
-----
Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using
numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using
numpy.dot(M, v) for shape (4, \*) "point of arrays", respectively
numpy.dot(v, M.T) for shape (\*, 4) "array of points".
Calculations are carried out with numpy.float64 precision.
This Python implementation is not optimized for speed.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions ix+jy+kz+w are represented as [x, y, z, w].
Use the transpose of transformation matrices for OpenGL glMultMatrixd().
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix((1, 2, 3))
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, (1, 2, 3))
True
>>> numpy.allclose(shear, (0, math.tan(beta), 0))
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
"""
from __future__ import division
import warnings
import math
import numpy
# Documentation in HTML format can be generated with Epydoc
__docformat__ = "restructuredtext en"
def identity_matrix():
    """Return the 4x4 homogeneous identity matrix (float64).

    >>> I = identity_matrix()
    >>> numpy.allclose(I, numpy.dot(I, I))
    True
    >>> numpy.sum(I), numpy.trace(I)
    (4.0, 4.0)
    """
    eye = numpy.eye(4, dtype=numpy.float64)
    return eye
def translation_matrix(direction):
    """Return a 4x4 matrix translating homogeneous points by *direction*.

    >>> v = numpy.random.random(3) - 0.5
    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
    True
    """
    T = numpy.identity(4)
    T[0, 3] = direction[0]
    T[1, 3] = direction[1]
    T[2, 3] = direction[2]
    return T
def translation_from_matrix(matrix):
    """Return the translation vector (top three entries of the last column).

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = translation_from_matrix(translation_matrix(v0))
    >>> numpy.allclose(v0, v1)
    True
    """
    M = numpy.array(matrix, copy=False)
    translation = M[:3, 3].copy()
    return translation
def reflection_matrix(point, normal):
    """Return a 4x4 matrix mirroring at the plane through *point* with *normal*.

    The linear part is the Householder reflection I - 2*n*n^T for the unit
    normal n; the translation places the plane at *point*.

    >>> v0 = numpy.random.random(4) - 0.5
    >>> v0[3] = 1.0
    >>> v1 = numpy.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> numpy.allclose(2., numpy.trace(R))
    True
    >>> numpy.allclose(v0, numpy.dot(R, v0))
    True
    """
    n = unit_vector(normal[:3])
    householder = numpy.identity(3) - 2.0 * numpy.outer(n, n)
    M = numpy.identity(4)
    M[:3, :3] = householder
    M[:3, 3] = 2.0 * numpy.dot(point[:3], n) * n
    return M
def reflection_from_matrix(matrix):
    """Recover (point, normal) of the mirror plane from a reflection matrix.

    The plane normal is a unit eigenvector of the 3x3 part for eigenvalue -1;
    a point on the plane is a fixed point of the full homogeneous transform
    (eigenvalue +1), normalized so its last component is 1.

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = numpy.random.random(3) - 0.5
    >>> M0 = reflection_matrix(v0, v1)
    >>> point, normal = reflection_from_matrix(M0)
    >>> M1 = reflection_matrix(point, normal)
    >>> is_same_transform(M0, M1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    # normal: unit eigenvector of the linear part with eigenvalue -1
    eigvals, eigvecs = numpy.linalg.eig(M[:3, :3])
    hits = numpy.where(abs(numpy.real(eigvals) + 1.0) < 1e-8)[0]
    if not len(hits):
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = numpy.real(eigvecs[:, hits[0]]).squeeze()
    # point: any fixed point of the full transform (eigenvalue +1)
    eigvals, eigvecs = numpy.linalg.eig(M)
    hits = numpy.where(abs(numpy.real(eigvals) - 1.0) < 1e-8)[0]
    if not len(hits):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(eigvecs[:, hits[-1]]).squeeze()
    point /= point[3]
    return point, normal
def rotation_matrix(angle, direction, point=None):
    """Return a 4x4 matrix rotating by *angle* about the axis through *point*
    along *direction* (Rodrigues' rotation formula).

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
    >>> is_same_transform(R0, R1)
    True
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> R1 = rotation_matrix(-angle, -direc, point)
    >>> is_same_transform(R0, R1)
    True
    """
    sina = math.sin(angle)
    cosa = math.cos(angle)
    u = unit_vector(direction[:3])
    # Rodrigues: R = cos*I + (1-cos)*u u^T + sin*[u]_x
    R = numpy.diag([cosa, cosa, cosa])
    R = R + numpy.outer(u, u) * (1.0 - cosa)
    ux, uy, uz = u * sina
    R = R + numpy.array((( 0.0, -uz,  uy),
                         (  uz, 0.0, -ux),
                         ( -uy,  ux, 0.0)), dtype=numpy.float64)
    M = numpy.identity(4)
    M[:3, :3] = R
    if point is not None:
        # Shift so the axis passes through *point* rather than the origin.
        p = numpy.array(point[:3], dtype=numpy.float64, copy=False)
        M[:3, 3] = p - numpy.dot(R, p)
    return M
def rotation_from_matrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    Inverse of rotation_matrix: returns (angle, direction, point) where
    direction is the rotation axis and point a homogeneous point on it.
    Raises ValueError if matrix has no eigenvalue-1 eigenvector.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotation_from_matrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True
    """
    # numpy.asarray replaces numpy.array(..., copy=False): same behavior
    # (R is only read), but copy=False raises under NumPy 2.0.
    R = numpy.asarray(matrix, dtype=numpy.float64)
    R33 = R[:3, :3]
    # direction: unit eigenvector of R33.T corresponding to eigenvalue 1
    w, W = numpy.linalg.eig(R33.T)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: eigenvalue-1 eigenvector of the full homogeneous matrix,
    # dehomogenized, gives a point on the rotation axis
    w, Q = numpy.linalg.eig(R)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(Q[:, i[-1]]).squeeze()
    point /= point[3]
    # rotation angle depending on direction
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    # recover sin(angle) using the first sufficiently large axis component
    # as denominator, to avoid dividing by a near-zero value
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
    """Return matrix scaling by `factor` about `origin` along `direction`.

    With direction None the scaling is uniform; factor -1 gives point
    symmetry. With a direction, only the component along that axis is
    scaled. `origin` is the fixed point of the transform.

    >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v[3] = 1.0
    >>> S = scale_matrix(-1.234)
    >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
    True
    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S = scale_matrix(factor, origin)
    >>> S = scale_matrix(factor, origin, direct)
    """
    if direction is None:
        # isotropic scaling; diagonal carries the factor
        M = numpy.diag((float(factor),) * 3 + (1.0,))
        if origin is not None:
            # translate so that `origin` is the fixed point
            M[:3, 3] = origin[:3]
            M[:3, 3] *= 1.0 - factor
    else:
        # scale only the component along `direction`
        direction = unit_vector(direction[:3])
        shrink = 1.0 - factor
        M = numpy.identity(4)
        M[:3, :3] -= shrink * numpy.outer(direction, direction)
        if origin is not None:
            M[:3, 3] = (shrink * numpy.dot(origin[:3], direction)) * direction
    return M
def scale_from_matrix(matrix):
    """Return scaling factor, origin and direction from scaling matrix.

    Inverse of scale_matrix. For a uniform scaling matrix, direction is
    returned as None. Raises ValueError if no eigenvalue-1 eigenvector
    exists for the origin.

    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S0 = scale_matrix(factor, origin)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True
    >>> S0 = scale_matrix(factor, origin, direct)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True
    """
    # numpy.asarray replaces numpy.array(..., copy=False): same behavior
    # (M is only read), but copy=False raises under NumPy 2.0.
    M = numpy.asarray(matrix, dtype=numpy.float64)
    M33 = M[:3, :3]
    factor = numpy.trace(M33) - 2.0
    try:
        # directional scaling: unit eigenvector for eigenvalue `factor`;
        # for uniform scaling no such eigenvalue exists and the indexing
        # below raises IndexError
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
        direction = numpy.real(V[:, i]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # uniform scaling: factor is the mean of the diagonal
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: any eigenvector corresponding to eigenvalue 1, dehomogenized
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(V[:, i[-1]]).squeeze()
    origin /= origin[3]
    return factor, origin, direction
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.

    Using either perspective point, projection direction, or none of both.
    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).

    >>> P = projection_matrix((0, 0, 0), (1, 0, 0))
    >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
    True
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> P1 = projection_matrix(point, normal, direction=direct)
    >>> P2 = projection_matrix(point, normal, perspective=persp)
    >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> is_same_transform(P2, numpy.dot(P0, P3))
    True
    >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
    >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(P, v0)
    >>> numpy.allclose(v1[1], v0[1])
    True
    >>> numpy.allclose(v1[0], 3.0-v1[1])
    True
    """
    M = numpy.identity(4)
    # numpy.asarray replaces numpy.array(..., copy=False) throughout: the
    # arrays are only read, and copy=False raises under NumPy 2.0 when a
    # copy is required.
    point = numpy.asarray(point[:3], dtype=numpy.float64)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection
        perspective = numpy.asarray(perspective[:3], dtype=numpy.float64)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
        M[:3, :3] -= numpy.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * perspective
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(perspective, normal)
    elif direction is not None:
        # parallel projection along `direction`
        direction = numpy.asarray(direction[:3], dtype=numpy.float64)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection onto the plane
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.

    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.

    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    """
    # numpy.asarray replaces numpy.array(..., copy=False): same behavior
    # (M is only read), but copy=False raises under NumPy 2.0.
    M = numpy.asarray(matrix, dtype=numpy.float64)
    M33 = M[:3, :3]
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not pseudo and len(i):
        # point: any eigenvector corresponding to eigenvalue 1
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix mapping the given frustrum to normalized device coords.

    The frustrum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far); points inside map into [-1, 1].
    With perspective True the frustrum is a truncated pyramid with apex at
    the origin looking along z, and the result needs dehomogenization
    (division by w); otherwise it is an orthographic box.

    >>> frustrum = numpy.random.rand(6)
    >>> frustrum[1] += frustrum[0]
    >>> frustrum[3] += frustrum[2]
    >>> frustrum[5] += frustrum[4]
    >>> M = clip_matrix(*frustrum, perspective=False)
    >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
    array([-1., -1., -1.,  1.])
    >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0])
    array([ 1.,  1.,  1.,  1.])
    >>> M = clip_matrix(*frustrum, perspective=True)
    >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
    >>> v / v[3]
    array([-1., -1., -1.,  1.])
    >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0])
    >>> v / v[3]
    array([ 1.,  1., -1.,  1.])
    """
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustrum")
    width = right - left
    height = top - bottom
    depth = far - near
    if perspective:
        if near <= _EPS:
            raise ValueError("invalid frustrum: near <= 0")
        t = 2.0 * near
        rows = ((-t/width, 0.0, (right+left)/width, 0.0),
                (0.0, -t/height, (top+bottom)/height, 0.0),
                (0.0, 0.0, -(far+near)/depth, t*far/depth),
                (0.0, 0.0, -1.0, 0.0))
    else:
        rows = ((2.0/width, 0.0, 0.0, -(right+left)/width),
                (0.0, 2.0/height, 0.0, -(top+bottom)/height),
                (0.0, 0.0, 2.0/depth, -(far+near)/depth),
                (0.0, 0.0, 0.0, 1.0))
    return numpy.array(rows, dtype=numpy.float64)
def shear_matrix(angle, direction, point, normal):
    """Return matrix to shear by angle along direction vector on shear plane.

    The shear plane is defined by a point and normal vector; the direction
    vector must be orthogonal to the normal. A point P maps to P" such that
    P-P" is parallel to direction, with extent given by the angle P-P'-P",
    where P' is the orthogonal projection of P onto the plane.
    Raises ValueError if direction and normal are not orthogonal.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S = shear_matrix(angle, direct, point, normal)
    >>> numpy.allclose(1.0, numpy.linalg.det(S))
    True
    """
    normal = unit_vector(normal[:3])
    direction = unit_vector(direction[:3])
    if abs(numpy.dot(normal, direction)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    # shear magnitude is the tangent of the shear angle
    t = math.tan(angle)
    M = numpy.identity(4)
    M[:3, :3] += t * numpy.outer(direction, normal)
    # translation term keeps points on the shear plane fixed
    M[:3, 3] = -t * numpy.dot(point[:3], normal) * direction
    return M
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.

    Inverse of shear_matrix. Raises ValueError if the matrix does not
    look like a shear (fewer than two eigenvalue-1 eigenvectors, or no
    fixed point).

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True
    """
    # numpy.asarray replaces numpy.array(..., copy=False): same behavior
    # (M is only read), but copy=False raises under NumPy 2.0.
    M = numpy.asarray(matrix, dtype=numpy.float64)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    l, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("No two linear independent eigenvectors found %s" % l)
    V = numpy.real(V[:, i]).squeeze().T
    # pick the eigenvector pair with the largest cross product (most
    # independent) to get a well-conditioned plane normal
    lenorm = -1.0
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        l = vector_norm(n)
        if l > lenorm:
            lenorm = l
            normal = n
    normal /= lenorm
    # direction and angle
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector corresponding to eigenvalue 1, dehomogenized
    l, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]
    return angle, direction, point, normal
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.
    matrix : array_like
        Non-degenerative homogeneous transformation matrix
    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerative.
    >>> T0 = translation_matrix((1, 2, 3))
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True
    """
    # work on a transposed copy so rows of M correspond to matrix columns
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    # normalize so the homogeneous scale is 1
    M /= M[3, 3]
    # P is M with the perspective partition removed
    P = M.copy()
    P[:, 3] = 0, 0, 0, 1
    if not numpy.linalg.det(P):
        raise ValueError("Matrix is singular")
    scale = numpy.zeros((3, ), dtype=numpy.float64)
    shear = [0, 0, 0]
    angles = [0, 0, 0]
    if any(abs(M[:3, 3]) > _EPS):
        # solve for the perspective partition, then clear it from M
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0, 0, 0, 1
    else:
        perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
    # translation is the last row of the transposed matrix
    translate = M[3, :3].copy()
    M[3, :3] = 0
    # Gram-Schmidt on the upper-left 3x3 block: each step peels off one
    # scale factor and one shear factor; order of operations is essential
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # NOTE: shear is a list of numpy scalars; the division works because
    # numpy broadcasts over the list slice
    shear[1:] /= scale[2]
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        # left-handed basis: flip all scales and rows to restore a rotation
        scale *= -1
        row *= -1
    # extract static x-y-z Euler angles from the orthonormalized rows
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        # cos(pitch) == 0: degenerate case, third angle fixed at zero
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    This is the inverse of the decompose_matrix function.
    Sequence of transformations:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    >>> scale = numpy.random.random(3) - 0.5
    >>> shear = numpy.random.random(3) - 0.5
    >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
    >>> trans = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(4) - 0.5
    >>> M0 = compose_matrix(scale, shear, angles, trans, persp)
    >>> result = decompose_matrix(M0)
    >>> M1 = compose_matrix(*result)
    >>> is_same_transform(M0, M1)
    True
    """
    # collect the partial transforms in multiplication order:
    # perspective, translation, rotation, shear, scale
    factors = []
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        factors.append(P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        factors.append(T)
    if angles is not None:
        factors.append(euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        Z[1, 2] = shear[2]
        Z[0, 2] = shear[1]
        Z[0, 1] = shear[0]
        factors.append(Z)
    if scale is not None:
        factors.append(numpy.diag((scale[0], scale[1], scale[2], 1.0)))
    M = numpy.identity(4)
    for factor in factors:
        M = numpy.dot(M, factor)
    # normalize the homogeneous scale
    M /= M[3, 3]
    return M
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.

    Angles are expected in degrees; the de-orthogonalization matrix is
    the inverse of the result.

    >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True
    >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
    >>> numpy.allclose(numpy.sum(O), 43.063229)
    True
    """
    a, b, c = lengths
    al, be, ga = numpy.radians(angles)
    sina, sinb = math.sin(al), math.sin(be)
    cosa, cosb, cosg = math.cos(al), math.cos(be), math.cos(ga)
    co = (cosa * cosb - cosg) / (sina * sinb)
    # lower-triangular cell basis in homogeneous form
    M = numpy.zeros((4, 4), dtype=numpy.float64)
    M[0, 0] = a * sinb * math.sqrt(1.0 - co*co)
    M[1, 0] = -a * sinb * co
    M[1, 1] = b * sina
    M[2, 0] = a * cosb
    M[2, 1] = b * cosa
    M[2, 2] = c
    M[3, 3] = 1.0
    return M
def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
    """Return matrix to transform given vector set into second vector set.

    v0 and v1 are shape (3, \\*) or (4, \\*) arrays of at least 3 vectors.
    If usesvd is True, the weighted sum of squared deviations (RMSD) is
    minimized according to the algorithm by W. Kabsch [8]. Otherwise the
    quaternion based algorithm by B. Horn [9] is used (slower when using
    this Python implementation).
    The returned matrix performs rotation, translation and uniform scaling
    (if specified).

    >>> v0 = numpy.random.rand(3, 10)
    >>> M = superimposition_matrix(v0, v0)
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1))
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0
    >>> v0[3] = 1.0
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> S = scale_matrix(random.random())
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> M = concatenate_matrices(T, R, S)
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1)
    >>> M = superimposition_matrix(v0, v1, scaling=True)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    """
    # numpy.asarray replaces numpy.array(..., copy=False): the inputs are
    # never modified in place before the fresh arrays created below, and
    # copy=False raises under NumPy 2.0 when a copy is required.
    v0 = numpy.asarray(v0, dtype=numpy.float64)[:3]
    v1 = numpy.asarray(v1, dtype=numpy.float64)[:3]
    if v0.shape != v1.shape or v0.shape[1] < 3:
        raise ValueError("Vector sets are of wrong shape or type.")
    # move centroids to origin (the subtraction yields new arrays, so the
    # caller's data stays untouched)
    t0 = numpy.mean(v0, axis=1)
    t1 = numpy.mean(v1, axis=1)
    v0 = v0 - t0.reshape(3, 1)
    v1 = v1 - t1.reshape(3, 1)
    if usesvd:
        # Singular Value Decomposition of covariance matrix (Kabsch)
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system
            R -= numpy.outer(u[:, 2], vh[2, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(4)
        M[:3, :3] = R
    else:
        # compute symmetric matrix N (Horn's quaternion method)
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx),
             (yz-zy, xx-yy-zz, xy+yx, zx+xz),
             (zx-xz, xy+yx, -xx+yy-zz, yz+zy),
             (xy-yx, zx+xz, yz+zy, -xx-yy+zz))
        # quaternion: eigenvector corresponding to most positive eigenvalue
        w, V = numpy.linalg.eig(N)
        q = V[:, numpy.argmax(w)]
        q /= vector_norm(q)  # unit quaternion
        q = numpy.roll(q, -1)  # move w component to end
        # homogeneous transformation matrix
        M = quaternion_matrix(q)
    # scale: ratio of rms deviations from centroid
    if scaling:
        v0 *= v0
        v1 *= v1
        M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
    # translation: map centroid of v0 onto centroid of v1
    M[:3, 3] = t1
    T = numpy.identity(4)
    T[:3, 3] = -t0
    M = numpy.dot(M, T)
    return M
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.
    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple
    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    """
    try:
        # axes given as string, e.g. 'sxyz': decode via lookup table
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
    except (AttributeError, KeyError):
        # axes given as encoded tuple; validate against the known set
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    # i, j, k: indices of the three rotation axes in application order
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    if frame:
        # swap first and third angles for the alternate frame convention
        ai, ak = ak, ai
    if parity:
        ai, aj, ak = -ai, -aj, -ak
    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    # products of sines/cosines reused across the matrix entries below
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk
    M = numpy.identity(4)
    if repetition:
        # sequences with a repeated axis (e.g. 'sxyx')
        M[i, i] = cj
        M[i, j] = sj*si
        M[i, k] = sj*ci
        M[j, i] = sj*sk
        M[j, j] = -cj*ss+cc
        M[j, k] = -cj*cs-sc
        M[k, i] = -sj*ck
        M[k, j] = cj*sc+cs
        M[k, k] = cj*cc-ss
    else:
        # sequences with three distinct axes (e.g. 'sxyz')
        M[i, i] = cj*ck
        M[i, j] = sj*sc-cs
        M[i, k] = sj*cc+ss
        M[j, i] = cj*sk
        M[j, j] = sj*ss+cc
        M[j, k] = sj*cs-sc
        M[k, i] = -sj
        M[k, j] = cj*si
        M[k, k] = cj*ci
    return M
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple
    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print(axes, "failed")
    """
    try:
        # axes given as string, e.g. 'sxyz': decode via lookup table
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes given as encoded tuple; validate against the known set
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    # numpy.asarray replaces numpy.array(..., copy=False): same behavior
    # (M is only read), but copy=False raises under NumPy 2.0.
    M = numpy.asarray(matrix, dtype=numpy.float64)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j],  M[i, k])
            ay = math.atan2( sy,       M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            # singular configuration: third angle fixed at zero
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2( sy,       M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j],  M[k, k])
            ay = math.atan2(-M[k, i],  cy)
            az = math.atan2( M[j, i],  M[i, i])
        else:
            # singular configuration: third angle fixed at zero
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2(-M[k, i],  cy)
            az = 0.0
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence.
    >>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(angles, [0.123, 0, 0])
    True
    """
    # go through the rotation-matrix representation
    M = quaternion_matrix(quaternion)
    return euler_from_matrix(M, axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.
    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple
    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
    True
    """
    try:
        # axes given as string, e.g. 'sxyz': decode via lookup table
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # axes given as encoded tuple; validate against the known set
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    # i, j, k: quaternion component indices for the three rotation axes
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    if frame:
        # swap first and third angles for the alternate frame convention
        ai, ak = ak, ai
    if parity:
        aj = -aj
    # half angles: quaternions encode rotations via sin/cos of angle/2
    ai /= 2.0
    aj /= 2.0
    ak /= 2.0
    ci = math.cos(ai)
    si = math.sin(ai)
    cj = math.cos(aj)
    sj = math.sin(aj)
    ck = math.cos(ak)
    sk = math.sin(ak)
    # products of half-angle sines/cosines reused below
    cc = ci*ck
    cs = ci*sk
    sc = si*ck
    ss = si*sk
    quaternion = numpy.empty((4, ), dtype=numpy.float64)
    if repetition:
        # sequences with a repeated axis (e.g. 'sxyx')
        quaternion[i] = cj*(cs + sc)
        quaternion[j] = sj*(cc + ss)
        quaternion[k] = sj*(cs - sc)
        quaternion[3] = cj*(cc - ss)
    else:
        # sequences with three distinct axes (e.g. 'sxyz')
        quaternion[i] = cj*sc - sj*cs
        quaternion[j] = cj*ss + sj*cc
        quaternion[k] = cj*cs - sj*sc
        quaternion[3] = cj*cc + sj*ss
    if parity:
        quaternion[j] *= -1
    return quaternion
def quaternion_about_axis(angle, axis):
    """Return quaternion (x, y, z, w) for rotation of `angle` about `axis`.
    >>> q = quaternion_about_axis(0.123, (1, 0, 0))
    >>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
    True
    """
    half = angle / 2.0
    q = numpy.zeros((4, ), dtype=numpy.float64)
    q[:3] = axis[:3]
    qlen = vector_norm(q)
    if qlen > _EPS:
        # scale axis part to sin(angle/2); a degenerate axis leaves it zero
        q *= math.sin(half) / qlen
    q[3] = math.cos(half)
    return q
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion (x, y, z, w).
    >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
    True
    """
    q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
    nq = numpy.dot(q, q)
    if nq < _EPS:
        # near-zero quaternion: treat as identity rotation
        return numpy.identity(4)
    # normalize and precompute all pairwise products at once
    q *= math.sqrt(2.0 / nq)
    q = numpy.outer(q, q)
    M = numpy.identity(4)
    M[0, 0] = 1.0 - q[1, 1] - q[2, 2]
    M[0, 1] = q[0, 1] - q[2, 3]
    M[0, 2] = q[0, 2] + q[1, 3]
    M[1, 0] = q[0, 1] + q[2, 3]
    M[1, 1] = 1.0 - q[0, 0] - q[2, 2]
    M[1, 2] = q[1, 2] - q[0, 3]
    M[2, 0] = q[0, 2] - q[1, 3]
    M[2, 1] = q[1, 2] + q[0, 3]
    M[2, 2] = 1.0 - q[0, 0] - q[1, 1]
    return M
def quaternion_from_matrix(matrix):
    """Return quaternion (x, y, z, w) from rotation matrix.
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095])
    True
    """
    q = numpy.empty((4, ), dtype=numpy.float64)
    # numpy.asarray replaces numpy.array(..., copy=False): same behavior
    # (M is only read), but copy=False raises under NumPy 2.0.
    M = numpy.asarray(matrix, dtype=numpy.float64)[:4, :4]
    t = numpy.trace(M)
    if t > M[3, 3]:
        # trace dominates: compute w from the trace, vector part from the
        # antisymmetric off-diagonal differences
        q[3] = t
        q[2] = M[1, 0] - M[0, 1]
        q[1] = M[0, 2] - M[2, 0]
        q[0] = M[2, 1] - M[1, 2]
    else:
        # pick the largest diagonal element for numerical stability
        i, j, k = 0, 1, 2
        if M[1, 1] > M[0, 0]:
            i, j, k = 1, 2, 0
        if M[2, 2] > M[i, i]:
            i, j, k = 2, 0, 1
        t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
        q[i] = t
        q[j] = M[i, j] + M[j, i]
        q[k] = M[k, i] + M[i, k]
        q[3] = M[k, j] - M[j, k]
    q *= 0.5 / math.sqrt(t * M[3, 3])
    return q
def quaternion_multiply(quaternion1, quaternion0):
    """Return the product quaternion1 * quaternion0 in (x, y, z, w) order.
    >>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
    >>> numpy.allclose(q, [-44, -14, 48, 28])
    True
    """
    x0, y0, z0, w0 = quaternion0
    x1, y1, z1, w1 = quaternion1
    # Hamilton product, expanded component-wise
    x = x1*w0 + y1*z0 - z1*y0 + w1*x0
    y = -x1*z0 + y1*w0 + z1*x0 + w1*y0
    z = x1*y0 - y1*x0 + z1*w0 + w1*z0
    w = -x1*x0 - y1*y0 - z1*z0 + w1*w0
    return numpy.array((x, y, z, w), dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion (negated vector part, same w).
    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
    True
    """
    q = numpy.array((quaternion[0], quaternion[1],
                     quaternion[2], quaternion[3]), dtype=numpy.float64)
    q[:3] = -q[:3]
    return q
def quaternion_inverse(quaternion):
    """Return inverse of quaternion: conjugate divided by squared norm.
    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1])
    True
    """
    # conjugate inlined: negate the vector part, keep w
    conj = numpy.array((-quaternion[0], -quaternion[1],
                        -quaternion[2], quaternion[3]), dtype=numpy.float64)
    return conj / numpy.dot(quaternion, quaternion)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.
    >>> q0 = random_quaternion()
    >>> q1 = random_quaternion()
    >>> q = quaternion_slerp(q0, q1, 0.0)
    >>> numpy.allclose(q, q0)
    True
    >>> q = quaternion_slerp(q0, q1, 1.0, 1)
    >>> numpy.allclose(q, q1)
    True
    >>> q = quaternion_slerp(q0, q1, 0.5)
    >>> angle = math.acos(numpy.dot(q0, q))
    >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
        numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
    True
    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    # trivial endpoints: return the normalized inputs unchanged
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    if abs(abs(d) - 1.0) < _EPS:
        # quaternions are (anti)parallel: nothing to interpolate
        return q0
    if shortestpath and d < 0.0:
        # invert rotation
        d = -d
        q1 *= -1.0
    # spin adds extra full half-turns to the interpolation arc
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    isin = 1.0 / math.sin(angle)
    # blend the two endpoint quaternions in place and sum them
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def random_quaternion(rand=None):
    """Return uniform random unit quaternion.

    rand: array like or None
        Three independent random variables that are uniformly distributed
        between 0 and 1; drawn internally when None.

    >>> q = random_quaternion()
    >>> numpy.allclose(1.0, vector_norm(q))
    True
    >>> q = random_quaternion(numpy.random.random(3))
    >>> q.shape
    (4,)
    """
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    # subgroup-algorithm construction of a uniform unit quaternion
    u1, u2, u3 = rand[0], rand[1], rand[2]
    r1 = numpy.sqrt(1.0 - u1)
    r2 = numpy.sqrt(u1)
    t1 = 2.0 * math.pi * u2
    t2 = 2.0 * math.pi * u3
    return numpy.array((numpy.sin(t1) * r1,
                        numpy.cos(t1) * r1,
                        numpy.sin(t2) * r2,
                        numpy.cos(t2) * r2), dtype=numpy.float64)
def random_rotation_matrix(rand=None):
    """Return uniform random rotation matrix.

    rand: array like or None
        Three independent random variables that are uniformly distributed
        between 0 and 1, forwarded to random_quaternion.

    >>> R = random_rotation_matrix()
    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
    True
    """
    # draw a uniform random unit quaternion, then convert it to a matrix
    q = random_quaternion(rand)
    return quaternion_matrix(q)
class Arcball(object):
    """Virtual Trackball Control.

    Maps 2D window coordinates onto a virtual sphere and accumulates the
    induced rotations as quaternions.

    >>> ball = Arcball()
    >>> ball = Arcball(initial=numpy.identity(4))
    >>> ball.place([320, 320], 320)
    >>> ball.down([500, 250])
    >>> ball.drag([475, 275])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 3.90583455)
    True
    >>> ball = Arcball(initial=[0, 0, 0, 1])
    >>> ball.place([320, 320], 320)
    >>> ball.setaxes([1,1,0], [-1, 1, 0])
    >>> ball.setconstrain(True)
    >>> ball.down([400, 200])
    >>> ball.drag([200, 400])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 0.2055924)
    True
    >>> ball.next()
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.
        initial : quaternion or rotation matrix
        """
        self._axis = None  # constraint axis picked at mouse-down, or None
        self._axes = None  # candidate constraint axes set via setaxes
        self._radius = 1.0  # trackball radius in window coordinates
        self._center = [0.0, 0.0]  # trackball center in window coordinates
        self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
        self._constrain = False  # whether drags are constrained to an axis
        if initial is None:
            # identity rotation in (x, y, z, w) quaternion form
            self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
        else:
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                # rotation matrix given: convert to quaternion
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                # quaternion given: normalize in place
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix.")
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.
        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set axes to constrain rotations."""
        if axes is None:
            self._axes = None
        else:
            # store normalized copies of the given axes
            self._axes = [unit_vector(axis) for axis in axes]
    def setconstrain(self, constrain):
        """Set state of constrain to axis mode."""
        self._constrain = constrain == True
    def getconstrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        # current rotation becomes the new reference for this drag
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # quaternion from the arc between the down-point and the current point
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # no significant motion: keep the reference rotation
            self._qnow = self._qdown
        else:
            q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)]
            self._qnow = quaternion_multiply(q, self._qdown)
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # extrapolate past the last drag via slerp with fraction > 1
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Return unit sphere coordinates from window coordinates."""
    # Window y grows downwards, hence the sign flip on the second component.
    x = (point[0] - center[0]) / radius
    y = (center[1] - point[1]) / radius
    v = numpy.array((x, y, 0.0), dtype=numpy.float64)
    n = x*x + y*y
    if n > 1.0:
        # Cursor outside the ball: project onto the silhouette circle.
        v /= math.sqrt(n)
    else:
        # Lift onto the front hemisphere.
        v[2] = math.sqrt(1.0 - n)
    return v
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    v = numpy.array(point, dtype=numpy.float64, copy=True)
    a = numpy.array(axis, dtype=numpy.float64, copy=True)
    # Project v onto the plane perpendicular to a.
    v -= a * numpy.dot(a, v)
    n = vector_norm(v)
    if n > _EPS:
        # Normalize, keeping the result on the visible (front) hemisphere.
        v /= n
        if v[2] < 0.0:
            v *= -1.0
        return v
    # v was (anti)parallel to the axis: pick any perpendicular unit vector.
    if a[2] == 1.0:
        return numpy.array([1, 0, 0], dtype=numpy.float64)
    return unit_vector([-a[1], a[0], 0])
def arcball_nearest_axis(point, axes):
    """Return axis, which arc is nearest to point."""
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    best_axis = None
    best_score = -1.0
    for candidate in axes:
        # Alignment between the point and its projection onto this arc.
        score = numpy.dot(arcball_constrain_to_axis(point, candidate), point)
        if score > best_score:
            best_score = score
            best_axis = candidate
    return best_axis
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
# leading 's' = static (fixed) frame, 'r' = rotating frame
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# inverse lookup: tuple -> axes string
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
# helper functions
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. eucledian norm, of ndarray along axis.

    If `out` is supplied, the norms are written into it in place and None
    is returned.
    >>> numpy.allclose(vector_norm([3.0, 4.0]), 5.0)
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1.0])
    1.0
    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is not None:
        # Caller-supplied buffer: square, reduce, and take the root in place.
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out)
        return None
    if data.ndim == 1:
        # Fast path for flat vectors: a single dot product.
        return math.sqrt(numpy.dot(data, data))
    data *= data
    result = numpy.atleast_1d(numpy.sum(data, axis=axis))
    numpy.sqrt(result, result)
    return result
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length, i.e. eucledian norm, along axis.

    If `out` is supplied, the normalized values are written into it in place
    and None is returned.
    >>> numpy.allclose(unit_vector([3.0, 4.0]), [0.6, 0.8])
    True
    >>> list(unit_vector([1.0]))
    [1.0]
    """
    if out is None:
        data = numpy.array(data, dtype=numpy.float64, copy=True)
        if data.ndim == 1:
            # Fast path for flat vectors.
            data /= math.sqrt(numpy.dot(data, data))
            return data
    else:
        # Work directly in the caller's buffer (copy in unless aliased).
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        data = out
    norms = numpy.atleast_1d(numpy.sum(data * data, axis))
    numpy.sqrt(norms, norms)
    if axis is not None:
        # Re-insert the reduced axis so the division broadcasts correctly.
        norms = numpy.expand_dims(norms, axis)
    data /= norms
    return data if out is None else None
def random_vector(size):
    """Return array of random doubles in the half-open interval [0.0, 1.0).
    >>> v = random_vector(100)
    >>> bool(numpy.all(v >= 0.0) and numpy.all(v < 1.0))
    True
    """
    return numpy.random.random(size)
def inverse_matrix(matrix):
    """Return inverse of square transformation matrix.
    >>> numpy.allclose(inverse_matrix(numpy.identity(4)), numpy.identity(4))
    True
    """
    return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
    """Return concatenation of series of transformation matrices.

    With no arguments, the 4x4 identity is returned.
    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    """
    result = numpy.identity(4)
    for matrix in matrices:
        result = numpy.dot(result, matrix)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform same transformation.

    Each matrix is normalized by its [3, 3] element before comparison, so
    homogeneous matrices differing only by overall scale compare equal.
    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    """
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.
    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.
    Return True on successful import.
    """
    try:
        # NOTE: __import__ returns the top-level package for dotted names.
        module = __import__(module_name)
    except ImportError:
        if warn:
            warnings.warn("Failed to import module " + module_name)
        # Falls through and implicitly returns None (falsy) on failure.
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                if attr in globals():
                    # Preserve the pure-Python fallback under a prefixed name
                    # before it is shadowed below.
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("No Python implementation of " + attr)
            # Rebind the name in this module's namespace to the imported one.
            globals()[attr] = getattr(module, attr)
        return True
| 57,695 | 32.819461 | 79 | py |
sVAE | sVAE-main/svae/_utils.py | import torch
class GumbelSigmoid(torch.nn.Module):
    """Per-action binary masks sampled with the Gumbel-sigmoid trick.

    Holds a ``(num_action, num_latent)`` matrix of logits ``log_alpha``;
    ``forward`` draws one (optionally hard, straight-through) mask row per
    action index in the batch.  After ``threshold()`` the module is frozen
    and deterministically returns the binarized ``fixed_mask`` rows.
    """

    def __init__(self, num_action, num_latent, freeze=False, drawhard=True, tau=1):
        super(GumbelSigmoid, self).__init__()
        self.shape = (num_action, num_latent)
        self.freeze = freeze
        self.drawhard = drawhard
        self.tau = tau
        self.log_alpha = torch.nn.Parameter(torch.zeros(self.shape))
        # useful to make sure these parameters will be pushed to the GPU
        self.uniform = torch.distributions.uniform.Uniform(0, 1)
        self.register_buffer("fixed_mask", torch.ones(self.shape))
        self.reset_parameters()

    # changed this to draw one action per minibatch sample...
    def forward(self, action):
        """Return one mask row per element of `action` (batch of indices)."""
        if self.freeze:
            # Frozen: deterministic lookup into the thresholded mask.
            return self.fixed_mask[action, :]
        batch_size = action.shape[0]
        noise_shape = (batch_size, self.shape[1])
        logistic_noise = (
            self.sample_logistic(noise_shape)
            .type(self.log_alpha.type())
            .to(self.log_alpha.device)
        )
        y_soft = torch.sigmoid((self.log_alpha[action] + logistic_noise) / self.tau)
        if not self.drawhard:
            return y_soft
        y_hard = (y_soft > 0.5).type(y_soft.type())
        # Straight-through estimator: hard sample on the forward pass,
        # gradient of the soft sigmoid on the backward pass.
        return y_hard.detach() - y_soft.detach() + y_soft

    def get_proba(self):
        """Returns probability of getting one"""
        return self.fixed_mask if self.freeze else torch.sigmoid(self.log_alpha)

    def reset_parameters(self):
        # Logit 5 -> sigmoid(5) ~ 0.99, i.e. masks start almost surely "on".
        # Inspired by DCDI.
        torch.nn.init.constant_(self.log_alpha, 5)

    def sample_logistic(self, shape):
        """Sample logistic noise via the inverse CDF of Uniform(0, 1)."""
        draw = self.uniform.sample(shape)
        return torch.log(draw) - torch.log(1 - draw)

    def threshold(self):
        """Binarize the current probabilities at 0.5 and freeze the module."""
        proba = self.get_proba()
        self.fixed_mask.copy_((proba > 0.5).type(proba.type()))
        self.freeze = True
| 2,200 | 32.861538 | 88 | py |
sVAE | sVAE-main/svae/_module.py | # -*- coding: utf-8 -*-
"""Main module."""
from typing import Callable, Iterable, Optional
import numpy as np
import torch
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi.distributions import NegativeBinomial
from scvi.module.base import BaseModuleClass, LossRecorder, auto_move_data
from scvi.nn import DecoderSCVI, Encoder
from torch import logsumexp
from torch.distributions import Normal
from torch.distributions import kl_divergence as kl
from svae._utils import GumbelSigmoid
torch.backends.cudnn.benchmark = True
# VAE model
class SpikeSlabVAEModule(BaseModuleClass):
    """
    Variational auto-encoder model.
    This is light reimplementation of the scVI model described in [Lopez18]_
    with a spike slab prior for sparse mechanism shift modeling
    Parameters
    ----------
    n_input
        Number of input genes
    n_batch
        Number of batches, if 0, no batch correction is performed.
    n_labels
        Number of labels
    n_hidden
        Number of nodes per hidden layer
    n_latent
        Dimensionality of the latent space
    n_layers
        Number of hidden layers used for encoder and decoder NNs
    n_continuous_cov
        Number of continuous covarites
    n_cats_per_cov
        Number of categories for each extra categorical covariate
    dropout_rate
        Dropout rate for neural networks
    latent_distribution
        One of
        * ``'normal'`` - Isotropic normal
        * ``'ln'`` - Logistic normal with normal params N(0, 1)
    encode_covariates
        Whether to concatenate covariates to expression in encoder
    deeply_inject_covariates
        Whether to concatenate covariates into output of hidden layers in encoder/decoder. This option
        only applies when `n_layers` > 1. The covariates are concatenated to the input of subsequent hidden layers.
    use_layer_norm
        Whether to use layer norm in layers
    var_activation
        Callable used to ensure positivity of the variational distributions' variance.
        When `None`, defaults to `torch.exp`.
    """
    def __init__(
        self,
        n_input: int,
        n_batch: int = 0,
        n_labels: int = 0,
        n_hidden: int = 128,
        n_latent: int = 10,
        n_layers: int = 1,
        n_continuous_cov: int = 0,
        n_cats_per_cov: Optional[Iterable[int]] = None,
        dropout_rate: float = 0.1,
        latent_distribution: str = "normal",
        encode_covariates: bool = False,
        use_chem_prior: bool = True,
        deeply_inject_covariates: bool = True,
        use_batch_norm: Literal["encoder", "decoder", "none", "both"] = "both",
        use_layer_norm: Literal["encoder", "decoder", "none", "both"] = "none",
        var_activation: Optional[Callable] = None,
    ):
        super().__init__()
        self.n_latent = n_latent
        self.n_batch = n_batch
        self.n_labels = n_labels
        self.latent_distribution = latent_distribution
        self.encode_covariates = encode_covariates
        self.use_chem_prior = use_chem_prior
        # ELBO weighting / sparsity hyper-parameters used in loss().
        self.beta = 1
        self.warmup = True
        self.sparse_mask_penalty = 1
        # Gene-wise NB dispersion, stored on log scale (exponentiated in
        # generative()).
        self.px_r = torch.nn.Parameter(torch.randn(n_input))
        use_batch_norm_encoder = use_batch_norm == "encoder" or use_batch_norm == "both"
        use_batch_norm_decoder = use_batch_norm == "decoder" or use_batch_norm == "both"
        use_layer_norm_encoder = use_layer_norm == "encoder" or use_layer_norm == "both"
        use_layer_norm_decoder = use_layer_norm == "decoder" or use_layer_norm == "both"
        # z encoder goes from the n_input-dimensional data to an n_latent-d
        # latent space representation
        n_input_encoder = n_input + n_continuous_cov * encode_covariates
        cat_list = [n_batch] + list([] if n_cats_per_cov is None else n_cats_per_cov)
        encoder_cat_list = cat_list if encode_covariates else None
        self.z_encoder = Encoder(
            n_input_encoder,
            n_latent,
            n_cat_list=encoder_cat_list,
            n_layers=n_layers,
            n_hidden=n_hidden,
            dropout_rate=dropout_rate,
            distribution=latent_distribution,
            inject_covariates=deeply_inject_covariates,
            use_batch_norm=use_batch_norm_encoder,
            use_layer_norm=use_layer_norm_encoder,
            var_activation=var_activation,
            return_dist=True,
        )
        # l encoder goes from n_input-dimensional data to 1-d library size
        # NOTE(review): inference() below actually uses the observed
        # log-library instead of this encoder's output.
        self.l_encoder = Encoder(
            n_input_encoder,
            1,
            n_layers=1,
            n_cat_list=encoder_cat_list,
            n_hidden=n_hidden,
            dropout_rate=dropout_rate,
            inject_covariates=deeply_inject_covariates,
            use_batch_norm=use_batch_norm_encoder,
            use_layer_norm=use_layer_norm_encoder,
            var_activation=var_activation,
            return_dist=True,
        )
        # decoder goes from n_latent-dimensional space to n_input-d data
        n_input_decoder = n_latent + n_continuous_cov
        self.decoder = DecoderSCVI(
            n_input_decoder,
            n_input,
            n_cat_list=cat_list,
            n_layers=n_layers,
            n_hidden=n_hidden,
            inject_covariates=deeply_inject_covariates,
            use_batch_norm=use_batch_norm_decoder,
            use_layer_norm=use_layer_norm_decoder,
            scale_activation="softmax",
        )
        # mu_a: per-action prior means of the latent shifts
        self.action_prior_mean = torch.nn.parameter.Parameter(
            torch.randn((n_labels, n_latent))
        )
        # p_a: prior logits of the per-action sparsity masks
        self.action_prior_logit_weight = torch.nn.parameter.Parameter(
            1 * torch.ones((n_labels, n_latent))
        )
        # q_a: variational Gumbel-sigmoid distribution over the masks
        self.gumbel_action = GumbelSigmoid(num_action=n_labels, num_latent=n_latent)
        self.use_global_kl = True
    def _get_inference_input(self, tensors):
        # Collect encoder inputs from the scvi tensor registry.
        x = tensors[REGISTRY_KEYS.X_KEY]
        batch_index = tensors[REGISTRY_KEYS.BATCH_KEY]
        # y = tensors[REGISTRY_KEYS.LABELS_KEY]
        cont_key = REGISTRY_KEYS.CONT_COVS_KEY
        cont_covs = tensors[cont_key] if cont_key in tensors.keys() else None
        cat_key = REGISTRY_KEYS.CAT_COVS_KEY
        cat_covs = tensors[cat_key] if cat_key in tensors.keys() else None
        input_dict = dict(
            x=x, batch_index=batch_index, cont_covs=cont_covs, cat_covs=cat_covs
        )
        return input_dict
    def _get_generative_input(self, tensors, inference_outputs):
        # Collect decoder inputs; `y` (the action/label) drives the prior.
        z = inference_outputs["z"]
        library = inference_outputs["library"]
        batch_index = tensors[REGISTRY_KEYS.BATCH_KEY]
        y = tensors[REGISTRY_KEYS.LABELS_KEY]
        cont_key = REGISTRY_KEYS.CONT_COVS_KEY
        cont_covs = tensors[cont_key] if cont_key in tensors.keys() else None
        cat_key = REGISTRY_KEYS.CAT_COVS_KEY
        cat_covs = tensors[cat_key] if cat_key in tensors.keys() else None
        input_dict = dict(
            z=z,
            library=library,
            batch_index=batch_index,
            y=y,
            cont_covs=cont_covs,
            cat_covs=cat_covs,
        )
        return input_dict
    @auto_move_data
    def inference(self, x, batch_index, cont_covs=None, cat_covs=None, n_samples=1):
        """
        High level inference method.
        Runs the inference (encoder) model.
        """
        x_ = x
        # Observed library size: log of total counts per cell (no l_encoder).
        library = torch.log(x.sum(1)).unsqueeze(1)
        # log1p-transform raw counts before encoding.
        x_ = torch.log(1 + x_)
        if cont_covs is not None and self.encode_covariates:
            encoder_input = torch.cat((x_, cont_covs), dim=-1)
        else:
            encoder_input = x_
        if cat_covs is not None and self.encode_covariates:
            categorical_input = torch.split(cat_covs, 1, dim=1)
        else:
            categorical_input = tuple()
        qz, z = self.z_encoder(encoder_input, batch_index, *categorical_input)
        ql = None
        if n_samples > 1:
            # Redraw z and tile the observed library for multi-sample use.
            untran_z = qz.sample((n_samples,))
            z = self.z_encoder.z_transformation(untran_z)
            library = library.unsqueeze(0).expand(
                (n_samples, library.size(0), library.size(1))
            )
        outputs = dict(z=z, qz=qz, ql=ql, library=library)
        return outputs
    @auto_move_data
    def generative(
        self,
        z,
        library,
        batch_index,
        cont_covs=None,
        cat_covs=None,
        y=None,
        transform_batch=None,
    ):
        """Runs the generative model."""
        if cont_covs is None:
            decoder_input = z
        elif z.dim() != cont_covs.dim():
            decoder_input = torch.cat(
                [z, cont_covs.unsqueeze(0).expand(z.size(0), -1, -1)], dim=-1
            )
        else:
            decoder_input = torch.cat([z, cont_covs], dim=-1)
        if cat_covs is not None:
            categorical_input = torch.split(cat_covs, 1, dim=1)
        else:
            categorical_input = tuple()
        if transform_batch is not None:
            batch_index = torch.ones_like(batch_index) * transform_batch
        px_scale, _, px_rate, _ = self.decoder(
            "gene",
            decoder_input,
            library,
            batch_index,
            *categorical_input,
            # y, IMPORTANT TO BE TAKEN AWAY, otherwise we have leakage of y into the decoder by other means than the shift
        )
        # Gene-wise dispersion; px_r is stored on log scale.
        px_r = torch.exp(self.px_r)
        px = NegativeBinomial(mu=px_rate, theta=px_r, scale=px_scale)
        # Priors
        pl = None
        # sample mask (size chemical times latent) for each datapoint
        mask = self.gumbel_action(y[:, 0].long())  # batch x latentdim
        # subsample actions we care about
        # extract chemical specific means
        mean_z = torch.index_select(
            self.action_prior_mean, 0, y[:, 0].long()
        )  # batch x latent dim
        # prune out entries according to mask
        if self.use_chem_prior:
            # Spike-and-slab: masked action-specific mean shift, unit variance.
            pz = Normal(mean_z * mask, torch.ones_like(z))
        else:
            # Standard N(0, I) prior (plain VAE behavior).
            pz = Normal(torch.zeros_like(z), torch.ones_like(z))
        # we will enforce proba of mask to be sparse, so that means that most of the time mask should be zero, and turn off the action specific prior
        return dict(
            px=px,
            pl=pl,
            pz=pz,
        )
    def freeze_params(self):
        # freeze
        # Stop gradient flow through encoder/decoder and fix batch-norm
        # running statistics (momentum 0) so fine-tuning only touches the
        # remaining parameters (e.g. action_prior_mean).
        for param in self.decoder.parameters():
            param.requires_grad = False
        for param in self.z_encoder.parameters():
            param.requires_grad = False
        self.px_r.requires_grad = False
        self.action_prior_logit_weight.requires_grad = False
        for _, mod in self.decoder.named_modules():
            if isinstance(mod, torch.nn.BatchNorm1d):
                mod.momentum = 0
        for _, mod in self.z_encoder.named_modules():
            if isinstance(mod, torch.nn.BatchNorm1d):
                mod.momentum = 0
    def reinit_actsparse_and_freeze(self, loc):
        # here we must reinit for the embeddings that may have been shrank by sparsity
        with torch.no_grad():
            self.action_prior_mean[loc] = 0
            # Logit 5 re-opens the mask for these actions before freezing.
            self.gumbel_action.log_alpha[loc] = 5
        self.gumbel_action.threshold()
    def loss(
        self,
        tensors,
        inference_outputs,
        generative_outputs,
        kl_weight: float = 1.0,
        n_obs: int = 1.0,
    ):
        x = tensors[REGISTRY_KEYS.X_KEY]
        # Local KL: q(z|x) against the (possibly action-shifted) prior p(z).
        kl_divergence_z = kl(inference_outputs["qz"], generative_outputs["pz"]).sum(
            dim=1
        )
        # Library size is observed, so it carries no KL term.
        kl_divergence_l = 0.0
        reconst_loss = -generative_outputs["px"].log_prob(x).sum(-1)
        kl_local_for_warmup = kl_divergence_z
        kl_local_no_warmup = kl_divergence_l
        if self.warmup:
            # Anneal the z-KL with beta * kl_weight during warmup.
            weighted_kl_local = (
                self.beta * kl_weight * kl_local_for_warmup + kl_local_no_warmup
            )
        else:
            weighted_kl_local = kl_local_for_warmup + kl_local_no_warmup
        kl_local = dict(
            kl_divergence_l=kl_divergence_l, kl_divergence_z=kl_divergence_z
        )
        # Sparsity regularization on the mask probabilities.
        q_discrete = self.gumbel_action.get_proba()
        p_discrete = torch.sigmoid(self.action_prior_logit_weight)
        kl_discrete = torch.sum(q_discrete * torch.log(q_discrete / p_discrete))
        # With sparse_mask_penalty = s this is a Beta(1, s) prior, pushing
        # mask probabilities toward zero.
        prior_w = torch.ones_like(self.action_prior_logit_weight)
        logp_qw = (
            torch.distributions.Beta(prior_w, prior_w * self.sparse_mask_penalty)
            .log_prob(q_discrete)
            .sum()
        )
        logp_w = (
            torch.distributions.Beta(prior_w, prior_w * self.sparse_mask_penalty)
            .log_prob(p_discrete)
            .sum()
        )
        if self.use_global_kl:
            # Implementation detail described in the paper: line below describes the mathematical derivations in the paper
            # kl_global = torch.tensor(0.0) + kl_discrete - logp_w
            # Line below is the practical implementation, setting p_discrete to q_discrete
            kl_global = torch.tensor(0.0) - logp_qw
            loss = (
                n_obs * torch.mean(reconst_loss + weighted_kl_local)
                + kl_weight * kl_global
            )
        else:
            loss = n_obs * torch.mean(reconst_loss + weighted_kl_local)
            kl_global = torch.tensor(0.0)
        return LossRecorder(loss, reconst_loss, kl_local, kl_global)
    @torch.no_grad()
    @auto_move_data
    def marginal_ll(self, tensors, n_mc_samples):
        # Importance-sampling estimate of log p(x): log mean_i exp(
        # log p(x|z_i) + log p(z_i) - log q(z_i|x)) over n_mc_samples draws.
        sample_batch = tensors[REGISTRY_KEYS.X_KEY]
        to_sum = torch.zeros(sample_batch.size()[0], n_mc_samples)
        for i in range(n_mc_samples):
            # Distribution parameters and sampled variables
            inference_outputs, generative_outputs, losses = self.forward(tensors)
            z = inference_outputs["z"]
            p_za = generative_outputs["pz"].log_prob(z).sum(dim=1)
            q_z_x = inference_outputs["qz"].log_prob(z).sum(dim=1)
            p_x_za = -losses.reconstruction_loss
            log_prob_sum = p_x_za + p_za - q_z_x
            to_sum[:, i] = log_prob_sum
        batch_log_lkl = logsumexp(to_sum, dim=-1) - np.log(n_mc_samples)
        return batch_log_lkl
| 14,243 | 34.969697 | 149 | py |
sVAE | sVAE-main/svae/_model.py | import logging
from typing import List, Optional, Sequence
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi.data import AnnDataManager
from scvi.data.fields import (
CategoricalJointObsField,
CategoricalObsField,
LayerField,
NumericalJointObsField,
NumericalObsField,
)
from scvi.model.base import (
ArchesMixin,
BaseModelClass,
RNASeqMixin,
UnsupervisedTrainingMixin,
VAEMixin,
)
from scvi.utils import setup_anndata_dsp
from ._module import SpikeSlabVAEModule
logger = logging.getLogger(__name__)
class SpikeSlabVAE(
    RNASeqMixin, VAEMixin, ArchesMixin, UnsupervisedTrainingMixin, BaseModelClass
):
    """
    single-cell Variational Inference [Lopez18]_.
    Parameters
    ----------
    adata
        AnnData object that has been registered via :meth:`~scvi.model.SCVI.setup_anndata`.
    n_hidden
        Number of nodes per hidden layer.
    n_latent
        Dimensionality of the latent space.
    n_layers
        Number of hidden layers used for encoder and decoder NNs.
    dropout_rate
        Dropout rate for neural networks.
    dispersion
        One of the following:
        * ``'gene'`` - dispersion parameter of NB is constant per gene across cells
        * ``'gene-batch'`` - dispersion can differ between different batches
        * ``'gene-label'`` - dispersion can differ between different labels
        * ``'gene-cell'`` - dispersion can differ for every gene in every cell
    gene_likelihood
        One of:
        * ``'nb'`` - Negative binomial distribution
        * ``'zinb'`` - Zero-inflated negative binomial distribution
        * ``'poisson'`` - Poisson distribution
    latent_distribution
        One of:
        * ``'normal'`` - Normal distribution
        * ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)
    **model_kwargs
        Keyword args for :class:`~scvi.module.VAE`
    Examples
    --------
    >>> adata = anndata.read_h5ad(path_to_anndata)
    >>> scvi.model.SCVI.setup_anndata(adata, batch_key="batch")
    >>> vae = scvi.model.SCVI(adata)
    >>> vae.train()
    >>> adata.obsm["X_scVI"] = vae.get_latent_representation()
    >>> adata.obsm["X_normalized_scVI"] = vae.get_normalized_expression()
    Notes
    -----
    See further usage examples in the following tutorials:
    1. :doc:`/tutorials/notebooks/api_overview`
    2. :doc:`/tutorials/notebooks/harmonization`
    3. :doc:`/tutorials/notebooks/scarches_scvi_tools`
    4. :doc:`/tutorials/notebooks/scvi_in_R`
    """
    def __init__(
        self,
        adata: AnnData,
        n_hidden: int = 128,
        n_latent: int = 10,
        n_layers: int = 1,
        dropout_rate: float = 0.1,
        latent_distribution: Literal["normal", "ln"] = "normal",
        **module_kwargs,
    ):
        super().__init__(adata)
        # Category counts for extra categorical covariates, if registered.
        n_cats_per_cov = (
            self.adata_manager.get_state_registry(
                REGISTRY_KEYS.CAT_COVS_KEY
            ).n_cats_per_key
            if REGISTRY_KEYS.CAT_COVS_KEY in self.adata_manager.data_registry
            else None
        )
        n_batch = self.summary_stats.n_batch
        self.module = SpikeSlabVAEModule(
            n_input=self.summary_stats.n_vars,
            n_batch=n_batch,
            n_labels=self.summary_stats.n_labels,
            n_continuous_cov=self.summary_stats.get("n_extra_continuous_covs", 0),
            n_cats_per_cov=n_cats_per_cov,
            n_hidden=n_hidden,
            n_latent=n_latent,
            n_layers=n_layers,
            dropout_rate=dropout_rate,
            latent_distribution=latent_distribution,
            **module_kwargs,
        )
        self._model_summary_string = (
            "SpikeSlabVAE Model with the following params: \nn_hidden: {}, n_latent: {}, n_layers: {}, dropout_rate: "
            "{}, latent_distribution: {}"
        ).format(
            n_hidden,
            n_latent,
            n_layers,
            dropout_rate,
            latent_distribution,
        )
        self.init_params_ = self._get_init_params(locals())
    @classmethod
    @setup_anndata_dsp.dedent
    def setup_anndata(
        cls,
        adata: AnnData,
        layer: Optional[str] = None,
        batch_key: Optional[str] = None,
        labels_key: Optional[str] = None,
        size_factor_key: Optional[str] = None,
        categorical_covariate_keys: Optional[List[str]] = None,
        continuous_covariate_keys: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        %(summary)s.
        Parameters
        ----------
        %(param_layer)s
        %(param_batch_key)s
        %(param_labels_key)s
        %(param_size_factor_key)s
        %(param_cat_cov_keys)s
        %(param_cont_cov_keys)s
        """
        setup_method_args = cls._get_setup_method_args(**locals())
        anndata_fields = [
            LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
            CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key),
            CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, labels_key),
            NumericalObsField(
                REGISTRY_KEYS.SIZE_FACTOR_KEY, size_factor_key, required=False
            ),
            CategoricalJointObsField(
                REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys
            ),
            NumericalJointObsField(
                REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys
            ),
        ]
        adata_manager = AnnDataManager(
            fields=anndata_fields, setup_method_args=setup_method_args
        )
        adata_manager.register_fields(adata, **kwargs)
        cls.register_manager(adata_manager)
    @torch.no_grad()
    def get_elbo(
        self,
        adata: Optional[AnnData] = None,
        indices: Optional[Sequence[int]] = None,
        batch_size: Optional[int] = None,
        agg: bool = False,
    ) -> float:
        """
        Return the ELBO for the data.
        The ELBO is a lower bound on the log likelihood of the data used for optimization
        of VAEs. Note, this is not the negative ELBO, higher is better.
        Parameters
        ----------
        adata
            AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
            AnnData object used to initialize the model.
        indices
            Indices of cells in adata to use. If `None`, all cells are used.
        batch_size
            Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
        """
        adata = self._validate_anndata(adata)
        scdl = self._make_data_loader(
            adata=adata, indices=indices, batch_size=batch_size
        )
        # Iterate once over the data and compute the elbo
        elbo = []
        for tensors in scdl:
            _, _, scvi_loss = self.module(tensors)
            recon_loss = scvi_loss.reconstruction_loss
            # NOTE(review): the module's loss() builds kl_local as a dict;
            # this assumes LossRecorder reduces it to a per-cell tensor —
            # confirm against the installed scvi-tools version.
            kl_local = scvi_loss.kl_local
            # NOTE(review): this accumulates reconstruction loss + KL, i.e.
            # the per-cell negative ELBO, despite the docstring's
            # "higher is better" — confirm the intended sign.
            elbo += [(recon_loss + kl_local).cpu().numpy()]
        # now aggregate by chemical
        elbo = np.concatenate(elbo)
        elbo_res = {}
        label_key = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "original_key"
        ]
        cat = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "categorical_mapping"
        ]
        for c in cat:
            if indices is not None:
                ind = np.where(adata.obs[label_key][indices].values == c)[0]
            else:
                ind = np.where(adata.obs[label_key].values == c)[0]
            # Skip labels with too few cells for a stable per-label average.
            if len(ind) > 10:
                elbo_res[c] = np.mean(elbo[ind])
        if agg:
            return pd.Series(elbo_res).values.mean()
        else:
            return elbo_res
    @torch.no_grad()
    def get_marginal_ll(
        self,
        adata: Optional[AnnData] = None,
        indices: Optional[Sequence[int]] = None,
        n_mc_samples: int = 1000,
        batch_size: Optional[int] = None,
        agg: bool = False,
    ) -> float:
        """
        Return the marginal LL for the data, calculated by label
        The computation here is a biased estimator of the marginal log likelihood of the data.
        Note, this is not the negative log likelihood, higher is better.
        Parameters
        ----------
        adata
            AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
            AnnData object used to initialize the model.
        indices
            Indices of cells in adata to use. If `None`, all cells are used.
        n_mc_samples
            Number of Monte Carlo samples to use for marginal LL estimation.
        batch_size
            Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
        """
        adata = self._validate_anndata(adata)
        if indices is None:
            indices = np.arange(adata.n_obs)
        scdl = self._make_data_loader(
            adata=adata, indices=indices, batch_size=batch_size
        )
        if hasattr(self.module, "marginal_ll"):
            log_lkl = []
            for tensors in scdl:
                log_lkl += [self.module.marginal_ll(tensors, n_mc_samples=n_mc_samples)]
        else:
            raise NotImplementedError(
                "marginal_ll is not implemented for current model. "
                "Please raise an issue on github if you need it."
            )
        # now aggregate by chemical
        log_lkl = np.concatenate(log_lkl)
        log_lkl_res = {}
        label_key = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "original_key"
        ]
        cat = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "categorical_mapping"
        ]
        for c in cat:
            if indices is not None:
                ind = np.where(adata.obs[label_key][indices].values == c)[0]
            else:
                ind = np.where(adata.obs[label_key].values == c)[0]
            # Skip labels with too few cells for a stable per-label average.
            if len(ind) > 10:
                log_lkl_res[c] = np.mean(log_lkl[ind])
        if agg:
            return pd.Series(log_lkl_res).values.mean()
        else:
            return log_lkl_res
| 10,309 | 32.803279 | 118 | py |
sVAE | sVAE-main/svae/metrics.py | import torch
import numpy as np
from scipy.stats import spearmanr
from scipy.optimize import linear_sum_assignment
from sklearn.linear_model import LinearRegression
def get_linear_score(x, y):
    """Return the R^2 score of an ordinary least-squares fit of y from x."""
    model = LinearRegression()
    model.fit(x, y)
    return model.score(x, y)
def linear_regression_metric(z, z_hat, num_samples=int(1e5), indices=None):
    """Return (R^2 of z from z_hat, R^2 from the index-masked z_hat).

    `num_samples` is accepted for interface compatibility but unused here.
    The second score is 0 when `indices` is not provided.
    """
    full_score = get_linear_score(z_hat, z)
    if indices is None:
        return full_score, 0
    # masking z_hat
    # TODO: this does not take into account case where z_block_size > 1
    masked = z_hat[:, indices[-z.shape[0] :]]
    return full_score, get_linear_score(masked, z)
def mean_corr_coef_np(x, y, method="pearson"):
    """
    A numpy implementation of the mean correlation coefficient metric.
    :param x: numpy.ndarray of shape (samples, d)
    :param y: numpy.ndarray of shape (samples, d)
    :param method: str, optional
        'pearson' for Pearson's correlation coefficient,
        'spearman' for Spearman's nonparametric rank correlation coefficient
    :return: (matched per-dimension |correlations|, their mean)
    """
    d = x.shape[1]
    if method == "pearson":
        corr = np.corrcoef(x, y, rowvar=False)[:d, d:]
    elif method == "spearman":
        corr = spearmanr(x, y)[0][:d, d:]
    else:
        raise ValueError("not a valid method: {}".format(method))
    corr = np.abs(corr)
    # Best one-to-one pairing of true and recovered dimensions.
    assignment = linear_sum_assignment(-1 * corr)
    matched = corr[assignment]
    return matched, matched.mean()
def mean_corr_coef(x, y, method="pearson"):
    """Dispatch the mean correlation coefficient to the right backend.

    Both inputs must share the same type (numpy.ndarray or torch.Tensor).
    Returns the same (matched correlations, mean) pair as
    ``mean_corr_coef_np``.
    """
    if type(x) != type(y):
        raise ValueError(
            "inputs are of different types: ({}, {})".format(type(x), type(y))
        )
    if isinstance(x, np.ndarray):
        return mean_corr_coef_np(x, y, method)
    elif isinstance(x, torch.Tensor):
        # Bug fix: the previous code called mean_corr_coef_pt, which is not
        # defined anywhere in this module and raised NameError at runtime.
        # Tensors are evaluated through the numpy implementation instead.
        return mean_corr_coef_np(
            x.detach().cpu().numpy(), y.detach().cpu().numpy(), method
        )
    else:
        raise ValueError("not a supported input type: {}".format(type(x)))
| 2,000 | 29.784615 | 78 | py |
sVAE | sVAE-main/svae/baselines/_module.py | # -*- coding: utf-8 -*-
"""Main module."""
from typing import Callable, Iterable, Optional
import numpy as np
import torch
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi.distributions import NegativeBinomial
from scvi.module.base import BaseModuleClass, LossRecorder, auto_move_data
from scvi.nn import DecoderSCVI, Encoder
from torch import logsumexp
from torch.distributions import Normal
from torch.distributions import kl_divergence as kl
from .._utils import GumbelSigmoid
torch.backends.cudnn.benchmark = True
# VAE model
class sVAEModule(BaseModuleClass):
    """
    Variational auto-encoder module.
    This is a light modification of the scVI model described in [Lopez18]_
    to accommodate iVAE, betaVAE, and sVAE: the latent prior is conditioned on a
    per-label (perturbation) mean vector, optionally pruned by a learned
    Gumbel-Sigmoid sparsity mask (``use_chem_prior`` / ``sparse_mask_penalty``).
    Parameters
    ----------
    n_input
        Number of input genes
    n_batch
        Number of batches, if 0, no batch correction is performed.
    n_labels
        Number of labels (one prior mean / mask row is learned per label)
    n_hidden
        Number of nodes per hidden layer
    n_latent
        Dimensionality of the latent space
    n_layers
        Number of hidden layers used for encoder and decoder NNs
    n_continuous_cov
        Number of continuous covariates
    n_cats_per_cov
        Number of categories for each extra categorical covariate
    dropout_rate
        Dropout rate for neural networks
    latent_distribution
        One of
        * ``'normal'`` - Isotropic normal
        * ``'ln'`` - Logistic normal with normal params N(0, 1)
    encode_covariates
        Whether to concatenate covariates to expression in encoder
    use_chem_prior
        Whether the latent prior uses the label-specific (masked) means; when
        ``False`` the prior falls back to a standard N(0, I).
    deeply_inject_covariates
        Whether to concatenate covariates into output of hidden layers in encoder/decoder. This option
        only applies when `n_layers` > 1. The covariates are concatenated to the input of subsequent hidden layers.
    use_layer_norm
        Whether to use layer norm in layers
    var_activation
        Callable used to ensure positivity of the variational distributions' variance.
        When `None`, defaults to `torch.exp`.
    """
    def __init__(
        self,
        n_input: int,
        n_batch: int = 0,
        n_labels: int = 0,
        n_hidden: int = 128,
        n_latent: int = 10,
        n_layers: int = 1,
        n_continuous_cov: int = 0,
        n_cats_per_cov: Optional[Iterable[int]] = None,
        dropout_rate: float = 0.1,
        latent_distribution: str = "normal",
        encode_covariates: bool = False,
        use_chem_prior: bool = True,
        deeply_inject_covariates: bool = True,
        use_batch_norm: Literal["encoder", "decoder", "none", "both"] = "both",
        use_layer_norm: Literal["encoder", "decoder", "none", "both"] = "none",
        var_activation: Optional[Callable] = None,
    ):
        super().__init__()
        self.n_latent = n_latent
        self.n_batch = n_batch
        self.n_labels = n_labels
        self.latent_distribution = latent_distribution
        self.encode_covariates = encode_covariates
        self.use_chem_prior = use_chem_prior
        # KL scale and sparsity penalty; the entry-point scripts overwrite these
        # on the instantiated module (``model.module.beta = ...``).
        self.beta = 1
        self.sparse_mask_penalty = 1
        # gene-wise NB inverse dispersion (log-space; exponentiated in generative()).
        self.px_r = torch.nn.Parameter(torch.randn(n_input))
        use_batch_norm_encoder = use_batch_norm == "encoder" or use_batch_norm == "both"
        use_batch_norm_decoder = use_batch_norm == "decoder" or use_batch_norm == "both"
        use_layer_norm_encoder = use_layer_norm == "encoder" or use_layer_norm == "both"
        use_layer_norm_decoder = use_layer_norm == "decoder" or use_layer_norm == "both"
        # z encoder goes from the n_input-dimensional data to an n_latent-d
        # latent space representation
        n_input_encoder = n_input + n_continuous_cov * encode_covariates
        cat_list = [n_batch] + list([] if n_cats_per_cov is None else n_cats_per_cov)
        encoder_cat_list = cat_list if encode_covariates else None
        self.z_encoder = Encoder(
            n_input_encoder,
            n_latent,
            n_cat_list=encoder_cat_list,
            n_layers=n_layers,
            n_hidden=n_hidden,
            dropout_rate=dropout_rate,
            distribution=latent_distribution,
            inject_covariates=deeply_inject_covariates,
            use_batch_norm=use_batch_norm_encoder,
            use_layer_norm=use_layer_norm_encoder,
            var_activation=var_activation,
            return_dist=True,
        )
        # l encoder goes from n_input-dimensional data to 1-d library size
        # NOTE(review): this encoder is constructed but inference() uses the
        # observed library size instead (ql is always None) — confirm it is
        # intentionally unused.
        self.l_encoder = Encoder(
            n_input_encoder,
            1,
            n_layers=1,
            n_cat_list=encoder_cat_list,
            n_hidden=n_hidden,
            dropout_rate=dropout_rate,
            inject_covariates=deeply_inject_covariates,
            use_batch_norm=use_batch_norm_encoder,
            use_layer_norm=use_layer_norm_encoder,
            var_activation=var_activation,
            return_dist=True,
        )
        # decoder goes from n_latent-dimensional space to n_input-d data
        n_input_decoder = n_latent + n_continuous_cov
        self.decoder = DecoderSCVI(
            n_input_decoder,
            n_input,
            n_cat_list=cat_list,
            n_layers=n_layers,
            n_hidden=n_hidden,
            inject_covariates=deeply_inject_covariates,
            use_batch_norm=use_batch_norm_decoder,
            use_layer_norm=use_layer_norm_decoder,
            scale_activation="softmax",
        )
        # per-label latent prior means (n_labels x n_latent) and the learned
        # Gumbel-Sigmoid mask selecting which latent dims each label shifts.
        self.action_prior_mean = torch.nn.parameter.Parameter(
            torch.randn((n_labels, n_latent))
        )
        self.gumbel_action = GumbelSigmoid(num_action=n_labels, num_latent=n_latent)
    def _get_inference_input(self, tensors):
        """Collect the encoder inputs (expression, batch, covariates) from a minibatch."""
        x = tensors[REGISTRY_KEYS.X_KEY]
        batch_index = tensors[REGISTRY_KEYS.BATCH_KEY]
        cont_key = REGISTRY_KEYS.CONT_COVS_KEY
        cont_covs = tensors[cont_key] if cont_key in tensors.keys() else None
        cat_key = REGISTRY_KEYS.CAT_COVS_KEY
        cat_covs = tensors[cat_key] if cat_key in tensors.keys() else None
        input_dict = dict(
            x=x, batch_index=batch_index, cont_covs=cont_covs, cat_covs=cat_covs
        )
        return input_dict
    def _get_generative_input(self, tensors, inference_outputs):
        """Collect the decoder inputs: sampled latent, library, labels and covariates."""
        z = inference_outputs["z"]
        library = inference_outputs["library"]
        batch_index = tensors[REGISTRY_KEYS.BATCH_KEY]
        y = tensors[REGISTRY_KEYS.LABELS_KEY]
        cont_key = REGISTRY_KEYS.CONT_COVS_KEY
        cont_covs = tensors[cont_key] if cont_key in tensors.keys() else None
        cat_key = REGISTRY_KEYS.CAT_COVS_KEY
        cat_covs = tensors[cat_key] if cat_key in tensors.keys() else None
        input_dict = dict(
            z=z,
            library=library,
            batch_index=batch_index,
            y=y,
            cont_covs=cont_covs,
            cat_covs=cat_covs,
        )
        return input_dict
    @auto_move_data
    def inference(self, x, batch_index, cont_covs=None, cat_covs=None, n_samples=1):
        """
        High level inference method.
        Runs the inference (encoder) model.
        Returns the sampled latent ``z``, its posterior ``qz``, ``ql`` (always
        None: the observed log library size is used directly), and ``library``.
        """
        x_ = x
        # observed log library size; no amortized inference for the library here.
        library = torch.log(x.sum(1)).unsqueeze(1)
        # log1p-transform counts before encoding
        x_ = torch.log(1 + x_)
        if cont_covs is not None and self.encode_covariates:
            encoder_input = torch.cat((x_, cont_covs), dim=-1)
        else:
            encoder_input = x_
        if cat_covs is not None and self.encode_covariates:
            categorical_input = torch.split(cat_covs, 1, dim=1)
        else:
            categorical_input = tuple()
        qz, z = self.z_encoder(encoder_input, batch_index, *categorical_input)
        ql = None
        if n_samples > 1:
            # draw extra MC samples and broadcast the library accordingly
            untran_z = qz.sample((n_samples,))
            z = self.z_encoder.z_transformation(untran_z)
            library = library.unsqueeze(0).expand(
                (n_samples, library.size(0), library.size(1))
            )
        outputs = dict(z=z, qz=qz, ql=ql, library=library)
        return outputs
    @auto_move_data
    def generative(
        self,
        z,
        library,
        batch_index,
        cont_covs=None,
        cat_covs=None,
        y=None,
        transform_batch=None,
    ):
        """Runs the generative model and builds the label-conditioned latent prior."""
        if cont_covs is None:
            decoder_input = z
        elif z.dim() != cont_covs.dim():
            decoder_input = torch.cat(
                [z, cont_covs.unsqueeze(0).expand(z.size(0), -1, -1)], dim=-1
            )
        else:
            decoder_input = torch.cat([z, cont_covs], dim=-1)
        if cat_covs is not None:
            categorical_input = torch.split(cat_covs, 1, dim=1)
        else:
            categorical_input = tuple()
        if transform_batch is not None:
            batch_index = torch.ones_like(batch_index) * transform_batch
        px_scale, _, px_rate, _ = self.decoder(
            "gene",
            decoder_input,
            library,
            batch_index,
            *categorical_input,
            # y, IMPORTANT TO BE TAKEN AWAY, otherwise we have leakage of y into the decoder by other means than the shift
        )
        px_r = torch.exp(self.px_r)
        px = NegativeBinomial(mu=px_rate, theta=px_r, scale=px_scale)
        # Priors
        pl = None
        # sample mask (size chemical times latent) for each datapoint
        mask = self.gumbel_action(y[:, 0].long())  # batch x latentdim
        # subsample actions we care about
        # extract chemical specific means
        mean_z = torch.index_select(
            self.action_prior_mean, 0, y[:, 0].long()
        )  # batch x latent dim
        # prune out entries according to mask
        if self.use_chem_prior:
            # label-shifted prior: N(mean * mask, I)
            pz = Normal(mean_z * mask, torch.ones_like(z))
        else:
            # plain isotropic prior (betaVAE / VAE baseline)
            pz = Normal(torch.zeros_like(z), torch.ones_like(z))
        return dict(
            px=px,
            pl=pl,
            pz=pz,
        )
    def freeze_params(self):
        """Freeze decoder, z-encoder and dispersion so only priors/masks train.

        BatchNorm momentum is zeroed so running statistics stop updating while
        the decoder is frozen.
        """
        # freeze
        for param in self.decoder.parameters():
            param.requires_grad = False
        for param in self.z_encoder.parameters():
            param.requires_grad = False
        self.px_r.requires_grad = False
        for _, mod in self.decoder.named_modules():
            if isinstance(mod, torch.nn.BatchNorm1d):
                mod.momentum = 0
    def reinit_actsparse_and_freeze(self, loc):
        """Re-open the mask for labels ``loc`` (mask logit set high, mean zeroed),
        then binarize/freeze all masks via ``GumbelSigmoid.threshold()``."""
        # here we must reinit for the embeddings that may have been shrank by sparsity
        with torch.no_grad():
            self.action_prior_mean[loc] = 0
            self.gumbel_action.log_alpha[loc] = 5
            self.gumbel_action.threshold()
    def loss(
        self,
        tensors,
        inference_outputs,
        generative_outputs,
        kl_weight: float = 1.0,
    ):
        """ELBO loss: NB reconstruction + beta * warmed-up KL(qz || pz),
        plus the L0-style sparsity penalty on the Gumbel-Sigmoid mask."""
        x = tensors[REGISTRY_KEYS.X_KEY]
        kl_divergence_z = kl(inference_outputs["qz"], generative_outputs["pz"]).sum(
            dim=1
        )
        # no library-size KL: the observed library is used (see inference()).
        kl_divergence_l = 0.0
        reconst_loss = -generative_outputs["px"].log_prob(x).sum(-1)
        kl_local_for_warmup = kl_divergence_z
        kl_local_no_warmup = kl_divergence_l
        weighted_kl_local = (
            self.beta * kl_weight * kl_local_for_warmup + kl_local_no_warmup
        )
        loss = torch.mean(reconst_loss + weighted_kl_local)
        # add sparsity
        loss += self.sparse_mask_penalty * torch.mean(self.gumbel_action.get_proba())
        kl_local = dict(
            kl_divergence_l=kl_divergence_l, kl_divergence_z=kl_divergence_z
        )
        kl_global = torch.tensor(0.0)
        return LossRecorder(loss, reconst_loss, kl_local, kl_global)
    @torch.no_grad()
    @auto_move_data
    def marginal_ll(self, tensors, n_mc_samples):
        """Importance-sampling estimate of log p(x) with q(z|x) as proposal
        (log-mean-exp over ``n_mc_samples`` forward passes)."""
        sample_batch = tensors[REGISTRY_KEYS.X_KEY]
        to_sum = torch.zeros(sample_batch.size()[0], n_mc_samples)
        for i in range(n_mc_samples):
            # Distribution parameters and sampled variables
            inference_outputs, generative_outputs, losses = self.forward(tensors)
            z = inference_outputs["z"]
            p_za = generative_outputs["pz"].log_prob(z).sum(dim=1)
            q_z_x = inference_outputs["qz"].log_prob(z).sum(dim=1)
            p_x_za = -losses.reconstruction_loss
            log_prob_sum = p_x_za + p_za - q_z_x
            to_sum[:, i] = log_prob_sum
        batch_log_lkl = logsumexp(to_sum, dim=-1) - np.log(n_mc_samples)
        return batch_log_lkl
| 12,419 | 33.985915 | 133 | py |
sVAE | sVAE-main/svae/baselines/_model.py | import logging
from typing import List, Optional, Sequence
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi.data import AnnDataManager
from scvi.data.fields import (
CategoricalJointObsField,
CategoricalObsField,
LayerField,
NumericalJointObsField,
NumericalObsField,
)
from scvi.model.base import (
ArchesMixin,
BaseModelClass,
RNASeqMixin,
UnsupervisedTrainingMixin,
VAEMixin,
)
from scvi.utils import setup_anndata_dsp
from svae.baselines._module import sVAEModule
logger = logging.getLogger(__name__)
class sVAE(
    RNASeqMixin, VAEMixin, ArchesMixin, UnsupervisedTrainingMixin, BaseModelClass
):
    """
    sVAE model wrapper built on scvi-tools [Lopez18]_.
    Wraps :class:`~svae.baselines._module.sVAEModule` with the standard scvi
    training / inference mixins, and adds per-label ELBO / marginal-LL reports.
    Parameters
    ----------
    adata
        AnnData object that has been registered via :meth:`~scvi.model.SCVI.setup_anndata`.
    n_hidden
        Number of nodes per hidden layer.
    n_latent
        Dimensionality of the latent space.
    n_layers
        Number of hidden layers used for encoder and decoder NNs.
    dropout_rate
        Dropout rate for neural networks.
    latent_distribution
        One of:
        * ``'normal'`` - Normal distribution
        * ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)
    **module_kwargs
        Keyword args forwarded to :class:`~svae.baselines._module.sVAEModule`
    Examples
    --------
    >>> adata = anndata.read_h5ad(path_to_anndata)
    >>> scvi.model.SCVI.setup_anndata(adata, batch_key="batch")
    >>> vae = scvi.model.SCVI(adata)
    >>> vae.train()
    >>> adata.obsm["X_scVI"] = vae.get_latent_representation()
    >>> adata.obsm["X_normalized_scVI"] = vae.get_normalized_expression()
    Notes
    -----
    See further usage examples in the following tutorials:
    1. :doc:`/tutorials/notebooks/api_overview`
    2. :doc:`/tutorials/notebooks/harmonization`
    3. :doc:`/tutorials/notebooks/scarches_scvi_tools`
    4. :doc:`/tutorials/notebooks/scvi_in_R`
    """
    def __init__(
        self,
        adata: AnnData,
        n_hidden: int = 128,
        n_latent: int = 10,
        n_layers: int = 1,
        dropout_rate: float = 0.1,
        latent_distribution: Literal["normal", "ln"] = "normal",
        **module_kwargs,
    ):
        super().__init__(adata)
        # extra categorical covariates registered on the AnnData, if any
        n_cats_per_cov = (
            self.adata_manager.get_state_registry(
                REGISTRY_KEYS.CAT_COVS_KEY
            ).n_cats_per_key
            if REGISTRY_KEYS.CAT_COVS_KEY in self.adata_manager.data_registry
            else None
        )
        n_batch = self.summary_stats.n_batch
        self.module = sVAEModule(
            n_input=self.summary_stats.n_vars,
            n_batch=n_batch,
            n_labels=self.summary_stats.n_labels,
            n_continuous_cov=self.summary_stats.get("n_extra_continuous_covs", 0),
            n_cats_per_cov=n_cats_per_cov,
            n_hidden=n_hidden,
            n_latent=n_latent,
            n_layers=n_layers,
            dropout_rate=dropout_rate,
            latent_distribution=latent_distribution,
            **module_kwargs,
        )
        self._model_summary_string = (
            "sVAE Model with the following params: \nn_hidden: {}, n_latent: {}, n_layers: {}, dropout_rate: "
            "{}, latent_distribution: {}"
        ).format(
            n_hidden,
            n_latent,
            n_layers,
            dropout_rate,
            latent_distribution,
        )
        self.init_params_ = self._get_init_params(locals())
    @classmethod
    @setup_anndata_dsp.dedent
    def setup_anndata(
        cls,
        adata: AnnData,
        layer: Optional[str] = None,
        batch_key: Optional[str] = None,
        labels_key: Optional[str] = None,
        size_factor_key: Optional[str] = None,
        categorical_covariate_keys: Optional[List[str]] = None,
        continuous_covariate_keys: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        %(summary)s.
        Parameters
        ----------
        %(param_layer)s
        %(param_batch_key)s
        %(param_labels_key)s
        %(param_size_factor_key)s
        %(param_cat_cov_keys)s
        %(param_cont_cov_keys)s
        """
        setup_method_args = cls._get_setup_method_args(**locals())
        anndata_fields = [
            LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
            CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key),
            CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, labels_key),
            NumericalObsField(
                REGISTRY_KEYS.SIZE_FACTOR_KEY, size_factor_key, required=False
            ),
            CategoricalJointObsField(
                REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys
            ),
            NumericalJointObsField(
                REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys
            ),
        ]
        adata_manager = AnnDataManager(
            fields=anndata_fields, setup_method_args=setup_method_args
        )
        adata_manager.register_fields(adata, **kwargs)
        cls.register_manager(adata_manager)
    @torch.no_grad()
    def get_elbo(
        self,
        adata: Optional[AnnData] = None,
        indices: Optional[Sequence[int]] = None,
        batch_size: Optional[int] = None,
        agg: bool = False,
    ) -> float:
        """
        Return the ELBO-based loss for the data, aggregated per label.
        As implemented this accumulates ``reconstruction_loss + kl_local`` per
        cell (i.e. the *negative* ELBO; lower is better) and averages it per
        label category; with ``agg=True`` a single mean over labels is returned.
        Parameters
        ----------
        adata
            AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
            AnnData object used to initialize the model.
        indices
            Indices of cells in adata to use. If `None`, all cells are used.
        batch_size
            Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
        agg
            If True, return the mean over labels; otherwise a dict keyed by label.
        """
        adata = self._validate_anndata(adata)
        scdl = self._make_data_loader(
            adata=adata, indices=indices, batch_size=batch_size
        )
        # Iterate once over the data and compute the elbo
        elbo = []
        for tensors in scdl:
            _, _, scvi_loss = self.module(tensors)
            recon_loss = scvi_loss.reconstruction_loss
            # NOTE(review): the module stores kl_local as a dict in
            # LossRecorder — assumes it is tensor-like at this point; confirm
            # against the pinned scvi-tools version.
            kl_local = scvi_loss.kl_local
            elbo += [(recon_loss + kl_local).cpu().numpy()]
        # now aggregate by chemical
        elbo = np.concatenate(elbo)
        elbo_res = {}
        label_key = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "original_key"
        ]
        cat = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "categorical_mapping"
        ]
        for c in cat:
            if indices is not None:
                ind = np.where(adata.obs[label_key][indices].values == c)[0]
            else:
                ind = np.where(adata.obs[label_key].values == c)[0]
            # only report labels with enough cells to give a stable average
            if len(ind) > 10:
                elbo_res[c] = np.mean(elbo[ind])
        if agg:
            return pd.Series(elbo_res).values.mean()
        else:
            return elbo_res
    @torch.no_grad()
    def get_marginal_ll(
        self,
        adata: Optional[AnnData] = None,
        indices: Optional[Sequence[int]] = None,
        n_mc_samples: int = 1000,
        batch_size: Optional[int] = None,
        agg: bool = False,
    ) -> float:
        """
        Return the marginal LL for the data, calculated by label
        The computation here is a biased estimator of the marginal log likelihood of the data.
        Note, this is not the negative log likelihood, higher is better.
        Parameters
        ----------
        adata
            AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
            AnnData object used to initialize the model.
        indices
            Indices of cells in adata to use. If `None`, all cells are used.
        n_mc_samples
            Number of Monte Carlo samples to use for marginal LL estimation.
        batch_size
            Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
        agg
            If True, return the mean over labels; otherwise a dict keyed by label.
        """
        adata = self._validate_anndata(adata)
        if indices is None:
            indices = np.arange(adata.n_obs)
        scdl = self._make_data_loader(
            adata=adata, indices=indices, batch_size=batch_size
        )
        if hasattr(self.module, "marginal_ll"):
            log_lkl = []
            for tensors in scdl:
                log_lkl += [self.module.marginal_ll(tensors, n_mc_samples=n_mc_samples)]
        else:
            raise NotImplementedError(
                "marginal_ll is not implemented for current model. "
                "Please raise an issue on github if you need it."
            )
        # now aggregate by chemical
        log_lkl = np.concatenate(log_lkl)
        log_lkl_res = {}
        label_key = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "original_key"
        ]
        cat = self.adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)[
            "categorical_mapping"
        ]
        for c in cat:
            if indices is not None:
                ind = np.where(adata.obs[label_key][indices].values == c)[0]
            else:
                ind = np.where(adata.obs[label_key].values == c)[0]
            # only report labels with enough cells to give a stable average
            if len(ind) > 10:
                log_lkl_res[c] = np.mean(log_lkl[ind])
        if agg:
            return pd.Series(log_lkl_res).values.mean()
        else:
            return log_lkl_res
| 10,291 | 32.744262 | 110 | py |
sVAE | sVAE-main/entry_points/run_real_data_replogle_wandb.py | import argparse
import logging
import numpy as np
import pandas as pd
import scanpy as sc
import torch
import wandb
from pytorch_lightning.loggers import WandbLogger
logger = logging.getLogger("scvi")
settings = wandb.Settings(start_method="fork")
from svae import SpikeSlabVAE, sVAE
EXOSOME = [
"ZC3H3",
"ZFC3H1",
"CAMTA2",
"DHX29",
"DIS3",
"EXOSC1",
"EXOSC2",
"EXOSC3",
"EXOSC4",
"EXOSC5",
"EXOSC6",
"EXOSC7",
"EXOSC8",
"EXOSC9",
"MBNL1",
"PABPN1",
"PIBF1",
"MTREX",
"ST20-MTHFS",
"THAP2",
]
SPLICEOSOME = [
"ZMAT2",
"CLNS1A",
"DDX20",
"DDX41",
"DDX46",
"ECD",
"GEMIN4",
"GEMIN5",
"GEMIN6",
"GEMIN8",
"INTS3",
"INTS4",
"INTS9",
"ICE1",
"LSM2",
"LSM3",
"LSM5",
"LSM5",
"LSM6",
"LSM7",
"MMP17",
"PHAX",
"PRPF4",
"PRPF6",
"SART3",
"SF3A2",
"SMN2",
"SNAPC1",
"SNAPC3",
"SNRPD3",
"SNRPG",
"TIPARP",
"TTC27",
"TXNL4A",
"USPL1",
]
MEDIATOR_COMPLEX = [
"ZDHHC7",
"ADAM10",
"EPS8L1",
"FAM136A",
"POGLUT3",
"MED10",
"MED11",
"MED12",
"MED14",
"MED17",
"MED18",
"MED19",
"MED1",
"MED20",
"MED21",
"MED22",
"MED28",
"MED29",
"MED30",
"MED6",
"MED7",
"MED8",
"MED9",
"SUPT6H",
"BRIX1",
"TMX2",
]
NUCLEOTIDE_EXCISION_REPAIR = [
"C1QBP",
"CCNH",
"ERCC2",
"ERCC3",
"GPN1",
"GPN3",
"GTF2E1",
"GTF2E2",
"GTF2H1",
"GTF2H4",
"MNAT1",
"NUMA1",
"PDRG1",
"PFDN2",
"POLR2B",
"POLR2F",
"POLR2G",
"RPAP1",
"RPAP2",
"RPAP3",
"TANGO6",
"TMEM161B",
"UXT",
]
S40_RIBOSOMAL_UNIT = [
"ZCCHC9",
"ZNF236",
"C1orf131",
"ZNF84",
"ZNHIT6",
"CCDC59",
"AATF",
"CPEB1",
"DDX10",
"DDX18",
"DDX21",
"DDX47",
"DDX52",
"DHX33",
"DHX37",
"DIMT1",
"DKC1",
"DNTTIP2",
"ESF1",
"FBL",
"FBXL14",
"FCF1",
"GLB1",
"HOXA3",
"IMP4",
"IMPA2",
"KRI1",
"KRR1",
"LTV1",
"MPHOSPH10",
"MRM1",
"NAF1",
"NOB1",
"NOC4L",
"NOL6",
"NOP10",
"PDCD11",
"ABT1",
"PNO1",
"POP1",
"POP4",
"POP5",
"PSMG4",
"PWP2",
"RCL1",
"RIOK1",
"RIOK2",
"RNF31",
"RPP14",
"RPP30",
"RPP40",
"RPS10-NUDT3",
"RPS10",
"RPS11",
"RPS12",
"RPS13",
"RPS15A",
"RPS18",
"RPS19BP1",
"RPS19",
"RPS21",
"RPS23",
"RPS24",
"RPS27A",
"RPS27",
"RPS28",
"RPS29",
"RPS2",
"RPS3A",
"RPS3",
"RPS4X",
"RPS5",
"RPS6",
"RPS7",
"RPS9",
"RPSA",
"RRP12",
"RRP7A",
"RRP9",
"SDR39U1",
"SRFBP1",
"TBL3",
"TRMT112",
"TSR1",
"TSR2",
"BYSL",
"C12orf45",
"USP36",
"UTP11",
"UTP20",
"UTP23",
"UTP6",
"BUD23",
"WDR36",
"WDR3",
"WDR46",
"AAR2",
]
S39_RIBOSOMAL_UNIT = [
"AARS2",
"DHX30",
"GFM1",
"HMGB3",
"MALSU1",
"MRPL10",
"MRPL11",
"MRPL13",
"MRPL14",
"MRPL16",
"MRPL17",
"MRPL18",
"MRPL19",
"MRPL22",
"MRPL23",
"MRPL24",
"MRPL27",
"MRPL2",
"MRPL33",
"MRPL35",
"MRPL36",
"MRPL37",
"MRPL38",
"MRPL39",
"MRPL3",
"MRPL41",
"MRPL42",
"MRPL43",
"MRPL44",
"MRPL4",
"MRPL50",
"MRPL51",
"MRPL53",
"MRPL55",
"MRPL9",
"MRPS18A",
"MRPS30",
"NARS2",
"PTCD1",
"RPUSD4",
"TARS2",
"VARS2",
"YARS2",
]
S60_RIBOSOMAL_UNIT = [
"CARF",
"CCDC86",
"DDX24",
"DDX51",
"DDX56",
"EIF6",
"ABCF1",
"GNL2",
"LSG1",
"MAK16",
"MDN1",
"MYBBP1A",
"NIP7",
"NLE1",
"NOL8",
"NOP16",
"NVL",
"PES1",
"PPAN",
"RBM28",
"RPL10A",
"RPL10",
"RPL11",
"RPL13",
"RPL14",
"RPL17",
"RPL19",
"RPL21",
"RPL23A",
"RPL23",
"RPL24",
"RPL26",
"RPL27A",
"RPL30",
"RPL31",
"RPL32",
"RPL34",
"RPL36",
"RPL37A",
"RPL37",
"RPL38",
"RPL4",
"RPL5",
"RPL6",
"RPL7",
"RPL8",
"RPL9",
"RRS1",
"RSL1D1",
"SDAD1",
"BOP1",
"TEX10",
"WDR12",
]
MT_PROTEIN_TRANSLOCATION = [
"AARS",
"CHCHD4",
"DNAJA3",
"DNAJC19",
"EIF2B1",
"EIF2B2",
"EIF2B3",
"EIF2B4",
"EIF2B5",
"FARSA",
"FARSB",
"GFER",
"GRPEL1",
"HARS",
"HSPA9",
"HSPD1",
"HSPE1",
"IARS2",
"LARS",
"LETM1",
"NARS",
"OXA1L",
"PGS1",
"PHB2",
"PHB",
"PMPCA",
"PMPCB",
"ATP5F1A",
"ATP5F1B",
"ATP5PD",
"QARS",
"RARS",
"SAMM50",
"PRELID3B",
"TARS",
"TIMM23B",
"TIMM44",
"TOMM22",
"TTC1",
"VARS",
]
def reinit_model(model, adata_override=None, n_latent=None, n_layers=None):
    """Re-wrap a trained module in a fresh model object of the same class.

    Constructing a new model resets the scvi trainer/optimizer state while the
    trained weights are preserved, because the existing ``model.module`` is
    reattached (shared, not copied) to the new wrapper.

    Parameters
    ----------
    model
        Trained scvi model whose ``module`` should be carried over.
    adata_override
        AnnData to build the new wrapper on. Defaults to the script-level
        ``adata`` global for backward compatibility with existing call sites.
    n_latent, n_layers
        Architecture hyper-parameters for the wrapper. Default to the parsed
        command-line ``args`` globals, as before.

    Returns
    -------
    A new model of ``type(model)`` sharing the trained module.
    """
    # Fall back to the module-level globals the original implementation
    # depended on implicitly; passing them explicitly is now possible.
    target_adata = adata if adata_override is None else adata_override
    latent_dim = args.n_latent if n_latent is None else n_latent
    layer_count = args.n_layers if n_layers is None else n_layers
    trained_module = model.module
    fresh_model = model.__class__(target_adata, n_latent=latent_dim, n_layers=layer_count)
    fresh_model.module = trained_module
    return fresh_model
if __name__ == "__main__":
    # Benchmark script: train an sVAE-family model on the Replogle Perturb-seq
    # data, hold out one pathway (or the strongest-effect guides), binarize the
    # learned sparsity mask, and evaluate transfer to the held-out perturbations.
    parser = argparse.ArgumentParser("sVAE benchmark experiment")
    parser.add_argument("--seed", type=int, default=1024)
    parser.add_argument("--split", type=str, default="SPLICEOSOME")
    parser.add_argument("--n_latent", type=int, default=15)
    parser.add_argument("--n_epoch", type=int, default=50)
    parser.add_argument("--n_layers", type=int, default=2)
    parser.add_argument("--beta", type=float, default=1.0)
    parser.add_argument("--sparse_penalty", type=float, default=0)
    parser.add_argument("--method", type=str, default="SpikeSlabVAE")
    # NOTE(review): --num_gpus is parsed but never used below — confirm whether
    # it should be forwarded to the trainer.
    parser.add_argument("--num_gpus", type=int, default=1)
    args = parser.parse_args()
    wandb_logger = WandbLogger(project="wandb-replogle-svae", log_model=True)
    # set up seeds ############################################################
    torch.backends.cudnn.benchmark = True
    torch.cuda.manual_seed_all(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    save_dir = f"simulations/scvi_replogle_sm_beta{args.beta}_latent{args.n_latent}_meth_{args.method}_sparse{args.sparse_penalty}_split{args.split}"
    # load data ###############################################################
    adata = sc.read_h5ad("replogle.h5ad")
    # the module's per-label prior is keyed on "chem"; here a label = perturbed gene
    adata.obs["chem"] = adata.obs["gene"]
    pathway_samples = []
    pathway_list = [
        EXOSOME,
        SPLICEOSOME,
        MEDIATOR_COMPLEX,
        NUCLEOTIDE_EXCISION_REPAIR,
        S40_RIBOSOMAL_UNIT,
        S39_RIBOSOMAL_UNIT,
        S60_RIBOSOMAL_UNIT,
        MT_PROTEIN_TRANSLOCATION,
    ]
    pathway_names = [
        "EXOSOME",
        "SPLICEOSOME",
        "MEDIATOR_COMPLEX",
        "NUCLEOTIDE_EXCISION_REPAIR",
        "S40_RIBOSOMAL_UNIT",
        "S39_RIBOSOMAL_UNIT",
        "S60_RIBOSOMAL_UNIT",
        "MT_PROTEIN_TRANSLOCATION",
    ]
    # annotate each cell's perturbation with its pathway ("OTHER" if unmatched).
    # NOTE(review): pathway_samples is built but never used afterwards in this
    # script — possibly leftover from an earlier analysis; confirm.
    for perturb in adata.obs["gene"]:
        matched = False
        for pathway_id, pathway_ in enumerate(pathway_list):
            if perturb in pathway_:
                pathway_samples += [pathway_names[pathway_id]]
                matched = True
        if not matched:
            pathway_samples += ["OTHER"]
    if args.split == "effect":
        # get some form of energy distance using MMD
        def mmd_linear(X, Y):
            # linear-kernel MMD^2 between two sample sets
            XX = np.dot(X, X.T)
            YY = np.dot(Y, Y.T)
            XY = np.dot(X, Y.T)
            return XX.mean() + YY.mean() - 2 * XY.mean()
        res_energy = {}
        # compare each guide's PCA profile (first 1000 cells) against control
        for guide in adata.obs["guide_ids"].unique():
            index_control = np.where(adata.obs["guide_ids"] == "")[0][:1000]
            index_condition = np.where(adata.obs["guide_ids"] == guide)[0][:1000]
            res_energy[guide] = mmd_linear(
                adata.obsm["X_pca"][index_control], adata.obsm["X_pca"][index_condition]
            )
        series = pd.Series(res_energy)
        # hold out the 30 strongest-effect guides
        hold_out_guides = series.sort_values(ascending=False)[:30].index
    elif args.split in pathway_names:
        # hold out every perturbation in the requested pathway
        # NOTE(review): any other --split value leaves hold_out_guides
        # undefined and crashes below with NameError — confirm intended.
        hold_out_guides = pathway_list[
            np.where(np.array(pathway_names) == args.split)[0][0]
        ]
    # convert format
    adata_train = adata[[x not in hold_out_guides for x in adata.obs["chem"]]].copy()
    adata_test = adata[[x in hold_out_guides for x in adata.obs["chem"]]].copy()
    # split anndata in train / test
    if args.method != "SpikeSlabVAE":
        sVAE.setup_anndata(adata, labels_key="chem")
        model = sVAE(adata, n_latent=args.n_latent, n_layers=args.n_layers)
    if args.method == "SpikeSlabVAE":
        if args.sparse_penalty == 0:
            args.sparse_penalty = 1
        SpikeSlabVAE.setup_anndata(adata, labels_key="chem")
        model = SpikeSlabVAE(adata, n_latent=args.n_latent, n_layers=args.n_layers)
    # train or load model #####################################################
    # train the VAE
    # map each held-out chemical name to its integer label index
    guide_list = (
        adata.obs[["_scvi_labels", "chem"]].groupby("_scvi_labels").first().values[:, 0]
    )
    test_range = [
        np.where(x == guide_list)[0][0] for x in adata_test.obs["chem"].unique()
    ]
    # label-conditioned prior is used by all methods except the plain VAE baseline
    chem_prior = (
        args.method == "sVAE" or args.method == "iVAE" or args.method == "SpikeSlabVAE"
    )
    # hack: focus on train only, but leave params for all chemicals
    model.adata = adata_train
    model.module.use_chem_prior = chem_prior
    model.module.beta = args.beta
    model.module.sparse_mask_penalty = args.sparse_penalty
    model.train(
        max_epochs=args.n_epoch,
        check_val_every_n_epoch=1,
        early_stopping=True,
        plan_kwargs={
            "n_epochs_kl_warmup": 10,
        },
        logger=wandb_logger,
    )
    elbo_train = model.get_elbo(adata_train, agg=True)
    # validation ELBO on the split scvi made internally during training
    elbo_val = model.get_elbo(adata_train, indices=model.validation_indices, agg=True)
    wandb_logger.experiment.config.update(
        {
            "seed": args.seed,
            "method": args.method,
            "split": args.split,
            "beta": args.beta,
            "sparse_penalty": args.sparse_penalty,
            "n_latent": args.n_latent,
            "n_layers": args.n_layers,
        }
    )
    model.save(save_dir, overwrite=True)
    # obtain latents ##########################################################
    latents = model.get_latent_representation(adata_train)
    # adjust model for sparsity binarization
    # baseline transfer score before any mask binarization / fine-tuning
    elbo_pre_pre_test = model.get_elbo(adata_test, agg=True)
    model = reinit_model(model)
    # 1. binarize the sparse mask -> this will add 1 to the unseen chemicals, and freeze all the mask
    model.module.reinit_actsparse_and_freeze(test_range)
    # fraction of (label, latent) mask entries kept after binarization
    rate_hit = np.mean(
        model.module.gumbel_action.get_proba().detach().cpu().numpy() > 0.5
    )
    # 2. re-train the model on train data
    model.adata = adata_train
    model.module.use_chem_prior = chem_prior
    model.module.sparse_mask_penalty = args.sparse_penalty
    if args.method == "SpikeSlabVAE":
        # if SpikeSlabVAE, make sure you disable to categorical KL, or you'll get nans
        model.module.use_global_kl = False
        model.module.warmup = False
    model.train(
        max_epochs=args.n_epoch,
        check_val_every_n_epoch=1,
        early_stopping=True,
        logger=False,
        plan_kwargs={
            "n_epochs_kl_warmup": 1,
        },
    )
    model.save(save_dir + "_binarized_train", overwrite=True)
    # transfer scores after binarized re-training, before test fine-tuning
    elbo_pre_test = model.get_elbo(adata_test, agg=True)
    iwelbo_pre_test = model.get_marginal_ll(adata_test, n_mc_samples=1000, agg=True)
    # 3. fine-tune model on test data (freeze generative model and fit)
    model = reinit_model(model)
    model.module.freeze_params()
    model.adata = adata_test
    model.module.use_chem_prior = chem_prior
    model.module.sparse_mask_penalty = args.sparse_penalty
    if args.method == "SpikeSlabVAE":
        # if SpikeSlabVAE, make sure you disable to categorical KL, or you'll get nans
        model.module.use_global_kl = False
        model.module.warmup = False
    if args.method == "SpikeSlabVAE" or args.method == "sVAE" or args.method == "iVAE":
        model.train(
            max_epochs=args.n_epoch,
            train_size=1,
            early_stopping=False,
            logger=False,
            plan_kwargs={"n_epochs_kl_warmup": 1, "lr": 0.005},
        )
    model.save(save_dir + "_test", overwrite=True)
    elbo_test = model.get_elbo(adata_test, agg=True)
    iwelbo_test = model.get_marginal_ll(adata_test, n_mc_samples=1000, agg=True)
    wandb.log(
        {
            f"elbo_train": elbo_train,
            f"elbo_val": elbo_val,
            f"elbo_test": elbo_test,
            f"iwelbo_test": iwelbo_test,
            f"rate_hit": rate_hit,
            f"elbo_pre_test": elbo_pre_test,
            f"iwelbo_pre_test": iwelbo_pre_test,
            f"elbo_pre_pre_test": elbo_pre_pre_test,
        }
    )
    wandb.finish()
| 13,055 | 20.723794 | 149 | py |
sVAE | sVAE-main/entry_points/demo.py | import argparse
import logging
import os
import numpy as np
import torch
import wandb
from pytorch_lightning.loggers import WandbLogger
logger = logging.getLogger("scvi")
settings = wandb.Settings(start_method="fork")
from svae import SpikeSlabVAE, metrics, sparse_shift, sVAE
def reinit_model(model, adata_override=None, n_latent=None, n_layers=None):
    """Re-wrap a trained module in a fresh model object of the same class.

    Constructing a new model resets the scvi trainer/optimizer state while the
    trained weights are preserved, because the existing ``model.module`` is
    reattached (shared, not copied) to the new wrapper.

    Parameters
    ----------
    model
        Trained scvi model whose ``module`` should be carried over.
    adata_override
        AnnData to build the new wrapper on. Defaults to the script-level
        ``adata`` global for backward compatibility with existing call sites.
    n_latent, n_layers
        Architecture hyper-parameters for the wrapper. Default to the parsed
        command-line ``args`` globals, as before.

    Returns
    -------
    A new model of ``type(model)`` sharing the trained module.
    """
    # Fall back to the module-level globals the original implementation
    # depended on implicitly; passing them explicitly is now possible.
    target_adata = adata if adata_override is None else adata_override
    latent_dim = args.n_latent if n_latent is None else n_latent
    layer_count = args.n_layers if n_layers is None else n_layers
    trained_module = model.module
    fresh_model = model.__class__(target_adata, n_latent=latent_dim, n_layers=layer_count)
    fresh_model.module = trained_module
    return fresh_model
if __name__ == "__main__":
parser = argparse.ArgumentParser("sVAE benchmark experiment")
parser.add_argument("--seed", type=int, default=1024)
parser.add_argument("--dataset", type=str, default="simulation")
parser.add_argument("--n_latent", type=int, default=15)
parser.add_argument("--n_cells_per_chem", type=int, default=250)
parser.add_argument("--n_chem", type=int, default=100)
parser.add_argument("--n_genes", type=int, default=100)
parser.add_argument("--n_epoch", type=int, default=300)
parser.add_argument("--n_layers", type=int, default=2)
parser.add_argument("--sparse_penalty", type=float, default=0)
parser.add_argument("--method", type=str, default="SpikeSlabVAE")
parser.add_argument("--num_gpus", type=int, default=1)
args = parser.parse_args()
wandb_logger = WandbLogger(project="wandb-svae", log_model=True)
# set up seeds ############################################################
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
save_dir = f"svae_lat{args.n_latent}mth_{args.method}_sp{args.sparse_penalty}"
# logger.info("Generating dataset")
# load data ###############################################################
adata = sparse_shift(
n_latent=args.n_latent,
n_cells_per_chem=args.n_cells_per_chem,
n_chem=args.n_chem,
n_genes=args.n_genes,
)
# split anndata in train / test
if args.method != "SpikeSlabVAE":
sVAE.setup_anndata(adata, labels_key="chem")
model = sVAE(adata, n_latent=args.n_latent, n_layers=args.n_layers)
if args.method == "SpikeSlabVAE":
if args.sparse_penalty == 0:
args.sparse_penalty = 1
SpikeSlabVAE.setup_anndata(adata, labels_key="chem")
model = SpikeSlabVAE(adata, n_latent=args.n_latent, n_layers=args.n_layers)
# train or load model #####################################################
test_range = range(80, 100)
adata_train = adata[[x not in test_range for x in adata.obs["chem"]]].copy()
adata_test = adata[[x in test_range for x in adata.obs["chem"]]].copy()
chem_prior = (
args.method == "sVAE" or args.method == "iVAE" or args.method == "SpikeSlabVAE"
)
# hack: focus on train only, but leave params for all chemicals
model.adata = adata_train
model.module.use_chem_prior = chem_prior
model.module.sparse_mask_penalty = args.sparse_penalty
model.train(
max_epochs=args.n_epoch,
check_val_every_n_epoch=1,
early_stopping=True,
plan_kwargs={
"n_epochs_kl_warmup": 50,
},
logger=wandb_logger,
)
elbo_train = model.get_elbo(adata_train, agg=True)
elbo_val = model.get_elbo(adata_train, indices=model.validation_indices, agg=True)
wandb_logger.experiment.config.update(
{
"seed": args.seed,
"method": args.method,
"sparse_penalty": args.sparse_penalty,
"n_latent": args.n_latent,
"n_layers": args.n_layers,
}
)
model.save(save_dir, overwrite=True)
# obtain latents ##########################################################
latents = model.get_latent_representation(adata_train)
gt_latents = adata_train.obsm["groundtruth_latent"]
# compute and report metrics ##############################################
score_mat_pearson, score_pearson = metrics.mean_corr_coef_np(
gt_latents, latents, method="pearson"
)
score_mat_spearman, score_spearman = metrics.mean_corr_coef_np(
gt_latents, latents, method="spearman"
)
score_r2, _ = metrics.linear_regression_metric(gt_latents,
latents)
# evaluate sparsity pattern of graph on training data
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import f1_score, precision_score, recall_score
if args.n_latent == adata_train.obsm["groundtruth_latent"].shape[1]:
# match dimensions
w = -np.corrcoef(latents, gt_latents, rowvar=False)[
: args.n_latent, args.n_latent :
]
if args.method in ["VAE", "iVAE"]:
# evaluate with top 2 latent per chemical
y1, y2 = np.abs(
model.module.action_prior_mean.detach().cpu().numpy().T
).argsort(axis=0)[-2:, :]
mat_A = np.zeros_like(
model.module.action_prior_mean.detach().cpu().numpy().T
)
mat_A[y1, np.arange(args.n_chem)] = 1
mat_A[y2, np.arange(args.n_chem)] = 1
else:
mat_A = (
model.module.gumbel_action.get_proba().detach().cpu().numpy().T > 0.5
).astype(float)
rate_hit = np.mean(mat_A)
mat_B = (np.abs(adata.uns["prior_mean"].T) > 0).astype(float)
y_true, y_pred = mat_B.flatten(), mat_A[linear_sum_assignment(w.T)[1]].flatten()
score_p_graph, score_r_graph, score_f1_graph = (
precision_score(y_true, y_pred),
recall_score(y_true, y_pred),
f1_score(y_true, y_pred),
)
# adjust model for sparsity binarization
elbo_pre_pre_test = model.get_elbo(adata_test, agg=True)
model = reinit_model(model)
# 1. binarize the sparse mask -> this adds 1 to the unseen chemicals, and freeze mask
model.module.reinit_actsparse_and_freeze(np.arange(80, 100))
# 2. re-train the model on train data
model.adata = adata_train
model.module.use_chem_prior = chem_prior
model.module.sparse_mask_penalty = args.sparse_penalty
if args.method == "SpikeSlabVAE":
# if ssMVI, make sure you disable to categorical KL, or you'll get nans
model.module.use_global_kl = False
model.module.warmup = False
model.train(
max_epochs=300,
check_val_every_n_epoch=1,
early_stopping=True,
logger=False,
plan_kwargs={
"n_epochs_kl_warmup": 1,
},
)
model.save(save_dir + "_binarized_train", overwrite=True)
elbo_pre_test = model.get_elbo(adata_test, agg=True)
iwelbo_pre_test = model.get_marginal_ll(adata_test, agg=True)
# 3. fine-tune model on test data (freeze generative model and fit)
model = reinit_model(model)
model.module.freeze_params()
model.adata = adata_test
model.module.use_chem_prior = chem_prior
model.module.sparse_mask_penalty = args.sparse_penalty
if args.method == "SpikeSlabVAE":
model.module.use_global_kl = False
model.module.warmup = False
model.train(
max_epochs=300,
train_size=1,
early_stopping=False,
logger=False,
plan_kwargs={"n_epochs_kl_warmup": 1, "lr": 0.005},
)
model.save(save_dir + "_test", overwrite=True)
elbo_test = model.get_elbo(adata_test, agg=True)
iwelbo_test = model.get_marginal_ll(adata_test, agg=True)
wandb.log(
{
f"mcc_spearman": score_spearman,
f"mcc_pearson": score_pearson,
f"r_2": score_r2,
f"precision": score_p_graph,
f"recall": score_r_graph,
f"f1_graph": score_f1_graph,
f"elbo_train": elbo_train,
f"elbo_val": elbo_val,
f"elbo_test": elbo_test,
f"iwelbo_test": iwelbo_test,
f"rate_hit": rate_hit,
f"elbo_pre_test": elbo_pre_test,
f"iwelbo_pre_test": iwelbo_pre_test,
f"elbo_pre_pre_test": elbo_pre_pre_test,
}
)
wandb.finish()
| 8,242 | 34.995633 | 89 | py |
RGTSI | RGTSI-main/test.py | import time
import pdb
from options.test_options import TestOptions
from data.dataprocess import DataProcess
from models.model import create_model
import torchvision
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
import os
import torch
from PIL import Image
import numpy as np
from glob import glob
from tqdm import tqdm
import torchvision.transforms as transforms
if __name__ == "__main__":
    # Test-time preprocessing: RGB inputs are resized to 256x256 and mapped to
    # [-1, 1]; the mask is only resized and kept in [0, 1].
    img_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    mask_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor()
    ])
    results_dir = r'./result/'
    # BUG FIX: makedirs(exist_ok=True) replaces the exists()/mkdir() pair,
    # which raced and could not create nested directories.
    os.makedirs(results_dir, exist_ok=True)
    opt = TestOptions().parse()
    # BUG FIX: the original passed the Python *builtin* `dir` function as
    # log_dir (`SummaryWriter(log_dir=dir, ...)`) -- no local `dir` exists in
    # this script, so that is not a valid path.  Log next to the results.
    writer = SummaryWriter(log_dir=os.path.join(results_dir, 'logs'), comment=opt.name)
    model = create_model(opt)
    # The checkpoints were saved from nn.DataParallel-wrapped networks, hence
    # the `.module` indirection when restoring state dicts.
    net_EN = torch.load("./checkpoints/RGTSI/net_EN.pth")
    net_RefEN = torch.load("./checkpoints/RGTSI/net_RefEN.pth")
    net_DE = torch.load("./checkpoints/RGTSI/net_DE.pth")
    net_RGTSI = torch.load("./checkpoints/RGTSI/net_RGTSI.pth")
    model.netEN.module.load_state_dict(net_EN['net'])
    model.netRefEN.module.load_state_dict(net_RefEN['net'])
    model.netDE.module.load_state_dict(net_DE['net'])
    model.netRGTSI.module.load_state_dict(net_RGTSI['net'])
    # The four path lists are sorted so corresponding files line up by index.
    input_mask_paths = glob('{:s}/*'.format("/project/liutaorong/RGTSI/data/DPED10K/test/input_mask/"))
    input_mask_paths.sort()
    de_paths = glob('{:s}/*'.format("/project/liutaorong/RGTSI/data/DPED10K/test/images/"))
    de_paths.sort()
    st_path = glob('{:s}/*'.format("/project/liutaorong/RGTSI/data/DPED10K/test/structure/"))
    st_path.sort()
    ref_paths = glob('{:s}/*'.format("/project/liutaorong/RGTSI/data/DPED10K/test/reference/"))
    ref_paths.sort()
    image_len = len(de_paths)
    for i in tqdm(range(image_len)):
        path_im = input_mask_paths[i]
        path_de = de_paths[i]
        (filepath, tempfilename) = os.path.split(path_de)
        (filename, extension) = os.path.splitext(tempfilename)
        path_st = st_path[i]
        path_rf = ref_paths[i]
        # Load the degraded image, its structure map, the mask and the
        # reference image; everything is handled as 3-channel RGB.
        input_mask = Image.open(path_im).convert("RGB")
        detail = Image.open(path_de).convert("RGB")
        structure = Image.open(path_st).convert("RGB")
        reference = Image.open(path_rf).convert("RGB")
        input_mask = mask_transform(input_mask)
        detail = img_transform(detail)
        structure = img_transform(structure)
        reference = img_transform(reference)
        # Add the batch dimension expected by the model.
        input_mask = torch.unsqueeze(input_mask, 0)
        detail = torch.unsqueeze(detail, 0)
        structure = torch.unsqueeze(structure, 0)
        reference = torch.unsqueeze(reference, 0)
        with torch.no_grad():
            model.set_input(detail, structure, input_mask, reference)
            model.forward()
            fake_out = model.fake_out
            # Composite: keep the network output inside the masked region and
            # the original pixels elsewhere.
            fake_out = fake_out.detach().cpu() * input_mask + detail * (1 - input_mask)
            fake_image = (fake_out + 1) / 2.0  # back from [-1, 1] to [0, 1]
            output = fake_image.detach().numpy()[0].transpose((1, 2, 0)) * 255
            output = Image.fromarray(output.astype(np.uint8))
            output.save(results_dir + filename + ".jpg")
            # Log an input/reference/output/GT strip to TensorBoard.
            # (Renamed from input/reference/output/GT to avoid shadowing the
            # `input` builtin and clobbering the loop's own tensors.)
            vis_in, vis_ref, vis_out, vis_gt = model.get_current_visuals()
            image_out = torch.cat([vis_in, vis_ref, vis_out, vis_gt], 0)
            grid = torchvision.utils.make_grid(image_out)
            writer.add_image('picture(%d)' % i, grid, i)
| 3,552 | 36.010417 | 103 | py |
RGTSI | RGTSI-main/train.py | import time
from options.train_options import TrainOptions
from data.dataprocess import DataProcess
from models.model import create_model
import torchvision
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
import os
import torch
if __name__ == "__main__":
    opt = TrainOptions().parse()
    # define the dataset (detail/ground-truth, structure, mask and reference roots)
    dataset = DataProcess(opt.de_root, opt.st_root, opt.input_mask_root, opt.ref_root, opt, opt.isTrain)
    #dataset = DataProcess(opt.de_root, opt.st_root, opt.input_mask_root, opt.ref_root, opt, opt.isTrain)
    # NOTE(review): shuffle=False means every epoch iterates samples in the
    # same order -- confirm this is intentional for training.
    iterator_train = (data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=False, num_workers=opt.num_workers, drop_last=False, pin_memory=True))
    # Create model
    model = create_model(opt)
    total_steps=0
    # Create the logs
    # NOTE(review): `dir` shadows the Python builtin of the same name.
    dir = os.path.join(opt.log_dir, opt.name).replace('\\', '/')
    if not os.path.exists(dir):
        os.mkdir(dir)
    writer = SummaryWriter(log_dir=dir, comment=opt.name)
    # Start Training
    for epoch in range (opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        epoch_iter = 0
        for detail, structure,mask,reference in iterator_train:
            iter_start_time = time.time()
            # step counters advance by batch size, so the freq options below
            # are expressed in numbers of samples, not iterations
            total_steps += opt.batchSize
            epoch_iter += opt.batchSize
            model.set_input(detail,structure,mask,reference)
            model.optimize_parameters()
            # display the training processing: log a reference/input/output/GT
            # image strip to TensorBoard
            if total_steps % opt.display_freq == 0:
                input,reference,output, GT = model.get_current_visuals()
                image_out = torch.cat([reference,input,output,GT], 0)
                grid = torchvision.utils.make_grid(image_out)
                writer.add_image('Epoch_(%d)_(%d)' % (epoch, total_steps + 1), grid, total_steps + 1)
            # display the training loss scalars
            if total_steps % opt.print_freq == 0:
                errors = model.get_current_errors()
                # per-sample wall time for this iteration
                t = (time.time() - iter_start_time) / opt.batchSize
                writer.add_scalar('G_GAN', errors['G_GAN'], total_steps + 1)
                writer.add_scalar('G_L1', errors['G_L1'], total_steps + 1)
                writer.add_scalar('G_stde', errors['G_stde'], total_steps + 1)
                writer.add_scalar('D_loss', errors['D'], total_steps + 1)
                writer.add_scalar('F_loss', errors['F'], total_steps + 1)
                print('iteration time: %d' % t)
        # periodically checkpoint all sub-networks
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' %
            (epoch, total_steps))
            model.save_networks(epoch)
        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
        # step the LR schedulers once per epoch
        model.update_learning_rate()
    writer.close()
| 2,859 | 47.474576 | 151 | py |
RGTSI | RGTSI-main/options/base_options.py | import argparse
import os
from util import util
import torch
class BaseOptions():
    """Command-line options shared by training and testing.

    Builds an argparse parser lazily, parses argv, prints and saves the
    resulting options to ``<checkpoints_dir>/<name>/opt.txt`` and configures
    the CUDA device from ``--gpu_ids``.
    """

    def __init__(self):
        self.initialized = False  # parser is registered lazily on first use

    def initialize(self, parser):
        """Register all common options on *parser* and return it."""
        parser.add_argument('--st_root', type=str, default=r'./data/datasets/structure', help='path to structure images')
        parser.add_argument('--de_root', type=str, default=r'./data/datasets/images', help='path to detail images (which are the groundtruth)')
        parser.add_argument('--input_mask_root', type=str, default=r'./data/datasets/input_mask', help='path to mask, we use the datasetsets of partial conv hear')
        parser.add_argument('--ref_root', type=str, default=r'./data/datasets/reference', help='path to mask, we use the datasetsets of partial conv hear')
        parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
        parser.add_argument('--num_workers', type=int, default=8, help='numbers of the core of CPU')
        parser.add_argument('--name', type=str, default='RBED',
                            help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
        parser.add_argument('--input_nc', type=int, default=6, help='# of input image channels')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2')
        parser.add_argument('--model', type=str, default='training1', help='set the names of current training process')
        parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--lambda_L1', type=int, default=1, help='weight on L1 term in objective')
        parser.add_argument('--lambda_S', type=int, default=250, help='weight on Style loss in objective')
        parser.add_argument('--lambda_P', type=int, default=0.2, help='weight on Perceptual loss in objective')
        parser.add_argument('--lambda_Gan', type=int, default=0.2, help='weight on GAN term in objective')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        self.initialized = True
        return parser

    def gather_options(self):
        """Create the parser on first call and parse the command line.

        BUG FIX: the original only assigned ``parser`` inside the
        ``if not self.initialized:`` branch, so any call made after the
        parser had already been initialized raised ``NameError``.  The parser
        is now cached on ``self.parser`` and reused.
        """
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            self.parser = self.initialize(parser)
        return self.parser.parse_args()

    def print_options(self, opt):
        """Pretty-print all options (marking non-default values) and save the
        same text to ``<checkpoints_dir>/<name>/opt.txt``."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse, report and post-process the options; returns the namespace.

        ``isTrain`` comes from the subclass (Train/Test options); ``gpu_ids``
        is converted from a comma-separated string to a list of ints and the
        first id becomes the current CUDA device.
        """
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test
        self.print_options(opt)
        # set gpu ids
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                opt.gpu_ids.append(id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
| 4,787 | 49.4 | 163 | py |
RGTSI | RGTSI-main/models/base_model.py | import os
import torch
class BaseModel():
    """Abstract base for RGTSI models: holds common option state, device
    selection and checkpoint save/load helpers.  Subclasses are expected to
    define ``model_names``, ``net<Name>`` / ``optimizer_<Name>`` attributes,
    ``schedulers`` and ``optimizers``."""
    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # CUDA tensors when at least one GPU id was requested, CPU otherwise
        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.modelname = opt.model
        #self.num_channel = 64 #256
        #self.mode = "add"
    def name(self):
        # Identifier used in logs; subclasses override.
        return 'BaseModel'
    def set_input(self, input):
        # Stash the raw input; subclasses override with real preprocessing.
        self.input = input
    def forward(self):
        pass
    def test(self):
        pass
    def get_image_paths(self):
        pass
    def optimize_parameters(self):
        pass
    def get_current_visuals(self):
        # Default: echo back whatever set_input() stored.
        return self.input
    def get_current_errors(self):
        # Mapping of loss name -> value; empty in the base class.
        return {}
    def save(self, label):
        pass
    # helper saving function that can be used by subclasses
    def save_networks(self, which_epoch):
        """Save every net listed in ``model_names`` (plus its optimizer state)
        as ``<epoch>_net_<name>.pth`` under ``save_dir``."""
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_net_%s.pth' % (which_epoch, name)
                save_path = os.path.join(self.save_dir, save_filename).replace('\\', '/')
                net = getattr(self, 'net' + name)
                optimize = getattr(self, 'optimizer_' + name)
                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    # nets are DataParallel-wrapped on GPU, hence `.module`;
                    # weights are moved to CPU for the snapshot, then back.
                    torch.save({'net': net.module.cpu().state_dict(), 'optimize': optimize.state_dict()}, save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    # NOTE(review): the CPU branch saves a bare state_dict with
                    # no 'net'/'optimize' keys, which load_networks() below
                    # cannot read back -- confirm and align the formats.
                    torch.save(net.cpu().state_dict(), save_path)
    # helper loading function that can be used by subclasses
    def load_networks(self, which_epoch):
        """Restore each net and its optimizer from ``<epoch>_net_<name>.pth``;
        expects the {'net': ..., 'optimize': ...} checkpoint layout."""
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = '%s_net_%s.pth' % (which_epoch, name)
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net' + name)
                optimize = getattr(self, 'optimizer_' + name)
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                # if you are using PyTorch newer than 0.4 (e.g., built from
                # GitHub source), you can remove str() on self.device
                state_dict = torch.load(load_path.replace('\\', '/'), map_location=str(self.device))
                optimize.load_state_dict(state_dict['optimize'])
                net.load_state_dict(state_dict['net'])
    # update learning rate (called once every epoch)
    def update_learning_rate(self):
        """Step every scheduler and print the (shared) current LR."""
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % lr)
    def set_requires_grad(self, nets, requires_grad=False):
        """Set requies_grad=Fasle for all the networks to avoid unnecessary computations
        Parameters:
            nets (network list)   -- a list of networks
            requires_grad (bool)  -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad
| 3,444 | 34.885417 | 116 | py |
RGTSI | RGTSI-main/models/Decoder.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from models import model
class UnetSkipConnectionDBlock(nn.Module):
    """One decoder stage of the U-Net: ReLU -> 4x4 stride-2 transposed conv
    (x2 spatial upsampling) followed by a norm layer, or by Tanh for the
    outermost (output) stage."""

    def __init__(self, inner_nc, outer_nc, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d,
                 use_dropout=False):
        super(UnetSkipConnectionDBlock, self).__init__()
        relu = nn.ReLU(True)
        norm = norm_layer(outer_nc, affine=True)
        deconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                    kernel_size=4, stride=2,
                                    padding=1)
        if outermost:
            # Final stage maps to image space, so squash with Tanh.
            layers = [relu, deconv, nn.Tanh()]
        else:
            # Innermost and intermediate stages share the same layout.
            layers = [relu, deconv, norm]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # clone() keeps the incoming tensor untouched by in-place ReLU.
        return self.model(x.clone())
class Decoder(nn.Module):
    """Six-stage U-Net decoder.  Each stage upsamples by 2x; stages 2-6
    consume the matching encoder feature map through a channel-concat skip
    connection."""

    def __init__(self, input_nc, output_nc, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(Decoder, self).__init__()
        # Stages are created in the original order so the parameter layout
        # (and any existing checkpoint) stays compatible.
        self.Decoder_1 = UnetSkipConnectionDBlock(ngf * 8, ngf * 8, norm_layer=norm_layer,
                                                  use_dropout=use_dropout, innermost=True)
        self.Decoder_2 = UnetSkipConnectionDBlock(ngf * 16, ngf * 8, norm_layer=norm_layer, use_dropout=use_dropout)
        self.Decoder_3 = UnetSkipConnectionDBlock(ngf * 16, ngf * 4, norm_layer=norm_layer, use_dropout=use_dropout)
        self.Decoder_4 = UnetSkipConnectionDBlock(ngf * 8, ngf * 2, norm_layer=norm_layer, use_dropout=use_dropout)
        self.Decoder_5 = UnetSkipConnectionDBlock(ngf * 4, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
        self.Decoder_6 = UnetSkipConnectionDBlock(ngf * 2, output_nc, norm_layer=norm_layer,
                                                  use_dropout=use_dropout, outermost=True)

    def forward(self, input_1, input_2, input_3, input_4, input_5, input_6):
        # Start from the deepest feature, then walk back up, concatenating the
        # encoder skip connection (deepest skip first) at every stage.
        out = self.Decoder_1(input_6)
        stages = (self.Decoder_2, self.Decoder_3, self.Decoder_4, self.Decoder_5, self.Decoder_6)
        skips = (input_5, input_4, input_3, input_2, input_1)
        for stage, skip in zip(stages, skips):
            out = stage(torch.cat([out, skip], 1))
        return out
#cat 两个张量 按一维拼接在一起 | 2,677 | 37.811594 | 128 | py |
RGTSI | RGTSI-main/models/Discriminator.py | import torch.nn as nn
import functools
def spectral_norm(module, mode=True):
    """Wrap *module* with PyTorch's spectral-norm reparameterization when
    *mode* is truthy; otherwise return the module unchanged."""
    if not mode:
        return module
    return nn.utils.spectral_norm(module)
class NLayerDiscriminator(nn.Module):
    """Stack of spectrally-normalized 4x4 stride-2 conv layers producing a
    single-channel map of patch logits (optionally squashed by Sigmoid).

    NOTE(review): `norm_layer` is only used here to decide `use_bias`; no norm
    layer is actually inserted into the sequence -- confirm this is intended.
    """
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
        super(NLayerDiscriminator, self).__init__()
        # InstanceNorm has no learnable shift, so convs keep their bias then.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw = 4      # kernel width
        padw = 1    # padding
        # First stage: conv + LeakyReLU, no bias toggle.
        sequence = [
            spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),True),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        # Intermediate stages double the channel count, capped at 8*ndf.
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw, bias=use_bias),True),
                nn.LeakyReLU(0.2, True),
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            spectral_norm(nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=2, padding=padw, bias=use_bias), True),
            nn.LeakyReLU(0.2, True)
        ]
        # Final 1-channel prediction layer (also stride 2 here).
        sequence += [spectral_norm(nn.Conv2d(ndf * nf_mult, 1,
                               kernel_size=kw, stride=2, padding=padw, bias=use_bias),True)]
        if use_sigmoid:
            sequence += [nn.Sigmoid()]
        self.model = nn.Sequential(*sequence)
    def forward(self, input):
        return self.model(input) | 1,756 | 31.537037 | 99 | py |
RGTSI | RGTSI-main/models/loss.py |
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
class VGG16(torch.nn.Module):
    """Frozen, ImageNet-pretrained VGG16 feature extractor.

    The pretrained `features` stack is sliced into named sub-sequences (one
    per ReLU stage, plus the pool after stage 3) so that forward() can return
    every intermediate activation for perceptual/style losses.
    """
    def __init__(self):
        super(VGG16, self).__init__()
        features = models.vgg16(pretrained=True).features
        self.relu1_1 = torch.nn.Sequential()
        self.relu1_2 = torch.nn.Sequential()
        self.relu2_1 = torch.nn.Sequential()
        self.relu2_2 = torch.nn.Sequential()
        self.relu3_1 = torch.nn.Sequential()
        self.relu3_2 = torch.nn.Sequential()
        self.relu3_3 = torch.nn.Sequential()
        self.max3 = torch.nn.Sequential()
        self.relu4_1 = torch.nn.Sequential()
        self.relu4_2 = torch.nn.Sequential()
        self.relu4_3 = torch.nn.Sequential()
        self.relu5_1 = torch.nn.Sequential()
        self.relu5_2 = torch.nn.Sequential()
        self.relu5_3 = torch.nn.Sequential()
        # Copy consecutive layer ranges of the pretrained stack into the
        # corresponding named slice.
        for x in range(2):
            self.relu1_1.add_module(str(x), features[x])
        for x in range(2, 4):
            self.relu1_2.add_module(str(x), features[x])
        for x in range(4, 7):
            self.relu2_1.add_module(str(x), features[x])
        for x in range(7, 9):
            self.relu2_2.add_module(str(x), features[x])
        for x in range(9, 12):
            self.relu3_1.add_module(str(x), features[x])
        for x in range(12, 14):
            self.relu3_2.add_module(str(x), features[x])
        for x in range(14, 16):
            self.relu3_3.add_module(str(x), features[x])
        for x in range(16, 17):
            self.max3.add_module(str(x), features[x])
        for x in range(17, 19):
            self.relu4_1.add_module(str(x), features[x])
        for x in range(19, 21):
            self.relu4_2.add_module(str(x), features[x])
        for x in range(21, 23):
            self.relu4_3.add_module(str(x), features[x])
        for x in range(23, 26):
            self.relu5_1.add_module(str(x), features[x])
        for x in range(26, 28):
            self.relu5_2.add_module(str(x), features[x])
        for x in range(28, 30):
            self.relu5_3.add_module(str(x), features[x])
        # don't need the gradients, just want the features
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x):
        """Run the sliced VGG stack and return a dict of every activation."""
        relu1_1 = self.relu1_1(x)
        relu1_2 = self.relu1_2(relu1_1)
        relu2_1 = self.relu2_1(relu1_2)
        relu2_2 = self.relu2_2(relu2_1)
        relu3_1 = self.relu3_1(relu2_2)
        relu3_2 = self.relu3_2(relu3_1)
        relu3_3 = self.relu3_3(relu3_2)
        max_3 = self.max3(relu3_3)
        relu4_1 = self.relu4_1(max_3)
        relu4_2 = self.relu4_2(relu4_1)
        relu4_3 = self.relu4_3(relu4_2)
        relu5_1 = self.relu5_1(relu4_3)
        # BUG FIX: the original re-applied self.relu5_1 here for both 5_2 and
        # 5_3, leaving the relu5_2/relu5_3 slices built above unused and
        # feeding the losses wrong (extra-downsampled) stage-5 features.
        relu5_2 = self.relu5_2(relu5_1)
        relu5_3 = self.relu5_3(relu5_2)
        out = {
            'relu1_1': relu1_1,
            'relu1_2': relu1_2,
            'relu2_1': relu2_1,
            'relu2_2': relu2_2,
            'relu3_1': relu3_1,
            'relu3_2': relu3_2,
            'relu3_3': relu3_3,
            'max_3': max_3,
            'relu4_1': relu4_1,
            'relu4_2': relu4_2,
            'relu4_3': relu4_3,
            'relu5_1': relu5_1,
            'relu5_2': relu5_2,
            'relu5_3': relu5_3,
        }
        return out
class StyleLoss(nn.Module):
    r"""
    Style loss: L1 distance between Gram matrices of VGG16 features.
    Perceptual loss, VGG-based
    https://arxiv.org/abs/1603.08155
    https://github.com/dxyang/StyleTransfer/blob/master/utils.py
    """
    def __init__(self):
        # NOTE(review): the VGG extractor is hard-pinned to CUDA here.
        super(StyleLoss, self).__init__()
        self.add_module('vgg', VGG16().cuda())
        self.criterion = torch.nn.L1Loss()
    def compute_gram(self, x):
        # Gram matrix of a (b, ch, h, w) feature map, normalized by its size.
        b, ch, h, w = x.size()
        f = x.view(b, ch, w * h)
        f_T = f.transpose(1, 2)
        G = f.bmm(f_T) / (h * w * ch)
        return G
    def __call__(self, x, y):
        # Compute features
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        # Compute loss: sum of Gram-matrix L1 gaps over four VGG stages
        style_loss = 0.0
        style_loss += self.criterion(self.compute_gram(x_vgg['relu2_2']), self.compute_gram(y_vgg['relu2_2']))
        style_loss += self.criterion(self.compute_gram(x_vgg['relu3_3']), self.compute_gram(y_vgg['relu3_3']))
        style_loss += self.criterion(self.compute_gram(x_vgg['relu4_3']), self.compute_gram(y_vgg['relu4_3']))
        style_loss += self.criterion(self.compute_gram(x_vgg['relu5_2']), self.compute_gram(y_vgg['relu5_2']))
        return style_loss
class PerceptualLoss(nn.Module):
    r"""
    Perceptual (content) loss: weighted L1 distance between VGG16 features.
    Perceptual loss, VGG-based
    https://arxiv.org/abs/1603.08155
    https://github.com/dxyang/StyleTransfer/blob/master/utils.py
    """
    def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
        # `weights` gives one multiplier per VGG stage (relu1_1 .. relu5_1).
        # NOTE(review): the VGG extractor is hard-pinned to CUDA here.
        super(PerceptualLoss, self).__init__()
        self.add_module('vgg', VGG16().cuda())
        self.criterion = torch.nn.L1Loss()
        self.weights = weights
    def __call__(self, x, y):
        # Compute features
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        # Weighted sum of per-stage L1 feature differences.
        content_loss = 0.0
        content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])
        content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])
        content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])
        content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])
        content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
        return content_loss
class GANLoss(nn.Module):
    """Relativistic average least-squares GAN loss.

    ``__call__`` receives discriminator scores for a fake batch and a real
    batch and penalizes each against the *mean* score of the other, with a
    +/- target margin (least-squares form).  Target tensors are cached and
    rebuilt only when the input size changes.
    """
    def __init__(self, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None  # cached target tensor (real branch)
        self.fake_label_var = None  # cached target tensor (fake branch)
        self.Tensor = tensor
    # (translated) This could be swapped for a custom GAN loss formulation.
    def get_target_tensor(self, input, target_is_real):
        # Return a cached tensor shaped like `input`, rebuilt on size change.
        target_tensor = None
        if target_is_real:
            create_label = ((self.real_label_var is None) or
                            (self.real_label_var.numel() != input.numel()))
            if create_label:
                self.real_label_var = self.Tensor(input.size()).fill_(self.real_label)
            target_tensor = self.real_label_var
        else:
            create_label = ((self.fake_label_var is None) or
                            (self.fake_label_var.numel() != input.numel()))
            if create_label:
                # NOTE(review): this fills the *fake* target with
                # self.real_label.  It looks like a copy-paste slip, but the
                # relativistic formulas below add/subtract the same target
                # magnitude in both branches, so a margin of 1 may be the
                # intent -- confirm before changing to self.fake_label.
                self.fake_label_var= self.Tensor(input.size()).fill_(self.real_label)
            target_tensor = self.fake_label_var
        return target_tensor
    def __call__(self, y_pred_fake, y_pred, target_is_real):
        target_tensor = self.get_target_tensor(y_pred_fake, target_is_real)
        if target_is_real:
            # Discriminator loss: real scores above the average fake score by
            # the margin, fake scores below the average real score by it.
            errD = (torch.mean((y_pred - torch.mean(y_pred_fake) - target_tensor) ** 2) + torch.mean(
                (y_pred_fake - torch.mean(y_pred) + target_tensor) ** 2)) / 2
            return errD
        else:
            # Generator loss: same form with the margin signs flipped.
            errG = (torch.mean((y_pred - torch.mean(y_pred_fake) + target_tensor) ** 2) + torch.mean(
                (y_pred_fake - torch.mean(y_pred) - target_tensor) ** 2)) / 2
            return errG
# class DESTLOSS(nn.Module):
# def __init__(self):
# super(DESTLOSS, self).__init__()
# self.criterion = torch.nn.L1Loss()
#
# def __call__(self, Gt_de, Gt_st, Fake_de, Fake_st):
# Gt_de = F.interpolate (Gt_de, size=(32,32), mode='bilinear')
# Gt_st = F.interpolate (Gt_st, size=(32,32), mode='bilinear')
#
#
#
# return content_loss | 7,644 | 32.384279 | 110 | py |
RGTSI | RGTSI-main/models/model.py | from models.RGTSI import RGTSI
import torch
def create_model(opt):
    """Instantiate the RGTSI model for the given options, announce it on
    stdout, and return it."""
    instance = RGTSI(opt)
    print("model [%s] was created" % (instance.name()))
    return instance
| 274 | 24 | 110 | py |
RGTSI | RGTSI-main/models/networks.py | # Define networks, init networks
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from models.PCconv import PCconv
from models.InnerCos import InnerCos
from models.Encoder import Encoder, RefEncoder
from models.Discriminator import NLayerDiscriminator
from models.Decoder import Decoder
def get_norm_layer(norm_type='instance'):
    """Map a normalization name ('batch' | 'instance' | 'none') to a layer
    factory (a functools.partial), or None for 'none'.

    Raises NotImplementedError for unknown names.
    """
    factories = {
        'batch': functools.partial(nn.BatchNorm2d, affine=True),
        'instance': functools.partial(nn.InstanceNorm2d, affine=True),
        'none': None,
    }
    if norm_type not in factories:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return factories[norm_type]
def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for *optimizer* from ``opt.lr_policy``.

    Policies: 'lambda' (constant for `niter` epochs, then linear decay to 0
    over `niter_decay`), 'step', 'plateau', 'cosine'.

    Raises NotImplementedError for an unknown policy.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # 1.0 until epoch `niter`, then linearly down to 0.
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # BUG FIX: the original *returned* a NotImplementedError instance
        # (with comma-separated args that were never formatted) instead of
        # raising, so callers silently received an exception object.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def init_weights(net, init_type='normal', gain=0.02):
    """Initialize Conv/Linear/BatchNorm2d weights of *net* in place.

    `init_type` selects the scheme for Conv/Linear weights
    (normal | xavier | kaiming | orthogonal); biases are zeroed and
    BatchNorm2d weights drawn from N(1.0, gain).

    Raises NotImplementedError for an unknown init_type.
    """
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            # MODERNIZED: the non-underscore init functions (init.normal,
            # init.xavier_normal, ...) are long-deprecated aliases; use the
            # in-place variants.
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Move *net* to the first GPU, wrap it in DataParallel when gpu_ids are
    given, then initialize its weights; returns the (possibly wrapped) net.
    NOTE(review): `gpu_ids=[]` is a mutable default argument -- harmless while
    callers never mutate it, but worth replacing with None.
    """
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, gain=init_gain)
    return net
def define_G(input_nc, output_nc, ngf, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[], init_gain=0.02):
    """Build the generator: returns a 5-tuple of (initialized) encoder,
    reference encoder, decoder, PC block, and the `stde_list` that PCblock
    appends its InnerCos loss module to.
    NOTE(review): mutable default `gpu_ids=[]` (see init_net).
    """
    norm_layer = get_norm_layer(norm_type=norm)
    stde_list = []
    netEN = Encoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    netRefEN = RefEncoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    netDE = Decoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    PCBlock = PCblock(stde_list)
    return init_net(netEN, init_type, init_gain, gpu_ids), init_net(netRefEN, init_type, init_gain, gpu_ids), init_net(netDE, init_type, init_gain, gpu_ids), init_net(PCBlock, init_type, init_gain, gpu_ids),stde_list
def define_D(input_nc, ndf, n_layers_D=3, norm='batch', init_type='normal', gpu_ids=[], init_gain=0.02):
    """Build and initialize an NLayerDiscriminator (no sigmoid output)."""
    netD = None
    norm_layer = get_norm_layer(norm_type=norm)
    netD = NLayerDiscriminator(input_nc, ndf, n_layers=n_layers_D, norm_layer=norm_layer, use_sigmoid=False)
    return init_net(netD, init_type, init_gain, gpu_ids)
def print_network(net):
    """Print *net*'s structure followed by its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
class PCblock(nn.Module):
    """Wraps PCconv with an InnerCos loss hook.

    The InnerCos module is also appended to the caller-supplied `stde_list`
    so the training code can read the auxiliary loss it records.
    """
    def __init__(self, stde_list):
        super(PCblock, self).__init__()
        self.pc_block = PCconv()
        innerloss = InnerCos()
        stde_list.append(innerloss)  # expose the loss hook to the caller
        loss = [innerloss]
        self.loss=nn.Sequential(*loss)
    def forward(self,input,reference,mask):
        # PCconv fuses input/reference features under the mask; the InnerCos
        # wrapper then passes the result through while recording its loss.
        out = self.pc_block(input,reference,mask)
        out = self.loss(out)
        return out
| 4,649 | 35.614173 | 216 | py |
RGTSI | RGTSI-main/models/Encoder.py | import torch.nn as nn
# Define the resnet block
class ResnetBlock(nn.Module):
    """Residual block: two reflection-padded 3x3 convolutions (the first
    optionally dilated) with instance norm, added back onto the input:
    out = x + F(x)."""

    def __init__(self, dim, dilation=1):
        super(ResnetBlock, self).__init__()
        pad_a = nn.ReflectionPad2d(dilation)
        conv_a = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=dilation, bias=False)
        norm_a = nn.InstanceNorm2d(dim, track_running_stats=False)
        act = nn.ReLU(True)
        pad_b = nn.ReflectionPad2d(1)
        conv_b = nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=1, bias=False)
        norm_b = nn.InstanceNorm2d(dim, track_running_stats=False)
        self.conv_block = nn.Sequential(pad_a, conv_a, norm_a, act, pad_b, conv_b, norm_b)

    def forward(self, x):
        return x + self.conv_block(x)
# define the Encoder unit
class UnetSkipConnectionEBlock(nn.Module):
    """One encoder stage: (LeakyReLU ->) 4x4 stride-2 conv (-> norm ->
    optional dropout).  The outermost stage is a bare conv; the innermost
    stage skips the norm."""

    def __init__(self, outer_nc, inner_nc, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d,
                 use_dropout=False):
        super(UnetSkipConnectionEBlock, self).__init__()
        conv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                         stride=2, padding=1)
        act = nn.LeakyReLU(0.2, True)
        norm = norm_layer(inner_nc, affine=True)
        if outermost:
            # First stage: raw conv, no leading activation.
            layers = [conv]
        elif innermost:
            layers = [act, conv]
        else:
            layers = [act, conv, norm]
            if use_dropout:
                layers = layers + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
class Encoder(nn.Module):
    """Six-stage U-Net encoder followed by `res_num` dilated residual blocks.

    forward() returns the first five stage outputs (used as decoder skip
    connections) plus the residual-refined bottleneck feature.
    NOTE(review): `output_nc` is accepted but never used in this class.
    """
    def __init__(self, input_nc, output_nc, ngf=64, res_num=4, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(Encoder, self).__init__()
        # construct unet structure: each stage halves spatial size, channel
        # width grows ngf -> 8*ngf and then stays capped
        Encoder_1 = UnetSkipConnectionEBlock(input_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, outermost=True)
        Encoder_2 = UnetSkipConnectionEBlock(ngf, ngf * 2, norm_layer=norm_layer, use_dropout=use_dropout)
        Encoder_3 = UnetSkipConnectionEBlock(ngf * 2, ngf * 4, norm_layer=norm_layer, use_dropout=use_dropout)
        Encoder_4 = UnetSkipConnectionEBlock(ngf * 4, ngf * 8, norm_layer=norm_layer, use_dropout=use_dropout)
        Encoder_5 = UnetSkipConnectionEBlock(ngf * 8, ngf * 8, norm_layer=norm_layer, use_dropout=use_dropout)
        Encoder_6 = UnetSkipConnectionEBlock(ngf * 8, ngf * 8, norm_layer=norm_layer, use_dropout=use_dropout, innermost=True)
        # bottleneck refinement: res_num dilated (dilation=2) residual blocks
        blocks = []
        for _ in range(res_num):
            block = ResnetBlock(ngf * 8, 2)
            blocks.append(block)
        self.middle = nn.Sequential(*blocks)
        self.Encoder_1 = Encoder_1
        self.Encoder_2 = Encoder_2
        self.Encoder_3 = Encoder_3
        self.Encoder_4 = Encoder_4
        self.Encoder_5 = Encoder_5
        self.Encoder_6 = Encoder_6
    def forward(self, input):
        y_1 = self.Encoder_1(input)
        y_2 = self.Encoder_2(y_1)
        y_3 = self.Encoder_3(y_2)
        y_4 = self.Encoder_4(y_3)
        y_5 = self.Encoder_5(y_4)
        y_6 = self.Encoder_6(y_5)
        y_7 = self.middle(y_6)
        # y_6 itself is not returned; the decoder consumes its refined form y_7
        return y_1, y_2, y_3, y_4, y_5, y_7
class RefEncoder(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, res_num=4, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(RefEncoder, self).__init__()
# construct unet structure
Encoder_1 = UnetSkipConnectionEBlock(3, ngf, norm_layer=norm_layer, use_dropout=use_dropout, outermost=True)
Encoder_2 = UnetSkipConnectionEBlock(ngf, ngf * 2, norm_layer=norm_layer, use_dropout=use_dropout)
Encoder_3 = UnetSkipConnectionEBlock(ngf * 2, ngf * 4, norm_layer=norm_layer, use_dropout=use_dropout)
Encoder_4 = UnetSkipConnectionEBlock(ngf * 4, ngf * 8, norm_layer=norm_layer, use_dropout=use_dropout)
Encoder_5 = UnetSkipConnectionEBlock(ngf * 8, ngf * 8, norm_layer=norm_layer, use_dropout=use_dropout)
Encoder_6 = UnetSkipConnectionEBlock(ngf * 8, ngf * 8, norm_layer=norm_layer, use_dropout=use_dropout, innermost=True)
blocks = []
for _ in range(res_num):
block = ResnetBlock(ngf * 8, 2)
blocks.append(block)
self.middle = nn.Sequential(*blocks)
self.Encoder_1 = Encoder_1
self.Encoder_2 = Encoder_2
self.Encoder_3 = Encoder_3
self.Encoder_4 = Encoder_4
self.Encoder_5 = Encoder_5
self.Encoder_6 = Encoder_6
def forward(self, input):
y_1 = self.Encoder_1(input)
y_2 = self.Encoder_2(y_1)
y_3 = self.Encoder_3(y_2)
y_4 = self.Encoder_4(y_3)
y_5 = self.Encoder_5(y_4)
y_6 = self.Encoder_6(y_5)
y_7 = self.middle(y_6)
return y_1, y_2, y_3, y_4, y_5, y_7 | 4,895 | 38.483871 | 126 | py |
RGTSI | RGTSI-main/models/PCconv.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn.functional as F
import torch
import torch.nn as nn
from models.FAM.FeatureAlignment import FAM
import util.util as util
from util.Selfpatch import Selfpatch
from util.util import saveoffset, showpatch
# SE MODEL
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Conv2d(channel, channel // reduction, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, kernel_size=1, stride=1, padding=0),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c, 1, 1)
y = self.fc(y)
return x * y.expand_as(x)
class Convnorm(nn.Module):
def __init__(self, in_ch, out_ch, sample='none-3', activ='leaky'):
super().__init__()
self.bn = nn.InstanceNorm2d(out_ch, affine=True)
if sample == 'down-3':
self.conv = nn.Conv2d(in_ch, out_ch, 3, 2, 1, bias=False)
else:
self.conv = nn.Conv2d(in_ch, out_ch, 3, 1)
if activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input):
out = input
out = self.conv(out)
out = self.bn(out)
if hasattr(self, 'activation'):
out = self.activation(out[0])
return out
class PCBActiv(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ='leaky',
conv_bias=False, innorm=False, inner=False, outer=False):
super().__init__()
if sample == 'same-5':
self.conv = PartialConv(in_ch, out_ch, 5, 1, 2, bias=conv_bias)
elif sample == 'same-7':
self.conv = PartialConv(in_ch, out_ch, 7, 1, 3, bias=conv_bias)
elif sample == 'down-3':
self.conv = PartialConv(in_ch, out_ch, 3, 2, 1, bias=conv_bias)
else:
self.conv = PartialConv(in_ch, out_ch, 3, 1, 1, bias=conv_bias)
if bn:
self.bn = nn.InstanceNorm2d(out_ch, affine=True)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
self.innorm = innorm
self.inner = inner
self.outer = outer
def forward(self, input):
out = input
if self.inner:
out[0] = self.bn(out[0])
out[0] = self.activation(out[0])
out = self.conv(out)
out[0] = self.bn(out[0])
out[0] = self.activation(out[0])
elif self.innorm:
out = self.conv(out)
out[0] = self.bn(out[0])
out[0] = self.activation(out[0])
elif self.outer:
out = self.conv(out)
out[0] = self.bn(out[0])
else:
out = self.conv(out)
out[0] = self.bn(out[0])
if hasattr(self, 'activation'):
out[0] = self.activation(out[0])
return out
class ConvDown(nn.Module):
def __init__(self, in_c, out_c, kernel, stride, padding=0, dilation=1, groups=1, bias=False, layers=1, activ=True):
super().__init__()
nf_mult = 1
nums = out_c / 64
sequence = []
for i in range(1, layers + 1):
nf_mult_prev = nf_mult
if nums == 8:
if in_c == 512:
nfmult = 1
else:
nf_mult = 2
else:
nf_mult = min(2 ** i, 8)
if kernel != 1:
if activ == False and layers == 1:
sequence += [
nn.Conv2d(nf_mult_prev * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c)
]
else:
sequence += [
nn.Conv2d(nf_mult_prev * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(in_c, out_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(out_c),
nn.LeakyReLU(0.2, True)
]
if activ == False:
if i + 1 == layers:
if layers == 2:
sequence += [
nn.Conv2d(nf_mult * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c)
]
else:
sequence += [
nn.Conv2d(nf_mult_prev * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c)
]
break
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
class ConvUp(nn.Module):
def __init__(self, in_c, out_c, kernel, stride, padding=0, dilation=1, groups=1, bias=False):
super().__init__()
self.conv = nn.Conv2d(in_c, out_c, kernel,
stride, padding, dilation, groups, bias)
self.bn = nn.InstanceNorm2d(out_c)
self.relu = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input, size):
out = F.interpolate(input=input, size=size, mode='bilinear')
out = self.conv(out)
out = self.bn(out)
out = self.relu(out)
return out
class BASE(nn.Module):
def __init__(self, inner_nc):
super(BASE, self).__init__()
se = SELayer(inner_nc, 16)
model = [se]
gus = util.gussin(1.5).cuda()
self.gus = torch.unsqueeze(gus, 1).double()
self.model = nn.Sequential(*model)
self.down = nn.Sequential(
nn.Conv2d(1024, 512, 1, 1, 0, bias=False),
nn.InstanceNorm2d(512),
nn.LeakyReLU(negative_slope=0.2)
)
def forward(self, x):
Nonparm = Selfpatch()
out_32 = self.model(x)
b, c, h, w = out_32.size()
gus = self.gus.float()
gus_out = out_32[0].expand(h * w, c, h, w)
gus_out = gus * gus_out
gus_out = torch.sum(gus_out, -1)
gus_out = torch.sum(gus_out, -1)
gus_out = gus_out.contiguous().view(b, c, h, w)
csa2_in = F.sigmoid(out_32)
csa2_f = torch.nn.functional.pad(csa2_in, (1, 1, 1, 1))
csa2_ff = torch.nn.functional.pad(out_32, (1, 1, 1, 1))
csa2_fff, csa2_f, csa2_conv = Nonparm.buildAutoencoder(csa2_f[0], csa2_in[0], csa2_ff[0], 3, 1)
csa2_conv = csa2_conv.expand_as(csa2_f)
csa_a = csa2_conv * csa2_f
csa_a = torch.mean(csa_a, 1)
a_c, a_h, a_w = csa_a.size()
csa_a = csa_a.contiguous().view(a_c, -1)
csa_a = F.softmax(csa_a, dim=1)
csa_a = csa_a.contiguous().view(a_c, 1, a_h, a_h)
out = csa_a * csa2_fff
out = torch.sum(out, -1)
out = torch.sum(out, -1)
out_csa = out.contiguous().view(b, c, h, w)
out_32 = torch.cat([gus_out, out_csa], 1)
out_32 = self.down(out_32)
return out_32
class PartialConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
# mask is not updated
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, inputt):
# http://masc.cs.gmu.edu/wiki/partialconv
# C(X) = W^T * X + b, C(0) = b, D(M) = 1 * M + 0 = sum(M)
# W^T* (M .* X) / sum(M) + b = [C(M .* X) – C(0)] / D(M) + C(0)
input = inputt[0]
mask = inputt[1].float().cuda()
output = self.input_conv(input * mask)
if self.input_conv.bias is not None:
output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output)
with torch.no_grad():
output_mask = self.mask_conv(mask)
no_update_holes = output_mask == 0
mask_sum = output_mask.masked_fill_(no_update_holes.bool(), 1.0)
output_pre = (output - output_bias) / mask_sum + output_bias
output = output_pre.masked_fill_(no_update_holes.bool(), 0.0)
new_mask = torch.ones_like(output)
new_mask = new_mask.masked_fill_(no_update_holes.bool(), 0.0)
out = []
out.append(output)
out.append(new_mask)
return out
class PCconv(nn.Module):
def __init__(self):
super(PCconv, self).__init__()
self.down_128 = ConvDown(64, 128, 4, 2, padding=1, layers=2)
self.down_64 = ConvDown(128, 256, 4, 2, padding=1)
self.down_32 = ConvDown(256, 256, 1, 1)
self.down_16 = ConvDown(512, 512, 4, 2, padding=1, activ=False)
self.down_8 = ConvDown(512, 512, 4, 2, padding=1, layers=2, activ=False)
self.down_4 = ConvDown(512, 512, 4, 2, padding=1, layers=3, activ=False)
self.down = ConvDown(768, 256, 1, 1)
self.fuse = ConvDown(512, 512, 1, 1)
self.up = ConvUp(512, 256, 1, 1)
self.up_128 = ConvUp(512, 64, 1, 1)
self.up_64 = ConvUp(512, 128, 1, 1)
self.up_32 = ConvUp(512, 256, 1, 1)
self.base= BASE(512)
seuqence_3 = []
seuqence_5 = []
seuqence_7 = []
for i in range(5):
seuqence_3 += [PCBActiv(256, 256, innorm=True)]
seuqence_5 += [PCBActiv(256, 256, sample='same-5', innorm=True)]
seuqence_7 += [PCBActiv(256, 256, sample='same-7', innorm=True)]
self.cov_3 = nn.Sequential(*seuqence_3)
self.cov_5 = nn.Sequential(*seuqence_5)
self.cov_7 = nn.Sequential(*seuqence_7)
self.activation = nn.LeakyReLU(negative_slope=0.2)
self.TextureAlignment = FAM(in_channels=768)
self.StructureAlignment = FAM(in_channels=768)
def forward(self,input,reference,mask):
#def forward(self,input,reference,input_Mask):
mask = util.cal_feat_mask(mask, 3, 1)
# input[2]:256 32 32
b, c, h, w = input[2].size()
mask_1 = torch.add(torch.neg(mask.float()), 1)
mask_1 = mask_1.expand(b, c, h, w)
x_1 = self.activation(input[0])
x_2 = self.activation(input[1])
x_3 = self.activation(input[2])
x_4 = self.activation(input[3])
x_5 = self.activation(input[4])
x_6 = self.activation(input[5])
y_1 = self.activation(reference[0])
y_2 = self.activation(reference[1])
y_3 = self.activation(reference[2])
y_4 = self.activation(reference[3])
y_5 = self.activation(reference[4])
y_6 = self.activation(reference[5])
# Change the shape of each layer and intergrate low-level/high-level features
x_1 = self.down_128(x_1)
x_2 = self.down_64(x_2)
x_3 = self.down_32(x_3)
x_4 = self.up(x_4, (32, 32))
x_5 = self.up(x_5, (32, 32))
x_6 = self.up(x_6, (32, 32))
y_1 = self.down_128(y_1)
y_2 = self.down_64(y_2)
y_3 = self.down_32(y_3)
y_4 = self.up(y_4, (32,32))
y_5 = self.up(y_5, (32,32))
y_6 = self.up(y_6, (32,32))
# The first three layers are Texture
# The last three layers are Structure
x_INDE = torch.cat([x_1, x_2, x_3], 1)
x_INST = torch.cat([x_4, x_5, x_6], 1)
y_RFDE = torch.cat([y_1, y_2, y_3], 1)
y_RFST = torch.cat([y_4, y_5, y_6], 1)
#Feature Aligned 合并
x_DE = self.TextureAlignment(x_INDE,y_RFDE)
x_ST = self.StructureAlignment(x_INST,y_RFST)
x_ST = self.down(x_ST)
x_DE = self.down(x_DE)
x_ST = [x_ST, mask_1]
x_DE = [x_DE, mask_1]
# Multi Scale PConv fill the Details
x_DE_3 = self.cov_3(x_DE)
x_DE_5 = self.cov_5(x_DE)
x_DE_7 = self.cov_7(x_DE)
x_DE_fuse = torch.cat([x_DE_3[0], x_DE_5[0], x_DE_7[0]], 1)
x_DE_fi = self.down(x_DE_fuse)
# Multi Scale PConv fill the Structure
x_ST_3 = self.cov_3(x_ST)
x_ST_5 = self.cov_5(x_ST)
x_ST_7 = self.cov_7(x_ST)
x_ST_fuse = torch.cat([x_ST_3[0], x_ST_5[0], x_ST_7[0]], 1)
x_ST_fi = self.down(x_ST_fuse)
x_cat = torch.cat([x_ST_fi, x_DE_fi], 1)
x_cat_fuse = self.fuse(x_cat)
# Feature equalizations
x_final = self.base(x_cat_fuse)
# Add back to the input
x_ST = x_final
x_DE = x_final
x_1 = self.up_128(x_DE, (128, 128)) + input[0]
x_2 = self.up_64(x_DE, (64, 64)) + input[1]
x_3 = self.up_32(x_DE, (32, 32)) + input[2]
x_4 = self.down_16(x_ST) + input[3]
x_5 = self.down_8(x_ST) + input[4]
x_6 = self.down_4(x_ST) + input[5]
out_final = [x_1, x_2, x_3, x_4, x_5, x_6, x_ST_fi, x_DE_fi]
return out_final
| 14,147 | 35.748052 | 119 | py |
RGTSI | RGTSI-main/models/RGTSI.py | import torch
import random
from collections import OrderedDict
from torch.autograd import Variable
from PIL import Image
import torch.nn.functional as F
from models.base_model import BaseModel
from models import networks
from .loss import VGG16, PerceptualLoss, StyleLoss, GANLoss
class RGTSI(BaseModel):
def __init__(self, opt):
super(RGTSI, self).__init__(opt)
self.isTrain = opt.isTrain
self.opt = opt
self.device = torch.device('cuda')
# define tensors
self.vgg = VGG16()
self.PerceptualLoss = PerceptualLoss()
self.StyleLoss = StyleLoss()
self.input_DE = self.Tensor(opt.batchSize, opt.input_nc, opt.fineSize, opt.fineSize)
self.input_ST = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)
self.ref_DE = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)
self.fake_input_p_1 = self.Tensor(opt.batchSize, 6, opt.fineSize, opt.fineSize)
self.Gt_Local = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)
self.Gt_DE = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)
self.Gt_ST = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)
self.Gt_RF = self.Tensor(opt.batchSize, opt.output_nc, opt.fineSize, opt.fineSize)
self.input_mask_global = self.Tensor(opt.batchSize, 1, opt.fineSize, opt.fineSize)
self.model_names = []
if len(opt.gpu_ids) > 0:
self.use_gpu = True
self.vgg = self.vgg.to(self.gpu_ids[0])
self.vgg = torch.nn.DataParallel(self.vgg, self.gpu_ids)
# load/define networks EN:Encoder RefEN:RefEncoder DE:Decoder RGTSI: Reference-Guided Texture and Structure Inference
self.netEN, self.netRefEN, self.netDE, self.netRGTSI, self.stde_loss = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.norm,
opt.use_dropout, opt.init_type,
self.gpu_ids,
opt.init_gain)
self.model_names=[ 'EN','RefEN','DE', 'RGTSI']
if self.isTrain:
self.netD = networks.define_D(3, opt.ndf,
opt.n_layers_D, opt.norm, opt.init_type, self.gpu_ids, opt.init_gain)
self.netF = networks.define_D(3, opt.ndf,
opt.n_layers_D, opt.norm, opt.init_type, self.gpu_ids, opt.init_gain)
self.model_names.append('D')
self.model_names.append('F')
if self.isTrain:
self.old_lr = opt.lr
# define loss functions
self.criterionGAN = GANLoss(tensor=self.Tensor)
self.criterionL1 = torch.nn.L1Loss()
self.criterionL2 = torch.nn.MSELoss()
# initialize optimizers
self.schedulers = []
self.optimizers = []
self.optimizer_EN = torch.optim.Adam(self.netEN.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_RefEN = torch.optim.Adam(self.netRefEN.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_DE = torch.optim.Adam(self.netDE.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_RGTSI = torch.optim.Adam(self.netRGTSI.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_F = torch.optim.Adam(self.netF.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_EN)
self.optimizers.append(self.optimizer_RefEN)
self.optimizers.append(self.optimizer_DE)
self.optimizers.append(self.optimizer_RGTSI)
self.optimizers.append(self.optimizer_D)
self.optimizers.append(self.optimizer_F)
for optimizer in self.optimizers:
self.schedulers.append(networks.get_scheduler(optimizer, opt))
print('---------- Networks initialized -------------')
networks.print_network(self.netEN)
networks.print_network(self.netRefEN)
networks.print_network(self.netDE)
networks.print_network(self.netRGTSI)
if self.isTrain:
networks.print_network(self.netD)
networks.print_network(self.netF)
print('-----------------------------------------------')
#####modified
if self.isTrain:
if opt.continue_train :
print('Loading pre-trained network!')
self.load_networks(self.netEN, 'EN', opt.which_epoch)
self.load_networks(self.netRefEN, 'RefEN', opt.which_epoch)
self.load_networks(self.netDE, 'DE', opt.which_epoch)
self.load_networks(self.netRGTSI, 'RGTSI', opt.which_epoch)
self.load_networks(self.netD, 'D', opt.which_epoch)
self.load_networks(self.netF, 'F', opt.which_epoch)
def name(self):
return self.modelname
def mask_process(self, mask):
mask = mask[0][0]
mask = torch.unsqueeze(mask, 0)
mask = torch.unsqueeze(mask, 1)
mask = mask.byte()
return mask
def set_input(self, input_De,input_St,input_Mask,ref_De):
self.Gt_DE = input_De.to(self.device)
self.Gt_ST = input_St.to(self.device)
self.input_DE = input_De.to(self.device)
self.ref_DE = ref_De.to(self.device)
self.input_mask_global = self.mask_process(input_Mask.to(self.device))
self.Gt_Local = input_De.to(self.device)
# define local area which send to the local discriminator
self.crop_x = random.randint(0, 191)
self.crop_y = random.randint(0, 191)
self.Gt_Local = self.Gt_Local[:, :, self.crop_x:self.crop_x + 64, self.crop_y:self.crop_y + 64]
self.ex_input_mask = self.input_mask_global.expand(self.input_mask_global.size(0), 3, self.input_mask_global.size(2),
self.input_mask_global.size(3))
#unpositve with original mask
self.inv_ex_input_mask = torch.add(torch.neg(self.ex_input_mask.float()), 1).float()
# set loss groundtruth for two branch
self.stde_loss[0].set_target(self.Gt_DE, self.Gt_ST)
# Do not set the mask regions as 0
self.input_DE.narrow(1, 0, 1).masked_fill_(self.input_mask_global.narrow(1, 0, 1).bool(), 2 * 123.0 / 255.0 - 1.0)
self.input_DE.narrow(1, 1, 1).masked_fill_(self.input_mask_global.narrow(1, 0, 1).bool(), 2 * 104.0 / 255.0 - 1.0)
self.input_DE.narrow(1, 2, 1).masked_fill_(self.input_mask_global.narrow(1, 0, 1).bool(), 2 * 117.0 / 255.0 - 1.0)
def forward(self):
fake_input_p_1, fake_input_p_2, fake_input_p_3, fake_input_p_4, fake_input_p_5, fake_input_p_6 = self.netEN(
torch.cat([self.input_DE, self.inv_ex_input_mask], 1))
De_in = [fake_input_p_1, fake_input_p_2, fake_input_p_3, fake_input_p_4, fake_input_p_5, fake_input_p_6]
fake_ref_p_1, fake_ref_p_2, fake_ref_p_3, fake_ref_p_4, fake_ref_p_5, fake_ref_p_6 = self.netRefEN(self.ref_DE)
Ref_in = [fake_ref_p_1,fake_ref_p_2, fake_ref_p_3, fake_ref_p_4, fake_ref_p_5, fake_ref_p_6]
#De_in=[fake_p_1,fake_p_2,fake_p_3,fake_p_4,fake_p_5,fake_p_6]
x_out = self.netRGTSI(De_in, Ref_in, self.input_mask_global)
#x_out返回为,图片+损失[x_1, x_2, x_3, x_4, x_5, x_6, x_ST_fi, x_DE_fi]
self.fake_out = self.netDE(x_out[0], x_out[1], x_out[2], x_out[3], x_out[4], x_out[5])
def backward_D(self):
fake_AB = self.fake_out
real_AB = self.Gt_DE # GroundTruth
real_local = self.Gt_Local
fake_local = self.fake_out[:, :, self.crop_x:self.crop_x + 64, self.crop_y:self.crop_y + 64]
# Global Discriminator
self.pred_fake = self.netD(fake_AB.detach())
self.pred_real = self.netD(real_AB)
self.loss_D_fake = self.criterionGAN(self.pred_fake, self.pred_real, True)
# Local discriminator
self.pred_fake_F = self.netF(fake_local.detach())
self.pred_real_F = self.netF(real_local)
self.loss_F_fake = self.criterionGAN(self.pred_fake_F, self.pred_real_F, True)
self.loss_D = self.loss_D_fake + self.loss_F_fake
self.loss_D.backward()
def backward_G(self):
# First, The generator should fake the discriminator
real_AB = self.Gt_DE
fake_AB = self.fake_out
real_local = self.Gt_Local
fake_local = self.fake_out[:, :, self.crop_x:self.crop_x + 64, self.crop_y:self.crop_y + 64]
# Global discriminator
pred_real = self.netD(real_AB)
pred_fake = self.netD(fake_AB)
# Local discriminator
pred_real_F = self.netF(real_local)
pred_fake_f = self.netF(fake_local)
self.loss_G_GAN = self.criterionGAN(pred_fake, pred_real, False) + self.criterionGAN(pred_fake_f, pred_real_F,
False)
# Second, Reconstruction loss
self.loss_L1 = self.criterionL1(self.fake_out, self.Gt_DE)
self.Perceptual_loss = self.PerceptualLoss(self.fake_out, self.Gt_DE)
self.Style_Loss = self.StyleLoss(self.fake_out, self.Gt_DE)
# self.loss_G = self.loss_G_L1 + self.loss_G_GAN *0.2 + self.Perceptual_loss * 0.2 + self.Style_Loss *250
self.loss_G = self.loss_L1 * self.opt.lambda_L1 + self.loss_G_GAN * self.opt.lambda_Gan + \
self.Perceptual_loss * self.opt.lambda_P + self.Style_Loss * self.opt.lambda_S
self.stde_loss_value = 0
for loss in self.stde_loss:
self.stde_loss_value += loss.backward()
self.stde_loss_value += loss.loss
self.loss_G += self.stde_loss_value
self.loss_G.backward()
def optimize_parameters(self):
self.forward()
# Optimize the D and F first
self.set_requires_grad(self.netF, True)
self.set_requires_grad(self.netD, True)
self.set_requires_grad(self.netEN, False)
self.set_requires_grad(self.netRefEN, False)
self.set_requires_grad(self.netDE, False)
self.set_requires_grad(self.netRGTSI, False)
self.optimizer_D.zero_grad()
self.optimizer_F.zero_grad()
self.backward_D()
self.optimizer_D.step()
self.optimizer_F.step()
# Optimize EN, RefEN, DE, MEDEF
self.set_requires_grad(self.netF, False)
self.set_requires_grad(self.netD, False)
self.set_requires_grad(self.netEN, True)
self.set_requires_grad(self.netRefEN, True)
self.set_requires_grad(self.netDE, True)
self.set_requires_grad(self.netRGTSI, True)
self.optimizer_EN.zero_grad()
self.optimizer_RefEN.zero_grad()
self.optimizer_DE.zero_grad()
self.optimizer_RGTSI.zero_grad()
self.backward_G()
self.optimizer_RGTSI.step()
self.optimizer_EN.step()
self.optimizer_RefEN.step()
self.optimizer_DE.step()
def get_current_errors(self):
# show the current loss
return OrderedDict([('G_GAN', self.loss_G_GAN.data),
('G_L1', self.loss_G.data),
('G_stde', self.stde_loss_value.data),
('D', self.loss_D_fake.data),
('F', self.loss_F_fake.data)
])
# You can also see the Tensorborad
def get_current_visuals(self):
input_image = (self.input_DE.data.cpu()+1)/2.0
ref_image = (self.ref_DE.data.cpu()+1)/2.0
fake_image = (self.fake_out.data.cpu()+1)/2.0
real_gt = (self.Gt_DE.data.cpu()+1)/2.0
return input_image, ref_image,fake_image, real_gt
| 12,491 | 45.438662 | 144 | py |
RGTSI | RGTSI-main/models/InnerCos.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import util.util as util
class InnerCos(nn.Module):
def __init__(self):
super(InnerCos, self).__init__()
self.criterion = nn.L1Loss()
self.target = None
self.down_model = nn.Sequential(
nn.Conv2d(256, 3, kernel_size=1,stride=1, padding=0),
nn.Tanh()
)
def set_target(self, targetde, targetst):
self.targetst = F.interpolate(targetst, size=(32, 32), mode='bilinear')
self.targetde = F.interpolate(targetde, size=(32, 32), mode='bilinear')
def get_target(self):
return self.target
def forward(self, in_data):
self.ST = self.down_model(in_data[6])
self.DE = self.down_model(in_data[7])
self.loss = self.criterion(self.ST, self.targetst)+self.criterion(self.DE, self.targetde)
self.output = [in_data[0],in_data[1],in_data[2],in_data[3],in_data[4],in_data[5]]
return self.output
def backward(self, retain_graph=True):
self.loss.backward(retain_graph=retain_graph)
return self.loss
def __repr__(self):
return self.__class__.__name__ | 1,212 | 31.783784 | 97 | py |
RGTSI | RGTSI-main/models/FAM/non_local_embedded_gaussian.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
"""
:param in_channels:
:param inter_channels:
:param dimension:
:param sub_sample:
:param bn_layer:
"""
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x, return_nl_map=False):
"""
:param x: (b, c, t, h, w)
:param return_nl_map: if True return z, nl_map, else only return z.
:return:
"""
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
if return_nl_map:
return z, f_div_C
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer,)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer,)
if __name__ == '__main__':
import torch
for (sub_sample_, bn_layer_) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample_, bn_layer=bn_layer_)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample_, bn_layer=bn_layer_)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample_, bn_layer=bn_layer_)
out = net(img)
print(out.size())
| 5,241 | 35.657343 | 102 | py |
RGTSI | RGTSI-main/models/FAM/FeatureAlignment.py | import torch.nn as nn
import torch
from models.FAM.DeformableBlock import DeformableConvBlock
from util.util import showpatch
class FAM(nn.Module):
def __init__(self,in_channels):
super(FAM, self).__init__()
self.deformblock = DeformableConvBlock(input_channels = in_channels*2)
def forward(self,ist_feature, rst_feature):
st_out = self.deformblock(ist_feature, rst_feature) #输出aligned feature
out = torch.add(ist_feature,st_out)
return out | 502 | 28.588235 | 78 | py |
RGTSI | RGTSI-main/models/FAM/Model_utils.py | import torch
import torch.nn as nn
class L1_Charbonnier_loss(nn.Module):
"""L1 Charbonnierloss."""
def __init__(self):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-6
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt( diff * diff + self.eps)
loss = torch.sum(error)
return loss
class residual_block(nn.Module):
def __init__(self, input_channel = 256, output_channel = 256, bias = False):
super(residual_block, self).__init__()
self.conv1 = nn.Conv2d(in_channels=input_channel,out_channels=input_channel, kernel_size=3, padding=1, bias=bias)
self.conv2 = nn.Conv2d(in_channels=input_channel,out_channels=output_channel, kernel_size=3, padding=1, bias = bias)
self.relu = nn.ReLU(inplace=True)
def forward(self,x):
out = self.relu(self.conv1(x))
out = self.conv2(out)
# out *= 0.1 for bigmodel
out = torch.add(out,x)
return out
def make_residual_block(blocknum=32, input_channel = 64, output_channel = 64, bias = False):
residual_layers = []
#residual_layers.append(residual_block(input_channel=input_channel, output_channel = output_channel,bias=bias))
for i in range(blocknum):
residual_layers.append(residual_block(input_channel=output_channel, output_channel = output_channel, bias = bias))
blockpart_model = nn.Sequential(*residual_layers)
return blockpart_model
def make_downsampling_network(layernum = 2, in_channels = 3, out_channels = 64):
layers = []
layers.append(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, bias=False, padding=1))
for _ in range(layernum-1):
layers.append(nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=2, bias=False,padding=1))
print(layers)
model = nn.Sequential(*layers)
return model
def DOE_downsample_block(input_channelsize):
layers = []
layers.append(
nn.Conv2d(in_channels=input_channelsize, out_channels=64, kernel_size=3, stride=2, padding=1, bias=True))
layers.append(nn.LeakyReLU(inplace=True))
pre_model = nn.Sequential(*layers)
return pre_model
def DOE_upsample_block(in_odd = True, in_channels = 64, out_channels = 64):
layers = []
if in_odd:
layers.append(
nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1,
output_padding=1, bias=True))
else:
layers.append(
nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=0,
bias=True))
layers.append(nn.LeakyReLU(inplace=True))
post_model = nn.Sequential(*layers)
return post_model | 2,849 | 38.041096 | 132 | py |
RGTSI | RGTSI-main/models/FAM/Dynamic_offset_estimator.py | import torch.nn as nn
import torch
from models.FAM.non_local_embedded_gaussian import NONLocalBlock2D
from models.FAM.Model_utils import DOE_downsample_block, DOE_upsample_block
class Dynamic_offset_estimator(nn.Module):
def __init__(self,input_channelsize):
super(Dynamic_offset_estimator, self).__init__()
self.downblock1 = DOE_downsample_block(input_channelsize)
self.downblock2 = DOE_downsample_block(64)
self.downblock3 = DOE_downsample_block(64)
self.attentionblock1 = NONLocalBlock2D(in_channels=64)
self.attentionblock2 = NONLocalBlock2D(in_channels=64)
self.attentionblock3 = NONLocalBlock2D(in_channels=64)
self.upblock1 = DOE_upsample_block(in_channels=64,out_channels=64)
self.upblock2 = DOE_upsample_block(in_channels=64,out_channels=64)
self.upblock3 = DOE_upsample_block(in_channels=64,out_channels=64)
self.channelscaling_block = nn.Conv2d(in_channels= 64, out_channels=input_channelsize, kernel_size=3, padding=1, bias=True)
def forward(self,x):
halfscale_feature = self.downblock1(x)#1/2
quarterscale_feature = self.downblock2(halfscale_feature)#1/4
octascale_feature = self.downblock3(quarterscale_feature)#1/8
octascale_NLout = self.attentionblock1(octascale_feature)
octascale_NLout = torch.add(octascale_NLout, octascale_feature)
octascale_upsampled = self.upblock1(octascale_NLout)
quarterscale_NLout = self.attentionblock2(octascale_upsampled)
quarterscale_NLout = torch.add(quarterscale_NLout, quarterscale_feature)
quarterscale_upsampled = self.upblock2(quarterscale_NLout)
halfscale_NLout = self.attentionblock3(quarterscale_upsampled)
halfscale_NLout = torch.add(halfscale_NLout,halfscale_feature)
halfscale_upsampled = self.upblock3(halfscale_NLout)
out = self.channelscaling_block(halfscale_upsampled)
return out
| 1,981 | 41.170213 | 131 | py |
RGTSI | RGTSI-main/models/FAM/DeformableBlock.py | import torch
import torch.nn as nn
from models.FAM.Dynamic_offset_estimator import Dynamic_offset_estimator
from mmcv.ops.deform_conv import DeformConv2d
from util.util import saveoffset, showpatch
class DeformableConvBlock(nn.Module):
    """Aligns reference features to input features via a deformable convolution.

    Sampling offsets are estimated from the concatenation of both feature
    maps and then used to deform-sample the reference features.
    """

    def __init__(self, input_channels):
        super(DeformableConvBlock, self).__init__()
        self.offset_estimator = Dynamic_offset_estimator(input_channelsize=input_channels)
        # 1 deformable group * 2 coordinates * 9 taps of a 3x3 kernel.
        self.offset_conv = nn.Conv2d(in_channels=input_channels, out_channels=1 * 2 * 9,
                                     kernel_size=3, padding=1, bias=False)
        # NOTE(review): channel count is hard-coded to 768 rather than derived
        # from input_channels — confirm callers always pass 768-channel
        # reference features.
        self.deformconv = DeformConv2d(in_channels=768, out_channels=768, kernel_size=3,
                                       padding=1, bias=False)

    def forward(self, input_features, reference_features):
        # Estimate per-position sampling offsets from both feature maps jointly.
        stacked = torch.cat((input_features, reference_features), dim=1)
        offsets = self.offset_conv(self.offset_estimator(stacked))
        # Returns the reference features aligned to the input features.
        return self.deformconv(x=reference_features, offset=offsets)
RGTSI | RGTSI-main/util/Selfpatch.py | import torch
import torch.nn as nn
class Selfpatch(object):
    """Helpers that cut 3-D (C, H, W) feature maps into stacks of patches."""

    def buildAutoencoder(self, target_img, target_img_2, target_img_3, patch_size=1, stride=1):
        """Extract patch stacks from three aligned (C, H, W) feature maps.

        Returns (patches of target_img_3, patches of target_img,
        1x1 patches of target_img_2).
        """
        nDim = 3
        assert target_img.dim() == nDim, 'target image must be of dimension 3.'
        # Bug fix: torch.cuda.is_available is a function; the original tested
        # the function object itself (always truthy), which selected the CUDA
        # tensor type even on CPU-only machines.
        self.Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
        patches_features = self._extract_patches(target_img, patch_size, stride)
        patches_features_f = self._extract_patches(target_img_3, patch_size, stride)
        patches_on = self._extract_patches(target_img_2, 1, stride)
        return patches_features_f, patches_features, patches_on

    def build(self, target_img, patch_size=5, stride=1):
        """Extract (patch_size x patch_size) patches from a (C, H, W) tensor."""
        nDim = 3
        assert target_img.dim() == nDim, 'target image must be of dimension 3.'
        # Same is_available() fix as in buildAutoencoder.
        self.Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
        patches_features = self._extract_patches(target_img, patch_size, stride)
        return patches_features

    def _build(self, patch_size, stride, C, target_patches, npatches, normalize, interpolate, type):
        """Wrap a patch stack into a conv module whose weights are the patches.

        type == 1 builds a grouped 1x1 encoder conv with L2-normalized
        patches; otherwise a ConvTranspose2d decoder is returned.
        """
        # for each patch, divide by its L2 norm.
        if type == 1:
            enc_patches = target_patches.clone()
            for i in range(npatches):
                enc_patches[i] = enc_patches[i]*(1/(enc_patches[i].norm(2)+1e-8))
            conv_enc = nn.Conv2d(npatches, npatches, kernel_size=1, stride=stride, bias=False, groups=npatches)
            conv_enc.weight.data = enc_patches
            return conv_enc
        # normalize is not needed, it doesn't change the result!
        if normalize:
            raise NotImplementedError
        if interpolate:
            raise NotImplementedError
        else:
            conv_dec = nn.ConvTranspose2d(npatches, C, kernel_size=patch_size, stride=stride, bias=False)
            conv_dec.weight.data = target_patches
            return conv_dec

    def _extract_patches(self, img, patch_size, stride):
        """Unfold a (C, H, W) tensor into (nH*nW, C, patch_size, patch_size)."""
        n_dim = 3
        assert img.dim() == n_dim, 'image must be of dimension 3.'
        kH, kW = patch_size, patch_size
        dH, dW = stride, stride
        input_windows = img.unfold(1, kH, dH).unfold(2, kW, dW)
        i_1, i_2, i_3, i_4, i_5 = input_windows.size(0), input_windows.size(1), input_windows.size(2), input_windows.size(3), input_windows.size(4)
        # Reorder so the patch index comes first, then flatten the grid.
        input_windows = input_windows.permute(1, 2, 0, 3, 4).contiguous().view(i_2*i_3, i_1, i_4, i_5)
        return input_windows
| 2,597 | 37.776119 | 147 | py |
RGTSI | RGTSI-main/util/se_module.py | from torch import nn
import torch
class SELayer(nn.Module):
    """Squeeze-and-Excitation style channel attention (1x1-conv variant).

    Global-average-pools the input to a per-channel descriptor, squeezes it
    through a bottleneck of 1x1 convolutions, and rescales the input with the
    resulting sigmoid gate.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        # Submodule names (avg_pool / fc) preserved so saved state dicts load.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        bottleneck = channel // reduction
        self.fc = nn.Sequential(
            nn.Conv2d(channel, bottleneck, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck, channel, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        n_batch, n_chan = x.size(0), x.size(1)
        # Per-channel gate in (0, 1), broadcast back over the spatial dims.
        gate = self.fc(self.avg_pool(x).view(n_batch, n_chan, 1, 1))
        return x * gate.expand_as(x)
| 950 | 28.71875 | 89 | py |
RGTSI | RGTSI-main/util/util.py | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import random
import inspect, re
import numpy as np
import os
import collections
import math
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import matplotlib.pyplot as plt
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8):
    """Convert the first image of a batched tensor (values in [-1, 1]) into
    an H x W x 3 numpy image of dtype *imtype*."""
    arr = image_tensor[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # Replicate a single-channel image across three channels.
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, then rescale [-1, 1] -> [0, 255].
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(imtype)
def diagnose_network(net, name='network'):
    """Print *name* followed by the mean absolute gradient over all
    parameters of *net* (0.0 when no parameter has a gradient yet)."""
    total = 0.0
    n_with_grad = 0
    for param in net.parameters():
        if param.grad is not None:
            total += torch.mean(torch.abs(param.grad.data))
            n_with_grad += 1
    if n_with_grad > 0:
        total = total / n_with_grad
    print(name)
    print(total)
def binary_mask(in_mask, threshold):
    """Binarize a 2-D mask: 1.0 where the value exceeds *threshold*, else 0.0.

    Bug fix: the original thresholded a freshly-allocated, uninitialized
    ByteTensor instead of *in_mask*, so its output ignored the input values.
    """
    assert in_mask.dim() == 2, "mask must be 2 dimensions"
    output = (in_mask > threshold).float()
    return output
def gussin(v):
    """Build 1024 normalized 2-D Gaussian kernels on a 32x32 grid.

    Row ``i`` of the returned (1024, 32, 32) float64 tensor is a Gaussian
    with std *v* centred at (i // 32, i % 32), normalized to sum to 1.
    (The name "gussin" is a historical typo for "gaussian", kept for
    compatibility with existing callers.)
    """
    # Vectorized rewrite of the original quadruple Python loop (~1M exp
    # calls); same formula, evaluated with NumPy broadcasting.
    grid = np.arange(32)
    # d2[a, b] = (a - b) ** 2
    d2 = (grid[:, None] - grid[None, :]) ** 2
    # dist2[i, k, x, y] = (x - i)^2 + (y - k)^2
    dist2 = d2[:, None, :, None] + d2[None, :, None, :]
    out = np.exp(-dist2 / (2 * v * v)) / (2 * math.pi * v * v)
    out = out.reshape(1024, 32, 32)
    # Normalize each kernel to unit mass, as the original did.
    sums = out.sum(-1).sum(-1)
    out = out / sums[:, None, None]
    return torch.from_numpy(out)
def cal_feat_mask(inMask, conv_layers, threshold):
    """Downsample a binary (1, 1, H, W) mask to feature-map resolution.

    Each of *conv_layers* fixed 4x4 stride-2 average convolutions halves the
    spatial resolution; the averaged result is re-binarized against
    *threshold* (1.0 where above, 0.0 otherwise).
    """
    assert inMask.dim() == 4, "mask must be 4 dimensions"
    assert inMask.size(0) == 1, "the first dimension must be 1 for mask"
    inMask = inMask.float()
    convs = []
    for _ in range(conv_layers):
        conv = nn.Conv2d(1, 1, 4, 2, 1, bias=False)
        # Fixed averaging kernel: every one of the 16 taps weighs 1/16.
        conv.weight.data.fill_(1/16)
        convs.append(conv)
    lnet = nn.Sequential(*convs)
    if inMask.is_cuda:
        lnet = lnet.cuda()
    # No gradients are needed here; the deprecated torch.autograd.Variable
    # wrapper (a no-op since PyTorch 0.4) is replaced by an explicit no_grad.
    with torch.no_grad():
        output = lnet(inMask)
    return (output > threshold).float()
def cal_mask_given_mask_thred(img, mask, patch_size, stride, mask_thred):
    """Classify every sliding patch of *mask* as masked or unmasked.

    A patch counts as masked when the sum of its mask values reaches
    *mask_thred*.  Returns:
      flag              -- LongTensor(N); 1 for masked patches, 0 otherwise
      nonmask_point_idx -- patch indices of the unmasked patches
      flatten_offsets   -- per-unmasked-patch offsets mapping compacted
                           (unmasked-only) indices back to full patch indices
      mask_point_idx    -- patch indices of the masked patches
    """
    assert img.dim() == 3, 'img has to be 3 dimenison!'
    assert mask.dim() == 2, 'mask has to be 2 dimenison!'
    dim = img.dim()
    # math.floor rounds toward negative infinity
    _, H, W = img.size(dim-3), img.size(dim-2), img.size(dim-1)
    # Number of patch positions per axis, and in total.
    nH = int(math.floor((H-patch_size)/stride + 1))
    nW = int(math.floor((W-patch_size)/stride + 1))
    N = nH*nW
    flag = torch.zeros(N).long()
    offsets_tmp_vec = torch.zeros(N).long()
    # Index buffers are over-allocated to N and narrowed afterwards.
    nonmask_point_idx_all = torch.zeros(N).long()
    tmp_non_mask_idx = 0
    mask_point_idx_all = torch.zeros(N).long()
    tmp_mask_idx = 0
    # Visit every patch position once (row-major order).
    for i in range(N):
        h = int(math.floor(i/nW))
        w = int(math.floor(i%nW))
        # print(h, w)
        # Cut out the patch_size x patch_size window at this position.
        mask_tmp = mask[h*stride:h*stride + patch_size,
                w*stride:w*stride + patch_size]
        if torch.sum(mask_tmp) < mask_thred:
            nonmask_point_idx_all[tmp_non_mask_idx] = i
            tmp_non_mask_idx += 1
        else:
            mask_point_idx_all[tmp_mask_idx] = i
            tmp_mask_idx += 1
            flag[i] = 1
            offsets_tmp_vec[i] = -1
    # print(flag) #checked
    # print(offsets_tmp_vec) # checked
    non_mask_num = tmp_non_mask_idx
    mask_num = tmp_mask_idx
    # Trim the over-allocated buffers down to the populated prefix.
    nonmask_point_idx = nonmask_point_idx_all.narrow(0, 0, non_mask_num)
    mask_point_idx=mask_point_idx_all.narrow(0, 0, mask_num)
    # get flatten_offsets
    flatten_offsets_all = torch.LongTensor(N).zero_()
    for i in range(N):
        # Negative running count of masked patches seen up to position i.
        offset_value = torch.sum(offsets_tmp_vec[0:i+1])
        if flag[i] == 1:
            offset_value = offset_value + 1
        # print(i+offset_value)
        flatten_offsets_all[i+offset_value] = -offset_value
    flatten_offsets = flatten_offsets_all.narrow(0, 0, non_mask_num)
    # print('flatten_offsets')
    # print(flatten_offsets) # checked
    return flag, nonmask_point_idx, flatten_offsets, mask_point_idx
def cal_sps_for_Advanced_Indexing(h, w):
    """Return (sp_x, sp_y) LongTensors enumerating the row and column index
    of every cell of an h x w grid in row-major order, for use with
    advanced indexing."""
    # Column indices: 0..w-1 repeated h times.
    sp_y = torch.arange(0, w).long()
    sp_y = torch.cat([sp_y] * h)
    # Row indices: each of 0..h-1 repeated w times.
    row_idx = []
    for r in range(h):
        row_idx.extend([r] * w)
    sp_x = torch.from_numpy(np.array(row_idx))
    return sp_x, sp_y
def save_image(image_numpy, image_path):
    """Write an H x W x C uint8 numpy array to *image_path* via Pillow."""
    Image.fromarray(image_numpy).save(image_path)
def info(object, spacing=10, collapse=1):
    """Print methods and doc strings.

    Takes module, class, list, dictionary, or string.  When *collapse* is
    truthy, each doc string is collapsed onto a single line.
    """
    # Bug fix: `collections.Callable` was removed in Python 3.10; the builtin
    # callable() performs the same check on every version.
    methodList = [e for e in dir(object) if callable(getattr(object, e))]
    processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
    print("\n".join(["%s %s" %
                     (method.ljust(spacing),
                      processFunc(str(getattr(object, method).__doc__)))
                     for method in methodList]))
def varname(p):
    """Best-effort recovery of the variable name passed as *p*.

    Inspects the caller's source line for a ``varname(<identifier>)`` call
    and returns the identifier, or None if it cannot be found.
    """
    caller = inspect.currentframe().f_back
    for src_line in inspect.getframeinfo(caller)[3]:
        match = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', src_line)
        if match:
            return match.group(1)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of array *x*; optionally its shape too."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
def showpatch(imagepatch, modelname, foldername=None, istensor=True):
    """Dump selected channels of a feature-map batch as grayscale PNGs under
    Network_patches/<modelname>/<foldername>/ for visual inspection.

    Expects *imagepatch* shaped (batch, channels, H, W); when *istensor* is
    True it is first detached and moved to the CPU.
    """
    n_batch = imagepatch.shape[0]
    n_channels = imagepatch.shape[1]
    if istensor:
        imagepatch = np.array(imagepatch.cpu().detach())
    folderpath = os.path.join("Network_patches", modelname, foldername)
    print("start visulization {}, channelsize : {}".format(foldername, n_channels))
    # Create the model folder and then the target folder (the top-level
    # "Network_patches" directory is assumed to exist already).
    if not os.path.isdir(os.path.join("Network_patches", modelname)):
        os.mkdir(os.path.join("Network_patches", modelname))
    if not os.path.isdir(folderpath):
        os.mkdir(folderpath)
    for sample_idx in range(n_batch):
        patches = imagepatch[sample_idx]
        # Visualize every 128th channel to keep the output manageable.
        for channel in range(0, n_channels, 128):
            img = regularization_image(patches[channel])
            img = (img * 255).astype(np.uint8)
            plt.imshow(img, 'gray')
            plt.savefig(os.path.join(folderpath, "image{}.png".format(channel)))
def saveoffset(offsetbatch, modelname, foldername, istensor=False):
    """Convert a batch of deformable-conv offsets into (..., k, 2) coordinate
    pairs, save the raw per-row offsets as .npy files under
    Network_patches/<modelname>/<foldername>/, and return the pair array.

    NOTE(review): the transpose/squeeze bookkeeping only lines up for a
    batch size of 1 — confirm callers never pass larger batches.
    """
    if istensor:
        offsetbatch = np.array(offsetbatch.cpu().detach())
    # (B, 2k, H, W) -> (B, H, W, 2k), then drop singleton dimensions.
    offsetbatch = np.transpose(offsetbatch, (0, 2, 3, 1))
    offsetbatch = np.squeeze(offsetbatch)
    sizetemp = offsetbatch.shape[:-1]
    offset_coord = np.zeros((*sizetemp, int(offsetbatch.shape[-1] / 2), 2), dtype=np.float32)
    for y in range(offset_coord.shape[0]):
        for x in range(offset_coord.shape[1]):
            for i in range(offset_coord.shape[2]):
                # Each consecutive pair of values is one 2-vector offset.
                offset_coord[y, x, i] = offsetbatch[y, x, i*2:(i+1)*2]
    folderpath = os.path.join("Network_patches", modelname, foldername)
    # Bug fix: os.mkdir fails when the parent directories are missing;
    # makedirs with exist_ok also avoids a check-then-create race.
    os.makedirs(folderpath, exist_ok=True)
    # Bug fix: the original contained this save loop twice, writing every
    # file a second time under the same name; once is enough.
    for i in range(offsetbatch.shape[0]):
        np.save(os.path.join(folderpath, "offset_{}.npy".format(i)), offsetbatch[i])
    return offset_coord
def regularization_image(image):
    """Min-max normalize *image* so its values span the range [0, 1]."""
    shifted = image - np.min(image)
    return shifted / np.max(shifted)
def mkdirs(paths):
    """Create one directory, or each directory in a list of paths."""
    if isinstance(paths, list) and not isinstance(paths, str):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)


def mkdir(path):
    """Create *path* (including missing parents) if it does not exist yet.

    exist_ok=True removes the check-then-create (TOCTOU) race of the
    original ``if not os.path.exists: os.makedirs`` form.
    """
    os.makedirs(path, exist_ok=True)
| 8,839 | 29.909091 | 97 | py |
RGTSI | RGTSI-main/data/dataprocess.py | import random
import torch
import torch.utils.data
from PIL import Image
from glob import glob
import numpy as np
import torchvision.transforms as transforms
class DataProcess(torch.utils.data.Dataset):
    """Paired image dataset yielding (degraded, structure, mask, reference).

    Images under de_root / st_root / ref_root are index-aligned; the mask is
    drawn at random from input_mask_root on every access.
    """

    def __init__(self, de_root, st_root, input_mask_root, ref_root, opt, train=True):
        super(DataProcess, self).__init__()
        self.img_transform = transforms.Compose([
            transforms.Resize((opt.fineSize, opt.fineSize)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        # Masks are not normalized: their values are just 0 or 1.
        self.mask_transform = transforms.Compose([
            transforms.Resize((opt.fineSize, opt.fineSize)),
            transforms.ToTensor()
        ])
        self.Train = False
        self.opt = opt
        # Bug fix: initialize the path lists unconditionally so that
        # __len__/__getitem__ and the N_mask computation below do not raise
        # AttributeError when train=False.
        self.de_paths = []
        self.st_paths = []
        self.mask_paths = []
        self.ref_paths = []
        if train:
            self.de_paths = sorted(glob('{:s}/*'.format(de_root), recursive=True))
            self.st_paths = sorted(glob('{:s}/*'.format(st_root), recursive=True))
            self.mask_paths = sorted(glob('{:s}/*'.format(input_mask_root), recursive=True))
            self.ref_paths = sorted(glob('{:s}/*'.format(ref_root), recursive=True))
            self.Train = True
        self.N_mask = len(self.mask_paths)
        print(self.N_mask)

    def __getitem__(self, index):
        """Return (degraded, structure, mask, reference) tensors for *index*;
        the mask is sampled uniformly at random from the mask pool."""
        de_img = Image.open(self.de_paths[index])
        st_img = Image.open(self.st_paths[index])
        ref_img = Image.open(self.ref_paths[index])
        mask_img = Image.open(self.mask_paths[random.randint(0, self.N_mask - 1)])
        de_img = self.img_transform(de_img.convert('RGB'))
        st_img = self.img_transform(st_img.convert('RGB'))
        ref_img = self.img_transform(ref_img.convert('RGB'))
        mask_img = self.mask_transform(mask_img.convert('RGB'))
        return de_img, st_img, mask_img, ref_img

    def __len__(self):
        # Dataset length follows the degraded-image list.
        return len(self.de_paths)
| 1,920 | 36.666667 | 92 | py |
pygsp | pygsp-master/doc/conf.py | # -*- coding: utf-8 -*-
import pygsp
# Sphinx configuration for the PyGSP documentation build.
# Extensions bundled with Sphinx itself.
extensions = [
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'sphinx.ext.mathjax',
    'sphinx.ext.inheritance_diagram',
]
# Pull API documentation from docstrings.
extensions.append('sphinx.ext.autodoc')
autodoc_default_options = {
    'members': True,
    'undoc-members': True,
    'member-order': 'groupwise', # alphabetical, groupwise, bysource
}
# Cross-reference objects documented by related projects.
extensions.append('sphinx.ext.intersphinx')
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://numpy.org/doc/stable', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
    'matplotlib': ('https://matplotlib.org/stable', None),
    'pyunlocbox': ('https://pyunlocbox.readthedocs.io/en/stable', None),
    'networkx': ('https://networkx.org/documentation/stable', None),
    'graph_tool': ('https://graph-tool.skewed.de/static/doc', None),
}
# NumPy-style docstring support.
extensions.append('numpydoc')
numpydoc_show_class_members = False
numpydoc_use_plots = True  # Add the plot directive whenever mpl is imported.
# Render matplotlib plots embedded in docstrings and examples.
extensions.append('matplotlib.sphinxext.plot_directive')
plot_include_source = True
plot_html_show_source_link = False
plot_html_show_formats = False
plot_working_directory = '.'
plot_rcparams = {
    'figure.figsize': (10, 4)
}
# Code implicitly available to every plot directive.
plot_pre_code = """
import numpy as np
from pygsp import graphs, filters, utils, plotting
"""
# Build the example gallery from the examples/ directory.
extensions.append('sphinx_gallery.gen_gallery')
sphinx_gallery_conf = {
    'examples_dirs': '../examples',
    'gallery_dirs': 'examples',
    'filename_pattern': '/',
    'reference_url': {'pygsp': None},
    'backreferences_dir': 'backrefs',
    'doc_module': 'pygsp',
    'show_memory': True,
}
# "Copy" button on code blocks; the >>> prompt is stripped on copy.
extensions.append('sphinx_copybutton')
copybutton_prompt_text = ">>> "
# BibTeX citation support.
extensions.append('sphinxcontrib.bibtex')
bibtex_bibfiles = ['references.bib']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
# Project metadata; the version tracks the installed pygsp package.
project = 'PyGSP'
version = pygsp.__version__
release = pygsp.__version__
copyright = 'EPFL LTS2'
pygments_style = 'sphinx'
# HTML output uses the Read the Docs theme.
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    'navigation_depth': 2,
}
# LaTeX/PDF output settings.
latex_elements = {
    'papersize': 'a4paper',
    'pointsize': '10pt',
}
latex_documents = [
    ('index', 'pygsp.tex', 'PyGSP documentation',
     'EPFL LTS2', 'manual'),
]
| 2,259 | 25.27907 | 77 | py |
vad-sli-asr | vad-sli-asr-master/scripts/exp_asr-eval.py | import os
import pandas as pd
import torchaudio
from datasets import Dataset
from helpers.asr import configure_w2v2_for_inference
from jiwer import wer, cer
# (model checkpoint or HuggingFace repo, evaluation test set) pairs.
# Bug fix: a comma was missing after the lm/b-10 entry, which made Python
# parse the following parenthesized tuple as a call on the previous one and
# raise "TypeError: 'tuple' object is not callable" at import time.
EVAL_MODELS_DATASETS = [
    # Evaluation on the same test set using model trained using different amounts of data
    ("data/exps/asr/checkpoints/train-100", "data/exps/asr/datasets/test.tsv"),
    ("data/exps/asr/checkpoints/train-80", "data/exps/asr/datasets/test.tsv"),
    ("data/exps/asr/checkpoints/train-60", "data/exps/asr/datasets/test.tsv"),
    ("data/exps/asr/checkpoints/train-40", "data/exps/asr/datasets/test.tsv"),
    ("data/exps/asr/checkpoints/train-20", "data/exps/asr/datasets/test.tsv"),
    ("data/exps/asr/checkpoints/train-10", "data/exps/asr/datasets/test.tsv"),
    ("data/exps/asr/checkpoints/train-05", "data/exps/asr/datasets/test.tsv"),
    ("data/exps/asr/checkpoints/train-01", "data/exps/asr/datasets/test.tsv"),
    # Baseline model with no additional fine-tuning
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/test.tsv"),
    # Cross-validation on 10 different train-test splits with models trained using only
    # 60% of training split and no language model
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-1", "data/exps/asr/datasets/bootstrap-1-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-2", "data/exps/asr/datasets/bootstrap-2-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-3", "data/exps/asr/datasets/bootstrap-3-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-4", "data/exps/asr/datasets/bootstrap-4-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-5", "data/exps/asr/datasets/bootstrap-5-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-6", "data/exps/asr/datasets/bootstrap-6-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-7", "data/exps/asr/datasets/bootstrap-7-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-8", "data/exps/asr/datasets/bootstrap-8-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-9", "data/exps/asr/datasets/bootstrap-9-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/no-lm/b-10", "data/exps/asr/datasets/bootstrap-10-test20.tsv"),
    # Cross-validation on 10 different train-test splits with models trained using only
    # 60% of training split and a 2-gram language model
    ("data/exps/asr/checkpoints/bootstrap/lm/b-1", "data/exps/asr/datasets/bootstrap-1-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-2", "data/exps/asr/datasets/bootstrap-2-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-3", "data/exps/asr/datasets/bootstrap-3-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-4", "data/exps/asr/datasets/bootstrap-4-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-5", "data/exps/asr/datasets/bootstrap-5-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-6", "data/exps/asr/datasets/bootstrap-6-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-7", "data/exps/asr/datasets/bootstrap-7-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-8", "data/exps/asr/datasets/bootstrap-8-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-9", "data/exps/asr/datasets/bootstrap-9-test20.tsv"),
    ("data/exps/asr/checkpoints/bootstrap/lm/b-10", "data/exps/asr/datasets/bootstrap-10-test20.tsv"),
    # Baseline model with no additional fine-tuning
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-1-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-2-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-3-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-4-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-5-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-6-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-7-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-8-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-9-test20.tsv"),
    ("facebook/wav2vec2-large-robust-ft-swbd-300h", "data/exps/asr/datasets/bootstrap-10-test20.tsv")
]

# Accumulates one result row per (model, test set) pair.
EVAL_RESULTS = []
def read_clip(batch):
    """Load the waveform for one dataset row into batch['speech'].

    torchaudio.load returns (waveform, sample_rate); only the waveform is
    kept.
    """
    clip_waveform, _sample_rate = torchaudio.load(batch['path'])
    batch['speech'] = clip_waveform
    return batch
def make_all_lowercase(batch):
    """Lowercase the reference sentence and the model transcription so that
    WER/CER scoring is case-insensitive."""
    for key in ("sentence", "transcription"):
        batch[key] = batch[key].lower()
    return batch
# Evaluate every (model, test set) pair and accumulate one WER/CER row each.
for model_path, testset_path in EVAL_MODELS_DATASETS:
    print(f"Reading in data from {testset_path} ...")
    test_ds = Dataset.from_pandas(pd.read_csv(testset_path, sep = '\t'))
    test_ds = test_ds.map(read_clip)
    _, processor, transcribe_speech = configure_w2v2_for_inference(model_path)
    print(f"Obtaining predictions using model from {model_path} ...")
    # Drop the raw audio after transcription to keep the dataset small.
    test_ds = test_ds.map(transcribe_speech, remove_columns=["speech"])
    # Lowercase both columns so scoring is case-insensitive.
    test_ds = test_ds.map(make_all_lowercase)
    EVAL_RESULTS.append({
        "model" : os.path.basename(model_path),
        # True when the processor decodes with a language model.
        "model_lm" : type(processor).__name__ == 'Wav2Vec2ProcessorWithLM',
        "testset" : os.path.basename(testset_path),
        "wer" : round(wer(test_ds['sentence'], test_ds['transcription']), 3),
        "cer" : round(cer(test_ds['sentence'], test_ds['transcription']), 3)
    })
# Persist all rows as a single CSV of evaluation results.
results_df = pd.DataFrame(EVAL_RESULTS)
results_df.to_csv("data/exps/asr/asr_wer-csr.csv", index=False)
print("Results written to data/exps/asr/asr_wer-csr.csv")
| 5,752 | 58.309278 | 105 | py |
vad-sli-asr | vad-sli-asr-master/scripts/run_asr-by-w2v2.py | from argparse import ArgumentParser
from datasets import Dataset
from helpers.asr import configure_w2v2_for_inference
from transformers import logging
import pandas as pd
import pympi.Elan as Elan
import os
import re
import torchaudio
# Command-line interface for transcribing annotated regions of a wav file.
parser = ArgumentParser(
    prog='run_asr-by-w2v2',
    description='Run automatic speech recognition (ASR) using wav2vec 2.0',
)
parser.add_argument('repo_path_or_name', help = "Pre-trained wav2vec 2.0 model, local path or HuggingFace repo name")
parser.add_argument('wav_file', help = "wav file to perform ASR on")
parser.add_argument('--roi_tier', default="_sli", help = "Tier containing speech regions of interest to transcribe")
parser.add_argument('--roi_filter', default="eng", help = "Regular expression to filter regions of interest")
parser.add_argument('--asr_tier', default="_asr", help = "Tier to write transcriptions to")
parser.add_argument('--overwrite', help = "overwrite _asr tier on existing .eaf file?", dest='overwrite', action='store_true')
parser.add_argument('--cache_dir', default="tmp/cache", help = "Directory for downloading pre-trained models")
parser.set_defaults(overwrite=False)
args = parser.parse_args()
# 40 = errors only (HuggingFace Transformers verbosity).
logging.set_verbosity(40)
assert os.path.exists(args.wav_file), f"Specified wav file does not exist: {args.wav_file}"
# The ELAN sidecar file must sit next to the wav file with the same stem.
eaf_path = os.path.splitext(args.wav_file)[0] + ".eaf"
eaf_exists = os.path.exists(eaf_path)
assert eaf_exists is True, f"Expected eaf file does not exist at: {eaf_path}"
eaf_data = Elan.Eaf(file_path=eaf_path if eaf_exists else None)
eaf_tiers = eaf_data.tiers.keys()
if args.asr_tier not in eaf_data.tiers.keys():
    # Add _asr tier if it doesn't already exist
    eaf_data.add_tier(args.asr_tier)
else:
    # If _asr tier already exists, check if we should overwrite
    # If overwrite, clear the tier first
    # If not, exit script
    if args.overwrite is True:
        eaf_data.remove_all_annotations_from_tier(args.asr_tier, clean=True)
    else:
        print(f"Skipping ASR on {eaf_path}, _asr tier already exists and overwite is set to False (use --overwrite to set to True)")
        exit()
# All conditions met to actually do ASR
waveform, sample_rate = torchaudio.load(args.wav_file)
# wav2vec 2.0 expects 16 kHz input; resample if necessary.
if sample_rate != 16_000:
    print("Resampling audio to 16 kHz ...")
    samp_to_16k = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16_000)
    waveform = samp_to_16k(waveform)
def get_speech_excerpts(batch):
    """Attach the audio samples for one region of interest to the batch.

    Millisecond annotation times are converted to sample indices at the
    (resampled) 16 kHz rate — 16 samples per millisecond — and sliced out of
    the module-level `waveform`.
    """
    first_sample = batch["start_ms"] * 16
    last_sample = batch["end_ms"] * 16
    batch["speech"] = waveform[:, first_sample:last_sample].squeeze().numpy()
    return batch
_, _, transcribe_speech = configure_w2v2_for_inference(args.repo_path_or_name, cache_dir=args.cache_dir)
# Keep only the annotations whose label matches the regex filter.
roi_annots = eaf_data.get_annotation_data_for_tier(args.roi_tier)
roi_annots = [ r for r in roi_annots if bool(re.search(args.roi_filter, r[2])) ]
roi_dataset = pd.DataFrame(roi_annots, columns=['start_ms', 'end_ms', 'annotation'])
roi_dataset = Dataset.from_pandas(roi_dataset).remove_columns(['annotation'])
print("Gathering speech regions into a dataset ...")
roi_dataset = roi_dataset.map(get_speech_excerpts)
print("Running ASR on each region of interest ...")
roi_dataset = roi_dataset.map(transcribe_speech)
# Write one annotation per region; non-'eng' regions get an empty value.
for index, region in enumerate(roi_annots):
    start_ms, end_ms, annot = region
    annot = roi_dataset['transcription'][index] if annot == 'eng' else ''
    eaf_data.add_annotation(args.asr_tier, start=start_ms, end=end_ms, value=annot)
eaf_data.to_file(eaf_path)
print(f"Transcribed regions written to {args.asr_tier} tier in {eaf_path}")
| 3,610 | 35.11 | 132 | py |
vad-sli-asr | vad-sli-asr-master/scripts/run_sli-by-sblr.py | from argparse import ArgumentParser
from speechbrain.pretrained import EncoderClassifier
from tqdm import tqdm
import pickle
import pympi.Elan as Elan
import os
import torch
import torchaudio
# Command-line interface: classify each VAD region's spoken language.
parser = ArgumentParser(
    prog='run_sli-by-sblr',
    description='Spoken language identification (SLI) using SpeechBrain embeddings as input to a logistic regression classifier.',
)
parser.add_argument('logreg_pkl', help = "pickle file containing a trained logistic regression classifier")
parser.add_argument('wav_file', help = "wav file to perform SLI on")
parser.add_argument('--vad_tier', default="_vad", help = "Tier containing speech regions to classify")
parser.add_argument('--sli_tier', default="_sli", help = "Tier to write classified speech regions")
parser.add_argument('--overwrite', help = "overwrite _vad tier on existing .eaf file?", dest='overwrite', action='store_true')
parser.add_argument('--rm_vad_tier', help = "remove _vad tier after finishing SLI task", dest='rm_vad_tier', action='store_true')
parser.add_argument('--cache_dir', default="tmp/cache", help = "Directory for downloading pre-trained models")
parser.set_defaults(overwrite=False, rm_vad_tier=False)
args = parser.parse_args()
assert os.path.exists(args.logreg_pkl), f"Pickle file does not exist at: {args.logreg_pkl}"
assert os.path.exists(args.wav_file), f"Specified wav file does not exist: {args.wav_file}"
# The ELAN sidecar file must sit next to the wav file with the same stem.
eaf_path = os.path.splitext(args.wav_file)[0] + ".eaf"
eaf_exists = os.path.exists(eaf_path)
assert eaf_exists is True, f"Expected eaf file does not exist at: {eaf_path}"
eaf_data = Elan.Eaf(file_path=eaf_path if eaf_exists else None)
eaf_tiers = eaf_data.tiers.keys()
assert args.vad_tier in eaf_tiers, f"VAD tier '{args.vad_tier}' does not exist in {eaf_path}"
if args.sli_tier not in eaf_data.tiers.keys():
    # Add _sli tier if it doesn't already exist
    eaf_data.add_tier(args.sli_tier)
else:
    # If _sli tier already exists, check if we should overwrite
    # If overwrite, clear the tier first
    # If not, exit script
    if args.overwrite is True:
        eaf_data.remove_all_annotations_from_tier(args.sli_tier, clean=True)
    else:
        print(f"Skipping SLI on {eaf_path}, _sli tier already exists and overwite is set to False (use --overwrite to set to True)")
        exit()
# All conditions met to actually do SLI
waveform, sample_rate = torchaudio.load(args.wav_file)
# SpeechBrain's language-ID model expects 16 kHz input; resample if needed.
if sample_rate != 16_000:
    print("Resampling audio to 16 kHz ...")
    samp_to_16k = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16_000)
    waveform = samp_to_16k(waveform)
# NOTE(review): unpickling executes arbitrary code — only load classifier
# files from trusted sources.
sb_embd = EncoderClassifier.from_hparams(source="speechbrain/lang-id-voxlingua107-ecapa", savedir=args.cache_dir)
sli_clf = pickle.load(open(args.logreg_pkl, 'rb'))
vad_regions = eaf_data.get_annotation_data_for_tier(args.vad_tier)
print("Starting spoken language identification ...")
for start_ms, end_ms, _ in tqdm(vad_regions):
    # Include up to 250 ms of leading context (clamped at 0);
    # 16 samples per millisecond at 16 kHz.
    start_sample = max(0, start_ms - 250) * 16
    end_sample = end_ms * 16
    clip = waveform[:, start_sample:end_sample]
    # 256-dimensional utterance embedding fed to the logistic regression.
    emb = sb_embd.encode_batch(clip).reshape((1, 256))
    lang = sli_clf.predict(emb)[0]
    eaf_data.add_annotation(args.sli_tier, start=start_ms, end=end_ms, value=lang)
if args.rm_vad_tier is True:
    eaf_data.remove_tier(args.vad_tier)
eaf_data.to_file(eaf_path)
print(f"Identified languages written to {args.sli_tier} tier in {eaf_path}")
| 3,440 | 36.813187 | 132 | py |
vad-sli-asr | vad-sli-asr-master/scripts/train_asr-by-w2v2-ft.py | import json
import math
import os
import torch
from argparse import ArgumentParser
from datasets import load_metric
from helpers.asr import (
configure_lm,
configure_w2v2_for_training,
DataCollatorCTCWithPadding,
dataset_from_dict,
get_metrics_computer,
preprocess_text,
process_data
)
from transformers import (
EarlyStoppingCallback,
logging,
Trainer,
TrainingArguments
)
# Command-line interface for fine-tuning a wav2vec 2.0 checkpoint on TSV data.
parser = ArgumentParser(
    prog='train_asr-by-w2v2-ft',
    description='Train an ASR model by fine-tuning a pre-trained wav2vec 2.0 model',
)
parser.add_argument('repo_path_or_name', help = "Pre-trained wav2vec 2.0 model, local path or HuggingFace repo name")
parser.add_argument('output_dir', help = "The output directory where the model predictions and checkpoints will be written")
parser.add_argument('train_tsv', help = "Training data. Two-column tab-separated file with 'path' (path to wav file) and 'sentence' (transcription)")
parser.add_argument('eval_tsv', help = "Evaluation data. Two-column tab-separated file with 'path' (path to wav file) and 'sentence' (transcription)")
parser.add_argument('--use_target_vocab', default=True, help='Use a vocabulary created from target transcriptions (training and evaluation)')
parser.add_argument('--lm_arpa', default=None, help='Path to language model .arpa file (optional)')
parser.add_argument('--hft_logging', default=40, help='HuggingFace Transformers verbosity level (40 = errors, 30 = warnings, 20 = info, 10 = debug)')
args = parser.parse_args()
# Turns out bool('False') evaluates to True in Python (only bool('') is False)
args.use_target_vocab = False if args.use_target_vocab == 'False' else True
logging.set_verbosity(args.hft_logging)
# For debugging
# args.repo_path_or_name = "facebook/wav2vec2-large-robust-ft-swbd-300h"
# args.train_tsv = 'data/train-asr/train.tsv'
# args.eval_tsv = 'data/train-asr/test.tsv'
# args.output_dir = 'data/asr-temp'
# args.use_target_vocab = False
os.makedirs(args.output_dir, exist_ok=True)
# Load train/eval splits from the TSV files into a DatasetDict.
dataset = dataset_from_dict({
    'train': args.train_tsv,
    'eval' : args.eval_tsv
})
# wav2vec 2.0 configuration: no time masking, gradient checkpointing to save
# memory, and mean-reduced CTC loss.
w2v2_config = {
    "feature_extractor" : {
        "return_attention_mask" : True
    },
    "model_kwargs" : {
        "mask_time_prob" : 0,
        "gradient_checkpointing" : True,
        "ctc_loss_reduction" : "mean"
    }
}
dataset, vocab_dict = preprocess_text(dataset)
model, processor = configure_w2v2_for_training(dataset, args, vocab_dict, w2v2_config)
# Optionally attach an n-gram language model to the processor.
if args.lm_arpa is not None:
    processor = configure_lm(processor, args.lm_arpa, args.output_dir)
dataset = process_data(dataset, processor)
# Set logging to 'INFO' or else progress bar gets hidden
logging.set_verbosity(20)
n_epochs = 50
batch_size = 32
# How many epochs between evals?
eps_b_eval = 5
# Save/Eval/Logging steps
sel_steps = int(math.ceil(len(dataset['train']) / batch_size) * eps_b_eval)
training_args = TrainingArguments(
    output_dir=args.output_dir,
    group_by_length=True,
    per_device_train_batch_size=batch_size,
    gradient_accumulation_steps=1,
    evaluation_strategy="steps",
    num_train_epochs=n_epochs,
    fp16=True if torch.cuda.is_available() else False,
    seed=7135,
    save_steps=sel_steps,
    eval_steps=sel_steps,
    logging_steps=sel_steps,
    learning_rate=1e-4,
    # Warm up: 100 steps or 10% of total optimisation steps
    # NOTE(review): 0.1 * sel_steps * n_epochs is 10% of (steps-per-5-epochs
    # x epochs), not of the total optimisation steps — confirm intended.
    warmup_steps=min(100, int(0.1 * sel_steps * n_epochs)),
    report_to="none",
    # 2022-03-09: manually set optmizier to PyTorch implementation torch.optim.AdamW
    # 'adamw_torch' to get rid of deprecation warning for default optimizer 'adamw_hf'
    optim="adamw_torch",
    metric_for_best_model="wer",
    save_total_limit=5,
    load_best_model_at_end = True,
    # Lower WER is better
    greater_is_better=False
)
# Trainer with CTC-aware padding collator, WER metric, and early stopping
# after 3 evaluations without improvement.
trainer = Trainer(
    model=model,
    data_collator=DataCollatorCTCWithPadding(processor=processor, padding=True),
    args=training_args,
    compute_metrics=get_metrics_computer(processor=processor),
    train_dataset=dataset['train'],
    eval_dataset=dataset['eval'],
    tokenizer=processor.feature_extractor,
    callbacks = [EarlyStoppingCallback(early_stopping_patience=3)]
)
print("Training model ...")
trainer.train()
| 4,228 | 31.037879 | 150 | py |
vad-sli-asr | vad-sli-asr-master/scripts/run_vad-by-pyannote.py | from pyannote.audio.pipelines import VoiceActivityDetection
from argparse import ArgumentParser
import pympi.Elan as Elan
import os
import sys
import torch
from helpers.eaf import get_eaf_file
# CLI: detect speech regions in a wav file with pyannote and write them as
# empty annotations on a dedicated tier of the matching ELAN (.eaf) sidecar.
parser = ArgumentParser(
    prog='run_vad-by-pyannote',
    description='Voice activity detection with PyAnnote. Writes intervals onto _vad tier in sidecar file.',
)
parser.add_argument('wav_file', help = "wav file to perform VAD on")
parser.add_argument('--eaf_file', help = "write to sidecar file (e.g. myfile.eaf for myfile.wav) unless specified.")
parser.add_argument('--vad_tier', default="_vad", help = "Tier containing speech regions to classify")
parser.add_argument('--overwrite', help = "overwrite _vad tier on existing .eaf file?", dest='overwrite', action='store_true')
parser.set_defaults(overwrite=False)
args = parser.parse_args()
assert os.path.exists(args.wav_file), f"Specified wav file does not exist: {args.wav_file}"
# Locate (or plan to create) the .eaf sidecar next to the wav file.
eaf_path, eaf_exists = get_eaf_file(args.wav_file)
# Eaf(file_path=None) creates a fresh, empty ELAN document.
eaf_data = Elan.Eaf(file_path=eaf_path if eaf_exists else None)
if eaf_exists is not True:
    # Add wav file as linked file to newly created eaf object
    eaf_data.add_linked_file(args.wav_file, relpath=os.path.basename(args.wav_file))
    # Remove 'default' tier from newly created eaf object
    eaf_data.remove_tier('default')
if args.vad_tier not in eaf_data.tiers.keys():
    # Add _vad tier if it doesn't already exist
    eaf_data.add_tier(args.vad_tier)
else:
    # If _vad tier already exists, check if we should overwrite
    # If overwrite, clear the tier first
    # If not, exit script
    if args.overwrite is True:
        eaf_data.remove_all_annotations_from_tier(args.vad_tier, clean=True)
    else:
        print(f"Skipping VAD on {eaf_path}, _vad tier already exists and overwite is set to False (use --overwrite to set to True)")
        exit()
print(f"Detecting speech regions in {args.wav_file} ...")
# Instantiate the pyannote segmentation pipeline; 0.5 on/off thresholds with
# zero minimum durations keep every detected region unfiltered.
vad = VoiceActivityDetection({
    "checkpoint":"pyannote/segmentation",
    "map_location": torch.device("cuda" if torch.cuda.is_available() else "cpu")
}) \
    .instantiate({
    "onset": 0.5,
    "offset": 0.5,
    "min_duration_on": 0.0,
    "min_duration_off": 0.0
})
segments = vad(args.wav_file)
for segment in segments.itersegments():
    # pyannote reports seconds; ELAN annotations use milliseconds.
    start_ms = int(segment.start * 1000)
    end_ms = int(segment.end * 1000)
    eaf_data.add_annotation(args.vad_tier, start=start_ms, end=end_ms, value='')
eaf_data.to_file(eaf_path)
print(f"Detected speech regions written to {args.vad_tier} tier in {eaf_path}")
| 2,545 | 32.064935 | 132 | py |
vad-sli-asr | vad-sli-asr-master/scripts/run_vad-by-silero.py | from argparse import ArgumentParser
import pympi.Elan as Elan
import os
import sys
import torch
import torchaudio
from helpers.eaf import get_eaf_file
parser = ArgumentParser(
prog='run_vad-by-silero',
description='Voice activity detection with Silero. Writes intervals onto _vad tier in sidecar file.',
)
parser.add_argument('wav_file', help = "wav file to perform VAD on")
parser.add_argument('--eaf_file', help = "write to sidecar file (e.g. myfile.eaf for myfile.wav) unless specified.")
parser.add_argument('--vad_tier', default="_vad", help = "Tier containing speech regions to classify")
parser.add_argument('--overwrite', help = "overwrite _vad tier on existing .eaf file?", dest='overwrite', action='store_true')
parser.add_argument('--cache_dir', default="tmp/cache", help = "Directory for downloading pre-trained models")
parser.set_defaults(overwrite=False)
args = parser.parse_args()
assert os.path.exists(args.wav_file), f"Specified wav file does not exist: {args.wav_file}"
eaf_path, eaf_exists = get_eaf_file(args.wav_file)
eaf_data = Elan.Eaf(file_path=eaf_path if eaf_exists else None)
eaf_tiers = eaf_data.tiers.keys()
if eaf_exists is not True:
# Add wav file as linked file to newly created eaf object
eaf_data.add_linked_file(args.wav_file, relpath=os.path.basename(args.wav_file))
# Remove 'default' tier from newly created eaf object
eaf_data.remove_tier('default')
if args.vad_tier not in eaf_data.tiers.keys():
# Add _vad tier if it doesn't already exist
eaf_data.add_tier(args.vad_tier)
else:
# If _vad tier already exists, check if we should overwrite
# If overwrite, clear the tier first
# If not, exit script
if args.overwrite is True:
eaf_data.remove_all_annotations_from_tier(args.vad_tier, clean=True)
else:
print(f"Skipping VAD on {eaf_path}, _vad tier already exists and overwite is set to False (use --overwrite to set to True)")
exit()
# All conditions met to actually do VAD
waveform, sample_rate = torchaudio.load(args.wav_file)
if sample_rate != 16_000:
print("Resampling audio to 16 kHz ...")
samp_to_16k = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16_000)
waveform = samp_to_16k(waveform)
print("Loading VAD model ...")
torch.hub.set_dir(args.cache_dir)
vad_model, vad_utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad', force_reload=False)
(get_speech_timestamps, save_audio, read_audio, VADIterator, collect_chunks) = vad_utils
print(f"Detecting speech regions in {args.wav_file} ...")
speech_timestamps = get_speech_timestamps(waveform, vad_model, sampling_rate=16_000)
for ts in speech_timestamps:
# Basically (sample_number/16000) * 1000 or just sample_number/16
start_ms, end_ms = [ts['start']/16, ts['end']/16]
eaf_data.add_annotation(args.vad_tier, start=round(start_ms), end=round(end_ms), value='')
eaf_data.to_file(eaf_path)
print(f"Detected speech regions written to {args.vad_tier} tier in {eaf_path}")
| 3,051 | 34.488372 | 132 | py |
vad-sli-asr | vad-sli-asr-master/scripts/helpers/asr.py | import json
import numpy as np
import glob
import os
import pandas as pd
import re
import torch
from dataclasses import dataclass
from datasets import (
Audio,
Dataset,
DatasetDict,
disable_progress_bar,
enable_progress_bar,
load_metric
)
from typing import Dict, List, Union
from pyctcdecode import build_ctcdecoder
from transformers import (
AutoConfig,
AutoProcessor,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForCTC,
Wav2Vec2Processor,
Wav2Vec2ProcessorWithLM
)
def dataset_from_dict(dataset_dict):
    """Load each split's TSV file into a Dataset and collect them in a DatasetDict.

    dataset_dict maps split names (e.g. 'train', 'eval') to TSV file paths.
    """
    splits = DatasetDict()
    for split_name, tsv_path in dataset_dict.items():
        frame = pd.read_csv(tsv_path, sep='\t')
        splits[split_name] = Dataset.from_pandas(frame)
    return splits
# Punctuation/symbols stripped from transcripts before vocabulary creation.
# Raw string fixes the original's invalid escape sequences ('\,' etc.), which
# raise SyntaxWarning/DeprecationWarning on modern Python; `re` caches the
# compiled pattern, so re.sub stays cheap across calls.
CHARS_TO_IGNORE_REGEX = r'[,?.!\-;:"“%‘”�]'

def remove_special_characters(batch):
    """Strip ignorable punctuation from batch['sentence'] in place.

    Args:
        batch: mapping with a 'sentence' string entry.
    Returns:
        The same mapping, with 'sentence' cleaned.
    """
    batch["sentence"] = re.sub(CHARS_TO_IGNORE_REGEX, '', batch["sentence"])
    return batch
def extract_all_chars(batch):
    """Return the sorted character inventory of a batch of sentences.

    Sentences are joined with spaces (so ' ' always appears between them);
    output follows the datasets.map batched convention: single-element lists.
    """
    joined = " ".join(batch["sentence"])
    unique_chars = sorted(set(joined))
    return {"vocab": [unique_chars], "all_text": [joined]}
def create_vocab(dataset_dict, word_delimiter_token="|", special_tokens=("<s>", "</s>", "<unk>", "<pad>")):
    """Build a character -> id vocabulary over every split in dataset_dict.

    The space character is remapped to word_delimiter_token, and the special
    tokens are appended with the next free ids.

    Args:
        dataset_dict: mapping of split name -> datasets.Dataset with a
            'sentence' column.
        word_delimiter_token: token standing in for the space character.
        special_tokens: extra tokens appended after the character inventory.
            (Fixed: now an immutable tuple instead of a mutable list default.)
    Returns:
        dict mapping token -> integer id, ordered by id.
    """
    vocab_list = []
    for ds_name, ds_data in dataset_dict.items():
        vocab = ds_data.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=ds_data.column_names)
        vocab_list.extend(vocab["vocab"][0])
    vocab_list = sorted(set(vocab_list))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    # CTC tokenizers use an explicit word delimiter instead of a literal space.
    vocab_dict[word_delimiter_token] = vocab_dict[" "]
    del vocab_dict[" "]
    for t in special_tokens:
        vocab_dict[t] = len(vocab_dict)
    return dict(sorted(vocab_dict.items(), key=lambda item: item[1]))
def preprocess_text(dataset_dict):
    """Normalise transcripts and derive the character vocabulary.

    Returns the cleaned dataset dict and the vocabulary mapping
    (token -> id) produced by create_vocab.
    """
    disable_progress_bar()
    print("Pre-processing transcriptions ...")
    cleaned = dataset_dict.map(remove_special_characters)
    # create_vocab returns the vocabulary *dict* (not a path).
    vocab_dict = create_vocab(cleaned)
    enable_progress_bar()
    return cleaned, vocab_dict
def process_data(dataset_dict, processor):
    """Turn (audio path, sentence) rows into model-ready features.

    Decodes/resamples each wav to 16 kHz via the datasets Audio feature,
    extracts input_values with the feature extractor, and tokenises the
    transcript into CTC label ids. All original columns are dropped.
    """
    print("Processing data ...")
    def _helper(batch, processor=processor):
        # After cast_column below, batch["path"] is a decoded audio dict
        # with 'array' and 'sampling_rate' entries.
        audio = batch["path"]
        # batched output is "un-batched"
        batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
        # 2022-03-09:
        # Comment out input_length, not sure what actually requires this column
        # But including it results in a warning from Wav2Vec2ForCTC.forward
        # batch["input_length"] = len(batch["input_values"])
        # Temporarily switch the processor to tokenizer mode to encode labels.
        with processor.as_target_processor():
            batch["labels"] = processor(batch["sentence"]).input_ids
        return batch
    dataset_dict = dataset_dict.cast_column("path", Audio(sampling_rate=16_000))
    for ds_name, ds_data in dataset_dict.items():
        dataset_dict[ds_name] = ds_data.map(_helper, remove_columns=ds_data.column_names)
    return dataset_dict
@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pads audio inputs and CTC labels within a batch.

    Inputs and labels are padded separately because they have different
    lengths and padding rules; label padding is replaced with -100 so it is
    ignored by the CTC loss.
    """
    # Processor whose feature extractor / tokenizer perform the padding.
    processor: Wav2Vec2Processor
    # Padding strategy forwarded to processor.pad (True = pad to longest).
    padding: Union[bool, str] = True
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            return_tensors="pt",
        )
        # Pad labels with the tokenizer (target-processor mode).
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                return_tensors="pt",
            )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
def get_metrics_computer(processor):
    """Build a compute_metrics callback (WER + CER) for the HF Trainer.

    The returned closure decodes predictions with the given processor:
    LM-fused processors decode raw logits, otherwise greedy argmax ids.
    """
    wer_metric = load_metric("wer")
    cer_metric = load_metric("cer")
    def compute_metrics(pred):
        pred_logits = pred.predictions
        if type(processor).__name__ == "Wav2Vec2ProcessorWithLM":
            pred_str = processor.batch_decode(pred_logits).text
        else:
            pred_ids = np.argmax(pred_logits, axis=-1)
            pred_str = processor.batch_decode(pred_ids)
        # Replace data collator padding with tokenizer's padding
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        # Retrieve labels as characters, e.g. 'hello', from label_ids, e.g. [5, 3, 10, 10, 2] (where 5 = 'h')
        label_str = processor.tokenizer.batch_decode(pred.label_ids, group_tokens=False)
        # Print hypothesis/reference pairs for eyeballing during evaluation.
        print(pd.DataFrame({
            "pred_str" : pred_str,
            "label_str" : label_str
        }))
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        cer = cer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer, "cer": cer}
    return compute_metrics
def configure_w2v2_for_training(dataset, args, vocab_dict, w2v2_config={}):
    """Instantiate a Wav2Vec2ForCTC model and its processor for fine-tuning.

    When args.use_target_vocab is set, the vocabulary built from the data is
    written to output_dir and a fresh CTC tokenizer (and output head size)
    is created from it; otherwise the pretrained tokenizer is reused.
    NOTE(review): w2v2_config={} is a mutable default; it is only read here,
    never mutated, so this is benign.
    """
    feature_extractor_kwargs = w2v2_config["feature_extractor"] if "feature_extractor" in w2v2_config.keys() else {}
    model_kwargs = w2v2_config["model_kwargs"] if "model_kwargs" in w2v2_config.keys() else {}
    if args.use_target_vocab is True:
        vocab_path = os.path.join(args.output_dir, 'vocab.json')
        print(f"Writing created vocabulary to {vocab_path}")
        with open(vocab_path, 'w') as vocab_file:
            json.dump(vocab_dict, vocab_file)
        # Copy the base model's config alongside the new vocabulary.
        AutoConfig.from_pretrained(args.repo_path_or_name).save_pretrained(args.output_dir)
        tokenizer = Wav2Vec2CTCTokenizer(vocab_path)
    else:
        print("Using vocabulary from tokenizer ...")
        tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(args.repo_path_or_name)
    feature_extractor = Wav2Vec2FeatureExtractor(**feature_extractor_kwargs)
    processor = Wav2Vec2Processor(
        tokenizer=tokenizer,
        feature_extractor=feature_extractor
    )
    processor.save_pretrained(args.output_dir)
    if args.use_target_vocab:
        # Resize pad token / output layer to match the data-derived vocabulary.
        model = Wav2Vec2ForCTC.from_pretrained(
            pretrained_model_name_or_path=args.repo_path_or_name,
            pad_token_id=processor.tokenizer.pad_token_id,
            vocab_size=len(processor.tokenizer),
            **model_kwargs
        )
    else:
        model = Wav2Vec2ForCTC.from_pretrained(
            pretrained_model_name_or_path=args.repo_path_or_name,
            **model_kwargs
        )
    # Keep the convolutional feature encoder frozen during fine-tuning.
    model.freeze_feature_encoder()
    return model, processor
def configure_w2v2_for_inference(repo_path_or_name, cache_dir="tmp/"):
    """Load a trained wav2vec 2.0 model and return (model, processor, predict).

    A local directory is expected to contain a 'checkpoint-*' subfolder for
    the weights; otherwise repo_path_or_name is treated as a hub repo id.
    NOTE(review): glob(...)[0] raises IndexError if no checkpoint folder
    exists, and with several checkpoints the one picked is arbitrary.
    """
    if os.path.isdir(repo_path_or_name):
        cp_path = glob.glob(os.path.join(repo_path_or_name, 'checkpoint-*'))[0]
        model = Wav2Vec2ForCTC.from_pretrained(cp_path, cache_dir=cache_dir)
        processor = AutoProcessor.from_pretrained(repo_path_or_name, cache_dir=cache_dir)
    else:
        model = Wav2Vec2ForCTC.from_pretrained(repo_path_or_name)
        processor = AutoProcessor.from_pretrained(repo_path_or_name)
    if torch.cuda.is_available():
        model.to("cuda")
    def predict(batch):
        """Transcribe batch['speech'] (16 kHz samples) into batch['transcription']."""
        input_values = processor(batch["speech"], return_tensors="pt", padding="longest", sampling_rate=16_000).input_values
        with torch.no_grad():
            logits = model(input_values.to("cuda")).logits if torch.cuda.is_available() else model(input_values).logits
        # LM-fused processors decode raw logits; otherwise greedy argmax ids.
        if type(processor).__name__ == 'Wav2Vec2ProcessorWithLM':
            transcription = processor.batch_decode(logits.cpu().numpy()).text
        else:
            predicted_ids = torch.argmax(logits, dim=-1)
            transcription = processor.batch_decode(predicted_ids)
        batch["transcription"] = transcription[0] if isinstance(transcription, list) else transcription
        return batch
    return model, processor, predict
def configure_lm(processor, arpa_path, output_dir):
    """Wrap a Wav2Vec2 processor with a KenLM-backed CTC beam-search decoder.

    The decoder labels are the tokenizer's vocabulary, lowercased and ordered
    by token id. The combined processor is saved to output_dir and returned.
    """
    raw_vocab = processor.tokenizer.get_vocab()
    # Order tokens by id, lowercase them; dict insertion de-duplicates any
    # tokens that collide after lowercasing (first occurrence wins).
    lowered = {}
    for token, token_id in sorted(raw_vocab.items(), key=lambda item: item[1]):
        lowered[token.lower()] = token_id
    decoder = build_ctcdecoder(
        labels=list(lowered),
        kenlm_model_path=arpa_path,
    )
    processor_with_lm = Wav2Vec2ProcessorWithLM(
        feature_extractor=processor.feature_extractor,
        tokenizer=processor.tokenizer,
        decoder=decoder
    )
    processor_with_lm.save_pretrained(output_dir)
    return processor_with_lm
| 8,823 | 31.20438 | 133 | py |
vad-sli-asr | vad-sli-asr-master/scripts/helpers/sli.py | import glob
import os
import numpy as np
import pandas as pd
import torch
import torchaudio
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
from sklearn.utils._testing import ignore_warnings
from speechbrain.pretrained import EncoderClassifier
from tqdm import tqdm
def get_sli_df(sli_train_dir):
    """Index SLI training wavs: one row per wav file with its language label.

    sli_train_dir is expected to contain one subdirectory per language,
    each holding that language's .wav files.
    """
    per_lang_frames = []
    for lang in os.listdir(sli_train_dir):
        wav_paths = glob.glob(os.path.join(sli_train_dir, lang, "*.wav"))
        per_lang_frames.append(pd.DataFrame.from_dict({
            'wav_path' : wav_paths,
            'lang' : lang
        }))
    return pd.concat(per_lang_frames, ignore_index=True)
def get_sb_encoder(save_dir="tmp"):
    """Load SpeechBrain's pre-trained VoxLingua107 ECAPA language-ID encoder.

    Weights are cached under save_dir; the model runs on GPU when available.
    """
    run_device = "cuda" if torch.cuda.is_available() else "cpu"
    return EncoderClassifier.from_hparams(
        source="speechbrain/lang-id-voxlingua107-ecapa",
        savedir=save_dir,
        run_opts={"device": run_device}
    )
def add_sbemb_cols(sli_df, sb_encoder):
    """Append a 256-dim SpeechBrain embedding (one column per dim) to each row.

    Each wav in sli_df['wav_path'] is loaded, resampled to 16 kHz if needed,
    and encoded; the resulting 256 columns are concatenated to sli_df.
    """
    def enc_helper(wav_path):
        waveform, sample_rate = torchaudio.load(wav_path)
        if sample_rate != 16_000:
            # Encoder expects 16 kHz input.
            samp_to_16k = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16_000)
            waveform = samp_to_16k(waveform)
        emb = sb_encoder.encode_batch(waveform)
        # Flatten to a (1, 256) numpy row for DataFrame construction.
        return emb.reshape((1, 256)).cpu().detach().numpy()
    sbemb_df = pd.concat([ pd.DataFrame(enc_helper(f)) for f in tqdm(sli_df["wav_path"].to_list()) ])
    # Reset both indexes so the column-wise concat aligns row-for-row.
    sli_df = pd.concat([sli_df.reset_index(drop=True), sbemb_df.reset_index(drop=True)], axis=1)
    return sli_df
def colsplit_feats_labels(sli_df):
    """Split an SLI dataframe into (features, labels).

    Features are the trailing 256 embedding columns; labels are the
    'lang' column.
    """
    features = sli_df.iloc[:, -256:]
    labels = sli_df["lang"]
    return features, labels
@ignore_warnings(category=ConvergenceWarning)
def get_logreg_f1(train_df, test_df):
    """Fit logistic regression on train embeddings; score on the test split.

    Returns (weighted-average F1 rounded to 3 decimals, test predictions).
    Convergence warnings from the solver are suppressed by the decorator.
    """
    # Shuffle only the training rows; keep test order stable for the caller.
    train_feats, train_labels = colsplit_feats_labels(shuffle(train_df))
    test_feats, test_labels = colsplit_feats_labels(test_df)
    logreg = LogisticRegression(class_weight='balanced', max_iter = 1000, random_state=0)
    logreg.fit(train_feats, train_labels)
    test_pred = logreg.predict(test_feats)
    results_dict = classification_report(test_labels, test_pred, output_dict=True, zero_division=0)
    f1 = round(results_dict['weighted avg']['f1-score'], 3)
    return f1, test_pred
| 2,451 | 30.037975 | 101 | py |
mipGNN | mipGNN-master/gnn_models/train_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import os
import os.path as osp
import networkx as nx
from sklearn.model_selection import train_test_split
from torchmetrics import F1, Precision, Recall, Accuracy
from torch_geometric.data import (InMemoryDataset, Data)
from torch_geometric.data import DataLoader
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from gnn_models.EdgeConv.mip_bipartite_class import SimpleNet as EdgeConv
from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet as EdgeConvSimple
from gnn_models.GIN.mip_bipartite_class import SimpleNet as GIN
from gnn_models.GIN.mip_bipartite_simple_class import SimpleNet as GINSimple
from gnn_models.Sage.mip_bipartite_class import SimpleNet as Sage
from gnn_models.Sage.mip_bipartite_simple_class import SimpleNet as SageSimple
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Preprocessing to create Torch dataset.
class GraphDataset(InMemoryDataset):
    """InMemoryDataset of MILP bipartite graphs (variable vs. constraint nodes).

    NOTE(review): process() and the *_file_names properties read the
    module-level globals `pd` (a directory path that shadows the pandas
    import), `name`, and `bias_threshold`, which the script re-assigns
    before constructing each dataset — fragile, but how this file works.
    """
    def __init__(self, name, root, data_path, bias_threshold, transform=None, pre_transform=None,
                 pre_filter=None):
        super(GraphDataset, self).__init__(root, transform, pre_transform, pre_filter)
        self.data, self.slices = torch.load(self.processed_paths[0])
        # NOTE(review): assigned only after super().__init__ has already
        # triggered process(); processing uses the global bias_threshold.
        self.bias_threshold = bias_threshold
        global global_name
        global global_data_path
    @property
    def raw_file_names(self):
        # Module-level global `name`, not an attribute of self.
        return name
    @property
    def processed_file_names(self):
        return name
    def download(self):
        pass
    def process(self):
        print("Preprocessing.")
        data_list = []
        # `pd` here is the global data directory path (shadows pandas).
        num_graphs = len(os.listdir(pd))
        print(pd)
        # Iterate over instance files and create data objects.
        for num, filename in enumerate(os.listdir(pd)):
            print(filename, num, num_graphs)
            # Get graph.
            graph = nx.read_gpickle(pd + filename)
            # Make graph directed.
            graph = nx.convert_node_labels_to_integers(graph)
            graph = graph.to_directed() if not nx.is_directed(graph) else graph
            data = Data()
            # Maps networkx ids to new variable node ids.
            node_to_varnode = {}
            # Maps networkx ids to new constraint node ids.
            node_to_connode = {}
            # Number of variables.
            num_nodes_var = 0
            # Number of constraints.
            num_nodes_con = 0
            # Targets (classes).
            y = []
            y_real = []
            # Features for variable nodes.
            feat_var = []
            # Feature for constraints nodes.
            feat_con = []
            # Right-hand sides of equations.
            feat_rhs = []
            index = []
            index_var = []
            obj = []
            # Iterate over nodes, and collect features.
            for i, (node, node_data) in enumerate(graph.nodes(data=True)):
                # Node is a variable node.
                if node_data['bipartite'] == 0:
                    node_to_varnode[i] = num_nodes_var
                    num_nodes_var += 1
                    y_real.append(node_data['bias'])
                    # Binarise the bias target with the global threshold.
                    if (node_data['bias'] < bias_threshold):
                        y.append(0)
                    else:
                        y.append(1)
                    # Instance files use either 'objcoeff' or 'obj_coeff'.
                    if 'objcoeff' in node_data:
                        feat_var.append([node_data['objcoeff'], graph.degree[i]])
                        # feat_var.append([node_data['objcoeff']])
                        obj.append([node_data['objcoeff']])
                    else:
                        feat_var.append([node_data['obj_coeff'], graph.degree[i]])
                        # feat_var.append([node_data['obj_coeff']])
                        obj.append([node_data['obj_coeff']])
                    index_var.append(0)
                # Node is constraint node.
                elif node_data['bipartite'] == 1:
                    node_to_connode[i] = num_nodes_con
                    num_nodes_con += 1
                    # Instance files use either 'rhs' or 'bound'.
                    if 'rhs' in node_data:
                        rhs = node_data['rhs']
                    else:
                        rhs = node_data['bound']
                    feat_rhs.append([rhs])
                    feat_con.append([rhs, graph.degree[i]])
                    # feat_con.append([rhs])
                    index.append(0)
                else:
                    print("Error in graph format.")
                    exit(-1)
            # Edge list for var->con graphs.
            edge_list_var = []
            # Edge list for con->var graphs.
            edge_list_con = []
            # Create features matrices for variable nodes.
            edge_features_var = []
            # Create features matrices for constraint nodes.
            edge_features_con = []
            # Remark: graph is directed, i.e., each edge exists for each direction.
            # Flow of messages: source -> target.
            for i, (s, t, edge_data) in enumerate(graph.edges(data=True)):
                # Source node is con, target node is var.
                if graph.nodes[s]['bipartite'] == 1:
                    # Source node is constraint. C->V.
                    edge_list_con.append([node_to_connode[s], node_to_varnode[t]])
                    edge_features_con.append([edge_data['coeff']])
                else:
                    # Source node is variable. V->C.
                    edge_list_var.append([node_to_varnode[s], node_to_connode[t]])
                    edge_features_var.append([edge_data['coeff']])
            edge_index_var = torch.tensor(edge_list_var).t().contiguous()
            edge_index_con = torch.tensor(edge_list_con).t().contiguous()
            # Create data object.
            data.edge_index_var = edge_index_var
            data.edge_index_con = edge_index_con
            data.y = torch.from_numpy(np.array(y)).to(torch.long)
            data.y_real = torch.from_numpy(np.array(y_real)).to(torch.float)
            data.var_node_features = torch.from_numpy(np.array(feat_var)).to(torch.float)
            data.con_node_features = torch.from_numpy(np.array(feat_con)).to(torch.float)
            data.rhs = torch.from_numpy(np.array(feat_rhs)).to(torch.float)
            data.obj = torch.from_numpy(np.array(obj)).to(torch.float)
            data.edge_features_con = torch.from_numpy(np.array(edge_features_con)).to(torch.float)
            data.edge_features_var = torch.from_numpy(np.array(edge_features_var)).to(torch.float)
            data.num_nodes_var = num_nodes_var
            data.num_nodes_con = num_nodes_con
            data.index = torch.from_numpy(np.array(index)).to(torch.long)
            data.index_var = torch.from_numpy(np.array(index_var)).to(torch.long)
            data_list.append(data)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
# Preprocess indices of bipartite graphs to make batching work.
class MyData(Data):
    """Data subclass that tells PyG how to offset bipartite indices when batching.

    Edge indices connect two differently-sized node sets (variables and
    constraints), so each side of an edge index must be shifted by the
    corresponding node count; the per-graph 'index' vectors shift by the
    matching set size. Everything else is concatenated without an offset.
    """
    def __inc__(self, key, value):
        if key == 'edge_index_var':
            # Rows: variable ids, constraint ids.
            return torch.tensor([self.num_nodes_var, self.num_nodes_con]).view(2, 1)
        if key == 'edge_index_con':
            # Rows: constraint ids, variable ids.
            return torch.tensor([self.num_nodes_con, self.num_nodes_var]).view(2, 1)
        if key == 'index':
            return torch.tensor(self.num_nodes_con)
        if key == 'index_var':
            return torch.tensor(self.num_nodes_var)
        return 0
class MyTransform(object):
    """Dataset transform that re-wraps each Data object as a MyData instance,
    so batching applies the bipartite index offsets defined in MyData."""
    def __call__(self, data):
        converted = MyData()
        for field_name, value in data:
            converted[field_name] = value
        return converted
# Paths to preprocessed bipartite-graph instance sets; entries alternate
# train/test per benchmark (GISP cliques, plus one FCMNF set).
dataset_list = [
    "../data_new/data_graphsonly/gisp/p_hat300-2.clq/train/",
    "../data_new/data_graphsonly/gisp/p_hat300-2.clq/test/",
    "../data_new/data_graphsonly/gisp/C250.9.clq/train/",
    "../data_new/data_graphsonly/gisp/C250.9.clq/test/",
    "../data_new/data_graphsonly/gisp/keller4.clq/train/",
    "../data_new/data_graphsonly/gisp/keller4.clq/test/",
    "../data_new/data_graphsonly/gisp/hamming8-4.clq/train/",
    "../data_new/data_graphsonly/gisp/hamming8-4.clq/test/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_55.clq/train/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_55.clq/test/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_44.clq/train/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_44.clq/test/",
    "../data_new/data_graphsonly/gisp/C125.9.clq/train/",
    "../data_new/data_graphsonly/gisp/C125.9.clq/test/",
    "../data_new/data_graphsonly/gisp/p_hat300-1.clq/train/",
    "../data_new/data_graphsonly/gisp/p_hat300-1.clq/test/",
    "../data_new/data_graphsonly/gisp/brock200_4.clq/train/",
    "../data_new/data_graphsonly/gisp/brock200_4.clq/test/",
    "../data_new/data_graphsonly/gisp/brock200_2.clq/train/",
    "../data_new/data_graphsonly/gisp/brock200_2.clq/test/",
    "../data_new/data_graphsonly/fcmnf/L_n200_p0.02_c500/train/",
    "../data_new/data_graphsonly/fcmnf/L_n200_p0.02_c500/test/"
]
# Dataset names aligned index-for-index with dataset_list (used for model and
# log file naming). The FCMNF entries are currently disabled.
name_list = [
    "p_hat300-2.clq_train",
    "p_hat300-2.clq_test",
    "gisp_C250.9.clq_train",
    "C250.9.clq_test",
    "keller4.clq_train",
    "keller4.clq_test",
    "hamming8-4.clq_train",
    "hamming8-4.clq_test",
    "gen200_p0.9_55.clq_train",
    "gen200_p0.9_55.clq_test",
    "gen200_p0.9_44.clq_train",
    "gen200_p0.9_44.clq_test",
    "C125.9.clq_train",
    "C125.9.clq_test",
    "p_hat300-1.clq_train",
    "p_hat300-1.clq_test",
    "brock200_4.clq_train",
    "brock200_4.clq_test",
    "brock200_2.clq_train",
    "brock200_2.clq_test",
    # "L_n200_p0.02_c500_train",
    # "L_n200_p0.02_c500_test"
]
test_scores = []
# Grid: 5 repetitions x 10 train/test dataset pairs x 3 bias thresholds x 6 GNN
# variants; each combination trains a classifier and records test metrics.
for rep in [0, 1, 2, 3, 4]:
    for i in [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]:
        # Bias.
        for bias in [0.0, 0.001, 0.1]:
            # GNN.
            for m in ["ECS", "GINS", "SGS", "EC", "GIN", "SG"]:
                log = []
                # Setup model.
                device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
                if m == "EC":
                    model = EdgeConv(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                    model_name = "EC_" + name_list[i] + str(bias) + "_" + str(rep)
                    print(model_name, bias, name_list[i])
                elif m == "ECS":
                    model = EdgeConvSimple(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                    model_name = "ECS_" + name_list[i] + str(bias) + "_" + str(rep)
                    print(model_name, bias, name_list[i])
                elif m == "GIN":
                    model = GIN(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                    model_name = "GIN_" + name_list[i] + str(bias) + "_" + str(rep)
                    print(model_name, bias, name_list[i])
                elif m == "GINS":
                    model = GINSimple(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                    model_name = "GINS_" + name_list[i] + str(bias) + "_" + str(rep)
                    print(model_name, bias, name_list[i])
                elif m == "SG":
                    model = Sage(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                    model_name = "SG_" + name_list[i] + str(bias) + "_" + str(rep)
                    print(model_name, bias, name_list[i])
                elif m == "SGS":
                    model = SageSimple(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                    model_name = "SGS_" + name_list[i] + str(bias) + "_" + str(rep)
                    print(model_name, bias, name_list[i])
                optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
                # NOTE(review): mode='min' combined with scheduler.step(val_acc)
                # decays the LR when validation *accuracy* stops decreasing;
                # mode='max' looks intended — confirm before relying on results.
                scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                                       factor=0.8, patience=10,
                                                                       min_lr=0.0000001)
                # Prepare data.
                bias_threshold = bias
                batch_size = 10
                num_epochs = 30
                pathr = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'data', 'DS')
                # NOTE(review): rebinding `pd` shadows the pandas import;
                # GraphDataset.process reads these globals. The
                # `path_trainpath_train` alias is redundant (likely a typo).
                pd = path_train = path_trainpath_train = dataset_list[i]
                name = name_train = name_list[i]
                train_dataset = GraphDataset(name_train, pathr, path_train, bias_threshold,
                                             transform=MyTransform()).shuffle()
                pd = path_test = path_testpath_test = dataset_list[i + 1]
                name = name_test = name_list[i + 1]
                test_dataset = GraphDataset(name_test, pathr, path_test, bias_threshold,
                                            transform=MyTransform()).shuffle()
                # Hold out 20% of the training instances for validation.
                train_index, val_index = train_test_split(list(range(0, len(train_dataset))), test_size=0.2)
                val_dataset = train_dataset[val_index].shuffle()
                train_dataset = train_dataset[train_index].shuffle()
                test_dataset = test_dataset.shuffle()
                train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
                val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
                test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
                def train(epoch):
                    """One epoch of NLL training; returns mean loss per instance."""
                    model.train()
                    # loss_all = 0
                    zero = torch.tensor([0]).to(device)
                    one = torch.tensor([1]).to(device)
                    loss_all = 0
                    for data in train_loader:
                        data = data.to(device)
                        # Binarise continuous bias targets on the fly.
                        y = data.y_real
                        y = torch.where(y <= bias_threshold, zero, one).to(device)
                        optimizer.zero_grad()
                        output = model(data)
                        loss = F.nll_loss(output, y)
                        loss.backward()
                        loss_all += batch_size * loss.item()
                        optimizer.step()
                    return loss_all / len(train_dataset)
                @torch.no_grad()
                def test(loader):
                    """Evaluate on loader; returns (accuracy, macro-F1, precision, recall)."""
                    model.eval()
                    zero = torch.tensor([0]).to(device)
                    one = torch.tensor([1]).to(device)
                    f1 = F1(num_classes=2, average="macro").to(device)
                    pr = Precision(num_classes=2, average="macro").to(device)
                    re = Recall(num_classes=2, average="macro").to(device)
                    acc = Accuracy(num_classes=2).to(device)
                    first = True
                    for data in loader:
                        data = data.to(device)
                        pred = model(data)
                        y = data.y_real
                        y = torch.where(y <= bias_threshold, zero, one).to(device)
                        pred = pred.max(dim=1)[1]
                        # Accumulate predictions/targets over the whole split.
                        if not first:
                            pred_all = torch.cat([pred_all, pred])
                            y_all = torch.cat([y_all, y])
                        else:
                            pred_all = pred
                            y_all = y
                            first = False
                    return acc(pred_all, y_all), f1(pred_all, y_all), pr(pred_all, y_all), re(pred_all, y_all)
                best_val = 0.0
                test_acc = 0.0
                test_f1 = 0.0
                test_re = 0.0
                test_pr = 0.0
                for epoch in range(1, num_epochs + 1):
                    train_loss = train(epoch)
                    train_acc, train_f1, train_pr, train_re = test(train_loader)
                    val_acc, val_f1, val_pr, val_re = test(val_loader)
                    scheduler.step(val_acc)
                    lr = scheduler.optimizer.param_groups[0]['lr']
                    # Model selection: keep test metrics of the best-val epoch.
                    if val_acc > best_val:
                        best_val = val_acc
                        test_acc, test_f1, test_pr, test_re = test(test_loader)
                        torch.save(model.state_dict(), "./model_new_reps/" + model_name)
                    log.append(
                        [epoch, train_loss, train_acc, train_f1, train_pr, train_re, val_acc, val_f1, val_pr, val_re,
                         best_val, test_acc, test_f1, test_pr, test_re])
                    # Break if learning rate is smaller 10**-6.
                    if lr < 0.000001 or epoch == num_epochs:
                        print([model_name, test_acc, test_f1, test_pr, test_re])
                        test_scores.append([model_name, test_acc, test_f1, test_pr, test_re])
                        log = np.array(log)
                        np.savetxt("./model_new_reps/" + model_name + ".log", log, delimiter=",",
                                   fmt='%1.5f')
                        break
                torch.cuda.empty_cache()
| 17,218 | 39.325527 | 117 | py |
mipGNN | mipGNN-master/gnn_models/Sage/mip_bipartite_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch_geometric.utils.softmax
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU, Sigmoid
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Update constraint embeddings based on variable embeddings.
class VarConBipartiteLayer(MessagePassing):
    """Bipartite message-passing layer: variable nodes -> constraint nodes.

    Each variable's embedding is combined with its scalar assignment (from
    var_assigment), aggregated over incoming edges, and fused with the
    target constraint embedding via two linear maps (SAGE-style), then
    L2-normalised.
    """
    def __init__(self, edge_dim, dim, var_assigment, aggr):
        super(VarConBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # lin_l transforms aggregated messages; lin_r transforms the target's
        # own embedding (root weight).
        self.lin_l = Linear(dim, dim, bias=True)
        self.lin_r = Linear(dim, dim, bias=False)
        # Maps variable embeddings to scalar variable assigment.
        self.var_assigment = var_assigment
        # Maps variable embeddings + assignment to joint embedding.
        self.joint_var = Sequential(Linear(dim + 1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
        # NOTE(review): self.mlp, self.eps and self.initial_eps are never
        # used in forward() — dead parameters (GIN leftovers?).
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                              BN(dim))
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
    def forward(self, source, target, edge_index, edge_attr, rhs, size):
        # NOTE(review): `rhs` is unused in this layer.
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        # Compute scalar variable assignment.
        var_assignment = self.var_assigment(source)
        # Compute joint embedding of variable embeddings and scalar variable assignment.
        new_source = self.joint_var(torch.cat([source, var_assignment], dim=-1))
        out = self.propagate(edge_index, x=new_source, size=size, edge_attr=edge_embedding)
        out = self.lin_l(out)
        out += self.lin_r(target)
        out = F.normalize(out, p=2., dim=-1)
        return out
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def update(self, aggr_out):
        return aggr_out
# Compute error signal.
class ErrorLayer(MessagePassing):
    """Computes a per-constraint violation signal from current assignments.

    Sums coefficient-weighted variable assignments per constraint (i.e. Ax),
    subtracts the right-hand side, encodes the residual, and softmax-
    normalises it over the per-graph constraint index.
    """
    def __init__(self, dim, var_assignment):
        super(ErrorLayer, self).__init__(aggr="add", flow="source_to_target")
        # Shared module mapping variable embeddings to a scalar assignment.
        self.var_assignment = var_assignment
        self.error_encoder = Sequential(Linear(1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                        BN(dim))
    def forward(self, source, edge_index, edge_attr, rhs, index, size):
        # Compute scalar variable assignment.
        new_source = self.var_assignment(source)
        # Sum of coeff * assignment over each constraint's incident variables.
        tmp = self.propagate(edge_index, x=new_source, edge_attr=edge_attr, size=size)
        # Compute residual, i.e., Ax-b.
        out = tmp - rhs
        out = self.error_encoder(out)
        # Normalise error embeddings across constraints of the same graph.
        out = torch_geometric.utils.softmax(out, index)
        return out
    def message(self, x_j, edge_attr):
        msg = x_j * edge_attr
        return msg
    def update(self, aggr_out):
        return aggr_out
# Update variable embeddings based on constraint embeddings.
class ConVarBipartiteLayer(MessagePassing):
    """Bipartite message-passing layer: constraint nodes -> variable nodes.

    Constraint embeddings are fused with their error signal before being
    propagated; target variable embeddings are combined SAGE-style and
    L2-normalised.
    """
    def __init__(self, edge_dim, dim, aggr):
        super(ConVarBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Learn joint representation of contraint embedding and error.
        self.joint_con_encoder = Sequential(Linear(dim + dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                            BN(dim))
        # lin_l transforms aggregated messages; lin_r the target's own embedding.
        self.lin_l = Linear(dim, dim, bias=True)
        self.lin_r = Linear(dim, dim, bias=False)
    def forward(self, source, target, edge_index, edge_attr, error_con, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        # Fuse constraint embedding with its violation signal.
        new_source = self.joint_con_encoder(torch.cat([source, error_con], dim=-1))
        out = self.propagate(edge_index, x=new_source, size=size, edge_attr=edge_embedding)
        out = self.lin_l(out)
        out += self.lin_r(target)
        out = F.normalize(out, p=2., dim=-1)
        return out
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def update(self, aggr_out):
        return aggr_out
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable/constraint graph, with error signals.

    Each round computes (1) a per-constraint error embedding from the current
    scalar variable assignment, (2) updated constraint embeddings from the
    variables, and (3) updated variable embeddings from the constraints plus
    the error signal. Every variable node is finally classified (or, in
    regression mode, scored) from the concatenation of its embeddings at all
    rounds.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        # hidden: embedding width; aggr: message aggregation mode;
        # num_layers: number of message-passing rounds;
        # regression: scalar output per variable instead of 2-class logits.
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Compute variable assignment (scalar in [0, 1] per variable); each MLP
        # is shared between the round's VarCon and Error layers.
        # NOTE(review): kept in a plain Python list; its parameters are
        # registered only via the layer modules below that hold references.
        self.layers_ass = []
        for i in range(self.num_layers):
            self.layers_ass.append(Sequential(Linear(hidden, hidden), ReLU(), Linear(hidden, 1), Sigmoid()))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        self.layers_err = []
        for i in range(self.num_layers):
            self.layers_con.append(ConVarBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(VarConBipartiteLayer(1, hidden, self.layers_ass[i], aggr=aggr))
            self.layers_err.append(ErrorLayer(hidden, self.layers_ass[i]))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        self.layers_err = torch.nn.ModuleList(self.layers_err)
        # MLP used for classification.
        self.lin1 = Linear((self.num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # unused below
        num_nodes_con = data.num_nodes_con  # unused below
        rhs = data.rhs
        index = data.index
        obj = data.obj  # unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        x_err = []
        for i in range(self.num_layers):
            # Error signal from the current variable embeddings.
            x_err.append(self.layers_err[i](x_var[-1], edge_index_var, edge_features_var, rhs, index,
                                            (var_node_features_0.size(0), con_node_features.size(0))))
            # Variables -> constraints.
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var, rhs,
                                                   (var_node_features_0.size(0), con_node_features.size(0)))))
            # Constraints (+ error) -> variables.
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con, x_err[-1],
                                                   (con_node_features.size(0), var_node_features_0.size(0)))))
        # Jumping-knowledge readout: concatenate embeddings of all rounds.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 8,206 | 36.646789 | 118 | py |
mipGNN | mipGNN-master/gnn_models/Sage/mip_bipartite_simple_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import MessagePassing
from torch_sparse import matmul
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class SimpleBipartiteLayer(MessagePassing):
    """One half-step of message passing across the variable/constraint bipartite graph.

    Messages flow from ``source`` to ``target`` nodes; the target update is
    SAGE-like: a linear map of the aggregated messages plus a bias-free linear
    map of the target's own embedding, followed by L2 normalization.
    """
    def __init__(self, edge_dim, dim, aggr):
        # edge_dim: raw edge-feature width; dim: embedding width;
        # aggr: aggregation mode passed to MessagePassing.
        super(SimpleBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        self.lin_l = Linear(dim, dim, bias=True)
        self.lin_r = Linear(dim, dim, bias=False)
    def forward(self, source, target, edge_index, edge_attr, size):
        edge_emb = self.edge_encoder(edge_attr)
        out = self.propagate(edge_index, x=source, size=size, edge_emb=edge_emb)
        out = self.lin_l(out)
        out += self.lin_r(target)
        out = F.normalize(out, p=2., dim=-1)
        return out
    def message(self, x_j, edge_emb):
        return F.relu(x_j + edge_emb)
    def message_and_aggregate(self, adj_t, x):
        # Sparse fast path: drop edge values and reduce with the layer's aggr.
        adj_t = adj_t.set_value(None, layout=None)
        return matmul(adj_t, x[0], reduce=self.aggr)
    def __repr__(self):
        # Bug fix: the previous implementation formatted ``self.nn``, an
        # attribute this class never defines, so calling repr() raised
        # AttributeError. Report the aggregation mode instead.
        return '{}({})'.format(self.__class__.__name__, self.aggr)
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable/constraint graph (SAGE-style layers).

    Alternates message passing from variables to constraints and back; every
    variable node is finally classified (or, in regression mode, scored) from
    the concatenation of its embeddings at all rounds.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        # hidden: embedding width; aggr: message aggregation mode;
        # num_layers: number of message-passing rounds;
        # regression: scalar output per variable instead of 2-class logits.
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        for i in range(self.num_layers):
            self.layers_con.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        # MLP used for classification.
        self.lin1 = Linear((num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # unused below
        num_nodes_con = data.num_nodes_con  # unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        for i in range(self.num_layers):
            # Variables -> constraints, then constraints -> variables.
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var,
                                                   (var_node_features_0.size(0), con_node_features.size(0)))))
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con,
                                                   (con_node_features.size(0), var_node_features_0.size(0)))))
        # Jumping-knowledge readout over all rounds.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 4,248 | 34.408333 | 110 | py |
mipGNN | mipGNN-master/gnn_models/GIN/mip_bipartite_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch_geometric.utils.softmax
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU, Sigmoid
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Update constraint embeddings based on variable embeddings.
class VarConBipartiteLayer(MessagePassing):
    """Updates constraint embeddings from variable embeddings (GIN-style).

    Each variable embedding is fused with its scalar assignment before
    messages are sent; the target (constraint) update is GIN-like:
    mlp((1 + eps) * target + aggregated_messages).
    """
    def __init__(self, edge_dim, dim, var_assigment, aggr):
        super(VarConBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Joint representation of a variable embedding and its scalar assignment.
        self.joint_var = Sequential(Linear(dim + 1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
        # Maps variable embeddings to scalar variable assigment.
        self.var_assigment = var_assigment
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(), BN(dim))
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
    def forward(self, source, target, edge_index, edge_attr, rhs, size):
        # Compute scalar variable assignment.
        var_assignment = self.var_assigment(source)
        source = self.joint_var(torch.cat([source, var_assignment], dim=-1))
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        tmp = self.propagate(edge_index, x=source, edge_attr=edge_embedding, size=size)
        out = self.mlp((1 + self.eps) * target + tmp)
        return out
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def __repr__(self):
        # Bug fix: the previous implementation formatted ``self.nn``, an
        # attribute this class never defines, so calling repr() raised
        # AttributeError. Report the update MLP instead.
        return '{}(nn={})'.format(self.__class__.__name__, self.mlp)
# Compute error signal.
class ErrorLayer(MessagePassing):
    """Computes a per-constraint error embedding from the current variable assignment.

    Aggregates coefficient * assignment over incident variables (Ax), subtracts
    the right-hand side (residual Ax - b), embeds the residual, and normalizes
    with a softmax grouped by ``index``.
    """
    def __init__(self, dim, var_assignment):
        super(ErrorLayer, self).__init__(aggr="add", flow="source_to_target")
        self.var_assignment = var_assignment
        # Embeds the scalar residual into a dim-dimensional vector.
        self.error_encoder = Sequential(Linear(1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                        BN(dim))
        # Learn joint representation of constraint embedding and error.
        # NOTE(review): ``self.joint_var`` is never used in forward(); it only
        # contributes unused parameters. Kept as-is for state_dict
        # compatibility -- confirm before removing.
        self.joint_var = Sequential(Linear(dim + dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
    def forward(self, source, edge_index, edge_attr, rhs, index, size):
        # Compute scalar variable assignment.
        new_source = self.var_assignment(source)
        tmp = self.propagate(edge_index, x=new_source, edge_attr=edge_attr, size=size)
        # Compute residual, i.e., Ax-b.
        out = tmp - rhs
        out = self.error_encoder(out)
        # Normalize error embeddings within each batch instance.
        out = torch_geometric.utils.softmax(out, index)
        return out
    def message(self, x_j, edge_attr):
        # Constraint-matrix coefficient times the scalar assignment.
        msg = x_j * edge_attr
        return msg
    def update(self, aggr_out):
        return aggr_out
# Update variable embeddings based on constraint embeddings.
class ConVarBipartiteLayer(MessagePassing):
    """Updates variable embeddings from constraint embeddings (GIN-style).

    Each constraint embedding is fused with its error embedding before
    messages are sent; the target (variable) update is GIN-like:
    mlp((1 + eps) * target + aggregated_messages).
    """
    def __init__(self, edge_dim, dim, aggr):
        super(ConVarBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Learn joint representation of constraint embedding and error.
        self.joint_var = Sequential(Linear(dim + dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(), BN(dim))
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
    def forward(self, source, target, edge_index, edge_attr, error_con, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        source = self.joint_var(torch.cat([source, error_con], dim=-1))
        tmp = self.propagate(edge_index, x=source, edge_attr=edge_embedding, size=size)
        out = self.mlp((1 + self.eps) * target + tmp)
        return out
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def __repr__(self):
        # Bug fix: the previous implementation formatted ``self.nn``, an
        # attribute this class never defines, so calling repr() raised
        # AttributeError. Report the update MLP instead.
        return '{}(nn={})'.format(self.__class__.__name__, self.mlp)
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable/constraint graph (GIN-style, with error signals).

    Each round computes a per-constraint error embedding from the current
    scalar variable assignment, then updates constraint embeddings from the
    variables and variable embeddings from the constraints plus the error
    signal. Variable nodes are classified (or scored, in regression mode)
    from the concatenation of their embeddings at all rounds.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        # hidden: embedding width; aggr: message aggregation mode;
        # num_layers: number of message-passing rounds;
        # regression: scalar output per variable instead of 2-class logits.
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Compute variable assignment (scalar per variable); each MLP is shared
        # between the round's VarCon and Error layers.
        # NOTE(review): kept in a plain Python list; its parameters are
        # registered only via the layer modules below that hold references.
        self.layers_ass = []
        for i in range(self.num_layers):
            self.layers_ass.append(Sequential(Linear(hidden, hidden), ReLU(), Linear(hidden, 1), Sigmoid()))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        self.layers_err = []
        for i in range(self.num_layers):
            self.layers_con.append(ConVarBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(VarConBipartiteLayer(1, hidden, self.layers_ass[i], aggr=aggr))
            self.layers_err.append(ErrorLayer(hidden, self.layers_ass[i]))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        self.layers_err = torch.nn.ModuleList(self.layers_err)
        # MLP used for classification.
        self.lin1 = Linear((self.num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # unused below
        num_nodes_con = data.num_nodes_con  # unused below
        rhs = data.rhs
        index = data.index
        obj = data.obj  # unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        x_err = []
        for i in range(self.num_layers):
            # Error signal from the current variable embeddings.
            x_err.append(self.layers_err[i](x_var[-1], edge_index_var, edge_features_var, rhs, index,
                                            (var_node_features_0.size(0), con_node_features.size(0))))
            # Variables -> constraints.
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var, rhs,
                                                   (var_node_features_0.size(0), con_node_features.size(0)))))
            # Constraints (+ error) -> variables.
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con, x_err[-1],
                                                   (con_node_features.size(0), var_node_features_0.size(0)))))
        # Jumping-knowledge readout over all rounds.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 8,207 | 36.309091 | 118 | py |
mipGNN | mipGNN-master/gnn_models/GIN/mip_bipartite_simple_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class SimpleBipartiteLayer(MessagePassing):
    """One half-step of bipartite message passing with a GIN-style update.

    The target update is mlp((1 + eps) * target + aggregated_messages), where
    eps is a learnable scalar initialized to zero.
    """
    def __init__(self, edge_dim, dim, aggr):
        super(SimpleBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(), BN(dim))
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
    def forward(self, source, target, edge_index, edge_attr, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        tmp = self.propagate(edge_index, x=source, edge_attr=edge_embedding, size=size)
        out = self.mlp((1 + self.eps) * target + tmp)
        return out
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def update(self, aggr_out):
        return aggr_out
class SimpleNet(torch.nn.Module):
    """Bipartite GNN for MIP variable/constraint graphs (GIN-style layers).

    Performs ``num_layers`` alternating rounds of variable->constraint and
    constraint->variable message passing, then predicts a label (or, in
    regression mode, a scalar) for every variable node from the concatenation
    of its embeddings at every depth.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Lift the 2-dimensional raw node features to `hidden` dimensions.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Message-passing layers; constructed interleaved so that random
        # parameter initialization consumes the RNG in the original order.
        layers_con, layers_var = [], []
        for _ in range(num_layers):
            layers_con.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
            layers_var.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
        self.layers_con = torch.nn.ModuleList(layers_con)
        self.layers_var = torch.nn.ModuleList(layers_var)
        # Classification / regression head over the concatenated embeddings.
        self.lin1 = Linear((num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if regression:
            self.lin4 = Linear(hidden, 1)
        else:
            self.lin4 = Linear(hidden, 2)
    def forward(self, data):
        # Encode raw features of both node classes.
        var_emb = self.var_node_encoder(data.var_node_features)
        con_emb = self.con_node_encoder(data.con_node_features)
        size_vc = (var_emb.size(0), data.con_node_features.size(0))
        size_cv = (size_vc[1], size_vc[0])
        x_var = [var_emb]
        x_con = [con_emb]
        for step in range(self.num_layers):
            # Variables update constraints, then constraints update variables.
            x_con.append(F.relu(self.layers_var[step](
                x_var[-1], x_con[-1], data.edge_index_var, data.edge_features_var, size_vc)))
            x_var.append(F.relu(self.layers_con[step](
                x_con[-1], x_var[-1], data.edge_index_con, data.edge_features_con, size_cv)))
        # Jumping-knowledge concatenation of all variable embeddings.
        h = torch.cat(x_var, dim=-1)
        h = F.relu(self.lin1(h))
        h = F.relu(self.lin2(h))
        h = F.relu(self.lin3(h))
        h = self.lin4(h)
        if not self.regression:
            return F.log_softmax(h, dim=-1)
        return h.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 4,082 | 35.132743 | 110 | py |
mipGNN | mipGNN-master/gnn_models/EdgeConv/mip_bipartite_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch_geometric.utils.softmax
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU, Sigmoid
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Variables to constraints.
class VarConBipartiteLayer(MessagePassing):
    """Updates constraint embeddings from variable embeddings (EdgeConv-style).

    Each message is an MLP over [target embedding, source embedding, the
    source's scalar assignment, edge embedding].
    """
    def __init__(self, edge_dim, dim, var_assigment, aggr):
        super(VarConBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Combine node and edge features of adjacent nodes.
        self.nn = Sequential(Linear(3 * dim + 1, dim), ReLU(), Linear(dim, dim), ReLU(),
                             BN(dim))
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Maps variable embeddings to scalar variable assigment.
        self.var_assigment = var_assigment
    def forward(self, source, target, edge_index, edge_attr, rhs, size):
        # NOTE(review): ``rhs`` is accepted but unused here; it keeps the call
        # signature aligned with the other layer variants.
        # Compute scalar variable assignment.
        var_assignment = self.var_assigment(source)
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        out = self.propagate(edge_index, x=source, t=target, v=var_assignment, edge_attr=edge_embedding, size=size)
        return out
    def message(self, x_j, t_i, v_j, edge_attr):
        # Message from receiving constraint (t_i), sending variable (x_j),
        # its scalar assignment (v_j), and the edge embedding.
        return self.nn(torch.cat([t_i, x_j, v_j, edge_attr], dim=-1))
    def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
# Compute error signal.
class ErrorLayer(MessagePassing):
    """Computes a per-constraint error embedding from the current variable assignment.

    Aggregates coefficient * assignment over incident variables (Ax), subtracts
    the right-hand side (residual Ax - b), embeds the residual, and normalizes
    with a softmax grouped by ``index``.
    """
    def __init__(self, dim, var_assignment):
        super(ErrorLayer, self).__init__(aggr="add", flow="source_to_target")
        self.var_assignment = var_assignment
        # Embeds the scalar residual into a dim-dimensional vector.
        self.error_encoder = Sequential(Linear(1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                        BN(dim))
    def forward(self, source, edge_index, edge_attr, rhs, index, size):
        # Compute scalar variable assignment.
        new_source = self.var_assignment(source)
        tmp = self.propagate(edge_index, x=new_source, edge_attr=edge_attr, size=size)
        # Compute residual, i.e., Ax-b.
        out = tmp - rhs
        out = self.error_encoder(out)
        # Normalize error embeddings within each batch instance.
        out = torch_geometric.utils.softmax(out, index)
        return out
    def message(self, x_j, edge_attr):
        # Constraint-matrix coefficient times the scalar assignment.
        msg = x_j * edge_attr
        return msg
    def update(self, aggr_out):
        return aggr_out
class ConVarBipartiteLayer(MessagePassing):
    """Updates variable embeddings from constraint embeddings (EdgeConv-style).

    Each message is an MLP over [target embedding, source embedding, the
    source constraint's error embedding, edge embedding].
    """
    def __init__(self, edge_dim, dim, aggr):
        super(ConVarBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Combine node and edge features of adjacent nodes.
        self.nn = Sequential(Linear(4 * dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                             BN(dim))
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
    def forward(self, source, target, edge_index, edge_attr, error_con, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        out = self.propagate(edge_index, x=source, t=target, e=error_con, edge_attr=edge_embedding, size=size)
        return out
    def message(self, x_j, t_i, e_j, edge_attr):
        # Message from receiving variable (t_i), sending constraint (x_j),
        # its error embedding (e_j), and the edge embedding.
        return self.nn(torch.cat([t_i, x_j, e_j, edge_attr], dim=-1))
    def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable/constraint graph (EdgeConv-style, with error signals).

    Each round computes a per-constraint error embedding from the current
    scalar variable assignment, then updates constraint embeddings from the
    variables and variable embeddings from the constraints plus the error
    signal. Variable nodes are classified (or scored, in regression mode)
    from the concatenation of their embeddings at all rounds.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        # hidden: embedding width; aggr: message aggregation mode;
        # num_layers: number of message-passing rounds;
        # regression: scalar output per variable instead of 2-class logits.
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Compute variable assignment (scalar per variable); each MLP is shared
        # between the round's VarCon and Error layers.
        # NOTE(review): kept in a plain Python list; its parameters are
        # registered only via the layer modules below that hold references.
        self.layers_ass = []
        for i in range(self.num_layers):
            self.layers_ass.append(Sequential(Linear(hidden, hidden), ReLU(), Linear(hidden, 1), Sigmoid()))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        self.layers_err = []
        for i in range(self.num_layers):
            self.layers_con.append(ConVarBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(VarConBipartiteLayer(1, hidden, self.layers_ass[i], aggr=aggr))
            self.layers_err.append(ErrorLayer(hidden, self.layers_ass[i]))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        self.layers_err = torch.nn.ModuleList(self.layers_err)
        # MLP used for classification.
        self.lin1 = Linear((self.num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # unused below
        num_nodes_con = data.num_nodes_con  # unused below
        rhs = data.rhs
        index = data.index
        obj = data.obj  # unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        x_err = []
        num_var = var_node_features_0.size(0)
        num_con = con_node_features_0.size(0)
        for i in range(self.num_layers):
            # Error signal from the current variable embeddings.
            x_err.append(self.layers_err[i](x_var[-1], edge_index_var, edge_features_var, rhs, index,
                                            (num_var, num_con)))
            # Variables -> constraints.
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var, rhs,
                                                   (num_var, num_con))))
            # Constraints (+ error) -> variables.
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con, x_err[-1],
                                                   (num_con, num_var))))
        # Jumping-knowledge readout over all rounds.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 7,303 | 35.52 | 118 | py |
mipGNN | mipGNN-master/gnn_models/EdgeConv/mip_bipartite_simple_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class SimpleBipartiteLayer(MessagePassing):
    """One half-step of bipartite message passing with an EdgeConv-style update.

    Each message is an MLP over [target embedding, source embedding,
    edge embedding]; the aggregated messages are the layer output.
    """
    def __init__(self, edge_dim, dim, aggr):
        super(SimpleBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Combine node and edge features of adjacent nodes.
        self.nn = Sequential(Linear(3 * dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                             BN(dim))
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
    def forward(self, source, target, edge_index, edge_attr, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        out = self.propagate(edge_index, x=source, t=target, edge_attr=edge_embedding, size=size)
        return out
    def message(self, x_j, t_i, edge_attr):
        # Message from receiving node (t_i), sending node (x_j) and the edge.
        return self.nn(torch.cat([t_i, x_j, edge_attr], dim=-1))
    def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable/constraint graph (EdgeConv-style).

    Runs ``num_layers`` rounds of alternating variable->constraint and
    constraint->variable message passing, then scores every variable node from
    the concatenation of its embeddings across all rounds.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Encoders lifting the 2-dimensional raw node features to `hidden`.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Build the two layer stacks interleaved so parameter initialization
        # consumes the RNG in the same order as before.
        con_layers, var_layers = [], []
        for _ in range(num_layers):
            con_layers.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
            var_layers.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
        self.layers_con = torch.nn.ModuleList(con_layers)
        self.layers_var = torch.nn.ModuleList(var_layers)
        # Readout MLP over the jumping-knowledge concatenation.
        self.lin1 = Linear((num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        self.lin4 = Linear(hidden, 1) if regression else Linear(hidden, 2)
    def forward(self, data):
        # Initial embeddings from the raw bipartite-graph node features.
        xs_var = [self.var_node_encoder(data.var_node_features)]
        xs_con = [self.con_node_encoder(data.con_node_features)]
        n_var = xs_var[0].size(0)
        n_con = xs_con[0].size(0)
        # Alternate the two message-passing directions.
        for vc_layer, cv_layer in zip(self.layers_var, self.layers_con):
            xs_con.append(F.relu(vc_layer(xs_var[-1], xs_con[-1], data.edge_index_var,
                                          data.edge_features_var, (n_var, n_con))))
            xs_var.append(F.relu(cv_layer(xs_con[-1], xs_var[-1], data.edge_index_con,
                                          data.edge_features_con, (n_con, n_var))))
        # Per-variable readout over the concatenated embeddings.
        out = torch.cat(xs_var, dim=-1)
        for lin in (self.lin1, self.lin2, self.lin3):
            out = F.relu(lin(out))
        out = self.lin4(out)
        if self.regression:
            return out.view(-1)
        return F.log_softmax(out, dim=-1)
    def __repr__(self):
        return self.__class__.__name__
| 4,148 | 35.078261 | 107 | py |
mipGNN | mipGNN-master/gisp_generator/read_data.py | import networkx as nx
import torch_geometric
# pickle file containing the bipartite graph corresponding to a single GISP instance
# the last integer in the filename refers to the random seed that generated this instance
data_path = "DATA/test/C125.9.clq_SET2_0.75_100_0.pk"
# vcg is the Variable-Constraint bipartite graph
vcg = nx.read_gpickle(data_path)
# Each node of vcg has a bipartite attribute = 0 for "variable nodes", 1 for "constraint nodes"
# Each node/edge of vcg is identified by its name (a string)
# Variable nodes are named by the corresponding variable's name in the actual MIP instance; they have the following attributes, in addition to bipartite=0:
# 'bias', the label we want predict, continuous in [0,1]
# 'objcoeff', the objective function coefficient in the MIP, assuming minimization
# Constraint nodes are named cx, where x is an integer denoting the index of the constraint in the actual MIP instance; they have the following attributes, in addition to bipartite=1:
# 'rhs', the right-hand side value of the constraint, assuming <= constraints
# Edges of vcg have the following attributes:
# 'coeff', the coefficient of the variable in the constraint matrix of Ax <= b
# Example: accessing the biases of the variable nodes
print([node_data['bias'] for node, node_data in vcg.nodes(data=True) if node_data['bipartite']==0])
exit()
# NOTE: everything below is unreachable because of the exit() call above;
# remove the exit() to run the conversion.
# Converting the networkx graph into a torch geometric data object; untested
# https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.from_networkx
# Bug fix: from_networkx lives in torch_geometric.utils (as the link above
# documents), not in torch_geometric.data, which has no such attribute.
vcg_torch = torch_geometric.utils.from_networkx(vcg)
mipGNN | mipGNN-master/model_execution/inference.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import os
import os.path as osp
import numpy as np
import networkx as nx
import argparse
import io
import heapq
from pathlib import Path
import time
import math
import torch
from torch_geometric.data import (InMemoryDataset, Data)
from torch_geometric.data import DataLoader
from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet
import cplex
import callbacks_cplex
import utils
import predict
import pickle
sys.path.extend(["/home/khalile2/projects/def-khalile2/software/DiscreteNet"])
from discretenet.problems.gisp import GISPProblem
from discretenet.problems.fcmnf import FCMNFProblem
sys.path.extend(["../datagen"])
import bias_search
def rename_variables(var_names):
    """Rewrite CPLEX-style variable names in place and return the list.

    Replaces '(' -> '[', ')' -> ']' and '_' -> ',' in every entry of
    ``var_names``. The list is mutated in place and also returned for
    convenience.
    """
    # One translation table, built once outside the loop; str.translate does
    # all three single-character substitutions in a single pass per name.
    table = str.maketrans({'(': '[', ')': ']', '_': ','})
    for i, name in enumerate(var_names):
        var_names[i] = name.translate(table)
    return var_names
# direction=1: branch on most integer first
def set_cplex_priorities(instance_cpx, prediction, direction=1):
    """Set CPLEX branching priorities from per-variable bias predictions.

    instance_cpx: cplex.Cplex model whose variable indexing matches prediction.
    prediction: array of predicted biases in [0, 1], one entry per variable.
    direction: with direction=1 the most confident predictions receive the
        largest priority values (CPLEX branches on higher priorities first);
        direction=-1 reverses the ordering.
    """
    # score variables based on bias prediction
    # Confidence score max(p, 1 - p) in [0.5, 1]: distance from the
    # uninformative prediction 0.5.
    scores = np.max(((1-prediction), prediction), axis=0)
    # Variable indices sorted by ascending direction * score.
    priorities = np.argsort(direction * scores)
    # set priorities
    # reference: https://www.ibm.com/support/knowledgecenter/SSSA5P_12.7.1/ilog.odms.cplex.help/refpythoncplex/html/cplex._internal._subinterfaces.OrderInterface-class.html
    order_tuples = []
    var_names = instance_cpx.variables.get_names()
    cur_priority = 0
    # Walk variables in sorted order, assigning strictly increasing
    # priorities 1..n; the branching direction is always "up".
    for priority, var_cpxid in enumerate(priorities):
        var_name = var_names[var_cpxid]
        # print(scores[var_cpxid], scores[priorities[priority-1]])
        # if priority > 0 and scores[var_cpxid] > scores[priorities[priority-1]] + 1e-3:
        cur_priority += 1
        # print(cur_priority)
        order_tuples += [(var_name, cur_priority, instance_cpx.order.branch_direction.up)]
    # print(cur_priority)
    # z=1/0
    instance_cpx.order.set(order_tuples)
def mipeval(
    method,
    instance,
    graph='',
    instance_params='',
    model='',
    logfile='sys.stdout',
    barebones=0,
    cpx_emphasis=1,
    cpx_threads=1,
    cpx_tmp='./cpx_tmp',
    timelimit=60,
    memlimit=1024,
    freq_best=100,
    lb_threshold=5,
    num_mipstarts=10,
    mipstart_strategy='repair',
    branching_direction=1,
    zero_damping=1.0
    ):
    """Solve a MIP instance with CPLEX, optionally guided by GNN bias predictions.

    Parameters
    ----------
    method : list of str
        Solving strategy selectors. 'default' (any entry containing it in
        method[0]) runs plain CPLEX; otherwise a prediction is computed and
        method entries enable 'local_branching_approx'/'local_branching_exact',
        'branching_priorities', 'node_selection', 'primal_mipstart' and/or
        'primal_mipstart_only'.
    instance : str
        Path to the MIP instance file readable by cplex.Cplex().
    graph, instance_params : str
        Exactly one of these must be non-empty for non-default methods:
        either a pickled networkx VCG, or pickled generator parameters from
        which the VCG is rebuilt (GISP/FCMNF, chosen by substring of
        *instance*).
    model : str
        Path/name of the trained GNN used by predict.get_prediction.
    logfile : str
        'sys.stdout' to print, otherwise a path; CPLEX output is then
        captured in memory and a parsed summary is written at the end.
    barebones : int
        Non-zero disables cuts, heuristics and presolve.
    timelimit : float
        Overall budget in seconds; prediction time is subtracted from the
        CPLEX budget.
    Remaining arguments tune CPLEX emphasis/threads/memory and the
    individual ML-guided strategies.
    """
    print(locals())
    assert (len(method) >= 1)
    assert (cpx_emphasis >= 0 and cpx_emphasis <= 4)
    assert (timelimit > 0)

    """ Create CPLEX instance """
    instance_cpx = cplex.Cplex(instance)
    sense_str = instance_cpx.objective.sense[instance_cpx.objective.get_sense()]
    num_variables = instance_cpx.variables.get_num()
    num_constraints = instance_cpx.linear_constraints.get_num()
    start_time = instance_cpx.get_time()

    """ CPLEX output management """
    # When logging to a file, capture CPLEX's streams in StringIO buffers so
    # the incumbent trajectory can be parsed afterwards.
    logstring = sys.stdout
    summary_string = sys.stdout
    if logfile != 'sys.stdout':
        logstring = io.StringIO()
        summary_string = io.StringIO()
    instance_cpx.set_log_stream(logstring)
    instance_cpx.set_results_stream(logstring)
    instance_cpx.set_warning_stream(logstring)
    # instance_cpx.set_error_stream(logstring)
    instance_cpx.set_error_stream(open(os.devnull, 'w'))

    """ Set CPLEX parameters, if any """
    instance_cpx.parameters.timelimit.set(timelimit)
    instance_cpx.parameters.emphasis.mip.set(cpx_emphasis)
    instance_cpx.parameters.mip.display.set(3)
    instance_cpx.parameters.threads.set(cpx_threads)
    instance_cpx.parameters.workmem.set(memlimit)
    instance_cpx.parameters.mip.limits.treememory.set(20000)
    instance_cpx.parameters.mip.strategy.file.set(2)
    instance_cpx.parameters.workdir.set(cpx_tmp)
    if barebones:
        # Strip CPLEX down: no cut passes, no heuristics, no presolve.
        instance_cpx.parameters.mip.limits.cutpasses.set(-1)
        instance_cpx.parameters.mip.strategy.heuristicfreq.set(-1)
        instance_cpx.parameters.preprocessing.presolve.set(0)
        # DFS = 0, BEST-BOUND = 1 (default), BEST-EST = 2, BEST-EST-ALT = 3
        # instance_cpx.parameters.mip.strategy.nodeselect.set(3)

    time_rem_cplex = timelimit
    time_vcg = time.time()
    time_vcg_reading = 0
    time_pred = 0
    is_primal_mipstart = False

    """ Solve CPLEX instance with user-selected method """
    if 'default' not in method[0]:
        assert (len(graph) > 0 or len(instance_params) > 0) and len(model) > 0
        """ Read in the pickled graph and the trained model """
        time_vcg_reading = time.time()
        if len(graph) > 0:
            print("Reading VCG...")
            graph = nx.read_gpickle(graph)
            print("\t took %g secs." % (time.time()-time_vcg))
        elif len(instance_params) > 0:
            # Rebuild the variable-constraint graph from pickled generator
            # parameters instead of loading a pre-built VCG.
            parameters_path = instance_params
            with open(parameters_path, "rb") as fd:
                params = pickle.load(fd)
            # NOTE(review): problem class is inferred from the instance path
            # substring; if neither matches, loaded_problem is undefined below.
            if "gisp" in instance:
                loaded_problem = GISPProblem(**params)
            elif "fcmnf" in instance:
                loaded_problem = FCMNFProblem(**params)
            graph = loaded_problem.get_variable_constraint_graph()
            bias_search.labelVCG(graph, np.zeros(num_variables), instance_cpx)
            print("\t took %g secs." % (time.time()-time_vcg))
        time_vcg_reading = time.time() - time_vcg_reading
        print("Predicting...")
        timestamp_pred = time.time()
        prediction, node_to_varnode = predict.get_prediction(model_name=model, graph=graph)
        dict_varname_seqid = predict.get_variable_cpxid(graph, node_to_varnode, prediction)
        print("\t took %g secs." % (time.time()-timestamp_pred))
        time_pred = time.time() - timestamp_pred
        # print(prediction)
        # todo check dimensions of p
        # Prediction time counts against the overall budget.
        time_rem_cplex = timelimit - time_pred
        print("time_rem_cplex = %g" % time_rem_cplex)
        instance_cpx.parameters.timelimit.set(time_rem_cplex)
        # Reorder the prediction vector to match CPLEX's variable ordering.
        var_names = rename_variables(instance_cpx.variables.get_names())
        prediction_reord = [dict_varname_seqid[var_name][1] for var_name in var_names]
        prediction = np.array(prediction_reord)
        if len(method) == 1 and ('local_branching' in method[0]):
            # Build the local-branching distance expression around the rounded
            # prediction, restricted to confidently-predicted variables.
            pred_one_coeff = (prediction >= 0.9) * (-1)
            pred_zero_coeff = (prediction <= 0.1)
            num_ones = -np.sum(pred_one_coeff)
            coeffs = pred_one_coeff + pred_zero_coeff
            local_branching_coeffs = [list(range(len(prediction))), coeffs.tolist()]
            if method[0] == 'local_branching_approx':
                # Soft variant: add the distance cut as a single constraint.
                instance_cpx.linear_constraints.add(
                    lin_expr=[local_branching_coeffs],
                    senses=['L'],
                    rhs=[float(lb_threshold - num_ones)],
                    names=['local_branching'])
            elif method[0] == 'local_branching_exact':
                # Exact variant: branch on the distance constraint at the root.
                branch_cb = instance_cpx.register_callback(callbacks_cplex.branch_local_exact)
                branch_cb.coeffs = local_branching_coeffs
                branch_cb.threshold = lb_threshold - num_ones
                branch_cb.is_root = True
        if 'branching_priorities' in method:
            set_cplex_priorities(instance_cpx, prediction, branching_direction)
        if 'node_selection' in method:
            # score variables based on bias prediction
            scores = np.max(((1-prediction), prediction), axis=0)
            rounding = np.round(prediction)
            print(np.mean(scores), np.mean(rounding))
            print(np.argsort(prediction), np.sort(prediction)[:10], np.sort(prediction)[-10:])
            # Branch callback attaches per-node scores; node callback consumes
            # the shared node_priority heap to pick the next node.
            branch_cb = instance_cpx.register_callback(callbacks_cplex.branch_attach_data2)
            node_cb = instance_cpx.register_callback(callbacks_cplex.node_selection3)
            branch_cb.scoring_function = 'sum' #'estimate'
            branch_cb.scores = scores
            branch_cb.rounding = rounding
            branch_cb.zero_damping = zero_damping
            node_cb.last_best = 0
            node_cb.freq_best = freq_best
            node_priority = []
            branch_cb.node_priority = node_priority
            node_cb.node_priority = node_priority
            branch_cb.time = 0
            node_cb.time = 0
        if ('primal_mipstart' in method) or ('primal_mipstart_only' in method):
            is_primal_mipstart = True
            if not barebones or 'primal_mipstart_only' in method:
                # Disable cuts/heuristics/presolve while probing MIP starts.
                instance_cpx.parameters.mip.limits.cutpasses.set(-1)
                instance_cpx.parameters.mip.strategy.heuristicfreq.set(-1)
                instance_cpx.parameters.preprocessing.presolve.set(0)
            mipstart_string = sys.stdout if logfile == "sys.stdout" else io.StringIO()
            #frac_variables = [0.001*(1.5**i) for i in range(18)] #[0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
            #frac_variables = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
            #frac_variables = np.flip(np.linspace(0, 1, num=num_mipstarts+1))[:-1]
            #print(frac_variables)
            #threshold_set = np.minimum(prediction, 1-prediction)
            #threshold_set = np.sort(threshold_set)#[:mipstart_numthresholds]
            #threshold_set = [threshold_set[max([0, int(math.ceil(frac_variables[i]*num_variables)) - 1])] for i in range(len(frac_variables))]
            # Geometric confidence thresholds, tried from loosest to tightest.
            # NOTE(review): num_mipstarts is not used here — the count is the
            # hard-coded range(6); confirm whether it should drive this set.
            threshold_set = [0.01*(2**i) for i in range(6)]
            threshold_set.reverse()
            threshold_set = np.clip(threshold_set, a_min=0, a_max=0.5)
            print("threshold_set = ", threshold_set)
            if mipstart_strategy == 'repair':
                mipstart_strategy_int = instance_cpx.MIP_starts.effort_level.repair
            elif mipstart_strategy == 'solve_MIP':
                mipstart_strategy_int = instance_cpx.MIP_starts.effort_level.solve_MIP
            else:
                print("invalid mipstart_strategy %s" % mipstart_strategy)
                exit()
            best_objval_mipstart = -math.inf if sense_str == 'maximize' else math.inf
            for idx, threshold in enumerate(threshold_set):
                time_rem_cplex = timelimit - time_pred #(time.time() - time_vcg)
                if time_rem_cplex <= 0:
                    break
                # Fix only the variables predicted with confidence >= 1-threshold.
                indices_integer = np.where((prediction >= 1-threshold) | (prediction <= threshold))[0]
                print(idx, threshold, len(indices_integer), len(prediction))
                if len(indices_integer) == 0:
                    continue
                # Node limit 0: only process the MIP start, do not branch yet.
                instance_cpx.parameters.mip.display.set(0)
                instance_cpx.parameters.mip.limits.nodes.set(0)
                print("time_rem_cplex = %g" % time_rem_cplex)
                instance_cpx.parameters.timelimit.set(time_rem_cplex)
                instance_cpx.MIP_starts.add(
                    cplex.SparsePair(
                        ind=indices_integer.tolist(),
                        val=np.round(prediction[indices_integer]).tolist()),
                    mipstart_strategy_int)
                instance_cpx.solve()
                instance_cpx.MIP_starts.delete()
                if instance_cpx.solution.is_primal_feasible(): #and instance_cpx.solution.get_objective_value() > best_objval_mipstart:
                    is_sol_better = (instance_cpx.solution.get_objective_value() > best_objval_mipstart) if sense_str == 'maximize' else (instance_cpx.solution.get_objective_value() < best_objval_mipstart)
                    if not is_sol_better:
                        continue
                    best_objval_mipstart = instance_cpx.solution.get_objective_value()
                    best_time = time.time() - time_vcg
                    incb_str_cur = ("Found incumbent of value %g after %g sec. mipstart %d %g %g\n" % (best_objval_mipstart, best_time, len(indices_integer), threshold, len(indices_integer)/num_variables))
                    print(incb_str_cur)
                    mipstart_string.write(incb_str_cur)#"Found incumbent of value %g after %g sec. mipstart %d %g %g\n" % (best_objval_mipstart, best_time, len(indices_integer), threshold))
                instance_cpx.parameters.mip.display.set(3)
            if not barebones and not 'primal_mipstart_only' in method:
                # Restore default-ish cut/heuristic/presolve behavior for the
                # subsequent full solve.
                instance_cpx.parameters.mip.limits.cutpasses.set(0)
                instance_cpx.parameters.mip.strategy.heuristicfreq.set(0)
                instance_cpx.parameters.preprocessing.presolve.set(1)
            if 'primal_mipstart_only' not in method:
                # Re-enable branching for the final solve.
                instance_cpx.parameters.mip.limits.nodes.set(1e9)
    elif method[0] == 'default_emptycb':
        # Default CPLEX but with a no-op branch callback (to measure callback
        # overhead in isolation).
        branch_cb = instance_cpx.register_callback(callbacks_cplex.branch_empty)

    time_rem_cplex = timelimit - time_pred #(time.time() - time_vcg)
    print("time_rem_cplex = %g" % time_rem_cplex)
    if time_rem_cplex > 0:
        instance_cpx.parameters.timelimit.set(time_rem_cplex)
        # todo: consider runseeds
        # https://www.ibm.com/support/knowledgecenter/SSSA5P_12.9.0/ilog.odms.cplex.help/refpythoncplex/html/cplex.Cplex-class.html?view=kc#runseeds
        instance_cpx.solve()
    end_time = instance_cpx.get_time()

    """ Get solving performance statistics """
    incumbent_str = ''
    cplex_status = instance_cpx.solution.get_status_string()
    best_bound = instance_cpx.solution.MIP.get_best_objective()
    num_nodes = instance_cpx.solution.progress.get_num_nodes_processed()
    total_time = end_time - start_time
    instance_name = os.path.splitext(os.path.basename(instance))[0]
    best_objval, gap = math.inf, math.inf
    if instance_cpx.solution.is_primal_feasible():
        best_objval = instance_cpx.solution.get_objective_value()
        gap = instance_cpx.solution.MIP.get_mip_relative_gap()
    # CSV summary line consumed by downstream result parsers.
    summary_string.write('solving stats,%s,%g,%g,%g,%g,%i,%g,%s,%i,%i,%g,%g\n' % (
        cplex_status,
        best_objval,
        best_bound,
        gap,
        total_time,
        num_nodes,
        timelimit - time_rem_cplex,
        instance_name,
        num_variables,
        num_constraints,
        time_vcg_reading,
        time_pred))
    if logfile != 'sys.stdout':
        if instance_cpx.solution.is_primal_feasible():
            incumbent_str = ''
            if is_primal_mipstart:
                incumbent_str += utils.parse_cplex_log(mipstart_string.getvalue(), time_offset=time_pred)
            incumbent_str += utils.parse_cplex_log(logstring.getvalue(), time_offset=time_pred)
            print(incumbent_str)
            summary_string.write(incumbent_str)
        summary_string = summary_string.getvalue()
        with open(logfile, 'w') as logfile:
            logfile.write(summary_string)
if __name__ == '__main__':
    # Parse command-line arguments and run a single evaluation.
    cli = argparse.ArgumentParser()
    # Core run configuration.
    cli.add_argument('-method', nargs='+', type=str, required=True)
    cli.add_argument('-instance', type=str)
    cli.add_argument('-graph', type=str, default='')
    cli.add_argument('-instance_params', type=str, default='')
    cli.add_argument('-model', type=str, default="../gnn_models/EdgeConv/trained_p_hat300-2")
    # CPLEX solver configuration.
    cli.add_argument('-cpx_emphasis', type=int, default=1)
    cli.add_argument('-cpx_threads', type=int, default=1)
    cli.add_argument('-cpx_tmp', type=str, default='./cpx_tmp/')
    cli.add_argument('-barebones', type=int, default=0)
    cli.add_argument('-timelimit', type=float, default=60)
    cli.add_argument('-memlimit', type=float, default=1024)
    cli.add_argument('-logfile', type=str, default='sys.stdout')
    # Parameters for node selection.
    cli.add_argument('-freq_best', type=int, default=100)
    cli.add_argument('-zero_damping', type=float, default=1.0)
    # Parameters for exact local branching.
    cli.add_argument('-lb_threshold', type=int, default=5)
    # Parameters for the primal-heuristic MIP start.
    cli.add_argument('-num_mipstarts', type=int, default=6)
    cli.add_argument('-mipstart_strategy', type=str, default="repair")
    # Parameters for branching priorities.
    cli.add_argument('-branching_direction', type=int, default=1)
    parsed = cli.parse_args()
    print(parsed)
    mipeval(**vars(parsed))
| 16,424 | 40.582278 | 205 | py |
mipGNN | mipGNN-master/model_execution/spo_train.py | # todo: check CPLEX status
# todo: solve LP instead of MIP
import os
import sys
import numpy as np
import argparse
from pathlib import Path
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn import svm
from sklearn import neural_network
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
import pandas as pd
import pickle
import time
import cplex
import concurrent.futures
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import spo_torch
from spo_torch import SPONet, SPOLoss
import spo_utils
def build_dict(mode, data_filenames, data_full, args):
    """Assemble per-instance structures for SPO training or validation.

    For each instance name, loads its LP model into CPLEX, records the true
    objective coefficients, and (mode-dependent) the known optimal solution
    ('train', from the .npz solution pool) or just the optimal value
    ('validation', from the .sol file).  Instances with missing files are
    dropped, and their feature rows are deleted from *data_full* so that all
    returned lists stay index-aligned.

    Parameters
    ----------
    mode : str
        'train' or 'validation'; selects which solution files are required.
    data_filenames : list of str
        Instance names (relative, without extension) under ../gisp_generator.
    data_full : list of torch.Tensor
        Per-instance feature matrices; column 0 is the model-indicator flag.
        Mutated in place (rows for skipped instances are removed).
    args : argparse.Namespace
        Supplies nn_cpx_timelimit and nn_cpx_threads for the CPLEX instances.

    Returns
    -------
    dict with keys 'sol_true', 'objval_true', 'model_indices',
    'instance_cpx', 'coeffs_true', 'data', 'num_instances'.
    """
    sol_true = []
    objval_true = []
    instance_cpx = []
    coeffs_true = []
    model_indices = []
    to_delete_data = []
    for instance_idx, instance in enumerate(data_filenames):
        file_sol = "../gisp_generator/SOL/%s.sol" % instance
        file_npz = "../gisp_generator/SOL/%s.npz" % instance
        file_lp = "../gisp_generator/LP/%s.lp" % instance
        # Skip instances whose required files are missing; remember the index
        # so the matching feature tensor can be deleted afterwards.
        if not os.path.isfile(file_lp) or (mode == 'train' and not os.path.isfile(file_npz)) or (mode == 'validation' and not os.path.isfile(file_sol)):
            to_delete_data += [instance_idx]
            continue
        # get true opt
        if mode == 'train':
            # Solution pool rows are [objval, x_1, ..., x_n]; row 0 is the best.
            sol_pool = np.load(file_npz)['solutions']
            sol_true += [torch.unsqueeze(torch.tensor(sol_pool[0, 1:]), 1)]
            objval_true += [torch.tensor([sol_pool[0,0]])]
        elif mode == 'validation':
            # No solution vector needed for validation, only the optimal value.
            sol_true += [torch.empty(0)]
            objval_true += [torch.tensor([spo_utils.read_optval(file_sol)])]
        instance_cpx += [cplex.Cplex(file_lp)]
        coeffs_true += [torch.unsqueeze(torch.tensor(instance_cpx[-1].objective.get_linear()), 1)]
        # Boolean masks splitting this instance's rows between the two
        # regression models (indicator column == 0 vs == 1).
        model_indices += [[data_full[instance_idx][:,0] == 0, data_full[instance_idx][:,0] == 1]]
        # Normalize everything to minimization; flip coefficients and optimum
        # accordingly so losses are comparable across instances.
        if instance_cpx[-1].objective.sense[instance_cpx[-1].objective.get_sense()] == 'maximize':
            instance_cpx[-1].objective.set_sense(instance_cpx[-1].objective.sense.minimize)
            coeffs_true[-1] *= -1
            objval_true[-1] *= -1
        spo_utils.disable_output_cpx(instance_cpx[-1])
        instance_cpx[-1].parameters.timelimit.set(args.nn_cpx_timelimit)
        instance_cpx[-1].parameters.emphasis.mip.set(1)
        instance_cpx[-1].parameters.threads.set(args.nn_cpx_threads)
    dict_all = {}
    dict_all['sol_true'] = sol_true
    dict_all['objval_true'] = objval_true
    dict_all['model_indices'] = model_indices
    dict_all['instance_cpx'] = instance_cpx
    dict_all['coeffs_true'] = coeffs_true
    # Deleting shifts later indices down by one per prior deletion, hence the
    # idx - counter correction.
    for counter, idx in enumerate(to_delete_data):
        del data_full[idx - counter]
    dict_all['data'] = data_full
    dict_all['num_instances'] = len(sol_true)
    return dict_all
def combine_datasets(directory, operation='combine', poly_degree=1):
    """Load every .csv file found directly in *directory*.

    operation='combine' stacks all rows into one numpy array;
    operation='list' returns one torch tensor per file (printing each size).
    With poly_degree > 1, the middle feature columns are expanded with
    sklearn's PolynomialFeatures (column 0 = model indicator and the last
    column = target are passed through unchanged).

    Returns (data, filenames) where each filename is the entry path sliced
    after its '/SPO_DATA/' segment with the '.csv' extension removed.
    An OSError is swallowed when the directory exists (e.g. transient scandir
    failure) and re-raised otherwise.
    """
    data_full = []
    filenames = []
    try:
        if poly_degree > 1:
            poly = PolynomialFeatures(poly_degree, include_bias=False)
        for entry in os.scandir(directory):
            if not entry.name.endswith('.csv'):
                continue
            data_cur = pd.read_csv(entry.path, sep=',', header=None).values
            if poly_degree > 1:
                expanded = poly.fit_transform(data_cur[:, 1:-1])
                data_cur = np.concatenate(
                    (data_cur[:, 0:1],
                     expanded,
                     np.expand_dims(data_cur[:, -1], axis=1)), axis=1)
            if operation == 'combine':
                if not filenames:
                    # First file fixes the column count of the stacked array.
                    data_full = np.empty((0, data_cur.shape[1]))
                data_full = np.append(data_full, data_cur, axis=0)
            elif operation == 'list':
                data_full.append(torch.tensor(data_cur))
                print(data_full[-1].size())
            filenames.append(entry.path[entry.path.find('/SPO_DATA/') + 10:-4])
    except OSError:
        if not os.path.exists(directory):
            raise
    return data_full, filenames
def test_model(model, X, y):
    """Evaluate a fitted regression model on (X, y).

    Prints the mean squared error and the coefficient of determination, and
    returns (prediction_time_seconds, mse, r2).
    """
    # Make predictions using the testing set, timing only the predict call.
    started = time.time()
    predictions = model.predict(X)
    elapsed = time.time() - started
    # The mean squared error
    mse = mean_squared_error(y, predictions)
    print('Mean squared error: %.2f' % mse)
    # The coefficient of determination: 1 is perfect prediction
    r2 = r2_score(y, predictions)
    print('Coefficient of determination: %.2f' % r2)
    return elapsed, mse, r2
def main(args):
    """Train cost-prediction models for SPO (smart "predict, then optimize").

    Two modes, selected by -method:
      * '2stage': fit one sklearn regressor per model-indicator value on the
        aggregated feature CSVs, report train/validation MSE and R^2, and
        pickle each fitted model plus a results CSV under SPO_MODELS/.
      * 'spo': train a pair of signed SPONet networks end-to-end with the SPO+
        subgradient loss, re-solving each MIP instance under the perturbed
        objective every step; checkpoints the best validation model and logs
        to TensorBoard.

    Parameters
    ----------
    args : list of str
        Raw command-line arguments (without the program name), parsed by the
        argparse parser defined below.
    """
    # based on this: https://stackoverflow.com/a/61879723
    # solveIP_obj_local must be module-global so it can be pickled by
    # ProcessPoolExecutor workers.
    global solveIP_obj_local

    """ Parse arguments """
    parser = argparse.ArgumentParser()
    parser.add_argument("-method", type=str, default='2stage')
    parser.add_argument("-data_train_dir", type=str)
    parser.add_argument("-data_validation_dir", type=str, default='')
    parser.add_argument("-output_dir", type=str)
    parser.add_argument("-single_model", type=int, default=0)
    parser.add_argument("-model_type", type=str, default='linear')
    parser.add_argument("-poly_degree", type=int, default=2)
    parser.add_argument("-ridge_reg", type=float, default=1.0)
    # Training parameters
    parser.add_argument("-nn_epochs", type=int, default=1000)
    parser.add_argument("-nn_lr_init", type=float, default=1e-5)
    parser.add_argument("-nn_lr_decay", type=int, default=1)
    parser.add_argument("-nn_reg", type=float, default=1e-5)
    parser.add_argument("-nn_termination", type=float, default=0.05)
    parser.add_argument("-nn_patience", type=int, default=200)
    parser.add_argument("-nn_batchsize", type=int, default=1)
    parser.add_argument("-nn_poolsize", type=int, default=1)
    parser.add_argument("-nn_sgd_nesterov", type=int, default=0)
    parser.add_argument("-nn_sgd_momentum", type=float, default=0.0)
    # Tensorboard parameters
    parser.add_argument("-nn_tb_dir", type=str, default='SPO_TENSORBOARD')
    # Architecture parameters
    parser.add_argument("-nn_depth", type=int, default=0)
    parser.add_argument("-nn_width", type=int, default=100)
    # CPLEX parameters
    parser.add_argument("-nn_cpx_timelimit", type=float, default=60)
    parser.add_argument("-nn_cpx_threads", type=int, default=1)
    # Warmstart parameters
    parser.add_argument("-nn_warmstart_dir", type=str, default='')
    parser.add_argument("-nn_warmstart_prefix", type=str, default='')
    # Data parameters
    parser.add_argument("-nn_poly_degree", type=int, default=1)
    args = parser.parse_args(args)
    # args = parser.parse_args()
    print(args)
    # output directories
    output_dir = "SPO_MODELS/" + args.output_dir
    try:
        os.makedirs(output_dir)
    except OSError:
        if not os.path.exists(output_dir):
            raise
    time_dataread, time_train, time_validation = 0.0, 0.0, 0.0
    mse_train, r2_train = 0.0, 0.0
    mse_validation, r2_validation = 0.0, 0.0
    validation_bool = len(args.data_validation_dir) > 0
    if args.method == '2stage':
        # aggregate data in args.data_train_dir
        time_dataread = time.time()
        data_train_full, _ = combine_datasets(args.data_train_dir)
        # NOTE(review): combine_datasets (default operation) returns a 2-D
        # numpy array here, so data_train_full[0][0,:] looks like it would
        # raise IndexError — verify (perhaps meant data_train_full[0,:]).
        num_features = len(data_train_full[0][0,:]) - 2
        if args.single_model:
            # Collapse both indicator classes into a single model.
            data_train_full[:,0] = 0
        if validation_bool:
            data_validation_full, _ = combine_datasets(args.data_validation_dir)
            if args.single_model:
                data_validation_full[:,0] = 0
        model_indicators = np.unique(data_train_full[:,0])
        time_dataread = time.time() - time_dataread
        # One independent regressor per indicator value.
        for indicator in model_indicators:
            rows_indicator = np.where(data_train_full[:,0] == indicator)[0]
            X_train, y_train = data_train_full[rows_indicator,1:num_features+1], data_train_full[rows_indicator,-1]
            # Create linear regression object
            model_filename = args.model_type
            if args.model_type == 'linear':
                regr = linear_model.LinearRegression()
            elif args.model_type == 'svm_poly':
                regr = svm.SVR(kernel='poly', degree=args.poly_degree)
                # NOTE(review): args.poly_degree is an int, so '_' +
                # args.poly_degree would raise TypeError — confirm (likely
                # meant '_%d' % args.poly_degree).
                model_filename += '_' + args.poly_degree
            elif args.model_type == 'ridge_poly':
                regr = make_pipeline(PolynomialFeatures(args.poly_degree), Ridge(alpha=args.ridge_reg))
                model_filename += '_%d_%g' % (args.poly_degree, args.ridge_reg)
            elif args.model_type == 'mlp':
                regr = neural_network.MLPRegressor((10,10))
            # Train the model using the training sets
            time_train = time.time()
            regr.fit(X_train, y_train)
            time_train = time.time() - time_train
            _, mse_train, r2_train = test_model(regr, X_train, y_train)
            # Write model to file
            model_filename = '%s/%s_%d.pk' % (output_dir, model_filename, indicator)
            pickle.dump(regr, open(model_filename, 'wb'))
            if validation_bool:
                rows_indicator = np.where(data_validation_full[:,0] == indicator)[0]
                X_validation, y_validation = data_validation_full[rows_indicator,1:num_features+1], data_validation_full[rows_indicator,-1]
                time_validation, mse_validation, r2_validation = test_model(regr, X_validation, y_validation)
            # CSV summary row for this indicator's model.
            results_str = "%d,%s,%g,%g,%g,%g,%g,%g,%g,%s" % (
                indicator,
                args.model_type,
                time_train,
                mse_train,
                r2_train,
                time_validation,
                mse_validation,
                r2_validation,
                time_dataread,
                model_filename)
            print(results_str)
            results_filename = '%s/%s_%d_%d.csv' % (output_dir, args.model_type, args.poly_degree, indicator)
            with open(results_filename, "w+") as results_file:
                results_file.write(results_str)
    elif args.method == 'spo':
        # Fixed seeds for reproducible data shuffles and weight init.
        np.random.seed(0)
        torch.manual_seed(0)
        warmstart_bool = int(args.nn_warmstart_dir != '' and args.nn_warmstart_prefix != '')
        stages = ['train']
        num_epochs = args.nn_epochs
        model_indicators = [0,1]
        dtype = torch.double
        device = torch.device("cpu")
        torch.set_default_dtype(dtype)
        # Encode the full hyper-parameter configuration into the filenames.
        filename_noext = 'depth_%d_width_%d_reg_%g_polydeg_%d_lrinit_%g_lrdecay_%d_sgdnesterov_%d_sgdmom_%g_batchsize_%d_warmst_%d' % (
            args.nn_depth,
            args.nn_width,
            args.nn_reg,
            args.nn_poly_degree,
            args.nn_lr_init,
            args.nn_lr_decay,
            args.nn_sgd_nesterov,
            args.nn_sgd_momentum,
            args.nn_batchsize,
            warmstart_bool)
        model_filename = '%s/%s.pt' % (output_dir, filename_noext)
        print(filename_noext)
        # Tensorboard setup
        tb_dirname = '%s/%s/%s' % (args.nn_tb_dir, args.output_dir, filename_noext)
        try:
            os.makedirs(tb_dirname)
        except OSError:
            if not os.path.exists(tb_dirname):
                raise
        tb_writer = SummaryWriter(tb_dirname)
        meta_dict = {}
        data_train_full, data_train_filenames = combine_datasets(args.data_train_dir, operation='list', poly_degree=args.nn_poly_degree)
        meta_dict['train'] = build_dict('train', data_train_filenames, data_train_full, args)
        # Column 0 is the model indicator, last column the target.
        num_features = len(data_train_full[0][0,:]) - 2
        if validation_bool:
            data_validation_full, data_validation_filenames = combine_datasets(args.data_validation_dir, operation='list', poly_degree=args.nn_poly_degree)
            meta_dict['validation'] = build_dict('validation', data_validation_filenames, data_validation_full, args)
            stages += ['validation']
        loss_fn = SPOLoss.apply
        depth, width = args.nn_depth, args.nn_width
        # Two networks: one constrained to negative outputs (indicator 0),
        # one to positive outputs (indicator 1).
        models = [SPONet(num_features, depth, width, relu_sign=-1), SPONet(num_features, depth, width, relu_sign=1)]
        print(models[1].layers[0].weight.data)
        if warmstart_bool and depth == 0:
            # Warm-start the linear (depth 0) networks from pickled sklearn
            # linear models trained with the 2stage method.
            models_pretrained = spo_utils.read_sklearn_model(args.nn_warmstart_dir, args.nn_warmstart_prefix)
            assert(len(models_pretrained) == len(model_indicators))
            for indicator in model_indicators:
                models[indicator].layers[0].weight.data = torch.unsqueeze(torch.tensor(models_pretrained[indicator].coef_), 0)
                models[indicator].layers[0].bias.data = torch.unsqueeze(torch.tensor(models_pretrained[indicator].intercept_), 0)
        # One optimizer over the parameters of both networks.
        optimizer = optim.SGD(
            list(models[0].parameters())+list(models[1].parameters()),
            lr=args.nn_lr_init,
            nesterov=bool(args.nn_sgd_nesterov),
            momentum=args.nn_sgd_momentum)
        # 1/sqrt(epoch) learning-rate schedule (applied when nn_lr_decay).
        lmbda = lambda epoch: args.nn_lr_init/(np.sqrt(epoch+1))
        running_loss_best, running_loss_withreg_best, epoch_best = np.inf, np.inf, -1
        time_solve = 0.0
        for epoch in range(num_epochs):
            sys.stdout.flush()
            for stage in stages:
                if stage == 'validation':
                    for model in models:
                        model.eval()
                    # Validation is evaluated in a single full batch.
                    batchsize = meta_dict['validation']['num_instances']
                if stage == 'train':
                    print('---------------')
                    print("Learning rate =", optimizer.param_groups[0]['lr'])
                    if args.nn_lr_decay:
                        optimizer.param_groups[0]['lr'] = lmbda(epoch)
                    batchsize = args.nn_batchsize
                # Random instance order, split into mini-batches.
                perm = torch.tensor(np.random.permutation(range(meta_dict[stage]['num_instances'])))
                batches = torch.split(perm, batchsize)
                print(batches)
                running_loss, running_loss_withreg = 0.0, 0.0
                for batch_cur_ in batches:
                    optimizer.zero_grad()
                    batch_cur = batch_cur_.tolist()
                    print(batch_cur)
                    coeffs = []
                    # Predict objective coefficients for every instance in the
                    # batch; rows are routed to the network matching their
                    # indicator and concatenated back in (0-rows, 1-rows) order.
                    for instance_idx_ in batch_cur:
                        instance_idx = int(instance_idx_)
                        # print(instance_idx)
                        indices = meta_dict[stage]['model_indices'][instance_idx]
                        coeffs_cur = [models[0](meta_dict[stage]['data'][instance_idx][indices[0], 1:num_features+1]), models[1](meta_dict[stage]['data'][instance_idx][indices[1], 1:num_features+1])]
                        coeffs += [torch.cat((coeffs_cur[0], coeffs_cur[1]), 0)]
                    # Solve the instance under the SPO+ perturbed objective
                    # (2*prediction - truth), normalized for numerical safety.
                    def solveIP_obj_local(idx):
                        instance_idx = batch_cur[idx]
                        obj_subgradient = (2*coeffs[idx] - meta_dict[stage]['coeffs_true'][instance_idx]) / (torch.max(torch.abs(coeffs[idx])) + 1)
                        time_cur = time.time()
                        sol_spo_cur, _ = spo_torch.solveIP_obj(meta_dict[stage]['instance_cpx'][instance_idx], obj_subgradient)
                        time_cur = time.time() - time_cur
                        return (sol_spo_cur, time_cur)
                    # def solveIP_obj2(coeffs_cur, coeffs_true_cur, instance_cpx_cur):
                    #     obj_subgradient = (2*coeffs_cur - coeffs_true_cur / (torch.max(torch.abs(coeffs_cur)) + 1))
                    #     time_cur = time.time()
                    #     sol_spo_cur, _ = spo_torch.solveIP_obj(instance_cpx_cur, obj_subgradient)
                    #     time_cur = time.time() - time_cur
                    #     return (sol_spo_cur, time_cur)
                    # print([(coeffs[idx], meta_dict[stage]['coeffs_true'][batch_cur[idx]], meta_dict[stage]['instance_cpx'][batch_cur[idx]]) for idx in range(len(batch_cur))])
                    ret_vals = []
                    if args.nn_poolsize > 1 and args.nn_batchsize > 1:
                        # Solve the batch's MIPs in parallel worker processes.
                        with concurrent.futures.ProcessPoolExecutor(max_workers=min([args.nn_poolsize, len(batch_cur)])) as executor:
                            ret_vals = executor.map(solveIP_obj_local, range(len(batch_cur)))
                        # with mp.Pool(args.nn_poolsize) as p:
                        #     ret_vals = p.map(solveIP_obj, range(len(batch_cur)))
                        #     ret_vals = p.starmap(solveIP_obj2,
                        #         [(coeffs[idx], meta_dict[stage]['coeffs_true'][batch_cur[idx]], meta_dict[stage]['instance_cpx'][batch_cur[idx]]) for idx in range(len(batch_cur))])
                    else:
                        for idx in range(len(batch_cur)):
                            sol_spo_cur, time_cur = solveIP_obj_local(idx)
                            ret_vals += [(sol_spo_cur, time_cur)]
                    for idx, ret in enumerate(ret_vals):
                        instance_idx = batch_cur[idx]
                        loss_val_cur = loss_fn(
                            coeffs[idx],
                            meta_dict[stage]['coeffs_true'][instance_idx],
                            ret[0],
                            meta_dict[stage]['sol_true'][instance_idx])
                        # SPO regret of this instance (true cost of the solution
                        # found under the perturbed objective minus the optimum).
                        loss_spo_cur = float(loss_val_cur - meta_dict[stage]['objval_true'][instance_idx])
                        loss_spo_cur_scaled = loss_spo_cur / ((1e-9 + np.abs(meta_dict[stage]['objval_true'][instance_idx])) * meta_dict[stage]['num_instances'])
                        print("%s -- SPO loss [%d-%d] = %g = (%g) - (%g)" % (stage, epoch, instance_idx, loss_spo_cur, loss_val_cur, meta_dict[stage]['objval_true'][instance_idx]))
                        # L1 regularization on the predicted coefficients.
                        loss_val_cur += args.nn_reg * torch.norm(coeffs[idx], p=1)
                        running_loss_withreg += loss_val_cur.data / meta_dict[stage]['num_instances']
                        running_loss += loss_spo_cur_scaled
                        time_solve += ret[1] if stage == 'train' else 0
                        # Per-instance backward; gradients accumulate across the
                        # batch, each scaled by 1/batchsize.
                        loss_val_cur /= 1.0*len(batch_cur)
                        if stage == 'train':
                            loss_val_cur.backward()
                    if stage == 'train':
                        optimizer.step()
                print('%s loss' % stage, running_loss, epoch)
                tb_writer.add_scalar('%s loss' % stage, running_loss, epoch)
                if stage == 'train':
                    tb_writer.add_scalar('learning rate', optimizer.param_groups[0]['lr'], epoch)
                    tb_writer.add_scalar('solver time', time_solve, epoch)
                    tb_writer.add_histogram('model0 gradients', models[0].layers[0].weight.grad, epoch)
                    tb_writer.add_histogram('model1 gradients', models[1].layers[0].weight.grad, epoch)
                    tb_writer.add_histogram('model0 weights', models[0].layers[0].weight.data, epoch)
                    tb_writer.add_histogram('model1 weights', models[1].layers[0].weight.data, epoch)
                    print(models[0].layers[0].weight.grad)
                if stage == 'validation':
                    # Checkpoint on improved validation loss.
                    if running_loss < running_loss_best:# or running_loss_withreg < running_loss_withreg_best:
                        running_loss_best = running_loss
                        running_loss_withreg_best = running_loss_withreg
                        epoch_best = epoch
                        torch.save({
                            'epoch': epoch,
                            'num_features': num_features,
                            'nn_poly_degree': args.nn_poly_degree,
                            'depth': depth,
                            'width': width,
                            'model0_state_dict': models[0].state_dict(),
                            'model1_state_dict': models[1].state_dict(),
                            'loss_spo': running_loss_best,
                            'loss_spo_withreg': running_loss_withreg_best
                        }, model_filename)
                        print("New incumbent: %g, %g, %d" % (running_loss_best, running_loss_withreg_best, epoch))
                    # Early stopping: patience exhausted or target loss reached.
                    if epoch - epoch_best >= args.nn_patience or running_loss_best <= args.nn_termination:
                        print("Early termination!")
                        print("epoch - epoch_best =", epoch - epoch_best)
                        print("running_loss_best =", running_loss_best)
                        exit()
if __name__ == '__main__':
    # CLI entry point: forward command-line arguments (minus the program name).
    main(sys.argv[1:])
| 20,676 | 41.370902 | 199 | py |
mipGNN | mipGNN-master/model_execution/spo_torch.py | import torch
import torch.nn as nn
import cplex
import os
import numpy as np
import argparse
def solveIP(instance_cpx):
    """Solve the given CPLEX model as-is.

    Returns (solution, optval): the incumbent variable values as a numpy
    array and the corresponding objective value.
    """
    instance_cpx.solve()
    best_value = instance_cpx.solution.get_objective_value()
    incumbent = np.array(instance_cpx.solution.get_values())
    return incumbent, best_value
def solveIP_obj(instance_cpx, objective_new):
    """Replace the model's linear objective with *objective_new* and solve.

    objective_new is any indexable of coefficients (list, numpy array, or a
    1-element-per-row torch tensor); coefficient i is assigned to variable i.
    Returns (solution, optval) like solveIP.
    """
    replacement = []
    for var_idx, coeff in enumerate(objective_new):
        replacement.append((var_idx, float(coeff)))
    instance_cpx.objective.set_linear(replacement)
    instance_cpx.solve()
    best_value = instance_cpx.solution.get_objective_value()
    incumbent = np.array(instance_cpx.solution.get_values())
    return incumbent, best_value
class SPOLoss(torch.autograd.Function):
    """Custom autograd node for the SPO+ (smart predict-then-optimize) loss.

    The caller is expected to have already solved the instance under the
    perturbed objective (2*prediction - truth) and to pass that solution in
    as *sol_spo*. forward then evaluates sol_spo under the TRUE cost vector;
    backward propagates the SPO+ subgradient 2*(sol_true - sol_spo) to the
    predicted coefficient vector.
    """

    @staticmethod
    def forward(ctx, coeffs_predictions, coeffs_true, sol_spo, sol_true):
        """Return the true-cost value of *sol_spo* as a 1-element tensor.

        coeffs_predictions participates only through autograd bookkeeping
        (its gradient is produced in backward); sol_spo is a 1-D numpy
        array, coeffs_true/sol_true are column tensors of shape (n, 1).
        """
        spo_solution = torch.unsqueeze(torch.tensor(sol_spo), 1)
        # Stash what backward needs to form the subgradient.
        ctx.save_for_backward(spo_solution, sol_true)
        true_cost = torch.dot(spo_solution[:, 0], coeffs_true[:, 0])
        return torch.tensor([true_cost])

    @staticmethod
    def backward(ctx, grad_output):
        """Propagate the SPO+ subgradient to the predicted coefficients.

        Only the first forward argument receives a gradient; the remaining
        three inputs are treated as constants (None).
        """
        spo_solution, sol_true = ctx.saved_tensors
        subgradient = 2 * (sol_true - spo_solution)
        return grad_output.clone() * subgradient, None, None, None
class SPONet(torch.nn.Module):
    """Small MLP whose output is constrained to a fixed sign.

    depth == 0 yields a single affine map num_features -> 1; otherwise the
    stack is Linear(num_features, width), depth-1 hidden Linear(width, width)
    layers, and a bias-free Linear(width, 1), with LeakyReLU between layers.
    The final activation relu_sign * |x| forces strictly signed outputs.
    """

    def __init__(self, num_features, depth, width, relu_sign):
        """Build the layer stack and record the output sign (+1 or -1)."""
        super().__init__()
        # Hidden layers are created first and the input layer is inserted at
        # position 0 afterwards, matching the original construction order
        # (and hence the RNG stream used for parameter initialization).
        self.layers = nn.ModuleList([nn.Linear(width, width) for i in range(depth-1)])
        first_out = 1 if depth == 0 else width
        self.layers.insert(0, nn.Linear(num_features, first_out))
        if depth > 0:
            self.layers.append(nn.Linear(width, 1, bias=False))
        self.relu_sign = relu_sign
        self.nonlinearity = nn.LeakyReLU()
        print(self.layers)

    def forward(self, x):
        """Apply the stack with LeakyReLU between layers, then sign the output."""
        last = len(self.layers) - 1
        for position, layer in enumerate(self.layers):
            x = layer(x)
            if position != last:
                x = self.nonlinearity(x)
        return self.relu_sign * torch.abs(x)
| 4,260 | 35.110169 | 123 | py |
mipGNN | mipGNN-master/model_execution/slurm_train.py | import spo_train
import spo_utils
import submitit
import random
from random import sample
# Hyper-parameter sweep launcher: submits spo_train runs to SLURM via submitit.
output_dir = 'spo_torch_polydeg2_warmstart_hypersearch'
num_cpus = 25
# Jobs log under log_<output_dir>; resource settings below apply to every job.
executor = submitit.AutoExecutor(folder="log_%s" % output_dir)
print(executor.which())
executor.update_parameters(
    additional_parameters={"account": "rrg-khalile2"},
    timeout_min=719,
    mem_gb=16,
    cpus_per_task=num_cpus)
#dict_allvals = {'-nn_depth': ['1','2','3'], '-nn_width': ['10','20','40', '80'], '-nn_lr_decay': ['0', '1'], '-nn_lr_init': ['1e-3', '5e-3'], '-nn_reg': ['0', '1'], '-nn_batchsize': ['5', '10', '20', '50', '100'], '-nn_sgd_nesterov': ['0', '1'], '-nn_sgd_momentum': ['0', '0.2', '0.4', '0.8']}
# Grid of spo_train CLI flag values; dict_product enumerates the cross product.
dict_allvals = {'-nn_warmstart_dir': ['SPO_MODELS/2stage/'], '-nn_warmstart_prefix': ['linear'], '-nn_poly_degree': ['1'], '-nn_depth': ['0'], '-nn_width': ['0'], '-nn_lr_decay': ['0', '1'], '-nn_lr_init': ['1e-3', '5e-3', '1e-2', '1e-1', '1e0'], '-nn_reg': ['1e-6', '1e-4', '1e-2', '0'], '-nn_batchsize': ['10'], '-nn_sgd_nesterov': ['1'], '-nn_sgd_momentum': ['0.2', '0.4', '0.8']}
configs = list(spo_utils.dict_product(dict_allvals))
print("total number of configurations =", len(configs))
# job = executor.submit(spo_train.main,
#     [
#         '-method', 'spo',
#         '-data_train_dir', '../gisp_generator/SPO_DATA/spo_gisp_er/150_150/alpha_0.75_numFeat_10_biasnodes_100_biasedges_10_halfwidth_0.5_polydeg_2/train',
#         '-data_validation_dir', '../gisp_generator/SPO_DATA/spo_gisp_er/150_150/alpha_0.75_numFeat_10_biasnodes_100_biasedges_10_halfwidth_0.5_polydeg_2/valid',
#         '-output_dir', 'spo_torch_valid_polydeg2',
#         '-nn_poly_degree', '1',
#         '-nn_depth', '2',
#         '-nn_width', '50',
#         '-nn_lr_decay', '0',
#         '-nn_lr_init', '1e-2',
#         '-nn_reg', '0',
#         '-nn_batchsize', '10',
#         '-nn_poolsize', '10'
#     ])
# Fixed seed so the sample of (at most 500) configurations is reproducible.
random.seed(0)
configs = sample(configs, min([500, len(configs)]))
for idx, config in enumerate(configs):
#if idx < 87:
# continue
print("config", idx)
arg_list = [
'-method', 'spo',
'-data_train_dir', '../gisp_generator/SPO_DATA/spo_gisp_er/150_150/alpha_0.75_numFeat_10_biasnodes_100_biasedges_10_halfwidth_0.5_polydeg_2/train',
'-data_validation_dir', '../gisp_generator/SPO_DATA/spo_gisp_er/150_150/alpha_0.75_numFeat_10_biasnodes_100_biasedges_10_halfwidth_0.5_polydeg_2/valid',
'-output_dir', output_dir,
'-nn_poolsize', str(num_cpus)
]
for arg, value in config.items():
arg_list += [arg, value]
print(arg_list)
while True:
try:
job = executor.submit(spo_train.main, arg_list)
break
except submitit.core.utils.FailedJobError:
continue
print(job.job_id) # ID of your job
#output = job.result() # waits for completion and returns output
| 2,949 | 39.410959 | 383 | py |
mipGNN | mipGNN-master/model_execution/predict.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import os
import os.path as osp
import numpy as np
import networkx as nx
from pathlib import Path
import torch
from torch_geometric.data import (InMemoryDataset, Data)
from torch_geometric.data import DataLoader
#from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet
# TODO: Uncomment for second model.
#from gnn_models.EdgeConv.mip_bipartite_class_test import SimpleNet
def get_prediction(model_name, graph, bias_threshold=0.00):
    """Load the trained GNN matching ``model_name`` and score ``graph``.

    The architecture is chosen from the model file-name prefix (ECS / EC_ /
    GINS / GIN_ / SGS / SG_, plus the Rand_ECS / "trained" aliases); an
    unknown prefix aborts the process.  Returns the per-variable probability
    of class 1 (numpy array) and the networkx-id -> variable-node-id map.
    """
    basename = os.path.basename(model_name)
    if basename.startswith(("ECS", "Rand_ECS")) or "trained" in model_name:
        from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet
    elif basename.startswith("EC_"):
        from gnn_models.EdgeConv.mip_bipartite_class import SimpleNet
    elif basename.startswith("GINS"):
        from gnn_models.GIN.mip_bipartite_simple_class import SimpleNet
    elif basename.startswith("GIN_"):
        from gnn_models.GIN.mip_bipartite_class import SimpleNet
    elif basename.startswith("SGS"):
        from gnn_models.Sage.mip_bipartite_simple_class import SimpleNet
    elif basename.startswith("SG_"):
        from gnn_models.Sage.mip_bipartite_class import SimpleNet
    else:
        print("Model name does match any model!")
        exit()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = SimpleNet(64, aggr="mean", num_layers=4).to(device)
    net.load_state_dict(torch.load(model_name, map_location=device))
    data, node_to_varnode, _ = create_data_object(graph, 0.0)
    net.eval()
    data = data.to(device)
    # Model emits log-probabilities; exp() -> probabilities, column 1 is the
    # "biased" class.
    probs = net(data).exp()[:, 1].cpu().detach().numpy()
    return probs, node_to_varnode
def get_variable_cpxid(graph, node_to_varnode, prediction):
    """Map each variable's networkx node name to (sequential id, prediction).

    ``node_to_varnode`` maps a node's position in ``graph.nodes()`` to the
    sequential variable id used to index ``prediction``.
    """
    node_names = list(graph.nodes())
    return {
        node_names[graph_id]: (seq_id, prediction[seq_id])
        for graph_id, seq_id in node_to_varnode.items()
    }
def create_data_object(graph, bias_threshold):
    """Convert a bipartite variable/constraint networkx graph into a
    torch_geometric Data object.

    Variable nodes carry 'objcoeff'/'obj_coeff' (and optionally 'bias');
    constraint nodes carry 'rhs'/'bound'; edges carry 'coeff'.  A bias above
    ``bias_threshold`` is labelled class 1.  Returns (data,
    node_to_varnode, node_to_connode) where the two dicts map networkx ids
    to the per-type sequential node ids used inside ``data``.
    """
    # Make graph directed.
    graph = nx.convert_node_labels_to_integers(graph)
    graph = graph.to_directed() if not nx.is_directed(graph) else graph
    data = Data()
    # Maps networkx ids to new variable node ids.
    node_to_varnode = {}
    # Maps networkx ids to new constraint node ids.
    node_to_connode = {}
    # Number of variables.
    num_nodes_var = 0
    # Number of constraints.
    num_nodes_con = 0
    # Targets (classes).
    y = []
    y_real = []
    # Features for variable nodes.
    feat_var = []
    # Feature for constraints nodes.
    feat_con = []
    # Right-hand sides of equations.
    feat_rhs = []
    # Scatter indices (all zeros; offset per graph by MyData.__inc__ on batching).
    index = []
    index_var = []
    obj = []
    # Iterate over nodes, and collect features.
    for i, (node, node_data) in enumerate(graph.nodes(data=True)):
        # Node is a variable node.
        if node_data['bipartite'] == 0:
            node_to_varnode[i] = num_nodes_var
            num_nodes_var += 1
            # Labels are optional: absent at pure inference time.
            if 'bias' in node_data and node_data['bias'] is not None:
                y_real.append(node_data['bias'])
                if (node_data['bias'] <= bias_threshold):
                    y.append(0)
                else:
                    y.append(1)
            # Two attribute spellings exist across data generators.
            if 'objcoeff' in node_data:
                feat_var.append([node_data['objcoeff'], graph.degree[i]])
                obj.append([node_data['objcoeff']])
            else:
                feat_var.append([node_data['obj_coeff'], graph.degree[i]])
                obj.append([node_data['obj_coeff']])
            index_var.append(0)
        # Node is constraint node.
        elif node_data['bipartite'] == 1:
            node_to_connode[i] = num_nodes_con
            num_nodes_con += 1
            if 'rhs' in node_data:
                rhs = node_data['rhs']
            else:
                rhs = node_data['bound']
            feat_rhs.append([rhs])
            feat_con.append([rhs, graph.degree[i]])
            index.append(0)
        else:
            print("Error in graph format.")
            exit(-1)
    # Edge list for var->con graphs.
    edge_list_var = []
    # Edge list for con->var graphs.
    edge_list_con = []
    # Create features matrices for variable nodes.
    edge_features_var = []
    # Create features matrices for constraint nodes.
    edge_features_con = []
    # Remark: graph is directed, i.e., each edge exists for each direction.
    # Flow of messages: source -> target.
    for i, (s, t, edge_data) in enumerate(graph.edges(data=True)):
        # Source node is con, target node is var.
        if graph.nodes[s]['bipartite'] == 1:
            # Source node is constraint. C->V.
            edge_list_con.append([node_to_connode[s], node_to_varnode[t]])
            edge_features_con.append([edge_data['coeff']])
        else:
            # Source node is variable. V->C.
            edge_list_var.append([node_to_varnode[s], node_to_connode[t]])
            edge_features_var.append([edge_data['coeff']])
    edge_index_var = torch.tensor(edge_list_var).t().contiguous()
    edge_index_con = torch.tensor(edge_list_con).t().contiguous()
    # Create data object.
    data.edge_index_var = edge_index_var
    data.edge_index_con = edge_index_con
    data.y = torch.from_numpy(np.array(y)).to(torch.long)
    data.y_real = torch.from_numpy(np.array(y_real)).to(torch.float)
    data.var_node_features = torch.from_numpy(np.array(feat_var)).to(torch.float)
    data.con_node_features = torch.from_numpy(np.array(feat_con)).to(torch.float)
    data.rhs = torch.from_numpy(np.array(feat_rhs)).to(torch.float)
    data.obj = torch.from_numpy(np.array(obj)).to(torch.float)
    data.edge_features_con = torch.from_numpy(np.array(edge_features_con)).to(torch.float)
    data.edge_features_var = torch.from_numpy(np.array(edge_features_var)).to(torch.float)
    data.num_nodes_var = num_nodes_var
    data.num_nodes_con = num_nodes_con
    data.index = torch.from_numpy(np.array(index)).to(torch.long)
    data.index_var = torch.from_numpy(np.array(index_var)).to(torch.long)
    return data, node_to_varnode, node_to_connode
# pd = "../data_new/data_graphsonly/gisp/p_hat300-2.clq/train/"
#pd = "../datagen/data/gisp/p_hat300-2.clq/test/"
#filename = os.listdir(pd)[0]
#graph = nx.read_gpickle(pd + filename)
#
#
#out, node_to_varnode, y_real =get_prediction("../gnn_models/EdgeConv/trained_p_hat300-2", graph)
#
#print(out, out.mean())
#print(y_real)
#
#dict_results = get_variable_cpxid(graph, node_to_varnode, out)
#print(dict_results)
#y_prediction = [out[var_seqid] for
#loss = torch.nn.MSELoss()
#output = loss(torch.from_numpy(out), torch.from_numpy(y_real))
#print(output)
#loss = torch.nn.L1Loss()
#output = loss(torch.from_numpy(out), torch.from_numpy(y_real))
#print(output)
#np.random.shuffle(out)
#output = loss(torch.from_numpy(out), torch.from_numpy(y_real))
#print(output)
| 7,296 | 34.081731 | 102 | py |
mipGNN | mipGNN-master/model_execution/spo_test.py | import os
import numpy as np
import argparse
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
import networkx as nx
import cplex
import pickle
import time
import re
import torch
import spo_utils
from spo_torch import SPONet, SPOLoss
if __name__ == '__main__':
    # Evaluate a trained cost-prediction model on one MIP instance:
    # predict objective coefficients, solve, and report SPO regret
    # (ambiguous and unambiguous) against the ground-truth optimum.
    """ Parse arguments """
    parser = argparse.ArgumentParser()
    parser.add_argument("-instance", type=str)
    parser.add_argument("-model_dir", type=str)
    parser.add_argument("-model_prefix", type=str)
    parser.add_argument("-graph", type=str)
    parser.add_argument("-groundtruth", type=str)
    parser.add_argument("-logfile", type=str)
    parser.add_argument("-method", type=str, default='standard')
    parser.add_argument("-single_model", type=bool, default=False)
    parser.add_argument("-nn_cpx_timelimit", type=float, default=3600)
    parser.add_argument("-nn_cpx_threads", type=int, default=8)
    args = parser.parse_args()
    print(args)
    graph = nx.read_gpickle(args.graph)
    # Read true optval to get regret
    objval_true = spo_utils.read_optval(args.groundtruth)
    torch_bool = False
    if args.method != 'mipgnn':
        if not args.model_dir.endswith('.pt'):
            # sklearn models: one (or a pair of) pickled regressors.
            model_filename = args.model_dir + '/' + args.model_prefix
            models = spo_utils.read_sklearn_model(args.model_dir, args.model_prefix, args.single_model)
        else:
            # Torch checkpoint: two SPONets (negative- and positive-sign costs).
            model_filename = args.model_dir
            torch_bool = True
            checkpoint = torch.load(model_filename)
            nn_poly_degree = checkpoint['nn_poly_degree']
            if nn_poly_degree > 1:
                poly = PolynomialFeatures(nn_poly_degree, include_bias=False)
            num_features = checkpoint['num_features']
            # Depth/width are encoded as the first two integers in the filename.
            ints_in_filename = re.findall('\d+',model_filename)#map(int, re.findall(r'\d+', model_filename))
            depth, width = int(ints_in_filename[0]), int(ints_in_filename[1])
            models = [SPONet(num_features, depth, width, relu_sign=-1), SPONet(num_features, depth, width, relu_sign=1)]
            models[0].load_state_dict(checkpoint['model0_state_dict'])
            models[1].load_state_dict(checkpoint['model1_state_dict'])
        # Read MIP
        instance_cpx = cplex.Cplex(args.instance)
        instance_cpx.parameters.timelimit.set(args.nn_cpx_timelimit)
        instance_cpx.parameters.threads.set(args.nn_cpx_threads)
        instance_obj_true = np.array(instance_cpx.objective.get_linear())
        num_variables = len(instance_obj_true)
        # Normalize to minimization so regret formulas below are uniform.
        if instance_cpx.objective.sense[instance_cpx.objective.get_sense()] == 'maximize':
            instance_cpx.objective.set_sense(instance_cpx.objective.sense.minimize)
            instance_obj_true *= -1
            objval_true *= -1
        # print(instance_cpx.objective.get_linear())
        # Replace each variable's objective coefficient with the model prediction.
        start_time = time.time()
        for node, node_data in graph.nodes(data=True):
            if node_data['bipartite'] == 0:
                # Selects which of the two sign-specific models to use.
                indicator = node_data['model_indicator']
                if torch_bool:
                    features = node_data['features']
                    if nn_poly_degree > 1:
                        features = poly.fit_transform([features])
                    prediction = float(models[indicator](torch.tensor(features, dtype=models[0].layers[0].weight.dtype)))
                else:
                    prediction = models[indicator].predict([node_data['features']])[0]
                instance_cpx.objective.set_linear(node, prediction)
        time_predictions = time.time() - start_time
        # print(instance_cpx.objective.get_linear())
        # Solve under the predicted objective.
        start_time = instance_cpx.get_time()
        instance_cpx.solve()
        time_solve_prediction = instance_cpx.get_time() - start_time
        solution_prediction = np.array(instance_cpx.solution.get_values())
        # Evaluate the predicted-objective solution under the TRUE objective.
        objval_prediction = solution_prediction.dot(instance_obj_true)
        instance_obj_prediction = np.array(instance_cpx.objective.get_linear())
        objval_prediction_predictedobj = instance_cpx.solution.get_objective_value()
        regret_ambiguous = np.abs(objval_true - objval_prediction)
        # To get unambiguous SPO loss:
        # 1- reset objective of instance_cpx to instance_obj_true
        #    turn problem into maximization
        instance_cpx.objective.set_sense(instance_cpx.objective.sense.maximize)
        instance_cpx.objective.set_linear([(idx, instance_obj_true[idx]) for idx in range(num_variables)])
        # 2- add constraint that predicted obj value isn't worsened
        instance_cpx.linear_constraints.add(
            lin_expr=[[[idx for idx in range(num_variables)], [instance_obj_prediction[idx] for idx in range(num_variables)]]],
            senses=['L'],
            rhs=[objval_prediction_predictedobj + 1e-6])
        # 3- warm start with opt of instance_cpx
        instance_cpx.MIP_starts.add(
            [(idx, int(solution_prediction[idx])) for idx in range(num_variables)],
            instance_cpx.MIP_starts.effort_level.check_feasibility)
        # 4- solve worst case problem and retrieve value
        start_time = instance_cpx.get_time()
        instance_cpx.solve()
        time_solve_loss = instance_cpx.get_time() - start_time
        objval_prediction_worstcase = instance_cpx.solution.get_objective_value()
        # todo: take into account suboptimality of objval_true e.g. timeout
        regret_unambiguous = np.abs(objval_true - objval_prediction_worstcase)
    else:
        # NOTE(review): this branch references args.model (undefined; parser only
        # declares -model_dir) and never defines the regret_* / time_* variables
        # printed below, so '-method mipgnn' would raise — confirm before use.
        prediction, node_to_varnode = predict.get_prediction(model_name=args.model, graph=graph)
        dict_varname_seqid = predict.get_variable_cpxid(graph, node_to_varnode, prediction)
    print("Ambiguous Regret = ", regret_ambiguous)
    print(objval_true, objval_prediction)
    print("Unambiguous Regret = ", regret_unambiguous)
    print(objval_true, objval_prediction_worstcase)
    results_str = "%s,%.5f,%.5f,%g,%g,%g,%.5f,%.5f,%.5f" % (
        model_filename,
        regret_unambiguous,
        regret_ambiguous,
        time_predictions,
        time_solve_loss,
        time_solve_prediction,
        objval_true,
        objval_prediction,
        objval_prediction_worstcase)
    print(results_str)
    with open(args.logfile, "w+") as results_file:
        results_file.write(results_str)
| 6,383 | 41 | 127 | py |
mipGNN | mipGNN-master/code/gnn_models/train_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import os
import os.path as osp
import networkx as nx
from sklearn.model_selection import train_test_split
from torchmetrics import F1, Precision, Recall, Accuracy
from torch_geometric.data import (InMemoryDataset, Data)
from torch_geometric.data import DataLoader
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from gnn_models.EdgeConv.mip_bipartite_class import SimpleNet as EdgeConv
from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet as EdgeConvSimple
from gnn_models.GIN.mip_bipartite_class import SimpleNet as GIN
from gnn_models.GIN.mip_bipartite_simple_class import SimpleNet as GINSimple
from gnn_models.Sage.mip_bipartite_class import SimpleNet as Sage
from gnn_models.Sage.mip_bipartite_simple_class import SimpleNet as SageSimple
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Preprocessing to create Torch dataset.
# Preprocessing to create Torch dataset.
class GraphDataset(InMemoryDataset):
    """InMemoryDataset over pickled bipartite MIP graphs.

    NOTE(review): process() runs inside super().__init__ and reads the
    module-level globals ``pd`` (source directory), ``name`` and
    ``bias_threshold`` rather than the constructor arguments — the caller
    must set those globals before instantiation (the training script below
    does exactly that).
    """

    def __init__(self, name, root, data_path, bias_threshold, transform=None, pre_transform=None,
                 pre_filter=None):
        super(GraphDataset, self).__init__(root, transform, pre_transform, pre_filter)
        self.data, self.slices = torch.load(self.processed_paths[0])
        # Set after super().__init__: not visible during process().
        self.bias_threshold = bias_threshold
        global global_name
        global global_data_path

    @property
    def raw_file_names(self):
        # Uses the module-level ``name`` global, not self attributes.
        return name

    @property
    def processed_file_names(self):
        return name

    def download(self):
        pass

    def process(self):
        """Build one Data object per pickled graph in the global dir ``pd``."""
        print("Preprocessing.")
        data_list = []
        num_graphs = len(os.listdir(pd))
        print(pd)
        # Iterate over instance files and create data objects.
        for num, filename in enumerate(os.listdir(pd)):
            print(filename, num, num_graphs)
            # Get graph.
            graph = nx.read_gpickle(pd + filename)
            # Make graph directed.
            graph = nx.convert_node_labels_to_integers(graph)
            graph = graph.to_directed() if not nx.is_directed(graph) else graph
            data = Data()
            # Maps networkx ids to new variable node ids.
            node_to_varnode = {}
            # Maps networkx ids to new constraint node ids.
            node_to_connode = {}
            # Number of variables.
            num_nodes_var = 0
            # Number of constraints.
            num_nodes_con = 0
            # Targets (classes).
            y = []
            y_real = []
            # Features for variable nodes.
            feat_var = []
            # Feature for constraints nodes.
            feat_con = []
            # Right-hand sides of equations.
            feat_rhs = []
            index = []
            index_var = []
            obj = []
            # Iterate over nodes, and collect features.
            for i, (node, node_data) in enumerate(graph.nodes(data=True)):
                # Node is a variable node.
                if node_data['bipartite'] == 0:
                    node_to_varnode[i] = num_nodes_var
                    num_nodes_var += 1
                    y_real.append(node_data['bias'])
                    # NOTE(review): strict '<' here vs '<=' used elsewhere in the
                    # repo; training re-derives labels from y_real with '<='.
                    if (node_data['bias'] < bias_threshold):
                        y.append(0)
                    else:
                        y.append(1)
                    if 'objcoeff' in node_data:
                        feat_var.append([node_data['objcoeff'], graph.degree[i]])
                        # feat_var.append([node_data['objcoeff']])
                        obj.append([node_data['objcoeff']])
                    else:
                        feat_var.append([node_data['obj_coeff'], graph.degree[i]])
                        # feat_var.append([node_data['obj_coeff']])
                        obj.append([node_data['obj_coeff']])
                    index_var.append(0)
                # Node is constraint node.
                elif node_data['bipartite'] == 1:
                    node_to_connode[i] = num_nodes_con
                    num_nodes_con += 1
                    if 'rhs' in node_data:
                        rhs = node_data['rhs']
                    else:
                        rhs = node_data['bound']
                    feat_rhs.append([rhs])
                    feat_con.append([rhs, graph.degree[i]])
                    # feat_con.append([rhs])
                    index.append(0)
                else:
                    print("Error in graph format.")
                    exit(-1)
            # Edge list for var->con graphs.
            edge_list_var = []
            # Edge list for con->var graphs.
            edge_list_con = []
            # Create features matrices for variable nodes.
            edge_features_var = []
            # Create features matrices for constraint nodes.
            edge_features_con = []
            # Remark: graph is directed, i.e., each edge exists for each direction.
            # Flow of messages: source -> target.
            for i, (s, t, edge_data) in enumerate(graph.edges(data=True)):
                # Source node is con, target node is var.
                if graph.nodes[s]['bipartite'] == 1:
                    # Source node is constraint. C->V.
                    edge_list_con.append([node_to_connode[s], node_to_varnode[t]])
                    edge_features_con.append([edge_data['coeff']])
                else:
                    # Source node is variable. V->C.
                    edge_list_var.append([node_to_varnode[s], node_to_connode[t]])
                    edge_features_var.append([edge_data['coeff']])
            edge_index_var = torch.tensor(edge_list_var).t().contiguous()
            edge_index_con = torch.tensor(edge_list_con).t().contiguous()
            # Create data object.
            data.edge_index_var = edge_index_var
            data.edge_index_con = edge_index_con
            data.y = torch.from_numpy(np.array(y)).to(torch.long)
            data.y_real = torch.from_numpy(np.array(y_real)).to(torch.float)
            data.var_node_features = torch.from_numpy(np.array(feat_var)).to(torch.float)
            data.con_node_features = torch.from_numpy(np.array(feat_con)).to(torch.float)
            data.rhs = torch.from_numpy(np.array(feat_rhs)).to(torch.float)
            data.obj = torch.from_numpy(np.array(obj)).to(torch.float)
            data.edge_features_con = torch.from_numpy(np.array(edge_features_con)).to(torch.float)
            data.edge_features_var = torch.from_numpy(np.array(edge_features_var)).to(torch.float)
            data.num_nodes_var = num_nodes_var
            data.num_nodes_con = num_nodes_con
            data.index = torch.from_numpy(np.array(index)).to(torch.long)
            data.index_var = torch.from_numpy(np.array(index_var)).to(torch.long)
            data_list.append(data)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
# Preprocess indices of bipartite graphs to make batching work.
# Preprocess indices of bipartite graphs to make batching work.
class MyData(Data):
    """Data subclass fixing per-key index offsets for bipartite mini-batching."""

    def __inc__(self, key, value):
        # When graphs are concatenated into a batch, var->con edge indices
        # shift by (num vars, num cons) and con->var edges the other way;
        # the scatter indices shift by the respective node counts.
        if key == 'edge_index_var':
            return torch.tensor([self.num_nodes_var, self.num_nodes_con]).view(2, 1)
        if key == 'edge_index_con':
            return torch.tensor([self.num_nodes_con, self.num_nodes_var]).view(2, 1)
        if key == 'index':
            return torch.tensor(self.num_nodes_con)
        if key == 'index_var':
            return torch.tensor(self.num_nodes_var)
        return 0
class MyTransform(object):
    """Copy a Data object into a MyData instance so batching uses __inc__."""

    def __call__(self, data):
        wrapped = MyData()
        for key, value in data:
            wrapped[key] = value
        return wrapped
# Train/test directory pairs; entry i (train) pairs with entry i+1 (test).
dataset_list = [
    "../data_new/data_graphsonly/gisp/p_hat300-2.clq/train/",
    "../data_new/data_graphsonly/gisp/p_hat300-2.clq/test/",
    "../data_new/data_graphsonly/gisp/C250.9.clq/train/",
    "../data_new/data_graphsonly/gisp/C250.9.clq/test/",
    "../data_new/data_graphsonly/gisp/keller4.clq/train/",
    "../data_new/data_graphsonly/gisp/keller4.clq/test/",
    "../data_new/data_graphsonly/gisp/hamming8-4.clq/train/",
    "../data_new/data_graphsonly/gisp/hamming8-4.clq/test/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_55.clq/train/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_55.clq/test/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_44.clq/train/",
    "../data_new/data_graphsonly/gisp/gen200_p0.9_44.clq/test/",
    "../data_new/data_graphsonly/gisp/C125.9.clq/train/",
    "../data_new/data_graphsonly/gisp/C125.9.clq/test/",
    "../data_new/data_graphsonly/gisp/p_hat300-1.clq/train/",
    "../data_new/data_graphsonly/gisp/p_hat300-1.clq/test/",
    "../data_new/data_graphsonly/gisp/brock200_4.clq/train/",
    "../data_new/data_graphsonly/gisp/brock200_4.clq/test/",
    "../data_new/data_graphsonly/gisp/brock200_2.clq/train/",
    "../data_new/data_graphsonly/gisp/brock200_2.clq/test/",
    "../data_new/data_graphsonly/fcmnf/L_n200_p0.02_c500/train/",
    "../data_new/data_graphsonly/fcmnf/L_n200_p0.02_c500/test/"
]
# Dataset names aligned with dataset_list (same pairing convention).
name_list = [
    "p_hat300-2.clq_train",
    "p_hat300-2.clq_test",
    "gisp_C250.9.clq_train",
    "C250.9.clq_test",
    "keller4.clq_train",
    "keller4.clq_test",
    "hamming8-4.clq_train",
    "hamming8-4.clq_test",
    "gen200_p0.9_55.clq_train",
    "gen200_p0.9_55.clq_test",
    "gen200_p0.9_44.clq_train",
    "gen200_p0.9_44.clq_test",
    "C125.9.clq_train",
    "C125.9.clq_test",
    "p_hat300-1.clq_train",
    "p_hat300-1.clq_test",
    "brock200_4.clq_train",
    "brock200_4.clq_test",
    "brock200_2.clq_train",
    "brock200_2.clq_test",
    # "L_n200_p0.02_c500_train",
    # "L_n200_p0.02_c500_test"
]
test_scores = []
# Grid over (dataset pair, bias threshold, GNN architecture); trains one
# classifier per combination and records the best-validation test metrics.
for i in [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]:
    # Bias.
    for bias in [0.0, 0.001, 0.1]:
        # GNN.
        for m in ["ECS", "GINS", "SGS", "EC", "GIN", "SG"]:
            log = []
            # Setup model.
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            if m == "EC":
                model = EdgeConv(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                model_name = "EC_" + name_list[i] + str(bias)
                print(model_name, bias, name_list[i])
            elif m == "ECS":
                model = EdgeConvSimple(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                model_name = "ECS_" + name_list[i] + str(bias)
                print(model_name, bias, name_list[i])
            elif m == "GIN":
                model = GIN(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                model_name = "GIN_" + name_list[i] + str(bias)
                print(model_name, bias, name_list[i])
            elif m == "GINS":
                model = GINSimple(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                model_name = "GINS_" + name_list[i] + str(bias)
                print(model_name, bias, name_list[i])
            elif m == "SG":
                model = Sage(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                model_name = "SG_" + name_list[i] + str(bias)
                print(model_name, bias, name_list[i])
            elif m == "SGS":
                model = SageSimple(hidden=64, num_layers=4, aggr="mean", regression=False).to(device)
                model_name = "SGS_" + name_list[i] + str(bias)
                print(model_name, bias, name_list[i])
            optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                                   factor=0.8, patience=10,
                                                                   min_lr=0.0000001)
            # Prepare data.
            bias_threshold = bias
            batch_size = 10
            num_epochs = 30
            pathr = osp.join(osp.dirname(osp.realpath(__file__)), '.', 'data', 'DS')
            # NOTE(review): the globals ``pd`` and ``name`` are consumed by
            # GraphDataset.process() during construction — they must be set
            # immediately before each GraphDataset(...) call, as done here.
            pd = path_train = path_trainpath_train = dataset_list[i]
            name = name_train = name_list[i]
            train_dataset = GraphDataset(name_train, pathr, path_train, bias_threshold,
                                         transform=MyTransform()).shuffle()
            pd = path_test = path_testpath_test = dataset_list[i + 1]
            name = name_test = name_list[i + 1]
            test_dataset = GraphDataset(name_test, pathr, path_test, bias_threshold,
                                        transform=MyTransform()).shuffle()
            # 80/20 train/validation split of the training instances.
            train_index, val_index = train_test_split(list(range(0, len(train_dataset))), test_size=0.2)
            val_dataset = train_dataset[val_index].shuffle()
            train_dataset = train_dataset[train_index].shuffle()
            test_dataset = test_dataset.shuffle()
            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
            val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
            test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
            def train(epoch):
                """One training epoch; returns mean (per-instance) NLL loss."""
                model.train()
                # loss_all = 0
                zero = torch.tensor([0]).to(device)
                one = torch.tensor([1]).to(device)
                loss_all = 0
                for data in train_loader:
                    data = data.to(device)
                    y = data.y_real
                    # Re-derive binary labels from the real-valued bias.
                    y = torch.where(y <= bias_threshold, zero, one).to(device)
                    optimizer.zero_grad()
                    output = model(data)
                    loss = F.nll_loss(output, y)
                    loss.backward()
                    loss_all += batch_size * loss.item()
                    optimizer.step()
                return loss_all / len(train_dataset)
            @torch.no_grad()
            def test(loader):
                """Evaluate on a loader; returns (accuracy, F1, precision, recall)."""
                model.eval()
                zero = torch.tensor([0]).to(device)
                one = torch.tensor([1]).to(device)
                f1 = F1(num_classes=2, average="macro").to(device)
                pr = Precision(num_classes=2, average="macro").to(device)
                re = Recall(num_classes=2, average="macro").to(device)
                acc = Accuracy(num_classes=2).to(device)
                first = True
                for data in loader:
                    data = data.to(device)
                    pred = model(data)
                    y = data.y_real
                    y = torch.where(y <= bias_threshold, zero, one).to(device)
                    pred = pred.max(dim=1)[1]
                    if not first:
                        pred_all = torch.cat([pred_all, pred])
                        y_all = torch.cat([y_all, y])
                    else:
                        pred_all = pred
                        y_all = y
                    first = False
                return acc(pred_all, y_all), f1(pred_all, y_all), pr(pred_all, y_all), re(pred_all, y_all)
            best_val = 0.0
            test_acc = 0.0
            test_f1 = 0.0
            test_re = 0.0
            test_pr = 0.0
            for epoch in range(1, num_epochs + 1):
                train_loss = train(epoch)
                train_acc, train_f1, train_pr, train_re = test(train_loader)
                val_acc, val_f1, val_pr, val_re = test(val_loader)
                scheduler.step(val_acc)
                lr = scheduler.optimizer.param_groups[0]['lr']
                # Keep the model with best validation accuracy; report its test metrics.
                if val_acc > best_val:
                    best_val = val_acc
                    test_acc, test_f1, test_pr, test_re = test(test_loader)
                    torch.save(model.state_dict(), "./models/" + model_name)
                log.append(
                    [epoch, train_loss, train_acc, train_f1, train_pr, train_re, val_acc, val_f1, val_pr, val_re,
                     best_val, test_acc, test_f1, test_pr, test_re])
                # Break if learning rate is smaller 10**-6.
                if lr < 0.000001 or epoch == num_epochs:
                    print([model_name, test_acc, test_f1, test_pr, test_re])
                    test_scores.append([model_name, test_acc, test_f1, test_pr, test_re])
                    log = np.array(log)
                    np.savetxt("./model_new_reps/" + model_name + ".log", log, delimiter=",",
                               fmt='%1.5f')
                    break
            torch.cuda.empty_cache()
| 16,583 | 38.021176 | 113 | py |
mipGNN | mipGNN-master/code/gnn_models/Sage/mip_bipartite_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch_geometric.utils.softmax
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU, Sigmoid
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Update constraint embeddings based on variable embeddings.
class VarConBipartiteLayer(MessagePassing):
    """Message-passing layer updating constraint embeddings from variable
    embeddings (V->C direction)."""

    def __init__(self, edge_dim, dim, var_assigment, aggr):
        super(VarConBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        self.lin_l = Linear(dim, dim, bias=True)
        self.lin_r = Linear(dim, dim, bias=False)
        # Maps variable embeddings to scalar variable assigment.
        self.var_assigment = var_assigment
        # Maps variable embeddings + assignment to joint embedding.
        self.joint_var = Sequential(Linear(dim + 1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
        # NOTE(review): self.mlp, self.eps and self.initial_eps are never used
        # in forward() below — presumably kept for state-dict compatibility.
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                              BN(dim))
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0

    def forward(self, source, target, edge_index, edge_attr, rhs, size):
        # ``rhs`` is accepted but unused in this layer.
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        # Compute scalar variable assignment.
        var_assignment = self.var_assigment(source)
        # Compute joint embedding of variable embeddings and scalar variable assignment.
        new_source = self.joint_var(torch.cat([source, var_assignment], dim=-1))
        out = self.propagate(edge_index, x=new_source, size=size, edge_attr=edge_embedding)
        # GraphSAGE-style combine: transformed aggregate plus transformed self,
        # followed by L2 normalization.
        out = self.lin_l(out)
        out += self.lin_r(target)
        out = F.normalize(out, p=2., dim=-1)
        return out

    def message(self, x_j, edge_attr):
        # Edge-conditioned message: neighbor embedding shifted by edge embedding.
        return F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        return aggr_out
# Compute error signal.
# Compute error signal.
class ErrorLayer(MessagePassing):
    """Computes a per-constraint violation signal: embeds the residual Ax - b
    of the current scalar variable assignment, normalized with softmax over
    each graph's constraints."""

    def __init__(self, dim, var_assignment):
        super(ErrorLayer, self).__init__(aggr="add", flow="source_to_target")
        # Shared module mapping variable embeddings to a scalar assignment in [0, 1].
        self.var_assignment = var_assignment
        self.error_encoder = Sequential(Linear(1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                        BN(dim))

    def forward(self, source, edge_index, edge_attr, rhs, index, size):
        # Compute scalar variable assignment.
        new_source = self.var_assignment(source)
        # Sum of coeff * assignment per constraint, i.e., Ax.
        tmp = self.propagate(edge_index, x=new_source, edge_attr=edge_attr, size=size)
        # Compute residual, i.e., Ax-b.
        out = tmp - rhs
        out = self.error_encoder(out)
        # ``index`` groups constraints by graph; softmax normalizes within each graph.
        out = torch_geometric.utils.softmax(out, index)
        return out

    def message(self, x_j, edge_attr):
        msg = x_j * edge_attr
        return msg

    def update(self, aggr_out):
        return aggr_out
# Update variable embeddings based on constraint embeddings.
# Update variable embeddings based on constraint embeddings.
class ConVarBipartiteLayer(MessagePassing):
    """Message-passing layer updating variable embeddings from constraint
    embeddings and the constraint error signal (C->V direction)."""

    def __init__(self, edge_dim, dim, aggr):
        super(ConVarBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Learn joint representation of contraint embedding and error.
        self.joint_con_encoder = Sequential(Linear(dim + dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                            BN(dim))
        self.lin_l = Linear(dim, dim, bias=True)
        self.lin_r = Linear(dim, dim, bias=False)

    def forward(self, source, target, edge_index, edge_attr, error_con, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        # Fuse constraint embedding with its violation signal before sending messages.
        new_source = self.joint_con_encoder(torch.cat([source, error_con], dim=-1))
        out = self.propagate(edge_index, x=new_source, size=size, edge_attr=edge_embedding)
        # GraphSAGE-style combine of aggregate and self, then L2 normalization.
        out = self.lin_l(out)
        out += self.lin_r(target)
        out = F.normalize(out, p=2., dim=-1)
        return out

    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        return aggr_out
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over MIP variable/constraint graphs.

    Each round: compute an error signal from the current soft assignment,
    update constraint embeddings from variables (V->C), then variable
    embeddings from constraints + error (C->V).  A final MLP over the
    concatenation of all per-round variable embeddings yields either
    2-class log-probabilities or a scalar regression output.
    """

    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Compute variable assignement.
        # NOTE(review): plain list (not ModuleList); its modules are registered
        # indirectly as submodules of the layers below — TODO confirm.
        self.layers_ass = []
        for i in range(self.num_layers):
            self.layers_ass.append(Sequential(Linear(hidden, hidden), ReLU(), Linear(hidden, 1), Sigmoid()))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        self.layers_err = []
        for i in range(self.num_layers):
            self.layers_con.append(ConVarBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(VarConBipartiteLayer(1, hidden, self.layers_ass[i], aggr=aggr))
            self.layers_err.append(ErrorLayer(hidden, self.layers_ass[i]))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        self.layers_err = torch.nn.ModuleList(self.layers_err)
        # MLP used for classification.
        self.lin1 = Linear((self.num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)

    def forward(self, data):
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var
        num_nodes_con = data.num_nodes_con
        rhs = data.rhs
        index = data.index
        obj = data.obj
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        # Per-round embedding histories; jumping-knowledge concat at the end.
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        x_err = []
        for i in range(self.num_layers):
            x_err.append(self.layers_err[i](x_var[-1], edge_index_var, edge_features_var, rhs, index,
                                            (var_node_features_0.size(0), con_node_features.size(0))))
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var, rhs,
                                                   (var_node_features_0.size(0), con_node_features.size(0)))))
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con, x_err[-1],
                                                   (con_node_features.size(0), var_node_features_0.size(0)))))
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)

    def __repr__(self):
        return self.__class__.__name__
| 8,206 | 36.646789 | 118 | py |
mipGNN | mipGNN-master/code/gnn_models/Sage/mip_bipartite_simple_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import MessagePassing
from torch_sparse import matmul
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class SimpleBipartiteLayer(MessagePassing):
    """One direction of SAGE-style message passing on the bipartite MIP graph.

    Messages are ReLU(sender + encoded edge); the receiver update is
    lin_l(aggregated) + lin_r(target), followed by L2 normalization.
    """
    def __init__(self, edge_dim, dim, aggr):
        super(SimpleBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        self.lin_l = Linear(dim, dim, bias=True)
        self.lin_r = Linear(dim, dim, bias=False)
    def forward(self, source, target, edge_index, edge_attr, size):
        """Aggregate messages from ``source`` nodes into ``target`` nodes."""
        edge_emb = self.edge_encoder(edge_attr)
        out = self.propagate(edge_index, x=source, size=size, edge_emb=edge_emb)
        out = self.lin_l(out)
        out += self.lin_r(target)
        out = F.normalize(out, p=2., dim=-1)
        return out
    def message(self, x_j, edge_emb):
        return F.relu(x_j + edge_emb)
    def message_and_aggregate(self, adj_t, x):
        # Fused sparse path taken when edge_index is a SparseTensor.
        # NOTE(review): this path drops the edge embeddings that message()
        # adds on the dense path — confirm that is intended.
        adj_t = adj_t.set_value(None, layout=None)
        return matmul(adj_t, x[0], reduce=self.aggr)
    def __repr__(self):
        # Bug fix: previously formatted ``self.nn``, an attribute this class
        # never defines, so repr()/print() raised AttributeError.
        return self.__class__.__name__
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable-constraint graph (simple variant).

    Alternates constraint-side and variable-side SimpleBipartiteLayer updates
    for ``num_layers`` rounds, then predicts per variable node from the
    concatenation of all intermediate variable embeddings.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        for i in range(self.num_layers):
            self.layers_con.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        # MLP used for classification (2 logits) or regression (1 output).
        self.lin1 = Linear((num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        """Return per-variable log-probabilities (classification) or a flat
        regression vector, one entry per variable node."""
        # Get data of batch. (Removed: unused reads of num_nodes_var/num_nodes_con.)
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        num_var = var_node_features_0.size(0)
        num_con = con_node_features_0.size(0)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        for i in range(self.num_layers):
            # Variables -> constraints, then constraints -> variables.
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var,
                                                   (num_var, num_con))))
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con,
                                                   (num_con, num_var))))
        # Jumping-knowledge style concatenation of all variable embeddings.
        x = torch.cat(x_var, dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 4,248 | 34.408333 | 110 | py |
mipGNN | mipGNN-master/code/gnn_models/GIN/mip_bipartite_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch_geometric.utils.softmax
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU, Sigmoid
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Update constraint embeddings based on variable embeddings.
class VarConBipartiteLayer(MessagePassing):
    """GIN-style update of constraint embeddings from variable embeddings.

    Each variable embedding is fused with its scalar assignment prediction
    before messaging; the receiving constraint is updated as
    mlp((1 + eps) * target + aggregated_messages).
    """
    def __init__(self, edge_dim, dim, var_assigment, aggr):
        super(VarConBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Learn joint representation of variable embedding and its scalar assignment.
        self.joint_var = Sequential(Linear(dim + 1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
        # Maps variable embeddings to a scalar variable assignment (shared MLP).
        self.var_assigment = var_assigment
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(), BN(dim))
        # GIN's learnable self-loop weight.
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
    def forward(self, source, target, edge_index, edge_attr, rhs, size):
        """Aggregate variable-side messages into constraint embeddings.

        ``rhs`` is accepted for signature compatibility with the calling
        network but is not used here.
        """
        # Compute scalar variable assignment.
        var_assignment = self.var_assigment(source)
        source = self.joint_var(torch.cat([source, var_assignment], dim=-1))
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        tmp = self.propagate(edge_index, x=source, edge_attr=edge_embedding, size=size)
        out = self.mlp((1 + self.eps) * target + tmp)
        return out
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def __repr__(self):
        # Bug fix: previously formatted ``self.nn``, an attribute this class
        # never defines, so repr()/print() raised AttributeError.
        return self.__class__.__name__
# Compute error signal.
class ErrorLayer(MessagePassing):
    """Compute a per-constraint error embedding from predicted assignments.

    Sums coefficient * predicted-assignment over each constraint's incident
    variables, subtracts the right-hand side (the residual Ax - b), encodes
    the scalar residual, and softmax-normalizes per graph segment ``index``.
    """
    def __init__(self, dim, var_assignment):
        super(ErrorLayer, self).__init__(aggr="add", flow="source_to_target")
        # Shared MLP mapping variable embeddings to a scalar assignment.
        self.var_assignment = var_assignment
        self.error_encoder = Sequential(Linear(1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                        BN(dim))
        # Learn joint representation of contraint embedding and error.
        # NOTE(review): joint_var is not referenced by forward/message in this
        # class — appears to be dead weight; confirm before removing.
        self.joint_var = Sequential(Linear(dim + dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
    def forward(self, source, edge_index, edge_attr, rhs, index, size):
        # Compute scalar variable assignment.
        new_source = self.var_assignment(source)
        # Weighted sum over incident variables; edge_attr holds raw coefficients.
        tmp = self.propagate(edge_index, x=new_source, edge_attr=edge_attr, size=size)
        # Compute residual, i.e., Ax-b.
        out = tmp - rhs
        out = self.error_encoder(out)
        # Normalize error embeddings across constraints of the same graph.
        out = torch_geometric.utils.softmax(out, index)
        return out
    def message(self, x_j, edge_attr):
        # Message = predicted assignment scaled by the constraint coefficient.
        msg = x_j * edge_attr
        return msg
    def update(self, aggr_out):
        return aggr_out
# Update variable embeddings based on constraint embeddings.
class ConVarBipartiteLayer(MessagePassing):
    """GIN-style update of variable embeddings from constraint embeddings.

    Each constraint embedding is fused with its error signal before
    messaging; the receiving variable is updated as
    mlp((1 + eps) * target + aggregated_messages).
    """
    def __init__(self, edge_dim, dim, aggr):
        super(ConVarBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Learn joint representation of constraint embedding and error.
        self.joint_var = Sequential(Linear(dim + dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                    BN(dim))
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(), BN(dim))
        # GIN's learnable self-loop weight.
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
    def forward(self, source, target, edge_index, edge_attr, error_con, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        # Fuse each constraint embedding with its error signal before messaging.
        source = self.joint_var(torch.cat([source, error_con], dim=-1))
        tmp = self.propagate(edge_index, x=source, edge_attr=edge_embedding, size=size)
        out = self.mlp((1 + self.eps) * target + tmp)
        return out
    def message(self, x_j, edge_attr):
        return F.relu(x_j + edge_attr)
    def __repr__(self):
        # Bug fix: previously formatted ``self.nn``, an attribute this class
        # never defines, so repr()/print() raised AttributeError.
        return self.__class__.__name__
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable-constraint graph (GIN-style layers,
    with per-layer error signals).

    Per round: compute a constraint error signal from the current variable
    embeddings, update constraint embeddings, then update variable embeddings
    using the fresh error signal. Predicts per variable node from the
    concatenation of all intermediate variable embeddings.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Compute variable assignement.
        # NOTE(review): each layers_ass[i] MLP is shared by the matching
        # VarConBipartiteLayer and ErrorLayer below, so its parameters get
        # registered through those ModuleList entries.
        self.layers_ass = []
        for i in range(self.num_layers):
            self.layers_ass.append(Sequential(Linear(hidden, hidden), ReLU(), Linear(hidden, 1), Sigmoid()))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        self.layers_err = []
        for i in range(self.num_layers):
            self.layers_con.append(ConVarBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(VarConBipartiteLayer(1, hidden, self.layers_ass[i], aggr=aggr))
            self.layers_err.append(ErrorLayer(hidden, self.layers_ass[i]))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        self.layers_err = torch.nn.ModuleList(self.layers_err)
        # MLP used for classification.
        self.lin1 = Linear((self.num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        """Return per-variable log-probabilities (classification) or a flat
        regression vector, one entry per variable node."""
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # NOTE(review): unused below
        num_nodes_con = data.num_nodes_con  # NOTE(review): unused below
        rhs = data.rhs
        index = data.index
        obj = data.obj  # NOTE(review): unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        x_err = []
        for i in range(self.num_layers):
            # Error signal first, then constraint update, then variable update.
            x_err.append(self.layers_err[i](x_var[-1], edge_index_var, edge_features_var, rhs, index,
                                            (var_node_features_0.size(0), con_node_features.size(0))))
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var, rhs,
                                                   (var_node_features_0.size(0), con_node_features.size(0)))))
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con, x_err[-1],
                                                   (con_node_features.size(0), var_node_features_0.size(0)))))
        # Jumping-knowledge style concatenation of all variable embeddings.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 8,207 | 36.309091 | 118 | py |
mipGNN | mipGNN-master/code/gnn_models/GIN/mip_bipartite_simple_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class SimpleBipartiteLayer(MessagePassing):
    """One direction of GIN-style message passing on the bipartite MIP graph.

    A message is ReLU(sender + encoded edge); the receiver is then updated
    as mlp((1 + eps) * target + aggregated_messages).
    """
    def __init__(self, edge_dim, dim, aggr):
        super(SimpleBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Lift raw edge features into the node embedding space.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Post-aggregation transform of the combined target + message signal.
        self.mlp = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(), BN(dim))
        # GIN's learnable self-loop weight.
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
    def forward(self, source, target, edge_index, edge_attr, size):
        """Aggregate messages from ``source`` nodes into ``target`` nodes."""
        encoded_edges = self.edge_encoder(edge_attr)
        aggregated = self.propagate(edge_index, x=source, edge_attr=encoded_edges, size=size)
        return self.mlp((1 + self.eps) * target + aggregated)
    def message(self, x_j, edge_attr):
        # Combine sender embedding with its edge embedding.
        return F.relu(x_j + edge_attr)
    def update(self, aggr_out):
        # No extra per-node transform at this stage.
        return aggr_out
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable-constraint graph (GIN simple
    variant, no error signals).

    Alternates constraint-side and variable-side SimpleBipartiteLayer updates
    for ``num_layers`` rounds, then predicts per variable node from the
    concatenation of all intermediate variable embeddings.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        for i in range(self.num_layers):
            self.layers_con.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        # MLP used for classification.
        self.lin1 = Linear((num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        """Return per-variable log-probabilities (classification) or a flat
        regression vector, one entry per variable node."""
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # NOTE(review): unused below
        num_nodes_con = data.num_nodes_con  # NOTE(review): unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        for i in range(self.num_layers):
            # Variables -> constraints, then constraints -> variables.
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var,
                                                   (var_node_features_0.size(0), con_node_features.size(0)))))
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con,
                                                   (con_node_features.size(0), var_node_features_0.size(0)))))
        # Jumping-knowledge style concatenation of all variable embeddings.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 4,082 | 35.132743 | 110 | py |
mipGNN | mipGNN-master/code/gnn_models/EdgeConv/mip_bipartite_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch_geometric.utils.softmax
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU, Sigmoid
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Variables to constrains.
class VarConBipartiteLayer(MessagePassing):
    """EdgeConv-style update of constraint embeddings from variable embeddings.

    Each message is an MLP over [receiver, sender, sender's scalar assignment,
    edge embedding] concatenated.
    """
    def __init__(self, edge_dim, dim, var_assigment, aggr):
        super(VarConBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Combine node and edge features of adjacent nodes.
        # Input width: target (dim) + source (dim) + edge (dim) + scalar assignment (1).
        self.nn = Sequential(Linear(3 * dim + 1, dim), ReLU(), Linear(dim, dim), ReLU(),
                             BN(dim))
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
        # Maps variable embeddings to scalar variable assigment.
        self.var_assigment = var_assigment
    def forward(self, source, target, edge_index, edge_attr, rhs, size):
        """Aggregate variable-side messages into constraint embeddings.

        NOTE(review): ``rhs`` is accepted but not used here; it appears to be
        kept for signature compatibility with the calling network.
        """
        # Compute scalar variable assignment.
        var_assignment = self.var_assigment(source)
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        out = self.propagate(edge_index, x=source, t=target, v=var_assignment, edge_attr=edge_embedding, size=size)
        return out
    def message(self, x_j, t_i, v_j, edge_attr):
        # MLP over receiver, sender, sender's assignment, and edge embedding.
        return self.nn(torch.cat([t_i, x_j, v_j, edge_attr], dim=-1))
    def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
# Compute error signal.
class ErrorLayer(MessagePassing):
    """Compute a per-constraint error embedding from predicted assignments.

    Sums coefficient * predicted-assignment over each constraint's incident
    variables, subtracts the right-hand side (the residual Ax - b), encodes
    the scalar residual, and softmax-normalizes per graph segment ``index``.
    """
    def __init__(self, dim, var_assignment):
        super(ErrorLayer, self).__init__(aggr="add", flow="source_to_target")
        # Shared MLP mapping variable embeddings to a scalar assignment.
        self.var_assignment = var_assignment
        self.error_encoder = Sequential(Linear(1, dim), ReLU(), Linear(dim, dim), ReLU(),
                                        BN(dim))
    def forward(self, source, edge_index, edge_attr, rhs, index, size):
        # Compute scalar variable assignment.
        new_source = self.var_assignment(source)
        # Weighted sum over incident variables; edge_attr holds raw coefficients.
        tmp = self.propagate(edge_index, x=new_source, edge_attr=edge_attr, size=size)
        # Compute residual, i.e., Ax-b.
        out = tmp - rhs
        out = self.error_encoder(out)
        # Normalize error embeddings across constraints of the same graph.
        out = torch_geometric.utils.softmax(out, index)
        return out
    def message(self, x_j, edge_attr):
        # Message = predicted assignment scaled by the constraint coefficient.
        msg = x_j * edge_attr
        return msg
    def update(self, aggr_out):
        return aggr_out
class ConVarBipartiteLayer(MessagePassing):
    """EdgeConv-style update of variable embeddings from constraint embeddings.

    Each message is an MLP over [receiver, sender, sender's error embedding,
    edge embedding] concatenated.
    """
    def __init__(self, edge_dim, dim, aggr):
        super(ConVarBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # Combine node and edge features of adjacent nodes.
        # Input width: target (dim) + source (dim) + error (dim) + edge (dim).
        self.nn = Sequential(Linear(4 * dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                             BN(dim))
        # Maps edge features to the same number of components as node features.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
    def forward(self, source, target, edge_index, edge_attr, error_con, size):
        # Map edge features to embeddings with the same number of components as node embeddings.
        edge_embedding = self.edge_encoder(edge_attr)
        out = self.propagate(edge_index, x=source, t=target, e=error_con, edge_attr=edge_embedding, size=size)
        return out
    def message(self, x_j, t_i, e_j, edge_attr):
        # MLP over receiver, sender, sender's error embedding, and edge embedding.
        return self.nn(torch.cat([t_i, x_j, e_j, edge_attr], dim=-1))
    def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable-constraint graph (EdgeConv-style
    layers, with per-layer error signals).

    Per round: compute a constraint error signal from the current variable
    embeddings, update constraint embeddings, then update variable embeddings
    using the fresh error signal. Predicts per variable node from the
    concatenation of all intermediate variable embeddings.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Compute variable assignement.
        # NOTE(review): each layers_ass[i] MLP is shared by the matching
        # VarConBipartiteLayer and ErrorLayer below, so its parameters get
        # registered through those ModuleList entries.
        self.layers_ass = []
        for i in range(self.num_layers):
            self.layers_ass.append(Sequential(Linear(hidden, hidden), ReLU(), Linear(hidden, 1), Sigmoid()))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        self.layers_err = []
        for i in range(self.num_layers):
            self.layers_con.append(ConVarBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(VarConBipartiteLayer(1, hidden, self.layers_ass[i], aggr=aggr))
            self.layers_err.append(ErrorLayer(hidden, self.layers_ass[i]))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        self.layers_err = torch.nn.ModuleList(self.layers_err)
        # MLP used for classification.
        self.lin1 = Linear((self.num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        """Return per-variable log-probabilities (classification) or a flat
        regression vector, one entry per variable node."""
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # NOTE(review): unused below
        num_nodes_con = data.num_nodes_con  # NOTE(review): unused below
        rhs = data.rhs
        index = data.index
        obj = data.obj  # NOTE(review): unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        x_err = []
        num_var = var_node_features_0.size(0)
        num_con = con_node_features_0.size(0)
        for i in range(self.num_layers):
            # Error signal first, then constraint update, then variable update.
            x_err.append(self.layers_err[i](x_var[-1], edge_index_var, edge_features_var, rhs, index,
                                            (num_var, num_con)))
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var, rhs,
                                                   (num_var, num_con))))
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con, x_err[-1],
                                                   (num_con, num_var))))
        # Jumping-knowledge style concatenation of all variable embeddings.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 7,303 | 35.52 | 118 | py |
mipGNN | mipGNN-master/code/gnn_models/EdgeConv/mip_bipartite_simple_class.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d as BN
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import MessagePassing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class SimpleBipartiteLayer(MessagePassing):
    """One direction of EdgeConv-style message passing on the bipartite MIP graph.

    Each message is an MLP over [receiver, sender, edge embedding]; the
    receiving side simply takes the aggregated messages as its new state.
    """
    def __init__(self, edge_dim, dim, aggr):
        super(SimpleBipartiteLayer, self).__init__(aggr=aggr, flow="source_to_target")
        # MLP combining receiver, sender, and edge embeddings of each edge.
        self.nn = Sequential(Linear(3 * dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                             BN(dim))
        # Lift raw edge features into the node embedding space.
        self.edge_encoder = Sequential(Linear(edge_dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                                       BN(dim))
    def forward(self, source, target, edge_index, edge_attr, size):
        """Aggregate messages from ``source`` nodes into ``target`` nodes."""
        encoded_edges = self.edge_encoder(edge_attr)
        return self.propagate(edge_index, x=source, t=target, edge_attr=encoded_edges, size=size)
    def message(self, x_j, t_i, edge_attr):
        combined = torch.cat([t_i, x_j, edge_attr], dim=-1)
        return self.nn(combined)
    def __repr__(self):
        return '{}(nn={})'.format(self.__class__.__name__, self.nn)
class SimpleNet(torch.nn.Module):
    """Bipartite GNN over a MIP's variable-constraint graph (EdgeConv simple
    variant, no error signals).

    Alternates constraint-side and variable-side SimpleBipartiteLayer updates
    for ``num_layers`` rounds, then predicts per variable node from the
    concatenation of all intermediate variable embeddings.
    """
    def __init__(self, hidden, aggr, num_layers, regression=False):
        super(SimpleNet, self).__init__()
        self.num_layers = num_layers
        self.regression = regression
        # Embed initial node features.
        self.var_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        self.con_node_encoder = Sequential(Linear(2, hidden), ReLU(), Linear(hidden, hidden))
        # Bipartite GNN architecture.
        self.layers_con = []
        self.layers_var = []
        for i in range(self.num_layers):
            self.layers_con.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
            self.layers_var.append(SimpleBipartiteLayer(1, hidden, aggr=aggr))
        self.layers_con = torch.nn.ModuleList(self.layers_con)
        self.layers_var = torch.nn.ModuleList(self.layers_var)
        # MLP used for classification.
        self.lin1 = Linear((num_layers + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, hidden)
        self.lin3 = Linear(hidden, hidden)
        if not self.regression:
            self.lin4 = Linear(hidden, 2)
        else:
            self.lin4 = Linear(hidden, 1)
    def forward(self, data):
        """Return per-variable log-probabilities (classification) or a flat
        regression vector, one entry per variable node."""
        # Get data of batch.
        var_node_features = data.var_node_features
        con_node_features = data.con_node_features
        edge_index_var = data.edge_index_var
        edge_index_con = data.edge_index_con
        edge_features_var = data.edge_features_var
        edge_features_con = data.edge_features_con
        num_nodes_var = data.num_nodes_var  # NOTE(review): unused below
        num_nodes_con = data.num_nodes_con  # NOTE(review): unused below
        # Compute initial node embeddings.
        var_node_features_0 = self.var_node_encoder(var_node_features)
        con_node_features_0 = self.con_node_encoder(con_node_features)
        x_var = [var_node_features_0]
        x_con = [con_node_features_0]
        num_var = var_node_features_0.size(0)
        num_con = con_node_features_0.size(0)
        for i in range(self.num_layers):
            # Variables -> constraints, then constraints -> variables.
            x_con.append(F.relu(self.layers_var[i](x_var[-1], x_con[-1], edge_index_var, edge_features_var,
                                                   (num_var, num_con))))
            x_var.append(F.relu(self.layers_con[i](x_con[-1], x_var[-1], edge_index_con, edge_features_con,
                                                   (num_con, num_var))))
        # Jumping-knowledge style concatenation of all variable embeddings.
        x = torch.cat(x_var[:], dim=-1)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.relu(self.lin3(x))
        x = self.lin4(x)
        if not self.regression:
            return F.log_softmax(x, dim=-1)
        else:
            return x.view(-1)
    def __repr__(self):
        return self.__class__.__name__
| 4,148 | 35.078261 | 107 | py |
mipGNN | mipGNN-master/code/model_execution/inference.py | import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import os
import os.path as osp
import numpy as np
import networkx as nx
import argparse
import io
import heapq
from pathlib import Path
import time
import math
import torch
from torch_geometric.data import (InMemoryDataset, Data)
from torch_geometric.data import DataLoader
from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet
import cplex
import callbacks_cplex
import utils
import predict
import pickle
def rename_variables(var_names):
    """Normalize CPLEX-style variable names in place: ``x(1_2)`` -> ``x[1,2]``.

    Replaces '(' with '[', ')' with ']', and '_' with ',' in each name.
    The input list is mutated in place and also returned for convenience.
    """
    # One C-level pass per name instead of three chained .replace() calls.
    table = str.maketrans({'(': '[', ')': ']', '_': ','})
    for i, name in enumerate(var_names):
        var_names[i] = name.translate(table)
    return var_names
# direction=1: branch on most integer first
def set_cplex_priorities(instance_cpx, prediction, direction=1):
    """Assign CPLEX branching priorities from predicted variable biases.

    instance_cpx: a ``cplex.Cplex`` instance whose variable ordering matches
        ``prediction``.
    prediction: per-variable predicted probability of taking value 1.
    direction: with +1, the most confident (most integral) variables receive
        the highest priority numbers and are branched on first; -1 reverses
        this. # NOTE(review): relies on CPLEX branching higher-priority
        variables earlier — confirm against the CPLEX order interface docs.
    """
    # score variables based on bias prediction
    # Confidence score = max(p, 1 - p), i.e. distance from the 0.5 midpoint.
    scores = np.max(((1-prediction), prediction), axis=0)
    priorities = np.argsort(direction * scores)
    # set priorities
    # reference: https://www.ibm.com/support/knowledgecenter/SSSA5P_12.7.1/ilog.odms.cplex.help/refpythoncplex/html/cplex._internal._subinterfaces.OrderInterface-class.html
    order_tuples = []
    var_names = instance_cpx.variables.get_names()
    cur_priority = 0
    # Each variable gets a strictly increasing priority in argsort order;
    # the commented-out condition below would instead group near-equal scores
    # under a shared priority level.
    for priority, var_cpxid in enumerate(priorities):
        var_name = var_names[var_cpxid]
        # print(scores[var_cpxid], scores[priorities[priority-1]])
        # if priority > 0 and scores[var_cpxid] > scores[priorities[priority-1]] + 1e-3:
        cur_priority += 1
        # print(cur_priority)
        order_tuples += [(var_name, cur_priority, instance_cpx.order.branch_direction.up)]
    # print(cur_priority)
    # z=1/0
    instance_cpx.order.set(order_tuples)
def mipeval(
method,
instance,
graph='',
instance_params='',
model='',
logfile='sys.stdout',
barebones=0,
cpx_emphasis=1,
cpx_threads=1,
cpx_tmp='./cpx_tmp',
timelimit=60,
memlimit=1024,
freq_best=100,
lb_threshold=5,
num_mipstarts=10,
mipstart_strategy='repair',
branching_direction=1,
zero_damping=1.0
):
print(locals())
assert (len(method) >= 1)
assert (cpx_emphasis >= 0 and cpx_emphasis <= 4)
assert (timelimit > 0)
""" Create CPLEX instance """
instance_cpx = cplex.Cplex(instance)
sense_str = instance_cpx.objective.sense[instance_cpx.objective.get_sense()]
num_variables = instance_cpx.variables.get_num()
num_constraints = instance_cpx.linear_constraints.get_num()
start_time = instance_cpx.get_time()
""" CPLEX output management """
logstring = sys.stdout
summary_string = sys.stdout
if logfile != 'sys.stdout':
logstring = io.StringIO()
summary_string = io.StringIO()
instance_cpx.set_log_stream(logstring)
instance_cpx.set_results_stream(logstring)
instance_cpx.set_warning_stream(logstring)
# instance_cpx.set_error_stream(logstring)
instance_cpx.set_error_stream(open(os.devnull, 'w'))
""" Set CPLEX parameters, if any """
instance_cpx.parameters.timelimit.set(timelimit)
instance_cpx.parameters.emphasis.mip.set(cpx_emphasis)
instance_cpx.parameters.mip.display.set(3)
instance_cpx.parameters.threads.set(cpx_threads)
instance_cpx.parameters.workmem.set(memlimit)
instance_cpx.parameters.mip.limits.treememory.set(20000)
instance_cpx.parameters.mip.strategy.file.set(2)
instance_cpx.parameters.workdir.set(cpx_tmp)
if barebones:
instance_cpx.parameters.mip.limits.cutpasses.set(-1)
instance_cpx.parameters.mip.strategy.heuristicfreq.set(-1)
instance_cpx.parameters.preprocessing.presolve.set(0)
# DFS = 0, BEST-BOUND = 1 (default), BEST-EST = 2, BEST-EST-ALT = 3
# instance_cpx.parameters.mip.strategy.nodeselect.set(3)
time_rem_cplex = timelimit
time_vcg = time.time()
time_vcg_reading = 0
time_pred = 0
is_primal_mipstart = False
""" Solve CPLEX instance with user-selected method """
if 'default' not in method[0]:
assert (len(graph) > 0 or len(instance_params) > 0) and len(model) > 0
""" Read in the pickled graph and the trained model """
time_vcg_reading = time.time()
print("Reading VCG...")
graph = nx.read_gpickle(graph)
print("\t took %g secs." % (time.time()-time_vcg))
time_vcg_reading = time.time() - time_vcg_reading
print("Predicting...")
timestamp_pred = time.time()
prediction, node_to_varnode = predict.get_prediction(model_name=model, graph=graph)
dict_varname_seqid = predict.get_variable_cpxid(graph, node_to_varnode, prediction)
print("\t took %g secs." % (time.time()-timestamp_pred))
time_pred = time.time() - timestamp_pred
# print(prediction)
# todo check dimensions of p
time_rem_cplex = timelimit - time_pred
print("time_rem_cplex = %g" % time_rem_cplex)
instance_cpx.parameters.timelimit.set(time_rem_cplex)
var_names = rename_variables(instance_cpx.variables.get_names())
prediction_reord = [dict_varname_seqid[var_name][1] for var_name in var_names]
prediction = np.array(prediction_reord)
if len(method) == 1 and ('local_branching' in method[0]):
pred_one_coeff = (prediction >= 0.9) * (-1)
pred_zero_coeff = (prediction <= 0.1)
num_ones = -np.sum(pred_one_coeff)
coeffs = pred_one_coeff + pred_zero_coeff
local_branching_coeffs = [list(range(len(prediction))), coeffs.tolist()]
if method[0] == 'local_branching_approx':
instance_cpx.linear_constraints.add(
lin_expr=[local_branching_coeffs],
senses=['L'],
rhs=[float(lb_threshold - num_ones)],
names=['local_branching'])
elif method[0] == 'local_branching_exact':
branch_cb = instance_cpx.register_callback(callbacks_cplex.branch_local_exact)
branch_cb.coeffs = local_branching_coeffs
branch_cb.threshold = lb_threshold - num_ones
branch_cb.is_root = True
if 'branching_priorities' in method:
set_cplex_priorities(instance_cpx, prediction, branching_direction)
if 'node_selection' in method:
# score variables based on bias prediction
scores = np.max(((1-prediction), prediction), axis=0)
rounding = np.round(prediction)
print(np.mean(scores), np.mean(rounding))
print(np.argsort(prediction), np.sort(prediction)[:10], np.sort(prediction)[-10:])
branch_cb = instance_cpx.register_callback(callbacks_cplex.branch_attach_data2)
node_cb = instance_cpx.register_callback(callbacks_cplex.node_selection3)
branch_cb.scoring_function = 'sum' #'estimate'
branch_cb.scores = scores
branch_cb.rounding = rounding
branch_cb.zero_damping = zero_damping
node_cb.last_best = 0
node_cb.freq_best = freq_best
node_priority = []
branch_cb.node_priority = node_priority
node_cb.node_priority = node_priority
branch_cb.time = 0
node_cb.time = 0
if ('primal_mipstart' in method) or ('primal_mipstart_only' in method):
is_primal_mipstart = True
if not barebones or 'primal_mipstart_only' in method:
instance_cpx.parameters.mip.limits.cutpasses.set(-1)
instance_cpx.parameters.mip.strategy.heuristicfreq.set(-1)
instance_cpx.parameters.preprocessing.presolve.set(0)
mipstart_string = sys.stdout if logfile == "sys.stdout" else io.StringIO()
#frac_variables = [0.001*(1.5**i) for i in range(18)] #[0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
#frac_variables = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
#frac_variables = np.flip(np.linspace(0, 1, num=num_mipstarts+1))[:-1]
#print(frac_variables)
#threshold_set = np.minimum(prediction, 1-prediction)
#threshold_set = np.sort(threshold_set)#[:mipstart_numthresholds]
#threshold_set = [threshold_set[max([0, int(math.ceil(frac_variables[i]*num_variables)) - 1])] for i in range(len(frac_variables))]
threshold_set = [0.01*(2**i) for i in range(6)]
threshold_set.reverse()
threshold_set = np.clip(threshold_set, a_min=0, a_max=0.5)
print("threshold_set = ", threshold_set)
if mipstart_strategy == 'repair':
mipstart_strategy_int = instance_cpx.MIP_starts.effort_level.repair
elif mipstart_strategy == 'solve_MIP':
mipstart_strategy_int = instance_cpx.MIP_starts.effort_level.solve_MIP
else:
print("invalid mipstart_strategy %s" % mipstart_strategy)
exit()
best_objval_mipstart = -math.inf if sense_str == 'maximize' else math.inf
for idx, threshold in enumerate(threshold_set):
time_rem_cplex = timelimit - time_pred #(time.time() - time_vcg)
if time_rem_cplex <= 0:
break
indices_integer = np.where((prediction >= 1-threshold) | (prediction <= threshold))[0]
print(idx, threshold, len(indices_integer), len(prediction))
if len(indices_integer) == 0:
continue
instance_cpx.parameters.mip.display.set(0)
instance_cpx.parameters.mip.limits.nodes.set(0)
print("time_rem_cplex = %g" % time_rem_cplex)
instance_cpx.parameters.timelimit.set(time_rem_cplex)
instance_cpx.MIP_starts.add(
cplex.SparsePair(
ind=indices_integer.tolist(),
val=np.round(prediction[indices_integer]).tolist()),
mipstart_strategy_int)
instance_cpx.solve()
instance_cpx.MIP_starts.delete()
if instance_cpx.solution.is_primal_feasible(): #and instance_cpx.solution.get_objective_value() > best_objval_mipstart:
is_sol_better = (instance_cpx.solution.get_objective_value() > best_objval_mipstart) if sense_str == 'maximize' else (instance_cpx.solution.get_objective_value() < best_objval_mipstart)
if not is_sol_better:
continue
best_objval_mipstart = instance_cpx.solution.get_objective_value()
best_time = time.time() - time_vcg
incb_str_cur = ("Found incumbent of value %g after %g sec. mipstart %d %g %g\n" % (best_objval_mipstart, best_time, len(indices_integer), threshold, len(indices_integer)/num_variables))
print(incb_str_cur)
mipstart_string.write(incb_str_cur)#"Found incumbent of value %g after %g sec. mipstart %d %g %g\n" % (best_objval_mipstart, best_time, len(indices_integer), threshold))
instance_cpx.parameters.mip.display.set(3)
if not barebones and not 'primal_mipstart_only' in method:
instance_cpx.parameters.mip.limits.cutpasses.set(0)
instance_cpx.parameters.mip.strategy.heuristicfreq.set(0)
instance_cpx.parameters.preprocessing.presolve.set(1)
if 'primal_mipstart_only' not in method:
instance_cpx.parameters.mip.limits.nodes.set(1e9)
elif method[0] == 'default_emptycb':
branch_cb = instance_cpx.register_callback(callbacks_cplex.branch_empty)
time_rem_cplex = timelimit - time_pred #(time.time() - time_vcg)
print("time_rem_cplex = %g" % time_rem_cplex)
if time_rem_cplex > 0:
instance_cpx.parameters.timelimit.set(time_rem_cplex)
# todo: consider runseeds
# https://www.ibm.com/support/knowledgecenter/SSSA5P_12.9.0/ilog.odms.cplex.help/refpythoncplex/html/cplex.Cplex-class.html?view=kc#runseeds
instance_cpx.solve()
end_time = instance_cpx.get_time()
""" Get solving performance statistics """
incumbent_str = ''
cplex_status = instance_cpx.solution.get_status_string()
best_bound = instance_cpx.solution.MIP.get_best_objective()
num_nodes = instance_cpx.solution.progress.get_num_nodes_processed()
total_time = end_time - start_time
instance_name = os.path.splitext(os.path.basename(instance))[0]
best_objval, gap = math.inf, math.inf
if instance_cpx.solution.is_primal_feasible():
best_objval = instance_cpx.solution.get_objective_value()
gap = instance_cpx.solution.MIP.get_mip_relative_gap()
summary_string.write('solving stats,%s,%g,%g,%g,%g,%i,%g,%s,%i,%i,%g,%g\n' % (
cplex_status,
best_objval,
best_bound,
gap,
total_time,
num_nodes,
timelimit - time_rem_cplex,
instance_name,
num_variables,
num_constraints,
time_vcg_reading,
time_pred))
if logfile != 'sys.stdout':
if instance_cpx.solution.is_primal_feasible():
incumbent_str = ''
if is_primal_mipstart:
incumbent_str += utils.parse_cplex_log(mipstart_string.getvalue(), time_offset=time_pred)
incumbent_str += utils.parse_cplex_log(logstring.getvalue(), time_offset=time_pred)
print(incumbent_str)
summary_string.write(incumbent_str)
summary_string = summary_string.getvalue()
with open(logfile, 'w') as logfile:
logfile.write(summary_string)
if __name__ == '__main__':
    """ Parse arguments """
    # Command-line front end: every flag below maps 1:1 onto a keyword
    # argument of mipeval(), which is invoked at the end via **vars(args).
    parser = argparse.ArgumentParser()
    # parser.add_argument("-method", type=str, default='default')
    # One or more method tokens (e.g. 'node_selection', 'branching_priorities').
    parser.add_argument('-method', nargs='+', type=str, required=True)
    parser.add_argument("-instance", type=str)
    parser.add_argument("-graph", type=str, default='')
    parser.add_argument("-instance_params", type=str, default='')
    # Path to the trained GNN used for bias prediction.
    parser.add_argument("-model", type=str, default="../gnn_models/EdgeConv/trained_p_hat300-2")
    # CPLEX solver configuration.
    parser.add_argument("-cpx_emphasis", type=int, default=1)
    parser.add_argument("-cpx_threads", type=int, default=1)
    parser.add_argument("-cpx_tmp", type=str, default='./cpx_tmp/')
    parser.add_argument("-barebones", type=int, default=0)
    parser.add_argument("-timelimit", type=float, default=60)
    parser.add_argument("-memlimit", type=float, default=1024)
    # 'sys.stdout' prints the summary; any other value is treated as a file path.
    parser.add_argument("-logfile", type=str, default='sys.stdout')
    # Parameters for node selection
    parser.add_argument("-freq_best", type=int, default=100)
    parser.add_argument("-zero_damping", type=float, default=1.0)
    # Parameters for exact local branching
    parser.add_argument("-lb_threshold", type=int, default=5)
    # Parameters for primal heuristic mip start
    parser.add_argument("-num_mipstarts", type=int, default=6)
    parser.add_argument("-mipstart_strategy", type=str, default="repair")
    # Parameters for branching priorities
    parser.add_argument("-branching_direction", type=int, default=1)
    args = parser.parse_args()
    print(args)
    # Forward all parsed flags to the evaluation driver.
    mipeval(**vars(args))
| 15,580 | 40.328912 | 205 | py |
mipGNN | mipGNN-master/code/model_execution/predict.py | import sys
# Make repository-level packages (e.g. gnn_models) importable regardless of
# whether this script is run from its own directory or the repo root.
sys.path.insert(0, '..')
sys.path.insert(0, '../..')
sys.path.insert(0, '.')
import os
import os.path as osp
import numpy as np
import networkx as nx
from pathlib import Path
import torch
from torch_geometric.data import (InMemoryDataset, Data)
from torch_geometric.data import DataLoader
#from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet
# TODO: Uncomment for second model.
#from gnn_models.EdgeConv.mip_bipartite_class_test import SimpleNet
def get_prediction(model_name, graph, bias_threshold=0.00):
    """Run the bias-prediction GNN on a MIP instance graph.

    The network architecture is chosen from the model file name prefix
    (ECS / Rand_ECS / EC_ / GINS / GIN_ / SGS / SG_), matching the naming
    convention of the trained models shipped with the project.

    Args:
        model_name: path to a saved state dict; the basename encodes the
            architecture to instantiate.
        graph: networkx bipartite variable/constraint graph of the instance.
        bias_threshold: threshold forwarded to create_data_object when
            building the (unused here) binary targets.

    Returns:
        (out, node_to_varnode): per-variable probability of class 1 as a
        numpy array, and the mapping from original graph node ids to
        sequential variable ids.
    """
    model_name_only = os.path.basename(model_name)
    # Dispatch on the file-name prefix to import the matching architecture.
    if model_name_only.startswith("ECS") or model_name_only.startswith("Rand_ECS") or "trained" in model_name:
        from gnn_models.EdgeConv.mip_bipartite_simple_class import SimpleNet
    elif model_name_only.startswith("EC_"):
        from gnn_models.EdgeConv.mip_bipartite_class import SimpleNet
    elif model_name_only.startswith("GINS"):
        from gnn_models.GIN.mip_bipartite_simple_class import SimpleNet
    elif model_name_only.startswith("GIN_"):
        from gnn_models.GIN.mip_bipartite_class import SimpleNet
    elif model_name_only.startswith("SGS"):
        from gnn_models.Sage.mip_bipartite_simple_class import SimpleNet
    elif model_name_only.startswith("SG_"):
        from gnn_models.Sage.mip_bipartite_class import SimpleNet
    else:
        print("Model name does not match any model!")
        exit()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = SimpleNet(64, aggr="mean", num_layers=4).to(device)
    model.load_state_dict(torch.load(model_name, map_location=device))
    # Bug fix: bias_threshold was previously ignored (0.0 was hard-coded here).
    data, node_to_varnode, _ = create_data_object(graph, bias_threshold)
    model.eval()
    data = data.to(device)
    with torch.no_grad():
        # exp() turns log-probabilities into probabilities; column 1 is the
        # probability that the variable's bias is above the threshold.
        out = model(data).exp()[:, 1].cpu().numpy()
    return out, node_to_varnode
def get_variable_cpxid(graph, node_to_varnode, prediction):
    """Map each variable's name in the graph to (sequential id, predicted bias).

    Args:
        graph: networkx graph whose node labels are the CPLEX variable names.
        node_to_varnode: dict mapping graph node ids to sequential variable ids.
        prediction: per-variable predictions indexed by sequential variable id.

    Returns:
        dict: variable name -> (sequential id, prediction for that variable).
    """
    names = list(graph.nodes())
    return {
        names[graph_id]: (seq_id, prediction[seq_id])
        for graph_id, seq_id in node_to_varnode.items()
    }
def create_data_object(graph, bias_threshold):
    """Convert a bipartite variable/constraint networkx graph into a
    torch_geometric Data object for the GNN.

    Variable nodes (bipartite == 0) contribute features [objective coeff,
    degree] and, when a 'bias' attribute is present, a binary target obtained
    by thresholding that bias at bias_threshold. Constraint nodes
    (bipartite == 1) contribute features [rhs, degree]. Edges are split into
    two directed message-passing graphs (variable->constraint and
    constraint->variable), each carrying the constraint coefficient as its
    edge feature.

    Returns:
        (data, node_to_varnode, node_to_connode): the populated Data object
        and the dicts mapping original graph node ids to sequential
        variable / constraint ids.
    """
    # Make graph directed.
    graph = nx.convert_node_labels_to_integers(graph)
    graph = graph.to_directed() if not nx.is_directed(graph) else graph
    data = Data()
    # Maps networkx ids to new variable node ids.
    node_to_varnode = {}
    # Maps networkx ids to new constraint node ids.
    node_to_connode = {}
    # Number of variables.
    num_nodes_var = 0
    # Number of constraints.
    num_nodes_con = 0
    # Targets (classes).
    y = []
    # Raw continuous bias values, kept alongside the binary targets.
    y_real = []
    # Features for variable nodes.
    feat_var = []
    # Feature for constraints nodes.
    feat_con = []
    # Right-hand sides of equations.
    feat_rhs = []
    index = []
    index_var = []
    obj = []
    # Iterate over nodes, and collect features.
    for i, (node, node_data) in enumerate(graph.nodes(data=True)):
        # Node is a variable node.
        if node_data['bipartite'] == 0:
            node_to_varnode[i] = num_nodes_var
            num_nodes_var += 1
            if 'bias' in node_data and node_data['bias'] is not None:
                y_real.append(node_data['bias'])
                # Binary target: 1 iff the bias exceeds the threshold.
                if (node_data['bias'] <= bias_threshold):
                    y.append(0)
                else:
                    y.append(1)
            # Two attribute spellings occur in the graph files
            # ('objcoeff' vs 'obj_coeff'); support both.
            if 'objcoeff' in node_data:
                feat_var.append([node_data['objcoeff'], graph.degree[i]])
                obj.append([node_data['objcoeff']])
            else:
                feat_var.append([node_data['obj_coeff'], graph.degree[i]])
                obj.append([node_data['obj_coeff']])
            index_var.append(0)
        # Node is constraint node.
        elif node_data['bipartite'] == 1:
            node_to_connode[i] = num_nodes_con
            num_nodes_con += 1
            # Same dual spelling for the right-hand side ('rhs' vs 'bound').
            if 'rhs' in node_data:
                rhs = node_data['rhs']
            else:
                rhs = node_data['bound']
            feat_rhs.append([rhs])
            feat_con.append([rhs, graph.degree[i]])
            index.append(0)
        else:
            print("Error in graph format.")
            exit(-1)
    # Edge list for var->con graphs.
    edge_list_var = []
    # Edge list for con->var graphs.
    edge_list_con = []
    # Create features matrices for variable nodes.
    edge_features_var = []
    # Create features matrices for constraint nodes.
    edge_features_con = []
    # Remark: graph is directed, i.e., each edge exists for each direction.
    # Flow of messages: source -> target.
    for i, (s, t, edge_data) in enumerate(graph.edges(data=True)):
        # Source node is con, target node is var.
        if graph.nodes[s]['bipartite'] == 1:
            # Source node is constraint. C->V.
            edge_list_con.append([node_to_connode[s], node_to_varnode[t]])
            edge_features_con.append([edge_data['coeff']])
        else:
            # Source node is variable. V->C.
            edge_list_var.append([node_to_varnode[s], node_to_connode[t]])
            edge_features_var.append([edge_data['coeff']])
    # Transpose to the (2, num_edges) layout expected by message passing.
    edge_index_var = torch.tensor(edge_list_var).t().contiguous()
    edge_index_con = torch.tensor(edge_list_con).t().contiguous()
    # Create data object.
    data.edge_index_var = edge_index_var
    data.edge_index_con = edge_index_con
    data.y = torch.from_numpy(np.array(y)).to(torch.long)
    data.y_real = torch.from_numpy(np.array(y_real)).to(torch.float)
    data.var_node_features = torch.from_numpy(np.array(feat_var)).to(torch.float)
    data.con_node_features = torch.from_numpy(np.array(feat_con)).to(torch.float)
    data.rhs = torch.from_numpy(np.array(feat_rhs)).to(torch.float)
    data.obj = torch.from_numpy(np.array(obj)).to(torch.float)
    data.edge_features_con = torch.from_numpy(np.array(edge_features_con)).to(torch.float)
    data.edge_features_var = torch.from_numpy(np.array(edge_features_var)).to(torch.float)
    data.num_nodes_var = num_nodes_var
    data.num_nodes_con = num_nodes_con
    data.index = torch.from_numpy(np.array(index)).to(torch.long)
    data.index_var = torch.from_numpy(np.array(index_var)).to(torch.long)
    return data, node_to_varnode, node_to_connode
| 6,527 | 35.066298 | 102 | py |
eegnet-based-embedded-bci | eegnet-based-embedded-bci-master/main_global.py | #*----------------------------------------------------------------------------*
#* Copyright (C) 2020 ETH Zurich, Switzerland *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Authors: Batuhan Toemekce, Burak Kaya, Michael Hersche *
#*----------------------------------------------------------------------------*
#!/usr/bin/env python3
#################################################
#
# Global model training and validation
#
#################################################
import numpy as np
import os
#
import get_data as get
from tensorflow.keras import utils as np_utils
from keras.callbacks import LearningRateScheduler
from keras.optimizers import Adam
from keras import backend as K
from sklearn.model_selection import KFold
# EEGNet models
import models as models
# Channel reduction, downsampling, time window
from eeg_reduction import eeg_reduction
# Select GPU 0 with deterministic device ordering (by PCI bus id).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#################################################
#
# Learning Rate Constant Scheduling
#
#################################################
def step_decay(epoch):
    """Piecewise-constant learning-rate schedule.

    Returns 0.01 for epochs 0-19, 0.001 for epochs 20-49, and 0.0001
    from epoch 50 onward.
    """
    if epoch < 20:
        return 0.01
    if epoch < 50:
        return 0.001
    return 0.0001
lrate = LearningRateScheduler(step_decay)
#################################################
#
# Save results
#
#################################################
def save_results(history, num_classes, n_ds, n_ch, T, split_ctr):
    """Write the per-epoch training curves of one fold to a CSV file.

    Rows of the saved (transposed) array are: train accuracy, validation
    accuracy, train loss, validation loss.

    Returns:
        The final-epoch (train accuracy, validation accuracy) pair.
    """
    metric_keys = ('acc', 'val_acc', 'loss', 'val_loss')
    results = np.array([history.history[key] for key in metric_keys], dtype=float)
    results_str = os.path.join(
        results_dir,
        f'stats/global_class_{num_classes}_ds{n_ds}_nch{n_ch}_T{T}_split_{split_ctr}.csv')
    np.savetxt(results_str, np.transpose(results))
    return results[0:2, -1]
# CHANGE EXPERIMENT NAME FOR DIFFERENT TESTS!!
experiment_name = 'your-global-experiment'
# Root of the preprocessed Physionet EEG data.
datapath = "/usr/scratch/xavier/herschmi/EEG_data/physionet/"
results_dir = f'results/{experiment_name}/'
#os.makedirs(results_dir, exist_ok=True)
# NOTE(review): these directories are created under
# results/<experiment>/<experiment>/..., while save_results() and model.save()
# below write to results/<experiment>/... - confirm the intended layout.
os.makedirs(f'{results_dir}{experiment_name}/stats', exist_ok=True)
os.makedirs(f'{results_dir}{experiment_name}/model', exist_ok=True)
os.makedirs(f'{results_dir}{experiment_name}/plots', exist_ok=True)
# HYPERPARAMETER TO SET
num_classes_list = [4] # list of number of classes to test {2,3,4}
n_epochs = 2 # number of epochs for training
n_ds = 1 # downsamlping factor {1,2,3}
n_ch_list = [64] # number of channels {8,19,27,38,64}
T_list = [3] # duration to classify {1,2,3}
# model settings
# Temporal kernel and pooling lengths scale with the downsampling factor.
kernLength = int(np.ceil(128/n_ds))
poolLength = int(np.ceil(8/n_ds))
num_splits = 5
# Per-fold (final train accuracy, final validation accuracy).
acc = np.zeros((num_splits, 2))
for num_classes in num_classes_list:
    for n_ch in n_ch_list:
        for T in T_list:
            # Load data
            X, y = get.get_data(datapath, n_classes=num_classes)
            ######## If you want to save the data after loading once from .edf (faster)
            #np.savez(datapath+f'{num_classes}class',X_Train = X_Train, y_Train = y_Train)
            # NOTE(review): X, y loaded above are immediately overwritten by
            # the cached .npz below - the get_data call appears redundant.
            npzfile = np.load(datapath + f'{num_classes}class.npz')
            X, y = npzfile['X_Train'], npzfile['y_Train']
            # reduce EEG data (downsample, number of channels, time window)
            X = eeg_reduction(X, n_ds=n_ds, n_ch=n_ch, T=T)
            # Expand dimensions to match expected EEGNet input
            X = (np.expand_dims(X, axis=-1))
            # number of temporal sample per trial
            n_samples = np.shape(X)[2]
            # convert labels to one-hot encodings.
            y_cat = np_utils.to_categorical(y)
            # using 5 folds
            kf = KFold(n_splits=num_splits)
            split_ctr = 0
            for train, test in kf.split(X, y):
                # init model
                model = models.EEGNet(nb_classes=num_classes, Chans=n_ch, Samples=n_samples, regRate=0.25,
                                      dropoutRate=0.2, kernLength=kernLength, poolLength=poolLength, numFilters=8,
                                      dropoutType='Dropout')
                #print(model.summary())
                # Set Learning Rate
                # (the lrate callback below overrides this per epoch)
                adam_alpha = Adam(lr=(0.0001))
                model.compile(loss='categorical_crossentropy', optimizer=adam_alpha, metrics=['accuracy'])
                # Deterministic per-fold shuffle of the training indices.
                np.random.seed(42 * (split_ctr + 1))
                np.random.shuffle(train)
                # do training
                history = model.fit(X[train], y_cat[train],
                                    validation_data=(X[test], y_cat[test]),
                                    batch_size=16, epochs=n_epochs, callbacks=[lrate], verbose=2)
                acc[split_ctr] = save_results(history, num_classes, n_ds, n_ch, T, split_ctr)
                print('Fold {:}\t{:.4f}\t{:.4f}'.format(split_ctr, acc[split_ctr, 0], acc[split_ctr, 1]))
                #Save model
                model.save(os.path.join(results_dir, f'model/global_class_{num_classes}_ds{n_ds}_nch{n_ch}_T{T}_split_{split_ctr}.h5'))
                #Clear Models
                K.clear_session()
                split_ctr = split_ctr + 1
            print('AVG \t {:.4f}\t{:.4f}'.format(acc[:, 0].mean(), acc[:, 1].mean()))
| 6,501 | 37.473373 | 134 | py |
eegnet-based-embedded-bci | eegnet-based-embedded-bci-master/models.py | #*----------------------------------------------------------------------------*
#* Copyright (C) 2020 ETH Zurich, Switzerland *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Authors: Batuhan Toemekce, Burak Kaya, Michael Hersche *
#*----------------------------------------------------------------------------*
from keras.models import Model
from keras.layers.core import Dense, Activation
from keras.layers.convolutional import Conv2D, AveragePooling2D
from keras.layers.convolutional import SeparableConv2D
from keras.layers.normalization import BatchNormalization
from keras.layers import SpatialDropout2D, Dropout
from keras.regularizers import l1_l2
from keras.layers import Input, Flatten
from keras.layers import DepthwiseConv2D
from tensorflow.keras.constraints import max_norm
###################################################################
############## EEGnet model changed from loaded model #############
###################################################################
def EEGNet(nb_classes, Chans=64, Samples=128, regRate=.25,
           dropoutRate=0.1, kernLength=128, poolLength=8,
           numFilters=8, dropoutType='Dropout'):
    """
    Build the EEGNet Keras model: temporal convolution -> depthwise spatial
    convolution -> separable convolution -> dense softmax classifier.

    Inputs:
      nb_classes: int, number of classes to classify
      Chans, Samples: number of channels and time points in the EEG data
      regRate: max-norm constraint applied to the final dense layer's kernel
      dropoutRate: dropout fraction
      kernLength: length of temporal convolution in first layer
      poolLength: pool size of the first average-pooling layer
      numFilters: number of temporal-spatial filter pairs to learn
        Depending on the task, using numFilters = 4 or 8 seemed to do pretty well
        across tasks.
      dropoutType: 'Dropout' or 'SpatialDropout2D'

    Returns an uncompiled keras Model mapping (Chans, Samples, 1) inputs to
    nb_classes softmax outputs.
    """
    F1 = numFilters      # number of temporal filters
    D = 2                # depth multiplier: spatial filters per temporal filter
    F2 = numFilters*2    # number of pointwise filters in the separable conv
    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    input1 = Input(shape = (Chans, Samples,1))
    ##################################################################
    # Block 1: temporal convolution over time, then a depthwise conv
    # spanning all channels (one spatial filter per temporal filter pair).
    block1 = Conv2D(F1, (1, kernLength), padding = 'same',
                    input_shape = (Chans, Samples,1),
                    use_bias = False)(input1)
    # NOTE(review): BatchNormalization(axis = 1) normalises over the row
    # (channel) axis of this channels-last tensor - confirm this is intended
    # rather than axis = -1.
    block1 = BatchNormalization(axis = 1)(block1)
    block1 = DepthwiseConv2D((Chans, 1), use_bias = False,
                             depth_multiplier = D,
                             depthwise_constraint = max_norm(1.))(block1)
    block1 = BatchNormalization(axis = 1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, poolLength))(block1) # changed from 4 to 8
    block1 = dropoutType(dropoutRate)(block1)
    # Block 2: separable convolution followed by further temporal pooling.
    block2 = SeparableConv2D(F2, (1, 16),
                             use_bias = False, padding = 'same')(block1)
    block2 = BatchNormalization(axis = 1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)
    # Classifier head: flatten, max-norm-constrained dense layer, softmax.
    flatten = Flatten(name = 'flatten')(block2)
    dense = Dense(nb_classes, name = 'dense',
                  kernel_constraint = max_norm(regRate))(flatten)
    softmax = Activation('softmax', name = 'softmax')(dense)
    return Model(inputs=input1, outputs=softmax)
| 4,269 | 43.479167 | 79 | py |
eegnet-based-embedded-bci | eegnet-based-embedded-bci-master/main_ss.py | #*----------------------------------------------------------------------------*
#* Copyright (C) 2020 ETH Zurich, Switzerland *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Authors: Batuhan Toemekce, Burak Kaya, Michael Hersche *
#*----------------------------------------------------------------------------*
#!/usr/bin/env python3
#################################################
# 5 Global models have alredy been trained
# now, these models are used, and further
# (subject-specific) retrained.
#
# Finally, results within, and across
# subjects are averaged.
#
#################################################
import numpy as np
import os, io, sys
import pdb
from tensorflow.keras import utils as np_utils
from keras.optimizers import Adam
from keras.models import load_model
from keras import backend as K
from keras.callbacks import LearningRateScheduler
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
import get_data as get
# Select GPU 0 with deterministic device ordering (by PCI bus id).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#################################################
#
# Remove excluded subjects from subjects list
#
#################################################
def exclude_subjects(all_subjects=range(1, 110), excluded_subjects=[88, 92, 100, 104]):
    """Return the subject ids from all_subjects with the excluded ids removed.

    Defaults drop the four Physionet recordings that are not used, leaving
    105 of the 109 subjects.
    """
    kept = []
    for subject in all_subjects:
        if subject not in excluded_subjects:
            kept.append(subject)
    return kept
#################################################
#
# Learning Rate Constant Scheduling for subject-
# specific transfer learning
#
#################################################
# def step_decay(epoch):
# if(epoch < 2):
# lr = 0.01
# elif(epoch < 5):
# lr = 0.001
# else:
# lr = 0.0001
# return lr
# lrate = LearningRateScheduler(step_decay)
#################################################
#
# Save results
#
#################################################
def save_results(first_eval, tr_hist, num_classes, sub, split, n_ds, n_ch, T):
    """Write the subject-specific retraining curves of one split to CSV.

    Column 0 holds the evaluation of the unmodified global model (no training
    metrics exist for it, so those cells are NaN); the remaining columns hold
    the per-epoch retraining metrics. Rows of the saved (transposed) array
    are: train accuracy, validation accuracy, train loss, validation loss.

    Returns:
        The final-epoch (train accuracy, validation accuracy) pair.
    """
    n_epochs_run = len(tr_hist.history['acc'])
    results = np.zeros((4, 1 + n_epochs_run))
    # validation results w/o retraining: keras evaluate() returns (loss, acc).
    results[:, 0] = [np.nan, first_eval[1], np.nan, first_eval[0]]
    # retraining results
    for row, key in enumerate(('acc', 'val_acc', 'loss', 'val_loss')):
        results[row, 1:] = tr_hist.history[key]
    sub_str = '{0:03d}'.format(sub)
    results_str = f'{results_dir}/stats/ss_class_{num_classes}_ds{n_ds}_nch{n_ch}_T{T}_sub{sub_str}_split_{split}.csv'
    np.savetxt(results_str, np.transpose(results))
    return results[0:2, -1]
##############################################
# CHANGE EXPERIMENT NAME FOR DIFFERENT TESTS!!
ss_experiment = 'your-ss-experiment'
global_experiment = 'your-global-experiment'  # must match the trained global models
##############################################
datapath = "/usr/scratch/xavier/herschmi/EEG_data/physionet/"
# Folder holding the five pretrained global models to start from.
global_model_path = f'results/{global_experiment}/model/'
# Make necessary directories for files
results_dir = f'results/{ss_experiment}'
os.makedirs(f'{results_dir}/stats', exist_ok=True)
os.makedirs(f'{results_dir}/model', exist_ok=True)
os.makedirs(f'{results_dir}/plots', exist_ok=True)
# Specify number of classses for input data
num_classes_list = [4]
# Exclude subjects whose data we do not use
subjects = exclude_subjects()
n_subjects = len(subjects)
n_ds = 1
T = 3
n_ch = 64
verbose = 0 # verbosity for data loader and keras: 0 minimum,
# retraining parameters
n_epochs = 5
lr = 1e-3
for num_classes in num_classes_list:
    # using 5 folds
    num_splits = 5
    kf_global = KFold(n_splits=num_splits)
    split_ctr = 0
    # acc[subject, subject-fold] holds the final (train acc, val acc) pair.
    acc = np.zeros((n_subjects, 4, 2))
    # run over 5 global folds
    for _, test_sub_global in kf_global.split(subjects):
        for sub_idx in test_sub_global:
            subject = subjects[sub_idx]
            X_sub, y_sub = get.get_data(datapath, n_classes=num_classes, subjects_list=[subject])
            X_sub = np.expand_dims(X_sub, axis=-1)
            y_sub_cat = np_utils.to_categorical(y_sub)
            n_samples = np.shape(X_sub)[2]
            # split data while balancing classes
            kf_subject = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
            sub_split_ctr = 0
            for train_sub, test_sub in kf_subject.split(X_sub, y_sub):
                # load global model
                model = load_model(global_model_path + f'global_class_{num_classes}_ds{n_ds}_nch{n_ch}_T{T}_split_{split_ctr}.h5')
                # Baseline: evaluate the unmodified global model on this subject.
                first_eval = model.evaluate(X_sub[test_sub], y_sub_cat[test_sub], batch_size=16, verbose=verbose)
                adam_alpha = Adam(lr=lr)
                model.compile(loss='categorical_crossentropy', optimizer=adam_alpha, metrics=['accuracy'])
                # creating a history object
                history = model.fit(X_sub[train_sub], y_sub_cat[train_sub],
                                    validation_data=(X_sub[test_sub], y_sub_cat[test_sub]),
                                    batch_size=16, epochs=n_epochs, verbose=verbose) # callbacks=[lrate]
                # save results
                acc[sub_idx, sub_split_ctr] = save_results(first_eval, history, num_classes, subject, sub_split_ctr, n_ds, n_ch, T)
                K.clear_session()
                sub_split_ctr = sub_split_ctr + 1
            print("S{:d}\t{:.4f}\t{:.4f}".format(subject, acc[sub_idx, :, 0].mean(), acc[sub_idx, :, 1].mean()))
        split_ctr = split_ctr + 1
    print("AVG\t{:.4f}\t{:.4f}".format(acc[:, :, 0].mean(), acc[:, :, 1].mean()))
| 6,798 | 37.196629 | 128 | py |
ConvoSource | ConvoSource-master/generate_pybdsf_solutions2.py | """
This script outputs the PyBDSF results in the same format as the AutoSource results, again assuming the images have size 50x50 pixels and are spaced 50 pixels apart. The following command gets the PyBDSF results on the 560MHz data at 8h exposure time, at an SNR of 1.
Usage:
python generate_pybdsf_solutions2.py --img_inc 50 --img_size 50 --freq 1 --bg_fits '/path/to/background_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits' --save_dir '/path/where/to/save/results/' --snr 1 --pybdsf_table '/path/to/pybdsf/table/SKAMid_B1_8h_pbf_corrected_v3_table.csv' --load_solutions '/path/to/segmented/solutions/solutions_50_50_1_final.npy'
Author: Vesna Lukic, E-mail: `vlukic973@gmail.com`
"""
from __future__ import division
import pandas as pd
import numpy as np
from astropy.io import fits
from sklearn.metrics.pairwise import euclidean_distances
import operator
import matplotlib.pyplot as plt
import scipy
from scipy import ndimage
# Region cut out of the full SKA mosaic for each frequency band:
# (x offset, y offset, square size in pixels).
_FREQ_REGIONS = {
    1: (16300, 16300, 4000),
    2: (16300, 16300, 4200),
    3: (16300, 21700, 4000),
}


def _tile(image, img_size, img_inc):
    """Cut a 2-D image into img_size x img_size tiles every img_inc pixels.

    Tiles are ordered row-major (top-left to bottom-right), matching the
    ordering of the ground-truth solution tiles. Returns a float array of
    shape (n_tiles, img_size, img_size).
    """
    tiles = [image[i:i + img_size, j:j + img_size]
             for i in range(0, image.shape[0], img_inc)
             for j in range(0, image.shape[1], img_inc)]
    if not tiles:
        return np.empty((0, img_size, img_size))
    return np.asarray(tiles, dtype=np.float64)


def pybdsf_sols(args):
    """Convert a PyBDSF source catalogue into tiled binary solution maps.

    Reads the PyBDSF table, keeps sources that fall inside the evaluation
    region for args.freq and whose total flux exceeds args.snr times the mean
    of the background rms map over that region, paints each surviving source
    as a single pixel, tiles the resulting map, saves the tile stack as
    'PyBDSF_solutions_<freq>_<img_size>_<img_inc>.npy' in args.save_dir, and
    returns it.

    Previously the three frequency bands were handled by three near-identical
    copies of this logic; they differed only in the region offsets/size now
    held in _FREQ_REGIONS.
    """
    if args.freq not in _FREQ_REGIONS:
        raise ValueError(f"Unsupported freq {args.freq}; expected one of {sorted(_FREQ_REGIONS)}")
    x_off, y_off, size = _FREQ_REGIONS[args.freq]

    table = pd.read_csv(args.pybdsf_table, skiprows=5)
    table[' Xposn'] = np.round(table[' Xposn'])
    table[' Yposn'] = np.round(table[' Yposn'])
    # Keep only sources strictly inside the evaluated region.
    table = table[(table[' Xposn'] > x_off) & (table[' Xposn'] < x_off + size)
                  & (table[' Yposn'] > y_off) & (table[' Yposn'] < y_off + size)]

    # Mean background over the same region; NaNs are zeroed first, as in the
    # original per-band code (so they pull the mean down).
    hdul = fits.open(args.bg_fits)
    bg = hdul[0].data[0, 0, y_off:y_off + size, x_off:x_off + size]
    bg[np.isnan(bg)] = 0
    table = table[table[' Total_flux'] > args.snr * np.mean(bg)]
    table = table.reset_index(drop=True)
    # Shift catalogue coordinates into region-local pixel coordinates.
    table[' Xposn'] = table[' Xposn'] - x_off
    table[' Yposn'] = table[' Yposn'] - y_off

    # Paint each surviving source as a single marked pixel (row = Y, col = X).
    marker = np.zeros((size, size), dtype=np.uint8)
    for i in range(len(table)):
        marker[int(table[' Yposn'].iloc[i]), int(table[' Xposn'].iloc[i])] = 1

    result_array = _tile(marker, args.img_size, args.img_inc)
    np.save(args.save_dir + 'PyBDSF_solutions_' + str(args.freq) + '_' + str(args.img_size) + '_' + str(args.img_inc) + '.npy', result_array)
    return result_array
def test(args):
    """
    Load the ground-truth segmentation and the saved PyBDSF solution tiles
    for the held-out test split.

    Classes 2 and 3 of the ground truth are collapsed into class 1 so that
    test_Y is a binary 'source present' map, while class_test keeps the
    original class labels. When args.show_img == 'True', side-by-side plots
    of up to ten tiles containing labelled pixels are saved to args.save_dir.

    Returns:
        (test_Y, class_test, recon_image, total ground-truth source pixels,
        total PyBDSF source pixels).
    """
    class_image = np.load(args.load_solutions)
    data2 = class_image
    # Collapse the multi-class ground truth to binary source/background.
    data2 = np.where(data2 == 2, 1, data2)
    data2 = np.where(data2 == 3, 1, data2)
    solutions_orig = data2
    solutions_all = solutions_orig
    # Add a trailing channel axis: (n, H, W) -> (n, H, W, 1).
    solutions_all = solutions_all.reshape(solutions_all.shape[0], solutions_all.shape[1], solutions_all.shape[2], 1)
    class_image = class_image.reshape(class_image.shape[0], class_image.shape[1], class_image.shape[2], 1)
    train_proportion = args.train_prop
    # Keep only the test split (the tail of the data set).
    test_Y = solutions_all[int(train_proportion * len(solutions_all)):len(solutions_all)]
    class_test = class_image[int(train_proportion * len(solutions_all)):len(solutions_all)]
    print(test_Y.shape)
    print(class_test.shape)
    test_Y = test_Y.astype('float32')
    class_test = class_test.astype('float32')
    # Load the PyBDSF tiles produced by pybdsf_sols() and take the same split.
    recon_image = np.load(args.save_dir + 'PyBDSF_solutions_' + str(args.freq) + '_' + str(args.img_size) + '_' + str(args.img_inc) + '.npy')
    recon_image = recon_image[int(train_proportion * len(solutions_all)):len(solutions_all)]
    print(int(train_proportion * len(solutions_all)), len(solutions_all))
    recon_image = recon_image.reshape(recon_image.shape[0], recon_image.shape[1], recon_image.shape[2], 1)
    print(recon_image.shape)
    recon_image = recon_image.astype('float32')
    if (args.show_img == 'True'):
        print('Now showing images and detected features')
        plt.ion()
        # Save comparison plots for up to ten tiles with labelled pixels.
        for i in np.where(class_test == 1)[0][0:10]:
            plt.subplot(1, 2, 1); plt.axis('off'); plt.imshow(class_test[i, :, :, 0])
            plt.subplot(1, 2, 2); plt.axis('off'); plt.imshow(recon_image[i, :, :, 0])
            plt.savefig(args.save_dir + 'test_X_Y_recon_pybdsf' + str(i) + '.png')
            plt.close()
    return test_Y, class_test, recon_image, np.sum(test_Y), np.sum(recon_image)
def calculate_metrics(test_Y,class_test,recon_image):
    """
    Score the reconstructed source maps against the ground truth.

    Sweeps a detection threshold over fractions of each image's peak value
    and, for each threshold, matches predicted pixel positions to the true
    source positions by Euclidean distance (< 3 pixels counts as a true
    positive), separately for SFGs (class 3), SS-AGN (class 1), FS-AGN
    (class 2) and all sources combined.

    Python 3 fix: the coordinate pairings below are materialized with
    list(zip(...)).  A bare zip() iterator is always truthy (breaking the
    "if x" filters), is exhausted after a single pass (breaking the
    repeated set() operations on all_coords), and is rejected by
    sklearn's euclidean_distances.

    Returns the precision/recall/F1/accuracy and raw TP/FP/TN/FN counts
    of the LAST threshold evaluated, plus the final per-image DataFrame.
    NOTE(review): the per-threshold *_list accumulators are populated but
    never returned — verify whether they were meant to be saved.
    NOTE: the grid of all pixel coordinates assumes 50x50 segments.
    """
    from sklearn.metrics.pairwise import euclidean_distances
    import pandas as pd
    precision_sfg_list=[]
    recall_sfg_list=[]
    F1_score_sfg_list=[]
    accuracy_sfg_list=[]
    precision_ss_list=[]
    recall_ss_list=[]
    F1_score_ss_list=[]
    accuracy_ss_list=[]
    precision_fs_list=[]
    recall_fs_list=[]
    F1_score_fs_list=[]
    accuracy_fs_list=[]
    precision_all_list=[]
    recall_all_list=[]
    F1_score_all_list=[]
    accuracy_all_list=[]
    for recon_threshold in np.round(np.linspace(0.1,0.99,11),2):
        print(recon_threshold)
        # Per image: pixel indices above threshold (predictions) and the
        # true pixel indices of each source class.
        array_loc=[]
        true_loc=[]
        class_ss=[]
        class_fs=[]
        class_sfg=[]
        class_all=[]
        sum_source=[]
        for i in range(0,len(recon_image)):
            array_loc.append(np.where(recon_threshold*np.max(recon_image[i,:,:,0])<recon_image[i,:,:,0]))
            class_sfg.append(np.where(class_test[i,:,:,0]==3))
            class_ss.append(np.where(class_test[i,:,:,0]==1))
            class_fs.append(np.where(class_test[i,:,:,0]==2))
            class_all.append(np.where(test_Y[i,:,:,0]==1))
            sum_source.append(np.sum(class_test[i]))
        # Pair row/column index arrays into (y, x) coordinate lists.
        pred_loc=[]
        class_sfg1=[]
        class_ss1=[]
        class_fs1=[]
        class_all1=[]
        for i in range(0,len(recon_image)):
            pred_loc.append(list(zip(array_loc[i][0],array_loc[i][1])))
            class_sfg1.append(list(zip(class_sfg[i][0],class_sfg[i][1])))
            class_ss1.append(list(zip(class_ss[i][0],class_ss[i][1])))
            class_fs1.append(list(zip(class_fs[i][0],class_fs[i][1])))
            class_all1.append(list(zip(class_all[i][0],class_all[i][1])))
        # Indices of the images that actually contain each class.
        pred_sfg=[i for i,x in enumerate(class_sfg1) if x]
        pred_ss=[i for i,x in enumerate(class_ss1) if x]
        pred_fs=[i for i,x in enumerate(class_fs1) if x]
        pred_all=[i for i,x in enumerate(class_all1) if x]
        pred_sfg1 = [pred_loc[i] for i in pred_sfg]
        pred_ss1 = [pred_loc[i] for i in pred_ss]
        pred_fs1 = [pred_loc[i] for i in pred_fs]
        pred_all1 = [pred_loc[i] for i in pred_all]
        class_sfg2 = [class_sfg1[i] for i in pred_sfg]
        class_ss2 = [class_ss1[i] for i in pred_ss]
        class_fs2 = [class_fs1[i] for i in pred_fs]
        class_all2 = [class_all1[i] for i in pred_all]
        # Pairwise distances between predicted pixels of the same image;
        # used below to merge contiguous pixels into single detections.
        euc_dist_sfg=[]
        euc_dist_ss=[]
        euc_dist_fs=[]
        euc_dist_all=[]
        for i in range(0,len(pred_sfg1)):
            if (pred_sfg1[i]!=[]):
                euc_dist_sfg.append(np.round(euclidean_distances(pred_sfg1[i],pred_sfg1[i])))
            else:
                euc_dist_sfg.append([])
        for i in range(0,len(pred_ss1)):
            if (pred_ss1[i]!=[]):
                euc_dist_ss.append(np.round(euclidean_distances(pred_ss1[i],pred_ss1[i])))
            else:
                euc_dist_ss.append([])
        for i in range(0,len(pred_fs1)):
            if (pred_fs1[i]!=[]):
                euc_dist_fs.append(np.round(euclidean_distances(pred_fs1[i],pred_fs1[i])))
            else:
                euc_dist_fs.append([])
        for i in range(0,len(pred_all1)):
            if (pred_all1[i]!=[]):
                euc_dist_all.append(np.round(euclidean_distances(pred_all1[i],pred_all1[i])))
            else:
                euc_dist_all.append([])
        # --- SFG: pick one representative pixel per separated detection
        # (a new detection starts where consecutive pixels are > 1 apart).
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_sfg=[]
        for i in range(0,len(pred_sfg1)):
            for j in range(1,len(euc_dist_sfg[i])):
                des_elem.append((0))
                if (euc_dist_sfg[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_sfg.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_sfg):
            if i == []:
                des_elem_sfg[n] = [0]
        ind_loc=[]
        ind_loc_sfg=[]
        for i in range(0,len(pred_sfg1)):
            for j in range(0,len(des_elem_sfg[i])):
                if (pred_sfg1[i]!=[]):
                    ind_loc.append(pred_sfg1[i][des_elem_sfg[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_sfg.append(ind_loc)
            ind_loc=[]
        df_sfg=pd.DataFrame()
        df_sfg['pred_loc']=pred_sfg1
        df_sfg['des_elem_sfg']=des_elem_sfg
        df_sfg['ind_loc2']=ind_loc_sfg
        # --- SS-AGN: same detection-merging procedure.
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_ss=[]
        for i in range(0,len(pred_ss1)):
            for j in range(1,len(euc_dist_ss[i])):
                des_elem.append((0))
                if (euc_dist_ss[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_ss.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_ss):
            if i == []:
                des_elem_ss[n] = [0]
        ind_loc=[]
        ind_loc_ss=[]
        for i in range(0,len(pred_ss1)):
            for j in range(0,len(des_elem_ss[i])):
                if (pred_ss1[i]!=[]):
                    ind_loc.append(pred_ss1[i][des_elem_ss[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_ss.append(ind_loc)
            ind_loc=[]
        df_ss=pd.DataFrame()
        df_ss['pred_loc']=pred_ss1
        df_ss['des_elem_ss']=des_elem_ss
        df_ss['ind_loc2']=ind_loc_ss
        # --- FS-AGN: same detection-merging procedure.
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_fs=[]
        for i in range(0,len(pred_fs1)):
            for j in range(1,len(euc_dist_fs[i])):
                des_elem.append((0))
                if (euc_dist_fs[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_fs.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_fs):
            if i == []:
                des_elem_fs[n] = [0]
        ind_loc=[]
        ind_loc_fs=[]
        for i in range(0,len(pred_fs1)):
            for j in range(0,len(des_elem_fs[i])):
                if (pred_fs1[i]!=[]):
                    ind_loc.append(pred_fs1[i][des_elem_fs[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_fs.append(ind_loc)
            ind_loc=[]
        df_fs=pd.DataFrame()
        df_fs['pred_loc']=pred_fs1
        df_fs['des_elem_fs']=des_elem_fs
        df_fs['ind_loc2']=ind_loc_fs
        # --- all sources: same detection-merging procedure.
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_all=[]
        for i in range(0,len(pred_all1)):
            for j in range(1,len(euc_dist_all[i])):
                des_elem.append((0))
                if (euc_dist_all[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_all.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_all):
            if i == []:
                des_elem_all[n] = [0]
        ind_loc=[]
        ind_loc_all=[]
        for i in range(0,len(pred_all1)):
            for j in range(0,len(des_elem_all[i])):
                if (pred_all1[i]!=[]):
                    ind_loc.append(pred_all1[i][des_elem_all[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_all.append(ind_loc)
            ind_loc=[]
        df_all=pd.DataFrame()
        df_all['pred_loc']=pred_all1
        df_all['des_elem_all']=des_elem_all
        df_all['ind_loc2']=ind_loc_all
        # Distances between merged detections and the true source pixels.
        class_ss=[]
        class_ss_shape=[]
        class_sfg=[]
        class_sfg_shape=[]
        class_fs=[]
        class_fs_shape=[]
        class_all=[]
        class_all_shape=[]
        for i in range(0,len(class_sfg2)):
            if ((class_sfg2[i]!=[]) & (ind_loc_sfg[i]!=[[]])):
                class_sfg.append(np.round(euclidean_distances(ind_loc_sfg[i],class_sfg2[i])))
                class_sfg_shape.append(euclidean_distances(ind_loc_sfg[i],class_sfg2[i]).shape)
            else:
                class_sfg.append([])
                class_sfg_shape.append([])
        for i in range(0,len(class_ss2)):
            if ((class_ss2[i]!=[]) & (ind_loc_ss[i]!=[[]])):
                class_ss.append(np.round(euclidean_distances(ind_loc_ss[i],class_ss2[i])))
                class_ss_shape.append(euclidean_distances(ind_loc_ss[i],class_ss2[i]).shape)
            else:
                class_ss.append([])
                class_ss_shape.append([])
        for i in range(0,len(class_fs2)):
            if ((class_fs2[i]!=[]) & (ind_loc_fs[i]!=[[]])):
                class_fs.append(np.round(euclidean_distances(ind_loc_fs[i],class_fs2[i])))
                class_fs_shape.append(euclidean_distances(ind_loc_fs[i],class_fs2[i]).shape)
            else:
                class_fs.append([])
                class_fs_shape.append([])
        for i in range(0,len(class_all2)):
            if ((class_all2[i]!=[]) & (ind_loc_all[i]!=[[]])):
                class_all.append(np.round(euclidean_distances(ind_loc_all[i],class_all2[i])))
                class_all_shape.append(euclidean_distances(ind_loc_all[i],class_all2[i]).shape)
            else:
                class_all.append([])
                class_all_shape.append([])
        # Confusion counts: a prediction within pix_threshold pixels of a
        # true source is a TP; shape mismatches give the FN/FP excesses.
        TP_sfg_list=[]
        TP_ss_list=[]
        TP_fs_list=[]
        TP_all_list=[]
        FN_sfg_list=[]
        FN_ss_list=[]
        FN_fs_list=[]
        FN_all_list=[]
        FP_sfg_list2=[]
        FP_ss_list2=[]
        FP_fs_list2=[]
        FP_all_list2=[]
        pred_sfg_loc_len=[]
        pred_ss_loc_len=[]
        pred_fs_loc_len=[]
        pred_all_loc_len=[]
        class_loc_len=[]
        pix_threshold=3
        for i in range(0,len(class_sfg2)):
            TP_sfg_list.append(np.sum(class_sfg[i]<pix_threshold))
            pred_sfg_loc_len.append(len(pred_sfg1[i]))
        for i in range(0,len(class_ss2)):
            TP_ss_list.append(np.sum(class_ss[i]<pix_threshold))
            pred_ss_loc_len.append(len(pred_ss1[i]))
        for i in range(0,len(class_fs2)):
            TP_fs_list.append(np.sum(class_fs[i]<pix_threshold))
            pred_fs_loc_len.append(len(pred_fs1[i]))
        for i in range(0,len(class_all2)):
            TP_all_list.append(np.sum(class_all[i]<pix_threshold))
            pred_all_loc_len.append(len(pred_all1[i]))
        for i in range(0,len(class_sfg2)):
            if (class_sfg_shape[i]!=[]):
                FN_sfg_list.append(class_sfg_shape[i][1]-class_sfg_shape[i][0])
                FP_sfg_list2.append(class_sfg_shape[i][0]-class_sfg_shape[i][1])
            else:
                FN_sfg_list.append(0)
                FP_sfg_list2.append(0)
        for i in range(0,len(class_ss2)):
            if (class_ss_shape[i]!=[]):
                FN_ss_list.append(class_ss_shape[i][1]-class_ss_shape[i][0])
                FP_ss_list2.append(class_ss_shape[i][0]-class_ss_shape[i][1])
            else:
                FN_ss_list.append(0)
                FP_ss_list2.append(0)
        for i in range(0,len(class_fs2)):
            if (class_fs_shape[i]!=[]):
                FN_fs_list.append(class_fs_shape[i][1]-class_fs_shape[i][0])
                FP_fs_list2.append(class_fs_shape[i][0]-class_fs_shape[i][1])
            else:
                FN_fs_list.append(0)
                FP_fs_list2.append(0)
        for i in range(0,len(class_all2)):
            if (class_all_shape[i]!=[]):
                FN_all_list.append(class_all_shape[i][1]-class_all_shape[i][0])
                FP_all_list2.append(class_all_shape[i][0]-class_all_shape[i][1])
            else:
                FN_all_list.append(0)
                FP_all_list2.append(0)
        # All pixel coordinates of a 50x50 segment (equivalent to the old
        # double loop; must be a list, not a zip, to survive repeated
        # set() conversions below).
        all_coords=[(i,j) for i in range(0,50) for j in range(0,50)]
        # True negatives: pixels that are neither true sources nor
        # predictions.  NOTE(review): class_*1 and pred_*1 are indexed by
        # position in the FILTERED lists here, which only lines up when
        # every image contains the class — verify against callers.
        true_sfg_negatives_list=[]
        true_ss_negatives_list=[]
        true_fs_negatives_list=[]
        true_all_negatives_list=[]
        for i in range(0,len(class_sfg2)):
            not_in_sfg_loc1 = list(set(all_coords) - set(class_sfg1[i]))
            not_in_sfg_pred_loc = list(set(all_coords) - set(pred_sfg1[i]))
            true_sfg_negatives=len(list(set(not_in_sfg_loc1).intersection(not_in_sfg_pred_loc)))
            true_sfg_negatives_list.append(true_sfg_negatives)
        for i in range(0,len(class_ss2)):
            not_in_ss_loc1 = list(set(all_coords) - set(class_ss1[i]))
            not_in_ss_pred_loc = list(set(all_coords) - set(pred_ss1[i]))
            true_ss_negatives=len(list(set(not_in_ss_loc1).intersection(not_in_ss_pred_loc)))
            true_ss_negatives_list.append(true_ss_negatives)
        for i in range(0,len(class_fs2)):
            not_in_fs_loc1 = list(set(all_coords) - set(class_fs1[i]))
            not_in_fs_pred_loc = list(set(all_coords) - set(pred_fs1[i]))
            true_fs_negatives=len(list(set(not_in_fs_loc1).intersection(not_in_fs_pred_loc)))
            true_fs_negatives_list.append(true_fs_negatives)
        for i in range(0,len(class_all2)):
            not_in_all_loc1 = list(set(all_coords) - set(class_all1[i]))
            not_in_all_pred_loc = list(set(all_coords) - set(pred_all1[i]))
            true_all_negatives=len(list(set(not_in_all_loc1).intersection(not_in_all_pred_loc)))
            true_all_negatives_list.append(true_all_negatives)
        # Assemble per-image confusion counts (negative counts clipped to 0).
        df_sfg=pd.DataFrame()
        df_sfg['euc_dist_shape']=class_sfg_shape
        df_sfg['pred_sfg_loc_len']=pred_sfg_loc_len
        df_sfg['TP_sfg']=TP_sfg_list
        df_sfg['FP_sfg_list1']=df_sfg['pred_sfg_loc_len']-df_sfg['TP_sfg']
        df_sfg.loc[df_sfg['FP_sfg_list1'] < 0, 'FP_sfg_list1'] = 0
        df_sfg['FN_sfg']=FN_sfg_list
        df_sfg['FP_sfg_list2']=FP_sfg_list2
        df_sfg.loc[df_sfg['FP_sfg_list2'] < 0, 'FP_sfg_list2'] = 0
        df_sfg.loc[df_sfg['FN_sfg'] < 0, 'FN_sfg'] = 0
        df_sfg['FP_sfg']=df_sfg['FP_sfg_list1']+df_sfg['FP_sfg_list2']
        df_sfg['TN_sfg']=true_sfg_negatives_list
        df_ss=pd.DataFrame()
        df_ss['euc_dist_shape']=class_ss_shape
        df_ss['pred_ss_loc_len']=pred_ss_loc_len
        df_ss['TP_ss']=TP_ss_list
        df_ss['FP_ss_list1']=df_ss['pred_ss_loc_len']-df_ss['TP_ss']
        df_ss.loc[df_ss['FP_ss_list1'] < 0, 'FP_ss_list1'] = 0
        df_ss['FN_ss']=FN_ss_list
        df_ss['FP_ss_list2']=FP_ss_list2
        df_ss.loc[df_ss['FP_ss_list2'] < 0, 'FP_ss_list2'] = 0
        df_ss.loc[df_ss['FN_ss'] < 0, 'FN_ss'] = 0
        df_ss['FP_ss']=df_ss['FP_ss_list1']+df_ss['FP_ss_list2']
        df_ss['TN_ss']=true_ss_negatives_list
        df_fs=pd.DataFrame()
        df_fs['euc_dist_shape']=class_fs_shape
        df_fs['pred_fs_loc_len']=pred_fs_loc_len
        df_fs['TP_fs']=TP_fs_list
        df_fs['FP_fs_list1']=df_fs['pred_fs_loc_len']-df_fs['TP_fs']
        df_fs.loc[df_fs['FP_fs_list1'] < 0, 'FP_fs_list1'] = 0
        df_fs['FN_fs']=FN_fs_list
        df_fs['FP_fs_list2']=FP_fs_list2
        df_fs.loc[df_fs['FP_fs_list2'] < 0, 'FP_fs_list2'] = 0
        df_fs.loc[df_fs['FN_fs'] < 0, 'FN_fs'] = 0
        df_fs['FP_fs']=df_fs['FP_fs_list1']+df_fs['FP_fs_list2']
        df_fs['TN_fs']=true_fs_negatives_list
        df_all=pd.DataFrame()
        df_all['euc_dist_shape']=class_all_shape
        df_all['pred_all_loc_len']=pred_all_loc_len
        df_all['TP_all']=TP_all_list
        df_all['FP_all_list1']=df_all['pred_all_loc_len']-df_all['TP_all']
        df_all.loc[df_all['FP_all_list1'] < 0, 'FP_all_list1'] = 0
        df_all['FN_all']=FN_all_list
        df_all['FP_all_list2']=FP_all_list2
        df_all.loc[df_all['FP_all_list2'] < 0, 'FP_all_list2'] = 0
        df_all.loc[df_all['FN_all'] < 0, 'FN_all'] = 0
        df_all['FP_all']=df_all['FP_all_list1']+df_all['FP_all_list2']
        df_all['TN_all']=true_all_negatives_list
        # Metrics (the 1e-5 terms guard against division by zero).
        precision_sfg=np.sum(df_sfg['TP_sfg'])/(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['FP_sfg'])+0.00001)
        recall_sfg=np.sum(df_sfg['TP_sfg'])/(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['FN_sfg'])+0.00001)
        F1_score_sfg=2*precision_sfg*recall_sfg/(precision_sfg+recall_sfg+0.00001)
        accuracy_sfg=(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['TN_sfg']))/(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['TN_sfg'])+np.sum(df_sfg['FP_sfg'])+np.sum(df_sfg['FN_sfg'])+0.00001)
        precision_ss=np.sum(df_ss['TP_ss'])/(np.sum(df_ss['TP_ss'])+np.sum(df_ss['FP_ss'])+0.00001)
        recall_ss=np.sum(df_ss['TP_ss'])/(np.sum(df_ss['TP_ss'])+np.sum(df_ss['FN_ss'])+0.00001)
        F1_score_ss=2*precision_ss*recall_ss/(precision_ss+recall_ss+0.00001)
        accuracy_ss=(np.sum(df_ss['TP_ss'])+np.sum(df_ss['TN_ss']))/(np.sum(df_ss['TP_ss'])+np.sum(df_ss['TN_ss'])+np.sum(df_ss['FP_ss'])+np.sum(df_ss['FN_ss'])+0.00001)
        precision_fs=np.sum(df_fs['TP_fs'])/(np.sum(df_fs['TP_fs'])+np.sum(df_fs['FP_fs'])+0.00001)
        recall_fs=np.sum(df_fs['TP_fs'])/(np.sum(df_fs['TP_fs'])+np.sum(df_fs['FN_fs'])+0.00001)
        F1_score_fs=2*precision_fs*recall_fs/(precision_fs+recall_fs+0.00001)
        accuracy_fs=(np.sum(df_fs['TP_fs'])+np.sum(df_fs['TN_fs']))/(np.sum(df_fs['TP_fs'])+np.sum(df_fs['TN_fs'])+np.sum(df_fs['FP_fs'])+np.sum(df_fs['FN_fs'])+0.00001)
        precision_all=np.sum(df_all['TP_all'])/(np.sum(df_all['TP_all'])+np.sum(df_all['FP_all'])+0.00001)
        recall_all=np.sum(df_all['TP_all'])/(np.sum(df_all['TP_all'])+np.sum(df_all['FN_all'])+0.00001)
        F1_score_all=2*precision_all*recall_all/(precision_all+recall_all+0.00001)
        accuracy_all=(np.sum(df_all['TP_all'])+np.sum(df_all['TN_all']))/(np.sum(df_all['TP_all'])+np.sum(df_all['TN_all'])+np.sum(df_all['FP_all'])+np.sum(df_all['FN_all'])+0.00001)
        precision_sfg_list.append(precision_sfg)
        recall_sfg_list.append(recall_sfg)
        F1_score_sfg_list.append(F1_score_sfg)
        accuracy_sfg_list.append(accuracy_sfg)
        precision_ss_list.append(precision_ss)
        recall_ss_list.append(recall_ss)
        F1_score_ss_list.append(F1_score_ss)
        accuracy_ss_list.append(accuracy_ss)
        precision_fs_list.append(precision_fs)
        recall_fs_list.append(recall_fs)
        F1_score_fs_list.append(F1_score_fs)
        accuracy_fs_list.append(accuracy_fs)
        precision_all_list.append(precision_all)
        recall_all_list.append(recall_all)
        F1_score_all_list.append(F1_score_all)
        accuracy_all_list.append(accuracy_all)
    return precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all,np.sum(df_sfg['TP_sfg']),np.sum(df_sfg['FP_sfg']),np.sum(df_sfg['TN_sfg']),np.sum(df_sfg['FN_sfg']),np.sum(df_ss['TP_ss']),np.sum(df_ss['FP_ss']),np.sum(df_ss['TN_ss']),np.sum(df_ss['FN_ss']),np.sum(df_fs['TP_fs']),np.sum(df_fs['FP_fs']),np.sum(df_fs['TN_fs']),np.sum(df_fs['FN_fs']),np.sum(df_all['TP_all']),np.sum(df_all['FP_all']),np.sum(df_all['TN_all']),np.sum(df_all['FN_all']),df_all
if __name__ == "__main__":
    import os
    import argparse
    from keras import callbacks

    # CLI for scoring the PyBDSF source finder against the ground truth.
    parser = argparse.ArgumentParser(description="Get metrics from the PyBDSF source-finder")
    parser.add_argument('--save_dir', default='/path/where/to/save/results/')
    parser.add_argument('--load_solutions', default='/path/to/segmented/solutions/solutions_50_505_final.npy')
    parser.add_argument('--bg_fits', default='/path/to/background_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits')
    parser.add_argument('--pybdsf_table', default='/path/to/pybdsf/table/SKAMid_B1_8h_pb_corrected_v3_table.csv')
    parser.add_argument('--freq', default=1, type=int)
    parser.add_argument('--snr', default=5, type=int)
    parser.add_argument('--train_prop', default=0.8, type=float)
    parser.add_argument('--img_inc', default=50, type=int)
    parser.add_argument('--img_size', default=50, type=int)
    parser.add_argument('--show_img', default='F')
    args = parser.parse_args()
    print(args)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # Build the PyBDSF prediction maps, then load the held-out test split.
    Training_set = pybdsf_sols(args)
    test_Y11, class_test11, recon_image11, sum_test_Y, sum_recon_image = test(args)
    print(test_Y11.shape, class_test11.shape, recon_image11.shape)
    data1 = np.array([sum_test_Y, sum_recon_image])

    # Score the reconstructions and unpack metrics + confusion counts.
    (precision_sfg, recall_sfg, F1_score_sfg, accuracy_sfg,
     precision_ss, recall_ss, F1_score_ss, accuracy_ss,
     precision_fs, recall_fs, F1_score_fs, accuracy_fs,
     precision_all, recall_all, F1_score_all, accuracy_all,
     sfg_tp, sfg_fp, sfg_tn, sfg_fn,
     ss_tp, ss_fp, ss_tn, ss_fn,
     fs_tp, fs_fp, fs_tn, fs_fn,
     all_tp, all_fp, all_tn, all_fn,
     df_all) = calculate_metrics(test_Y11, class_test11, recon_image11)
    print(sfg_tp, sfg_fp, sfg_tn, sfg_fn, ss_tp, ss_fp, ss_tn, ss_fn, fs_tp, fs_fp, fs_tn, fs_fn, all_tp, all_fp, all_tn, all_fn)

    # Persist the percentage metrics and the pixel sums.
    data = np.array([precision_sfg, recall_sfg, F1_score_sfg, accuracy_sfg,
                     precision_ss, recall_ss, F1_score_ss, accuracy_ss,
                     precision_fs, recall_fs, F1_score_fs, accuracy_fs,
                     precision_all, recall_all, F1_score_all, accuracy_all]) * 100
    np.savetxt(args.save_dir+str(args.snr)+'_'+str(args.freq)+'_pybdsf.txt', data, fmt='%f')
    np.savetxt(args.save_dir+str(args.snr)+'_'+str(args.freq)+'_sum_test_Y_recon_pybdsf.txt', data1, fmt='%f')
| 25,031 | 31.807339 | 598 | py |
ConvoSource | ConvoSource-master/generate_real_data_and_solutions_Bx_yh_v1.py | """
This script generates the segmented real maps and solutions at a chosen exposure time, frequency and SNR on the simulated SKA data. The script command as it is generates 50x50 pixel images that are each spaced 50 pixels apart. The following commands segment the 560MHz at 8h exposure time dataset, at an SNR of 1. Run this first before 'source_finding_DNN_Bx_yh_v3.py'.
Usage:
... ...
python generate_real_data_and_solutions_Bx_yh_v1.py --generate_real_data 'F' --generate_solutions 'T' --img_inc 50 --img_size 50 --freq 1 --bg_fits '/path/to/bg_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits' --fits_pbc '/path/to/primary_beam_corrected_image/SKAMid_B1_8h_pbf_corrected_v3.fits' --training_set '/path/to/training_set/TrainingSet_B1_v2.txt' --save_dir '/path/where/to/save/maps/' --snr 1
python generate_real_data_and_solutions_Bx_yh_v1.py --generate_real_data 'T' --generate_solutions 'F' --img_inc 50 --img_size 50 --freq 1 --bg_fits '/path/to/bg_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits' --fits_pbc '/path/to/primary_beam_corrected_image/SKAMid_B1_8h_pbf_corrected_v3.fits' --training_set '/path/to/training_set/TrainingSet_B1_v2.txt' --save_dir '/path/where/to/save/maps/' --snr 1
Author: Vesna Lukic, E-mail: `vlukic973@gmail.com`
"""
from astropy.io import fits
import pandas as pd
import astropy
from astropy import coordinates
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
import bdsf
def generate_data(args):
    """
    Cut the primary-beam-corrected SKA image and/or the class-label map
    (built from the Training Set catalogue) into img_size x img_size
    segments spaced img_inc pixels apart, saving each stack as .npy.

    Returns the filtered TrainingSet catalogue.
    """
    hdu1 = fits.open(args.fits_pbc)
    TrainingSet = pd.read_csv(args.training_set, skiprows=17, delimiter='\s+')
    TrainingSet = TrainingSet[TrainingSet.columns[0:15]]
    TrainingSet.columns = ['ID','RA (core)','DEC (core)','RA (centroid)','DEC (centroid)','FLUX','Core frac','BMAJ','BMIN','PA','SIZE','CLASS','SELECTION','x','y']
    TrainingSet = TrainingSet[TrainingSet['SELECTION']==1]
    # Background (rms) map; the patch depends on the observing frequency.
    hdul = fits.open(args.bg_fits)
    if (args.freq==1):
        data1 = hdul[0].data[0,0,16300:20300,16300:20300]
        print('frequency is 560MHz')
    if (args.freq==2):
        data1 = hdul[0].data[0,0,16300:20500,16300:20500]
        print('frequency is 1400MHz')
    if (args.freq==3):
        data1 = hdul[0].data[0,0,21700:25700,16300:20300]
        print('frequency is 9200MHz')
    data1[np.isnan(data1)] = 0
    # Keep only sources brighter than snr * mean background level.
    TrainingSet = TrainingSet[TrainingSet['FLUX']>args.snr*np.mean(data1)]
    print('Cut-off threshold is:',args.snr*np.mean(data1))
    TrainingSet['x'] = np.round(TrainingSet['x'])
    TrainingSet['y'] = np.round(TrainingSet['y'])
    TrainingSet['x'] = TrainingSet['x'].astype(int)
    TrainingSet['y'] = TrainingSet['y'].astype(int)
    TrainingSet = TrainingSet.reset_index(drop=True)
    # Shift catalogue coordinates into the cut-out frame and take the
    # matching patch of the primary-beam-corrected image.
    if (args.freq==1):
        TrainingSet['x'] = TrainingSet['x']-16300
        TrainingSet['y'] = TrainingSet['y']-16300
        real_data1 = hdu1[0].data[0,0,16300:20300,16300:20300]
    if (args.freq==2):
        print('resetting Training indices')
        TrainingSet['x'] = TrainingSet['x']-16300
        TrainingSet['y'] = TrainingSet['y']-16300
        real_data1 = hdu1[0].data[0,0,16300:20500,16300:20500]
    if (args.freq==3):
        TrainingSet['x'] = TrainingSet['x']-16300
        TrainingSet['y'] = TrainingSet['y']-21700
        real_data1 = hdu1[0].data[0,0,21700:25700,16300:20300]
    # Sliding extent and label-map size per (img_inc, freq) combination.
    if ((args.img_inc==20) & (args.freq==1)):
        cutout_size = 3950
        data = np.zeros((4000,4000,1), dtype=np.uint8 )
    if ((args.img_inc==50) & (args.freq==1)):
        cutout_size = 4000
        data = np.zeros((4000,4000,1), dtype=np.uint8 )
    if ((args.img_inc==20) & (args.freq==2)):
        cutout_size = 4150
        data = np.zeros((4200,4200,1), dtype=np.uint8 )
    if ((args.img_inc==50) & (args.freq==2)):
        cutout_size = 4200
        data = np.zeros((4200,4200,1), dtype=np.uint8 )
    if ((args.img_inc==20) & (args.freq==3)):
        cutout_size = 3950
        data = np.zeros((4000,4000,1), dtype=np.uint8 )
    if ((args.img_inc==50) & (args.freq==3)):
        cutout_size = 4000
        data = np.zeros((4000,4000,1), dtype=np.uint8 )
    print(cutout_size)
    # Paint each catalogue source's CLASS value at its (y, x) pixel.
    for i in range(0,len(TrainingSet)):
        print(i)
        data[int(TrainingSet['y'][i:i+1]),int(TrainingSet['x'][i:i+1])] = TrainingSet['CLASS'][i:i+1]
    data = data.reshape(data.shape[0],data.shape[1])

    def _cut_segments(map2d):
        # Slide an img_size window in steps of img_inc over the map and
        # stack every cut-out (same print/append trace as the original
        # two duplicated loops: both branches of the old if/else appended).
        a = -1
        b = -1
        segments = np.empty((0,args.img_size,args.img_size))
        for i in range(0,cutout_size,args.img_inc):
            a += 1
            for j in range(0,cutout_size,args.img_inc):
                b += 1
                if (b==int(cutout_size/args.img_inc)):
                    b = 0
                print(a,b)
                segments = np.append(segments, [map2d[i:i+args.img_size,j:j+args.img_size]], axis=0)
        return segments

    if (args.generate_real_data=='T'):
        print('generating real data')
        np.save(args.save_dir+'real_images_'+str(args.img_size)+'_'+str(args.img_inc)+'_'+str(args.snr)+'_final.npy', _cut_segments(real_data1))
    else:
        print('not generating real data')
    if (args.generate_solutions=='T'):
        print('generating solutions')
        np.save(args.save_dir+'solutions_'+str(args.img_size)+'_'+str(args.img_inc)+'_'+str(args.snr)+'_final.npy', _cut_segments(data))
    else:
        print('not generating solutions')
    return TrainingSet
if __name__ == "__main__":
    import os
    import argparse
    from keras import callbacks

    # CLI for cutting the simulated SKA maps into training segments.
    parser = argparse.ArgumentParser(description="Generate data for source-finding")
    parser.add_argument('--fits_pbc', default='/path/to/primary_beam_corrected_image/SKAMid_B1_8h_pbf_corrected_v3.fits')
    parser.add_argument('--training_set', default='/path/to/training_set/TrainingSet_B1_v2.txt')
    parser.add_argument('--save_dir', default='/path/where/to/save/maps/')
    parser.add_argument('--bg_fits', default='/path/to/bg_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits')
    parser.add_argument('--freq', default=1, type=int)
    parser.add_argument('--snr', default=5, type=int)
    parser.add_argument('--img_inc', default=50, type=int)
    parser.add_argument('--img_size', default=50, type=int)
    parser.add_argument('--generate_real_data', default='F')
    parser.add_argument('--generate_solutions', default='F')
    args = parser.parse_args()
    print(args)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # Cut the maps; show the first few catalogue rows as a sanity check.
    Training_set = generate_data(args)
    print(Training_set[0:5])
| 6,962 | 32.637681 | 409 | py |
ConvoSource | ConvoSource-master/source_finding_DNN_Bx_yh_v3.py |
"""
This script trains and tests AutoSource on the segmented real maps and solutions at a chosen exposure time, frequency and SNR on the simulated SKA data. Run 'generate_real_data_and_solutions_Bx_yh_v1.py' first before running this script. The script commands as they are currently assume there are 50x50 pixel segmented maps space 50 pixels apart. The following commands run AutoSource on the 560MHz data at 8h exposure time, at an SNR of 1.
Usage:
... ...
python source_finding_DNN_Bx_yh_v3.py --img_inc 50 --img_size 50 --freq 1 --bg_fits '/path/to/background_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits' --save_dir '/path/where/to/save/results/' --augment 'None' --epochs 50 --load_images '/path/to/segmented/images/real_images_50_50_1_final.npy' --load_solutions '/path/to/segmented/solutions/solutions_50_50_1_final.npy' --snr 1
python source_finding_DNN_Bx_yh_v3.py --img_inc 50 --img_size 50 --freq 1 --bg_fits '/path/to/background_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits' --save_dir '/path/where/to/save/results/' --augment 'Extended' --epochs 50 --load_images '/path/to/segmented/images/real_images_50_50_1_final.npy' --load_solutions '/path/to/segmented/solutions/solutions_50_50_1_final.npy' --snr 1
python source_finding_DNN_Bx_yh_v3.py --img_inc 50 --img_size 50 --freq 1 --bg_fits '/path/to/background_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits' --save_dir '/path/where/to/save/results/' --augment 'All' --epochs 50 --load_images '/path/to/segmented/images/real_images_50_50_1_final.npy' --load_solutions '/path/to/segmented/solutions/solutions_50_50_1_final.npy' --snr 1
Author: Vesna Lukic, E-mail: `vlukic973@gmail.com`
"""
from __future__ import division
import numpy as np
import scipy
import scipy.misc
import scipy.stats
from astropy.stats import sigma_clip
import matplotlib as mpl
mpl.use('tkagg')
import matplotlib.pyplot as plt
from astropy.io import fits
from keras.utils.vis_utils import plot_model
import pandas as pd
from keras.layers import Conv2D, MaxPooling2D
from sklearn.metrics.pairwise import euclidean_distances
import operator
import skimage
from skimage.transform import warp
from astropy.io import fits
import scipy
from scipy import ndimage
import datetime
from keras.layers import Conv2D, Conv2DTranspose, Dense, Flatten, Reshape
from keras.models import Sequential, Model
from keras.utils.vis_utils import plot_model
import numpy as np
import os
import keras
import numpy as np
import keras.backend as K
from scipy.misc import imread
from sklearn.metrics import accuracy_score, normalized_mutual_info_score
import numpy as np
import time
from sklearn.cluster import KMeans
from keras import callbacks
from keras.models import Model
from keras.optimizers import SGD
from keras.layers import Dense, Input
from keras.initializers import VarianceScaling
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Dropout, Flatten
from keras import regularizers
from keras import optimizers
from scipy.misc import imread
from sklearn.metrics import accuracy_score, normalized_mutual_info_score
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.layers.normalization import BatchNormalization
import glob
def train(train_X,train_Y,test_X,test_Y):
    """
    Build the convolutional source-finding network, train it with early
    stopping and best-weights checkpointing, and save the loss curves.

    Returns (loss, val_loss, epochs) from the training history.
    """
    inp = Input(shape=(50, 50, 1))  # adapt this if using `channels_first` image data format
    net = Conv2D(16, (7, 7), strides=1, activation='relu', padding='same')(inp)
    net = Dropout(0.25)(net)
    net = Conv2D(32, (5, 5), strides=1, activation='relu', padding='same')(net)
    net = Conv2D(64, (3, 3), strides=1, activation='relu', padding='same')(net)
    out = Dense(1, activation='sigmoid')(net)
    model = Model(inp, out)
    model.summary()
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
    opt = keras.optimizers.adadelta(lr=1.0, decay=0.0, rho=0.99)
    model.compile(optimizer=opt, loss='binary_crossentropy')
    # Checkpoint name encodes frequency/segmentation settings and augmentation mode.
    checkpoint = ModelCheckpoint(args.save_dir+'weights_B'+str(args.freq)+'_'+str(args.img_inc)+'_'+str(args.img_size)+'_.{epoch:02d}_loss-{val_loss:.4f}'+str(args.augment)+'.hdf5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto')
    print(checkpoint)
    start = time.time()
    history = model.fit(train_X, train_Y, epochs=args.epochs, batch_size=128, shuffle=True, validation_data=(test_X, test_Y), callbacks=[early_stop, checkpoint])
    end = time.time()
    print(end-start)
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(loss))
    # Save the curves under a timestamped name (timestamp taken per file,
    # as in the original).
    np.save(args.save_dir+datetime.datetime.now().isoformat()[0:18]+'_loss.npy', loss)
    np.save(args.save_dir+datetime.datetime.now().isoformat()[0:18]+'_val_loss.npy', val_loss)
    np.save(args.save_dir+datetime.datetime.now().isoformat()[0:18]+'epochs.npy', epochs)
    return loss, val_loss, epochs
def test(test_X,test_Y,class_test):
    """Evaluate the trained source finder on the held-out split.

    Rebuilds the same network as train(), loads the newest checkpoint in
    args.save_dir matching args.augment, predicts a per-pixel source map,
    then sweeps a relative detection threshold. For each threshold the
    predicted pixel positions are matched (within 3 pixels) against the
    ground-truth class map and precision/recall/F1/accuracy are computed
    per class and overall. The metrics at the threshold with the best
    overall F1 are returned, together with the raw TP/FP/TN/FN sums and
    the final per-image DataFrame.

    Class-map encoding (from the equality tests below):
    1 = SS, 2 = FS, 3 = SFG; test_Y==1 marks any source pixel.

    NOTE(review): this function stores ``zip(...)`` results and later
    indexes, len()s, truth-tests and re-iterates them, which only works on
    Python 2 where zip returns a list. On Python 3 these are one-shot
    iterators and the metrics silently break -- wrap the zip calls in
    list(...) when porting.
    """
    # Architecture must mirror train() exactly so the saved weights load.
    input_img = Input(shape=(50, 50, 1)) # adapt this if using `channels_first` image data format
    x = Conv2D(16, (7, 7), strides=1, activation='relu', padding='same')(input_img)
    x = Dropout(0.25)(x)
    x = Conv2D(32, (5, 5), strides=1, activation='relu', padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, activation='relu', padding='same')(x)
    decoded = Dense(1,activation='sigmoid')(x)
    autoencoder = Model(input_img, decoded)
    autoencoder.summary()
    es=keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
    adadelta = keras.optimizers.adadelta(lr=1.0, decay=0.0, rho=0.99)
    autoencoder.compile(optimizer=adadelta, loss='binary_crossentropy')
    # Pick the most recently created checkpoint for this augmentation mode.
    if (args.augment=='None'):
        list_of_files = glob.glob(args.save_dir+'/*None.hdf5') # * means all if need specific format then *.csv
        latest_file = max(list_of_files, key=os.path.getctime)
    if (args.augment=='Extended'):
        list_of_files = glob.glob(args.save_dir+'/*Extended.hdf5') # * means all if need specific format then *.csv
        latest_file = max(list_of_files, key=os.path.getctime)
    if (args.augment=='All'):
        list_of_files = glob.glob(args.save_dir+'/*All.hdf5') # * means all if need specific format then *.csv
        latest_file = max(list_of_files, key=os.path.getctime)
    autoencoder.load_weights(latest_file)
    # Per-layer activation models; only the reconstruction is used for the
    # metrics -- the layer outputs below are computed but otherwise unused
    # (diagnostic only).
    reconstructed = Model(inputs=autoencoder.input, outputs=autoencoder.output)
    first_layer_activation = Model(inputs=autoencoder.input, outputs=autoencoder.layers[1].output)
    second_layer_activation = Model(inputs=autoencoder.input, outputs=autoencoder.layers[2].output)
    third_layer_activation = Model(inputs=autoencoder.input, outputs=autoencoder.layers[3].output)
    fourth_layer_activation = Model(inputs=autoencoder.input, outputs=autoencoder.layers[4].output)
    fifth_layer_activation = Model(inputs=autoencoder.input, outputs=autoencoder.layers[5].output)
    recon_image=reconstructed.predict(test_X)
    first_layer_out=first_layer_activation.predict(test_X)
    second_layer_out=second_layer_activation.predict(test_X)
    third_layer_out=third_layer_activation.predict(test_X)
    fourth_layer_out=fourth_layer_activation.predict(test_X)
    fifth_layer_out=fifth_layer_activation.predict(test_X)
    recon_image=recon_image.reshape(recon_image.shape[0],recon_image.shape[1],recon_image.shape[2],1)
    print(class_test.shape)
    print(np.max(recon_image))
    # Optional visual check: input / truth / reconstruction triptychs for
    # the first ten images that contain an SS source.
    if (args.show_img=='True'):
        print('Now showing images and detected features')
        plt.ion()
        for i in np.where(class_test==1)[0][0:10]:
            plt.subplot(1,3,1); plt.axis('off'); plt.imshow(test_X[i,:,:,0])
            plt.subplot(1,3,2); plt.axis('off'); plt.imshow(class_test[i,:,:,0])
            plt.subplot(1,3,3); plt.axis('off'); plt.imshow(recon_image[i,:,:,0])
            plt.savefig(args.save_dir+'test_X_Y_recon'+str(i)+'.png')
            plt.close()
    # Per-threshold metric accumulators (one entry per swept threshold).
    precision_sfg_list=[]
    recall_sfg_list=[]
    F1_score_sfg_list=[]
    accuracy_sfg_list=[]
    precision_ss_list=[]
    recall_ss_list=[]
    F1_score_ss_list=[]
    accuracy_ss_list=[]
    precision_fs_list=[]
    recall_fs_list=[]
    F1_score_fs_list=[]
    accuracy_fs_list=[]
    precision_all_list=[]
    recall_all_list=[]
    F1_score_all_list=[]
    accuracy_all_list=[]
    # Sweep the detection threshold as a fraction of each image's peak.
    for recon_threshold in np.round(np.linspace(0.1,0.99,11),2):
        print(recon_threshold)
        # Per-image pixel coordinates: predicted detections and the true
        # locations of each class (as np.where row/col index arrays).
        array_loc=[]
        true_loc=[]
        class_ss=[]
        class_fs=[]
        class_sfg=[]
        class_all=[]
        sum_source=[]
        for i in range(0,len(recon_image)):
            array_loc.append(np.where(recon_threshold*np.max(recon_image[i,:,:,0])<recon_image[i,:,:,0]))
            class_sfg.append(np.where(class_test[i,:,:,0]==3))
            class_ss.append(np.where(class_test[i,:,:,0]==1))
            class_fs.append(np.where(class_test[i,:,:,0]==2))
            class_all.append(np.where(test_Y[i,:,:,0]==1))
            sum_source.append(np.sum(class_test[i]))
        # Pair the row/col arrays into (row, col) coordinate sequences.
        pred_loc=[]
        class_sfg1=[]
        class_ss1=[]
        class_fs1=[]
        class_all1=[]
        for i in range(0,len(recon_image)):
            pred_loc.append(zip(array_loc[i][0],array_loc[i][1]))
            class_sfg1.append(zip(class_sfg[i][0],class_sfg[i][1]))
            class_ss1.append(zip(class_ss[i][0],class_ss[i][1]))
            class_fs1.append(zip(class_fs[i][0],class_fs[i][1]))
            class_all1.append(zip(class_all[i][0],class_all[i][1]))
        # Keep only the images that actually contain each class (truthiness
        # of the coordinate list), and subset predictions/truth to them.
        pred_sfg=[i for i,x in enumerate(class_sfg1) if x]
        pred_ss=[i for i,x in enumerate(class_ss1) if x]
        pred_fs=[i for i,x in enumerate(class_fs1) if x]
        pred_all=[i for i,x in enumerate(class_all1) if x]
        pred_sfg1 = [pred_loc[i] for i in pred_sfg]
        pred_ss1 = [pred_loc[i] for i in pred_ss]
        pred_fs1 = [pred_loc[i] for i in pred_fs]
        pred_all1 = [pred_loc[i] for i in pred_all]
        class_sfg2 = [class_sfg1[i] for i in pred_sfg]
        class_ss2 = [class_ss1[i] for i in pred_ss]
        class_fs2 = [class_fs1[i] for i in pred_fs]
        class_all2 = [class_all1[i] for i in pred_all]
        # Pairwise distances among the predicted pixels of each image; used
        # below to collapse runs of adjacent detections to one per source.
        euc_dist_sfg=[]
        euc_dist_ss=[]
        euc_dist_fs=[]
        euc_dist_all=[]
        for i in range(0,len(pred_sfg1)):
            if (pred_sfg1[i]!=[]):
                euc_dist_sfg.append(np.round(euclidean_distances(pred_sfg1[i],pred_sfg1[i])))
            else:
                euc_dist_sfg.append([])
        for i in range(0,len(pred_ss1)):
            if (pred_ss1[i]!=[]):
                euc_dist_ss.append(np.round(euclidean_distances(pred_ss1[i],pred_ss1[i])))
            else:
                euc_dist_ss.append([])
        for i in range(0,len(pred_fs1)):
            if (pred_fs1[i]!=[]):
                euc_dist_fs.append(np.round(euclidean_distances(pred_fs1[i],pred_fs1[i])))
            else:
                euc_dist_fs.append([])
        for i in range(0,len(pred_all1)):
            if (pred_all1[i]!=[]):
                euc_dist_all.append(np.round(euclidean_distances(pred_all1[i],pred_all1[i])))
            else:
                euc_dist_all.append([])
        # De-duplicate SFG detections: keep index 0 plus any prediction more
        # than 1 pixel from its predecessor (distance to the previous entry
        # in the pairwise matrix, offset t).
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_sfg=[]
        for i in range(0,len(pred_sfg1)):
            for j in range(1,len(euc_dist_sfg[i])):
                des_elem.append((0))
                if (euc_dist_sfg[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_sfg.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_sfg):
            if i == []:
                des_elem_sfg[n] = [0]
        # Map the kept indices back to pixel coordinates.
        ind_loc=[]
        ind_loc_sfg=[]
        for i in range(0,len(pred_sfg1)):
            for j in range(0,len(des_elem_sfg[i])):
                if (pred_sfg1[i]!=[]):
                    ind_loc.append(pred_sfg1[i][des_elem_sfg[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_sfg.append(ind_loc)
            ind_loc=[]
        df_sfg=pd.DataFrame()
        df_sfg['pred_loc']=pred_sfg1
        df_sfg['des_elem_sfg']=des_elem_sfg
        df_sfg['ind_loc2']=ind_loc_sfg
        # Same de-duplication for SS detections.
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_ss=[]
        for i in range(0,len(pred_ss1)):
            for j in range(1,len(euc_dist_ss[i])):
                des_elem.append((0))
                if (euc_dist_ss[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_ss.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_ss):
            if i == []:
                des_elem_ss[n] = [0]
        ind_loc=[]
        ind_loc_ss=[]
        for i in range(0,len(pred_ss1)):
            for j in range(0,len(des_elem_ss[i])):
                if (pred_ss1[i]!=[]):
                    ind_loc.append(pred_ss1[i][des_elem_ss[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_ss.append(ind_loc)
            ind_loc=[]
        df_ss=pd.DataFrame()
        df_ss['pred_loc']=pred_ss1
        df_ss['des_elem_ss']=des_elem_ss
        df_ss['ind_loc2']=ind_loc_ss
        # Same de-duplication for FS detections.
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_fs=[]
        for i in range(0,len(pred_fs1)):
            for j in range(1,len(euc_dist_fs[i])):
                des_elem.append((0))
                if (euc_dist_fs[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_fs.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_fs):
            if i == []:
                des_elem_fs[n] = [0]
        ind_loc=[]
        ind_loc_fs=[]
        for i in range(0,len(pred_fs1)):
            for j in range(0,len(des_elem_fs[i])):
                if (pred_fs1[i]!=[]):
                    ind_loc.append(pred_fs1[i][des_elem_fs[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_fs.append(ind_loc)
            ind_loc=[]
        df_fs=pd.DataFrame()
        df_fs['pred_loc']=pred_fs1
        df_fs['des_elem_fs']=des_elem_fs
        df_fs['ind_loc2']=ind_loc_fs
        # Same de-duplication over all source pixels.
        t=1
        des_elem=[]
        des_elem2=[]
        des_elem_all=[]
        for i in range(0,len(pred_all1)):
            for j in range(1,len(euc_dist_all[i])):
                des_elem.append((0))
                if (euc_dist_all[i].item((j,j-t))>1):
                    des_elem.append(j)
            des_elem2.append(des_elem)
            des_elem_all.append(list(set(des_elem2[0])))
            des_elem=[]
            des_elem2=[]
        for n, i in enumerate(des_elem_all):
            if i == []:
                des_elem_all[n] = [0]
        ind_loc=[]
        ind_loc_all=[]
        for i in range(0,len(pred_all1)):
            for j in range(0,len(des_elem_all[i])):
                if (pred_all1[i]!=[]):
                    ind_loc.append(pred_all1[i][des_elem_all[i][j]])
                else:
                    ind_loc.append([])
            ind_loc_all.append(ind_loc)
            ind_loc=[]
        df_all=pd.DataFrame()
        df_all['pred_loc']=pred_all1
        df_all['des_elem_all']=des_elem_all
        df_all['ind_loc2']=ind_loc_all
        # Distances from the de-duplicated predictions to the true pixels;
        # note class_ss/class_fs/class_sfg/class_all are re-bound here from
        # coordinate lists to distance matrices (shape: n_pred x n_true).
        class_ss=[]
        class_ss_shape=[]
        class_sfg=[]
        class_sfg_shape=[]
        class_fs=[]
        class_fs_shape=[]
        class_all=[]
        class_all_shape=[]
        for i in range(0,len(class_sfg2)):
            if ((class_sfg2[i]!=[]) & (ind_loc_sfg[i]!=[[]])):
                class_sfg.append(np.round(euclidean_distances(ind_loc_sfg[i],class_sfg2[i])))
                class_sfg_shape.append(euclidean_distances(ind_loc_sfg[i],class_sfg2[i]).shape)
            else:
                class_sfg.append([])
                class_sfg_shape.append([])
        for i in range(0,len(class_ss2)):
            if ((class_ss2[i]!=[]) & (ind_loc_ss[i]!=[[]])):
                class_ss.append(np.round(euclidean_distances(ind_loc_ss[i],class_ss2[i])))
                class_ss_shape.append(euclidean_distances(ind_loc_ss[i],class_ss2[i]).shape)
            else:
                class_ss.append([])
                class_ss_shape.append([])
        for i in range(0,len(class_fs2)):
            if ((class_fs2[i]!=[]) & (ind_loc_fs[i]!=[[]])):
                class_fs.append(np.round(euclidean_distances(ind_loc_fs[i],class_fs2[i])))
                class_fs_shape.append(euclidean_distances(ind_loc_fs[i],class_fs2[i]).shape)
            else:
                class_fs.append([])
                class_fs_shape.append([])
        for i in range(0,len(class_all2)):
            if ((class_all2[i]!=[]) & (ind_loc_all[i]!=[[]])):
                class_all.append(np.round(euclidean_distances(ind_loc_all[i],class_all2[i])))
                class_all_shape.append(euclidean_distances(ind_loc_all[i],class_all2[i]).shape)
            else:
                class_all.append([])
                class_all_shape.append([])
        # TP: prediction within pix_threshold pixels of a true source.
        TP_sfg_list=[]
        TP_ss_list=[]
        TP_fs_list=[]
        TP_all_list=[]
        FN_sfg_list=[]
        FN_ss_list=[]
        FN_fs_list=[]
        FN_all_list=[]
        FP_sfg_list2=[]
        FP_ss_list2=[]
        FP_fs_list2=[]
        FP_all_list2=[]
        pred_sfg_loc_len=[]
        pred_ss_loc_len=[]
        pred_fs_loc_len=[]
        pred_all_loc_len=[]
        class_loc_len=[]
        pix_threshold=3
        for i in range(0,len(class_sfg2)):
            TP_sfg_list.append(np.sum(class_sfg[i]<pix_threshold))
            pred_sfg_loc_len.append(len(pred_sfg1[i]))
        for i in range(0,len(class_ss2)):
            TP_ss_list.append(np.sum(class_ss[i]<pix_threshold))
            pred_ss_loc_len.append(len(pred_ss1[i]))
        for i in range(0,len(class_fs2)):
            TP_fs_list.append(np.sum(class_fs[i]<pix_threshold))
            pred_fs_loc_len.append(len(pred_fs1[i]))
        for i in range(0,len(class_all2)):
            TP_all_list.append(np.sum(class_all[i]<pix_threshold))
            pred_all_loc_len.append(len(pred_all1[i]))
        # FN/FP from the distance-matrix shape: more true pixels than kept
        # predictions -> misses (cols - rows); the reverse -> extra
        # detections (rows - cols). Negative values are clipped to 0 below.
        for i in range(0,len(class_sfg2)):
            if (class_sfg_shape[i]!=[]):
                FN_sfg_list.append(class_sfg_shape[i][1]-class_sfg_shape[i][0])
                FP_sfg_list2.append(class_sfg_shape[i][0]-class_sfg_shape[i][1])
            else:
                FN_sfg_list.append(0)
                FP_sfg_list2.append(0)
        for i in range(0,len(class_ss2)):
            if (class_ss_shape[i]!=[]):
                FN_ss_list.append(class_ss_shape[i][1]-class_ss_shape[i][0])
                FP_ss_list2.append(class_ss_shape[i][0]-class_ss_shape[i][1])
            else:
                FN_ss_list.append(0)
                FP_ss_list2.append(0)
        for i in range(0,len(class_fs2)):
            if (class_fs_shape[i]!=[]):
                FN_fs_list.append(class_fs_shape[i][1]-class_fs_shape[i][0])
                FP_fs_list2.append(class_fs_shape[i][0]-class_fs_shape[i][1])
            else:
                FN_fs_list.append(0)
                FP_fs_list2.append(0)
        for i in range(0,len(class_all2)):
            if (class_all_shape[i]!=[]):
                FN_all_list.append(class_all_shape[i][1]-class_all_shape[i][0])
                FP_all_list2.append(class_all_shape[i][0]-class_all_shape[i][1])
            else:
                FP_all_list2.append(0)
        # Build the full 50x50 coordinate grid; true negatives are pixels
        # that are neither predicted nor true sources.
        i_df=[]
        j_df=[]
        a=-1
        b=-1
        for i in range(0,50,1):
            a+=1
            for j in range(0,50,1):
                b+=1
                if (b==50):
                    b=0
                    i_df.append(i);j_df.append(j)
                else:
                    i_df.append(i);j_df.append(j)
        all_coords=zip(i_df,j_df)
        true_sfg_negatives_list=[]
        true_ss_negatives_list=[]
        true_fs_negatives_list=[]
        true_all_negatives_list=[]
        for i in range(0,len(class_sfg2)):
            not_in_sfg_loc1 = list(set(all_coords) - set(class_sfg1[i]))
            not_in_sfg_pred_loc = list(set(all_coords) - set(pred_sfg1[i]))
            true_sfg_negatives=len(list(set(not_in_sfg_loc1).intersection(not_in_sfg_pred_loc)))
            true_sfg_negatives_list.append(true_sfg_negatives)
        for i in range(0,len(class_ss2)):
            not_in_ss_loc1 = list(set(all_coords) - set(class_ss1[i]))
            not_in_ss_pred_loc = list(set(all_coords) - set(pred_ss1[i]))
            true_ss_negatives=len(list(set(not_in_ss_loc1).intersection(not_in_ss_pred_loc)))
            true_ss_negatives_list.append(true_ss_negatives)
        for i in range(0,len(class_fs2)):
            not_in_fs_loc1 = list(set(all_coords) - set(class_fs1[i]))
            not_in_fs_pred_loc = list(set(all_coords) - set(pred_fs1[i]))
            true_fs_negatives=len(list(set(not_in_fs_loc1).intersection(not_in_fs_pred_loc)))
            true_fs_negatives_list.append(true_fs_negatives)
        for i in range(0,len(class_all2)):
            not_in_all_loc1 = list(set(all_coords) - set(class_all1[i]))
            not_in_all_pred_loc = list(set(all_coords) - set(pred_all1[i]))
            true_all_negatives=len(list(set(not_in_all_loc1).intersection(not_in_all_pred_loc)))
            true_all_negatives_list.append(true_all_negatives)
        # Assemble per-image confusion counts for each class (re-binding the
        # earlier df_* frames), clipping negative FP/FN contributions to 0.
        df_sfg=pd.DataFrame()
        df_sfg['euc_dist_shape']=class_sfg_shape
        df_sfg['pred_sfg_loc_len']=pred_sfg_loc_len
        df_sfg['TP_sfg']=TP_sfg_list
        df_sfg['FP_sfg_list1']=df_sfg['pred_sfg_loc_len']-df_sfg['TP_sfg']
        df_sfg.loc[df_sfg['FP_sfg_list1'] < 0, 'FP_sfg_list1'] = 0
        df_sfg['FN_sfg']=FN_sfg_list
        df_sfg['FP_sfg_list2']=FP_sfg_list2
        df_sfg.loc[df_sfg['FP_sfg_list2'] < 0, 'FP_sfg_list2'] = 0
        df_sfg.loc[df_sfg['FN_sfg'] < 0, 'FN_sfg'] = 0
        df_sfg['FP_sfg']=df_sfg['FP_sfg_list1']+df_sfg['FP_sfg_list2']
        df_sfg['TN_sfg']=true_sfg_negatives_list
        df_ss=pd.DataFrame()
        df_ss['euc_dist_shape']=class_ss_shape
        df_ss['pred_ss_loc_len']=pred_ss_loc_len
        df_ss['TP_ss']=TP_ss_list
        df_ss['FP_ss_list1']=df_ss['pred_ss_loc_len']-df_ss['TP_ss']
        df_ss.loc[df_ss['FP_ss_list1'] < 0, 'FP_ss_list1'] = 0
        df_ss['FN_ss']=FN_ss_list
        df_ss['FP_ss_list2']=FP_ss_list2
        df_ss.loc[df_ss['FP_ss_list2'] < 0, 'FP_ss_list2'] = 0
        df_ss.loc[df_ss['FN_ss'] < 0, 'FN_ss'] = 0
        df_ss['FP_ss']=df_ss['FP_ss_list1']+df_ss['FP_ss_list2']
        df_ss['TN_ss']=true_ss_negatives_list
        df_fs=pd.DataFrame()
        df_fs['euc_dist_shape']=class_fs_shape
        df_fs['pred_fs_loc_len']=pred_fs_loc_len
        df_fs['TP_fs']=TP_fs_list
        df_fs['FP_fs_list1']=df_fs['pred_fs_loc_len']-df_fs['TP_fs']
        df_fs.loc[df_fs['FP_fs_list1'] < 0, 'FP_fs_list1'] = 0
        df_fs['FN_fs']=FN_fs_list
        df_fs['FP_fs_list2']=FP_fs_list2
        df_fs.loc[df_fs['FP_fs_list2'] < 0, 'FP_fs_list2'] = 0
        df_fs.loc[df_fs['FN_fs'] < 0, 'FN_fs'] = 0
        df_fs['FP_fs']=df_fs['FP_fs_list1']+df_fs['FP_fs_list2']
        df_fs['TN_fs']=true_fs_negatives_list
        df_all=pd.DataFrame()
        df_all['euc_dist_shape']=class_all_shape
        df_all['pred_all_loc_len']=pred_all_loc_len
        df_all['TP_all']=TP_all_list
        df_all['FP_all_list1']=df_all['pred_all_loc_len']-df_all['TP_all']
        df_all.loc[df_all['FP_all_list1'] < 0, 'FP_all_list1'] = 0
        df_all['FN_all']=FN_all_list
        df_all['FP_all_list2']=FP_all_list2
        df_all.loc[df_all['FP_all_list2'] < 0, 'FP_all_list2'] = 0
        df_all.loc[df_all['FN_all'] < 0, 'FN_all'] = 0
        df_all['FP_all']=df_all['FP_all_list1']+df_all['FP_all_list2']
        df_all['TN_all']=true_all_negatives_list
        # Aggregate metrics; the +0.00001 terms guard divisions by zero.
        precision_sfg=np.sum(df_sfg['TP_sfg'])/(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['FP_sfg'])+0.00001)
        recall_sfg=np.sum(df_sfg['TP_sfg'])/(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['FN_sfg'])+0.00001)
        F1_score_sfg=2*precision_sfg*recall_sfg/(precision_sfg+recall_sfg+0.00001)
        accuracy_sfg=(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['TN_sfg']))/(np.sum(df_sfg['TP_sfg'])+np.sum(df_sfg['TN_sfg'])+np.sum(df_sfg['FP_sfg'])+np.sum(df_sfg['FN_sfg'])+0.00001)
        precision_ss=np.sum(df_ss['TP_ss'])/(np.sum(df_ss['TP_ss'])+np.sum(df_ss['FP_ss'])+0.00001)
        recall_ss=np.sum(df_ss['TP_ss'])/(np.sum(df_ss['TP_ss'])+np.sum(df_ss['FN_ss'])+0.00001)
        F1_score_ss=2*precision_ss*recall_ss/(precision_ss+recall_ss+0.00001)
        accuracy_ss=(np.sum(df_ss['TP_ss'])+np.sum(df_ss['TN_ss']))/(np.sum(df_ss['TP_ss'])+np.sum(df_ss['TN_ss'])+np.sum(df_ss['FP_ss'])+np.sum(df_ss['FN_ss'])+0.00001)
        precision_fs=np.sum(df_fs['TP_fs'])/(np.sum(df_fs['TP_fs'])+np.sum(df_fs['FP_fs'])+0.00001)
        recall_fs=np.sum(df_fs['TP_fs'])/(np.sum(df_fs['TP_fs'])+np.sum(df_fs['FN_fs'])+0.00001)
        F1_score_fs=2*precision_fs*recall_fs/(precision_fs+recall_fs+0.00001)
        accuracy_fs=(np.sum(df_fs['TP_fs'])+np.sum(df_fs['TN_fs']))/(np.sum(df_fs['TP_fs'])+np.sum(df_fs['TN_fs'])+np.sum(df_fs['FP_fs'])+np.sum(df_fs['FN_fs'])+0.00001)
        precision_all=np.sum(df_all['TP_all'])/(np.sum(df_all['TP_all'])+np.sum(df_all['FP_all'])+0.00001)
        recall_all=np.sum(df_all['TP_all'])/(np.sum(df_all['TP_all'])+np.sum(df_all['FN_all'])+0.00001)
        F1_score_all=2*precision_all*recall_all/(precision_all+recall_all+0.00001)
        accuracy_all=(np.sum(df_all['TP_all'])+np.sum(df_all['TN_all']))/(np.sum(df_all['TP_all'])+np.sum(df_all['TN_all'])+np.sum(df_all['FP_all'])+np.sum(df_all['FN_all'])+0.00001)
        precision_sfg_list.append(precision_sfg)
        recall_sfg_list.append(recall_sfg)
        F1_score_sfg_list.append(F1_score_sfg)
        accuracy_sfg_list.append(accuracy_sfg)
        precision_ss_list.append(precision_ss)
        recall_ss_list.append(recall_ss)
        F1_score_ss_list.append(F1_score_ss)
        accuracy_ss_list.append(accuracy_ss)
        precision_fs_list.append(precision_fs)
        recall_fs_list.append(recall_fs)
        F1_score_fs_list.append(F1_score_fs)
        accuracy_fs_list.append(accuracy_fs)
        precision_all_list.append(precision_all)
        recall_all_list.append(recall_all)
        F1_score_all_list.append(F1_score_all)
        accuracy_all_list.append(accuracy_all)
    # Report every metric at the threshold with the best overall F1 score.
    index, value = max(enumerate(F1_score_all_list), key=operator.itemgetter(1))
    precision_sfg=precision_sfg_list[index]
    recall_sfg=recall_sfg_list[index]
    F1_score_sfg=F1_score_sfg_list[index]
    accuracy_sfg=accuracy_sfg_list[index]
    precision_ss=precision_ss_list[index]
    recall_ss=recall_ss_list[index]
    F1_score_ss=F1_score_ss_list[index]
    accuracy_ss=accuracy_ss_list[index]
    precision_fs=precision_fs_list[index]
    recall_fs=recall_fs_list[index]
    F1_score_fs=F1_score_fs_list[index]
    accuracy_fs=accuracy_fs_list[index]
    precision_all=precision_all_list[index]
    recall_all=recall_all_list[index]
    F1_score_all=F1_score_all_list[index]
    accuracy_all=accuracy_all_list[index]
    # NOTE(review): the raw TP/FP/TN/FN sums returned below come from the
    # LAST swept threshold's DataFrames, not the best-F1 threshold.
    return precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all,np.sum(df_sfg['TP_sfg']),np.sum(df_sfg['FP_sfg']),np.sum(df_sfg['TN_sfg']),np.sum(df_sfg['FN_sfg']),np.sum(df_ss['TP_ss']),np.sum(df_ss['FP_ss']),np.sum(df_ss['TN_ss']),np.sum(df_ss['FN_ss']),np.sum(df_fs['TP_fs']),np.sum(df_fs['FP_fs']),np.sum(df_fs['TN_fs']),np.sum(df_fs['FN_fs']),np.sum(df_all['TP_all']),np.sum(df_all['FP_all']),np.sum(df_all['TN_all']),np.sum(df_all['FN_all']),df_all
def augment_none(args):
    """Load the segmented images/solutions and split them, no augmentation.

    Returns (train_X, train_Y, test_X, test_Y, class_train, class_test) as
    float32 arrays of shape (N, img_size, img_size, 1). train_Y/test_Y are
    binary source masks; class_train/class_test retain the original class
    labels.
    """
    images_all = np.load(args.load_images)
    class_image = np.load(args.load_solutions)
    # Collapse class labels 2 and 3 onto 1 so the training target is a
    # plain source / no-source mask.
    binary_mask = np.where(class_image == 2, 1, class_image)
    binary_mask = np.where(binary_mask == 3, 1, binary_mask)
    solutions_all = binary_mask
    # Background rms map: used only to fill NaN image pixels with a
    # representative noise level for the selected frequency band.
    hdul = fits.open(args.bg_fits)
    if (args.freq == 1):
        data = hdul[0].data[0, 0, 16300:20300, 16300:20300]
        print('frequency is 560MHz')
    if (args.freq == 2):
        data = hdul[0].data[0, 0, 16300:20500, 16300:20500]
        print('frequency is 1400MHz')
    if (args.freq == 3):
        data = hdul[0].data[0, 0, 21700:25700, 16300:20300]
        print('frequency is 9200MHz')
    data[np.isnan(data)] = 0
    images_all[np.isnan(images_all)] = np.mean(data)
    images_all = images_all.reshape(images_all.shape[0], args.img_size, args.img_size)
    images_all = images_all * args.mult
    print('Maximum array value is:')
    print(np.max(images_all))
    # Restore channel-last layout for Keras.
    images_all = images_all.reshape(images_all.shape[0], images_all.shape[1], images_all.shape[2], 1)
    solutions_all = solutions_all.reshape(solutions_all.shape[0], solutions_all.shape[1], solutions_all.shape[2], 1)
    class_image = class_image.reshape(class_image.shape[0], class_image.shape[1], class_image.shape[2], 1)
    # Deterministic head/tail split at args.train_prop.
    n_train = int(args.train_prop * len(images_all))
    train_X, test_X = images_all[0:n_train], images_all[n_train:len(images_all)]
    train_Y, test_Y = solutions_all[0:n_train], solutions_all[n_train:len(images_all)]
    class_train, class_test = class_image[0:n_train], class_image[n_train:len(images_all)]
    for split in (train_X, test_X, train_Y, test_Y, class_train, class_test):
        print(split.shape)
    train_X, train_Y, test_X, test_Y, class_train, class_test = (
        arr.astype('float32') for arr in (train_X, train_Y, test_X, test_Y, class_train, class_test))
    return train_X, train_Y, test_X, test_Y, class_train, class_test
def augment(args):
    """Load the data and enlarge the training set with flips/rotations.

    Performs the same loading/splitting as augment_none(); afterwards the
    images selected by args.augment ('Extended': only images containing a
    class-1 or class-2 source, 'All': every training image) are mirrored
    (left-right, up-down) and rotated by 90/180/270 degrees, and the copies
    are appended to the training split.

    Returns (train_X, train_Y, test_X, test_Y, class_train, class_test).

    Note: args.augment must be 'Extended' or 'All'; any other value leaves
    x_augment undefined and raises NameError (the __main__ driver only
    calls this function for those two modes).
    """
    images_orig=np.load(args.load_images)
    class_image=np.load(args.load_solutions)
    # Collapse class labels 2 and 3 onto 1: binary source-mask target.
    data2=class_image
    data2=np.where(data2==2, 1, data2)
    data2=np.where(data2==3, 1, data2)
    solutions_orig=data2
    # Background rms map, used to fill NaN image pixels with a mean noise level.
    hdul = fits.open(args.bg_fits)
    if (args.freq==1):
        data=hdul[0].data[0,0,16300:20300,16300:20300]
        print('frequency is 560MHz')
    if (args.freq==2):
        data=hdul[0].data[0,0,16300:20500,16300:20500]
        print('frequency is 1400MHz')
    if (args.freq==3):
        # NOTE(review): this 9200MHz cutout differs from the one used in
        # augment_none ([21700:25700, 16300:20300]) -- confirm which region
        # is intended.
        data=hdul[0].data[0,0,19700:25700,19700:25700]
        print('frequency is 9200MHz')
    data[np.isnan(data)] = 0
    images_orig[np.isnan(images_orig)] = np.mean(data)
    images_orig=images_orig.reshape(images_orig.shape[0],args.img_size,args.img_size)
    images_orig=images_orig*args.mult
    print('Maximum array value is:')
    print(np.max(images_orig))
    images_all=images_orig
    solutions_all=solutions_orig
    images_all=images_all.reshape(images_all.shape[0],images_all.shape[1],images_all.shape[2],1)
    solutions_all=solutions_all.reshape(solutions_all.shape[0],solutions_all.shape[1],solutions_all.shape[2],1)
    class_image=class_image.reshape(class_image.shape[0],class_image.shape[1],class_image.shape[2],1)
    # Deterministic head/tail split at args.train_prop.
    train_proportion=args.train_prop
    split = int(train_proportion*len(images_all))
    train_X=images_all[0:split]
    test_X=images_all[split:len(images_all)]
    train_Y=solutions_all[0:split]
    test_Y=solutions_all[split:len(images_all)]
    class_train=class_image[0:split]
    class_test=class_image[split:len(images_all)]
    print(train_X.shape)
    print(test_X.shape)
    print(train_Y.shape)
    print(test_Y.shape)
    print(class_train.shape)
    print(class_test.shape)
    train_X = train_X.astype('float32')
    train_Y = train_Y.astype('float32')
    test_X = test_X.astype('float32')
    test_Y = test_Y.astype('float32')
    class_train = class_train.astype('float32')
    class_test = class_test.astype('float32')
    # Training images whose class map contains an SS (1) or FS (2) source.
    extended_index_train=np.unique(np.concatenate((np.where(class_train==1)[0], np.where(class_train==2)[0]), axis=0))
    train_X_extended=train_X[extended_index_train]
    train_Y_extended=train_Y[extended_index_train]
    if (args.augment=='Extended'):
        x_augment=train_X_extended
        y_augment=train_Y_extended
    if (args.augment=='All'):
        x_augment=train_X
        y_augment=train_Y
    # Accumulate the augmented copies in Python lists and concatenate once
    # at the end: the previous np.append-per-image version re-copied the
    # whole accumulator array on every iteration (O(n^2) data movement).
    lr_imgs, ud_imgs, r90_imgs, r180_imgs, r270_imgs = [], [], [], [], []
    lr_sols, ud_sols, r90_sols, r180_sols, r270_sols = [], [], [], [], []
    for i in range(0,len(x_augment)):
        print(i)
        lr_imgs.append(np.fliplr(x_augment[i]).reshape(1,50,50))
        ud_imgs.append(np.flipud(x_augment[i]).reshape(1,50,50))
        r90_imgs.append(scipy.ndimage.rotate(x_augment[i],angle=90,reshape=True,cval=0).reshape(1,50,50))
        r180_imgs.append(scipy.ndimage.rotate(x_augment[i],angle=180,reshape=True,cval=0).reshape(1,50,50))
        r270_imgs.append(scipy.ndimage.rotate(x_augment[i],angle=270,reshape=True,cval=0).reshape(1,50,50))
        lr_sols.append(np.fliplr(y_augment[i]).reshape(1,50,50))
        ud_sols.append(np.flipud(y_augment[i]).reshape(1,50,50))
        r90_sols.append(scipy.ndimage.rotate(y_augment[i],angle=90,reshape=True,cval=0).reshape(1,50,50))
        r180_sols.append(scipy.ndimage.rotate(y_augment[i],angle=180,reshape=True,cval=0).reshape(1,50,50))
        r270_sols.append(scipy.ndimage.rotate(y_augment[i],angle=270,reshape=True,cval=0).reshape(1,50,50))
    # The float64 empty seed reproduces the dtype promotion and the
    # empty-input behaviour of the original np.empty((0,50,50)) accumulators.
    _seed = [np.empty((0,50,50))]
    real_img_lr = np.concatenate(_seed + lr_imgs, axis=0)
    real_img_ud = np.concatenate(_seed + ud_imgs, axis=0)
    real_img_90 = np.concatenate(_seed + r90_imgs, axis=0)
    real_img_180 = np.concatenate(_seed + r180_imgs, axis=0)
    real_img_270 = np.concatenate(_seed + r270_imgs, axis=0)
    solutions_lr = np.concatenate(_seed + lr_sols, axis=0)
    solutions_ud = np.concatenate(_seed + ud_sols, axis=0)
    solutions_90 = np.concatenate(_seed + r90_sols, axis=0)
    solutions_180 = np.concatenate(_seed + r180_sols, axis=0)
    solutions_270 = np.concatenate(_seed + r270_sols, axis=0)
    x_augment=x_augment.reshape(x_augment.shape[0],x_augment.shape[1],x_augment.shape[2],1)
    y_augment=y_augment.reshape(y_augment.shape[0],y_augment.shape[1],y_augment.shape[2],1)
    # Stack the originals plus every augmented variant (restored to NHWC)
    # and append the whole batch to the training split.
    train_X1=np.concatenate([x_augment] + [a.reshape(a.shape[0],a.shape[1],a.shape[2],1) for a in (real_img_lr,real_img_ud,real_img_90,real_img_180,real_img_270)])
    train_Y1=np.concatenate([y_augment] + [a.reshape(a.shape[0],a.shape[1],a.shape[2],1) for a in (solutions_lr,solutions_ud,solutions_90,solutions_180,solutions_270)])
    train_X=np.concatenate((train_X,train_X1))
    train_Y=np.concatenate((train_Y,train_Y1))
    return train_X,train_Y,test_X,test_Y,class_train,class_test
if __name__ == "__main__":
    import os
    import argparse
    from keras import callbacks
    # Command-line entry point: parse options, build the dataset with the
    # requested augmentation mode, train, evaluate, and save the metrics.
    # The parsed ``args`` namespace is a module-level global read directly
    # by train(), test(), augment_none() and augment().
    parser = argparse.ArgumentParser(description="Run the AutoSource source-finder")
    parser.add_argument('--load_images', default='/path/to/segmented/images/real_images_50_505_final.npy')
    parser.add_argument('--load_solutions', default='/path/to/segmented/solutions/solutions_50_505_final.npy')
    parser.add_argument('--save_dir', default='/path/where/to/save/results/')
    parser.add_argument('--bg_fits', default='/path/to/background_fits/SKAMid_B1_8h_pbf_corrected_v3.pybdsm.rmsd_I.fits')
    parser.add_argument('--freq', default=1,type=int)
    parser.add_argument('--snr', default=5, type=int)
    parser.add_argument('--img_inc', default=50,type=int)
    parser.add_argument('--img_size', default=50,type=int)
    # NOTE(review): type=int with a float default -- argparse only applies
    # `type` to command-line strings, so the default stays the float 10e6.
    parser.add_argument('--mult', default=10e6,type=int)
    # NOTE(review): test() compares this against the string 'True', so the
    # default 'F' means image dumps are off unless '--show_img True'.
    parser.add_argument('--show_img', default='F')
    parser.add_argument('--train_prop', default=0.8,type=float)
    parser.add_argument('--epochs', default=50,type=int)
    parser.add_argument('--augment', default='None')
    args = parser.parse_args()
    print(args)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    # load data
    # Exactly one of the three branches below runs, chosen by --augment;
    # each loads data, trains, evaluates, and writes the metric vector
    # (x100, percentages) to a mode-specific text file.
    if (args.augment=='None'):
        print('No augmentation')
        start = time.time()
        train_X,train_Y,test_X,test_Y,class_train,class_test=augment_none(args)
        loss,val_loss,epochs= train(train_X,train_Y,test_X,test_Y)
        precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all,sfg_tp,sfg_fp,sfg_tn,sfg_fn,ss_tp,ss_fp,ss_tn,ss_fn,fs_tp,fs_fp,fs_tn,fs_fn,all_tp,all_fp,all_tn,all_fn,df_all= test(test_X,test_Y,class_test)
        data=np.array([precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all])*100
        end = time.time()
        print(end-start)
        print(sfg_tp,sfg_fp,sfg_tn,sfg_fn,ss_tp,ss_fp,ss_tn,ss_fn,fs_tp,fs_fp,fs_tn,fs_fn,all_tp,all_fp,all_tn,all_fn)
        np.savetxt(args.save_dir+str(args.snr)+'_'+str(args.freq)+'_none.txt', data,fmt='%f')
    if (args.augment=='Extended'):
        print('Augmenting extended sources')
        start = time.time()
        train_X,train_Y,test_X,test_Y,class_train,class_test=augment(args)
        loss,val_loss,epochs= train(train_X,train_Y,test_X,test_Y)
        precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all,sfg_tp,sfg_fp,sfg_tn,sfg_fn,ss_tp,ss_fp,ss_tn,ss_fn,fs_tp,fs_fp,fs_tn,fs_fn,all_tp,all_fp,all_tn,all_fn,df_all= test(test_X,test_Y,class_test)
        end = time.time()
        print(end-start)
        data=np.array([precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all])*100
        print(sfg_tp,sfg_fp,sfg_tn,sfg_fn,ss_tp,ss_fp,ss_tn,ss_fn,fs_tp,fs_fp,fs_tn,fs_fn,all_tp,all_fp,all_tn,all_fn)
        np.savetxt(args.save_dir+str(args.snr)+'_'+str(args.freq)+'_extended.txt', data,fmt='%f')
    if (args.augment=='All'):
        start = time.time()
        print('Augmenting all sources')
        train_X,train_Y,test_X,test_Y,class_train,class_test=augment(args)
        loss,val_loss,epochs= train(train_X,train_Y,test_X,test_Y)
        precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all,sfg_tp,sfg_fp,sfg_tn,sfg_fn,ss_tp,ss_fp,ss_tn,ss_fn,fs_tp,fs_fp,fs_tn,fs_fn,all_tp,all_fp,all_tn,all_fn,df_all= test(test_X,test_Y,class_test)
        end=time.time()
        print(end-start)
        data=np.array([precision_sfg,recall_sfg,F1_score_sfg,accuracy_sfg,precision_ss,recall_ss,F1_score_ss,accuracy_ss,precision_fs,recall_fs,F1_score_fs,accuracy_fs, precision_all,recall_all,F1_score_all,accuracy_all])*100
        print(sfg_tp,sfg_fp,sfg_tn,sfg_fn,ss_tp,ss_fp,ss_tn,ss_fn,fs_tp,fs_fp,fs_tn,fs_fn,all_tp,all_fp,all_tn,all_fn)
        np.savetxt(args.save_dir+str(args.snr)+'_'+str(args.freq)+'_all.txt', data,fmt='%f')
| 36,730 | 36.328252 | 598 | py |
STM-Evaluation | STM-Evaluation-main/classification/main.py | """
Modified from DeiT official training and evaluation code.
"""
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import time
import argparse
import datetime
from pathlib import Path
import torch
import numpy as np
from torch.backends import cudnn
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, LayerDecayValueAssigner
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from samplers import RASampler
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
# timm register while not used
import models
# Global CUDA/cuDNN policy for the whole training run: enable cuDNN with
# autotuning (benchmark=True picks the fastest conv algorithm per input
# shape) and allow TF32 math for convolutions and matmuls. Together with
# deterministic=False this trades bitwise run-to-run reproducibility for
# throughput on TF32-capable GPUs.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
def str2bool(v):
    """Parse a command-line flag value into a bool.

    Passes genuine bools through unchanged; otherwise maps the usual
    textual spellings ('yes'/'no', 'true'/'false', 't'/'f', 'y'/'n',
    '1'/'0', case-insensitively) onto True/False. Any other value raises
    argparse.ArgumentTypeError so argparse reports a clean usage error,
    enabling arguments like '--arg1 true --arg2 false'.
    """
    if isinstance(v, bool):
        return v
    word = v.lower()
    if word in {'yes', 'true', 't', 'y', '1'}:
        return True
    if word in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def get_args_parser():
    """Build the full training/evaluation CLI parser.

    Returns an ``argparse.ArgumentParser`` created with ``add_help=False``
    so it can be composed as a parent parser (see the ``__main__`` block).
    Covers model, EMA, optimization, augmentation, dataset, checkpointing,
    distributed and Weights & Biases options.

    Fixes vs. the previous revision (defaults unchanged, only misleading
    help text corrected): ``--opt`` help had an unbalanced parenthesis;
    ``--clip_grad`` help claimed the default was None (it is 5.0);
    ``--lr`` help claimed 4e-3 (it is 1e-3); ``--min_lr`` help claimed
    1e-6 (it is 1e-5).
    """
    parser = argparse.ArgumentParser('training and evaluation script for image classification', add_help=False)
    parser.add_argument('--batch_size', default=1024, type=int,
                        help='Per GPU batch size')
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--update_freq', default=1, type=int,
                        help='gradient accumulation steps')

    # Model parameters
    parser.add_argument('--model', default='unified_swin_tiny', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int,
                        help='image input size')

    # EMA related parameters
    parser.add_argument('--model_ema', type=str2bool, default=True)
    parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
    parser.add_argument('--model_ema_force_cpu', type=str2bool, default=False, help='')
    parser.add_argument('--model_ema_eval', type=str2bool, default=True, help='Using ema to eval during training.')

    # Optimization parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
    parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=5.0, metavar='NORM',
                        help='Clip gradient norm (default: 5.0)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
        weight decay. We use a cosine schedule for WD and using a larger decay by
        the end of training improves performance for ViTs.""")
    parser.add_argument('--layer_scale_init_value', type=float, default=1e-6,
                        help='the initial value for layer scale, default=1e-6')

    parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                        help='learning rate (default: 1e-3)')
    parser.add_argument('--warmup_init_lr', type=float, default=1e-6, metavar='LR',
                        help='initial learning rate for before warm up')
    parser.add_argument('--layer_decay', type=float, default=1.0)
    parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-5)')
    parser.add_argument('--warmup_epochs', type=int, default=20, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='num of steps to warmup LR, will overload warmup_epochs if set > 0')

    # Augmentation parameters
    parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1,
                        help='Label smoothing (default: 0.1)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated_aug', type=str2bool, default=False,
                        help="Use repeated augmentation.")

    # Evaluation parameters
    parser.add_argument('--crop_pct', type=float, default=0.875)

    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', type=str2bool, default=False,
                        help='Do not random erase first (clean) augmentation split')

    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0.')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0.')
    parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup_prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup_mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')

    # * Finetuning params
    parser.add_argument('--finetune', default='',
                        help='finetune from checkpoint')
    parser.add_argument('--head_init_scale', default=1.0, type=float,
                        help='classifier head initial scale, typically adjusted in fine-tuning')
    parser.add_argument('--model_key', default='model|module', type=str,
                        help='which key to load from saved state dict, usually model or model_ema')
    parser.add_argument('--model_prefix', default='', type=str)

    # Dataset parameters
    parser.add_argument('--data_path', default='./minidata', type=str,
                        help='dataset path')
    parser.add_argument('--eval_data_path', default=None, type=str,
                        help='dataset path for evaluation')
    parser.add_argument('--data_on_memory', default=False, type=str2bool,
                        help='loading training data to memory')
    parser.add_argument('--nb_classes', default=1000, type=int,
                        help='number of the classification types')
    parser.add_argument('--imagenet_default_mean_and_std', type=str2bool, default=True)
    parser.add_argument('--data_set', default='IMNET1k',
                        choices=['IMNET1k'],
                        type=str, help='ImageNet dataset path')
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None,
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)

    parser.add_argument('--resume', default='',
                        help='resume from checkpoint')
    parser.add_argument('--auto_resume', type=str2bool, default=True)
    parser.add_argument('--save_ckpt', type=str2bool, default=True)
    parser.add_argument('--save_interval_ckpt', type=str2bool, default=False)
    parser.add_argument('--save_ckpt_freq', default=1, type=int)
    parser.add_argument('--save_ckpt_num', default=3, type=int)

    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', type=str2bool, default=False,
                        help='Perform evaluation only')
    parser.add_argument('--dist_eval', type=str2bool, default=True,
                        help='Enabling distributed evaluation')
    parser.add_argument('--disable_eval', type=str2bool, default=False,
                        help='Disabling evaluation during training')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', type=str2bool, default=True,
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', type=str2bool, default=False)
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    parser.add_argument('--use_amp', type=str2bool, default=True,
                        help="Use PyTorch's AMP (Automatic Mixed Precision) or not")

    # Weights and Biases arguments
    parser.add_argument('--enable_wandb', type=str2bool, default=False,
                        help="enable logging to Weights and Biases")
    parser.add_argument('--project', default='unified_model_eval', type=str,
                        help="The name of the W&B project where you're sending the new run.")
    parser.add_argument('--name', default='unified_model_eval', type=str,
                        help="The name of the new run.")
    parser.add_argument('--wandb_ckpt', type=str2bool, default=False,
                        help="Save model checkpoints as W&B Artifacts.")

    return parser
def main(args):
    """End-to-end training/evaluation driver for image classification.

    Builds datasets, samplers and loaders, constructs the model (optionally
    DDP-wrapped and tracked by an EMA copy), creates the optimizer plus
    cosine LR/weight-decay schedules, then runs the epoch loop with
    periodic checkpointing and validation.  Side effects: writes
    checkpoints and JSON logs under ``args.output_dir`` and tensorboard
    events under ``args.log_dir``.  In ``--eval`` mode it returns the
    top-1 accuracy; otherwise returns None after training.
    """
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility (offset by rank so workers differ)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True

    dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
    if args.disable_eval:
        args.dist_eval = False
        dataset_val = None
    else:
        dataset_val, _ = build_dataset(is_train=False, args=args)

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()

    if not args.repeated_aug:
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
        )
    else:
        # repeated augmentation: each image is sampled several times per epoch
        sampler_train = RASampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
    print("Sampler_train = %s" % str(sampler_train))

    if args.dist_eval:
        if len(dataset_val) % num_tasks != 0:
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                  'This will slightly alter validation results as extra duplicate entries are added to achieve '
                  'equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(
            dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    # tensorboard / wandb logging only on the main process
    if global_rank == 0 and args.log_dir is not None:
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None

    if global_rank == 0 and args.enable_wandb:
        wandb_logger = utils.WandbLogger(args)
    else:
        wandb_logger = None

    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
        persistent_workers=True,
    )

    if dataset_val is not None:
        data_loader_val = torch.utils.data.DataLoader(
            dataset_val, sampler=sampler_val,
            batch_size=int(args.batch_size),
            num_workers=args.num_workers,
            pin_memory=args.pin_mem,
            drop_last=False,
            persistent_workers=True,
        )
    else:
        data_loader_val = None

    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        print("Mixup is activated!")
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)

    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        layer_scale_init_value=args.layer_scale_init_value,
    )

    if args.finetune:
        # load pretrained weights; drop the classifier head when its shape
        # does not match the current number of classes
        if args.finetune.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.finetune, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.finetune, map_location='cpu')

        print("Load ckpt from %s" % args.finetune)
        checkpoint_model = None
        for model_key in args.model_key.split('|'):
            if model_key in checkpoint:
                checkpoint_model = checkpoint[model_key]
                print("Load state_dict by model_key = %s" % model_key)
                break
        if checkpoint_model is None:
            checkpoint_model = checkpoint
        state_dict = model.state_dict()
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
    model.to(device)

    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEma(
            model,
            decay=args.model_ema_decay,
            device='cpu' if args.model_ema_force_cpu else '',
            resume='')
        print("Using EMA with decay = %.8f" % args.model_ema_decay)

    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)

    print("Model = %s" % str(model_without_ddp))
    print('number of params:', n_parameters)

    # effective batch size across gradient accumulation and all ranks
    total_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
    num_training_steps_per_epoch = len(dataset_train) // total_batch_size
    print("LR = %.8f" % args.lr)
    print("Batch size = %d" % total_batch_size)
    print("Update frequent = %d" % args.update_freq)
    print("Number of training examples = %d" % len(dataset_train))
    print("Number of training training per epoch = %d" % num_training_steps_per_epoch)

    # layer-wise LR decay, only active when layer_decay != 1.0
    if args.layer_decay < 1.0 or args.layer_decay > 1.0:
        num_layers = 12
        assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
    else:
        assigner = None

    if assigner is not None:
        print("Assigned values = %s" % str(assigner.values))

    if args.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
        model_without_ddp = model.module

    optimizer = create_optimizer(
        args, model_without_ddp, skip_list=None,
        get_num_layer=assigner.get_layer_id if assigner is not None else None,
        get_layer_scale=assigner.get_scale if assigner is not None else None)

    loss_scaler = NativeScaler()  # if args.use_amp is False, this won't be used

    print("Use Cosine LR scheduler")
    # per-iteration LR values for the whole run (warmup + cosine decay)
    lr_schedule_values = utils.cosine_scheduler(
        args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
        warmup_epochs=args.warmup_epochs,
        start_warmup_value=args.warmup_init_lr,
        warmup_steps=args.warmup_steps,
    )

    if args.weight_decay_end is None:
        args.weight_decay_end = args.weight_decay
    wd_schedule_values = utils.cosine_scheduler(
        args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
    print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))

    if mixup_fn is not None:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing > 0.:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()

    print("criterion = %s" % str(criterion))

    # resume from --resume, or auto-resume from output_dir when enabled
    utils.auto_load_model(
        args=args, model=model, model_without_ddp=model_without_ddp,
        optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)

    if args.eval:
        print(f"Eval only mode")
        test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
        print(f"Accuracy of the network on {len(dataset_val)} test images: {test_stats['acc1']:.5f}%")
        return test_stats['acc1']

    max_accuracy = 0.0
    if args.model_ema and args.model_ema_eval:
        max_accuracy_ema = 0.0

    print("Start training for %d epochs" % args.epochs)
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # re-seed the sampler so each epoch sees a different shuffle
            data_loader_train.sampler.set_epoch(epoch)
        if log_writer is not None:
            log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
        if wandb_logger:
            wandb_logger.set_steps()
        train_stats = train_one_epoch(
            model, criterion, data_loader_train, optimizer,
            device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
            log_writer=log_writer, wandb_logger=wandb_logger, start_steps=epoch * num_training_steps_per_epoch,
            lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
            num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
            use_amp=args.use_amp
        )
        if args.output_dir and args.save_ckpt:
            if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
                utils.save_model(
                    args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                    loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
        if data_loader_val is not None:
            test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp)
            print(f"Accuracy of the model on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
            if max_accuracy < test_stats["acc1"]:
                max_accuracy = test_stats["acc1"]
                if args.output_dir and args.save_ckpt:
                    # keep a separate "best" checkpoint alongside the periodic ones
                    utils.save_model(
                        args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                        loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
            print(f'Max accuracy: {max_accuracy:.2f}%')

            if log_writer is not None:
                log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
                log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
                log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)

            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         **{f'test_{k}': v for k, v in test_stats.items()},
                         'epoch': epoch,
                         'n_parameters': n_parameters}

            # repeat testing routines for EMA, if ema eval is turned on
            if args.model_ema and args.model_ema_eval:
                test_stats_ema = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp)
                print(f"Accuracy of the model EMA on {len(dataset_val)} test images: {test_stats_ema['acc1']:.1f}%")
                if max_accuracy_ema < test_stats_ema["acc1"]:
                    max_accuracy_ema = test_stats_ema["acc1"]
                    if args.output_dir and args.save_ckpt:
                        utils.save_model(
                            args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                            loss_scaler=loss_scaler, epoch="best-ema", model_ema=model_ema)
                print(f'Max EMA accuracy: {max_accuracy_ema:.2f}%')
                if log_writer is not None:
                    log_writer.update(test_acc1_ema=test_stats_ema['acc1'], head="perf", step=epoch)
                log_stats.update({**{f'test_{k}_ema': v for k, v in test_stats_ema.items()}})
        else:
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         'epoch': epoch,
                         'n_parameters': n_parameters}

        if args.output_dir and utils.is_main_process():
            if log_writer is not None:
                log_writer.flush()
            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                f.write(json.dumps(log_stats) + "\n")

        if wandb_logger:
            wandb_logger.log_epoch_metrics(log_stats)

    if wandb_logger and args.wandb_ckpt and args.save_ckpt and args.output_dir:
        wandb_logger.log_checkpoints()

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Compose the CLI from the shared argument parser and launch training.
    cli_parser = argparse.ArgumentParser(
        'training and evaluation script', parents=[get_args_parser()])
    parsed_args = cli_parser.parse_args()
    if parsed_args.output_dir:
        # Ensure the checkpoint/log directory exists before training starts.
        Path(parsed_args.output_dir).mkdir(parents=True, exist_ok=True)
    main(parsed_args)
| 23,878 | 46.285149 | 121 | py |
STM-Evaluation | STM-Evaluation-main/classification/engine.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from logging import critical
import math
from typing import Iterable, Optional
import torch
import torch.nn.functional as F
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from timm.loss import SoftTargetCrossEntropy
import utils
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,
                    wandb_logger=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
                    num_training_steps_per_epoch=None, update_freq=None, use_amp=False):
    """Train ``model`` for one epoch over ``data_loader``.

    Supports gradient accumulation (``update_freq`` micro-steps per
    optimizer step), AMP via ``loss_scaler``, per-iteration LR and
    weight-decay schedules, EMA weight tracking and tensorboard / W&B
    logging.  Aborts via assertion if the loss becomes non-finite.
    Returns a dict mapping meter names (loss, lr, ...) to their
    epoch-global averages.
    """
    model.train(True)
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    optimizer.zero_grad()

    for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        step = data_iter_step // update_freq
        if step >= num_training_steps_per_epoch:
            continue
        it = start_steps + step  # global training iteration

        # Update LR & WD on the first micro-step of each accumulation cycle.
        # BUGFIX: parentheses added — `and` binds tighter than `or`, so the
        # original condition applied the schedule on *every* micro-step
        # whenever lr_schedule_values was set, ignoring update_freq.
        if (lr_schedule_values is not None or wd_schedule_values is not None) \
                and data_iter_step % update_freq == 0:
            for i, param_group in enumerate(optimizer.param_groups):
                if lr_schedule_values is not None:
                    param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
                if wd_schedule_values is not None and param_group["weight_decay"] > 0:
                    param_group["weight_decay"] = wd_schedule_values[it]

        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)

        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)

        if use_amp:
            with torch.cuda.amp.autocast():
                output = model(samples)
                loss = criterion(output, targets)
        else:  # full precision
            output = model(samples)
            loss = criterion(output, targets)

        loss_value = loss.item()

        if not math.isfinite(loss_value):  # this could trigger if using AMP
            print("Loss is {}, stopping training".format(loss_value))
            assert math.isfinite(loss_value)

        if use_amp:
            # this attribute is added by timm on one optimizer (adahessian)
            is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
            loss /= update_freq
            grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
                                    parameters=model.parameters(), create_graph=is_second_order,
                                    update_grad=(data_iter_step + 1) % update_freq == 0)
            if (data_iter_step + 1) % update_freq == 0:
                optimizer.zero_grad()
                if model_ema is not None:
                    model_ema.update(model)
        else:  # full precision
            loss /= update_freq
            loss.backward()
            if max_norm is not None:  # clip grad
                grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            else:
                grad_norm = utils.get_grad_norm_(model.parameters())
            if (data_iter_step + 1) % update_freq == 0:
                optimizer.step()
                optimizer.zero_grad()
                if model_ema is not None:
                    model_ema.update(model)

        if torch.cuda.is_available():
            torch.cuda.synchronize()

        if mixup_fn is None:
            class_acc = (output.max(-1)[-1] == targets).float().mean()
        else:
            # mixup yields soft targets, so hard accuracy is undefined
            class_acc = None
        metric_logger.update(loss=loss_value)
        metric_logger.update(class_acc=class_acc)
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])

        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if group["weight_decay"] > 0:
                weight_decay_value = group["weight_decay"]
        metric_logger.update(weight_decay=weight_decay_value)
        if use_amp:
            metric_logger.update(grad_norm=grad_norm)

        if log_writer is not None:
            log_writer.update(loss=loss_value, head="loss")
            log_writer.update(class_acc=class_acc, head="loss")
            log_writer.update(lr=max_lr, head="opt")
            log_writer.update(min_lr=min_lr, head="opt")
            log_writer.update(weight_decay=weight_decay_value, head="opt")
            log_writer.update(grad_norm=grad_norm, head="opt")
            log_writer.set_step()

        if wandb_logger:
            wandb_logger._wandb.log({
                'Rank-0 Batch Wise/train_loss': loss_value,
                'Rank-0 Batch Wise/train_max_lr': max_lr,
                'Rank-0 Batch Wise/train_min_lr': min_lr
            }, commit=False)
            if class_acc:
                wandb_logger._wandb.log({'Rank-0 Batch Wise/train_class_acc': class_acc}, commit=False)
            if use_amp:
                wandb_logger._wandb.log({'Rank-0 Batch Wise/train_grad_norm': grad_norm}, commit=False)
            wandb_logger._wandb.log({'Rank-0 Batch Wise/global_train_step': it})

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device, use_amp=False):
    """Evaluate ``model`` on ``data_loader`` and report top-1/top-5 accuracy.

    Runs in eval mode under ``torch.no_grad``; when ``use_amp`` is set the
    forward pass is wrapped in CUDA autocast.  Returns a dict mapping meter
    names ('acc1', 'acc5', 'loss') to their global averages.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    # evaluation mode disables dropout / fixes batch-norm statistics
    model.eval()

    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch[0].to(device, non_blocking=True)
        target = batch[-1].to(device, non_blocking=True)

        # forward pass, optionally under mixed precision
        if use_amp:
            with torch.cuda.amp.autocast():
                output = model(images)
                loss = criterion(output, target)
        else:
            output = model(images)
            loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        n_samples = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=n_samples)
        metric_logger.meters['acc5'].update(acc5.item(), n=n_samples)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))

    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class VarianceCollateFN:
    """DataLoader collate function producing one standard view plus several
    transformed ("variance") views of each image.

    ``standard_transform`` is applied to every image to build the reference
    batch; each entry of ``variance_transforms`` (name -> callable) builds
    an extra batch under that transformation.  The returned dict holds
    'standard_img' (stacked tensor), 'variance_img' (name -> stacked
    tensor) and 'gold' (LongTensor of labels).
    """

    def __init__(self, standard_transform, variance_transforms: dict):
        self.standard_transform = standard_transform
        self.variance_transforms = variance_transforms

    def __call__(self, data_list):
        labels = [target for _, target in data_list]
        reference = torch.stack(
            [self.standard_transform(img) for img, _ in data_list], dim=0)
        transformed = {
            name: torch.stack([tfm(img) for img, _ in data_list], dim=0)
            for name, tfm in self.variance_transforms.items()
        }
        return {
            'standard_img': reference,
            'variance_img': transformed,
            'gold': torch.LongTensor(labels),
        }
@torch.no_grad()
def evaluate_invariance(data_loader, model, device, use_amp=False):
    """Measure prediction stability and accuracy under input transformations.

    Each batch (built by VarianceCollateFN) carries a 'standard_img' view
    plus several named 'variance_img' views.  For every transformed view
    the function logs: a soft cross-entropy against the model's own
    softmax prediction on the standard view, top-1/top-5 accuracy against
    the gold labels, and 'consistency' — the fraction of predictions that
    match the standard-view predicted label.  Returns the meter dict
    (global averages accessible per meter).
    """
    #criterion = torch.nn.CrossEntropyLoss()
    # soft-target CE: compares transformed-view logits to the standard-view
    # probability distribution rather than the hard gold label
    criterion = SoftTargetCrossEntropy()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Invariance Test:'

    # switch to evaluation mode
    model.eval()

    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch['standard_img']
        images = images.to(device, non_blocking=True)
        gold_target = batch['gold'].to(device, non_blocking=True)

        if use_amp:
            with torch.cuda.amp.autocast():
                pred_logits = model(images)
        else:
            pred_logits = model(images)
        # reference prediction induced by the un-transformed view
        pred_target = F.softmax(pred_logits, dim=-1)
        pred_label = pred_logits.max(dim=1)[1]

        batch_size = images.shape[0]
        for variance_name, transformed_images in batch['variance_img'].items():
            transformed_images = transformed_images.to(device)
            if use_amp:
                with torch.cuda.amp.autocast():
                    output = model(transformed_images)
                    loss = criterion(output, pred_target)
            else:
                output = model(transformed_images)
                loss = criterion(output, pred_target)
            metric_logger.meters[f'{variance_name} loss'].update(loss.item(), n=batch_size)

            acc1, acc5 = accuracy(output, gold_target, topk=(1, 5))
            # consistency: agreement with the standard-view predicted label
            consistency = accuracy(output, pred_label)[0]
            metric_logger.meters[f'{variance_name} acc1'].update(acc1.item(), n=batch_size)
            metric_logger.meters[f'{variance_name} acc5'].update(acc5.item(), n=batch_size)
            metric_logger.meters[f'{variance_name} consistency'].update(consistency.item(), n=batch_size)

        # baseline accuracy of the standard (un-transformed) view
        acc1, acc5 = accuracy(pred_logits, gold_target, topk=(1, 5))
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Eval Results')
    for key, value in metric_logger.meters.items():
        print(f'\t{key}: {value}')
    return metric_logger.meters
| 10,886 | 38.879121 | 114 | py |
STM-Evaluation | STM-Evaluation-main/classification/invariance_eval_all.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import argparse
from pathlib import Path
from xml.sax import default_parser_list
import torch
import numpy as np
from torch.backends import cudnn
from timm.models import create_model
from datasets import build_dataset
from engine import evaluate_invariance, VarianceCollateFN
# timm register while not used
import utils
import models
from main import str2bool
from tools.variance_transforms import standard_transform, position_jitter_transform, rotate_transform
def get_args_parser():
    """Build the CLI parser for the invariance-evaluation script.

    Mirrors the training parser in ``main.py`` but only exposes options
    needed for evaluation: model/checkpoint, dataset, distributed setup,
    and which family of input transformations (``--variance_type``) to
    sweep.
    """
    parser = argparse.ArgumentParser('evaluation script for variance in image classification',
                                     add_help=False)
    parser.add_argument('--batch_size', default=2, type=int,
                        help='Per GPU batch size')

    # Model parameters
    parser.add_argument('--model', default='conv_swin_tiny', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int,
                        help='image input size')
    parser.add_argument('--layer_scale_init_value', type=float, default=1e-6,
                        help='the initial value for layer scale, default=1e-6')

    # Evaluation parameters
    parser.add_argument('--crop_pct', type=float, default=0.875)

    # Dataset parameters
    parser.add_argument('--data_path', default='./minidata', type=str,
                        help='dataset path')
    parser.add_argument('--data_on_memory', default=False, type=str2bool,
                        help='loading training data to memory')
    parser.add_argument('--nb_classes', default=1000, type=int,
                        help='number of the classification types')
    # which transformation family to sweep during evaluation
    parser.add_argument('--variance_type', default="translation",
                        choices=['translation', 'pre_rotation', 'post_rotation', 'scale'])
    # parser.add_argument('--jitter_strength', default=0, type=int)
    # parser.add_argument('--rotation_angle', default=0, type=int)
    parser.add_argument('--imagenet_default_mean_and_std', type=str2bool, default=True)
    parser.add_argument('--data_set', default='IMNET1k',
                        # choices=['CIFAR', 'IMNET', 'image_folder'],
                        choices=['IMNET1k'],
                        type=str, help='ImageNet dataset path')
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None,
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)

    # NOTE(review): default points at a machine-specific checkpoint path —
    # override --resume when running on other hosts
    parser.add_argument('--resume', default='/data1/shimin/model_parameters/backbone/swin_tiny/checkpoint-best.pth',
                        help='resume from checkpoint')
    parser.add_argument('--dist_eval', type=str2bool, default=True,
                        help='Enabling distributed evaluation')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', type=str2bool, default=True,
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', type=str2bool, default=False)
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    parser.add_argument('--use_amp', type=str2bool, default=False,
                        help="Use PyTorch's AMP (Automatic Mixed Precision) or not")

    # Weights and Biases arguments
    parser.add_argument('--enable_wandb', type=str2bool, default=False,
                        help="enable logging to Weights and Biases")
    parser.add_argument('--project', default='unified_model_eval', type=str,
                        help="The name of the W&B project where you're sending the new run.")
    parser.add_argument('--name', default='unified_model_eval', type=str,
                        help="The name of the new run.")
    parser.add_argument('--wandb_ckpt', type=str2bool, default=False,
                        help="Save model checkpoints as W&B Artifacts.")

    return parser
def main(args):
    """Run invariance evaluation of a restored classification model.

    Builds the validation dataloader whose collate fn applies one standard
    eval transform plus a family of perturbed transforms selected by
    ``args.variance_type`` (translation / pre_rotation / post_rotation /
    scale), restores weights from ``args.resume`` and writes the per-transform
    results to ``variance_<type>.txt`` under ``args.output_dir``.
    """
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    dataset_val, args.nb_classes = build_dataset(is_train=False, args=args)
    # Per-sample transform is disabled: the VarianceCollateFN below applies the
    # standard and variance transforms batch-wise instead.
    dataset_val.transform = None
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    if args.dist_eval:
        # An uneven split would duplicate samples and bias the measurement, so
        # it is rejected outright rather than silently padded.
        if len(dataset_val) % num_tasks != 0:
            # print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
            #       'This will slightly alter validation results as extra duplicate entries are added to achieve '
            #       'equal num of samples per-process.', file=sys.stderr)
            raise NotImplementedError('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                                      'equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(dataset_val,
                                                          num_replicas=num_tasks,
                                                          rank=global_rank,
                                                          shuffle=False)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    transform = standard_transform(img_size=args.input_size,
                                   crop_ratio=args.crop_pct)
    # One transform per perturbation strength of the chosen variance type.
    variance_transforms = {}
    if args.variance_type == 'translation':
        for strength in range(0, 64+1, 4):
            variance_transforms[f'position jitter {strength}'] = position_jitter_transform(img_size=args.input_size,
                                                                                           crop_ratio=args.crop_pct,
                                                                                           jitter_strength=strength)
    if args.variance_type == 'pre_rotation':
        for angle in range(0, 90+1, 5):
            variance_transforms[f'pre rotation {angle}'] = rotate_transform(img_size=args.input_size,
                                                                            crop_ratio=args.crop_pct,
                                                                            angle=angle,
                                                                            pre_rotate=True)
    if args.variance_type == 'post_rotation':
        # NOTE(review): keys say 'pre rotation' although pre_rotate=False here;
        # looks copy-pasted from the branch above — confirm intended labels.
        for angle in range(0, 90+1, 5):
            variance_transforms[f'pre rotation {angle}'] = rotate_transform(img_size=args.input_size,
                                                                            crop_ratio=args.crop_pct,
                                                                            angle=angle,
                                                                            pre_rotate=False)
    if args.variance_type == 'scale':
        # Ratios 0.200 .. 2.000 in steps of 0.125.
        for ratio in range(200, 2000+1, 125):
            ratio = ratio / 1000
            variance_transforms[f'scale {ratio}'] = standard_transform(img_size=args.input_size,
                                                                       crop_ratio=ratio)
    print('num variance transforms:', len(variance_transforms))
    collate_fn = VarianceCollateFN(standard_transform=transform,
                                   variance_transforms=variance_transforms)
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=int(args.batch_size),
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False,
        collate_fn=collate_fn,
    )
    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        layer_scale_init_value=args.layer_scale_init_value,
    ).to(device)
    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Model = {args.model}")
    print('number of params:', n_parameters)
    if args.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.gpu],
                                                          find_unused_parameters=False)
        model_without_ddp = model.module
    else:
        model = model.cuda()
        model_without_ddp = model
    # Disable auto-resume so exactly args.resume is loaded.
    args.auto_resume = None
    utils.auto_load_model(args=args,
                          model=model,
                          model_without_ddp=model_without_ddp,
                          optimizer=None, loss_scaler=None, model_ema=None)
    test_stats = evaluate_invariance(data_loader_val, model, device, use_amp=args.use_amp)
    # Dump per-transform statistics as a plain text report.
    with open(os.path.join(args.output_dir, f'variance_{args.variance_type}.txt'), 'w', encoding='utf-8') as file:
        file.write('* Eval Results\n')
        for key, value in test_stats.items():
            file.write(f'\t{key}: {value}\n')
if __name__ == '__main__':
    # Build the CLI on top of the shared parser defined above and ensure the
    # output directory exists before evaluation writes its report there.
    parser = argparse.ArgumentParser('evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
# srun -p VC --gres=gpu:1 --quotatype=spot --ntasks=1 --ntasks-per-node=1 python invariance_eval_all.py --batch_size 1024 --data_path /mnt/cache/share/images/ --output_dir ./backbone_outputdir/eval --model conv_swin_tiny --resume /mnt/petrelfs/share_data/shimin/share_checkpoint/swin/swin_tiny/checkpoint-best.pth
| 10,598 | 45.69163 | 317 | py |
STM-Evaluation | STM-Evaluation-main/classification/utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import math
import time
import datetime
import subprocess
from pathlib import Path
from collections import defaultdict, deque
import numpy as np
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
from torch._six import inf
from tensorboardX import SummaryWriter
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        # Default display: windowed median plus running global average.
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        """Record *value* observed *n* times."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total],
                             dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        reduced_count, reduced_total = stats.tolist()
        self.count = int(reduced_count)
        self.total = reduced_total

    @property
    def median(self):
        # Median over the recent window only.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        # Mean over the recent window only.
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        # Mean over every value ever recorded, weighted by n.
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(median=self.median,
                               avg=self.avg,
                               global_avg=self.global_avg,
                               max=self.max,
                               value=self.value)
class MetricLogger(object):
    """Collection of named SmoothedValue meters with progress printing.

    Meters are created lazily on first ``update`` via a defaultdict and are
    also reachable as attributes through ``__getattr__``.
    """
    def __init__(self, delimiter="\t"):
        # delimiter joins the per-meter strings in __str__ / log lines.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Feed one scalar per keyword into the meter of the same name.

        None values are skipped; tensors are converted with .item().
        """
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Expose meters as attributes (only called when normal lookup fails).
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """Reduce every meter's count/total across distributed processes."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        # Register a meter with a custom format string.
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from *iterable*, printing progress every *print_freq* steps.

        Tracks per-iteration data-loading and total step time, and prints an
        ETA plus (on CUDA) peak memory. Requires *iterable* to have len().
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time spent waiting on the dataloader since the previous step.
            data_time.update(time.time() - end)
            yield obj
            # Full step time: data loading + whatever the caller did with obj.
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
        # Record the wall-clock total as a regular meter named 'time'.
        self.update(time=int(total_time))
class TensorboardLogger(object):
    """Thin wrapper around tensorboardX SummaryWriter with a step counter."""

    def __init__(self, log_dir):
        self.writer = SummaryWriter(logdir=log_dir)
        self.step = 0

    def set_step(self, step=None):
        """Jump to *step*, or advance the counter by one when *step* is None."""
        self.step = self.step + 1 if step is None else step

    def update(self, head='scalar', step=None, **kwargs):
        """Write every non-None keyword as a scalar under ``head/<name>``.

        Tensors are unwrapped with .item(); uses the internal counter unless
        an explicit *step* is given.
        """
        target_step = self.step if step is None else step
        for name, val in kwargs.items():
            if val is None:
                continue
            if isinstance(val, torch.Tensor):
                val = val.item()
            assert isinstance(val, (float, int))
            self.writer.add_scalar(head + "/" + name, val, target_step)

    def flush(self):
        self.writer.flush()
class WandbLogger(object):
    """Weights & Biases logger: initializes a run and logs epoch metrics,
    checkpoints (as artifacts) and custom step metrics."""
    def __init__(self, args):
        # args must provide .project and .name for the W&B run.
        self.args = args
        try:
            import wandb
            self._wandb = wandb
        except ImportError:
            raise ImportError(
                "To use the Weights and Biases Logger please install wandb."
                "Run `pip install wandb` to install it."
            )
        # Initialize a W&B run
        if self._wandb.run is None:
            self._wandb.init(
                project=args.project,
                name=args.name,
                config=args
            )
    def log_epoch_metrics(self, metrics, commit=True):
        """
        Log train/test metrics onto W&B.
        """
        # NOTE: mutates *metrics* in place (pops 'n_parameters' and 'epoch').
        # Log number of model parameters as W&B summary
        self._wandb.summary['n_parameters'] = metrics.get('n_parameters', None)
        metrics.pop('n_parameters', None)
        # Log current epoch
        self._wandb.log({'epoch': metrics.get('epoch')}, commit=False)
        metrics.pop('epoch')
        # Keys containing 'train'/'test' are routed to separate sections.
        for k, v in metrics.items():
            if 'train' in k:
                self._wandb.log({f'Global Train/{k}': v}, commit=False)
            elif 'test' in k:
                self._wandb.log({f'Global Test/{k}': v}, commit=False)
        # Empty log commits everything buffered above in one step.
        self._wandb.log({})
    def log_checkpoints(self):
        """Upload the whole output directory as a W&B model artifact."""
        output_dir = self.args.output_dir
        model_artifact = self._wandb.Artifact(
            self._wandb.run.id + "_model", type="model"
        )
        model_artifact.add_dir(output_dir)
        self._wandb.log_artifact(model_artifact, aliases=["latest", "best"])
    def set_steps(self):
        """Declare which metric drives the x-axis of each metric family."""
        # Set global training step
        self._wandb.define_metric('Rank-0 Batch Wise/*', step_metric='Rank-0 Batch Wise/global_train_step')
        # Set epoch-wise step
        self._wandb.define_metric('Global Train/*', step_metric='epoch')
        self._wandb.define_metric('Global Test/*', step_metric='epoch')
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins
    original_print = builtins.print

    def masked_print(*args, **kwargs):
        # A 'force=True' keyword lets a non-master rank print anyway.
        forced = kwargs.pop('force', False)
        if forced or is_master:
            original_print(*args, **kwargs)

    builtins.print = masked_print
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()


def get_world_size():
    """Number of participating processes (1 when not distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1


def get_rank():
    """This process's rank (0 when not distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    # Only rank 0 writes, so concurrent ranks never clobber the same file.
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Detect the launch environment (ITP/OpenMPI, torchrun-style env vars, or
    SLURM), populate args.rank/world_size/gpu/distributed accordingly and
    initialize the NCCL process group. Falls back to non-distributed mode."""
    if args.dist_on_itp:
        # OpenMPI-launched job: translate OMPI env vars to the standard ones.
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
        # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # torchrun / torch.distributed.launch style environment.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # SLURM job: derive rank/gpu and resolve the master address via scontrol.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
        # by jqxu
        world_size = int(os.environ["SLURM_NTASKS"])
        args.world_size = world_size
        local_size = int(os.environ["SLURM_NTASKS_PER_NODE"])
        args.local_size = local_size
        if "MASTER_PORT" not in os.environ:
            os.environ["MASTER_PORT"] = "22110"
        node_list = os.environ["SLURM_NODELIST"]
        addr = subprocess.getoutput(f"scontrol show hostname {node_list} | head -n1")
        if "MASTER_ADDR" not in os.environ:
            os.environ["MASTER_ADDR"] = addr
        os.environ['RANK'] = str(args.rank)
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['LOCAL_SIZE'] = str(args.local_size)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(
        args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # Silence print() on all non-master ranks.
    setup_for_distributed(args.rank == 0)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
    """Non-strictly load *state_dict* into *model*, reporting key mismatches.

    Missing keys whose name contains any '|'-separated token of
    *ignore_missing* are reported separately instead of being warned about.
    """
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def _recurse(module, mod_prefix=''):
        # Per-module metadata is keyed by the prefix without the trailing dot.
        local_meta = {} if metadata is None else metadata.get(
            mod_prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, mod_prefix, local_meta, True, missing_keys, unexpected_keys, error_msgs)
        for child_name, child in module._modules.items():
            if child is not None:
                _recurse(child, mod_prefix + child_name + '.')

    _recurse(model, mod_prefix=prefix)

    ignore_tokens = ignore_missing.split('|')
    warn_missing_keys = [
        k for k in missing_keys if not any(tok in k for tok in ignore_tokens)]
    ignore_missing_keys = [
        k for k in missing_keys if any(tok in k for tok in ignore_tokens)]
    missing_keys = warn_missing_keys

    if len(missing_keys) > 0:
        print("Weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, missing_keys))
    if len(unexpected_keys) > 0:
        print("Weights from pretrained model not used in {}: {}".format(
            model.__class__.__name__, unexpected_keys))
    if len(ignore_missing_keys) > 0:
        print("Ignored weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, ignore_missing_keys))
    if len(error_msgs) > 0:
        print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
    """AMP loss scaler that also returns the gradient norm on each update."""
    state_dict_key = "amp_scaler"

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        """Backward with scaling; optionally clip, step and update the scaler.

        Returns the (pre-clip or total) grad norm, or None when update_grad
        is False (gradient-accumulation step).
        """
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if not update_grad:
            return None
        if clip_grad is not None:
            assert parameters is not None
            # unscale the gradients of optimizer's assigned params in-place
            self._scaler.unscale_(optimizer)
            norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
        else:
            self._scaler.unscale_(optimizer)
            norm = get_grad_norm_(parameters)
        self._scaler.step(optimizer)
        self._scaler.update()
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Total *norm_type*-norm of gradients over *parameters*.

    Parameters without gradients are skipped; returns tensor(0.) when none
    of them has a gradient.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad.detach() for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if not grads:
        return torch.tensor(0.)
    device = grads[0].device
    if norm_type == inf:
        # Infinity norm: the largest absolute gradient entry.
        return max(g.abs().max().to(device) for g in grads)
    return torch.norm(
        torch.stack([torch.norm(g, norm_type).to(device) for g in grads]),
        norm_type)
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
                     start_warmup_value=0, warmup_steps=-1):
    """Per-iteration schedule: optional linear warmup, then cosine decay.

    Returns a numpy array of length ``epochs * niter_per_ep``.
    """
    # An explicit warmup_steps overrides the epoch-based warmup length.
    warmup_iters = warmup_steps if warmup_steps > 0 else warmup_epochs * niter_per_ep
    print("Set warmup steps = %d" % warmup_iters)
    warmup_schedule = np.array([])
    if warmup_epochs > 0:
        warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)

    decay_steps = np.arange(epochs * niter_per_ep - warmup_iters)
    cosine_part = np.array(
        [final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * step / (len(decay_steps))))
         for step in decay_steps])

    schedule = np.concatenate((warmup_schedule, cosine_part))
    assert len(schedule) == epochs * niter_per_ep
    return schedule
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Save a training checkpoint on the master rank and prune an old one.

    Writes model/optimizer/scaler state (plus EMA weights when given) to
    ``checkpoint-<epoch>.pth`` in args.output_dir, then removes the
    checkpoint that fell out of the retention window — except every 50th
    epoch when args.save_interval_ckpt is set.
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
    for checkpoint_path in checkpoint_paths:
        to_save = {
            'model': model_without_ddp.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch,
            'scaler': loss_scaler.state_dict(),
            'args': args,
        }
        if model_ema is not None:
            to_save['model_ema'] = get_state_dict(model_ema)
        # Only rank 0 writes, so ranks never clobber each other's file.
        save_on_master(to_save, checkpoint_path)
    # Retention: delete the checkpoint save_ckpt_num*save_ckpt_freq epochs old
    # (skipped for string epochs such as 'best').
    if is_main_process() and isinstance(epoch, int):
        to_del = epoch - args.save_ckpt_num * args.save_ckpt_freq
        old_ckpt = output_dir / ('checkpoint-%s.pth' % to_del)
        if os.path.exists(old_ckpt):
            # Keep interval checkpoints whose (epoch+1) is a multiple of 50.
            if args.save_interval_ckpt and (to_del + 1) % 50 == 0:
                pass
            else:
                os.remove(old_ckpt)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Resume training state from args.resume, or auto-discover the latest
    ``checkpoint-N.pth`` in args.output_dir when args.auto_resume is set and
    args.resume is empty. Restores model weights and, when present, the
    optimizer, start epoch, EMA weights and AMP scaler state."""
    output_dir = Path(args.output_dir)
    if args.auto_resume and len(args.resume) == 0:
        import glob
        all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
        latest_ckpt = -1
        # Pick the numerically largest epoch among checkpoint-<N>.pth files.
        for ckpt in all_checkpoints:
            t = ckpt.split('-')[-1].split('.')[0]
            if t.isdigit():
                latest_ckpt = max(int(t), latest_ckpt)
        if latest_ckpt >= 0:
            args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
        print("Auto resume checkpoint: %s" % args.resume, file=sys.stderr)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        print("Resume checkpoint %s" % args.resume, file=sys.stderr)
        # Optimizer/epoch/EMA/scaler are only restored for full training
        # checkpoints (and only when the caller passed an optimizer).
        if 'optimizer' in checkpoint and 'epoch' in checkpoint and optimizer is not None:
            optimizer.load_state_dict(checkpoint['optimizer'])
            if not isinstance(checkpoint['epoch'], str): # does not support resuming with 'best', 'best-ema'
                args.start_epoch = checkpoint['epoch'] + 1
            else:
                assert args.eval, 'Does not support resuming with checkpoint-best'
            if hasattr(args, 'model_ema') and args.model_ema:
                # Fall back to plain weights when no EMA state was saved.
                if 'model_ema' in checkpoint.keys():
                    model_ema.ema.load_state_dict(checkpoint['model_ema'])
                else:
                    model_ema.ema.load_state_dict(checkpoint['model'])
            if 'scaler' in checkpoint and loss_scaler is not None:
                loss_scaler.load_state_dict(checkpoint['scaler'])
            print("With optim & sched!", file=sys.stderr)
def set_seed(seed):
    """Seed python's random module, torch CPU and all CUDA devices."""
    import random
    import torch
    for seed_fn in (random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
| 18,383 | 32.981516 | 128 | py |
STM-Evaluation | STM-Evaluation-main/classification/datasets.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import os.path as osp
from torchvision import datasets, transforms
import torch
import math
from tqdm import tqdm
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import create_transform
import mmcv
from mmcv.fileio import FileClient
import json
from PIL import Image
from abc import abstractmethod
import torch.utils.data as data
import logging
_logger = logging.getLogger(__name__)
_ERROR_RETRY = 50
def build_dataset(is_train, args):
    """Create the (dataset, nb_classes) pair selected by args.data_set."""
    transform = build_transform(is_train, args)

    # Echo the resolved transform pipeline(s) for the log.
    print("Transform = ")
    if isinstance(transform, tuple):
        for sub in transform:
            print(" - - - - - - - - - - ")
            for step in sub.transforms:
                print(step)
    else:
        for step in transform.transforms:
            print(step)
    print("---------------------------")

    if args.data_set == 'CIFAR':
        dataset = datasets.CIFAR100(args.data_path, train=is_train,
                                    transform=transform, download=True)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        print("reading from datapath", args.data_path)
        split_root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = datasets.ImageFolder(split_root, transform=transform)
        nb_classes = 1000
    elif args.data_set == "image_folder":
        split_root = args.data_path if is_train else args.eval_data_path
        dataset = datasets.ImageFolder(split_root, transform=transform)
        nb_classes = len(dataset.classes)
        assert len(dataset.class_to_idx) == nb_classes
    elif args.data_set == "IMNET1k":
        # Ceph/disk-backed ImageNet-1k reader with optional in-memory caching.
        dataset = ImageCephDataset(args.data_path,
                                   'train' if is_train else 'val',
                                   transform=transform,
                                   on_memory=args.data_on_memory)
        nb_classes = 1000
    else:
        raise NotImplementedError()
    print(f"Number of the class = {nb_classes}")

    return dataset, nb_classes
def build_transform(is_train, args):
    """Build the timm train augmentation or the eval resize/crop pipeline."""
    resize_im = args.input_size > 32
    if args.imagenet_default_mean_and_std:
        mean, std = IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
    else:
        mean, std = IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD

    if is_train:
        # this should always dispatch to transforms_imagenet_train
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
            mean=mean,
            std=std,
        )
        if not resize_im:
            # Tiny images: replace the resize-crop with padded random crop.
            transform.transforms[0] = transforms.RandomCrop(
                args.input_size, padding=4)
        return transform

    steps = []
    if resize_im:
        if args.input_size >= 384:
            # warping (no cropping) when evaluated at 384 or larger
            steps.append(
                transforms.Resize((args.input_size, args.input_size),
                                  interpolation=transforms.InterpolationMode.BICUBIC),
            )
            print(f"Warping {args.input_size} size input images...")
        else:
            if args.crop_pct is None:
                args.crop_pct = 224 / 256
            resize_size = int(args.input_size / args.crop_pct)
            # to maintain same ratio w.r.t. 224 images
            steps.append(
                transforms.Resize(resize_size, interpolation=transforms.InterpolationMode.BICUBIC),
            )
        steps.append(transforms.CenterCrop(args.input_size))

    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize(mean, std))
    return transforms.Compose(steps)
class ImageCephDataset(data.Dataset):
    """Dataset that reads ImageNet-style (image, label) pairs via a parser.

    When no parser is supplied, a ParserCephImage is built from the 'meta'
    annotation folder under *root*. A None label is mapped to -1.
    """

    def __init__(
            self,
            root,
            split,
            parser=None,
            transform=None,
            target_transform=None,
            on_memory=False,
    ):
        if '22k' in root and split == 'train':
            raise NotImplementedError()
        else:
            annotation_root = osp.join(root, 'meta')
        if parser is None or isinstance(parser, str):
            parser = ParserCephImage(root=root, split=split,
                                     annotation_root=annotation_root,
                                     on_memory=on_memory)
        self.parser = parser
        self.transform = transform
        self.target_transform = target_transform
        self._consecutive_errors = 0

    def __getitem__(self, index):
        sample, label = self.parser[index]
        self._consecutive_errors = 0
        if self.transform is not None:
            sample = self.transform(sample)
        if label is None:
            label = -1  # unlabeled sample
        elif self.target_transform is not None:
            label = self.target_transform(label)
        return sample, label

    def __len__(self):
        return len(self.parser)

    def filename(self, index, basename=False, absolute=False):
        return self.parser.filename(index, basename, absolute)

    def filenames(self, basename=False, absolute=False):
        return self.parser.filenames(basename, absolute)
class Parser:
    """Minimal parser interface; subclasses implement ``_filename``."""

    def __init__(self):
        pass

    @abstractmethod
    def _filename(self, index, basename=False, absolute=False):
        pass

    def filename(self, index, basename=False, absolute=False):
        """Filename of sample *index*, delegated to the subclass hook."""
        return self._filename(index, basename=basename, absolute=absolute)

    def filenames(self, basename=False, absolute=False):
        """Filenames of all samples (requires the subclass to define len)."""
        return [self._filename(idx, basename=basename, absolute=absolute)
                for idx in range(len(self))]
class ParserCephImage(Parser):
    """Parser reading 'path label' annotation lines and image bytes via mmcv
    FileClient — from the 'petrel' (Ceph) backend for 22k roots, otherwise
    from local disk — with optional sharded in-memory caching."""
    def __init__(
            self,
            root,
            split,
            annotation_root,
            on_memory=False,
            **kwargs):
        # **kwargs are forwarded verbatim to mmcv FileClient.
        super().__init__()
        self.file_client = None
        self.kwargs = kwargs
        self.split = split
        self.root = root
        if '22k' in root:
            # ImageNet-22k: labels are class names mapped through a json table.
            self.io_backend = 'petrel'
            with open(osp.join(annotation_root, '22k_class_to_idx.json'), 'r') as f:
                self.class_to_idx = json.loads(f.read())
            with open(osp.join(annotation_root, '22k_label.txt'), 'r') as f:
                self.samples = f.read().splitlines()
        else:
            # 1k-style: labels in the annotation file are already integers.
            self.io_backend = 'disk'
            self.class_to_idx = None
            with open(osp.join(annotation_root, f'{split}.txt'), 'r') as f:
                self.samples = f.read().splitlines()
        local_rank = None
        local_size = None
        self._consecutive_errors = 0
        self.on_memory = on_memory
        if on_memory:
            # Pre-load this rank's shard of raw image bytes into self.holder.
            self.holder = {}
            if local_rank is None:
                local_rank = int(os.environ.get('LOCAL_RANK', 0))
            if local_size is None:
                local_size = int(os.environ.get('LOCAL_SIZE', 1))
            self.local_rank = local_rank
            self.local_size = local_size
            self.rank = int(os.environ.get('RANK', 0))
            self.world_size = int(os.environ.get('WORLD_SIZE', 1))
            self.num_replicas = int(os.environ.get('WORLD_SIZE', 1))
            self.num_parts = local_size
            self.num_samples = int(
                math.ceil(len(self.samples) * 1.0 / self.num_replicas))
            self.total_size = self.num_samples * self.num_replicas
            self.total_size_parts = self.num_samples * self.num_replicas // self.num_parts
            self.load_onto_memory_v2()
    def load_onto_memory_v2(self):
        """Cache this rank's deterministic shard of raw image bytes in RAM."""
        # print("Loading images onto memory...", self.local_rank, self.local_size)
        # Fixed seed so every process derives the same global permutation.
        t = torch.Generator()
        t.manual_seed(0)
        indices = torch.randperm(len(self.samples), generator=t).tolist()
        # indices = range(len(self.samples))
        indices = [i for i in indices if i % self.num_parts == self.local_rank]
        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size_parts - len(indices))]
        assert len(indices) == self.total_size_parts, f'{len(indices)}, {self.total_size_parts}'
        # subsample
        indices = indices[self.rank //
                          self.num_parts:self.total_size_parts:self.num_replicas // self.num_parts]
        assert len(indices) == self.num_samples, f'{len(indices)}, {self.num_samples}'
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        for index in tqdm(indices):
            if index % self.local_size != self.local_rank:
                continue
            path, _ = self.samples[index].split(' ')
            path = osp.join(self.root, self.split, path)
            img_bytes = self.file_client.get(path)
            self.holder[path] = img_bytes
        print("Loading complete!")
    def __getitem__(self, index):
        """Return (PIL image, int label); retries the next index on read errors."""
        if self.file_client is None:
            # Created lazily so dataloader workers each get their own client.
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        filepath, target = self.samples[index].split(' ')
        filepath = osp.join(self.root, self.split, filepath)
        try:
            if self.on_memory:
                img_bytes = self.holder[filepath]
            else:
                # pass
                img_bytes = self.file_client.get(filepath)
            # mmcv decodes BGR; [:, :, ::-1] flips to RGB for PIL.
            img = mmcv.imfrombytes(img_bytes)[:, :, ::-1]
        except Exception as e:
            # Skip corrupt samples, but give up after _ERROR_RETRY in a row.
            _logger.warning(
                f'Skipped sample (index {index}, file {filepath}). {str(e)}')
            self._consecutive_errors += 1
            if self._consecutive_errors < _ERROR_RETRY:
                return self.__getitem__((index + 1) % len(self))
            else:
                raise e
        self._consecutive_errors = 0
        img = Image.fromarray(img)
        if self.class_to_idx is not None:
            target = self.class_to_idx[target]
        else:
            target = int(target)
        return img, target
    def __len__(self):
        return len(self.samples)
    def _filename(self, index, basename=False, absolute=False):
        # NOTE(review): basename/absolute flags are accepted but ignored here.
        filename, _ = self.samples[index].split(' ')
        filename = osp.join(self.root, filename)
        return filename
| 10,490 | 34.562712 | 109 | py |
STM-Evaluation | STM-Evaluation-main/classification/samplers.py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for distributed,
    with repeated augmentation.
    It ensures that different each augmented version of a sample will be visible to a
    different process (GPU)
    Heavily based on torch.utils.data.DistributedSampler
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, num_repeats: int = 3):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        if num_repeats < 1:
            raise ValueError("num_repeats should be greater than 0")
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.num_repeats = num_repeats
        self.epoch = 0
        # Each sample appears num_repeats times, spread across replicas.
        self.num_samples = int(math.ceil(len(self.dataset) * self.num_repeats / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
        # Truncate to a multiple of 256 per epoch before splitting by replica.
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # deterministically shuffle based on epoch
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen)
        else:
            order = torch.arange(start=0, end=len(self.dataset))

        # Repeat each index num_repeats times, then pad so every replica gets
        # the same count.
        repeated = torch.repeat_interleave(order, repeats=self.num_repeats, dim=0).tolist()
        shortfall: int = self.total_size - len(repeated)
        if shortfall > 0:
            repeated += repeated[:shortfall]
        assert len(repeated) == self.total_size

        # Strided subsample: consecutive repeats land on different replicas.
        mine = repeated[self.rank:self.total_size:self.num_replicas]
        assert len(mine) == self.num_samples

        return iter(mine[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
| 2,584 | 38.769231 | 103 | py |
STM-Evaluation | STM-Evaluation-main/classification/optim_factory.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.lamb import Lamb
from timm.optim.nadam import Nadam
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_convnext(var_name):
    """
    Divide [3, 3, 27, 3] layers into 12 groups; each group is three
    consecutive blocks, including possible neighboring downsample layers;
    adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py

    Returns the layer-group id for *var_name* (0..12 for backbone params,
    13 for everything else such as the head).

    Fix: the original left ``layer_id`` unbound for unexpected stage
    indices, raising an opaque UnboundLocalError; now raises a clear
    ValueError instead.
    """
    num_max_layer = 12
    if var_name.startswith("downsample_layers"):
        stage_id = int(var_name.split('.')[1])
        # Stem shares group 0; downsamples 1 and 2 join the following stage's
        # first group; the final downsample joins the last group.
        if stage_id == 0:
            return 0
        if stage_id in (1, 2):
            return stage_id + 1
        if stage_id == 3:
            return 12
        raise ValueError(f"unexpected downsample stage in '{var_name}'")
    elif var_name.startswith("stages"):
        stage_id = int(var_name.split('.')[1])
        block_id = int(var_name.split('.')[2])
        if stage_id in (0, 1):
            return stage_id + 1
        if stage_id == 2:
            # Stage 2 has 27 blocks -> nine groups of three (groups 3..11).
            return 3 + block_id // 3
        if stage_id == 3:
            return 12
        raise ValueError(f"unexpected stage in '{var_name}'")
    else:
        # Head, final norm, etc. all fall past the last backbone group.
        return num_max_layer + 1
class LayerDecayValueAssigner(object):
    """Maps a parameter name to its layer-wise learning-rate scale."""

    def __init__(self, values):
        # values[i] is the lr scale assigned to layer-group i.
        self.values = values

    def get_scale(self, layer_id):
        """Learning-rate multiplier for the given layer-group id."""
        return self.values[layer_id]

    def get_layer_id(self, var_name):
        """Layer-group id of a parameter name (ConvNeXt grouping)."""
        return get_num_layer_for_convnext(var_name)
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
    """Split trainable parameters into optimizer groups.

    1-d tensors, ``.bias`` parameters and names in ``skip_list`` get zero
    weight decay.  When ``get_num_layer``/``get_layer_scale`` are supplied,
    groups are further split per layer and carry an ``lr_scale`` entry
    (layer-wise lr decay).  Returns the list of group dicts for the optimizer.
    """
    group_specs = {}   # group name -> spec holding parameter *names* (for logging)
    group_tensors = {}  # group name -> spec holding the actual tensors
    for param_name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        is_no_decay = (len(param.shape) == 1
                       or param_name.endswith(".bias")
                       or param_name in skip_list)
        group_name = "no_decay" if is_no_decay else "decay"
        this_weight_decay = 0. if is_no_decay else weight_decay
        layer_id = None
        if get_num_layer is not None:
            layer_id = get_num_layer(param_name)
            group_name = "layer_%d_%s" % (layer_id, group_name)
        if group_name not in group_specs:
            scale = get_layer_scale(layer_id) if get_layer_scale is not None else 1.
            group_specs[group_name] = {
                "weight_decay": this_weight_decay,
                "params": [],
                "lr_scale": scale
            }
            group_tensors[group_name] = {
                "weight_decay": this_weight_decay,
                "params": [],
                "lr_scale": scale
            }
        group_tensors[group_name]["params"].append(param)
        group_specs[group_name]["params"].append(param_name)
    print("Param groups = %s" % json.dumps(group_specs, indent=2))
    return list(group_tensors.values())
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
    """Build an optimizer selected by ``args.opt`` (timm-style factory).

    Args:
        args: namespace with at least ``opt``, ``lr``, ``weight_decay``; may
            also carry ``momentum``, ``opt_eps``, ``opt_betas``.
        model: the model whose parameters are optimized.
        get_num_layer / get_layer_scale: optional callables enabling
            layer-wise lr decay (see ``get_parameter_groups``).
        filter_bias_and_bn: when True, 1-d params / biases (and names from
            ``skip_list`` or ``model.no_weight_decay()``) go into a
            zero-weight-decay group.
        skip_list: explicit parameter names to exclude from weight decay.

    Returns:
        A torch / timm / apex optimizer instance (optionally wrapped in
        Lookahead when ``args.opt`` is prefixed with ``lookahead_``).

    Raises:
        ValueError: if ``args.opt`` names an unknown optimizer.
    """
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    # if weight_decay and filter_bias_and_bn:
    if filter_bias_and_bn:
        skip = {}
        if skip_list is not None:
            skip = skip_list
        elif hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
        # decay is now carried per-group, so zero the optimizer-level value
        weight_decay = 0.
    else:
        parameters = model.parameters()
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
        opt_args['eps'] = args.opt_eps
    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
        opt_args['betas'] = args.opt_betas
    # "lookahead_adamw" -> wrapper prefix + base optimizer name
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'lamb':
        optimizer = Lamb(parameters, **opt_args)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        if not args.lr:
            opt_args['lr'] = None
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    # elif opt_lower == 'novograd':
    #     optimizer = NovoGrad(parameters, **opt_args)
    elif opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        # FIX: was `assert False and "Invalid optimizer"`, which never carried
        # the message (`False and x` is just False) and disappeared entirely
        # under `python -O`, leading to an UnboundLocalError below.
        raise ValueError("Invalid optimizer: %s" % args.opt)
    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)
    return optimizer
| 7,412 | 36.251256 | 117 | py |
STM-Evaluation | STM-Evaluation-main/classification/tools/variance_transforms.py | """
data transform modules for invariance analysis
"""
import torch
from torchvision import transforms
from torchvision.transforms.functional import rotate
import numpy as np
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from PIL import Image
def standard_transform(img_size=224, crop_ratio=0.875):
    """Standard ImageNet eval pipeline: resize -> center crop -> tensor -> normalize."""
    resize_size = int(img_size / crop_ratio)
    pipeline = [
        transforms.Resize(resize_size, interpolation=transforms.InterpolationMode.BICUBIC),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
    ]
    return transforms.Compose(pipeline)
def position_jitter_transform(img_size=224, crop_ratio=0.875, jitter_strength=0):
    """Build a PositionJitterTransform; resize size follows ``crop_ratio``."""
    resizing_size = int(img_size / crop_ratio)
    return PositionJitterTransform(input_size=img_size,
                                   resizing_size=resizing_size,
                                   jitter_strength=jitter_strength)
def rotate_transform(img_size=224, crop_ratio=0.875, angle=0, pre_rotate=False):
    """Build a RotateTransform; resize size follows ``crop_ratio``."""
    resizing_size = int(img_size / crop_ratio)
    return RotateTransform(input_size=img_size,
                           resizing_size=resizing_size,
                           angle=angle,
                           pre_rotate=pre_rotate)
class PositionJitterTransform:
    """Eval transform for translation-invariance analysis.

    Resizes the image, then center-crops with the crop window shifted by
    ``jitter_strength`` pixels in one of 8 randomly chosen directions, then
    normalizes.  If the shifted window leaves the image, the crop is
    zero-padded back to ``input_size`` x ``input_size``.
    """

    def __init__(self, jitter_strength=0, resizing_size=256, input_size=224):
        self.resizing_size = resizing_size
        self.input_size = input_size
        self.jitter_strength = jitter_strength
        self.resize = transforms.Resize(self.resizing_size,
                                        interpolation=transforms.InterpolationMode.BICUBIC)
        self.to_tensor = transforms.ToTensor()
        self.norm = transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)

    def __call__(self, image):
        resized_image = self.resize(image)
        resized_image = self.to_tensor(resized_image)
        resized_image = self.positon_jitter_crop(resized_image)
        resized_image = self.norm(resized_image)
        return resized_image

    def positon_jitter_crop(self, image):
        # NOTE: method name keeps the original (misspelled) public spelling so
        # existing callers keep working.
        # FIX: was np.random.randint(7), which could never select mode 7 even
        # though 8 directions (modes 0-7) are implemented below.
        mode = np.random.randint(8)
        center = [image.shape[-2] // 2, image.shape[-1] // 2]
        h, w = (self.input_size // 2, self.input_size // 2)
        # randomly choose one direction from 8 directions.
        if mode == 0:
            center[0] = center[0] + self.jitter_strength
        elif mode == 1:
            center[1] = center[1] + self.jitter_strength
        elif mode == 2:
            center[0] = center[0] - self.jitter_strength
        elif mode == 3:
            center[1] = center[1] - self.jitter_strength
        elif mode == 4:
            center[0] = center[0] + self.jitter_strength
            center[1] = center[1] + self.jitter_strength
        elif mode == 5:
            center[0] = center[0] - self.jitter_strength
            center[1] = center[1] + self.jitter_strength
        elif mode == 6:
            center[0] = center[0] - self.jitter_strength
            center[1] = center[1] - self.jitter_strength
        elif mode == 7:
            center[0] = center[0] + self.jitter_strength
            center[1] = center[1] - self.jitter_strength
        # clamp the crop window to the image bounds
        top = min(center[0]+h, image.shape[-2])
        bottom = max(center[0]-h, 0)
        right = min(center[1]+w, image.shape[-1])
        left = max(center[1]-w, 0)
        cropped_image = image[:, bottom:top, left:right]
        image_height, image_width = cropped_image.shape[-2:]
        crop_height, crop_width = self.input_size, self.input_size
        # FIX: was comparing shape[0] (channels) and shape[1]; compare the
        # spatial dims so padding is only applied when actually needed.
        if image_height < crop_height or image_width < crop_width:
            padding_lrtb = [
                (crop_width - image_width) // 2 if crop_width > image_width else 0,
                (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
                (crop_height - image_height) // 2 if crop_height > image_height else 0,
                (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
            ]
            cropped_image = torch.nn.functional.pad(cropped_image, padding_lrtb)
        return cropped_image
class RotateTransform:
    """Resize/crop/normalize pipeline that rotates by a fixed angle.

    When ``pre_rotate`` is True the rotation is applied before the center
    crop (no border padding visible); otherwise after it.
    """

    def __init__(self, angle=0, resizing_size=256, input_size=224, pre_rotate=False):
        self.angle = angle
        self.resizing_size = resizing_size
        self.input_size = input_size
        self.pre_rotate = pre_rotate
        self.resize = transforms.Resize(self.resizing_size,
                                        interpolation=transforms.InterpolationMode.BICUBIC)
        self.crop = transforms.CenterCrop(input_size)
        self.to_tensor = transforms.ToTensor()
        self.norm = transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)

    def __call__(self, image):
        out = self.resize(image)
        if self.pre_rotate:
            out = self.crop(self.rotate(out))
        else:
            out = self.rotate(self.crop(out))
        out = self.to_tensor(out)
        return self.norm(out)

    def rotate(self, image):
        return rotate(image,
                      angle=self.angle,
                      interpolation=transforms.InterpolationMode.BICUBIC)
| 5,303 | 39.181818 | 96 | py |
STM-Evaluation | STM-Evaluation-main/classification/models/meta_arch.py | import torch
import torch.nn.functional as F
from torch import nn
from timm.models.layers import to_2tuple, trunc_normal_
class LayerNorm2d(nn.LayerNorm):
    """ LayerNorm for channels of '2D' spatial NCHW tensors """

    def __init__(self, num_channels, eps=1e-6, affine=True):
        super().__init__(num_channels, eps=eps, elementwise_affine=affine)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # NCHW -> NHWC, normalize over the trailing channel dim, back to NCHW.
        nhwc = x.permute(0, 2, 3, 1).contiguous()
        normed = F.layer_norm(nhwc, self.normalized_shape, self.weight, self.bias, self.eps)
        return normed.permute(0, 3, 1, 2).contiguous()
class Stem(nn.Module):
    """Convolutional stem: two stride-2 3x3 convs downsample the input 4x."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 img_size,
                 norm_layer,
                 act_layer,
                 ratio=0.5,
                 **kwargs):
        super().__init__()
        img_size = to_2tuple(img_size)
        # spatial size after the 4x reduction (H/4, W/4)
        self.grid_size = (img_size[0] // 4, img_size[1] // 4)
        mid_channels = int(out_channels * ratio)
        # input_shape: B x C x H x W
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels,
                      kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            norm_layer(mid_channels),
            act_layer(),
            nn.Conv2d(mid_channels, out_channels,
                      kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            norm_layer(out_channels)
        )

    def forward(self, x):
        return self.stem(x)
class DownsampleLayer(nn.Module):
    """Between-stage reduction: stride-2 3x3 conv followed by a norm layer."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_layer,
                 **kwargs):
        super().__init__()
        conv = nn.Conv2d(in_channels, out_channels,
                         kernel_size=(3, 3),
                         stride=(2, 2),
                         padding=(1, 1),
                         bias=False)
        # input_shape: B x C x H x W
        self.reduction = nn.Sequential(conv, norm_layer(out_channels))

    def forward(self, x):
        return self.reduction(x)
class MetaArch(nn.Module):
    """Generic 4-stage hierarchical backbone ("meta architecture").

    The stem, per-stage blocks and between-stage downsample modules are
    injected via ``stem_type``, ``block_type`` and ``downsample_type``, so
    the same skeleton can host ConvNeXt/Swin/PVT/Halo-style blocks.
    Input is NCHW; stage ``i`` runs at 1/4 * 1/2**i of the input resolution
    with ``dims[i]`` channels.  The head is conv-expand (optional) ->
    global average pool -> linear classifier.
    """
    def __init__(self,
                 img_size=224,
                 in_channels=3,
                 num_classes=1000,
                 depths=(3, 3, 9, 3),
                 dims=(96, 192, 384, 768),
                 drop_path_rate=0.,
                 layer_scale_init_value=1e-6,
                 stem_type=Stem,
                 stem_kwargs=None,
                 block_type=None,
                 block_kwargs=None,
                 downsample_type=DownsampleLayer,
                 downsample_kwargs=None,
                 extra_transform=True,
                 extra_transform_ratio=1.5,
                 norm_layer=LayerNorm2d,
                 norm_every_stage=True,
                 norm_after_avg=False,
                 act_layer=nn.GELU,
                 forward_kwargs=None,
                 **kwargs,
                 ):
        super().__init__()
        stem_kwargs = stem_kwargs or {}
        block_kwargs = block_kwargs or {}
        downsample_kwargs = downsample_kwargs or {}
        forward_kwargs = forward_kwargs or {}
        self.depths = depths
        self.block_type = block_type
        self.forward_kwargs = forward_kwargs
        # stem + downsample_layers: entry 0 is the stem, entries 1..3 the
        # between-stage reductions (4 "downsample" modules for 4 stages).
        stem = stem_type(in_channels=in_channels,
                         out_channels=dims[0],
                         img_size=img_size,
                         norm_layer=norm_layer,
                         norm_first=False,
                         act_layer=act_layer,
                         **stem_kwargs)
        # (H, W) after the stem
        self.patch_grid = stem.grid_size
        self.downsample_layers = nn.ModuleList([stem])
        for i in range(3):
            self.downsample_layers.append(downsample_type(in_channels=dims[i],
                                                          out_channels=dims[i+1],
                                                          norm_layer=norm_layer,
                                                          norm_first=True,
                                                          img_size=(self.patch_grid[0] // (2 ** i), self.patch_grid[1] // (2 ** i)),
                                                          **downsample_kwargs))
        # blocks; drop-path rate increases linearly over the total depth
        cur = 0
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        self.stages = nn.ModuleList()
        self.stage_norms = nn.ModuleList()
        for i, (depth, dim) in enumerate(zip(depths, dims)):
            self.stages.append(nn.Sequential(
                *[block_type(dim=dim,
                             drop_path=dp_rates[cur + j],
                             stage=i,
                             depth=j,
                             total_depth=cur+j,
                             input_resolution=(self.patch_grid[0] // (2 ** i), self.patch_grid[1] // (2 ** i)),
                             layer_scale_init_value=layer_scale_init_value,
                             **block_kwargs)
                  for j in range(depth)]
            ))
            self.stage_norms.append(norm_layer(dim) if norm_every_stage else nn.Identity())
            cur += depths[i]
        # when neither per-stage nor post-pool norm is used, normalize once here
        self.stage_end_norm = nn.Identity() if norm_every_stage or norm_after_avg else norm_layer(dims[-1])
        # optional 1x1 conv channel expansion before pooling
        self.conv_head = nn.Sequential(
            nn.Conv2d(dims[-1], int(dims[-1] * extra_transform_ratio), 1, 1, 0, bias=False),
            nn.BatchNorm2d(int(dims[-1] * extra_transform_ratio)),
            act_layer()
        ) if extra_transform else nn.Identity()
        features = int(dims[-1] * extra_transform_ratio) if extra_transform else dims[-1]
        self.avg_head = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            norm_layer(features) if norm_after_avg else nn.Identity(),
            nn.Flatten(1),
        )
        if num_classes > 0:
            self.head = nn.Linear(features, num_classes)
        else:
            self.head = nn.Identity()
        self.apply(self._init_weights)
    @ torch.jit.ignore
    def _init_weights(self, m):
        # trunc-normal init for conv/linear weights, zero bias
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
    @ torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names to exclude from weight decay (position/relative-bias tables)."""
        # from swin v1
        no_weight_decay = {'absolute_pos_embed'}
        for name, _ in self.named_parameters():
            if 'relative_position_bias_table' in name:
                no_weight_decay.add(name)
        return no_weight_decay
    def forward_features(self, x):
        """Run stem + all stages and return the pooled feature vector."""
        # some block types precompute per-stage auxiliary inputs from the image
        extra_inputs = None
        if hasattr(self.block_type, 'extra_inputs'):  # dcn_v3
            extra_inputs = self.block_type.extra_inputs(x, **self.forward_kwargs)
        # shape: (B, C, H, W)
        for i in range(len(self.depths)):
            x = self.downsample_layers[i](x)
            if hasattr(self.block_type, 'pre_stage_transform'):  # halonet
                x = self.block_type.pre_stage_transform(x)
            # blocks that take extra inputs receive (x, extras) tuples
            x = self.stages[i](x if extra_inputs is None else (x, extra_inputs[i]))
            if hasattr(self.block_type, 'post_stage_transform'):
                x = self.block_type.post_stage_transform(x)
            x = x if extra_inputs is None else x[0]
            x = self.stage_norms[i](x)
        x = self.stage_end_norm(x)
        x = self.conv_head(x)
        x = self.avg_head(x)
        return x
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
| 7,604 | 34.537383 | 132 | py |
STM-Evaluation | STM-Evaluation-main/classification/models/blocks/pvt.py | # --------------------------------------------------------
# Modified from original PVT block implementation.
# https://github.com/whai362/PVT
# --------------------------------------------------------
import torch
import torch.nn as nn
from timm.models.layers import DropPath, trunc_normal_
class Mlp(nn.Module):
    """Two-layer feed-forward block (fc -> act -> drop -> fc -> drop)."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """PVT multi-head self-attention with optional spatial reduction (SRA).

    When ``sr_ratio > 1``, keys/values are computed on a feature map that is
    downsampled by a strided ``sr_ratio`` x ``sr_ratio`` conv, reducing the
    K/V token count by ``sr_ratio**2``.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        heads = self.num_heads
        q = self.q(x).reshape(B, N, heads, C // heads).permute(0, 2, 1, 3)
        if self.sr_ratio > 1:
            # tokens -> feature map -> strided conv -> reduced tokens
            reduced = x.permute(0, 2, 1).reshape(B, C, H, W)
            reduced = self.sr(reduced).reshape(B, C, -1).permute(0, 2, 1)
            reduced = self.norm(reduced)
            kv = self.kv(reduced).reshape(B, -1, 2, heads, C // heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, -1, 2, heads, C // heads).permute(2, 0, 3, 1, 4)
        k, v = kv.unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        scores = self.attn_drop(scores.softmax(dim=-1))
        out = (scores @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
class PvtBlock(nn.Module):
    """PVT(-v1) transformer block: SRA attention + MLP with residuals.

    The first block of each stage (``depth == 0``) additionally owns a
    learnable absolute position embedding for that stage's resolution.
    Layer-scale factors ``gamma_1``/``gamma_2`` are applied *inside* the
    drop-path residual branches.  Input/output are NCHW feature maps.
    """
    def __init__(self,
                 dim,
                 drop_path,
                 layer_scale_init_value,
                 num_heads,
                 input_resolution,
                 stage,
                 depth,
                 mlp_ratios,
                 sr_ratios,
                 qkv_bias,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 **kwargs):
        super().__init__()
        self.pos_embed = None
        self.pos_drop = None
        # only the first block in a stage carries the position embedding
        if depth == 0:
            self.pos_embed = nn.Parameter(torch.zeros(1, dim, *input_resolution))
            self.pos_drop = nn.Dropout(p=drop)
            trunc_normal_(self.pos_embed, std=.02)
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim,
                              num_heads=num_heads[stage], qkv_bias=qkv_bias, qk_scale=qk_scale,
                              attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratios[stage])
        # layer scale; the scalar 1 fallback makes `gamma * x` a no-op
        self.gamma_1 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else 1
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratios[stage])
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.gamma_2 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else 1
    def forward(self, x):
        B, C, H, W = x.shape
        if self.pos_embed is not None:
            x = self.pos_drop(x + self.pos_embed)
        # NCHW -> (B, N, C) token sequence
        x = x.flatten(2).permute(0, 2, 1).contiguous()
        x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), H, W))
        x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        # tokens back to NCHW
        return x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
| 4,728 | 35.376923 | 112 | py |
STM-Evaluation | STM-Evaluation-main/classification/models/blocks/pvt_v2.py | # --------------------------------------------------------
# Modified from original PVT block v2 implementation.
# https://github.com/whai362/PVT
# --------------------------------------------------------
import math
import torch
from torch import nn
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
class Mlp(nn.Module):
    """PVTv2 feed-forward block: fc1 -> (ReLU if linear) -> depth-wise conv -> act -> fc2."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.linear = linear
        if self.linear:
            self.relu = nn.ReLU(inplace=True)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # timm-style init: trunc-normal linears, unit LayerNorm, He-init convs
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        x = self.fc1(x)
        if self.linear:
            x = self.relu(x)
        x = self.act(self.dwconv(x, H, W))
        x = self.drop(x)
        x = self.drop(self.fc2(x))
        return x
class Attention(nn.Module):
    """PVTv2 multi-head attention with spatial-reduction of keys/values.

    ``sr_ratio > 1`` reduces K/V tokens with a strided conv; ``linear=True``
    selects the "linear SRA" variant that pools K/V to a fixed 7x7 map
    (plus GELU), making attention cost independent of input resolution.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1, linear=False):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.linear = linear
        self.sr_ratio = sr_ratio
        if not linear:
            if sr_ratio > 1:
                # strided conv: K/V token count shrinks by sr_ratio**2
                self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
                self.norm = nn.LayerNorm(dim)
        else:
            # linear SRA: fixed 7x7 pooled K/V regardless of input size
            self.pool = nn.AdaptiveAvgPool2d(7)
            self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1)
            self.norm = nn.LayerNorm(dim)
            self.act = nn.GELU()
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # timm-style init: trunc-normal linears, unit LayerNorm, He-init convs
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
    def forward(self, x, H, W):
        # x: (B, N, C) token sequence at spatial size H x W
        B, N, C = x.shape
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        if not self.linear:
            if self.sr_ratio > 1:
                # tokens -> feature map -> strided conv -> reduced tokens
                x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
                x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
                x_ = self.norm(x_)
                kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            else:
                kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            # linear SRA path: pool to 7x7 before the 1x1 conv / norm / GELU
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(self.pool(x_)).reshape(B, C, -1).permute(0, 2, 1)
            x_ = self.norm(x_)
            x_ = self.act(x_)
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class PvtV2Block(nn.Module):
    """PVTv2 transformer block: SRA attention + conv-MLP with residuals.

    Operates on NCHW maps, internally flattening to a (B, N, C) token
    sequence.  Optional layer scale (``gamma_1``/``gamma_2``) is applied
    *after* drop-path, matching the original implementation.
    """

    def __init__(self, dim, drop_path, layer_scale_init_value, stage, num_heads,
                 mlp_ratios, sr_ratios, qkv_bias, qk_scale=None, drop=0., attn_drop=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm, linear=False,
                 **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads[stage], qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratios[stage], linear=linear)
        if layer_scale_init_value > 0:
            self.gamma_1 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                        requires_grad=True)
        else:
            self.gamma_1 = None
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratios[stage]),
                       act_layer=act_layer, drop=drop, linear=linear)
        if layer_scale_init_value > 0:
            self.gamma_2 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                        requires_grad=True)
        else:
            self.gamma_2 = None
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # timm-style init: trunc-normal linears, unit LayerNorm, He-init convs
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        B, C, H, W = x.shape
        # (B, C, H, W) -> (B, N, C)
        tokens = x.view(B, C, H * W).permute(0, 2, 1).contiguous()
        branch = self.drop_path(self.attn(self.norm1(tokens), H, W))
        if self.gamma_1 is not None:
            branch = self.gamma_1 * branch
        tokens = tokens + branch
        branch = self.drop_path(self.mlp(self.norm2(tokens), H, W))
        if self.gamma_2 is not None:
            branch = self.gamma_2 * branch
        tokens = tokens + branch
        # (B, N, C') -> (B, H, W, C') -> (B, C', H, W)
        return tokens.view(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
class DWConv(nn.Module):
    """3x3 depth-wise conv applied to a flattened token sequence (B, N, C)."""

    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        feat = x.transpose(1, 2).view(B, C, H, W)  # tokens -> feature map
        feat = self.dwconv(feat)
        return feat.flatten(2).transpose(1, 2)     # feature map -> tokens
class OverlapPatchEmbed(nn.Module):
    """ Image to Patch Embedding

    PVTv2-style *overlapping* patch embedding: a strided conv whose kernel
    is larger than its stride (padding = patch_size // 2), so neighbouring
    patches overlap.  Output is an NCHW feature map followed by a norm.
    """
    def __init__(self, in_channels, out_channels, img_size, patch_size, stride, norm_layer, **kwargs):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        # overlap requires the kernel to exceed the stride
        assert max(patch_size) > stride, "Set larger patch_size than stride"
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // stride, img_size[1] // stride)
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=patch_size, stride=stride,
                              padding=(patch_size[0] // 2, patch_size[1] // 2))
        self.norm = norm_layer(out_channels)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # timm-style init: trunc-normal linears, unit LayerNorm, He-init convs
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
    def forward(self, x):
        x = self.proj(x)
        x = self.norm(x)
        return x
| 9,466 | 36.717131 | 126 | py |
STM-Evaluation | STM-Evaluation-main/classification/models/blocks/swin.py | # Modified from official swin-transformer implementation
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
from torch import nn
from timm.models import register_model
from timm.models.layers import DropPath, Mlp, to_2tuple, _assert
from timm.models.swin_transformer import WindowAttention, window_partition, window_reverse
from ..meta_arch import MetaArch, LayerNorm2d
class SwinBlock(nn.Module):
    """Swin Transformer block: (shifted-)window attention + MLP.

    Even-indexed blocks in a stage use regular windows (W-MSA); odd-indexed
    blocks shift the windows by ``window_size // 2`` (SW-MSA) and use a
    precomputed attention mask so tokens wrapped around by the cyclic shift
    do not attend across the wrap boundary.  Operates on NCHW maps.
    """
    def __init__(self, dim, drop_path, layer_scale_init_value,
                 input_resolution, stage, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
                 head_dim=None, act_layer=nn.GELU, norm_layer=LayerNorm2d,
                 **kwargs):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.window_size = window_size
        # odd-depth blocks shift their windows (SW-MSA)
        self.shift_size = 0 if (depth % 2 == 0) else window_size // 2
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, num_heads=num_heads[stage], head_dim=head_dim, window_size=to_2tuple(self.window_size),
            qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        self.gamma_1 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA: label each of the 9 shifted
            # regions, then forbid attention between differently-labelled tokens
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            cnt = 0
            for h in (
                    slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None)):
                for w in (
                        slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None)):
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            mask_windows = window_partition(img_mask, self.window_size)  # num_win, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
        self.gamma_2 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None
    def forward(self, x):
        B, C, H, W = x.shape
        shortcut = x
        x = self.norm1(x)
        # B, H, W, C
        x = x.permute(0, 2, 3, 1).contiguous()
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # num_win*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # num_win*B, window_size*window_size, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=self.attn_mask)  # num_win*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        if self.gamma_1 is not None:
            x = self.gamma_1 * x
        # B, H, W, C -> B, C, H, W
        x = x.permute(0, 3, 1, 2).contiguous()
        x = shortcut + self.drop_path(x)
        # FFN
        shortcut = x
        x = self.mlp(self.norm2(x).permute(0, 2, 3, 1).contiguous())
        if self.gamma_2 is not None:
            x = self.gamma_2 * x
        x = x.permute(0, 3, 1, 2).contiguous()
        x = shortcut + self.drop_path(x)
        return x
class SwinStem(nn.Module):
    """Non-overlapping patch embedding: conv with kernel = stride = patch size."""

    def __init__(self, in_channels, out_channels, img_size, patch_size, norm_layer, **kwargs):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(out_channels) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        # the fixed patch grid requires the configured input resolution
        _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
        _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
        return self.norm(self.proj(x))
class SwinDownsampleLayer(nn.Module):
    """Swin patch merging: concat each 2x2 neighbourhood (4C channels),
    LayerNorm, then a linear projection to ``out_channels``."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        self.dim = in_channels
        self.out_dim = out_channels or 2 * in_channels
        self.norm = nn.LayerNorm(4 * in_channels)
        self.reduction = nn.Linear(4 * in_channels, self.out_dim, bias=False)

    def forward(self, x):
        """
        x: B, C, H, W  ->  B, out_dim, H/2, W/2
        """
        B, C, H, W = x.shape
        _assert(H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even.")
        x = x.permute(0, 2, 3, 1).contiguous()  # B H W C
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        # FIX: removed two redundant reshapes (view to (B, -1, 4*C) and back);
        # torch.cat above already yields (B, H/2, W/2, 4*C).
        x = self.norm(x)
        x = self.reduction(x)
        x = x.permute(0, 3, 1, 2).contiguous()
        return x
| 7,149 | 39.39548 | 119 | py |
STM-Evaluation | STM-Evaluation-main/classification/models/blocks/halonet.py | """
Modified from Timm lib's implementation of Halo-Attention
https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/layers/halo_attn.py
Following modifications are made:
1. A query-free related positional embedding (PE) is added. This PE runs faster but slightly
decrease the performance.
2. We add the attention masks which mask out the padded pixels at the edges during haloing.
The original paper says masks will decrease the performance. But we find attention masks
beneficial in our cases (at the cost of slightly increasing the inference speed).
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, Mlp
from timm.models.layers.halo_attn import rel_logits_1d
def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    """Round ``v`` to the nearest multiple of ``divisor``.

    The result never falls below ``min_value`` (defaults to ``divisor``),
    and is bumped up one ``divisor`` step if rounding would shrink the
    value by more than ``1 - round_limit`` (10% by default).
    """
    floor = min_value or divisor
    rounded = max(floor, int(v + divisor / 2) // divisor * divisor)
    # Guard: never round down by more than (1 - round_limit) of the input.
    if rounded < round_limit * v:
        rounded += divisor
    return rounded
class QueryFreePosEmbedRel(nn.Module):
    """Query-free relative position bias (Swin-style learnable bias table).

    The bias depends only on the relative offset between a query position
    inside a block and a key position inside the surrounding haloed window,
    not on the query content. ``forward`` ignores its input and returns the
    precomputed bias, broadcastable over batch and blocks.
    """

    def __init__(self, block_size, win_size, num_heads) -> None:
        super().__init__()
        self.block_size = block_size
        self.win_size = win_size
        self.num_heads = num_heads
        # One learnable bias per (relative offset, head); offsets span
        # [-(win_size - 1), win_size - 1] along each spatial axis.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * win_size - 1) * (2 * win_size - 1), num_heads))
        # Buffer (not a parameter): index into the bias table for each
        # (query-in-block, key-in-window) pair; moves with the module's device.
        self.register_buffer(
            "relative_position_index",
            self._get_relative_position_index(win_size, win_size, block_size,
                                              block_size))

    def _get_rel_pos_bias(self) -> torch.Tensor:
        """Gather the bias table into shape (1, nH, 1, block_size**2, win_size**2)."""
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(
                self.block_size**2, self.win_size * self.win_size,
                -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        # Add singleton batch and block dims so the bias broadcasts over
        # (B, num_heads, num_blocks, block_size**2, win_size**2) logits.
        return relative_position_bias.unsqueeze(0).unsqueeze(2)

    def _get_relative_position_index(self, win_h, win_w, block_h, block_w):
        # get pair-wise relative position index for each token inside the window
        '''
        coords = torch.stack(
            torch.meshgrid(
                [torch.arange(win_h), torch.arange(win_w)],
                indexing='ij'))  # 2, Wh, Ww
        '''
        # NOTE: the commented variant above is the modern form; older torch
        # versions do not support the "indexing" argument, so it is omitted.
        coords = torch.stack(
            torch.meshgrid(
                [torch.arange(win_h), torch.arange(win_w)]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        # Pairwise offsets between every pair of window positions.
        relative_coords = coords_flatten[:, :,
                                         None] - coords_flatten[:,
                                                                None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(
            1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += win_h - 1  # shift to start from 0
        relative_coords[:, :, 1] += win_w - 1
        # Flatten the 2D offset into a single table index (row-major).
        relative_coords[:, :, 0] *= 2 * win_w - 1
        relative_coords = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Keep only query positions belonging to the centered block (the
        # block sits at offset (_sh, _sw) inside the haloed window).
        _sh, _sw = (win_h - block_h) // 2, (win_w - block_w) // 2
        relative_coords = relative_coords.reshape(win_h, win_w, win_h, win_w)
        relative_coords = relative_coords[_sh:_sh + block_h,
                                          _sw:_sw + block_w, :, 0:win_w]
        relative_coords = relative_coords.reshape(block_h * block_w,
                                                  win_h * win_w)
        return relative_coords.contiguous()

    def forward(self, _):
        # Input (query) is ignored — the bias is query-content-free.
        # 1, 4, 1, 49, 169
        # 1, num_heads, 1, block_size ** 2, win_size ** 2
        return self._get_rel_pos_bias()
class QueryRelatedPosEmbedRel(nn.Module):
    """
    Relative Position Embedding
    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
    """

    def __init__(self, block_size, win_size, dim_head, scale):
        """
        Args:
            block_size (int): block size
            win_size (int): neighbourhood window size
            dim_head (int): attention head dim
            scale (float): scale factor (for init)
        """
        super().__init__()
        self.block_size = block_size
        self.dim_head = dim_head
        # Learnable 1D relative embeddings, one table per spatial axis,
        # scaled at init like the qk dot product.
        self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
        self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)

    def forward(self, q):
        # q: (B, num_heads, num_blocks, block_size ** 2, dim_head)
        B, NH, BB, HW, C = q.shape
        # Fold heads into the batch dim so rel_logits_1d sees a 4D layout.
        q = q.flatten(0, 1)
        B = B * NH
        # relative logits in width dimension.
        q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
        rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))

        # relative logits in height dimension.
        # (transpose swaps the two spatial axes so the same 1D routine applies)
        q = q.transpose(1, 2)
        rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))

        rel_logits = rel_logits_h + rel_logits_w
        rel_logits = rel_logits.reshape(B, BB, HW, -1)
        # bsz, num_heads, num_blocks ** 2, block_size ** 2, win_size ** 2
        return rel_logits.reshape(B // NH, NH, BB, HW, -1)
class HaloAttn(nn.Module):
    """ Halo Attention

    Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
        - https://arxiv.org/abs/2103.12731

    The internal dimensions of the attention module are controlled by the interaction of several arguments.
      * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
      * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
      * the query and key (qk) dimensions are determined by
        * num_heads * dim_head if dim_head is not None
        * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
      * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used

    Args:
        dim (int): input dimension to the module
        dim_out (int): output dimension of the module, same as dim if not set
        feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
        stride: output stride of the module, query downscaled if > 1 (default: 1).
        num_heads: parallel attention heads (default: 8).
        dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
        block_size (int): size of blocks. (default: 8)
        halo_size (int): size of halo overlap. (default: 3)
        qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
        qkv_bias (bool) : add bias to q, k, and v projections
        avg_down (bool): use average pool downsample instead of strided query blocks
        scale_pos_embed (bool): scale the position embedding as well as Q @ K
    """

    def __init__(self,
                 dim,
                 dim_out=None,
                 feat_size=None,
                 stride=1,
                 num_heads=8,
                 dim_head=None,
                 block_size=8,
                 halo_size=3,
                 qk_ratio=1.0,
                 qkv_bias=False,
                 avg_down=False,
                 pos_embed_type='query_free',
                 scale_pos_embed=False):
        super().__init__()
        dim_out = dim_out or dim
        assert dim_out % num_heads == 0
        assert stride in (1, 2)
        self.num_heads = num_heads
        self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio,
                                                      divisor=8) // num_heads
        self.dim_head_v = dim_out // self.num_heads
        self.dim_out_qk = num_heads * self.dim_head_qk
        self.dim_out_v = num_heads * self.dim_head_v
        self.scale = self.dim_head_qk**-0.5
        self.scale_pos_embed = scale_pos_embed
        self.block_size = self.block_size_ds = block_size
        self.halo_size = halo_size
        self.win_size = block_size + halo_size * 2  # neighbourhood window size
        self.block_stride = 1
        use_avg_pool = False

        # FIXME not clear if this stride behaviour is what the paper intended
        # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
        # data in unfolded block form. I haven't wrapped my head around how that'd look.
        self.kv = nn.Conv2d(dim,
                            self.dim_out_qk + self.dim_out_v,
                            1,
                            bias=qkv_bias)
        self.q = nn.Linear(dim, self.dim_out_qk, bias=qkv_bias)

        if pos_embed_type == 'query_free':
            self.pos_embed = QueryFreePosEmbedRel(block_size=self.block_size_ds,
                                                  win_size=self.win_size,
                                                  num_heads=num_heads)
        elif pos_embed_type == 'query_related':
            self.pos_embed = QueryRelatedPosEmbedRel(block_size=self.block_size,
                                                     win_size=self.win_size,
                                                     dim_head=self.dim_head_qk,
                                                     scale=self.scale)
        else:
            raise NotImplementedError(pos_embed_type)

        self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity()
        self.proj = nn.Linear(self.dim_out_v, self.dim_out_v)

        # Cached padding mask (see get_mask) keyed on the last (H, W) seen.
        self.H, self.W = None, None
        self.mask = None

    def forward(self, x):
        """Attend within blocks over haloed key/value neighbourhoods.

        Args:
            x: (B, H, W, C) feature map; H and W must be multiples of block_size.

        Returns:
            (B, H, W, dim_out_v) tensor.
        """
        B, H, W, C = x.shape
        assert H % self.block_size == 0
        assert W % self.block_size == 0
        num_h_blocks = H // self.block_size
        num_w_blocks = W // self.block_size
        num_blocks = num_h_blocks * num_w_blocks

        q = self.q(x)
        # unfold
        q = q.reshape(-1, num_h_blocks, self.block_size_ds, num_w_blocks,
                      self.block_size_ds, self.num_heads,
                      self.dim_head_qk).permute(0, 5, 1, 3, 2, 4, 6).contiguous()
        # B, num_heads, num_h_blocks, num_w_blocks, block_size_ds, block_size_ds, dim_head_qk
        q = q.reshape(-1, self.num_heads, num_blocks, self.block_size**2,
                      self.dim_head_qk)
        # B, num_heads, num_blocks, block_size ** 2, dim_head

        kv = self.kv(x.permute(0, 3, 1, 2).contiguous())
        # Zero-pad so every block has a full halo; the mask below hides the
        # padded positions from attention.
        kv = F.pad(
            kv,
            [
                self.halo_size,
                self.halo_size,
                self.halo_size,
                self.halo_size,
            ],
        )
        kv = kv.unfold(2, self.win_size, self.block_size).unfold(
            3, self.win_size,
            self.block_size).reshape(-1, self.num_heads,
                                     self.dim_head_qk + self.dim_head_v,
                                     num_blocks,
                                     self.win_size**2).permute(0, 1, 3, 4, 2).contiguous()
        k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1)
        k = k.reshape(-1, self.num_heads, num_blocks, self.win_size,
                      self.win_size, self.dim_head_qk)
        v = v.reshape(-1, self.num_heads, num_blocks, self.win_size,
                      self.win_size, self.dim_head_v)
        k = k.flatten(3, 4)
        v = v.flatten(3, 4)

        if self.scale_pos_embed:
            # BUGFIX: pos_embed.forward requires the query argument (it is
            # ignored by the query-free variant); calling it with no args
            # raised a TypeError.
            attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale
        else:
            attn = (q * self.scale) @ k.transpose(-1, -2) + self.pos_embed(q)
        # Mask out attention to padded (out-of-image) halo pixels.
        max_neg_value = -torch.finfo(attn.dtype).max
        attn.masked_fill_(self.get_mask(H, W, attn.device), max_neg_value)
        # B, num_heads, num_blocks, block_size ** 2, win_size ** 2
        attn = attn.softmax(dim=-1)

        out = attn @ v
        # B, num_heads, num_blocks, block_size ** 2, dim_head_v
        # fold
        # BUGFIX: the last dim here is the value head dim (dim_head_v); the
        # original used dim_head_qk, which fails whenever qk and v head dims
        # differ (e.g. dim_head set explicitly or qk_ratio != 1).
        out = out.reshape(-1, self.num_heads, num_h_blocks, num_w_blocks,
                          self.block_size_ds, self.block_size_ds,
                          self.dim_head_v)
        out = out.permute(0, 2, 4, 3, 5, 1, 6).reshape(B, H, W, self.dim_out_v).contiguous()
        out = self.proj(out)
        # B, H, W, dim_out
        return out

    def get_mask(self, H, W, device):
        """Return (and cache) the boolean halo-padding mask for an (H, W) input.

        True entries mark window positions that fall in the zero padding
        outside the image and must not be attended to. Shape:
        (1, num_blocks, 1, win_size * win_size), broadcastable over the
        attention logits.
        """
        if self.H == H and self.W == W and self.mask is not None:
            # Cache hit; make sure the mask lives on the requested device
            # (the module may have been moved since the mask was built).
            if self.mask.device != device:
                self.mask = self.mask.to(device)
            return self.mask

        num_h_blocks = H // self.block_size
        num_w_blocks = W // self.block_size
        num_blocks = num_h_blocks * num_w_blocks
        # Ones inside the image, zeros in the halo padding; unfolded exactly
        # like k/v so the mask aligns with the attention logits.
        mask = torch.ones((1, 1, H, W), device=device)
        mask = F.pad(mask, [self.halo_size, self.halo_size, self.halo_size, self.halo_size])
        mask = mask.unfold(2, self.win_size, self.block_size)
        mask = mask.unfold(3, self.win_size, self.block_size)
        mask = mask.reshape(1, num_blocks, self.win_size * self.win_size)
        mask = mask.unsqueeze(-2)
        # 1, num_blocks, 1, win_size * win_size
        mask = mask.bool()

        self.H = H
        self.W = W
        self.mask = ~mask
        return self.mask
class UnifiedHaloBlock(nn.Module):
    """Pre-norm transformer block using Halo attention as the token mixer.

    Computes ``x + DropPath(gamma_1 * Attn(LN(x)))`` followed by
    ``x + DropPath(gamma_2 * MLP(LN(x)))``. Operates on (B, H, W, C)
    tensors; the two static transforms convert from/to NCHW around a stage.
    """

    def __init__(self,
                 dim,
                 drop_path,
                 layer_scale_init_value,
                 block_size,
                 halo_size,
                 stage,
                 num_heads,
                 mlp_ratio=4.,
                 drop=0.,
                 act_layer=nn.GELU,
                 pos_embed_type='query_related',
                 **kwargs):
        super().__init__()
        self.dim = dim
        self.mlp_ratio = mlp_ratio
        use_layer_scale = layer_scale_init_value > 0

        self.norm1 = nn.LayerNorm((dim, ))
        self.attn = HaloAttn(dim=dim,
                             dim_out=dim,
                             num_heads=num_heads[stage],
                             block_size=block_size,
                             halo_size=halo_size,
                             pos_embed_type=pos_embed_type)
        # Per-channel layer-scale for the attention branch (None disables it).
        self.gamma_1 = (nn.Parameter(layer_scale_init_value * torch.ones((1, 1, 1, dim)),
                                     requires_grad=True)
                        if use_layer_scale else None)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = nn.LayerNorm((dim, ))
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer,
                       drop=drop)
        # Layer-scale for the MLP branch.
        self.gamma_2 = (nn.Parameter(layer_scale_init_value * torch.ones((1, 1, 1, dim)),
                                     requires_grad=True)
                        if use_layer_scale else None)

    @staticmethod
    def pre_stage_transform(x):
        # (B, C, H, W) -> (B, H, W, C)
        return x.permute(0, 2, 3, 1).contiguous()

    @staticmethod
    def post_stage_transform(x):
        # (B, H, W, C) -> (B, C, H, W)
        return x.permute(0, 3, 1, 2).contiguous()

    def forward(self, x):
        # Attention branch; input shape: (B, H, W, C).
        branch = self.attn(self.norm1(x))
        if self.gamma_1 is not None:
            branch = self.gamma_1 * branch
        x = x + self.drop_path(branch)

        # MLP (FFN) branch.
        branch = self.mlp(self.norm2(x))
        if self.gamma_2 is not None:
            branch = self.gamma_2 * branch
        return x + self.drop_path(branch)
STM-Evaluation | STM-Evaluation-main/classification/models/blocks/convnext.py | '''
Modified from official ConvNeXt implementation
Note that the unified ConvNeXt block is very different
from the official implementation. In the unified ConvNeXt,
a depth-wise convolution with input & output projections is used
as the spatial token mixer, but the block design still follows
the original transformer's block architecture.
'''
# --------------------------------------------------------
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
import torch
from torch import nn
from timm.models.layers import DropPath, to_2tuple
from ..meta_arch import LayerNorm2d
# ConvNeXt original implementation
class ConvNeXtBlock(nn.Module):
    """Original ConvNeXt block: depthwise conv -> LayerNorm -> inverted MLP.

    The pointwise convs are implemented as Linear layers in channels-last
    layout, and the residual is taken in that layout as well. Input and
    output are NCHW.
    """

    def __init__(self, dim, drop_path, layer_scale_init_value, kernel_size=7, **kwargs):
        super().__init__()
        # Depthwise conv mixes tokens spatially, one channel at a time.
        self.dwconv = nn.Conv2d(dim, dim,
                                kernel_size=kernel_size,
                                padding=kernel_size // 2,
                                groups=dim)
        self.norm = nn.LayerNorm(dim, eps=1e-6)
        # 1x1 convs expressed as Linear layers (applied channels-last).
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                  requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # Residual is kept in (N, H, W, C) layout.
        residual = x.permute(0, 2, 3, 1).contiguous()
        out = self.dwconv(x).permute(0, 2, 3, 1).contiguous()
        out = self.pwconv2(self.act(self.pwconv1(self.norm(out))))
        if self.gamma is not None:
            out = self.gamma * out
        out = residual + self.drop_path(out)
        # Back to (N, C, H, W).
        return out.permute(0, 3, 1, 2).contiguous()
class UnifiedConvNeXtBlock(nn.Module):
    """Unified ConvNeXt block: two pre-norm residual sub-blocks.

    Sub-block 1 (NCHW): norm -> 1x1 in-proj -> depthwise conv -> 1x1 out-proj.
    Sub-block 2 (channels-last): norm -> Linear expand -> GELU -> Linear project.
    Each sub-block has its own optional layer-scale parameter.
    """

    def __init__(self, dim, drop_path, layer_scale_init_value, kernel_size=7, **kwargs):
        super().__init__()
        use_layer_scale = layer_scale_init_value > 0

        # Spatial token mixer (operates in NCHW).
        self.dw_norm = LayerNorm2d(dim, eps=1e-6)
        self.dw_input_proj = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0)
        self.dwconv = nn.Conv2d(dim, dim,
                                kernel_size=kernel_size,
                                padding=kernel_size // 2,
                                groups=dim)
        self.dw_out_proj = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0)
        self.gamma_1 = (nn.Parameter(layer_scale_init_value * torch.ones((1, dim, 1, 1)),
                                     requires_grad=True)
                        if use_layer_scale else None)

        # Channel MLP (operates channels-last; 1x1 convs as Linear layers).
        self.pw_norm = nn.LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma_2 = (nn.Parameter(layer_scale_init_value * torch.ones((1, 1, 1, dim)),
                                     requires_grad=True)
                        if use_layer_scale else None)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # Spatial mixing branch (NCHW).
        out = self.dw_input_proj(self.dw_norm(x))
        out = self.dw_out_proj(self.dwconv(out))
        if self.gamma_1 is not None:
            out = self.gamma_1 * out
        x = x + self.drop_path(out)

        # Channel MLP branch in (N, H, W, C) layout.
        x = x.permute(0, 2, 3, 1).contiguous()
        out = self.pwconv2(self.act(self.pwconv1(self.pw_norm(x))))
        if self.gamma_2 is not None:
            out = self.gamma_2 * out
        x = x + self.drop_path(out)
        # Back to (N, C, H, W).
        return x.permute(0, 3, 1, 2).contiguous()
class ConvNeXtStem(nn.Module):
    """ConvNeXt patchify stem: non-overlapping 4x4 stride-4 conv + LayerNorm.

    ``grid_size`` records the token-grid resolution after 4x downsampling.
    """

    def __init__(self, in_channels, out_channels, img_size, **kwargs):
        super().__init__()
        height, width = to_2tuple(img_size)
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=4, stride=4),
            LayerNorm2d(out_channels, eps=1e-6)
        )
        self.grid_size = (height // 4, width // 4)

    def forward(self, x):
        return self.stem(x)
class ConvNeXtDownsampleLayer(nn.Module):
    """ConvNeXt downsampling: LayerNorm then a 2x2 stride-2 conv (halves H, W)."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        layers = [
            LayerNorm2d(in_channels, eps=1e-6),
            nn.Conv2d(in_channels, out_channels, kernel_size=2, stride=2),
        ]
        self.reduction = nn.Sequential(*layers)

    def forward(self, x):
        # (N, C_in, H, W) -> (N, C_out, H/2, W/2)
        return self.reduction(x)
| 4,985 | 35.661765 | 95 | py |
STM-Evaluation | STM-Evaluation-main/detection/mmdet_test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
from mmdet.utils import (build_ddp, build_dp, compat_cfg, get_device,
replace_cfg_vals, setup_multi_processes,
update_data_root)
import mmdet_custom
def parse_args():
    """Parse command-line arguments for MMDet testing and evaluation.

    Also exports LOCAL_RANK for distributed launchers and migrates the
    deprecated ``--options`` flag into ``--eval-options``.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Distributed launchers read LOCAL_RANK from the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    # Backward compatibility: fold the deprecated --options into --eval-options.
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Run single- or multi-GPU detector testing/evaluation from a config + checkpoint."""
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)

    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)

    # update data root according to MMDET_DATASETS
    update_data_root(cfg)

    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    cfg = compat_cfg(cfg)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # Drop pretrained-weight settings: weights come from the checkpoint.
    if 'pretrained' in cfg.model:
        cfg.model.pretrained = None
    elif 'init_cfg' in cfg.model.backbone:
        cfg.model.backbone.init_cfg = None

    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed testing. Use the first GPU '
                      'in `gpu_ids` now.')
    else:
        cfg.gpu_ids = [args.gpu_id]
    cfg.device = get_device()
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    test_dataloader_default_args = dict(
        samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False)

    # in case the test dataset is concatenated
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    # Config-file settings override the defaults above.
    test_loader_cfg = {
        **test_dataloader_default_args,
        **cfg.data.get('test_dataloader', {})
    }

    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, **test_loader_cfg)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = build_ddp(
            model,
            cfg.device,
            device_ids=[int(os.environ['LOCAL_RANK'])],
            broadcast_buffers=False)
        outputs = multi_gpu_test(
            model, data_loader, args.tmpdir, args.gpu_collect
            or cfg.evaluation.get('gpu_collect', False))

    # Only rank 0 dumps/evaluates the collected results.
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule', 'dynamic_intervals'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)
# Script entry point: parse CLI args and run testing/evaluation.
if __name__ == '__main__':
    main()
| 10,515 | 36.827338 | 79 | py |
STM-Evaluation | STM-Evaluation-main/detection/mmdet_train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import (collect_env, get_device, get_root_logger,
replace_cfg_vals, setup_multi_processes,
update_data_root)
import mmdet_custom
def parse_args():
    """Parse command-line arguments for MMDet training.

    Also exports LOCAL_RANK for distributed launchers and migrates the
    deprecated ``--options`` flag into ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--auto-resume',
        action='store_true',
        help='resume from the latest checkpoint automatically')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='(Deprecated, please use --gpu-id) number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--diff-seed',
        action='store_true',
        help='Whether or not set different seeds for different ranks')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--auto-scale-lr',
        action='store_true',
        help='enable automatically scaling LR.')
    args = parser.parse_args()
    # Distributed launchers read LOCAL_RANK from the environment.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    # Backward compatibility: fold the deprecated --options into --cfg-options.
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options

    return args
def main():
    """Build datasets and model from the config and launch detector training."""
    args = parse_args()

    cfg = Config.fromfile(args.config)

    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)

    # update data root according to MMDET_DATASETS
    update_data_root(cfg)

    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    if args.auto_scale_lr:
        if 'auto_scale_lr' in cfg and \
                'enable' in cfg.auto_scale_lr and \
                'base_batch_size' in cfg.auto_scale_lr:
            cfg.auto_scale_lr.enable = True
        else:
            warnings.warn('Can not find "auto_scale_lr" or '
                          '"auto_scale_lr.enable" or '
                          '"auto_scale_lr.base_batch_size" in your'
                          ' configuration file. Please update all the '
                          'configuration files to mmdet >= 2.24.1.')

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])

    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.auto_resume = args.auto_resume
    if args.gpus is not None:
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support '
                      'single GPU mode in non-distributed training. '
                      'Use `gpus=1` now.')
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed training. Use the first GPU '
                      'in `gpu_ids` now.')
    if args.gpus is None and args.gpu_ids is None:
        cfg.gpu_ids = [args.gpu_id]

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    cfg.device = get_device()
    # set random seeds
    seed = init_random_seed(args.seed, device=cfg.device)
    # Optionally decorrelate ranks by offsetting the seed per rank.
    seed = seed + dist.get_rank() if args.diff_seed else seed
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        assert 'val' in [mode for (mode, _) in cfg.workflow]
        val_dataset = copy.deepcopy(cfg.data.val)
        # Validation in the workflow reuses the training pipeline.
        val_dataset.pipeline = cfg.data.train.get(
            'pipeline', cfg.data.train.dataset.get('pipeline'))
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
# Script entry point: parse CLI args and launch training.
if __name__ == '__main__':
    main()
| 9,169 | 36.125506 | 79 | py |
STM-Evaluation | STM-Evaluation-main/detection/mmdet_custom/models/backbones/meta_arch.py | import torch
import torch.nn.functional as F
from torch import nn
from mmdet.utils import get_root_logger
from timm.models.layers import to_2tuple, trunc_normal_
class LayerNorm2d(nn.LayerNorm):
    """Channel-wise LayerNorm for NCHW ('2D' spatial) tensors.

    Statistics are computed per spatial location over the channel axis by
    temporarily moving channels to the last dimension.
    """

    def __init__(self, num_channels, eps=1e-6, affine=True):
        super().__init__(num_channels, eps=eps, elementwise_affine=affine)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # NCHW -> NHWC so the normalized axis (channels) is last.
        nhwc = x.permute(0, 2, 3, 1).contiguous()
        nhwc = F.layer_norm(nhwc, self.normalized_shape, self.weight, self.bias, self.eps)
        # Restore NCHW for downstream convolutions.
        return nhwc.permute(0, 3, 1, 2).contiguous()
class Stem(nn.Module):
    """Patchify stem: two stride-2 3x3 convolutions for an overall 4x reduction.

    ``ratio`` controls the width of the intermediate feature map relative to
    ``out_channels``. ``grid_size`` records the (H, W) token grid the stem
    produces for an ``img_size`` input.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 img_size,
                 norm_layer,
                 act_layer,
                 ratio=0.5,
                 **kwargs):
        super().__init__()
        size_hw = to_2tuple(img_size)
        # Overall stride is 4 (two stride-2 convs).
        self.grid_size = (size_hw[0] // 4, size_hw[1] // 4)
        mid_channels = int(out_channels * ratio)
        # input_shape: B x C x H x W; module order is kept so checkpoint
        # keys remain `stem.0` .. `stem.4`.
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels,
                      kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            norm_layer(mid_channels),
            act_layer(),
            nn.Conv2d(mid_channels, out_channels,
                      kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            norm_layer(out_channels)
        )

    def forward(self, x):
        return self.stem(x)
class DownsampleLayer(nn.Module):
    """Between-stage reduction: stride-2 3x3 conv followed by normalization.

    Halves the spatial resolution and remaps channels; input is NCHW.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_layer,
                 **kwargs):
        super().__init__()
        conv = nn.Conv2d(in_channels, out_channels,
                         kernel_size=(3, 3),
                         stride=(2, 2),
                         padding=(1, 1),
                         bias=False)
        # Keep the (conv, norm) pair inside one Sequential so checkpoint keys
        # stay `reduction.0.*` / `reduction.1.*`.
        self.reduction = nn.Sequential(conv, norm_layer(out_channels))

    def forward(self, x):
        return self.reduction(x)
class MetaArch(nn.Module):
    """Unified 4-stage backbone skeleton used by all STM variants.

    The architecture is: stem -> [stage_0 .. stage_3], with a downsample
    layer in front of each stage after the first. The concrete block type
    (Swin / PVT / Halo / ConvNeXt / ...) is injected via ``block_type``;
    per-stage feature maps listed in ``out_indices`` are normalized and
    returned for the detection neck. Classification-head code from the
    upstream implementation is kept commented out for reference.
    """

    def __init__(self,
                 img_size=224,
                 in_channels=3,
                 # num_classes=1000,
                 depths=(3, 3, 9, 3),
                 dims=(96, 192, 384, 768),
                 drop_path_rate=0.,
                 layer_scale_init_value=1e-6,
                 stem_type=Stem,
                 stem_kwargs=None,
                 block_type=None,
                 block_kwargs=None,
                 downsample_type=DownsampleLayer,
                 downsample_kwargs=None,
                 # extra_transform=True,
                 # extra_transform_ratio=1.5,
                 norm_layer=LayerNorm2d,
                 norm_every_stage=True,
                 # norm_after_avg=False,
                 act_layer=nn.GELU,
                 forward_kwargs=None,
                 out_indices=(3,),
                 pretrained=None,
                 **kwargs,
                 ):
        super().__init__()
        # Normalize optional kwarg dicts so they can be **-expanded below.
        stem_kwargs = stem_kwargs or {}
        block_kwargs = block_kwargs or {}
        downsample_kwargs = downsample_kwargs or {}
        forward_kwargs = forward_kwargs or {}
        self.depths = depths
        self.block_type = block_type
        self.forward_kwargs = forward_kwargs
        self.out_indices = out_indices
        self.pretrained = pretrained
        # stem + downsample_layers
        stem = stem_type(in_channels=in_channels,
                         out_channels=dims[0],
                         img_size=img_size,
                         norm_layer=norm_layer,
                         norm_first=False,
                         act_layer=act_layer,
                         **stem_kwargs)
        # H, W
        self.patch_grid = stem.grid_size
        # downsample_layers[0] is the stem; entries 1..3 reduce before stages 1..3.
        self.downsample_layers = nn.ModuleList([stem])
        for i in range(3):
            self.downsample_layers.append(downsample_type(in_channels=dims[i],
                                                          out_channels=dims[i+1],
                                                          norm_layer=norm_layer,
                                                          norm_first=True,
                                                          img_size=(self.patch_grid[0] // (2 ** i), self.patch_grid[1] // (2 ** i)),
                                                          **downsample_kwargs))
        # blocks
        cur = 0
        # Stochastic-depth rate increases linearly across the total depth.
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        self.stages = nn.ModuleList()
        # self.stage_norms = nn.ModuleList()
        for i, (depth, dim) in enumerate(zip(depths, dims)):
            self.stages.append(nn.Sequential(
                *[block_type(dim=dim,
                             drop_path=dp_rates[cur + j],
                             stage=i,
                             depth=j,
                             total_depth=cur+j,
                             input_resolution=(self.patch_grid[0] // (2 ** i), self.patch_grid[1] // (2 ** i)),
                             layer_scale_init_value=layer_scale_init_value,
                             **block_kwargs)
                  for j in range(depth)]
            ))
            # self.stage_norms.append(norm_layer(dim) if norm_every_stage else nn.Identity())
            cur += depths[i]
            if i in out_indices:
                # Registered as `out_norm_{i}` so init_weights can recognize the
                # (expectedly) missing keys when loading classification checkpoints.
                layer = norm_layer(dims[i])
                layer_name = f'out_norm_{i}'
                self.add_module(layer_name, layer)
        # self.stage_end_norm = nn.Identity() if norm_every_stage or norm_after_avg else norm_layer(dims[-1])
        # self.conv_head = nn.Sequential(
        #     nn.Conv2d(dims[-1], int(dims[-1] * extra_transform_ratio), 1, 1, 0, bias=False),
        #     nn.BatchNorm2d(int(dims[-1] * extra_transform_ratio)),
        #     act_layer()
        # ) if extra_transform else nn.Identity()
        # features = int(dims[-1] * extra_transform_ratio) if extra_transform else dims[-1]
        # self.avg_head = nn.Sequential(
        #     nn.AdaptiveAvgPool2d(1),
        #     norm_layer(features) if norm_after_avg else nn.Identity(),
        #     nn.Flatten(1),
        # )
        # if num_classes > 0:
        #     self.head = nn.Linear(features, num_classes)
        # else:
        #     self.head = nn.Identity()
        self.apply(self._init_weights)

    @ torch.jit.ignore
    def _init_weights(self, m):
        """Truncated-normal init for conv/linear weights, zero for biases."""
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    @torch.jit.ignore
    def init_weights(self):
        """Load ``self.pretrained`` (a classification checkpoint) if set.

        Head/attention-mask entries are dropped from the checkpoint; any
        remaining key mismatch is asserted to come only from the renamed
        per-stage norms (`stage_norms` vs `out_norm_*`).
        """
        logger = get_root_logger()
        if self.pretrained is not None:
            raw_ckpt = torch.load(self.pretrained, map_location='cpu')['model']
            ckpt = {}
            for name, param in raw_ckpt.items():
                # Classification head and cached attention masks are not needed.
                if name.startswith('conv_head') or name.startswith('head') or 'attn_mask' in name:
                    continue
                ckpt[name] = param
            for name in ckpt:
                if name not in self.state_dict().keys():
                    print('\nunexpected_keys: ', name)
                    assert 'stage_norms' in name
            for name in self.state_dict().keys():
                if name not in ckpt:
                    print('\nmissing_keys: ', name)
                    assert 'out_norm_' in name
            self.load_state_dict(ckpt, False)
            logger.warning('load ckpt from %s', self.pretrained)
        else:
            logger.warning('training start from scratch')

    @ torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay (position-bias tables)."""
        # from swin v1
        no_weight_decay = {'absolute_pos_embed'}
        for name, _ in self.named_parameters():
            if 'relative_position_bias_table' in name:
                no_weight_decay.add(name)
        return no_weight_decay

    def forward_features(self, x):
        """Run the stem and all stages; return normalized maps for ``out_indices``."""
        extra_inputs = None
        # Some block types (dcn_v3) precompute per-stage auxiliary inputs once.
        if hasattr(self.block_type, 'extra_inputs'):  # dcn_v3
            extra_inputs = self.block_type.extra_inputs(x, **self.forward_kwargs)
        # shape: (B, C, H, W)
        outs = []
        for i in range(len(self.depths)):
            x = self.downsample_layers[i](x)
            # Optional layout change on stage entry (e.g. halonet uses NHWC).
            if hasattr(self.block_type, 'pre_stage_transform'):  # halonet
                x = self.block_type.pre_stage_transform(x)
            x = self.stages[i](x if extra_inputs is None else (x, extra_inputs[i]))
            if hasattr(self.block_type, 'post_stage_transform'):
                x = self.block_type.post_stage_transform(x)
            # When extra inputs are threaded through, stages return a tuple.
            x = x if extra_inputs is None else x[0]
            # x = self.stage_norms[i](x)
            if i in self.out_indices:
                norm_layer = getattr(self, f'out_norm_{i}')
                out = norm_layer(x)
                outs.append(out)
        # x = self.stage_end_norm(x)
        # x = self.conv_head(x)
        # x = self.avg_head(x)
        # return x
        return outs

    def forward(self, x):
        x = self.forward_features(x)
        # x = self.head(x)
        return x
| 9,234 | 34.656371 | 132 | py |
STM-Evaluation | STM-Evaluation-main/detection/mmdet_custom/models/backbones/blocks/pvt.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from timm.models.layers import DropPath, trunc_normal_
from ..meta_arch import MetaArch
class Mlp(nn.Module):
    """Feed-forward block: Linear -> activation -> Linear, dropout after each.

    Hidden and output widths default to the input width when omitted; the
    same Dropout module is applied after the activation and after fc2.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """PVT spatial-reduction attention.

    Queries come from every token; keys/values are optionally computed from a
    spatially reduced (``sr_ratio``-strided conv + LayerNorm) copy of the
    input, which shrinks the attention matrix for high-resolution stages.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # Separate projections: q from the full sequence, kv possibly reduced.
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        head_dim = C // self.num_heads
        # (B, N, C) -> (B, num_heads, N, head_dim)
        q = self.q(x).reshape(B, N, self.num_heads, head_dim).permute(0, 2, 1, 3)

        kv_src = x
        if self.sr_ratio > 1:
            # Fold tokens back to a map, stride-reduce, then re-flatten.
            spatial = x.permute(0, 2, 1).reshape(B, C, H, W)
            kv_src = self.sr(spatial).reshape(B, C, -1).permute(0, 2, 1)
            kv_src = self.norm(kv_src)
        # (2, B, num_heads, N_kv, head_dim)
        kv = self.kv(kv_src).reshape(B, -1, 2, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        k, v = kv.unbind(0)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(attn.softmax(dim=-1))

        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
class PvtBlock(nn.Module):
    """PVT stage block: SR-attention + MLP, each with layer scale and drop path.

    Only the first block of a stage (``depth == 0``) owns a learnable
    positional embedding, which is bilinearly resized to the runtime (H, W)
    before being added. Input and output are NCHW; attention/MLP operate on
    flattened (B, N, C) tokens internally.
    """

    def __init__(self,
                 dim,
                 drop_path,
                 layer_scale_init_value,
                 num_heads,
                 input_resolution,
                 stage,
                 depth,
                 mlp_ratios,
                 sr_ratios,
                 qkv_bias,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 **kwargs):
        super().__init__()
        self.pos_embed = None
        self.pos_drop = None
        if depth == 0:
            # Positional embedding only in the stage's first block.
            self.pos_embed = nn.Parameter(torch.zeros(1, dim, *input_resolution))
            self.pos_drop = nn.Dropout(p=drop)
            trunc_normal_(self.pos_embed, std=.02)
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim,
                              num_heads=num_heads[stage], qkv_bias=qkv_bias, qk_scale=qk_scale,
                              attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratios[stage])
        # gamma_* fall back to the scalar 1 (not None), so the multiply below
        # is always valid.
        self.gamma_1 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else 1
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratios[stage])
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.gamma_2 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else 1

    def forward(self, x):
        B, C, H, W = x.shape
        # NOTE(review): the residual branch is taken *before* the positional
        # embedding is added, so pos_embed only feeds the attention input —
        # confirm this matches the intended PVT behavior.
        identity = x.flatten(2).permute(0, 2, 1).contiguous()
        if self.pos_embed is not None:
            # Resize the stored embedding to the current feature resolution.
            pos_embed = F.interpolate(self.pos_embed,
                                      size=(H, W),
                                      mode='bilinear',
                                      align_corners=True)  # 1,C,H,W
            x = self.pos_drop(x + pos_embed)
        x = x.flatten(2).permute(0, 2, 1).contiguous()
        x = identity + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), H, W))
        x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        # Tokens back to an NCHW map for the next block/downsample.
        return x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous()
@BACKBONES.register_module()
class UnifiedPVT(MetaArch):
    """PVT variant of the unified meta-architecture (PvtBlock as stage block)."""

    def __init__(self, *args, **kwargs):
        # The block type is fixed for this backbone; override any caller value.
        kwargs.update(block_type=PvtBlock)
        super().__init__(*args, **kwargs)
| 5,115 | 35.542857 | 112 | py |
STM-Evaluation | STM-Evaluation-main/detection/mmdet_custom/models/backbones/blocks/swin.py | import torch
import torch.nn.functional as F
from torch import nn
from mmdet.models.builder import BACKBONES
from timm.models.layers import DropPath, Mlp, to_2tuple, _assert
from timm.models.swin_transformer import WindowAttention, window_partition, window_reverse
from ..meta_arch import LayerNorm2d, MetaArch
class SwinBlock(nn.Module):
    """Swin Transformer block with runtime padding and shift-mask computation.

    Odd-depth blocks use a cyclically shifted window (SW-MSA); even-depth
    blocks use plain W-MSA. Unlike upstream Swin, the attention mask is
    built on the fly from the padded (H, W) instead of being cached, so the
    block supports variable input resolutions. Input/output are NCHW.
    """

    def __init__(self, dim, drop_path, layer_scale_init_value,
                 input_resolution, stage, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
                 head_dim=None, act_layer=nn.GELU, norm_layer=LayerNorm2d,
                 **kwargs):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.window_size = window_size
        # Alternate 0 / window_size//2 shifts by block index within the stage.
        self.shift_size = 0 if (depth % 2 == 0) else window_size // 2
        self.mlp_ratio = mlp_ratio
        # if min(self.input_resolution) <= self.window_size:
        #     # if window size is larger than input resolution, we don't partition windows
        #     self.shift_size = 0
        #     self.window_size = min(self.input_resolution)
        # assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, num_heads=num_heads[stage], head_dim=head_dim, window_size=to_2tuple(self.window_size),
            qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        self.gamma_1 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
        # if self.shift_size > 0:
        #     # calculate attention mask for SW-MSA
        #     H, W = self.input_resolution
        #     img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        #     cnt = 0
        #     for h in (
        #             slice(0, -self.window_size),
        #             slice(-self.window_size, -self.shift_size),
        #             slice(-self.shift_size, None)):
        #         for w in (
        #                 slice(0, -self.window_size),
        #                 slice(-self.window_size, -self.shift_size),
        #                 slice(-self.shift_size, None)):
        #             img_mask[:, h, w, :] = cnt
        #             cnt += 1
        #     mask_windows = window_partition(img_mask, self.window_size)  # num_win, window_size, window_size, 1
        #     mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        #     attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        #     attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        # else:
        #     attn_mask = None
        # self.register_buffer("attn_mask", attn_mask)
        self.gamma_2 = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None

    def forward(self, x):
        B, C, H_origin, W_origin = x.shape
        shortcut = x
        x = self.norm1(x)
        # B, H, W, C
        x = x.permute(0, 2, 3, 1).contiguous()
        # Pad bottom/right so H and W are multiples of the window size.
        pad_r = (self.window_size - W_origin % self.window_size) % self.window_size
        pad_b = (self.window_size - H_origin % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
        H, W = x.shape[1], x.shape[2]
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            # calculate attention mask for SW-MSA: tokens that were not
            # neighbours before the roll must not attend to each other.
            img_mask = torch.zeros((1, H, W, 1), device=x.device)
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            # nW, window_size, window_size, 1
            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            # Non-zero region difference => cross-region pair => large negative bias.
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            shifted_x = x
            attn_mask = None
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # num_win*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # num_win*B, window_size*window_size, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=attn_mask)  # num_win*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        # Drop the padded border before the residual add.
        x = x[:, :H_origin, :W_origin].contiguous()
        if self.gamma_1 is not None:
            x = self.gamma_1 * x
        # B, H, W, C -> B, C, H, W
        x = x.permute(0, 3, 1, 2).contiguous()
        x = shortcut + self.drop_path(x)
        # FFN
        shortcut = x
        x = self.mlp(self.norm2(x).permute(0, 2, 3, 1).contiguous())
        if self.gamma_2 is not None:
            x = self.gamma_2 * x
        x = x.permute(0, 3, 1, 2).contiguous()
        x = shortcut + self.drop_path(x)
        return x
class SwinStem(nn.Module):
    """Swin patch embedding: non-overlapping ``patch_size`` conv + optional norm.

    Rejects inputs whose spatial size differs from the configured ``img_size``.
    """

    def __init__(self, in_channels, out_channels, img_size, patch_size, norm_layer, **kwargs):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # Token grid after patchification.
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(out_channels) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
        _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
        return self.norm(self.proj(x))
class SwinDownsampleLayer(nn.Module):
    """Swin patch merging: gather each 2x2 neighbourhood into channels, then
    LayerNorm and a linear reduction to ``out_channels``.

    Input and output are NCHW; the merge itself runs in NHWC. Requires even
    H and W.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        self.dim = in_channels
        self.out_dim = out_channels or 2 * in_channels
        self.norm = nn.LayerNorm(4 * in_channels)
        self.reduction = nn.Linear(4 * in_channels, self.out_dim, bias=False)

    def forward(self, x):
        """
        x: B, C, H, W
        """
        B, C, H, W = x.shape
        _assert(H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even.")
        x = x.permute(0, 2, 3, 1).contiguous()
        # The four parity-offset sub-grids of every 2x2 patch.
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        # cat already yields (B, H/2, W/2, 4C); the original's intermediate
        # view to (B, -1, 4C) followed by a view back was a no-op and is removed.
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = self.reduction(self.norm(x))
        return x.permute(0, 3, 1, 2).contiguous()
@BACKBONES.register_module()
class UnifiedSwinTransformer(MetaArch):
    """Swin variant of the unified meta-architecture (SwinBlock as stage block)."""

    def __init__(self, *args, **kwargs):
        # The block type is fixed for this backbone; override any caller value.
        kwargs.update(block_type=SwinBlock)
        super().__init__(*args, **kwargs)
| 8,506 | 40.70098 | 119 | py |
STM-Evaluation | STM-Evaluation-main/detection/mmdet_custom/models/backbones/blocks/halonet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from timm.models.layers import DropPath, Mlp
from timm.models.layers.halo_attn import rel_logits_1d
from ..meta_arch import MetaArch
def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    """Round ``v`` to a multiple of ``divisor`` (channel-count helper).

    The result is never below ``min_value`` (falls back to ``divisor`` when
    falsy) and, to avoid shrinking a layer too much, is bumped up one step
    whenever rounding lost more than ``1 - round_limit`` of the original value.
    """
    lower_bound = min_value or divisor
    candidate = max(lower_bound, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not remove more than 10% of v.
    if candidate < round_limit * v:
        candidate += divisor
    return candidate
class QueryFreePosEmbedRel(nn.Module):
    """Learned relative position bias for halo attention (Swin-style table).

    The bias depends only on the relative offset between a query position
    inside the ``block_size`` block and a key position inside the enclosing
    ``win_size`` halo window — not on the query content (hence "query free").
    """

    def __init__(self, block_size, win_size, num_heads) -> None:
        super().__init__()
        self.block_size = block_size
        self.win_size = win_size
        self.num_heads = num_heads
        # One learnable bias per head for every possible 2D relative offset.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * win_size - 1) * (2 * win_size - 1), num_heads))
        self.register_buffer(
            "relative_position_index",
            self._get_relative_position_index(win_size, win_size, block_size,
                                              block_size))

    def _get_rel_pos_bias(self) -> torch.Tensor:
        # Gather table entries by the precomputed index, then reorder to
        # (1, num_heads, 1, block_size**2, win_size**2) for broadcasting over
        # batch and blocks.
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(
                self.block_size**2, self.win_size * self.win_size,
                -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        return relative_position_bias.unsqueeze(0).unsqueeze(2)

    def _get_relative_position_index(self, win_h, win_w, block_h, block_w):
        # get pair-wise relative position index for each token inside the window
        '''
        coords = torch.stack(
            torch.meshgrid(
                [torch.arange(win_h), torch.arange(win_w)],
                indexing='ij'))  # 2, Wh, Ww
        '''
        # shimin: for lower version torch, the "indexing" argument is not supported
        coords = torch.stack(
            torch.meshgrid(
                [torch.arange(win_h), torch.arange(win_w)]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :,
                                         None] - coords_flatten[:,
                                                                None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(
            1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += win_h - 1  # shift to start from 0
        relative_coords[:, :, 1] += win_w - 1
        relative_coords[:, :, 0] *= 2 * win_w - 1
        relative_coords = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Keep only query rows that fall inside the centered block region of
        # the halo window.
        _sh, _sw = (win_h - block_h) // 2, (win_w - block_w) // 2
        relative_coords = relative_coords.reshape(win_h, win_w, win_h, win_w)
        relative_coords = relative_coords[_sh:_sh + block_h,
                                          _sw:_sw + block_w, :, 0:win_w]
        relative_coords = relative_coords.reshape(block_h * block_w,
                                                  win_h * win_w)
        return relative_coords.contiguous()

    def forward(self, _):
        # Argument (the query tensor) is ignored by design.
        # 1, 4, 1, 49, 169
        # 1, num_heads, 1, block_size ** 2, win_size ** 2
        return self._get_rel_pos_bias()
class QueryRelatedPosEmbedRel(nn.Module):
    """ Relative Position Embedding
    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925

    Unlike :class:`QueryFreePosEmbedRel`, the bias is the dot product of the
    query with learned per-offset embeddings (one table per axis), so it
    depends on the query content.
    """

    def __init__(self, block_size, win_size, dim_head, scale):
        """
        Args:
            block_size (int): block size
            win_size (int): neighbourhood window size
            dim_head (int): attention head dim
            scale (float): scale factor (for init)
        """
        super().__init__()
        self.block_size = block_size
        self.dim_head = dim_head
        # Separate 1D embedding tables for vertical and horizontal offsets.
        self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
        self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)

    def forward(self, q):
        # q: (B, num_heads, num_blocks, block_size**2, dim_head)
        B, NH, BB, HW, C = q.shape
        # Merge batch and head dims so rel_logits_1d sees a flat batch.
        q = q.flatten(0, 1)
        B = B * NH
        # relative logits in width dimension.
        q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
        rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
        # relative logits in height dimension.
        q = q.transpose(1, 2)
        rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
        # Axis-wise logits add up to the full 2D relative bias.
        rel_logits = rel_logits_h + rel_logits_w
        rel_logits = rel_logits.reshape(B, BB, HW, -1)
        # bsz, num_heads, num_blocks ** 2, block_size ** 2, win_size ** 2
        return rel_logits.reshape(B // NH, NH, BB, HW, -1)
class HaloAttn(nn.Module):
    """ Halo Attention
    Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
        - https://arxiv.org/abs/2103.12731
    The internal dimensions of the attention module are controlled by the interaction of several arguments.
      * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
      * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
      * the query and key (qk) dimensions are determined by
        * num_heads * dim_head if dim_head is not None
        * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
      * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used
    Args:
        dim (int): input dimension to the module
        dim_out (int): output dimension of the module, same as dim if not set
        feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
        stride: output stride of the module, query downscaled if > 1 (default: 1).
        num_heads: parallel attention heads (default: 8).
        dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
        block_size (int): size of blocks. (default: 8)
        halo_size (int): size of halo overlap. (default: 3)
        qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
        qkv_bias (bool) : add bias to q, k, and v projections
        avg_down (bool): use average pool downsample instead of strided query blocks
        scale_pos_embed (bool): scale the position embedding as well as Q @ K
    """

    def __init__(self,
                 dim,
                 dim_out=None,
                 feat_size=None,
                 stride=1,
                 num_heads=8,
                 dim_head=None,
                 block_size=8,
                 halo_size=3,
                 qk_ratio=1.0,
                 qkv_bias=False,
                 avg_down=False,
                 pos_embed_type='query_free',
                 scale_pos_embed=False):
        super().__init__()
        dim_out = dim_out or dim
        assert dim_out % num_heads == 0
        assert stride in (1, 2)
        self.num_heads = num_heads
        # qk head width is rounded so the total qk dim is divisible by 8.
        self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio,
                                                      divisor=8) // num_heads
        self.dim_head_v = dim_out // self.num_heads
        self.dim_out_qk = num_heads * self.dim_head_qk
        self.dim_out_v = num_heads * self.dim_head_v
        self.scale = self.dim_head_qk**-0.5
        self.scale_pos_embed = scale_pos_embed
        self.block_size = self.block_size_ds = block_size
        self.halo_size = halo_size
        self.win_size = block_size + halo_size * 2  # neighbourhood window size
        self.block_stride = 1
        use_avg_pool = False
        # FIXME not clear if this stride behaviour is what the paper intended
        # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
        # data in unfolded block form. I haven't wrapped my head around how that'd look.
        # kv is a 1x1 conv on NCHW input; q is a Linear applied to NHWC input.
        self.kv = nn.Conv2d(dim,
                            self.dim_out_qk + self.dim_out_v,
                            1,
                            bias=qkv_bias)
        self.q = nn.Linear(dim, self.dim_out_qk, bias=qkv_bias)
        if pos_embed_type == 'query_free':
            self.pos_embed = QueryFreePosEmbedRel(block_size=self.block_size_ds,
                                                  win_size=self.win_size,
                                                  num_heads=num_heads)
        elif pos_embed_type == 'query_related':
            self.pos_embed = QueryRelatedPosEmbedRel(block_size=self.block_size,
                                                     win_size=self.win_size,
                                                     dim_head=self.dim_head_qk,
                                                     scale=self.scale)
        else:
            raise NotImplementedError(pos_embed_type)
        self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity()
        self.proj = nn.Linear(self.dim_out_v, self.dim_out_v)
        # Cached padding mask for the last-seen (H, W); see get_mask().
        self.H, self.W = None, None
        self.mask = None

    def forward(self, x):
        # x: (B, H, W, C) channels-last.
        B, H_origin, W_origin, C = x.shape
        # Pad bottom/right so the feature map tiles exactly into blocks.
        pad_r = (self.block_size - W_origin % self.block_size) % self.block_size
        pad_b = (self.block_size - H_origin % self.block_size) % self.block_size
        x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
        H, W = x.shape[1], x.shape[2]
        assert H % self.block_size == 0
        assert W % self.block_size == 0
        num_h_blocks = H // self.block_size
        num_w_blocks = W // self.block_size
        num_blocks = num_h_blocks * num_w_blocks
        q = self.q(x)
        # unfold
        q = q.reshape(-1, num_h_blocks, self.block_size_ds, num_w_blocks,
                      self.block_size_ds, self.num_heads,
                      self.dim_head_qk).permute(0, 5, 1, 3, 2, 4, 6).contiguous()
        # B, num_heads, num_h_blocks, num_w_blocks, block_size_ds, block_size_ds, dim_head_qk
        q = q.reshape(-1, self.num_heads, num_blocks, self.block_size**2,
                      self.dim_head_qk)
        # B, num_heads, num_blocks, block_size ** 2, dim_head
        kv = self.kv(x.permute(0, 3, 1, 2).contiguous())
        # Zero-pad by halo_size so each block can gather its surrounding halo.
        kv = F.pad(
            kv,
            [
                self.halo_size,
                self.halo_size,
                self.halo_size,
                self.halo_size,
            ],
        )
        # Gather overlapping win_size x win_size neighbourhoods, one per block.
        kv = kv.unfold(2, self.win_size, self.block_size).unfold(
            3, self.win_size,
            self.block_size).reshape(-1, self.num_heads,
                                     self.dim_head_qk + self.dim_head_v,
                                     num_blocks,
                                     self.win_size**2).permute(0, 1, 3, 4, 2).contiguous()
        k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1)
        k = k.reshape(-1, self.num_heads, num_blocks, self.win_size,
                      self.win_size, self.dim_head_qk)
        v = v.reshape(-1, self.num_heads, num_blocks, self.win_size,
                      self.win_size, self.dim_head_v)
        k = k.flatten(3, 4)
        v = v.flatten(3, 4)
        if self.scale_pos_embed:
            attn = (q @ k.transpose(-1, -2) + self.pos_embed()) * self.scale
        else:
            attn = (q * self.scale) @ k.transpose(-1, -2) + self.pos_embed(q)
        # Mask out halo positions that fall in the zero padding.
        max_neg_value = -torch.finfo(attn.dtype).max
        attn.masked_fill_(self.get_mask(H, W, attn.device), max_neg_value)
        # B, num_heads, num_blocks, block_size ** 2, win_size ** 2
        attn = attn.softmax(dim=-1)
        out = attn @ v
        # B, num_heads, num_blocks, block_size ** 2, dim_head_v
        # fold
        out = out.reshape(-1, self.num_heads, num_h_blocks, num_w_blocks,
                          self.block_size_ds, self.block_size_ds,
                          self.dim_head_qk)
        out = out.permute(0, 2, 4, 3, 5, 1, 6).reshape(B, H, W, self.dim_out_v).contiguous()
        # Drop the block padding added at the top of forward().
        out = out[:, :H_origin, :W_origin, :].contiguous()
        out = self.proj(out)
        # B, H, W, dim_out
        return out

    def get_mask(self, H, W, device):
        """Boolean mask (True = padded/invalid key) per block; cached by (H, W)."""
        if self.H == H and self.W == W and self.mask is not None:
            return self.mask
        num_h_blocks = H // self.block_size
        num_w_blocks = W // self.block_size
        num_blocks = num_h_blocks * num_w_blocks
        # Ones inside the image, zeros in the halo padding after F.pad.
        mask = torch.ones((1, 1, H, W), device=device)
        mask = F.pad(mask, [self.halo_size, self.halo_size, self.halo_size, self.halo_size])
        mask = mask.unfold(2, self.win_size, self.block_size)
        mask = mask.unfold(3, self.win_size, self.block_size)
        mask = mask.reshape(1, num_blocks, self.win_size * self.win_size)
        mask = mask.unsqueeze(-2)
        # 1, num_blocks, 1, win_size * win_size
        mask = mask.bool()
        self.H = H
        self.W = W
        # Invert: True marks positions that must be masked out.
        self.mask = ~mask
        return self.mask
class HaloBlockV2(nn.Module):
    """Transformer-style block around HaloAttn: pre-norm attention and MLP
    branches, each with optional layer scale and stochastic depth.

    Operates on channels-last (B, H, W, C) tensors; the static pre/post
    stage transforms convert from/to NCHW at stage boundaries.
    """

    def __init__(self,
                 dim,
                 drop_path,
                 layer_scale_init_value,
                 block_size,
                 halo_size,
                 stage,
                 num_heads,
                 mlp_ratio=4.,
                 drop=0.,
                 act_layer=nn.GELU,
                 pos_embed_type='query_related',
                 **kwargs):
        super().__init__()
        self.dim = dim
        self.mlp_ratio = mlp_ratio
        self.norm1 = nn.LayerNorm((dim, ))
        self.attn = HaloAttn(dim=dim,
                             dim_out=dim,
                             num_heads=num_heads[stage],
                             block_size=block_size,
                             halo_size=halo_size,
                             pos_embed_type=pos_embed_type)
        self.gamma_1 = nn.Parameter(layer_scale_init_value * torch.ones((1, 1, 1, dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = nn.LayerNorm((dim, ))
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer,
                       drop=drop)
        self.gamma_2 = nn.Parameter(layer_scale_init_value * torch.ones((1, 1, 1, dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None

    @staticmethod
    def pre_stage_transform(x):
        # NCHW -> NHWC when entering a halo stage.
        return x.permute(0, 2, 3, 1).contiguous()

    @staticmethod
    def post_stage_transform(x):
        # NHWC -> NCHW when leaving the stage.
        return x.permute(0, 3, 1, 2).contiguous()

    def forward(self, x):
        # shape: (B, H, W, C)
        def scaled(branch, gamma):
            # Layer scale is optional; gamma is None when disabled.
            return branch if gamma is None else gamma * branch

        x = x + self.drop_path(scaled(self.attn(self.norm1(x)), self.gamma_1))
        x = x + self.drop_path(scaled(self.mlp(self.norm2(x)), self.gamma_2))
        return x
@BACKBONES.register_module()
class UnifiedHalonet(MetaArch):
    """HaloNet variant of the unified meta-architecture (HaloBlockV2 blocks)."""

    def __init__(self, *args, **kwargs):
        # The block type is fixed for this backbone; override any caller value.
        kwargs.update(block_type=HaloBlockV2)
        super().__init__(*args, **kwargs)
| 15,537 | 40.10582 | 112 | py |
STM-Evaluation | STM-Evaluation-main/detection/mmdet_custom/models/backbones/blocks/convnext.py | import torch
from torch import nn
from mmdet.models.builder import BACKBONES
from timm.models.layers import DropPath, to_2tuple
from ..meta_arch import LayerNorm2d, MetaArch
class ConvNeXtBlock(nn.Module):
    """Original ConvNeXt block: depthwise conv -> LayerNorm -> channel MLP
    (Linear layers on NHWC), with optional layer scale and stochastic depth,
    all wrapped in one residual connection. Input/output are NCHW.
    """

    def __init__(self, dim, drop_path, layer_scale_init_value, kernel_size=7, **kwargs):
        super().__init__()
        # Depthwise spatial mixing; padding keeps H/W unchanged.
        self.dwconv = nn.Conv2d(dim, dim,
                                kernel_size=kernel_size,
                                padding=kernel_size // 2,
                                groups=dim)
        self.norm = nn.LayerNorm(dim, eps=1e-6)
        # pointwise/1x1 convs, implemented with linear layers
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                  requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # Keep the residual in NHWC; the input arrives as NCHW.
        residual = x.permute(0, 2, 3, 1).contiguous()
        y = self.dwconv(x)
        # (N, C, H, W) -> (N, H, W, C)
        y = y.permute(0, 2, 3, 1).contiguous()
        y = self.pwconv2(self.act(self.pwconv1(self.norm(y))))
        if self.gamma is not None:
            y = self.gamma * y
        y = residual + self.drop_path(y)
        # (N, H, W, C) -> (N, C, H, W)
        return y.permute(0, 3, 1, 2).contiguous()
"""
To distinguish between the original convnext block, we use ConvNeXtV3 to denote the model we use in our paper
"""
class ConvNeXtV3Block(nn.Module):
    """ConvNeXt variant with two residuals and in/out 1x1 projections.

    Branch 1 (NCHW): LayerNorm2d -> 1x1 in-proj -> depthwise conv -> 1x1
    out-proj, scaled by ``gamma_1`` of shape (1, C, 1, 1). Branch 2 (NHWC):
    LayerNorm -> Linear MLP, scaled by ``gamma_2`` of shape (1, 1, 1, C).
    Both branches share one DropPath module. Input/output are NCHW.
    """

    # double res + in/out proj
    def __init__(self, dim, drop_path, layer_scale_init_value, kernel_size=7, **kwargs):
        super().__init__()
        self.dw_norm = LayerNorm2d(dim, eps=1e-6)
        self.dw_input_proj = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0)
        # Depthwise spatial mixing; padding keeps H/W unchanged.
        self.dwconv = nn.Conv2d(dim, dim,
                                kernel_size=kernel_size,
                                padding=kernel_size // 2,
                                groups=dim)
        self.dw_out_proj = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0)
        # Channels-first layer scale for the conv branch.
        self.gamma_1 = nn.Parameter(layer_scale_init_value * torch.ones((1, dim, 1, 1)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None
        # pointwise/1x1 convs, implemented with linear layers
        self.pw_norm = nn.LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        # Channels-last layer scale for the MLP branch.
        self.gamma_2 = nn.Parameter(layer_scale_init_value * torch.ones((1, 1, 1, dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # Residual 1: depthwise-conv branch in NCHW.
        shortcut = x
        x = self.dwconv(self.dw_input_proj(self.dw_norm(x)))
        x = self.dw_out_proj(x)
        if self.gamma_1 is not None:
            x = self.gamma_1 * x
        x = shortcut + self.drop_path(x)
        # (N, C, H, W) -> (N, H, W, C)
        x = x.permute(0, 2, 3, 1).contiguous()
        # Residual 2: channel-MLP branch in NHWC.
        shortcut = x
        x = self.pw_norm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma_2 is not None:
            x = self.gamma_2 * x
        x = shortcut + self.drop_path(x)
        # (N, H, W, C) -> (N, C, H, W)
        x = x.permute(0, 3, 1, 2).contiguous()
        return x
class ConvNeXtStem(nn.Module):
    """Patchify stem: non-overlapping 4x4 convolution followed by channel LayerNorm."""
    def __init__(self, in_channels, out_channels, img_size, **kwargs):
        super().__init__()
        h, w = to_2tuple(img_size)
        patch_embed = nn.Conv2d(in_channels, out_channels, kernel_size=4, stride=4)
        norm = LayerNorm2d(out_channels, eps=1e-6)
        self.stem = nn.Sequential(patch_embed, norm)
        # spatial resolution after the 4x downsampling
        self.grid_size = (h // 4, w // 4)
    def forward(self, x):
        return self.stem(x)
class ConvNeXtDownsampleLayer(nn.Module):
    """Stage-transition downsampling: LayerNorm then a strided 2x2 convolution."""
    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        norm = LayerNorm2d(in_channels, eps=1e-6)
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=2, stride=2)
        self.reduction = nn.Sequential(norm, conv)
    def forward(self, x):
        return self.reduction(x)
@BACKBONES.register_module()
class UnifiedConvNeXt(MetaArch):
    """MetaArch backbone that fixes the block type to ConvNeXtV3Block."""
    def __init__(self, *args, **kwargs):
        # Force the unified architecture to use the ConvNeXtV3 block variant.
        kwargs['block_type'] = ConvNeXtV3Block
        super().__init__(*args, **kwargs)
| 4,654 | 34.534351 | 109 | py |
STM-Evaluation | STM-Evaluation-main/detection/configs/_base_/models/mask_rcnn_r50_fpn.py | # model settings
# Mask R-CNN: ResNet-50 backbone + FPN neck with standard RPN and RoI heads
# (mmdetection config dict; consumed by the mmdet model builder).
model = dict(
    type='MaskRCNN',
    # ImageNet-pretrained ResNet-50; stage 1 frozen, BN in eval mode.
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    # Class-agnostic region proposal head over the 5 FPN levels.
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # Second stage: box classification/regression + per-RoI mask prediction.
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
| 4,054 | 32.512397 | 79 | py |
loop | loop-master/generate.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import numpy as np
import phonemizer
import string
import torch
from torch.autograd import Variable
from model import Loop
from data import NpzFolder
from utils import generate_merlin_wav
# Command-line interface: generate speech from either a dataset .npz sample
# (--npz) or free text (--text), using a trained Loop checkpoint.
parser = argparse.ArgumentParser(description='PyTorch Phonological Loop \
                                    Generation')
parser.add_argument('--npz', type=str, default='',
                    help='Dataset sample to generate.')
parser.add_argument('--text', default='',
                    type=str, help='Free text to generate.')
parser.add_argument('--spkr', default=0,
                    type=int, help='Speaker id.')
parser.add_argument('--checkpoint', default='checkpoints/vctk/lastmodel.pth',
                    type=str, help='Model used for generation.')
parser.add_argument('--gpu', default=-1,
                    type=int, help='GPU device ID, use -1 for CPU.')
# init
args = parser.parse_args()
if args.gpu >= 0:
    torch.cuda.set_device(args.gpu)
def text2phone(text, char2code):
    """Phonemize free text and map each phoneme to its integer code."""
    sep = phonemizer.separator.Separator('', '', ' ')
    phonemes = phonemizer.phonemize(text, separator=sep).split(' ')
    # the trailing separator leaves one empty token; drop it
    phonemes.remove('')
    codes = [char2code[p] for p in phonemes]
    return torch.LongTensor(codes)
def trim_pred(out, attn):
    """Trim generated features at the first time step where the attention
    mass vanishes after the final input symbol has been covered.

    NOTE(review): written against a legacy torch where ``sum(dim)`` keeps a
    singleton dimension (``tq[stopi][0]`` indexes a 2-D tensor) -- confirm
    before porting to a newer PyTorch.
    """
    # total attention mass per output time step
    tq = attn.abs().sum(1).data
    for stopi in range(1, tq.size(0)):
        # attention mass accumulated per input symbol up to step stopi
        col_sum = attn[:stopi, :].abs().sum(0).data.squeeze()
        # stop once the current step attends to (almost) nothing while the
        # last input symbol has already received substantial attention
        if tq[stopi][0] < 0.5 and col_sum[-1] > 4:
            break
    out = out[:stopi, :]
    attn = attn[:stopi, :]
    return out, attn
def npy_loader_phonemes(path):
    """Load a dataset .npz sample as (phoneme LongTensor, audio-feature tensor)."""
    sample = np.load(path)
    phonemes = torch.from_numpy(sample['phonemes'].astype('int64'))
    audio = torch.from_numpy(sample['audio_features'])
    return phonemes, audio
def main():
    """Generate audio features (and a wav via the Merlin/WORLD tools) from
    either a dataset .npz sample or free text, using a trained checkpoint.

    BUG FIX: the original compared strings with ``is not ''`` (identity),
    which is unreliable and a SyntaxWarning on Python >= 3.8; use ``!=``.
    """
    weights = torch.load(args.checkpoint,
                         map_location=lambda storage, loc: storage)
    opt = torch.load(os.path.dirname(args.checkpoint) + '/args.pth')
    train_args = opt[0]
    # Fixed phoneme-to-code mapping used at training time.
    char2code = {'aa': 0, 'ae': 1, 'ah': 2, 'ao': 3, 'aw': 4, 'ax': 5, 'ay': 6,
                 'b': 7, 'ch': 8, 'd': 9, 'dh': 10, 'eh': 11, 'er': 12, 'ey': 13,
                 'f': 14, 'g': 15, 'hh': 16, 'i': 17, 'ih': 18, 'iy': 19, 'jh': 20,
                 'k': 21, 'l': 22, 'm': 23, 'n': 24, 'ng': 25, 'ow': 26, 'oy': 27,
                 'p': 28, 'pau': 29, 'r': 30, 's': 31, 'sh': 32, 'ssil': 33,
                 't': 34, 'th': 35, 'uh': 36, 'uw': 37, 'v': 38, 'w': 39, 'y': 40,
                 'z': 41}
    nspkr = train_args.nspk
    # Locate the feature-normalization statistics next to the data or model.
    norm_path = None
    if os.path.exists(train_args.data + '/norm_info/norm.dat'):
        norm_path = train_args.data + '/norm_info/norm.dat'
    elif os.path.exists(os.path.dirname(args.checkpoint) + '/norm.dat'):
        norm_path = os.path.dirname(args.checkpoint) + '/norm.dat'
    else:
        print('ERROR: Failed to find norm file.')
        return
    # Disable teacher-forcing noise for generation.
    train_args.noise = 0
    model = Loop(train_args)
    model.load_state_dict(weights)
    if args.gpu >= 0:
        model.cuda()
    model.eval()
    if args.spkr not in range(nspkr):
        print('ERROR: Unknown speaker id: %d.' % args.spkr)
        return
    txt, feat, spkr, output_fname = None, None, None, None
    if args.npz != '':
        # Source: existing dataset sample.
        txt, feat = npy_loader_phonemes(args.npz)
        txt = Variable(txt.unsqueeze(1), volatile=True)
        feat = Variable(feat.unsqueeze(1), volatile=True)
        spkr = Variable(torch.LongTensor([args.spkr]), volatile=True)
        fname = os.path.basename(args.npz)[:-4]
        output_fname = fname + '.gen_' + str(args.spkr)
    elif args.text != '':
        # Source: free text; allocate a generous feature buffer (20 frames
        # per phoneme) for the autoregressive rollout.
        txt = text2phone(args.text, char2code)
        feat = torch.FloatTensor(txt.size(0)*20, 63)
        spkr = torch.LongTensor([args.spkr])
        txt = Variable(txt.unsqueeze(1), volatile=True)
        feat = Variable(feat.unsqueeze(1), volatile=True)
        spkr = Variable(spkr, volatile=True)
        # slugify input string to file name
        fname = args.text.replace(' ', '_')
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        fname = ''.join(c for c in fname if c in valid_chars)
        output_fname = fname + '.gen_' + str(args.spkr)
    else:
        print('ERROR: Must supply npz file path or text as source.')
        return
    if args.gpu >= 0:
        txt = txt.cuda()
        feat = feat.cuda()
        spkr = spkr.cuda()
    out, attn = model([txt, spkr], feat)
    out, attn = trim_pred(out, attn)
    output_dir = os.path.join(os.path.dirname(args.checkpoint), 'results')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    generate_merlin_wav(out.data.cpu().numpy(),
                        output_dir,
                        output_fname,
                        norm_path)
    if args.npz != '':
        # Also synthesize the ground-truth features for comparison.
        output_orig_fname = os.path.basename(args.npz)[:-4] + '.orig'
        generate_merlin_wav(feat[:, 0, :].data.cpu().numpy(),
                            output_dir,
                            output_orig_fname,
                            norm_path)
# Run generation only when executed as a script.
if __name__ == '__main__':
    main()
| 5,316 | 30.461538 | 83 | py |
loop | loop-master/utils.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os
import logging
import numpy
import subprocess
import time
from datetime import timedelta
import torch
from torch.autograd import Variable
class LogFormatter():
    """Formatter that prefixes records with level, wall-clock time and the
    elapsed time since the formatter was created."""
    def __init__(self):
        # reference point for the elapsed-time column
        self.start_time = time.time()
    def format(self, record):
        elapsed = timedelta(seconds=round(record.created - self.start_time))
        prefix = "%s - %s - %s" % (record.levelname,
                                   time.strftime('%x %X'),
                                   elapsed)
        # indent continuation lines so multi-line messages stay aligned
        padding = '\n' + ' ' * (len(prefix) + 3)
        message = record.getMessage().replace('\n', padding)
        return "%s - %s" % (prefix, message)
def create_output_dir(opt):
    """Create the experiment directory (``opt.expName``) and configure the
    root logger to write DEBUG to ``main.log`` and INFO to the console.

    Returns the configured root logger, with an extra ``reset_time``
    callable attached that restarts the elapsed-time column.
    """
    filepath = os.path.join(opt.expName, 'main.log')
    if not os.path.exists(opt.expName):
        os.makedirs(opt.expName)
    # Safety check
    if os.path.exists(filepath) and opt.checkpoint == "":
        logging.warning("Experiment already exists!")
    # Create logger
    log_formatter = LogFormatter()
    # create file handler and set level to debug
    file_handler = logging.FileHandler(filepath, "a")
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(log_formatter)
    # create console handler and set level to info
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(log_formatter)
    # create logger and set level to debug; clear any pre-existing handlers
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    # quite down visdom
    logging.getLogger("requests").setLevel(logging.CRITICAL)
    logging.getLogger("urllib3").setLevel(logging.CRITICAL)
    # reset logger elapsed time
    def reset_time():
        log_formatter.start_time = time.time()
    logger.reset_time = reset_time
    logger.info(opt)
    return logger
def wrap(data, **kwargs):
    """Recursively wrap tensors (or nested sequences of tensors) in CUDA Variables."""
    if torch.is_tensor(data):
        return Variable(data, **kwargs).cuda()
    return tuple(wrap(item, **kwargs) for item in data)
def check_grad(params, clip_th, ignore_th):
    """Clip gradients in-place to ``clip_th`` and return True when the
    pre-clipping norm was non-finite or above ``ignore_th`` (i.e. the
    caller should skip this update)."""
    # clip_grad_norm returns the total norm measured *before* clipping
    befgad = torch.nn.utils.clip_grad_norm(params, clip_th)
    return (not numpy.isfinite(befgad) or (befgad > ignore_th))
# Code taken from kastnerkyle gist:
# https://gist.github.com/kastnerkyle/cc0ac48d34860c5bb3f9112f4d9a0300
# Convenience function to reuse the defined env
def pwrap(args, shell=False):
    """Spawn a subprocess with text-mode pipes on stdin/stdout/stderr."""
    return subprocess.Popen(args, shell=shell,
                            stdout=subprocess.PIPE,
                            stdin=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
# Print output
# http://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
def execute(cmd, shell=False):
    """Run ``cmd`` and yield its stdout line by line; raise
    CalledProcessError on a non-zero exit status."""
    proc = pwrap(cmd, shell=shell)
    for line in iter(proc.stdout.readline, ""):
        yield line
    proc.stdout.close()
    rc = proc.wait()
    if rc:
        raise subprocess.CalledProcessError(rc, cmd)
def pe(cmd, shell=False):
    """
    Print and execute command on system
    """
    for out_line in execute(cmd, shell=shell):
        print(out_line, end="")
def array_to_binary_file(data, output_file_name):
    """Write ``data`` to ``output_file_name`` as raw float32 bytes.

    FIX: the original opened/closed the file manually and leaked the handle
    if ``tofile`` raised; use a context manager instead.
    """
    arr = numpy.array(data, 'float32')
    with open(output_file_name, 'wb') as fid:
        arr.tofile(fid)
def load_binary_file_frame(file_name, dimension):
    """Load a raw float32 file and reshape it to ``(frames, dimension)``.

    Returns ``(features, frame_number)``.

    FIXES: use floor division for the frame count -- the original ``/``
    yields a float on Python 3 and breaks the subsequent slicing -- and a
    context manager so the file handle is always closed.
    """
    with open(file_name, 'rb') as fid_lab:
        features = numpy.fromfile(fid_lab, dtype=numpy.float32)
    assert features.size % float(dimension) == 0.0, \
        'specified dimension %s not compatible with data' % (dimension)
    frame_number = features.size // dimension
    features = features[:(dimension * frame_number)]
    features = features.reshape((-1, dimension))
    return features, frame_number
def generate_merlin_wav(
        data, gen_dir, file_basename, norm_info_file,
        do_post_filtering=True, mgc_dim=60, fl=1024, sr=16000):
    """Denormalize acoustic features and synthesize a wav file by driving
    the SPTK and WORLD command-line tools (pipeline adapted from Merlin).

    Args:
        data: (frames, 63) normalized acoustic features (mgc+lf0+vuv+bap).
        gen_dir: output directory; created if missing.
        file_basename: basename for all intermediate/output files
            (``None`` falls back to "tmp_gen_wav").
        norm_info_file: binary file holding per-dimension mean/std rows.
        do_post_filtering: apply SPTK spectral post-filtering to the mgc stream.
        mgc_dim, fl, sr: mgc order+1, FFT length, and sample rate.

    Side effects: writes ``<base>.wav`` in ``gen_dir``, temporarily changes
    the process working directory, and deletes its intermediate files.
    """
    # Made from Jose's code and Merlin
    gen_dir = os.path.abspath(gen_dir) + "/"
    if file_basename is None:
        base = "tmp_gen_wav"
    else:
        base = file_basename
    if not os.path.exists(gen_dir):
        os.mkdir(gen_dir)
    file_name = os.path.join(gen_dir, base + ".cmp")
    # Undo feature normalization: row 0 is the mean, row 1 the std.
    fid = open(norm_info_file, 'rb')
    cmp_info = numpy.fromfile(fid, dtype=numpy.float32)
    fid.close()
    cmp_info = cmp_info.reshape((2, -1))
    cmp_mean = cmp_info[0, ]
    cmp_std = cmp_info[1, ]
    data = data * cmp_std + cmp_mean
    array_to_binary_file(data, file_name)
    # This code was adapted from Merlin. All licenses apply
    # Split the 63-dim feature matrix into its component streams.
    out_dimension_dict = {'bap': 1, 'lf0': 1, 'mgc': 60, 'vuv': 1}
    stream_start_index = {}
    file_extension_dict = {
        'mgc': '.mgc', 'bap': '.bap', 'lf0': '.lf0',
        'dur': '.dur', 'cmp': '.cmp'}
    gen_wav_features = ['mgc', 'lf0', 'bap']
    # NOTE(review): stream offsets follow dict iteration order of
    # out_dimension_dict, which must match the feature layout in `data`.
    dimension_index = 0
    for feature_name in out_dimension_dict.keys():
        stream_start_index[feature_name] = dimension_index
        dimension_index += out_dimension_dict[feature_name]
    dir_name = os.path.dirname(file_name)
    file_id = os.path.splitext(os.path.basename(file_name))[0]
    features, frame_number = load_binary_file_frame(file_name, 63)
    for feature_name in gen_wav_features:
        current_features = features[
            :, stream_start_index[feature_name]:
            stream_start_index[feature_name] +
            out_dimension_dict[feature_name]]
        gen_features = current_features
        if feature_name in ['lf0', 'F0']:
            # Mark unvoiced frames with a large negative lf0 sentinel.
            if 'vuv' in stream_start_index.keys():
                vuv_feature = features[
                    :, stream_start_index['vuv']:stream_start_index['vuv'] + 1]
                for i in range(frame_number):
                    if vuv_feature[i, 0] < 0.5:
                        gen_features[i, 0] = -1.0e+10  # self.inf_float
        new_file_name = os.path.join(
            dir_name, file_id + file_extension_dict[feature_name])
        array_to_binary_file(gen_features, new_file_name)
    # SPTK/WORLD tool configuration.
    pf_coef = 1.4
    fw_alpha = 0.58
    co_coef = 511
    sptkdir = os.path.abspath(os.path.dirname(__file__) + "/tools/SPTK-3.9/") + '/'
    sptk_path = {
        'SOPR': sptkdir + 'sopr',
        'FREQT': sptkdir + 'freqt',
        'VSTAT': sptkdir + 'vstat',
        'MGC2SP': sptkdir + 'mgc2sp',
        'MERGE': sptkdir + 'merge',
        'BCP': sptkdir + 'bcp',
        'MC2B': sptkdir + 'mc2b',
        'C2ACR': sptkdir + 'c2acr',
        'MLPG': sptkdir + 'mlpg',
        'VOPR': sptkdir + 'vopr',
        'B2MC': sptkdir + 'b2mc',
        'X2X': sptkdir + 'x2x',
        'VSUM': sptkdir + 'vsum'}
    worlddir = os.path.abspath(os.path.dirname(__file__) + "/tools/WORLD/") + '/'
    world_path = {
        'ANALYSIS': worlddir + 'analysis',
        'SYNTHESIS': worlddir + 'synth'}
    fw_coef = fw_alpha
    fl_coef = fl
    files = {'sp': base + '.sp',
             'mgc': base + '.mgc',
             'f0': base + '.f0',
             'lf0': base + '.lf0',
             'ap': base + '.ap',
             'bap': base + '.bap',
             'wav': base + '.wav'}
    mgc_file_name = files['mgc']
    # The SPTK/WORLD commands below use relative paths, so run in gen_dir.
    cur_dir = os.getcwd()
    os.chdir(gen_dir)
    # post-filtering
    if do_post_filtering:
        line = "echo 1 1 "
        for i in range(2, mgc_dim):
            line = line + str(pf_coef) + " "
        pe(
            '{line} | {x2x} +af > {weight}'
            .format(
                line=line, x2x=sptk_path['X2X'],
                weight=os.path.join(gen_dir, 'weight')), shell=True)
        pe(
            '{freqt} -m {order} -a {fw} -M {co} -A 0 < {mgc} | '
            '{c2acr} -m {co} -M 0 -l {fl} > {base_r0}'
            .format(
                freqt=sptk_path['FREQT'], order=mgc_dim - 1,
                fw=fw_coef, co=co_coef, mgc=files['mgc'],
                c2acr=sptk_path['C2ACR'], fl=fl_coef,
                base_r0=files['mgc'] + '_r0'), shell=True)
        pe(
            '{vopr} -m -n {order} < {mgc} {weight} | '
            '{freqt} -m {order} -a {fw} -M {co} -A 0 | '
            '{c2acr} -m {co} -M 0 -l {fl} > {base_p_r0}'
            .format(
                vopr=sptk_path['VOPR'], order=mgc_dim - 1,
                mgc=files['mgc'],
                weight=os.path.join(gen_dir, 'weight'),
                freqt=sptk_path['FREQT'], fw=fw_coef, co=co_coef,
                c2acr=sptk_path['C2ACR'], fl=fl_coef,
                base_p_r0=files['mgc'] + '_p_r0'), shell=True)
        pe(
            '{vopr} -m -n {order} < {mgc} {weight} | '
            '{mc2b} -m {order} -a {fw} | '
            '{bcp} -n {order} -s 0 -e 0 > {base_b0}'
            .format(
                vopr=sptk_path['VOPR'], order=mgc_dim - 1,
                mgc=files['mgc'],
                weight=os.path.join(gen_dir, 'weight'),
                mc2b=sptk_path['MC2B'], fw=fw_coef,
                bcp=sptk_path['BCP'], base_b0=files['mgc'] + '_b0'), shell=True)
        pe(
            '{vopr} -d < {base_r0} {base_p_r0} | '
            '{sopr} -LN -d 2 | {vopr} -a {base_b0} > {base_p_b0}'
            .format(
                vopr=sptk_path['VOPR'],
                base_r0=files['mgc'] + '_r0',
                base_p_r0=files['mgc'] + '_p_r0',
                sopr=sptk_path['SOPR'],
                base_b0=files['mgc'] + '_b0',
                base_p_b0=files['mgc'] + '_p_b0'), shell=True)
        pe(
            '{vopr} -m -n {order} < {mgc} {weight} | '
            '{mc2b} -m {order} -a {fw} | '
            '{bcp} -n {order} -s 1 -e {order} | '
            '{merge} -n {order2} -s 0 -N 0 {base_p_b0} | '
            '{b2mc} -m {order} -a {fw} > {base_p_mgc}'
            .format(
                vopr=sptk_path['VOPR'], order=mgc_dim - 1,
                mgc=files['mgc'],
                weight=os.path.join(gen_dir, 'weight'),
                mc2b=sptk_path['MC2B'], fw=fw_coef,
                bcp=sptk_path['BCP'],
                merge=sptk_path['MERGE'], order2=mgc_dim - 2,
                base_p_b0=files['mgc'] + '_p_b0',
                b2mc=sptk_path['B2MC'],
                base_p_mgc=files['mgc'] + '_p_mgc'), shell=True)
        mgc_file_name = files['mgc'] + '_p_mgc'
    # Vocoder WORLD
    # lf0 -> linear f0 (the -1e10 sentinel maps to 0 = unvoiced).
    pe(
        '{sopr} -magic -1.0E+10 -EXP -MAGIC 0.0 {lf0} | '
        '{x2x} +fd > {f0}'
        .format(
            sopr=sptk_path['SOPR'], lf0=files['lf0'],
            x2x=sptk_path['X2X'], f0=files['f0']), shell=True)
    pe(
        '{sopr} -c 0 {bap} | {x2x} +fd > {ap}'.format(
            sopr=sptk_path['SOPR'], bap=files['bap'],
            x2x=sptk_path['X2X'], ap=files['ap']), shell=True)
    # mgc -> spectral envelope, then synthesize the waveform.
    pe(
        '{mgc2sp} -a {alpha} -g 0 -m {order} -l {fl} -o 2 {mgc} | '
        '{sopr} -d 32768.0 -P | {x2x} +fd > {sp}'.format(
            mgc2sp=sptk_path['MGC2SP'], alpha=fw_alpha,
            order=mgc_dim - 1, fl=fl, mgc=mgc_file_name,
            sopr=sptk_path['SOPR'], x2x=sptk_path['X2X'], sp=files['sp']),
        shell=True)
    pe(
        '{synworld} {fl} {sr} {f0} {sp} {ap} {wav}'.format(
            synworld=world_path['SYNTHESIS'], fl=fl, sr=sr,
            f0=files['f0'], sp=files['sp'], ap=files['ap'],
            wav=files['wav']),
        shell=True)
    # Remove all intermediate files, keeping only the wav.
    pe(
        'rm -f {ap} {sp} {f0} {bap} {lf0} {mgc} {mgc}_b0 {mgc}_p_b0 '
        '{mgc}_p_mgc {mgc}_p_r0 {mgc}_r0 {cmp} weight'.format(
            ap=files['ap'], sp=files['sp'], f0=files['f0'],
            bap=files['bap'], lf0=files['lf0'], mgc=files['mgc'],
            cmp=base + '.cmp'),
        shell=True)
    os.chdir(cur_dir)
| 11,826 | 32.036313 | 112 | py |
loop | loop-master/model.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
def getLinear(dim_in, dim_out):
    """Two-layer bottleneck MLP: dim_in -> dim_in//10 -> dim_out with a ReLU.

    FIX: use floor division for the hidden width; the original ``dim_in/10``
    produces a float on Python 3 and crashes ``nn.Linear``.
    """
    hidden = dim_in // 10
    return nn.Sequential(nn.Linear(dim_in, hidden),
                         nn.ReLU(),
                         nn.Linear(hidden, dim_out))
class MaskedMSE(nn.Module):
    """Sum-squared error over time-major padded sequences, normalized by the
    number of valid (unpadded) time steps."""
    def __init__(self):
        super(MaskedMSE, self).__init__()
        # summed (not averaged) squared error; normalization happens in forward
        self.criterion = nn.MSELoss(size_average=False)
    # Taken from
    # https://github.com/spro/practical-pytorch/blob/master/seq2seq-translation
    @staticmethod
    def _sequence_mask(sequence_length, max_len):
        """Return a (max_len, batch) float mask: 1 inside a sequence, 0 in padding."""
        batch = sequence_length.size(0)
        positions = torch.arange(0, max_len).long()
        positions = Variable(positions.unsqueeze(0).expand(batch, max_len))
        if sequence_length.is_cuda:
            positions = positions.cuda()
        limits = sequence_length.unsqueeze(1).expand_as(positions)
        return (positions < limits).t().float()
    def forward(self, input, target, lengths):
        steps = input.size(0)
        mask = self._sequence_mask(lengths, steps).unsqueeze(2)
        expanded = mask.expand_as(input)
        self.loss = self.criterion(input * expanded, target * expanded)
        self.loss = self.loss / mask.sum()
        return self.loss
class Encoder(nn.Module):
    """Embeds phoneme sequences and speaker ids into the shared hidden space."""
    def __init__(self, opt):
        super(Encoder, self).__init__()
        self.hidden_size = opt.hidden_size
        self.vocabulary_size = opt.vocabulary_size
        self.nspk = opt.nspk
        # phoneme embedding table (norm-capped)
        self.lut_p = nn.Embedding(self.vocabulary_size,
                                  self.hidden_size,
                                  max_norm=1.0)
        # speaker embedding table (norm-capped)
        self.lut_s = nn.Embedding(self.nspk,
                                  self.hidden_size,
                                  max_norm=1.0)
    def forward(self, input, speakers):
        # A tuple input carries (padded sequences, lengths): round-trip the
        # embeddings through pack/unpack so padding positions are zeroed.
        if isinstance(input, tuple):
            seq_lengths = input[1].data.view(-1).tolist()
            packed = pack(self.lut_p(input[0]), seq_lengths)
            outputs = unpack(packed)[0]
        else:
            outputs = self.lut_p(input)
        ident = self.lut_s(speakers)
        if ident.dim() == 3:
            ident = ident.squeeze(1)
        return outputs, ident
class GravesAttention(nn.Module):
    """Monotonic Gaussian-mixture attention in the style of Graves,
    "Generating Sequences with Recurrent Neural Networks" (2013).

    A linear layer predicts, from the flattened decoder buffer, K mixture
    weights, widths and forward steps; the mixture means only ever move
    forward, which enforces monotonic alignment over the input.
    """
    COEF = 0.3989422917366028  # numpy.sqrt(1/(2*numpy.pi))
    def __init__(self, batch_size, mem_elem, K, attention_alignment):
        super(GravesAttention, self).__init__()
        self.K = K
        # scales the per-step advance of the Gaussian means
        self.attention_alignment = attention_alignment
        self.epsilon = 1e-5
        # NOTE(review): dim-less Softmax relies on the legacy implicit-dim
        # behavior -- confirm before porting to a newer PyTorch.
        self.sm = nn.Softmax()
        self.N_a = getLinear(mem_elem, 3*K)
        # fixed position grid (max input length 500), broadcast over batch/K
        self.J = Variable(torch.arange(0, 500)
                               .expand_as(torch.Tensor(batch_size,
                                                       self.K,
                                                       500)),
                          requires_grad=False)
    def forward(self, C, context, mu_tm1):
        """C: decoder buffer; context: encoder outputs (batch, len, dim);
        mu_tm1: previous Gaussian means. Returns (context vector, new means,
        attention weights)."""
        gbk_t = self.N_a(C.view(C.size(0), C.size(1) * C.size(2)))
        gbk_t = gbk_t.view(gbk_t.size(0), -1, self.K)
        # attention model parameters
        g_t = gbk_t[:, 0, :]
        b_t = gbk_t[:, 1, :]
        k_t = gbk_t[:, 2, :]
        # attention GMM parameters
        g_t = self.sm(g_t) + self.epsilon
        sig_t = torch.exp(b_t) + self.epsilon
        # means advance monotonically: exp(k) > 0
        mu_t = mu_tm1 + self.attention_alignment * torch.exp(k_t)
        g_t = g_t.unsqueeze(2).expand(g_t.size(0),
                                      g_t.size(1),
                                      context.size(1))
        sig_t = sig_t.unsqueeze(2).expand_as(g_t)
        mu_t_ = mu_t.unsqueeze(2).expand_as(g_t)
        j = self.J[:g_t.size(0), :, :context.size(1)]
        # attention weights
        phi_t = g_t * torch.exp(-0.5 * sig_t * (mu_t_ - j)**2)
        alpha_t = self.COEF * torch.sum(phi_t, 1)
        # weighted sum of encoder outputs
        c_t = torch.bmm(alpha_t, context).transpose(0, 1).squeeze(0)
        return c_t, mu_t, alpha_t
class Decoder(nn.Module):
    """Memory-buffer decoder of the phonological loop.

    Keeps a shift-register buffer S of the last ``mem_size`` frames of
    (context + speaker, scaled previous output); at each step it attends
    over the encoder context, shifts the buffer, and predicts the next
    acoustic frame from the whole buffer.
    """
    def __init__(self, opt):
        super(Decoder, self).__init__()
        self.K = opt.K
        self.hidden_size = opt.hidden_size
        self.output_size = opt.output_size
        self.mem_size = opt.mem_size
        self.mem_feat_size = opt.output_size + opt.hidden_size
        # total number of scalars in the flattened buffer
        self.mem_elem = self.mem_size * self.mem_feat_size
        self.attn = GravesAttention(opt.batch_size,
                                    self.mem_elem,
                                    self.K,
                                    opt.attention_alignment)
        # buffer -> hidden, for output prediction
        self.N_o = getLinear(self.mem_elem, self.hidden_size)
        self.output = nn.Linear(self.hidden_size, self.output_size)
        # buffer -> new buffer slot
        self.N_u = getLinear(self.mem_elem, self.mem_feat_size)
        # speaker projections for the update and output paths
        self.F_u = nn.Linear(self.hidden_size, self.hidden_size)
        self.F_o = nn.Linear(self.hidden_size, self.hidden_size)
    def init_buffer(self, ident, start=True):
        """Reset (start=True) or detach (start=False, for tbptt) the buffer
        S_t and the attention means mu_t."""
        mem_feat_size = self.hidden_size + self.output_size
        batch_size = ident.size(0)
        if start:
            self.mu_t = Variable(ident.data.new(batch_size, self.K).zero_())
            self.S_t = Variable(ident.data.new(batch_size,
                                               mem_feat_size,
                                               self.mem_size).zero_())
            # initialize with identity
            self.S_t[:, :self.hidden_size, :] = ident.unsqueeze(2) \
                                                     .expand(ident.size(0),
                                                             ident.size(1),
                                                             self.mem_size)
        else:
            # cut the backprop graph between tbptt segments
            self.mu_t = self.mu_t.detach()
            self.S_t = self.S_t.detach()
    def update_buffer(self, S_tm1, c_t, o_tm1, ident):
        """Shift the buffer left and write a new speaker-conditioned slot
        computed from the shifted contents."""
        # concat previous output & context
        idt = torch.tanh(self.F_u(ident))
        o_tm1 = o_tm1.squeeze(0)
        # NOTE(review): /30 rescales the acoustic frame into the embedding
        # range -- confirm against the feature normalization.
        z_t = torch.cat([c_t + idt, o_tm1/30], 1)
        z_t = z_t.unsqueeze(2)
        Sp = torch.cat([z_t, S_tm1[:, :, :-1]], 2)
        # update S
        u = self.N_u(Sp.view(Sp.size(0), -1))
        u[:, :idt.size(1)] = u[:, :idt.size(1)] + idt
        u = u.unsqueeze(2)
        S = torch.cat([u, S_tm1[:, :, :-1]], 2)
        return S
    def forward(self, x, ident, context, start=True):
        """x: teacher-forcing targets (time-major); ident: speaker embedding;
        context: encoder outputs. In eval mode the model feeds back its own
        previous prediction instead of x."""
        out, attns = [], []
        o_t = x[0]
        self.init_buffer(ident, start)
        for o_tm1 in torch.split(x, 1):
            if not self.training:
                # autoregressive feedback at inference time
                o_tm1 = o_t.unsqueeze(0)
            # predict weighted context based on S
            c_t, mu_t, alpha_t = self.attn(self.S_t,
                                           context.transpose(0, 1),
                                           self.mu_t)
            # advance mu and update buffer
            self.S_t = self.update_buffer(self.S_t, c_t, o_tm1, ident)
            self.mu_t = mu_t
            # predict next time step based on buffer content
            ot_out = self.N_o(self.S_t.view(self.S_t.size(0), -1))
            sp_out = self.F_o(ident)
            o_t = self.output(ot_out + sp_out)
            out += [o_t]
            attns += [alpha_t.squeeze()]
        out_seq = torch.stack(out)
        attns_seq = torch.stack(attns)
        return out_seq, attns_seq
class Loop(nn.Module):
    """VoiceLoop TTS model: phoneme/speaker encoder + buffer decoder.

    Training uses noisy teacher forcing (inputs are the targets shifted by
    one step plus Gaussian noise); evaluation feeds zeros so the decoder
    runs autoregressively.
    """
    def __init__(self, opt):
        super(Loop, self).__init__()
        self.encoder = Encoder(opt)
        self.decoder = Decoder(opt)
        self.noise = opt.noise
        self.output_size = opt.output_size
    def init_input(self, tgt, start):
        """Build the decoder input sequence from the targets, carrying the
        last frame across tbptt segments in self.x_tm1."""
        if start:
            self.x_tm1 = torch.zeros(1, tgt.size(1), tgt.size(2)).type_as(tgt.data)
        # shift targets right by one step, seeded with the carried frame
        if tgt.size(0) > 1:
            inp = torch.cat([self.x_tm1, tgt[:-1].data])
        else:
            inp = self.x_tm1
        # teacher-forcing noise regularizer (training only in practice)
        if self.noise > 0:
            noise = tgt.data.new(inp.size()).normal_(0, self.noise)
            inp += noise
        if not self.training:
            # at eval time the decoder ignores these values and feeds back
            # its own predictions; zero them out
            inp.zero_()
        self.x_tm1 = tgt[-1].data.unsqueeze(0)
        return Variable(inp)
    def cuda(self, device_id=None):
        nn.Module.cuda(self, device_id)
        # the attention position grid is a plain Variable, not a parameter,
        # so it must be moved explicitly
        self.decoder.attn.J = self.decoder.attn.J.cuda(device_id)
    def forward(self, src, tgt, start=True):
        """src: [phoneme input, speaker ids]; tgt: target features
        (time-major); start: False to continue a tbptt segment."""
        x = self.init_input(tgt, start)
        context, ident = self.encoder(src[0], src[1])
        out, attn = self.decoder(x, ident, context, start)
        return out, attn
| 8,810 | 34.103586 | 83 | py |
loop | loop-master/data.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from collections import defaultdict
import numpy as np
import os
import torch
import torch.utils.data as data
# Taken from
# https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/Dataset.py
def batchify(data):
    """Zero-pad a list of 1-D or 2-D tensors along dim 0 into one batch tensor.

    Returns (out, lengths): out is (batch, max_len) or (batch, max_len, feat),
    lengths holds each sequence's original size.
    """
    lengths = [seq.size(0) for seq in data]
    max_length = max(lengths)
    if data[0].dim() == 1:
        shape = (len(data), max_length)
    else:
        shape = (len(data), max_length, data[0].size(1))
    out = data[0].new(*shape).fill_(0)
    for i, seq in enumerate(data):
        out[i].narrow(0, 0, seq.size(0)).copy_(seq)
    return out, lengths
def collate_by_input_length(batch, max_seq_len):
"Puts each data field into a tensor with outer dimension batch size"
if torch.is_tensor(batch[0]):
return batchify(batch)
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
else:
new_batch = [x for x in batch if x[1].size(0) < max_seq_len]
if len(batch) == 0:
return (None, None), (None, None), None
batch = new_batch
transposed = zip(*batch)
(srcBatch, srcLengths), (tgtBatch, tgtLengths), speakers = \
[collate_by_input_length(samples, max_seq_len)
for samples in transposed]
# within batch sorting by decreasing length for variable length rnns
batch = zip(srcBatch, tgtBatch, tgtLengths, speakers)
batch, srcLengths = zip(*sorted(zip(batch, srcLengths),
key=lambda x: -x[1]))
srcBatch, tgtBatch, tgtLengths, speakers = zip(*batch)
srcBatch = torch.stack(srcBatch, 0).transpose(0, 1).contiguous()
tgtBatch = torch.stack(tgtBatch, 0).transpose(0, 1).contiguous()
srcLengths = torch.LongTensor(srcLengths)
tgtLengths = torch.LongTensor(tgtLengths)
speakers = torch.LongTensor(speakers).view(-1, 1)
return (srcBatch, srcLengths), (tgtBatch, tgtLengths), speakers
raise TypeError(("batch must contain tensors, numbers, dicts or \
lists; found {}".format(type(batch[0]))))
class NpzFolder(data.Dataset):
    """Dataset over a directory tree of .npz samples.

    Each sample file holds ``phonemes`` (int codes), ``audio_features`` and
    a shared ``code2phone`` table; the speaker id is parsed from the
    filename prefix before the first underscore.
    """
    NPZ_EXTENSION = 'npz'
    def __init__(self, root, single_spkr=False):
        self.root = root
        self.npzs = self.make_dataset(self.root)
        if len(self.npzs) == 0:
            raise(RuntimeError("Found 0 npz in subfolders of: " + root + "\n"
                               "Supported image extensions are: " +
                               self.NPZ_EXTENSION))
        if single_spkr:
            # map every file to speaker 0
            self.speakers = defaultdict(lambda: 0)
        else:
            # speaker name -> contiguous integer id, sorted for determinism
            self.speakers = []
            for fname in self.npzs:
                self.speakers += [os.path.basename(fname).split('_')[0]]
            self.speakers = list(set(self.speakers))
            self.speakers.sort()
            self.speakers = {v: i for i, v in enumerate(self.speakers)}
        # phoneme string -> integer code, read from the first sample
        code2phone = np.load(self.npzs[0])['code2phone']
        self.dict = {v: k for k, v in enumerate(code2phone)}
    def __getitem__(self, index):
        path = self.npzs[index]
        txt, feat, spkr = self.loader(path)
        return txt, feat, self.speakers[spkr]
    def __len__(self):
        return len(self.npzs)
    def make_dataset(self, dir):
        """Collect all file paths under ``dir`` whose name contains 'npz'."""
        images = []
        for root, _, fnames in sorted(os.walk(dir)):
            for fname in fnames:
                if self.NPZ_EXTENSION in fname:
                    path = os.path.join(root, fname)
                    images.append(path)
        return images
    def loader(self, path):
        """Load one sample: (phoneme LongTensor, feature tensor, speaker name)."""
        feat = np.load(path)
        txt = feat['phonemes'].astype('int64')
        txt = torch.from_numpy(txt)
        audio = feat['audio_features']
        audio = torch.from_numpy(audio)
        spkr = os.path.basename(path).split('_')[0]
        return txt, audio, spkr
class NpzLoader(data.DataLoader):
    """DataLoader that length-filters and collates batches via
    ``collate_by_input_length``; takes an extra ``max_seq_len`` keyword."""
    def __init__(self, *args, **kwargs):
        # consume our extra keyword before delegating to DataLoader
        max_seq_len = kwargs.pop('max_seq_len')
        kwargs['collate_fn'] = partial(collate_by_input_length,
                                       max_seq_len=max_seq_len)
        data.DataLoader.__init__(self, *args, **kwargs)
class TBPTTIter(object):
    """
    Iterator for truncated batch propagation through time(tbptt) training.
    Target sequence is segmented while input sequence remains the same.

    Yields (src, (tgt_segment, tgt_segment_lengths), speakers, start) where
    ``start`` is True only for the first segment.
    """
    def __init__(self, src, trgt, spkr, seq_len):
        self.seq_len = seq_len
        self.start = True
        self.speakers = spkr
        self.srcBatch = src[0]
        self.srcLenths = src[1]
        # split batch (reversed so pop() yields segments in order)
        self.tgtBatch = list(torch.split(trgt[0], self.seq_len, 0))
        self.tgtBatch.reverse()
        self.len = len(self.tgtBatch)
        # split length list
        batch_seq_len = len(self.tgtBatch)
        self.tgtLenths = [self.split_length(l, batch_seq_len) for l in trgt[1]]
        self.tgtLenths = torch.stack(self.tgtLenths)
        self.tgtLenths = list(torch.split(self.tgtLenths, 1, 1))
        self.tgtLenths = [x.squeeze() for x in self.tgtLenths]
        self.tgtLenths.reverse()
        assert len(self.tgtLenths) == len(self.tgtBatch)
    def split_length(self, seq_size, batch_seq_len):
        """Per-segment valid lengths of one sequence, zero-padded to the
        number of segments in the batch."""
        # coerce tensor elements (robustness) and use floor division --
        # BUG FIX: the original ``/`` yields a float on Python 3 and
        # ``[x] * float`` raises TypeError.
        seq_size = int(seq_size)
        seq = [self.seq_len] * (seq_size // self.seq_len)
        if seq_size % self.seq_len != 0:
            seq += [seq_size % self.seq_len]
        seq += [0] * (batch_seq_len - len(seq))
        return torch.LongTensor(seq)
    def __next__(self):
        if len(self.tgtBatch) == 0:
            raise StopIteration()
        # start is True only until the first segment has been emitted
        if self.len > len(self.tgtBatch):
            self.start = False
        return (self.srcBatch, self.srcLenths), \
            (self.tgtBatch.pop(), self.tgtLenths.pop()), \
            self.speakers, self.start
    next = __next__
    def __iter__(self):
        return self
    def __len__(self):
        return self.len
| 6,273 | 31.174359 | 79 | py |
loop | loop-master/train.py | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import visdom
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim
from data import NpzFolder, NpzLoader, TBPTTIter
from model import Loop, MaskedMSE
from utils import create_output_dir, wrap, check_grad
# Command-line interface and one-time setup (seeds, logging, visdom, data).
parser = argparse.ArgumentParser(description='PyTorch Loop')
# Env options:
parser.add_argument('--epochs', type=int, default=92, metavar='N',
                    help='number of epochs to train (default: 92)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--expName', type=str, default='vctk', metavar='E',
                    help='Experiment name')
parser.add_argument('--data', default='data/vctk',
                    metavar='D', type=str, help='Data path')
parser.add_argument('--checkpoint', default='',
                    metavar='C', type=str, help='Checkpoint path')
parser.add_argument('--gpu', default=0,
                    metavar='G', type=int, help='GPU device ID')
parser.add_argument('--visualize', action='store_true',
                    help='Visualize train and validation loss.')
# Data options
parser.add_argument('--seq-len', type=int, default=100,
                    help='Sequence length for tbptt')
parser.add_argument('--max-seq-len', type=int, default=1000,
                    help='Max sequence length for tbptt')
parser.add_argument('--batch-size', type=int, default=64,
                    help='Batch size')
parser.add_argument('--lr', type=float, default=1e-4,
                    help='Learning rate')
parser.add_argument('--clip-grad', type=float, default=0.5,
                    help='maximum norm of gradient clipping')
parser.add_argument('--ignore-grad', type=float, default=10000.0,
                    help='ignore grad before clipping')
# Model options
parser.add_argument('--vocabulary-size', type=int, default=44,
                    help='Vocabulary size')
parser.add_argument('--output-size', type=int, default=63,
                    help='Size of decoder output vector')
parser.add_argument('--hidden-size', type=int, default=256,
                    help='Hidden layer size')
parser.add_argument('--K', type=int, default=10,
                    help='No. of attention guassians')
parser.add_argument('--noise', type=int, default=4,
                    help='Noise level to use')
parser.add_argument('--attention-alignment', type=float, default=0.05,
                    help='# of features per letter/phoneme')
parser.add_argument('--nspk', type=int, default=22,
                    help='Number of speakers')
parser.add_argument('--mem-size', type=int, default=20,
                    help='Memory number of segments')
# init
args = parser.parse_args()
args.expName = os.path.join('checkpoints', args.expName)
torch.cuda.set_device(args.gpu)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logging = create_output_dir(args)
vis = visdom.Visdom(env=args.expName)
# data
logging.info("Building dataset.")
train_dataset = NpzFolder(args.data + '/numpy_features', args.nspk == 1)
train_loader = NpzLoader(train_dataset,
                         max_seq_len=args.max_seq_len,
                         batch_size=args.batch_size,
                         num_workers=4,
                         pin_memory=True,
                         shuffle=True)
valid_dataset = NpzFolder(args.data + '/numpy_features_valid', args.nspk == 1)
valid_loader = NpzLoader(valid_dataset,
                         max_seq_len=args.max_seq_len,
                         batch_size=args.batch_size,
                         num_workers=4,
                         pin_memory=True)
logging.info("Dataset ready!")
def train(model, criterion, optimizer, epoch, train_losses):
    """Run one training epoch with truncated BPTT; append the mean loss.

    Iterates the global ``train_loader``; each full sequence is split into
    windows of ``args.seq_len`` frames and gradients flow across windows of
    the same sequence (cleared only at a sequence start).
    """
    running_loss = 0
    model.train()
    progress = tqdm(train_loader, desc='Train epoch %d' % epoch)
    for full_txt, full_feat, full_spkr in progress:
        # Slice the full sequence into TBPTT windows.
        chunks = TBPTTIter(full_txt, full_feat, full_spkr, args.seq_len)
        seq_loss = 0
        for txt, feat, spkr, start in chunks:
            txt_var = wrap(txt)
            feat_var = wrap(feat)
            spkr_var = wrap(spkr)
            # Gradients accumulate across windows of one sequence; reset
            # them only when a brand-new sequence begins.
            if start:
                optimizer.zero_grad()
            output, _ = model([txt_var, spkr_var], feat_var[0], start)
            loss = criterion(output, feat_var[0], feat_var[1])
            loss.backward()
            # Drop the whole update on non-finite or exploding gradients.
            if check_grad(model.parameters(), args.clip_grad, args.ignore_grad):
                logging.info('Not a finite gradient or too big, ignoring.')
                optimizer.zero_grad()
                continue
            optimizer.step()
            seq_loss += loss.data[0]
        seq_loss = seq_loss / len(chunks)
        running_loss += seq_loss
        progress.set_description('Train (loss %.2f) epoch %d'
                                 % (seq_loss, epoch))
    avg = running_loss / len(train_loader)
    train_losses.append(avg)
    if args.visualize:
        vis.line(Y=np.asarray(train_losses),
                 X=torch.arange(1, 1 + len(train_losses)),
                 opts=dict(title="Train"),
                 win='Train loss ' + args.expName)
    logging.info('====> Train set loss: {:.4f}'.format(avg))
def evaluate(model, criterion, epoch, eval_losses):
    """Run one pass over the validation set; append and return the avg loss.

    Iterates the global ``valid_loader`` with volatile (no-grad) variables.
    Returns the epoch's average validation loss.
    """
    total = 0
    # Bug fix: train() switches the model to train mode and nothing switched
    # it back, so validation previously ran with dropout / batch-norm in
    # training mode, skewing the reported loss.
    model.eval()
    valid_enum = tqdm(valid_loader, desc='Valid epoch %d' % epoch)
    for txt, feat, spkr in valid_enum:
        # volatile=True: legacy (pre-0.4) PyTorch inference mode, no autograd.
        input = wrap(txt, volatile=True)
        target = wrap(feat, volatile=True)
        spkr = wrap(spkr, volatile=True)
        output, _ = model([input, spkr], target[0])
        loss = criterion(output, target[0], target[1])
        total += loss.data[0]
        valid_enum.set_description('Valid (loss %.2f) epoch %d' %
                                   (loss.data[0], epoch))
    avg = total / len(valid_loader)
    eval_losses.append(avg)
    if args.visualize:
        vis.line(Y=np.asarray(eval_losses),
                 X=torch.arange(1, 1 + len(eval_losses)),
                 opts=dict(title="Eval"),
                 win='Eval loss ' + args.expName)
    logging.info('====> Test set loss: {:.4f}'.format(avg))
    return avg
def main():
    """Build the model, optionally resume from a checkpoint, then train."""
    start_epoch = 1
    model = Loop(args)
    model.cuda()
    # Resuming: the epoch counter is the 4th entry of the saved args list
    # sitting next to the weight checkpoint.
    if args.checkpoint != '':
        saved_args_path = os.path.dirname(args.checkpoint) + '/args.pth'
        saved_args = torch.load(saved_args_path)
        start_epoch = saved_args[3]
        model.load_state_dict(torch.load(args.checkpoint))
    criterion = MaskedMSE().cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Loss histories, shared with train()/evaluate() which append to them.
    train_losses = []
    eval_losses = []
    best_eval = float('inf')
    # Begin!
    for epoch in range(start_epoch, start_epoch + args.epochs):
        train(model, criterion, optimizer, epoch, train_losses)
        eval_loss = evaluate(model, criterion, epoch, eval_losses)
        # Track the best validation weights alongside the latest ones.
        if eval_loss < best_eval:
            torch.save(model.state_dict(), '%s/bestmodel.pth' % (args.expName))
            best_eval = eval_loss
        torch.save(model.state_dict(), '%s/lastmodel.pth' % (args.expName))
        torch.save([args, train_losses, eval_losses, epoch],
                   '%s/args.pth' % (args.expName))


if __name__ == '__main__':
    main()
| 7,591 | 34.811321 | 80 | py |
ParallelWaveGAN | ParallelWaveGAN-master/setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Setup Parallel WaveGAN library."""
import os
import pip
import sys
from distutils.version import LooseVersion
from setuptools import find_packages
from setuptools import setup

# Fail fast with an actionable message on unsupported interpreters/tools.
if LooseVersion(sys.version) < LooseVersion("3.7"):
    raise RuntimeError(
        "parallel-wavegan requires Python>=3.7, "
        "but your Python is {}".format(sys.version)
    )
if LooseVersion(pip.__version__) < LooseVersion("19"):
    raise RuntimeError(
        "pip>=19.0.0 is required, but your pip is {}. "
        'Try again after "pip install -U pip"'.format(pip.__version__)
    )

# Dependency groups: "install"/"setup" are consumed directly below; every
# other key becomes a pip extra (see extras_require).
requirements = {
    "install": [
        "torch>=1.4",
        "setuptools>=38.5.1",
        "librosa>=0.8.0",
        "soundfile>=0.10.2",
        "tensorboardX>=1.8",
        "matplotlib>=3.1.0",
        "PyYAML>=3.12",
        "tqdm>=4.26.1",
        "kaldiio>=2.14.1",
        "h5py>=2.9.0",
        "yq>=2.10.0",
        "gdown",
        "filelock",
        # https://github.com/kan-bayashi/ParallelWaveGAN/runs/6646851174?check_suite_focus=true#step:6:294
        "protobuf<=3.20.1",
    ],
    "setup": [
        "numpy",
        "pytest-runner",
    ],
    "test": [
        "pytest>=3.3.0",
        "hacking>=4.1.0",
        "flake8-docstrings>=1.3.1",
        "black",
        "isort",
    ],
}
entry_points = {
    "console_scripts": [
        "parallel-wavegan-preprocess=parallel_wavegan.bin.preprocess:main",
        "parallel-wavegan-compute-statistics=parallel_wavegan.bin.compute_statistics:main",
        "parallel-wavegan-normalize=parallel_wavegan.bin.normalize:main",
        "parallel-wavegan-train=parallel_wavegan.bin.train:main",
        "parallel-wavegan-decode=parallel_wavegan.bin.decode:main",
    ]
}
install_requires = requirements["install"]
setup_requires = requirements["setup"]
tests_require = requirements["test"]
extras_require = {
    k: v for k, v in requirements.items() if k not in ["install", "setup"]
}
dirname = os.path.dirname(__file__)
# Bug fix: previously the README handle was opened inline in the setup()
# call and never closed; read it up front with a context manager instead.
with open(os.path.join(dirname, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
setup(
    name="parallel_wavegan",
    version="0.6.0",
    url="http://github.com/kan-bayashi/ParallelWaveGAN",
    author="Tomoki Hayashi",
    author_email="hayashi.tomoki@g.sp.m.is.nagoya-u.ac.jp",
    description="Parallel WaveGAN implementation",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT License",
    packages=find_packages(include=["parallel_wavegan*"]),
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    extras_require=extras_require,
    entry_points=entry_points,
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Intended Audience :: Science/Research",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 3,056 | 29.878788 | 106 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.