code stringlengths 17 6.64M |
|---|
class Resnet3dCSNiRMultiClass(Resnet3dChannelSeparated_ir):
    """Channel-separated 3D ResNet (ir) driving a multi-class decoder and an embedding decoder."""

    def __init__(self, tw=16, sample_size=112, e_dim=7, n_classes=2):
        decoder_heads = [
            DecoderMultiClass(n_classes=n_classes),
            DecoderEmbedding(n_classes=e_dim),
        ]
        super(Resnet3dCSNiRMultiClass, self).__init__(
            tw=tw,
            sample_size=sample_size,
            e_dim=e_dim,
            n_classes=n_classes,
            decoders=decoder_heads,
        )
|
def propagate3d(model, inputs, ref_mask, proposals):
    """Run ``model`` on a clip and reference mask.

    ``proposals`` is accepted for interface compatibility but not used here.
    """
    # The clip must contain at least two frames along the temporal axis.
    assert inputs.shape[2] >= 2
    return model(inputs, ref_mask)
|
def run_forward(model, inputs, ref_masks, proposals):
    """Thin wrapper that forwards all arguments to :func:`propagate3d`."""
    result = propagate3d(model, inputs, ref_masks, proposals)
    return result
|
def get_backbone_fn(backbone):
    """Return the member of ``Resnet3d`` that creates the requested backbone.

    :param backbone: name of the backbone factory function
    :return: the matching callable
    :raises ValueError: if ``Resnet3d`` has no member with that name
    """
    for name, fn in inspect.getmembers(Resnet3d):
        if name == backbone:
            return fn
    raise ValueError("Backbone {} can't be found".format(backbone))
|
def get_module(module):
    """Return the class named ``module`` from the ``Modules`` namespace.

    :raises ValueError: if the name is not found. (Previously a miss raised an
        opaque IndexError from ``_cls[0]``; ValueError matches get_backbone_fn.)
    """
    for name, cls in inspect.getmembers(Modules):
        if name == module:
            return cls
    raise ValueError("Module {} can't be found".format(module))
|
class _NonLocalBlockND(nn.Module):
    """Generic embedded-Gaussian non-local block for 1D/2D/3D feature maps.

    theta/phi/g are 1x1 projections to a bottleneck width; the attention map
    softmax(theta . phi) re-weights g, and W projects back to ``in_channels``
    with a residual connection. With ``bn_layer=True`` the trailing BatchNorm
    is zero-initialised so the block starts out as the identity mapping.
    """

    def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True, return_sim=False):
        super(_NonLocalBlockND, self).__init__()
        assert (dimension in [1, 2, 3])
        self.dimension = dimension
        self.sub_sample = sub_sample
        self.return_sim = return_sim
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        # Default bottleneck width: half the input channels, at least 1.
        if (self.inter_channels is None):
            self.inter_channels = (in_channels // 2)
            if (self.inter_channels == 0):
                self.inter_channels = 1
        # Pick conv / pooling / norm flavours matching the dimensionality.
        if (dimension == 3):
            conv_nd = nn.Conv3d
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.BatchNorm3d
        elif (dimension == 2):
            conv_nd = nn.Conv2d
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.BatchNorm2d
        else:
            conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=2)
            bn = nn.BatchNorm1d
        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        if bn_layer:
            # Zero-init the BN scale/shift: the non-local branch contributes
            # nothing at the start of training.
            self.W = nn.Sequential(conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0), bn(self.in_channels))
            nn.init.constant_(self.W[1].weight, 0)
            nn.init.constant_(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0)
            nn.init.constant_(self.W.weight, 0)
            nn.init.constant_(self.W.bias, 0)
        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        if sub_sample:
            # Subsample keys and values to reduce the attention cost.
            self.g = nn.Sequential(self.g, max_pool_layer)
            self.phi = nn.Sequential(self.phi, max_pool_layer)

    def forward(self, x):
        """Apply non-local attention.

        :param x: tensor of shape (b, c, t, h, w) when dimension == 3
        :return: z with the same shape as x, or (z, attention) if return_sim
        """
        batch_size = x.size(0)
        # Flatten all positions: values become (b, N_kv, inter).
        g_x = self.g(x).view(batch_size, self.inter_channels, (- 1))
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_channels, (- 1))
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, (- 1))
        # Pairwise similarity of every query position with every key position.
        f = torch.matmul(theta_x, phi_x)
        f_div_C = F.softmax(f, dim=(- 1))
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        # Residual connection back to the input.
        z = (W_y + x)
        return ((z, f_div_C) if self.return_sim else z)
|
class NONLocalBlock1D(_NonLocalBlockND):
    """1-D specialisation of the generic non-local block."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NONLocalBlock1D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=1,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
|
class NONLocalBlock2D(_NonLocalBlockND):
    """2-D specialisation of the generic non-local block."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super(NONLocalBlock2D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=2,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )
|
class NONLocalBlock3D(_NonLocalBlockND):
    """3-D specialisation of the generic non-local block; can also return the attention map."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True, return_sim=False):
        super(NONLocalBlock3D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=3,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
            return_sim=return_sim,
        )
|
def r2plus1d_34(num_classes, pretrained=False, progress=False, arch=None):
    """Build an R(2+1)D-34 video network, optionally loading weights for ``arch``.

    After loading, ``conv1`` is aliased to the stem so the encoder wrappers in
    this file can address it uniformly.
    """
    net = VideoResNet(block=BasicBlock, conv_makers=[Conv2Plus1D] * 4,
                      layers=[3, 4, 6, 3], stem=R2Plus1dStem)
    net.fc = nn.Linear(net.fc.in_features, out_features=num_classes)
    # Replace the middle conv of each downsampling stage with wider mid-planes.
    net.layer2[0].conv2[0] = Conv2Plus1D(128, 128, 288)
    net.layer3[0].conv2[0] = Conv2Plus1D(256, 256, 576)
    net.layer4[0].conv2[0] = Conv2Plus1D(512, 512, 1152)
    # Match the BatchNorm hyper-parameters used by the released weights.
    for module in net.modules():
        if isinstance(module, nn.BatchNorm3d):
            module.eps = 0.001
            module.momentum = 0.9
    if pretrained:
        weights = torch.hub.load_state_dict_from_url(model_urls[arch], progress=progress)
        net.load_state_dict(weights)
    net.conv1 = net.stem
    return net
|
class Encoder(nn.Module):
    """ResNet-50 based 2D encoder fusing an RGB frame with an optional mask/prior.

    ``forward`` returns the (res5, res4, res3, res2) feature maps. At least one
    of ``in_f`` (image) and ``in_p`` (mask) must be given.
    """

    def __init__(self, n_classes=1):
        super(Encoder, self).__init__()
        # Separate stem for the single-channel mask/prior input.
        self.conv1_p = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=True)
        resnet = models.resnet50(pretrained=True)
        self.conv1 = resnet.conv1
        self.bn1 = resnet.bn1
        self.relu = resnet.relu
        self.maxpool = resnet.maxpool
        self.res2 = resnet.layer1
        self.res3 = resnet.layer2
        self.res4 = resnet.layer3
        self.res5 = resnet.layer4
        # ImageNet normalisation constants.
        self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def freeze_batchnorm(self):
        """Stop gradient updates for every BatchNorm2d affine parameter."""
        print('Freezing batchnorm for Resnet50')
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                for p in m.parameters():
                    p.requires_grad = False

    def forward(self, in_f, in_p):
        assert (in_f is not None) or (in_p is not None)
        # BUGFIX: normalisation used to run unconditionally, raising a
        # TypeError whenever only a mask (in_f is None) was supplied.
        f = None if in_f is None else (in_f - self.mean) / self.std
        if in_f is None:
            p = in_p.float()
            if len(in_p.shape) < 4:
                p = torch.unsqueeze(in_p, dim=1).float()
            x = self.conv1_p(p)
        elif in_p is not None:
            p = in_p.float()
            if len(in_p.shape) < 4:
                p = torch.unsqueeze(in_p, dim=1).float()
            # Fuse image and mask stems by summation.
            x = self.conv1(f) + self.conv1_p(p)
        else:
            x = self.conv1(f)
        x = self.bn1(x)
        c1 = self.relu(x)
        x = self.maxpool(c1)
        r2 = self.res2(x)
        r3 = self.res3(r2)
        r4 = self.res4(r3)
        r5 = self.res5(r4)
        return (r5, r4, r3, r2)
|
class Decoder(nn.Module):
    """2D decoder: global-convolution head, two residual convs, three refinement stages.

    Returns the upsampled final prediction plus the raw per-scale logits.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        mdim = 256
        self.GC = GC(4096, mdim)
        self.convG1 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
        self.convG2 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
        self.RF4 = Refine(1024, mdim)
        self.RF3 = Refine(512, mdim)
        self.RF2 = Refine(256, mdim)
        # One 2-class prediction head per scale (coarse to fine).
        self.pred5 = nn.Conv2d(mdim, 2, kernel_size=(3, 3), padding=(1, 1), stride=1)
        self.pred4 = nn.Conv2d(mdim, 2, kernel_size=(3, 3), padding=(1, 1), stride=1)
        self.pred3 = nn.Conv2d(mdim, 2, kernel_size=(3, 3), padding=(1, 1), stride=1)
        self.pred2 = nn.Conv2d(mdim, 2, kernel_size=(3, 3), padding=(1, 1), stride=1)

    def forward(self, r5, x5, r4, r3, r2):
        merged = self.GC(torch.cat((r5, x5), dim=1))
        residual = self.convG2(F.relu(self.convG1(F.relu(merged))))
        m5 = merged + residual
        m4 = self.RF4(r4, m5)
        m3 = self.RF3(r3, m4)
        m2 = self.RF2(r2, m3)
        p5 = self.pred5(F.relu(m5))
        p4 = self.pred4(F.relu(m4))
        p3 = self.pred3(F.relu(m3))
        p2 = self.pred2(F.relu(m2))
        # Finest logits upsampled back to input resolution.
        p = F.interpolate(p2, scale_factor=4, mode='bilinear')
        return (p, p2, p3, p4, p5)
|
class RGMP(BaseNetwork):
    """RGMP segmentation network: 2D ResNet encoder plus refinement decoder."""

    def __init__(self):
        super(RGMP, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
|
def conv3x3x3(in_planes, out_planes, stride=1):
    """3x3x3 convolution with padding 1 and no bias."""
    return nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
|
def downsample_basic_block(x, planes, stride):
    """Type-A shortcut: strided average pooling plus zero-padding of new channels.

    :param x: 5D tensor (N, C, T, H, W)
    :param planes: target channel count (must be >= x's channel count)
    :param stride: pooling stride
    :return: tensor of shape (N, planes, T', H', W')
    """
    out = F.avg_pool3d(x, kernel_size=1, stride=stride)
    # new_zeros inherits dtype AND device, replacing the old code's manual
    # torch.cuda.FloatTensor check and the deprecated Variable wrapper; it
    # also no longer detaches the pooled activations from autograd.
    zero_pads = out.new_zeros(out.size(0), planes - out.size(1),
                              out.size(2), out.size(3), out.size(4))
    return torch.cat([out, zero_pads], dim=1)
|
class BasicBlock(nn.Module):
    """Two-conv 3D residual block with ReLU applied after the residual add."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.BatchNorm3d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut path: either identity or the projection shortcut.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block for 3D ResNets."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        # Only the middle conv carries stride and dilation.
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
|
class Bottleneck_depthwise_ip(Bottleneck):
    """Interaction-preserved (ip) channel-separated bottleneck.

    The dense 3x3x3 conv of a plain bottleneck is factored into a 1x1x1 conv
    (conv2, channel interactions) followed by a depthwise 3x3x3 conv (conv3,
    groups=planes); conv4 is the expanding 1x1x1 projection.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck_depthwise_ip, self).__init__(inplanes, planes, stride, downsample, dilation)
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        # Depthwise spatio-temporal conv: groups == channels.
        self.conv3 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False, groups=planes)
        self.bn3 = nn.BatchNorm3d(planes)
        self.conv4 = nn.Conv3d(planes, (planes * 4), kernel_size=1, bias=False)
        self.bn4 = nn.BatchNorm3d((planes * 4))

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        # NOTE(review): no ReLU between bn2 and the depthwise conv3 — confirm
        # this matches the reference CSN implementation.
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv4(out)
        out = self.bn4(out)
        if (self.downsample is not None):
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
|
class Bottleneck_depthwise_ir(Bottleneck):
    """Interaction-reduced (ir) channel-separated bottleneck: depthwise 3x3x3 conv.

    BUGFIX: ``dilation`` is now forwarded to the parent constructor and applied
    to the depthwise conv; previously it was accepted but silently ignored
    (padding was hard-coded to 1). Behaviour is unchanged for the default
    dilation=1 used by the CSN factories in this file.
    """

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck_depthwise_ir, self).__init__(inplanes, planes, stride, downsample, dilation)
        # Replace the dense 3x3x3 conv with a depthwise (groups=planes) one.
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False, groups=planes)
|
class ResNet(nn.Module):
    """Generic 3D ResNet with configurable residual block and shortcut type.

    :param block: residual block class (BasicBlock / Bottleneck variants)
    :param layers: number of blocks per stage, e.g. [3, 4, 6, 3]
    :param sample_size: input spatial size, used to size the final avg pool
    :param sample_duration: input clip length, used to size the final avg pool
    :param shortcut_type: 'A' = parameter-free zero-padded shortcut, otherwise
        a 1x1 conv + BN projection
    :param num_classes: output width of the final linear layer
    :param last_fc: if False, forward() returns pooled features, not logits
    """

    def __init__(self, block, layers, sample_size, sample_duration, shortcut_type='B', num_classes=400, last_fc=True):
        self.last_fc = last_fc
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: temporal stride 1, spatial stride 2.
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)
        # Pool whatever remains after the /16 temporal and /32 spatial reduction.
        last_duration = math.ceil((sample_duration / 16))
        last_size = math.ceil((sample_size / 32))
        self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Fan-based normal init for convs; BN starts as identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, dilation=1):
        """Build one residual stage; only its first block gets stride/dilation/downsample."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            if (shortcut_type == 'A'):
                # Parameter-free shortcut: strided pooling + zero channel padding.
                downsample = partial(downsample_basic_block, planes=(planes * block.expansion), stride=stride)
            else:
                downsample = nn.Sequential(nn.Conv3d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm3d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            # NOTE(review): subsequent blocks do not receive `dilation` —
            # confirm this matches the intended dilated-network behaviour.
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten to (N, features) before the optional classifier.
        x = x.view(x.size(0), (- 1))
        if self.last_fc:
            x = self.fc(x)
        return x
|
class ResNetNoTS(ResNet):
    """3D ResNet without temporal striding ("NoTS").

    layer3/layer4 use spatial-only stride (1, 2, 2) with temporal dilation
    (2, 1, 1) so the temporal resolution of late features is preserved.
    """

    def __init__(self, block, layers, sample_size, sample_duration, shortcut_type='B', num_classes=400, last_fc=True):
        self.last_fc = last_fc
        self.inplanes = 64
        # Intentionally calls nn.Module.__init__ (the super of ResNet),
        # skipping ResNet.__init__ because every submodule is rebuilt below.
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=(1, 2, 2), dilation=(2, 1, 1))
        self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=(1, 2, 2), dilation=(2, 1, 1))
        # NOTE(review): last_duration still assumes a /16 temporal reduction
        # even though layer3/layer4 no longer stride temporally — confirm the
        # avg-pool window is intended.
        last_duration = math.ceil((sample_duration / 16))
        last_size = math.ceil((sample_size / 32))
        self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Same init scheme as the parent class.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
|
def get_fine_tuning_parameters(model, ft_begin_index):
    """Build per-parameter optimizer options fine-tuning only late layers.

    Parameters in layer{ft_begin_index}..layer4 and fc keep their normal
    learning rate; everything earlier gets lr 0.0. ``ft_begin_index == 0``
    means train everything.

    BUGFIX: the original loop appended 'layer{ft_begin_index}' on every
    iteration instead of 'layer{i}', so layers after the first fine-tuned one
    were frozen by mistake.
    """
    if ft_begin_index == 0:
        return model.parameters()
    ft_module_names = ['layer{}'.format(i) for i in range(ft_begin_index, 5)]
    ft_module_names.append('fc')
    parameters = []
    for k, v in model.named_parameters():
        if any(ft_module in k for ft_module in ft_module_names):
            parameters.append({'params': v})
        else:
            parameters.append({'params': v, 'lr': 0.0})
    return parameters
|
def resnet10(**kwargs):
    """Construct a ResNet-10 model (docstring previously mislabelled it ResNet-18)."""
    return ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
|
def resnet18(**kwargs):
    """Construct a ResNet-18 model."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
|
def resnet34(**kwargs):
    """Construct a ResNet-34 model."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
|
def resnet50(**kwargs):
    """Construct a ResNet-50 model."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
|
def resnet50_no_ts(**kwargs):
    """Construct a ResNet-50 model without temporal striding."""
    return ResNetNoTS(Bottleneck, [3, 4, 6, 3], **kwargs)
|
def resnet50_csn_ir(**kwargs):
    """Construct a channel-separated ResNet-50 (interaction-reduced) model.

    (Docstring previously mislabelled this as ResNet-152; the layer layout
    [3, 4, 6, 3] is ResNet-50.) The stem is swapped for a 3x7x7 conv with
    temporal padding 1, keeping the temporal extent unchanged.
    """
    model = ResNet(Bottleneck_depthwise_ir, [3, 4, 6, 3], **kwargs)
    model.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False)
    return model
|
def resnet101(**kwargs):
    """Construct a ResNet-101 model."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
|
def resnet152(**kwargs):
    """Construct a ResNet-152 model (docstring previously said ResNet-101)."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
|
def resnet200(**kwargs):
    """Construct a ResNet-200 model (docstring previously said ResNet-101)."""
    return ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
|
def resnet152_csn_ip(**kwargs):
    """Construct a channel-separated ResNet-152 model with preserved interactions.

    BUGFIX: the stem previously used padding (3, 3, 3) with a temporal kernel
    of 3, inflating the temporal dimension by 4 at stride 1; (1, 3, 3) matches
    the ir variant and keeps T unchanged.
    """
    model = ResNet(Bottleneck_depthwise_ip, [3, 8, 36, 3], **kwargs)
    model.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False)
    return model
|
def resnet152_csn_ir(**kwargs):
    """Construct a channel-separated ResNet-152 model with reduced interactions."""
    model = ResNet(Bottleneck_depthwise_ir, [3, 8, 36, 3], **kwargs)
    # Swap the stem for a 3x7x7 conv that preserves the temporal extent.
    model.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2),
                            padding=(1, 3, 3), bias=False)
    return model
|
def biggerStem():
    """Factorised stem: spatial 1x7x7 conv to 45 channels, BN+ReLU, then temporal 3x1x1 conv to 64."""
    spatial = nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2),
                        padding=(0, 3, 3), bias=False)
    temporal = nn.Conv3d(45, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1),
                         padding=(1, 0, 0), bias=False)
    return nn.Sequential(spatial, nn.BatchNorm3d(45), nn.ReLU(inplace=True), temporal)
|
class Encoder3d(Encoder):
    """3D ResNet encoder returning multi-scale features (r5, r4, r3, r2).

    Accepts an RGB clip ``in_f`` and/or a mask clip ``in_p``; the mask has its
    own stem conv whose output is summed with the RGB stem's.
    """

    def __init__(self, tw=16, sample_size=112, resnet=None):
        super(Encoder3d, self).__init__()
        # Stem for the single-channel mask input.
        self.conv1_p = nn.Conv3d(1, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        resnet = (resnet50(sample_size=sample_size, sample_duration=tw) if (resnet is None) else resnet)
        self.resnet = resnet
        self.conv1 = resnet.conv1
        self.bn1 = resnet.bn1
        self.relu = resnet.relu
        # Spatial-only pooling so the temporal extent is preserved.
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4
        # Mean is in the 0..255 range; std is identity.
        self.register_buffer('mean', torch.FloatTensor([114.7748, 107.7354, 99.475]).view(1, 3, 1, 1, 1))
        self.register_buffer('std', torch.FloatTensor([1, 1, 1]).view(1, 3, 1, 1, 1))

    def freeze_batchnorm(self):
        """Stop gradient updates for every 2D/3D BatchNorm parameter."""
        print('Freezing batchnorm for Encoder3d')
        for m in self.modules():
            if (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d)):
                for p in m.parameters():
                    p.requires_grad = False

    def forward(self, in_f, in_p=None):
        assert (in_f is not None) or (in_p is not None)
        if in_f is None:
            # BUGFIX: normalisation used to run unconditionally and raised a
            # TypeError whenever only a mask (in_p) was supplied.
            f = None
        else:
            # in_f presumably lies in [0, 1] — TODO confirm: scale to 0..255,
            # subtract the mean, then scale back down.
            f = ((in_f * 255.0) - self.mean) / self.std
            f /= 255.0
        if in_f is None:
            p = in_p
            if len(in_p.shape) < 4:
                p = torch.unsqueeze(in_p, dim=1)
            x = self.conv1_p(p)
        elif in_p is not None:
            p = in_p
            if len(in_p.shape) < 4:
                p = torch.unsqueeze(in_p, dim=1)
            x = self.conv1(f) + self.conv1_p(p)
        else:
            x = self.conv1(f)
        x = self.bn1(x)
        c1 = self.relu(x)
        x = self.maxpool(c1)
        r2 = self.layer1(x)
        r3 = self.layer2(r2)
        r4 = self.layer3(r3)
        r5 = self.layer4(r4)
        return (r5, r4, r3, r2)
|
class Encoder101(Encoder):
    """Encoder variant built on pretrained torchvision ResNet-101 backbones."""

    def __init__(self):
        super(Encoder101, self).__init__()
        # NOTE(review): self.resnet holds a deeplabv3 network, while the
        # stem/stages below are taken from fcn_resnet101 — confirm intended.
        self.resnet = deeplabv3_resnet101(pretrained=True)
        self.conv1_p = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=True)
        backbone = fcn_resnet101(pretrained=True).backbone
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.maxpool = backbone.maxpool
        self.res2 = backbone.layer1
        self.res3 = backbone.layer2
        self.res4 = backbone.layer3
        self.res5 = backbone.layer4
        # ImageNet normalisation constants.
        self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
|
class EncoderR2plus1d_34(Encoder3d):
    """Encoder3d whose backbone is a pretrained R(2+1)D-34."""

    def __init__(self, tw=8, sample_size=112):
        super(EncoderR2plus1d_34, self).__init__(tw, sample_size)
        backbone = r2plus1d_34(num_classes=359, pretrained=True, arch='r2plus1d_34_32_ig65m')
        self.resnet = backbone
        # The stem is used as conv1; bn1/relu become no-ops.
        self.conv1 = backbone.stem
        self.bn1 = nn.Identity()
        self.relu = nn.Identity()
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4
|
class Encoder3d_csn_ip(Encoder3d):
    """Encoder3d whose backbone is an interaction-preserved CSN ResNet-152."""

    def __init__(self, tw=16, sample_size=112):
        super(Encoder3d_csn_ip, self).__init__(tw, sample_size)
        backbone = resnet152_csn_ip(sample_size=sample_size, sample_duration=tw)
        self.resnet = backbone
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4
|
class Encoder3d_csn_ir(Encoder3d):
    """Encoder3d whose backbone is an interaction-reduced CSN ResNet-152."""

    def __init__(self, tw=16, sample_size=112):
        super(Encoder3d_csn_ir, self).__init__(tw, sample_size)
        backbone = resnet152_csn_ir(sample_size=sample_size, sample_duration=tw)
        self.resnet = backbone
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4
|
class Decoder3d(nn.Module):
    """3D decoder: inter block on r5, two residual convs, three refine stages, prediction head.

    :param n_classes: number of output channels of each prediction head
    :param pred_scale_factor: trilinear upsampling factor of the final logits
    :param inter_block: module class applied to the deepest features (2048 -> 256)
    :param refine_block: module class used for the three refinement stages
    """

    def __init__(self, n_classes=2, pred_scale_factor=(1, 4, 4), inter_block=GC3d, refine_block=Refine3d):
        super(Decoder3d, self).__init__()
        mdim = 256
        self.pred_scale_factor = pred_scale_factor
        self.GC = inter_block(2048, mdim)
        self.convG1 = nn.Conv3d(mdim, mdim, kernel_size=3, padding=1)
        self.convG2 = nn.Conv3d(mdim, mdim, kernel_size=3, padding=1)
        self.RF4 = refine_block(1024, mdim)
        self.RF3 = refine_block(512, mdim)
        self.RF2 = refine_block(256, mdim)
        # All four heads are kept as modules (checkpoint compatibility), but
        # only pred2 is evaluated in forward().
        self.pred5 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)
        self.pred4 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)
        self.pred3 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)
        self.pred2 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)

    def forward(self, r5, r4, r3, r2, support):
        """Decode features into logits; ``support`` is accepted but unused."""
        x = self.GC(r5)
        r = self.convG1(F.relu(x))
        r = self.convG2(F.relu(r))
        m5 = x + r
        m4 = self.RF4(r4, m5)
        m3 = self.RF3(r3, m4)
        m2 = self.RF2(r2, m3)
        # PERF: the original also computed p3/p4/p5 and immediately discarded
        # them; only the finest-scale logits were ever returned.
        p2 = self.pred2(F.relu(m2))
        return F.interpolate(p2, scale_factor=self.pred_scale_factor, mode='trilinear')
|
class Decoder3dNoGC(Decoder3d):
    """Decoder3d variant using a plain 3x3x3 convolution instead of the GC inter block."""

    def __init__(self, n_classes=2):
        super(Decoder3dNoGC, self).__init__(n_classes=n_classes)
        self.GC = nn.Conv3d(2048, 256, kernel_size=3, padding=1)
|
class Decoder3dNonLocal(Decoder3d):
    """Decoder3d variant whose inter block is a 1x1 conv followed by non-local attention."""

    def __init__(self, n_classes=2):
        super(Decoder3dNonLocal, self).__init__(n_classes=n_classes)
        self.GC = nn.Sequential(
            nn.Conv3d(2048, 256, kernel_size=1),
            NONLocalBlock3D(256, sub_sample=True),
        )
|
class DecoderR2plus1d(Decoder3d):
    """Decoder3d resized for R(2+1)D-34 feature widths (512/256/128/64)."""

    def __init__(self, n_classes=2, inter_block=GC3d, refine_block=Refine3d):
        super(DecoderR2plus1d, self).__init__(n_classes=n_classes)
        mdim = 256
        # Rebuild the input-facing modules with the narrower channel counts.
        self.GC = inter_block(512, mdim)
        self.RF4 = refine_block(256, mdim)
        self.RF3 = refine_block(128, mdim)
        self.RF2 = refine_block(64, mdim)
|
class Resnet3d(BaseNetwork):
    """Baseline 3D ResNet segmentation network (Encoder3d + Decoder3d)."""

    def __init__(self, tw=16, sample_size=112):
        super(Resnet3d, self).__init__()
        self.encoder = Encoder3d(tw, sample_size)
        self.decoder = Decoder3d()

    def forward(self, x, ref):
        # A 4D reference mask receives a singleton temporal dimension.
        mask = ref.unsqueeze(2) if (ref is not None and len(ref.shape) == 4) else ref
        r5, r4, r3, r2 = self.encoder.forward(x, mask)
        pred = self.decoder.forward(r5, r4, r3, r2, None)
        return [pred]
|
class Resnet3d101(Resnet3d):
    """3D ResNet-101 backbone with one or more parallel decoder heads."""

    def __init__(self, tw=8, sample_size=112, e_dim=7, decoders=None, inter_block=GC3d, refine_block=Refine3d):
        super(Resnet3d101, self).__init__(tw=tw, sample_size=sample_size)
        resnet = resnet101(sample_size=sample_size, sample_duration=tw)
        self.encoder = Encoder3d(tw, sample_size, resnet=resnet)
        if decoders is None:
            decoders = [Decoder3d(inter_block=inter_block, refine_block=refine_block)]
        self.decoders = nn.ModuleList()
        for decoder in decoders:
            self.decoders.append(decoder)
        print('Using decoders {}'.format(self.decoders))

    def forward(self, x, ref=None):
        """Encode once, run every decoder, and return a flat list of tensors.

        BUGFIX: flattening previously used ``reduce(torch.add, ...)`` over
        Python lists — torch.add cannot concatenate lists, so any decoder
        returning more than one tensor crashed. Flatten explicitly instead.
        """
        r5, r4, r3, r2 = self.encoder.forward(x, ref)
        outputs = []

        def _flatten(obj):
            # A bare tensor is a leaf; anything else is iterated recursively.
            if type(obj) is torch.Tensor:
                outputs.append(obj)
            else:
                for element in obj:
                    _flatten(element)

        for decoder in self.decoders:
            _flatten(decoder.forward(r5, r4, r3, r2, None))
        return outputs
|
class R2plus1d(Resnet3d101):
    """Resnet3d101 variant with an R(2+1)D-34 encoder and matching decoder widths."""

    def __init__(self, tw=8, sample_size=112, e_dim=7, decoders=None, inter_block=GC3d, refine_block=Refine3d):
        # NOTE(review): the incoming `decoders` argument is discarded and
        # replaced with a DecoderR2plus1d — confirm this is intended.
        decoders = [DecoderR2plus1d(inter_block=inter_block, refine_block=refine_block)]
        super(R2plus1d, self).__init__(tw, sample_size, e_dim, decoders)
        self.encoder = EncoderR2plus1d_34(tw, sample_size)
|
class ResnetCSN(Resnet3d101):
    """Resnet3d101 with the encoder swapped for an interaction-reduced CSN-152."""

    def __init__(self, tw=8, sample_size=112, e_dim=7, decoders=None, inter_block=GC3d, refine_block=Refine3d):
        super(ResnetCSN, self).__init__(tw, sample_size, e_dim, decoders,
                                        inter_block=inter_block, refine_block=refine_block)
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
|
class ResnetCSNNoGC(Resnet3d101):
    """ir-CSN encoder paired with a decoder whose inter block is a plain conv."""

    def __init__(self, tw=8, sample_size=112, e_dim=7, decoders=None):
        if decoders is None:
            decoders = [Decoder3dNoGC()]
        print('Creating decoders {}'.format(decoders))
        super(ResnetCSNNoGC, self).__init__(tw, sample_size, e_dim, decoders)
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
|
class ResnetCSNNonLocal(ResnetCSNNoGC):
    """ResnetCSNNoGC variant whose decoder uses a non-local inter block."""

    def __init__(self, tw=8, sample_size=112, e_dim=7):
        super(ResnetCSNNonLocal, self).__init__(tw, sample_size, e_dim, [Decoder3dNonLocal()])
|
def csn_ip(pretrained=False, progress=False, **kwargs):
    """Build an ip-CSN-152 classifier with Caffe2-compatible BatchNorm settings.

    NOTE(review): ``pretrained``, ``progress`` and kwargs are currently ignored.
    """
    num_classes = 400
    model = resnet152_csn_ip(sample_size=224, sample_duration=32)
    model.fc = nn.Linear(model.fc.in_features, out_features=num_classes)
    for module in model.modules():
        if isinstance(module, nn.BatchNorm3d):
            module.eps = 0.001
            module.momentum = 0.9
    return model
|
def csn_ir(pretrained=False, progress=False, **kwargs):
    """Build an ir-CSN-152 classifier with Caffe2-compatible BatchNorm settings.

    NOTE(review): ``pretrained``, ``progress`` and kwargs are currently ignored.
    """
    num_classes = 400
    model = resnet152_csn_ir(sample_size=224, sample_duration=32)
    model.fc = nn.Linear(model.fc.in_features, out_features=num_classes)
    for module in model.modules():
        if isinstance(module, nn.BatchNorm3d):
            module.eps = 0.001
            module.momentum = 0.9
    return model
|
def blobs_from_pkl(path, num_classes=400):
    """Load the Caffe2 blob dictionary from a pickle checkpoint.

    :param path: pathlib.Path to the .pkl file
    :param num_classes: expected output width of the final linear layer
    :raises AssertionError: if the final linear layer does not match num_classes
    """
    with path.open(mode='rb') as f:
        # SECURITY: pickle.load executes arbitrary code from the file — only
        # load checkpoints from a trusted source.
        pkl = pickle.load(f, encoding='latin1')
    blobs = pkl['blobs']
    # Fixed the "doesnt matche" typo in the original assertion messages.
    assert ('last_out_L' + str(num_classes) + '_w') in blobs, "Number of --classes argument doesn't match the last linear layer in pkl"
    assert ('last_out_L' + str(num_classes) + '_b') in blobs, "Number of --classes argument doesn't match the last linear layer in pkl"
    return blobs
|
def copy_tensor(data, blobs, name):
    """Copy blob ``name`` into ``data`` in place (sizes/dtypes must match) and consume the blob."""
    source = torch.from_numpy(blobs.pop(name))
    assert data.size() == source.size(), f'Torch tensor has size {data.size()}, while Caffe2 tensor has size {source.size()}'
    assert data.dtype == source.dtype
    data.copy_(source)
|
def copy_conv(module, blobs, prefix):
    """Copy the Caffe2 conv weight blob ('<prefix>_w') into a bias-free Conv3d."""
    assert isinstance(module, nn.Conv3d)
    assert module.bias is None
    copy_tensor(module.weight.data, blobs, prefix + '_w')
|
def copy_bn(module, blobs, prefix):
    """Copy Caffe2 BN blobs (scale '_s', mean '_rm', inv-var '_riv', bias '_b') into a BatchNorm3d."""
    assert isinstance(module, nn.BatchNorm3d)
    copy_tensor(module.weight.data, blobs, prefix + '_s')
    copy_tensor(module.running_mean.data, blobs, prefix + '_rm')
    copy_tensor(module.running_var.data, blobs, prefix + '_riv')
    copy_tensor(module.bias.data, blobs, prefix + '_b')
|
def copy_fc(module, blobs):
    """Copy the classifier blobs ('last_out_L<n>_w' / '_b') into a Linear layer."""
    assert isinstance(module, nn.Linear)
    n = module.out_features
    copy_tensor(module.bias.data, blobs, f'last_out_L{n}_b')
    copy_tensor(module.weight.data, blobs, f'last_out_L{n}_w')
|
def copy_stem(module, blobs):
    """Copy the factorised stem: middle conv, its BN (ReLU sanity-checked), final conv."""
    assert isinstance(module, nn.Sequential)
    assert len(module) == 4
    copy_conv(module[0], blobs, 'conv1_middle')
    copy_bn(module[1], blobs, 'conv1_middle_spatbn_relu')
    assert isinstance(module[2], nn.ReLU)
    copy_conv(module[3], blobs, 'conv1')
|
def copy_conv2plus1d(module, blobs, i, j):
    """Copy one factorised (2+1)D conv: spatial 'middle' conv + BN, then the temporal conv."""
    assert isinstance(module, Conv2Plus1D)
    assert len(module) == 4
    copy_conv(module[0], blobs, f'comp_{i}_conv_{j}_middle')
    copy_bn(module[1], blobs, f'comp_{i}_spatbn_{j}_middle')
    assert isinstance(module[2], nn.ReLU)
    copy_conv(module[3], blobs, f'comp_{i}_conv_{j}')
|
def copy_basicblock(module, blobs, i):
    """Copy Caffe2 blobs for global block index ``i`` into an R(2+1)D BasicBlock."""
    assert isinstance(module, BasicBlock)
    # First (2+1)D conv followed by its BN and ReLU.
    assert len(module.conv1) == 3
    assert isinstance(module.conv1[0], Conv2Plus1D)
    copy_conv2plus1d(module.conv1[0], blobs, i, 1)
    assert isinstance(module.conv1[1], nn.BatchNorm3d)
    copy_bn(module.conv1[1], blobs, f'comp_{i}_spatbn_1')
    assert isinstance(module.conv1[2], nn.ReLU)
    # Second (2+1)D conv followed by its BN.
    assert len(module.conv2) == 2
    assert isinstance(module.conv2[0], Conv2Plus1D)
    copy_conv2plus1d(module.conv2[0], blobs, i, 2)
    assert isinstance(module.conv2[1], nn.BatchNorm3d)
    copy_bn(module.conv2[1], blobs, f'comp_{i}_spatbn_2')
    # Projection shortcuts exist only at stage-transition indices.
    if module.downsample is not None:
        assert i in [3, 7, 13]
        assert len(module.downsample) == 2
        assert isinstance(module.downsample[0], nn.Conv3d)
        assert isinstance(module.downsample[1], nn.BatchNorm3d)
        copy_conv(module.downsample[0], blobs, f'shortcut_projection_{i}')
        copy_bn(module.downsample[1], blobs, f'shortcut_projection_{i}_spatbn')
|
def copy_bottleneck(module, blobs, i):
    """Copy Caffe2 blobs for global bottleneck index ``i`` into an ir-CSN Bottleneck."""
    assert isinstance(module, Bottleneck)
    copy_conv(module.conv1, blobs, f'comp_{i}_conv_1')
    copy_bn(module.bn1, blobs, f'comp_{i}_spatbn_1')
    # Caffe2 numbering for the ir variant: conv2 maps to '_3', conv3 to '_4'
    # (index 2 is unused here).
    copy_conv(module.conv2, blobs, f'comp_{i}_conv_3')
    copy_bn(module.bn2, blobs, f'comp_{i}_spatbn_3')
    copy_conv(module.conv3, blobs, f'comp_{i}_conv_4')
    copy_bn(module.bn3, blobs, f'comp_{i}_spatbn_4')
    if module.downsample is not None:
        assert i in [0, 3, 11, 47], str(i)
        assert len(module.downsample) == 2
        copy_conv(module.downsample[0], blobs, f'shortcut_projection_{i}')
        copy_bn(module.downsample[1], blobs, f'shortcut_projection_{i}_spatbn')
|
def copy_bottleneck_csn_ip(module, blobs, i):
    """Copy Caffe2 blobs for global bottleneck index ``i`` into an ip-CSN bottleneck."""
    assert isinstance(module, Bottleneck)
    copy_conv(module.conv1, blobs, f'comp_{i}_conv_1')
    copy_bn(module.bn1, blobs, f'comp_{i}_spatbn_1')
    # The factored pair shares index 2: the 1x1 conv is '_2_middle', the
    # depthwise conv is '_2'.
    copy_conv(module.conv2, blobs, f'comp_{i}_conv_2_middle')
    copy_bn(module.bn2, blobs, f'comp_{i}_spatbn_2_middle')
    copy_conv(module.conv3, blobs, f'comp_{i}_conv_2')
    copy_bn(module.bn3, blobs, f'comp_{i}_spatbn_2')
    copy_conv(module.conv4, blobs, f'comp_{i}_conv_3')
    copy_bn(module.bn4, blobs, f'comp_{i}_spatbn_3')
    if module.downsample is not None:
        assert i in [0, 3, 11, 47], str(i)
        assert len(module.downsample) == 2
        copy_conv(module.downsample[0], blobs, f'shortcut_projection_{i}')
        copy_bn(module.downsample[1], blobs, f'shortcut_projection_{i}_spatbn')
|
def init_canary(model):
    """Poison every conv/BN/linear parameter and BN statistic with NaN.

    Used with :func:`check_canary` to prove a later weight-copy overwrote
    every value.
    """
    nan = float('nan')
    for module in model.modules():
        if isinstance(module, nn.Conv3d):
            assert module.bias is None
            nn.init.constant_(module.weight, nan)
        elif isinstance(module, nn.BatchNorm3d):
            for tensor in (module.weight, module.running_mean, module.running_var, module.bias):
                nn.init.constant_(tensor, nan)
        elif isinstance(module, nn.Linear):
            nn.init.constant_(module.weight, nan)
            nn.init.constant_(module.bias, nan)
|
def check_canary(model):
    """Assert that no conv/BN/linear parameter or BN statistic is still NaN."""
    for module in model.modules():
        if isinstance(module, nn.Conv3d):
            assert module.bias is None
            assert not torch.isnan(module.weight).any()
        elif isinstance(module, nn.BatchNorm3d):
            for tensor in (module.weight, module.running_mean, module.running_var, module.bias):
                assert not torch.isnan(tensor).any()
        elif isinstance(module, nn.Linear):
            assert not torch.isnan(module.weight).any()
            assert not torch.isnan(module.bias).any()
|
def main(args):
    """Convert a Caffe2 CSN checkpoint (.pkl) into a PyTorch state dict.

    Expects ``args.pkl`` (input path), ``args.model`` ('csn_ip' or 'csn_ir')
    and ``args.out`` (output path, saved with a .pth suffix).
    """
    blobs = blobs_from_pkl(args.pkl)
    if (args.model == 'csn_ip'):
        model = csn_ip()
    elif (args.model == 'csn_ir'):
        model = csn_ir()
    else:
        raise ValueError((args.model + ' is unknown'))
    # Poison all weights with NaN so check_canary() below proves every
    # parameter was overwritten by the copy.
    init_canary(model)
    copy_conv(model.conv1, blobs, 'conv1')
    copy_bn(model.bn1, blobs, 'conv1_spatbn_relu')
    layers = [model.layer1, model.layer2, model.layer3, model.layer4]
    # Caffe2 numbers bottlenecks globally; these are the starting indices of
    # each stage for the 152-layer layout [3, 8, 36, 3].
    blocks = [0, 3, 11, 47]
    for (layer, i) in zip(layers, blocks):
        assert ({0: 3, 3: 8, 11: 36, 47: 3}[i] == len(layer))
        j = i
        for bottleneck in layer:
            if (args.model == 'csn_ip'):
                copy_bottleneck_csn_ip(bottleneck, blobs, j)
            else:
                copy_bottleneck(bottleneck, blobs, j)
            j += 1
    copy_fc(model.fc, blobs)
    # Every blob must have been consumed exactly once.
    assert (not blobs)
    check_canary(model)
    torch.save(model.state_dict(), args.out.with_suffix('.pth'))
    # Round-trip: rebuild the model and reload to verify the saved file.
    if (args.model == 'csn_ip'):
        model = csn_ip()
    elif (args.model == 'csn_ir'):
        model = csn_ir()
    model.load_state_dict(torch.load(args.out.with_suffix('.pth')))
|
class NonLocalBlock3DWithDownsampling(nn.Module):
    """Non-local (self-attention) block over a 5-D video tensor [N, C, T, H, W].

    Keys/values (phi/g branches) can be spatially downsampled by
    `downsampling_factor` via average pooling. The value branch is augmented
    with a 3-channel (t, y, x) coordinate grid, hence the output projection
    `W` consumes `intermediate_channels + 3` channels.
    """

    def __init__(self, in_channels, intermediate_channels, downsampling_factor, out_channels=None):
        # BUGFIX: super(self.__class__, ...) recurses infinitely when this
        # class is subclassed; name the class explicitly.
        super(NonLocalBlock3DWithDownsampling, self).__init__()
        # Query / key / value 1x1x1 projections.
        self.theta = nn.Conv3d(in_channels, intermediate_channels, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv3d(in_channels, intermediate_channels, kernel_size=1, stride=1, padding=0)
        self.g = nn.Conv3d(in_channels, intermediate_channels, kernel_size=1, stride=1, padding=0)
        if (not out_channels):
            out_channels = in_channels
        # +3 channels for the coordinate grid concatenated to the values.
        self.W = nn.Conv3d((intermediate_channels + 3), out_channels, kernel_size=(1, 1, 1), padding=(0, 0, 0))
        self.intermediate_channels = intermediate_channels
        assert (downsampling_factor >= 1)
        if (downsampling_factor == 1):
            self.pooler = nn.Identity()
        else:
            # Spatial-only average pooling; the temporal axis is untouched.
            ksize = (downsampling_factor + 1)
            ksize = (1, ksize, ksize)
            stride = downsampling_factor
            stride = (1, stride, stride)
            padding = int(math.floor(((downsampling_factor + 1) / 2)))
            padding = (0, padding, padding)
            self.pooler = nn.AvgPool3d(kernel_size=ksize, stride=stride, padding=padding)

    @staticmethod
    def create_spatiotemporal_grid(height, width, time, t_scale, dtype=torch.float32, device='cpu'):
        """Return a [3, time, height, width] grid of (t, y, x) coordinates.

        x spans [-2, 2], y spans [-1, 1] and t spans [-t_scale, t_scale].
        BUGFIX: the original ignored `dtype`/`device` and always allocated via
        .cuda(), crashing on CPU-only machines and defeating the x.dtype /
        x.device arguments passed by callers; honor both arguments instead.
        """
        x = ((torch.arange(width, dtype=dtype, device=device) / ((width - 1) * 0.25)) - 2)
        y = ((torch.arange(height, dtype=dtype, device=device) / ((height - 1) * 0.5)) - 1)
        t = (((torch.arange(time, dtype=dtype, device=device) / ((time - 1) * 0.5)) - 1) * t_scale)
        return torch.stack(torch.meshgrid(t, y, x), dim=0)

    def forward(self, x):
        """
        :param x: tensor of shape [N, C, T, H, W]
        :return: tensor of shape [N, out_channels, T, H, W]
        """
        (N, C, T, H, W) = x.shape
        # Queries at full resolution: [N, T*H*W, C_inter].
        theta_x = self.theta(x).permute(0, 2, 3, 4, 1).reshape(N, ((T * H) * W), self.intermediate_channels)
        # Keys at (possibly) reduced spatial resolution: [N, C_inter, T*Hd*Wd].
        phi_x = self.pooler(self.phi(x))
        (Hd, Wd) = phi_x.shape[(- 2):]
        phi_x = phi_x.view(N, self.intermediate_channels, ((T * Hd) * Wd))
        # Attention over all downsampled positions.
        f_x = torch.matmul(theta_x, phi_x)
        f_x = f_x.softmax(dim=(- 1))
        g_x = self.g(x)
        g_x = self.pooler(g_x)
        # Append the (detached) coordinate grid to the values.
        grid = self.create_spatiotemporal_grid(Hd, Wd, T, 0.1, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        g_x = torch.cat((g_x, grid.detach()), dim=1)
        g_x = g_x.permute(0, 2, 3, 4, 1).reshape(N, ((T * Hd) * Wd), (self.intermediate_channels + 3))
        y = torch.matmul(f_x, g_x)
        y = y.permute(0, 2, 1).reshape(N, (self.intermediate_channels + 3), T, H, W)
        return self.W(y)
|
class NonlocalOffsetEmbeddingHead(nn.Module):
    """Embedding head: injects a (t, y, x) coordinate grid into the features,
    mixes them with a non-local block, and projects to an embedding map.

    When `add_spatial_coord` is True the absolute coordinates are also added
    back onto the first 3 embedding channels (when the embedding is wide
    enough), making the embeddings absolute rather than offset-only.
    """

    def __init__(self, in_channels, nonlocal_inter_channels, embedding_size, downsampling_factor, add_spatial_coord=True):
        # BUGFIX: super(self.__class__, ...) recurses infinitely when this
        # class is subclassed; name the class explicitly.
        super(NonlocalOffsetEmbeddingHead, self).__init__()
        self.nonlocal_block = NonLocalBlock3DWithDownsampling(in_channels, nonlocal_inter_channels, downsampling_factor)
        # +3 input channels for the coordinate grid concatenated in forward().
        self.conv_offset = nn.Conv3d((in_channels + 3), embedding_size, kernel_size=3, stride=1, padding=1, bias=False)
        self.embedding_size = embedding_size
        self.add_spatial_coord = add_spatial_coord

    def forward(self, x):
        """
        :param x: tensor of shape [N, C, T, H, W]
        :return: embedding map of shape [N, E, T, H, W]
        """
        (N, C, T, H, W) = x.shape
        grid = self.nonlocal_block.create_spatiotemporal_grid(H, W, T, 0.1, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        # Add the grid onto the first 3 feature channels (zero-padded to C).
        zeros = torch.zeros(N, (C - 3), T, H, W, dtype=x.dtype, device=x.device)
        grid_cat = torch.cat((grid, zeros), dim=1)
        x = (x + grid_cat.detach())
        # Residual non-local mixing, then project with the grid appended.
        x = (x + self.nonlocal_block(x))
        x = torch.cat((x, grid), dim=1)
        x = self.conv_offset(x)
        # Optionally re-inject absolute coordinates into the embedding.
        if (self.embedding_size > 3):
            zeros = torch.zeros(N, (self.embedding_size - 3), T, H, W, dtype=x.dtype, device=x.device)
            grid_cat = torch.cat((grid, zeros), dim=1)
        elif (self.embedding_size == 3):
            grid_cat = grid
        else:
            grid_cat = torch.tensor(0, dtype=x.dtype, device=x.device)
        return ((x + grid_cat.detach()) if self.add_spatial_coord else x)
|
class BaseNetwork(nn.Module):
    """Common base for the networks in this file; records the temporal window."""

    def __init__(self, tw=5):
        super().__init__()
        # Temporal window: number of frames the network operates on at once.
        self.tw = tw
|
class Encoder3d(nn.Module):
    """3-D ResNet encoder producing a feature pyramid (r5, r4, r3, r2).

    `backbone` is a config object exposing NAME, PRETRAINED_WTS and FREEZE_BN
    (resolved through get_backbone_fn). RGB clips are normalised with
    `pixel_mean`/`pixel_std` (given in 0-255 range). An optional
    single-channel guidance map `in_p` is fused in through a parallel stem
    convolution (conv1_p) and may even replace the RGB input entirely.
    """

    def __init__(self, backbone, tw, pixel_mean, pixel_std):
        super(Encoder3d, self).__init__()
        # Stem for the auxiliary single-channel input (e.g. a mask prior).
        self.conv1_p = nn.Conv3d(1, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        resnet = get_backbone_fn(backbone.NAME)(sample_size=112, sample_duration=tw)
        if backbone.PRETRAINED_WTS:
            print('Loading pretrained weights for the backbone from {} {}{}...'.format(Constants.font.BOLD, backbone.PRETRAINED_WTS, Constants.font.END))
            chkpt = torch.load(backbone.PRETRAINED_WTS)
            resnet.load_state_dict(chkpt)
        # FIX: the original assigned self.resnet twice; keep a single assignment.
        self.resnet = resnet
        self.conv1 = resnet.conv1
        self.bn1 = resnet.bn1
        self.relu = resnet.relu
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4
        # Normalisation statistics, broadcastable over [N, 3, T, H, W].
        self.register_buffer('mean', torch.FloatTensor(pixel_mean).view(1, 3, 1, 1, 1))
        self.register_buffer('std', torch.FloatTensor(pixel_std).view(1, 3, 1, 1, 1))
        if backbone.FREEZE_BN:
            self.freeze_batchnorm()

    def freeze_batchnorm(self):
        """Stop gradients through all 2-D/3-D batch-norm affine parameters."""
        print('Freezing batchnorm for Encoder3d')
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm3d)):
                for p in m.parameters():
                    p.requires_grad = False

    def forward(self, in_f, in_p=None):
        """Encode a clip and/or guidance map into a 4-level feature pyramid.

        :param in_f: RGB clip [N, 3, T, H, W] scaled to [0, 1], or None.
        :param in_p: optional guidance map; unsqueezed to a channel dim when
            it arrives with fewer than 4 dims.
        :return: (r5, r4, r3, r2) features from layer4..layer1.
        """
        assert ((in_f is not None) or (in_p is not None))
        if in_f is not None:
            # BUGFIX: only normalise when a frame tensor is given; the
            # original computed this unconditionally and raised a TypeError
            # on the supported in_f=None path.
            f = (((in_f * 255.0) - self.mean) / self.std)
            f /= 255.0
        if (in_f is None):
            p = in_p
            if (len(in_p.shape) < 4):
                p = torch.unsqueeze(in_p, dim=1)
            x = self.conv1_p(p)
        elif (in_p is not None):
            p = in_p
            if (len(in_p.shape) < 4):
                p = torch.unsqueeze(in_p, dim=1)
            # Fuse the RGB stem with the guidance stem by addition.
            x = (self.conv1(f) + self.conv1_p(p))
        else:
            x = self.conv1(f)
        x = self.bn1(x)
        c1 = self.relu(x)
        x = self.maxpool(c1)
        r2 = self.layer1(x)
        r3 = self.layer2(r2)
        r4 = self.layer3(r3)
        r5 = self.layer4(r4)
        return (r5, r4, r3, r2)
|
class Decoder3d(nn.Module):
    """Top-down decoder: refines the encoder pyramid (r5..r2) into a dense
    prediction map upsampled by `pred_scale_factor`.

    `inter_block` and `refine_block` are module names resolved through the
    project's get_module() registry.
    """

    def __init__(self, n_classes, inter_block, refine_block, pred_scale_factor=(1, 4, 4)):
        super(Decoder3d, self).__init__()
        mdim = 256
        self.pred_scale_factor = pred_scale_factor
        # Global-context block applied to the coarsest (2048-ch) features.
        self.GC = get_module(inter_block)(2048, mdim)
        self.convG1 = nn.Conv3d(mdim, mdim, kernel_size=3, padding=1)
        self.convG2 = nn.Conv3d(mdim, mdim, kernel_size=3, padding=1)
        refine_cls = get_module(refine_block)
        # Refinement blocks fusing the skip features r4/r3/r2.
        self.RF4 = refine_cls(1024, mdim)
        self.RF3 = refine_cls(512, mdim)
        self.RF2 = refine_cls(256, mdim)
        # Per-level prediction heads.
        self.pred5 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)
        self.pred4 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)
        self.pred3 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)
        self.pred2 = nn.Conv3d(mdim, n_classes, kernel_size=3, padding=1, stride=1)

    def forward(self, r5, r4, r3, r2, support):
        # `support` is accepted but unused here -- presumably kept so all
        # decoders in this project share one call signature; confirm.
        x = self.GC(r5)
        # Two-conv residual refinement on the global-context features.
        r = self.convG1(F.relu(x))
        r = self.convG2(F.relu(r))
        m5 = (x + r)
        m4 = self.RF4(r4, m5)
        m3 = self.RF3(r3, m4)
        m2 = self.RF2(r2, m3)
        p2 = self.pred2(F.relu(m2))
        # NOTE(review): p3/p4/p5 are computed but never returned or used --
        # possibly leftovers from multi-scale supervision; confirm before
        # removing.
        p3 = self.pred3(F.relu(m3))
        p4 = self.pred4(F.relu(m4))
        p5 = self.pred5(F.relu(m5))
        # Only the finest prediction is upsampled and returned.
        p = F.interpolate(p2, scale_factor=self.pred_scale_factor, mode='trilinear')
        return p
|
class SaliencyNetwork(BaseNetwork):
    """Encoder3d plus one (or more) Decoder3d heads producing saliency maps."""

    def __init__(self, cfg):
        super(SaliencyNetwork, self).__init__()
        self.encoder = Encoder3d(cfg.MODEL.BACKBONE, cfg.INPUT.TW, cfg.MODEL.PIXEL_MEAN, cfg.MODEL.PIXEL_STD)
        decoders = [Decoder3d(cfg.MODEL.N_CLASSES, inter_block=cfg.MODEL.DECODER.INTER_BLOCK, refine_block=cfg.MODEL.DECODER.REFINE_BLOCK)]
        self.decoders = nn.ModuleList()
        for decoder in decoders:
            self.decoders.append(decoder)
        if cfg.MODEL.FREEZE_BN:
            self.encoder.freeze_batchnorm()

    def forward(self, x, ref=None):
        (r5, r4, r3, r2) = self.encoder.forward(x, ref)
        # Flattens (possibly nested) lists of decoder outputs into one list.
        # NOTE(review): with the single decoder built above, reduce() sees a
        # one-element list and returns it unchanged; with multiple decoders
        # reduce(torch.add, ...) would element-wise add the sub-lists rather
        # than concatenate them -- confirm intent before adding decoders.
        flatten = (lambda lst: ([lst] if (type(lst) is torch.Tensor) else reduce(torch.add, [flatten(ele) for ele in lst])))
        p = flatten([decoder.forward(r5, r4, r3, r2, None) for decoder in self.decoders])
        return p
|
class MultiscaleCombinedHeadLongTemporalWindow(nn.Module):
    """Multi-scale head that fuses four feature maps into semantic-segmentation
    logits plus a 3-channel spatiotemporal embedding map, optionally extended
    with variance channels and a seed map.
    """

    def __init__(self, in_channels, num_classes, variance_output, variance_per_axis, **kwargs):
        super().__init__()
        # Embeddings live in (t, y, x) coordinate space, hence 3 channels.
        self.embedding_size = 3
        # 0 when disabled, 1 shared variance, or one per embedding axis.
        self.variance_channels = ((self.embedding_size if variance_per_axis else 1) if variance_output else 0)
        self.seed_map = kwargs.get('seed_map', False)
        nonlocal_inter_channels = kwargs.get('nonlocal_inter_channels', 128)
        conv_inter_channels = kwargs.get('conv_inter_channels', 128)
        self.add_spatial_coord = kwargs.get('add_spatial_coord', False)
        if (not self.add_spatial_coord):
            print('Spatial coordinates are not added to the feature maps in the embedding head')
        # The non-local blocks receive +3 channels when the coordinate grid
        # is concatenated to their input (see forward_32_8 / forward_16_4).
        nl_in_channels = ((in_channels + 3) if self.add_spatial_coord else in_channels)
        self.nonlocal_32x = NonLocalBlock3DWithDownsampling(nl_in_channels, nonlocal_inter_channels, 1, in_channels)
        self.conv_32x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=(1, 3, 3), padding=(0, 1, 1))
        self.conv_32x_2 = nn.Conv3d(conv_inter_channels, conv_inter_channels, kernel_size=3, padding=1)
        self.nonlocal_16x = NonLocalBlock3DWithDownsampling(nl_in_channels, nonlocal_inter_channels, 1, in_channels)
        self.conv_16x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=(1, 3, 3), padding=(0, 1, 1))
        self.conv_8x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=3, padding=(1, 2, 2), dilation=(1, 2, 2))
        self.conv_4x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=3, padding=1)
        # Fusion convs consume the 4 concatenated streams.
        self.conv_semseg = nn.Conv3d((conv_inter_channels * 4), conv_inter_channels, kernel_size=3, padding=1)
        self.conv_semseg_out = nn.Conv3d(conv_inter_channels, num_classes, kernel_size=1, padding=0, bias=False)
        self.conv_embedding = nn.Conv3d((conv_inter_channels * 4), conv_inter_channels, kernel_size=3, padding=1)
        self.conv_embedding_out = nn.Conv3d(conv_inter_channels, self.embedding_size, kernel_size=1, padding=0, bias=False)
        if (self.variance_channels > 0):
            self.conv_variance_out = nn.Conv3d(conv_inter_channels, self.variance_channels, kernel_size=1, padding=0, bias=True)
        if self.seed_map:
            self.conv_seed_out = nn.Conv3d(conv_inter_channels, 1, kernel_size=1, padding=0, bias=True)
        # Buffers so these scalars follow the module across devices/dtypes.
        self.register_buffer('time_scale', torch.tensor(kwargs.get('time_scale', 0.2), dtype=torch.float32))
        self.register_buffer('tanh_premultiplier', torch.tensor(0.25, dtype=torch.float32))

    def forward_32_8(self, x):
        """Coarsest stream: non-local mixing then three 2x trilinear
        upsamplings (note: scale_factor=2.0 scales T, H and W)."""
        (N, C, T, H, W) = x.shape
        grid = self.nonlocal_32x.create_spatiotemporal_grid(H, W, T, self.time_scale, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        t = (torch.cat((x, grid.detach()), dim=1) if self.add_spatial_coord else x)
        x = (x + self.nonlocal_32x(t))
        x = F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)
        x = F.relu(self.conv_32x_1(x))
        x = F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)
        x = F.relu(self.conv_32x_2(x))
        return F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)

    def forward_16_4(self, x):
        """Second stream: non-local mixing then two 2x upsamplings."""
        (N, C, T, H, W) = x.shape
        grid = self.nonlocal_16x.create_spatiotemporal_grid(H, W, T, self.time_scale, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        t = (torch.cat((x, grid.detach()), dim=1) if self.add_spatial_coord else x)
        x = (x + self.nonlocal_16x(t))
        x = F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)
        x = F.relu(self.conv_16x_1(x))
        return F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)

    def forward_8_2(self, x):
        """Third stream: spatially-dilated conv then one 2x upsampling."""
        x = F.relu(self.conv_8x_1(x))
        return F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)

    def forward_4_1(self, x):
        """Finest stream: a single conv, no upsampling."""
        return F.relu(self.conv_4x_1(x))

    def semseg_branch(self, x):
        """Fused multi-scale features -> semantic segmentation logits."""
        x = F.relu(self.conv_semseg(x))
        return self.conv_semseg_out(x)

    def embedding_branch(self, x):
        """Fused features -> embedding map (+ optional variance/seed channels).

        Raw offsets are squashed with tanh and shifted by the absolute
        coordinate grid, so the embeddings live in grid coordinates.
        """
        x = F.relu(self.conv_embedding(x))
        (N, C, T, H, W) = x.shape
        grid = self.nonlocal_32x.create_spatiotemporal_grid(H, W, T, self.time_scale, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        embeddings = self.conv_embedding_out(x)
        embeddings = ((embeddings * self.tanh_premultiplier).tanh() + grid.detach())
        if (self.variance_channels > 0):
            variances = self.conv_variance_out(x)
            embeddings = torch.cat((embeddings, variances), dim=1)
        if self.seed_map:
            seed = self.conv_seed_out(x)
            embeddings = torch.cat((embeddings, seed), dim=1)
        return embeddings

    def forward(self, x):
        """
        :param x: list of 4 multiscale feature map tensors [N, C, T, H, W],
            ordered [32/8, 16/4, 8/2, 4/1].
        :return: tuple (semseg_logits, embeddings); note the original
            docstring claimed a dict, but a tuple is returned.
        """
        assert (len(x) == 4)
        scale_forward_fns = [self.forward_32_8, self.forward_16_4, self.forward_8_2, self.forward_4_1]
        # Bring every scale to a common resolution, then fuse channel-wise.
        x = [fn(feats) for (fn, feats) in zip(scale_forward_fns, x)]
        x = torch.cat(x, dim=1)
        semseg_logits = self.semseg_branch(x)
        embeddings = self.embedding_branch(x)
        return (semseg_logits, embeddings)
|
def str2bool(v):
    """argparse-friendly boolean parser.

    Maps common yes/no spellings (case-insensitive) to bool; passes real
    bools through; raises argparse.ArgumentTypeError otherwise.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
|
def parse_argsV2():
    """Build and parse the SaliencySegmentation command line.

    :return: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(description='SaliencySegmentation')
    parser.add_argument('--config', '-c', required=True, type=str)
    parser.add_argument('--num_workers', dest='num_workers', help='num_workers', default=4, type=int)
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--print_freq', dest='print_freq', help='Frequency of statistics printing', default=1, type=int)
    parser.add_argument('--loadepoch', dest='loadepoch', help='epoch to load model', default=None, type=str)
    parser.add_argument('--task', dest='task', help='task in <train, eval>', default='train', type=str)
    parser.add_argument('--pretrained', dest='pretrained', help='load pretrained weights for PWCNet', default='weights/pwc_net.pth.tar', type=str)
    parser.add_argument('--wts', '-w', dest='wts', help='weights file to resume training', default=None, type=str)
    # BUGFIX: type=bool treats any non-empty string (including 'False') as
    # True; use the str2bool helper defined above instead.
    # NOTE(review): the help text looks copy-pasted from another flag.
    parser.add_argument('--show_image_summary', dest='show_image_summary', help='load the best model', default=False, type=str2bool)
    args = parser.parse_args()
    return args
|
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
class AverageMeterDict(object):
    """Like AverageMeter, but tracks a dict of metrics simultaneously.

    All update() calls must supply dicts with the same keys.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = {}
        self.avg = {}
        self.sum = None  # lazily initialised on the first update()
        self.count = 0

    def update(self, in_dict, n=1):
        """Record `in_dict` observed `n` times and refresh the running means.

        BUGFIX: the original seeded self.sum with `in_dict` itself on the
        first call, ignoring `n` (wrong average whenever the first call has
        n > 1) and aliasing the caller's dict; weight by n and copy instead.
        """
        self.val = in_dict
        if self.sum is None:
            self.sum = {key: (val * n) for (key, val) in in_dict.items()}
        else:
            self.sum = {key: ((val * n) + self.sum[key]) for (key, val) in in_dict.items()}
        self.count += n
        self.avg = {key: (val / self.count) for (key, val) in self.sum.items()}

    def __str__(self):
        # "current values(running averages)" with 4-decimal formatting.
        val = dict([(key, '{:.4f}'.format(val)) for (key, val) in self.val.items()])
        avg = dict([(key, '{:.4f}'.format(val)) for (key, val) in self.avg.items()])
        return (((str(val) + '(') + str(avg)) + ')')
|
class font():
    # ANSI terminal escape sequences for coloured / emphasised console
    # output. Usage: font.BOLD + 'text' + font.END (END resets attributes).
    PURPLE = '\x1b[95m'
    CYAN = '\x1b[96m'
    DARKCYAN = '\x1b[36m'
    BLUE = '\x1b[94m'
    GREEN = '\x1b[92m'
    YELLOW = '\x1b[93m'
    RED = '\x1b[91m'
    BOLD = '\x1b[1m'
    UNDERLINE = '\x1b[4m'
    END = '\x1b[0m'
|
def findContours(*args, **kwargs):
    """Wrap cv2.findContours to maintain compatibility between OpenCV 3 and 4.

    OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4 returns
    (contours, hierarchy); normalise both to the 2-tuple form.

    Returns:
        contours, hierarchy
    """
    major = cv2.__version__.split('.')[0]
    if major == '4':
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif major == '3':
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        raise AssertionError('cv2 must be either version 3 or 4 to call this method')
    return (contours, hierarchy)
|
class AnalysisPipelineConfig(PipelineConfig):
    """PipelineConfig specialised for offline analysis.

    Materialises every pipeline stage as a CPU model and exposes per-stage
    input/output metadata from the underlying config dict `self.d`.
    """

    def __init__(self, d, layers, tensors):
        super().__init__(d)
        # Instantiate each stage on the CPU (moved to a device lazily later).
        self.stage_to_model = {stage_id: self.realize_stage(layers, tensors, stage_id, device='cpu') for stage_id in range(self.n_stages)}
        # Optional TorchScript smoke test; disabled by default.
        try_jit = False
        if try_jit:
            for (i, v) in self.stage_to_model.items():
                try:
                    torch.jit.script(v)
                except Exception as e:
                    print(f'-V- could not script stage {i}')
                    print(e)

    def model_inputs(self):
        """Names of the whole model's input tensors."""
        return self.d['model_inputs']

    def model_outputs(self):
        """Names of the whole model's output tensors."""
        return self.d['model_outputs']

    def get_inputs_req_grad_for_stage_tuple(self, stage_id: int):
        """Per-input requires_grad flags for one stage.

        Raises NotImplementedError for configs predating the 'req_grad' field.
        """
        my_inputs = self.d['stages'][stage_id]['inputs']
        if ('req_grad' in next(iter(my_inputs.values()))):
            return tuple((v['req_grad'] for (i, v) in my_inputs.items()))
        else:
            raise NotImplementedError()

    def get_outputs_req_grad_for_stage_tuple(self, stage_id: int):
        """Per-output requires_grad flags for one stage.

        Raises NotImplementedError for configs predating the 'req_grad' field.
        """
        my_outs = self.d['stages'][stage_id]['outputs']
        if ('req_grad' in next(iter(my_outs.values()))):
            return tuple((v['req_grad'] for (i, v) in my_outs.items()))
        else:
            raise NotImplementedError()

    def get_all_stage_inputs(self, stage_id):
        """Mapping of input tensor name -> metadata for the given stage."""
        return self.d['stages'][stage_id]['inputs']

    def get_all_stage_outputs(self, stage_id):
        """Mapping of output tensor name -> metadata for the given stage."""
        return self.d['stages'][stage_id]['outputs']
|
def extra_communication_time_lower_bound(comp_time, comm_time):
    """Best case: communication fully overlaps computation, so only the
    excess of comm_time over comp_time is exposed."""
    return max(0, comm_time - comp_time)
|
def extra_communication_time_upper_bound(comp_time, comm_time):
    """Worst case: communication does not overlap computation at all, so the
    entire comm_time is exposed. `comp_time` is accepted only for interface
    symmetry with extra_communication_time_lower_bound."""
    return comm_time
|
def upper_utilization_bound(comp_time, comm_time):
    """Utilization upper bound: communication fully overlaps computation,
    so only max(0, comm_time - comp_time) counts against utilization."""
    exposed_comm = max(0, comm_time - comp_time)
    return comp_time / (exposed_comm + comp_time)
|
def lower_utilization_bound(comp_time, comm_time):
    """Utilization lower bound: no comm/comp overlap, so all of comm_time
    counts against utilization."""
    return comp_time / (comm_time + comp_time)
|
def apply_ratio(upper, lower, ratio):
    """Linearly interpolate between `upper` (ratio=0) and `lower` (ratio=1)."""
    upper_weight = 1 - ratio
    lower_weight = ratio
    return upper * upper_weight + lower * lower_weight
|
def convert_to_analysis_format(config: Dict, layers: Dict[(str, torch.nn.Module)], tensors: Dict[(str, Tensor)]) -> AnalysisPipelineConfig:
    """Convert a pipeline configuration to the format used by the analysis
    module (an AnalysisPipelineConfig with all stages realized on the CPU)."""
    return AnalysisPipelineConfig(config, layers, tensors)
|
def run_partitions(model_inputs, analysis_config: AnalysisPipelineConfig, device='cuda'):
    """Execute all pipeline stages sequentially on one device.

    Stages are scheduled data-flow style: a stage runs once all of its input
    activations are available, otherwise it is re-queued.

    :param model_inputs: tuple, single tensor, or dict keyed by input name
    :param analysis_config: realized stages plus tensor-name metadata
    :param device: target device; falls back to 'cpu' when CUDA is unavailable
    :return: output activations ordered as analysis_config.model_outputs()
    """
    if (not torch.cuda.is_available()):
        device = 'cpu'
    if isinstance(model_inputs, dict):
        # Order dict inputs by the model's declared input names.
        model_inputs = tuple([model_inputs[i] for i in analysis_config.model_inputs()])
    if (not isinstance(model_inputs, tuple)):
        model_inputs = (model_inputs,)
    n_partitions = analysis_config.n_stages
    activations = {}
    # Move every stage to the target device up front.
    for i in range(n_partitions):
        analysis_config.stage_to_model[i] = analysis_config.stage_to_model[i].to(device)
        analysis_config.stage_to_model[i].device = device
    for (i, t) in zip(analysis_config.model_inputs(), model_inputs):
        activations[i] = move_tensors(t, device)
    # Data-flow scheduling loop: run ready stages, requeue the rest.
    parts = deque(range(n_partitions))
    while (len(parts) > 0):
        idx = parts.popleft()
        if all(((tensor in activations) for tensor in analysis_config.get_all_stage_inputs(idx))):
            inputs = [activations[tensor] for tensor in analysis_config.get_all_stage_inputs(idx)]
            outs = analysis_config.stage_to_model[idx](*inputs)
            for (o, t) in zip(analysis_config.get_all_stage_outputs(idx), outs):
                activations[o] = t
        else:
            parts.append(idx)
    return [activations[o] for o in analysis_config.model_outputs()]
|
def add_dicts(d1, d2):
    """Element-wise sum of two dicts sharing the same keys in the same order.

    Asserts equal length and matching key order; returns a new dict.
    """
    assert len(d1) == len(d2)
    result = {}
    for (key_a, value_a), (key_b, value_b) in zip(d1.items(), d2.items()):
        assert key_a == key_b
        result[key_a] = value_a + value_b
    return result
|
def add_stds_dicts(d1, d2):
    """Combine per-key standard deviations in quadrature.

    var(x+y) = var(x) + var(y) under the simplifying assumption that
    cov(x, y) is 0, so stds combine as sqrt(s1^2 + s2^2).
    """
    assert len(d1) == len(d2)
    combined = {}
    for (key_a, std_a), (key_b, std_b) in zip(d1.items(), d2.items()):
        assert key_a == key_b
        combined[key_a] = math.sqrt(std_a * std_a + std_b * std_b)
    return combined
|
def get_tensor_req_grad(ts):
    """Map a (possibly nested) structure of values to requires_grad flags.

    Tensors map to their requires_grad attribute; every non-tensor maps to
    False. Structure is preserved via nested_map.
    """
    def requires_grad_flag(obj):
        return obj.requires_grad if isinstance(obj, Tensor) else False
    return nested_map(requires_grad_flag, ts)
|
def run_partitions_fwd(model_inputs, analysis_config: AnalysisPipelineConfig, device='cpu', return_info_for_bwd=False):
    """Memory-frugal forward pass over all pipeline stages.

    Activations and stages are parked on the CPU; each stage is moved to
    `device` only while it executes, then returned to the CPU.

    :return: model outputs (moved to `device`); when return_info_for_bwd is
        True, also the (activations, req_grad) dicts consumed by
        run_partitions_bwd().
    """
    if isinstance(model_inputs, dict):
        # Order dict inputs by the model's declared input names.
        model_inputs = tuple([model_inputs[i] for i in analysis_config.model_inputs()])
    n_partitions = analysis_config.n_stages
    if (not isinstance(model_inputs, tuple)):
        model_inputs = (model_inputs,)
    activations = {}
    req_grad = {}
    device = torch.device(device)
    # Park every stage on the CPU; they are shuttled to `device` one by one.
    for i in range(n_partitions):
        analysis_config.stage_to_model[i] = analysis_config.stage_to_model[i].to('cpu')
    for (i, t) in zip(analysis_config.model_inputs(), model_inputs):
        activations[i] = move_tensors(t, 'cpu')
        req_grad[i] = get_tensor_req_grad(t)
    # Data-flow scheduling loop: run ready stages, requeue the rest.
    parts = deque(range(n_partitions))
    while (len(parts) > 0):
        idx = parts.popleft()
        if all(((tensor in activations) for tensor in analysis_config.get_all_stage_inputs(idx))):
            inputs = [activations[tensor] for tensor in analysis_config.get_all_stage_inputs(idx)]
            # Sanity check: stage inputs must be distinct tensor objects.
            for i in range(len(inputs)):
                for j in range((i + 1), len(inputs)):
                    err = f'inputs {i} and {j} of stage {idx} are the same tensor'
                    assert (inputs[i] is not inputs[j]), err
            analysis_config.stage_to_model[idx] = analysis_config.stage_to_model[idx].to(device)
            inputs = move_tensors(inputs, device)
            outs = analysis_config.stage_to_model[idx](*inputs)
            # Sanity check: stage outputs must be distinct tensor objects.
            for i in range(len(outs)):
                for j in range((i + 1), len(outs)):
                    err = f'outputs {i} and {j} of stage {idx} are the same tensor'
                    assert (outs[i] is not outs[j]), err
            # Immediately return the stage and its activations to the CPU.
            analysis_config.stage_to_model[idx] = analysis_config.stage_to_model[idx].to('cpu')
            outs = move_tensors(outs, 'cpu')
            for (o, rg, t) in zip(analysis_config.get_all_stage_outputs(idx), analysis_config.get_outputs_req_grad_for_stage_tuple(idx), outs):
                req_grad[o] = rg
                activations[o] = t
        else:
            parts.append(idx)
    outputs = [move_tensors(activations[o], device) for o in analysis_config.model_outputs()]
    if return_info_for_bwd:
        return (outputs, (activations, req_grad))
    else:
        return outputs
|
def run_partitions_bwd(analysis_config: AnalysisPipelineConfig, activations, req_grad):
    """Backward pass matching run_partitions_fwd(): walks the stages in
    data-flow order and backpropagates through the saved activations.

    :param activations: tensor-name -> activation dict, as returned by
        run_partitions_fwd(..., return_info_for_bwd=True)
    :param req_grad: tensor-name -> bool dict from the same call
    """
    n_partitions = analysis_config.n_stages
    for i in range(n_partitions):
        analysis_config.stage_to_model[i] = analysis_config.stage_to_model[i].to('cpu')
    parts = deque(range(n_partitions))
    # Seed the model outputs with ones-gradients.
    grads = {tensor: torch.ones_like(activations[tensor], requires_grad=False) for tensor in analysis_config.model_outputs() if req_grad[tensor]}
    while (len(parts) > 0):
        idx = parts.popleft()
        if all(((tensor in grads) for tensor in analysis_config.get_all_stage_outputs(idx) if req_grad[tensor])):
            grad_in = [grads[tensor] for tensor in analysis_config.get_all_stage_outputs(idx) if req_grad[tensor]]
            outputs = [activations[tensor] for tensor in analysis_config.get_all_stage_outputs(idx) if req_grad[tensor]]
            torch.autograd.backward(outputs, grad_in)
            # BUGFIX: everything here runs on the CPU, and on CUDA-less builds
            # torch.cuda.synchronize() raises; synchronize only when CUDA exists.
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            # Propagate the gradients that arrived at this stage's inputs.
            for tensor in analysis_config.get_all_stage_inputs(idx):
                if req_grad[tensor]:
                    grads[tensor] = activations[tensor].grad
                    assert (grads[tensor] is not None)
        else:
            parts.append(idx)
|
def run_analysis(sample, model, n_workers, bw_GBps=12, verbose=True, comm_comp_concurrency_ratio=0):
    """Estimate data-parallel speedup for `model` from a profiled sample.

    Assuming bw_GBps is bw between worker and master.
    Assuming all samples are the same size.
    currently n_workers is not relevant, theoretically its linearly scaling.

    NOTE(review): `theoretical_analysis` is defined twice in this module; the
    later graph-based definition shadows the 5-argument one this call expects,
    so this call will raise TypeError at runtime -- rename one of them.
    """
    comp_time = cuda_computation_times(model, sample)
    return theoretical_analysis(model, bw_GBps, comp_time, comm_comp_concurrency_ratio, n_workers)
|
def theoretical_analysis(model, bw_GBps, comp_time, comm_comp_concurrency_ratio, n_workers):
    """Analytic data-parallel speedup model for one worker<->master link.

    NOTE(review): a second, graph-based `theoretical_analysis` defined later
    in this module shadows this definition at import time; callers such as
    run_analysis() will reach the later one. Consider renaming.

    :return: (expected_speedup, dict of all intermediate quantities)
    """
    # Model size in MB (1e6 bytes) -- the payload of one parameter sync.
    send_mb = (sum([(p.nelement() * p.element_size()) for p in model.parameters()]) / 1000000.0)
    # Note the units: MB / (GB/s) yields milliseconds.
    single_send_time = (send_mb / bw_GBps)
    # One send up to the master plus one broadcast back.
    worker_to_master_sends = 1
    master_to_worker_sends = 1
    num_sends = (worker_to_master_sends + master_to_worker_sends)
    total_send_time = (num_sends * single_send_time)
    comm_time_lower_bound = extra_communication_time_lower_bound(comp_time, total_send_time)
    comm_time_upper_bound = total_send_time
    _lower_utilization_bound = lower_utilization_bound(comp_time, total_send_time)
    _upper_utilization_bound = upper_utilization_bound(comp_time, total_send_time)
    # Interpolate between full-overlap and no-overlap communication.
    comm_time_with_ratio = apply_ratio(upper=comm_time_upper_bound, lower=comm_time_lower_bound, ratio=comm_comp_concurrency_ratio)
    utilization = (comp_time / (comp_time + comm_time_with_ratio))
    expected_speedup = (utilization * n_workers)
    d = dict(n_workers=n_workers, send_mb=send_mb, single_send_time=single_send_time, num_sends=num_sends, total_send_time=total_send_time, comp_time=comp_time, comm_time_upper_bound=comm_time_upper_bound, comm_time_lower_bound=comm_time_lower_bound, utilization_lower_bound=_lower_utilization_bound, utilization_upper_bound=_upper_utilization_bound, comm_comp_concurrency_ratio=comm_comp_concurrency_ratio, comm_time_with_ratio=comm_time_with_ratio, utilization=utilization, expected_speedup=expected_speedup)
    return (expected_speedup, d)
|
def asgd_anayslsis_speedup_vs_ratio_graph(all_ratios, sample, model, n_workers, bw_GBps=12, verbose=True):
    """Sweep the comm/comp concurrency ratio and collect expected speedups.

    NOTE(review): relies on the 5-argument `theoretical_analysis`, which is
    shadowed by the later graph-based definition in this module -- this will
    fail until one of them is renamed. Also: `verbose` is unused, and the
    misspelled function name ("anayslsis") is kept for API compatibility.

    :return: (speedups, ratios) parallel lists
    """
    comp_time = cuda_computation_times(model, sample)
    speedups = []
    ratios = all_ratios
    for ratio in ratios:
        (s, d) = theoretical_analysis(model, bw_GBps, comp_time, ratio, n_workers)
        speedups.append(s)
    return (speedups, ratios)
|
def theoretical_analysis(graph, recomputation=True, async_pipeline=False):
    """Estimate per-partition execution times from profiled graph weights.

    Two estimates are produced per stage: a *sequential* one (all ops in a
    partition run one after another, i.e. the sum of weights) and a
    *parallel* one (independent computation paths run concurrently, i.e. the
    critical-path length).

    NOTE(review): this definition shadows the earlier 5-argument
    theoretical_analysis() in this module.

    :return: (sequential_f, sequential_b, parallel_f, parallel_b) dicts keyed
        by stage id.
    """
    n_parts = len(set((n.stage_id for n in graph.nodes)))
    print(f'Theoretical analysis found n_parts={n_parts}')
    parallel_b = dict()
    parallel_f = dict()
    tensor_names = set()
    stage_outputs = defaultdict(list)
    # A node is a stage output when any consumer lives in another stage, or
    # when it is a model output.
    for n in graph.nodes:
        if ((n.type != NodeTypes.IN) and any(((o.stage_id != n.stage_id) for o in n.out_edges))):
            tensor_names.add(n.scope)
            stage_outputs[n.stage_id].append(n.scope)
        elif (n in graph.outputs):
            tensor_names.add(n.scope)
            stage_outputs[n.stage_id].append(n.scope)
    sequential_f = {i: 0 for i in range(n_parts)}
    sequential_b = {i: 0 for i in range(n_parts)}
    nodes = dict()
    # Sum all per-node times into the sequential estimates, and remember the
    # node object behind each stage-output tensor name.
    for node in graph.nodes:
        if (graph.input_kw_ids.get(node.id, node.scope) in tensor_names):
            nodes[graph.input_kw_ids.get(node.id, node.scope)] = node
        sequential_f[node.stage_id] += extract_time(node.weight, forward=True)
        sequential_b[node.stage_id] += extract_time(node.weight, forward=False)
    for i in range(n_parts):
        partition_specific_computation = recomputation
        is_last_partition = (i == (n_parts - 1))
        # In an async pipeline the last stage does not recompute.
        if (async_pipeline and is_last_partition):
            partition_specific_computation = False
        outputs = [nodes[name] for name in stage_outputs[i]]
        cache = dict()
        parallel_f[i] = 0
        parallel_b[i] = 0
        # Parallel estimate = longest critical path to any stage output.
        for o in outputs:
            (f, b) = parallel_execution_analysis(o, i, cache)
            parallel_f[i] = max(parallel_f[i], f)
            parallel_b[i] = max(parallel_b[i], b)
        # With recomputation, the forward pass is replayed during backward.
        if partition_specific_computation:
            sequential_b[i] += sequential_f[i]
            parallel_b[i] += parallel_f[i]
    return (sequential_f, sequential_b, parallel_f, parallel_b)
|
def parallel_execution_analysis(node, part_idx, cache):
    """Longest (critical) forward/backward path ending at `node` within
    partition `part_idx`, assuming all independent paths run concurrently.

    Memoised per node scope in `cache`; nodes outside the partition
    contribute (0, 0).

    :return: (longest_forward_time, longest_backward_time)
    """
    if (node.scope in cache):
        return cache[node.scope]
    elif (node.stage_id != part_idx):
        cache[node.scope] = (0, 0)
        return (0, 0)
    (longest_f, longest_b) = (0, 0)
    # Critical path = max over predecessors, plus this node's own cost.
    for n in node.in_edges:
        (f, b) = parallel_execution_analysis(n, part_idx, cache)
        longest_f = max(f, longest_f)
        longest_b = max(b, longest_b)
    longest_f += extract_time(node.weight, forward=True)
    longest_b += extract_time(node.weight, forward=False)
    cache[node.scope] = (longest_f, longest_b)
    return (longest_f, longest_b)
|
def extract_time(w, forward=False):
    """Read the profiled forward/backward time off a graph-weight object.

    Unwraps a `.weight` attribute when present; returns 0 when no timing
    information was recorded.
    """
    target = getattr(w, 'weight', w)
    if not hasattr(target, 'forward_time'):
        return 0
    return target.forward_time if forward else target.backward_time
|
def maybe_do_theoretical_analysis(DO_THEORETICAL, PRINT_THEORETICAL, PRINT_MIN_MAX_BALANCE, async_pipeline, graph, recomputation):
    """Assemble a human-readable report from the graph-based theoretical
    analysis.

    Returns an empty string when `graph` is None or DO_THEORETICAL is False;
    the PRINT_* flags select which sections are appended.
    """
    s = ''
    if ((graph is not None) and DO_THEORETICAL):
        (sequential_f, sequential_b, parallel_f, parallel_b) = theoretical_analysis(graph, recomputation=recomputation, async_pipeline=async_pipeline)
        edges = edge_cut(graph)
        # worst_balance = min/max stage time; 1.0 means perfectly balanced.
        theoretical_sequential_b_balance = worst_balance(sequential_b)
        theoretical_sequential_f_balance = worst_balance(sequential_f)
        theoretical_parallel_b_balance = worst_balance(parallel_b)
        theoretical_parallel_f_balance = worst_balance(parallel_f)
        if (edges is not None):
            s += f'''cutting edges are edges between partitions
'''
            s += f'''number of cutting edges: {len(edges)}
'''
        if PRINT_THEORETICAL:
            s += f'''
theoretical times are execution time based on sum of graph weights ms
'''
            s += f'''
sequential forward {sequential_f}
sequential backward {sequential_b}
'''
            s += f'''parallel forward {parallel_f}
parallel backward {parallel_b}
'''
        if PRINT_MIN_MAX_BALANCE:
            s += f'''
balance is ratio of computation time between fastest and slowest parts.'''
            s += ' (between 0 and 1 higher is better)\n'
            if PRINT_THEORETICAL:
                s += f'''theoretical sequential balance:
'''
                s += f'''forward {theoretical_sequential_f_balance:.3f}
backward {theoretical_sequential_b_balance:.3f}
'''
            s += f'''theoretical parallel balance:
'''
            s += f'''forward {theoretical_parallel_f_balance:.3f}
backward {theoretical_parallel_b_balance:.3f}
'''
    return s
|
def edge_cut(graph):
    """Find the cutting edges of the graph.

    Collects one representative edge per (node, destination-stage) pair that
    crosses a partition boundary.
    """
    cut = []
    for node in graph.nodes:
        seen_stages = set()
        for dst in node.out_edges:
            if dst.stage_id != node.stage_id and dst.stage_id not in seen_stages:
                seen_stages.add(dst.stage_id)
                cut.append((node, dst))
    return cut
|
def worst_balance(times):
    """Ratio of the fastest to the slowest stage time (1.0 = balanced)."""
    values = times.values()
    return min(values) / max(values)
|
def pipedream_extimated_time(N, m, L=1):
    """Extrapolate PipeDream partitioning runtime from a measured baseline.

    The cost model is L * N**3 * m**2; a baseline run with complexity
    709789824 took 8 seconds. (The misspelled name is kept for backward
    compatibility with existing callers.)
    """
    baseline_complexity = 709789824
    baseline_seconds = 8
    workload = L * (N ** 3) * (m ** 2)
    return baseline_seconds * (workload / baseline_complexity)
|
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.