code stringlengths 17 6.64M |
|---|
def rlav3_resnet50(rla_channel=32):
    """Construct an RLAv3_ResNet-50 model (stage depths [3, 4, 6, 3]).

    default:
        num_classes=1000, rla_channel=32, SE=False, ECA=None
        ECA: a list of kernel sizes in ECA
    """
    print('Constructing rlav3_resnet50......')
    # Fixed: forward rla_channel to the model — the parameter was previously
    # accepted but silently ignored (the class default was always used).
    model = RLAv3_ResNet(RLAv3_Bottleneck, [3, 4, 6, 3], rla_channel=rla_channel)
    return model
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution, bias-free, padded by the dilation so a stride-1 call
    preserves spatial size."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (pure channel projection), bias-free."""
    opts = dict(kernel_size=1, stride=stride, bias=False)
    return nn.Conv2d(in_planes, out_planes, **opts)
|
class RLAv4_Bottleneck(nn.Module):
    """ResNet bottleneck block augmented with an RLA (recurrent layer
    aggregation) hidden state.

    The hidden state ``h`` is concatenated to the block input before the
    first 1x1 conv, and is average-pooled whenever the block downsamples so
    its spatial size keeps tracking the feature map.  Optional SE / ECA
    channel attention is applied after bn3.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32, SE=False, ECA_size=None, groups=1, base_width=64, dilation=1, norm_layer=None, reduction=16):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # conv1 consumes the block input concatenated with the RLA state.
        self.conv1 = conv1x1(inplanes + rla_channel, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # When this block halves the resolution, h must be pooled to match.
        self.averagePooling = None
        if (downsample is not None) and (stride != 1):
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        self.se = None
        if SE:
            self.se = SELayer(planes * self.expansion, reduction)
        self.eca = None
        if ECA_size is not None:  # fixed: identity comparison, not `!= None`
            self.eca = eca_layer(planes * self.expansion, int(ECA_size))

    def forward(self, x, h):
        """Run the block.

        Args:
            x: input feature map.
            h: RLA hidden state (rla_channel channels, same spatial size as x).

        Returns:
            (out, y, h, identity): activated residual output, pre-residual
            features after attention, the possibly-pooled hidden state, and
            the shortcut branch.
        """
        identity = x
        x = torch.cat((x, h), dim=1)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.se is not None:  # fixed: identity comparison
            out = self.se(out)
        if self.eca is not None:
            out = self.eca(out)
        y = out
        if self.downsample is not None:
            identity = self.downsample(identity)
        if self.averagePooling is not None:
            h = self.averagePooling(h)
        out += identity
        out = self.relu(out)
        return (out, y, h, identity)
|
class RLAv4_ResNet(nn.Module):
    """ResNet backbone carrying a recurrent-layer-aggregation (RLA) side channel.

    rla_channel: the number of filters of the shared (recurrent) conv in RLA
    SE: whether to use SE attention in the blocks
    ECA: None to disable ECA, or a list of 4 kernel sizes (one per stage)
    """

    def __init__(self, block, layers, num_classes=1000, rla_channel=32, SE=False, ECA=None, zero_init_last_bn=True, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        if ECA is None:
            ECA = [None] * 4
        elif len(ECA) != 4:
            raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
        self.rla_channel = rla_channel
        # Retained for backward compatibility: this flag used to select a CPU
        # hidden state for FLOPs profiling; the device now follows the input.
        self.flops = False
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        conv_outs = [None] * 4
        recurrent_convs = [None] * 4
        stages = [None] * 4
        stage_bns = [None] * 4
        (stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
        (stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
        (stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
        (stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_convs = nn.ModuleList(recurrent_convs)
        self.stages = nn.ModuleList(stages)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.tanh = nn.Tanh()
        self.bn2 = norm_layer(rla_channel)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # The classifier sees backbone features concatenated with the RLA state.
        self.fc = nn.Linear(512 * block.expansion + rla_channel, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init each block's last BN so the residual branch starts as identity.
        if zero_init_last_bn:
            for m in self.modules():
                if isinstance(m, RLAv4_Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
        """Build one stage: the blocks, one hidden-state BN per block, the 1x1
        conv injecting features into h, and the stage's shared recurrent conv."""
        conv_out = conv1x1(planes * block.expansion, rla_channel)
        recurrent_conv = conv3x3(rla_channel, rla_channel)
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        bns = [norm_layer(rla_channel) for _ in range(blocks)]
        return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_conv)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        (batch, _, height, width) = x.size()
        # Fixed: allocate the hidden state on the input's device. The original
        # hard-coded device='cuda' (with self.flops as a CPU workaround), which
        # broke plain CPU execution.
        h = torch.zeros(batch, self.rla_channel, height, width, device=x.device)
        # NOTE(review): source indentation was lost in extraction; the h-update
        # chain is treated as per-block (consistent with the sibling variants
        # that consume per-block tensors) — confirm against the original file.
        for (layers, bns, conv_out, recurrent_conv) in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
            for (layer, bn) in zip(layers, bns):
                (x, y, h, identity) = layer(x, h)
                h = bn(h)
                h = self.tanh(h)
                h = recurrent_conv(h)
                # v4 variant: inject the block OUTPUT features into h.
                x_out = conv_out(x)
                h = h + x_out
                h = self.bn2(h)
                h = self.relu(h)
        x = torch.cat((x, h), dim=1)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
|
def rlav4_resnet50(rla_channel=32):
    """Construct an RLAv4_ResNet-50 model (stage depths [3, 4, 6, 3]).

    default:
        num_classes=1000, rla_channel=32, SE=False, ECA=None
        ECA: a list of kernel sizes in ECA
    """
    print('Constructing rlav4_resnet50......')
    # Fixed: forward rla_channel to the model — the parameter was previously
    # accepted but silently ignored (the class default was always used).
    model = RLAv4_ResNet(RLAv4_Bottleneck, [3, 4, 6, 3], rla_channel=rla_channel)
    return model
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (padding tracks the dilation)."""
    return nn.Conv2d(in_planes, out_planes, 3,
                     stride=stride, padding=dilation,
                     groups=groups, dilation=dilation, bias=False)
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
|
class RLAv5_Bottleneck(nn.Module):
    """ResNet bottleneck block augmented with an RLA (recurrent layer
    aggregation) hidden state.

    The hidden state ``h`` is concatenated to the block input before the
    first 1x1 conv, and is average-pooled whenever the block downsamples so
    its spatial size keeps tracking the feature map.  Optional SE / ECA
    channel attention is applied after bn3.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32, SE=False, ECA_size=None, groups=1, base_width=64, dilation=1, norm_layer=None, reduction=16):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # conv1 consumes the block input concatenated with the RLA state.
        self.conv1 = conv1x1(inplanes + rla_channel, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # When this block halves the resolution, h must be pooled to match.
        self.averagePooling = None
        if (downsample is not None) and (stride != 1):
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        self.se = None
        if SE:
            self.se = SELayer(planes * self.expansion, reduction)
        self.eca = None
        if ECA_size is not None:  # fixed: identity comparison, not `!= None`
            self.eca = eca_layer(planes * self.expansion, int(ECA_size))

    def forward(self, x, h):
        """Run the block.

        Args:
            x: input feature map.
            h: RLA hidden state (rla_channel channels, same spatial size as x).

        Returns:
            (out, y, h, identity): activated residual output, pre-residual
            features after attention, the possibly-pooled hidden state, and
            the shortcut branch.
        """
        identity = x
        x = torch.cat((x, h), dim=1)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.se is not None:  # fixed: identity comparison
            out = self.se(out)
        if self.eca is not None:
            out = self.eca(out)
        y = out
        if self.downsample is not None:
            identity = self.downsample(identity)
        if self.averagePooling is not None:
            h = self.averagePooling(h)
        out += identity
        out = self.relu(out)
        return (out, y, h, identity)
|
class RLAv5_ResNet(nn.Module):
    """ResNet backbone carrying a recurrent-layer-aggregation (RLA) side channel.

    rla_channel: the number of filters of the shared (recurrent) conv in RLA
    SE: whether to use SE attention in the blocks
    ECA: None to disable ECA, or a list of 4 kernel sizes (one per stage)
    """

    def __init__(self, block, layers, num_classes=1000, rla_channel=32, SE=False, ECA=None, zero_init_last_bn=True, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        if ECA is None:
            ECA = [None] * 4
        elif len(ECA) != 4:
            raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
        self.rla_channel = rla_channel
        # Retained for backward compatibility: this flag used to select a CPU
        # hidden state for FLOPs profiling; the device now follows the input.
        self.flops = False
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        conv_outs = [None] * 4
        recurrent_convs = [None] * 4
        stages = [None] * 4
        stage_bns = [None] * 4
        (stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
        (stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
        (stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
        (stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_convs = nn.ModuleList(recurrent_convs)
        self.stages = nn.ModuleList(stages)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.tanh = nn.Tanh()
        self.bn2 = norm_layer(rla_channel)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # The classifier sees backbone features concatenated with the RLA state.
        self.fc = nn.Linear(512 * block.expansion + rla_channel, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init each block's last BN so the residual branch starts as identity.
        if zero_init_last_bn:
            for m in self.modules():
                if isinstance(m, RLAv5_Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
        """Build one stage: the blocks, one hidden-state BN per block, the 1x1
        conv injecting features into h, and the stage's shared recurrent conv."""
        conv_out = conv1x1(planes * block.expansion, rla_channel)
        recurrent_conv = conv3x3(rla_channel, rla_channel)
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        bns = [norm_layer(rla_channel) for _ in range(blocks)]
        return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_conv)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        (batch, _, height, width) = x.size()
        # Fixed: allocate the hidden state on the input's device. The original
        # hard-coded device='cuda' (with self.flops as a CPU workaround), which
        # broke plain CPU execution.
        h = torch.zeros(batch, self.rla_channel, height, width, device=x.device)
        # NOTE(review): source indentation was lost in extraction; the h-update
        # chain is per-block (it consumes each block's `identity`) — confirm
        # the bn2/relu placement against the original file.
        for (layers, bns, conv_out, recurrent_conv) in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
            for (layer, bn) in zip(layers, bns):
                (x, y, h, identity) = layer(x, h)
                # v5 variant: inject the SHORTCUT branch into h before bn/tanh.
                identity_out = conv_out(identity)
                h = h + identity_out
                h = bn(h)
                h = self.tanh(h)
                h = recurrent_conv(h)
                h = self.bn2(h)
                h = self.relu(h)
        x = torch.cat((x, h), dim=1)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
|
def rlav5_resnet50(rla_channel=32):
    """Construct an RLAv5_ResNet-50 model (stage depths [3, 4, 6, 3]).

    default:
        num_classes=1000, rla_channel=32, SE=False, ECA=None
        ECA: a list of kernel sizes in ECA
    """
    print('Constructing rlav5_resnet50......')
    # Fixed: forward rla_channel to the model — the parameter was previously
    # accepted but silently ignored (the class default was always used).
    model = RLAv5_ResNet(RLAv5_Bottleneck, [3, 4, 6, 3], rla_channel=rla_channel)
    return model
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 bias-free convolution; padding equals dilation."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False,
                     dilation=dilation)
|
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 convolution."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)
|
class RLAv6_Bottleneck(nn.Module):
    """ResNet bottleneck block augmented with an RLA (recurrent layer
    aggregation) hidden state.

    The hidden state ``h`` is concatenated to the block input before the
    first 1x1 conv, and is average-pooled whenever the block downsamples so
    its spatial size keeps tracking the feature map.  Optional SE / ECA
    channel attention is applied after bn3.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32, SE=False, ECA_size=None, groups=1, base_width=64, dilation=1, norm_layer=None, reduction=16):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # conv1 consumes the block input concatenated with the RLA state.
        self.conv1 = conv1x1(inplanes + rla_channel, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # When this block halves the resolution, h must be pooled to match.
        self.averagePooling = None
        if (downsample is not None) and (stride != 1):
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        self.se = None
        if SE:
            self.se = SELayer(planes * self.expansion, reduction)
        self.eca = None
        if ECA_size is not None:  # fixed: identity comparison, not `!= None`
            self.eca = eca_layer(planes * self.expansion, int(ECA_size))

    def forward(self, x, h):
        """Run the block.

        Args:
            x: input feature map.
            h: RLA hidden state (rla_channel channels, same spatial size as x).

        Returns:
            (out, y, h, identity): activated residual output, pre-residual
            features after attention, the possibly-pooled hidden state, and
            the shortcut branch.
        """
        identity = x
        x = torch.cat((x, h), dim=1)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.se is not None:  # fixed: identity comparison
            out = self.se(out)
        if self.eca is not None:
            out = self.eca(out)
        y = out
        if self.downsample is not None:
            identity = self.downsample(identity)
        if self.averagePooling is not None:
            h = self.averagePooling(h)
        out += identity
        out = self.relu(out)
        return (out, y, h, identity)
|
class RLAv6_ResNet(nn.Module):
    """ResNet backbone carrying a recurrent-layer-aggregation (RLA) side channel.

    rla_channel: the number of filters of the shared (recurrent) conv in RLA
    SE: whether to use SE attention in the blocks
    ECA: None to disable ECA, or a list of 4 kernel sizes (one per stage)
    """

    def __init__(self, block, layers, num_classes=1000, rla_channel=32, SE=False, ECA=None, zero_init_last_bn=True, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        if ECA is None:
            ECA = [None] * 4
        elif len(ECA) != 4:
            raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
        self.rla_channel = rla_channel
        # Retained for backward compatibility: this flag used to select a CPU
        # hidden state for FLOPs profiling; the device now follows the input.
        self.flops = False
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        conv_outs = [None] * 4
        recurrent_convs = [None] * 4
        stages = [None] * 4
        stage_bns = [None] * 4
        (stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
        (stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
        (stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
        (stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_convs = nn.ModuleList(recurrent_convs)
        self.stages = nn.ModuleList(stages)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.tanh = nn.Tanh()
        self.bn2 = norm_layer(rla_channel)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # The classifier sees backbone features concatenated with the RLA state.
        self.fc = nn.Linear(512 * block.expansion + rla_channel, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init each block's last BN so the residual branch starts as identity.
        if zero_init_last_bn:
            for m in self.modules():
                if isinstance(m, RLAv6_Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
        """Build one stage: the blocks, one hidden-state BN per block, the 1x1
        conv injecting features into h, and the stage's shared recurrent conv."""
        conv_out = conv1x1(planes * block.expansion, rla_channel)
        recurrent_conv = conv3x3(rla_channel, rla_channel)
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        bns = [norm_layer(rla_channel) for _ in range(blocks)]
        return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_conv)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        (batch, _, height, width) = x.size()
        # Fixed: allocate the hidden state on the input's device. The original
        # hard-coded device='cuda' (with self.flops as a CPU workaround), which
        # broke plain CPU execution.
        h = torch.zeros(batch, self.rla_channel, height, width, device=x.device)
        # NOTE(review): source indentation was lost in extraction; the h-update
        # chain is per-block (it consumes each block's `identity`) — confirm
        # the bn2/relu placement against the original file.
        for (layers, bns, conv_out, recurrent_conv) in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
            for (layer, bn) in zip(layers, bns):
                (x, y, h, identity) = layer(x, h)
                h = bn(h)
                h = self.tanh(h)
                h = recurrent_conv(h)
                # v6 variant: inject the SHORTCUT branch into h after the
                # recurrent conv (v5 injects it before bn/tanh).
                identity_out = conv_out(identity)
                h = h + identity_out
                h = self.bn2(h)
                h = self.relu(h)
        x = torch.cat((x, h), dim=1)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
|
def rlav6_resnet50(rla_channel=32):
    """Construct an RLAv6_ResNet-50 model (stage depths [3, 4, 6, 3]).

    default:
        num_classes=1000, rla_channel=32, SE=False, ECA=None
        ECA: a list of kernel sizes in ECA
    """
    print('Constructing rlav6_resnet50......')
    # Fixed: forward rla_channel to the model — the parameter was previously
    # accepted but silently ignored (the class default was always used).
    model = RLAv6_ResNet(RLAv6_Bottleneck, [3, 4, 6, 3], rla_channel=rla_channel)
    return model
|
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(nn.Linear(channel, (channel // reduction), bias=False), nn.ReLU(inplace=True), nn.Linear((channel // reduction), channel, bias=False), nn.Sigmoid())
def forward(self, x):
(b, c, _, _) = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return (x * y.expand_as(x))
|
def main():
    """Build the model named by ``args.arch`` and report its FLOPs / params.

    NOTE(review): a later ``def main`` in this file re-binds the name, so this
    profiling entry point is dead code unless the file is split apart.
    """
    global args
    args = parser.parse_args()
    # `models` is presumably a local package exposing the factory functions
    # (e.g. rlav4_resnet50) by name — confirm against the file's imports.
    model = models.__dict__[args.arch]()
    print(model)
    # Single random image at the configured resolution (shadows builtin `input`).
    input = torch.randn(1, 3, args.input_size, args.input_size)
    model.train()
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    model = model.to(device)
    input = input.to(device)
    # `profile` is presumably thop.profile, returning raw (flops, params) — confirm.
    (flops, params) = profile(model, inputs=(input,))
    print('flops = ', flops)
    print('params = ', params)
    # Re-render the same numbers with unit suffixes (T/G/M/K/B).
    (flops, params) = clever_format([flops, params], '%.3f')
    print('flops = ', flops)
    print('params = ', params)
|
def clever_format(nums, format='%.2f'):
    """Render raw counts as human-readable strings with T/G/M/K/B suffixes.

    Thresholds are decimal (1e12, 1e9, ...) while the divisors are binary
    powers of 1024, matching the original behavior.  A single-element input
    yields one string; otherwise a tuple of strings is returned.
    """
    rendered = []
    for value in nums:
        if value > 1000000000000.0:
            scaled, unit = value / (1024 ** 4), 'T'
        elif value > 1000000000.0:
            scaled, unit = value / (1024 ** 3), 'G'
        elif value > 1000000.0:
            scaled, unit = value / (1024 ** 2), 'M'
        elif value > 1000.0:
            scaled, unit = value / 1024, 'K'
        else:
            scaled, unit = value, 'B'
        rendered.append((format % scaled) + unit)
    if len(rendered) == 1:
        return rendered[0]
    return tuple(rendered)
|
def main():
    """Training entry point: parse CLI args, set up seeding and the
    distributed configuration, then launch one worker per GPU (or a single
    worker)."""
    global args
    args = parser.parse_args()
    if args.seed is not None:
        # Deterministic-ish runs; cudnn.deterministic disables autotuned kernels.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    if args.dist_url == 'env://' and args.world_size == -1:
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = (args.world_size > 1) or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        main_worker(args.gpu, ngpus_per_node, args)
    else:
        # world_size becomes the total process count across all nodes.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
|
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: build the model, wrap it for the selected execution
    mode (distributed / single-GPU / DataParallel), optionally resume, then run
    the train/validate loop and checkpoint the best model.

    NOTE(review): source indentation was lost in extraction; nesting below was
    reconstructed following the standard PyTorch ImageNet example this script
    mirrors — confirm against the original file.
    """
    global best_acc1
    args.gpu = gpu
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print("=> creating model '{}' for training".format(args.arch))
    # args.rla overrides the model's default RLA channel count when given.
    if (args.rla == None):
        model = models.__dict__[args.arch]()
    else:
        model = models.__dict__[args.arch](rla_channel=args.rla)
    if args.pretrained_conv1:
        # Copy (and freeze) the stem conv weights from a torchvision model
        # matched by substring of the architecture name.
        pretrain_model = None
        arch = args.arch
        if (arch.find('resnet50') > (- 1)):
            pretrain_model = torchmodels.resnet50(pretrained=True)
        elif (arch.find('resnet101') > (- 1)):
            pretrain_model = torchmodels.resnet101(pretrained=True)
        elif (arch.find('resnet152') > (- 1)):
            pretrain_model = torchmodels.resnet152(pretrained=True)
        elif (arch.find('resnext50_32x4d') > (- 1)):
            pretrain_model = torchmodels.resnext50_32x4d(pretrained=True)
        elif (arch.find('resnext101_32x4d') > (- 1)):
            # Closest available torchvision checkpoint is the 32x8d variant.
            pretrain_model = torchmodels.resnext101_32x8d(pretrained=True)
            print('[!] resnext101_32x4d is not available in torchvision.models, proceed with resnext101_32x8d pretrained conv1')
        if (pretrain_model is not None):
            model.conv1.weight = pretrain_model.conv1.weight
            model.conv1.weight.requires_grad = False
            print('[!] Using pretrained conv1')
    if (not torch.cuda.is_available()):
        print('using CPU, this will be slow')
    elif args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the per-node batch and workers across this node's processes.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = torch.nn.DataParallel(model).cuda()
    print('Number of models parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map the checkpoint tensors straight onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Standard ImageNet folder layout and normalization statistics.
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        # Evaluation-only mode: run validation once, report wall time, exit.
        m = time.time()
        (_, _) = validate(val_loader, model, criterion, args)
        n = time.time()
        print('evaluate_time (h): ', ((n - m) / 3600))
        return
    if (not os.path.exists(os.path.abspath(args.work_dir))):
        os.mkdir(os.path.abspath(args.work_dir))
    save_path = ('%s/%s/' % (args.work_dir, ((args.arch + '_') + args.action)))
    if (not os.path.exists(save_path)):
        os.mkdir(save_path)
    print('save checkpoint file and log in: ', save_path)
    # Per-epoch metric histories, dumped to text files every epoch.
    loss_plot = {}
    train_acc1_plot = {}
    train_acc5_plot = {}
    val_acc1_plot = {}
    val_acc5_plot = {}
    for epoch in range(args.start_epoch, args.epochs):
        start_time = time.time()
        if args.distributed:
            # Reshuffle the distributed sampler differently each epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        (loss_temp, train_acc1_temp, train_acc5_temp) = train(train_loader, model, criterion, optimizer, epoch, args)
        loss_plot[epoch] = loss_temp
        train_acc1_plot[epoch] = train_acc1_temp
        train_acc5_plot[epoch] = train_acc5_temp
        (acc1, acc5) = validate(val_loader, model, criterion, args)
        val_acc1_plot[epoch] = acc1
        val_acc5_plot[epoch] = acc5
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only the (per-node) rank-0 process writes checkpoints and logs.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best, args)
            data_save((save_path + 'loss_plot.txt'), loss_plot)
            data_save((save_path + 'train_acc1.txt'), train_acc1_plot)
            data_save((save_path + 'train_acc5.txt'), train_acc5_plot)
            data_save((save_path + 'val_acc1.txt'), val_acc1_plot)
            data_save((save_path + 'val_acc5.txt'), val_acc5_plot)
        end_time = time.time()
        time_value = ((end_time - start_time) / 3600)
        print(('-' * 80))
        print('epoch {} train_time (h): {}'.format(epoch, time_value))
        print(('-' * 80))
|
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch.

    Returns:
        (mean loss, mean top-1 accuracy, mean top-5 accuracy) over the epoch.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    losses_batch = {}
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    tick = time.time()
    for step, (images, target) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update(time.time() - tick)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(args.gpu, non_blocking=True)
        output = model(images)
        loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        n_samples = images.size(0)
        losses.update(loss.item(), n_samples)
        top1.update(acc1[0], n_samples)
        top5.update(acc5[0], n_samples)
        # Standard SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - tick)
        tick = time.time()
        # Periodic logging from the first process only.
        if args.gpu is None or args.gpu == 0:
            if step % args.print_freq == 0:
                lr_temp = args.lr * (0.1 ** (epoch // 30))
                print('Epoch: [{0}][{1}/{2}]\tLR {3:.5f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, step, len(train_loader), lr_temp, batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
    return (losses.avg, top1.avg, top5.avg)
|
def validate(val_loader, model, criterion, args):
    """Evaluate the model on the validation set.

    Returns:
        (mean top-1 accuracy, mean top-5 accuracy).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')
    model.eval()
    with torch.no_grad():
        tick = time.time()
        for step, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(images)
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            n_samples = images.size(0)
            losses.update(loss.item(), n_samples)
            top1.update(acc1[0], n_samples)
            top5.update(acc5[0], n_samples)
            batch_time.update(time.time() - tick)
            tick = time.time()
            # Periodic logging from the first process only.
            if args.gpu is None or args.gpu == 0:
                if step % args.print_freq == 0:
                    print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(step, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return (top1.avg, top5.avg)
|
def save_checkpoint(state, is_best, args, filename='checkpoint.pth.tar'):
    """Serialize ``state`` under <work_dir>/<arch>_<action>/.

    When ``is_best`` is true the checkpoint is also mirrored to
    ``model_best.pth.tar`` in the same directory.
    """
    out_dir = '%s/%s/' % (args.work_dir, args.arch + '_' + args.action)
    ckpt_path = os.path.join(out_dir, filename)
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(out_dir, 'model_best.pth.tar'))
|
class AverageMeter(object):
    """Tracks the most recent value and the running average of a series.

    ``name`` and ``fmt`` control how the meter renders via ``str()``.
    """

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        # Zero out all running statistics.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # Fold in a new observation `val` weighted by `n` occurrences.
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
|
class ProgressMeter(object):
    """Pretty-prints a batch counter followed by tab-separated meter values."""

    def __init__(self, num_batches, meters, prefix=''):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        # One line: "<prefix>[  b/total]\tmeter1\tmeter2\t..."
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the running batch index to the width of the total batch count.
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
|
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs."""
    decayed = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed
|
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Returns a list of 1-element tensors, one percentage per k in ``topk``.
    """
    with torch.no_grad():
        maxk = max(topk)
        n = target.size(0)
        # Indices of the maxk highest-scoring classes, one column per sample.
        pred = output.topk(maxk, 1, True, True)[1].t()
        matches = pred.eq(target.view(1, -1).expand_as(pred))
        return [matches[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n) for k in topk]
|
def data_save(root, file):
    """Append per-epoch records from the dict ``file`` to the text file ``root``.

    Each line of ``root`` is "<epoch> <value>".  Only epochs strictly greater
    than the last epoch already present are appended, so repeated calls with
    a growing dict never duplicate lines.

    Args:
        root: path of the log file; created if missing.
        file: mapping of integer epoch -> value to record.
    """
    # Create the file portably when absent; os.mknod is unavailable or
    # requires privileges on some platforms (e.g. macOS).
    if not os.path.exists(root):
        open(root, 'a').close()
    with open(root, 'r') as fh:
        lines = fh.readlines()
    if not lines:
        last_epoch = -1
    else:
        # The first space-separated token of the last line is its epoch.
        last_epoch = int(lines[-1].split(' ', 1)[0])
    with open(root, 'a') as fh:
        for epoch in file:
            if epoch > last_epoch:
                fh.write(str(epoch) + ' ' + str(file[epoch]) + '\n')
|
def main():
    """Top-level training driver.

    Parses arguments, builds the model and ImageNet-style data loaders,
    optionally resumes from a checkpoint, then runs the train/validate loop
    with per-epoch checkpointing and text-file logging.

    NOTE(review): this calls the 3-argument ``validate``, 2-argument
    ``adjust_learning_rate`` and 2-argument ``save_checkpoint`` defined later
    in this module (the later definitions shadow the earlier variants).
    """
    global args, best_acc1
    args = parser.parse_args()
    # Seeding makes runs repeatable but forces deterministic cuDNN kernels.
    if (args.seed is not None):
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if (args.gpu is not None):
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    args.distributed = (args.world_size > 1)
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size)
    print("=> creating model '{}' for training".format(args.arch))
    # Instantiate the requested architecture; forward the RLA channel width
    # only when it was supplied on the command line.
    if (args.rla == None):
        model = models.__dict__[args.arch]()
    else:
        model = models.__dict__[args.arch](rla_channel=args.rla)
    if args.pretrained_conv1:
        # Copy (and freeze) the first conv layer from a torchvision-pretrained
        # model; only wired up for mobilenetv2-style architectures here.
        pretrain_model = None
        arch = args.arch
        if (arch.find('mobilenetv2') > (- 1)):
            pretrain_model = torchmodels.mobilenet_v2(pretrained=True)
        if (pretrain_model is not None):
            model.conv1[0].weight = pretrain_model.features[0][0].weight
            model.conv1[0].weight.requires_grad = False
            print('[!] Using pretrained conv1')
    # Model placement: single GPU, DDP, features-only DataParallel for
    # alexnet/vgg, or plain DataParallel over all visible GPUs.
    if (args.gpu is not None):
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    print('Number of models parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally restore epoch counter, best accuracy, weights and optimizer state.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Standard ImageNet data pipeline: random crop/flip for train, center crop for val.
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        # Evaluation-only mode: time one validation pass and exit.
        m = time.time()
        (_, _) = validate(val_loader, model, criterion)
        n = time.time()
        print('evaluate_time (h): ', ((n - m) / 3600))
        return
    # Prepare <work_dir>/<arch>_<action>/ for checkpoints and log files.
    if (not os.path.exists(os.path.abspath(args.work_dir))):
        os.mkdir(os.path.abspath(args.work_dir))
    directory = ('%s/%s/' % (args.work_dir, ((args.arch + '_') + args.action)))
    if (not os.path.exists(directory)):
        os.mkdir(directory)
    print('save checkpoint file and log in: ', directory)
    # Per-epoch curves keyed by epoch number; flushed to text files each epoch.
    loss_plot = {}
    train_acc1_plot = {}
    train_acc5_plot = {}
    val_acc1_plot = {}
    val_acc5_plot = {}
    for epoch in range(args.start_epoch, args.epochs):
        start_time = time.time()
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)
        (loss_temp, train_acc1_temp, train_acc5_temp) = train(train_loader, model, criterion, optimizer, epoch)
        loss_plot[epoch] = loss_temp
        train_acc1_plot[epoch] = train_acc1_temp
        train_acc5_plot[epoch] = train_acc5_temp
        (acc1, acc5) = validate(val_loader, model, criterion)
        val_acc1_plot[epoch] = acc1
        val_acc5_plot[epoch] = acc5
        # Track the best top-1 accuracy seen so far and checkpoint every epoch.
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
        data_save((directory + 'loss_plot.txt'), loss_plot)
        data_save((directory + 'train_acc1.txt'), train_acc1_plot)
        data_save((directory + 'train_acc5.txt'), train_acc5_plot)
        data_save((directory + 'val_acc1.txt'), val_acc1_plot)
        data_save((directory + 'val_acc5.txt'), val_acc5_plot)
        end_time = time.time()
        time_value = ((end_time - start_time) / 3600)
        print(('-' * 80))
        print('epoch {} train_time (h): {}'.format(epoch, time_value))
        print(('-' * 80))
|
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch.

    Returns (average loss, average top-1 accuracy, average top-5 accuracy).
    Reads the module-level ``args`` for GPU placement and print frequency.

    (Removed two dead locals from the original: an unused ``losses_batch``
    dict and an unused ``directory`` path computed on every call.)
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.train()
    end = time.time()
    for (i, (input, target)) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        output = model(input)
        loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        # Log only from the main process / GPU 0.
        if ((args.gpu is None) or (args.gpu == 0)):
            if ((i % args.print_freq) == 0):
                # Recompute the decayed LR purely for display; matches the
                # 2-argument adjust_learning_rate (lr * 0.98**epoch).
                lr_temp = (args.lr * (0.98 ** epoch))
                print('Epoch: [{0}][{1}/{2}]\tLR {3:.5f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), lr_temp, batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
    return (losses.avg, top1.avg, top5.avg)
|
def validate(val_loader, model, criterion):
    """One pass over the validation set; returns (avg top-1, avg top-5).

    Uses the module-level ``args`` for GPU placement and print frequency.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    with torch.no_grad():
        tick = time.time()
        for step, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(input)
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            batch = input.size(0)
            losses.update(loss.item(), batch)
            top1.update(acc1[0], batch)
            top5.update(acc5[0], batch)
            batch_time.update(time.time() - tick)
            tick = time.time()
            if step % args.print_freq == 0:
                print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(step, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return (top1.avg, top5.avg)
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Write ``state`` into <work_dir>/<arch>_<action>/ (paths from module-level
    ``args``); mirror it to ``model_best.pth.tar`` when ``is_best``."""
    out_dir = '%s/%s/' % (args.work_dir, args.arch + '_' + args.action)
    target = out_dir + filename
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, out_dir + 'model_best.pth.tar')
|
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Zero out all running statistics.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # Fold in a new observation repeated `n` times and refresh the average.
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
|
def adjust_learning_rate(optimizer, epoch):
    """Exponentially decay the learning rate: lr = args.lr * 0.98 ** epoch.

    (The previous docstring claimed a step decay of 10x every 30 epochs,
    which matches the 3-argument variant elsewhere in this file but not
    the formula actually used here.)
    """
    lr = (args.lr * (0.98 ** epoch))
    print('lr = ', lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
|
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy@k for the specified values of k.

    Returns one 1-element percentage tensor per entry of ``topk``.
    """
    with torch.no_grad():
        maxk = max(topk)
        n = target.size(0)
        # Transposed indices of the maxk top-scoring classes (one column per sample).
        pred = output.topk(maxk, 1, True, True)[1].t()
        matches = pred.eq(target.view(1, -1).expand_as(pred))
        scores = []
        for k in topk:
            hits_k = matches[:k].reshape(-1).float().sum(0, keepdim=True)
            scores.append(hits_k.mul_(100.0 / n))
        return scores
|
def data_save(root, file):
    """Append per-epoch records from the dict ``file`` to the text file ``root``.

    Each line of ``root`` is "<epoch> <value>".  Only epochs strictly greater
    than the last epoch already present are appended, so repeated calls with
    a growing dict never duplicate lines.

    Args:
        root: path of the log file; created if missing.
        file: mapping of integer epoch -> value to record.
    """
    # Create the file portably when absent; os.mknod is unavailable or
    # requires privileges on some platforms (e.g. macOS).
    if not os.path.exists(root):
        open(root, 'a').close()
    with open(root, 'r') as fh:
        lines = fh.readlines()
    if not lines:
        last_epoch = -1
    else:
        # The first space-separated token of the last line is its epoch.
        last_epoch = int(lines[-1].split(' ', 1)[0])
    with open(root, 'a') as fh:
        for epoch in file:
            if epoch > last_epoch:
                fh.write(str(epoch) + ' ' + str(file[epoch]) + '\n')
|
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """The Main Window of the graphical user interface.

    The class MainWindow inherits from Ui_MainWindow, which is
    defined in maxent_ui.py. The latter file is autogenerated
    by pyuic from maxent_ui.ui [`pyuic5 maxent_ui.ui -o maxent_ui.py`]
    The ui file can be edited by the QtDesigner.

    NOTE(review): a second ``MainWindow`` class is defined later in this
    module and shadows this one at import time — confirm which definition
    is the intended entry point.
    """

    def __init__(self, *args, obj=None, **kwargs):
        """Connect the widgets, instantiate the main classes."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setupUi(self)
        # Real-frequency grid configured from the UI fields.
        self.realgrid = RealFrequencyGrid(wmax=float(self.max_real_freq.text()), nw=int(self.num_real_freq.text()), type=str(self.grid_type_combo.currentText()))
        self.connect_realgrid_button()
        self.connect_wmax()
        self.connect_nw()
        self.connect_grid_type()
        # Text-file input widgets (the "_2" widgets belong to the text tab).
        self.connect_load_button_text()
        self.connect_show_button_2()
        self.connect_select_button_2()
        self.text_output.setReadOnly(True)
        self.connect_doit_button()
        self.output_data = OutputData()
        self.connect_select_output_button()
        self.connect_save_button()

    def connect_realgrid_button(self):
        # "Generate" button rebuilds the real-frequency grid.
        self.gen_real_grid_button.clicked.connect((lambda : self.realgrid.create_grid()))

    def connect_wmax(self):
        # Both Enter and focus-out update the grid's maximum frequency.
        self.max_real_freq.returnPressed.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))
        self.max_real_freq.editingFinished.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))

    def connect_nw(self):
        # Both Enter and focus-out update the number of real frequencies.
        self.num_real_freq.returnPressed.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))
        self.num_real_freq.editingFinished.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))

    def connect_grid_type(self):
        self.grid_type_combo.activated.connect((lambda : self.realgrid.update_type(str(self.grid_type_combo.currentText()))))

    def connect_fname_input(self):
        # NOTE(review): not wired up in __init__; relies on self.input_data
        # which only exists after load_text_data() has run.
        self.inp_file_name.editingFinished.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))
        self.inp_file_name.textChanged.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))

    def get_fname(self):
        # File dialog for HDF5 input files.
        self.inp_file_name.setText(QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd(), 'HDF5 files (*.hdf5)')[0])

    def get_fname_text(self):
        # File dialog for plain-text input files.
        self.inp_file_name_2.setText(QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd(), 'text files (*.dat *.txt)')[0])

    def connect_select_button_2(self):
        self.select_file_button_2.clicked.connect(self.get_fname_text)

    def connect_num_mats(self):
        # NOTE(review): not wired up in __init__.
        self.num_mats_freq.editingFinished.connect((lambda : self.input_data.update_num_mats(int(self.num_mats_freq.text()))))

    def connect_show_button_2(self):
        self.show_data_button_2.clicked.connect((lambda : self.input_data.plot()))

    def load_text_data(self):
        # Build the input-data object from the text-tab fields and read the file.
        self.input_data = TextInputData(fname=str(self.inp_file_name_2.text()), data_type='bosonic', n_skip=str(self.n_skip.text()), num_mats=str(self.num_mats_freq_text.text()))
        self.input_data.read_data()

    def connect_load_button_text(self):
        self.load_data_button_2.clicked.connect(self.load_text_data)

    def get_preblur(self):
        # Returns (use_preblur, blur_width); invalid width input falls back to 0.
        preblur_checked = self.preblur_checkbox.isChecked()
        try:
            bw = (float(self.blur_width.text()) if preblur_checked else 0.0)
        except ValueError:
            print('Invalid input for blur width, setting to 0.')
            bw = 0.0
        preblur = (preblur_checked and (bw > 0.0))
        return (preblur, bw)

    def main_function(self):
        """Main function for the analytic continuation procedure.

        This function is called when the "Do it" button is clicked.
        It performs an analytical continuation for the present settings
        and shows a plot.

        Assumes data has already been loaded (load_text_data) — TODO confirm
        behavior when "Do it" is clicked before loading.
        """
        self.ana_cont_probl = cont.AnalyticContinuationProblem(im_axis=self.input_data.mats, im_data=self.input_data.value.real, re_axis=self.realgrid.grid, kernel_mode='freq_bosonic')
        # Flat default model, normalized to unit weight on the grid.
        model = np.ones_like(self.realgrid.grid)
        model /= np.trapz(model, self.realgrid.grid)
        (preblur, bw) = self.get_preblur()
        sol = self.ana_cont_probl.solve(method='maxent_svd', optimizer='newton', alpha_determination='chi2kink', model=model, stdev=self.input_data.error, interactive=False, alpha_start=10000000000.0, alpha_end=0.001, preblur=preblur, blur_width=bw)
        # sol[0]: solution at the optimal alpha; sol[1]: solutions for all
        # probed alphas (per the attribute accesses below) — TODO confirm.
        inp_str = 'atom {}, orb {}, spin {}, blur {}: '.format(self.input_data.atom, self.input_data.orbital, self.input_data.spin, bw)
        # NOTE(review): np.isfinite yields a *boolean* mask, so the
        # 'min(chi2)' printed below via np.amin(all_chis) is 0 or 1, not the
        # minimal chi2 value. Likely intended: min over the finite chi2
        # values themselves — confirm before relying on this output.
        all_chis = np.isfinite(np.array([s.chi2 for s in sol[1]]))
        res_str = 'alpha_opt={:3.2f}, chi2(alpha_opt)={:3.2f}, min(chi2)={:3.2f}'.format(sol[0].alpha, sol[0].chi2, np.amin(all_chis))
        self.text_output.append((inp_str + res_str))
        alphas = [s.alpha for s in sol[1]]
        chis = [s.chi2 for s in sol[1]]
        self.output_data.update(self.realgrid.grid, sol[0].A_opt, self.input_data)
        # 2x2 summary figure: chi2(alpha), spectrum, data vs fit, residuals.
        (fig, ax) = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25))
        ax[(0, 0)].loglog(alphas, chis, marker='s', color='black')
        ax[(0, 0)].loglog(sol[0].alpha, sol[0].chi2, marker='*', color='red', markersize=15)
        ax[(0, 0)].set_xlabel('$\\alpha$')
        ax[(0, 0)].set_ylabel('$\\chi^2(\\alpha)$')
        ax[(1, 0)].plot(self.realgrid.grid, sol[0].A_opt)
        ax[(1, 0)].set_xlabel('$\\omega$')
        ax[(1, 0)].set_ylabel('spectrum')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.real, color='blue', ls=':', marker='x', markersize=5, label='Re[data]')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.imag, color='green', ls=':', marker='+', markersize=5, label='Im[data]')
        ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.real, ls='--', color='gray', label='Re[fit]')
        ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.imag, color='gray', label='Im[fit]')
        ax[(0, 1)].set_xlabel('$\\nu_n$')
        ax[(0, 1)].set_ylabel(self.input_data.data_type)
        ax[(0, 1)].legend()
        ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).real, ls='--', label='real part')
        ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).imag, label='imaginary part')
        ax[(1, 1)].set_xlabel('$\\nu_n$')
        ax[(1, 1)].set_ylabel('data $-$ fit')
        ax[(1, 1)].legend()
        plt.tight_layout()
        plt.show()

    def connect_doit_button(self):
        self.doit_button.clicked.connect((lambda : self.main_function()))

    def connect_fname_output(self):
        # NOTE(review): not wired up in __init__; the second line connects a
        # signal of inp_file_name (input field) to the output fname — verify
        # this is intended and not a copy-paste slip.
        self.out_file_name.editingFinished.connect((lambda : self.output_data.update_fname(str(self.out_file_name.text()))))
        self.inp_file_name.textChanged.connect((lambda : self.output_data.update_fname(str(self.out_file_name.text()))))

    def get_fname_output(self):
        # Save-as dialog, starting in the input file's directory.
        fname_out = QtWidgets.QFileDialog.getSaveFileName(self, 'Save as', '/'.join(self.input_data.fname.split('/')[:(- 1)]), 'DAT files (*.dat)')[0]
        self.out_file_name.setText(fname_out)
        self.output_data.update_fname(fname_out)

    def connect_select_output_button(self):
        self.output_directory_button.clicked.connect(self.get_fname_output)

    def save_output(self):
        # Write the continuation result; requires a non-empty output file name.
        fname_out = str(self.out_file_name.text())
        if (fname_out == ''):
            print('Error in saving: First you have to specify the output file name.')
            return 1
        self.output_data.update_fname(fname_out)
        try:
            self.output_data.save(interpolate=self.interpolate_checkbox.isChecked(), n_reg=int(self.n_interpolation.text()))
        except AttributeError:
            print('Error in saving: First you have to specify the output file name.')

    def connect_save_button(self):
        self.save_button.clicked.connect((lambda : self.save_output()))
|
class Ui_MainWindow(object):
    """Widget layout and strings for the main window.

    NOTE(review): per the MainWindow docstring in this file, this class is
    auto-generated by pyuic5 from maxent_ui.ui — prefer regenerating from
    the .ui file over hand-editing.
    """

    def setupUi(self, MainWindow):
        # Create and position every widget; geometry values are pixels
        # (x, y, width, height) relative to the parent widget.
        MainWindow.setObjectName('MainWindow')
        MainWindow.resize(759, 629)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName('centralwidget')
        # --- Real-frequency grid frame (top left) ---
        self.real_freq_frame = QtWidgets.QFrame(self.centralwidget)
        self.real_freq_frame.setGeometry(QtCore.QRect(10, 10, 231, 171))
        self.real_freq_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.real_freq_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.real_freq_frame.setObjectName('real_freq_frame')
        self.label = QtWidgets.QLabel(self.real_freq_frame)
        self.label.setGeometry(QtCore.QRect(10, 10, 141, 17))
        self.label.setObjectName('label')
        self.label_3 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_3.setGeometry(QtCore.QRect(10, 70, 31, 17))
        self.label_3.setObjectName('label_3')
        self.label_4 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_4.setGeometry(QtCore.QRect(10, 110, 21, 17))
        self.label_4.setObjectName('label_4')
        self.grid_type_combo = QtWidgets.QComboBox(self.real_freq_frame)
        self.grid_type_combo.setGeometry(QtCore.QRect(10, 40, 201, 25))
        self.grid_type_combo.setObjectName('grid_type_combo')
        self.grid_type_combo.addItem('')
        self.grid_type_combo.addItem('')
        self.max_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.max_real_freq.setGeometry(QtCore.QRect(40, 70, 41, 25))
        self.max_real_freq.setObjectName('max_real_freq')
        self.num_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.num_real_freq.setGeometry(QtCore.QRect(40, 110, 41, 25))
        self.num_real_freq.setObjectName('num_real_freq')
        self.gen_real_grid_button = QtWidgets.QPushButton(self.real_freq_frame)
        self.gen_real_grid_button.setGeometry(QtCore.QRect(90, 110, 71, 25))
        self.gen_real_grid_button.setObjectName('gen_real_grid_button')
        # --- Input-data tab widget with the text-file tab (top right) ---
        self.input_data_tabs = QtWidgets.QTabWidget(self.centralwidget)
        self.input_data_tabs.setGeometry(QtCore.QRect(250, 10, 501, 171))
        self.input_data_tabs.setObjectName('input_data_tabs')
        self.text_tab = QtWidgets.QWidget()
        self.text_tab.setObjectName('text_tab')
        self.label_14 = QtWidgets.QLabel(self.text_tab)
        self.label_14.setGeometry(QtCore.QRect(10, 10, 221, 17))
        self.label_14.setObjectName('label_14')
        self.label_16 = QtWidgets.QLabel(self.text_tab)
        self.label_16.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_16.setObjectName('label_16')
        self.inp_file_name_2 = QtWidgets.QLineEdit(self.text_tab)
        self.inp_file_name_2.setGeometry(QtCore.QRect(90, 40, 361, 25))
        self.inp_file_name_2.setObjectName('inp_file_name_2')
        self.select_file_button_2 = QtWidgets.QToolButton(self.text_tab)
        self.select_file_button_2.setGeometry(QtCore.QRect(460, 40, 26, 24))
        self.select_file_button_2.setObjectName('select_file_button_2')
        self.load_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.load_data_button_2.setGeometry(QtCore.QRect(300, 110, 89, 25))
        self.load_data_button_2.setObjectName('load_data_button_2')
        self.show_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.show_data_button_2.setGeometry(QtCore.QRect(400, 110, 89, 25))
        self.show_data_button_2.setObjectName('show_data_button_2')
        self.label_17 = QtWidgets.QLabel(self.text_tab)
        self.label_17.setGeometry(QtCore.QRect(10, 80, 31, 17))
        self.label_17.setObjectName('label_17')
        self.n_skip = QtWidgets.QLineEdit(self.text_tab)
        self.n_skip.setGeometry(QtCore.QRect(50, 80, 31, 25))
        self.n_skip.setObjectName('n_skip')
        self.label_18 = QtWidgets.QLabel(self.text_tab)
        self.label_18.setGeometry(QtCore.QRect(90, 80, 151, 17))
        self.label_18.setObjectName('label_18')
        self.label_19 = QtWidgets.QLabel(self.text_tab)
        self.label_19.setGeometry(QtCore.QRect(10, 110, 31, 17))
        self.label_19.setObjectName('label_19')
        self.num_mats_freq_text = QtWidgets.QLineEdit(self.text_tab)
        self.num_mats_freq_text.setGeometry(QtCore.QRect(50, 110, 31, 25))
        self.num_mats_freq_text.setObjectName('num_mats_freq_text')
        self.label_20 = QtWidgets.QLabel(self.text_tab)
        self.label_20.setGeometry(QtCore.QRect(90, 110, 161, 17))
        self.label_20.setObjectName('label_20')
        self.input_data_tabs.addTab(self.text_tab, '')
        # --- Continuation frame: preblur controls, log output, save row ---
        self.continuation_frame = QtWidgets.QFrame(self.centralwidget)
        self.continuation_frame.setGeometry(QtCore.QRect(10, 190, 741, 391))
        self.continuation_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.continuation_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.continuation_frame.setObjectName('continuation_frame')
        self.doit_button = QtWidgets.QPushButton(self.continuation_frame)
        self.doit_button.setGeometry(QtCore.QRect(590, 20, 131, 41))
        self.doit_button.setObjectName('doit_button')
        self.blur_width = QtWidgets.QLineEdit(self.continuation_frame)
        self.blur_width.setGeometry(QtCore.QRect(80, 40, 113, 25))
        self.blur_width.setObjectName('blur_width')
        self.label_11 = QtWidgets.QLabel(self.continuation_frame)
        self.label_11.setGeometry(QtCore.QRect(30, 40, 51, 17))
        self.label_11.setObjectName('label_11')
        self.text_output = QtWidgets.QTextEdit(self.continuation_frame)
        self.text_output.setGeometry(QtCore.QRect(30, 80, 691, 231))
        self.text_output.setObjectName('text_output')
        self.save_button = QtWidgets.QPushButton(self.continuation_frame)
        self.save_button.setGeometry(QtCore.QRect(630, 360, 89, 25))
        self.save_button.setObjectName('save_button')
        self.output_directory_button = QtWidgets.QToolButton(self.continuation_frame)
        self.output_directory_button.setGeometry(QtCore.QRect(120, 360, 26, 24))
        self.output_directory_button.setObjectName('output_directory_button')
        self.out_file_name = QtWidgets.QLineEdit(self.continuation_frame)
        self.out_file_name.setGeometry(QtCore.QRect(160, 360, 451, 25))
        self.out_file_name.setObjectName('out_file_name')
        self.label_12 = QtWidgets.QLabel(self.continuation_frame)
        self.label_12.setGeometry(QtCore.QRect(30, 360, 91, 17))
        self.label_12.setObjectName('label_12')
        self.n_interpolation = QtWidgets.QLineEdit(self.continuation_frame)
        self.n_interpolation.setGeometry(QtCore.QRect(200, 320, 41, 25))
        self.n_interpolation.setObjectName('n_interpolation')
        self.label_13 = QtWidgets.QLabel(self.continuation_frame)
        self.label_13.setGeometry(QtCore.QRect(250, 320, 201, 17))
        self.label_13.setObjectName('label_13')
        self.preblur_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.preblur_checkbox.setGeometry(QtCore.QRect(30, 10, 92, 23))
        self.preblur_checkbox.setObjectName('preblur_checkbox')
        self.interpolate_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.interpolate_checkbox.setGeometry(QtCore.QRect(30, 320, 171, 23))
        self.interpolate_checkbox.setObjectName('interpolate_checkbox')
        # --- Window chrome: central widget, menu bar, status bar ---
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 759, 22))
        self.menubar.setObjectName('menubar')
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName('statusbar')
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.input_data_tabs.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Assign all user-visible strings and tooltips (kept separate from
        # setupUi so they can be run through Qt's translation machinery).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
        self.real_freq_frame.setToolTip(_translate('MainWindow', 'configure real-frequency grid'))
        self.real_freq_frame.setWhatsThis(_translate('MainWindow', 'real-frequency grid'))
        self.label.setText(_translate('MainWindow', 'Real-frequency grid'))
        self.label_3.setText(_translate('MainWindow', 'max'))
        self.label_4.setText(_translate('MainWindow', 'n'))
        self.grid_type_combo.setToolTip(_translate('MainWindow', 'equispaced or centered grid (denser around Fermi energy)'))
        self.grid_type_combo.setItemText(0, _translate('MainWindow', 'equispaced positive'))
        self.grid_type_combo.setItemText(1, _translate('MainWindow', 'centered positive'))
        self.max_real_freq.setToolTip(_translate('MainWindow', 'upper border of real-frequency grid. (lower border is set symmetrically)'))
        self.max_real_freq.setText(_translate('MainWindow', '20'))
        self.num_real_freq.setToolTip(_translate('MainWindow', 'number frequencies on real axis; should be an odd number'))
        self.num_real_freq.setText(_translate('MainWindow', '401'))
        self.gen_real_grid_button.setText(_translate('MainWindow', 'Generate'))
        self.label_14.setText(_translate('MainWindow', 'Load susceptibility from text file'))
        self.label_16.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name_2.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.select_file_button_2.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button_2.setText(_translate('MainWindow', '...'))
        self.load_data_button_2.setText(_translate('MainWindow', 'Load data'))
        self.show_data_button_2.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button_2.setText(_translate('MainWindow', 'Show data'))
        self.label_17.setText(_translate('MainWindow', 'Skip'))
        self.label_18.setText(_translate('MainWindow', 'lines at the beginning'))
        self.label_19.setText(_translate('MainWindow', 'Use'))
        self.label_20.setText(_translate('MainWindow', 'Matsubara frequencies'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.text_tab), _translate('MainWindow', 'text file'))
        self.doit_button.setToolTip(_translate('MainWindow', 'perform the analytical continuation'))
        self.doit_button.setText(_translate('MainWindow', 'Do it!'))
        self.blur_width.setToolTip(_translate('MainWindow', 'set the blur width here'))
        self.blur_width.setText(_translate('MainWindow', '0.1'))
        self.label_11.setText(_translate('MainWindow', 'Width'))
        self.text_output.setToolTip(_translate('MainWindow', 'in this field some output will be shown'))
        self.save_button.setToolTip(_translate('MainWindow', 'click this button to save the output'))
        self.save_button.setText(_translate('MainWindow', 'Save'))
        self.output_directory_button.setToolTip(_translate('MainWindow', 'Choose a directory, where you want to save the output'))
        self.output_directory_button.setText(_translate('MainWindow', '...'))
        self.out_file_name.setToolTip(_translate('MainWindow', 'type full output name here (including path)'))
        self.label_12.setText(_translate('MainWindow', 'Output file:'))
        self.n_interpolation.setToolTip(_translate('MainWindow', 'number of regularly spaced grid points for interpolation'))
        self.n_interpolation.setText(_translate('MainWindow', '0'))
        self.label_13.setText(_translate('MainWindow', 'regularly spaced grid points'))
        self.preblur_checkbox.setToolTip(_translate('MainWindow', 'check this if you want to use preblur'))
        self.preblur_checkbox.setText(_translate('MainWindow', 'Preblur'))
        self.interpolate_checkbox.setToolTip(_translate('MainWindow', 'check this for interpolating output to regular grid'))
        self.interpolate_checkbox.setText(_translate('MainWindow', 'Interpolate output to'))
|
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
'The Main Window of the graphical user interface.\n\n The class MainWindow inherits from Ui_MainWindow, which is\n defined in maxent_ui.py. The latter file is autogenerated\n by pyuic from maxent_ui.ui [`pyuic5 maxent_ui.ui -o maxent_ui.py`]\n The ui file can be edited by the QtDesigner.\n '
def __init__(self, *args, obj=None, **kwargs):
'Connect the widgets, instantiate the main classes.'
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi(self)
self.realgrid = RealFrequencyGrid(wmax=float(self.max_real_freq.text()), nw=int(self.num_real_freq.text()), type=str(self.grid_type_combo.currentText()))
self.connect_realgrid_button()
self.connect_wmax()
self.connect_nw()
self.connect_grid_type()
self.input_data = InputData(fname=str(self.inp_file_name.text()), iter_type=str(self.iteration_type_combo.currentText()), iter_num=str(self.iteration_number.text()), data_type=str(self.inp_data_type.currentText()), atom=str(self.atom_number.text()), orbital=str(self.orbital_number.text()), spin=str(self.spin_type_combo.currentText()), num_mats=str(self.num_mats_freq.text()))
self.connect_select_button()
self.connect_load_button()
self.connect_show_button()
self.connect_load_button_text()
self.connect_show_button_2()
self.connect_select_button_2()
self.text_output.setReadOnly(True)
self.connect_doit_button()
self.output_data = OutputData()
self.connect_select_output_button()
self.connect_save_button()
def connect_realgrid_button(self):
self.gen_real_grid_button.clicked.connect((lambda : self.realgrid.create_grid()))
def connect_wmax(self):
self.max_real_freq.returnPressed.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))
self.max_real_freq.editingFinished.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))
def connect_nw(self):
self.num_real_freq.returnPressed.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))
self.num_real_freq.editingFinished.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))
def connect_grid_type(self):
self.grid_type_combo.activated.connect((lambda : self.realgrid.update_type(str(self.grid_type_combo.currentText()))))
def preset_fnames(self, fname):
self.inp_file_name.setText(fname)
self.inp_file_name_2.setText(fname)
def connect_fname_input(self):
self.inp_file_name.editingFinished.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))
self.inp_file_name.textChanged.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))
def get_fname(self):
    """Open a file dialog and put the chosen HDF5 path into the file-name field."""
    chosen, _filter = QtWidgets.QFileDialog.getOpenFileName(
        self, 'Open file', os.getcwd(), 'HDF5 files (*.hdf5)')
    self.inp_file_name.setText(chosen)
def connect_select_button(self):
    """Open the HDF5 file dialog when the '...' tool button is clicked."""
    button = self.select_file_button
    button.clicked.connect(self.get_fname)
def get_fname_text(self):
    """Open a file dialog and put the chosen text-file path into the text-tab field."""
    chosen, _filter = QtWidgets.QFileDialog.getOpenFileName(
        self, 'Open file', os.getcwd(), 'text files (*.dat *.txt)')
    self.inp_file_name_2.setText(chosen)
def connect_select_button_2(self):
    """Open the text-file dialog when the text tab's '...' tool button is clicked."""
    button = self.select_file_button_2
    button.clicked.connect(self.get_fname_text)
def connect_data_type(self):
    """Propagate data-type combo selections (self-energy / Green's function) to InputData."""
    def _update():
        self.input_data.update_data_type(str(self.inp_data_type.currentText()))
    self.inp_data_type.activated.connect(_update)
def connect_iteration_type(self):
    """Propagate iteration-type combo selections (DMFT/STAT/WORM) to InputData."""
    def _update():
        self.input_data.update_iter_type(str(self.iteration_type_combo.currentText()))
    self.iteration_type_combo.activated.connect(_update)
def connect_iteration_number(self):
    """Propagate edits of the iteration-number field (kept as a string) to InputData."""
    def _update():
        self.input_data.update_iter_num(str(self.iteration_number.text()))
    self.iteration_number.editingFinished.connect(_update)
def connect_atom(self):
    """Propagate edits of the atom-number field (as int) to InputData."""
    def _update():
        self.input_data.update_atom(int(self.atom_number.text()))
    self.atom_number.editingFinished.connect(_update)
def connect_orbital(self):
    """Propagate edits of the orbital-number field (as int) to InputData."""
    def _update():
        self.input_data.update_orbital(int(self.orbital_number.text()))
    self.orbital_number.editingFinished.connect(_update)
def connect_spin(self):
    """Propagate spin combo selections (average/up/down) to InputData."""
    def _update():
        self.input_data.update_spin(str(self.spin_type_combo.currentText()))
    self.spin_type_combo.activated.connect(_update)
def connect_num_mats(self):
    """Propagate edits of the Matsubara-frequency count (as int) to InputData."""
    def _update():
        self.input_data.update_num_mats(int(self.num_mats_freq.text()))
    self.num_mats_freq.editingFinished.connect(_update)
def connect_show_button(self):
    """Plot the currently loaded data when 'Show data' is clicked.

    A wrapper is used (not a bound method) so the *current* self.input_data
    is plotted even after it has been replaced by a reload.
    """
    def _plot():
        self.input_data.plot()
    self.show_data_button.clicked.connect(_plot)
def connect_show_button_2(self):
    """Plot the currently loaded data when the text tab's 'Show data' is clicked."""
    def _plot():
        self.input_data.plot()
    self.show_data_button_2.clicked.connect(_plot)
def load_w2dynamics_data(self):
    """Build a fresh InputData from the current widget values and load the HDF5 file."""
    self.input_data = InputData(
        fname=str(self.inp_file_name.text()),
        iter_type=str(self.iteration_type_combo.currentText()),
        iter_num=str(self.iteration_number.text()),
        data_type=str(self.inp_data_type.currentText()),
        atom=str(self.atom_number.text()),
        orbital=str(self.orbital_number.text()),
        spin=str(self.spin_type_combo.currentText()),
        num_mats=str(self.num_mats_freq.text()),
        ignore_real_part=self.ignore_checkbox.isChecked(),
    )
    self.input_data.load_data()
def connect_load_button(self):
    """Load w2dynamics data when 'Load data' is clicked."""
    button = self.load_data_button
    button.clicked.connect(self.load_w2dynamics_data)
def load_text_data(self):
    """Build a fresh TextInputData from the text-tab widgets and read the file."""
    self.input_data = TextInputData(
        fname=str(self.inp_file_name_2.text()),
        data_type=str(self.inp_data_type_text.currentText()),
        n_skip=str(self.n_skip.text()),
        num_mats=str(self.num_mats_freq_text.text()),
    )
    self.input_data.read_data()
def connect_load_button_text(self):
    """Load text-file data when the text tab's 'Load data' is clicked."""
    button = self.load_data_button_2
    button.clicked.connect(self.load_text_data)
def get_preblur(self):
    """Read the preblur settings from the GUI.

    Returns a tuple (preblur, blur_width); preblur is True only when the
    checkbox is ticked AND a strictly positive width could be parsed.
    Unparsable width input falls back to 0, which disables preblur.
    """
    use_preblur = self.preblur_checkbox.isChecked()
    bw = 0.0
    if use_preblur:
        try:
            bw = float(self.blur_width.text())
        except ValueError:
            print('Invalid input for blur width, setting to 0.')
    return (use_preblur and bw > 0.0, bw)
def main_function(self):
    """Main function for the analytic continuation procedure.

    This function is called when the "Do it" button is clicked.
    It performs a MaxEnt analytical continuation for the present settings,
    logs a summary line to the text output, stores the result in
    self.output_data and shows a four-panel diagnostic plot.
    """
    self.ana_cont_probl = cont.AnalyticContinuationProblem(
        im_axis=self.input_data.mats, im_data=self.input_data.value,
        re_axis=self.realgrid.grid, kernel_mode='freq_fermionic')
    # Flat default model, normalized to unit weight on the real-frequency grid.
    model = np.ones_like(self.realgrid.grid)
    model /= np.trapz(model, self.realgrid.grid)
    (preblur, bw) = self.get_preblur()
    sol = self.ana_cont_probl.solve(
        method='maxent_svd', optimizer='newton', alpha_determination='chi2kink',
        model=model, stdev=self.input_data.error, interactive=False,
        alpha_start=100000000000000.0, alpha_end=0.001,
        preblur=preblur, blur_width=bw)
    inp_str = 'atom {}, orb {}, spin {}, blur {}: '.format(
        self.input_data.atom, self.input_data.orbital, self.input_data.spin, bw)
    # BUGFIX: previously np.amin was applied to the *boolean* np.isfinite mask,
    # so "min(chi2)" always printed as 0.00 or 1.00. Take the minimum over the
    # finite chi2 values themselves.
    all_chis = np.array([s.chi2 for s in sol[1]])
    min_chi2 = np.amin(all_chis[np.isfinite(all_chis)])
    res_str = 'alpha_opt={:3.2f}, chi2(alpha_opt)={:3.2f}, min(chi2)={:3.2f}'.format(
        sol[0].alpha, sol[0].chi2, min_chi2)
    self.text_output.append(inp_str + res_str)
    alphas = [s.alpha for s in sol[1]]
    chis = [s.chi2 for s in sol[1]]
    self.output_data.update(self.realgrid.grid, sol[0].A_opt, self.input_data)
    # Diagnostic figure: chi2(alpha) curve, spectrum, data vs. fit, residuals.
    (fig, ax) = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25))
    ax[(0, 0)].loglog(alphas, chis, marker='s', color='black')
    ax[(0, 0)].loglog(sol[0].alpha, sol[0].chi2, marker='*', color='red', markersize=15)
    ax[(0, 0)].set_xlabel('$\\alpha$')
    ax[(0, 0)].set_ylabel('$\\chi^2(\\alpha)$')
    ax[(1, 0)].plot(self.realgrid.grid, sol[0].A_opt)
    ax[(1, 0)].set_xlabel('$\\omega$')
    ax[(1, 0)].set_ylabel('spectrum')
    ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.real, color='blue', ls=':', marker='x', markersize=5, label='Re[data]')
    ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.imag, color='green', ls=':', marker='+', markersize=5, label='Im[data]')
    ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.real, ls='--', color='gray', label='Re[fit]')
    ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.imag, color='gray', label='Im[fit]')
    ax[(0, 1)].set_xlabel('$\\nu_n$')
    ax[(0, 1)].set_ylabel(self.input_data.data_type)
    ax[(0, 1)].legend()
    ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).real, ls='--', label='real part')
    ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).imag, label='imaginary part')
    ax[(1, 1)].set_xlabel('$\\nu_n$')
    ax[(1, 1)].set_ylabel('data $-$ fit')
    ax[(1, 1)].legend()
    plt.tight_layout()
    plt.show()
def connect_doit_button(self):
    """Run the analytic continuation when the 'Do it!' button is clicked."""
    def _run():
        self.main_function()
    self.doit_button.clicked.connect(_run)
def connect_fname_output(self):
    """Keep OutputData's file name in sync with the output file-name field.

    BUGFIX: the textChanged signal was previously connected on
    self.inp_file_name (copy-paste from connect_fname_input), so typing in
    the *input*-file field triggered the output-name update while edits of
    the output field itself went unnoticed until editingFinished. Both
    signals now come from self.out_file_name.
    """
    def _update():
        self.output_data.update_fname(str(self.out_file_name.text()))
    self.out_file_name.editingFinished.connect(_update)
    self.out_file_name.textChanged.connect(_update)
def get_fname_output(self):
    """Ask for an output file via a save dialog, defaulting to the input file's directory."""
    start_dir = '/'.join(self.input_data.fname.split('/')[:-1])
    fname_out = QtWidgets.QFileDialog.getSaveFileName(
        self, 'Save as', start_dir, 'DAT files (*.dat)')[0]
    self.out_file_name.setText(fname_out)
    self.output_data.update_fname(fname_out)
def connect_select_output_button(self):
    """Open the save-file dialog when the output '...' tool button is clicked."""
    button = self.output_directory_button
    button.clicked.connect(self.get_fname_output)
def save_output(self):
    """Save the continuation result to the file named in the output field.

    Prints a message and returns 1 if no file name was given (return value
    kept for backward compatibility; the connected slot ignores it).
    """
    fname_out = str(self.out_file_name.text())
    if fname_out == '':
        print('Error in saving: First you have to specify the output file name.')
        return 1
    self.output_data.update_fname(fname_out)
    try:
        self.output_data.save(interpolate=self.interpolate_checkbox.isChecked(),
                              n_reg=int(self.n_interpolation.text()))
    except AttributeError:
        # BUGFIX: the old message blamed a missing file name, but the name is
        # already set at this point; an AttributeError from save() means that
        # OutputData holds no result yet because no continuation has been run.
        print('Error in saving: No output data available. Run the continuation first.')
def connect_save_button(self):
    """Save the result to disk when the 'Save' button is clicked."""
    def _save():
        self.save_output()
    self.save_button.clicked.connect(_save)
|
class Ui_MainWindow(object):
    """pyuic-style UI definition for the MaxEnt main window.

    Pure widget construction and layout; all behavior lives in the
    MainWindow class that inherits from this one. Do not add logic here.
    """

    def setupUi(self, MainWindow):
        """Create all widgets, set their fixed geometries and assemble the window."""
        MainWindow.setObjectName('MainWindow')
        MainWindow.resize(760, 633)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName('centralwidget')
        # --- real-frequency grid frame (top left) ---
        self.real_freq_frame = QtWidgets.QFrame(self.centralwidget)
        self.real_freq_frame.setGeometry(QtCore.QRect(10, 10, 171, 171))
        self.real_freq_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.real_freq_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.real_freq_frame.setObjectName('real_freq_frame')
        self.label = QtWidgets.QLabel(self.real_freq_frame)
        self.label.setGeometry(QtCore.QRect(10, 10, 141, 17))
        self.label.setObjectName('label')
        self.label_3 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_3.setGeometry(QtCore.QRect(10, 70, 31, 17))
        self.label_3.setObjectName('label_3')
        self.label_4 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_4.setGeometry(QtCore.QRect(10, 110, 21, 17))
        self.label_4.setObjectName('label_4')
        self.grid_type_combo = QtWidgets.QComboBox(self.real_freq_frame)
        self.grid_type_combo.setGeometry(QtCore.QRect(10, 40, 141, 25))
        self.grid_type_combo.setObjectName('grid_type_combo')
        # item texts are filled in later by retranslateUi
        self.grid_type_combo.addItem('')
        self.grid_type_combo.addItem('')
        self.max_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.max_real_freq.setGeometry(QtCore.QRect(40, 70, 41, 25))
        self.max_real_freq.setObjectName('max_real_freq')
        self.num_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.num_real_freq.setGeometry(QtCore.QRect(40, 110, 41, 25))
        self.num_real_freq.setObjectName('num_real_freq')
        self.gen_real_grid_button = QtWidgets.QPushButton(self.real_freq_frame)
        self.gen_real_grid_button.setGeometry(QtCore.QRect(90, 110, 71, 25))
        self.gen_real_grid_button.setObjectName('gen_real_grid_button')
        # --- continuation frame (bottom): run button, preblur, log, save controls ---
        self.continuation_frame = QtWidgets.QFrame(self.centralwidget)
        self.continuation_frame.setGeometry(QtCore.QRect(10, 190, 741, 391))
        self.continuation_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.continuation_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.continuation_frame.setObjectName('continuation_frame')
        self.doit_button = QtWidgets.QPushButton(self.continuation_frame)
        self.doit_button.setGeometry(QtCore.QRect(590, 20, 131, 41))
        self.doit_button.setObjectName('doit_button')
        self.blur_width = QtWidgets.QLineEdit(self.continuation_frame)
        self.blur_width.setGeometry(QtCore.QRect(80, 40, 113, 25))
        self.blur_width.setObjectName('blur_width')
        self.label_11 = QtWidgets.QLabel(self.continuation_frame)
        self.label_11.setGeometry(QtCore.QRect(30, 40, 51, 17))
        self.label_11.setObjectName('label_11')
        self.text_output = QtWidgets.QTextEdit(self.continuation_frame)
        self.text_output.setGeometry(QtCore.QRect(30, 80, 691, 231))
        self.text_output.setObjectName('text_output')
        self.save_button = QtWidgets.QPushButton(self.continuation_frame)
        self.save_button.setGeometry(QtCore.QRect(630, 360, 89, 25))
        self.save_button.setObjectName('save_button')
        self.output_directory_button = QtWidgets.QToolButton(self.continuation_frame)
        self.output_directory_button.setGeometry(QtCore.QRect(120, 360, 26, 24))
        self.output_directory_button.setObjectName('output_directory_button')
        self.out_file_name = QtWidgets.QLineEdit(self.continuation_frame)
        self.out_file_name.setGeometry(QtCore.QRect(160, 360, 451, 25))
        self.out_file_name.setObjectName('out_file_name')
        self.label_12 = QtWidgets.QLabel(self.continuation_frame)
        self.label_12.setGeometry(QtCore.QRect(30, 360, 91, 17))
        self.label_12.setObjectName('label_12')
        self.n_interpolation = QtWidgets.QLineEdit(self.continuation_frame)
        self.n_interpolation.setGeometry(QtCore.QRect(200, 320, 41, 25))
        self.n_interpolation.setObjectName('n_interpolation')
        self.label_13 = QtWidgets.QLabel(self.continuation_frame)
        self.label_13.setGeometry(QtCore.QRect(250, 320, 201, 17))
        self.label_13.setObjectName('label_13')
        self.preblur_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.preblur_checkbox.setGeometry(QtCore.QRect(30, 10, 92, 23))
        self.preblur_checkbox.setObjectName('preblur_checkbox')
        self.interpolate_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.interpolate_checkbox.setGeometry(QtCore.QRect(30, 320, 171, 23))
        self.interpolate_checkbox.setObjectName('interpolate_checkbox')
        # --- input-data tab widget (top right) with w2dynamics and text-file tabs ---
        self.input_data_tabs = QtWidgets.QTabWidget(self.centralwidget)
        self.input_data_tabs.setGeometry(QtCore.QRect(190, 10, 561, 171))
        self.input_data_tabs.setObjectName('input_data_tabs')
        # w2dynamics (HDF5) input tab
        self.w2dyn_tab = QtWidgets.QWidget()
        self.w2dyn_tab.setObjectName('w2dyn_tab')
        self.input_data_frame = QtWidgets.QFrame(self.w2dyn_tab)
        self.input_data_frame.setGeometry(QtCore.QRect(0, 0, 561, 141))
        self.input_data_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.input_data_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.input_data_frame.setObjectName('input_data_frame')
        self.label_2 = QtWidgets.QLabel(self.input_data_frame)
        self.label_2.setGeometry(QtCore.QRect(10, 10, 161, 17))
        self.label_2.setObjectName('label_2')
        self.inp_data_type = QtWidgets.QComboBox(self.input_data_frame)
        self.inp_data_type.setGeometry(QtCore.QRect(180, 10, 121, 25))
        self.inp_data_type.setObjectName('inp_data_type')
        self.inp_data_type.addItem('')
        self.inp_data_type.addItem('')
        self.label_5 = QtWidgets.QLabel(self.input_data_frame)
        self.label_5.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_5.setObjectName('label_5')
        self.inp_file_name = QtWidgets.QLineEdit(self.input_data_frame)
        self.inp_file_name.setGeometry(QtCore.QRect(80, 40, 421, 25))
        self.inp_file_name.setObjectName('inp_file_name')
        self.label_6 = QtWidgets.QLabel(self.input_data_frame)
        self.label_6.setGeometry(QtCore.QRect(10, 70, 67, 17))
        self.label_6.setObjectName('label_6')
        self.iteration_type_combo = QtWidgets.QComboBox(self.input_data_frame)
        self.iteration_type_combo.setGeometry(QtCore.QRect(80, 70, 86, 25))
        self.iteration_type_combo.setObjectName('iteration_type_combo')
        self.iteration_type_combo.addItem('')
        self.iteration_type_combo.addItem('')
        self.iteration_type_combo.addItem('')
        self.iteration_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.iteration_number.setGeometry(QtCore.QRect(170, 70, 31, 25))
        self.iteration_number.setObjectName('iteration_number')
        self.label_7 = QtWidgets.QLabel(self.input_data_frame)
        self.label_7.setGeometry(QtCore.QRect(230, 70, 41, 17))
        self.label_7.setObjectName('label_7')
        self.atom_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.atom_number.setGeometry(QtCore.QRect(280, 70, 21, 25))
        self.atom_number.setObjectName('atom_number')
        self.label_8 = QtWidgets.QLabel(self.input_data_frame)
        self.label_8.setGeometry(QtCore.QRect(320, 70, 51, 17))
        self.label_8.setObjectName('label_8')
        self.orbital_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.orbital_number.setGeometry(QtCore.QRect(380, 70, 21, 25))
        self.orbital_number.setObjectName('orbital_number')
        self.label_9 = QtWidgets.QLabel(self.input_data_frame)
        self.label_9.setGeometry(QtCore.QRect(420, 70, 31, 17))
        self.label_9.setObjectName('label_9')
        self.spin_type_combo = QtWidgets.QComboBox(self.input_data_frame)
        self.spin_type_combo.setGeometry(QtCore.QRect(460, 70, 81, 25))
        self.spin_type_combo.setObjectName('spin_type_combo')
        self.spin_type_combo.addItem('')
        self.spin_type_combo.addItem('')
        self.spin_type_combo.addItem('')
        self.load_data_button = QtWidgets.QPushButton(self.input_data_frame)
        self.load_data_button.setGeometry(QtCore.QRect(350, 110, 89, 25))
        self.load_data_button.setObjectName('load_data_button')
        self.label_10 = QtWidgets.QLabel(self.input_data_frame)
        self.label_10.setGeometry(QtCore.QRect(10, 110, 241, 17))
        self.label_10.setObjectName('label_10')
        self.num_mats_freq = QtWidgets.QLineEdit(self.input_data_frame)
        self.num_mats_freq.setGeometry(QtCore.QRect(250, 110, 41, 25))
        self.num_mats_freq.setObjectName('num_mats_freq')
        self.show_data_button = QtWidgets.QPushButton(self.input_data_frame)
        self.show_data_button.setGeometry(QtCore.QRect(450, 110, 89, 25))
        self.show_data_button.setObjectName('show_data_button')
        self.select_file_button = QtWidgets.QToolButton(self.input_data_frame)
        self.select_file_button.setGeometry(QtCore.QRect(510, 40, 26, 24))
        self.select_file_button.setObjectName('select_file_button')
        self.ignore_checkbox = QtWidgets.QCheckBox(self.input_data_frame)
        self.ignore_checkbox.setGeometry(QtCore.QRect(350, 10, 131, 23))
        self.ignore_checkbox.setObjectName('ignore_checkbox')
        self.input_data_tabs.addTab(self.w2dyn_tab, '')
        # plain text-file input tab
        self.text_tab = QtWidgets.QWidget()
        self.text_tab.setObjectName('text_tab')
        self.inp_data_type_text = QtWidgets.QComboBox(self.text_tab)
        self.inp_data_type_text.setGeometry(QtCore.QRect(50, 10, 121, 25))
        self.inp_data_type_text.setObjectName('inp_data_type_text')
        self.inp_data_type_text.addItem('')
        self.inp_data_type_text.addItem('')
        self.label_14 = QtWidgets.QLabel(self.text_tab)
        self.label_14.setGeometry(QtCore.QRect(10, 10, 41, 17))
        self.label_14.setObjectName('label_14')
        self.label_15 = QtWidgets.QLabel(self.text_tab)
        self.label_15.setGeometry(QtCore.QRect(180, 10, 91, 17))
        self.label_15.setObjectName('label_15')
        self.label_16 = QtWidgets.QLabel(self.text_tab)
        self.label_16.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_16.setObjectName('label_16')
        self.inp_file_name_2 = QtWidgets.QLineEdit(self.text_tab)
        self.inp_file_name_2.setGeometry(QtCore.QRect(80, 40, 421, 25))
        self.inp_file_name_2.setObjectName('inp_file_name_2')
        self.select_file_button_2 = QtWidgets.QToolButton(self.text_tab)
        self.select_file_button_2.setGeometry(QtCore.QRect(510, 40, 26, 24))
        self.select_file_button_2.setObjectName('select_file_button_2')
        self.load_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.load_data_button_2.setGeometry(QtCore.QRect(350, 110, 89, 25))
        self.load_data_button_2.setObjectName('load_data_button_2')
        self.show_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.show_data_button_2.setGeometry(QtCore.QRect(450, 110, 89, 25))
        self.show_data_button_2.setObjectName('show_data_button_2')
        self.label_17 = QtWidgets.QLabel(self.text_tab)
        self.label_17.setGeometry(QtCore.QRect(10, 80, 31, 17))
        self.label_17.setObjectName('label_17')
        self.n_skip = QtWidgets.QLineEdit(self.text_tab)
        self.n_skip.setGeometry(QtCore.QRect(50, 80, 31, 25))
        self.n_skip.setObjectName('n_skip')
        self.label_18 = QtWidgets.QLabel(self.text_tab)
        self.label_18.setGeometry(QtCore.QRect(90, 80, 151, 17))
        self.label_18.setObjectName('label_18')
        self.label_19 = QtWidgets.QLabel(self.text_tab)
        self.label_19.setGeometry(QtCore.QRect(10, 110, 31, 17))
        self.label_19.setObjectName('label_19')
        self.num_mats_freq_text = QtWidgets.QLineEdit(self.text_tab)
        self.num_mats_freq_text.setGeometry(QtCore.QRect(50, 110, 31, 25))
        self.num_mats_freq_text.setObjectName('num_mats_freq_text')
        self.label_20 = QtWidgets.QLabel(self.text_tab)
        self.label_20.setGeometry(QtCore.QRect(90, 110, 161, 17))
        self.label_20.setObjectName('label_20')
        self.input_data_tabs.addTab(self.text_tab, '')
        # --- window chrome: central widget, menu bar, status bar ---
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 760, 22))
        self.menubar.setObjectName('menubar')
        self.menuMaxEnt = QtWidgets.QMenu(self.menubar)
        self.menuMaxEnt.setObjectName('menuMaxEnt')
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName('statusbar')
        MainWindow.setStatusBar(self.statusbar)
        self.menubar.addAction(self.menuMaxEnt.menuAction())
        self.retranslateUi(MainWindow)
        self.input_data_tabs.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible texts and tooltips (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
        self.real_freq_frame.setToolTip(_translate('MainWindow', 'configure real-frequency grid'))
        self.real_freq_frame.setWhatsThis(_translate('MainWindow', 'real-frequency grid'))
        self.label.setText(_translate('MainWindow', 'Real-frequency grid'))
        self.label_3.setText(_translate('MainWindow', 'max'))
        self.label_4.setText(_translate('MainWindow', 'n'))
        self.grid_type_combo.setToolTip(_translate('MainWindow', 'equispaced or centered grid (denser around Fermi energy)'))
        self.grid_type_combo.setItemText(0, _translate('MainWindow', 'equispaced symmetric'))
        self.grid_type_combo.setItemText(1, _translate('MainWindow', 'centered symmetric'))
        self.max_real_freq.setToolTip(_translate('MainWindow', 'upper border of real-frequency grid. (lower border is set symmetrically)'))
        self.max_real_freq.setText(_translate('MainWindow', '20'))
        self.num_real_freq.setToolTip(_translate('MainWindow', 'number frequencies on real axis; should be an odd number'))
        self.num_real_freq.setText(_translate('MainWindow', '401'))
        self.gen_real_grid_button.setText(_translate('MainWindow', 'Generate'))
        self.doit_button.setToolTip(_translate('MainWindow', 'perform the analytical continuation'))
        self.doit_button.setText(_translate('MainWindow', 'Do it!'))
        self.blur_width.setToolTip(_translate('MainWindow', 'set the blur width here'))
        self.blur_width.setText(_translate('MainWindow', '0.1'))
        self.label_11.setText(_translate('MainWindow', 'Width'))
        self.text_output.setToolTip(_translate('MainWindow', 'in this field some output will be shown'))
        self.save_button.setToolTip(_translate('MainWindow', 'click this button to save the output'))
        self.save_button.setText(_translate('MainWindow', 'Save'))
        self.output_directory_button.setToolTip(_translate('MainWindow', 'Choose a directory, where you want to save the output'))
        self.output_directory_button.setText(_translate('MainWindow', '...'))
        self.out_file_name.setToolTip(_translate('MainWindow', 'type full output name here (including path)'))
        self.label_12.setText(_translate('MainWindow', 'Output file:'))
        self.n_interpolation.setToolTip(_translate('MainWindow', 'number of regularly spaced grid points for interpolation'))
        self.n_interpolation.setText(_translate('MainWindow', '0'))
        self.label_13.setText(_translate('MainWindow', 'regularly spaced grid points'))
        self.preblur_checkbox.setToolTip(_translate('MainWindow', 'check this if you want to use preblur'))
        self.preblur_checkbox.setText(_translate('MainWindow', 'Preblur'))
        self.interpolate_checkbox.setToolTip(_translate('MainWindow', 'check this for interpolating output to regular grid'))
        self.interpolate_checkbox.setText(_translate('MainWindow', 'Interpolate output to'))
        self.label_2.setText(_translate('MainWindow', 'Load w2dynamics data'))
        self.inp_data_type.setItemText(0, _translate('MainWindow', 'Self-energy'))
        self.inp_data_type.setItemText(1, _translate('MainWindow', "Green's function"))
        self.label_5.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.label_6.setText(_translate('MainWindow', 'iteration'))
        self.iteration_type_combo.setItemText(0, _translate('MainWindow', 'DMFT'))
        self.iteration_type_combo.setItemText(1, _translate('MainWindow', 'STAT'))
        self.iteration_type_combo.setItemText(2, _translate('MainWindow', 'WORM'))
        self.iteration_number.setToolTip(_translate('MainWindow', 'integer; leave empty for last iteration'))
        self.label_7.setText(_translate('MainWindow', 'Atom'))
        self.atom_number.setToolTip(_translate('MainWindow', 'choose inequivalent atom (one-based integer)'))
        self.atom_number.setText(_translate('MainWindow', '1'))
        self.label_8.setText(_translate('MainWindow', 'Orbital'))
        self.orbital_number.setToolTip(_translate('MainWindow', 'choose orbital (one-based integer)'))
        self.orbital_number.setText(_translate('MainWindow', '1'))
        self.label_9.setText(_translate('MainWindow', 'Spin'))
        self.spin_type_combo.setToolTip(_translate('MainWindow', 'choose spin up/down; average for paramagnetic system'))
        self.spin_type_combo.setItemText(0, _translate('MainWindow', 'average'))
        self.spin_type_combo.setItemText(1, _translate('MainWindow', 'up'))
        self.spin_type_combo.setItemText(2, _translate('MainWindow', 'down'))
        self.load_data_button.setText(_translate('MainWindow', 'Load data'))
        self.label_10.setText(_translate('MainWindow', 'Number of Matsubara frequencies'))
        self.num_mats_freq.setToolTip(_translate('MainWindow', 'How many Matsubara frequencies do you want to use for the continuation?'))
        self.show_data_button.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button.setText(_translate('MainWindow', 'Show data'))
        self.select_file_button.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button.setText(_translate('MainWindow', '...'))
        self.ignore_checkbox.setText(_translate('MainWindow', 'Ignore real part'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.w2dyn_tab), _translate('MainWindow', ' w2dynamics file'))
        self.inp_data_type_text.setItemText(0, _translate('MainWindow', 'Self-energy'))
        self.inp_data_type_text.setItemText(1, _translate('MainWindow', "Green's function"))
        self.label_14.setText(_translate('MainWindow', 'Load'))
        self.label_15.setText(_translate('MainWindow', 'from text file'))
        self.label_16.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name_2.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.select_file_button_2.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button_2.setText(_translate('MainWindow', '...'))
        self.load_data_button_2.setText(_translate('MainWindow', 'Load data'))
        self.show_data_button_2.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button_2.setText(_translate('MainWindow', 'Show data'))
        self.label_17.setText(_translate('MainWindow', 'Skip'))
        self.label_18.setText(_translate('MainWindow', 'lines at the beginning'))
        self.label_19.setText(_translate('MainWindow', 'Use'))
        self.label_20.setText(_translate('MainWindow', 'Matsubara frequencies'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.text_tab), _translate('MainWindow', 'text file'))
        self.menuMaxEnt.setTitle(_translate('MainWindow', 'MaxEnt'))
|
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
'The Main Window of the graphical user interface.\n\n The class MainWindow inherits from Ui_MainWindow, which is\n defined in pade_ui.py. The latter file is autogenerated\n by pyuic from pade_ui.ui [`pyuic5 pade_ui.ui -o pade_ui.py`]\n The ui file can be edited by the QtDesigner.\n '
def __init__(self, *args, obj=None, **kwargs):
'Connect the widgets, instantiate the main classes.'
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi(self)
self.realgrid = RealFrequencyGrid(wmax=float(self.max_real_freq.text()), nw=int(self.num_real_freq.text()), type=str(self.grid_type_combo.currentText()))
self.connect_realgrid_button()
self.connect_wmax()
self.connect_nw()
self.connect_grid_type()
self.input_data = InputData(fname=str(self.inp_file_name.text()), iter_type=str(self.iteration_type_combo.currentText()), iter_num=str(self.iteration_number.text()), data_type=str(self.inp_data_type.currentText()), atom=str(self.atom_number.text()), orbital=str(self.orbital_number.text()), spin=str(self.spin_type_combo.currentText()), num_mats=str(self.num_mats_freq.text()))
self.connect_select_button()
self.connect_load_button()
self.connect_show_button()
self.connect_load_button_text()
self.connect_show_button_2()
self.connect_select_button_2()
self.connect_doit_button()
self.output_data = OutputData()
self.connect_select_output_button()
self.connect_save_button()
def connect_realgrid_button(self):
self.gen_real_grid_button.clicked.connect((lambda : self.realgrid.create_grid()))
def connect_wmax(self):
self.max_real_freq.returnPressed.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))
self.max_real_freq.editingFinished.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))
def connect_nw(self):
self.num_real_freq.returnPressed.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))
self.num_real_freq.editingFinished.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))
def connect_grid_type(self):
self.grid_type_combo.activated.connect((lambda : self.realgrid.update_type(str(self.grid_type_combo.currentText()))))
def connect_fname_input(self):
self.inp_file_name.editingFinished.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))
self.inp_file_name.textChanged.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))
def get_fname(self):
self.inp_file_name.setText(QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd(), 'HDF5 files (*.hdf5)')[0])
def connect_select_button(self):
self.select_file_button.clicked.connect(self.get_fname)
def get_fname_text(self):
self.inp_file_name_2.setText(QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd(), 'text files (*.dat *.txt)')[0])
def connect_select_button_2(self):
self.select_file_button_2.clicked.connect(self.get_fname_text)
def connect_show_button(self):
self.show_data_button.clicked.connect((lambda : self.input_data.plot()))
def connect_show_button_2(self):
self.show_data_button_2.clicked.connect((lambda : self.input_data.plot()))
def load_w2dynamics_data(self):
self.input_data = InputData(fname=str(self.inp_file_name.text()), iter_type=str(self.iteration_type_combo.currentText()), iter_num=str(self.iteration_number.text()), data_type=str(self.inp_data_type.currentText()), atom=str(self.atom_number.text()), orbital=str(self.orbital_number.text()), spin=str(self.spin_type_combo.currentText()), num_mats=str(self.num_mats_freq.text()), ignore_real_part=self.ignore_checkbox.isChecked())
self.input_data.load_data()
def connect_load_button(self):
self.load_data_button.clicked.connect(self.load_w2dynamics_data)
def load_text_data(self):
self.input_data = TextInputData(fname=str(self.inp_file_name_2.text()), data_type=str(self.inp_data_type_text.currentText()), n_skip=str(self.n_skip.text()), num_mats=str(self.num_mats_freq_text.text()))
self.input_data.read_data()
def connect_load_button_text(self):
self.load_data_button_2.clicked.connect(self.load_text_data)
def parse_mats_ind(self):
mats_ind_str = self.mats_ind_inp.text()
mats_list_str = [part.strip() for part in mats_ind_str.split(',')]
if ('' in mats_list_str):
mats_list_str.remove('')
mats_ind = np.array([int(ind) for ind in mats_list_str])
print(mats_ind)
return mats_ind
def main_function(self):
'Main function for the analytic continuation procedure.\n\n This function is called when the "Do it" button is clicked.\n It performs an analytical continuation for the present settings\n and shows a plot.\n '
mats_ind = self.parse_mats_ind()
self.ana_cont_probl = cont.AnalyticContinuationProblem(im_axis=self.input_data.mats[mats_ind], im_data=self.input_data.value[mats_ind], re_axis=self.realgrid.grid, kernel_mode='freq_fermionic')
sol = self.ana_cont_probl.solve(method='pade')
check_axis = np.linspace(0.0, (1.25 * self.input_data.mats[mats_ind[(- 1)]]), num=500)
check = self.ana_cont_probl.solver.check(im_axis_fine=check_axis)
self.output_data.update(self.realgrid.grid, sol.A_opt, self.input_data)
(fig, ax) = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25))
ax[(0, 0)].plot(self.realgrid.grid, sol.A_opt)
ax[(0, 0)].set_xlabel('$\\omega$')
ax[(0, 0)].set_ylabel('spectrum')
ax[(0, 1)].plot(self.input_data.mats[mats_ind], self.input_data.value.real[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Re[selected data]')
ax[(0, 1)].plot(self.input_data.mats[mats_ind], self.input_data.value.imag[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Im[selected data]')
ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.real, color='blue', ls=':', marker='x', markersize=5, label='Re[full data]')
ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.imag, color='green', ls=':', marker='+', markersize=5, label='Im[full data]')
ax[(1, 0)].plot(self.input_data.mats[mats_ind], self.input_data.value.real[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Re[selected data]')
ax[(1, 0)].plot(self.input_data.mats[mats_ind], self.input_data.value.imag[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Im[selected data]')
ax[(1, 0)].plot(check_axis, check.real, ls='--', color='gray', label='Re[Pade interpolation]')
ax[(1, 0)].plot(check_axis, check.imag, color='gray', label='Im[Pade interpolation]')
ax[(1, 0)].set_xlabel('$\\nu_n$')
ax[(1, 0)].set_ylabel(self.input_data.data_type)
ax[(1, 0)].legend()
ax[(1, 0)].set_xlim(0.0, (1.05 * check_axis[(- 1)]))
plt.tight_layout()
plt.show()
def connect_doit_button(self):
self.doit_button.clicked.connect((lambda : self.main_function()))
def connect_fname_output(self):
self.out_file_name.editingFinished.connect((lambda : self.output_data.update_fname(str(self.out_file_name.text()))))
self.inp_file_name.textChanged.connect((lambda : self.output_data.update_fname(str(self.out_file_name.text()))))
def get_fname_output(self):
fname_out = QtWidgets.QFileDialog.getSaveFileName(self, 'Save as', '/'.join(self.input_data.fname.split('/')[:(- 1)]), 'DAT files (*.dat)')[0]
self.out_file_name.setText(fname_out)
self.output_data.update_fname(fname_out)
    def connect_select_output_button(self):
        """Wire the '...' output-directory button to the save-file dialog."""
        self.output_directory_button.clicked.connect(self.get_fname_output)
    def save_output(self):
        """Save the continuation result to the file named in the output field.

        Returns 1 (an error flag, not an exception) when no file name has
        been entered; otherwise updates the target name and writes the data
        without interpolation, returning None.
        """
        fname_out = str(self.out_file_name.text())
        if (fname_out == ''):
            print('Error in saving: First you have to specify the output file name.')
            return 1
        self.output_data.update_fname(fname_out)
        self.output_data.save(interpolate=False)
    def connect_save_button(self):
        """Wire the 'Save' button to save_output."""
        self.save_button.clicked.connect((lambda : self.save_output()))
|
class Ui_MainWindow(object):
    """Static widget layout for the Pade analytic-continuation main window.

    NOTE(review): this looks machine-generated (pyuic5 style — fixed
    geometries, numbered labels, retranslateUi). If a .ui file exists,
    regenerate instead of hand-editing; confirm before refactoring.
    """
    def setupUi(self, MainWindow):
        """Create and position every widget; no signal wiring happens here."""
        MainWindow.setObjectName('MainWindow')
        MainWindow.resize(800, 399)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName('centralwidget')
        # --- real-frequency grid configuration frame (top left) ---
        self.real_freq_frame = QtWidgets.QFrame(self.centralwidget)
        self.real_freq_frame.setGeometry(QtCore.QRect(20, 20, 171, 171))
        self.real_freq_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.real_freq_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.real_freq_frame.setObjectName('real_freq_frame')
        self.label = QtWidgets.QLabel(self.real_freq_frame)
        self.label.setGeometry(QtCore.QRect(10, 10, 141, 17))
        self.label.setObjectName('label')
        self.label_3 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_3.setGeometry(QtCore.QRect(10, 70, 31, 17))
        self.label_3.setObjectName('label_3')
        self.label_4 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_4.setGeometry(QtCore.QRect(10, 110, 21, 17))
        self.label_4.setObjectName('label_4')
        self.grid_type_combo = QtWidgets.QComboBox(self.real_freq_frame)
        self.grid_type_combo.setGeometry(QtCore.QRect(10, 40, 141, 25))
        self.grid_type_combo.setObjectName('grid_type_combo')
        self.grid_type_combo.addItem('')
        self.grid_type_combo.addItem('')
        self.max_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.max_real_freq.setGeometry(QtCore.QRect(40, 70, 41, 25))
        self.max_real_freq.setObjectName('max_real_freq')
        self.num_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.num_real_freq.setGeometry(QtCore.QRect(40, 110, 41, 25))
        self.num_real_freq.setObjectName('num_real_freq')
        self.gen_real_grid_button = QtWidgets.QPushButton(self.real_freq_frame)
        self.gen_real_grid_button.setGeometry(QtCore.QRect(90, 110, 71, 25))
        self.gen_real_grid_button.setObjectName('gen_real_grid_button')
        # --- input-data tabs: w2dynamics HDF5 file vs plain text file ---
        self.input_data_tabs = QtWidgets.QTabWidget(self.centralwidget)
        self.input_data_tabs.setGeometry(QtCore.QRect(210, 20, 561, 171))
        self.input_data_tabs.setObjectName('input_data_tabs')
        # w2dynamics tab widgets
        self.w2dyn_tab = QtWidgets.QWidget()
        self.w2dyn_tab.setObjectName('w2dyn_tab')
        self.input_data_frame = QtWidgets.QFrame(self.w2dyn_tab)
        self.input_data_frame.setGeometry(QtCore.QRect(0, 0, 561, 141))
        self.input_data_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.input_data_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.input_data_frame.setObjectName('input_data_frame')
        self.label_2 = QtWidgets.QLabel(self.input_data_frame)
        self.label_2.setGeometry(QtCore.QRect(10, 10, 161, 17))
        self.label_2.setObjectName('label_2')
        self.inp_data_type = QtWidgets.QComboBox(self.input_data_frame)
        self.inp_data_type.setGeometry(QtCore.QRect(180, 10, 121, 25))
        self.inp_data_type.setObjectName('inp_data_type')
        self.inp_data_type.addItem('')
        self.inp_data_type.addItem('')
        self.label_5 = QtWidgets.QLabel(self.input_data_frame)
        self.label_5.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_5.setObjectName('label_5')
        self.inp_file_name = QtWidgets.QLineEdit(self.input_data_frame)
        self.inp_file_name.setGeometry(QtCore.QRect(80, 40, 421, 25))
        self.inp_file_name.setObjectName('inp_file_name')
        self.label_6 = QtWidgets.QLabel(self.input_data_frame)
        self.label_6.setGeometry(QtCore.QRect(10, 70, 67, 17))
        self.label_6.setObjectName('label_6')
        self.iteration_type_combo = QtWidgets.QComboBox(self.input_data_frame)
        self.iteration_type_combo.setGeometry(QtCore.QRect(80, 70, 86, 25))
        self.iteration_type_combo.setObjectName('iteration_type_combo')
        self.iteration_type_combo.addItem('')
        self.iteration_type_combo.addItem('')
        self.iteration_type_combo.addItem('')
        self.iteration_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.iteration_number.setGeometry(QtCore.QRect(170, 70, 31, 25))
        self.iteration_number.setObjectName('iteration_number')
        self.label_7 = QtWidgets.QLabel(self.input_data_frame)
        self.label_7.setGeometry(QtCore.QRect(230, 70, 41, 17))
        self.label_7.setObjectName('label_7')
        self.atom_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.atom_number.setGeometry(QtCore.QRect(280, 70, 21, 25))
        self.atom_number.setObjectName('atom_number')
        self.label_8 = QtWidgets.QLabel(self.input_data_frame)
        self.label_8.setGeometry(QtCore.QRect(320, 70, 51, 17))
        self.label_8.setObjectName('label_8')
        self.orbital_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.orbital_number.setGeometry(QtCore.QRect(380, 70, 21, 25))
        self.orbital_number.setObjectName('orbital_number')
        self.label_9 = QtWidgets.QLabel(self.input_data_frame)
        self.label_9.setGeometry(QtCore.QRect(420, 70, 31, 17))
        self.label_9.setObjectName('label_9')
        self.spin_type_combo = QtWidgets.QComboBox(self.input_data_frame)
        self.spin_type_combo.setGeometry(QtCore.QRect(460, 70, 81, 25))
        self.spin_type_combo.setObjectName('spin_type_combo')
        self.spin_type_combo.addItem('')
        self.spin_type_combo.addItem('')
        self.spin_type_combo.addItem('')
        self.load_data_button = QtWidgets.QPushButton(self.input_data_frame)
        self.load_data_button.setGeometry(QtCore.QRect(350, 110, 89, 25))
        self.load_data_button.setObjectName('load_data_button')
        self.label_10 = QtWidgets.QLabel(self.input_data_frame)
        self.label_10.setGeometry(QtCore.QRect(10, 110, 241, 17))
        self.label_10.setObjectName('label_10')
        self.num_mats_freq = QtWidgets.QLineEdit(self.input_data_frame)
        self.num_mats_freq.setGeometry(QtCore.QRect(250, 110, 41, 25))
        self.num_mats_freq.setObjectName('num_mats_freq')
        self.show_data_button = QtWidgets.QPushButton(self.input_data_frame)
        self.show_data_button.setGeometry(QtCore.QRect(450, 110, 89, 25))
        self.show_data_button.setObjectName('show_data_button')
        self.select_file_button = QtWidgets.QToolButton(self.input_data_frame)
        self.select_file_button.setGeometry(QtCore.QRect(510, 40, 26, 24))
        self.select_file_button.setObjectName('select_file_button')
        self.ignore_checkbox = QtWidgets.QCheckBox(self.input_data_frame)
        self.ignore_checkbox.setGeometry(QtCore.QRect(320, 10, 131, 23))
        self.ignore_checkbox.setObjectName('ignore_checkbox')
        self.input_data_tabs.addTab(self.w2dyn_tab, '')
        # text-file tab widgets
        self.text_tab = QtWidgets.QWidget()
        self.text_tab.setObjectName('text_tab')
        self.inp_data_type_text = QtWidgets.QComboBox(self.text_tab)
        self.inp_data_type_text.setGeometry(QtCore.QRect(50, 10, 121, 25))
        self.inp_data_type_text.setObjectName('inp_data_type_text')
        self.inp_data_type_text.addItem('')
        self.inp_data_type_text.addItem('')
        self.inp_data_type_text.addItem('')
        self.label_14 = QtWidgets.QLabel(self.text_tab)
        self.label_14.setGeometry(QtCore.QRect(10, 10, 41, 17))
        self.label_14.setObjectName('label_14')
        self.label_15 = QtWidgets.QLabel(self.text_tab)
        self.label_15.setGeometry(QtCore.QRect(180, 10, 91, 17))
        self.label_15.setObjectName('label_15')
        self.label_16 = QtWidgets.QLabel(self.text_tab)
        self.label_16.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_16.setObjectName('label_16')
        self.inp_file_name_2 = QtWidgets.QLineEdit(self.text_tab)
        self.inp_file_name_2.setGeometry(QtCore.QRect(80, 40, 421, 25))
        self.inp_file_name_2.setObjectName('inp_file_name_2')
        self.select_file_button_2 = QtWidgets.QToolButton(self.text_tab)
        self.select_file_button_2.setGeometry(QtCore.QRect(510, 40, 26, 24))
        self.select_file_button_2.setObjectName('select_file_button_2')
        self.load_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.load_data_button_2.setGeometry(QtCore.QRect(350, 110, 89, 25))
        self.load_data_button_2.setObjectName('load_data_button_2')
        self.show_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.show_data_button_2.setGeometry(QtCore.QRect(450, 110, 89, 25))
        self.show_data_button_2.setObjectName('show_data_button_2')
        self.label_17 = QtWidgets.QLabel(self.text_tab)
        self.label_17.setGeometry(QtCore.QRect(10, 80, 31, 17))
        self.label_17.setObjectName('label_17')
        self.n_skip = QtWidgets.QLineEdit(self.text_tab)
        self.n_skip.setGeometry(QtCore.QRect(50, 80, 31, 25))
        self.n_skip.setObjectName('n_skip')
        self.label_18 = QtWidgets.QLabel(self.text_tab)
        self.label_18.setGeometry(QtCore.QRect(90, 80, 151, 17))
        self.label_18.setObjectName('label_18')
        self.label_19 = QtWidgets.QLabel(self.text_tab)
        self.label_19.setGeometry(QtCore.QRect(10, 110, 31, 17))
        self.label_19.setObjectName('label_19')
        self.num_mats_freq_text = QtWidgets.QLineEdit(self.text_tab)
        self.num_mats_freq_text.setGeometry(QtCore.QRect(50, 110, 31, 25))
        self.num_mats_freq_text.setObjectName('num_mats_freq_text')
        self.label_20 = QtWidgets.QLabel(self.text_tab)
        self.label_20.setGeometry(QtCore.QRect(90, 110, 161, 17))
        self.label_20.setObjectName('label_20')
        self.input_data_tabs.addTab(self.text_tab, '')
        # --- bottom row: Matsubara index selection, run button, output path ---
        self.doit_button = QtWidgets.QPushButton(self.centralwidget)
        self.doit_button.setGeometry(QtCore.QRect(650, 230, 131, 41))
        self.doit_button.setObjectName('doit_button')
        self.label_12 = QtWidgets.QLabel(self.centralwidget)
        self.label_12.setGeometry(QtCore.QRect(50, 300, 91, 17))
        self.label_12.setObjectName('label_12')
        self.output_directory_button = QtWidgets.QToolButton(self.centralwidget)
        self.output_directory_button.setGeometry(QtCore.QRect(140, 300, 26, 24))
        self.output_directory_button.setObjectName('output_directory_button')
        self.save_button = QtWidgets.QPushButton(self.centralwidget)
        self.save_button.setGeometry(QtCore.QRect(650, 300, 89, 25))
        self.save_button.setObjectName('save_button')
        self.out_file_name = QtWidgets.QLineEdit(self.centralwidget)
        self.out_file_name.setGeometry(QtCore.QRect(180, 300, 451, 25))
        self.out_file_name.setObjectName('out_file_name')
        self.mats_ind_inp = QtWidgets.QLineEdit(self.centralwidget)
        self.mats_ind_inp.setGeometry(QtCore.QRect(240, 240, 391, 25))
        self.mats_ind_inp.setObjectName('mats_ind_inp')
        self.label_11 = QtWidgets.QLabel(self.centralwidget)
        self.label_11.setGeometry(QtCore.QRect(50, 240, 191, 17))
        self.label_11.setObjectName('label_11')
        # fix the z-order of overlapping widgets
        self.real_freq_frame.raise_()
        self.doit_button.raise_()
        self.label_12.raise_()
        self.output_directory_button.raise_()
        self.save_button.raise_()
        self.out_file_name.raise_()
        self.mats_ind_inp.raise_()
        self.label_11.raise_()
        self.input_data_tabs.raise_()
        MainWindow.setCentralWidget(self.centralwidget)
        # --- menu and status bar ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
        self.menubar.setObjectName('menubar')
        self.menuPade = QtWidgets.QMenu(self.menubar)
        self.menuPade.setObjectName('menuPade')
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName('statusbar')
        MainWindow.setStatusBar(self.statusbar)
        self.menubar.addAction(self.menuPade.menuAction())
        self.retranslateUi(MainWindow)
        self.input_data_tabs.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings and tooltips (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
        self.real_freq_frame.setToolTip(_translate('MainWindow', 'configure real-frequency grid'))
        self.real_freq_frame.setWhatsThis(_translate('MainWindow', 'real-frequency grid'))
        self.label.setText(_translate('MainWindow', 'Real-frequency grid'))
        self.label_3.setText(_translate('MainWindow', 'max'))
        self.label_4.setText(_translate('MainWindow', 'n'))
        self.grid_type_combo.setToolTip(_translate('MainWindow', 'equispaced or centered grid (denser around Fermi energy)'))
        self.grid_type_combo.setItemText(0, _translate('MainWindow', 'equispaced symmetric'))
        self.grid_type_combo.setItemText(1, _translate('MainWindow', 'equispaced positive'))
        self.max_real_freq.setToolTip(_translate('MainWindow', 'upper border of real-frequency grid. (lower border is set symmetrically)'))
        self.max_real_freq.setText(_translate('MainWindow', '20'))
        self.num_real_freq.setToolTip(_translate('MainWindow', 'number frequencies on real axis; should be an odd number'))
        self.num_real_freq.setText(_translate('MainWindow', '401'))
        self.gen_real_grid_button.setText(_translate('MainWindow', 'Generate'))
        self.label_2.setText(_translate('MainWindow', 'Load w2dynamics data'))
        self.inp_data_type.setItemText(0, _translate('MainWindow', 'Self-energy'))
        self.inp_data_type.setItemText(1, _translate('MainWindow', "Green's function"))
        self.label_5.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.label_6.setText(_translate('MainWindow', 'iteration'))
        self.iteration_type_combo.setItemText(0, _translate('MainWindow', 'DMFT'))
        self.iteration_type_combo.setItemText(1, _translate('MainWindow', 'STAT'))
        self.iteration_type_combo.setItemText(2, _translate('MainWindow', 'WORM'))
        self.iteration_number.setToolTip(_translate('MainWindow', 'integer; leave empty for last iteration'))
        self.label_7.setText(_translate('MainWindow', 'Atom'))
        self.atom_number.setToolTip(_translate('MainWindow', 'choose inequivalent atom (one-based integer)'))
        self.atom_number.setText(_translate('MainWindow', '1'))
        self.label_8.setText(_translate('MainWindow', 'Orbital'))
        self.orbital_number.setToolTip(_translate('MainWindow', 'choose orbital (one-based integer)'))
        self.orbital_number.setText(_translate('MainWindow', '1'))
        self.label_9.setText(_translate('MainWindow', 'Spin'))
        self.spin_type_combo.setToolTip(_translate('MainWindow', 'choose spin up/down; average for paramagnetic system'))
        self.spin_type_combo.setItemText(0, _translate('MainWindow', 'average'))
        self.spin_type_combo.setItemText(1, _translate('MainWindow', 'up'))
        self.spin_type_combo.setItemText(2, _translate('MainWindow', 'down'))
        self.load_data_button.setText(_translate('MainWindow', 'Load data'))
        self.label_10.setText(_translate('MainWindow', 'Number of Matsubara frequencies'))
        self.num_mats_freq.setToolTip(_translate('MainWindow', 'How many Matsubara frequencies do you want to use for the continuation?'))
        self.show_data_button.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button.setText(_translate('MainWindow', 'Show data'))
        self.select_file_button.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button.setText(_translate('MainWindow', '...'))
        self.ignore_checkbox.setText(_translate('MainWindow', 'Ignore real part'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.w2dyn_tab), _translate('MainWindow', ' w2dynamics file'))
        self.inp_data_type_text.setItemText(0, _translate('MainWindow', 'Self-energy'))
        self.inp_data_type_text.setItemText(1, _translate('MainWindow', "Green's function"))
        self.inp_data_type_text.setItemText(2, _translate('MainWindow', 'bosonic'))
        self.label_14.setText(_translate('MainWindow', 'Load'))
        self.label_15.setText(_translate('MainWindow', 'from text file'))
        self.label_16.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name_2.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.select_file_button_2.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button_2.setText(_translate('MainWindow', '...'))
        self.load_data_button_2.setText(_translate('MainWindow', 'Load data'))
        self.show_data_button_2.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button_2.setText(_translate('MainWindow', 'Show data'))
        self.label_17.setText(_translate('MainWindow', 'Skip'))
        self.label_18.setText(_translate('MainWindow', 'lines at the beginning'))
        self.label_19.setText(_translate('MainWindow', 'Use'))
        self.label_20.setText(_translate('MainWindow', 'Matsubara frequencies'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.text_tab), _translate('MainWindow', 'text file'))
        self.doit_button.setToolTip(_translate('MainWindow', 'perform the analytical continuation'))
        self.doit_button.setText(_translate('MainWindow', 'Do it!'))
        self.label_12.setText(_translate('MainWindow', 'Output file:'))
        self.output_directory_button.setToolTip(_translate('MainWindow', 'Choose a directory, where you want to save the output'))
        self.output_directory_button.setText(_translate('MainWindow', '...'))
        self.save_button.setToolTip(_translate('MainWindow', 'click this button to save the output'))
        self.save_button.setText(_translate('MainWindow', 'Save'))
        self.out_file_name.setToolTip(_translate('MainWindow', 'type full output name here (including path)'))
        self.mats_ind_inp.setToolTip(_translate('MainWindow', 'which Matsubara indices to use (start from 0). There must be AT LEAST TWO numbers'))
        self.mats_ind_inp.setText(_translate('MainWindow', '0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10'))
        self.label_11.setText(_translate('MainWindow', 'Use Matsubara frequencies'))
        self.menuPade.setTitle(_translate('MainWindow', 'Pade'))
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Antisymmetrized Gaussian peak on the real-frequency grid.

    A normalized Gaussian of integrated weight `weight` centered at
    `maxpos` minus its mirror image at `-maxpos`, so the result is an
    odd function of frequency.
    """
    amplitude = weight / (np.sqrt(2.0 * np.pi) * width)
    main_lobe = np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
    mirror_lobe = np.exp(-0.5 * (wgrid + maxpos) ** 2 / width ** 2)
    return amplitude * main_lobe - amplitude * mirror_lobe
|
def noise(sigma, iwgrid):
    """Gaussian white noise of standard deviation `sigma`, one sample per point of `iwgrid`."""
    return sigma * np.random.randn(iwgrid.shape[0])
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Single normalized Gaussian peak of integrated weight `weight`,
    centered at `maxpos` with standard deviation `width`."""
    amplitude = weight / (np.sqrt(2.0 * np.pi) * width)
    return amplitude * np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
|
def noise(sigma, iwgrid):
    """White Gaussian noise with std `sigma`, matching the length of `iwgrid`."""
    samples = np.random.randn(iwgrid.shape[0])
    return samples * sigma
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Normalized Gaussian of weight `weight` at position `maxpos` (std `width`) on `wgrid`."""
    prefactor = weight / (np.sqrt(2.0 * np.pi) * width)
    exponent = -0.5 * (wgrid - maxpos) ** 2 / width ** 2
    return prefactor * np.exp(exponent)
|
def noise(sigma, iwgrid):
    """Draw one N(0, sigma^2) sample per entry of `iwgrid`."""
    n_points = iwgrid.shape[0]
    return sigma * np.random.randn(n_points)
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Odd (antisymmetrized) Gaussian spectrum: peak at +maxpos minus mirror at -maxpos."""
    prefactor = weight / (np.sqrt(2.0 * np.pi) * width)
    positive_part = prefactor * np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
    negative_part = prefactor * np.exp(-0.5 * (wgrid + maxpos) ** 2 / width ** 2)
    return positive_part - negative_part
|
def noise(sigma, iwgrid):
    """Gaussian noise vector, std `sigma`, one entry per Matsubara point."""
    return np.random.randn(iwgrid.shape[0]) * sigma
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Gaussian peak (area `weight`, center `maxpos`, std `width`) sampled on `wgrid`."""
    shifted = wgrid - maxpos
    norm = weight / (np.sqrt(2.0 * np.pi) * width)
    return norm * np.exp(-0.5 * shifted ** 2 / width ** 2)
|
def noise(sigma, iwgrid):
    """Zero-mean Gaussian noise scaled by `sigma`, shaped like the first axis of `iwgrid`."""
    return sigma * np.random.randn(iwgrid.shape[0])
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Return a normalized Gaussian peak with total weight `weight` on grid `wgrid`."""
    height = weight / (np.sqrt(2.0 * np.pi) * width)
    return height * np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
|
def noise(sigma, iwgrid):
    """Per-frequency Gaussian noise with standard deviation `sigma`."""
    draw = np.random.randn(iwgrid.shape[0])
    return sigma * draw
|
def connect(PORT):
    """Listen on TCP `PORT` (all interfaces), accept one client, return the connection.

    Fixes over the previous version:
    - the listening socket is now closed once a client is accepted (or on
      failure), instead of leaking for the process lifetime — the accepted
      connection stays open;
    - SO_REUSEADDR is set so a quick restart does not fail with
      'Address already in use' while the old socket is in TIME_WAIT.

    Args:
        PORT (int): TCP port number to listen on.

    Returns:
        socket.socket: the accepted, connected socket.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sock.bind(('', PORT))
        sock.listen(1)
        (conn, addr) = sock.accept()
    finally:
        # Close the listener in every case; `conn` (if accepted) stays open.
        sock.close()
    return conn
|
def send(conn, data):
    """Serialize `data` (a numpy array or array.array) to raw bytes and send it all.

    Bug fix: `tostring()` was deprecated and has since been removed from both
    ``array.array`` (Python 3.9) and ``numpy.ndarray`` (NumPy 2.0);
    ``tobytes()`` is the drop-in replacement with identical output.
    """
    coded_data = data.tobytes()
    conn.sendall(coded_data)
|
def recv(conn):
    """Receive up to 1024 bytes from `conn` and decode them as float32 values.

    NOTE(review): assumes the payload length is a multiple of 4 bytes and
    that one recv() returns the whole message — TODO confirm with sender.
    """
    raw = conn.recv(1024)
    decoded = array.array('f', raw)
    return decoded
|
def disconnect(conn):
    """Close the connection socket."""
    conn.close()
|
def get_benckmark_arg_parser():
    """Build the CLI parser for the Deformable DETR inference-speed benchmark.

    (The function name keeps its historical 'benckmark' spelling so existing
    callers keep working.)
    """
    parser = argparse.ArgumentParser('Benchmark inference speed of Deformable DETR.')
    parser.add_argument('--num_iters', type=int, default=300,
                        help='total iters to benchmark speed')
    parser.add_argument('--warm_iters', type=int, default=5,
                        help='ignore first several iters that are very slow')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='batch size in inference')
    parser.add_argument('--resume', type=str,
                        help='load the pre-trained checkpoint')
    return parser
|
@torch.no_grad()
def measure_average_inference_time(model, inputs, num_iters=100, warm_iters=5):
    """Average wall-clock seconds per forward pass of `model(inputs)` on GPU.

    Runs `num_iters` forward passes, synchronizing CUDA around each one so the
    timing covers the full kernel execution; the first `warm_iters` runs are
    discarded as warm-up. The recorded timings are printed before returning
    their mean.
    """
    timings = []
    for iteration in range(num_iters):
        torch.cuda.synchronize()
        start = time.perf_counter()
        model(inputs)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start
        if iteration >= warm_iters:
            timings.append(elapsed)
    print(timings)
    return sum(timings) / len(timings)
|
def benchmark():
    """Benchmark Deformable DETR inference and return throughput in images/s.

    Parses benchmark flags first; whatever is left over is handed to the main
    model argument parser. Builds the val dataset and the model, optionally
    restores a checkpoint, then times repeated forward passes on one batch.
    """
    args, leftover = get_benckmark_arg_parser().parse_known_args()
    main_args = get_main_args_parser().parse_args(leftover)
    # Sanity-check the benchmark configuration before doing any heavy work.
    assert (args.warm_iters < args.num_iters) and (args.num_iters > 0) and (args.warm_iters >= 0)
    assert args.batch_size > 0
    assert (args.resume is None) or os.path.exists(args.resume)
    dataset = build_dataset('val', main_args)
    model, _, _ = build_model(main_args)
    model.cuda()
    model.eval()
    if args.resume is not None:
        # Load to CPU first to avoid GPU OOM, then let load_state_dict move it.
        checkpoint = torch.load(args.resume, map_location=(lambda storage, loc: storage))
        model.load_state_dict(checkpoint['model'])
    # Replicate the first val image batch_size times to form one batch.
    sample = dataset.__getitem__(0)[0]
    inputs = nested_tensor_from_tensor_list([sample.cuda() for _ in range(args.batch_size)])
    avg_time = measure_average_inference_time(model, inputs, args.num_iters, args.warm_iters)
    return (1.0 / avg_time) * args.batch_size
|
def get_coco_api_from_dataset(dataset):
    """Unwrap up to 10 levels of torch Subset wrappers and return the COCO API object.

    Returns None implicitly when no CocoDetection is found within 10 levels.
    Each pass unwraps one Subset layer *before* checking, so a chain of
    exactly 10 Subsets around a CocoDetection is still resolved.
    """
    current = dataset
    for _depth in range(10):
        if isinstance(current, torch.utils.data.Subset):
            current = current.dataset
        if isinstance(current, CocoDetection):
            return current.coco
|
def build_dataset(image_set, args):
    """Construct the dataset selected by ``args.dataset_file``.

    Supported values are 'coco' and 'coco_panoptic'; anything else raises
    ValueError.
    """
    dataset_name = args.dataset_file
    if dataset_name == 'coco':
        return build_coco(image_set, args)
    if dataset_name == 'coco_panoptic':
        # Imported lazily so the panoptic dependencies are only needed when used.
        from .coco_panoptic import build as build_coco_panoptic
        return build_coco_panoptic(image_set, args)
    raise ValueError(f'dataset {dataset_name} not supported')
|
def to_cuda(samples, targets, device):
    """Move one batch (samples plus a list of target dicts) onto `device`.

    Uses non-blocking copies so transfers can overlap with compute when the
    source memory is pinned.
    """
    moved_samples = samples.to(device, non_blocking=True)
    moved_targets = []
    for target in targets:
        moved_targets.append({key: value.to(device, non_blocking=True) for (key, value) in target.items()})
    return (moved_samples, moved_targets)
|
class data_prefetcher():
    """Wraps a DataLoader and (optionally) prefetches the next batch to the GPU
    on a side CUDA stream, so host-to-device copies overlap with compute.

    With ``prefetch=False`` it degrades to a plain synchronous iterator that
    moves each batch to the device on demand. Exhaustion is signaled by
    ``next()`` returning ``(None, None)`` rather than raising StopIteration.
    """
    def __init__(self, loader, device, prefetch=True):
        # loader: an iterable of (samples, targets); device: target CUDA device.
        self.loader = iter(loader)
        self.prefetch = prefetch
        self.device = device
        if prefetch:
            # Dedicated stream so the copy in preload() runs concurrently
            # with work on the default stream.
            self.stream = torch.cuda.Stream()
            self.preload()
    def preload(self):
        """Fetch the next CPU batch and start its async copy on the side stream."""
        try:
            (self.next_samples, self.next_targets) = next(self.loader)
        except StopIteration:
            # Mark exhaustion; next() will hand out (None, None).
            self.next_samples = None
            self.next_targets = None
            return
        with torch.cuda.stream(self.stream):
            (self.next_samples, self.next_targets) = to_cuda(self.next_samples, self.next_targets, self.device)
    def next(self):
        """Return the current (samples, targets) pair and kick off the next prefetch.

        Returns (None, None) once the underlying loader is exhausted.
        """
        if self.prefetch:
            # Make the default stream wait until the prefetch copy finished.
            torch.cuda.current_stream().wait_stream(self.stream)
            samples = self.next_samples
            targets = self.next_targets
            if (samples is not None):
                # record_stream tells the caching allocator these tensors are
                # now used on the current stream, preventing premature reuse.
                samples.record_stream(torch.cuda.current_stream())
            if (targets is not None):
                for t in targets:
                    for (k, v) in t.items():
                        v.record_stream(torch.cuda.current_stream())
            # Immediately start copying the following batch.
            self.preload()
        else:
            try:
                (samples, targets) = next(self.loader)
                (samples, targets) = to_cuda(samples, targets, self.device)
            except StopIteration:
                samples = None
                targets = None
        return (samples, targets)
|
class PanopticEvaluator(object):
    """Accumulates panoptic segmentation predictions across processes and
    scores them with ``pq_compute`` against the ground-truth annotations.
    """

    def __init__(self, ann_file, ann_folder, output_dir='panoptic_eval'):
        self.gt_json = ann_file
        self.gt_folder = ann_folder
        # Only the main process creates the output directory.
        if utils.is_main_process():
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
        self.output_dir = output_dir
        self.predictions = []

    def update(self, predictions):
        """Write each prediction's PNG payload to disk and queue its metadata."""
        for prediction in predictions:
            png_path = os.path.join(self.output_dir, prediction['file_name'])
            with open(png_path, 'wb') as png_file:
                # pop() strips the binary payload so only metadata is kept in memory.
                png_file.write(prediction.pop('png_string'))
        self.predictions += predictions

    def synchronize_between_processes(self):
        """Gather the prediction lists from every rank into one flat list."""
        gathered = utils.all_gather(self.predictions)
        merged = []
        for rank_predictions in gathered:
            merged += rank_predictions
        self.predictions = merged

    def summarize(self):
        """On the main process, dump predictions.json and run PQ evaluation.

        Returns the pq_compute result dict on the main process, None elsewhere.
        """
        if not utils.is_main_process():
            return None
        json_data = {'annotations': self.predictions}
        predictions_json = os.path.join(self.output_dir, 'predictions.json')
        with open(predictions_json, 'w') as json_file:
            json_file.write(json.dumps(json_data))
        return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
|
class DistributedSampler(Sampler):
    """Sampler that restricts data loading to an exclusive subset per process.

    Useful with :class:`torch.nn.parallel.DistributedDataParallel`: each
    process passes one instance as the DataLoader sampler and sees only its
    own contiguous shard of the (optionally shuffled) index list. The index
    list is padded with a prefix of itself so every rank gets exactly
    ``ceil(len(dataset) / num_replicas)`` samples.

    Arguments:
        dataset: dataset to sample from (assumed to be of constant size).
        num_replicas (optional): number of participating processes; defaults
            to the distributed world size.
        rank (optional): rank of the current process within num_replicas.
        shuffle: shuffle deterministically per epoch (seeded by set_epoch).
    """
    def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = int(math.ceil((len(self.dataset) * 1.0) / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # Deterministic shuffle: every rank seeds with the same epoch.
            generator = torch.Generator()
            generator.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=generator).tolist()
        else:
            order = list(range(len(self.dataset)))
        # Pad with a prefix of itself so the list divides evenly across ranks.
        order = order + order[:self.total_size - len(order)]
        assert len(order) == self.total_size
        start = self.num_samples * self.rank
        shard = order[start:start + self.num_samples]
        assert len(shard) == self.num_samples
        return iter(shard)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
|
class NodeDistributedSampler(Sampler):
    """Node-aware distributed sampler.

    Like :class:`DistributedSampler`, but indices are first partitioned by
    ``index % local_size == local_rank`` so each node only touches the slice
    of the dataset it caches locally; the node-local slice is then strided
    across the ranks of that node. Every rank still yields exactly
    ``ceil(len(dataset) / num_replicas)`` samples.

    Arguments:
        dataset: dataset to sample from (assumed to be of constant size).
        num_replicas (optional): total number of processes; defaults to the
            distributed world size.
        rank (optional): global rank; defaults to the distributed rank.
        local_rank / local_size (optional): position and size of this node's
            process group; default to the LOCAL_RANK / LOCAL_SIZE env vars.
        shuffle: shuffle deterministically per epoch (seeded by set_epoch).
    """
    def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        if local_rank is None:
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
        if local_size is None:
            local_size = int(os.environ.get('LOCAL_SIZE', 1))
        self.dataset = dataset
        self.shuffle = shuffle
        self.num_replicas = num_replicas
        self.num_parts = local_size
        self.rank = rank
        self.local_rank = local_rank
        self.epoch = 0
        self.num_samples = int(math.ceil((len(self.dataset) * 1.0) / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.total_size_parts = (self.num_samples * self.num_replicas) // self.num_parts

    def __iter__(self):
        if self.shuffle:
            # Same seed on every rank so the global permutation agrees.
            generator = torch.Generator()
            generator.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=generator).tolist()
        else:
            order = torch.arange(len(self.dataset)).tolist()
        # Keep only the indices assigned to this node's local shard.
        order = [index for index in order if (index % self.num_parts) == self.local_rank]
        # Pad with a prefix of itself so the shard divides evenly.
        order += order[:self.total_size_parts - len(order)]
        assert len(order) == self.total_size_parts
        # Stride the node-local shard across this node's ranks.
        order = order[self.rank // self.num_parts:self.total_size_parts:self.num_replicas // self.num_parts]
        assert len(order) == self.num_samples
        return iter(order)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
|
class CocoDetection(VisionDataset):
    '`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n annFile (string): Path to json annotation file.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.ToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n '
    def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None, cache_mode=False, local_rank=0, local_size=1):
        # NOTE: positional order (root, transforms, transform, target_transform)
        # matches the legacy torchvision VisionDataset signature — do not reorder.
        super(CocoDetection, self).__init__(root, transforms, transform, target_transform)
        from pycocotools.coco import COCO
        self.coco = COCO(annFile)
        self.ids = list(sorted(self.coco.imgs.keys()))
        # cache_mode: preload image bytes into memory, sharded across the
        # node's processes by index % local_size == local_rank.
        self.cache_mode = cache_mode
        self.local_rank = local_rank
        self.local_size = local_size
        if cache_mode:
            self.cache = {}
            self.cache_images()
    def cache_images(self):
        """Read this process's shard of image files into the in-memory cache."""
        self.cache = {}
        for (index, img_id) in zip(tqdm.trange(len(self.ids)), self.ids):
            # Only cache images belonging to this process's shard.
            if ((index % self.local_size) != self.local_rank):
                continue
            path = self.coco.loadImgs(img_id)[0]['file_name']
            with open(os.path.join(self.root, path), 'rb') as f:
                self.cache[path] = f.read()
    def get_image(self, path):
        """Load an image as RGB, from the byte cache when enabled (filling misses lazily)."""
        if self.cache_mode:
            if (path not in self.cache.keys()):
                with open(os.path.join(self.root, path), 'rb') as f:
                    self.cache[path] = f.read()
            return Image.open(BytesIO(self.cache[path])).convert('RGB')
        return Image.open(os.path.join(self.root, path)).convert('RGB')
    def __getitem__(self, index):
        '\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.\n '
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = self.get_image(path)
        if (self.transforms is not None):
            (img, target) = self.transforms(img, target)
        return (img, target)
    def __len__(self):
        return len(self.ids)
|
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, max_norm: float=0):
    """Run one training epoch and return the epoch-averaged metrics.

    Batches are fed through a CUDA `data_prefetcher` so host-to-device copies
    overlap compute. Losses are weighted by ``criterion.weight_dict``; the
    *reduced* (cross-process averaged) loss is used for logging and for the
    finite-ness check, while the local loss drives the backward pass.
    Training aborts via ``sys.exit(1)`` if the reduced loss is not finite.

    Args:
        model: network to train (set to train mode here).
        criterion: loss module exposing ``weight_dict``.
        data_loader: iterable of (samples, targets) batches.
        optimizer: optimizer stepped once per batch.
        device: CUDA device batches are prefetched onto.
        epoch: epoch number, used only for the log header.
        max_norm: gradient-clipping norm; 0 disables clipping.

    Returns:
        dict mapping metric name to its global average over the epoch.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    metric_logger.add_meter('grad_norm', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    prefetcher = data_prefetcher(data_loader, device, prefetch=True)
    (samples, targets) = prefetcher.next()
    # log_every only drives the progress display; batches come from the prefetcher.
    for _ in metric_logger.log_every(range(len(data_loader)), print_freq, header):
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Total loss: weighted sum over the losses that have a weight entry.
        losses = sum(((loss_dict[k] * weight_dict[k]) for k in loss_dict.keys() if (k in weight_dict)))
        # Reduce across processes for logging/stability checks only.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        if (not math.isfinite(loss_value)):
            # Diverged (NaN/inf) — dump the per-loss breakdown and abort.
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if (max_norm > 0):
            grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        else:
            # No clipping: still compute the norm so it can be logged.
            grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(grad_norm=grad_total_norm)
        (samples, targets) = prefetcher.next()
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """Run validation: log losses and compute COCO bbox/segm (and optionally
    panoptic) metrics.

    Args:
        model: detector to evaluate (switched to eval mode).
        criterion: loss module exposing `weight_dict`; used for logging only.
        postprocessors: dict of post-processors; 'bbox' is always applied,
            'segm' and 'panoptic' are applied when present.
        data_loader: validation batches of (samples, targets).
        base_ds: COCO API dataset backing CocoEvaluator.
        device: device to move samples/targets to.
        output_dir: directory used for panoptic evaluation artifacts.

    Returns:
        (stats, coco_evaluator): `stats` maps metric names to global
        averages and additionally carries 'coco_eval_bbox' /
        'coco_eval_masks' summary lists and PQ numbers when available.
    """
    model.eval()
    criterion.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'
    iou_types = tuple((k for k in ('segm', 'bbox') if (k in postprocessors.keys())))
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    panoptic_evaluator = None
    if ('panoptic' in postprocessors.keys()):
        panoptic_evaluator = PanopticEvaluator(data_loader.dataset.ann_file, data_loader.dataset.ann_folder, output_dir=os.path.join(output_dir, 'panoptic_eval'))
    for (samples, targets) in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for (k, v) in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Reduce losses across workers; log both weighted and raw values.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        # Map predictions back to original image coordinates for COCO eval.
        orig_target_sizes = torch.stack([t['orig_size'] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if ('segm' in postprocessors.keys()):
            target_sizes = torch.stack([t['size'] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {target['image_id'].item(): output for (target, output) in zip(targets, results)}
        if (coco_evaluator is not None):
            coco_evaluator.update(res)
        if (panoptic_evaluator is not None):
            res_pano = postprocessors['panoptic'](outputs, target_sizes, orig_target_sizes)
            for (i, target) in enumerate(targets):
                # PanopticEvaluator expects COCO-style zero-padded PNG names.
                image_id = target['image_id'].item()
                file_name = f'{image_id:012d}.png'
                res_pano[i]['image_id'] = image_id
                res_pano[i]['file_name'] = file_name
            panoptic_evaluator.update(res_pano)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    if (coco_evaluator is not None):
        coco_evaluator.synchronize_between_processes()
    if (panoptic_evaluator is not None):
        panoptic_evaluator.synchronize_between_processes()
    if (coco_evaluator is not None):
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if (panoptic_evaluator is not None):
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
    if (coco_evaluator is not None):
        if ('bbox' in postprocessors.keys()):
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if ('segm' in postprocessors.keys()):
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if (panoptic_res is not None):
        stats['PQ_all'] = panoptic_res['All']
        stats['PQ_th'] = panoptic_res['Things']
        stats['PQ_st'] = panoptic_res['Stuff']
    return (stats, coco_evaluator)
|
def get_args_parser():
    """Build the argparse parser for Deformable DETR training/evaluation.

    `add_help` is disabled so this parser can be composed as a parent
    parser by a wrapping script.
    """
    p = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)
    # --- optimization ---
    p.add_argument('--lr', default=0.0002, type=float)
    p.add_argument('--lr_backbone_names', default=['backbone.0'], type=str, nargs='+')
    p.add_argument('--lr_backbone', default=2e-05, type=float)
    p.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
    p.add_argument('--lr_linear_proj_mult', default=0.1, type=float)
    p.add_argument('--batch_size', default=2, type=int)
    p.add_argument('--weight_decay', default=0.0001, type=float)
    p.add_argument('--epochs', default=50, type=int)
    p.add_argument('--lr_drop', default=40, type=int)
    p.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
    p.add_argument('--clip_max_norm', default=0.1, type=float, help='gradient clipping max norm')
    p.add_argument('--sgd', action='store_true')
    # --- model variants ---
    p.add_argument('--with_box_refine', default=False, action='store_true')
    p.add_argument('--two_stage', default=False, action='store_true')
    p.add_argument('--frozen_weights', type=str, default=None, help='Path to the pretrained model. If set, only the mask head will be trained')
    # --- backbone / positional encoding ---
    p.add_argument('--backbone', default='resnet50', type=str, help='Name of the convolutional backbone to use')
    p.add_argument('--dilation', action='store_true', help='If true, we replace stride with dilation in the last convolutional block (DC5)')
    p.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), help='Type of positional embedding to use on top of the image features')
    p.add_argument('--position_embedding_scale', default=(2 * np.pi), type=float, help='position / size * scale')
    p.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
    # --- transformer ---
    p.add_argument('--enc_layers', default=6, type=int, help='Number of encoding layers in the transformer')
    p.add_argument('--dec_layers', default=6, type=int, help='Number of decoding layers in the transformer')
    p.add_argument('--dim_feedforward', default=1024, type=int, help='Intermediate size of the feedforward layers in the transformer blocks')
    p.add_argument('--hidden_dim', default=256, type=int, help='Size of the embeddings (dimension of the transformer)')
    p.add_argument('--dropout', default=0.1, type=float, help='Dropout applied in the transformer')
    p.add_argument('--nheads', default=8, type=int, help="Number of attention heads inside the transformer's attentions")
    p.add_argument('--num_queries', default=300, type=int, help='Number of query slots')
    p.add_argument('--dec_n_points', default=4, type=int)
    p.add_argument('--enc_n_points', default=4, type=int)
    # --- segmentation / auxiliary losses / assignment ---
    p.add_argument('--masks', action='store_true', help='Train segmentation head if the flag is provided')
    p.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', help='Disables auxiliary decoding losses (loss at each layer)')
    p.add_argument('--assign_first_stage', action='store_true')
    p.add_argument('--assign_second_stage', action='store_true')
    # --- matcher costs ---
    p.add_argument('--set_cost_class', default=2, type=float, help='Class coefficient in the matching cost')
    p.add_argument('--set_cost_bbox', default=5, type=float, help='L1 box coefficient in the matching cost')
    p.add_argument('--set_cost_giou', default=2, type=float, help='giou box coefficient in the matching cost')
    # --- loss coefficients ---
    p.add_argument('--mask_loss_coef', default=1, type=float)
    p.add_argument('--dice_loss_coef', default=1, type=float)
    p.add_argument('--cls_loss_coef', default=2, type=float)
    p.add_argument('--bbox_loss_coef', default=5, type=float)
    p.add_argument('--giou_loss_coef', default=2, type=float)
    p.add_argument('--focal_alpha', default=0.25, type=float)
    # --- dataset ---
    p.add_argument('--dataset_file', default='coco')
    p.add_argument('--coco_path', default='./data/coco', type=str)
    p.add_argument('--coco_panoptic_path', type=str)
    p.add_argument('--remove_difficult', action='store_true')
    p.add_argument('--bigger', action='store_true')
    # --- runtime / checkpointing ---
    p.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    p.add_argument('--device', default='cuda', help='device to use for training / testing')
    p.add_argument('--seed', default=42, type=int)
    p.add_argument('--resume', default='', help='resume from checkpoint')
    p.add_argument('--finetune', default='', help='finetune from checkpoint')
    p.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    p.add_argument('--eval', action='store_true')
    p.add_argument('--num_workers', default=2, type=int)
    p.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
    return p
|
def main(args):
    """End-to-end entry point: build model/criterion/dataloaders, optionally
    resume or finetune from a checkpoint, then run the train/eval loop,
    writing checkpoints and logs under args.output_dir.
    """
    utils.init_distributed_mode(args)
    print('git:\n {}\n'.format(utils.get_sha()))
    if (args.frozen_weights is not None):
        assert args.masks, 'Frozen training is meant for segmentation only'
    print(args)
    device = torch.device(args.device)
    # Seed each distributed rank differently but deterministically.
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    (model, criterion, postprocessors) = build_model(args)
    model.to(device)
    model_without_ddp = model
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print('number of params:', n_parameters)
    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)
    if args.distributed:
        if args.cache_mode:
            sampler_train = samplers.NodeDistributedSampler(dataset_train)
            sampler_val = samplers.NodeDistributedSampler(dataset_val, shuffle=False)
        else:
            sampler_train = samplers.DistributedSampler(dataset_train)
            sampler_val = samplers.DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, collate_fn=utils.collate_fn, num_workers=args.num_workers, pin_memory=True)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers, pin_memory=True)
    def match_name_keywords(n, name_keywords):
        # True when any keyword is a substring of parameter name `n`.
        out = False
        for b in name_keywords:
            if (b in n):
                out = True
                break
        return out
    for (n, p) in model_without_ddp.named_parameters():
        print(n)
    # Three LR groups: main parameters, backbone, and the deformable-attention
    # linear projections (reference_points / sampling_offsets).
    param_dicts = [{'params': [p for (n, p) in model_without_ddp.named_parameters() if ((not match_name_keywords(n, args.lr_backbone_names)) and (not match_name_keywords(n, args.lr_linear_proj_names)) and p.requires_grad)], 'lr': args.lr}, {'params': [p for (n, p) in model_without_ddp.named_parameters() if (match_name_keywords(n, args.lr_backbone_names) and p.requires_grad)], 'lr': args.lr_backbone}, {'params': [p for (n, p) in model_without_ddp.named_parameters() if (match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad)], 'lr': (args.lr * args.lr_linear_proj_mult)}]
    if args.sgd:
        optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    if (args.dataset_file == 'coco_panoptic'):
        # Panoptic training still evaluates box AP on the COCO val split.
        coco_val = datasets.coco.build('val', args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)
    if (args.frozen_weights is not None):
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])
    output_dir = Path(args.output_dir)
    if args.finetune:
        # Finetuning: load weights but drop the classification heads, since
        # the number of classes may differ from the pretraining dataset.
        checkpoint = torch.load(args.finetune, map_location='cpu')
        state_dict = checkpoint['model']
        for k in list(state_dict.keys()):
            if ('class_embed' in k):
                print('removing', k)
                del state_dict[k]
        (missing_keys, unexpected_keys) = model_without_ddp.load_state_dict(state_dict, strict=False)
        # Profiler bookkeeping buffers (total_params/total_ops) are not real
        # parameters; don't report them as unexpected.
        unexpected_keys = [k for k in unexpected_keys if (not (k.endswith('total_params') or k.endswith('total_ops')))]
        if (len(missing_keys) > 0):
            print('Missing Keys: {}'.format(missing_keys))
        if (len(unexpected_keys) > 0):
            print('Unexpected Keys: {}'.format(unexpected_keys))
        print('finetuning from epoch', checkpoint['epoch'])
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        (missing_keys, unexpected_keys) = model_without_ddp.load_state_dict(checkpoint['model'], strict=False)
        unexpected_keys = [k for k in unexpected_keys if (not (k.endswith('total_params') or k.endswith('total_ops')))]
        if (len(missing_keys) > 0):
            print('Missing Keys: {}'.format(missing_keys))
        if (len(unexpected_keys) > 0):
            print('Unexpected Keys: {}'.format(unexpected_keys))
        if ((not args.eval) and ('optimizer' in checkpoint) and ('lr_scheduler' in checkpoint) and ('epoch' in checkpoint)):
            import copy
            # Restore optimizer state but keep the freshly configured LRs
            # instead of the checkpointed ones.
            p_groups = copy.deepcopy(optimizer.param_groups)
            optimizer.load_state_dict(checkpoint['optimizer'])
            for (pg, pg_old) in zip(optimizer.param_groups, p_groups):
                pg['lr'] = pg_old['lr']
                pg['initial_lr'] = pg_old['initial_lr']
            print(optimizer.param_groups)
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            # Deliberate hack (flag is set unconditionally): the current CLI
            # lr_drop overrides whatever was saved in the resumed scheduler.
            args.override_resumed_lr_drop = True
            if args.override_resumed_lr_drop:
                print('Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.')
                lr_scheduler.step_size = args.lr_drop
                lr_scheduler.base_lrs = list(map((lambda group: group['initial_lr']), optimizer.param_groups))
            lr_scheduler.step(lr_scheduler.last_epoch)
            args.start_epoch = (checkpoint['epoch'] + 1)
        # NOTE(review): this runs a full validation pass on the resumed
        # weights before training starts (sanity check) — can be slow;
        # confirm it is intended.
        if (not args.eval):
            (test_stats, coco_evaluator) = evaluate(model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir)
    if args.eval:
        (test_stats, coco_evaluator) = evaluate(model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval['bbox'].eval, (output_dir / 'eval.pth'))
        return
    print('Start training')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [(output_dir / 'checkpoint.pth')]
            # Keep a permanent checkpoint at every LR drop and every 5 epochs.
            if ((((epoch + 1) % args.lr_drop) == 0) or (((epoch + 1) % 5) == 0)):
                checkpoint_paths.append((output_dir / f'checkpoint{epoch:04}.pth'))
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args}, checkpoint_path)
        (test_stats, coco_evaluator) = evaluate(model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir)
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
        if (args.output_dir and utils.is_main_process()):
            with (output_dir / 'log.txt').open('a') as f:
                f.write((json.dumps(log_stats) + '\n'))
            if (coco_evaluator is not None):
                (output_dir / 'eval').mkdir(exist_ok=True)
                if ('bbox' in coco_evaluator.coco_eval):
                    filenames = ['latest.pth']
                    if ((epoch % 50) == 0):
                        filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval['bbox'].eval, ((output_dir / 'eval') / name))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
|
def build_model(args):
    """Thin indirection over the module-level `build` factory.

    Callers (see `main`) unpack the result as (model, criterion,
    postprocessors).
    """
    return build(args)
|
class FrozenBatchNorm2d(torch.nn.Module):
    """BatchNorm2d with frozen batch statistics and affine parameters.

    All four tensors (weight, bias, running_mean, running_var) are
    registered as buffers, so they travel with the state dict but are never
    updated by the optimizer. An epsilon is added before the reciprocal
    square root; without it, backbones other than
    torchvision.models.resnet[18,34,50,101] produce NaNs.
    """

    def __init__(self, n, eps=1e-05):
        super().__init__()
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))
        self.eps = eps

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # Ordinary BatchNorm2d checkpoints carry a tracker buffer that this
        # frozen variant does not have; drop it before loading.
        state_dict.pop(prefix + 'num_batches_tracked', None)
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Fold the frozen statistics into a single per-channel scale/shift.
        shape = (1, -1, 1, 1)
        scale = self.weight.reshape(shape) * (self.running_var.reshape(shape) + self.eps).rsqrt()
        shift = self.bias.reshape(shape) - self.running_mean.reshape(shape) * scale
        return x * scale + shift
|
class BackboneBase(nn.Module):
    """Wraps a ResNet-style backbone, freezing parameters and exposing one
    or several intermediate feature maps as NestedTensors.
    """

    def __init__(self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool):
        super().__init__()
        # Freeze everything except layer2-4; freeze those too when the
        # backbone is not being trained at all.
        for name, param in backbone.named_parameters():
            trainable = train_backbone and any(tag in name for tag in ('layer2', 'layer3', 'layer4'))
            if not trainable:
                param.requires_grad_(False)
        if return_interm_layers:
            return_layers = {'layer2': '0', 'layer3': '1', 'layer4': '2'}
            self.strides = [8, 16, 32]
            self.num_channels = [512, 1024, 2048]
        else:
            return_layers = {'layer4': '0'}
            self.strides = [32]
            self.num_channels = [2048]
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)

    def forward(self, tensor_list: NestedTensor):
        features = self.body(tensor_list.tensors)
        out: Dict[(str, NestedTensor)] = {}
        for name, feat in features.items():
            mask = tensor_list.mask
            assert (mask is not None)
            # Downsample the padding mask to the feature-map resolution.
            resized = F.interpolate(mask[None].float(), size=feat.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(feat, resized)
        return out
|
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm layers."""

    def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool):
        # Pretrained weights are only fetched on the main process to avoid
        # concurrent downloads under distributed launch.
        resnet = getattr(torchvision.models, name)(replace_stride_with_dilation=[False, False, dilation], pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
        assert (name not in ('resnet18', 'resnet34')), 'number of channels are hard coded'
        super().__init__(resnet, train_backbone, return_interm_layers)
        if dilation:
            # DC5: the last stage keeps spatial resolution, halving its stride.
            self.strides[-1] = self.strides[-1] // 2
|
class SwinBackbone(nn.Module):
    """Swin-L backbone exposing the res3/res4/res5 feature maps as
    NestedTensors (strides 8/16/32).
    """

    def __init__(self):
        super().__init__()
        self.body = get_swinl()
        self.features = ['res3', 'res4', 'res5']
        self.strides = [8, 16, 32]
        self.num_channels = [384, 768, 1536]

    def forward(self, tensor_list: NestedTensor):
        feature_maps = self.body(tensor_list.tensors)
        padding_mask = tensor_list.mask[None]
        assert (padding_mask is not None)
        out: Dict[(str, NestedTensor)] = {}
        for name in self.features:
            feat = feature_maps[name]
            # Downsample the padding mask to each feature-map resolution.
            resized = F.interpolate(padding_mask.float(), size=feat.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(feat, resized)
        return out
|
class Joiner(nn.Sequential):
    """Sequential pair (backbone, position_embedding): runs the backbone and
    attaches a position encoding to each returned feature map.
    """

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)
        self.strides = backbone.strides
        self.num_channels = backbone.num_channels

    def forward(self, tensor_list: NestedTensor):
        feature_maps = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        # Iterate feature maps in name order; the position encoding is cast
        # to the feature dtype so it can be summed with the features later.
        for _, feat in sorted(feature_maps.items()):
            out.append(feat)
            pos.append(self[1](feat).to(feat.tensors.dtype))
        return (out, pos)
|
def build_backbone(args):
    """Build the backbone + positional-encoding Joiner module from CLI args.

    Swin backbones are selected by name substring; ResNets honor the
    lr_backbone / masks / num_feature_levels / dilation settings.
    """
    position_embedding = build_position_encoding(args)
    if 'swin' in args.backbone:
        backbone = SwinBackbone()
    else:
        train_backbone = args.lr_backbone > 0
        # Intermediate layers are needed for masks or multi-scale features.
        return_interm_layers = args.masks or (args.num_feature_levels > 1)
        backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
    return Joiner(backbone, position_embedding)
|
def get_extensions():
    """Collect the C++/CUDA sources for the MultiScaleDeformableAttention
    extension and return the list of setuptools extension modules.

    Returns:
        list with a single CUDAExtension for 'MultiScaleDeformableAttention'.

    Raises:
        NotImplementedError: when no CUDA toolchain is available; the
            CPU-only build is not supported.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'src')
    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
    sources = (main_file + source_cpu)
    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []
    if (torch.cuda.is_available() and (CUDA_HOME is not None)):
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [('WITH_CUDA', None)]
        # Disable half-precision operators; the kernels are not built for them.
        extra_compile_args['nvcc'] = ['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
    else:
        # Bug fix: original message was misspelled ('Cuda is not availabel').
        raise NotImplementedError('CUDA is not available')
    # glob already returned absolute paths, so this join is a no-op kept for
    # compatibility with relative extensions_dir configurations.
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [extension('MultiScaleDeformableAttention', sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args)]
    return ext_modules
|
@torch.no_grad()
def check_forward_equal_with_pytorch_double():
    """Compare the CUDA MSDeformAttn forward pass against the pure-PyTorch
    reference in float64 and print the max absolute/relative error.

    Relies on module-level globals (N, S, M, D, Lq, L, P, shapes,
    level_start_index) set up by the surrounding test script.
    """
    vals = 0.01 * torch.rand(N, S, M, D).cuda()
    locs = torch.rand(N, Lq, M, L, P, 2).cuda()
    weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    # Normalize so the weights sum to one over (levels, points).
    weights = weights / weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2
    ref = ms_deform_attn_core_pytorch(vals.double(), shapes, locs.double(), weights.double()).detach().cpu()
    cuda_out = MSDeformAttnFunction.apply(vals.double(), shapes, level_start_index, locs.double(), weights.double(), im2col_step).detach().cpu()
    fwdok = torch.allclose(cuda_out, ref)
    diff = (cuda_out - ref).abs()
    max_abs_err = diff.max()
    max_rel_err = (diff / ref.abs()).max()
    print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
|
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
    """Compare the CUDA MSDeformAttn forward pass against the pure-PyTorch
    reference in float32 (looser tolerances) and print the max errors.

    Relies on module-level globals (N, S, M, D, Lq, L, P, shapes,
    level_start_index) set up by the surrounding test script.
    """
    vals = 0.01 * torch.rand(N, S, M, D).cuda()
    locs = torch.rand(N, Lq, M, L, P, 2).cuda()
    weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    # Normalize so the weights sum to one over (levels, points).
    weights = weights / weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2
    ref = ms_deform_attn_core_pytorch(vals, shapes, locs, weights).detach().cpu()
    cuda_out = MSDeformAttnFunction.apply(vals, shapes, level_start_index, locs, weights, im2col_step).detach().cpu()
    fwdok = torch.allclose(cuda_out, ref, rtol=0.01, atol=0.001)
    diff = (cuda_out - ref).abs()
    max_abs_err = diff.max()
    max_rel_err = (diff / ref.abs()).max()
    print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
|
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
    """Numerically gradcheck the CUDA MSDeformAttn backward pass.

    Args:
        channels: per-head feature dimension used for the check.
        grad_value / grad_sampling_loc / grad_attn_weight: which inputs
            require gradients during the check.

    Relies on module-level globals (N, S, M, Lq, L, P, shapes,
    level_start_index) set up by the surrounding test script.
    """
    vals = 0.01 * torch.rand(N, S, M, channels).cuda()
    locs = torch.rand(N, Lq, M, L, P, 2).cuda()
    weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    # Normalize so the weights sum to one over (levels, points).
    weights /= weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2
    vals.requires_grad = grad_value
    locs.requires_grad = grad_sampling_loc
    weights.requires_grad = grad_attn_weight
    gradok = gradcheck(MSDeformAttnFunction.apply, (vals.double(), shapes, level_start_index, locs.double(), weights.double(), im2col_step))
    print(f'* {gradok} check_gradient_numerical(D={channels})')
|
def parse_args(args=None):
    """
    Helper function parsing the command line options.

    Args:
        args: optional list of argument strings to parse; defaults to
            sys.argv[1:]. Added (backward-compatibly) so the parser can be
            driven programmatically and unit-tested.

    @retval parsed argparse.Namespace
    """
    parser = ArgumentParser(description='PyTorch distributed training launch helper utilty that will spawn up multiple distributed processes')
    parser.add_argument('--nnodes', type=int, default=1, help='The number of nodes to use for distributed training')
    parser.add_argument('--node_rank', type=int, default=0, help='The rank of the node for multi-node distributed training')
    parser.add_argument('--nproc_per_node', type=int, default=1, help='The number of processes to launch on each node, for GPU training, this is recommended to be set to the number of GPUs in your system so that each process can be bound to a single GPU.')
    parser.add_argument('--master_addr', default='127.0.0.1', type=str, help="Master node (rank 0)'s address, should be either the IP address or the hostname of node 0, for single node multi-proc training, the --master_addr can simply be 127.0.0.1")
    parser.add_argument('--master_port', default=29500, type=int, help="Master node (rank 0)'s free port that needs to be used for communciation during distributed training")
    parser.add_argument('training_script', type=str, help='The full path to the single GPU training program/script to be launched in parallel, followed by all the arguments for the training script')
    # REMAINDER swallows everything after the script path, verbatim.
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args(args)
|
def main():
    """torch.distributed-style launcher: spawn nproc_per_node copies of the
    training script (one per local rank) and wait for all of them.

    Children inherit MASTER_ADDR / MASTER_PORT / WORLD_SIZE plus a
    per-process RANK and LOCAL_RANK through the environment. Raises
    CalledProcessError if any child exits non-zero.
    """
    args = parse_args()
    # World size = processes per node * number of nodes.
    dist_world_size = (args.nproc_per_node * args.nnodes)
    current_env = os.environ.copy()
    current_env['MASTER_ADDR'] = args.master_addr
    current_env['MASTER_PORT'] = str(args.master_port)
    current_env['WORLD_SIZE'] = str(dist_world_size)
    processes = []
    for local_rank in range(0, args.nproc_per_node):
        # Global rank = this node's offset plus the local rank.
        dist_rank = ((args.nproc_per_node * args.node_rank) + local_rank)
        current_env['RANK'] = str(dist_rank)
        current_env['LOCAL_RANK'] = str(local_rank)
        cmd = ([args.training_script] + args.training_script_args)
        process = subprocess.Popen(cmd, env=current_env)
        processes.append(process)
    for process in processes:
        process.wait()
        if (process.returncode != 0):
            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=process.args)
|
def embed(params, data, policy, states, k=100):
    """Embed a policy as the concatenation of its actions on `states`.

    Only the 'a_s' (action-state) embedding is implemented; for any other
    setting the function falls through and returns None, matching the
    original behavior. `data` and `k` are unused in this embedding mode.
    """
    if params['embedding'] != 'a_s':
        return None
    actions = [policy.forward(s, eval=False) for s in states]
    return np.concatenate(actions, axis=0)
|
def get_experiment(params):
    """Fill `params` in place with per-environment hyperparameters.

    Replaces the original copy-pasted if/elif chain with a preset table;
    the values written are identical. Unknown environment names leave
    `params` untouched.

    Args:
        params: experiment config dict containing at least 'env_name'.

    Returns:
        The same dict, updated in place (mirroring the original behavior).
    """
    # Shared across every known environment.
    common = {'layers': 2, 'sensings': 100, 'learning_rate': 0.05, 'sigma': 0.1}
    # Environment-specific network width and rollout length.
    presets = {
        'HalfCheetah-v1': {'h_dim': 32, 'steps': 1000},
        'HalfCheetah-v2': {'h_dim': 32, 'steps': 1000},
        'Walker2d-v2': {'h_dim': 32, 'steps': 1000},
        'Swimmer-v2': {'h_dim': 16, 'steps': 1000},
        'BipedalWalker-v2': {'h_dim': 32, 'steps': 1600},
        'point-v0': {'h_dim': 16, 'steps': 50},
    }
    preset = presets.get(params['env_name'])
    if preset is not None:
        params.update(common)
        params.update(preset)
    return params
|
class Learner(object):
    """Population-based ES learner.

    Keeps several policies ("agents") with their Adam moments and
    behavioral embeddings, plus a pool of Ray rollout workers. Agent
    selection balances recent reward against behavioral distance.
    """

    def __init__(self, params):
        params['zeros'] = False
        n = params['num_agents']
        # NOTE: params['seed'] must be re-read each iteration — get_policy
        # writes the passed seed back into params, so seeds compound.
        self.agents = {i: get_policy(params, params['seed'] + 1000 * i) for i in range(n)}
        self.timesteps = 0
        self.w_reward = 1
        self.w_size = 0
        self.dists = 0
        self.adam_params = {i: [0, 0] for i in range(n)}  # per-agent [m, v]
        self.buffer = []
        self.states = []
        self.embeddings = {i: [] for i in range(n)}
        self.best = {i: -9999 for i in range(n)}
        self.reward = {i: [-9999] for i in range(n)}
        self.min_dist = 0
        self.num_workers = params['num_workers']
        self.init_workers(params)

    def init_workers(self, params):
        """Create the shared noise table and the Ray rollout workers."""
        deltas_id = create_shared_noise.remote()
        self.deltas = SharedNoiseTable(ray.get(deltas_id), seed=params['seed'] + 3)
        self.workers = []
        for i in range(params['num_workers']):
            self.workers.append(Worker.remote(params['seed'] + 7 * i, env_name=params['env_name'], policy=params['policy'], h_dim=params['h_dim'], layers=params['layers'], deltas=deltas_id, rollout_length=params['steps'], delta_std=params['sigma'], num_evals=params['num_evals'], ob_filter=params['ob_filter']))

    def get_agent(self):
        """Load the currently selected agent into the working slots."""
        self.policy = deepcopy(self.agents[self.agent])
        self.embedding = self.embeddings[self.agent].copy()
        (self.m, self.v) = self.adam_params[self.agent]

    def update_agent(self):
        """Write the working policy/embedding/Adam state back to the pool."""
        self.agents[self.agent] = deepcopy(self.policy)
        self.embeddings[self.agent] = self.embedding.copy()
        self.adam_params[self.agent] = [self.m, self.v]

    def update_embeddings(self, params, data=[]):
        """Recompute every agent's behavioral embedding on self.selected."""
        for idx in range(params['num_agents']):
            agent = self.agents[idx]
            if params['embedding'] == 'a_s':
                self.embeddings[idx] = [embed(params, [], agent, self.selected)]
            else:
                self.embeddings[idx] = [embed(params, batch, agent, self.selected) for batch in data[idx][1]]

    def calc_pairwise_dists(self, params):
        """Pairwise L2 distances between agent embeddings.

        Sets self.dists (mean over the full matrix incl. the zero
        diagonal), self.min_dist (smallest off-diagonal entry) and
        self.dist_vec (row means normalized to sum to one).
        """
        n = params['num_agents']
        dists = np.zeros([n, n])
        min_dist = 999
        for a in range(n):
            for b in range(n):
                dists[a][b] = np.linalg.norm(self.embeddings[a][0] - self.embeddings[b][0])
                if (a != b) and (dists[a][b] < min_dist):
                    min_dist = dists[a][b]
        self.dists = np.mean(dists)
        self.min_dist = min_dist
        self.dist_vec = np.mean(dists, axis=1)
        self.dist_vec /= np.sum(self.dist_vec)

    def select_agent(self):
        """Pick the next agent to train.

        Once every agent has at least one real score, sample by the mean of
        rank-normalized reward and distance; until then, sample purely by
        behavioral distance.
        """
        histories = list(self.reward.values())
        if min(hist[-1] for hist in histories) > -9999:
            reward_vec = rankdata([max(hist[-5:]) for hist in histories])
            reward_vec /= np.sum(reward_vec)
            dist_vec = rankdata(self.dist_vec)
            dist_vec /= np.sum(dist_vec)
            probs = (dist_vec + reward_vec) / 2
            self.agent = np.argmax(np.random.multinomial(1, probs))
        else:
            self.agent = np.argmax(np.random.multinomial(1, self.dist_vec))
|
def get_policy(params, seed=None):
    """Build a FullyConnected policy from `params`.

    If `seed` is given it overrides params['seed'] (and is written back
    into params as a side effect, preserving the original behavior that
    callers such as Learner rely on).

    Bug fix: the original guard was `if seed:`, which silently ignored an
    explicit seed of 0; `is not None` honors it.
    """
    if (seed is not None):
        params['seed'] = seed
    return FullyConnected(params, params['seed'])
|
class FullyConnected(object):
def __init__(self, params, seed=0):
np.random.seed(seed)
self.layers = params['layers']
self.hidden = {}
self.bias = {}
self.observation_filter = get_filter(params['ob_filter'], shape=(params['ob_dim'],))
self.update_filter = True
self.hidden['h1'] = (np.random.randn(params['h_dim'], params['ob_dim']) / np.sqrt((params['h_dim'] * params['ob_dim'])))
self.bias['b1'] = (np.random.randn(params['h_dim']) / np.sqrt(params['h_dim']))
if (params['layers'] > 1):
for i in range(2, (params['layers'] + 1)):
self.hidden[('h%s' % str(i))] = (np.random.randn(params['h_dim'], params['h_dim']) / np.sqrt((params['h_dim'] * params['h_dim'])))
self.bias[('b%s' % str(i))] = (np.random.randn(params['h_dim']) / np.sqrt(params['h_dim']))
self.hidden['h999'] = (np.random.randn(params['ac_dim'], params['h_dim']) / np.sqrt((params['ac_dim'] * params['h_dim'])))
self.w_hidden = np.concatenate([self.hidden[x].reshape(self.hidden[x].size) for x in self.hidden.keys()])
self.w_bias = np.concatenate([self.bias[x].reshape(self.bias[x].size) for x in self.bias.keys()])
self.params = np.concatenate((self.w_hidden, self.w_bias))
self.used = 1
self.N = self.params.size
def get_observation_filter(self):
return self.observation_filter
def get_weights_plus_stats(self):
(mu, std) = self.observation_filter.get_stats()
aux = np.asarray([self.weights, mu, std])
return aux
def forward(self, x, eval=True):
x = self.observation_filter(x, update=self.update_filter)
self.used = 0
a = x.copy()
for i in range(1, (self.layers + 1)):
a = np.tanh((np.dot(self.hidden[('h%s' % str(i))], a) + self.bias[('b%s' % str(i))]))
action = np.tanh(np.dot(self.hidden['h999'], a))
return action
def update(self, w):
w_hidden = w[:self.w_hidden.size]
w = w[self.w_hidden.size:]
w_bias = w
for i in range(1, len(self.hidden.keys())):
update = w_hidden[:self.hidden[('h%s' % i)].size]
w_hidden = w_hidden[self.hidden[('h%s' % i)].size:]
self.hidden[('h%s' % i)] = update.reshape(self.hidden[('h%s' % i)].shape)
self.hidden['h999'] = w_hidden.reshape(self.hidden['h999'].shape)
for i in range(1, (len(self.bias.keys()) + 1)):
update = w_bias[:self.bias[('b%s' % i)].size]
w_bias = w_bias[self.bias[('b%s' % i)].size:]
self.bias[('b%s' % i)] = update.reshape(self.bias[('b%s' % i)].shape)
self.w_hidden = np.concatenate([self.hidden[x].reshape(self.hidden[x].size) for x in self.hidden.keys()])
self.w_bias = np.concatenate([self.bias[x].reshape(self.bias[x].size) for x in self.bias.keys()])
self.params = np.concatenate((self.w_hidden, self.w_bias))
def rollout(self, env, steps, incl_data=False, seed=0, train=True):
    """Run one episode in `env` and return the total reward.

    When `incl_data` is True, also return the list of per-step
    [state, reward, action] triples. `train` is accepted for interface
    compatibility but is unused. Environments exposing a `tasks` attribute
    are assumed to manage their own seeding and are not re-seeded here.
    """
    if not hasattr(env, 'tasks'):
        env.seed(seed)
    obs = env.reset()
    env._max_episode_steps = steps  # cap episode length (set after reset, as upstream)
    episode_reward = 0
    trajectory = []
    done = False
    while not done:
        act = self.forward(obs)
        # Clip into the action box and flatten to a 1-D vector.
        act = np.clip(act, env.action_space.low[0], env.action_space.high[0])
        act = act.reshape(len(act))
        obs, r, done, _ = env.step(act)
        episode_reward += r
        trajectory.append([obs, r, act])
    # Commit the observation filter's accumulated running statistics.
    self.observation_filter.stats_increment()
    if incl_data:
        return (episode_reward, trajectory)
    return episode_reward
|
class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """2-D point-mass MuJoCo task: reward is negative distance to a fixed goal.

    Episodes never terminate from within `step`; an external step cap is
    expected (e.g. `_max_episode_steps`).
    """

    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'point.xml', 2)
        utils.EzPickle.__init__(self)

    def step(self, action):
        """Apply a clipped action and return (obs, reward, done=False, info)."""
        clipped = np.clip(action, -1.0, 1.0)
        self.do_simulation(clipped, self.frame_skip)
        obs = self._get_obs()
        position = obs[:2]  # first two qpos entries are the point's (x, y)
        goal = [25.0, 0.0]
        reward = -np.linalg.norm(goal - position)
        return (obs, reward, False, {})

    def _get_obs(self):
        # Observation = concatenated joint positions and velocities.
        return np.concatenate([self.sim.data.qpos.flat, self.sim.data.qvel.flat])

    def reset_model(self):
        """Re-initialize state with small uniform position / Gaussian velocity noise."""
        qpos = self.init_qpos + self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
        self.set_state(qpos, qvel)
        return self._get_obs()

    def viewer_setup(self):
        self.viewer.cam.distance = self.model.stat.extent * 0.5
|
def select_states(master, params, states):
    """Subsample `states` down to the count encoded in params['states'].

    params['states'] has the form '<mode>-<k>' (e.g. 'random-20'). When k is
    smaller than the number of available states, a random subset of size k is
    returned; otherwise the full list is returned unchanged.

    `master` is unused but kept for interface compatibility with callers.
    """
    # Parse the target count once instead of twice as before.
    k = int(params['states'].split('-')[1])
    if k < len(states):
        return sample(states, k)
    return states
|
def reset_ray(master, params):
    """Tear down and re-initialize the Ray runtime to flush accumulated GCS state.

    `master` and `params` are unused here; the signature matches the other
    top-level helpers. NOTE(review): `ray.disconnect`, `plasma_directory`, and
    the `ray.experimental` flushing APIs exist only in older Ray releases —
    confirm against the project's pinned ray version.
    """
    ray.disconnect()
    ray.shutdown()
    # Give the old Ray processes a moment to release resources before re-init.
    time.sleep(5)
    # Temporarily unset the new-GCS flag so init uses the plain code path.
    del os.environ['RAY_USE_NEW_GCS']
    ray.init(plasma_directory='/tmp')
    os.environ['RAY_USE_NEW_GCS'] = 'True'
    # Flush the GCS aggressively so its memory footprint stays bounded.
    flush_policy = ray.experimental.SimpleGcsFlushPolicy(flush_period_secs=0.1)
    ray.experimental.set_flushing_policy(flush_policy)
|
def train(params):
    """Run the novelty-aware ES training loop for params['max_iter'] iterations.

    Builds a Learner holding params['num_agents'] policies, alternates gradient
    estimation (population-level when more than one agent, individual
    otherwise), adapts the novelty weight via a Bayesian bandit when
    params['w_nov'] < 0, and writes per-iteration metrics to
    data/<dir>/results/Seed<seed>.csv every iteration.

    NOTE(review): the source arrived with flattened indentation; the block
    structure below was reconstructed from the statement semantics — confirm
    against the original repository.
    """
    env = gym.make(params['env_name'])
    # Infer observation/action dimensionality from the environment spaces.
    params['ob_dim'] = env.observation_space.shape[0]
    params['ac_dim'] = env.action_space.shape[0]
    master = Learner(params)
    n_eps = 0          # cumulative episode count
    n_iter = 0         # training iteration counter
    ts_cumulative = 0  # cumulative environment timesteps
    (ts, rollouts, rewards, max_rwds, dists, min_dists, agents, lambdas) = ([0], [0], [], [], [], [], [], [])
    params['num_sensings'] = params['sensings']
    master.agent = 0
    # Initial rollout of every agent to seed the behavioral state pool.
    population = [master.agents[x].rollout(env, params['steps'], incl_data=True) for x in master.agents.keys()]
    all_states = [s[0] for x in population for s in x[1]]
    master.selected = select_states(master, params, all_states)
    master.update_embeddings(params, population)
    master.calc_pairwise_dists(params)
    master.select_agent()
    master.get_agent()
    reward = master.policy.rollout(env, params['steps'], incl_data=False)
    rewards.append(reward)
    agents.append(master.agent)
    dists.append(master.dists)
    max_reward = reward
    max_rwds.append(max_reward)
    min_dists.append(master.min_dist)
    # A negative novelty weight requests bandit-adapted lambda; start at 0.
    if (params['w_nov'] < 0):
        bb = BayesianBandits()
        params['w_nov'] = 0
    lambdas.append(params['w_nov'])
    while (n_iter < params['max_iter']):
        print(('Iter: %s, Eps: %s, Mean: %s, Max: %s, Best: %s, MeanD: %s, MinD: %s, Lam: %s' % (n_iter, n_eps, np.round(reward, 4), np.round(max_reward, 4), master.agent, np.round(master.dists, 4), np.round(master.min_dist, 4), params['w_nov'])))
        # After the first iteration, re-pick the agent to update from the
        # pairwise behavioral distances.
        if ((n_iter > 0) & (params['num_agents'] > 1)):
            master.calc_pairwise_dists(params)
            master.select_agent()
            master.get_agent()
        params['n_iter'] = n_iter
        if (params['num_agents'] > 1):
            (gradient, timesteps) = population_update(master, params)
            # Antithetic sensing: 2 rollouts per perturbation, per agent.
            n_eps += ((2 * params['num_sensings']) * params['num_agents'])
        else:
            (gradient, timesteps) = individual_update(master, params)
            n_eps += (2 * params['num_sensings'])
        ts_cumulative += timesteps
        all_states += master.states
        # Keep the state pool bounded by random subsampling.
        if (params['num_sensings'] < len(all_states)):
            all_states = sample(all_states, params['num_sensings'])
        # Normalize the gradient; the epsilon guards against a zero norm.
        gradient /= ((np.linalg.norm(gradient) / master.policy.N) + 1e-08)
        n_iter += 1
        update = Adam(gradient, master, params['learning_rate'], n_iter)
        (rwds, trajectories) = ([], [])
        if (params['num_evals'] > 0):
            # Shared evaluation seeds so agents are compared on equal footing.
            seeds = [int((np.random.uniform() * 10000)) for _ in range(params['num_evals'])]
        for i in range(params['num_agents']):
            master.agent = i
            master.get_agent()
            # Apply agent i's slice of the concatenated update vector.
            master.policy.update((master.policy.params + update[(i * master.policy.N):((i + 1) * master.policy.N)]))
            if (params['num_evals'] > 0):
                reward = 0
                for j in range(params['num_evals']):
                    (r, traj) = master.policy.rollout(env, params['steps'], incl_data=True, seed=seeds[j])
                    reward += r
                reward /= params['num_evals']
            else:
                (reward, traj) = master.policy.rollout(env, params['steps'], incl_data=True)
            rwds.append(reward)
            trajectories.append(traj)
            # Persist the best weights seen so far for this agent.
            if (reward > master.best[i]):
                master.best[i] = reward
                np.save(('data/%s/weights/Seed%s_Agent%s' % (params['dir'], params['seed'], i)), master.policy.params)
            master.reward[i].append(reward)
            master.update_agent()
        reward = np.mean(rwds)
        max_reward = max(rwds)
        # Track the best-performing agent's trajectory and refresh embeddings.
        traj = trajectories[np.argmax(rwds)]
        master.agent = np.argmax(rwds)
        master.selected = select_states(master, params, all_states)
        master.update_embeddings(params)
        master.embedding = embed(params, traj, master.policy, master.selected)
        rewards.append(reward)
        max_rwds.append(max_reward)
        master.reward[master.agent].append(reward)
        if (reward > master.best[master.agent]):
            master.best[master.agent] = reward
            np.save(('data/%s/weights/Seed%s_Agent%s' % (params['dir'], params['seed'], master.agent)), master.policy.params)
        # `bb` only exists when the bandit adapts lambda; NameError otherwise.
        try:
            bb.update_dists(reward)
            params['w_nov'] = bb.sample()
        except NameError:
            pass
        lambdas.append(params['w_nov'])
        rollouts.append(n_eps)
        agents.append(master.agent)
        dists.append(master.dists)
        min_dists.append(master.min_dist)
        ts.append(ts_cumulative)
        master.update_agent()
        # Periodically restart Ray to flush GCS memory, then respawn workers.
        if ((n_iter % params['flush']) == 0):
            reset_ray(master, params)
            master.init_workers(params)
        # Write results every iteration so progress survives crashes.
        out = pd.DataFrame({'Rollouts': rollouts, 'Reward': rewards, 'Max': max_rwds, 'Timesteps': ts, 'Dists': dists, 'Min_Dist': min_dists, 'Agent': agents, 'Lambda': lambdas})
        out.to_csv(('data/%s/results/Seed%s.csv' % (params['dir'], params['seed'])), index=False)
|
def main():
    """Parse CLI arguments, set up Ray and the output directory tree, run training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, default='point-v0')
    parser.add_argument('--num_agents', '-na', type=int, default=5)
    parser.add_argument('--seed', '-sd', type=int, default=0)
    parser.add_argument('--max_iter', '-it', type=int, default=2000)
    parser.add_argument('--policy', '-po', type=str, default='FC')
    parser.add_argument('--embedding', '-em', type=str, default='a_s')
    parser.add_argument('--num_workers', '-nw', type=int, default=4)
    parser.add_argument('--filename', '-f', type=str, default='')
    parser.add_argument('--num_evals', '-ne', type=int, default=0)
    parser.add_argument('--flush', '-fl', type=int, default=1000)
    parser.add_argument('--ob_filter', '-ob', type=str, default='MeanStdFilter')
    # Negative w_nov asks train() to adapt the novelty weight via a bandit.
    parser.add_argument('--w_nov', '-wn', type=float, default=(-1))
    parser.add_argument('--dpp_kernel', '-ke', type=str, default='rbf')
    parser.add_argument('--states', '-ss', type=str, default='random-20')
    parser.add_argument('--update_states', '-us', type=int, default=20)
    args = parser.parse_args()
    # get_experiment fills in derived settings (layers, h_dim, learning_rate, ...).
    params = get_experiment(vars(args))
    ray.init()
    os.environ['RAY_USE_NEW_GCS'] = 'True'
    # Tag the output dir with the state-selection mode only when novelty is on.
    state_word = str(params['states'].split('-')[0]) if (params['w_nov'] > 0) else ''
    params['dir'] = (
        f"{params['env_name']}_Net{params['layers']}x{params['h_dim']}"
        f"_Agents{params['num_agents']}_Novelty{params['w_nov']}{state_word}"
        f"kernel_{params['dpp_kernel']}_lr{params['learning_rate']}"
        f"_{params['filename']}{params['ob_filter']}"
    )
    # Create the output tree idempotently. The previous guard skipped the
    # weights/ and results/ subdirs whenever the parent already existed,
    # which crashed later np.save/to_csv calls.
    for sub in ('', '/weights', '/results'):
        os.makedirs('data/' + params['dir'] + sub, exist_ok=True)
    train(params)
|
def batched_weighted_sum(weights, vecs, batch_size):
    """Compute sum_i weights[i] * vecs[i] in chunks of at most `batch_size`.

    Batching keeps the intermediate arrays passed to np.dot small. Returns a
    tuple (total, count) where count is the number of items consumed.
    """
    total = 0
    count = 0
    batch_pairs = zip(itergroups(weights, batch_size), itergroups(vecs, batch_size))
    for w_batch, v_batch in batch_pairs:
        assert len(w_batch) == len(v_batch) <= batch_size
        w_arr = np.asarray(w_batch, dtype=np.float64)
        v_arr = np.asarray(v_batch, dtype=np.float64)
        total += np.dot(w_arr, v_arr)
        count += len(w_batch)
    return (total, count)
|
def itergroups(items, group_size):
    """Yield tuples of up to `group_size` consecutive elements from `items`.

    The final tuple may be shorter when len(items) is not a multiple of
    `group_size`.
    """
    assert group_size >= 1
    bucket = []
    for element in items:
        bucket.append(element)
        if len(bucket) == group_size:
            yield tuple(bucket)
            bucket = []
    if bucket:
        yield tuple(bucket)
|
def evaluate(env, params, p):
    """Run one data-collecting rollout of policy `p` in `env`.

    Returns whatever p.rollout returns with incl_data=True, i.e. a
    (total_reward, trajectory) pair.
    """
    steps = params['steps']
    return p.rollout(env, steps, incl_data=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.