code
stringlengths
17
6.64M
class LeNet(nn.Module):
    """Classic LeNet-5 variant for 3-channel 32x32 inputs, 10 output classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # 16 feature maps of 5x5 remain after two conv+pool stages on 32x32 input
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # two conv -> relu -> 2x2 max-pool stages
        h = F.max_pool2d(F.relu(self.conv1(x)), 2)
        h = F.max_pool2d(F.relu(self.conv2(h)), 2)
        h = h.view(h.size(0), -1)  # flatten per sample
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
class Block(nn.Module):
    'Depthwise conv + Pointwise conv'

    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # depthwise 3x3 (one filter per input channel)
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
                               padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # pointwise 1x1 mixes channels
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(h)))
class MobileNet(nn.Module):
    """MobileNet v1 for CIFAR-sized inputs.

    cfg entries are either an int (out_planes with stride 1) or a tuple
    (out_planes, stride).
    """
    cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        blocks = []
        for entry in self.cfg:
            if isinstance(entry, int):
                out_planes, stride = entry, 1
            else:
                out_planes, stride = entry
            blocks.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layers(h)
        h = F.avg_pool2d(h, 2)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def test():
    """Smoke-test MobileNet on a single CIFAR-sized input."""
    model = MobileNet()
    sample = Variable(torch.randn(1, 3, 32, 32))
    result = model(sample)
    print(result.size())
class Block(nn.Module):
    'expand + depthwise + pointwise'

    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride
        planes = expansion * in_planes
        # 1x1 expansion
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 depthwise
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1,
                               groups=planes, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 linear projection (no ReLU afterwards)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            # match channel count for the residual sum
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_planes))

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        # residual connection only when spatial size is preserved
        if self.stride == 1:
            h = h + self.shortcut(x)
        return h
class MobileNetV2(nn.Module):
    """MobileNetV2 for CIFAR.

    cfg rows are (expansion, out_planes, num_blocks, stride).
    NOTE: the stem conv uses stride 1 (not 2 as on ImageNet) for 32x32 inputs.
    """
    cfg = [(1, 16, 1, 1), (6, 24, 2, 1), (6, 32, 3, 2), (6, 64, 4, 2), (6, 96, 3, 1), (6, 160, 3, 2), (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        blocks = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            # only the first block of each stage may downsample
            for block_stride in [stride] + [1] * (num_blocks - 1):
                blocks.append(Block(in_planes, out_planes, expansion, block_stride))
                in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layers(h)
        h = F.relu(self.bn2(self.conv2(h)))
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def test():
    """Smoke-test MobileNetV2 with a batch of two CIFAR-sized inputs."""
    model = MobileNetV2()
    batch = Variable(torch.randn(2, 3, 32, 32))
    print(model(batch).size())
class SepConv(nn.Module):
    'Separable Convolution.'

    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        # depthwise-grouped conv; "same" padding for odd kernel sizes.
        # NOTE: out_planes must be divisible by in_planes (groups=in_planes).
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size, stride,
                               padding=(kernel_size - 1) // 2,
                               bias=False, groups=in_planes)
        self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        return self.bn1(self.conv1(x))
class CellA(nn.Module):
    """PNASNet cell A: separable-conv branch + max-pool branch, summed then ReLU."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(CellA, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        if stride == 2:
            # pooling branch needs a 1x1 conv to reach out_planes when downsampling
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                                   padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        branch_a = self.sep_conv1(x)
        branch_b = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride == 2:
            branch_b = self.bn1(self.conv1(branch_b))
        return F.relu(branch_a + branch_b)
class CellB(nn.Module):
    """PNASNet cell B: two summed branch-pairs, concatenated then fused by 1x1 conv."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(CellB, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)
        self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)
        if stride == 2:
            # pooling branch needs a 1x1 conv to reach out_planes when downsampling
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                                   padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
        # fuse the concatenated branch pair back down to out_planes
        self.conv2 = nn.Conv2d(2 * out_planes, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        y1 = self.sep_conv1(x)
        y2 = self.sep_conv2(x)
        y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride == 2:
            y3 = self.bn1(self.conv1(y3))
        y4 = self.sep_conv3(x)
        pair1 = F.relu(y1 + y2)
        pair2 = F.relu(y3 + y4)
        fused = torch.cat([pair1, pair2], 1)
        return F.relu(self.bn2(self.conv2(fused)))
class PNASNet(nn.Module):
    """PNASNet backbone for CIFAR: alternating cell stages and downsampling cells.

    Args:
        cell_type: cell class to instantiate (e.g. CellA or CellB), called as
            cell_type(in_planes, out_planes, stride).
        num_cells: number of cells per (non-downsampling) stage.
        num_planes: base channel width; doubled at each downsample.
    """

    def __init__(self, cell_type, num_cells, num_planes):
        super(PNASNet, self).__init__()
        self.in_planes = num_planes
        self.cell_type = cell_type
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_planes)
        # BUG FIX: the num_cells constructor argument was ignored — every stage
        # hardcoded num_cells=6.  Wire the parameter through (existing callers
        # pass 6, so behavior is unchanged for them).
        self.layer1 = self._make_layer(num_planes, num_cells=num_cells)
        self.layer2 = self._downsample(num_planes * 2)
        self.layer3 = self._make_layer(num_planes * 2, num_cells=num_cells)
        self.layer4 = self._downsample(num_planes * 4)
        self.layer5 = self._make_layer(num_planes * 4, num_cells=num_cells)
        self.linear = nn.Linear(num_planes * 4, 10)

    def _make_layer(self, planes, num_cells):
        """Stack num_cells stride-1 cells at the given width."""
        layers = []
        for _ in range(num_cells):
            layers.append(self.cell_type(self.in_planes, planes, stride=1))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def _downsample(self, planes):
        """Single stride-2 cell that widens to `planes`."""
        layer = self.cell_type(self.in_planes, planes, stride=2)
        self.in_planes = planes
        return layer

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = F.avg_pool2d(out, 8)  # 32x32 input -> 8x8 feature map here
        out = self.linear(out.view(out.size(0), -1))
        return out
def PNASNetA():
    """PNASNet built from CellA, 6 cells per stage, 44 base planes."""
    return PNASNet(CellA, num_cells=6, num_planes=44)

def PNASNetB():
    """PNASNet built from CellB, 6 cells per stage, 32 base planes."""
    return PNASNet(CellB, num_cells=6, num_planes=32)
def test():
    """Smoke-test PNASNetB: print the model and one forward pass."""
    model = PNASNetB()
    print(model)
    sample = Variable(torch.randn(1, 3, 32, 32))
    print(model(sample))
class PreActBlock(nn.Module):
    'Pre-activation version of the BasicBlock.'
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        # projection shortcut only when shape changes; otherwise the attribute
        # is intentionally absent (checked via hasattr in forward)
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False))

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        # shortcut taken from the pre-activated tensor when projecting
        residual = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        h = self.conv1(pre)
        h = self.conv2(F.relu(self.bn2(h)))
        h += residual
        return h
class PreActBottleneck(nn.Module):
    'Pre-activation version of the original Bottleneck module.'
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        # projection shortcut only when shape changes (see hasattr in forward)
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False))

    def forward(self, x):
        pre = F.relu(self.bn1(x))
        residual = self.shortcut(pre) if hasattr(self, 'shortcut') else x
        h = self.conv1(pre)
        h = self.conv2(F.relu(self.bn2(h)))
        h = self.conv3(F.relu(self.bn3(h)))
        h += residual
        return h
class PreActResNet(nn.Module):
    """Pre-activation ResNet for CIFAR (no BN/ReLU after the stem conv)."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # first block of the stage downsamples; the rest keep stride 1
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        h = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
def PreActResNet18(num_classes=10):
    """Pre-activation ResNet-18.  num_classes added for consistency with ResNet18."""
    return PreActResNet(PreActBlock, [2, 2, 2, 2], num_classes=num_classes)

def PreActResNet34(num_classes=10):
    """Pre-activation ResNet-34."""
    return PreActResNet(PreActBlock, [3, 4, 6, 3], num_classes=num_classes)

def PreActResNet50(num_classes=10):
    """Pre-activation ResNet-50 (bottleneck blocks)."""
    return PreActResNet(PreActBottleneck, [3, 4, 6, 3], num_classes=num_classes)

def PreActResNet101(num_classes=10):
    """Pre-activation ResNet-101 (bottleneck blocks)."""
    return PreActResNet(PreActBottleneck, [3, 4, 23, 3], num_classes=num_classes)

def PreActResNet152(num_classes=10):
    """Pre-activation ResNet-152 (bottleneck blocks)."""
    return PreActResNet(PreActBottleneck, [3, 8, 36, 3], num_classes=num_classes)
def test():
    """Smoke-test PreActResNet18 on one CIFAR-sized input."""
    model = PreActResNet18()
    out = model(Variable(torch.randn(1, 3, 32, 32)))
    print(out.size())
class BasicBlock(nn.Module):
    """Standard post-activation ResNet basic block (two 3x3 convs + identity)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            # 1x1 projection to match shape of the residual branch
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        h += self.shortcut(x)
        return F.relu(h)
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck block (1x1 reduce, 3x3, 1x1 expand x4)."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            # 1x1 projection to match shape of the residual branch
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h += self.shortcut(x)
        return F.relu(h)
class ResNet(nn.Module):
    """ResNet for CIFAR: 3x3 stem, four stages, 4x4 average pool, linear head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # first block of the stage downsamples; the rest keep stride 1
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
def ResNet18(num_classes=10):
    """ResNet-18 (basic blocks)."""
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)

def ResNet34(num_classes=10):
    """ResNet-34 (basic blocks).  num_classes added for consistency with ResNet18."""
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)

def ResNet50(num_classes=10):
    """ResNet-50 (bottleneck blocks)."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)

def ResNet101(num_classes=10):
    """ResNet-101 (bottleneck blocks)."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)

def ResNet152(num_classes=10):
    """ResNet-152 (bottleneck blocks)."""
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
def test():
    """Smoke-test ResNet18 on one CIFAR-sized input."""
    model = ResNet18()
    out = model(Variable(torch.randn(1, 3, 32, 32)))
    print(out.size())
class Block(nn.Module):
    'Grouped convolution block.'
    expansion = 2

    def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        # grouped 3x3: `cardinality` parallel paths
        self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride,
                               padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width, self.expansion * group_width,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * group_width)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * group_width, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * group_width))

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h += self.shortcut(x)
        return F.relu(h)
class ResNeXt(nn.Module):
    """ResNeXt for CIFAR (three stages).

    NOTE: _make_layer doubles self.bottleneck_width after each stage, so the
    order in which the stages are constructed matters.
    """

    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # final width = cardinality * bottleneck_width * 8 (2 stage doublings x expansion 2)
        self.linear = nn.Linear(cardinality * bottleneck_width * 8, num_classes)

    def _make_layer(self, num_blocks, stride):
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, s))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        self.bottleneck_width *= 2  # widen paths for the next stage
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        h = F.avg_pool2d(h, 8)
        return self.linear(h.view(h.size(0), -1))
def ResNeXt29_2x64d(num_classes=10):
    """ResNeXt-29 2x64d.  num_classes added for consistency with ResNet18."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=2, bottleneck_width=64, num_classes=num_classes)

def ResNeXt29_4x64d(num_classes=10):
    """ResNeXt-29 4x64d."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=4, bottleneck_width=64, num_classes=num_classes)

def ResNeXt29_8x64d(num_classes=10):
    """ResNeXt-29 8x64d."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=8, bottleneck_width=64, num_classes=num_classes)

def ResNeXt29_32x4d(num_classes=10):
    """ResNeXt-29 32x4d."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=32, bottleneck_width=4, num_classes=num_classes)
def test_resnext():
    """Smoke-test ResNeXt29_2x64d on one CIFAR-sized input."""
    model = ResNeXt29_2x64d()
    sample = torch.randn(1, 3, 32, 32)
    print(model(Variable(sample)).size())
class BasicBlock(nn.Module):
    """SE-ResNet basic block: two 3x3 convs + squeeze-and-excitation gating.

    The SE branch global-average-pools the residual output, squeezes to
    planes//16 channels, re-expands, and sigmoid-gates the feature maps.
    NOTE: planes must be >= 16 for the squeeze conv to have >0 channels.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes))
        # SE layers (1x1 convs act as per-channel fully-connected layers)
        self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool to 1x1
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated (removed in recent torch) -> torch.sigmoid
        w = torch.sigmoid(self.fc2(w))
        out = out * w  # Excitation: channel-wise re-weighting
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    """Pre-activation SE-ResNet block: BN->ReLU->conv twice, then SE gating.

    NOTE: planes must be >= 16 for the squeeze conv to have >0 channels.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        # projection shortcut only when shape changes (checked via hasattr below)
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False))
        # SE layers
        self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze-and-excitation
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated (removed in recent torch) -> torch.sigmoid
        w = torch.sigmoid(self.fc2(w))
        out = out * w
        out += shortcut
        return out
class SENet(nn.Module):
    """SE-ResNet for CIFAR: standard four-stage layout with SE-gated blocks.

    NOTE: the head assumes block expansion 1 (Linear(512, num_classes)).
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
def SENet18():
    """SE-ResNet-18 built from pre-activation SE blocks."""
    return SENet(PreActBlock, [2, 2, 2, 2])
def test():
    """Smoke-test SENet18 on one CIFAR-sized input."""
    model = SENet18()
    out = model(Variable(torch.randn(1, 3, 32, 32)))
    print(out.size())
class ShuffleBlock(nn.Module):
    """Channel-shuffle layer used between grouped convolutions."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        'Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'
        (N, C, H, W) = x.size()
        g = self.groups
        # BUG FIX: C / g is float division in Python 3 and Tensor.view
        # requires integer sizes -> use floor division C // g.
        return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)
class Bottleneck(nn.Module): def __init__(self, in_planes, out_planes, stride, groups): super(Bottleneck, self).__init__() self.stride = stride mid_planes = (out_planes / 4) g = (1 if (in_planes == 24) else groups) self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes) self.shuffle1 = ShuffleBlock(groups=g) self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False) self.bn2 = nn.BatchNorm2d(mid_planes) self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False) self.bn3 = nn.BatchNorm2d(out_planes) self.shortcut = nn.Sequential() if (stride == 2): self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1)) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.shuffle1(out) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) res = self.shortcut(x) out = (F.relu(torch.cat([out, res], 1)) if (self.stride == 2) else F.relu((out + res))) return out
class ShuffleNet(nn.Module):
    """ShuffleNet for CIFAR; cfg supplies out_planes, num_blocks and groups."""

    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        stage = []
        for i in range(num_blocks):
            # the first block downsamples and concatenates the shortcut, so
            # its conv branch only produces (out_planes - in_planes) channels
            stride = 2 if i == 0 else 1
            cat_planes = self.in_planes if i == 0 else 0
            stage.append(Bottleneck(self.in_planes, out_planes - cat_planes,
                                    stride=stride, groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
def ShuffleNetG2():
    """ShuffleNet with 2 groups."""
    return ShuffleNet({'out_planes': [200, 400, 800],
                       'num_blocks': [4, 8, 4],
                       'groups': 2})

def ShuffleNetG3():
    """ShuffleNet with 3 groups."""
    return ShuffleNet({'out_planes': [240, 480, 960],
                       'num_blocks': [4, 8, 4],
                       'groups': 3})
def test():
    """Smoke-test ShuffleNetG2 on one CIFAR-sized input."""
    model = ShuffleNetG2()
    sample = Variable(torch.randn(1, 3, 32, 32))
    print(model(sample))
class VGG(nn.Module):
    """Configurable VGG for CIFAR-10; the layout comes from the module-level
    `cfg` table keyed by vgg_name ('M' entries are 2x2 max-pools)."""

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        modules = []
        channels = 3
        for spec in cfg:
            if spec == 'M':
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                modules.extend([
                    nn.Conv2d(channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                channels = spec
        # trailing no-op pool kept for checkpoint compatibility
        modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*modules)
def get_process_ros(node_name, doprint=False):
    """Resolve a running ROS node to its psutil.Process.

    Queries the master for the node's XML-RPC API URI, asks the node for its
    PID, and wraps it in psutil.  Returns the psutil.Process on success,
    False on any failure (each failure path logs a warning).
    """
    node_api = rosnode.get_api_uri(rospy.get_master(), node_name, skip_cache=True)[2]
    if not node_api:
        rospy.logwarn('could not get api of node %s (%s)' % (node_name, node_api))
        return False
    try:
        # '/NODEINFO' is the conventional caller-id string for this query
        response = ServerProxy(node_api).getPid('/NODEINFO')
    except Exception:
        # FIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt; also fixed the garbled message text.
        rospy.logwarn('failed to get the pid of ros node %s (%s)' % (node_name, node_api))
        return False
    try:
        process = psutil.Process(response[2])
        if doprint:
            rospy.loginfo('adding new node monitor %s (pid %d)' % (node_name, process.pid))
        return process
    except Exception:
        rospy.logwarn('unable to open psutil object for %s' % response[2])
        return False
def get_process_name(process_name, doprint=False):
    """Find all running processes matching `process_name`.

    A process matches if its name, first command-line token, or executable
    basename equals process_name.  Returns a non-empty list of psutil.Process
    objects, or False if none matched (logging an error).
    """
    processes = []
    for proc in psutil.process_iter():
        (name, exe, cmdline) = ('', '', [])
        try:
            name = proc.name()
            cmdline = proc.cmdline()
            exe = proc.exe()
        except (psutil.AccessDenied, psutil.ZombieProcess):
            pass  # keep whatever fields were readable before the failure
        except psutil.NoSuchProcess:
            continue
        # BUG FIX: cmdline can be empty (kernel threads, or when access was
        # denied above), so cmdline[0] raised IndexError — guard it.
        if (name == process_name
                or (cmdline and cmdline[0] == process_name)
                or os.path.basename(exe) == process_name):
            if doprint:
                rospy.loginfo('adding new node monitor (pid %d)' % proc.pid)
            processes.append(proc)
    if len(processes) > 0:
        return processes
    rospy.logerr('unable to find process for %s' % process_name)
    return False
def launch_setup(context):
    """Resolve the OpenVINS estimator config and build the launch nodes.

    Precedence: an explicit `config_path` launch argument wins; otherwise the
    named `config` is looked up under the package's config/ directory.
    Returns [node1, node2] on success, or a single LogInfo error action
    (which aborts the launch of the nodes) on a bad config.
    """
    config_path = LaunchConfiguration('config_path').perform(context)
    if (not config_path):
        # no explicit path: resolve the named config from the share directory
        configs_dir = os.path.join(get_package_share_directory('ov_msckf'), 'config')
        available_configs = os.listdir(configs_dir)
        config = LaunchConfiguration('config').perform(context)
        if (config in available_configs):
            config_path = os.path.join(get_package_share_directory('ov_msckf'), 'config', config, 'estimator_config.yaml')
        else:
            return [LogInfo(msg="ERROR: unknown config: '{}' - Available configs are: {} - not starting OpenVINS".format(config, ', '.join(available_configs)))]
    elif (not os.path.isfile(config_path)):
        # explicit path was given but does not exist
        return [LogInfo(msg="ERROR: config_path file: '{}' - does not exist. - not starting OpenVINS".format(config_path))]
    # main MSCKF estimator node; parameters mirror the declared launch args
    node1 = Node(package='ov_msckf', executable='run_subscribe_msckf', condition=IfCondition(LaunchConfiguration('ov_enable')), namespace=LaunchConfiguration('namespace'), output='screen', parameters=[{'verbosity': LaunchConfiguration('verbosity')}, {'use_stereo': LaunchConfiguration('use_stereo')}, {'max_cameras': LaunchConfiguration('max_cameras')}, {'save_total_state': LaunchConfiguration('save_total_state')}, {'config_path': config_path}])
    # optional RViz visualization with the packaged display config
    node2 = Node(package='rviz2', executable='rviz2', condition=IfCondition(LaunchConfiguration('rviz_enable')), arguments=[('-d' + os.path.join(get_package_share_directory('ov_msckf'), 'launch', 'display_ros2.rviz')), '--ros-args', '--log-level', 'warn'])
    return [node1, node2]
def generate_launch_description():
    """Assemble the LaunchDescription: declared args plus the config-resolving OpaqueFunction."""
    description = LaunchDescription(launch_args)
    description.add_action(OpaqueFunction(function=launch_setup))
    return description
def complex_flatten(real, imag):
    """Flatten the real and imaginary parts independently."""
    real_flat = tf.keras.layers.Flatten()(real)
    imag_flat = tf.keras.layers.Flatten()(imag)
    return (real_flat, imag_flat)
def CReLU(real, imag):
    """CReLU: apply ReLU to the real and imaginary parts separately."""
    real_out = tf.keras.layers.ReLU()(real)
    imag_out = tf.keras.layers.ReLU()(imag)
    return (real_out, imag_out)
def zReLU(real, imag):
    """zReLU: keep a complex entry only where both components are positive."""
    real = tf.keras.layers.ReLU()(real)
    imag = tf.keras.layers.ReLU()(imag)
    # 1.0 where BOTH relu outputs are nonzero, else 0.0
    real_flag = tf.cast(tf.cast(real, tf.bool), tf.float32)
    imag_flag = tf.cast(tf.cast(imag, tf.bool), tf.float32)
    both_positive = real_flag * imag_flag
    return (tf.math.multiply(real, both_positive),
            tf.math.multiply(imag, both_positive))
def modReLU(real, imag):
    """modReLU activation for complex inputs.

    Applies ReLU to (|z| + b) with a learnable per-channel bias b, and
    rescales the real/imag parts by relu(|z| + b) / |z| so the phase is
    preserved.
    NOTE(review): creating the tf.Variable inside the function makes a new
    bias on every call — presumably this is only used once per layer during
    graph construction; verify against the callers.
    """
    norm = tf.abs(tf.complex(real, imag))
    bias = tf.Variable(np.zeros([norm.get_shape()[-1]]), trainable=True, dtype=tf.float32)
    relu = tf.nn.relu(norm + bias)
    # BUG FIX: the scale was computed as (relu / norm) + 100000.0 — the huge
    # addend (likely a mangled 1e-05) swamps the activation entirely.  modReLU
    # scales by relu/|z|, with a small epsilon guarding division by zero.
    scale = relu / (norm + 1e-05)
    real = tf.math.multiply(scale, real)
    imag = tf.math.multiply(scale, imag)
    return (real, imag)
def CLeaky_ReLU(real, imag):
    """Apply leaky ReLU to the real and imaginary parts separately."""
    return (tf.nn.leaky_relu(real), tf.nn.leaky_relu(imag))
def complex_tanh(real, imag):
    """Apply tanh to the real and imaginary parts separately."""
    return (tf.nn.tanh(real), tf.nn.tanh(imag))
def complex_softmax(real, imag):
    """Softmax over the magnitudes of the complex inputs.

    Note: returns only the (real-valued) magnitude distribution, not a
    complex pair like the other activations in this module.
    """
    magnitude = tf.abs(tf.complex(real, imag))
    return tf.keras.layers.Softmax()(magnitude)
def get_planetoid_dataset(name, normalize_features=False, transform=None, split='public'):
    """Load a Planetoid citation dataset (e.g. Cora/CiteSeer/PubMed).

    split='complete' rebuilds the masks so that all nodes except the last
    1000 are training nodes (500 val / 500 test); any other value is passed
    through to Planetoid's own split handling.  Optional feature
    normalization and a user transform are composed onto the dataset.
    """
    path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', name)
    if (split == 'complete'):
        dataset = Planetoid(path, name)
        # NOTE(review): in some torch_geometric versions dataset[0] returns a
        # copy, in which case these in-place mask edits would not persist on
        # the dataset — verify against the installed PyG version.
        dataset[0].train_mask.fill_(False)
        dataset[0].train_mask[:(dataset[0].num_nodes - 1000)] = 1
        dataset[0].val_mask.fill_(False)
        dataset[0].val_mask[(dataset[0].num_nodes - 1000):(dataset[0].num_nodes - 500)] = 1
        dataset[0].test_mask.fill_(False)
        dataset[0].test_mask[(dataset[0].num_nodes - 500):] = 1
    else:
        dataset = Planetoid(path, name, split=split)
    # compose normalization with the user transform when both are requested
    if ((transform is not None) and normalize_features):
        dataset.transform = T.Compose([T.NormalizeFeatures(), transform])
    elif normalize_features:
        dataset.transform = T.NormalizeFeatures()
    elif (transform is not None):
        dataset.transform = transform
    return dataset
class Net_orig(torch.nn.Module):
    """Two-layer GCN: conv -> ReLU -> dropout -> conv -> log-softmax.

    Layer sizes come from the dataset and the module-level `args`.
    """

    def __init__(self, dataset):
        # BUG FIX: super() referenced the undefined name `Net2`, raising
        # NameError as soon as the class was instantiated.
        super(Net_orig, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, args.hidden)
        self.conv2 = GCNConv(args.hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        (x, edge_index) = (data.x, data.edge_index)
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=args.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)
class CRD(torch.nn.Module):
    """Feature-extractor block: GCN conv -> ReLU -> dropout."""

    def __init__(self, d_in, d_out, p):
        super(CRD, self).__init__()
        self.conv = GCNConv(d_in, d_out, cached=True)
        self.p = p  # dropout probability

    def reset_parameters(self):
        self.conv.reset_parameters()

    def forward(self, x, edge_index, mask=None):
        # mask is accepted for interface symmetry with CLS but unused here
        h = F.relu(self.conv(x, edge_index))
        return F.dropout(h, p=self.p, training=self.training)
class CLS(torch.nn.Module):
    """Classifier head: GCN conv followed by log-softmax."""

    def __init__(self, d_in, d_out):
        super(CLS, self).__init__()
        self.conv = GCNConv(d_in, d_out, cached=True)

    def reset_parameters(self):
        self.conv.reset_parameters()

    def forward(self, x, edge_index, mask=None):
        # mask is accepted for interface symmetry but unused
        logits = self.conv(x, edge_index)
        return F.log_softmax(logits, dim=1)
class Net(torch.nn.Module):
    """Two-stage GCN: a CRD feature extractor followed by a CLS head."""

    def __init__(self, dataset):
        super(Net, self).__init__()
        self.crd = CRD(dataset.num_features, args.hidden, args.dropout)
        self.cls = CLS(args.hidden, dataset.num_classes)

    def reset_parameters(self):
        self.crd.reset_parameters()
        self.cls.reset_parameters()

    def forward(self, data):
        (x, edge_index) = (data.x, data.edge_index)
        h = self.crd(x, edge_index, data.train_mask)
        return self.cls(h, edge_index, data.train_mask)
def run(dataset, model, str_optimizer, str_preconditioner, runs, epochs, lr, weight_decay, early_stopping, logger, momentum, eps, update_freq, gamma, alpha, hyperparam):
    """Train `model` on `dataset` for `runs` independent runs and report
    mean validation loss / test accuracy / duration.

    Test accuracy is taken at the epoch with the best validation loss.
    Early stopping: after epochs//2, stop when the current validation loss
    exceeds the mean of the last `early_stopping` recorded values.
    `logger` is a run-name string (or None); it is turned into a
    SummaryWriter after its directory is emptied.
    """
    if (logger is not None):
        if hyperparam:
            # append the swept hyperparameter's value to the run name
            # NOTE(review): eval(hyperparam) evaluates a caller-supplied
            # string — safe only for trusted CLI input.
            logger += f'-{hyperparam}{eval(hyperparam)}'
        path_logger = os.path.join(path_runs, logger)
        print(f'path logger: {path_logger}')
        ut.empty_dir(path_logger)  # clear stale event files from prior runs
    logger = (SummaryWriter(log_dir=os.path.join(path_runs, logger)) if (logger is not None) else None)
    (val_losses, accs, durations) = ([], [], [])
    torch.manual_seed(42)  # one seed for the whole set of runs
    for i_run in range(runs):
        data = dataset[0]
        data = data.to(device)
        model.to(device).reset_parameters()
        if (str_preconditioner == 'KFAC'):
            preconditioner = psgd.KFAC(model, eps, sua=False, pi=False, update_freq=update_freq, alpha=(alpha if (alpha is not None) else 1.0), constraint_norm=False)
        else:
            preconditioner = None
        if (str_optimizer == 'Adam'):
            optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        elif (str_optimizer == 'SGD'):
            optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
        if torch.cuda.is_available():
            torch.cuda.synchronize()  # make wall-clock timing accurate on GPU
        t_start = time.perf_counter()
        best_val_loss = float('inf')
        test_acc = 0
        val_loss_history = []
        for epoch in range(1, (epochs + 1)):
            # lam ramps from ~0 to 1 over training when gamma is set
            lam = (((float(epoch) / float(epochs)) ** gamma) if (gamma is not None) else 0.0)
            train(model, optimizer, data, preconditioner, lam)
            eval_info = evaluate(model, data)
            eval_info['epoch'] = int(epoch)
            eval_info['run'] = int((i_run + 1))
            eval_info['time'] = (time.perf_counter() - t_start)
            eval_info['eps'] = eps
            eval_info['update-freq'] = update_freq
            if (gamma is not None):
                eval_info['gamma'] = gamma
            if (alpha is not None):
                eval_info['alpha'] = alpha
            if (logger is not None):
                for (k, v) in eval_info.items():
                    logger.add_scalar(k, v, global_step=epoch)
            # track the best validation epoch; report its test accuracy
            if (eval_info['val loss'] < best_val_loss):
                best_val_loss = eval_info['val loss']
                test_acc = eval_info['test acc']
            val_loss_history.append(eval_info['val loss'])
            # early stopping only engages in the second half of training
            if ((early_stopping > 0) and (epoch > (epochs // 2))):
                tmp = tensor(val_loss_history[(- (early_stopping + 1)):(- 1)])
                if (eval_info['val loss'] > tmp.mean().item()):
                    break
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t_end = time.perf_counter()
        val_losses.append(best_val_loss)
        accs.append(test_acc)
        durations.append((t_end - t_start))
    if (logger is not None):
        logger.close()
    (loss, acc, duration) = (tensor(val_losses), tensor(accs), tensor(durations))
    print('Val Loss: {:.4f}, Test Accuracy: {:.2f} ± {:.2f}, Duration: {:.3f} \n'.format(loss.mean().item(), (100 * acc.mean().item()), (100 * acc.std().item()), duration.mean().item()))
def train(model, optimizer, data, preconditioner=None, lam=0.0):
    """One optimization step on the full graph.

    Uses ground-truth labels on the training mask and the model's own argmax
    predictions as pseudo-labels everywhere else, weighting the pseudo-label
    loss by `lam`.

    :param preconditioner: optional KFAC-style object with .step(lam=...)
    :param lam: weight of the loss on non-training nodes (0.0 disables it)
    """
    model.train()
    optimizer.zero_grad()
    out = model(data)
    # argmax predictions serve as pseudo-labels outside the train mask...
    label = out.max(1)[1]
    # ...while training nodes keep their ground-truth labels
    label[data.train_mask] = data.y[data.train_mask]
    label.requires_grad = False
    # supervised loss on train nodes + lam-weighted self-training loss on the rest
    loss = F.nll_loss(out[data.train_mask], label[data.train_mask])
    loss += (lam * F.nll_loss(out[(~ data.train_mask)], label[(~ data.train_mask)]))
    # NOTE(review): retain_graph=True — presumably the preconditioner's step
    # needs the graph after backward; confirm against psgd.KFAC.
    loss.backward(retain_graph=True)
    if preconditioner:
        preconditioner.step(lam=lam)
    optimizer.step()
def evaluate(model, data):
    """Evaluate `model` on the train/val/test splits of `data`.

    Returns a dict with keys '<split> loss' (mean NLL) and '<split> acc'
    (fraction of correct argmax predictions) for each split.
    """
    model.eval()
    with torch.no_grad():
        logits = model(data)
    results = {}
    for split in ('train', 'val', 'test'):
        mask = data[f'{split}_mask']
        masked_logits = logits[mask]
        targets = data.y[mask]
        split_loss = F.nll_loss(masked_logits, targets).item()
        predictions = masked_logits.max(1)[1]
        n_correct = predictions.eq(targets).sum().item()
        results[f'{split} loss'] = split_loss
        results[f'{split} acc'] = (n_correct / mask.sum().item())
    return results
class Txt2ImgIterableBaseDataset(IterableDataset):
    """Interface that makes the IterableDatasets for text2img data chainable."""

    def __init__(self, num_records=0, valid_ids=None, size=256):
        super().__init__()
        self.num_records = num_records
        self.size = size
        # sample_ids starts out identical to valid_ids; subclasses may narrow it
        self.valid_ids = valid_ids
        self.sample_ids = valid_ids
        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')

    def __len__(self):
        # length is the declared record count, not len(valid_ids)
        return self.num_records

    @abstractmethod
    def __iter__(self):
        pass
def synset2idx(path_to_yaml='data/index_synset.yaml'):
    """Return the inverse mapping synset-id -> integer index.

    :param path_to_yaml: YAML file mapping integer indices to synset ids.
    :return: dict mapping synset id -> integer index.
    """
    with open(path_to_yaml) as f:
        # safe_load instead of bare yaml.load: yaml.load without an explicit
        # Loader is deprecated (PyYAML >= 5.1) and can construct arbitrary
        # Python objects from untrusted files.
        di2s = yaml.safe_load(f)
    return {v: k for (k, v) in di2s.items()}
class ImageNetBase(Dataset):
    """Shared loading/annotation logic for the ImageNet train/validation sets.

    Subclasses implement `_prepare()` and must set `self.root`, `self.datadir`,
    `self.txt_filelist` and `self.random_crop` there before `_load()` runs.
    """

    def __init__(self, config=None):
        # config may be a dict or an OmegaConf object; normalize to a plain dict
        self.config = (config or OmegaConf.create())
        if (not (type(self.config) == dict)):
            self.config = OmegaConf.to_container(self.config)
        self.keep_orig_class_label = self.config.get('keep_orig_class_label', False)
        self.process_images = True  # subclasses may override before _load() is called
        self._prepare()
        self._prepare_synset_to_human()
        self._prepare_idx_to_synset()
        self._prepare_human_to_integer_label()
        self._load()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def _prepare(self):
        # Subclass hook: download/extract the data and set paths.
        raise NotImplementedError()

    def _filter_relpaths(self, relpaths):
        """Drop known-bad files and, if configured, restrict to a synset subset."""
        ignore = set(['n06596364_9591.JPEG'])  # explicitly excluded file
        relpaths = [rpath for rpath in relpaths if (not (rpath.split('/')[(- 1)] in ignore))]
        if ('sub_indices' in self.config):
            indices = str_to_indices(self.config['sub_indices'])
            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)
            self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
            files = []
            for rpath in relpaths:
                syn = rpath.split('/')[0]  # top-level folder is the synset id
                if (syn in synsets):
                    files.append(rpath)
            return files
        else:
            return relpaths

    def _prepare_synset_to_human(self):
        # Download the synset -> human-readable-name table if missing or truncated.
        SIZE = 2655750  # expected byte size of the complete file
        URL = 'https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1'
        self.human_dict = os.path.join(self.root, 'synset_human.txt')
        if ((not os.path.exists(self.human_dict)) or (not (os.path.getsize(self.human_dict) == SIZE))):
            download(URL, self.human_dict)

    def _prepare_idx_to_synset(self):
        # Download the index -> synset YAML mapping if missing.
        URL = 'https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1'
        self.idx2syn = os.path.join(self.root, 'index_synset.yaml')
        if (not os.path.exists(self.idx2syn)):
            download(URL, self.idx2syn)

    def _prepare_human_to_integer_label(self):
        # Download and parse the "<int>: <label>" class-index table.
        URL = 'https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1'
        self.human2integer = os.path.join(self.root, 'imagenet1000_clsidx_to_labels.txt')
        if (not os.path.exists(self.human2integer)):
            download(URL, self.human2integer)
        with open(self.human2integer, 'r') as f:
            lines = f.read().splitlines()
            assert (len(lines) == 1000)
            self.human2integer_dict = dict()
            for line in lines:
                (value, key) = line.split(':')  # file format is "<int>: <label>"
                self.human2integer_dict[key] = int(value)

    def _load(self):
        """Read the filelist, filter it, and build labels / the data container."""
        with open(self.txt_filelist, 'r') as f:
            self.relpaths = f.read().splitlines()
            l1 = len(self.relpaths)
            self.relpaths = self._filter_relpaths(self.relpaths)
            print('Removed {} files from filelist during filtering.'.format((l1 - len(self.relpaths))))
        self.synsets = [p.split('/')[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
        unique_synsets = np.unique(self.synsets)
        class_dict = dict(((synset, i) for (i, synset) in enumerate(unique_synsets)))
        if (not self.keep_orig_class_label):
            # dense labels 0..N-1 over the synsets actually present
            self.class_labels = [class_dict[s] for s in self.synsets]
        else:
            # original ImageNet indices; requires self.synset2idx, which is only
            # set when 'sub_indices' filtering ran in _filter_relpaths
            self.class_labels = [self.synset2idx[s] for s in self.synsets]
        with open(self.human_dict, 'r') as f:
            human_dict = f.read().splitlines()
            human_dict = dict((line.split(maxsplit=1) for line in human_dict))
        self.human_labels = [human_dict[s] for s in self.synsets]
        labels = {'relpath': np.array(self.relpaths), 'synsets': np.array(self.synsets), 'class_label': np.array(self.class_labels), 'human_label': np.array(self.human_labels)}
        if self.process_images:
            self.size = retrieve(self.config, 'size', default=256)
            self.data = ImagePaths(self.abspaths, labels=labels, size=self.size, random_crop=self.random_crop)
        else:
            # path-only mode: self.data is just the list of absolute paths
            self.data = self.abspaths
class ImageNetTrain(ImageNetBase):
    """ILSVRC2012 training split; fetched via academictorrents on first use."""
    NAME = 'ILSVRC2012_train'
    URL = 'http://www.image-net.org/challenges/LSVRC/2012/'
    AT_HASH = 'a306397ccf9c2ead27155983c254227c0fd938e2'  # academictorrents id
    FILES = ['ILSVRC2012_img_train.tar']
    SIZES = [147897477120]  # expected byte size of the tarball

    def __init__(self, process_images=True, data_root=None, **kwargs):
        self.process_images = process_images
        self.data_root = data_root
        super().__init__(**kwargs)

    def _prepare(self):
        """Download/extract the training data (once) and write filelist.txt."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # default cache location following the XDG convention
            cachedir = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
            self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME)
        self.datadir = os.path.join(self.root, 'data')
        self.txt_filelist = os.path.join(self.root, 'filelist.txt')
        self.expected_length = 1281167
        self.random_crop = retrieve(self.config, 'ImageNetTrain/random_crop', default=True)
        if (not tdu.is_prepared(self.root)):
            print('Preparing dataset {} in {}'.format(self.NAME, self.root))
            datadir = self.datadir
            if (not os.path.exists(datadir)):
                path = os.path.join(self.root, self.FILES[0])
                # (re)download when the tarball is missing or has the wrong size
                if ((not os.path.exists(path)) or (not (os.path.getsize(path) == self.SIZES[0]))):
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert (atpath == path)
                print('Extracting {} to {}'.format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): extractall on a downloaded archive is vulnerable to
                # path traversal for untrusted content; acceptable only because the
                # torrent hash pins the archive — confirm.
                with tarfile.open(path, 'r:') as tar:
                    tar.extractall(path=datadir)
                print('Extracting sub-tars.')
                # the training tarball contains one inner tar per synset
                subpaths = sorted(glob.glob(os.path.join(datadir, '*.tar')))
                for subpath in tqdm(subpaths):
                    subdir = subpath[:(- len('.tar'))]
                    os.makedirs(subdir, exist_ok=True)
                    with tarfile.open(subpath, 'r:') as tar:
                        tar.extractall(path=subdir)
            # write the sorted list of relative JPEG paths used by _load()
            filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG'))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = ('\n'.join(filelist) + '\n')
            with open(self.txt_filelist, 'w') as f:
                f.write(filelist)
            tdu.mark_prepared(self.root)
class ImageNetValidation(ImageNetBase):
    """ILSVRC2012 validation split; fetched via academictorrents and reorganized
    from a flat directory into per-synset folders.
    """
    NAME = 'ILSVRC2012_validation'
    URL = 'http://www.image-net.org/challenges/LSVRC/2012/'
    AT_HASH = '5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5'  # academictorrents id
    VS_URL = 'https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1'  # filename -> synset table
    FILES = ['ILSVRC2012_img_val.tar', 'validation_synset.txt']
    SIZES = [6744924160, 1950000]  # expected byte sizes of FILES

    def __init__(self, process_images=True, data_root=None, **kwargs):
        self.data_root = data_root
        self.process_images = process_images
        super().__init__(**kwargs)

    def _prepare(self):
        """Download/extract the validation data (once) and write filelist.txt."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # default cache location following the XDG convention
            cachedir = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
            self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME)
        self.datadir = os.path.join(self.root, 'data')
        self.txt_filelist = os.path.join(self.root, 'filelist.txt')
        self.expected_length = 50000
        self.random_crop = retrieve(self.config, 'ImageNetValidation/random_crop', default=False)
        if (not tdu.is_prepared(self.root)):
            print('Preparing dataset {} in {}'.format(self.NAME, self.root))
            datadir = self.datadir
            if (not os.path.exists(datadir)):
                path = os.path.join(self.root, self.FILES[0])
                # (re)download when the tarball is missing or has the wrong size
                if ((not os.path.exists(path)) or (not (os.path.getsize(path) == self.SIZES[0]))):
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert (atpath == path)
                print('Extracting {} to {}'.format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): extractall on a downloaded archive is vulnerable to
                # path traversal for untrusted content; acceptable only because the
                # torrent hash pins the archive — confirm.
                with tarfile.open(path, 'r:') as tar:
                    tar.extractall(path=datadir)
                vspath = os.path.join(self.root, self.FILES[1])
                if ((not os.path.exists(vspath)) or (not (os.path.getsize(vspath) == self.SIZES[1]))):
                    download(self.VS_URL, vspath)
                with open(vspath, 'r') as f:
                    synset_dict = f.read().splitlines()
                    synset_dict = dict((line.split() for line in synset_dict))  # filename -> synset
                print('Reorganizing into synset folders')
                synsets = np.unique(list(synset_dict.values()))
                for s in synsets:
                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
                # move each validation image into its synset folder
                for (k, v) in synset_dict.items():
                    src = os.path.join(datadir, k)
                    dst = os.path.join(datadir, v)
                    shutil.move(src, dst)
            # write the sorted list of relative JPEG paths used by _load()
            filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG'))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = ('\n'.join(filelist) + '\n')
            with open(self.txt_filelist, 'w') as f:
                f.write(filelist)
            tdu.mark_prepared(self.root)
class ImageNetSR(Dataset):
    def __init__(self, size=None, degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.0, random_crop=True):
        """Imagenet Superresolution Dataloader
        Performs following ops in order:
        1. crops a crop of size s from image either as random or center crop
        2. resizes crop to size with cv2.area_interpolation
        3. degrades resized crop with degradation_fn

        :param size: resizing to size after cropping
        :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light
        :param downscale_f: Low Resolution Downsample factor
        :param min_crop_f: determines crop size s, where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)
        :param max_crop_f: ""
        :param data_root:
        :param random_crop:
        """
        self.base = self.get_base()  # subclass hook returning the underlying dataset
        assert size
        assert (size / downscale_f).is_integer()  # LR size must be integral
        self.size = size
        self.LR_size = int((size / downscale_f))
        self.min_crop_f = min_crop_f
        self.max_crop_f = max_crop_f
        assert (max_crop_f <= 1.0)
        self.center_crop = (not random_crop)
        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
        self.pil_interpolation = False  # overwritten below for 'pil_*' degradations
        if (degradation == 'bsrgan'):
            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
        elif (degradation == 'bsrgan_light'):
            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
        else:
            # plain interpolation-based degradation; the key prefix selects the backend
            interpolation_fn = {'cv_nearest': cv2.INTER_NEAREST, 'cv_bilinear': cv2.INTER_LINEAR, 'cv_bicubic': cv2.INTER_CUBIC, 'cv_area': cv2.INTER_AREA, 'cv_lanczos': cv2.INTER_LANCZOS4, 'pil_nearest': PIL.Image.NEAREST, 'pil_bilinear': PIL.Image.BILINEAR, 'pil_bicubic': PIL.Image.BICUBIC, 'pil_box': PIL.Image.BOX, 'pil_hamming': PIL.Image.HAMMING, 'pil_lanczos': PIL.Image.LANCZOS}[degradation]
            self.pil_interpolation = degradation.startswith('pil_')
            if self.pil_interpolation:
                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
            else:
                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, interpolation=interpolation_fn)

    def __len__(self):
        return len(self.base)

    def __getitem__(self, i):
        """Return a dict with 'image' (HR) and 'LR_image', both float32 in [-1, 1]."""
        example = self.base[i]
        image = Image.open(example['file_path_'])
        if (not (image.mode == 'RGB')):
            image = image.convert('RGB')
        image = np.array(image).astype(np.uint8)
        min_side_len = min(image.shape[:2])
        # crop side is a random fraction of the shorter image side
        crop_side_len = (min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None))
        crop_side_len = int(crop_side_len)
        if self.center_crop:
            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
        else:
            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
        image = self.cropper(image=image)['image']
        image = self.image_rescaler(image=image)['image']
        if self.pil_interpolation:
            # PIL backends take a PIL image and return one
            image_pil = PIL.Image.fromarray(image)
            LR_image = self.degradation_process(image_pil)
            LR_image = np.array(LR_image).astype(np.uint8)
        else:
            LR_image = self.degradation_process(image=image)['image']
        # map uint8 [0, 255] to float32 [-1, 1]
        example['image'] = ((image / 127.5) - 1.0).astype(np.float32)
        example['LR_image'] = ((LR_image / 127.5) - 1.0).astype(np.float32)
        return example
class ImageNetSRTrain(ImageNetSR):
    """Super-resolution training split: a fixed subset of ImageNetTrain."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        # the pickle holds the precomputed indices of suitable HR images
        with open('data/imagenet_train_hr_indices.p', 'rb') as handle:
            hr_indices = pickle.load(handle)
        base_dataset = ImageNetTrain(process_images=False)
        return Subset(base_dataset, hr_indices)
class ImageNetSRValidation(ImageNetSR):
    """Super-resolution validation split: a fixed subset of ImageNetValidation."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        # the pickle holds the precomputed indices of suitable HR images
        with open('data/imagenet_val_hr_indices.p', 'rb') as handle:
            hr_indices = pickle.load(handle)
        base_dataset = ImageNetValidation(process_images=False)
        return Subset(base_dataset, hr_indices)
class LambdaWarmUpCosineScheduler():
    """Linear warm-up followed by cosine decay.

    note: use with a base_lr of 1.0 (the returned value is a multiplier).
    """

    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.0
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        if (self.verbosity_interval > 0) and ((n % self.verbosity_interval) == 0):
            print(f'current step: {n}, recent lr-multiplier: {self.last_lr}')
        if n < self.lr_warm_up_steps:
            # linear ramp from lr_start to lr_max over the warm-up steps
            self.last_lr = ((self.lr_max - self.lr_start) / self.lr_warm_up_steps) * n + self.lr_start
        else:
            # cosine decay from lr_max down to lr_min, clamped at the end
            progress = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            progress = min(progress, 1.0)
            self.last_lr = self.lr_min + (0.5 * (self.lr_max - self.lr_min)) * (1 + np.cos(progress * np.pi))
        return self.last_lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
class LambdaWarmUpCosineScheduler2():
    """Repeated warm-up + cosine-decay cycles, configurable via lists.

    note: use with a base_lr of 1.0.
    """

    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
        assert (len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths))
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        # cumulative cycle boundaries, starting at 0
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.0
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        # index of the first cycle whose cumulative boundary reaches n
        for interval, boundary in enumerate(self.cum_cycles[1:]):
            if n <= boundary:
                return interval

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step index within the current cycle
        if (self.verbosity_interval > 0) and ((n % self.verbosity_interval) == 0):
            print(f'current step: {n}, recent lr-multiplier: {self.last_f}, current cycle {cycle}')
        warm_up = self.lr_warm_up_steps[cycle]
        if n < warm_up:
            # linear warm-up from f_start to f_max
            self.last_f = ((self.f_max[cycle] - self.f_start[cycle]) / warm_up) * n + self.f_start[cycle]
        else:
            # cosine decay from f_max down to f_min, clamped at the cycle end
            t = (n - warm_up) / (self.cycle_lengths[cycle] - warm_up)
            t = min(t, 1.0)
            self.last_f = self.f_min[cycle] + (0.5 * (self.f_max[cycle] - self.f_min[cycle])) * (1 + np.cos(t * np.pi))
        return self.last_f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
    """Same cycle structure as the parent, but with linear decay after warm-up."""

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step index within the current cycle
        if (self.verbosity_interval > 0) and ((n % self.verbosity_interval) == 0):
            print(f'current step: {n}, recent lr-multiplier: {self.last_f}, current cycle {cycle}')
        warm_up = self.lr_warm_up_steps[cycle]
        if n < warm_up:
            # linear warm-up from f_start to f_max
            f = ((self.f_max[cycle] - self.f_start[cycle]) / warm_up) * n + self.f_start[cycle]
        else:
            # linear decay: f_max at the cycle start down to f_min at its end
            f = self.f_min[cycle] + ((self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n)) / self.cycle_lengths[cycle]
        self.last_f = f
        return f
class VQModel(pl.LightningModule):
    """Autoencoder with a vector-quantized latent space (VQGAN-style).

    Trained with two alternating optimizers — autoencoder (optimizer_idx 0)
    and discriminator (optimizer_idx 1) — via `training_step`.
    """

    def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None, batch_resize_range=None, scheduler_config=None, lr_g_factor=1.0, remap=None, sane_index_shape=False, use_ema=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.n_embed = n_embed
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape)
        # 1x1 convs mapping encoder output <-> codebook embedding space
        self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
        if (colorize_nlabels is not None):
            assert (type(colorize_nlabels) == int)
            # random projection used by to_rgb() for multi-channel inputs
            self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
        if (monitor is not None):
            self.monitor = monitor
        self.batch_resize_range = batch_resize_range
        if (self.batch_resize_range is not None):
            print(f'{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.')
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self)
            print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.')
        if (ckpt_path is not None):
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.scheduler_config = scheduler_config
        self.lr_g_factor = lr_g_factor

    @contextmanager
    def ema_scope(self, context=None):
        # Temporarily swap in EMA weights; training weights restored on exit.
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if (context is not None):
                print(f'{context}: Switched to EMA weights')
        try:
            (yield None)
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if (context is not None):
                    print(f'{context}: Restored training weights')

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from a checkpoint, dropping keys matching `ignore_keys` prefixes."""
        sd = torch.load(path, map_location='cpu')['state_dict']
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print('Deleting key {} from state_dict.'.format(k))
                    del sd[k]
        (missing, unexpected) = self.load_state_dict(sd, strict=False)
        print(f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys')
        if (len(missing) > 0):
            print(f'Missing Keys: {missing}')
            print(f'Unexpected Keys: {unexpected}')

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self)  # update EMA shadow weights after each batch

    def encode(self, x):
        """Encode to a quantized latent; returns (quant, emb_loss, info)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        (quant, emb_loss, info) = self.quantize(h)
        return (quant, emb_loss, info)

    def encode_to_prequant(self, x):
        # encoder output before the codebook lookup
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, quant):
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        # decode directly from codebook indices
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input, return_pred_indices=False):
        """Reconstruct `input`; optionally also return the codebook indices."""
        (quant, diff, (_, _, ind)) = self.encode(input)
        dec = self.decode(quant)
        if return_pred_indices:
            return (dec, diff, ind)
        return (dec, diff)

    def get_input(self, batch, k):
        """Fetch images from the batch as NCHW float tensors; optionally resize per batch."""
        x = batch[k]
        if (len(x.shape) == 3):
            x = x[(..., None)]  # add a trailing channel axis
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        if (self.batch_resize_range is not None):
            lower_size = self.batch_resize_range[0]
            upper_size = self.batch_resize_range[1]
            if (self.global_step <= 4):
                # do the first few batches at max size to avoid later OOM surprises
                new_resize = upper_size
            else:
                # random size in [lower, upper], step 16
                new_resize = np.random.choice(np.arange(lower_size, (upper_size + 16), 16))
            if (new_resize != x.shape[2]):
                x = F.interpolate(x, size=new_resize, mode='bicubic')
            x = x.detach()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        x = self.get_input(batch, self.image_key)
        (xrec, qloss, ind) = self(x, return_pred_indices=True)
        if (optimizer_idx == 0):
            # autoencoder update
            (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train', predicted_indices=ind)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if (optimizer_idx == 1):
            # discriminator update
            (discloss, log_dict_disc) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            # runs validation again with EMA weights (logged under the '_ema'
            # suffix inside _validation_step); the return value is unused
            log_dict_ema = self._validation_step(batch, batch_idx, suffix='_ema')
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=''):
        x = self.get_input(batch, self.image_key)
        (xrec, qloss, ind) = self(x, return_pred_indices=True)
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split=('val' + suffix), predicted_indices=ind)
        (discloss, log_dict_disc) = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split=('val' + suffix), predicted_indices=ind)
        rec_loss = log_dict_ae[f'val{suffix}/rec_loss']
        self.log(f'val{suffix}/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log(f'val{suffix}/aeloss', aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        if (version.parse(pl.__version__) >= version.parse('1.4.0')):
            # avoid logging the same key twice on newer PL versions
            del log_dict_ae[f'val{suffix}/rec_loss']
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        """Two Adam optimizers (AE and discriminator), optionally with LambdaLR schedulers."""
        lr_d = self.learning_rate
        lr_g = (self.lr_g_factor * self.learning_rate)
        print('lr_d', lr_d)
        print('lr_g', lr_g)
        opt_ae = torch.optim.Adam(((((list(self.encoder.parameters()) + list(self.decoder.parameters())) + list(self.quantize.parameters())) + list(self.quant_conv.parameters())) + list(self.post_quant_conv.parameters())), lr=lr_g, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9))
        if (self.scheduler_config is not None):
            scheduler = instantiate_from_config(self.scheduler_config)
            print('Setting up LambdaLR scheduler...')
            # same schedule object drives both optimizers, stepped per training step
            scheduler = [{'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1}, {'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1}]
            return ([opt_ae, opt_disc], scheduler)
        return ([opt_ae, opt_disc], [])

    def get_last_layer(self):
        # used by the loss to balance generator/discriminator gradients
        return self.decoder.conv_out.weight

    def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
        """Return a dict of tensors (inputs/reconstructions) for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if only_inputs:
            log['inputs'] = x
            return log
        (xrec, _) = self(x)
        if (x.shape[1] > 3):
            # more than 3 channels: project to RGB for visualization
            assert (xrec.shape[1] > 3)
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log['inputs'] = x
        log['reconstructions'] = xrec
        if plot_ema:
            with self.ema_scope():
                (xrec_ema, _) = self(x)
                if (x.shape[1] > 3):
                    xrec_ema = self.to_rgb(xrec_ema)
                log['reconstructions_ema'] = xrec_ema
        return log

    def to_rgb(self, x):
        """Project multi-channel (segmentation) input to 3 channels in [-1, 1]."""
        assert (self.image_key == 'segmentation')
        if (not hasattr(self, 'colorize')):
            self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        # rescale to [-1, 1]
        x = (((2.0 * (x - x.min())) / (x.max() - x.min())) - 1.0)
        return x
class VQModelInterface(VQModel):
    """VQModel variant whose encode() skips quantization.

    decode() performs the codebook lookup itself unless `force_not_quantize`
    is set, in which case the raw latent is decoded directly.
    """

    def __init__(self, embed_dim, *args, **kwargs):
        super().__init__(*args, embed_dim=embed_dim, **kwargs)
        self.embed_dim = embed_dim

    def encode(self, x):
        # return the pre-quantization latent (no codebook lookup here)
        return self.quant_conv(self.encoder(x))

    def decode(self, h, force_not_quantize=False):
        if force_not_quantize:
            quant = h
        else:
            (quant, _, _) = self.quantize(h)
        return self.decoder(self.post_quant_conv(quant))
class AutoencoderKL(pl.LightningModule):
    """Continuous autoencoder whose encoder outputs a diagonal Gaussian posterior.

    Trained with two alternating optimizers — autoencoder (optimizer_idx 0)
    and discriminator (optimizer_idx 1) — via `training_step`.
    """

    def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        # encoder must emit twice the latent channels (distribution parameters)
        assert ddconfig['double_z']
        self.quant_conv = torch.nn.Conv2d((2 * ddconfig['z_channels']), (2 * embed_dim), 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
        self.embed_dim = embed_dim
        if (colorize_nlabels is not None):
            assert (type(colorize_nlabels) == int)
            # random projection used by to_rgb() for multi-channel inputs
            self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
        if (monitor is not None):
            self.monitor = monitor
        if (ckpt_path is not None):
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from a checkpoint, dropping keys matching `ignore_keys` prefixes."""
        sd = torch.load(path, map_location='cpu')['state_dict']
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print('Deleting key {} from state_dict.'.format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f'Restored from {path}')

    def encode(self, x):
        """Encode `x` into a DiagonalGaussianDistribution over the latent."""
        h = self.encoder(x)
        moments = self.quant_conv(h)  # distribution parameters, split inside the class
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        """Reconstruct `input`; sample the posterior or use its mode."""
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return (dec, posterior)

    def get_input(self, batch, k):
        # batch images arrive channels-last; convert to NCHW float
        x = batch[k]
        if (len(x.shape) == 3):
            x = x[(..., None)]  # add a trailing channel axis
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        inputs = self.get_input(batch, self.image_key)
        (reconstructions, posterior) = self(inputs)
        if (optimizer_idx == 0):
            # autoencoder update
            (aeloss, log_dict_ae) = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss
        if (optimizer_idx == 1):
            # discriminator update
            (discloss, log_dict_disc) = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('discloss', discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        inputs = self.get_input(batch, self.image_key)
        (reconstructions, posterior) = self(inputs)
        (aeloss, log_dict_ae) = self.loss(inputs, reconstructions, posterior, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
        (discloss, log_dict_disc) = self.loss(inputs, reconstructions, posterior, 1, self.global_step, last_layer=self.get_last_layer(), split='val')
        self.log('val/rec_loss', log_dict_ae['val/rec_loss'])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        """One Adam for the autoencoder parameters, one for the discriminator."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam((((list(self.encoder.parameters()) + list(self.decoder.parameters())) + list(self.quant_conv.parameters())) + list(self.post_quant_conv.parameters())), lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))
        return ([opt_ae, opt_disc], [])

    def get_last_layer(self):
        # used by the loss to balance generator/discriminator gradients
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, **kwargs):
        """Return a dict of tensors (inputs/reconstructions/samples) for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if (not only_inputs):
            (xrec, posterior) = self(x)
            if (x.shape[1] > 3):
                # more than 3 channels: project to RGB for visualization
                assert (xrec.shape[1] > 3)
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            # decode random latents with the posterior's shape for sample images
            log['samples'] = self.decode(torch.randn_like(posterior.sample()))
            log['reconstructions'] = xrec
        log['inputs'] = x
        return log

    def to_rgb(self, x):
        """Project multi-channel (segmentation) input to 3 channels in [-1, 1]."""
        assert (self.image_key == 'segmentation')
        if (not hasattr(self, 'colorize')):
            self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        # rescale to [-1, 1]
        x = (((2.0 * (x - x.min())) / (x.max() - x.min())) - 1.0)
        return x
class IdentityFirstStage(torch.nn.Module):
    """No-op first stage: passes inputs through unchanged.

    Stands in for an autoencoder first stage when the surrounding code expects
    the encode/decode/quantize interface but no transformation is wanted.

    :param vq_interface: when True, quantize() mimics a vector quantizer's
        (quant, emb_loss, info) return shape with (x, None, [None, None, None]).
    """

    def __init__(self, *args, vq_interface=False, **kwargs):
        # Call nn.Module's __init__ before assigning attributes: Module's
        # __setattr__ machinery expects the module to be set up first (the
        # original assigned vq_interface before super().__init__()).
        super().__init__()
        self.vq_interface = vq_interface

    def encode(self, x, *args, **kwargs):
        """Identity: return x unchanged."""
        return x

    def decode(self, x, *args, **kwargs):
        """Identity: return x unchanged."""
        return x

    def quantize(self, x, *args, **kwargs):
        """Identity quantize; tuple form matches a VQ layer when vq_interface is set."""
        if self.vq_interface:
            return (x, None, [None, None, None])
        return x

    def forward(self, x, *args, **kwargs):
        return x
class DDIMSampler(object):
    """DDIM sampler wrapping a trained diffusion model.

    Precomputes a shortened timestep schedule and the matching alpha/sigma
    tables from the model's DDPM schedule, then iterates the reverse
    process.  ``eta`` controls stochasticity (eta=0 is deterministic DDIM).
    """

    def __init__(self, model, schedule='linear', **kwargs):
        super().__init__()
        self.model = model
        # Number of DDPM training timesteps; the DDIM schedule is a subset of these.
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        """Store ``attr`` as a plain attribute (this is NOT nn.Module.register_buffer)."""
        # NOTE(review): tensors are unconditionally moved to CUDA here, so
        # sampling presumably requires a GPU — confirm before CPU-only use.
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                attr = attr.to(torch.device('cuda'))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
        """Precompute the DDIM timestep subset and its alpha/sigma lookup tables."""
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize,
                                                  num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
        # Copy to float32 on the model's device before caching.
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
        # Quantities taken directly from the model's DDPM schedule.
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # Derived terms (computed on CPU via numpy, then converted).
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
        # DDIM-specific tables restricted to the selected timestep subset.
        (ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                     ddim_timesteps=self.ddim_timesteps,
                                                                                     eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
        # Sigmas to use when sampling with the full original step count instead.
        sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None,
               img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0,
               noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None,
               log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
        """Draw ``batch_size`` samples of shape ``(C, H, W)`` with ``S`` DDIM steps.

        Returns ``(samples, intermediates)`` as produced by :meth:`ddim_sampling`.
        """
        # Sanity check: warn if the conditioning batch size differs from batch_size.
        if (conditioning is not None):
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list):
                    ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')
        (samples, intermediates) = self.ddim_sampling(conditioning, size, callback=callback,
                                                      img_callback=img_callback, quantize_denoised=quantize_x0,
                                                      mask=mask, x0=x0, ddim_use_original_steps=False,
                                                      noise_dropout=noise_dropout, temperature=temperature,
                                                      score_corrector=score_corrector,
                                                      corrector_kwargs=corrector_kwargs, x_T=x_T,
                                                      log_every_t=log_every_t,
                                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                                      unconditional_conditioning=unconditional_conditioning)
        return (samples, intermediates)

    @torch.no_grad()
    def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None,
                      timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None,
                      log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None,
                      corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """Iterate the reverse process from ``x_T`` (or fresh noise) to a sample."""
        device = self.model.betas.device
        b = shape[0]
        # Start from the supplied latent or pure Gaussian noise.
        if (x_T is None):
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if (timesteps is None):
            timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
        elif ((timesteps is not None) and (not ddim_use_original_steps)):
            # Truncate the precomputed DDIM schedule to the requested fraction.
            subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        # Walk the schedule from the noisiest timestep down to 0.
        time_range = (reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps))
        total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
        print(f'Running DDIM Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
        for (i, step) in enumerate(iterator):
            index = ((total_steps - i) - 1)  # index into the ddim alpha/sigma tables
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            if (mask is not None):
                # Inpainting-style blending: pin the masked region to the
                # forward-diffused original x0 at this timestep.
                assert (x0 is not None)
                img_orig = self.model.q_sample(x0, ts)
                img = ((img_orig * mask) + ((1.0 - mask) * img))
            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning)
            (img, pred_x0) = outs
            if callback:
                callback(i)
            if img_callback:
                img_callback(pred_x0, i)
            if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return (img, intermediates)

    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False,
                      quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None,
                      corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """Single DDIM denoising step: ``x_t`` -> ``(x_{t-1}, pred_x0)``."""
        (b, *_, device) = (*x.shape, x.device)
        # Noise prediction; with classifier-free guidance, run the model on the
        # [uncond, cond] pair and extrapolate between the two predictions.
        if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
            e_t = self.model.apply_model(x, t, c)
        else:
            x_in = torch.cat(([x] * 2))
            t_in = torch.cat(([t] * 2))
            if isinstance(c, dict):
                # Dict conditioning: concatenate uncond/cond entry-wise.
                assert isinstance(unconditional_conditioning, dict)
                c_in = dict()
                for k in c:
                    if isinstance(c[k], list):
                        c_in[k] = [torch.cat([unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))]
                    else:
                        c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
            else:
                c_in = torch.cat([unconditional_conditioning, c])
            (e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
        if (score_corrector is not None):
            # Corrector only defined for eps-parameterized models.
            assert (self.model.parameterization == 'eps')
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
        # Select schedule tables for the requested step type (full DDPM vs DDIM subset).
        alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
        alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
        sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
        sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
        # Predicted clean latent x_0 implied by the current noise estimate.
        pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
        if quantize_denoised:
            (pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
        # Deterministic direction toward x_t, plus optional stochastic noise.
        dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
        noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
        if (noise_dropout > 0.0):
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
        return (x_prev, pred_x0)

    @torch.no_grad()
    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
        """Forward-diffuse ``x0`` to timestep ``t`` using the cached schedule tables."""
        if use_original_steps:
            sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
            sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
        else:
            sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
            sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
        if (noise is None):
            noise = torch.randn_like(x0)
        return ((extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0) + (extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise))

    @torch.no_grad()
    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0,
               unconditional_conditioning=None, use_original_steps=False):
        """Denoise an encoded latent from step ``t_start`` down to 0 (img2img-style)."""
        timesteps = (np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps)
        timesteps = timesteps[:t_start]
        time_range = np.flip(timesteps)
        total_steps = timesteps.shape[0]
        print(f'Running DDIM Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
        x_dec = x_latent
        for (i, step) in enumerate(iterator):
            index = ((total_steps - i) - 1)
            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
            (x_dec, _) = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
                                            unconditional_guidance_scale=unconditional_guidance_scale,
                                            unconditional_conditioning=unconditional_conditioning)
        return x_dec
class PLMSSampler(object):
    """PLMS (pseudo linear multi-step) sampler wrapping a trained diffusion model.

    Shares its schedule precomputation with the DDIM sampler but combines
    noise predictions from several previous steps at each update.  Only
    supports ``eta == 0`` (deterministic schedule).
    """

    def __init__(self, model, schedule='linear', **kwargs):
        super().__init__()
        self.model = model
        # Number of DDPM training timesteps; the PLMS schedule is a subset of these.
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        """Store ``attr`` as a plain attribute (this is NOT nn.Module.register_buffer)."""
        # NOTE(review): tensors are unconditionally moved to CUDA here, so
        # sampling presumably requires a GPU — confirm before CPU-only use.
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                attr = attr.to(torch.device('cuda'))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
        """Precompute the timestep subset and alpha/sigma tables; PLMS requires eta=0."""
        if (ddim_eta != 0):
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize,
                                                  num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
        # Copy to float32 on the model's device before caching.
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
        # Quantities taken directly from the model's DDPM schedule.
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # Derived terms (computed on CPU via numpy, then converted).
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
        # Tables restricted to the selected timestep subset.
        (ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                     ddim_timesteps=self.ddim_timesteps,
                                                                                     eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
        # Sigmas to use when sampling with the full original step count instead.
        sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None,
               img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0,
               noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None,
               log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
        """Draw ``batch_size`` samples of shape ``(C, H, W)`` with ``S`` PLMS steps.

        Returns ``(samples, intermediates)`` as produced by :meth:`plms_sampling`.
        """
        # Sanity check: warn if the conditioning batch size differs from batch_size.
        if (conditioning is not None):
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for PLMS sampling is {size}')
        (samples, intermediates) = self.plms_sampling(conditioning, size, callback=callback,
                                                      img_callback=img_callback, quantize_denoised=quantize_x0,
                                                      mask=mask, x0=x0, ddim_use_original_steps=False,
                                                      noise_dropout=noise_dropout, temperature=temperature,
                                                      score_corrector=score_corrector,
                                                      corrector_kwargs=corrector_kwargs, x_T=x_T,
                                                      log_every_t=log_every_t,
                                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                                      unconditional_conditioning=unconditional_conditioning)
        return (samples, intermediates)

    @torch.no_grad()
    def plms_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None,
                      timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None,
                      log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None,
                      corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """Iterate the reverse process, keeping a window of recent eps predictions."""
        device = self.model.betas.device
        b = shape[0]
        # Start from the supplied latent or pure Gaussian noise.
        if (x_T is None):
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if (timesteps is None):
            timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
        elif ((timesteps is not None) and (not ddim_use_original_steps)):
            # Truncate the precomputed schedule to the requested fraction.
            subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        # Walk the schedule from the noisiest timestep down to 0.
        time_range = (list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps))
        total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
        print(f'Running PLMS Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
        old_eps = []  # sliding window of previous noise predictions (max 3 kept)
        for (i, step) in enumerate(iterator):
            index = ((total_steps - i) - 1)  # index into the alpha/sigma tables
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # Next (less noisy) timestep; clamped at the final entry.
            ts_next = torch.full((b,), time_range[min((i + 1), (len(time_range) - 1))], device=device, dtype=torch.long)
            if (mask is not None):
                # Inpainting-style blending: pin the masked region to the
                # forward-diffused original x0 at this timestep.
                assert (x0 is not None)
                img_orig = self.model.q_sample(x0, ts)
                img = ((img_orig * mask) + ((1.0 - mask) * img))
            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      old_eps=old_eps, t_next=ts_next)
            (img, pred_x0, e_t) = outs
            old_eps.append(e_t)
            if (len(old_eps) >= 4):
                old_eps.pop(0)
            if callback:
                callback(i)
            if img_callback:
                img_callback(pred_x0, i)
            if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return (img, intermediates)

    @torch.no_grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False,
                      quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None,
                      corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
                      old_eps=None, t_next=None):
        """Single PLMS step: combine recent eps predictions, then take a DDIM-style update."""
        (b, *_, device) = (*x.shape, x.device)

        def get_model_output(x, t):
            # Noise prediction, optionally with classifier-free guidance
            # (uncond/cond pair run in one batch, then extrapolated).
            if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat(([x] * 2))
                t_in = torch.cat(([t] * 2))
                c_in = torch.cat([unconditional_conditioning, c])
                (e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
            if (score_corrector is not None):
                # Corrector only defined for eps-parameterized models.
                assert (self.model.parameterization == 'eps')
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
            return e_t

        # Select schedule tables for the requested step type (full DDPM vs subset).
        alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
        alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
        sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
        sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)

        def get_x_prev_and_pred_x0(e_t, index):
            # DDIM-style update given a (combined) noise estimate.
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
            # Predicted clean latent x_0 implied by the noise estimate.
            pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
            if quantize_denoised:
                (pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
            dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
            noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
            if (noise_dropout > 0.0):
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
            return (x_prev, pred_x0)

        e_t = get_model_output(x, t)
        # Pseudo linear multi-step combination of recent eps predictions
        # (Adams–Bashforth-style coefficients); order grows with history size.
        if (len(old_eps) == 0):
            # No history yet: predictor–corrector using the model output at t_next.
            (x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = ((e_t + e_t_next) / 2)
        elif (len(old_eps) == 1):
            e_t_prime = (((3 * e_t) - old_eps[(- 1)]) / 2)
        elif (len(old_eps) == 2):
            e_t_prime = ((((23 * e_t) - (16 * old_eps[(- 1)])) + (5 * old_eps[(- 2)])) / 12)
        elif (len(old_eps) >= 3):
            e_t_prime = (((((55 * e_t) - (59 * old_eps[(- 1)])) + (37 * old_eps[(- 2)])) - (9 * old_eps[(- 3)])) / 24)
        (x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t_prime, index)
        return (x_prev, pred_x0, e_t)
def exists(val):
    """True iff *val* is not None."""
    return not (val is None)
def uniq(arr):
    """Order-preserving de-duplication; returns a dict keys view."""
    return dict.fromkeys(arr, True).keys()
def default(val, d):
    """Return *val* unless it is None, else the default *d*.

    A callable *d* (plain function or lambda) is invoked lazily to produce
    the default value.
    """
    if val is None:
        return d() if isfunction(d) else d
    return val
def max_neg_value(t):
    """Largest-magnitude negative finite value representable in t's dtype."""
    info = torch.finfo(t.dtype)
    return -info.max
def init_(tensor):
    """Fill *tensor* in-place with Uniform(-1/sqrt(fan), 1/sqrt(fan)) and return it.

    The fan is taken from the tensor's last dimension.
    """
    fan = tensor.shape[-1]
    bound = 1 / math.sqrt(fan)
    tensor.uniform_(-bound, bound)
    return tensor
class GEGLU(nn.Module):
    """Gated GELU projection: project to twice the width, then gate one half
    with the GELU of the other (out = value * gelu(gate))."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        projected = self.proj(x)
        value, gate = projected.chunk(2, dim=-1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Transformer MLP block: Linear -> GELU (or GEGLU) -> Dropout -> Linear.

    `mult` scales the hidden width; `dim_out` defaults to `dim`.
    """

    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
        super().__init__()
        inner_dim = int(dim * mult)
        if dim_out is None:
            dim_out = dim
        if glu:
            project_in = GEGLU(dim, inner_dim)
        else:
            project_in = nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim_out),
        )

    def forward(self, x):
        return self.net(x)
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    # no_grad keeps the in-place write off the autograd tape.
    with torch.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
def Normalize(in_channels):
    """GroupNorm (32 groups, affine, eps=1e-6) — the normalization used
    throughout this file's attention blocks."""
    return torch.nn.GroupNorm(
        num_groups=32,
        num_channels=in_channels,
        eps=1e-06,
        affine=True,
    )
class LinearAttention(nn.Module):
    """Multi-head linear attention over 2-D feature maps.

    Softmax is taken over the keys and the context is aggregated in feature
    space, giving linear (rather than quadratic) cost in spatial size.
    """

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        inner_dim = dim_head * heads
        # Single conv produces q, k and v fused along the channel axis.
        self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(inner_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        fused = self.to_qkv(x)
        q, k, v = rearrange(fused, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
        k = k.softmax(dim=-1)
        context = torch.einsum('bhdn,bhen->bhde', k, v)
        attended = torch.einsum('bhde,bhdn->bhen', context, q)
        attended = rearrange(attended, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
        return self.to_out(attended)
class SpatialSelfAttention(nn.Module):
    """Single-head self-attention over spatial positions of a 2-D feature map,
    with a residual connection (pre-norm, 1x1-conv projections)."""

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels
        self.norm = Normalize(in_channels)
        # q, k, v and output projections are all 1x1 convolutions.
        self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        normed = self.norm(x)
        q = self.q(normed)
        k = self.k(normed)
        v = self.v(normed)
        b, c, h, w = q.shape
        # Flatten spatial dims and compute scaled dot-product attention weights.
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        attn = torch.einsum('bij,bjk->bik', q, k)
        attn = attn * (int(c) ** (-0.5))
        attn = torch.nn.functional.softmax(attn, dim=2)
        # Attend over values and restore the spatial layout.
        v = rearrange(v, 'b c h w -> b c (h w)')
        attn = rearrange(attn, 'b i j -> b j i')
        out = torch.einsum('bij,bjk->bik', v, attn)
        out = rearrange(out, 'b c (h w) -> b c h w', h=h)
        out = self.proj_out(out)
        return x + out
class CrossAttention(nn.Module):
    """Multi-head (cross-)attention: queries come from `x`, keys/values from
    `context` (or from `x` itself when no context is given)."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        if context_dim is None:
            # Self-attention: keys/values share the query dimensionality.
            context_dim = query_dim
        self.scale = dim_head ** (-0.5)
        self.heads = heads
        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))

    def forward(self, x, context=None, mask=None):
        h = self.heads
        q = self.to_q(x)
        if context is None:
            # Fall back to self-attention.
            context = x
        k = self.to_k(context)
        v = self.to_v(context)
        # Fold the head dimension into the batch for the attention matmuls.
        q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        if mask is not None:
            # Mask out disallowed key positions with the most negative finite value.
            mask = rearrange(mask, 'b ... -> b (...)')
            neg_inf = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, neg_inf)
        attn = sim.softmax(dim=-1)
        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)
class BasicTransformerBlock(nn.Module):
    """Transformer block: self-attention, cross-attention, then feed-forward,
    each applied pre-norm with a residual connection.  The forward pass is
    routed through the `checkpoint` helper when enabled."""

    def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True):
        super().__init__()
        # attn1 attends x to itself; attn2 attends x to the (optional) context.
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout)
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, heads=n_heads,
                                    dim_head=d_head, dropout=dropout)
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        # Gradient-checkpointed execution of _forward when self.checkpoint is set.
        return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None):
        x = x + self.attn1(self.norm1(x))
        x = x + self.attn2(self.norm2(x), context=context)
        x = x + self.ff(self.norm3(x))
        return x