# NOTE(review): the two lines below were dataset-extraction residue
# ("code stringlengths 17 6.64M |" table header) — kept as a comment so
# the file stays valid Python.
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) variant with a small-input stem.

    The stem is a single padded 3x3 conv (not the original 7x7/stride-2),
    so spatial resolution is reduced only by the three pooling stages.
    Relies on an ``Inception`` block defined elsewhere in this module.
    """

    def __init__(self, num_classes=1000):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )
        # Inception(in_ch, 1x1, 3x3red, 3x3, 5x5red, 5x5, pool_proj)
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.linear = nn.Linear(1024, num_classes)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.b3(self.a3(out))
        out = F.max_pool2d(out, 3, stride=2, padding=1)
        for stage in (self.a4, self.b4, self.c4, self.d4, self.e4):
            out = stage(out)
        out = F.max_pool2d(out, 3, stride=2, padding=1)
        out = self.b5(self.a5(out))
        # Final 8x8 global average pool, then the linear classifier.
        out = F.avg_pool2d(out, 8, stride=1)
        out = out.view(out.size(0), -1)
        return self.linear(out)
|
class LeNet(nn.Module):
    """Classic LeNet-5 adapted to 3-channel 32x32 inputs.

    Two conv/ReLU/max-pool stages followed by three fully connected
    layers; the flattened feature map is 16*5*5 = 400 wide, which fixes
    the expected input resolution at 32x32.
    """

    def __init__(self, num_classes=1000):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)
        # Separate module instances keep the graph introspectable
        # (e.g. for hooks) even though ReLU/pool are stateless.
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
        self.relu4 = nn.ReLU()
        self.max_pool2d1 = nn.MaxPool2d(2)
        self.max_pool2d2 = nn.MaxPool2d(2)

    def forward(self, x):
        out = self.max_pool2d1(self.relu1(self.conv1(x)))
        out = self.max_pool2d2(self.relu2(self.conv2(out)))
        out = out.view(out.size(0), -1)
        out = self.relu3(self.fc1(out))
        out = self.relu4(self.fc2(out))
        return self.fc3(out)
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution; padding equals dilation so spatial size
    is preserved at stride 1 for any dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 (pointwise) convolution, typically used for channel
    projection and shortcut downsampling."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
|
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs plus an additive shortcut.

    ``downsample`` (if given) projects the input so it matches the main
    path's shape before the residual add. Grouped convs and dilation are
    not supported here — those require Bottleneck.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        # Guard clauses first: this block intentionally rejects variants
        # only Bottleneck implements.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return F.relu(out)
|
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    The stride lives on the 3x3 conv (the layout torchvision calls
    "ResNet V1.5"). ``base_width`` and ``groups`` widen the middle conv
    for ResNeXt / wide-ResNet variants.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Middle-conv width scales with base_width and group count.
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return F.relu(out)
|
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone.

    Args:
        block: residual block class (BasicBlock or Bottleneck); its
            ``expansion`` attribute scales each stage's output channels.
        layers: number of blocks in each of the four stages.
        num_classes: width of the final fully connected layer.
        zero_init_residual: zero the last BN weight in every block so each
            residual branch starts as identity.
        groups / width_per_group: grouped-convolution settings (ResNeXt).
        replace_stride_with_dilation: three flags that replace the stride-2
            downsampling of layer2/3/4 with dilation.
        norm_layer: normalization layer factory; defaults to BatchNorm2d.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # Channels feeding the next stage; mutated by _make_layer.
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per strided stage (layer2, layer3, layer4).
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Kaiming init for convs; unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the final BN of each block so residual branches start
            # as identity mappings.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage of `blocks` residual blocks.

        Only the first block strides/downsamples; the rest run at stride 1
        on the (already expanded) channel count. When `dilate` is set the
        stride is folded into self.dilation instead.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Shortcut must be projected when shape or stride changes.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Global average pool to 1x1, flatten, classify.
        x = F.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
|
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet and optionally load pretrained weights.

    Args:
        arch: key into the module-level ``model_urls`` table.
        block: BasicBlock or Bottleneck.
        layers: per-stage block counts forwarded to ResNet.
        pretrained: if True, download and load the ImageNet state dict.
        progress: show a download progress bar.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        # Lazy import: only needed on the download path.
        from torchvision.models.utils import load_state_dict_from_url
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
|
def resnet18(pretrained=False, progress=True, **kwargs):
    """ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
|
def resnet34(pretrained=False, progress=True, **kwargs):
    """ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnet50(pretrained=False, progress=True, **kwargs):
    """ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnet101(pretrained=False, progress=True, **kwargs):
    """ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
def resnet152(pretrained=False, progress=True, **kwargs):
    """ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
|
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 4 in each bottleneck's 3x3 conv.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 8 in each bottleneck's 3x3 conv.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = (64 * 2)
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = (64 * 2)
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
|
class Fire(nn.Module):
    """SqueezeNet Fire module.

    A 1x1 "squeeze" conv reduces channels, then two parallel "expand"
    branches (1x1 and padded 3x3) are concatenated along the channel dim,
    giving expand1x1_planes + expand3x3_planes output channels.
    """

    def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = nn.ReLU(inplace=True)
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)
        self.expand3x3_activation = nn.ReLU(inplace=True)

    def forward(self, x):
        squeezed = self.squeeze_activation(self.squeeze(x))
        branch1 = self.expand1x1_activation(self.expand1x1(squeezed))
        branch3 = self.expand3x3_activation(self.expand3x3(squeezed))
        return torch.cat([branch1, branch3], 1)
|
class SqueezeNet(nn.Module):
    """SqueezeNet backbone + conv classifier head.

    Args:
        version: 1.0 (7x7 stem, late pooling) or 1.1 (3x3 stem, earlier
            pooling — cheaper with similar accuracy).
        num_classes: number of output classes; the head is a 1x1 conv with
            this many filters followed by global average pooling.
    """
    def __init__(self, version=1.0, num_classes=1000):
        super(SqueezeNet, self).__init__()
        if (version not in [1.0, 1.1]):
            raise ValueError('Unsupported SqueezeNet version {version}:1.0 or 1.1 expected'.format(version=version))
        self.num_classes = num_classes
        if (version == 1.0):
            # v1.0: 7x7 stem conv, pools placed after fire3 and fire7.
            self.features = nn.Sequential(nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256))
        else:
            # v1.1: 3x3 stem conv, pooling moved earlier in the network.
            self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
        # Classification is done fully-convolutionally: 1x1 conv to
        # num_classes channels, then global average pooling.
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(nn.Dropout(p=0.5), final_conv, nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d((1, 1)))
        # Per the paper's setup: N(0, 0.01) for the classifier conv,
        # Kaiming-uniform for all other convs.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if (m is final_conv):
                    init.normal_(m.weight, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight)
                if (m.bias is not None):
                    init.constant_(m.bias, 0)
    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        # Classifier already pools to 1x1, so this is just a flatten.
        return x.view(x.size(0), self.num_classes)
|
def squeezenet1_0(pretrained=False, **kwargs):
    """SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = SqueezeNet(version=1.0, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0']))
    return model
|
def squeezenet1_1(pretrained=False, **kwargs):
    """SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = SqueezeNet(version=1.1, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))
    return model
|
class VGG(nn.Module):
    """Generic VGG classifier.

    Wraps a caller-supplied ``features`` extractor with a 7x7 adaptive
    average pool and the standard 4096-4096-num_classes FC head.
    ``features`` must emit 512 channels for the head shapes to match.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        # Skipped when loading pretrained weights (they overwrite anyway).
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = pooled.view(pooled.size(0), -1)
        return self.classifier(flat)

    def _initialize_weights(self):
        # Kaiming for convs, N(0, 0.01) for linears, unit/zero for BN.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
|
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into a Sequential feature extractor.

    Each int in ``cfg`` becomes a padded 3x3 conv (optionally followed by
    BatchNorm) plus ReLU; the string 'M' becomes a 2x2 max pool. Input is
    assumed to start at 3 channels (RGB).
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(nn.ReLU(inplace=True))
        in_channels = v
    return nn.Sequential(*layers)
|
def vgg11(pretrained=False, **kwargs):
    """VGG 11-layer model (configuration "A")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A']), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))
    return model
|
def vgg11_bn(pretrained=False, **kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
    return model
|
def vgg13(pretrained=False, **kwargs):
    """VGG 13-layer model (configuration "B")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['B']), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
    return model
|
def vgg13_bn(pretrained=False, **kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))
    return model
|
def vgg16(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['D']), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
|
def vgg16_bn(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
    return model
|
def vgg19(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['E']), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
    return model
|
def vgg19_bn(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random init pointless (and slow).
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
    return model
|
class BasicBlock(nn.Module):
    """Pre-activation wide-ResNet basic block (BN-ReLU-Conv twice).

    When the input and output channel counts differ, a 1x1 convolution
    (``convShortcut``) projects the shortcut; otherwise the identity is
    used. Optional dropout sits between the two convolutions.
    """

    def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.droprate = drop_rate
        self.equalInOut = (in_planes == out_planes)
        # Fixed: the old `(cond and a) or None` idiom is fragile (breaks
        # for any falsy `a`) and obscure; an explicit conditional is the
        # behaviorally identical, readable form.
        self.convShortcut = (None if self.equalInOut else
                             nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False))
        if self.droprate > 0:
            self.dropout = nn.Dropout(p=self.droprate)

    def forward(self, x):
        if not self.equalInOut:
            # Shared pre-activation: the shortcut conv also consumes
            # BN+ReLU(x), so rebind x itself.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = self.dropout(out)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
|
class NetworkBlock(nn.Module):
    """One wide-ResNet stage: ``nb_layers`` blocks stacked sequentially.

    Only the first block changes the channel count and applies the
    stage's stride; all subsequent blocks map out_planes -> out_planes at
    stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    @staticmethod
    def _make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            # Fixed: replaced the `(cond and a) or b` idiom, which silently
            # picks `b` whenever `a` is falsy (e.g. in_planes == 0), with
            # explicit conditionals.
            block_in = in_planes if i == 0 else out_planes
            block_stride = stride if i == 0 else 1
            layers.append(block(block_in, out_planes, block_stride, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
|
class WideResNet(nn.Module):
    """Wide ResNet (BatchNorm variant) for 32x32 inputs.

    ``depth`` must satisfy depth = 6n + 4; each of the three stages then
    holds n BasicBlocks, widened by ``widen_factor``. The final 8x8
    average pool assumes 32x32 inputs (32 -> 32 -> 16 -> 8).
    """

    def __init__(self, depth=10, num_classes=1000, widen_factor=1, drop_rate=0.0):
        super(WideResNet, self).__init__()
        n_channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0
        num_blocks = int((depth - 4) / 6)
        block = BasicBlock
        self.conv1 = nn.Conv2d(3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(num_blocks, n_channels[0], n_channels[1], block, 1, drop_rate)
        self.block2 = NetworkBlock(num_blocks, n_channels[1], n_channels[2], block, 2, drop_rate)
        self.block3 = NetworkBlock(num_blocks, n_channels[2], n_channels[3], block, 2, drop_rate)
        self.bn1 = nn.BatchNorm2d(n_channels[3])
        self.relu = nn.ReLU(inplace=True)
        self.avg_pool = nn.AvgPool2d(8)
        self.fc = nn.Linear(n_channels[3], num_classes)
        self.nChannels = n_channels[3]
        # He-style init: convs ~ N(0, sqrt(2/fan_out)), BN to identity,
        # linear bias to zero (linear weight keeps its default init).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        for stage in (self.block1, self.block2, self.block3):
            out = stage(out)
        out = self.relu(self.bn1(out))
        out = self.avg_pool(out)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
|
class BasicBlock(nn.Module):
    """Pre-activation wide-ResNet block using GroupNorm (16 channels per
    group) instead of BatchNorm — channel counts must be divisible by 16.

    A 1x1 shortcut conv is created only when in/out channels differ;
    otherwise the identity shortcut is used.
    """

    def __init__(self, in_planes, out_planes, stride, drop_rate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.GroupNorm(in_planes // 16, in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.GroupNorm(out_planes // 16, out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.droprate = drop_rate
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (None if self.equalInOut else
                             nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False))
        if self.droprate > 0:
            self.dropout = nn.Dropout(p=self.droprate)

    def forward(self, x):
        if not self.equalInOut:
            # Shortcut conv shares the pre-activation, so rebind x.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = self.dropout(out)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
|
class NetworkBlock(nn.Module):
    """One wide-ResNet stage: ``nb_layers`` blocks stacked sequentially.

    Only the first block changes the channel count and applies the
    stage's stride; all subsequent blocks map out_planes -> out_planes at
    stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    @staticmethod
    def _make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            # Fixed: replaced the `(cond and a) or b` idiom, which silently
            # picks `b` whenever `a` is falsy (e.g. in_planes == 0), with
            # explicit conditionals.
            block_in = in_planes if i == 0 else out_planes
            block_stride = stride if i == 0 else 1
            layers.append(block(block_in, out_planes, block_stride, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
|
class WideResNet(nn.Module):
    """Wide ResNet using GroupNorm blocks, for 32x32 inputs.

    ``depth`` must satisfy depth = 6n + 4; each of the three stages holds
    n GroupNorm BasicBlocks widened by ``widen_factor``. The final 8x8
    average pool assumes 32x32 inputs (32 -> 32 -> 16 -> 8).
    """

    def __init__(self, depth=10, num_classes=1000, widen_factor=1, drop_rate=0.0):
        super(WideResNet, self).__init__()
        n_channels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        assert (((depth - 4) % 6) == 0)
        n = int(((depth - 4) / 6))
        block = BasicBlock
        self.conv1 = nn.Conv2d(3, n_channels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1, drop_rate)
        self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2, drop_rate)
        self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2, drop_rate)
        self.bn1 = nn.GroupNorm((n_channels[3] // 16), n_channels[3])
        self.relu = nn.ReLU(inplace=True)
        self.avg_pool = nn.AvgPool2d(8)
        self.fc = nn.Linear(n_channels[3], num_classes)
        self.nChannels = n_channels[3]
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He init: N(0, sqrt(2 / fan_out)).
                fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            elif isinstance(m, nn.GroupNorm):
                # Fixed: a bare `except:` (with a typo'd print) used to
                # swallow every error here. GroupNorm only lacks
                # weight/bias when affine=False, so guard explicitly.
                if m.weight is not None:
                    m.weight.data.fill_(1)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = self.avg_pool(out)
        out = out.view((- 1), self.nChannels)
        return self.fc(out)
|
def relu_conv_bn(in_channels: int, out_channels: int, kernel_size: int = 1,
                 stride: int = 1, padding: int = 0) -> nn.Module:
    """ReLU -> bias-free Conv2d -> BatchNorm, packaged as one Sequential."""
    return nn.Sequential(
        nn.ReLU(inplace=False),
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
        nn.BatchNorm2d(out_channels),
    )
|
class Classify(nn.Module):
    """Classification head: 7x7 average pool, flatten, linear projection.

    Consumes the (current, previous) state pair produced by ``Cell`` and
    ignores the second element.
    """

    def __init__(self, channels_prev: int, num_classes: int):
        super().__init__()
        self.pool = nn.AvgPool2d(7)
        self.flat = nn.Flatten()
        self.fc = nn.Linear(channels_prev, num_classes)

    def forward(self, states: Tuple[Tensor, Tensor]) -> Tensor:
        x, _skip = states
        return self.fc(self.flat(self.pool(x)))
|
class Stem(nn.Sequential):
    """Input stem: ReLU, stride-2 3x3 conv from RGB, then BatchNorm."""

    def __init__(self, channels: int):
        super().__init__(
            nn.ReLU(inplace=False),
            nn.Conv2d(3, channels, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(channels),
        )
|
class Cell(nn.Module):
    """One AmoebaNet cell: pairwise operations over two incoming states.

    Operation indices/classes come from the module-level
    NORMAL_OPERATIONS / REDUCTION_OPERATIONS tables (defined elsewhere in
    this file); ``reduction`` selects between them and ``reduction_prev``
    says whether the previous cell halved the spatial size, in which case
    the older state needs a FactorizedReduce to match.
    """
    def __init__(self, channels_prev_prev: int, channels_prev: int, channels: int, reduction: bool, reduction_prev: bool):
        super().__init__()
        # Project the immediately preceding state to `channels`.
        self.reduce1 = relu_conv_bn(in_channels=channels_prev, out_channels=channels)
        # The state from two cells back may need spatial and/or channel
        # adjustment; identity when it already matches.
        self.reduce2: nn.Module = nn.Identity()
        if reduction_prev:
            self.reduce2 = FactorizedReduce(channels_prev_prev, channels)
        elif (channels_prev_prev != channels):
            self.reduce2 = relu_conv_bn(in_channels=channels_prev_prev, out_channels=channels)
        if reduction:
            (self.indices, op_classes) = zip(*REDUCTION_OPERATIONS)
            self.concat = REDUCTION_CONCAT
        else:
            (self.indices, op_classes) = zip(*NORMAL_OPERATIONS)
            self.concat = NORMAL_CONCAT
        self.operations = nn.ModuleList()
        for (i, op_class) in zip(self.indices, op_classes):
            # In a reduction cell, only operations reading the two input
            # states (indices 0 and 1) downsample with stride 2.
            if (reduction and (i < 2)):
                stride = 2
            else:
                stride = 1
            op = op_class(channels, stride)
            self.operations.append(op)
    def extra_repr(self) -> str:
        return f'indices: {self.indices}'
    def forward(self, input_or_states: Union[(Tensor, Tuple[(Tensor, Tensor)])]) -> Tuple[(Tensor, Tensor)]:
        # The first cell receives a bare tensor; later cells receive the
        # (current, previous) pair emitted by the preceding cell.
        if isinstance(input_or_states, tuple):
            (s1, s2) = input_or_states
        else:
            s1 = s2 = input_or_states
        # Passed through unchanged as the next cell's "previous" state.
        skip = s1
        s1 = self.reduce1(s1)
        s2 = self.reduce2(s2)
        _states = [s1, s2]
        # Operations come in pairs: each pair reads two earlier states
        # (selected by self.indices) and their sum becomes a new state.
        for i in range(0, len(self.operations), 2):
            h1 = _states[self.indices[i]]
            h2 = _states[self.indices[(i + 1)]]
            op1 = self.operations[i]
            op2 = self.operations[(i + 1)]
            h1 = op1(h1)
            h2 = op2(h2)
            s = (h1 + h2)
            _states.append(s)
        # Output = concat of the states named in self.concat, plus skip.
        return (torch.cat([_states[i] for i in self.concat], dim=1), skip)
|
def amoebanetd(num_classes: int=10, num_layers: int=4, num_filters: int=512) -> nn.Sequential:
    """Build an AmoebaNet-D model as a flat nn.Sequential.

    Layout: stem conv, two reduction cells, then three groups of
    ``num_layers // 3`` normal cells separated by reduction cells, and a
    linear classification head.

    Args:
        num_classes: output classes for the Classify head.
        num_layers: total normal-cell budget; split evenly into 3 groups.
        num_filters: base filter count; cells start at num_filters // 4
            and double at every reduction cell.
    """
    layers = OrderedDict()
    repeat_normal_cells = (num_layers // 3)
    channels = (num_filters // 4)
    channels_prev_prev = channels_prev = channels
    reduction_prev = False
    def make_cells(reduction: bool, channels_scale: int, repeat: int) -> Iterator[Cell]:
        # Generator that yields cells while threading the channel/reduction
        # bookkeeping through the enclosing scope via nonlocal.
        nonlocal channels_prev_prev
        nonlocal channels_prev
        nonlocal channels
        nonlocal reduction_prev
        channels *= channels_scale
        for i in range(repeat):
            cell = Cell(channels_prev_prev, channels_prev, channels, reduction, reduction_prev)
            channels_prev_prev = channels_prev
            # A cell's output is the concat of len(cell.concat) states.
            channels_prev = (channels * len(cell.concat))
            reduction_prev = reduction
            (yield cell)
    def reduction_cell() -> Cell:
        # One reduction cell; doubles `channels` via channels_scale=2.
        return next(make_cells(reduction=True, channels_scale=2, repeat=1))
    def normal_cells() -> Iterator[Tuple[(int, Cell)]]:
        return enumerate(make_cells(reduction=False, channels_scale=1, repeat=repeat_normal_cells))
    layers['stem1'] = Stem(channels)
    layers['stem2'] = reduction_cell()
    layers['stem3'] = reduction_cell()
    layers.update(((f'cell1_normal{(i + 1)}', cell) for (i, cell) in normal_cells()))
    layers['cell2_reduction'] = reduction_cell()
    layers.update(((f'cell3_normal{(i + 1)}', cell) for (i, cell) in normal_cells()))
    layers['cell4_reduction'] = reduction_cell()
    layers.update(((f'cell5_normal{(i + 1)}', cell) for (i, cell) in normal_cells()))
    layers['classify'] = Classify(channels_prev, num_classes)
    return nn.Sequential(layers)
|
def create_pipeline_configuration(DEBUG=False, batch_size=4):
    """Return the two-stage pipeline configuration for BertForQuestionAnswering.

    The static ``config`` literal records, per stage, the input/output tensor
    descriptors as traced with batch size 4; before returning, every batched
    descriptor's shape is rewritten so its batch dimension equals
    ``batch_size``.  When ``DEBUG`` is true, stages are placed on CPU instead
    of cuda:0 / cuda:1.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Softmax, Linear, Tanh, Gelu, Embedding, LayerNorm, Dropout), 'model_inputs': {'attention_mask': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([4, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 1}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([4, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_582': {'shape': torch.Size([4, 12, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]': {'shape': torch.Size([4, 12, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 1}, 1: {'stage_cls': Partition1, 'inputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([4, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_582': {'shape': torch.Size([4, 12, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]': {'shape': torch.Size([4, 12, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([4, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 0}}}
    batch_dim = config['batch_dim']

    def _resize_batch_dim(d):
        # Rewrite the recorded (trace-time) shape so that the batch
        # dimension matches the requested batch_size.  This rewrite was
        # previously duplicated verbatim for model-level and stage-level
        # descriptors; keep it in one place.
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))

    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        _resize_batch_dim(d)
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            _resize_batch_dim(d)
    return config
|
class Partition0(nn.Module):
    """Pipeline stage 0 of the partitioned BertForQuestionAnswering model.

    Generated code: holds the BERT embeddings, encoder layers 0-4, and the
    first half of encoder layer 5 (up to the attention softmax).  forward()
    returns the extended attention mask, the layer-4 hidden states, layer 5's
    permuted value projection, and layer 5's attention probabilities — the
    four tensors stage 1 consumes (see create_pipeline_configuration).
    """
    # Original model scopes of the sub-modules owned by this stage, in the
    # order they are registered as l_0 .. l_73 in __init__.
    LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]']
    # Scopes of stage-owned free tensors (none for this stage).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:0'):
        """Register this stage's sub-modules/tensors and move them to `device`.

        layers/tensors are mappings from original model scope -> module/tensor.
        """
        super().__init__()
        # Attach the stage's layers under the generated names l_0 .. l_73,
        # in LAYER_SCOPES order (forward() refers to them by these names).
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register stage-owned tensors as parameters (p_*) or buffers (b_*).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure of the flat *args consumed by unflatten() in forward():
        # three scalar (non-nested) inputs.
        self.input_structure = [1, 1, 1]
        # Maps generated module names back to the original model's
        # dotted attribute paths (presumably used by the state_dict /
        # named_parameters helpers below — confirm against those helpers).
        self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax', 'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.intermediate.intermediate_act_fn', 'l_15': 'bert.encoder.0.output.dense', 'l_16': 'bert.encoder.0.output.dropout', 'l_17': 'bert.encoder.0.output.LayerNorm', 'l_18': 'bert.encoder.1.attention.self.query', 'l_19': 'bert.encoder.1.attention.self.key', 'l_20': 'bert.encoder.1.attention.self.value', 'l_21': 'bert.encoder.1.attention.self.softmax', 'l_22': 'bert.encoder.1.attention.self.dropout', 'l_23': 'bert.encoder.1.attention.output.dense', 'l_24': 'bert.encoder.1.attention.output.dropout', 'l_25': 'bert.encoder.1.attention.output.LayerNorm', 'l_26': 'bert.encoder.1.intermediate.dense', 'l_27': 'bert.encoder.1.intermediate.intermediate_act_fn', 'l_28': 'bert.encoder.1.output.dense', 'l_29': 'bert.encoder.1.output.dropout', 'l_30': 'bert.encoder.1.output.LayerNorm', 'l_31': 'bert.encoder.2.attention.self.query', 'l_32': 'bert.encoder.2.attention.self.key', 'l_33': 'bert.encoder.2.attention.self.value', 'l_34': 'bert.encoder.2.attention.self.softmax', 'l_35': 'bert.encoder.2.attention.self.dropout', 'l_36': 'bert.encoder.2.attention.output.dense', 'l_37': 'bert.encoder.2.attention.output.dropout', 'l_38': 'bert.encoder.2.attention.output.LayerNorm', 'l_39': 'bert.encoder.2.intermediate.dense', 'l_40': 'bert.encoder.2.intermediate.intermediate_act_fn', 'l_41': 'bert.encoder.2.output.dense', 'l_42':
            'bert.encoder.2.output.dropout', 'l_43': 'bert.encoder.2.output.LayerNorm', 'l_44': 'bert.encoder.3.attention.self.query', 'l_45': 'bert.encoder.3.attention.self.key', 'l_46': 'bert.encoder.3.attention.self.value', 'l_47': 'bert.encoder.3.attention.self.softmax', 'l_48': 'bert.encoder.3.attention.self.dropout', 'l_49': 'bert.encoder.3.attention.output.dense', 'l_50': 'bert.encoder.3.attention.output.dropout', 'l_51': 'bert.encoder.3.attention.output.LayerNorm', 'l_52': 'bert.encoder.3.intermediate.dense', 'l_53': 'bert.encoder.3.intermediate.intermediate_act_fn', 'l_54': 'bert.encoder.3.output.dense', 'l_55': 'bert.encoder.3.output.dropout', 'l_56': 'bert.encoder.3.output.LayerNorm', 'l_57': 'bert.encoder.4.attention.self.query', 'l_58': 'bert.encoder.4.attention.self.key', 'l_59': 'bert.encoder.4.attention.self.value', 'l_60': 'bert.encoder.4.attention.self.softmax', 'l_61': 'bert.encoder.4.attention.self.dropout', 'l_62': 'bert.encoder.4.attention.output.dense', 'l_63': 'bert.encoder.4.attention.output.dropout', 'l_64': 'bert.encoder.4.attention.output.LayerNorm', 'l_65': 'bert.encoder.4.intermediate.dense', 'l_66': 'bert.encoder.4.intermediate.intermediate_act_fn', 'l_67': 'bert.encoder.4.output.dense', 'l_68': 'bert.encoder.4.output.dropout', 'l_69': 'bert.encoder.4.output.LayerNorm', 'l_70': 'bert.encoder.5.attention.self.query', 'l_71': 'bert.encoder.5.attention.self.key', 'l_72': 'bert.encoder.5.attention.self.value', 'l_73': 'bert.encoder.5.attention.self.softmax'}
        self.to(self.device)

    def forward(self, *args):
        """Generated straight-line forward pass for this stage.

        Takes the flattened (attention_mask, input_ids, token_type_ids)
        arguments and returns the flattened 4-tuple of tensors stage 1
        needs.  NOTE: the t_* temporaries are aggressively reused for
        unrelated values — do not reorder statements.
        """
        (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure)
        # Embeddings: word + position + token-type, then LayerNorm/dropout.
        t_0 = self.l_0(input_ids)
        t_1 = self.l_2(token_type_ids)
        # Extended attention mask: (1 - mask) * -10000, broadcastable over
        # the (batch, heads, query, key) attention scores.
        t_2 = attention_mask.unsqueeze(1)
        t_2 = t_2.unsqueeze(2)
        t_2 = t_2.to(dtype=torch.float32)
        t_2 = (1.0 - t_2)
        t_2 = (t_2 * (- 10000.0))
        t_3 = input_ids.size(1)
        t_3 = torch.arange(t_3, dtype=torch.int64, device=self.device)
        t_3 = t_3.unsqueeze(0)
        t_3 = t_3.expand_as(input_ids)
        t_3 = self.l_1(t_3)
        t_3 = (t_0 + t_3)
        t_1 = (t_3 + t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        # Encoder layer 0 (each layer: q/k/v projections, split into
        # 12 heads of 64 dims, scaled dot-product attention with the mask,
        # output projection + residual LayerNorm, then the FFN block).
        t_3 = self.l_5(t_1)
        t_0 = self.l_6(t_1)
        t_4 = self.l_7(t_1)
        t_5 = t_3.size()
        t_6 = t_0.size()
        t_7 = t_4.size()
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (12, 64))
        t_8 = t_5[0]
        t_9 = t_5[1]
        t_10 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_8, t_9, t_10, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (12, 64))
        t_10 = t_6[0]
        t_9 = t_6[1]
        t_8 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_10, t_9, t_8, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (12, 64))
        t_8 = t_7[0]
        t_9 = t_7[1]
        t_10 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_4.view(t_8, t_9, t_10, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_6 = t_6.transpose((- 1), (- 2))
        t_6 = torch.matmul(t_5, t_6)
        t_5 = math.sqrt(64)
        t_5 = (t_6 / t_5)
        t_5 = (t_5 + t_2)
        t_5 = self.l_8(t_5)
        t_5 = self.l_9(t_5)
        t_7 = torch.matmul(t_5, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_5 = t_7.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (768,))
        t_6 = t_5[0]
        t_10 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_7.view(t_6, t_10, t_5)
        t_5 = self.l_10(t_5)
        t_5 = self.l_11(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_12(t_1)
        t_5 = self.l_13(t_1)
        t_5 = self.l_14(t_5)
        t_5 = self.l_15(t_5)
        t_5 = self.l_16(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_17(t_1)
        # Encoder layer 1.
        t_5 = self.l_18(t_1)
        t_10 = self.l_19(t_1)
        t_6 = self.l_20(t_1)
        t_7 = t_5.size()
        t_9 = t_10.size()
        t_8 = t_6.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (12, 64))
        t_4 = t_7[0]
        t_0 = t_7[1]
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_4, t_0, t_3, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (12, 64))
        t_3 = t_9[0]
        t_0 = t_9[1]
        t_4 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_10.view(t_3, t_0, t_4, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (12, 64))
        t_4 = t_8[0]
        t_0 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_6.view(t_4, t_0, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_7, t_9)
        t_7 = math.sqrt(64)
        t_7 = (t_9 / t_7)
        t_7 = (t_7 + t_2)
        t_7 = self.l_21(t_7)
        t_7 = self.l_22(t_7)
        t_8 = torch.matmul(t_7, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_8 = t_8.contiguous()
        t_7 = t_8.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (768,))
        t_9 = t_7[0]
        t_3 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_8.view(t_9, t_3, t_7)
        t_7 = self.l_23(t_7)
        t_7 = self.l_24(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_25(t_1)
        t_7 = self.l_26(t_1)
        t_7 = self.l_27(t_7)
        t_7 = self.l_28(t_7)
        t_7 = self.l_29(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_30(t_1)
        # Encoder layer 2.
        t_7 = self.l_31(t_1)
        t_3 = self.l_32(t_1)
        t_9 = self.l_33(t_1)
        t_8 = t_7.size()
        t_0 = t_3.size()
        t_4 = t_9.size()
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (12, 64))
        t_6 = t_8[0]
        t_10 = t_8[1]
        t_5 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_7.view(t_6, t_10, t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (12, 64))
        t_5 = t_0[0]
        t_10 = t_0[1]
        t_6 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_3.view(t_5, t_10, t_6, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (12, 64))
        t_6 = t_4[0]
        t_10 = t_4[1]
        t_5 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_9.view(t_6, t_10, t_5, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_0 = t_0.transpose((- 1), (- 2))
        t_0 = torch.matmul(t_8, t_0)
        t_8 = math.sqrt(64)
        t_8 = (t_0 / t_8)
        t_8 = (t_8 + t_2)
        t_8 = self.l_34(t_8)
        t_8 = self.l_35(t_8)
        t_4 = torch.matmul(t_8, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_4 = t_4.contiguous()
        t_8 = t_4.size()
        t_8 = t_8[slice(None, (- 2), None)]
        t_8 = (t_8 + (768,))
        t_0 = t_8[0]
        t_5 = t_8[1]
        t_8 = t_8[2]
        t_8 = t_4.view(t_0, t_5, t_8)
        t_8 = self.l_36(t_8)
        t_8 = self.l_37(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_38(t_1)
        t_8 = self.l_39(t_1)
        t_8 = self.l_40(t_8)
        t_8 = self.l_41(t_8)
        t_8 = self.l_42(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_43(t_1)
        # Encoder layer 3.
        t_8 = self.l_44(t_1)
        t_5 = self.l_45(t_1)
        t_0 = self.l_46(t_1)
        t_4 = t_8.size()
        t_10 = t_5.size()
        t_6 = t_0.size()
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (12, 64))
        t_9 = t_4[0]
        t_3 = t_4[1]
        t_7 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_8.view(t_9, t_3, t_7, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_10 = t_10[slice(None, (- 1), None)]
        t_10 = (t_10 + (12, 64))
        t_7 = t_10[0]
        t_3 = t_10[1]
        t_9 = t_10[2]
        t_10 = t_10[3]
        t_10 = t_5.view(t_7, t_3, t_9, t_10)
        t_10 = t_10.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (12, 64))
        t_9 = t_6[0]
        t_3 = t_6[1]
        t_7 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_9, t_3, t_7, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_10 = t_10.transpose((- 1), (- 2))
        t_10 = torch.matmul(t_4, t_10)
        t_4 = math.sqrt(64)
        t_4 = (t_10 / t_4)
        t_4 = (t_4 + t_2)
        t_4 = self.l_47(t_4)
        t_4 = self.l_48(t_4)
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (768,))
        t_10 = t_4[0]
        t_7 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_10, t_7, t_4)
        t_4 = self.l_49(t_4)
        t_4 = self.l_50(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_51(t_1)
        t_4 = self.l_52(t_1)
        t_4 = self.l_53(t_4)
        t_4 = self.l_54(t_4)
        t_4 = self.l_55(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_56(t_1)
        # Encoder layer 4.
        t_4 = self.l_57(t_1)
        t_7 = self.l_58(t_1)
        t_10 = self.l_59(t_1)
        t_6 = t_4.size()
        t_3 = t_7.size()
        t_9 = t_10.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (12, 64))
        t_0 = t_6[0]
        t_5 = t_6[1]
        t_8 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_0, t_5, t_8, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (12, 64))
        t_8 = t_3[0]
        t_5 = t_3[1]
        t_0 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_7.view(t_8, t_5, t_0, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (12, 64))
        t_0 = t_9[0]
        t_5 = t_9[1]
        t_8 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_10.view(t_0, t_5, t_8, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_3 = t_3.transpose((- 1), (- 2))
        t_3 = torch.matmul(t_6, t_3)
        t_6 = math.sqrt(64)
        t_6 = (t_3 / t_6)
        t_6 = (t_6 + t_2)
        t_6 = self.l_60(t_6)
        t_6 = self.l_61(t_6)
        t_9 = torch.matmul(t_6, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_9 = t_9.contiguous()
        t_6 = t_9.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (768,))
        t_3 = t_6[0]
        t_8 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_9.view(t_3, t_8, t_6)
        t_6 = self.l_62(t_6)
        t_6 = self.l_63(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_64(t_1)
        t_6 = self.l_65(t_1)
        t_6 = self.l_66(t_6)
        t_6 = self.l_67(t_6)
        t_6 = self.l_68(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_69(t_1)
        # Encoder layer 5, first half: stop after the attention softmax;
        # stage 1 applies the attention dropout and the rest of the layer.
        t_6 = self.l_70(t_1)
        t_8 = self.l_71(t_1)
        t_3 = self.l_72(t_1)
        t_9 = t_6.size()
        t_5 = t_8.size()
        t_0 = t_3.size()
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (12, 64))
        t_10 = t_9[0]
        t_7 = t_9[1]
        t_4 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_6.view(t_10, t_7, t_4, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (12, 64))
        t_4 = t_5[0]
        t_7 = t_5[1]
        t_10 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_8.view(t_4, t_7, t_10, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (12, 64))
        t_10 = t_0[0]
        t_7 = t_0[1]
        t_4 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_3.view(t_10, t_7, t_4, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_9, t_5)
        t_9 = math.sqrt(64)
        t_9 = (t_5 / t_9)
        t_9 = (t_9 + t_2)
        t_9 = self.l_73(t_9)
        # Outputs handed to stage 1: extended mask, layer-4 hidden states,
        # layer-5 value heads, layer-5 attention probabilities.
        return list(flatten((t_2, t_1, t_0, t_9)))

    # The following wrappers delegate to module-level helper functions
    # (defined elsewhere in this file) instead of nn.Module's defaults.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
|
class Partition1(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]', 'BertForQuestionAnswering/Linear[qa_outputs]']
TENSORS = []  # scopes of free-standing parameters/buffers owned by this stage (none for this partition)
def __init__(self, layers, tensors, device='cuda:1'):
    # Auto-generated pipeline-stage constructor: picks this stage's layers and
    # tensors out of the whole model's scope->object dicts and registers them
    # under positional local names (l_i / p_i / b_i), then moves the stage to
    # its assigned device.
    super().__init__()
    # register every layer this partition owns, in execution order
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    b = p = 0
    # register free-standing parameters/buffers (TENSORS is empty for this stage)
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    self.device = torch.device(device)
    # leaf count expected for each of the 4 flattened forward() inputs (see unflatten)
    self.input_structure = [1, 1, 1, 1]
    # maps local module names (l_i) back to the original model's dotted names
    self.lookup = {'l_0': 'bert.encoder.5.attention.self.dropout', 'l_1': 'bert.encoder.5.attention.output.dense', 'l_2': 'bert.encoder.5.attention.output.dropout', 'l_3': 'bert.encoder.5.attention.output.LayerNorm', 'l_4': 'bert.encoder.5.intermediate.dense', 'l_5': 'bert.encoder.5.intermediate.intermediate_act_fn', 'l_6': 'bert.encoder.5.output.dense', 'l_7': 'bert.encoder.5.output.dropout', 'l_8': 'bert.encoder.5.output.LayerNorm', 'l_9': 'bert.encoder.6.attention.self.query', 'l_10': 'bert.encoder.6.attention.self.key', 'l_11': 'bert.encoder.6.attention.self.value', 'l_12': 'bert.encoder.6.attention.self.softmax', 'l_13': 'bert.encoder.6.attention.self.dropout', 'l_14': 'bert.encoder.6.attention.output.dense', 'l_15': 'bert.encoder.6.attention.output.dropout', 'l_16': 'bert.encoder.6.attention.output.LayerNorm', 'l_17': 'bert.encoder.6.intermediate.dense', 'l_18': 'bert.encoder.6.intermediate.intermediate_act_fn', 'l_19': 'bert.encoder.6.output.dense', 'l_20': 'bert.encoder.6.output.dropout', 'l_21': 'bert.encoder.6.output.LayerNorm', 'l_22': 'bert.encoder.7.attention.self.query', 'l_23': 'bert.encoder.7.attention.self.key', 'l_24': 'bert.encoder.7.attention.self.value', 'l_25': 'bert.encoder.7.attention.self.softmax', 'l_26': 'bert.encoder.7.attention.self.dropout', 'l_27': 'bert.encoder.7.attention.output.dense', 'l_28': 'bert.encoder.7.attention.output.dropout', 'l_29': 'bert.encoder.7.attention.output.LayerNorm', 'l_30': 'bert.encoder.7.intermediate.dense', 'l_31': 'bert.encoder.7.intermediate.intermediate_act_fn', 'l_32': 'bert.encoder.7.output.dense', 'l_33': 'bert.encoder.7.output.dropout', 'l_34': 'bert.encoder.7.output.LayerNorm', 'l_35': 'bert.encoder.8.attention.self.query', 'l_36': 'bert.encoder.8.attention.self.key', 'l_37': 'bert.encoder.8.attention.self.value', 'l_38': 'bert.encoder.8.attention.self.softmax', 'l_39': 'bert.encoder.8.attention.self.dropout', 'l_40': 'bert.encoder.8.attention.output.dense', 'l_41': 
    'bert.encoder.8.attention.output.dropout', 'l_42': 'bert.encoder.8.attention.output.LayerNorm', 'l_43': 'bert.encoder.8.intermediate.dense', 'l_44': 'bert.encoder.8.intermediate.intermediate_act_fn', 'l_45': 'bert.encoder.8.output.dense', 'l_46': 'bert.encoder.8.output.dropout', 'l_47': 'bert.encoder.8.output.LayerNorm', 'l_48': 'bert.encoder.9.attention.self.query', 'l_49': 'bert.encoder.9.attention.self.key', 'l_50': 'bert.encoder.9.attention.self.value', 'l_51': 'bert.encoder.9.attention.self.softmax', 'l_52': 'bert.encoder.9.attention.self.dropout', 'l_53': 'bert.encoder.9.attention.output.dense', 'l_54': 'bert.encoder.9.attention.output.dropout', 'l_55': 'bert.encoder.9.attention.output.LayerNorm', 'l_56': 'bert.encoder.9.intermediate.dense', 'l_57': 'bert.encoder.9.intermediate.intermediate_act_fn', 'l_58': 'bert.encoder.9.output.dense', 'l_59': 'bert.encoder.9.output.dropout', 'l_60': 'bert.encoder.9.output.LayerNorm', 'l_61': 'bert.encoder.10.attention.self.query', 'l_62': 'bert.encoder.10.attention.self.key', 'l_63': 'bert.encoder.10.attention.self.value', 'l_64': 'bert.encoder.10.attention.self.softmax', 'l_65': 'bert.encoder.10.attention.self.dropout', 'l_66': 'bert.encoder.10.attention.output.dense', 'l_67': 'bert.encoder.10.attention.output.dropout', 'l_68': 'bert.encoder.10.attention.output.LayerNorm', 'l_69': 'bert.encoder.10.intermediate.dense', 'l_70': 'bert.encoder.10.intermediate.intermediate_act_fn', 'l_71': 'bert.encoder.10.output.dense', 'l_72': 'bert.encoder.10.output.dropout', 'l_73': 'bert.encoder.10.output.LayerNorm', 'l_74': 'bert.encoder.11.attention.self.query', 'l_75': 'bert.encoder.11.attention.self.key', 'l_76': 'bert.encoder.11.attention.self.value', 'l_77': 'bert.encoder.11.attention.self.softmax', 'l_78': 'bert.encoder.11.attention.self.dropout', 'l_79': 'bert.encoder.11.attention.output.dense', 'l_80': 'bert.encoder.11.attention.output.dropout', 'l_81': 'bert.encoder.11.attention.output.LayerNorm', 'l_82': 
    'bert.encoder.11.intermediate.dense', 'l_83': 'bert.encoder.11.intermediate.intermediate_act_fn', 'l_84': 'bert.encoder.11.output.dense', 'l_85': 'bert.encoder.11.output.dropout', 'l_86': 'bert.encoder.11.output.LayerNorm', 'l_87': 'bert.pooler.dense', 'l_88': 'bert.pooler.activation', 'l_89': 'qa_outputs'}
    self.to(self.device)
def forward(self, *args):
    """Run the second pipeline stage: finish encoder layer 5, run layers 6-11,
    then the QA head (and the pooler, whose output is discarded).

    Flattened inputs (see self.input_structure == [1, 1, 1, 1]):
      x0 -- additive attention-mask tensor, added to every layer's raw scores
      x1 -- residual input for layer 5's attention-output block
      x2 -- tensor matmul'ed with the dropped attention probabilities x3
            (presumably layer 5's value projection from stage 0 -- TODO confirm)
      x3 -- layer 5 attention probabilities, pre-dropout
    Returns a 1-tuple containing the qa_outputs Linear result.
    """
    (x0, x1, x2, x3) = unflatten(args, self.input_structure)
    # --- finish encoder layer 5: attention context -> output block -> FFN ---
    t_0 = self.l_0(x3)
    t_0 = torch.matmul(t_0, x2)
    t_0 = t_0.permute(0, 2, 1, 3)
    t_0 = t_0.contiguous()
    # collapse the (heads, head_dim) axes back to hidden size 768
    t_1 = t_0.size()
    t_1 = t_1[slice(None, (- 2), None)]
    t_1 = (t_1 + (768,))
    t_2 = t_1[0]
    t_3 = t_1[1]
    t_1 = t_1[2]
    t_1 = t_0.view(t_2, t_3, t_1)
    t_1 = self.l_1(t_1)
    t_1 = self.l_2(t_1)
    t_1 = (t_1 + x1)
    t_1 = self.l_3(t_1)
    t_3 = self.l_4(t_1)
    t_3 = self.l_5(t_3)
    t_3 = self.l_6(t_3)
    t_3 = self.l_7(t_3)
    t_1 = (t_3 + t_1)
    t_1 = self.l_8(t_1)
    # --- encoder layer 6 (l_9..l_21): self-attention + FFN ---
    t_3 = self.l_9(t_1)
    t_2 = self.l_10(t_1)
    t_0 = self.l_11(t_1)
    t_4 = t_3.size()
    t_5 = t_2.size()
    t_6 = t_0.size()
    # reshape q/k/v to (batch, 12 heads, seq, 64) via view + permute
    t_4 = t_4[slice(None, (- 1), None)]
    t_4 = (t_4 + (12, 64))
    t_7 = t_4[0]
    t_8 = t_4[1]
    t_9 = t_4[2]
    t_4 = t_4[3]
    t_4 = t_3.view(t_7, t_8, t_9, t_4)
    t_4 = t_4.permute(0, 2, 1, 3)
    t_5 = t_5[slice(None, (- 1), None)]
    t_5 = (t_5 + (12, 64))
    t_9 = t_5[0]
    t_8 = t_5[1]
    t_7 = t_5[2]
    t_5 = t_5[3]
    t_5 = t_2.view(t_9, t_8, t_7, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_6 = t_6[slice(None, (- 1), None)]
    t_6 = (t_6 + (12, 64))
    t_7 = t_6[0]
    t_8 = t_6[1]
    t_9 = t_6[2]
    t_6 = t_6[3]
    t_6 = t_0.view(t_7, t_8, t_9, t_6)
    t_6 = t_6.permute(0, 2, 1, 3)
    # scaled dot-product scores + mask, softmax, dropout, context
    t_5 = t_5.transpose((- 1), (- 2))
    t_5 = torch.matmul(t_4, t_5)
    t_4 = math.sqrt(64)
    t_4 = (t_5 / t_4)
    t_4 = (t_4 + x0)
    t_4 = self.l_12(t_4)
    t_4 = self.l_13(t_4)
    t_6 = torch.matmul(t_4, t_6)
    t_6 = t_6.permute(0, 2, 1, 3)
    t_6 = t_6.contiguous()
    t_4 = t_6.size()
    t_4 = t_4[slice(None, (- 2), None)]
    t_4 = (t_4 + (768,))
    t_5 = t_4[0]
    t_9 = t_4[1]
    t_4 = t_4[2]
    t_4 = t_6.view(t_5, t_9, t_4)
    t_4 = self.l_14(t_4)
    t_4 = self.l_15(t_4)
    t_1 = (t_4 + t_1)
    t_1 = self.l_16(t_1)
    t_4 = self.l_17(t_1)
    t_4 = self.l_18(t_4)
    t_4 = self.l_19(t_4)
    t_4 = self.l_20(t_4)
    t_1 = (t_4 + t_1)
    t_1 = self.l_21(t_1)
    # --- encoder layer 7 (l_22..l_34): same pattern as layer 6 ---
    t_4 = self.l_22(t_1)
    t_9 = self.l_23(t_1)
    t_5 = self.l_24(t_1)
    t_6 = t_4.size()
    t_8 = t_9.size()
    t_7 = t_5.size()
    t_6 = t_6[slice(None, (- 1), None)]
    t_6 = (t_6 + (12, 64))
    t_0 = t_6[0]
    t_2 = t_6[1]
    t_3 = t_6[2]
    t_6 = t_6[3]
    t_6 = t_4.view(t_0, t_2, t_3, t_6)
    t_6 = t_6.permute(0, 2, 1, 3)
    t_8 = t_8[slice(None, (- 1), None)]
    t_8 = (t_8 + (12, 64))
    t_3 = t_8[0]
    t_2 = t_8[1]
    t_0 = t_8[2]
    t_8 = t_8[3]
    t_8 = t_9.view(t_3, t_2, t_0, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_7 = t_7[slice(None, (- 1), None)]
    t_7 = (t_7 + (12, 64))
    t_0 = t_7[0]
    t_2 = t_7[1]
    t_3 = t_7[2]
    t_7 = t_7[3]
    t_7 = t_5.view(t_0, t_2, t_3, t_7)
    t_7 = t_7.permute(0, 2, 1, 3)
    t_8 = t_8.transpose((- 1), (- 2))
    t_8 = torch.matmul(t_6, t_8)
    t_6 = math.sqrt(64)
    t_6 = (t_8 / t_6)
    t_6 = (t_6 + x0)
    t_6 = self.l_25(t_6)
    t_6 = self.l_26(t_6)
    t_7 = torch.matmul(t_6, t_7)
    t_7 = t_7.permute(0, 2, 1, 3)
    t_7 = t_7.contiguous()
    t_6 = t_7.size()
    t_6 = t_6[slice(None, (- 2), None)]
    t_6 = (t_6 + (768,))
    t_8 = t_6[0]
    t_3 = t_6[1]
    t_6 = t_6[2]
    t_6 = t_7.view(t_8, t_3, t_6)
    t_6 = self.l_27(t_6)
    t_6 = self.l_28(t_6)
    t_1 = (t_6 + t_1)
    t_1 = self.l_29(t_1)
    t_6 = self.l_30(t_1)
    t_6 = self.l_31(t_6)
    t_6 = self.l_32(t_6)
    t_6 = self.l_33(t_6)
    t_1 = (t_6 + t_1)
    t_1 = self.l_34(t_1)
    # --- encoder layer 8 (l_35..l_47) ---
    t_6 = self.l_35(t_1)
    t_3 = self.l_36(t_1)
    t_8 = self.l_37(t_1)
    t_7 = t_6.size()
    t_2 = t_3.size()
    t_0 = t_8.size()
    t_7 = t_7[slice(None, (- 1), None)]
    t_7 = (t_7 + (12, 64))
    t_5 = t_7[0]
    t_9 = t_7[1]
    t_4 = t_7[2]
    t_7 = t_7[3]
    t_7 = t_6.view(t_5, t_9, t_4, t_7)
    t_7 = t_7.permute(0, 2, 1, 3)
    t_2 = t_2[slice(None, (- 1), None)]
    t_2 = (t_2 + (12, 64))
    t_4 = t_2[0]
    t_9 = t_2[1]
    t_5 = t_2[2]
    t_2 = t_2[3]
    t_2 = t_3.view(t_4, t_9, t_5, t_2)
    t_2 = t_2.permute(0, 2, 1, 3)
    t_0 = t_0[slice(None, (- 1), None)]
    t_0 = (t_0 + (12, 64))
    t_5 = t_0[0]
    t_9 = t_0[1]
    t_4 = t_0[2]
    t_0 = t_0[3]
    t_0 = t_8.view(t_5, t_9, t_4, t_0)
    t_0 = t_0.permute(0, 2, 1, 3)
    t_2 = t_2.transpose((- 1), (- 2))
    t_2 = torch.matmul(t_7, t_2)
    t_7 = math.sqrt(64)
    t_7 = (t_2 / t_7)
    t_7 = (t_7 + x0)
    t_7 = self.l_38(t_7)
    t_7 = self.l_39(t_7)
    t_0 = torch.matmul(t_7, t_0)
    t_0 = t_0.permute(0, 2, 1, 3)
    t_0 = t_0.contiguous()
    t_7 = t_0.size()
    t_7 = t_7[slice(None, (- 2), None)]
    t_7 = (t_7 + (768,))
    t_2 = t_7[0]
    t_4 = t_7[1]
    t_7 = t_7[2]
    t_7 = t_0.view(t_2, t_4, t_7)
    t_7 = self.l_40(t_7)
    t_7 = self.l_41(t_7)
    t_1 = (t_7 + t_1)
    t_1 = self.l_42(t_1)
    t_7 = self.l_43(t_1)
    t_7 = self.l_44(t_7)
    t_7 = self.l_45(t_7)
    t_7 = self.l_46(t_7)
    t_1 = (t_7 + t_1)
    t_1 = self.l_47(t_1)
    # --- encoder layer 9 (l_48..l_60) ---
    t_7 = self.l_48(t_1)
    t_4 = self.l_49(t_1)
    t_2 = self.l_50(t_1)
    t_0 = t_7.size()
    t_9 = t_4.size()
    t_5 = t_2.size()
    t_0 = t_0[slice(None, (- 1), None)]
    t_0 = (t_0 + (12, 64))
    t_8 = t_0[0]
    t_3 = t_0[1]
    t_6 = t_0[2]
    t_0 = t_0[3]
    t_0 = t_7.view(t_8, t_3, t_6, t_0)
    t_0 = t_0.permute(0, 2, 1, 3)
    t_9 = t_9[slice(None, (- 1), None)]
    t_9 = (t_9 + (12, 64))
    t_6 = t_9[0]
    t_3 = t_9[1]
    t_8 = t_9[2]
    t_9 = t_9[3]
    t_9 = t_4.view(t_6, t_3, t_8, t_9)
    t_9 = t_9.permute(0, 2, 1, 3)
    t_5 = t_5[slice(None, (- 1), None)]
    t_5 = (t_5 + (12, 64))
    t_8 = t_5[0]
    t_3 = t_5[1]
    t_6 = t_5[2]
    t_5 = t_5[3]
    t_5 = t_2.view(t_8, t_3, t_6, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_9 = t_9.transpose((- 1), (- 2))
    t_9 = torch.matmul(t_0, t_9)
    t_0 = math.sqrt(64)
    t_0 = (t_9 / t_0)
    t_0 = (t_0 + x0)
    t_0 = self.l_51(t_0)
    t_0 = self.l_52(t_0)
    t_5 = torch.matmul(t_0, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_5 = t_5.contiguous()
    t_0 = t_5.size()
    t_0 = t_0[slice(None, (- 2), None)]
    t_0 = (t_0 + (768,))
    t_9 = t_0[0]
    t_6 = t_0[1]
    t_0 = t_0[2]
    t_0 = t_5.view(t_9, t_6, t_0)
    t_0 = self.l_53(t_0)
    t_0 = self.l_54(t_0)
    t_1 = (t_0 + t_1)
    t_1 = self.l_55(t_1)
    t_0 = self.l_56(t_1)
    t_0 = self.l_57(t_0)
    t_0 = self.l_58(t_0)
    t_0 = self.l_59(t_0)
    t_1 = (t_0 + t_1)
    t_1 = self.l_60(t_1)
    # --- encoder layer 10 (l_61..l_73) ---
    t_0 = self.l_61(t_1)
    t_6 = self.l_62(t_1)
    t_9 = self.l_63(t_1)
    t_5 = t_0.size()
    t_3 = t_6.size()
    t_8 = t_9.size()
    t_5 = t_5[slice(None, (- 1), None)]
    t_5 = (t_5 + (12, 64))
    t_2 = t_5[0]
    t_4 = t_5[1]
    t_7 = t_5[2]
    t_5 = t_5[3]
    t_5 = t_0.view(t_2, t_4, t_7, t_5)
    t_5 = t_5.permute(0, 2, 1, 3)
    t_3 = t_3[slice(None, (- 1), None)]
    t_3 = (t_3 + (12, 64))
    t_7 = t_3[0]
    t_4 = t_3[1]
    t_2 = t_3[2]
    t_3 = t_3[3]
    t_3 = t_6.view(t_7, t_4, t_2, t_3)
    t_3 = t_3.permute(0, 2, 1, 3)
    t_8 = t_8[slice(None, (- 1), None)]
    t_8 = (t_8 + (12, 64))
    t_2 = t_8[0]
    t_4 = t_8[1]
    t_7 = t_8[2]
    t_8 = t_8[3]
    t_8 = t_9.view(t_2, t_4, t_7, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_3 = t_3.transpose((- 1), (- 2))
    t_3 = torch.matmul(t_5, t_3)
    t_5 = math.sqrt(64)
    t_5 = (t_3 / t_5)
    t_5 = (t_5 + x0)
    t_5 = self.l_64(t_5)
    t_5 = self.l_65(t_5)
    t_8 = torch.matmul(t_5, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_8 = t_8.contiguous()
    t_5 = t_8.size()
    t_5 = t_5[slice(None, (- 2), None)]
    t_5 = (t_5 + (768,))
    t_3 = t_5[0]
    t_7 = t_5[1]
    t_5 = t_5[2]
    t_5 = t_8.view(t_3, t_7, t_5)
    t_5 = self.l_66(t_5)
    t_5 = self.l_67(t_5)
    t_1 = (t_5 + t_1)
    t_1 = self.l_68(t_1)
    t_5 = self.l_69(t_1)
    t_5 = self.l_70(t_5)
    t_5 = self.l_71(t_5)
    t_5 = self.l_72(t_5)
    t_1 = (t_5 + t_1)
    t_1 = self.l_73(t_1)
    # --- encoder layer 11 (l_74..l_86) ---
    t_5 = self.l_74(t_1)
    t_7 = self.l_75(t_1)
    t_3 = self.l_76(t_1)
    t_8 = t_5.size()
    t_4 = t_7.size()
    t_2 = t_3.size()
    t_8 = t_8[slice(None, (- 1), None)]
    t_8 = (t_8 + (12, 64))
    t_9 = t_8[0]
    t_6 = t_8[1]
    t_0 = t_8[2]
    t_8 = t_8[3]
    t_8 = t_5.view(t_9, t_6, t_0, t_8)
    t_8 = t_8.permute(0, 2, 1, 3)
    t_4 = t_4[slice(None, (- 1), None)]
    t_4 = (t_4 + (12, 64))
    t_0 = t_4[0]
    t_6 = t_4[1]
    t_9 = t_4[2]
    t_4 = t_4[3]
    t_4 = t_7.view(t_0, t_6, t_9, t_4)
    t_4 = t_4.permute(0, 2, 1, 3)
    t_2 = t_2[slice(None, (- 1), None)]
    t_2 = (t_2 + (12, 64))
    t_9 = t_2[0]
    t_6 = t_2[1]
    t_0 = t_2[2]
    t_2 = t_2[3]
    t_2 = t_3.view(t_9, t_6, t_0, t_2)
    t_2 = t_2.permute(0, 2, 1, 3)
    t_4 = t_4.transpose((- 1), (- 2))
    t_4 = torch.matmul(t_8, t_4)
    t_8 = math.sqrt(64)
    t_8 = (t_4 / t_8)
    t_8 = (t_8 + x0)
    t_8 = self.l_77(t_8)
    t_8 = self.l_78(t_8)
    t_2 = torch.matmul(t_8, t_2)
    t_2 = t_2.permute(0, 2, 1, 3)
    t_2 = t_2.contiguous()
    t_8 = t_2.size()
    t_8 = t_8[slice(None, (- 2), None)]
    t_8 = (t_8 + (768,))
    t_4 = t_8[0]
    t_0 = t_8[1]
    t_8 = t_8[2]
    t_8 = t_2.view(t_4, t_0, t_8)
    t_8 = self.l_79(t_8)
    t_8 = self.l_80(t_8)
    t_1 = (t_8 + t_1)
    t_1 = self.l_81(t_1)
    t_8 = self.l_82(t_1)
    t_8 = self.l_83(t_8)
    t_8 = self.l_84(t_8)
    t_8 = self.l_85(t_8)
    t_1 = (t_8 + t_1)
    t_1 = self.l_86(t_1)
    # --- heads: qa_outputs on the full sequence; pooler on the [CLS] token ---
    t_8 = self.l_89(t_1)
    t_1 = t_1[(slice(None, None, None), 0)]
    t_1 = self.l_87(t_1)
    t_1 = self.l_88(t_1)
    # NOTE(review): the pooler output (t_1) is computed but never returned;
    # only the qa_outputs result leaves this stage.
    return (t_8,)
def state_dict(self, *args, **kwargs):
    """Return a state dict keyed by original-model names (see module-level helper)."""
    remapped = state_dict(self, *args, **kwargs)
    return remapped
def load_state_dict(self, *args, **kwargs):
    """Load a state dict keyed by original-model names (see module-level helper)."""
    result = load_state_dict(self, *args, **kwargs)
    return result
def named_parameters(self, *args, **kwargs):
    """Yield (original-model name, parameter) pairs (see module-level helper)."""
    translated = named_parameters(self, *args, **kwargs)
    return translated
def named_buffers(self, *args, **kwargs):
    """Yield (original-model name, buffer) pairs (see module-level helper)."""
    translated = named_buffers(self, *args, **kwargs)
    return translated
def cpu(self):
    """Move this partition to CPU, keeping self.device in sync."""
    moved = cpu(self)
    return moved
def cuda(self, device=None):
    """Move this partition to a CUDA device, keeping self.device in sync."""
    moved = cuda(self, device=device)
    return moved
def to(self, *args, **kwargs):
    """nn.Module.to that also records the resolved device on self.device."""
    moved = to(self, *args, **kwargs)
    return moved
|
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
    """Walk *module*'s children depth-first, yielding (layer, scope, parent[, is_leaf]).

    A child counts as a leaf when it has no children of its own, is an instance
    of one of *basic_blocks*, or the remaining *depth* budget is exhausted.
    Non-leaves are recursed into (and, when *full* is set, also yielded with a
    False terminal flag).
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, child in module.named_children():
        scope = f'{prefix}/{type(child).__name__}[{name}]'
        is_leaf = ((depth == 0)
                   or (len(list(child.children())) == 0)
                   or isinstance(child, tuple(basic_blocks)))
        if is_leaf:
            if full:
                yield (child, scope, module, True)
            else:
                yield (child, scope, module)
        else:
            if full:
                yield (child, scope, module, False)
            yield from traverse_model(child, (depth - 1), scope, basic_blocks, full)
|
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Map layer scope name -> layer module for every layer found by traverse_model."""
    mapping = dict()
    for layer, scope, _parent in traverse_model(model, depth, basic_blocks=basic_blocks):
        mapping[scope] = layer
    return mapping
|
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Yield (tensor, scope) for every parameter and buffer of *module*, depth-first.

    Parameters of the current module come first, then its buffers, then the
    children's tensors recursively.
    """
    scope_root = prefix if prefix is not None else type(module).__name__
    for tensor_name, tensor in module.named_parameters(recurse=False):
        yield (tensor, f'{scope_root}/{type(tensor).__name__}[{tensor_name}]')
    for tensor_name, tensor in module.named_buffers(recurse=False):
        yield (tensor, f'{scope_root}/{type(tensor).__name__}[{tensor_name}]')
    for child_name, child in module.named_children():
        child_scope = f'{scope_root}/{type(child).__name__}[{child_name}]'
        yield from traverse_params_buffs(child, child_scope)
|
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Ordered mapping of scope name -> tensor over all params and buffers of *model*."""
    pairs = []
    for tensor, scope in traverse_params_buffs(model):
        pairs.append((scope, tensor))
    return collections.OrderedDict(pairs)
|
def move_tensors(ts, device):
    """Return a copy of the nested structure *ts* with every Module/Tensor moved to *device*."""
    def _move_leaf(leaf):
        # non-tensor, non-module leaves pass through untouched
        if isinstance(leaf, (nn.Module, Tensor)):
            return leaf.to(device)
        return leaf
    return nested_map(_move_leaf, ts)
|
def nested_map(func, ts, full=False):
    """Apply *func* to every leaf of a nested list/tuple/set/dict structure.

    torch.Size values are treated as atomic leaves (they are tuples, so this
    check must come before the sequence check).  When *full* is set, slices
    are also descended into (start/stop/step mapped individually).
    """
    if isinstance(ts, torch.Size):
        return func(ts)
    if isinstance(ts, (list, tuple, set)):
        mapped = (nested_map(func, item, full=full) for item in ts)
        return type(ts)(mapped)
    if isinstance(ts, dict):
        return {key: nested_map(func, value, full=full) for key, value in ts.items()}
    if full and isinstance(ts, slice):
        return slice(nested_map(func, ts.start, full=full),
                     nested_map(func, ts.stop, full=full),
                     nested_map(func, ts.step, full=full))
    return func(ts)
|
def flatten(ts):
    """Yield the leaves of a nested structure in deterministic order.

    Dicts are traversed in sorted-key order; torch.Size values are yielded
    whole (they are tuples, so this check must come first).
    """
    if isinstance(ts, torch.Size):
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        for item in ts:
            yield from flatten(item)
    elif isinstance(ts, dict):
        for _key, value in sorted(ts.items(), key=(lambda kv: kv[0])):
            yield from flatten(value)
    else:
        yield ts
|
def unflatten(xs, structure):
    """Inverse of flatten: rebuild *structure* from the flat sequence *xs*."""
    rebuilt, _consumed = _unflatten(xs, structure)
    return rebuilt
|
def _unflatten(xs, structure):
if isinstance(structure, torch.Size):
return (xs[0], 1)
if (not isinstance(structure, (list, tuple, set, dict))):
return (xs[0], 1)
if isinstance(structure, (list, tuple, set)):
offset = 0
elements = []
for s in structure:
(e, n) = _unflatten(xs[offset:], s)
elements.append(e)
offset += n
return (type(structure)(elements), offset)
assert isinstance(structure, dict)
offset = 0
elements = dict()
for (k, v) in sorted(structure.items(), key=(lambda t: t[0])):
(e, n) = _unflatten(xs[offset:], v)
elements[k] = e
offset += n
return (elements, offset)
|
def state_dict(partition, *args, **kwargs):
    """Return *partition*'s state dict with keys translated to original-model names.

    Local keys are mapped through partition.lookup; for dotted keys only the
    first component (the registered module name) is translated.
    """
    raw = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    translated = dict()
    for key, value in raw.items():
        if key in lookup:
            translated[lookup[key]] = value
            continue
        assert ('.' in key)
        head, _sep, tail = key.partition('.')
        translated[f'{lookup[head]}.{tail}'] = value
    return translated
|
def load_state_dict(partition, state_dict, strict=True):
    """Load *state_dict* (keyed by original-model names) into *partition*.

    Keys are translated back to partition-local names via the inverse of
    partition.lookup and tensors are moved to partition.device first.  Keys
    whose head component is unknown are silently skipped (best effort).
    """
    name_to_local = {orig: local for (local, orig) in partition.lookup.items()}
    target_device = partition.device
    translated = dict()
    for key in list(partition.state_dict(None).keys()):
        if key in name_to_local:
            translated[name_to_local[key]] = state_dict[key].to(target_device)
            continue
        dot = key.rfind('.')
        head = key[:dot]
        if head in name_to_local:
            translated[(name_to_local[head] + key[dot:])] = state_dict[key].to(target_device)
    nn.Module.load_state_dict(partition, translated, strict=strict)
|
def named_buffers(partition, prefix='', recurse=True):
    """Like nn.Module.named_buffers, but names are translated via partition.lookup.

    For dotted names only the first component is translated.
    """
    lookup = partition.lookup
    for key, buf in nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], buf)
        else:
            assert ('.' in key)
            head, _sep, tail = key.partition('.')
            yield (f'{lookup[head]}.{tail}', buf)
|
def named_parameters(partition, prefix='', recurse=True):
    """Like nn.Module.named_parameters, but names are translated via partition.lookup.

    For dotted names only the first component is translated.
    """
    lookup = partition.lookup
    for key, param in nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], param)
        else:
            assert ('.' in key)
            head, _sep, tail = key.partition('.')
            yield (f'{lookup[head]}.{tail}', param)
|
def cpu(partition):
    """Move *partition* to CPU, keeping its tracked .device attribute in sync."""
    cpu_device = torch.device('cpu')
    partition.device = cpu_device
    return nn.Module.cpu(partition)
|
def cuda(partition, device=None):
    """Move *partition* to a CUDA device (current one when *device* is None),
    keeping its tracked .device attribute in sync."""
    target = torch.cuda.current_device() if device is None else device
    partition.device = torch.device(target)
    return nn.Module.cuda(partition, partition.device)
|
def to(partition, *args, **kwargs):
    """nn.Module.to wrapper that also records the resolved target device.

    The device is extracted from the 'device' kwarg, the 'tensor' kwarg's
    device, or the first positional argument (device-like value or tensor);
    positional arguments take precedence, matching the original logic.
    """
    if 'device' in kwargs:
        target = kwargs['device']
    elif 'tensor' in kwargs:
        target = kwargs['tensor'].device
    else:
        target = None
    if args:
        first = args[0]
        if isinstance(first, (torch.device, int, str)):
            target = first
        if torch.is_tensor(first):
            target = first.device
    if target is not None:
        partition.device = torch.device(target)
    return nn.Module.to(partition, *args, **kwargs)
|
def create_pipeline_configuration(DEBUG=False, batch_size=4):
    """Build the two-stage pipeline configuration for the partitioned BERT QA model.

    Parameters:
        DEBUG: when True every stage is placed on CPU instead of its CUDA device.
        batch_size: desired batch size; every traced shape's batch dimension is
            rewritten from the traced value (4) to this value.
    Returns the configuration dict consumed by the pipeline runtime.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Softmax, LayerNorm, Dropout, Linear, Embedding, Gelu, Tanh), 'model_inputs': {'attention_mask': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([4, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 1}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([4, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 1}, 1: {'stage_cls': Partition1, 'inputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([4, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': 
    False, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([4, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 0}}}
    batch_dim = config['batch_dim']

    def _rebatch(descriptor):
        # overwrite the traced batch dimension with the requested batch size
        if descriptor['is_batched']:
            shape = descriptor['shape']
            descriptor['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))

    # same rewrite applies to model-level and per-stage shape descriptors
    descriptors = chain(config['model_inputs'].values(), config['model_outputs'].values())
    for s in config['stages'].values():
        descriptors = chain(descriptors, s['inputs'].values(), s['outputs'].values())
    for d in descriptors:
        _rebatch(d)
    return config
|
class Partition0(nn.Module):
    """Auto-generated pipeline stage 0 of a partitioned BertForQuestionAnswering.

    Holds the embeddings plus encoder layers 0-4 in full and layer 5 up to (and
    including) its BertOutput Linear[dense].  The stage consumes the flat model
    inputs (attention_mask, input_ids, token_type_ids) and hands three tensors
    to stage 1: the additive attention mask, the layer-5 attention-output
    LayerNorm activation, and the layer-5 output-dense activation (see the
    ``return`` at the end of ``forward``).

    NOTE(review): this code is machine-generated by a model-partitioning tool;
    do not hand-edit the dataflow in ``forward`` — temp names t_0..t_10 are
    reused with a different rotation in every encoder layer.
    """

    # Traced-module scope names, in execution order.  Index i here becomes
    # submodule attribute `l_{i}` in __init__, and `lookup['l_{i}']` maps it
    # back to the original model's attribute path.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]']
    # No stage-owned free tensors (parameters/buffers outside traced modules).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:0'):
        """Bind traced submodules/tensors to this stage and move it to `device`.

        Args:
            layers: mapping from traced scope name -> nn.Module instance.
            tensors: mapping from tensor scope name -> Parameter or buffer
                (unused here since TENSORS is empty, kept for the generated
                interface shared with other partitions).
            device: target device string for this stage.
        """
        super().__init__()
        # Register each traced layer under a positional name l_{idx}.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register stage-owned tensors: trainable ones become parameters
        # (p_0, p_1, ...), the rest become buffers (b_0, b_1, ...).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure descriptor used by unflatten() in forward: three
        # single-tensor inputs.
        self.input_structure = [1, 1, 1]
        # Positional module name -> attribute path in the original model
        # (used by the state_dict/named_parameters helpers below).
        self.lookup = {
            'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings',
            'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm',
            'l_4': 'bert.embeddings.dropout',
            'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key',
            'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax',
            'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense',
            'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm',
            'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.intermediate.intermediate_act_fn',
            'l_15': 'bert.encoder.0.output.dense', 'l_16': 'bert.encoder.0.output.dropout',
            'l_17': 'bert.encoder.0.output.LayerNorm',
            'l_18': 'bert.encoder.1.attention.self.query', 'l_19': 'bert.encoder.1.attention.self.key',
            'l_20': 'bert.encoder.1.attention.self.value', 'l_21': 'bert.encoder.1.attention.self.softmax',
            'l_22': 'bert.encoder.1.attention.self.dropout', 'l_23': 'bert.encoder.1.attention.output.dense',
            'l_24': 'bert.encoder.1.attention.output.dropout', 'l_25': 'bert.encoder.1.attention.output.LayerNorm',
            'l_26': 'bert.encoder.1.intermediate.dense', 'l_27': 'bert.encoder.1.intermediate.intermediate_act_fn',
            'l_28': 'bert.encoder.1.output.dense', 'l_29': 'bert.encoder.1.output.dropout',
            'l_30': 'bert.encoder.1.output.LayerNorm',
            'l_31': 'bert.encoder.2.attention.self.query', 'l_32': 'bert.encoder.2.attention.self.key',
            'l_33': 'bert.encoder.2.attention.self.value', 'l_34': 'bert.encoder.2.attention.self.softmax',
            'l_35': 'bert.encoder.2.attention.self.dropout', 'l_36': 'bert.encoder.2.attention.output.dense',
            'l_37': 'bert.encoder.2.attention.output.dropout', 'l_38': 'bert.encoder.2.attention.output.LayerNorm',
            'l_39': 'bert.encoder.2.intermediate.dense', 'l_40': 'bert.encoder.2.intermediate.intermediate_act_fn',
            'l_41': 'bert.encoder.2.output.dense', 'l_42': 'bert.encoder.2.output.dropout',
            'l_43': 'bert.encoder.2.output.LayerNorm',
            'l_44': 'bert.encoder.3.attention.self.query', 'l_45': 'bert.encoder.3.attention.self.key',
            'l_46': 'bert.encoder.3.attention.self.value', 'l_47': 'bert.encoder.3.attention.self.softmax',
            'l_48': 'bert.encoder.3.attention.self.dropout', 'l_49': 'bert.encoder.3.attention.output.dense',
            'l_50': 'bert.encoder.3.attention.output.dropout', 'l_51': 'bert.encoder.3.attention.output.LayerNorm',
            'l_52': 'bert.encoder.3.intermediate.dense', 'l_53': 'bert.encoder.3.intermediate.intermediate_act_fn',
            'l_54': 'bert.encoder.3.output.dense', 'l_55': 'bert.encoder.3.output.dropout',
            'l_56': 'bert.encoder.3.output.LayerNorm',
            'l_57': 'bert.encoder.4.attention.self.query', 'l_58': 'bert.encoder.4.attention.self.key',
            'l_59': 'bert.encoder.4.attention.self.value', 'l_60': 'bert.encoder.4.attention.self.softmax',
            'l_61': 'bert.encoder.4.attention.self.dropout', 'l_62': 'bert.encoder.4.attention.output.dense',
            'l_63': 'bert.encoder.4.attention.output.dropout', 'l_64': 'bert.encoder.4.attention.output.LayerNorm',
            'l_65': 'bert.encoder.4.intermediate.dense', 'l_66': 'bert.encoder.4.intermediate.intermediate_act_fn',
            'l_67': 'bert.encoder.4.output.dense', 'l_68': 'bert.encoder.4.output.dropout',
            'l_69': 'bert.encoder.4.output.LayerNorm',
            'l_70': 'bert.encoder.5.attention.self.query', 'l_71': 'bert.encoder.5.attention.self.key',
            'l_72': 'bert.encoder.5.attention.self.value', 'l_73': 'bert.encoder.5.attention.self.softmax',
            'l_74': 'bert.encoder.5.attention.self.dropout', 'l_75': 'bert.encoder.5.attention.output.dense',
            'l_76': 'bert.encoder.5.attention.output.dropout', 'l_77': 'bert.encoder.5.attention.output.LayerNorm',
            'l_78': 'bert.encoder.5.intermediate.dense', 'l_79': 'bert.encoder.5.intermediate.intermediate_act_fn',
            'l_80': 'bert.encoder.5.output.dense'}
        self.to(self.device)

    def forward(self, *args):
        """Run embeddings + encoder layers 0-4 and layer 5 through its output dense.

        Auto-generated SSA-style trace: temporaries t_0..t_10 are recycled with
        a different assignment rotation in every layer, so statement order is
        load-bearing.  Returns a flat list of the three cross-stage tensors
        (additive attention mask t_2, layer-5 attention-output LayerNorm t_1,
        layer-5 output-dense activation t_9).
        """
        # Unpack the three flat inputs per self.input_structure.
        (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure)
        # --- embeddings ---
        t_0 = self.l_0(input_ids)
        t_1 = self.l_2(token_type_ids)
        # Build the additive attention mask: [B, S] -> [B, 1, 1, S] float,
        # 0.0 for positions to keep and -10000.0 for masked positions.
        t_2 = attention_mask.unsqueeze(1)
        t_2 = t_2.unsqueeze(2)
        t_2 = t_2.to(dtype=torch.float32)
        t_2 = (1.0 - t_2)
        t_2 = (t_2 * (- 10000.0))
        # Position ids 0..seq_len-1, broadcast to the input shape.
        t_3 = input_ids.size(1)
        t_3 = torch.arange(t_3, dtype=torch.int64, device=self.device)
        t_3 = t_3.unsqueeze(0)
        t_3 = t_3.expand_as(input_ids)
        t_3 = self.l_1(t_3)
        # Sum word + position + token-type embeddings, then LayerNorm + dropout.
        t_3 = (t_0 + t_3)
        t_1 = (t_3 + t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        # --- encoder layer 0: self-attention (Q/K/V -> 12 heads of 64) ---
        t_3 = self.l_5(t_1)
        t_0 = self.l_6(t_1)
        t_4 = self.l_7(t_1)
        t_5 = t_3.size()
        t_6 = t_0.size()
        t_7 = t_4.size()
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (12, 64))
        t_8 = t_5[0]
        t_9 = t_5[1]
        t_10 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_8, t_9, t_10, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (12, 64))
        t_10 = t_6[0]
        t_9 = t_6[1]
        t_8 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_10, t_9, t_8, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (12, 64))
        t_8 = t_7[0]
        t_9 = t_7[1]
        t_10 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_4.view(t_8, t_9, t_10, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        # Scaled dot-product scores + additive mask, softmax, dropout.
        t_6 = t_6.transpose((- 1), (- 2))
        t_6 = torch.matmul(t_5, t_6)
        t_5 = math.sqrt(64)
        t_5 = (t_6 / t_5)
        t_5 = (t_5 + t_2)
        t_5 = self.l_8(t_5)
        t_5 = self.l_9(t_5)
        # Weighted values, merge heads back to [B, S, 768].
        t_7 = torch.matmul(t_5, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_5 = t_7.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (768,))
        t_6 = t_5[0]
        t_10 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_7.view(t_6, t_10, t_5)
        # Attention output projection + dropout, residual, LayerNorm.
        t_5 = self.l_10(t_5)
        t_5 = self.l_11(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_12(t_1)
        # Layer-0 feed-forward: dense -> GELU -> dense -> dropout, residual, LayerNorm.
        t_5 = self.l_13(t_1)
        t_5 = self.l_14(t_5)
        t_5 = self.l_15(t_5)
        t_5 = self.l_16(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_17(t_1)
        # --- encoder layer 1 (same pattern as layer 0) ---
        t_5 = self.l_18(t_1)
        t_10 = self.l_19(t_1)
        t_6 = self.l_20(t_1)
        t_7 = t_5.size()
        t_9 = t_10.size()
        t_8 = t_6.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (12, 64))
        t_4 = t_7[0]
        t_0 = t_7[1]
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_4, t_0, t_3, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (12, 64))
        t_3 = t_9[0]
        t_0 = t_9[1]
        t_4 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_10.view(t_3, t_0, t_4, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (12, 64))
        t_4 = t_8[0]
        t_0 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_6.view(t_4, t_0, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_7, t_9)
        t_7 = math.sqrt(64)
        t_7 = (t_9 / t_7)
        t_7 = (t_7 + t_2)
        t_7 = self.l_21(t_7)
        t_7 = self.l_22(t_7)
        t_8 = torch.matmul(t_7, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_8 = t_8.contiguous()
        t_7 = t_8.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (768,))
        t_9 = t_7[0]
        t_3 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_8.view(t_9, t_3, t_7)
        t_7 = self.l_23(t_7)
        t_7 = self.l_24(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_25(t_1)
        t_7 = self.l_26(t_1)
        t_7 = self.l_27(t_7)
        t_7 = self.l_28(t_7)
        t_7 = self.l_29(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_30(t_1)
        # --- encoder layer 2 ---
        t_7 = self.l_31(t_1)
        t_3 = self.l_32(t_1)
        t_9 = self.l_33(t_1)
        t_8 = t_7.size()
        t_0 = t_3.size()
        t_4 = t_9.size()
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (12, 64))
        t_6 = t_8[0]
        t_10 = t_8[1]
        t_5 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_7.view(t_6, t_10, t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (12, 64))
        t_5 = t_0[0]
        t_10 = t_0[1]
        t_6 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_3.view(t_5, t_10, t_6, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (12, 64))
        t_6 = t_4[0]
        t_10 = t_4[1]
        t_5 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_9.view(t_6, t_10, t_5, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_0 = t_0.transpose((- 1), (- 2))
        t_0 = torch.matmul(t_8, t_0)
        t_8 = math.sqrt(64)
        t_8 = (t_0 / t_8)
        t_8 = (t_8 + t_2)
        t_8 = self.l_34(t_8)
        t_8 = self.l_35(t_8)
        t_4 = torch.matmul(t_8, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_4 = t_4.contiguous()
        t_8 = t_4.size()
        t_8 = t_8[slice(None, (- 2), None)]
        t_8 = (t_8 + (768,))
        t_0 = t_8[0]
        t_5 = t_8[1]
        t_8 = t_8[2]
        t_8 = t_4.view(t_0, t_5, t_8)
        t_8 = self.l_36(t_8)
        t_8 = self.l_37(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_38(t_1)
        t_8 = self.l_39(t_1)
        t_8 = self.l_40(t_8)
        t_8 = self.l_41(t_8)
        t_8 = self.l_42(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_43(t_1)
        # --- encoder layer 3 ---
        t_8 = self.l_44(t_1)
        t_5 = self.l_45(t_1)
        t_0 = self.l_46(t_1)
        t_4 = t_8.size()
        t_10 = t_5.size()
        t_6 = t_0.size()
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (12, 64))
        t_9 = t_4[0]
        t_3 = t_4[1]
        t_7 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_8.view(t_9, t_3, t_7, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_10 = t_10[slice(None, (- 1), None)]
        t_10 = (t_10 + (12, 64))
        t_7 = t_10[0]
        t_3 = t_10[1]
        t_9 = t_10[2]
        t_10 = t_10[3]
        t_10 = t_5.view(t_7, t_3, t_9, t_10)
        t_10 = t_10.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (12, 64))
        t_9 = t_6[0]
        t_3 = t_6[1]
        t_7 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_9, t_3, t_7, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_10 = t_10.transpose((- 1), (- 2))
        t_10 = torch.matmul(t_4, t_10)
        t_4 = math.sqrt(64)
        t_4 = (t_10 / t_4)
        t_4 = (t_4 + t_2)
        t_4 = self.l_47(t_4)
        t_4 = self.l_48(t_4)
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (768,))
        t_10 = t_4[0]
        t_7 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_10, t_7, t_4)
        t_4 = self.l_49(t_4)
        t_4 = self.l_50(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_51(t_1)
        t_4 = self.l_52(t_1)
        t_4 = self.l_53(t_4)
        t_4 = self.l_54(t_4)
        t_4 = self.l_55(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_56(t_1)
        # --- encoder layer 4 ---
        t_4 = self.l_57(t_1)
        t_7 = self.l_58(t_1)
        t_10 = self.l_59(t_1)
        t_6 = t_4.size()
        t_3 = t_7.size()
        t_9 = t_10.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (12, 64))
        t_0 = t_6[0]
        t_5 = t_6[1]
        t_8 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_0, t_5, t_8, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (12, 64))
        t_8 = t_3[0]
        t_5 = t_3[1]
        t_0 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_7.view(t_8, t_5, t_0, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (12, 64))
        t_0 = t_9[0]
        t_5 = t_9[1]
        t_8 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_10.view(t_0, t_5, t_8, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_3 = t_3.transpose((- 1), (- 2))
        t_3 = torch.matmul(t_6, t_3)
        t_6 = math.sqrt(64)
        t_6 = (t_3 / t_6)
        t_6 = (t_6 + t_2)
        t_6 = self.l_60(t_6)
        t_6 = self.l_61(t_6)
        t_9 = torch.matmul(t_6, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_9 = t_9.contiguous()
        t_6 = t_9.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (768,))
        t_3 = t_6[0]
        t_8 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_9.view(t_3, t_8, t_6)
        t_6 = self.l_62(t_6)
        t_6 = self.l_63(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_64(t_1)
        t_6 = self.l_65(t_1)
        t_6 = self.l_66(t_6)
        t_6 = self.l_67(t_6)
        t_6 = self.l_68(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_69(t_1)
        # --- encoder layer 5: attention + FFN up to the output dense only;
        # the remaining dropout/LayerNorm of this layer live in Partition1. ---
        t_6 = self.l_70(t_1)
        t_8 = self.l_71(t_1)
        t_3 = self.l_72(t_1)
        t_9 = t_6.size()
        t_5 = t_8.size()
        t_0 = t_3.size()
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (12, 64))
        t_10 = t_9[0]
        t_7 = t_9[1]
        t_4 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_6.view(t_10, t_7, t_4, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (12, 64))
        t_4 = t_5[0]
        t_7 = t_5[1]
        t_10 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_8.view(t_4, t_7, t_10, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (12, 64))
        t_10 = t_0[0]
        t_7 = t_0[1]
        t_4 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_3.view(t_10, t_7, t_4, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_9, t_5)
        t_9 = math.sqrt(64)
        t_9 = (t_5 / t_9)
        t_9 = (t_9 + t_2)
        t_9 = self.l_73(t_9)
        t_9 = self.l_74(t_9)
        t_0 = torch.matmul(t_9, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_0 = t_0.contiguous()
        t_9 = t_0.size()
        t_9 = t_9[slice(None, (- 2), None)]
        t_9 = (t_9 + (768,))
        t_5 = t_9[0]
        t_4 = t_9[1]
        t_9 = t_9[2]
        t_9 = t_0.view(t_5, t_4, t_9)
        t_9 = self.l_75(t_9)
        t_9 = self.l_76(t_9)
        t_1 = (t_9 + t_1)
        t_1 = self.l_77(t_1)
        t_9 = self.l_78(t_1)
        t_9 = self.l_79(t_9)
        t_9 = self.l_80(t_9)
        # Flatten the cross-stage outputs for transmission to Partition1.
        return list(flatten((t_2, t_1, t_9)))

    # The methods below delegate to module-level helpers (defined elsewhere in
    # this file) — presumably so that keys/names follow the original model's
    # attribute paths via self.lookup; confirm against the helper definitions.
    def state_dict(self, *args, **kwargs):
        # Delegates to the module-level state_dict helper.
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        # Delegates to the module-level load_state_dict helper.
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        # Delegates to the module-level named_parameters helper.
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        # Delegates to the module-level named_buffers helper.
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        # Delegates to the module-level cpu helper.
        return cpu(self)

    def cuda(self, device=None):
        # Delegates to the module-level cuda helper.
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        # Delegates to the module-level to helper.
        return to(self, *args, **kwargs)
|
class Partition1(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]', 'BertForQuestionAnswering/Linear[qa_outputs]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:1'):
    """Bind this pipeline stage's traced sub-modules and tensors.

    layers  -- dict mapping full traced scope names to sub-modules; each entry of
               LAYER_SCOPES is registered, in order, as attribute ``l_<idx>``.
    tensors -- dict mapping scope names to free-standing parameters/buffers
               (TENSORS is empty for this stage, so the loop is a no-op here).
    device  -- target device for the whole stage.
    """
    super().__init__()
    # register the stage's layers as l_0 ... l_N in declaration order
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    # free-standing parameters become p_0, p_1, ... and buffers b_0, b_1, ...
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    self.device = torch.device(device)
    # forward() receives 3 flat positional inputs (see unflatten): each '1' is one leaf
    self.input_structure = [1, 1, 1]
    # maps local attribute names (l_*) back to the original model's state-dict prefixes;
    # used by the state_dict/load_state_dict/named_* helpers to translate key names
    self.lookup = {'l_0': 'bert.encoder.5.output.dropout', 'l_1': 'bert.encoder.5.output.LayerNorm', 'l_2': 'bert.encoder.6.attention.self.query', 'l_3': 'bert.encoder.6.attention.self.key', 'l_4': 'bert.encoder.6.attention.self.value', 'l_5': 'bert.encoder.6.attention.self.softmax', 'l_6': 'bert.encoder.6.attention.self.dropout', 'l_7': 'bert.encoder.6.attention.output.dense', 'l_8': 'bert.encoder.6.attention.output.dropout', 'l_9': 'bert.encoder.6.attention.output.LayerNorm', 'l_10': 'bert.encoder.6.intermediate.dense', 'l_11': 'bert.encoder.6.intermediate.intermediate_act_fn', 'l_12': 'bert.encoder.6.output.dense', 'l_13': 'bert.encoder.6.output.dropout', 'l_14': 'bert.encoder.6.output.LayerNorm', 'l_15': 'bert.encoder.7.attention.self.query', 'l_16': 'bert.encoder.7.attention.self.key', 'l_17': 'bert.encoder.7.attention.self.value', 'l_18': 'bert.encoder.7.attention.self.softmax', 'l_19': 'bert.encoder.7.attention.self.dropout', 'l_20': 'bert.encoder.7.attention.output.dense', 'l_21': 'bert.encoder.7.attention.output.dropout', 'l_22': 'bert.encoder.7.attention.output.LayerNorm', 'l_23': 'bert.encoder.7.intermediate.dense', 'l_24': 'bert.encoder.7.intermediate.intermediate_act_fn', 'l_25': 'bert.encoder.7.output.dense', 'l_26': 'bert.encoder.7.output.dropout', 'l_27': 'bert.encoder.7.output.LayerNorm', 'l_28': 'bert.encoder.8.attention.self.query', 'l_29': 'bert.encoder.8.attention.self.key', 'l_30': 'bert.encoder.8.attention.self.value', 'l_31': 'bert.encoder.8.attention.self.softmax', 'l_32': 'bert.encoder.8.attention.self.dropout', 'l_33': 'bert.encoder.8.attention.output.dense', 'l_34': 'bert.encoder.8.attention.output.dropout', 'l_35': 'bert.encoder.8.attention.output.LayerNorm', 'l_36': 'bert.encoder.8.intermediate.dense', 'l_37': 'bert.encoder.8.intermediate.intermediate_act_fn', 'l_38': 'bert.encoder.8.output.dense', 'l_39': 'bert.encoder.8.output.dropout', 'l_40': 'bert.encoder.8.output.LayerNorm', 'l_41': 'bert.encoder.9.attention.self.query', 'l_42': 
    'bert.encoder.9.attention.self.key', 'l_43': 'bert.encoder.9.attention.self.value', 'l_44': 'bert.encoder.9.attention.self.softmax', 'l_45': 'bert.encoder.9.attention.self.dropout', 'l_46': 'bert.encoder.9.attention.output.dense', 'l_47': 'bert.encoder.9.attention.output.dropout', 'l_48': 'bert.encoder.9.attention.output.LayerNorm', 'l_49': 'bert.encoder.9.intermediate.dense', 'l_50': 'bert.encoder.9.intermediate.intermediate_act_fn', 'l_51': 'bert.encoder.9.output.dense', 'l_52': 'bert.encoder.9.output.dropout', 'l_53': 'bert.encoder.9.output.LayerNorm', 'l_54': 'bert.encoder.10.attention.self.query', 'l_55': 'bert.encoder.10.attention.self.key', 'l_56': 'bert.encoder.10.attention.self.value', 'l_57': 'bert.encoder.10.attention.self.softmax', 'l_58': 'bert.encoder.10.attention.self.dropout', 'l_59': 'bert.encoder.10.attention.output.dense', 'l_60': 'bert.encoder.10.attention.output.dropout', 'l_61': 'bert.encoder.10.attention.output.LayerNorm', 'l_62': 'bert.encoder.10.intermediate.dense', 'l_63': 'bert.encoder.10.intermediate.intermediate_act_fn', 'l_64': 'bert.encoder.10.output.dense', 'l_65': 'bert.encoder.10.output.dropout', 'l_66': 'bert.encoder.10.output.LayerNorm', 'l_67': 'bert.encoder.11.attention.self.query', 'l_68': 'bert.encoder.11.attention.self.key', 'l_69': 'bert.encoder.11.attention.self.value', 'l_70': 'bert.encoder.11.attention.self.softmax', 'l_71': 'bert.encoder.11.attention.self.dropout', 'l_72': 'bert.encoder.11.attention.output.dense', 'l_73': 'bert.encoder.11.attention.output.dropout', 'l_74': 'bert.encoder.11.attention.output.LayerNorm', 'l_75': 'bert.encoder.11.intermediate.dense', 'l_76': 'bert.encoder.11.intermediate.intermediate_act_fn', 'l_77': 'bert.encoder.11.output.dense', 'l_78': 'bert.encoder.11.output.dropout', 'l_79': 'bert.encoder.11.output.LayerNorm', 'l_80': 'bert.pooler.dense', 'l_81': 'bert.pooler.activation', 'l_82': 'qa_outputs'}
    self.to(self.device)
def forward(self, *args):
    """Run this pipeline stage: the tail of encoder layer 5 (dropout ->
    residual add -> LayerNorm per self.lookup), encoder layers 6-11, and the
    QA head. The pooler path is also evaluated (matching the traced graph)
    but its result is discarded.

    ``args`` unflattens via ``self.input_structure`` into three tensors:
      mask     -- additive attention mask, added to raw attention scores
      residual -- skip-connection input of layer 5's output block
      hidden   -- layer 5 output-dense activations (pre-dropout)

    Returns a one-tuple holding the qa_outputs logits.
    """
    (mask, residual, hidden) = unflatten(args, self.input_structure)
    # finish encoder layer 5: dropout -> add residual -> LayerNorm
    h = self.l_1(self.l_0(hidden) + residual)

    def split_heads(t):
        # (batch, seq, 768) -> (batch, 12, seq, 64), exactly as the trace does it
        shape = t.size()[:-1] + (12, 64)
        return t.view(shape[0], shape[1], shape[2], shape[3]).permute(0, 2, 1, 3)

    # each encoder layer occupies 13 consecutive sub-modules l_{base} .. l_{base+12}
    # (query, key, value, softmax, attn dropout, attn-out dense/dropout/LayerNorm,
    #  intermediate dense, activation, output dense/dropout/LayerNorm)
    for base in range(2, 80, 13):
        (query, key, value, softmax, attn_drop,
         attn_dense, attn_out_drop, attn_ln,
         mid_dense, mid_act, out_dense, out_drop, out_ln) = \
            [getattr(self, f'l_{base + i}') for i in range(13)]
        q = split_heads(query(h))
        k = split_heads(key(h))
        v = split_heads(value(h))
        # scaled dot-product attention with the additive mask
        scores = (torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)) + mask
        probs = attn_drop(softmax(scores))
        ctx = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged = ctx.size()[:-2] + (768,)
        ctx = ctx.view(merged[0], merged[1], merged[2])
        # self-attention output block, then feed-forward block, each with
        # residual connection and LayerNorm
        h = attn_ln(attn_out_drop(attn_dense(ctx)) + h)
        h = out_ln(out_drop(out_dense(mid_act(mid_dense(h)))) + h)

    logits = self.l_82(h)
    # pooler path present in the traced graph; computed but not returned
    self.l_81(self.l_80(h[:, 0]))
    return (logits,)
def state_dict(self, *args, **kwargs):
    # delegate to the module-level helper, which renames keys via self.lookup
    return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
    # delegate to the module-level helper, which maps original-model key names
    # back to this partition's local l_*/p_*/b_* names
    return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
    # delegate to the module-level helper, which renames parameters via self.lookup
    return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
    # delegate to the module-level helper, which renames buffers via self.lookup
    return named_buffers(self, *args, **kwargs)
def cpu(self):
    # delegate to the module-level helper, which also updates self.device
    return cpu(self)
def cuda(self, device=None):
    # delegate to the module-level helper, which also updates self.device
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # delegate to the module-level helper, which tracks the target on self.device
    return to(self, *args, **kwargs)
|
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module], ...]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
    """Iterate over the layers of ``module``.

    Yields ``(layer, scope, parent)`` triples, or ``(layer, scope, parent,
    terminal)`` quadruples when ``full`` is True.

    Parameters
    ----------
    module:
        the model to iterate over
    depth:
        how far down the module tree to recurse
    prefix:
        scope prefix of ``module``; defaults to its class name
    basic_blocks:
        module types yielded whole instead of being broken into children
        (fix: annotated as a variadic tuple — ``Tuple[Type[nn.Module]]``
        meant a tuple of exactly one element)
    full:
        if True, also yield non-terminal containers, with a boolean flag
        marking whether the yielded module is a leaf of the traversal
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, sub_module in module.named_children():
        scope = f'{prefix}/{type(sub_module).__name__}[{name}]'
        # a sub-module is terminal if it has no children, is a designated
        # basic block, or the depth budget is exhausted
        terminal = (len(list(sub_module.children())) == 0
                    or isinstance(sub_module, tuple(basic_blocks))
                    or depth == 0)
        if terminal:
            yield (sub_module, scope, module, True) if full else (sub_module, scope, module)
        else:
            if full:
                yield (sub_module, scope, module, False)
            yield from traverse_model(sub_module, depth - 1, scope, basic_blocks, full)
|
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Map layer scope name -> layer module, as produced by ``traverse_model``."""
    mapping = {}
    for layer, scope, _parent in traverse_model(model, depth, basic_blocks=basic_blocks):
        mapping[scope] = layer
    return mapping
|
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Yield ``(tensor, scope)`` for every parameter and buffer in ``module``'s tree.

    At each level, direct parameters come first, then direct buffers, then the
    children in ``named_children`` order (depth-first).
    """
    if prefix is None:
        prefix = type(module).__name__
    for p_name, p in module.named_parameters(recurse=False):
        yield (p, f'{prefix}/{type(p).__name__}[{p_name}]')
    for b_name, b in module.named_buffers(recurse=False):
        yield (b, f'{prefix}/{type(b).__name__}[{b_name}]')
    for child_name, child in module.named_children():
        child_prefix = f'{prefix}/{type(child).__name__}[{child_name}]'
        yield from traverse_params_buffs(child, child_prefix)
|
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Ordered mapping scope name -> tensor for all of ``model``'s params/buffers."""
    result = collections.OrderedDict()
    for tensor, scope in traverse_params_buffs(model):
        result[scope] = tensor
    return result
|
def move_tensors(ts, device):
    """Recursively move every tensor/module inside the nested structure ``ts``
    to ``device``; non-tensor leaves pass through unchanged."""
    def _move_one(obj):
        return obj.to(device) if isinstance(obj, (nn.Module, Tensor)) else obj
    return nested_map(_move_one, ts)
|
def nested_map(func, ts, full=False):
    """Apply ``func`` to every leaf of the nested structure ``ts``.

    Lists/tuples/sets and dict values are descended into; ``torch.Size`` is an
    atomic leaf; slices are only descended into when ``full`` is True,
    otherwise ``func`` is applied to the slice object itself.
    """
    if isinstance(ts, torch.Size):
        # a Size is a tuple, but is deliberately treated as a single leaf
        return func(ts)
    if isinstance(ts, (list, tuple, set)):
        mapped = (nested_map(func, item, full=full) for item in ts)
        return type(ts)(mapped)
    if isinstance(ts, dict):
        return {key: nested_map(func, value, full=full) for key, value in ts.items()}
    if full and isinstance(ts, slice):
        return slice(nested_map(func, ts.start, full=full),
                     nested_map(func, ts.stop, full=full),
                     nested_map(func, ts.step, full=full))
    return func(ts)
|
def flatten(ts):
    """Yield the leaves of the nested structure ``ts`` in deterministic order.

    ``torch.Size`` is atomic; dict entries are visited in sorted-key order so
    the flattening matches ``unflatten``.
    """
    if isinstance(ts, torch.Size):
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        for item in ts:
            yield from flatten(item)
    elif isinstance(ts, dict):
        for _key, value in sorted(ts.items(), key=lambda kv: kv[0]):
            yield from flatten(value)
    else:
        yield ts
|
def unflatten(xs, structure):
    """Rebuild the nested ``structure`` from the flat sequence ``xs``
    (inverse of ``flatten``)."""
    value, _consumed = _unflatten(xs, structure)
    return value
|
def _unflatten(xs, structure):
    """Recursive worker for ``unflatten``.

    Returns ``(value, consumed)`` where ``consumed`` is how many leaves of
    ``xs`` were used. ``torch.Size`` and any non-container structure consume
    exactly one leaf; dicts are rebuilt in sorted-key order to match
    ``flatten``.
    """
    if isinstance(structure, torch.Size) or not isinstance(structure, (list, tuple, set, dict)):
        return (xs[0], 1)
    if isinstance(structure, dict):
        consumed = 0
        rebuilt = dict()
        for key, sub in sorted(structure.items(), key=lambda kv: kv[0]):
            value, used = _unflatten(xs[consumed:], sub)
            rebuilt[key] = value
            consumed += used
        return (rebuilt, consumed)
    consumed = 0
    items = []
    for sub in structure:
        value, used = _unflatten(xs[consumed:], sub)
        items.append(value)
        consumed += used
    return (type(structure)(items), consumed)
|
def state_dict(partition, *args, **kwargs):
    """``nn.Module.state_dict`` with partition-local key names translated to
    the original model's names via ``partition.lookup``.

    Keys that are not direct lookup entries must be ``<local>.<suffix>``;
    only the local head is translated and the suffix is kept.
    """
    raw = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    translated = dict()
    for key, value in raw.items():
        if key in lookup:
            translated[lookup[key]] = value
            continue
        assert '.' in key
        dot = key.find('.')
        translated[lookup[key[:dot]] + key[dot:]] = value
    return translated
|
def load_state_dict(partition, state_dict, strict=True):
    """Load a state dict keyed by the original model's names.

    The partition's own (translated) keys are mapped back to local names via
    the inverse of ``partition.lookup``, tensors are moved to
    ``partition.device``, and keys matching neither a lookup entry nor a
    ``<entry>.<suffix>`` pattern are silently skipped.
    """
    reverse_lookup = {original: local for (local, original) in partition.lookup.items()}
    device = partition.device
    new_state = dict()
    for key in list(partition.state_dict(None).keys()):
        if key in reverse_lookup:
            new_state[reverse_lookup[key]] = state_dict[key].to(device)
            continue
        dot = key.rfind('.')
        head = key[:dot]
        if head in reverse_lookup:
            new_state[reverse_lookup[head] + key[dot:]] = state_dict[key].to(device)
    nn.Module.load_state_dict(partition, new_state, strict=strict)
|
def named_buffers(partition, prefix='', recurse=True):
    """``nn.Module.named_buffers`` with names translated via ``partition.lookup``
    (non-entry keys have only their local head translated)."""
    lookup = partition.lookup
    for key, buf in nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], buf)
        else:
            assert '.' in key
            dot = key.find('.')
            yield (lookup[key[:dot]] + key[dot:], buf)
|
def named_parameters(partition, prefix='', recurse=True):
    """``nn.Module.named_parameters`` with names translated via
    ``partition.lookup`` (non-entry keys have only their local head translated)."""
    lookup = partition.lookup
    for key, param in nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], param)
        else:
            assert '.' in key
            dot = key.find('.')
            yield (lookup[key[:dot]] + key[dot:], param)
|
def cpu(partition):
    """Move the partition to the CPU and record the new device on it."""
    moved = nn.Module.cpu(partition)
    partition.device = torch.device('cpu')
    return moved
|
def cuda(partition, device=None):
    """Move the partition to a CUDA device (the current device when ``device``
    is None) and record it on ``partition.device``."""
    target = torch.cuda.current_device() if device is None else device
    partition.device = torch.device(target)
    return nn.Module.cuda(partition, partition.device)
|
def to(partition, *args, **kwargs):
    """``nn.Module.to`` that also tracks the resolved target device on
    ``partition.device``.

    Precedence: ``device=`` kwarg, else ``tensor=`` kwarg's device, then a
    positional device/int/str or tensor overrides either. If no device can be
    resolved, ``partition.device`` is left untouched.
    """
    device = None
    if 'device' in kwargs:
        device = kwargs['device']
    elif 'tensor' in kwargs:
        # NOTE(review): upstream nn.Module.to has no 'tensor' kwarg — confirm callers
        device = kwargs['tensor'].device
    if args:
        first = args[0]
        if isinstance(first, (torch.device, int, str)):
            device = first
        if torch.is_tensor(first):
            device = first.device
    if device is not None:
        partition.device = torch.device(device)
    return nn.Module.to(partition, *args, **kwargs)
|
def create_pipeline_configuration(DEBUG=False):
    """Build the 8-stage pipeline configuration for the partitioned model.

    DEBUG -- when True, every stage is placed on 'cpu' instead of cuda:0..cuda:7.
    Returns a dict with batch dim, traversal depth, basic-block types, model
    input/output specs (shape/dtype/is_batched), and per-stage specs
    (inputs, outputs, stage_cls import path, devices).
    """
    depth = 10000
    basic_blocks = (Tanh, Dropout, BertSelfAttention, LayerNorm, Embedding, Gelu, Linear)
    # dotted import paths corresponding to basic_blocks, in the same order
    blocks_path = ['torch.nn.modules.activation.Tanh', 'torch.nn.modules.dropout.Dropout', 'models.normal.NLP_models.modeling_bert_old.BertSelfAttention', 'torch.nn.modules.normalization.LayerNorm', 'torch.nn.modules.sparse.Embedding', 'models.normal.NLP_models.modeling_bert_old.Gelu', 'torch.nn.modules.linear.Linear']
    # this generated module's import path, derived from its file location ('.py' stripped)
    module_path = os.path.relpath(__file__).replace('/', '.')[:(- 3)]
    # per-stage activation specs: inter-stage tensors keyed by traced scope name
    stages = {0: {'inputs': {'input_ids': {'shape': torch.Size([16, 384]), 'dtype': torch.int64, 'is_batched': True, 'req_grad': False}, 'attention_mask': {'shape': torch.Size([16, 384]), 'dtype': torch.int64, 'is_batched': True, 'req_grad': False}, 'token_type_ids': {'shape': torch.Size([16, 384]), 'dtype': torch.int64, 'is_batched': True, 'req_grad': False}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___62': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Tensor::__add___73': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}}, 1: {'inputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___62': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Tensor::__add___73': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___113': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}}, 2: {'inputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___113': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 
    'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___164': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}}, 3: {'inputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___164': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___230': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___232': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}}}, 4: {'inputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___230': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___232': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___266': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}}, 5: {'inputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___266': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___317': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Tensor::__add___328': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}}, 6: {'inputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___317': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Tensor::__add___328': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___368': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}}, 7: {'inputs': 
    {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/tuple::__getitem___368': {'shape': torch.Size([16, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': False}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([16, 384, 1024]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([16, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'req_grad': True}}}}
    # one stage per GPU (cuda:0..cuda:7), everything on CPU in DEBUG mode
    stages[0]['stage_cls'] = (module_path + '.Partition0')
    device = ('cpu' if DEBUG else 'cuda:0')
    stages[0]['devices'] = [device]
    stages[1]['stage_cls'] = (module_path + '.Partition1')
    device = ('cpu' if DEBUG else 'cuda:1')
    stages[1]['devices'] = [device]
    stages[2]['stage_cls'] = (module_path + '.Partition2')
    device = ('cpu' if DEBUG else 'cuda:2')
    stages[2]['devices'] = [device]
    stages[3]['stage_cls'] = (module_path + '.Partition3')
    device = ('cpu' if DEBUG else 'cuda:3')
    stages[3]['devices'] = [device]
    stages[4]['stage_cls'] = (module_path + '.Partition4')
    device = ('cpu' if DEBUG else 'cuda:4')
    stages[4]['devices'] = [device]
    stages[5]['stage_cls'] = (module_path + '.Partition5')
    device = ('cpu' if DEBUG else 'cuda:5')
    stages[5]['devices'] = [device]
    stages[6]['stage_cls'] = (module_path + '.Partition6')
    device = ('cpu' if DEBUG else 'cuda:6')
    stages[6]['devices'] = [device]
    stages[7]['stage_cls'] = (module_path + '.Partition7')
    device = ('cpu' if DEBUG else 'cuda:7')
    stages[7]['devices'] = [device]
    config = dict()
    config['batch_dim'] = 0
    config['depth'] = depth
    config['basic_blocks'] = blocks_path
    config['model_inputs'] = {'input_ids': {'shape': torch.Size([16, 384]), 'dtype': torch.int64, 'is_batched': True}, 'attention_mask': {'shape': torch.Size([16, 384]), 'dtype': torch.int64, 'is_batched': True}, 'token_type_ids': {'shape': torch.Size([16, 384]), 'dtype': torch.int64, 'is_batched': True}}
    config['model_outputs'] = {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([16, 384, 2]), 'dtype': torch.float32, 'is_batched': True}}
    config['stages'] = stages
    # NOTE(review): this overwrites the blocks_path assignment above with the
    # class tuple, leaving the dotted-path list unused — confirm which form
    # consumers of the config expect
    config['basic_blocks'] = basic_blocks
    return config
|
class Partition0(nn.Module):
BASIC_BLOCKS = (LayerNorm, Linear, Embedding, Gelu, BertSelfAttention, Dropout)
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]']
TENSORS = []
def __init__(self, layers, tensors):
super(Partition0, self).__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device('cuda:0')
self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self', 'l_6': 'bert.encoder.0.attention.output.dense', 'l_7': 'bert.encoder.0.attention.output.dropout', 'l_8': 'bert.encoder.0.attention.output.LayerNorm', 'l_9': 'bert.encoder.0.intermediate.dense', 'l_10': 'bert.encoder.0.intermediate.intermediate_act_fn', 'l_11': 'bert.encoder.0.output.dense', 'l_12': 'bert.encoder.0.output.dropout', 'l_13': 'bert.encoder.0.output.LayerNorm', 'l_14': 'bert.encoder.1.attention.self', 'l_15': 'bert.encoder.1.attention.output.dense', 'l_16': 'bert.encoder.1.attention.output.dropout', 'l_17': 'bert.encoder.1.attention.output.LayerNorm', 'l_18': 'bert.encoder.1.intermediate.dense', 'l_19': 'bert.encoder.1.intermediate.intermediate_act_fn', 'l_20': 'bert.encoder.1.output.dense', 'l_21': 'bert.encoder.1.output.dropout', 'l_22': 'bert.encoder.1.output.LayerNorm', 'l_23': 'bert.encoder.2.attention.self', 'l_24': 'bert.encoder.2.attention.output.dense', 'l_25': 'bert.encoder.2.attention.output.dropout', 'l_26': 'bert.encoder.2.attention.output.LayerNorm', 'l_27': 'bert.encoder.2.intermediate.dense', 'l_28': 'bert.encoder.2.intermediate.intermediate_act_fn', 'l_29': 'bert.encoder.2.output.dense', 'l_30': 'bert.encoder.2.output.dropout'}
def forward(self, input_ids, attention_mask, token_type_ids):
    """Pipeline stage 0: BERT embeddings plus encoder layers 0-2.

    Returns (ext_mask, hidden): the additive attention mask reused by the
    later stages, and the layer-2 output *before* its output LayerNorm
    (that LayerNorm was assigned to the next partition).
    """
    # Additive attention mask: 0.0 on attended positions, -10000.0 on padding.
    ext_mask = attention_mask.unsqueeze(1).unsqueeze(2)
    ext_mask = ext_mask.to(dtype=torch.float32)
    ext_mask = (1.0 - ext_mask) * (- 10000.0)
    # Position indices 0..seq_len-1, broadcast over the batch dimension.
    pos_ids = torch.arange(input_ids.size(1), dtype=torch.int64, device=input_ids.device)
    pos_ids = pos_ids.unsqueeze(0).expand_as(input_ids)
    # l_0/l_1/l_2 = word/position/token-type embeddings; l_3/l_4 = LayerNorm + dropout.
    summed = self.l_0(input_ids) + self.l_1(pos_ids) + self.l_2(token_type_ids)
    hidden = self.l_4(self.l_3(summed))
    # Encoder layers 0-2: the module aliases for layer i start at l_{5 + 9*i}.
    for base in (5, 14, 23):
        self_attn, attn_dense, attn_drop, attn_norm = (getattr(self, f'l_{base + k}') for k in range(4))
        ffn_dense, ffn_act, out_dense, out_drop = (getattr(self, f'l_{base + k}') for k in range(4, 8))
        attn_out = attn_drop(attn_dense(self_attn(hidden, attention_mask=ext_mask, head_mask=None)))
        hidden = attn_norm(attn_out + hidden)
        ffn_out = out_drop(out_dense(ffn_act(ffn_dense(hidden))))
        hidden = ffn_out + hidden
        if base != 23:
            # Output LayerNorm of this layer; for layer 2 it lives in Partition1.
            hidden = getattr(self, f'l_{base + 8}')(hidden)
    return (ext_mask, hidden)
def state_dict(self, device=None):
    # NOTE(review): shadows nn.Module.state_dict with a different signature;
    # the bare name below resolves to the module-level helper, not this method.
    return state_dict(self, device=device)
def load_state_dict(self, state):
    # Delegates to the module-level load_state_dict helper.
    return load_state_dict(self, state)
def named_parameters(self, recurse=True):
    # Delegates to the module-level named_parameters helper.
    return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
    # Delegates to the module-level named_buffers helper.
    return named_buffers(self, recurse=recurse)
def cpu(self):
    # Delegates to the module-level cpu helper.
    return cpu(self)
def cuda(self, device=None):
    # Delegates to the module-level cuda helper.
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegates to the module-level to helper.
    return to(self, *args, **kwargs)
|
class Partition1(nn.Module):
BASIC_BLOCKS = (LayerNorm, Linear, Gelu, BertSelfAttention, Dropout)
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]']
TENSORS = []
def __init__(self, layers, tensors):
    """Assemble pipeline stage 1 from the traced model's modules/tensors.

    Args:
        layers: dict mapping a layer scope string (see LAYER_SCOPES) to the
            corresponding nn.Module extracted from the traced model.
        tensors: dict mapping a tensor scope string (see TENSORS) to a free
            nn.Parameter or buffer (unused here: TENSORS is empty).
    """
    super(Partition1, self).__init__()
    # Register every traced sub-module under a positional alias l_<idx>,
    # in LAYER_SCOPES order (forward() relies on this ordering).
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    # Free-standing tensors: parameters become p_<i>, buffers become b_<i>.
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    # Device this pipeline stage is pinned to.
    self.device = torch.device('cuda:1')
    # Maps positional alias l_<idx> -> dotted module path in the original model.
    self.lookup = {'l_0': 'bert.encoder.2.output.LayerNorm', 'l_1': 'bert.encoder.3.attention.self', 'l_2': 'bert.encoder.3.attention.output.dense', 'l_3': 'bert.encoder.3.attention.output.dropout', 'l_4': 'bert.encoder.3.attention.output.LayerNorm', 'l_5': 'bert.encoder.3.intermediate.dense', 'l_6': 'bert.encoder.3.intermediate.intermediate_act_fn', 'l_7': 'bert.encoder.3.output.dense', 'l_8': 'bert.encoder.3.output.dropout', 'l_9': 'bert.encoder.3.output.LayerNorm', 'l_10': 'bert.encoder.4.attention.self', 'l_11': 'bert.encoder.4.attention.output.dense', 'l_12': 'bert.encoder.4.attention.output.dropout', 'l_13': 'bert.encoder.4.attention.output.LayerNorm', 'l_14': 'bert.encoder.4.intermediate.dense', 'l_15': 'bert.encoder.4.intermediate.intermediate_act_fn', 'l_16': 'bert.encoder.4.output.dense', 'l_17': 'bert.encoder.4.output.dropout', 'l_18': 'bert.encoder.4.output.LayerNorm', 'l_19': 'bert.encoder.5.attention.self', 'l_20': 'bert.encoder.5.attention.output.dense', 'l_21': 'bert.encoder.5.attention.output.dropout', 'l_22': 'bert.encoder.5.attention.output.LayerNorm', 'l_23': 'bert.encoder.5.intermediate.dense', 'l_24': 'bert.encoder.5.intermediate.intermediate_act_fn', 'l_25': 'bert.encoder.5.output.dense', 'l_26': 'bert.encoder.5.output.dropout', 'l_27': 'bert.encoder.5.output.LayerNorm'}
def forward(self, x0, x1):
    """Pipeline stage 1: deferred LayerNorm of layer 2, then BERT layers 3-5.

    Args:
        x0: additive attention mask produced by stage 0.
        x1: pre-LayerNorm hidden states from stage 0.
    Returns (mask, hidden).
    """
    mask = x0
    # l_0 is encoder layer 2's output LayerNorm, deferred from the previous stage.
    hidden = self.l_0(x1)
    # Layers 3-5: each layer's module aliases occupy 9 consecutive slots.
    for base in (1, 10, 19):
        self_attn, attn_dense, attn_drop, attn_norm = (getattr(self, f'l_{base + k}') for k in range(4))
        ffn_dense, ffn_act, out_dense, out_drop = (getattr(self, f'l_{base + k}') for k in range(4, 8))
        out_norm = getattr(self, f'l_{base + 8}')
        attn_out = attn_drop(attn_dense(self_attn(hidden, attention_mask=mask, head_mask=None)))
        hidden = attn_norm(attn_out + hidden)
        ffn_out = out_drop(out_dense(ffn_act(ffn_dense(hidden))))
        hidden = out_norm(ffn_out + hidden)
    return (mask, hidden)
def state_dict(self, device=None):
    # NOTE(review): shadows nn.Module.state_dict with a different signature;
    # the bare name below resolves to the module-level helper, not this method.
    return state_dict(self, device=device)
def load_state_dict(self, state):
    # Delegates to the module-level load_state_dict helper.
    return load_state_dict(self, state)
def named_parameters(self, recurse=True):
    # Delegates to the module-level named_parameters helper.
    return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
    # Delegates to the module-level named_buffers helper.
    return named_buffers(self, recurse=recurse)
def cpu(self):
    # Delegates to the module-level cpu helper.
    return cpu(self)
def cuda(self, device=None):
    # Delegates to the module-level cuda helper.
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegates to the module-level to helper.
    return to(self, *args, **kwargs)
|
class Partition2(nn.Module):
BASIC_BLOCKS = (LayerNorm, Linear, Gelu, BertSelfAttention, Dropout)
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]']
TENSORS = []
def __init__(self, layers, tensors):
    """Assemble pipeline stage 2 from the traced model's modules/tensors.

    Args:
        layers: dict mapping a layer scope string (see LAYER_SCOPES) to the
            corresponding nn.Module extracted from the traced model.
        tensors: dict mapping a tensor scope string (see TENSORS) to a free
            nn.Parameter or buffer (unused here: TENSORS is empty).
    """
    super(Partition2, self).__init__()
    # Register every traced sub-module under a positional alias l_<idx>,
    # in LAYER_SCOPES order (forward() relies on this ordering).
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    # Free-standing tensors: parameters become p_<i>, buffers become b_<i>.
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    # Device this pipeline stage is pinned to.
    self.device = torch.device('cuda:2')
    # Maps positional alias l_<idx> -> dotted module path in the original model.
    self.lookup = {'l_0': 'bert.encoder.6.attention.self', 'l_1': 'bert.encoder.6.attention.output.dense', 'l_2': 'bert.encoder.6.attention.output.dropout', 'l_3': 'bert.encoder.6.attention.output.LayerNorm', 'l_4': 'bert.encoder.6.intermediate.dense', 'l_5': 'bert.encoder.6.intermediate.intermediate_act_fn', 'l_6': 'bert.encoder.6.output.dense', 'l_7': 'bert.encoder.6.output.dropout', 'l_8': 'bert.encoder.6.output.LayerNorm', 'l_9': 'bert.encoder.7.attention.self', 'l_10': 'bert.encoder.7.attention.output.dense', 'l_11': 'bert.encoder.7.attention.output.dropout', 'l_12': 'bert.encoder.7.attention.output.LayerNorm', 'l_13': 'bert.encoder.7.intermediate.dense', 'l_14': 'bert.encoder.7.intermediate.intermediate_act_fn', 'l_15': 'bert.encoder.7.output.dense', 'l_16': 'bert.encoder.7.output.dropout', 'l_17': 'bert.encoder.7.output.LayerNorm', 'l_18': 'bert.encoder.8.attention.self', 'l_19': 'bert.encoder.8.attention.output.dense', 'l_20': 'bert.encoder.8.attention.output.dropout', 'l_21': 'bert.encoder.8.attention.output.LayerNorm', 'l_22': 'bert.encoder.8.intermediate.dense', 'l_23': 'bert.encoder.8.intermediate.intermediate_act_fn', 'l_24': 'bert.encoder.8.output.dense', 'l_25': 'bert.encoder.8.output.dropout', 'l_26': 'bert.encoder.8.output.LayerNorm'}
def forward(self, x0, x1):
    """Pipeline stage 2: BERT encoder layers 6-8.

    Args:
        x0: additive attention mask forwarded from the previous stage.
        x1: hidden states from the previous stage.
    Returns (mask, hidden).
    """
    mask = x0
    hidden = x1
    # Layers 6-8: each layer's module aliases occupy 9 consecutive slots.
    for base in (0, 9, 18):
        self_attn, attn_dense, attn_drop, attn_norm = (getattr(self, f'l_{base + k}') for k in range(4))
        ffn_dense, ffn_act, out_dense, out_drop = (getattr(self, f'l_{base + k}') for k in range(4, 8))
        out_norm = getattr(self, f'l_{base + 8}')
        attn_out = attn_drop(attn_dense(self_attn(hidden, attention_mask=mask, head_mask=None)))
        hidden = attn_norm(attn_out + hidden)
        ffn_out = out_drop(out_dense(ffn_act(ffn_dense(hidden))))
        hidden = out_norm(ffn_out + hidden)
    return (mask, hidden)
def state_dict(self, device=None):
    # NOTE(review): shadows nn.Module.state_dict with a different signature;
    # the bare name below resolves to the module-level helper, not this method.
    return state_dict(self, device=device)
def load_state_dict(self, state):
    # Delegates to the module-level load_state_dict helper.
    return load_state_dict(self, state)
def named_parameters(self, recurse=True):
    # Delegates to the module-level named_parameters helper.
    return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
    # Delegates to the module-level named_buffers helper.
    return named_buffers(self, recurse=recurse)
def cpu(self):
    # Delegates to the module-level cpu helper.
    return cpu(self)
def cuda(self, device=None):
    # Delegates to the module-level cuda helper.
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegates to the module-level to helper.
    return to(self, *args, **kwargs)
|
class Partition3(nn.Module):
BASIC_BLOCKS = (LayerNorm, Linear, Gelu, BertSelfAttention, Dropout)
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]']
TENSORS = []
def __init__(self, layers, tensors):
    """Assemble pipeline stage 3 from the traced model's modules/tensors.

    Args:
        layers: dict mapping a layer scope string (see LAYER_SCOPES) to the
            corresponding nn.Module extracted from the traced model.
        tensors: dict mapping a tensor scope string (see TENSORS) to a free
            nn.Parameter or buffer (unused here: TENSORS is empty).
    """
    super(Partition3, self).__init__()
    # Register every traced sub-module under a positional alias l_<idx>,
    # in LAYER_SCOPES order (forward() relies on this ordering).
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    # Free-standing tensors: parameters become p_<i>, buffers become b_<i>.
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    # Device this pipeline stage is pinned to.
    self.device = torch.device('cuda:3')
    # Maps positional alias l_<idx> -> dotted module path in the original model.
    self.lookup = {'l_0': 'bert.encoder.9.attention.self', 'l_1': 'bert.encoder.9.attention.output.dense', 'l_2': 'bert.encoder.9.attention.output.dropout', 'l_3': 'bert.encoder.9.attention.output.LayerNorm', 'l_4': 'bert.encoder.9.intermediate.dense', 'l_5': 'bert.encoder.9.intermediate.intermediate_act_fn', 'l_6': 'bert.encoder.9.output.dense', 'l_7': 'bert.encoder.9.output.dropout', 'l_8': 'bert.encoder.9.output.LayerNorm', 'l_9': 'bert.encoder.10.attention.self', 'l_10': 'bert.encoder.10.attention.output.dense', 'l_11': 'bert.encoder.10.attention.output.dropout', 'l_12': 'bert.encoder.10.attention.output.LayerNorm', 'l_13': 'bert.encoder.10.intermediate.dense', 'l_14': 'bert.encoder.10.intermediate.intermediate_act_fn', 'l_15': 'bert.encoder.10.output.dense', 'l_16': 'bert.encoder.10.output.dropout', 'l_17': 'bert.encoder.10.output.LayerNorm', 'l_18': 'bert.encoder.11.attention.self', 'l_19': 'bert.encoder.11.attention.output.dense', 'l_20': 'bert.encoder.11.attention.output.dropout', 'l_21': 'bert.encoder.11.attention.output.LayerNorm', 'l_22': 'bert.encoder.11.intermediate.dense', 'l_23': 'bert.encoder.11.intermediate.intermediate_act_fn', 'l_24': 'bert.encoder.11.output.dense', 'l_25': 'bert.encoder.11.output.dropout', 'l_26': 'bert.encoder.11.output.LayerNorm'}
def forward(self, x0, x1):
    """Pipeline stage 3: BERT encoder layers 9-11.

    Args:
        x0: additive attention mask forwarded from the previous stage.
        x1: hidden states from the previous stage.
    Returns (hidden, mask) — note the order is swapped relative to the
    earlier stages' outputs, matching the generated wiring.
    """
    mask = x0
    hidden = x1
    # Layers 9-11: each layer's module aliases occupy 9 consecutive slots.
    for base in (0, 9, 18):
        self_attn, attn_dense, attn_drop, attn_norm = (getattr(self, f'l_{base + k}') for k in range(4))
        ffn_dense, ffn_act, out_dense, out_drop = (getattr(self, f'l_{base + k}') for k in range(4, 8))
        out_norm = getattr(self, f'l_{base + 8}')
        attn_out = attn_drop(attn_dense(self_attn(hidden, attention_mask=mask, head_mask=None)))
        hidden = attn_norm(attn_out + hidden)
        ffn_out = out_drop(out_dense(ffn_act(ffn_dense(hidden))))
        hidden = out_norm(ffn_out + hidden)
    return (hidden, mask)
def state_dict(self, device=None):
    # NOTE(review): shadows nn.Module.state_dict with a different signature;
    # the bare name below resolves to the module-level helper, not this method.
    return state_dict(self, device=device)
def load_state_dict(self, state):
    # Delegates to the module-level load_state_dict helper.
    return load_state_dict(self, state)
def named_parameters(self, recurse=True):
    # Delegates to the module-level named_parameters helper.
    return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
    # Delegates to the module-level named_buffers helper.
    return named_buffers(self, recurse=recurse)
def cpu(self):
    # Delegates to the module-level cpu helper.
    return cpu(self)
def cuda(self, device=None):
    # Delegates to the module-level cuda helper.
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegates to the module-level to helper.
    return to(self, *args, **kwargs)
|
class Partition4(nn.Module):
BASIC_BLOCKS = (LayerNorm, Linear, Gelu, BertSelfAttention, Dropout)
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertIntermediate[intermediate]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]']
TENSORS = []
def __init__(self, layers, tensors):
    """Assemble pipeline stage 4 from the traced model's modules/tensors.

    Args:
        layers: dict mapping a layer scope string (see LAYER_SCOPES) to the
            corresponding nn.Module extracted from the traced model.
        tensors: dict mapping a tensor scope string (see TENSORS) to a free
            nn.Parameter or buffer (unused here: TENSORS is empty).
    """
    super(Partition4, self).__init__()
    # Register every traced sub-module under a positional alias l_<idx>,
    # in LAYER_SCOPES order (forward() relies on this ordering).
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    # Free-standing tensors: parameters become p_<i>, buffers become b_<i>.
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    # Device this pipeline stage is pinned to.
    self.device = torch.device('cuda:4')
    # Maps positional alias l_<idx> -> dotted module path in the original model.
    self.lookup = {'l_0': 'bert.encoder.12.attention.self', 'l_1': 'bert.encoder.12.attention.output.dense', 'l_2': 'bert.encoder.12.attention.output.dropout', 'l_3': 'bert.encoder.12.attention.output.LayerNorm', 'l_4': 'bert.encoder.12.intermediate.dense', 'l_5': 'bert.encoder.12.intermediate.intermediate_act_fn', 'l_6': 'bert.encoder.12.output.dense', 'l_7': 'bert.encoder.12.output.dropout', 'l_8': 'bert.encoder.12.output.LayerNorm', 'l_9': 'bert.encoder.13.attention.self', 'l_10': 'bert.encoder.13.attention.output.dense', 'l_11': 'bert.encoder.13.attention.output.dropout', 'l_12': 'bert.encoder.13.attention.output.LayerNorm', 'l_13': 'bert.encoder.13.intermediate.dense', 'l_14': 'bert.encoder.13.intermediate.intermediate_act_fn', 'l_15': 'bert.encoder.13.output.dense', 'l_16': 'bert.encoder.13.output.dropout', 'l_17': 'bert.encoder.13.output.LayerNorm', 'l_18': 'bert.encoder.14.attention.self', 'l_19': 'bert.encoder.14.attention.output.dense', 'l_20': 'bert.encoder.14.attention.output.dropout', 'l_21': 'bert.encoder.14.attention.output.LayerNorm', 'l_22': 'bert.encoder.14.intermediate.dense', 'l_23': 'bert.encoder.14.intermediate.intermediate_act_fn', 'l_24': 'bert.encoder.14.output.dense', 'l_25': 'bert.encoder.14.output.dropout', 'l_26': 'bert.encoder.14.output.LayerNorm'}
def forward(self, x0, x1):
    """Pipeline stage 4: BERT encoder layers 12-14.

    Args:
        x0: hidden states from the previous stage (note: argument order is
            swapped relative to stages 1-3 — stage 3 returns (hidden, mask)).
        x1: additive attention mask.
    Returns (mask, hidden).
    """
    hidden = x0
    mask = x1
    # Layers 12-14: each layer's module aliases occupy 9 consecutive slots.
    for base in (0, 9, 18):
        self_attn, attn_dense, attn_drop, attn_norm = (getattr(self, f'l_{base + k}') for k in range(4))
        ffn_dense, ffn_act, out_dense, out_drop = (getattr(self, f'l_{base + k}') for k in range(4, 8))
        out_norm = getattr(self, f'l_{base + 8}')
        attn_out = attn_drop(attn_dense(self_attn(hidden, attention_mask=mask, head_mask=None)))
        hidden = attn_norm(attn_out + hidden)
        ffn_out = out_drop(out_dense(ffn_act(ffn_dense(hidden))))
        hidden = out_norm(ffn_out + hidden)
    return (mask, hidden)
def state_dict(self, device=None):
    # NOTE(review): shadows nn.Module.state_dict with a different signature;
    # the bare name below resolves to the module-level helper, not this method.
    return state_dict(self, device=device)
def load_state_dict(self, state):
    # Delegates to the module-level load_state_dict helper.
    return load_state_dict(self, state)
def named_parameters(self, recurse=True):
    # Delegates to the module-level named_parameters helper.
    return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
    # Delegates to the module-level named_buffers helper.
    return named_buffers(self, recurse=recurse)
def cpu(self):
    # Delegates to the module-level cpu helper.
    return cpu(self)
def cuda(self, device=None):
    # Delegates to the module-level cuda helper.
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegates to the module-level to helper.
    return to(self, *args, **kwargs)
|
class Partition5(nn.Module):
BASIC_BLOCKS = (LayerNorm, Linear, Gelu, BertSelfAttention, Dropout)
# Traced-graph scope names of the modules owned by this stage, in execution
# order: BERT encoder layers 15-17. Layer 17's final output LayerNorm is NOT
# part of this stage; the next partition applies it first.
LAYER_SCOPES = [
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertIntermediate[intermediate]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Dropout[dropout]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/LayerNorm[LayerNorm]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertIntermediate[intermediate]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Dropout[dropout]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertIntermediate[intermediate]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Linear[dense]',
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Dropout[dropout]']
# Scope names of parameters/buffers assigned directly to this stage (none here).
TENSORS = []

def __init__(self, layers, tensors):
    """Bind this pipeline stage's modules and tensors.

    layers: mapping of traced scope name -> nn.Module, produced by the partitioner.
    tensors: mapping of scope name -> nn.Parameter / buffer (unused: TENSORS is empty).
    """
    super(Partition5, self).__init__()
    # Expose each owned layer under a generated attribute name l_0 .. l_25.
    for i, scope in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{i}', layers[scope])
    n_params = n_buffers = 0
    for scope in self.TENSORS:
        t = tensors[scope]
        if isinstance(t, nn.Parameter):
            self.register_parameter(f'p_{n_params}', t)
            n_params += 1
        else:
            self.register_buffer(f'b_{n_buffers}', t)
            n_buffers += 1
    self.device = torch.device('cuda:5')
    # Generated attribute name -> original model state-dict prefix; used by the
    # module-level state_dict/load_state_dict/named_* helpers.
    self.lookup = {
        'l_0': 'bert.encoder.15.attention.self',
        'l_1': 'bert.encoder.15.attention.output.dense',
        'l_2': 'bert.encoder.15.attention.output.dropout',
        'l_3': 'bert.encoder.15.attention.output.LayerNorm',
        'l_4': 'bert.encoder.15.intermediate.dense',
        'l_5': 'bert.encoder.15.intermediate.intermediate_act_fn',
        'l_6': 'bert.encoder.15.output.dense',
        'l_7': 'bert.encoder.15.output.dropout',
        'l_8': 'bert.encoder.15.output.LayerNorm',
        'l_9': 'bert.encoder.16.attention.self',
        'l_10': 'bert.encoder.16.attention.output.dense',
        'l_11': 'bert.encoder.16.attention.output.dropout',
        'l_12': 'bert.encoder.16.attention.output.LayerNorm',
        'l_13': 'bert.encoder.16.intermediate.dense',
        'l_14': 'bert.encoder.16.intermediate.intermediate_act_fn',
        'l_15': 'bert.encoder.16.output.dense',
        'l_16': 'bert.encoder.16.output.dropout',
        'l_17': 'bert.encoder.16.output.LayerNorm',
        'l_18': 'bert.encoder.17.attention.self',
        'l_19': 'bert.encoder.17.attention.output.dense',
        'l_20': 'bert.encoder.17.attention.output.dropout',
        'l_21': 'bert.encoder.17.attention.output.LayerNorm',
        'l_22': 'bert.encoder.17.intermediate.dense',
        'l_23': 'bert.encoder.17.intermediate.intermediate_act_fn',
        'l_24': 'bert.encoder.17.output.dense',
        'l_25': 'bert.encoder.17.output.dropout'}
def forward(self, x0, x1):
    """Run BERT encoder layers 15-17 of this pipeline stage.

    x0: extended attention mask (passed through unchanged).
    x1: hidden states from the previous stage.
    Returns (attention_mask, hidden_states); layer 17's output LayerNorm is
    deliberately NOT applied here — the next partition owns it.
    """
    mask, hidden = x0, x1
    # Each encoder layer occupies 9 consecutive generated modules, except the
    # last one, whose trailing LayerNorm lives in the next partition.
    for base in (0, 9, 18):
        sub = lambda k: getattr(self, f'l_{base + k}')
        attn = sub(0)(hidden, attention_mask=mask, head_mask=None)
        attn = sub(2)(sub(1)(attn))         # attention output: dense -> dropout
        hidden = sub(3)(attn + hidden)      # residual -> LayerNorm
        ffn = sub(5)(sub(4)(hidden))        # intermediate: dense -> gelu
        ffn = sub(7)(sub(6)(ffn))           # output: dense -> dropout
        hidden = ffn + hidden               # residual
        if base != 18:
            hidden = sub(8)(hidden)         # output LayerNorm (deferred for layer 17)
    return (mask, hidden)
# The overrides below replace nn.Module's versions with the module-level
# helper functions, which translate between the generated attribute names
# (l_i / p_i / b_i) and the original model's state-dict scope names.
def state_dict(self, device=None):
    # Delegates to the module-level state_dict helper, not nn.Module.state_dict.
    return state_dict(self, device=device)
def load_state_dict(self, state):
    # `state` is keyed by original-model scope names (see self.lookup).
    return load_state_dict(self, state)
def named_parameters(self, recurse=True):
    return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
    return named_buffers(self, recurse=recurse)
def cpu(self):
    # Also updates self.device (see the module-level cpu helper).
    return cpu(self)
def cuda(self, device=None):
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    return to(self, *args, **kwargs)
|
class Partition6(nn.Module):
    """Pipeline stage 6 of a partitioned BertForQuestionAnswering (runs on cuda:6).

    Applies the LayerNorm deferred from encoder layer 17, then runs encoder
    layers 18-20. Generated from a traced model graph.
    """
    BASIC_BLOCKS = (LayerNorm, Linear, Gelu, BertSelfAttention, Dropout)
    # Traced-graph scope names of the modules owned by this stage, in execution order.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]']
    # Scope names of parameters/buffers assigned directly to this stage (none here).
    TENSORS = []

    def __init__(self, layers, tensors):
        """Bind this stage's modules.

        layers: mapping of traced scope name -> nn.Module (from the partitioner).
        tensors: mapping of scope name -> nn.Parameter / buffer (unused: TENSORS is empty).
        """
        super(Partition6, self).__init__()
        # Expose each owned layer under a generated attribute name l_0 .. l_27.
        for i, scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{i}', layers[scope])
        n_params = n_buffers = 0
        for scope in self.TENSORS:
            t = tensors[scope]
            if isinstance(t, nn.Parameter):
                self.register_parameter(f'p_{n_params}', t)
                n_params += 1
            else:
                self.register_buffer(f'b_{n_buffers}', t)
                n_buffers += 1
        self.device = torch.device('cuda:6')
        # Generated attribute name -> original model state-dict prefix.
        self.lookup = {
            'l_0': 'bert.encoder.17.output.LayerNorm',
            'l_1': 'bert.encoder.18.attention.self',
            'l_2': 'bert.encoder.18.attention.output.dense',
            'l_3': 'bert.encoder.18.attention.output.dropout',
            'l_4': 'bert.encoder.18.attention.output.LayerNorm',
            'l_5': 'bert.encoder.18.intermediate.dense',
            'l_6': 'bert.encoder.18.intermediate.intermediate_act_fn',
            'l_7': 'bert.encoder.18.output.dense',
            'l_8': 'bert.encoder.18.output.dropout',
            'l_9': 'bert.encoder.18.output.LayerNorm',
            'l_10': 'bert.encoder.19.attention.self',
            'l_11': 'bert.encoder.19.attention.output.dense',
            'l_12': 'bert.encoder.19.attention.output.dropout',
            'l_13': 'bert.encoder.19.attention.output.LayerNorm',
            'l_14': 'bert.encoder.19.intermediate.dense',
            'l_15': 'bert.encoder.19.intermediate.intermediate_act_fn',
            'l_16': 'bert.encoder.19.output.dense',
            'l_17': 'bert.encoder.19.output.dropout',
            'l_18': 'bert.encoder.19.output.LayerNorm',
            'l_19': 'bert.encoder.20.attention.self',
            'l_20': 'bert.encoder.20.attention.output.dense',
            'l_21': 'bert.encoder.20.attention.output.dropout',
            'l_22': 'bert.encoder.20.attention.output.LayerNorm',
            'l_23': 'bert.encoder.20.intermediate.dense',
            'l_24': 'bert.encoder.20.intermediate.intermediate_act_fn',
            'l_25': 'bert.encoder.20.output.dense',
            'l_26': 'bert.encoder.20.output.dropout',
            'l_27': 'bert.encoder.20.output.LayerNorm'}

    def forward(self, x0, x1):
        """x0: extended attention mask; x1: layer-17 pre-LayerNorm hidden states.

        Returns (attention_mask, hidden_states) for the next stage.
        """
        mask = x0
        hidden = self.l_0(x1)  # layer 17's output LayerNorm, deferred from stage 5
        # Each encoder layer (18, 19, 20) occupies 9 consecutive generated modules.
        for base in (1, 10, 19):
            sub = lambda k: getattr(self, f'l_{base + k}')
            attn = sub(0)(hidden, attention_mask=mask, head_mask=None)
            attn = sub(2)(sub(1)(attn))         # attention output: dense -> dropout
            hidden = sub(3)(attn + hidden)      # residual -> LayerNorm
            ffn = sub(5)(sub(4)(hidden))        # intermediate: dense -> gelu
            ffn = sub(7)(sub(6)(ffn))           # output: dense -> dropout
            hidden = sub(8)(ffn + hidden)       # residual -> LayerNorm
        return (mask, hidden)

    # The overrides below route through the module-level helpers, which translate
    # between generated attribute names (l_i / p_i / b_i) and original scope names.
    def state_dict(self, device=None):
        return state_dict(self, device=device)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
|
class Partition7(nn.Module):
    """Pipeline stage 7 (last) of a partitioned BertForQuestionAnswering (runs on cuda:7).

    Runs encoder layers 21-23, the pooler (whose output the QA head does not
    consume), and the qa_outputs head. Generated from a traced model graph.
    """
    BASIC_BLOCKS = (Tanh, LayerNorm, Linear, Gelu, BertSelfAttention, Dropout)
    # Traced-graph scope names of the modules owned by this stage, in execution order.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Gelu[intermediate_act_fn]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]',
        'BertForQuestionAnswering/Linear[qa_outputs]']
    # Scope names of parameters/buffers assigned directly to this stage (none here).
    TENSORS = []

    def __init__(self, layers, tensors):
        """Bind this stage's modules.

        layers: mapping of traced scope name -> nn.Module (from the partitioner).
        tensors: mapping of scope name -> nn.Parameter / buffer (unused: TENSORS is empty).
        """
        super(Partition7, self).__init__()
        # Expose each owned layer under a generated attribute name l_0 .. l_29.
        for i, scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{i}', layers[scope])
        n_params = n_buffers = 0
        for scope in self.TENSORS:
            t = tensors[scope]
            if isinstance(t, nn.Parameter):
                self.register_parameter(f'p_{n_params}', t)
                n_params += 1
            else:
                self.register_buffer(f'b_{n_buffers}', t)
                n_buffers += 1
        self.device = torch.device('cuda:7')
        # Generated attribute name -> original model state-dict prefix.
        self.lookup = {
            'l_0': 'bert.encoder.21.attention.self',
            'l_1': 'bert.encoder.21.attention.output.dense',
            'l_2': 'bert.encoder.21.attention.output.dropout',
            'l_3': 'bert.encoder.21.attention.output.LayerNorm',
            'l_4': 'bert.encoder.21.intermediate.dense',
            'l_5': 'bert.encoder.21.intermediate.intermediate_act_fn',
            'l_6': 'bert.encoder.21.output.dense',
            'l_7': 'bert.encoder.21.output.dropout',
            'l_8': 'bert.encoder.21.output.LayerNorm',
            'l_9': 'bert.encoder.22.attention.self',
            'l_10': 'bert.encoder.22.attention.output.dense',
            'l_11': 'bert.encoder.22.attention.output.dropout',
            'l_12': 'bert.encoder.22.attention.output.LayerNorm',
            'l_13': 'bert.encoder.22.intermediate.dense',
            'l_14': 'bert.encoder.22.intermediate.intermediate_act_fn',
            'l_15': 'bert.encoder.22.output.dense',
            'l_16': 'bert.encoder.22.output.dropout',
            'l_17': 'bert.encoder.22.output.LayerNorm',
            'l_18': 'bert.encoder.23.attention.self',
            'l_19': 'bert.encoder.23.attention.output.dense',
            'l_20': 'bert.encoder.23.attention.output.dropout',
            'l_21': 'bert.encoder.23.attention.output.LayerNorm',
            'l_22': 'bert.encoder.23.intermediate.dense',
            'l_23': 'bert.encoder.23.intermediate.intermediate_act_fn',
            'l_24': 'bert.encoder.23.output.dense',
            'l_25': 'bert.encoder.23.output.dropout',
            'l_26': 'bert.encoder.23.output.LayerNorm',
            'l_27': 'bert.pooler.dense',
            'l_28': 'bert.pooler.activation',
            'l_29': 'qa_outputs'}

    def forward(self, x0, x1):
        """x0: extended attention mask; x1: hidden states from the previous stage.

        Returns a one-tuple with the qa_outputs logits over all positions.
        """
        mask, hidden = x0, x1
        # Each encoder layer (21, 22, 23) occupies 9 consecutive generated modules.
        for base in (0, 9, 18):
            sub = lambda k: getattr(self, f'l_{base + k}')
            attn = sub(0)(hidden, attention_mask=mask, head_mask=None)
            attn = sub(2)(sub(1)(attn))         # attention output: dense -> dropout
            hidden = sub(3)(attn + hidden)      # residual -> LayerNorm
            ffn = sub(5)(sub(4)(hidden))        # intermediate: dense -> gelu
            ffn = sub(7)(sub(6)(ffn))           # output: dense -> dropout
            hidden = sub(8)(ffn + hidden)       # residual -> LayerNorm
        # The pooled [CLS] output is computed but discarded, mirroring the traced
        # graph: the QA head does not use the pooler.
        self.l_28(self.l_27(hidden[:, 0]))
        logits = self.l_29(hidden)              # qa_outputs over every position
        return (logits,)

    # The overrides below route through the module-level helpers, which translate
    # between generated attribute names (l_i / p_i / b_i) and original scope names.
    def state_dict(self, device=None):
        return state_dict(self, device=device)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
|
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[nn.Module]=(), full: bool=False) -> Iterator[Tuple[nn.Module, str, nn.Module]]:
    """Iterate over model layers, yielding (layer, layer_scope, encasing_module).

    Parameters
    ----------
    module: the model to iterate over.
    depth: how far down the module tree to descend.
    prefix: scope-name prefix; defaults to the module's class name.
    basic_blocks: module classes that are never broken down further.
    full: if True, also yield non-leaf modules, with a trailing bool that is
          True for leaves and False for containers.
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, child in module.named_children():
        scope = f'{prefix}/{type(child).__name__}[{name}]'
        # A child is a leaf if it has no children, is a declared basic block,
        # or the depth budget is exhausted.
        is_leaf = (not list(child.children())) or isinstance(child, tuple(basic_blocks)) or depth == 0
        if is_leaf:
            yield (child, scope, module, True) if full else (child, scope, module)
            continue
        if full:
            yield (child, scope, module, False)
        yield from traverse_model(child, depth - 1, scope, basic_blocks, full)
|
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Map each traversed layer's scope name to the layer module itself."""
    return dict((scope, layer) for (layer, scope, _) in traverse_model(model, depth, basic_blocks=basic_blocks))
|
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Iterate over a model's parameters and buffers, yielding (obj, obj_scope).

    Parameters
    ----------
    module: the model to iterate over.
    prefix: scope-name prefix; defaults to the module's class name.
    """
    if prefix is None:
        prefix = type(module).__name__
    # Direct (non-recursive) parameters and buffers of this module.
    for name, param in module.named_parameters(recurse=False):
        yield param, f'{prefix}/{type(param).__name__}[{name}]'
    for name, buf in module.named_buffers(recurse=False):
        yield buf, f'{prefix}/{type(buf).__name__}[{name}]'
    # Recurse into children with an extended scope prefix.
    for name, child in module.named_children():
        child_prefix = f'{prefix}/{type(child).__name__}[{name}]'
        yield from traverse_params_buffs(child, child_prefix)
|
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Map every parameter/buffer scope name to its tensor, in traversal order."""
    result = collections.OrderedDict()
    for tensor, scope in traverse_params_buffs(model):
        result[scope] = tensor
    return result
|
def move_tensors(ts, device):
    """Recursively move every tensor/module inside a nested structure to `device`.

    Non-tensor leaves are returned unchanged.
    """
    def _move(obj):
        return obj.to(device) if isinstance(obj, (nn.Module, Tensor)) else obj
    return nested_map(_move, ts)
|
def nested_map(func, ts):
if isinstance(ts, torch.Size):
return func(ts)
elif isinstance(ts, (list, tuple, set)):
return type(ts)((nested_map(func, t) for t in ts))
elif isinstance(ts, dict):
return {k: nested_map(func, v) for (k, v) in ts.items()}
elif isinstance(ts, slice):
start = nested_map(func, ts.start)
stop = nested_map(func, ts.stop)
step = nested_map(func, ts.step)
return slice(start, stop, step)
return func(ts)
|
def state_dict(partition, device=None):
    """Return the partition's state dict keyed by original-model scope names.

    Generated keys ('l_3.weight', 'p_0', 'b_1', ...) are translated through
    partition.lookup; tensors are optionally moved to `device`.
    """
    raw = nn.Module.state_dict(partition)
    lookup = partition.lookup
    translated = dict()
    for key, value in raw.items():
        if key in lookup:
            new_key = lookup[key]
        else:
            # 'l_3.weight' -> lookup['l_3'] + '.weight' (split at the first dot)
            assert '.' in key
            head, _, tail = key.partition('.')
            new_key = lookup[head] + '.' + tail
        translated[new_key] = value if device is None else value.to(device)
    return translated
|
def load_state_dict(partition, state):
    """Load a state dict keyed by original-model scope names into the partition.

    Keys are translated back to the generated names (l_i / p_i / b_i) through
    the inverse of partition.lookup, and tensors are moved to partition.device
    before a strict nn.Module.load_state_dict.
    """
    reverse = {scope: gen for gen, scope in partition.lookup.items()}
    device = partition.device
    translated = dict()
    for key in list(partition.state_dict(None).keys()):
        if key in reverse:
            translated[reverse[key]] = state[key].to(device)
            continue
        # 'layer.dense.weight' -> reverse['layer.dense'] + '.weight'
        prefix, dot, suffix = key.rpartition('.')
        if prefix in reverse:
            translated[reverse[prefix] + dot + suffix] = state[key].to(device)
    nn.Module.load_state_dict(partition, translated, strict=True)
|
def named_buffers(partition, recurse=True):
    """Yield (original_scope_name, buffer) pairs for the partition.

    Generated buffer names are translated through partition.lookup.
    """
    lookup = partition.lookup
    for key, buf in nn.Module.named_buffers(partition, recurse=recurse):
        if key in lookup:
            yield lookup[key], buf
            continue
        # 'l_3.running_mean' -> lookup['l_3'] + '.running_mean'
        assert '.' in key
        head, _, tail = key.partition('.')
        yield lookup[head] + '.' + tail, buf
|
def named_parameters(partition, recurse=True):
    """Yield (original_scope_name, parameter) pairs for the partition.

    Generated parameter names are translated through partition.lookup.
    """
    lookup = partition.lookup
    for key, param in nn.Module.named_parameters(partition, recurse=recurse):
        if key in lookup:
            yield lookup[key], param
        else:
            # 'l_3.weight' -> lookup['l_3'] + '.weight'
            assert '.' in key
            head, _, tail = key.partition('.')
            yield lookup[head] + '.' + tail, param
|
def cpu(partition):
    """Move a partition module to the CPU.

    Also updates the partition's recorded ``device`` attribute, which
    load_state_dict uses as the target device for incoming tensors.
    """
    partition.device = torch.device('cpu')
    return nn.Module.cpu(partition)
|
# NOTE(review): the lines below are dataset-viewer page chrome accidentally
# captured during extraction; they are not part of this module. Kept as
# comments so the file remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.