repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
RBNN | RBNN-master/imagenet/models_cifar/resnet2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
__all__ =['resnet18A_1w1a','resnet18B_1w1a','resnet18C_1w1a','resnet18_1w1a']
class BasicBlock(nn.Module):
    """Binary 3x3/3x3 residual block with hardtanh activations."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3,
                                    stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3,
                                    stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        out_planes = self.expansion * planes
        if stride != 1 or in_planes != out_planes:
            # Project the identity path when the shape changes.
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, out_planes, kernel_size=1,
                               stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        y = self.conv1(x)
        y = F.hardtanh(self.bn1(y))
        y = self.bn2(self.conv2(y))
        y = y + self.shortcut(x)
        return F.hardtanh(y)
class Bottleneck(nn.Module):
    """Binary 1x1-3x3-1x1 bottleneck block with ReLU activations."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3,
                                    stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        width_out = self.expansion * planes
        self.conv3 = BinarizeConv2d(planes, width_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width_out)
        if stride != 1 or in_planes != width_out:
            # Project the identity path when the shape changes.
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, width_out, kernel_size=1,
                               stride=stride, bias=False),
                nn.BatchNorm2d(width_out),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + self.shortcut(x)
        return F.relu(y)
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_channel, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = num_channel[0]
self.conv1 = nn.Conv2d(3, num_channel[0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_channel[0])
self.layer1 = self._make_layer(block, num_channel[0], num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, num_channel[1], num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, num_channel[2], num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, num_channel[3], num_blocks[3], stride=2)
self.linear = nn.Linear(num_channel[3]*block.expansion, num_classes)
self.bn2 = nn.BatchNorm1d(num_channel[3]*block.expansion)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.bn1(self.conv1(x))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.bn2(out)
out = self.linear(out)
return out
def _resnet18(channels, **kwargs):
    """Build a BasicBlock ResNet-18 (two blocks per stage) with the given widths."""
    return ResNet(BasicBlock, [2, 2, 2, 2], channels, **kwargs)

def resnet18A_1w1a(**kwargs):
    """Binary ResNet-18, narrow variant A (32/32/64/128)."""
    return _resnet18([32, 32, 64, 128], **kwargs)

def resnet18B_1w1a(**kwargs):
    """Binary ResNet-18, variant B (32/64/128/256)."""
    return _resnet18([32, 64, 128, 256], **kwargs)

def resnet18C_1w1a(**kwargs):
    """Binary ResNet-18, variant C (64/64/128/256)."""
    return _resnet18([64, 64, 128, 256], **kwargs)

def resnet18_1w1a(**kwargs):
    """Binary ResNet-18 with standard widths (64/128/256/512)."""
    return _resnet18([64, 128, 256, 512], **kwargs)
def ResNet34():
    """ResNet-34 (BasicBlock).

    Fix: ResNet.__init__ requires the positional `num_channel` argument;
    the original calls omitted it and raised TypeError. The standard
    ImageNet-style stage widths are supplied here.
    """
    return ResNet(BasicBlock, [3, 4, 6, 3], [64, 128, 256, 512])

def ResNet50():
    """ResNet-50 (Bottleneck). See ResNet34 for the num_channel fix."""
    return ResNet(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512])

def ResNet101():
    """ResNet-101 (Bottleneck). See ResNet34 for the num_channel fix."""
    return ResNet(Bottleneck, [3, 4, 23, 3], [64, 128, 256, 512])

def ResNet152():
    """ResNet-152 (Bottleneck). See ResNet34 for the num_channel fix."""
    return ResNet(Bottleneck, [3, 8, 36, 3], [64, 128, 256, 512])
def test():
    """Smoke-test: push one random CIFAR-sized batch through resnet18_1w1a."""
    net = resnet18_1w1a()
    out = net(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 4,704 | 33.343066 | 107 | py |
RBNN | RBNN-master/imagenet/models_cifar/resnet.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_1w1a']
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module.

    Used below to implement the parameter-free option-A shortcut.
    """

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Simply delegate to the stored callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Full-precision residual block (3x3 + 3x3) with ReLU activations."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # CIFAR paper option A: spatial subsampling plus zero-padded
                # channels -- a parameter-free shortcut.
                pad = planes // 4
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2],
                                    (0, 0, 0, 0, pad, pad), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes),
                )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        merged = branch + self.shortcut(x)
        return F.relu(merged)
class BasicBlock_1w1a(nn.Module):
    """Binary (1-bit weight / 1-bit activation) residual block with hardtanh."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3,
                                    stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3,
                                    stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # CIFAR paper option A: spatial subsampling plus zero-padded
                # channels -- a parameter-free shortcut.
                pad = planes // 4
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2],
                                    (0, 0, 0, 0, pad, pad), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                    BinarizeConv2d(in_planes, self.expansion * planes,
                                   kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes),
                )

    def forward(self, x):
        y = F.hardtanh(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + self.shortcut(x)
        return F.hardtanh(y)
class ResNet(nn.Module):
    """CIFAR ResNet trunk: 16/32/64-channel stages, global average pool,
    BatchNorm1d on the pooled features, then a linear classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        # Full-precision 3x3 stem.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # Near-zero BN scale at init; presumably so residual branches
                # start close to identity -- TODO confirm intent.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first of a stage downsamples."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem -> three stages -> global average pool -> BN -> classifier.
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
def resnet20_1w1a(**kwargs):
    """Binary (1w1a) ResNet-20 for CIFAR."""
    return ResNet(BasicBlock_1w1a, [3, 3, 3], **kwargs)

def _plain(blocks_per_stage):
    """Full-precision CIFAR ResNet with `blocks_per_stage` blocks per stage."""
    return ResNet(BasicBlock, [blocks_per_stage] * 3)

def resnet20():
    return _plain(3)

def resnet32():
    return _plain(5)

def resnet44():
    return _plain(7)

def resnet56():
    return _plain(9)

def resnet110():
    return _plain(18)

def resnet1202():
    return _plain(200)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
    # Quick self-check: build and summarize every exported resnet variant.
    for net_name in __all__:
        if not net_name.startswith('resnet'):
            continue
        print(net_name)
        test(globals()[net_name]())
        print()
| 6,508 | 31.708543 | 120 | py |
RBNN | RBNN-master/imagenet/models_cifar/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from modules import *
__all__ = ['vgg_small_1w1a']
# Download URLs for torchvision's pretrained full-precision VGG weights.
# Not referenced by the binary models below; kept for reference.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """Generic VGG backbone: a `features` extractor followed by a single
    BatchNorm'd linear classifier and log-softmax output.

    Args:
        features: convolutional feature extractor (see make_layers); must
            yield 512 values per sample once flattened.
        num_classes: number of output classes.
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        # Fix: the original wrote `512 * block.expansion`, but `block` is
        # undefined in this class, so constructing VGG raised NameError.
        # The flattened feature width here is simply 512.
        self.bn2 = nn.BatchNorm1d(512)
        self.nonlinear2 = nn.Hardtanh(inplace=True)
        self.classifier = nn.Linear(512, num_classes)
        self.bn3 = nn.BatchNorm1d(num_classes)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.bn2(x)
        x = self.nonlinear2(x)
        x = self.classifier(x)
        x = self.bn3(x)
        x = self.logsoftmax(x)
        return x

    def _initialize_weights(self):
        """He-init convs, unit BN scale, small-normal linear weights."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an nn.Sequential.

    Each integer entry adds a 3x3 conv (+ optional BatchNorm) + Hardtanh;
    the string 'M' adds a 2x2 max-pool.
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(nn.Hardtanh(inplace=True))
        in_channels = v
    return nn.Sequential(*layers)
# Layer configurations keyed by variant letter: integers are conv output
# widths, 'M' marks a 2x2 max-pool ('A'=VGG11, 'B'=VGG13, 'D'=VGG16,
# 'E'=VGG19, 'F'=a small config).
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'F': [128, 128, 'M', 512, 512, 'M'],
}
def _vgg(letter, batch_norm=False, **kwargs):
    """Build a VGG model from configuration `letter` (see `cfg`)."""
    return VGG(make_layers(cfg[letter], batch_norm=batch_norm), **kwargs)

def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")."""
    return _vgg('A', **kwargs)

def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization."""
    return _vgg('A', batch_norm=True, **kwargs)

def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")."""
    return _vgg('B', **kwargs)

def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization."""
    return _vgg('B', batch_norm=True, **kwargs)

def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")."""
    return _vgg('D', **kwargs)

def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    return _vgg('D', batch_norm=True, **kwargs)

def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E")."""
    return _vgg('E', **kwargs)

def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration "E") with batch normalization."""
    return _vgg('E', batch_norm=True, **kwargs)
class VGG_SMALL_1W1A(nn.Module):
    """Small binary VGG for CIFAR: full-precision stem conv, five binary
    convs, three 2x2 max-pools and a single linear classifier."""

    def __init__(self, num_classes=10):
        super(VGG_SMALL_1W1A, self).__init__()
        self.conv0 = nn.Conv2d(3, 128, kernel_size=3, padding=1, bias=False)
        self.bn0 = nn.BatchNorm2d(128)
        self.conv1 = BinarizeConv2d(128, 128, kernel_size=3, padding=1, bias=False)
        self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)
        self.bn1 = nn.BatchNorm2d(128)
        self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = BinarizeConv2d(128, 256, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(256)
        self.conv3 = BinarizeConv2d(256, 256, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = BinarizeConv2d(256, 512, kernel_size=3, padding=1, bias=False)
        self.bn4 = nn.BatchNorm2d(512)
        self.conv5 = BinarizeConv2d(512, 512, kernel_size=3, padding=1, bias=False)
        self.bn5 = nn.BatchNorm2d(512)
        # Three pools on 32x32 input leave a 4x4 map -> 512*4*4 features.
        self.fc = nn.Linear(512 * 4 * 4, num_classes)
        self._initialize_weights()

    def _initialize_weights(self):
        """He-init all convs (binary or not), unit BN, small-normal linear."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, BinarizeConv2d)):
                fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x):
        x = self.nonlinear(self.bn0(self.conv0(x)))
        x = self.nonlinear(self.bn1(self.pooling(self.conv1(x))))
        x = self.nonlinear(self.bn2(self.conv2(x)))
        x = self.nonlinear(self.bn3(self.pooling(self.conv3(x))))
        x = self.nonlinear(self.bn4(self.conv4(x)))
        x = self.nonlinear(self.bn5(self.pooling(self.conv5(x))))
        x = x.view(x.size(0), -1)
        return self.fc(x)
def vgg_small_1w1a(**kwargs):
    """Factory for the binary (1w1a) small VGG."""
    return VGG_SMALL_1W1A(**kwargs)
| 7,083 | 30.766816 | 113 | py |
RBNN | RBNN-master/imagenet/models_cifar/resnet_bireal.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_bireal_1w1a']
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module.

    Used below to implement the parameter-free option-A shortcut.
    """

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Simply delegate to the stored callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Full-precision residual block (3x3 + 3x3) with ReLU activations."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # CIFAR paper option A: spatial subsampling plus zero-padded
                # channels -- a parameter-free shortcut.
                pad = planes // 4
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2],
                                    (0, 0, 0, 0, pad, pad), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes),
                )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        merged = branch + self.shortcut(x)
        return F.relu(merged)
class BasicBlock_1w1a(nn.Module):
    """Bi-Real-style binary block: a shortcut is added after *each* binary
    conv rather than once per block. The second residual connection reuses
    the activation produced after the first conv (x1), so the statement
    order in forward() is deliberate -- do not merge the two additions."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Parameter-free shortcut: subsample spatially, zero-pad channels.
                self.shortcut = LambdaLayer(lambda x:
                        F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        out = self.bn1(self.conv1(x))
        out += self.shortcut(x)  # first shortcut (handles any downsampling)
        out = F.hardtanh(out)
        x1 = out  # saved as identity for the second shortcut
        out = self.bn2(self.conv2(out))
        out += x1  # identity shortcut around the second conv only
        out = F.hardtanh(out)
        return out
class ResNet(nn.Module):
    """CIFAR ResNet trunk (16/32/64-channel stages) for the Bi-Real blocks:
    stem -> three stages -> global average pool -> BatchNorm1d -> linear."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        # Full-precision 3x3 stem.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # Near-zero BN scale at init; presumably so residual branches
                # start close to identity -- TODO confirm intent.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first of a stage downsamples."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
def resnet20_bireal_1w1a(**kwargs):
    """Binary (1w1a) Bi-Real ResNet-20 for CIFAR."""
    return ResNet(BasicBlock_1w1a, [3, 3, 3], **kwargs)

def _full_precision(blocks_per_stage):
    """Full-precision CIFAR ResNet with `blocks_per_stage` blocks per stage."""
    return ResNet(BasicBlock, [blocks_per_stage] * 3)

def resnet20():
    return _full_precision(3)

def resnet32():
    return _full_precision(5)

def resnet44():
    return _full_precision(7)

def resnet56():
    return _full_precision(9)

def resnet110():
    return _full_precision(18)

def resnet1202():
    return _full_precision(200)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
    # Quick self-check: build and summarize every exported resnet variant.
    for net_name in __all__:
        if not net_name.startswith('resnet'):
            continue
        print(net_name)
        test(globals()[net_name]())
        print()
| 6,557 | 31.626866 | 120 | py |
RBNN | RBNN-master/imagenet/utils/common.py | import os
import torch
import logging.config
import shutil
import torch.nn as nn
import numpy
import datetime
def setup_logging(log_file='log.txt', filemode='w'):
    """Configure root logging: DEBUG records to `log_file`, INFO and above
    mirrored to the console with a bare message format."""
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        filename=log_file,
        filemode=filemode,
    )
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s'))
    logging.getLogger('').addHandler(console)
def save_checkpoint(state, is_best, path='.', filename='checkpoint.pth.tar', save_all=False):
    """Persist `state` under `path`.

    Copies the file to model_best.pth.tar when `is_best`, and to a
    per-epoch name (using state['epoch']) when `save_all`.
    """
    target = os.path.join(path, filename)
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, os.path.join(path, 'model_best.pth.tar'))
    if save_all:
        shutil.copyfile(target, os.path.join(
            path, 'checkpoint_epoch_%s.pth.tar' % state['epoch']))
class AverageMeter(object):
    """Track the latest value plus running sum/count/average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (in percent) for each k in `topk`.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to evaluate.
    Returns:
        List of scalar tensors, one per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.float().topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Fix: use reshape instead of view -- the sliced comparison matrix is
        # not guaranteed contiguous on newer PyTorch, where view() raises
        # "view size is not compatible with input tensor's size and stride".
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def get_time(delta, epoch, epochs):
    """Format elapsed epoch time and extrapolate the wall-clock finish time.

    Args:
        delta: datetime.timedelta spent on the epoch just completed.
        epoch: zero-based index of that epoch.
        epochs: total number of epochs planned.
    Returns:
        (cost_time, finish_time): elapsed time string (leading hours field
        dropped when under one hour, fractional seconds stripped) and the
        projected completion timestamp '%Y-%m-%d %H:%M:%S'.
    """
    now = datetime.datetime.now()
    # Drop the leading hours field when the epoch took less than an hour.
    clip = 0 if delta >= datetime.timedelta(hours=1) else 1
    pieces = str(delta).split(':')[clip:]
    cost_time = ':'.join(pieces).split('.')[0]
    remaining = delta * (epochs - epoch - 1)
    finish_time = (now + remaining).strftime('%Y-%m-%d %H:%M:%S')
    return cost_time, finish_time
| 2,252 | 27.884615 | 93 | py |
RBNN | RBNN-master/imagenet/utils/options.py | import argparse
import os
"""
args
"""
# Command-line interface for RBNN training/evaluation. Groups: logging,
# model/dataset selection, and training hyper-parameters.
parser = argparse.ArgumentParser(description='RotationNet')
# Logging
parser.add_argument(
    '--results_dir',
    metavar='RESULTS_DIR',
    default='./results',
    help='results dir')
parser.add_argument(
    '--save',
    metavar='SAVE',
    default='',
    help='saved folder (named by datetime)')
parser.add_argument(
    '--resume',
    dest='resume',
    action='store_true',
    help='resume to latest checkpoint')
parser.add_argument(
    '-e',
    '--evaluate',
    type=str,
    metavar='FILE',
    help='evaluate model FILE on validation set')
parser.add_argument(
    '--seed',
    default=1234,
    type=int,
    help='random seed')
# Model
parser.add_argument(
    '--model',
    '-a',
    metavar='MODEL',
    default='resnet18_1w1a',
    help='model architecture ')
parser.add_argument(
    '--dataset',
    default='imagenet',
    type=str,
    help='dataset, default:imagenet')
parser.add_argument(
    '--data_path',
    type=str,
    default='/home/data',
    help='The dictionary where the dataset is stored.')
parser.add_argument(
    '--type',
    default='torch.cuda.FloatTensor',
    help='type of tensor - e.g torch.cuda.FloatTensor')
# Training
parser.add_argument(
    '--gpus',
    default='0',
    help='gpus used for training - e.g 0,1,3')
parser.add_argument(
    '--lr',
    default=0.1,
    type=float,
    help='learning rate')
parser.add_argument(
    '--weight_decay',
    type=float,
    default=1e-4,
    help='Weight decay of loss. default:1e-4')
parser.add_argument(
    '--momentum',
    default=0.9,
    type=float,
    metavar='M',
    help='momentum')
parser.add_argument(
    '--workers',
    default=8,
    type=int,
    metavar='N',
    help='number of data loading workers (default: 8)')
parser.add_argument(
    '--epochs',
    default=150,
    type=int,
    metavar='N',
    help='number of total epochs to run')
parser.add_argument(
    '--start_epoch',
    default=-1,
    type=int,
    metavar='N',
    help='manual epoch number (useful on restarts)')
parser.add_argument(
    '-b',
    '--batch_size',
    default=512,
    type=int,
    metavar='N',
    help='mini-batch size for training (default: 256)')
parser.add_argument(
    '-bt',
    '--batch_size_test',
    default=256,
    type=int,
    help='mini-batch size for testing (default: 128)')
parser.add_argument(
    '--print_freq',
    '-p',
    default=500,
    type=int,
    metavar='N',
    help='print frequency (default: 500)')
parser.add_argument(
    '--time_estimate',
    default=1,
    type=int,
    metavar='N',
    help='print estimating finish time,set to 0 to disable')
parser.add_argument(
    '--rotation_update',
    default=1,
    type=int,
    metavar='N',
    help='interval of updating rotation matrix (default:1)')
# Tmin/Tmax bound the temperature schedule used in back-propagation.
parser.add_argument(
    '--Tmin',
    default=1e-2,
    type=float,
    metavar='M',
    help='minimum of T (default:1e-2)')
parser.add_argument(
    '--Tmax',
    default=1e1,
    type=float,
    metavar='M',
    help='maximum of T (default:1e1)')
parser.add_argument(
    '--lr_type',
    type=str,
    default='cos',
    help='choose lr_scheduler,(default:cos)')
parser.add_argument(
    '--lr_decay_step',
    nargs='+',
    type=int,
    help='lr decay step for MultiStepLR')
parser.add_argument(
    '--a32',
    dest='a32',
    action='store_true',
    help='w1a32')
parser.add_argument(
    '--warm_up',
    dest='warm_up',
    action='store_true',
    help='use warm up or not')
parser.add_argument(
    '--use_dali',
    dest='use_dali',
    action='store_true',
    help='use DALI to load dataset or not')
args = parser.parse_args() | 3,679 | 18.067358 | 60 | py |
RBNN | RBNN-master/cifar/main.py | import argparse
import os
import time
import logging
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import models_cifar
import models_imagenet
import numpy as np
from torch.autograd import Variable
from utils.options import args
from utils.common import *
from modules import *
from datetime import datetime
import dataset
def main():
global args, best_prec1, conv_modules
best_prec1 = 0
random.seed(args.seed)
if args.evaluate:
args.results_dir = '/tmp'
save_path = os.path.join(args.results_dir, args.save)
if not os.path.exists(save_path):
os.makedirs(save_path)
if not args.resume:
with open(os.path.join(save_path,'config.txt'), 'w') as args_file:
args_file.write(str(datetime.now())+'\n\n')
for args_n,args_v in args.__dict__.items():
args_v = '' if not args_v and not isinstance(args_v,int) else args_v
args_file.write(str(args_n)+': '+str(args_v)+'\n')
setup_logging(os.path.join(save_path, 'logger.log'))
logging.info("saving to %s", save_path)
logging.debug("run arguments: %s", args)
else:
setup_logging(os.path.join(save_path, 'logger.log'), filemode='a')
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if 'cuda' in args.type:
args.gpus = [int(i) for i in args.gpus.split(',')]
cudnn.benchmark = True
else:
args.gpus = None
if args.dataset=='tinyimagenet':
num_classes=200
model_zoo = 'models_imagenet.'
elif args.dataset=='imagenet':
num_classes=1000
model_zoo = 'models_imagenet.'
elif args.dataset=='cifar10':
num_classes=10
model_zoo = 'models_cifar.'
elif args.dataset=='cifar100':
num_classes=100
model_zoo = 'models_cifar.'
if len(args.gpus)==1:
model = eval(model_zoo+args.model)(num_classes=num_classes).cuda()
else:
model = nn.DataParallel(eval(model_zoo+args.model)(num_classes=num_classes))
if not args.resume:
logging.info("creating model %s", args.model)
logging.info("model structure: %s", model)
num_parameters = sum([l.nelement() for l in model.parameters()])
logging.info("number of parameters: %d", num_parameters)
# evaluate
if args.evaluate:
if not os.path.isfile(args.evaluate):
logging.error('invalid checkpoint: {}'.format(args.evaluate))
else:
checkpoint = torch.load(args.evaluate)
if len(args.gpus)>1:
checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
model.load_state_dict(checkpoint['state_dict'])
logging.info("loaded checkpoint '%s' (epoch %s)",
args.evaluate, checkpoint['epoch'])
elif args.resume:
checkpoint_file = os.path.join(save_path,'checkpoint.pth.tar')
if os.path.isdir(checkpoint_file):
checkpoint_file = os.path.join(
checkpoint_file, 'model_best.pth.tar')
if os.path.isfile(checkpoint_file):
checkpoint = torch.load(checkpoint_file)
if len(args.gpus)>1:
checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
args.start_epoch = checkpoint['epoch'] - 1
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logging.info("loaded checkpoint '%s' (epoch %s)",
checkpoint_file, checkpoint['epoch'])
else:
logging.error("no checkpoint found at '%s'", args.resume)
criterion = nn.CrossEntropyLoss().cuda()
criterion = criterion.type(args.type)
model = model.type(args.type)
if args.evaluate:
val_loader = dataset.load_data(
type='val',
dataset=args.dataset,
data_path=args.data_path,
batch_size=args.batch_size,
batch_size_test=args.batch_size_test,
num_workers=args.workers)
with torch.no_grad():
val_loss, val_prec1, val_prec5 = validate(val_loader, model, criterion, 0)
logging.info('\n Validation Loss {val_loss:.4f} \t'
'Validation Prec@1 {val_prec1:.3f} \t'
'Validation Prec@5 {val_prec5:.3f} \n'
.format(val_loss=val_loss, val_prec1=val_prec1, val_prec5=val_prec5))
return
if args.dataset=='imagenet':
train_loader = dataset.get_imagenet(
type='train',
image_dir=args.data_path,
batch_size=args.batch_size,
num_threads=args.workers,
crop=224,
device_id='cuda:0',
num_gpus=1)
val_loader = dataset.get_imagenet(
type='val',
image_dir=args.data_path,
batch_size=args.batch_size_test,
num_threads=args.workers,
crop=224,
device_id='cuda:0',
num_gpus=1)
else:
train_loader, val_loader = dataset.load_data(
dataset=args.dataset,
data_path=args.data_path,
batch_size=args.batch_size,
batch_size_test=args.batch_size_test,
num_workers=args.workers)
optimizer = torch.optim.SGD([{'params':model.parameters(),'initial_lr':args.lr}], args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
def cosin(i,T,emin=0,emax=0.01):
"customized cos-lr"
return emin+(emax-emin)/2 * (1+np.cos(i*np.pi/T))
if args.resume:
for param_group in optimizer.param_groups:
param_group['lr'] = cosin(args.start_epoch-args.warm_up*4, args.epochs-args.warm_up*4,0, args.lr)
if args.lr_type == 'cos':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs-args.warm_up*4, eta_min = 0, last_epoch=args.start_epoch-args.warm_up*4)
elif args.lr_type == 'step':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_decay_step, gamma=0.1, last_epoch=-1)
if not args.resume:
logging.info("criterion: %s", criterion)
logging.info('scheduler: %s', lr_scheduler)
def cpt_tk(epoch):
"compute t&k in back-propagation"
T_min, T_max = torch.tensor(args.Tmin).float(), torch.tensor(args.Tmax).float()
Tmin, Tmax = torch.log10(T_min), torch.log10(T_max)
t = torch.tensor([torch.pow(torch.tensor(10.), Tmin + (Tmax - Tmin) / args.epochs * epoch)]).float()
k = max(1/t,torch.tensor(1.)).float()
return t, k
#* setup conv_modules.epoch
conv_modules=[]
for name,module in model.named_modules():
if isinstance(module,nn.Conv2d):
conv_modules.append(module)
for epoch in range(args.start_epoch+1, args.epochs):
time_start = datetime.now()
#*warm up
if args.warm_up and epoch <5:
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr * (epoch+1) / 5
for param_group in optimizer.param_groups:
logging.info('lr: %s', param_group['lr'])
#* compute t/k in back-propagation
t,k = cpt_tk(epoch)
for name,module in model.named_modules():
if isinstance(module,nn.Conv2d):
module.k = k.cuda()
module.t = t.cuda()
for module in conv_modules:
module.epoch = epoch
# train
train_loss, train_prec1, train_prec5 = train(
train_loader, model, criterion, epoch, optimizer)
#* adjust Lr
if epoch >= 4 * args.warm_up:
lr_scheduler.step()
# evaluate
with torch.no_grad():
for module in conv_modules:
module.epoch = -1
val_loss, val_prec1, val_prec5 = validate(
val_loader, model, criterion, epoch)
# remember best prec
is_best = val_prec1 > best_prec1
if is_best:
best_prec1 = max(val_prec1, best_prec1)
best_epoch = epoch
best_loss = val_loss
# save model
if epoch % 1 == 0:
model_state_dict = model.module.state_dict() if len(args.gpus) > 1 else model.state_dict()
model_parameters = model.module.parameters() if len(args.gpus) > 1 else model.parameters()
save_checkpoint({
'epoch': epoch + 1,
'model': args.model,
'state_dict': model_state_dict,
'best_prec1': best_prec1,
'parameters': list(model_parameters),
}, is_best, path=save_path)
if args.time_estimate > 0 and epoch % args.time_estimate==0:
time_end = datetime.now()
cost_time,finish_time = get_time(time_end-time_start,epoch,args.epochs)
logging.info('Time cost: '+cost_time+'\t'
'Time of Finish: '+finish_time)
logging.info('\n Epoch: {0}\t'
'Training Loss {train_loss:.4f} \t'
'Training Prec@1 {train_prec1:.3f} \t'
'Training Prec@5 {train_prec5:.3f} \t'
'Validation Loss {val_loss:.4f} \t'
'Validation Prec@1 {val_prec1:.3f} \t'
'Validation Prec@5 {val_prec5:.3f} \n'
.format(epoch + 1, train_loss=train_loss, val_loss=val_loss,
train_prec1=train_prec1, val_prec1=val_prec1,
train_prec5=train_prec5, val_prec5=val_prec5))
logging.info('*'*50+'DONE'+'*'*50)
logging.info('\n Best_Epoch: {0}\t'
'Best_Prec1 {prec1:.4f} \t'
'Best_Loss {loss:.3f} \t'
.format(best_epoch+1, prec1=best_prec1, loss=best_loss))
def forward(data_loader, model, criterion, epoch=0, training=True, optimizer=None):
    """Run one full pass over `data_loader`.

    When training=True, performs an optimizer step per batch; otherwise only
    evaluates. Returns (avg_loss, avg_prec@1, avg_prec@5). Relies on the
    module-level globals `args`, `conv_modules`, `AverageMeter`, `accuracy`.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    for i, (inputs, target) in enumerate(data_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if i==1 and training:
            # After the first batch, reset epoch to the -1 sentinel on every
            # conv module — presumably so the per-epoch rotation update in
            # BinarizeConv2d.forward runs only once per epoch (see modules).
            for module in conv_modules:
                module.epoch=-1
        if args.gpus is not None:
            inputs = inputs.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        input_var = Variable(inputs.type(args.type))
        target_var = Variable(target)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        if type(output) is list:
            # Some models return auxiliary outputs; score only the primary one.
            output = output[0]
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        if training:
            # compute gradient and take one SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                             epoch, i, len(data_loader),
                             phase='TRAINING' if training else 'EVALUATING',
                             batch_time=batch_time,
                             data_time=data_time, loss=losses,
                             top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg
def train(data_loader, model, criterion, epoch, optimizer):
    """Run one training epoch; returns (avg_loss, avg_prec@1, avg_prec@5)."""
    model.train()
    stats = forward(data_loader, model, criterion, epoch,
                    training=True, optimizer=optimizer)
    return stats
def validate(data_loader, model, criterion, epoch):
    """Run one evaluation pass; returns (avg_loss, avg_prec@1, avg_prec@5)."""
    model.eval()
    stats = forward(data_loader, model, criterion, epoch,
                    training=False, optimizer=None)
    return stats
# Script entry point: run the full training / evaluation pipeline.
if __name__ == '__main__':
    main()
| 12,926 | 38.411585 | 161 | py |
RBNN | RBNN-master/cifar/modules/binarized_modules.py | import torch
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.autograd import Function, Variable
from scipy.stats import ortho_group
from utils.options import args
class BinarizeConv2d(nn.Conv2d):
    """Conv2d with binarized weights/activations and learned rotation (RBNN).

    Weights are standardized, rotated toward a binary matrix via two
    orthogonal matrices R1, R2 (refined by an alternating sign/SVD scheme
    once every `args.rotation_update` epochs), blended per-filter by a
    learnable angle, then binarized with a sign function whose backward pass
    uses the (k, t)-parameterized surrogate gradient defined below.
    """
    def __init__(self, *kargs, **kwargs):
        super(BinarizeConv2d, self).__init__(*kargs, **kwargs)
        # Surrogate-gradient parameters; the training script overwrites them
        # each epoch (see cpt_tk in the main script).
        self.k = torch.tensor([10.]).float()
        self.t = torch.tensor([0.1]).float()
        # Sentinel: epoch == -1 disables the rotation-matrix refinement.
        self.epoch = -1
        w = self.weight
        # Factor the per-filter weight size (C_in*kH*kW) into a near-square
        # (a, b) so each filter can be viewed as an a x b matrix.
        self.a, self.b = get_ab(np.prod(w.shape[1:]))
        # Random orthogonal initializations for the rotation matrices,
        # registered as buffers so they persist in the state dict.
        R1 = torch.tensor(ortho_group.rvs(dim=self.a)).float().cuda()
        R2 = torch.tensor(ortho_group.rvs(dim=self.b)).float().cuda()
        self.register_buffer('R1', R1)
        self.register_buffer('R2', R2)
        # Cache of the rotated weights, refreshed on every forward.
        self.Rweight = torch.ones_like(w)
        # Per-filter scaling factor, initialized to the mean |w| per filter.
        sw = w.abs().view(w.size(0), -1).mean(-1).float().view(w.size(0), 1, 1).detach()
        self.alpha = nn.Parameter(sw.cuda(), requires_grad=True)
        # Learnable rotation gate per filter (|sin| of it weights the blend),
        # initialized at pi/2 (i.e. full rotation).
        self.rotate = nn.Parameter(torch.ones(w.size(0), 1, 1, 1).cuda()*np.pi/2, requires_grad=True)
        # NOTE(review): self.Rotate appears unused in this class — verify
        # whether external code reads it before removing.
        self.Rotate = torch.zeros(1)
    def forward(self, input):
        a0 = input
        w = self.weight
        # Standardize weights and activations to zero mean / unit std.
        w1 = w - w.mean([1,2,3], keepdim=True)
        w2 = w1 / w1.std([1,2,3], keepdim=True)
        a1 = a0 - a0.mean([1,2,3], keepdim=True)
        a2 = a1 / a1.std([1,2,3], keepdim=True)
        a, b = self.a, self.b
        # View each output filter as an a x b matrix.
        X = w2.view(w.shape[0], a, b)
        if self.epoch > -1 and self.epoch % args.rotation_update == 0:
            # Alternately refine the binary target B and the orthogonal
            # R1, R2 (via SVD of the cross terms) so R1^T X R2 -> sign matrix.
            for _ in range(3):
                #* update B
                V = self.R1.t() @ X.detach() @ self.R2
                B = torch.sign(V)
                #* update R1
                D1 = sum([Bi@(self.R2.t())@(Xi.t()) for (Bi,Xi) in zip(B,X.detach())])
                U1, S1, V1 = torch.svd(D1)
                self.R1 = (V1@(U1.t()))
                #* update R2
                D2 = sum([(Xi.t())@self.R1@Bi for (Xi,Bi) in zip(X.detach(),B)])
                U2, S2, V2 = torch.svd(D2)
                self.R2 = (U2@(V2.t()))
        self.Rweight = ((self.R1.t())@X@(self.R2)).view_as(w)
        # Blend original and rotated weights; |sin(rotate)| in [0, 1] gates
        # how far each filter moves toward its rotated version.
        delta = self.Rweight.detach() - w2
        w3 = w2 + torch.abs(torch.sin(self.rotate)) * delta
        #* binarize
        bw = BinaryQuantize().apply(w3, self.k.to(w.device), self.t.to(w.device))
        if args.a32:
            # Keep activations at full precision when requested.
            ba = a2
        else:
            ba = BinaryQuantize_a().apply(a2, self.k.to(w.device), self.t.to(w.device))
        #* 1bit conv
        output = F.conv2d(ba, bw, self.bias, self.stride, self.padding,
                          self.dilation, self.groups)
        #* scaling factor
        output = output * self.alpha
        return output
class BinaryQuantize(Function):
    """Weight binarization: sign(x) forward, (k, t)-surrogate gradient backward.

    The backward pass approximates d sign/dx with the piecewise-linear hat
    k * max(0, 2*sqrt(t^2/2) - |t^2 * x|).
    """

    @staticmethod
    def forward(ctx, input, k, t):
        # Keep the inputs around for the surrogate-gradient computation.
        ctx.save_for_backward(input, k, t)
        return torch.sign(input)

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        # Hat-shaped surrogate: peak 2*sqrt(t^2/2) at x = 0, slope t^2.
        surrogate = 2 * torch.sqrt(t ** 2 / 2) - torch.abs(t ** 2 * input)
        grad_input = grad_output.clone() * (k * surrogate).clamp(min=0)
        # No gradients for k and t.
        return grad_input, None, None
class BinaryQuantize_a(Function):
    """Activation binarization: sign(x) forward; backward like BinaryQuantize
    but with k fixed to 1 and t floored at 1."""

    @staticmethod
    def forward(ctx, input, k, t):
        ctx.save_for_backward(input, k, t)
        return torch.sign(input)

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        # Activations ignore the scheduled k and clip t from below at 1.
        unit = torch.tensor(1.).to(input.device)
        k = unit
        t = max(t, unit)
        surrogate = 2 * torch.sqrt(t ** 2 / 2) - torch.abs(t ** 2 * input)
        grad_input = grad_output.clone() * (k * surrogate).clamp(min=0)
        return grad_input, None, None
def get_ab(N):
    """Return the factor pair (a, b) of N with a <= b and a closest to sqrt(N)."""
    root = int(np.sqrt(N))
    a = next(d for d in range(root, 0, -1) if N % d == 0)
    return a, N // a
| 3,835 | 34.518519 | 101 | py |
RBNN | RBNN-master/cifar/dataset/dataset.py | from datetime import datetime
import os
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
def load_data(type='both', dataset='cifar10', data_path='/data', batch_size=256, batch_size_test=256, num_workers=0):
    """Build DataLoaders for cifar10 / cifar100 / mnist / tinyimagenet.

    Args:
        type: 'both' -> (train_loader, test_loader); 'train' -> train loader
            only; 'val' -> test/val loader only.
        dataset: dataset key (see `param` below).
        data_path: root directory of the dataset on disk.
        batch_size: training batch size.
        batch_size_test: evaluation batch size (torchvision branch only).
        num_workers: DataLoader worker processes.

    Returns:
        One DataLoader or a (train, test) pair, per `type`.
    """
    # Per-dataset class, input size, and normalization statistics.
    param = {'cifar10': {'name': datasets.CIFAR10, 'size': 32,
                         'normalize': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]},
             'cifar100': {'name': datasets.CIFAR100, 'size': 32,
                          'normalize': [(0.507, 0.487, 0.441), (0.267, 0.256, 0.276)]},
             'mnist': {'name': datasets.MNIST, 'size': 32,
                       'normalize': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]},
             'tinyimagenet': {'name': datasets.ImageFolder, 'size': 224,
                              'normalize': [(0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262)]}}
    data = param[dataset]
    if data['name'] == datasets.ImageFolder:
        # Tiny-ImageNet: folder-based dataset with its own transforms.
        data_transforms = {
            'train': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.RandomRotation(20),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
            'val': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
        }
        data_dir = os.path.join(data_path, 'tiny-imagenet-200')
        image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                          for x in ['train', 'val']}
        dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size,
                                     shuffle=(x == 'train'), num_workers=num_workers)
                       for x in ['train', 'val']}
        # Bug fix: this branch previously ignored `type` and always returned
        # both loaders (dict_values), breaking callers that request a single
        # loader (e.g. the evaluate path). It now honors `type` like the
        # torchvision branch below.
        if type == 'train':
            return dataloaders['train']
        elif type == 'val':
            return dataloaders['val']
        return dataloaders['train'], dataloaders['val']
    else:
        # Standard torchvision datasets: crop/flip augmentation for training.
        transform1 = transforms.Compose([
            transforms.RandomCrop(data['size'], padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
        ])
        transform2 = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
        ])
        trainset = data['name'](root=data_path,
                                train=True,
                                download=False,
                                transform=transform1)
        trainloader = DataLoader(
            trainset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True)
        testset = data['name'](root=data_path,
                               train=False,
                               download=False,
                               transform=transform2)
        testloader = DataLoader(
            testset,
            batch_size=batch_size_test,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True)
        if type == 'both':
            return trainloader, testloader
        elif type == 'train':
            return trainloader
        elif type == 'val':
            return testloader
def delete_module_fromdict(statedict):
    """Strip the leading 'module.' (7 chars) from every key of a
    DataParallel-saved state dict, preserving key order."""
    from collections import OrderedDict
    stripped = OrderedDict((key[7:], value) for key, value in statedict.items())
    return stripped
def add_module_fromdict(statedict):
    """Prefix every key of a plain state dict with 'module.' so it loads
    into a DataParallel-wrapped model, preserving key order."""
    from collections import OrderedDict
    prefixed = OrderedDict(('module.' + key, value) for key, value in statedict.items())
    return prefixed
| 3,858 | 37.59 | 134 | py |
RBNN | RBNN-master/cifar/dataset/imagenet.py | import time
import torch.utils.data
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import torchvision.datasets as datasets
from nvidia.dali.pipeline import Pipeline
import torchvision.transforms as transforms
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, DALIGenericIterator
class HybridTrainPipe(Pipeline):
    """DALI training pipeline: sharded shuffled file reader -> mixed-device
    JPEG decode -> random-resized crop -> normalize with optional mirror.

    NOTE(review): `dali_cpu` only affects the printed message; the ops are
    hard-coded to the GPU — confirm whether a CPU variant was intended.
    """
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
        super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
        dali_device = "gpu"
        # Sharded, shuffled reader over the raw training image tree.
        self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.res = ops.RandomResizedCrop(device="gpu", size=crop, random_area=[0.08, 1.25])
        # Normalize with ImageNet statistics scaled to the 0-255 input range.
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            image_type=types.RGB,
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
        # 50% chance of horizontal mirroring, fed to cmnp in define_graph.
        self.coin = ops.CoinFlip(probability=0.5)
        print('DALI "{0}" variant'.format(dali_device))
    def define_graph(self):
        """Wire the ops into the DALI graph; returns [images, labels]."""
        rng = self.coin()
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images, mirror=rng)
        return [output, self.labels]
class HybridValPipe(Pipeline):
    """DALI validation pipeline: sequential file reader -> mixed-device JPEG
    decode -> resize shorter side to `size` -> center crop to `crop` ->
    normalize (no augmentation, no mirroring)."""
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size, local_rank=0, world_size=1):
        super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
        # Deterministic (unshuffled) sharded reader for evaluation.
        self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size,
                                    random_shuffle=False)
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.res = ops.Resize(device="gpu", resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
        # Center crop + normalize with ImageNet statistics (0-255 scale).
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            crop=(crop, crop),
                                            image_type=types.RGB,
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    def define_graph(self):
        """Wire the ops into the DALI graph; returns [images, labels]."""
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images)
        return [output, self.labels]
def get_imagenet_iter_dali(type, image_dir, batch_size, num_threads, device_id, num_gpus, crop, val_size=256,
                           world_size=1,
                           local_rank=0):
    """Build a DALI classification iterator over ImageNet.

    type='train' reads <image_dir>/ILSVRC2012_img_train with augmentation;
    type='val' reads <image_dir>/val with resize + center-crop. Any other
    value returns None implicitly. `device_id` and `num_gpus` are accepted
    but unused here (the pipelines are placed on `local_rank`).
    """
    if type == 'train':
        pip_train = HybridTrainPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
                                    data_dir=image_dir + '/ILSVRC2012_img_train',
                                    crop=crop, world_size=world_size, local_rank=local_rank)
        pip_train.build()
        # size = per-shard sample count so each rank iterates its own shard.
        dali_iter_train = DALIClassificationIterator(pip_train, size=pip_train.epoch_size("Reader") // world_size, auto_reset=True)
        return dali_iter_train
    elif type == 'val':
        pip_val = HybridValPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
                                data_dir=image_dir + '/val',
                                crop=crop, size=val_size, world_size=world_size, local_rank=local_rank)
        pip_val.build()
        dali_iter_val = DALIClassificationIterator(pip_val, size=pip_val.epoch_size("Reader") // world_size, auto_reset=True)
        return dali_iter_val
def get_imagenet_iter_torch(type, image_dir, batch_size, num_threads, device_id, num_gpus, crop, val_size=256,
                            world_size=1, local_rank=0):
    """Plain torchvision ImageNet loader (non-DALI fallback).

    type='train' uses random-resized-crop + flip over <image_dir>/train;
    any other value uses resize + center-crop over <image_dir>/val.
    device_id / num_gpus / world_size / local_rank are accepted but unused.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if type == 'train':
        pipeline = transforms.Compose([
            transforms.RandomResizedCrop(crop, scale=(0.08, 1.25)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        split_dir = image_dir + '/train'
        shuffle = True
    else:
        pipeline = transforms.Compose([
            transforms.Resize(val_size),
            transforms.CenterCrop(crop),
            transforms.ToTensor(),
            normalize,
        ])
        split_dir = image_dir + '/val'
        shuffle = False
    ds = datasets.ImageFolder(split_dir, pipeline)
    return torch.utils.data.DataLoader(ds, batch_size=batch_size, shuffle=shuffle,
                                       num_workers=num_threads, pin_memory=True)
if __name__ == '__main__':
    # Benchmark: time one full epoch of iteration with the DALI pipeline...
    train_loader = get_imagenet_iter_dali(type='train', image_dir='/userhome/memory_data/imagenet', batch_size=256,
                                          num_threads=4, crop=224, device_id=0, num_gpus=1)
    print('start iterate')
    start = time.time()
    for i, data in enumerate(train_loader):
        # DALI iterators yield a list of dicts with "data"/"label" keys.
        images = data[0]["data"].cuda(non_blocking=True)
        labels = data[0]["label"].squeeze().long().cuda(non_blocking=True)
    end = time.time()
    print('end iterate')
    print('dali iterate time: %fs' % (end - start))
    # ...then repeat with the plain torchvision loader for comparison.
    train_loader = get_imagenet_iter_torch(type='train', image_dir='/userhome/data/imagenet', batch_size=256,
                                           num_threads=4, crop=224, device_id=0, num_gpus=1)
    print('start iterate')
    start = time.time()
    for i, data in enumerate(train_loader):
        # Torch DataLoader yields (images, labels) tuples.
        images = data[0].cuda(non_blocking=True)
        labels = data[1].cuda(non_blocking=True)
    end = time.time()
    print('end iterate')
    print('torch iterate time: %fs' % (end - start))
| 6,546 | 51.376 | 131 | py |
RBNN | RBNN-master/cifar/models_imagenet/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.init as init
from modules import *
# BatchNorm class used by the blocks below; assigned in ResNet.__init__.
BN = None
__all__ = ['resnet18_1w1a', 'resnet34_1w1a']
# torchvision checkpoint URLs used when pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
}
def conv3x3Binary(in_planes, out_planes, stride=1):
    """Binary 3x3 convolution with padding=1 and no bias."""
    return BinarizeConv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Binary residual block: two 3x3 binary convs, each followed by its own
    shortcut addition and hardtanh (Bi-Real-style double-skip layout)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3Binary(inplanes, planes, stride)
        self.bn1 = BN(planes)
        self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = conv3x3Binary(planes, planes)
        self.bn2 = BN(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # First conv + skip (projected when shapes differ).
        identity = x if self.downsample is None else self.downsample(x)
        h = self.bn1(self.conv1(x))
        h = self.nonlinear(h + identity)
        # Second conv skips from the first conv's output.
        out = self.bn2(self.conv2(h))
        out = self.nonlinear(out + h)
        return out
class ResNet(nn.Module):
    """ImageNet ResNet whose residual blocks use binary convolutions.

    Stem, downsample convs, and the classifier stay full-precision.
    NOTE(review): self.nonlinear2 and self.bn3 are created but never used in
    forward(); bypass_bn_weight_list is never populated, so the bypass loop
    is a no-op.
    """
    def __init__(self, block, layers, num_classes=1000, deep_stem=False,
                 avg_down=False, bypass_last_bn=False,
                 bn_group_size=1,
                 bn_group=None,
                 bn_sync_stats=False,
                 use_sync_bn=True):
        # BN is a module-level global consumed by BasicBlock; it is fixed to
        # plain BatchNorm2d here (the bn_* / use_sync_bn arguments are
        # accepted for interface compatibility but otherwise unused).
        global BN, bypass_bn_weight_list
        BN = nn.BatchNorm2d
        bypass_bn_weight_list = []
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        if self.deep_stem:
            # Three stacked 3x3 convs instead of the single 7x7 stem.
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False),
                BN(32),
                nn.Hardtanh(inplace=True),
                nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
                BN(32),
                nn.Hardtanh(inplace=True),
                nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BN(64)
        self.nonlinear1 = nn.Hardtanh(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.bn2 = nn.BatchNorm1d(512 * block.expansion)
        self.nonlinear2 = nn.Hardtanh(inplace=True)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.bn3 = nn.BatchNorm1d(num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                # Near-zero BN scale at init, zeroed bias.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
        if bypass_last_bn:
            for param in bypass_bn_weight_list:
                param.data.zero_()
            print('bypass {} bn.weight in BottleneckBlocks'.format(len(bypass_bn_weight_list)))
    def _make_layer(self, block, planes, blocks, stride=1, avg_down=False):
        """Stack `blocks` residual blocks; the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if self.avg_down:
                # Avg-pool first, then a stride-1 1x1 conv.
                downsample = nn.Sequential(
                    nn.AvgPool2d(stride, stride=stride, ceil_mode=True, count_include_pad=False),
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=1, bias=False),
                    BN(planes * block.expansion),
                )
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    BN(planes * block.expansion),
                )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.nonlinear1(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.bn2(x)
        x = self.fc(x)
        return x
def resnet18_1w1a(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # Load torchvision's float ResNet-18 checkpoint.
        net.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return net
def resnet34_1w1a(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # Load torchvision's float ResNet-34 checkpoint.
        net.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return net
| 5,965 | 31.248649 | 97 | py |
RBNN | RBNN-master/cifar/models_cifar/resnet2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
__all__ =['resnet18A_1w1a','resnet18B_1w1a','resnet18C_1w1a','resnet18_1w1a']
class BasicBlock(nn.Module):
    """Binary basic block: conv-bn-hardtanh, conv-bn, add shortcut, hardtanh."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3,
                                    stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3,
                                    stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when the spatial size or width changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, self.expansion * planes,
                               kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = F.hardtanh(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return F.hardtanh(h + self.shortcut(x))
class Bottleneck(nn.Module):
    """Binary-conv bottleneck block (1x1 -> 3x3 -> 1x1) with ReLU.

    NOTE(review): unlike BasicBlock above (hardtanh), this block uses ReLU;
    it is only referenced by the ResNet50/101/152 helpers further below.
    """
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Expansion conv widens to 4x planes.
        self.conv3 = BinarizeConv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            # Projection shortcut when the shape changes.
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR ResNet with configurable per-stage channel widths.

    `num_channel` is a 4-element sequence giving the width of each stage.
    The stem conv and final linear layer are full-precision.
    """
    def __init__(self, block, num_blocks, num_channel, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = num_channel[0]
        # Full-precision stem for 32x32 inputs.
        self.conv1 = nn.Conv2d(3, num_channel[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_channel[0])
        self.layer1 = self._make_layer(block, num_channel[0], num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, num_channel[1], num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, num_channel[2], num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, num_channel[3], num_blocks[3], stride=2)
        self.linear = nn.Linear(num_channel[3]*block.expansion, num_classes)
        self.bn2 = nn.BatchNorm1d(num_channel[3]*block.expansion)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first uses `stride`."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.bn1(self.conv1(x))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 4x4 pool matches the 4x4 map left after three stride-2 stages.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
# Binary ResNet-18 variants with progressively wider channel configurations.
def resnet18A_1w1a(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2],[32,32,64,128],**kwargs)
def resnet18B_1w1a(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2],[32,64,128,256],**kwargs)
def resnet18C_1w1a(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2],[64,64,128,256],**kwargs)
def resnet18_1w1a(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2],[64,128,256,512],**kwargs)
# NOTE(review): the helpers below omit the required `num_channel` argument of
# ResNet.__init__ in this file and would raise TypeError if called — likely
# dead code carried over from another ResNet implementation.
def ResNet34():
    return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
    return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
    return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
    return ResNet(Bottleneck, [3,8,36,3])
def test():
    "Smoke test: run one random CIFAR-sized batch through resnet18_1w1a."
    net = resnet18_1w1a()
    y = net(torch.randn(1,3,32,32))
    print(y.size())
# test()
| 4,704 | 33.343066 | 107 | py |
RBNN | RBNN-master/cifar/models_cifar/resnet.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_1w1a']
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module (used for option-A shortcuts)."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Delegate straight to the stored callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Full-precision CIFAR basic block (ReLU) with option A/B shortcuts."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # Option A (CIFAR ResNet paper): subsample spatially and
                # zero-pad the channel dimension — parameter-free shortcut.
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2],
                                    (0, 0, 0, 0, planes // 4, planes // 4),
                                    "constant", 0))
            elif option == 'B':
                # Option B: learned 1x1 projection shortcut.
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes),
                )

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return F.relu(h + self.shortcut(x))
class BasicBlock_1w1a(nn.Module):
    """Binary (1-bit weight / 1-bit activation) basic block with hardtanh."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3,
                                    stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3,
                                    stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # Option A (CIFAR ResNet paper): subsample spatially and
                # zero-pad the channel dimension — parameter-free shortcut.
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2],
                                    (0, 0, 0, 0, planes // 4, planes // 4),
                                    "constant", 0))
            elif option == 'B':
                # Option B: learned binary 1x1 projection shortcut.
                self.shortcut = nn.Sequential(
                    BinarizeConv2d(in_planes, self.expansion * planes,
                                   kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes),
                )

    def forward(self, x):
        h = F.hardtanh(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return F.hardtanh(h + self.shortcut(x))
class ResNet(nn.Module):
    """CIFAR ResNet (16/32/64 channels, hardtanh after the stem)."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        # Full-precision stem conv for 32x32 inputs.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # Near-zero BN scale at init, zeroed bias.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first uses `stride`."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool over the remaining spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
# Binary ResNet-20; the remaining depths below use the full-precision block.
def resnet20_1w1a(**kwargs):
    return ResNet(BasicBlock_1w1a, [3, 3, 3],**kwargs)
def resnet20():
    return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
    return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
    return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
    return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
    return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
    return ResNet(BasicBlock, [200, 200, 200])
def test(net):
    """Print the trainable-parameter total and trainable layer count of `net`."""
    import numpy as np
    trainable = [p for p in net.parameters() if p.requires_grad]
    total_params = sum(np.prod(p.data.numpy().shape) for p in trainable)
    print("Total number of params", total_params)
    # Count only weight tensors (ndim > 1), skipping biases/BN vectors.
    print("Total layers", len([p for p in trainable if len(p.data.size()) > 1]))
if __name__ == "__main__":
    # Report parameter/layer counts for every exported resnet variant.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 6,508 | 31.708543 | 120 | py |
RBNN | RBNN-master/cifar/models_cifar/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from modules import *
__all__ = ['vgg_small_1w1a']
# torchvision pretrained checkpoint URLs (not referenced by the factories
# in this file, which never load pretrained weights).
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG-style classifier: `features` extractor, then BN -> hardtanh ->
    a single 512-wide linear head -> BN -> log-softmax.

    Args:
        features: nn.Module producing (N, 512) features after flattening.
        num_classes: size of the output layer.
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        # Bug fix: the original used `512 * block.expansion`, but `block` is
        # not defined in this module (copied from a ResNet implementation)
        # and raised NameError on instantiation. The last VGG conv stage has
        # 512 channels, matching the 512-wide classifier below.
        self.bn2 = nn.BatchNorm1d(512)
        self.nonlinear2 = nn.Hardtanh(inplace=True)
        self.classifier = nn.Linear(512, num_classes)
        self.bn3 = nn.BatchNorm1d(num_classes)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.bn2(x)
        x = self.nonlinear2(x)
        x = self.classifier(x)
        x = self.bn3(x)
        x = self.logsoftmax(x)
        return x

    def _initialize_weights(self):
        """He-style init for convs, unit BN scale, small-normal linear weights."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an nn.Sequential feature extractor.

    Each int entry adds a 3x3 conv (+ optional BatchNorm2d) followed by
    Hardtanh; each 'M' entry adds a 2x2 stride-2 max-pool.
    """
    modules = []
    channels = 3
    for entry in cfg:
        if entry == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, entry, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(entry))
        modules.append(nn.Hardtanh(inplace=True))
        channels = entry
    return nn.Sequential(*modules)
# VGG layer configurations: ints are conv output channels, 'M' is max-pool.
# A/B/D/E correspond to VGG-11/13/16/19 (see the factories below); 'F' is a
# small 4-conv variant.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'F': [128, 128, 'M', 512, 512, 'M'],
}
def vgg11(**kwargs):
    """Construct a VGG 11-layer model (configuration "A"), no batch norm."""
    return VGG(make_layers(cfg['A']), **kwargs)
def vgg11_bn(**kwargs):
    """Construct a VGG 11-layer model (configuration "A") with batch normalization."""
    return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
def vgg13(**kwargs):
    """Construct a VGG 13-layer model (configuration "B"), no batch norm."""
    return VGG(make_layers(cfg['B']), **kwargs)
def vgg13_bn(**kwargs):
    """Construct a VGG 13-layer model (configuration "B") with batch normalization."""
    return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
def vgg16(**kwargs):
    """Construct a VGG 16-layer model (configuration "D"), no batch norm."""
    return VGG(make_layers(cfg['D']), **kwargs)
def vgg16_bn(**kwargs):
    """Construct a VGG 16-layer model (configuration "D") with batch normalization."""
    return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
def vgg19(**kwargs):
    """Construct a VGG 19-layer model (configuration "E"), no batch norm."""
    return VGG(make_layers(cfg['E']), **kwargs)
def vgg19_bn(**kwargs):
    """Construct a VGG 19-layer model (configuration "E") with batch normalization."""
    return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
class VGG_SMALL_1W1A(nn.Module):
    """Small VGG with binarized (1-bit weight / 1-bit activation) convs.

    The first conv is full-precision; conv1..conv5 use the project's
    ``BinarizeConv2d``.  The ``512*4*4`` classifier input implies a 32x32
    input image (CIFAR-sized) after the three 2x2 poolings — TODO confirm.
    """
    def __init__(self, num_classes=10):
        super(VGG_SMALL_1W1A, self).__init__()
        # Full-precision stem.
        self.conv0 = nn.Conv2d(3, 128, kernel_size=3, padding=1, bias=False)
        self.bn0 = nn.BatchNorm2d(128)
        # Binarized body; the single pooling module is reused at three points.
        self.conv1 = BinarizeConv2d(128, 128, kernel_size=3, padding=1, bias=False)
        self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)
        self.bn1 = nn.BatchNorm2d(128)
        self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = BinarizeConv2d(128, 256, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(256)
        self.conv3 = BinarizeConv2d(256, 256, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = BinarizeConv2d(256, 512, kernel_size=3, padding=1, bias=False)
        self.bn4 = nn.BatchNorm2d(512)
        self.conv5 = BinarizeConv2d(512, 512, kernel_size=3, padding=1, bias=False)
        self.bn5 = nn.BatchNorm2d(512)
        self.fc = nn.Linear(512*4*4, num_classes)
        self._initialize_weights()
    def _initialize_weights(self):
        """He-init convs (binarized and full precision), unit BN, N(0,0.01) linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, BinarizeConv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
    def forward(self, x):
        # Pooling happens *before* batch norm on the pooled stages (1, 3, 5).
        x = self.conv0(x)
        x = self.bn0(x)
        x = self.nonlinear(x)
        x = self.conv1(x)
        x = self.pooling(x)
        x = self.bn1(x)
        x = self.nonlinear(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.nonlinear(x)
        x = self.conv3(x)
        x = self.pooling(x)
        x = self.bn3(x)
        x = self.nonlinear(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.nonlinear(x)
        x = self.conv5(x)
        x = self.pooling(x)
        x = self.bn5(x)
        x = self.nonlinear(x)
        # x = self.pooling(x)
        x = x.view(x.size(0), -1)  # flatten for the linear classifier
        x = self.fc(x)
        return x
def vgg_small_1w1a(**kwargs):
    """Factory for the small binarized (1-bit weight/activation) VGG."""
    return VGG_SMALL_1W1A(**kwargs)
| 7,083 | 30.766816 | 113 | py |
RBNN | RBNN-master/cifar/models_cifar/resnet_bireal.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) has the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_bireal_1w1a']
class LambdaLayer(nn.Module):
    """Adapter that lifts a plain callable into an ``nn.Module``.

    Used below to express the parameter-free option-A shortcut as a module.
    """

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Delegate straight to the wrapped callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Full-precision CIFAR ResNet basic block: two 3x3 convs + shortcut.

    Shortcut option 'A' (default) is parameter-free subsampling with zero
    padding, as in the original CIFAR ResNet paper; option 'B' is a 1x1
    projection conv.
    """
    # Output channel multiplier relative to ``planes``.
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Subsample spatially by 2 and zero-pad channels to ``planes``.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        # Standard residual: conv-bn-relu, conv-bn, add shortcut, relu.
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class BasicBlock_1w1a(nn.Module):
    """Binarized (1-bit weight/activation) residual block, Bi-Real style.

    Unlike ``BasicBlock``, each of the two binary convolutions gets its own
    shortcut, so real-valued information can flow past every binary conv.
    Activations use hardtanh instead of relu.
    """
    # Output channel multiplier relative to ``planes``.
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Parameter-free shortcut: subsample by 2 and zero-pad channels.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        # First binary conv with its (possibly downsampling) shortcut.
        out = self.bn1(self.conv1(x))
        out += self.shortcut(x)
        out = F.hardtanh(out)
        x1 = out
        # Second binary conv with an identity shortcut from the intermediate.
        out = self.bn2(self.conv2(out))
        out += x1
        out = F.hardtanh(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 conv stem + three stages of 16/32/64 channels.

    ``block`` selects the residual block type (full-precision ``BasicBlock``
    or binarized ``BasicBlock_1w1a``); ``num_blocks`` gives per-stage depth.
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # NOTE(review): BN scale is initialized near zero (1e-8)
                # rather than 1 — presumably deliberate for binary-network
                # training; confirm against the RBNN paper/training recipe.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, out.size()[3])  # global average pooling
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
def resnet20_bireal_1w1a(**kwargs):
    """ResNet-20 built from Bi-Real-style binarized (1w/1a) basic blocks."""
    return ResNet(BasicBlock_1w1a, [3, 3, 3], **kwargs)
def resnet20():
    """Full-precision ResNet-20 (3 blocks per stage)."""
    return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
    """Full-precision ResNet-32 (5 blocks per stage)."""
    return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
    """Full-precision ResNet-44 (7 blocks per stage)."""
    return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
    """Full-precision ResNet-56 (9 blocks per stage)."""
    return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
    """Full-precision ResNet-110 (18 blocks per stage)."""
    return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
    """Full-precision ResNet-1202 (200 blocks per stage)."""
    return ResNet(BasicBlock, [200, 200, 200])
def test(net):
    """Print *net*'s trainable-parameter total and its count of weight layers.

    A "layer" here is any trainable tensor with more than one dimension
    (i.e. conv/linear weights, excluding biases and BN vectors).
    """
    import numpy as np
    total_params = 0
    for x in filter(lambda p: p.requires_grad, net.parameters()):
        total_params += np.prod(x.data.numpy().shape)
    print("Total number of params", total_params)
    print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
    # Smoke test: instantiate every exported resnet* and print its size stats.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 6,557 | 31.626866 | 120 | py |
RBNN | RBNN-master/cifar/utils/common.py | import os
import torch
import logging.config
import shutil
import torch.nn as nn
import numpy
import datetime
def setup_logging(log_file='log.txt', filemode='w'):
    """Configure root logging: full DEBUG stream to *log_file*, INFO echoed to console."""
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        filename=log_file,
        filemode=filemode,
    )
    # Mirror messages (INFO and up, bare text) to the terminal as well.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s'))
    logging.getLogger('').addHandler(console)
def save_checkpoint(state, is_best, path='.', filename='checkpoint.pth.tar', save_all=False):
    """Serialize *state* under *path*; optionally mirror it as best/per-epoch copies.

    Args:
        state: checkpoint dict (must contain 'epoch' when save_all is True).
        is_best: also copy to 'model_best.pth.tar'.
        path: destination directory.
        filename: checkpoint file name.
        save_all: additionally keep a copy named by the epoch.
    """
    ckpt_path = os.path.join(path, filename)
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(path, 'model_best.pth.tar'))
    if save_all:
        shutil.copyfile(
            ckpt_path,
            os.path.join(path, 'checkpoint_epoch_%s.pth.tar' % state['epoch']))
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 0-dim tensors, one precision (in percent) per requested k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.float().topk(maxk, 1, True, True)
    pred = pred.t()  # (maxk, batch): row i holds every sample's i-th guess
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: ``correct[:k]`` is non-contiguous (after .t()), so
        # ``.view(-1)`` raises on PyTorch >= 1.5 for k > 1; ``reshape``
        # copies only when needed and is always valid.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def get_time(delta, epoch, epochs):
    """Format elapsed epoch time and the projected finish timestamp.

    Args:
        delta: wall-clock duration of the epoch that just finished.
        epoch: zero-based index of that epoch.
        epochs: total number of epochs.

    Returns:
        (cost_time, finish_time): elapsed time as 'MM:SS' (or 'H:MM:SS' when
        over an hour) and the estimated completion time as
        'YYYY-mm-dd HH:MM:SS'.
    """
    now = datetime.datetime.now()
    # Drop the leading hours field for sub-hour epochs, then strip the
    # fractional-seconds part.
    clip = 0 if delta >= datetime.timedelta(hours=1) else 1
    cost_time = ':'.join(str(delta).split(':')[clip:]).split('.')[0]
    # Remaining time = per-epoch duration x epochs still to run.
    remaining = delta * (epochs - epoch - 1)
    finish_time = (now + remaining).strftime('%Y-%m-%d %H:%M:%S')
    return cost_time, finish_time
| 2,252 | 27.884615 | 93 | py |
RBNN | RBNN-master/cifar/utils/options.py | import argparse
import os
"""
args
"""
# Command-line options for RotationNet training; this module builds the
# parser at import time and exposes the parsed result as ``args`` at the
# bottom of the file.
parser = argparse.ArgumentParser(description='RotationNet')
# Logging
parser.add_argument(
    '--results_dir',
    metavar='RESULTS_DIR',
    default='./results',
    help='results dir')
parser.add_argument(
    '--save',
    metavar='SAVE',
    default='',
    help='saved folder (named by datetime)')
parser.add_argument(
    '--resume',
    dest='resume',
    action='store_true',
    help='resume to latest checkpoint')
parser.add_argument(
    '-e',
    '--evaluate',
    type=str,
    metavar='FILE',
    help='evaluate model FILE on validation set')
parser.add_argument(
    '--seed',
    default=1234,
    type=int,
    help='random seed')
# Model
parser.add_argument(
    '--model',
    '-a',
    metavar='MODEL',
    default='resnet20_bireal_1w1a',
    help='model architecture ')
parser.add_argument(
    '--dataset',
    default='cifar10',
    type=str,
    help='dataset, default:cifar10')
parser.add_argument(
    '--data_path',
    type=str,
    default='/home/data',
    help='The dictionary where the dataset is stored.')
parser.add_argument(
    '--type',
    default='torch.cuda.FloatTensor',
    help='type of tensor - e.g torch.cuda.FloatTensor')
# Training
parser.add_argument(
    '--gpus',
    default='0',
    help='gpus used for training - e.g 0,1,3')
parser.add_argument(
    '--lr',
    default=0.1,
    type=float,
    help='learning rate')
parser.add_argument(
    '--weight_decay',
    type=float,
    default=1e-4,
    help='Weight decay of loss. default:1e-4')
parser.add_argument(
    '--momentum',
    default=0.9,
    type=float,
    metavar='M',
    help='momentum')
parser.add_argument(
    '--workers',
    default=8,
    type=int,
    metavar='N',
    help='number of data loading workers (default: 8)')
parser.add_argument(
    '--epochs',
    default=1000,
    type=int,
    metavar='N',
    help='number of total epochs to run')
parser.add_argument(
    '--start_epoch',
    default=-1,
    type=int,
    metavar='N',
    help='manual epoch number (useful on restarts)')
parser.add_argument(
    '-b',
    '--batch_size',
    default=256,
    type=int,
    metavar='N',
    help='mini-batch size for training (default: 256)')
parser.add_argument(
    '-bt',
    '--batch_size_test',
    default=128,
    type=int,
    help='mini-batch size for testing (default: 128)')
parser.add_argument(
    '--print_freq',
    '-p',
    default=100,
    type=int,
    metavar='N',
    help='print frequency (default: 100)')
parser.add_argument(
    '--time_estimate',
    default=1,
    type=int,
    metavar='N',
    help='print estimating finish time,set to 0 to disable')
# Options below are specific to the rotation/binarization scheme.
parser.add_argument(
    '--rotation_update',
    default=1,
    type=int,
    metavar='N',
    help='interval of updating rotation matrix (default:1)')
parser.add_argument(
    '--Tmin',
    default=1e-2,
    type=float,
    metavar='M',
    help='minimum of T (default:1e-2)')
parser.add_argument(
    '--Tmax',
    default=1e1,
    type=float,
    metavar='M',
    help='maximum of T (default:1e1)')
parser.add_argument(
    '--lr_type',
    type=str,
    default='cos',
    help='choose lr_scheduler,(default:cos)')
parser.add_argument(
    '--lr_decay_step',
    nargs='+',
    type=int,
    help='lr decay step for MultiStepLR')
parser.add_argument(
    '--a32',
    dest='a32',
    action='store_true',
    help='w1a32')
parser.add_argument(
    '--warm_up',
    dest='warm_up',
    action='store_true',
    help='use warm up or not')
args = parser.parse_args() | 3,554 | 18.010695 | 60 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/dataloader.py | # from __future__ import print_function
# from __future__ import division
import torch
import numpy as np
from torchvision import transforms
import os
import glob
from PIL import Image
class DataLoaderSegmentation(torch.utils.data.dataset.Dataset):
    """Segmentation dataset pairing ``Images/*`` with same-named ``Labels/*.png``.

    The label is stacked as a 4th channel onto the RGB image so both receive
    the *same* random crop/flip; the Normalize transform uses mean 0 / std 1
    on that channel so label values pass through unchanged (scaled to [0,1]
    by ToTensor, undone in __getitem__).
    """
    def __init__(self, folder_path, mode):
        super(DataLoaderSegmentation, self).__init__()
        self.img_files = glob.glob(os.path.join(folder_path,'Images','*.*'))
        self.label_files = []
        # Each label shares its image's basename but is always a .png.
        for img_path in self.img_files:
            image_filename, _ = os.path.splitext(os.path.basename(img_path))
            label_filename_with_ext = f"{image_filename}.png"
            self.label_files.append(os.path.join(folder_path, 'Labels', label_filename_with_ext))
        # Data augmentation and normalization for training
        # Just normalization for validation
        if "val" == mode :
            self.transforms = transforms.Compose([
                transforms.CenterCrop((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406, 0], [0.229, 0.224, 0.225, 1])
            ])
        else:
            self.transforms = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
                # transforms.RandomResizedCrop((512, 512)),
                transforms.RandomCrop((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406, 0], [0.229, 0.224, 0.225, 1])
            ])
    def __getitem__(self, index):
        """Return (image, label): float (3,224,224) tensor and int64 (224,224) map."""
        img_path = self.img_files[index]
        label_path = self.label_files[index]
        image = Image.open(img_path)
        label = Image.open(label_path)
        # Concatenate image and label, to apply same transformation on both
        # (assumes a 3-channel RGB image and a single-channel label —
        # TODO confirm for this dataset).
        image_np = np.asarray(image)
        label_np = np.asarray(label)
        new_shape = (image_np.shape[0], image_np.shape[1], image_np.shape[2] + 1)
        image_and_label_np = np.zeros(new_shape, image_np.dtype)
        image_and_label_np[:, :, 0:3] = image_np
        image_and_label_np[:, :, 3] = label_np
        # Convert to PIL
        image_and_label = Image.fromarray(image_and_label_np)
        # Apply Transforms
        image_and_label = self.transforms(image_and_label)
        # Extract image and label
        image = image_and_label[0:3, :, :]
        label = image_and_label[3, :, :].unsqueeze(0)
        # Normalize back from [0, 1] to [0, 255]
        label = label * 255
        # Convert to int64 and remove second dimension
        label = label.long().squeeze()
        return image, label
def __len__(self):
return len(self.img_files) | 2,806 | 38.535211 | 97 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/main_training.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import os
import argparse
import pathlib
# Local import
from dataloader import DataLoaderSegmentation
from custom_model import initialize_model
from train import train_model
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
"""
Version requirements:
PyTorch Version: 1.4.0
Torchvision Version: 0.5.0
"""
def main(data_dir, dest_dir, num_classes, batch_size, num_epochs, keep_feature_extract, weight):
# def main():
    """Fine-tune DeepLabV3 on the dataset in *data_dir* and save the best weights.

    Args:
        data_dir: root holding train/{Images,Labels} and val/{Images,Labels}.
        dest_dir: output directory for checkpoints and the final model.
        num_classes: number of segmentation classes (including background 0).
        batch_size: training/validation mini-batch size.
        num_epochs: total epochs.
        keep_feature_extract: freeze the backbone; train only the new head.
        weight: optional per-class CrossEntropy weights (empty list = none).
    """
    print("Initializing Datasets and Dataloaders...")
    # Create training and validation datasets
    image_datasets = {x: DataLoaderSegmentation(os.path.join(data_dir, x), x) for x in ['train', 'val']}
    # Create training and validation dataloaders
    dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
    print("Initializing Model...")
    # Initialize model
    model_deeplabv3, input_size = initialize_model(num_classes, keep_feature_extract, use_pretrained=True)
    # Detect if we have a GPU available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Send the model to GPU
    model_deeplabv3 = model_deeplabv3.to(device)
    # Gather the parameters to be optimized/updated in this run. If we are
    # finetuning we will be updating all parameters. However, if we are
    # doing feature extract method, we will only update the parameters
    # that we have just initialized, i.e. the parameters with requires_grad
    # is True.
    params_to_update = model_deeplabv3.parameters()
    print("Params to learn:")
    if keep_feature_extract:
        params_to_update = []
        for name, param in model_deeplabv3.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)
    else:
        for name, param in model_deeplabv3.named_parameters():
            if param.requires_grad:
                print("\t", name)
    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
    # Setup the loss function
    criterion = nn.CrossEntropyLoss(weight=(torch.FloatTensor(weight).to(device) if weight else None))
    # Prepare output directory
    pathlib.Path(dest_dir).mkdir(parents=True, exist_ok=True)
    print("Train...")
    # Train and evaluate
    model_deeplabv3_state_dict, hist = train_model(model_deeplabv3, num_classes, dataloaders_dict, criterion, optimizer_ft, device, dest_dir, num_epochs=num_epochs)
    print("Save ...")
    torch.save(model_deeplabv3_state_dict, os.path.join(dest_dir, "best_DeepLabV3_Skydiver.pth"))
def args_preprocess():
    """Parse command-line arguments and dispatch to :func:`main`."""
    # Command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "data_dir", help='Specify the dataset directory path, should contain train/Images, train/Labels, val/Images and val/Labels')
    parser.add_argument(
        "dest_dir", help='Specify the directory where model weights shall be stored.')
    parser.add_argument("--num_classes", default=5, type=int, help="Number of classes in the dataset, index 0 for no-label should be included in the count")
    parser.add_argument("--epochs", default=100, type=int, help="Number of epochs to train for")
    parser.add_argument("--batch_size", default=16, type=int, help="Batch size for training (change depending on how much memory you have)")
    parser.add_argument("--keep_feature_extract", action="store_true", help="Flag for feature extracting. When False, we finetune the whole model, when True we only update the reshaped layer params")
    parser.add_argument('-w', action='append', type=float, help="Add more weight to some classes. If this argument is used, then it should be called as many times as there are classes (see --num_classes)")
    args = parser.parse_args()
    # Build weight list (args.w is None when -w was never given).
    weight = []
    if args.w:
        for w in args.w:
            weight.append(w)
    main(args.data_dir, args.dest_dir, args.num_classes, args.batch_size, args.epochs, args.keep_feature_extract, weight)
if __name__ == '__main__':
args_preprocess() | 4,292 | 39.5 | 205 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/custom_model.py | import torchvision
from torchvision import models
import torch
class DeepLabV3Wrapper(torch.nn.Module):
    """Unwrap DeepLabV3's ``{'out': ...}`` result into a plain tensor.

    ``torch.jit.trace`` needs tensor outputs, so this adapter forwards to the
    wrapped model and returns only its 'out' entry.
    """

    def __init__(self, model):
        super(DeepLabV3Wrapper, self).__init__()
        self.model = model

    def forward(self, input):
        # Keep only the primary segmentation head's output.
        return self.model(input)['out']
def initialize_model(num_classes, keep_feature_extract=False, use_pretrained=True):
    """DeepLabV3-ResNet101 with a fresh classification head for *num_classes*.

    The torchvision weights were pretrained on a subset of COCO train2017,
    on the 20 categories that are present in the Pascal VOC dataset.

    Returns:
        (model, input_size): the model and its expected square input size.
    """
    model_deeplabv3 = models.segmentation.deeplabv3_resnet101(pretrained=use_pretrained, progress=True)
    # The auxiliary training head is not used here.
    model_deeplabv3.aux_classifier = None
    if keep_feature_extract:
        # Freeze everything; only the new head created below remains trainable.
        for param in model_deeplabv3.parameters():
            param.requires_grad = False
    input_size = 224
    # 2048 = channel count of the ResNet-101 backbone output fed to the head.
    model_deeplabv3.classifier = torchvision.models.segmentation.deeplabv3.DeepLabHead(2048, num_classes)
    return model_deeplabv3, input_size
| 960 | 33.321429 | 124 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/main_inference.py | import torch
import numpy as np
from torchvision import transforms
import cv2
from PIL import Image
import custom_model
# Inference script: load trained weights, run the model on every 25th frame
# of a hard-coded image sequence, and dump colorized segmentation maps.
# Number of classes in the dataset
num_classes = 5
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# use_pretrained=False: weights come from the checkpoint below, not torchvision.
model, input_size = custom_model.initialize_model(num_classes, keep_feature_extract=True, use_pretrained=False)
state_dict = torch.load("training_output_Skydiver_dataset_final/best_DeepLabV3_Skydiver.pth", map_location=device)
model = model.to(device)
model.load_state_dict(state_dict)
model.eval()
# Same ImageNet normalization as used during training (3 channels only here).
transforms_image = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
for idx in range(1, 3000, 25):
    image = Image.open(f"/tmp/pycharm_project_782/03.03.20_saut_4/{idx:06}.png")
    image_np = np.asarray(image)
    # image_np = cv2.resize(image_np, 0.5, 0.5, cv2.INTER_CUBIC)
    # Downscale to 30% to keep inference fast.
    width = int(image_np.shape[1] * 0.3)
    height = int(image_np.shape[0] * 0.3)
    dim = (width, height)
    image_np = cv2.resize(image_np, dim, interpolation=cv2.INTER_AREA)
    image = Image.fromarray(image_np)
    image = transforms_image(image)
    image = image.unsqueeze(0)  # add batch dimension
    image = image.to(device)
    outputs = model(image)["out"]
    # Per-pixel argmax over class scores -> label map.
    _, preds = torch.max(outputs, 1)
    preds = preds.to("cpu")
    preds_np = preds.squeeze(0).cpu().numpy().astype(np.uint8)
    print(preds_np.shape)
    print(image_np.shape)
    # preds_np = cv2.cvtColor(preds_np, cv2.COLOR_GRAY2BGR)
    image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
    # Spread the few class ids (*50) across the HSV colormap for visibility.
    preds_np_color = cv2.applyColorMap(preds_np * 50, cv2.COLORMAP_HSV)
    cv2.imwrite(f"./results/{idx:04}_segmentation.png", preds_np_color)
    cv2.imwrite(f"./results/{idx:04}_image.png", image_np)
| 1,775 | 27.190476 | 114 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/main_export.py | import torch
import custom_model
# Export script: load trained weights and serialize a TorchScript module via
# tracing (the wrapper strips the dict output so trace sees a tensor).
# Number of classes in the dataset
num_classes = 2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_deeplabv3, input_size = custom_model.initialize_model(num_classes, keep_feature_extract=True, use_pretrained=False)
state_dict = torch.load("training_output_Skydiver_dataset_person/best_DeepLabV3_Skydiver.pth", map_location=device)
model_deeplabv3 = model_deeplabv3.to(device)
model_deeplabv3.load_state_dict(state_dict)
model_deeplabv3.eval()
model_deeplabv3wrapper = custom_model.DeepLabV3Wrapper(model_deeplabv3)
# Trace with a dummy batch of the expected input size.
dummy_input = torch.rand(1, 3, input_size, input_size).to(device)
traced_script_module = torch.jit.trace(model_deeplabv3wrapper, dummy_input)
traced_script_module.save("training_output_Skydiver_dataset_person/best_deeplabv3_skydiver.pt")
| 824 | 34.869565 | 121 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/train.py | import os
import torch
import numpy as np
import time
import copy
import cv2
def debug_export_before_forward(inputs, labels, idx):
    """Dump the first image/label of a batch to PNGs for visual inspection.

    Undoes the ImageNet mean/std normalization before saving; writes
    ``{idx:06}_im.png`` and ``{idx:06}_la.png`` to the working directory.
    """
    # im = inputs[0]*255;
    im = inputs[0];
    im = im.to('cpu').numpy()
    # Invert Normalize(mean, std) channel by channel (ImageNet statistics).
    im[0, :, :] = im[0, :, :] * 0.229 + 0.485
    im[1, :, :] = im[1, :, :] * 0.224 + 0.456
    im[2, :, :] = im[2, :, :] * 0.225 + 0.406
    im = im * 255
    im = im.astype(np.uint8)
    la = labels[0].to(torch.uint8).to('cpu').numpy()
    im = im.transpose([1, 2, 0])  # CHW -> HWC for OpenCV
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    cv2.imwrite(f"{idx:06}_im.png", im)
    cv2.imwrite(f"{idx:06}_la.png", la)
def iou(pred, target, n_classes=3):
    """Per-class intersection-over-union, skipping background (class 0).

    Classes with an empty union (absent from both *pred* and *target*) are
    omitted from the result rather than reported as 0.

    Args:
        pred, target: integer class-label tensors of matching shape.
        n_classes: total number of classes, background included.

    Returns:
        np.ndarray of IoU values for the classes that appeared.
    """
    pred = pred.view(-1)
    target = target.view(-1)
    scores = []
    # Class 0 is background and deliberately excluded from the metric.
    for cls in range(1, n_classes):
        in_pred = pred == cls
        in_target = target == cls
        # Cast to long so large pixel counts cannot overflow.
        intersection = (in_pred[in_target]).long().sum().data.cpu().item()
        union = (in_pred.long().sum().data.cpu().item()
                 + in_target.long().sum().data.cpu().item()
                 - intersection)
        if union > 0:
            scores.append(float(intersection) / float(max(union, 1)))
    return np.array(scores)
def train_model(model, num_classes, dataloaders, criterion, optimizer, device, dest_dir, num_epochs=25):
    """Run the train/val loop, checkpoint every 25 epochs, return the best weights.

    Args:
        model: segmentation network whose forward returns a dict with key 'out'.
        num_classes: class count (class 0 is treated as background by iou()).
        dataloaders: dict with 'train' and 'val' DataLoader entries.
        criterion: loss taking (outputs, labels).
        optimizer: optimizer over the model's trainable parameters.
        device: device batches are moved to.
        dest_dir: directory for the periodic checkpoints.
        num_epochs: total number of epochs to run.

    Returns:
        (best_model_state_dict, val_acc_history): a deep copy of the weights
        with the best validation mean-IoU, and the per-epoch val accuracy list.
    """
    since = time.time()
    val_acc_history = []
    best_model_state_dict = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    counter = 0
    for epoch in range(1, num_epochs+1):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_iou_means = []
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # Security, skip this iteration if the batch_size is 1
                # (BatchNorm cannot compute statistics over a single sample).
                if 1 == inputs.shape[0]:
                    print("Skipping iteration because batch_size = 1")
                    continue
                # Debug
                # debug_export_before_forward(inputs, labels, counter)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)['out']
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                iou_mean = iou(preds, labels, num_classes).mean()
                running_loss += loss.item() * inputs.size(0)
                running_iou_means.append(iou_mean)
                # Increment counter
                counter = counter + 1
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            # BUG FIX: the list is created as [] above and can never be None,
            # so the original ``is not None`` guard always passed and an
            # empty phase produced nan (with a warning) via
            # ``np.array([]).mean()``.  Guard on emptiness instead.
            if running_iou_means:
                epoch_acc = np.array(running_iou_means).mean()
            else:
                epoch_acc = 0.
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model when validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_state_dict = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)
        # Save current model every 25 epochs
        if 0 == epoch%25:
            current_model_path = os.path.join(dest_dir, f"checkpoint_{epoch:04}_DeepLabV3_Skydiver.pth")
            print(f"Save current model : {current_model_path}")
            torch.save(model.state_dict(), current_model_path)
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    return best_model_state_dict, val_acc_history
| 4,494 | 33.05303 | 112 | py |
tvm | tvm-main/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import pytest
import sys
import os
from pathlib import Path
pytest_plugins = ["tvm.testing.plugin"]
IS_IN_CI = os.getenv("CI", "") == "true"
REPO_ROOT = Path(__file__).resolve().parent
# These are long running tests (manually curated and extracted from CI logs)
# that should be allocated to test shards in a round-robin fashion. These are
# taken from the 20 (arbitrary number) of tests as from
# https://ci.tlcpack.ai/job/tvm/job/main/2907/testReport
_slowest_tests = [
"tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_args",
"tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_to",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]",
"tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d",
"tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]",
"tests/python/frontend/tflite/test_forward.py::test_all_elemwise",
"tests/python/frontend/pytorch/test_object_detection.py::test_detection_models",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]",
"tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc",
"tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py::test_conv2d_hwnc_tensorcore",
"tests/python/contrib/test_tensorrt.py::test_binary[compile]",
"tests/python/frontend/pytorch/test_forward.py::test_segmentation_models",
"tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc",
"tests/python/relay/test_py_converter.py::test_global_recursion",
"tests/python/frontend/tensorflow/test_forward.py::test_forward_ptb",
"tests/python/relay/test_op_level6.py::test_topk",
"tests/python/topi/python/test_topi_conv2d_winograd.py::test_conv2d_nchw",
"tests/python/relay/test_py_converter.py::test_global_recursion",
]
# Pin each curated slow test to a deterministic shard slot (round-robin by
# its position in the list above).
HARDCODED_ALLOCATIONS = {test_id: idx for idx, test_id in enumerate(_slowest_tests)}
# These rely on running on the same node to pass successfully
# (maps a test-file path prefix to the shard index that must run it).
FIXED_ALLOCATION_PREFIXES = {
    "tests/python/unittest/test_tvm_testing_features.py": 0,
}
def find_shard_index(nodeid: str, num_shards: int) -> int:
    """
    Return the index of the shard that should run this test
    """
    # Prefix-pinned tests must land on their designated shard.
    for prefix, pinned_shard in FIXED_ALLOCATION_PREFIXES.items():
        if nodeid.startswith(prefix):
            if pinned_shard >= num_shards:
                raise RuntimeError(
                    f"Cannot collect sharded tests, {nodeid} has hardcoded shard index {pinned_shard} among only {num_shards} shards"
                )
            return pinned_shard

    # Known-slow tests use their curated slot; everything else hashes its
    # node id so the assignment is stable across runs and processes.
    if nodeid in HARDCODED_ALLOCATIONS:
        bucket = HARDCODED_ALLOCATIONS[nodeid]
    else:
        bucket = int(hashlib.md5(nodeid.encode()).hexdigest(), 16)
    return bucket % num_shards
def pytest_collection_modifyitems(config, items):
    """Prune collected tests that belong to other CI shards.

    Active only when CI, TVM_NUM_SHARDS and TVM_SHARD_INDEX are all set in
    the environment; otherwise the full collection is left untouched.
    """
    if not all(k in os.environ for k in ["CI", "TVM_NUM_SHARDS", "TVM_SHARD_INDEX"]):
        # Only apportion tests if in CI and in a job that is set up for it
        return
    num_shards = int(os.environ["TVM_NUM_SHARDS"])
    shard_index = int(os.environ["TVM_SHARD_INDEX"])
    print(f"Marking tests for shard {shard_index} of {num_shards}")
    # Iterate over a copy so removing from ``items`` mid-loop is safe.
    items_copy = list(items)
    for item in items_copy:
        item_shard_index = find_shard_index(item.nodeid, num_shards=num_shards)
        if item_shard_index != shard_index:
            items.remove(item)
def pytest_sessionstart():
    """In CI, install the repo's network-request hook before any tests run."""
    if IS_IN_CI:
        hook_script_dir = REPO_ROOT / "tests" / "scripts" / "request_hook"
        sys.path.append(str(hook_script_dir))
        import request_hook  # pylint: disable=import-outside-toplevel
        request_hook.init()
| 4,701 | 42.137615 | 137 | py |
tvm | tvm-main/apps/bundle_deploy/build_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Creates a simple TVM modules."""
import argparse
import os
import pathlib
from tvm import relay
import tvm
from tvm import runtime as tvm_runtime
import logging
from tvm.relay.backend import Runtime
from tvm.contrib import cc as _cc
RUNTIMES = [
(Runtime("crt", {"system-lib": True}), "{name}_c.{ext}"),
(Runtime("cpp", {"system-lib": True}), "{name}_cpp.{ext}"),
]
def build_module(opts):
dshape = (1, 3, 224, 224)
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("mobilenet0.25", pretrained=True)
shape_dict = {"data": dshape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
func = mod["main"]
func = relay.Function(
func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
)
for runtime, file_format_str in RUNTIMES:
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
graph, lib, params = relay.build(func, "llvm", runtime=runtime, params=params)
build_dir = os.path.abspath(opts.out_dir)
if not os.path.isdir(build_dir):
os.makedirs(build_dir)
ext = "tar" if str(runtime) == "crt" else "o"
lib_file_name = os.path.join(build_dir, file_format_str.format(name="model", ext=ext))
if str(runtime) == "crt":
lib.export_library(lib_file_name)
else:
# NOTE: at present, export_libarary will always create _another_ shared object, and you
# can't stably combine two shared objects together (in this case, init_array is not
# populated correctly when you do that). So for now, must continue to use save() with the
# C++ library.
# TODO(areusch): Obliterate runtime.cc and replace with libtvm_runtime.so.
lib.save(lib_file_name)
with open(
os.path.join(build_dir, file_format_str.format(name="graph", ext="json")), "w"
) as f_graph_json:
f_graph_json.write(graph)
with open(
os.path.join(build_dir, file_format_str.format(name="params", ext="bin")), "wb"
) as f_params:
f_params.write(tvm_runtime.save_param_dict(params))
def build_test_module(opts):
import numpy as np
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
params = {"y": y_data}
for runtime, file_format_str in RUNTIMES:
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
graph, lib, lowered_params = relay.build(
tvm.IRModule.from_expr(func),
"llvm",
runtime=runtime,
params=params,
)
build_dir = os.path.abspath(opts.out_dir)
if not os.path.isdir(build_dir):
os.makedirs(build_dir)
ext = "tar" if str(runtime) == "crt" else "o"
lib_file_name = os.path.join(build_dir, file_format_str.format(name="test_model", ext=ext))
if str(runtime) == "crt":
lib.export_library(lib_file_name)
else:
# NOTE: at present, export_libarary will always create _another_ shared object, and you
# can't stably combine two shared objects together (in this case, init_array is not
# populated correctly when you do that). So for now, must continue to use save() with the
# C++ library.
# TODO(areusch): Obliterate runtime.cc and replace with libtvm_runtime.so.
lib.save(lib_file_name)
with open(
os.path.join(build_dir, file_format_str.format(name="test_graph", ext="json")), "w"
) as f_graph_json:
f_graph_json.write(graph)
with open(
os.path.join(build_dir, file_format_str.format(name="test_params", ext="bin")), "wb"
) as f_params:
f_params.write(tvm_runtime.save_param_dict(lowered_params))
with open(
os.path.join(build_dir, file_format_str.format(name="test_data", ext="bin")), "wb"
) as fp:
fp.write(x_data.astype(np.float32).tobytes())
x_output = x_data + y_data
with open(
os.path.join(build_dir, file_format_str.format(name="test_output", ext="bin")), "wb"
) as fp:
fp.write(x_output.astype(np.float32).tobytes())
def build_inputs(opts):
from tvm.contrib import download
from PIL import Image
import numpy as np
build_dir = os.path.abspath(opts.out_dir)
# Download test image
image_url = "https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg"
image_fn = os.path.join(build_dir, "cat.png")
download.download(image_url, image_fn)
image = Image.open(image_fn).resize((224, 224))
def transform_image(image):
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
print("x", x.shape)
with open(os.path.join(build_dir, "cat.bin"), "wb") as fp:
fp.write(x.astype(np.float32).tobytes())
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--out-dir", default=".")
parser.add_argument("-t", "--test", action="store_true")
opts = parser.parse_args()
if opts.test:
build_test_module(opts)
else:
build_module(opts)
build_inputs(opts)
| 6,491 | 37.642857 | 101 | py |
tvm | tvm-main/apps/ios_rpc/tests/ios_rpc_mobilenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import re
import sys
import coremltools
import numpy as np
import tvm
from mxnet import gluon
from PIL import Image
from tvm import relay, rpc
from tvm.contrib import coreml_runtime, graph_executor, utils, xcode
from tvm.contrib.download import download_testdata
from tvm.contrib.target import coreml as _coreml
from tvm.relay import transform
from tvm.relay.expr_functor import ExprMutator
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.quantize.quantize import prerequisite_optimize
# Change target configuration, this is setting for iphone6s
# arch = "x86_64"
# sdk = "iphonesimulator"
arch = "arm64"
sdk = "iphoneos"
target_host = "llvm -mtriple=%s-apple-darwin" % arch
MODES = {"proxy": rpc.connect, "tracker": rpc.connect_tracker, "standalone": rpc.connect}
# override metal compiler to compile to iphone
@tvm.register_func("tvm_callback_metal_compile")
def compile_metal(src, target):
return xcode.compile_metal(src, sdk=sdk)
def prepare_input():
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
img_path = download_testdata(img_url, "cat.png", module="data")
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image.astype("float32"), synset
def get_model(model_name, data_shape):
gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
# we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(
func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
)
return func, params
def test_mobilenet(host, port, key, mode):
temp = utils.tempdir()
image, synset = prepare_input()
model, params = get_model("mobilenetv2_1.0", image.shape)
def run(mod, target):
with relay.build_config(opt_level=3):
lib = relay.build(
mod, target=tvm.target.Target(target, host=target_host), params=params
)
path_dso = temp.relpath("deploy.dylib")
lib.export_library(path_dso, xcode.create_dylib, arch=arch, sdk=sdk)
# connect to the proxy
if mode == "tracker":
remote = MODES[mode](host, port).request(key)
else:
remote = MODES[mode](host, port, key=key)
remote.upload(path_dso)
if target == "metal":
dev = remote.metal(0)
else:
dev = remote.cpu(0)
lib = remote.load_module("deploy.dylib")
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("data", tvm.nd.array(image, dev))
m.run()
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.numpy()[0])
print("TVM prediction top-1:", top1, synset[top1])
# evaluate
ftimer = m.module.time_evaluator("run", dev, number=3, repeat=10)
prof_res = np.array(ftimer().results) * 1000
print("%-19s (%s)" % ("%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)))
def annotate(func, compiler):
"""
An annotator for Core ML.
"""
# Bind free variables to the constant values.
bind_dict = {}
for arg in func.params:
name = arg.name_hint
if name in params:
bind_dict[arg] = relay.const(params[name])
func = relay.bind(func, bind_dict)
# Annotate the entire graph for Core ML
mod = tvm.IRModule()
mod["main"] = func
seq = tvm.transform.Sequential(
[
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
transform.AnnotateTarget(compiler),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with relay.build_config(opt_level=3):
mod = seq(mod)
return mod
# CPU
run(model, target_host)
# Metal
run(model, "metal")
# CoreML
run(annotate(model, "coremlcompiler"), target_host)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Demo app demonstrates how ios_rpc works.")
parser.add_argument("--host", required=True, type=str, help="Adress of rpc server")
parser.add_argument("--port", type=int, default=9090, help="rpc port (default: 9090)")
parser.add_argument("--key", type=str, default="iphone", help="device key (default: iphone)")
parser.add_argument(
"--mode",
type=str,
default="tracker",
help="type of RPC connection (default: tracker), possible values: {}".format(
", ".join(MODES.keys())
),
)
args = parser.parse_args()
assert args.mode in MODES.keys()
test_mobilenet(args.host, args.port, args.key, args.mode)
| 6,368 | 33.241935 | 97 | py |
tvm | tvm-main/apps/cpp_rtvm/scripts/download_models.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
tmp_dir = "./model_data/"
dload_models = []
# Keras : Resnet50
try:
from tensorflow.keras.applications.resnet50 import ResNet50
model_file_name = "{}/{}".format(tmp_dir + "keras-resnet50", "resnet50.h5")
model = ResNet50(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
model.save(model_file_name)
dload_models.append(model_file_name)
except ImportError:
LOG.warning("Keras is not installed, skipping Keras models")
print("Models:", dload_models)
| 1,339 | 35.216216 | 99 | py |
tvm | tvm-main/apps/android_camera/models/prepare_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pathlib
from pathlib import Path
from typing import Union
import os
from os import environ
import json
import tvm
import tvm.relay as relay
from tvm.contrib import utils, ndk, graph_executor as runtime
from tvm.contrib.download import download_testdata, download
target = "llvm -mtriple=arm64-linux-android"
target_host = None
def del_dir(target: Union[Path, str], only_if_empty: bool = False):
target = Path(target).expanduser()
assert target.is_dir()
for p in sorted(target.glob("**/*"), reverse=True):
if not p.exists():
continue
p.chmod(0o666)
if p.is_dir():
p.rmdir()
else:
if only_if_empty:
raise RuntimeError(f"{p.parent} is not empty!")
p.unlink()
target.rmdir()
def get_model(model_name, batch_size=1):
if model_name == "resnet18_v1":
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.model_zoo import vision
gluon_model = vision.get_model(model_name, pretrained=True)
img_size = 224
data_shape = (batch_size, 3, img_size, img_size)
net, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
return (net, params)
elif model_name == "mobilenet_v2":
import keras
from keras.applications.mobilenet_v2 import MobileNetV2
keras.backend.clear_session() # Destroys the current TF graph and creates a new one.
weights_url = "".join(
[
"https://github.com/JonathanCMitchell/",
"mobilenet_v2_keras/releases/download/v1.1/",
"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
]
)
weights_file = "mobilenet_v2_weights.h5"
weights_path = download_testdata(weights_url, weights_file, module="keras")
keras_mobilenet_v2 = MobileNetV2(
alpha=0.5, include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
)
keras_mobilenet_v2.load_weights(weights_path)
img_size = 224
data_shape = (batch_size, 3, img_size, img_size)
mod, params = relay.frontend.from_keras(keras_mobilenet_v2, {"input_1": data_shape})
return (mod, params)
def main(model_str, output_path):
if output_path.exists():
del_dir(output_path)
output_path.mkdir()
output_path_str = os.fspath(output_path)
print(model_str)
print("getting model...")
net, params = get_model(model_str)
try:
os.mkdir(model_str)
except FileExistsError:
pass
print("building...")
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(net, tvm.target.Target(target, target_host), params=params)
print("dumping lib...")
lib.export_library(output_path_str + "/" + "deploy_lib_cpu.so", ndk.create_shared)
print("dumping graph...")
with open(output_path_str + "/" + "deploy_graph.json", "w") as f:
f.write(graph)
print("dumping params...")
with open(output_path_str + "/" + "deploy_param.params", "wb") as f:
f.write(tvm.runtime.save_param_dict(params))
print("dumping labels...")
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_path = output_path_str + "/image_net_labels"
download(synset_url, output_path_str + "/image_net_labels")
with open(synset_path) as fi:
synset = eval(fi.read())
with open(output_path_str + "/image_net_labels.json", "w") as fo:
json.dump(synset, fo, indent=4)
os.remove(synset_path)
if __name__ == "__main__":
if environ.get("TVM_NDK_CC") is None:
raise RuntimeError("Require environment variable TVM_NDK_CC")
models_path = Path().absolute().parent.joinpath("app/src/main/assets/models/")
if not models_path.exists():
models_path.mkdir(parents=True)
models = {
"mobilenet_v2": models_path.joinpath("mobilenet_v2"),
"resnet18_v1": models_path.joinpath("resnet18_v1"),
}
for model, output_path in models.items():
main(model, output_path)
| 5,118 | 35.564286 | 100 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_torch_graph_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import tempfile
import os
import logging
import torch
import numpy as np
import tvm
import tvm.testing
from tvm import te, relay
import tvm.contrib.torch
from tvm.contrib import graph_runtime
TVM_ASSETS = ["mod.so", "graph.json", "params"]
def test_use_pt_graph_module():
"""main test function"""
def build_export_graph(device):
"""relay build & export graph"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
z = relay.exp(z)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
params = {"y": y_data}
pt_device = torch.device(device)
if pt_device.type == "cuda":
target = "cuda"
ctx = tvm.cuda(pt_device.index)
else:
target = "llvm"
ctx = tvm.cpu(0)
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), target=target, params=params)
mod = graph_runtime.create(graph, lib, device=ctx)
mod.set_input(**params)
mod.set_input(x=x_data)
mod.run()
res = mod.get_output(0).asnumpy()
ref_res = np.exp(y_data + x_data)
tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5)
# export to tempdir
export_dir = tempfile.mkdtemp("tvm_export")
lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
with open(os.path.join(export_dir, TVM_ASSETS[1]), "w") as fout:
fout.write(graph)
with open(os.path.join(export_dir, TVM_ASSETS[2]), "wb") as fout:
fout.write(relay.save_param_dict(params))
return export_dir
def test_pt_run(device, trace=True, to_device=None):
"""test add lib with Pytorch wrapper"""
print("\n############## Test on device:", device, "#################")
export_dir = build_export_graph(device)
engine = tvm.contrib.torch.GraphModule(num_inputs=2, num_outputs=1).to(device)
x = np.random.rand(10, 5).astype("float32")
y = np.random.rand(1, 5).astype("float32")
expect = np.exp(y + x)
def get_inputs_by_device(device):
inps = [torch.Tensor(x), torch.Tensor(y)]
if device == "cpu":
return inps
else:
device_type, device_id = device.split(":")
assert device_type == "cuda"
return [inp.cuda(int(device_id)) for inp in inps]
assets = [os.path.join(export_dir, i) for i in TVM_ASSETS]
engine.init((x.shape, y.shape), *assets)
outputs = engine.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
if trace:
print("\n################ Test trace and load #################")
scripted = torch.jit.script(engine)
scripted_dir = tempfile.mkdtemp("scripted")
scripted_path = os.path.join(scripted_dir, "model.pt")
scripted.save(scripted_path)
loaded = torch.jit.load(scripted_path)
outputs = loaded.forward(get_inputs_by_device(device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del scripted
del loaded
if to_device:
print(
"\n################ Test move from [{}] to [{}] #################".format(
device, to_device
)
)
engine = engine.to(to_device)
outputs = engine.forward(get_inputs_by_device(to_device))
tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
del engine
test_pt_run(device="cuda:0", trace=True, to_device="cuda:1")
test_pt_run(device="cpu", trace=True)
if __name__ == "__main__":
test_use_pt_graph_module()
| 4,793 | 35.876923 | 100 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_optimize_torch.py | # pylint: disable=missing-class-docstring
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tvm torch module"""
import tempfile
import torch
from torch.utils import benchmark
from torchvision.models import resnet18
import tvm
import tvm.testing
from tvm.contrib.torch import optimize_torch
from tvm.meta_schedule import TuneConfig
def test_matmul_tuning_relay():
def matmul(x, w):
return torch.matmul(x, w)
x = torch.randn(15, 20)
w = torch.randn(20, 30)
example_inputs = (x, w)
rt_mod = optimize_torch(matmul, example_inputs)
torch_answer = torch.matmul(x, w).numpy()
tvm_answer = rt_mod(x, w).numpy()
tvm.testing.assert_allclose(torch_answer, tvm_answer, atol=1e-5, rtol=1e-5)
class InnerModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 20, 5)
def forward(self, x):
return torch.nn.functional.relu(self.conv(x))
class SimpleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(20, 20, 5)
self.relu = InnerModel()
def forward(self, x):
x = self.relu(x)
return torch.nn.functional.relu(self.conv(x))
def test_nested_module():
simple_module = SimpleModel()
example_input = torch.randn(20, 1, 10, 10)
optimized_module = optimize_torch(simple_module, example_input)
ret1 = simple_module(example_input).detach().numpy()
ret2 = optimized_module(example_input).detach().numpy()
tvm.testing.assert_allclose(ret1, ret2, atol=1e-5, rtol=1e-5)
def test_save_load_function():
def foo(x):
return 2 * x + 1
example_input = torch.rand(3)
opt_foo = optimize_torch(foo, example_input)
ret1 = opt_foo(example_input)
with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
torch.save(opt_foo, tmp.name)
loaded_mod = torch.load(tmp.name)
ret2 = loaded_mod(example_input)
tvm.testing.assert_allclose(ret1.numpy(), ret2.numpy(), atol=1e-5, rtol=1e-5)
class MyResNet18(torch.nn.Module):
def __init__(self, config, target=None):
super(MyResNet18, self).__init__()
self.means = torch.nn.Parameter(
torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
).cuda()
self.resnet = optimize_torch(resnet18(), [torch.rand(1, 3, 224, 224)], config, target)
def forward(self, input):
return self.resnet(input - self.means)
class JitModule(torch.nn.Module):
def __init__(self):
super(JitModule, self).__init__()
self.means = torch.nn.Parameter(
torch.tensor([103.939, 116.779, 123.68]).resize_(1, 3, 1, 1)
).cuda()
self.resnet = torch.jit.optimize_for_inference(torch.jit.script(resnet18().cuda().eval()))
def forward(self, input):
return self.resnet(input - self.means)
# default config for testing
config = TuneConfig(
strategy="evolutionary",
num_trials_per_iter=4,
max_trials_per_task=8,
max_trials_global=16,
)
if torch.cuda.is_available():
target_cuda = "nvidia/geforce-rtx-3070"
meta_module_resnet18 = MyResNet18(config, target_cuda)
jit_module_resnet18 = JitModule()
def compare_optimize_resnet18_to_torchscript():
results = []
for i in range(20):
test_input = torch.rand(1, 3, 224, 224).half().cuda()
sub_label = f"[test {i}]"
results.append(
benchmark.Timer(
stmt="meta_module_resnet18(test_input)",
setup="from __main__ import meta_module_resnet18",
globals={"test_input": test_input},
sub_label=sub_label,
description="tuning by meta",
).blocked_autorange()
)
results.append(
benchmark.Timer(
stmt="jit_module_resnet18(test_input)",
setup="from __main__ import jit_module_resnet18",
globals={"test_input": test_input},
sub_label=sub_label,
description="tuning by jit",
).blocked_autorange()
)
compare = benchmark.Compare(results)
compare.print()
if __name__ == "__main__":
test_matmul_tuning_relay()
test_nested_module()
test_save_load_function()
if torch.cuda.is_available():
compare_optimize_resnet18_to_torchscript()
| 5,138 | 30.722222 | 98 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_as_torch.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tvm torch module"""
import tempfile
import numpy as np
import torch
import torch.nn
import tvm
from tvm.target.target import Target
import tvm.testing
from tvm.contrib.torch import as_torch
from tvm.script import tir as T
@as_torch
def matmul(M: int, N: int, K: int, dtype: str):
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [M, K], dtype=dtype)
B = T.match_buffer(b, [N, K], dtype=dtype)
C = T.match_buffer(c, [M, N], dtype=dtype)
for i, j, k in T.grid(M, N, K):
with T.block():
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
return main
@as_torch
@tvm.script.ir_module
class ModuleGPU:
@T.prim_func
def main(A: T.Buffer(8, "float32"), B: T.Buffer(8, "float32")) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i_0 in T.thread_binding(2, thread="blockIdx.x"):
for i_2 in T.thread_binding(2, thread="threadIdx.x"):
for i_1 in T.serial(2):
with T.block("B"):
vi = T.axis.spatial(8, i_0 * 4 + i_1 * 2 + i_2)
T.reads(A[vi])
T.writes(B[vi])
B[vi] = A[vi] + T.float32(1)
@as_torch
@T.prim_func
def func_with_part_access_region(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
with T.block():
for i, j in T.grid(128, 128):
with T.block("s1"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
B[vi, vj] = A[vi, vj] + T.float32(1)
for i, j in T.grid(128, 128):
with T.block("s2"):
vi, vj = T.axis.remap("SS", [i, j])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
@as_torch
@tvm.script.ir_module
class MyModule:
@T.prim_func
def main(a: T.handle, b: T.handle):
# We exchange data between function by handles, which are similar to pointer.
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# Create buffer from handles.
A = T.match_buffer(a, (8,), dtype="float32")
B = T.match_buffer(b, (8,), dtype="float32")
for i in range(8):
# A block is an abstraction for computation.
with T.block("B"):
# Define a spatial block iterator and bind it to value i.
vi = T.axis.spatial(8, i)
B[vi] = A[vi] + 1.0
@as_torch
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i, ko in T.grid(128, 4):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("B"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, ko * 32 + ki)
T.reads([B[vi], A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
@as_torch
def elementwise_with_root(M: int, N: int, dtype: str):
@T.prim_func
def f(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [M, N])
B = T.match_buffer(b, [M, N])
C = T.match_buffer(c, [M, N])
with T.block():
for i, j in T.grid(M, N):
with T.block("s1"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + T.float32(1)
for i, j in T.grid(M, N):
with T.block("s2"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
return f
class MinuesOnes(torch.nn.Module):
def __init__(self):
super(MinuesOnes, self).__init__()
self.engine = MyModule
def forward(self, *input):
self.engine.forward(*input)
return input[-1] - 1
def test_tvmscript_torch_matmul():
s1 = np.random.rand(128, 128).astype("float32")
s2 = np.random.rand(128, 128).astype("float32")
s3 = np.random.rand(128, 128).astype("float32")
q1 = torch.from_numpy(s1)
q2 = torch.from_numpy(s2)
q3 = torch.from_numpy(s3)
numpy_result = np.matmul(s1, np.transpose(s2))
nn_module = matmul(128, 128, 128, "float32")
nn_module(q1, q2, q3)
tvm.testing.assert_allclose(q3.numpy(), numpy_result, atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_decorator():
q1 = torch.arange(8).type(torch.float32)
q2 = torch.zeros((8,), dtype=torch.float32)
MyModule(q1, q2)
tvm.testing.assert_allclose(q2.numpy(), (q1 + 1).numpy(), atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_gpu():
cuda0 = torch.device("cuda:0")
q1 = torch.arange(8, device=cuda0).type(torch.float32)
q2 = torch.zeros((8,), dtype=torch.float32, device=cuda0)
with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
torch.save(ModuleGPU, tmp.name)
loaded_mod = torch.load(tmp.name)
loaded_mod(q1, q2)
tvm.testing.assert_allclose(q2.cpu().numpy(), (q1 + 1).cpu().numpy(), atol=1e-5, rtol=1e-5)
def test_torch_with_tvmscript():
ref_result = np.arange(8).astype("float32")
q1 = torch.arange(8).type(torch.float32)
q2 = torch.zeros((8,), dtype=torch.float32)
nn_module = MinuesOnes()
ret = nn_module.forward(q1, q2)
tvm.testing.assert_allclose(ret.numpy(), ref_result, atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_func_with_part_access_region():
a1 = torch.rand(128, 128)
a2 = torch.zeros(128, 128)
a3 = torch.zeros(128, 128)
result = a1 + 2
func_with_part_access_region.tune()
func_with_part_access_region(a1, a2, a3)
tvm.testing.assert_allclose(a3.numpy(), result.numpy(), atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_loop_split():
x = torch.rand(128, 128).cuda()
y = torch.zeros(128).cuda()
result = torch.sum(x.cpu(), dim=1).numpy()
loop_split.tune(
"nvidia/geforce-rtx-3070",
max_trials_global=128,
strategy="replay-trace",
)
loop_split(x, y)
tvm.testing.assert_allclose(y.cpu().numpy(), result, atol=1e-5, rtol=1e-5)
def test_tvmscript_torch_elementwise_with_root():
    """elementwise_with_root(128, 128) should add 2 to every element."""
    src = torch.rand(128, 128)
    scratch = torch.zeros(128, 128)
    dst = torch.zeros(128, 128)
    expected = src + 2
    mod = elementwise_with_root(128, 128, "float32")
    mod.tune(
        max_trials_global=128,
        strategy="replay-trace",
    )
    mod(src, scratch, dst)
    tvm.testing.assert_allclose(dst.numpy(), expected.numpy(), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
    # Run every test in this file, preserving the original invocation order.
    for _case in (
        test_tvmscript_torch_matmul,
        test_tvmscript_torch_decorator,
        test_tvmscript_torch_gpu,
        test_torch_with_tvmscript,
        test_tvmscript_torch_func_with_part_access_region,
        test_tvmscript_torch_loop_split,
        test_tvmscript_torch_elementwise_with_root,
    ):
        _case()
| 8,027 | 29.758621 | 95 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_torch_compile_cpu.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
import tvm
from tvm.contrib.torch import compile
class Model(torch.nn.Module):
    """Minimal module that squares its input elementwise."""

    def __init__(self):
        super().__init__()

    def forward(self, x: torch.Tensor):
        # Elementwise square; identical to `x * x`.
        return x.mul(x)
# Trace the eager model once so it can be handed to the TVM compiler.
model = Model()
x = torch.rand([1, 3, 224, 224])
model_jit = torch.jit.trace(model, x)
print(model_jit.graph)
# Baseline: time 20 TorchScript invocations (seconds per call).
print("run torchscript...")
for i in range(20):
    t = time.time()
    model_jit(x)
    print(time.time() - t)
# Compilation options for tvm.contrib.torch.compile; CPU (llvm) target.
option = {
    "input_infos": [
        ("x", (1, 3, 224, 224)),
    ],
    "default_dtype": "float16",
    "export_dir": "pytorch_compiled",
    "num_outputs": 1,
    "tuning_n_trials": 1,  # set zero to skip tuning
    "tuning_log_file": "tuning.log",
    "target": "llvm",
    "device": tvm.cpu(),
}
pytorch_tvm_module = compile(model_jit, option)
# The compiled wrapper is itself scriptable and saveable as TorchScript.
torch.jit.script(pytorch_tvm_module).save("model_tvm.pt")
# Time 20 invocations of the TVM-backed module (milliseconds per call).
print("Run PyTorch...")
for i in range(20):
    t = time.time()
    outputs = pytorch_tvm_module.forward([x.cpu()])
    print(1000 * (time.time() - t))
print(outputs[0].shape)
| 1,877 | 26.217391 | 62 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_torch_vm_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch vm module"""
import tempfile
import os
import logging
import torch
import numpy as np
import tvm
from tvm.contrib.torch.pytorch_tvm import TVM_ASSETS
import tvm.testing
from tvm import te, relay
import tvm.contrib.torch
from tvm.contrib import graph_runtime
TVM_ASSETS = ["mod.so", "code.ro"]
def test_use_pt_vm_module():
    """Build a relay VM executable, export it, and run it through the PyTorch VMModule wrapper."""
    def build_export_vm(device):
        """Compile exp(x + y) with the relay VM, verify it, and export lib+code to a temp dir."""
        x = relay.var("x", shape=(10, 5))
        y = relay.var("y", shape=(1, 5))
        z = relay.add(x, y)
        z = relay.exp(z)
        func = relay.Function([x, y], z)
        x_data = np.random.rand(10, 5).astype("float32")
        y_data = np.random.rand(1, 5).astype("float32")
        # Pick the TVM target/device from the torch device string.
        pt_device = torch.device(device)
        if pt_device.type == "cuda":
            target = "cuda"
            ctx = tvm.cuda(pt_device.index)
        else:
            target = "llvm"
            ctx = tvm.cpu(0)
        exe = relay.vm.compile(tvm.IRModule.from_expr(func), target=target, params={})
        code, lib = exe.save()
        export_dir = tempfile.mkdtemp("tvm_export")
        # export to tempdir
        lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
        with open(os.path.join(export_dir, TVM_ASSETS[1]), "wb") as fout:
            fout.write(code)
        # Sanity-check the executable directly before handing it to PyTorch.
        vm = tvm.runtime.vm.VirtualMachine(exe, ctx)
        res = vm.run(x_data, y_data)
        ref_res = np.exp(y_data + x_data)
        tvm.testing.assert_allclose(res.numpy(), ref_res, atol=1e-5, rtol=1e-5)
        return export_dir
    def test_pt_run(device, trace=True, to_device=None, inp_on_cuda=False):
        """test add lib with Pytorch wrapper"""
        print("\n############## Test on device:", device, "#################")
        export_dir = build_export_vm(device)
        engine = tvm.contrib.torch.VMModule(num_inputs=2, num_outputs=1).to(device)
        x = np.random.rand(10, 5).astype("float32")
        y = np.random.rand(1, 5).astype("float32")
        expect = np.exp(y + x)
        def get_inputs_by_device(device):
            # Build torch inputs, moving them to the requested CUDA card if needed.
            inps = [torch.Tensor(x), torch.Tensor(y)]
            if device == "cpu":
                return inps
            else:
                device_type, device_id = device.split(":")
                assert device_type == "cuda"
                return [inp.cuda(int(device_id)) for inp in inps]
        assets = [os.path.join(export_dir, i) for i in TVM_ASSETS]
        engine.init((x.shape, y.shape), *assets)
        outputs = engine.forward(get_inputs_by_device(device))
        tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
        if trace:
            # Round-trip the engine through torch.jit.script + save/load.
            print("\n################ Test trace and load #################")
            scripted = torch.jit.script(engine)
            scripted_dir = tempfile.mkdtemp("scripted")
            scripted_path = os.path.join(scripted_dir, "model.pt")
            scripted.save(scripted_path)
            loaded = torch.jit.load(scripted_path)
            outputs = loaded.forward(get_inputs_by_device(device))
            tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
            del scripted
            del loaded
        if to_device:
            # Move the engine to another device and re-verify the result.
            print(
                "\n################ Test move from [{}] to [{}] #################".format(
                    device, to_device
                )
            )
            engine = engine.to(to_device)
            outputs = engine.forward(get_inputs_by_device(to_device))
            tvm.testing.assert_allclose(outputs[0].cpu(), expect, atol=1e-5, rtol=1e-5)
        del engine
    # Requires two CUDA devices for the cross-device move — TODO confirm in CI.
    test_pt_run(device="cuda:0", trace=True, to_device="cuda:1", inp_on_cuda=True)
    test_pt_run(device="cpu", trace=True, inp_on_cuda=False)
if __name__ == "__main__":
    # Entry point: exercises both the CUDA and CPU paths of the VM module.
    test_use_pt_vm_module()
| 4,647 | 36.788618 | 90 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_boolean_tensor.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for boolean tensor support"""
import tempfile
import torch
import tvm
import tvm.testing
from tvm.contrib.torch import as_torch, optimize_torch
from tvm.script import tir as T
def negate(x):
    """Return the elementwise logical NOT of a boolean tensor."""
    return torch.logical_not(x)
def sum_up_tensor(x):
    """Count the False entries of a 1-D boolean tensor (length minus sum)."""
    length = x.size(dim=0)
    return length - x.int().sum()
def tensor_boolean_operation(x):
    """Exercise a chain of mixed boolean/integer/float tensor casts."""
    rounded_down = torch.floor(x + 0.3).bool()
    shifted_up = (x + 0.7).int().bool()
    inverted = (~shifted_up).bool()
    both = rounded_down & inverted
    # byte -> half round trip mirrors the original cast chain exactly.
    as_half = (both.byte() + 0.5).half()
    return ~(as_half.bool())
def test_bool_tensor_negate():
    """optimize_torch(negate), saved and reloaded, should invert back to the input."""
    start = torch.ones(1, dtype=torch.bool)
    tuned_negate = optimize_torch(
        negate,
        start,
    )
    # Serialize and restore the optimized module before using it.
    with tempfile.NamedTemporaryFile(suffix=".pt") as saved:
        torch.save(tuned_negate, saved.name)
        restored = torch.load(saved.name)
    round_trip = restored(negate(start))
    tvm.testing.assert_allclose(start.numpy(), round_trip.numpy(), atol=1e-5, rtol=1e-5)
def test_sum_up_tensor():
    """The tuned kernel must count zeros exactly like the eager version."""
    raw = torch.randint(0, 2, (16,))
    mask = raw.bool()
    tuned = optimize_torch(
        sum_up_tensor,
        (mask,),
    )
    # Eager reference: number of zero entries.
    eager_count = (raw[raw == 0]).size(dim=0)
    tuned_count = tuned(mask).numpy()
    tvm.testing.assert_allclose(eager_count, tuned_count, atol=1e-5, rtol=1e-5)
def test_tensor_boolean_operation():
    """Eager and optimized boolean chains must agree on random input."""
    sample = torch.rand(200)
    tuned = optimize_torch(
        tensor_boolean_operation,
        sample,
    )
    expected = tensor_boolean_operation(sample)
    actual = tuned(sample)
    tvm.testing.assert_allclose(expected, actual, atol=1e-5, rtol=1e-5)
# TVMScript kernel over 8x8 buffers: where Y > 0 it copies X and Y through
# unchanged; elsewhere it writes NOT X and -Y.  @as_torch wraps the prim_func
# so it can be called directly on torch tensors.
@as_torch
@T.prim_func
def negate_tvmscript(
    X: T.Buffer((8, 8), "bool"),
    Y: T.Buffer((8, 8), "float32"),
    Z: T.Buffer((8, 8), "bool"),
    U: T.Buffer((8, 8), "float32"),
) -> None:
    for i, j in T.grid(8, 8):
        with T.block():
            if Y[i, j] > 0.0:
                Z[i, j] = X[i, j]
                U[i, j] = Y[i, j]
            else:
                # Negative-or-zero branch: flip the flag, flip the sign.
                Z[i, j] = not X[i, j]
                U[i, j] = 0.0 - Y[i, j]
def negate_vanila(x, y):
    """Pure-Python reference: copy x where y is positive, logical-NOT elsewhere."""
    out = torch.zeros(8, 8).bool()
    for row in range(8):
        for col in range(8):
            out[row, col] = x[row, col] if y[row, col] > 0 else ~x[row, col]
    return out
def test_tvmscript_torch_decorator():
    """Compare the TVMScript kernel against the pure-Python reference loop."""
    flags = (torch.rand(8, 8) + 0.5).int().bool()
    vals = torch.rand(8, 8) - 0.5
    out_flags = torch.zeros(8, 8).bool()
    out_vals = torch.zeros(8, 8)
    want_flags = negate_vanila(flags, vals)
    # The kernel's float output is |vals| in every branch.
    want_vals = torch.abs(vals)
    negate_tvmscript(flags, vals, out_flags, out_vals)
    tvm.testing.assert_allclose(want_flags.numpy(), out_flags.numpy(), atol=1e-5, rtol=1e-5)
    tvm.testing.assert_allclose(want_vals.numpy(), out_vals.numpy(), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
    # Keep the invocation order of the original script.
    for _case in (
        test_tvmscript_torch_decorator,
        test_bool_tensor_negate,
        test_sum_up_tensor,
        test_tensor_boolean_operation,
    ):
        _case()
| 3,590 | 26.623077 | 84 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_torch_script.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import os
import torch
import time
import numpy as np
import tvm
import tvm.testing
import tempfile
from tvm.contrib.torch import PyTorchTVMModule, compile
class Model(torch.nn.Module):
    """Batched matmul of x against the softmax (over dim 1) of y."""

    def forward(self, x, y):
        weights = y.softmax(1)
        return torch.matmul(x, weights)
# Benchmark the eager fp16 CUDA model first (20 timed runs, milliseconds).
model = Model()
model.cuda().half()
x = torch.rand([1280, 2464, 4]).cuda().half()
y = torch.rand([1280, 4, 1]).cuda().half()
for i in range(20):
    t = time.time()
    o = model(x, y)
    torch.cuda.synchronize()
    print(1000 * (time.time() - t))
print(o.shape)
# Script (not trace) the model and feed it to the TVM conversion pipeline.
model_jit = torch.jit.script(model)
print(model_jit.graph)
input_shapes = [("x", list(x.shape)), ("y", list(y.shape))]
dtype = "float16"
export_dir = tempfile.mkdtemp("pytorch_compiled")
print("tmp export_dir:", export_dir)
mod = PyTorchTVMModule()
print("Converting...")
mod.from_pytorch(model_jit, input_shapes, dtype)
# Only tune when no prior log exists in the export directory.
log_file = os.path.join(export_dir, "tuning.log")
if not os.path.exists(log_file):
    print("Tuning...")
    mod.tune_tvm(log_file=log_file, n_trial=20)
print("Building...")
tvm_mod = mod.build_tvm(export_dir)
pytorch_mod = mod.build_pytorch_module(num_inputs=2, num_outputs=1)
## Or you can load from a prebuilt tvm module
# mod = PyTorchTVMModule()
# tvm_mod = mod.load_tvm(export_dir)
# pytorch_mod = mod.build_pytorch_module(num_inputs=2, num_outputs=1, input_infos=input_shapes)
# Benchmark the raw TVM graph module (20 timed runs).
print("Run TVM...")
tvm_x = tvm.nd.array(x.cpu().numpy().astype(dtype), device=tvm.gpu(0))
tvm_y = tvm.nd.array(y.cpu().numpy().astype(dtype), device=tvm.gpu(0))
for i in range(20):
    t = time.time()
    tvm_mod.run(x=tvm_x, y=tvm_y)
    print(1000 * (time.time() - t))
tvm_output = tvm_mod.get_output(0)
print(tvm_output.shape)
# Benchmark the PyTorch-wrapped TVM module (20 timed runs).
print("Run PyTorch...")
for i in range(20):
    t = time.time()
    outputs = pytorch_mod.forward([x, y])
    torch.cuda.synchronize()
    print(1000 * (time.time() - t))
print(outputs[0].shape)
class EnsembleModel(torch.nn.Module):
    """Mixes the TVM-backed module with eager control flow for TorchScript export."""
    def __init__(self):
        super().__init__()
        # `pytorch_mod` is the module-level TVM-backed module built above;
        # scripting it here embeds it as a submodule.
        self.layer = torch.jit.script(pytorch_mod)
    def forward(self, x, y, z) -> torch.Tensor:
        # Data-dependent branch: run the TVM layer only when x > 1,
        # otherwise return a constant ones tensor of the expected shape.
        if x > 1:
            out = self.layer(y, z)[0]
        else:
            out = torch.ones([1280, 2464, 1])
        return out
print("Exporting...")
# Data-dependent control flow requires torch.jit.script (not trace).
scripted = torch.jit.script(EnsembleModel())
print(scripted.graph)
scripted_path = os.path.join(export_dir, "model_tvm.pt")
scripted.save(scripted_path)
# print(o == outputs[0])
# print(o - outputs[0])
| 3,270 | 26.957265 | 95 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_trace_tvm_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
import tvm
from tvm.contrib.torch import compile, TraceTvmModule, pytorch_tvm
class Model(torch.nn.Module):
    """Elementwise product of two tensors."""

    def __init__(self):
        super().__init__()

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        # Same as `x * y`.
        return torch.mul(x, y)
model = Model()
x = torch.rand([1, 2, 3])
y = torch.rand([1, 2, 3])
model_jit = torch.jit.script(model)
# Compilation options: CPU target, tuning disabled (0 trials).
option = {
    "input_infos": [("x", (1, 2, 3)), ("y", (1, 2, 3))],
    "default_dtype": "float32",
    "export_dir": "pytorch_compiled",
    "num_outputs": 1,
    "tuning_n_trials": 0,  # set zero to skip tuning
    "tuning_log_file": "tuning.log",
    "target": "llvm",
    "device": tvm.cpu(),
}
# use TraceTvmModule to convert List[Tensor] input/output
# to tuple of Tensors
pytorch_tvm_module = compile(model_jit, option)
scripted = torch.jit.script(pytorch_tvm_module)
traced = torch.jit.trace(TraceTvmModule(scripted), (x, y))
# Traced and list-based call paths must agree on the same inputs.
res_traced = traced.forward(x, y)
res_expected = pytorch_tvm_module.forward([x, y])[0]
tvm.testing.assert_allclose(res_traced, res_expected)
| 1,892 | 31.084746 | 66 | py |
tvm | tvm-main/apps/pt_tvmdsoop/tests/test_torch_compile_gpu.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for torch module"""
import torch
import time
from torchvision.models import resnet50
import tvm
from tvm.contrib.torch import compile
# Trace fp16 ResNet-50 on CUDA and benchmark 20 TorchScript runs (seconds).
model = resnet50().half().cuda()
x = torch.rand([1, 3, 224, 224]).half().cuda()
model_jit = torch.jit.trace(model, x)
print(model_jit.graph)
print("run torchscript...")
for i in range(20):
    t = time.time()
    model_jit(x)
    torch.cuda.synchronize()
    print(time.time() - t)
# Compilation options for tvm.contrib.torch.compile; CUDA target.
option = {
    "input_infos": [
        ("x", (1, 3, 224, 224)),
    ],
    "default_dtype": "float16",
    "export_dir": "pytorch_compiled",
    "num_outputs": 1,
    "tuning_n_trials": 1,  # set zero to skip tuning
    "tuning_log_file": "tuning.log",
    "target": "cuda",
    "device": tvm.cuda(0),
}
pytorch_tvm_module = compile(model_jit, option)
# The compiled wrapper is scriptable and saveable as a TorchScript artifact.
torch.jit.script(pytorch_tvm_module).save("model_tvm.pt")
# Benchmark 20 runs of the TVM-backed module (milliseconds).
print("Run PyTorch...")
for i in range(20):
    t = time.time()
    outputs = pytorch_tvm_module.forward([x])
    torch.cuda.synchronize()
    print(1000 * (time.time() - t))
print(outputs[0].shape)
| 1,857 | 28.03125 | 62 | py |
tvm | tvm-main/apps/benchmark/util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility for benchmark"""
import sys
from tvm import relay
from tvm.relay import testing
def get_network(name, batch_size, dtype="float32"):
    """Get the symbol definition and random weight of a network

    Parameters
    ----------
    name: str
        The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ...
    batch_size: int
        batch size
    dtype: str
        Data type

    Returns
    -------
    net: tvm.IRModule
        The relay function of network definition
    params: dict
        The random parameters for benchmark
    input_shape: tuple
        The shape of input tensor
    output_shape: tuple
        The shape of output tensor
    """
    # Default ImageNet-style shapes; the inception_v3 branch overrides input_shape.
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)
    if name == "mobilenet":
        net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "inception_v3":
        input_shape = (batch_size, 3, 299, 299)
        net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif "resnet" in name:
        n_layer = int(name.split("-")[1])
        net, params = testing.resnet.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "vgg" in name:
        n_layer = int(name.split("-")[1])
        net, params = testing.vgg.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "densenet" in name:
        n_layer = int(name.split("-")[1])
        net, params = testing.densenet.get_workload(
            densenet_size=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "squeezenet" in name:
        version = name.split("_v")[1]
        net, params = testing.squeezenet.get_workload(
            batch_size=batch_size, version=version, dtype=dtype
        )
    elif name == "mxnet":
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model

        # BUG FIX: this module never imports `tvm` at file level (only sys,
        # relay and testing), so the tvm.IRModule.from_expr call below raised
        # NameError whenever this branch ran.  Import it locally here.
        import tvm

        block = get_model("resnet18_v1", pretrained=True)
        net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = net["main"]
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        net = tvm.IRModule.from_expr(net)
    else:
        raise ValueError("Unsupported network: " + name)
    return net, params, input_shape, output_shape
def print_progress(msg):
    """Show *msg* on the current terminal line, overwriting it on the next call.

    Parameters
    ----------
    msg: str
        The message to print
    """
    # print() writes to sys.stdout; end="\r" + flush matches the original
    # write/flush pair byte-for-byte.
    print(msg, end="\r", flush=True)
| 3,448 | 32.813725 | 108 | py |
tvm | tvm-main/apps/benchmark/adreno/adreno_gpu_bench_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Benchmark script for various models on Adreno GPU.
"""
import argparse
import numpy as np
import os
import sys
import tvm
from tvm import te
from tvm.relay import testing
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_executor as runtime
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils, ndk
def get_network(name, batch_size, dtype="float32"):
    """Get the symbol definition and random weight of a network

    Parameters
    ----------
    name: str
        The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ...
    batch_size: int
        batch size
    dtype: str
        Data type

    Returns
    -------
    net: tvm.IRModule
        The relay function of network definition
    params: dict
        The random parameters for benchmark
    input_shape: tuple
        The shape of input tensor
    output_shape: tuple
        The shape of output tensor
    """
    # Default ImageNet-style shapes; the inception_v3 branch overrides input_shape.
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)
    if name == "mobilenet":
        net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "inception_v3":
        input_shape = (batch_size, 3, 299, 299)
        net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif "resnet" in name:
        # Layer count is encoded in the name, e.g. "resnet-50" -> 50.
        n_layer = int(name.split("-")[1])
        net, params = testing.resnet.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "vgg" in name:
        n_layer = int(name.split("-")[1])
        net, params = testing.vgg.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "densenet" in name:
        n_layer = int(name.split("-")[1])
        net, params = testing.densenet.get_workload(
            densenet_size=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "squeezenet" in name:
        # Version suffix, e.g. "squeezenet_v1.1" -> "1.1".
        version = name.split("_v")[1]
        net, params = testing.squeezenet.get_workload(
            batch_size=batch_size, version=version, dtype=dtype
        )
    elif name == "mxnet":
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model
        block = get_model("resnet18_v1", pretrained=True)
        net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = net["main"]
        # Append a softmax head so the output is a probability distribution.
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        net = tvm.IRModule.from_expr(net)
    else:
        raise ValueError("Unsupported network: " + name)
    return net, params, input_shape, output_shape
def print_progress(msg):
    """Display *msg* in place, returning the cursor to the line start.

    Parameters
    ----------
    msg: str
        The message to print
    """
    # Equivalent to sys.stdout.write(msg + "\r"); sys.stdout.flush().
    print(msg, end="\r", flush=True)
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1024,
    early_stopping=None,
    log_filename="tuning.log",
):
    """Auto-tune a list of autotvm tasks and keep only the best records.

    Parameters
    ----------
    tasks: list of autotvm.task.Task
        Tasks extracted from the model, tuned in reverse order.
    measure_option: dict
        Result of autotvm.measure_option describing builder/runner.
    tuner: str
        Tuner selector; "xgb*" variants, "ga", "random" or "gridsearch".
    n_trial: int
        Maximum trials per task (capped by the task's config space size).
    early_stopping: int or None
        Stop a task early after this many non-improving trials.
    log_filename: str
        Destination for the best tuning records; raw records go to a
        ".tmp"-suffixed sibling file first.
    """
    # BUG FIX: the original imported only XGBTuner, so selecting
    # "ga", "random" or "gridsearch" raised NameError.  Import all tuners.
    from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner

    tmp_log_file = log_filename + ".tmp"
    for i, tsk in enumerate(reversed(tasks)):
        print("Task: ", tsk)
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        # create tuner
        if tuner == "xgb":
            tuner_obj = XGBTuner(tsk, loss_type="reg")
        elif tuner == "xgb_knob":
            tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="knob")
        elif tuner == "xgb_itervar":
            tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="itervar")
        elif tuner == "xgb_curve":
            tuner_obj = XGBTuner(tsk, loss_type="reg", feature_type="curve")
        elif tuner == "xgb_rank":
            tuner_obj = XGBTuner(tsk, loss_type="rank")
        elif tuner == "xgb_rank_knob":
            tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
        elif tuner == "xgb_rank_itervar":
            tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar")
        elif tuner == "xgb_rank_curve":
            tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve")
        elif tuner == "xgb_rank_binary":
            tuner_obj = XGBTuner(tsk, loss_type="rank-binary")
        elif tuner == "xgb_rank_binary_knob":
            tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="knob")
        elif tuner == "xgb_rank_binary_itervar":
            tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="itervar")
        elif tuner == "xgb_rank_binary_curve":
            tuner_obj = XGBTuner(tsk, loss_type="rank-binary", feature_type="curve")
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)
        # Never request more trials than the task's config space holds.
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # Distill the raw trial log into only the best record per workload.
    autotvm.record.pick_best(tmp_log_file, log_filename)
def evaluate_network(network, target, target_host, dtype, repeat):
    """Build, upload and time one network on a remote Adreno device.

    Relies on the module-level ``args`` namespace for RPC settings and the
    ``--tune`` flag.  Returns (mean_ms, std_ms) of the measured runs.
    """
    print_progress(network)
    net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype)
    # Auto Tuning
    tune_log = "adreno-" + network + "-" + dtype + ".log"
    tuning_options = {
        "log_filename": tune_log,
        "early_stopping": None,
        "measure_option": autotvm.measure_option(
            builder=autotvm.LocalBuilder(build_func=ndk.create_shared, timeout=15),
            runner=autotvm.RPCRunner(
                args.rpc_key,
                host=args.host,
                port=args.port,
                number=3,
                timeout=600,
            ),
        ),
    }
    if args.tune:
        tasks = autotvm.task.extract_from_program(
            net, target=target, target_host=target_host, params=params
        )
        tune_tasks(tasks, **tuning_options)
    print_progress("%-20s building..." % network)
    # Build the tuning log
    if os.path.exists(tune_log):
        # Apply previously-recorded best schedules when a log is present.
        with autotvm.apply_history_best(tune_log):
            with tvm.transform.PassContext(opt_level=3):
                lib = relay.build(
                    net, target=tvm.target.Target(target, host=target_host), params=params
                )
    else:
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(
                net, target=tvm.target.Target(target, host=target_host), params=params
            )
    # Cross-compile to a shared library with the Android NDK toolchain.
    tmp = tempdir()
    filename = "%s.so" % network
    lib.export_library(tmp.relpath(filename), ndk.create_shared)
    # upload library and params
    print_progress("%-20s uploading..." % network)
    # connect to remote device
    tracker = tvm.rpc.connect_tracker(args.host, args.port)
    remote = tracker.request(args.rpc_key)
    dev = remote.device(str(target), 0)
    remote.upload(tmp.relpath(filename))
    rlib = remote.load_module(filename)
    module = runtime.GraphModule(rlib["default"](dev))
    # Feed random input data of the network's expected shape.
    data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
    module.set_input("data", data_tvm)
    # evaluate
    print_progress("%-20s evaluating..." % network)
    ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat)
    prof_res = np.array(ftimer().results) * 1000  # multiply 1000 for converting to millisecond
    print(
        "%-20s %-19s (%s)"
        % (network + "-" + dtype, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res))
    )
    return (np.mean(prof_res), np.std(prof_res))
if __name__ == "__main__":
    # CLI: pick a single network or benchmark the whole default suite.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--network",
        type=str,
        choices=[
            "resnet-18",
            "resnet-34",
            "resnet-50",
            "vgg-16",
            "vgg-19",
            "densenet-121",
            "inception_v3",
            "mobilenet",
            "squeezenet_v1.0",
            "squeezenet_v1.1",
        ],
        help="The name of neural network",
    )
    # RPC tracker connection settings for the remote Android device.
    parser.add_argument("--host", type=str, default="127.0.0.1")
    parser.add_argument("--port", type=int, default=9190)
    parser.add_argument("--rpc-key", type=str, default="android")
    parser.add_argument("--repeat", type=int, default=30)
    parser.add_argument("--tune", type=bool, default=False)
    args = parser.parse_args()
    if args.network is None:
        networks = [
            "resnet-18",
            "resnet-34",
            "resnet-50",
            "vgg-16",
            "vgg-19",
            "densenet-121",
            "inception_v3",
            "mobilenet",
            "squeezenet_v1.0",
            "squeezenet_v1.1",
        ]
    else:
        networks = [args.network]
    target = "opencl -device=adreno"
    target_host = "llvm -mtriple=arm64-linux-android"
    print("--------------------------------------------------")
    print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)"))
    print("--------------------------------------------------")
    results = {}
    # Each network is measured in both float32 and float16 precision.
    for network in networks:
        ftime = evaluate_network(network, target, target_host, "float32", args.repeat)
        results[network + "-float32"] = ftime
        ftime = evaluate_network(network, target, target_host, "float16", args.repeat)
        results[network + "-float16"] = ftime
    # Final summary table over all collected measurements.
    print("----------------------------------------------------------------------")
    print("%-30s %-30s" % ("Network Name", "Mean Inference Time (std dev)"))
    print("----------------------------------------------------------------------")
    for key, val in results.items():
        print("%-30s %-30s (%s)" % (key, "%.2f ms" % val[0], "%.2f ms" % val[1]))
| 10,884 | 33.776358 | 108 | py |
tvm | tvm-main/apps/benchmark/adreno/adreno_gpu_bench_clml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Benchmark script for various models on Adreno GPU.
"""
import argparse
import numpy as np
import os
import sys
import tvm
from tvm import te
from tvm.relay import testing
from tvm.contrib.utils import tempdir
from tvm.relay.op.contrib import clml
import tvm.contrib.graph_executor as runtime
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils, ndk
def get_network(name, batch_size, dtype="float32"):
    """Get the symbol definition and random weight of a network

    Parameters
    ----------
    name: str
        The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ...
    batch_size: int
        batch size
    dtype: str
        Data type

    Returns
    -------
    net: tvm.IRModule
        The relay function of network definition
    params: dict
        The random parameters for benchmark
    input_shape: tuple
        The shape of input tensor
    output_shape: tuple
        The shape of output tensor
    """
    # Default ImageNet-style shapes; the inception_v3 branch overrides input_shape.
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)
    if name == "mobilenet":
        net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "inception_v3":
        input_shape = (batch_size, 3, 299, 299)
        net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif "resnet" in name:
        # Layer count is encoded in the name, e.g. "resnet-50" -> 50.
        n_layer = int(name.split("-")[1])
        net, params = testing.resnet.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "vgg" in name:
        n_layer = int(name.split("-")[1])
        net, params = testing.vgg.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "densenet" in name:
        n_layer = int(name.split("-")[1])
        net, params = testing.densenet.get_workload(
            densenet_size=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "squeezenet" in name:
        # Version suffix, e.g. "squeezenet_v1.1" -> "1.1".
        version = name.split("_v")[1]
        net, params = testing.squeezenet.get_workload(
            batch_size=batch_size, version=version, dtype=dtype
        )
    elif name == "mxnet":
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model
        block = get_model("resnet18_v1", pretrained=True)
        net, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = net["main"]
        # Append a softmax head so the output is a probability distribution.
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        net = tvm.IRModule.from_expr(net)
    else:
        raise ValueError("Unsupported network: " + name)
    return net, params, input_shape, output_shape
def print_progress(msg):
    """Overwrite the current console line with *msg*.

    Emits the message followed by a carriage return (no newline) and
    flushes immediately, so successive calls update the same line.

    Parameters
    ----------
    msg: str
        The message to print
    """
    print(msg, end="\r", flush=True)
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1024,
    early_stopping=None,
    log_filename="tuning.log",
):
    """Tune a list of AutoTVM tasks and write the best records to a log file.

    Parameters
    ----------
    tasks : list
        Tuning tasks extracted from the model (``autotvm.task.Task``).
    measure_option : dict
        Options created by ``autotvm.measure_option``.
    tuner : str
        Tuner name: one of the ``xgb*`` variants, ``ga``, ``random``
        or ``gridsearch``.
    n_trial : int
        Maximum number of measurement trials per task.
    early_stopping : int, optional
        Stop a task early after this many trials without improvement.
    log_filename : str
        File the best tuning records are written to.

    Raises
    ------
    ValueError
        If ``tuner`` is not a recognized tuner name.
    """
    # Bug fix: the original imported only XGBTuner, so selecting "ga",
    # "random" or "gridsearch" raised NameError. Import every tuner that
    # can be selected below.
    from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner

    # (loss_type, feature_type) for every XGBoost tuner variant; None means
    # "use the tuner's default feature type" (same as not passing the kwarg).
    xgb_variants = {
        "xgb": ("reg", None),
        "xgb_knob": ("reg", "knob"),
        "xgb_itervar": ("reg", "itervar"),
        "xgb_curve": ("reg", "curve"),
        "xgb_rank": ("rank", None),
        "xgb_rank_knob": ("rank", "knob"),
        "xgb_rank_itervar": ("rank", "itervar"),
        "xgb_rank_curve": ("rank", "curve"),
        "xgb_rank_binary": ("rank-binary", None),
        "xgb_rank_binary_knob": ("rank-binary", "knob"),
        "xgb_rank_binary_itervar": ("rank-binary", "itervar"),
        "xgb_rank_binary_curve": ("rank-binary", "curve"),
    }

    tmp_log_file = log_filename + ".tmp"
    # Remove a stale temp log left behind by a previous (interrupted) run so
    # its records do not leak into this tuning session.
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    for i, tsk in enumerate(reversed(tasks)):
        print("Task: ", tsk)
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        # create tuner
        if tuner in xgb_variants:
            loss_type, feature_type = xgb_variants[tuner]
            if feature_type is None:
                tuner_obj = XGBTuner(tsk, loss_type=loss_type)
            else:
                tuner_obj = XGBTuner(tsk, loss_type=loss_type, feature_type=feature_type)
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)

        # Never request more trials than there are points in the config space.
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )

    # Keep only the best entry per workload in the final log.
    autotvm.record.pick_best(tmp_log_file, log_filename)
def evaluate_network(network, target, target_host, dtype, repeat):
    """Compile, optionally tune, deploy and time one network on a remote device.

    NOTE(review): relies on the module-level ``args`` namespace (host, port,
    rpc_key, tune) populated by the ``__main__`` block — not usable as a
    standalone library function.

    Parameters
    ----------
    network : str
        Network name understood by :func:`get_network`.
    target : str
        TVM compilation target (e.g. "opencl").
    target_host : str
        Host-side target triple for the generated wrapper code.
    dtype : str
        Data type to benchmark (this script passes "float32" / "float16").
    repeat : int
        Number of timed runs used for the mean/std statistics.

    Returns
    -------
    tuple of float
        ``(mean_ms, std_ms)`` — mean and standard deviation of the
        inference time in milliseconds.
    """
    print_progress(network)
    net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype)
    # Auto Tuning: one log file per (network, dtype) combination.
    tune_log = "adreno-" + network + "-" + dtype + ".log"
    tuning_options = {
        "log_filename": tune_log,
        "early_stopping": None,
        # Build locally with the Android NDK, measure remotely over RPC.
        "measure_option": autotvm.measure_option(
            builder=autotvm.LocalBuilder(build_func=ndk.create_shared, timeout=15),
            runner=autotvm.RPCRunner(
                args.rpc_key,
                host=args.host,
                port=args.port,
                number=3,
                timeout=600,
            ),
        ),
    }
    if args.tune:
        tasks = autotvm.task.extract_from_program(
            net, target=target, target_host=target_host, params=params
        )
        tune_tasks(tasks, **tuning_options)
    print_progress("%-20s building..." % network)
    # Build with the tuning log if one exists (from this run or a previous
    # one); otherwise build with default schedules. In both paths the graph
    # is first partitioned so supported ops are offloaded to CLML.
    if os.path.exists(tune_log):
        with autotvm.apply_history_best(tune_log):
            with tvm.transform.PassContext(opt_level=3):
                net = clml.partition_for_clml(net, params)
                lib = relay.build(
                    net, target=tvm.target.Target(target, host=target_host), params=params
                )
    else:
        with tvm.transform.PassContext(opt_level=3):
            net = clml.partition_for_clml(net, params)
            lib = relay.build(
                net, target=tvm.target.Target(target, host=target_host), params=params
            )
    # Export the compiled module as a shared library cross-compiled via NDK.
    tmp = tempdir()
    filename = "%s.so" % network
    lib.export_library(tmp.relpath(filename), ndk.create_shared)
    # upload library and params
    print_progress("%-20s uploading..." % network)
    # connect to remote device through the RPC tracker and load the module
    tracker = tvm.rpc.connect_tracker(args.host, args.port)
    remote = tracker.request(args.rpc_key)
    dev = remote.device(str(target), 0)
    remote.upload(tmp.relpath(filename))
    rlib = remote.load_module(filename)
    module = runtime.GraphModule(rlib["default"](dev))
    # Random input is fine here: we only measure runtime, not accuracy.
    data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
    module.set_input("data", data_tvm)
    # evaluate
    print_progress("%-20s evaluating..." % network)
    ftimer = module.module.time_evaluator("run", dev, number=1, repeat=repeat)
    prof_res = np.array(ftimer().results) * 1000  # convert seconds to milliseconds
    print(
        "%-20s %-19s (%s)"
        % (network + "-" + dtype, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res))
    )
    return (np.mean(prof_res), np.std(prof_res))
if __name__ == "__main__":

    def str2bool(value):
        """Parse a boolean CLI value so that '--tune False' is actually False."""
        return str(value).strip().lower() in ("1", "true", "yes", "y")

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--network",
        type=str,
        choices=[
            "resnet-18",
            "resnet-34",
            "resnet-50",
            "vgg-16",
            "vgg-19",
            "densenet-121",
            "inception_v3",
            "mobilenet",
            "squeezenet_v1.0",
            "squeezenet_v1.1",
        ],
        help="The name of neural network",
    )
    parser.add_argument("--host", type=str, default="127.0.0.1")
    parser.add_argument("--port", type=int, default=9190)
    parser.add_argument("--rpc-key", type=str, default="android")
    parser.add_argument("--repeat", type=int, default=30)
    # Bug fix: the original used type=bool, which is an argparse pitfall --
    # bool("False") is True, so ANY supplied value enabled tuning. str2bool
    # keeps the "--tune True/False" syntax but parses it correctly.
    parser.add_argument("--tune", type=str2bool, default=False)
    args = parser.parse_args()

    # No --network given: benchmark the whole default suite.
    if args.network is None:
        networks = [
            "resnet-18",
            "resnet-34",
            "resnet-50",
            # "vgg-16",
            # "vgg-19",
            "densenet-121",
            "inception_v3",
            "mobilenet",
            "squeezenet_v1.0",
            "squeezenet_v1.1",
        ]
    else:
        networks = [args.network]

    # OpenCL kernels for the Adreno GPU, host code for 64-bit Android.
    target = "opencl"
    target_host = "llvm -mtriple=arm64-linux-android"

    print("--------------------------------------------------")
    print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)"))
    print("--------------------------------------------------")
    # Each network is benchmarked in both float32 and float16.
    results = {}
    for network in networks:
        ftime = evaluate_network(network, target, target_host, "float32", args.repeat)
        results[network + "-float32"] = ftime
        ftime = evaluate_network(network, target, target_host, "float16", args.repeat)
        results[network + "-float16"] = ftime

    # Final summary table of (mean, std) per network/dtype.
    print("----------------------------------------------------------------------")
    print("%-30s %-30s" % ("Network Name", "Mean Inference Time (std dev)"))
    print("----------------------------------------------------------------------")
    for key, val in results.items():
        print("%-30s %-30s (%s)" % (key, "%.2f ms" % val[0], "%.2f ms" % val[1]))
| 11,026 | 33.785489 | 108 | py |
tvm | tvm-main/gallery/tutorial/tvmc_command_line_driver.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compiling and Optimizing a Model with TVMC
==========================================
**Authors**:
`Leandro Nunes <https://github.com/leandron>`_,
`Matthew Barrett <https://github.com/mbaret>`_,
`Chris Hoge <https://github.com/hogepodge>`_
In this section, we will work with TVMC, the TVM command line driver. TVMC is a
tool that exposes TVM features such as auto-tuning, compiling, profiling and
execution of models through a command line interface.
Upon completion of this section, we will have used TVMC to accomplish the
following tasks:
* Compile a pre-trained ResNet-50 v2 model for the TVM runtime.
* Run a real image through the compiled model, and interpret the output and
model performance.
* Tune the model on a CPU using TVM.
* Re-compile an optimized model using the tuning data collected by TVM.
* Run the image through the optimized model, and compare the output and model
performance.
The goal of this section is to give you an overview of TVM and TVMC's
capabilities, and set the stage for understanding how TVM works.
"""
################################################################################
# Using TVMC
# ----------
#
# TVMC is a Python application, part of the TVM Python package.
# When you install TVM using a Python package, you will get TVMC
# as a command line application called ``tvmc``. The location of this command
# will vary depending on your platform and installation method.
#
# Alternatively, if you have TVM as a Python module on your
# ``$PYTHONPATH``, you can access the command line driver functionality
# via the executable python module, ``python -m tvm.driver.tvmc``.
#
# For simplicity, this tutorial will mention TVMC command line using
# ``tvmc <options>``, but the same results can be obtained with
# ``python -m tvm.driver.tvmc <options>``.
#
# You can check the help page using:
#
# .. code-block:: bash
#
# tvmc --help
#
# The main features of TVM available to ``tvmc`` are from subcommands
# ``compile``, and ``run``, and ``tune``. To read about specific options under
# a given subcommand, use ``tvmc <subcommand> --help``. We will cover each of
# these commands in this tutorial, but first we need to download a pre-trained
# model to work with.
#
################################################################################
# Obtaining the Model
# -------------------
#
# For this tutorial, we will be working with ResNet-50 v2. ResNet-50 is a
# convolutional neural network that is 50 layers deep and designed to classify
# images. The model we will be using has been pre-trained on more than a
# million images with 1000 different classifications. The network has an input
# image size of 224x224. If you are interested exploring more of how the
# ResNet-50 model is structured, we recommend downloading `Netron
# <https://netron.app>`_, a freely available ML model viewer.
#
# For this tutorial we will be using the model in ONNX format.
#
# .. code-block:: bash
#
# wget https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx
#
################################################################################
# .. admonition:: Supported model formats
#
# TVMC supports models created with Keras, ONNX, TensorFlow, TFLite
# and Torch. Use the option ``--model-format`` if you need to
# explicitly provide the model format you are using. See ``tvmc
# compile --help`` for more information.
#
################################################################################
# .. admonition:: Adding ONNX Support to TVM
#
# TVM relies on the ONNX python library being available on your system. You can
# install ONNX using the command ``pip3 install --user onnx onnxoptimizer``. You
# may remove the ``--user`` option if you have root access and want to install
# ONNX globally. The ``onnxoptimizer`` dependency is optional, and is only used
# for ``onnx>=1.9``.
#
################################################################################
# Compiling an ONNX Model to the TVM Runtime
# ------------------------------------------
#
# Once we've downloaded the ResNet-50 model, the next step is to compile it. To
# accomplish that, we are going to use ``tvmc compile``. The output we get from
# the compilation process is a TAR package of the model compiled to a dynamic
# library for our target platform. We can run that model on our target device
# using the TVM runtime.
#
# .. code-block:: bash
#
# # This may take several minutes depending on your machine
# tvmc compile \
# --target "llvm" \
# --input-shapes "data:[1,3,224,224]" \
# --output resnet50-v2-7-tvm.tar \
# resnet50-v2-7.onnx
#
# Let's take a look at the files that ``tvmc compile`` creates in the module:
#
# .. code-block:: bash
#
# mkdir model
# tar -xvf resnet50-v2-7-tvm.tar -C model
# ls model
#
# You will see three files listed.
#
# * ``mod.so`` is the model, represented as a C++ library, that can be loaded
# by the TVM runtime.
# * ``mod.json`` is a text representation of the TVM Relay computation graph.
# * ``mod.params`` is a file containing the parameters for the pre-trained
# model.
#
# This module can be directly loaded by your application, and the model can be
# run via the TVM runtime APIs.
################################################################################
# .. admonition:: Defining the Correct Target
#
# Specifying the correct target (option ``--target``) can have a huge
# impact on the performance of the compiled module, as it can take
# advantage of hardware features available on the target. For more
# information, please refer to :ref:`Auto-tuning a convolutional network for
# x86 CPU <tune_relay_x86>`. We recommend identifying which CPU you are
# running, along with optional features, and set the target appropriately.
################################################################################
# Running the Model from The Compiled Module with TVMC
# ----------------------------------------------------
#
# Now that we've compiled the model to this module, we can use the TVM runtime
# to make predictions with it. TVMC has the TVM runtime built in to it,
# allowing you to run compiled TVM models. To use TVMC to run the model and
# make predictions, we need two things:
#
# - The compiled module, which we just produced.
# - Valid input to the model to make predictions on.
#
# Each model is particular when it comes to expected tensor shapes, formats and
# data types. For this reason, most models require some pre and
# post-processing, to ensure the input is valid and to interpret the output.
# TVMC has adopted NumPy's ``.npz`` format for both input and output data. This
# is a well-supported NumPy format to serialize multiple arrays into a file.
#
# As input for this tutorial, we will use the image of a cat, but you can feel
# free to substitute this image for any of your choosing.
#
# .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg
# :height: 224px
# :width: 224px
# :align: center
################################################################################
# Input pre-processing
# ~~~~~~~~~~~~~~~~~~~~
#
# For our ResNet-50 v2 model, the input is expected to be in ImageNet format.
# Here is an example of a script to pre-process an image for ResNet-50 v2.
#
# You will need to have a supported version of the Python Image Library
# installed. You can use ``pip3 install --user pillow`` to satisfy this
# requirement for the script.
#
# .. code-block:: python
# :caption: preprocess.py
# :name: preprocess.py
#
# #!python ./preprocess.py
# from tvm.contrib.download import download_testdata
# from PIL import Image
# import numpy as np
#
# img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg"
# img_path = download_testdata(img_url, "imagenet_cat.png", module="data")
#
# # Resize it to 224x224
# resized_image = Image.open(img_path).resize((224, 224))
# img_data = np.asarray(resized_image).astype("float32")
#
# # ONNX expects NCHW input, so convert the array
# img_data = np.transpose(img_data, (2, 0, 1))
#
# # Normalize according to ImageNet
# imagenet_mean = np.array([0.485, 0.456, 0.406])
# imagenet_stddev = np.array([0.229, 0.224, 0.225])
# norm_img_data = np.zeros(img_data.shape).astype("float32")
# for i in range(img_data.shape[0]):
# norm_img_data[i, :, :] = (img_data[i, :, :] / 255 - imagenet_mean[i]) / imagenet_stddev[i]
#
# # Add batch dimension
# img_data = np.expand_dims(norm_img_data, axis=0)
#
# # Save to .npz (outputs imagenet_cat.npz)
# np.savez("imagenet_cat", data=img_data)
#
################################################################################
# Running the Compiled Module
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# With both the model and input data in hand, we can now run TVMC to make a
# prediction:
#
# .. code-block:: bash
#
# tvmc run \
# --inputs imagenet_cat.npz \
# --output predictions.npz \
# resnet50-v2-7-tvm.tar
#
# Recall that the ``.tar`` model file includes a C++ library, a description of
# the Relay model, and the parameters for the model. TVMC includes the TVM
# runtime, which can load the model and make predictions against input. When
# running the above command, TVMC outputs a new file, ``predictions.npz``, that
# contains the model output tensors in NumPy format.
#
# In this example, we are running the model on the same machine that we used
# for compilation. In some cases we might want to run it remotely via an RPC
# Tracker. To read more about these options please check ``tvmc run --help``.
################################################################################
# Output Post-Processing
# ~~~~~~~~~~~~~~~~~~~~~~
#
# As previously mentioned, each model will have its own particular way of
# providing output tensors.
#
# In our case, we need to run some post-processing to render the outputs from
# ResNet-50 v2 into a more human-readable form, using the lookup-table provided
# for the model.
#
# The script below shows an example of the post-processing to extract labels
# from the output of our compiled module.
#
# .. code-block:: python
# :caption: postprocess.py
# :name: postprocess.py
#
# #!python ./postprocess.py
# import os.path
# import numpy as np
#
# from scipy.special import softmax
#
# from tvm.contrib.download import download_testdata
#
# # Download a list of labels
# labels_url = "https://s3.amazonaws.com/onnx-model-zoo/synset.txt"
# labels_path = download_testdata(labels_url, "synset.txt", module="data")
#
# with open(labels_path, "r") as f:
# labels = [l.rstrip() for l in f]
#
# output_file = "predictions.npz"
#
# # Open the output and read the output tensor
# if os.path.exists(output_file):
# with np.load(output_file) as data:
# scores = softmax(data["output_0"])
# scores = np.squeeze(scores)
# ranks = np.argsort(scores)[::-1]
#
# for rank in ranks[0:5]:
# print("class='%s' with probability=%f" % (labels[rank], scores[rank]))
#
# Running this script should produce the following output:
#
# .. code-block:: bash
#
# python postprocess.py
# # class='n02123045 tabby, tabby cat' with probability=0.610553
# # class='n02123159 tiger cat' with probability=0.367179
# # class='n02124075 Egyptian cat' with probability=0.019365
# # class='n02129604 tiger, Panthera tigris' with probability=0.001273
# # class='n04040759 radiator' with probability=0.000261
#
# Try replacing the cat image with other images, and see what sort of
# predictions the ResNet model makes.
################################################################################
# Automatically Tuning the ResNet Model
# -------------------------------------
#
# The previous model was compiled to work on the TVM runtime, but did not
# include any platform specific optimization. In this section, we will show you
# how to build an optimized model using TVMC to target your working platform.
#
# In some cases, we might not get the expected performance when running
# inferences using our compiled module. In cases like this, we can make use of
# the auto-tuner, to find a better configuration for our model and get a boost
# in performance. Tuning in TVM refers to the process by which a model is
# optimized to run faster on a given target. This differs from training or
# fine-tuning in that it does not affect the accuracy of the model, but only
# the runtime performance. As part of the tuning process, TVM will try running
# many different operator implementation variants to see which perform best.
# The results of these runs are stored in a tuning records file, which is
# ultimately the output of the ``tune`` subcommand.
#
# In the simplest form, tuning requires you to provide three things:
#
# - the target specification of the device you intend to run this model on
# - the path to an output file in which the tuning records will be stored, and
# finally
# - a path to the model to be tuned.
#
# The example below demonstrates how that works in practice:
#
# .. code-block:: bash
#
# # The default search algorithm requires xgboost, see below for further
# # details on tuning search algorithms
# pip install xgboost
#
# tvmc tune \
# --target "llvm" \
# --output resnet50-v2-7-autotuner_records.json \
# resnet50-v2-7.onnx
#
# In this example, you will see better results if you indicate a more specific
# target for the ``--target`` flag. For example, on an Intel i7 processor you
# could use ``--target llvm -mcpu=skylake``. For this tuning example, we are
# tuning locally on the CPU using LLVM as the compiler for the specified
# architecture.
#
# TVMC will perform a search against the parameter space for the model, trying
# out different configurations for operators and choosing the one that runs
# fastest on your platform. Although this is a guided search based on the CPU
# and model operations, it can still take several hours to complete the search.
# The output of this search will be saved to the
# ``resnet50-v2-7-autotuner_records.json`` file, which will later be used to
# compile an optimized model.
#
# .. admonition:: Defining the Tuning Search Algorithm
#
# By default this search is guided using an ``XGBoost Grid`` algorithm.
#   Depending on your model complexity and amount of time available, you might
# want to choose a different algorithm. A full list is available by
# consulting ``tvmc tune --help``.
#
# The output will look something like this for a consumer-level Skylake CPU:
#
# .. code-block:: bash
#
# tvmc tune \
# --target "llvm -mcpu=broadwell" \
# --output resnet50-v2-7-autotuner_records.json \
# resnet50-v2-7.onnx
# # [Task 1/24] Current/Best: 9.65/ 23.16 GFLOPS | Progress: (60/1000) | 130.74 s Done.
# # [Task 1/24] Current/Best: 3.56/ 23.16 GFLOPS | Progress: (192/1000) | 381.32 s Done.
# # [Task 2/24] Current/Best: 13.13/ 58.61 GFLOPS | Progress: (960/1000) | 1190.59 s Done.
# # [Task 3/24] Current/Best: 31.93/ 59.52 GFLOPS | Progress: (800/1000) | 727.85 s Done.
# # [Task 4/24] Current/Best: 16.42/ 57.80 GFLOPS | Progress: (960/1000) | 559.74 s Done.
# # [Task 5/24] Current/Best: 12.42/ 57.92 GFLOPS | Progress: (800/1000) | 766.63 s Done.
# # [Task 6/24] Current/Best: 20.66/ 59.25 GFLOPS | Progress: (1000/1000) | 673.61 s Done.
# # [Task 7/24] Current/Best: 15.48/ 59.60 GFLOPS | Progress: (1000/1000) | 953.04 s Done.
# # [Task 8/24] Current/Best: 31.97/ 59.33 GFLOPS | Progress: (972/1000) | 559.57 s Done.
# # [Task 9/24] Current/Best: 34.14/ 60.09 GFLOPS | Progress: (1000/1000) | 479.32 s Done.
# # [Task 10/24] Current/Best: 12.53/ 58.97 GFLOPS | Progress: (972/1000) | 642.34 s Done.
# # [Task 11/24] Current/Best: 30.94/ 58.47 GFLOPS | Progress: (1000/1000) | 648.26 s Done.
# # [Task 12/24] Current/Best: 23.66/ 58.63 GFLOPS | Progress: (1000/1000) | 851.59 s Done.
# # [Task 13/24] Current/Best: 25.44/ 59.76 GFLOPS | Progress: (1000/1000) | 534.58 s Done.
# # [Task 14/24] Current/Best: 26.83/ 58.51 GFLOPS | Progress: (1000/1000) | 491.67 s Done.
# # [Task 15/24] Current/Best: 33.64/ 58.55 GFLOPS | Progress: (1000/1000) | 529.85 s Done.
# # [Task 16/24] Current/Best: 14.93/ 57.94 GFLOPS | Progress: (1000/1000) | 645.55 s Done.
# # [Task 17/24] Current/Best: 28.70/ 58.19 GFLOPS | Progress: (1000/1000) | 756.88 s Done.
# # [Task 18/24] Current/Best: 19.01/ 60.43 GFLOPS | Progress: (980/1000) | 514.69 s Done.
# # [Task 19/24] Current/Best: 14.61/ 57.30 GFLOPS | Progress: (1000/1000) | 614.44 s Done.
# # [Task 20/24] Current/Best: 10.47/ 57.68 GFLOPS | Progress: (980/1000) | 479.80 s Done.
# # [Task 21/24] Current/Best: 34.37/ 58.28 GFLOPS | Progress: (308/1000) | 225.37 s Done.
# # [Task 22/24] Current/Best: 15.75/ 57.71 GFLOPS | Progress: (1000/1000) | 1024.05 s Done.
# # [Task 23/24] Current/Best: 23.23/ 58.92 GFLOPS | Progress: (1000/1000) | 999.34 s Done.
# # [Task 24/24] Current/Best: 17.27/ 55.25 GFLOPS | Progress: (1000/1000) | 1428.74 s Done.
#
# Tuning sessions can take a long time, so ``tvmc tune`` offers many options to customize your tuning
# process, in terms of number of repetitions (``--repeat`` and ``--number``, for example), the tuning
# algorithm to be used, and so on. Check ``tvmc tune --help`` for more information.
#
# In some situations it might be a good idea to only tune specific tasks (i.e. the most relevant ones)
# to waste less time tuning simpler workloads. The flag `--task` offers versatile options to limit
# the tasks used for tuning, e.g. `--task 20,22` or `--task 16-`. All available tasks can be printed
# using `--task list`.
#
################################################################################
# Compiling an Optimized Model with Tuning Data
# ----------------------------------------------
#
# As an output of the tuning process above, we obtained the tuning records
# stored in ``resnet50-v2-7-autotuner_records.json``. This file can be used in
# two ways:
#
# - As input to further tuning (via ``tvmc tune --tuning-records``).
# - As input to the compiler
#
# The compiler will use the results to generate high performance code for the
# model on your specified target. To do that we can use ``tvmc compile
# --tuning-records``. Check ``tvmc compile --help`` for more information.
#
# Now that tuning data for the model has been collected, we can re-compile the
# model using optimized operators to speed up our computations.
#
# .. code-block:: bash
#
# tvmc compile \
# --target "llvm" \
# --tuning-records resnet50-v2-7-autotuner_records.json \
# --output resnet50-v2-7-tvm_autotuned.tar \
# resnet50-v2-7.onnx
#
# Verify that the optimized model runs and produces the same results:
#
# .. code-block:: bash
#
# tvmc run \
# --inputs imagenet_cat.npz \
# --output predictions.npz \
# resnet50-v2-7-tvm_autotuned.tar
#
# python postprocess.py
#
# Verifying that the predictions are the same:
#
# .. code-block:: bash
#
# # class='n02123045 tabby, tabby cat' with probability=0.610550
# # class='n02123159 tiger cat' with probability=0.367181
# # class='n02124075 Egyptian cat' with probability=0.019365
# # class='n02129604 tiger, Panthera tigris' with probability=0.001273
# # class='n04040759 radiator' with probability=0.000261
################################################################################
# Comparing the Tuned and Untuned Models
# --------------------------------------
#
# TVMC gives you tools for basic performance benchmarking between the models.
# You can specify a number of repetitions and that TVMC report on the model run
# time (independent of runtime startup). We can get a rough idea of how much
# tuning has improved the model performance. For example, on a test Intel i7
# system, we see that the tuned model runs 47% faster than the untuned model:
#
# .. code-block:: bash
#
# tvmc run \
# --inputs imagenet_cat.npz \
# --output predictions.npz \
# --print-time \
# --repeat 100 \
# resnet50-v2-7-tvm_autotuned.tar
#
# # Execution time summary:
# # mean (ms) max (ms) min (ms) std (ms)
# # 92.19 115.73 89.85 3.15
#
# tvmc run \
# --inputs imagenet_cat.npz \
# --output predictions.npz \
# --print-time \
# --repeat 100 \
# resnet50-v2-7-tvm.tar
#
# # Execution time summary:
# # mean (ms) max (ms) min (ms) std (ms)
# # 193.32 219.97 185.04 7.11
#
################################################################################
# Final Remarks
# -------------
#
# In this tutorial, we presented TVMC, a command line driver for TVM. We
# demonstrated how to compile, run, and tune a model. We also discussed the
# need for pre and post-processing of inputs and outputs. After the tuning
# process, we demonstrated how to compare the performance of the unoptimized
# and optimized models.
#
# Here we presented a simple example using ResNet-50 v2 locally. However, TVMC
# supports many more features including cross-compilation, remote execution and
# profiling/benchmarking.
#
# To see what other options are available, please have a look at ``tvmc
# --help``.
#
# In the `next tutorial <tvmc_python>`, we introduce the Python interface to TVM,
# and in the tutorial after that,
# `Compiling and Optimizing a Model with the Python Interface <autotvm_relay_x86>`,
# we will cover the same compilation and optimization steps using the Python
# interface.
| 22,558 | 41.887833 | 138 | py |
tvm | tvm-main/gallery/tutorial/autotvm_matmul_x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-autotvm-matmul-x86:
Optimizing Operators with Schedule Templates and AutoTVM
========================================================
**Authors**:
`Lianmin Zheng <https://github.com/merrymercy>`_,
`Chris Hoge <https://github.com/hogepodge>`_
In this tutorial, we show how the TVM Tensor Expression (TE) language
can be used to write schedule templates that can be searched by AutoTVM to
find the optimal schedule. This process is called Auto-Tuning, which helps
automate the process of optimizing tensor computation.
This tutorial builds on the previous :doc:`tutorial on how to write a matrix
multiplication using TE <tensor_expr_get_started>`.
There are two steps in auto-tuning.
- The first step is defining a search space.
- The second step is running a search algorithm to explore through this space.
In this tutorial, you can learn how to perform these two steps in TVM. The whole
workflow is illustrated by a matrix multiplication example.
.. note::
Note that this tutorial will not run on Windows or recent versions of macOS.
To get it to run, you will need to wrap the body of this tutorial in a
:code:`if __name__ == "__main__":` block.
"""
################################################################################
# Install dependencies
# --------------------
# To use autotvm package in TVM, we need to install some extra dependencies.
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost cloudpickle
#
# To make TVM run faster in tuning, it is recommended to use cython as FFI of
# TVM. In the root directory of TVM, execute:
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Begin by importing the required packages.
import logging
import sys
import numpy as np
import tvm
from tvm import te
import tvm.testing
# the module is called `autotvm`
from tvm import autotvm
################################################################################
# Basic Matrix Multiplication with TE
# -----------------------------------
# Recall the basic implementation of matrix multiplication using TE. We write
# it down here with a few changes. We will wrap the multiplication in a python
# function definition. For simplicity, we will focus our attention on a split
# optimization, using a fixed value that defines the block size of the
# reordering.
def matmul_basic(N, L, M, dtype):
    """Plain TE matmul (N,L) x (L,M) with a fixed 8x8 output tiling schedule."""
    A = te.placeholder((N, L), name="A", dtype=dtype)
    B = te.placeholder((L, M), name="B", dtype=dtype)

    red = te.reduce_axis((0, L), name="k")
    C = te.compute(
        (N, M),
        lambda i, j: te.sum(A[i, red] * B[red, j], axis=red),
        name="C",
    )
    sched = te.create_schedule(C.op)

    # schedule: tile the output into 8x8 blocks and reduce inside each block
    row, col = sched[C].op.axis
    (k_axis,) = sched[C].op.reduce_axis

    row_outer, row_inner = sched[C].split(row, 8)
    col_outer, col_inner = sched[C].split(col, 8)
    sched[C].reorder(row_outer, col_outer, k_axis, row_inner, col_inner)

    return sched, [A, B, C]
################################################################################
# Matrix Multiplication with AutoTVM
# ----------------------------------
# In the previous schedule code, we use a constant "8" as the tiling factor.
# However, it might not be the best one because the best tiling factor depends
# on real hardware environment and input shape.
#
# If you want the schedule code to be portable across a wider range of input
# shapes and target hardware, it is better to define a set of candidate values
# and pick the best one according to the measurement results on target
# hardware.
#
# In autotvm, we can define a tunable parameter, or a "knob" for such kind of
# value.
################################################################################
# A Basic Matrix Multiplication Template
# --------------------------------------
# We begin with an example of how to create a tunable parameter set for the
# block size of the `split` scheduling operation.
# Matmul V1: List candidate values
@autotvm.template("tutorial/matmul_v1")  # 1. use a decorator
def matmul_v1(N, L, M, dtype):
    """Tunable matmul template: the y/x tile factors are knobs with explicit candidates."""
    mat_a = te.placeholder((N, L), name="A", dtype=dtype)
    mat_b = te.placeholder((L, M), name="B", dtype=dtype)
    red_axis = te.reduce_axis((0, L), name="k")
    mat_c = te.compute(
        (N, M),
        lambda i, j: te.sum(mat_a[i, red_axis] * mat_b[red_axis, j], axis=red_axis),
        name="C",
    )
    sched = te.create_schedule(mat_c.op)

    axis_y, axis_x = sched[mat_c].op.axis
    axis_k = sched[mat_c].op.reduce_axis[0]

    # 2. get the config object
    cfg = autotvm.get_config()
    # 3. define search space: five candidates per axis -> 5 x 5 = 25 configs
    cfg.define_knob("tile_y", [1, 2, 4, 8, 16])
    cfg.define_knob("tile_x", [1, 2, 4, 8, 16])
    # 4. schedule according to config
    y_outer, y_inner = sched[mat_c].split(axis_y, cfg["tile_y"].val)
    x_outer, x_inner = sched[mat_c].split(axis_x, cfg["tile_x"].val)
    sched[mat_c].reorder(y_outer, x_outer, axis_k, y_inner, x_inner)
    return sched, [mat_a, mat_b, mat_c]
################################################################################
# Here we make four modifications to the previous schedule code and get a
# tunable "template". We can explain the modifications one by one.
#
# 1. Use a decorator to mark this function as a simple template.
# 2. Get a config object: You can regard this :code:`cfg` as an argument of
# this function but we obtain it in a different way. With this argument, this
# function is no longer a deterministic schedule. Instead, we can pass
# different configurations to this function and get different schedules. A
# function that uses a configuration object like this is called a "template".
#
# To make the template function more compact, we can do two things to define
# the parameter search space within a single function.
#
# 1. Define a search space across a set values. This is done by making
# :code:`cfg` a :any:`ConfigSpace` object. It will collect all of the
# tunable knobs in this function and build a search space from it.
# 2. Schedule according to an entity in this space. This is done by making
# :code:`cfg` a :any:`ConfigEntity` object. When it is a
# :any:`ConfigEntity`, it will ignore all space definition API (namely,
# :code:`cfg.define_XXXXX(...)`). Instead, it will store deterministic
# values for all tunable knobs, and we schedule according to these values.
#
# During auto-tuning, we will first call this template with a
# :any:`ConfigSpace` object to build the search space. Then we call this
# template with different :any:`ConfigEntity` in the built space to get
# different schedules. Finally we will measure the code generated by
# different schedules and pick the best one.
#
# 3. Define two tunable knobs. The first one is :code:`tile_y` with 5 possible
# values. The second one is :code:`tile_x` with a same list of possible values.
# These two knobs are independent, so they span a search space with size 25 =
# 5x5.
# 4. The configuration knobs are passed to the :code:`split` schedule
# operation, allowing us to schedule according to the 5x5 deterministic values
# we previously defined in :code:`cfg`.
################################################################################
# A Matrix Multiplication Template with the Advanced Parameter API
# ----------------------------------------------------------------
# In the previous template, we manually listed all of the possible values for a
# knob. This is the lowest level API to define the space, and gives an explicit
# enumeration of the parameter space to search. However, we also provide
# another set of APIs that can make the definition of the search space easier
# and smarter. Where possible, we recommend you use this higher-level API
#
# In the following example, we use :any:`ConfigSpace.define_split` to define a
# split knob. It will enumerate all the possible ways to split an axis and
# construct the space.
#
# We also have :any:`ConfigSpace.define_reorder` for reorder knob and
# :any:`ConfigSpace.define_annotate` for annotation like unroll, vectorization,
# thread binding. When the high level API cannot meet your requirements, you
# can always fall back to using the low level API.
@autotvm.template("tutorial/matmul")
def matmul(N, L, M, dtype):
    """Tunable matmul template using ``define_split`` to enumerate tile factors."""
    mat_a = te.placeholder((N, L), name="A", dtype=dtype)
    mat_b = te.placeholder((L, M), name="B", dtype=dtype)
    red_axis = te.reduce_axis((0, L), name="k")
    mat_c = te.compute(
        (N, M),
        lambda i, j: te.sum(mat_a[i, red_axis] * mat_b[red_axis, j], axis=red_axis),
        name="C",
    )
    sched = te.create_schedule(mat_c.op)

    axis_y, axis_x = sched[mat_c].op.axis
    axis_k = sched[mat_c].op.reduce_axis[0]

    ##### define space begin #####
    # define_split enumerates every factor pair of the axis length.
    cfg = autotvm.get_config()
    cfg.define_split("tile_y", axis_y, num_outputs=2)
    cfg.define_split("tile_x", axis_x, num_outputs=2)
    ##### define space end #####

    # schedule according to config
    y_outer, y_inner = cfg["tile_y"].apply(sched, mat_c, axis_y)
    x_outer, x_inner = cfg["tile_x"].apply(sched, mat_c, axis_x)
    sched[mat_c].reorder(y_outer, x_outer, axis_k, y_inner, x_inner)
    return sched, [mat_a, mat_b, mat_c]
################################################################################
# .. admonition:: More Explanation on :code:`cfg.define_split`
#
# In this template, :code:`cfg.define_split("tile_y", y, num_outputs=2)` will
# enumerate all possible combinations that can split axis y into two axes with
# factors of the length of y. For example, if the length of y is 32 and we
# want to split it into two axes using factors of 32, then there are 6
# possible values for (length of outer axis, length of inner axis) pair,
# namely (32, 1), (16, 2), (8, 4), (4, 8), (2, 16) or (1, 32). These are all 6
# possible values of `tile_y`.
#
# During scheduling, :code:`cfg["tile_y"]` is a :code:`SplitEntity` object.
#   It stores the lengths of the outer and inner axes in
# :code:`cfg['tile_y'].size` (a tuple with two elements). In this template,
# we apply it by using :code:`yo, yi = cfg['tile_y'].apply(s, C, y)`.
# Actually, this is equivalent to :code:`yo, yi = s[C].split(y,
# cfg["tile_y"].size[1])` or :code:`yo, yi = s[C].split(y,
#   nparts=cfg["tile_y"].size[0])`
#
# The advantage of using cfg.apply API is that it makes multi-level splits
# (that is, when num_outputs >= 3) easier.
################################################################################
# Step 2: Use AutoTVM to Optimize the Matrix Multiplication
# ---------------------------------------------------------
# In Step 1, we wrote a matrix multiplication template that allowed us to
# parameterize the block size used in the `split` schedule. We can now conduct
# a search over this parameter space. The next step is to pick a tuner to guide
# the exploration of this space.
#
# Auto-tuners in TVM
# ~~~~~~~~~~~~~~~~~~
# The job for a tuner can be described by following pseudo code
#
# .. code-block:: c
#
# ct = 0
# while ct < max_number_of_trials:
# propose a batch of configs
# measure this batch of configs on real hardware and get results
# ct += batch_size
#
# When proposing the next batch of configs, the tuner can take different
# strategies. Some of the tuner strategies provided by TVM include:
#
# * :any:`tvm.autotvm.tuner.RandomTuner`: Enumerate the space in a random order
# * :any:`tvm.autotvm.tuner.GridSearchTuner`: Enumerate the space in a grid search order
# * :any:`tvm.autotvm.tuner.GATuner`: Using genetic algorithm to search through the space
# * :any:`tvm.autotvm.tuner.XGBTuner`: Uses a model based method. Train a XGBoost model to
# predict the speed of lowered IR and pick the next batch according to the
# prediction.
#
# You can choose the tuner according to the size of your space, your time
# budget and other factors. For example, if your space is very small (less
# than 1000), a grid-search tuner or a random tuner is good enough. If your
# space is at the level of 10^9 (this is the space size of a conv2d operator on
# CUDA GPU), XGBTuner can explore more efficiently and find better configs.
################################################################################
# Begin tuning
# ~~~~~~~~~~~~
# Here we continue our matrix multiplication example. First we create a tuning
# task. We can also inspect the initialized search space. In this case, for a
# 512x512 square matrix multiplication, the space size is 10x10=100. Note that
# the task and search space are independent of the tuner picked.
# Create the tuning task for a 512x512x512 float32 matmul on the "llvm" (CPU)
# target, and print the search space it built from the template's knobs.
N, L, M = 512, 512, 512
task = autotvm.task.create("tutorial/matmul", args=(N, L, M, "float32"), target="llvm")
print(task.config_space)
################################################################################
# Then we need to define how to measure the generated code and pick a tuner.
# Since our space is small, a random tuner is just okay.
#
# We only make 10 trials in this tutorial for demonstration. In practice, you
# can do more trials according to your time budget. We will log the tuning
# results into a log file. This file can be used to choose the best
# configuration discovered by the tuner later.
# logging config (for printing tuning log to the screen)
logging.getLogger("autotvm").setLevel(logging.DEBUG)
# Mirror autotvm's log records to stdout so tuning progress is visible.
logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout))
################################################################################
# There are two steps for measuring a config: build and run. By default, we use
# all CPU cores to compile program. We then measure them sequentially. To help
# reduce variance, we take 5 measurements and average them.
# Build locally; run each candidate config 5 times and average to reduce variance.
measure_option = autotvm.measure_option(builder="local", runner=autotvm.LocalRunner(number=5))
# Begin tuning with RandomTuner, log records to file `matmul.log`
# You can use alternatives like XGBTuner.
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
    n_trial=10,  # only 10 trials for demonstration; use more with a real time budget
    measure_option=measure_option,
    callbacks=[autotvm.callback.log_to_file("matmul.log")],
)
################################################################################
# With tuning completed, we can choose the configuration from the log file that
# has the best measured performance and compile the schedule with the
# corresponding parameters. We also do a quick verification that the schedule is
# producing correct answers. We can call the function :code:`matmul` directly
# under the :any:`autotvm.apply_history_best` context. When we call this
# function, it will query the dispatch context with its argument and get the
# best config with the same argument.
# apply history best from log file: inside this context, calling the template
# queries the dispatch context and uses the best measured config for these args
with autotvm.apply_history_best("matmul.log"):
    with tvm.target.Target("llvm"):
        s, arg_bufs = matmul(N, L, M, "float32")
        func = tvm.build(s, arg_bufs)
# check correctness against a NumPy reference matmul
a_np = np.random.uniform(size=(N, L)).astype(np.float32)
b_np = np.random.uniform(size=(L, M)).astype(np.float32)
c_np = a_np.dot(b_np)
c_tvm = tvm.nd.empty(c_np.shape)
func(tvm.nd.array(a_np), tvm.nd.array(b_np), c_tvm)
tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-4)
################################################################################
# Final Notes and Summary
# -----------------------
# In this tutorial, we have shown how to build operator templates that allow
# TVM to search a parameter space and choose optimized schedule configurations.
# To gain a deeper understanding of how this works, we recommend expanding on
# this example by adding new search parameters to the schedule based on
# schedule operations demonstrated in the :ref:`Getting Started With Tensor
# Expressions <tensor_expr_get_started>` tutorial. In the upcoming sections, we
# will demonstrate the AutoScheduler, a method for TVM to optimize common
# operators without the need for the user to provide a user-defined template.
| 16,251 | 41.881266 | 94 | py |
tvm | tvm-main/gallery/tutorial/tvmc_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Getting Starting using TVMC Python: a high-level API for TVM
=============================================================
**Author**:
`Jocelyn Shiue <https://github.com/CircleSpin>`_
Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂
Before we get started let's get an example model if you don't already have one.
Follow the steps to download a resnet model via the terminal:
.. code-block:: python
mkdir myscripts
cd myscripts
wget https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx
mv resnet50-v2-7.onnx my_model.onnx
touch tvmcpythonintro.py
Let's start editing the python file in your favorite text editor.
"""
################################################################################
# Step 0: Imports
# ~~~~~~~~~~~~~~~
#
# .. code-block:: python
#
# from tvm.driver import tvmc
#
#
################################################################################
# Step 1: Load a model
# ~~~~~~~~~~~~~~~~~~~~
#
# Let's import our model into tvmc. This step converts a machine learning model from
# a supported framework into TVM's high level graph representation language called Relay.
# This is to have a unified starting point for all models in tvm. The frameworks we currently
# support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch.
#
# .. code-block:: python
#
# model = tvmc.load('my_model.onnx') #Step 1: Load
#
# If you'd like to see the Relay, you can run:
# ``model.summary()``
#
# All frameworks support overwriting the input shapes with a shape_dict argument.
# For most frameworks this is optional, but for Pytorch this is necessary as
# TVM cannot automatically search for it.
#
# .. code-block:: python
#
# #model = tvmc.load('my_model.onnx', shape_dict={'input1' : [1, 2, 3, 4], 'input2' : [1, 2, 3, 4]}) #Step 1: Load + shape_dict
#
# A suggested way to see the model's input/shape_dict is via `netron <https://netron.app/>`_. After opening the model,
# click the first node to see the name(s) and shape(s) in the inputs section.
################################################################################
# Step 2: Compile
# ~~~~~~~~~~~~~~~
#
# Now that our model is in Relay, our next step is to compile it to a desired
# hardware to run on. We refer to this hardware as a target. This compilation process
# translates the model from Relay into a lower-level language that the
# target machine can understand.
#
# In order to compile a model a tvm.target string is required.
# To learn more about tvm.targets and their options look at the `documentation <https://tvm.apache.org/docs/api/python/target.html>`_.
# Some examples include:
#
# 1. cuda (Nvidia GPU)
# 2. llvm (CPU)
# 3. llvm -mcpu=cascadelake (Intel CPU)
#
# .. code-block:: python
#
# package = tvmc.compile(model, target="llvm") #Step 2: Compile
#
#
# The compilation step returns a package.
#
################################################################################
# Step 3: Run
# ~~~~~~~~~~~
#
# The compiled package can now be run on the hardware target. The device
# input options are: CPU, Cuda, CL, Metal, and Vulkan.
#
# .. code-block:: python
#
# result = tvmc.run(package, device="cpu") #Step 3: Run
#
# And you can print the results:
# ``print(result)``
#
################################################################################
# Step 1.5: Tune [Optional & Recommended]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Run speed can further be improved by tuning. This optional step uses
# machine learning to look at each operation within a model (a function) and
# tries to find a faster way to run it. We do this through a cost model, and
# benchmarking possible schedules.
#
# The target is the same as compile.
#
# .. code-block:: python
#
# tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune
#
# The terminal output should look like:
#
# .. code-block:: python
#
# [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s
# [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s
# .....
#
# There may be UserWarnings that can be ignored.
# This should make the end result faster, but it can take hours to tune.
#
# See the section 'Saving the Tuning Results' below. Be sure to pass the tuning
# results into compile if you want the results to apply.
#
# .. code-block:: python
#
# #tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile
################################################################################
# Save and then start the process in the terminal:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. code-block:: python
#
# python my_tvmc_script.py
#
# Note: Your fans may become very active
#
################################################################################
# Example results:
# ~~~~~~~~~~~~~~~~
#
# .. code-block:: python
#
# Time elapsed for training: 18.99 s
# Execution time summary:
# mean (ms) max (ms) min (ms) std (ms)
# 25.24 26.12 24.89 0.38
#
#
# Output Names:
# ['output_0']
#
################################################################################
# Additional TVMC Functionalities
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
################################################################################
# Saving the model
# ~~~~~~~~~~~~~~~~
#
# To make things faster for later, after loading the model (Step 1) save the Relay version.
# The model will then appear where you saved it for later in the converted syntax.
#
# .. code-block:: python
#
# model = tvmc.load('my_model.onnx') #Step 1: Load
# model.save(desired_model_path)
#
#
################################################################################
# Saving the package
# ~~~~~~~~~~~~~~~~~~
#
# After the model has been compiled (Step 2) the package is also saveable.
#
# .. code-block:: python
#
# tvmc.compile(model, target="llvm", package_path="whatever") #Step 2: Compile
#
# new_package = tvmc.TVMCPackage(package_path="whatever")
# result = tvmc.run(new_package, device="cpu") #Step 3: Run
#
#
################################################################################
# Using Autoscheduler
# ~~~~~~~~~~~~~~~~~~~
#
# Use the next generation of tvm to enable potentially faster run speed results.
# The search space of the schedules is automatically generated unlike
# previously where they needed to be hand written. (Learn more:
# `1 <https://tvm.apache.org/2021/03/03/intro-auto-scheduler>`_,
# `2 <https://arxiv.org/abs/2006.06762>`_)
#
# .. code-block:: python
#
# tvmc.tune(model, target="llvm", enable_autoscheduler = True)
#
#
################################################################################
# Saving the tuning results
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The tuning results can be saved in a file for later reuse.
#
# Method 1:
# .. code-block:: python
#
# log_file = "hello.json"
#
# # Run tuning
# tvmc.tune(model, target="llvm", tuning_records=log_file)
#
# ...
#
# # Later run tuning and reuse tuning results
# tvmc.tune(model, target="llvm", prior_records=log_file)
#
# Method 2:
# .. code-block:: python
#
# # Run tuning
# tuning_records = tvmc.tune(model, target="llvm")
#
# ...
#
# # Later run tuning and reuse tuning results
# tvmc.tune(model, target="llvm", prior_records=tuning_records)
#
################################################################################
# Tuning a more complex model:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you notice T's printing that look like ``.........T.T..T..T..T.T.T.T.T.T.``
# increase the searching time frame:
#
# .. code-block:: python
#
# tvmc.tune(model,trials=10000,timeout=10,)
#
################################################################################
# Compiling a model for a remote device:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# A remote procedural call (RPC) is useful when you would like to compile for hardware
# that is not on your local machine. The tvmc methods support this.
# To set up the RPC server take a look at the 'Set up RPC Server on Device'
# section in this `document <https://tvm.apache.org/docs/tutorials/get_started/cross_compilation_and_rpc.html>`_.
#
# Within the TVMC Script include the following and adjust accordingly:
#
# .. code-block:: python
#
# tvmc.tune(
# model,
# target=target, # Compilation target as string // Device to compile for
# target_host=target_host, # Host processor
# hostname=host_ip_address, # The IP address of an RPC tracker, used when benchmarking remotely.
# port=port_number, # The port of the RPC tracker to connect to. Defaults to 9090.
# rpc_key=your_key, # The RPC tracker key of the target device. Required when rpc_tracker is provided
# )
#
| 9,741 | 32.136054 | 139 | py |
tvm | tvm-main/gallery/how_to/work_with_microtvm/micro_pytorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-micro-pytorch:
4. microTVM PyTorch Tutorial
============================
**Authors**:
`Mehrdad Hessar <https://github.com/mehrdadh>`_
This tutorial is showcasing microTVM host-driven AoT compilation with
a PyTorch model. This tutorial can be executed on a x86 CPU using C runtime (CRT).
**Note:** This tutorial only runs on x86 CPU using CRT and does not run on Zephyr
since the model would not fit on our current supported Zephyr boards.
"""
######################################################################
#
# .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst
#
import pathlib
import torch
import torchvision
from torchvision import transforms
import numpy as np
from PIL import Image
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.backend import Executor
import tvm.micro.testing
##################################
# Load a pre-trained PyTorch model
# --------------------------------
#
# To begin with, load pre-trained MobileNetV2 from torchvision. Then,
# download a cat image and preprocess it to use as the model input.
#
# Quantized MobileNetV2 in inference mode.
model = torchvision.models.quantization.mobilenet_v2(weights="DEFAULT", quantize=True)
model = model.eval()
# NCHW layout: (batch, channels, height, width)
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
# Trace with a random input so TorchScript captures a fixed graph for Relay import.
scripted_model = torch.jit.trace(model, input_data).eval()
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Preprocess the image and convert to tensor
# (standard ImageNet mean/std normalization constants)
my_preprocess = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)
img = my_preprocess(img)
# Add the batch dimension -> (1, 3, 224, 224)
img = np.expand_dims(img, 0)
input_name = "input0"
shape_list = [(input_name, input_shape)]
# Convert the traced TorchScript module into a Relay module + parameter dict.
relay_mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
#####################################
# Define Target, Runtime and Executor
# -----------------------------------
#
# In this tutorial we use AOT host-driven executor. To compile the model
# for an emulated embedded environment on an x86 machine we use C runtime (CRT)
# and we use `host` micro target. Using this setup, TVM compiles the model
# for C runtime which can run on a x86 CPU machine with the same flow that
# would run on a physical microcontroller.
# CRT uses the main() from `src/runtime/crt/host/main.cc`
# To use physical hardware, replace `board` with another physical micro target, e.g. `nrf5340dk_nrf5340_cpuapp`
# or `mps2_an521` and change the platform type to Zephyr.
# See more target examples in :ref:`Training Vision Models for microTVM on Arduino <tutorial-micro-train-arduino>`
# and :ref:`microTVM TFLite Tutorial<tutorial_micro_tflite>`.
#
target = tvm.micro.testing.get_target(platform="crt", board=None)
# Use the C runtime (crt) and enable static linking by setting system-lib to True
runtime = tvm.relay.backend.Runtime("crt", {"system-lib": True})
# Use the AOT executor rather than graph or vm executors. Don't use unpacked API or C calling style.
executor = Executor("aot")
####################
# Compile the model
# ------------------
#
# Now, we compile the model for the target:
#
# tir.disable_vectorize=True turns off TIR vectorization for this build.
with tvm.transform.PassContext(
    opt_level=3,
    config={"tir.disable_vectorize": True},
):
    module = tvm.relay.build(
        relay_mod, target=target, runtime=runtime, executor=executor, params=params
    )
###########################
# Create a microTVM project
# -------------------------
#
# Now that we have the compiled model as an IRModule, we need to create a firmware project
# to use the compiled model with microTVM. To do this, we use Project API.
#
template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
# workspace_size_bytes: 6 MB scratch area for the generated project.
project_options = {"verbose": False, "workspace_size_bytes": 6 * 1024 * 1024}
temp_dir = tvm.contrib.utils.tempdir() / "project"
project = tvm.micro.generate_project(
    str(template_project_path),
    module,
    temp_dir,
    project_options,
)
####################################
# Build, flash and execute the model
# ----------------------------------
# Next, we build the microTVM project and flash it. Flash step is specific to
# physical microcontroller and it is skipped if it is simulating a microcontroller
# via the host ``main.cc`` or if a Zephyr emulated board is selected as the target.
#
project.build()
project.flash()
input_data = {input_name: tvm.nd.array(img.astype("float32"))}
# Run host-driven inference over the transport and fetch the output tensor.
with tvm.micro.Session(project.transport()) as session:
    aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor())
    aot_executor.set_input(**input_data)
    aot_executor.run()
    result = aot_executor.get_output(0).numpy()
#####################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
#
synset_url = (
    "https://raw.githubusercontent.com/Cadene/"
    "pretrained-models.pytorch/master/data/"
    "imagenet_synsets.txt"
)
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synsets = f.readlines()
synsets = [x.strip() for x in synsets]
# Each line is "<synset-id> <human-readable name...>"; build id -> name map.
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
class_url = (
    "https://raw.githubusercontent.com/Cadene/"
    "pretrained-models.pytorch/master/data/"
    "imagenet_classes.txt"
)
class_path = download_testdata(class_url, "imagenet_classes.txt", module="data")
with open(class_path) as f:
    class_id_to_key = f.readlines()
# Class index (line number) -> synset id.
class_id_to_key = [x.strip() for x in class_id_to_key]
# Get top-1 result for TVM
top1_tvm = np.argmax(result)
tvm_class_key = class_id_to_key[top1_tvm]
# Convert input to PyTorch variable and get PyTorch result for comparison
with torch.no_grad():
    torch_img = torch.from_numpy(img)
    output = model(torch_img)
    # Get top-1 result for PyTorch
    top1_torch = np.argmax(output.numpy())
    torch_class_key = class_id_to_key[top1_torch]
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
| 7,190 | 33.572115 | 114 | py |
tvm | tvm-main/gallery/how_to/work_with_microtvm/micro_train.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-micro-train-arduino:
5. Training Vision Models for microTVM on Arduino
=================================================
**Author**: `Gavin Uberti <https://github.com/guberti>`_
This tutorial shows how MobileNetV1 models can be trained
to fit on embedded devices, and how those models can be
deployed to Arduino using TVM.
"""
######################################################################
# Motivation
# ----------
# When building IOT devices, we often want them to **see and understand** the world around them.
# This can take many forms, but often times a device will want to know if a certain **kind of
# object** is in its field of vision.
#
# For example, a security camera might look for **people**, so it can decide whether to save a video
# to memory. A traffic light might look for **cars**, so it can judge which lights should change
# first. Or a forest camera might look for a **kind of animal**, so they can estimate how large
# the animal population is.
#
# To make these devices affordable, we would like them to need only a low-cost processor like the
# `nRF52840 <https://www.nordicsemi.com/Products/nRF52840>`_ (costing five dollars each on Mouser) or the `RP2040 <https://www.raspberrypi.com/products/rp2040/>`_ (just $1.45 each!).
#
# These devices have very little memory (~250 KB RAM), meaning that no conventional edge AI
# vision model (like MobileNet or EfficientNet) will be able to run. In this tutorial, we will
# show how these models can be modified to work around this requirement. Then, we will use TVM
# to compile and deploy it for an Arduino that uses one of these processors.
#
# Installing the Prerequisites
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# This tutorial will use TensorFlow to train the model - a widely used machine learning library
# created by Google. TensorFlow is a very low-level library, however, so we will the Keras
# interface to talk to TensorFlow. We will also use TensorFlow Lite to perform quantization on
# our model, as TensorFlow by itself does not support this.
#
# Once we have our generated model, we will use TVM to compile and test it. To avoid having to
# build from source, we'll install ``tlcpack`` - a community build of TVM. Lastly, we'll also
# install ``imagemagick`` and ``curl`` to preprocess data:
#
# .. code-block:: bash
#
# %%shell
# pip install -q tensorflow tflite
# pip install -q tlcpack-nightly -f https://tlcpack.ai/wheels
# apt-get -qq install imagemagick curl
#
# # Install Arduino CLI and library for Nano 33 BLE
# curl -fsSL https://raw.githubusercontent.com/arduino/arduino-cli/master/install.sh | sh
# /content/bin/arduino-cli core update-index
# /content/bin/arduino-cli core install arduino:mbed_nano
#
# Using the GPU
# ^^^^^^^^^^^^^
#
# This tutorial demonstrates training a neural network, which is requires a lot of computing power
# and will go much faster if you have a GPU. If you are viewing this tutorial on Google Colab, you
# can enable a GPU by going to **Runtime->Change runtime type** and selecting "GPU" as our hardware
# accelerator. If you are running locally, you can `follow TensorFlow's guide <https://www.tensorflow.org/guide/gpu>`_ instead.
#
# We can test our GPU installation with the following code:
import tensorflow as tf

# Report whether TensorFlow can see a GPU; training is several times slower without one.
if tf.test.gpu_device_name():
    print("GPU detected - you're good to go.")
else:
    print("No GPU was detected!")
    print("Model training will take much longer (~30 minutes instead of ~5)")
######################################################################
# Choosing Our Work Dir
# ^^^^^^^^^^^^^^^^^^^^^
# We need to pick a directory where our image datasets, trained model, and eventual Arduino sketch
# will all live. If running on Google Colab, we'll save everything in ``/root`` (aka ``~``) but you'll
# probably want to store it elsewhere if running locally. Note that this variable only affects Python
# scripts - you'll have to adjust the Bash commands too.
import os
# Working directory for the image datasets, trained models, and generated Arduino sketch.
FOLDER = "/root"
# sphinx_gallery_start_ignore
# In the sphinx-gallery docs build, use a throwaway temporary directory instead of /root.
import tempfile
FOLDER = tempfile.mkdtemp()
# sphinx_gallery_end_ignore
######################################################################
# Downloading the Data
# --------------------
# Convolutional neural networks usually learn by looking at many images, along with labels telling
# the network what those images are. To get these images, we'll need a publicly available dataset
# with thousands of images of all sorts of objects and labels of what's in each image. We'll also
# need a bunch of images that **aren't** of cars, as we're trying to distinguish these two classes.
#
# In this tutorial, we'll create a model to detect if an image contains a **car**, but you can use
# whatever category you like! Just change the source URL below to one containing images of another
# type of object.
#
# To get our car images, we'll be downloading the `Stanford Cars dataset <http://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_,
# which contains 16,185 full color images of cars. We'll also need images of random things that
# aren't cars, so we'll use the `COCO 2017 <https://cocodataset.org/#home>`_ validation set (it's
# smaller, and thus faster to download than the full training set. Training on the full data set
# would yield better results). Note that there are some cars in the COCO 2017 data set, but it's
# a small enough fraction not to matter - just keep in mind that this will drive down our perceived
# accuracy slightly.
#
# We could use the TensorFlow dataloader utilities, but we'll instead do it manually to make sure
# it's easy to change the datasets being used. We'll end up with the following file hierarchy:
#
# .. code-block::
#
# /root
# ├── images
# │ ├── object
# │ │ ├── 000001.jpg
# │ │ │ ...
# │ │ └── 016185.jpg
# │ ├── object.tgz
# │ ├── random
# │ │ ├── 000000000139.jpg
# │ │ │ ...
# │ │ └── 000000581781.jpg
# │ └── random.zip
#
# We should also note that Stanford cars has 8k images, while the COCO 2017 validation set is 5k
# images - it is not a 50/50 split! If we wanted to, we could weight these classes differently
# during training to correct for this, but training will still work if we ignore it. It should
# take about **2 minutes** to download the Stanford Cars, while COCO 2017 validation will take
# **1 minute**.
import os
import shutil
import urllib.request

# Download datasets.
# ``exist_ok=True`` makes this cell safe to re-run: without it, a second run
# raises FileExistsError on the already-created directories.
os.makedirs(f"{FOLDER}/downloads", exist_ok=True)
os.makedirs(f"{FOLDER}/images", exist_ok=True)
urllib.request.urlretrieve(
    "https://data.deepai.org/stanfordcars.zip", f"{FOLDER}/downloads/target.zip"
)
urllib.request.urlretrieve(
    "http://images.cocodataset.org/zips/val2017.zip", f"{FOLDER}/downloads/random.zip"
)
# Extract them and rename their folders so Keras sees one subdirectory per class.
shutil.unpack_archive(f"{FOLDER}/downloads/target.zip", f"{FOLDER}/downloads")
shutil.unpack_archive(f"{FOLDER}/downloads/random.zip", f"{FOLDER}/downloads")
shutil.move(f"{FOLDER}/downloads/cars_train/cars_train", f"{FOLDER}/images/target")
shutil.move(f"{FOLDER}/downloads/val2017", f"{FOLDER}/images/random")
######################################################################
# Loading the Data
# ----------------
# Currently, our data is stored on-disk as JPG files of various sizes. To train with it, we'll have
# to load the images into memory, resize them to be 64x64, and convert them to raw, uncompressed
# data. Keras's ``image_dataset_from_directory`` will take care of most of this, though it loads
# images such that each pixel value is a float from 0 to 255.
#
# We'll also need to load labels, though Keras will help with this. From our subdirectory structure,
# it knows the images in ``/objects`` are one class, and those in ``/random`` another. Setting
# ``label_mode='categorical'`` tells Keras to convert these into **categorical labels** - a 2x1 vector
# that's either ``[1, 0]`` for an object of our target class, or ``[0, 1]`` vector for anything else.
# We'll also set ``shuffle=True`` to randomize the order of our examples.
#
# We will also **batch** the data - grouping samples into clumps to make our training go faster.
# Setting ``batch_size = 32`` is a decent number.
#
# Lastly, in machine learning we generally want our inputs to be small numbers. We'll thus use a
# ``Rescaling`` layer to change our images such that each pixel is a float between ``0.0`` and ``1.0``,
# instead of ``0`` to ``255``. We need to be careful not to rescale our categorical labels though, so
# we'll use a ``lambda`` function.
# Target input resolution for the model: (height, width, channels).
IMAGE_SIZE = (64, 64, 3)
unscaled_dataset = tf.keras.utils.image_dataset_from_directory(
    f"{FOLDER}/images",
    batch_size=32,
    shuffle=True,
    label_mode="categorical",
    image_size=IMAGE_SIZE[0:2],
)
# Map pixel values from [0, 255] floats to [0.0, 1.0]; labels pass through unchanged.
rescale = tf.keras.layers.Rescaling(scale=1.0 / 255)
full_dataset = unscaled_dataset.map(lambda im, lbl: (rescale(im), lbl))
######################################################################
# What's Inside Our Dataset?
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
# Before giving this data set to our neural network, we ought to give it a quick visual inspection.
# Does the data look properly transformed? Do the labels seem appropriate? And what's our ratio of
# objects to other stuff? We can display some examples from our datasets using ``matplotlib``:
import matplotlib.pyplot as plt
# Count images per class directly from the directory listings.
num_target_class = len(os.listdir(f"{FOLDER}/images/target/"))
num_random_class = len(os.listdir(f"{FOLDER}/images/random/"))
print(f"{FOLDER}/images/target contains {num_target_class} images")
print(f"{FOLDER}/images/random contains {num_random_class} images")
# Show some samples and their labels
SAMPLES_TO_SHOW = 10
plt.figure(figsize=(20, 10))
# Unbatch so we iterate individual (image, label) pairs rather than batches of 32.
for i, (image, label) in enumerate(unscaled_dataset.unbatch()):
    if i >= SAMPLES_TO_SHOW:
        break
    ax = plt.subplot(1, SAMPLES_TO_SHOW, i + 1)
    plt.imshow(image.numpy().astype("uint8"))
    # Label is a categorical 2-vector: [1, 0] = target class, [0, 1] = anything else.
    plt.title(list(label.numpy()))
    plt.axis("off")
######################################################################
# Validating our Accuracy
# ^^^^^^^^^^^^^^^^^^^^^^^
# While developing our model, we'll often want to check how accurate it is (e.g. to see if it
# improves during training). How do we do this? We could just train it on *all* of the data, and
# then ask it to classify that same data. However, our model could cheat by just memorizing all of
# the samples, which would make it *appear* to have very high accuracy, but perform very badly in
# reality. In practice, this "memorizing" is called **overfitting**.
#
# To prevent this, we will set aside some of the data (we'll use 20%) as a **validation set**. Our
# model will never be trained on validation data - we'll only use it to check our model's accuracy.
# Reserve roughly the last 20% of batches as a held-out validation set.
num_batches = len(full_dataset)
train_dataset = full_dataset.take(int(num_batches * 0.8))
# skip() past the training batches so the two sets do not overlap.
# NOTE(review): ``shuffle=True`` in image_dataset_from_directory reshuffles on
# each iteration by default, which can leak examples between take() and skip()
# across epochs — confirm this is acceptable for a tutorial-grade split.
validation_dataset = full_dataset.skip(len(train_dataset))
######################################################################
# Defining Our Model
# ------------------
# In the past decade, `convolutional neural networks <https://en.wikipedia.org/wiki/Convolutional_neural_network>`_ have been widely
# adopted for image classification tasks. State-of-the-art models like `EfficientNet V2 <https://arxiv.org/abs/2104.00298>`_ are able
# to perform image classification better than even humans! Unfortunately, these models have tens of
# millions of parameters, and thus won't fit on cheap security camera computers.
#
# Our applications generally don't need perfect accuracy - 90% is good enough. We can thus use the
# older and smaller MobileNet V1 architecture. But this *still* won't be small enough - by default,
# MobileNet V1 with 224x224 inputs and alpha 1.0 takes ~50 MB to just **store**. To reduce the size
# of the model, there are three knobs we can turn. First, we can reduce the size of the input images
# from 224x224 to 96x96 or 64x64, and Keras makes it easy to do this. We can also reduce the **alpha**
# of the model, from 1.0 to 0.25, which downscales the width of the network (and the number of
# filters) by a factor of four. And if we were really strapped for space, we could reduce the
# number of **channels** by making our model take grayscale images instead of RGB ones.
#
# In this tutorial, we will use an RGB 64x64 input image and alpha 0.25. This is not quite
# ideal, but it allows the finished model to fit in 192 KB of RAM, while still letting us perform
# transfer learning using the official TensorFlow source models (if we used alpha <0.25 or a
# grayscale input, we wouldn't be able to do this).
#
# What is Transfer Learning?
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
# Deep learning has `dominated image classification <https://paperswithcode.com/sota/image-classification-on-imagenet>`_ for a long time,
# but training neural networks takes a lot of time. When a neural network is trained "from scratch",
# its parameters start out randomly initialized, forcing it to learn very slowly how to tell images
# apart.
#
# With transfer learning, we instead start with a neural network that's **already** good at a
# specific task. In this example, that task is classifying images from `the ImageNet database <https://www.image-net.org/>`_. This
# means the network already has some object detection capabilities, and is likely closer to what you
# want than a random model would be.
#
# This works especially well with image processing neural networks like MobileNet. In practice, it
# turns out the convolutional layers of the model (i.e. the first 90% of the layers) are used for
# identifying low-level features like lines and shapes - only the last few fully connected layers
# are used to determine how those shapes make up the objects the network is trying to detect.
#
# We can take advantage of this by starting training with a MobileNet model that was trained on
# ImageNet, and already knows how to identify those lines and shapes. We can then just remove the
# last few layers from this pretrained model, and add our own final layers. We'll then train this
# conglomerate model for a few epochs on our cars vs non-cars dataset, to adjust the first layers
# and train from scratch the last layers. This process of training an already-partially-trained
# model is called *fine-tuning*.
#
# Source MobileNets for transfer learning have been `pretrained by the TensorFlow folks <https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md>`_, so we
# can just download the one closest to what we want (the 128x128 input model with 0.25 depth scale).
# ``exist_ok=True`` makes this cell safe to re-run: without it, a second run
# raises FileExistsError on the already-created directory.
os.makedirs(f"{FOLDER}/models", exist_ok=True)
# Pretrained ImageNet MobileNet weights (alpha 0.25, 128x128 input) from TensorFlow.
WEIGHTS_PATH = f"{FOLDER}/models/mobilenet_2_5_128_tf.h5"
urllib.request.urlretrieve(
    "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_2_5_128_tf.h5",
    WEIGHTS_PATH,
)
# Instantiate MobileNet at our 64x64 input size, loading the downloaded weights.
pretrained = tf.keras.applications.MobileNet(
    input_shape=IMAGE_SIZE, weights=WEIGHTS_PATH, alpha=0.25
)
######################################################################
# Modifying Our Network
# ^^^^^^^^^^^^^^^^^^^^^
# As mentioned above, our pretrained model is designed to classify the 1,000 ImageNet categories,
# but we want to convert it to classify cars. Since only the bottom few layers are task-specific,
# we'll **cut off the last five layers** of our original model. In their place we'll build our own
# "tail" to the model by performing reshape, dropout, flatten, and softmax operations.
# Assemble the transfer-learning model: the pretrained MobileNet backbone
# (minus its last five layers) plus a freshly initialized two-class head.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=IMAGE_SIZE))
# Wrap the truncated pretrained graph as a single layer.
model.add(tf.keras.Model(inputs=pretrained.inputs, outputs=pretrained.layers[-5].output))
model.add(tf.keras.layers.Reshape((-1,)))
# Dropout regularizes the new head during fine-tuning.
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Flatten())
# Two-way softmax output: [target, not-target].
model.add(tf.keras.layers.Dense(2, activation="softmax"))
######################################################################
# Fine Tuning Our Network
# ^^^^^^^^^^^^^^^^^^^^^^^
# When training neural networks, we must set a parameter called the **learning rate** that controls
# how fast our network learns. It must be set carefully - too slow, and our network will take
# forever to train; too fast, and our network won't be able to learn some fine details. Generally
# for Adam (the optimizer we're using), ``0.001`` is a pretty good learning rate (and is what's
# recommended in the `original paper <https://arxiv.org/abs/1412.6980>`_). However, in this case
# ``0.0005`` seems to work a little better.
#
# We'll also pass the validation set from earlier to ``model.fit``. This will evaluate how good our
# model is each time we train it, and let us track how our model is improving. Once training is
# finished, the model should have a validation accuracy around ``0.98`` (meaning it was right 98% of
# the time on our validation set).
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),  # slightly below Adam's usual 1e-3
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
# Three epochs of fine-tuning; validation accuracy is reported after each epoch.
model.fit(train_dataset, validation_data=validation_dataset, epochs=3, verbose=2)
######################################################################
# Quantization
# ------------
# We've done a decent job of reducing our model's size so far - changing the input dimension,
# along with removing the bottom layers reduced the model to just 219k parameters. However, each of
# these parameters is a ``float32`` that takes four bytes, so our model will take up almost one MB!
#
# Additionally, it might be the case that our hardware doesn't have built-in support for floating
# point numbers. While most high-memory Arduinos (like the Nano 33 BLE) do have hardware support,
# some others (like the Arduino Due) do not. On any boards *without* dedicated hardware support,
# floating point multiplication will be extremely slow.
#
# To address both issues we will **quantize** the model - representing the weights as eight bit
# integers. It's more complex than just rounding, though - to get the best performance, TensorFlow
# tracks how each neuron in our model activates, so we can figure out how to most accurately simulate
# the neuron's original activations with integer operations.
#
# We will help TensorFlow do this by creating a representative dataset - a subset of the original
# that is used for tracking how those neurons activate. We'll then pass this into a ``TFLiteConverter``
# (Keras itself does not have quantization support) with an ``Optimize`` flag to tell TFLite to perform
# the conversion. By default, TFLite keeps the inputs and outputs of our model as floats, so we must
# explicitly tell it to avoid this behavior.
def representative_dataset():
    """Yield ~10 batches of training images so TFLite can calibrate quantization ranges."""
    for image_batch, label_batch in full_dataset.take(10):
        yield [image_batch]
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
# Require pure int8 kernels with uint8 model inputs/outputs — no float fallback.
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
quantized_model = converter.convert()
######################################################################
# Download the Model if Desired
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# We've now got a finished model that you can use locally or in other tutorials (try autotuning
# this model or viewing it on `https://netron.app/ <https://netron.app/>`_). But before we do
# those things, we'll have to write it to a file (``quantized.tflite``). If you're running this
# tutorial on Google Colab, you'll have to uncomment the last two lines to download the file
# after writing it.
# Serialized flatbuffer of the quantized model.
QUANTIZED_MODEL_PATH = f"{FOLDER}/models/quantized.tflite"
with open(QUANTIZED_MODEL_PATH, "wb") as f:
    f.write(quantized_model)
# Uncomment the two lines below to download the file when running on Colab.
# from google.colab import files
# files.download(QUANTIZED_MODEL_PATH)
######################################################################
# Compiling With TVM For Arduino
# ------------------------------
# TensorFlow has a built-in framework for deploying to microcontrollers - `TFLite Micro <https://www.tensorflow.org/lite/microcontrollers>`_. However,
# it's poorly supported by development boards and does not support autotuning. We will use Apache
# TVM instead.
#
# TVM can be used either with its command line interface (``tvmc``) or with its Python interface. The
# Python interface is fully-featured and more stable, so we'll use it here.
#
# TVM is an optimizing compiler, and optimizations to our model are performed in stages via
# **intermediate representations**. The first of these is `Relay <https://arxiv.org/abs/1810.00952>`_ a high-level intermediate
# representation emphasizing portability. The conversion from ``.tflite`` to Relay is done without any
# knowledge of our "end goal" - the fact we intend to run this model on an Arduino.
#
# Choosing an Arduino Board
# ^^^^^^^^^^^^^^^^^^^^^^^^^
# Next, we'll have to decide exactly which Arduino board to use. The Arduino sketch that we
# ultimately generate should be compatible with any board, but knowing which board we are using in
# advance allows TVM to adjust its compilation strategy to get better performance.
#
# There is one catch - we need enough **memory** (flash and RAM) to be able to run our model. We
# won't ever be able to run a complex vision model like a MobileNet on an Arduino Uno - that board
# only has 2 kB of RAM and 32 kB of flash! Our model has ~200,000 parameters, so there is just no
# way it could fit.
#
# For this tutorial, we will use the Nano 33 BLE, which has 1 MB of flash memory and 256 KB of RAM.
# However, any other Arduino with those specs or better should also work.
#
# Generating our project
# ^^^^^^^^^^^^^^^^^^^^^^
# Next, we'll compile the model to TVM's MLF (model library format) intermediate representation,
# which consists of C/C++ code and is designed for autotuning. To improve performance, we'll tell
# TVM that we're compiling for the ``nrf52840`` microprocessor (the one the Nano 33 BLE uses). We'll
# also tell it to use the C runtime (abbreviated ``crt``) and to use ahead-of-time memory allocation
# (abbreviated ``aot``, which helps reduce the model's memory footprint). Lastly, we will disable
# vectorization with ``"tir.disable_vectorize": True``, as C has no native vectorized types.
#
# Once we have set these configuration parameters, we will call ``tvm.relay.build`` to compile our
# Relay model into the MLF intermediate representation. From here, we just need to call
# ``tvm.micro.generate_project`` and pass in the Arduino template project to finish compilation.
import shutil
import tvm
import tvm.micro.testing
# Method to load model is different in TFLite 1 vs 2
try: # TFLite 2.1 and above
    import tflite
    tflite_model = tflite.Model.GetRootAsModel(quantized_model, 0)
except AttributeError: # Fall back to TFLite 1.14 method
    import tflite.Model
    tflite_model = tflite.Model.Model.GetRootAsModel(quantized_model, 0)
# Convert to the Relay intermediate representation
mod, params = tvm.relay.frontend.from_tflite(tflite_model)
# Set configuration flags to improve performance
# NOTE(review): the prose above says we compile for the nRF52840 (the Nano 33
# BLE's MCU), but this target string selects the nRF5340 DK board — confirm
# which target is intended.
target = tvm.micro.testing.get_target("zephyr", "nrf5340dk_nrf5340_cpuapp")
runtime = tvm.relay.backend.Runtime("crt")
# Ahead-of-time executor with the unpacked calling convention (lower overhead).
executor = tvm.relay.backend.Executor("aot", {"unpacked-api": True})
# Convert to the MLF intermediate representation
# C has no native vector types, so vectorization must be disabled for C codegen.
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
    mod = tvm.relay.build(mod, target, runtime=runtime, executor=executor, params=params)
# Generate an Arduino project from the MLF intermediate representation
# Remove any stale project directory left over from a previous run first.
shutil.rmtree(f"{FOLDER}/models/project", ignore_errors=True)
arduino_project = tvm.micro.generate_project(
    tvm.micro.get_microtvm_template_projects("arduino"),
    mod,
    f"{FOLDER}/models/project",
    {
        "board": "nano33ble",
        "arduino_cli_cmd": "/content/bin/arduino-cli",
        "project_type": "example_project",
    },
)
######################################################################
# Testing our Arduino Project
# ---------------------------
# Consider the following two 224x224 images from the author's camera roll - one of a car, one not.
# We will test our Arduino project by loading both of these images and executing the compiled model
# on them.
#
# .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/testdata/microTVM/data/model_train_images_combined.png
# :align: center
# :height: 200px
# :width: 600px
#
# Currently, these are 224x224 PNG images we can download from Imgur. Before we can feed in these
# images, we'll need to resize and convert them to raw data, which can be done with ``imagemagick``.
#
# It's also challenging to load raw data onto an Arduino, as only C/CPP files (and similar) are
# compiled. We can work around this by embedding our raw data in a hard-coded C array with the
# built-in utility ``bin2c`` that will output a file like below:
#
# .. code-block:: c
#
# static const unsigned char CAR_IMAGE[] = {
# 0x22,0x23,0x14,0x22,
# ...
# 0x07,0x0e,0x08,0x08
# };
#
# We can do both of these things with a few lines of Bash code:
#
# .. code-block:: bash
#
# %%shell
# mkdir -p ~/tests
# curl "https://i.imgur.com/JBbEhxN.png" -o ~/tests/car_224.png
# convert ~/tests/car_224.png -resize 64 ~/tests/car_64.png
# stream ~/tests/car_64.png ~/tests/car.raw
# bin2c -c -st ~/tests/car.raw --name CAR_IMAGE > ~/models/project/car.c
#
# curl "https://i.imgur.com/wkh7Dx2.png" -o ~/tests/catan_224.png
# convert ~/tests/catan_224.png -resize 64 ~/tests/catan_64.png
# stream ~/tests/catan_64.png ~/tests/catan.raw
# bin2c -c -st ~/tests/catan.raw --name CATAN_IMAGE > ~/models/project/catan.c
######################################################################
# Writing our Arduino Script
# --------------------------
# We now need a little bit of Arduino code to read the two binary arrays we just generated, run the
# model on them, and log the output to the serial monitor. This file will replace ``arduino_sketch.ino``
# as the main file of our sketch. You'll have to copy this code in manually.
#
# .. code-block:: c
#
# %%writefile /root/models/project.ino
# #include "src/model.h"
# #include "car.c"
# #include "catan.c"
#
# void setup() {
# Serial.begin(9600);
# TVMInitialize();
# }
#
# void loop() {
# uint8_t result_data[2];
# Serial.println("Car results:");
# TVMExecute(const_cast<uint8_t*>(CAR_IMAGE), result_data);
# Serial.print(result_data[0]); Serial.print(", ");
# Serial.print(result_data[1]); Serial.println();
#
# Serial.println("Other object results:");
# TVMExecute(const_cast<uint8_t*>(CATAN_IMAGE), result_data);
# Serial.print(result_data[0]); Serial.print(", ");
# Serial.print(result_data[1]); Serial.println();
#
# delay(1000);
# }
#
# Compiling Our Code
# ^^^^^^^^^^^^^^^^^^
# Now that our project has been generated, TVM's job is mostly done! We can still call
# ``arduino_project.build()`` and ``arduino_project.upload()``, but these just use ``arduino-cli``'s
# compile and flash commands underneath. We could also begin autotuning our model, but that's a
# subject for a different tutorial. To finish up, we'll verify no compiler errors are thrown
# by our project:
# Clear any stale build artifacts before compiling the sketch.
shutil.rmtree(f"{FOLDER}/models/project/build", ignore_errors=True)
# sphinx_gallery_start_ignore
# The docs build has no Arduino toolchain, so stub the project object out.
from unittest.mock import MagicMock
arduino_project = MagicMock()
# sphinx_gallery_end_ignore
arduino_project.build()
print("Compilation succeeded!")
######################################################################
# Uploading to Our Device
# -----------------------
# The very last step is uploading our sketch to an Arduino to make sure our code works properly.
# Unfortunately, we can't do that from Google Colab, so we'll have to download our sketch. This is
# simple enough to do - we'll just turn our project into a `.zip` archive, and call `files.download`.
# If you're running on Google Colab, you'll have to uncomment the last two lines to download the file
# after writing it.
# Archive the generated sketch so it can be downloaded from Colab.
ZIP_FOLDER = f"{FOLDER}/models/project"
shutil.make_archive(ZIP_FOLDER, "zip", ZIP_FOLDER)
# from google.colab import files
# files.download(f"{FOLDER}/models/project.zip")
# sphinx_gallery_start_ignore
# Run a few unit tests to make sure the Python code worked
# Ensure transfer learn model was correctly assembled
assert len(model.layers) == 5
assert model.count_params() == 219058 # Only 219,058 of these are trainable
assert len(quantized_model) >= 250000 # Quantized model will be 250 KB - 350 KB
assert len(quantized_model) <= 350000 # Exact value depends on quantization
# Assert .tflite and .zip files were written to disk
assert os.path.isfile(f"{FOLDER}/models/quantized.tflite")
assert os.path.isfile(f"{FOLDER}/models/project.zip")
# Assert MLF file was correctly generated
assert mod.executor.name == "aot"
# Remove the temporary folder we generated at the beginning
shutil.rmtree(FOLDER)
# sphinx_gallery_end_ignore
# sphinx_gallery_end_ignore
######################################################################
# From here, we'll need to open it in the Arduino IDE. You'll have to download the IDE as well as
# the SDK for whichever board you are using. For certain boards like the Sony SPRESENSE, you may
# have to change settings to control how much memory you want the board to use.
#
# Expected Results
# ^^^^^^^^^^^^^^^^
# If all works as expected, you should see the following output on a Serial monitor:
#
# .. code-block::
#
# Car results:
# 255, 0
# Other object results:
# 0, 255
#
# The first number represents the model's confidence that the object **is** a car and ranges from
# 0-255. The second number represents the model's confidence that the object **is not** a car and
# is also 0-255. These results mean the model is very sure that the first image is a car, and the
# second image is not (which is correct). Hence, our model is working!
#
# Summary
# -------
# In this tutorial, we used transfer learning to quickly train an image recognition model to
# identify cars. We modified its input dimensions and last few layers to make it better at this,
# and to make it faster and smaller. We then quantized the model and compiled it using TVM to
# create an Arduino sketch. Lastly, we tested the model using two static images to prove it works
# as intended.
#
# Next Steps
# ^^^^^^^^^^
# From here, we could modify the model to read live images from the camera - we have another
# Arduino tutorial for how to do that `on GitHub <https://github.com/guberti/tvm-arduino-demos/tree/master/examples/person_detection>`_. Alternatively, we could also
# `use TVM's autotuning capabilities <https://tvm.apache.org/docs/how_to/work_with_microtvm/micro_autotune.html>`_ to dramatically improve the model's performance.
#
| 31,816 | 48.40528 | 182 | py |
tvm | tvm-main/gallery/how_to/work_with_pytorch/using_as_torch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Wrap Your TVMScript as PyTorch Module
=====================================
**Author**:
`Yaoda Zhou <https://github.com/juda>`_
This article is a tutorial on wrapping the TVMScript code as the PyTorch module.
Using the decorator `as_torch`, users can wrap TVMScript code into a PyTorch nn.Module naturally.
To follow the tutorial, PyTorch should be installed:
.. code-block:: bash
%%shell
pip install torch
"""
# Import PyTorch, as well as necessary libraries
import torch
import torch.nn.functional as F
import torch.utils.benchmark as benchmark
import tvm
from tvm.contrib.torch import as_torch
from tvm.script import tir as T
######################################################################
# Write your own PyTorch operator by TVMScript
# --------------------------------------------
# PyTorch is a very popular machine learning framework which contains
# optimized implementations of most commonly used operators.
# Nevertheless, sometimes you might want to write your own operators in PyTorch.
# In that case, the performance of such custom operators might not be satisfactory for your needs.
#
# For example, suppose that we are going to define a 1-d depthwise convolution operator.
# Assume the number of in_channel and out_channel are both 70,
# the width is 80 and the kernel size is 20,
# then the 1-d depthwise conv could be written in PyTorch in one line:
# Problem dimensions: 70 channels, width-80 signal, length-20 per-channel kernel.
in_channel = 70
out_channel = 70
width = 80
kernel_size = 20


def torch_depthwise(inputs, filters):
    """Depthwise 1-D convolution: one length-``kernel_size`` filter per channel."""
    # conv1d with groups == channels applies each filter to its own channel only.
    kernels = filters.view(out_channel, 1, kernel_size)
    return F.conv1d(inputs, kernels, groups=out_channel)
# We can run this function as:
# Random input signal and per-channel filters at the dimensions declared above.
inputs = torch.randn(in_channel, width)
filters = torch.randn(out_channel, kernel_size)
# Reference output, used later to validate the TVM kernel.
ret_torch = torch_depthwise(inputs, filters)
# The `torch_depthwise` function, in a plain Python code, could be written as:
def vanilla_depthwise(input, weight):
    """Reference plain-loop depthwise 1-D convolution (one filter per channel)."""
    out_width = width - kernel_size + 1
    out = torch.zeros(out_channel, out_width)
    for channel in range(out_channel):
        for pos in range(out_width):
            # Accumulate the kernel taps in the same order as a sliding dot product.
            for tap in range(kernel_size):
                out[channel, pos] += weight[channel, tap] * input[channel, pos + tap]
    return out
# Then, we plan to optimize the `depthwise` function by leveraging the power of TVM.
# TVM community proposes an embedded Domain Specific Language in Python called TVMScript,
# which serves as the high-level frontend for TVM's Tensor IR.
# The depthwise 1D convolution code above can be translated to TVMScript as follows.
# We provide an `as_torch` decorator, which converts the TVMScript code to PyTorch's nn.Module automatically.
# TVMScript equivalent of the depthwise convolution above:
# C[j, i] = sum_k B[j, k] * A[j, i + k], wrapped as a PyTorch module by @as_torch.
@as_torch
@T.prim_func
def tvm_depthwise(
    A: T.Buffer((70, 80), "float32"),  # input signal: (channels, width)
    B: T.Buffer((70, 20), "float32"),  # filters: (channels, kernel_size)
    C: T.Buffer((70, 61), "float32"),  # output: (channels, width - kernel_size + 1)
) -> None:
    for j, i, k in T.grid(70, 61, 20):
        with T.block():
            # "SSR": i and j are spatial axes, k is the reduction axis.
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                # Zero the accumulator on the first reduction iteration.
                C[vj, vi] = T.float32(0)
            C[vj, vi] += B[vj, vk] * A[vj, vi + vk]
# We can build the TVMScript code by calling the `tune` method in default setting.
# Without providing extra information, the model will be tuned for CPU.
# Tune the TVMScript kernel with default settings (targets the CPU).
tvm_depthwise.tune()
# We can print out the tuned TVMScript code to see how the program is transformed, as
print(tvm_depthwise.script())
# We can verify that the two outputs are the same:
# The output buffer is preallocated and written in place by the TVM kernel.
ret_tvm = torch.zeros(out_channel, width - kernel_size + 1)
tvm_depthwise(inputs, filters, ret_tvm)
# Fix: this module only does ``import tvm``, so the bare name ``testing`` was a
# NameError; import and use the ``tvm.testing`` submodule explicitly.
import tvm.testing

tvm.testing.assert_allclose(ret_torch.cpu().numpy(), ret_tvm.cpu().numpy(), atol=1e-5, rtol=1e-5)
######################################################################
# Benchmark
# ---------
# Benchmark both implementations on five fresh random inputs each.
results = []
for i in range(5):
    inputs = torch.randn(out_channel, width)
    filters = torch.randn(out_channel, kernel_size)
    # Preallocated output buffer for the TVM kernel (written in place).
    res = torch.zeros(out_channel, width - kernel_size + 1)
    sub_label = f"[test {i}]"
    results.append(
        benchmark.Timer(
            stmt="tvm_depthwise(inputs, filters, res)",
            setup="from __main__ import tvm_depthwise",
            globals={"inputs": inputs, "filters": filters, "res": res},
            sub_label=sub_label,
            description="TVMScript",
        ).blocked_autorange()
    )
    results.append(
        benchmark.Timer(
            stmt="torch_depthwise(inputs, filters)",
            setup="from __main__ import torch_depthwise",
            globals={
                "inputs": inputs,
                "filters": filters,
            },
            sub_label=sub_label,
            description="PyTorch",
        ).blocked_autorange()
    )
# Render the TVMScript and PyTorch timings side by side.
compare = benchmark.Compare(results)
compare.print()
# In author's environment, the average inference time of `tvm_depthwise` is 120.0 us,
# while the average inference time of `torch_depthwise` is 196.0 us (PyTorch version is 1.11.0),
# showing the speedup of around 38%.
| 5,600 | 33.574074 | 109 | py |
tvm | tvm-main/gallery/how_to/work_with_pytorch/using_optimized_torch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile PyTorch Models
======================
**Author**:
`Yaoda Zhou <https://github.com/juda>`_
This article is a tutorial to optimize PyTorch models by using decorator `optimize_torch`.
To follow this tutorial, PyTorch, as well as TorchVision, should be installed:
.. code-block:: bash
%%shell
pip install torch
pip install torchvision
"""
# Import PyTorch
# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
# sphinx_gallery_end_ignore
import torch
import torch.nn as nn
import torch.nn.functional as F
# Import library for profiling
import torch.utils.benchmark as benchmark
from torchvision.models import resnet18
# Import `optimize_torch` function
from tvm.contrib.torch import optimize_torch
from tvm.meta_schedule import TuneConfig
######################################################################
# Define a simple module written by PyTorch
# -----------------------------------------
class SimpleModel(nn.Module):
    """Two stacked 5x5 convolutions, each followed by a ReLU."""

    def __init__(self):
        super().__init__()
        # 1 -> 20 channels, then 20 -> 20 channels; both use 5x5 kernels.
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        hidden = F.relu(self.conv1(x))
        return F.relu(self.conv2(hidden))
######################################################################
# Optimize SimpleModel by TVM MetaSchedule
# ----------------------------------------
# We provide the `optimize_torch` function, which has the similar usage as `torch.jit.trace`.
# The PyTorch model to optimize, along with its example input, are provided by users.
# The PyTorch module will be tuned by TVM for the target hardware.
# Without providing extra information, the model will be tuned for CPU.
# Tune the module for the default target (CPU). `optimize_torch` mirrors the
# `torch.jit.trace` interface: the model plus an example input.
simple_model = SimpleModel()
example_input = torch.randn(20, 1, 10, 10)
model_optimized_by_tvm = optimize_torch(simple_model, example_input)
######################################################################
# Save/Load module
# ----------------
# We can save and load our tuned module like the standard `nn.Module`.
# Let us run our tuned module.
ret1 = model_optimized_by_tvm(example_input)
torch.save(model_optimized_by_tvm, "model_optimized.pt")
model_loaded = torch.load("model_optimized.pt")
# We load the module and run it again.
ret2 = model_loaded(example_input)
# We will show 2 results:
# (1) we can safely load and save model by showing the result of model
# after save and load operations is still the same as original one;
# (2) the model we optimize returns the same result as the original PyTorch model.
ret3 = simple_model(example_input)
# BUG FIX: the original called `testing.assert_allclose`, but no module named
# `testing` is imported in this file, so these lines raised NameError.
# `torch.testing` is available through the existing `import torch`.
torch.testing.assert_close(ret1, ret2, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ret1, ret3, atol=1e-5, rtol=1e-5)
######################################################################
# Optimize resnet18
# -----------------
# In the following, we will show that our approach is able to
# accelerate common models, such as resnet18.
# We will tune our model for the GPU.
# A MetaSchedule target tag naming the GPU to tune for; replace it with a tag
# matching your own device.
target_cuda = "nvidia/geforce-rtx-3070"
# For PyTorch users, the code could be written as usual, except for
# applying "optimize_torch" function on the resnet18 model.
# NOTE(review): this call performs the tuning itself and needs a CUDA device
# matching the target above — it can take a long time.
resnet18_tvm = optimize_torch(
    resnet18().cuda().eval(), [torch.rand(1, 3, 224, 224).cuda()], target=target_cuda
)
# TorchScript also provides a built-in "optimize_for_inference" function to accelerate the inference.
resnet18_torch = torch.jit.optimize_for_inference(torch.jit.script(resnet18().cuda().eval()))
######################################################################
# Compare the performance between two approaches
# ----------------------------------------------
# Time both variants on five fresh random inputs and print a comparison table.
results = []
for trial in range(5):
    test_input = torch.rand(1, 3, 224, 224).cuda()
    sub_label = f"[test {trial}]"
    # One Timer per variant, sharing the same input and sub-label.
    for fn_name, description in (
        ("resnet18_tvm", "tuning by meta"),
        ("resnet18_torch", "tuning by jit"),
    ):
        timer = benchmark.Timer(
            stmt=f"{fn_name}(test_input)",
            setup=f"from __main__ import {fn_name}",
            globals={"test_input": test_input},
            sub_label=sub_label,
            description=description,
        )
        results.append(timer.blocked_autorange())
compare = benchmark.Compare(results)
compare.print()
# In author's environment, the average inference time of `resnet18_tvm` is 620.0 us,
# while the average inference time of `resnet18_torch` is 980.0 us (PyTorch version is 1.11.0),
# showing the speedup of around 38%.
| 5,431 | 34.272727 | 101 | py |
tvm | tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a Convolutional Network for Mobile GPU
==================================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy>`_
Auto-tuning for a specific device is critical for getting the best
performance. This is a tutorial about how to tune a whole convolutional
network.
The operator implementation for Mobile GPU in TVM is written in template form.
The template has many tunable knobs (tile factor, vectorization, unrolling, etc).
We will tune all convolution, depthwise convolution and dense operators
in the neural network. After tuning, we produce a log file which stores
the best knob values for all required operators. When the TVM compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some arm devices. You can go to
`Mobile GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#mobile-gpu>`_
to see the results.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
import numpy as np
import tvm
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_executor as runtime
#################################################################
# Define network
# --------------
# First we need to define the network in relay frontend API.
# We can load some pre-defined network from :code:`relay.testing`.
# We can also load models from MXNet, ONNX and TensorFlow.
def get_network(name, batch_size):
    """Return ``(mod, params, input_shape, output_shape)`` for a named test network.

    ``name`` selects an entry from the ``relay.testing`` model zoo (or the
    "mxnet" import example). Reads the module-level global ``dtype`` defined
    in the tuning options below. Raises ValueError for unknown names.
    """
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)

    if "resnet" in name:
        depth = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=depth, batch_size=batch_size, dtype=dtype
        )
    elif "vgg" in name:
        depth = int(name.split("-")[1])
        mod, params = relay.testing.vgg.get_workload(
            num_layers=depth, batch_size=batch_size, dtype=dtype
        )
    elif name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "squeezenet_v1.1":
        mod, params = relay.testing.squeezenet.get_workload(
            batch_size=batch_size, version="1.1", dtype=dtype
        )
    elif name == "inception_v3":
        # Inception-v3 takes 299x299 inputs.
        input_shape = (batch_size, 3, 299, 299)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "mxnet":
        # Example of importing an external (MXNet) model.
        from mxnet.gluon.model_zoo.vision import get_model

        block = get_model("resnet18_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        # Append a softmax so the network emits probabilities.
        func = mod["main"]
        func = relay.Function(
            func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
        )
        mod = tvm.IRModule.from_expr(func)
    else:
        raise ValueError("Unsupported network: " + name)

    return mod, params, input_shape, output_shape
#################################################################
# .. _tutorials-autotvm-start-rpc-tracker:
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# the TVM runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install TVM RPC APK on the android device. Make sure you can pass the android RPC test.
# Then you have already registered your device. During tuning, you have to go to developer option
# and enable "Keep screen awake during changing" and charge your phone to make it stable.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations. Here I use an RK3399 board
# as example. In your setting, you should modify the target and device_key accordingly.
# set :code:`use_android` to True if you use android phone.
#### DEVICE CONFIG ####
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device.
# OpenCL target for the board's Mali GPU; the host triple cross-compiles the
# CPU-side glue code (query yours with `gcc -v` on the device).
target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu")
# Also replace this with the device key in your tracker
device_key = "rk3399"
# Set this to True if you use android phone
use_android = False

#### TUNING OPTION ####
network = "resnet-18"
log_file = "%s.%s.log" % (device_key, network)
# NOTE: `dtype` is also read (as a global) by get_network() above.
dtype = "float32"

tuning_option = {
    "log_filename": log_file,
    "tuner": "xgb",
    "n_trial": 1000,
    "early_stopping": 450,
    "measure_option": autotvm.measure_option(
        # Android builds need the NDK toolchain to produce a loadable .so.
        builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"),
        runner=autotvm.RPCRunner(
            device_key,
            host="127.0.0.1",
            port=9190,
            number=10,  # measurements per configuration
            timeout=5,  # seconds before a measurement is abandoned
        ),
    ),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default values provided here work well.
# If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
# which makes the tuning run longer.
# If your device runs very slow or your conv2d operators have many GFLOPs, considering to
# set timeout larger.
#
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of extracted tasks sequentially.

    Tuning records accumulate in ``log_filename + ".tmp"``; once every task is
    done, the best record per workload is distilled into ``log_filename``.
    """
    # Start from a clean temporary log so stale entries don't leak in.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    # XGB tuner flavours: name -> (loss_type, feature_type or None).
    xgb_variants = {
        "xgb": ("reg", None),
        "xgb_knob": ("reg", "knob"),
        "xgb_itervar": ("reg", "itervar"),
        "xgb_curve": ("reg", "curve"),
        "xgb_rank": ("rank", None),
        "xgb_rank_knob": ("rank", "knob"),
        "xgb_rank_itervar": ("rank", "itervar"),
        "xgb_rank_curve": ("rank", "curve"),
        "xgb_rank_binary": ("rank-binary", None),
        "xgb_rank_binary_knob": ("rank-binary", "knob"),
        "xgb_rank_binary_itervar": ("rank-binary", "itervar"),
        "xgb_rank_binary_curve": ("rank-binary", "curve"),
    }

    for i, tsk in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        # Instantiate the requested tuner.
        if tuner in xgb_variants:
            loss_type, feature_type = xgb_variants[tuner]
            if feature_type is None:
                tuner_obj = XGBTuner(tsk, loss_type=loss_type)
            else:
                tuner_obj = XGBTuner(tsk, loss_type=loss_type, feature_type=feature_type)
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)

        # Warm-start from records gathered while tuning the previous tasks.
        if use_transfer_learning:
            if os.path.isfile(tmp_log_file):
                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))

        # Never ask for more trials than the config space holds.
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )

    # Distill the best entry per workload into the final log; drop the temp file.
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
    """End-to-end driver: extract conv2d tasks, tune them, then compile,
    deploy to the tracked remote device over RPC, and report inference time.

    ``tuning_opt`` is the ``tuning_option`` dict defined above (see
    ``tune_tasks`` for the accepted keys). Reads the module-level globals
    ``network``, ``target``, ``log_file``, ``use_android``, ``device_key``
    and ``dtype``.
    """
    # extract workloads from relay program
    print("Extract tasks...")
    mod, params, input_shape, _ = get_network(network, batch_size=1)
    tasks = autotvm.task.extract_from_program(
        mod["main"],
        target=target,
        params=params,
        ops=(relay.op.get("nn.conv2d"),),  # only conv2d workloads are tuned
    )
    # run tuning tasks
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)
    # compile kernels with history best records
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, target=target, params=params)
        # export library (Android needs an NDK-built shared object)
        tmp = tempdir()
        if use_android:
            from tvm.contrib import ndk

            filename = "net.so"
            lib.export_library(tmp.relpath(filename), ndk.create_shared)
        else:
            filename = "net.tar"
            lib.export_library(tmp.relpath(filename))
        # upload module to device via the RPC tracker on localhost:9190
        print("Upload...")
        remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000)
        remote.upload(tmp.relpath(filename))
        rlib = remote.load_module(filename)
        # upload parameters to device
        dev = remote.device(str(target), 0)
        module = runtime.GraphModule(rlib["default"](dev))
        data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
        module.set_input("data", data_tvm)
        # evaluate: 30 repeats of a single run on the remote device
        print("Evaluate inference time cost...")
        print(module.benchmark(dev, number=1, repeat=30))
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended.
# One sample output is listed below. It takes about 3 hours on a 32T AMD Ryzen Threadripper.
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/17] Current/Best: 25.30/ 39.12 GFLOPS | Progress: (992/1000) | 751.22 s Done.
# [Task 2/17] Current/Best: 40.70/ 45.50 GFLOPS | Progress: (736/1000) | 545.46 s Done.
# [Task 3/17] Current/Best: 38.83/ 42.35 GFLOPS | Progress: (992/1000) | 1549.85 s Done.
# [Task 4/17] Current/Best: 23.31/ 31.02 GFLOPS | Progress: (640/1000) | 1059.31 s Done.
# [Task 5/17] Current/Best: 0.06/ 2.34 GFLOPS | Progress: (544/1000) | 305.45 s Done.
# [Task 6/17] Current/Best: 10.97/ 17.20 GFLOPS | Progress: (992/1000) | 1050.00 s Done.
# [Task 7/17] Current/Best: 8.98/ 10.94 GFLOPS | Progress: (928/1000) | 421.36 s Done.
# [Task 8/17] Current/Best: 4.48/ 14.86 GFLOPS | Progress: (704/1000) | 582.60 s Done.
# [Task 9/17] Current/Best: 10.30/ 25.99 GFLOPS | Progress: (864/1000) | 899.85 s Done.
# [Task 10/17] Current/Best: 11.73/ 12.52 GFLOPS | Progress: (608/1000) | 304.85 s Done.
# [Task 11/17] Current/Best: 15.26/ 18.68 GFLOPS | Progress: (800/1000) | 747.52 s Done.
# [Task 12/17] Current/Best: 17.48/ 26.71 GFLOPS | Progress: (1000/1000) | 1166.40 s Done.
# [Task 13/17] Current/Best: 0.96/ 11.43 GFLOPS | Progress: (960/1000) | 611.65 s Done.
# [Task 14/17] Current/Best: 17.88/ 20.22 GFLOPS | Progress: (672/1000) | 670.29 s Done.
# [Task 15/17] Current/Best: 11.62/ 13.98 GFLOPS | Progress: (736/1000) | 449.25 s Done.
# [Task 16/17] Current/Best: 19.90/ 23.83 GFLOPS | Progress: (608/1000) | 708.64 s Done.
# [Task 17/17] Current/Best: 17.98/ 22.75 GFLOPS | Progress: (736/1000) | 1122.60 s Done.
# Compile...
# Upload...
# Evaluate inference time cost...
# Mean inference time (std dev): 128.05 ms (7.74 ms)
#
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
| 17,315 | 38.534247 | 100 | py |
tvm | tvm-main/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tuning High Performance Convolution on NVIDIA GPUs
=========================================================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_
This is an advanced tutorial for writing high performance tunable template for
NVIDIA GPU. By running auto-tuner on this template, we can outperform the
vendor provided library CuDNN in many cases.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
######################################################################
# Install dependencies
# --------------------
# To use autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado cloudpickle
#
# To make TVM run faster in tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
# sphinx_gallery_end_ignore
import logging
import sys
import numpy as np
import tvm
from tvm import te, topi, testing
from tvm.topi.testing import conv2d_nchw_python
import tvm.testing
from tvm import autotvm
######################################################################
# Step 1: Define the search space
# --------------------------------
# There are plenty of useful schedule primitives in tvm. You can also find
# some tutorials that describe them in more details, such as
# (1). :ref:`opt-conv-gpu`
# (2). `Optimizing DepthwiseConv on NVIDIA GPU <https://tvm.apache.org/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example>`_
#
# However, their implementations are manually tuned for some special input
# shapes. In this section, we build a large enough space to cover
# the techniques used in these tutorials. Then we rely on the efficient auto-tuner
# to search through this space and pick some good configurations.
#
# If you are familiar with writing cuda schedule, you can find the following
# template is very general. Actually this template can be easily modified
# to tune other operators such as depthwise convolution and GEMM.
# In order to fully understand this template, you should be familiar with
# the schedule primitives and auto tuning API. You can refer to the above
# tutorials and :ref:`autotvm tutorial <tutorial-autotvm-matmul-x86>`
#
# It is worth noting that the search space for a conv2d operator
# can be very large (at the level of 10^9 for some input shapes)
#
@autotvm.template("tutorial/conv2d_no_batching")
def conv2d_no_batching(N, H, W, CO, CI, KH, KW, stride, padding):
    """Tunable TE schedule template for a batch-1 NCHW conv2d on CUDA.

    Returns ``(s, [raw_data, kernel, conv])``; tiling, caching and unrolling
    decisions are exposed as autotvm knobs.
    """
    assert N == 1, "Only consider batch_size = 1 in this template"

    data = te.placeholder((N, CI, H, W), name="data")
    kernel = te.placeholder((CO, CI, KH, KW), name="kernel")
    conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32")
    s = te.create_schedule([conv.op])

    ##### space definition begin #####
    n, f, y, x = s[conv].op.axis
    rc, ry, rx = s[conv].op.reduce_axis

    cfg = autotvm.get_config()
    # 4-way splits of the output axes; 3-way splits of the reduction axes.
    cfg.define_split("tile_f", f, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rc", rc, num_outputs=3)
    cfg.define_split("tile_ry", ry, num_outputs=3)
    cfg.define_split("tile_rx", rx, num_outputs=3)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    cfg.define_knob("unroll_explicit", [0, 1])
    ##### space definition end #####

    # inline padding
    pad_data = s[conv].op.input_tensors[0]
    s[pad_data].compute_inline()
    data, raw_data = pad_data, data

    output = conv
    OL = s.cache_write(conv, "local")

    # create cache stage
    AA = s.cache_read(data, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])
    AL = s.cache_read(AA, "local", [OL])
    WL = s.cache_read(WW, "local", [OL])

    # tile and bind spatial axes
    n, f, y, x = s[output].op.axis
    bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    kernel_scope = n  # this is the scope to attach global config inside this kernel

    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(n, bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
    s[OL].compute_at(s[output], tx)

    # tile reduction axes
    n, f, y, x = s[OL].op.axis
    rc, ry, rx = s[OL].op.reduce_axis
    rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
    # NOTE(review): "tile_rx" is applied to `ry` and "tile_ry" to `rx` — the
    # knob names look swapped. Harmless while KH == KW (as in this tutorial),
    # but confirm before reusing the template with asymmetric kernels.
    ryo, rym, ryi = cfg["tile_rx"].apply(s, OL, ry)
    rxo, rxm, rxi = cfg["tile_ry"].apply(s, OL, rx)
    s[OL].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, n, f, y, x)

    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)
    s[AL].compute_at(s[OL], rxm)
    s[WL].compute_at(s[OL], rxm)

    # cooperative fetching: all threads of a block load the shared tiles together
    for load in [AA, WW]:
        n, f, y, x = s[load].op.axis
        fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))

    # tune unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)

    return s, [raw_data, kernel, conv]
######################################################################
# Step 2: Search through the space
# ---------------------------------
# We pick the last layer on resnet as test case.
# Since our space is very large, :code:`XGBoostTuner` is most suitable
# for our case. Here we only do 20 trials for demonstration.
# In practice, making 1000 trials usually can find some good kernels
# for this template
# logging config (for printing tuning log to screen)
logging.getLogger("autotvm").setLevel(logging.DEBUG)
logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout))

# the last layer in resnet
N, H, W, CO, CI, KH, KW, strides, padding = 1, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1)
# The task name must match the @autotvm.template registration above.
task = autotvm.task.create(
    "tutorial/conv2d_no_batching", args=(N, H, W, CO, CI, KH, KW, strides, padding), target="cuda"
)
print(task.config_space)

# Use local gpu, measure 10 times for every config to reduce variance
# The timeout of compiling a program is 10 seconds, the timeout for running is 4 seconds
measure_option = autotvm.measure_option(
    builder=autotvm.LocalBuilder(),
    runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
)
record_file = None
# Begin tuning, log records to file `conv2d.log`
# During tuning we will also try many invalid configs, so you are expected to
# see many error reports. As long as you can see non-zero GFLOPS, it is okay.

# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following lines to run it by yourself.

# tuner = autotvm.tuner.XGBTuner(task)
# record_file = "conv2d.log"
# tuner.tune(
#     n_trial=5,
#     measure_option=measure_option,
#     callbacks=[autotvm.callback.log_to_file(record_file)],
# )

#########################################################################
# Finally, we can inspect the best config from log file, check correctness,
# and measure running time.

# inspect the best config
# NOTE(review): with the tuner lines above commented out, `record_file` stays
# None, so `apply_history_best(None)` holds no records and `query()` falls
# back to a default configuration — uncomment the tuning block to use real
# records. Confirm this is the intended demo behavior.
dispatch_context = autotvm.apply_history_best(record_file)
best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config:")
print(best_config)

# apply history best from log file
with autotvm.apply_history_best(record_file):
    with tvm.target.Target("cuda"):
        s, arg_bufs = conv2d_no_batching(N, H, W, CO, CI, KH, KW, strides, padding)
        func = tvm.build(s, arg_bufs)

# check correctness against the reference NumPy implementation
a_np = np.random.uniform(size=(N, CI, H, W)).astype(np.float32)
w_np = np.random.uniform(size=(CO, CI, KH, KW)).astype(np.float32)
c_np = conv2d_nchw_python(a_np, w_np, strides, padding)

dev = tvm.cuda()
a_tvm = tvm.nd.array(a_np, device=dev)
w_tvm = tvm.nd.array(w_np, device=dev)
c_tvm = tvm.nd.empty(c_np.shape, device=dev)
func(a_tvm, w_tvm, c_tvm)

tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-2)

# Evaluate running time. Here we choose a large repeat number (400) to reduce the noise
# and the overhead of kernel launch. You can also use nvprof to validate the result.
evaluator = func.time_evaluator(func.entry_name, dev, number=400)
print("Time cost of this operator: %f" % evaluator(a_tvm, w_tvm, c_tvm).mean)
| 10,170 | 39.043307 | 162 | py |
tvm | tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_arm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tune_relay_arm:
Auto-tuning a Convolutional Network for ARM CPU
===============================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Zhao Wu <https://github.com/FrozenGene>`_, `Eddie Yan <https://github.com/eqy>`_
Auto-tuning for a specific ARM device is critical for getting the best
performance. This is a tutorial about how to tune a whole convolutional
network.
The operator implementation for ARM CPU in TVM is written in template form.
The template has many tunable knobs (tile factor, vectorization, unrolling, etc).
We will tune all convolution and depthwise convolution operators
in the neural network. After tuning, we produce a log file which stores
the best knob values for all required operators. When the TVM compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some arm devices. You can go to
`ARM CPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#arm-cpu>`_
to see the results.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use cython
# as FFI of TVM. In the root directory of TVM, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
import numpy as np
import tvm
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_executor as runtime
#################################################################
# Define network
# --------------
# First we need to define the network in relay frontend API.
# We can load some pre-defined network from :code:`relay.testing`.
# We can also load models from MXNet, ONNX and TensorFlow.
def get_network(name, batch_size):
    """Build a relay module and random weights for the named network.

    Parameters
    ----------
    name : str
        Network identifier: "resnet-N", "vgg-N", "mobilenet",
        "squeezenet_v1.1", "inception_v3", or "mxnet" (example import).
    batch_size : int
        Batch dimension of the input tensor.

    Returns
    -------
    tuple
        ``(mod, params, input_shape, output_shape)``.

    Raises
    ------
    ValueError
        If ``name`` is not one of the supported identifiers.

    Note: reads the module-level ``dtype`` variable defined below.
    """
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)
    if "resnet" in name:
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif "vgg" in name:
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.vgg.get_workload(
            num_layers=n_layer, batch_size=batch_size, dtype=dtype
        )
    elif name == "mobilenet":
        # Pass dtype explicitly for consistency with the other branches
        # (previously this branch silently ignored the module-level dtype).
        mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "squeezenet_v1.1":
        mod, params = relay.testing.squeezenet.get_workload(
            batch_size=batch_size, version="1.1", dtype=dtype
        )
    elif name == "inception_v3":
        # Inception v3 uses a larger 299x299 input.
        input_shape = (batch_size, 3, 299, 299)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "mxnet":
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model

        block = get_model("resnet18_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = mod["main"]
        # Append a softmax so the imported model emits probabilities.
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        mod = tvm.IRModule.from_expr(net)
    else:
        raise ValueError("Unsupported network: " + name)
    return mod, params, input_shape, output_shape
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# the TVM runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install the TVM RPC APK on the android device. Make sure you can pass the android rpc test.
# Then you have already registered your device. During tuning, you have to go to developer option
# and enable "Keep screen awake during changing" and charge your phone to make it stable.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations. Here I use an RK3399 board
# as example. In your setting, you should modify the target and device_key accordingly.
# set :code:`use_android` to True if you use android phone.
#### DEVICE CONFIG ####
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
# Cross-compilation target; the -mtriple must match the board's CPU toolchain.
target = tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu")
# Also replace this with the device key in your tracker
device_key = "rk3399"
# Set this to True if you use android phone
use_android = False
#### TUNING OPTION ####
# Network to tune; the log file collects the best schedule per workload.
network = "resnet-18"
log_file = "%s.%s.log" % (device_key, network)
dtype = "float32"
tuning_option = {
    "log_filename": log_file,  # destination of the best tuning records
    "tuner": "xgb",  # search strategy; see tune_tasks() for available names
    "n_trial": 1500,  # upper bound on configurations tried per task
    "early_stopping": 800,  # larger values make tuning run longer (see note below)
    "measure_option": autotvm.measure_option(
        # "ndk" cross-compiles with the Android NDK when targeting a phone.
        builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"),
        runner=autotvm.RPCRunner(
            device_key,  # boards registered under this key do the measuring
            host="127.0.0.1",  # address/port of the RPC tracker started above
            port=9190,
            number=5,  # measurement count — see autotvm.measure_option docs
            timeout=10,
        ),
    ),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default values provided here work well.
# If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
# which makes the tuning run longer.
# If your device runs very slow or your conv2d operators have many GFLOPs, considering to
# set timeout larger.
#
# If your model has depthwise convolution, you could consider setting
# :code:`try_spatial_pack_depthwise` be :code:`True`, which perform better than default
# optimization in general. For example, on ARM CPU A53 2.0GHz, we find it could boost 1.6x
# performance of depthwise convolution on Mobilenet V1 model.
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of autotvm tasks sequentially and keep the best records.

    Parameters
    ----------
    tasks : list
        Tasks extracted by ``autotvm.task.extract_from_program``.
    measure_option : dict
        Options created by ``autotvm.measure_option``.
    tuner : str
        Search strategy: "ga", "random", "gridsearch", or one of the
        XGBoost variants listed in ``xgb_variants`` below.
    n_trial : int
        Upper bound on the number of configurations tried per task.
    early_stopping : int or None
        Early-stopping setting forwarded to the tuner.
    log_filename : str
        File that receives the best record of every tuned workload.
    use_transfer_learning : bool
        When True, seed each tuner with the records gathered so far.

    Raises
    ------
    ValueError
        If ``tuner`` is not a recognized name.
    """
    # All trials go to a temporary file first; only the best record per
    # workload is promoted to ``log_filename`` at the end.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    # Keyword arguments for every XGBTuner variant, keyed by tuner name
    # (hoisted out of the loop; replaces a 16-branch if/elif chain).
    xgb_variants = {
        "xgb": {"loss_type": "reg"},
        "xgb_knob": {"loss_type": "reg", "feature_type": "knob"},
        "xgb_itervar": {"loss_type": "reg", "feature_type": "itervar"},
        "xgb_curve": {"loss_type": "reg", "feature_type": "curve"},
        "xgb_rank": {"loss_type": "rank"},
        "xgb_rank_knob": {"loss_type": "rank", "feature_type": "knob"},
        "xgb_rank_itervar": {"loss_type": "rank", "feature_type": "itervar"},
        "xgb_rank_curve": {"loss_type": "rank", "feature_type": "curve"},
        "xgb_rank_binary": {"loss_type": "rank-binary"},
        "xgb_rank_binary_knob": {"loss_type": "rank-binary", "feature_type": "knob"},
        "xgb_rank_binary_itervar": {"loss_type": "rank-binary", "feature_type": "itervar"},
        "xgb_rank_binary_curve": {"loss_type": "rank-binary", "feature_type": "curve"},
    }
    for i, tsk in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        # create tuner
        if tuner in xgb_variants:
            tuner_obj = XGBTuner(tsk, **xgb_variants[tuner])
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)
        if use_transfer_learning:
            if os.path.isfile(tmp_log_file):
                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
        # process tuning; never try more configs than the space holds
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # pick best records to a cache file; guard against an empty task list,
    # in which case the temporary log was never created (previously this
    # raised FileNotFoundError).
    if os.path.exists(tmp_log_file):
        autotvm.record.pick_best(tmp_log_file, log_filename)
        os.remove(tmp_log_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
    """Tune the network's conv2d tasks, build it, and benchmark it on the remote board."""
    # Extract the tunable conv2d workloads from the relay program.
    print("Extract tasks...")
    mod, params, input_shape, _ = get_network(network, batch_size=1)
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
    )

    # Tune every extracted task with the options supplied by the caller.
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)

    # Compile the whole network with the best schedules found above.
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with tvm.transform.PassContext(opt_level=3):
            built_lib = relay.build_module.build(mod, target=target, params=params)

        # Export the library; Android needs the NDK to produce a shared object.
        build_dir = tempdir()
        lib_name = "net.so" if use_android else "net.tar"
        lib_path = build_dir.relpath(lib_name)
        if use_android:
            from tvm.contrib import ndk

            built_lib.export_library(lib_path, ndk.create_shared)
        else:
            built_lib.export_library(lib_path)

        # Ship the compiled module to the board through the RPC tracker.
        print("Upload...")
        remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000)
        remote.upload(lib_path)
        rlib = remote.load_module(lib_name)

        # Instantiate the graph executor on the remote device and feed it input.
        dev = remote.device(str(target), 0)
        graph_module = runtime.GraphModule(rlib["default"](dev))
        random_input = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
        graph_module.set_input("data", random_input)

        # Measure the end-to-end inference latency.
        print("Evaluate inference time cost...")
        print(graph_module.benchmark(dev, number=1, repeat=10))
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended.
# One sample output is listed below.
# It takes about 2 hours on a 32T AMD Ryzen Threadripper.
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/12] Current/Best: 22.37/ 52.19 GFLOPS | Progress: (544/1000) | 406.59 s Done.
# [Task 2/12] Current/Best: 6.51/ 18.77 GFLOPS | Progress: (608/1000) | 325.05 s Done.
# [Task 3/12] Current/Best: 4.67/ 24.87 GFLOPS | Progress: (480/1000) | 372.31 s Done.
# [Task 4/12] Current/Best: 11.35/ 46.83 GFLOPS | Progress: (736/1000) | 602.39 s Done.
# [Task 5/12] Current/Best: 1.01/ 19.80 GFLOPS | Progress: (448/1000) | 262.16 s Done.
# [Task 6/12] Current/Best: 2.47/ 23.76 GFLOPS | Progress: (672/1000) | 563.85 s Done.
# [Task 7/12] Current/Best: 14.57/ 33.97 GFLOPS | Progress: (544/1000) | 465.15 s Done.
# [Task 8/12] Current/Best: 1.13/ 17.65 GFLOPS | Progress: (576/1000) | 365.08 s Done.
# [Task 9/12] Current/Best: 14.45/ 22.66 GFLOPS | Progress: (928/1000) | 724.25 s Done.
# [Task 10/12] Current/Best: 3.22/ 15.36 GFLOPS | Progress: (864/1000) | 564.27 s Done.
# [Task 11/12] Current/Best: 11.03/ 32.23 GFLOPS | Progress: (736/1000) | 635.15 s Done.
# [Task 12/12] Current/Best: 8.00/ 21.65 GFLOPS | Progress: (1000/1000) | 1111.81 s Done.
# Compile...
# Upload...
# Evaluate inference time cost...
# Mean inference time (std dev): 162.59 ms (0.06 ms)
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
| 17,042 | 38.269585 | 143 | py |
tvm | tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a Convolutional Network for NVIDIA GPU
==================================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy/>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole convolutional
network for NVIDIA GPU.
The operator implementation for NVIDIA GPU in TVM is written in template form.
The template has many tunable knobs (tile factor, unrolling, etc).
We will tune all convolution and depthwise convolution operators
in the neural network. After tuning, we produce a log file which stores
the best knob values for all required operators. When the TVM compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some NVIDIA GPUs. You can go to
`NVIDIA GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#nvidia-gpu>`_
to see the results.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute:
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
# sphinx_gallery_end_ignore
import os
import numpy as np
import tvm
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import tvm.contrib.graph_executor as runtime
#################################################################
# Define Network
# --------------
# First we need to define the network in relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX and TensorFlow.
def get_network(name, batch_size):
    """Create the relay module and random parameters for the requested model.

    ``name`` selects one of "resnet-N", "vgg-N", "mobilenet",
    "squeezenet_v1.1", "inception_v3" or "mxnet" (example import); any other
    value raises ValueError.  Returns ``(mod, params, input_shape,
    output_shape)``.  Reads the module-level ``dtype`` defined below.
    """
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)
    if "resnet" in name:
        depth = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=depth, batch_size=batch_size, dtype=dtype
        )
        return mod, params, input_shape, output_shape
    if "vgg" in name:
        depth = int(name.split("-")[1])
        mod, params = relay.testing.vgg.get_workload(
            num_layers=depth, batch_size=batch_size, dtype=dtype
        )
        return mod, params, input_shape, output_shape
    if name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
        return mod, params, input_shape, output_shape
    if name == "squeezenet_v1.1":
        mod, params = relay.testing.squeezenet.get_workload(
            batch_size=batch_size, version="1.1", dtype=dtype
        )
        return mod, params, input_shape, output_shape
    if name == "inception_v3":
        # Inception v3 expects a larger 299x299 input.
        input_shape = (batch_size, 3, 299, 299)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
        return mod, params, input_shape, output_shape
    if name == "mxnet":
        # Example of importing an external mxnet model.
        from mxnet.gluon.model_zoo.vision import get_model

        block = get_model("resnet18_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        # Append a softmax so the imported network emits probabilities.
        func = mod["main"]
        func = relay.Function(
            func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
        )
        mod = tvm.IRModule.from_expr(func)
        return mod, params, input_shape, output_shape
    raise ValueError("Unsupported network: " + name)
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we apply some configurations.
#### DEVICE CONFIG ####
# Compilation target: the local NVIDIA GPU.
target = tvm.target.cuda()
#### TUNING OPTION ####
# Network to tune; the log file collects the best schedule per workload.
network = "resnet-18"
log_file = "%s.log" % network
dtype = "float32"
tuning_option = {
    "log_filename": log_file,  # destination of the best tuning records
    "tuner": "xgb",  # search strategy; see tune_tasks() for available names
    "n_trial": 2000,  # upper bound on configurations tried per task
    "early_stopping": 600,  # larger values make tuning run longer (see note below)
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(timeout=10),
        # Measurement knobs — see the autotvm.measure_option documentation.
        runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150),
    ),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default value provided here works well.
#
# If you have large time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
# which makes the tuning runs longer.
#
# If you have multiple devices, you can use all of them for measurement to
# accelerate the tuning process. (see the 'Scale up measurement` section below).
#
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of autotvm tasks sequentially and keep the best records.

    Parameters
    ----------
    tasks : list
        Tasks extracted by ``autotvm.task.extract_from_program``.
    measure_option : dict
        Options created by ``autotvm.measure_option``.
    tuner : str
        Search strategy: "ga", "random", "gridsearch", or one of the
        XGBoost variants listed in ``xgb_variants`` below.
    n_trial : int
        Upper bound on the number of configurations tried per task.
    early_stopping : int or None
        Early-stopping setting forwarded to the tuner.
    log_filename : str
        File that receives the best record of every tuned workload.
    use_transfer_learning : bool
        When True, seed each tuner with the records gathered so far.

    Raises
    ------
    ValueError
        If ``tuner`` is not a recognized name.
    """
    # All trials go to a temporary file first; only the best record per
    # workload is promoted to ``log_filename`` at the end.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    # Keyword arguments for every XGBTuner variant, keyed by tuner name
    # (hoisted out of the loop; replaces a 16-branch if/elif chain).
    xgb_variants = {
        "xgb": {"loss_type": "reg"},
        "xgb_knob": {"loss_type": "reg", "feature_type": "knob"},
        "xgb_itervar": {"loss_type": "reg", "feature_type": "itervar"},
        "xgb_curve": {"loss_type": "reg", "feature_type": "curve"},
        "xgb_rank": {"loss_type": "rank"},
        "xgb_rank_knob": {"loss_type": "rank", "feature_type": "knob"},
        "xgb_rank_itervar": {"loss_type": "rank", "feature_type": "itervar"},
        "xgb_rank_curve": {"loss_type": "rank", "feature_type": "curve"},
        "xgb_rank_binary": {"loss_type": "rank-binary"},
        "xgb_rank_binary_knob": {"loss_type": "rank-binary", "feature_type": "knob"},
        "xgb_rank_binary_itervar": {"loss_type": "rank-binary", "feature_type": "itervar"},
        "xgb_rank_binary_curve": {"loss_type": "rank-binary", "feature_type": "curve"},
    }
    for i, tsk in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        # create tuner
        if tuner in xgb_variants:
            tuner_obj = XGBTuner(tsk, **xgb_variants[tuner])
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=100)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)
        if use_transfer_learning:
            if os.path.isfile(tmp_log_file):
                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
        # do tuning; never try more configs than the space holds
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # pick best records to a cache file; guard against an empty task list,
    # in which case the temporary log was never created (previously this
    # raised FileNotFoundError).
    if os.path.exists(tmp_log_file):
        autotvm.record.pick_best(tmp_log_file, log_filename)
        os.remove(tmp_log_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
    """Tune the network's conv2d tasks, build it, and benchmark it on the local GPU."""
    # Extract the tunable conv2d workloads from the relay program.
    print("Extract tasks...")
    mod, params, input_shape, _ = get_network(network, batch_size=1)
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
    )

    # Tune every extracted task with the options supplied by the caller.
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)

    # Compile the whole network with the best schedules found above.
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with tvm.transform.PassContext(opt_level=3):
            built_lib = relay.build_module.build(mod, target=target, params=params)

        # Instantiate the graph executor on the GPU and feed it random input.
        dev = tvm.device(str(target), 0)
        graph_module = runtime.GraphModule(built_lib["default"](dev))
        random_input = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
        graph_module.set_input("data", random_input)

        # Measure the end-to-end inference latency.
        print("Evaluate inference time cost...")
        print(graph_module.benchmark(dev, number=1, repeat=600))
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended. One sample output is listed below.
# It takes about 4 hours to get the following output on a 32T AMD Ryzen Threadripper.
# The tuning target is NVIDIA 1080 Ti.
# (You can see some errors during compilation. If the tuning is not stuck, it is okay.)
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/12] Current/Best: 541.83/3570.66 GFLOPS | Progress: (960/2000) | 1001.31 s Done.
# [Task 2/12] Current/Best: 0.56/ 803.33 GFLOPS | Progress: (704/2000) | 608.08 s Done.
# [Task 3/12] Current/Best: 103.69/1141.25 GFLOPS | Progress: (768/2000) | 702.13 s Done.
# [Task 4/12] Current/Best: 2905.03/3925.15 GFLOPS | Progress: (864/2000) | 745.94 sterminate called without an active exception
# [Task 4/12] Current/Best: 2789.36/3925.15 GFLOPS | Progress: (1056/2000) | 929.40 s Done.
# [Task 5/12] Current/Best: 89.06/1076.24 GFLOPS | Progress: (704/2000) | 601.73 s Done.
# [Task 6/12] Current/Best: 40.39/2129.02 GFLOPS | Progress: (1088/2000) | 1125.76 s Done.
# [Task 7/12] Current/Best: 4090.53/5007.02 GFLOPS | Progress: (800/2000) | 903.90 s Done.
# [Task 8/12] Current/Best: 4.78/1272.28 GFLOPS | Progress: (768/2000) | 749.14 s Done.
# [Task 9/12] Current/Best: 1391.45/2325.08 GFLOPS | Progress: (992/2000) | 1084.87 s Done.
# [Task 10/12] Current/Best: 1995.44/2383.59 GFLOPS | Progress: (864/2000) | 862.60 s Done.
# [Task 11/12] Current/Best: 4093.94/4899.80 GFLOPS | Progress: (224/2000) | 240.92 sterminate called without an active exception
# [Task 11/12] Current/Best: 3487.98/4909.91 GFLOPS | Progress: (480/2000) | 534.96 sterminate called without an active exception
# [Task 11/12] Current/Best: 4636.84/4912.17 GFLOPS | Progress: (1184/2000) | 1381.16 sterminate called without an active exception
# [Task 11/12] Current/Best: 50.12/4912.17 GFLOPS | Progress: (1344/2000) | 1602.81 s Done.
# [Task 12/12] Current/Best: 3581.31/4286.30 GFLOPS | Progress: (736/2000) | 943.52 s Done.
# Compile...
# Evaluate inference time cost...
# Mean inference time (std dev): 1.07 ms (0.05 ms)
#
# As a reference baseline, the time cost of MXNet + TensorRT on resnet-18 is 1.30ms. So we are a little faster.
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
#################################################################
# .. _tutorials-autotvm-scale-up-rpc-tracker:
#################################################################
# Scale up measurement by using multiple devices
# ----------------------------------------------
# If you have multiple devices, you can use all of them for measurement.
# TVM uses the RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 GPU cards, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#
# Then open another new terminal for the RPC server. We need to start one dedicated server
# for each device. We use a string key to distinguish the types of devices.
# You can pick a name you like.
# (Note: For rocm backend, there are some internal errors with the compiler,
# we need to add `--no-fork` to the argument list.)
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=127.0.0.1:9190 --key=1080ti
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=127.0.0.1 --port=9190
#
# For example, if we have four 1080ti, two titanx and one gfx900, the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# 1080ti 4 4 0
# titanx 2 2 0
# gfx900 1 1 0
# ----------------------------------
#
# Finally, we need to change the tuning option to use RPCRunner. Use the code below
# to replace the corresponding part above.
# RPC-based variant of the tuning options above: measurements are dispatched
# to the devices registered with the tracker instead of the local machine.
tuning_option = {
    "log_filename": log_file,  # destination of the best tuning records
    "tuner": "xgb",  # search strategy; see tune_tasks() for available names
    "n_trial": 2000,  # upper bound on configurations tried per task
    "early_stopping": 600,  # larger values make tuning run longer
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(timeout=10),
        runner=autotvm.RPCRunner(
            "1080ti",  # change the device key to your key
            "127.0.0.1",  # address/port of the RPC tracker started above
            9190,
            # Measurement knobs — see the autotvm.measure_option documentation.
            number=20,
            repeat=3,
            timeout=4,
            min_repeat_ms=150,
        ),
    ),
}
| 16,284 | 38.719512 | 135 | py |
tvm | tvm-main/gallery/how_to/tune_with_autotvm/tune_relay_x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tune_relay_x86:
Auto-tuning a Convolutional Network for x86 CPU
===============================================
**Author**: `Yao Wang <https://github.com/kevinthesun>`_, `Eddie Yan <https://github.com/eqy>`_
This is a tutorial about how to tune convolution neural network
for x86 CPU.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
import os
import numpy as np
import tvm
from tvm import relay, autotvm
from tvm.relay import testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
import tvm.contrib.graph_executor as runtime
#################################################################
# Define network
# --------------
# First we need to define the network in relay frontend API.
# We can either load some pre-defined network from :code:`relay.testing`
# or building :any:`relay.testing.resnet` with relay.
# We can also load models from MXNet, ONNX and TensorFlow.
#
# In this tutorial, we choose resnet-18 as tuning example.
def get_network(name, batch_size):
    """Create the relay module and random parameters for the requested model.

    ``name`` selects one of "resnet-N", "vgg-N", "mobilenet",
    "squeezenet_v1.1", "inception_v3" or "mxnet" (example import); any other
    value raises ValueError.  Returns ``(mod, params, input_shape,
    output_shape)``.  Reads the module-level ``dtype`` (and, in the mxnet
    branch, ``input_name``) defined below.
    """
    input_shape = (batch_size, 3, 224, 224)
    output_shape = (batch_size, 1000)
    if "resnet" in name:
        depth = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=depth, batch_size=batch_size, dtype=dtype
        )
        return mod, params, input_shape, output_shape
    if "vgg" in name:
        depth = int(name.split("-")[1])
        mod, params = relay.testing.vgg.get_workload(
            num_layers=depth, batch_size=batch_size, dtype=dtype
        )
        return mod, params, input_shape, output_shape
    if name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
        return mod, params, input_shape, output_shape
    if name == "squeezenet_v1.1":
        mod, params = relay.testing.squeezenet.get_workload(
            batch_size=batch_size, version="1.1", dtype=dtype
        )
        return mod, params, input_shape, output_shape
    if name == "inception_v3":
        # Inception v3 expects a larger 299x299 input.
        input_shape = (batch_size, 3, 299, 299)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
        return mod, params, input_shape, output_shape
    if name == "mxnet":
        # Example of importing an external mxnet model.
        from mxnet.gluon.model_zoo.vision import get_model

        block = get_model("resnet18_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={input_name: input_shape}, dtype=dtype)
        # Append a softmax so the imported network emits probabilities.
        func = mod["main"]
        func = relay.Function(
            func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
        )
        mod = tvm.IRModule.from_expr(func)
        return mod, params, input_shape, output_shape
    raise ValueError("Unsupported network: " + name)
# Replace "llvm" with the correct target of your CPU.
# For example, for AWS EC2 c5 instance with Intel Xeon
# Platinum 8000 series, the target should be "llvm -mcpu=skylake-avx512".
# For AWS EC2 c4 instance with Intel Xeon E5-2666 v3, it should be
# "llvm -mcpu=core-avx2".
target = "llvm"
batch_size = 1
dtype = "float32"
model_name = "resnet-18"
# Kernel-level tuning records accumulate in this file.
log_file = "%s.log" % model_name
# Graph-level (layout) tuning records are written to this file.
graph_opt_sch_file = "%s_graph_opt.log" % model_name
# Set the input name of the graph
# For ONNX models, it is typically "0".
input_name = "data"
# Set number of threads used for tuning based on the number of
# physical CPU cores on your machine.
num_threads = 1
os.environ["TVM_NUM_THREADS"] = str(num_threads)
#################################################################
# Configure tensor tuning settings and create tasks
# -------------------------------------------------
# To get better kernel execution performance on x86 CPU,
# we need to change data layout of convolution kernel from
# "NCHW" to "NCHWc". To deal with this situation, we define
# conv2d_NCHWc operator in topi. We will tune this operator
# instead of plain conv2d.
#
# We will use local mode for tuning configuration. RPC tracker
# mode can be setup similarly to the approach in
# :ref:`tune_relay_arm` tutorial.
#
# To perform a precise measurement, we should repeat the measurement several
# times and use the average of results. In addition, we need to flush the cache
# for the weight tensors between repeated measurements. This can make the measured
# latency of one operator closer to its actual latency during end-to-end inference.
tuning_option = {
    "log_filename": log_file,
    "tuner": "random",
    "early_stopping": None,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(
            # Flushing the CPU cache between repeats keeps each timing honest.
            number=1, repeat=10, min_repeat_ms=0, enable_cpu_cache_flush=True
        ),
    ),
}
# You can skip the implementation of this function for this tutorial.
def tune_kernels(
    tasks, measure_option, tuner="gridsearch", early_stopping=None, log_filename="tuning.log"
):
    """Tune every extracted task and append the best records to ``log_filename``.

    Parameters
    ----------
    tasks : list
        AutoTVM tuning tasks extracted from the Relay program.
    measure_option : dict
        Result of :func:`autotvm.measure_option` describing how candidate
        configurations are built and timed.
    tuner : str
        Tuning strategy name; must be a key of the ``factories`` table below.
    early_stopping : int or None
        Stop a task early after this many trials without improvement.
    log_filename : str
        File that accumulates the measured tuning records.

    Raises
    ------
    ValueError
        If ``tuner`` is not a recognized strategy name.
    """
    # Declarative name -> constructor table replacing a 16-branch if/elif
    # chain; adding a new tuner flavour is now a one-line change.
    factories = {
        "xgb": lambda t: XGBTuner(t, loss_type="reg"),
        "xgb_knob": lambda t: XGBTuner(t, loss_type="reg", feature_type="knob"),
        "xgb_itervar": lambda t: XGBTuner(t, loss_type="reg", feature_type="itervar"),
        "xgb_curve": lambda t: XGBTuner(t, loss_type="reg", feature_type="curve"),
        "xgb_rank": lambda t: XGBTuner(t, loss_type="rank"),
        "xgb_rank_knob": lambda t: XGBTuner(t, loss_type="rank", feature_type="knob"),
        "xgb_rank_itervar": lambda t: XGBTuner(t, loss_type="rank", feature_type="itervar"),
        "xgb_rank_curve": lambda t: XGBTuner(t, loss_type="rank", feature_type="curve"),
        "xgb_rank_binary": lambda t: XGBTuner(t, loss_type="rank-binary"),
        "xgb_rank_binary_knob": lambda t: XGBTuner(t, loss_type="rank-binary", feature_type="knob"),
        "xgb_rank_binary_itervar": lambda t: XGBTuner(
            t, loss_type="rank-binary", feature_type="itervar"
        ),
        "xgb_rank_binary_curve": lambda t: XGBTuner(
            t, loss_type="rank-binary", feature_type="curve"
        ),
        "ga": lambda t: GATuner(t, pop_size=50),
        "random": RandomTuner,
        "gridsearch": GridSearchTuner,
    }
    # Fail fast on a bad tuner name instead of raising inside the loop.
    if tuner not in factories:
        raise ValueError("Invalid tuner: " + tuner)

    for i, task in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        tuner_obj = factories[tuner](task)
        # Exhaust the whole config space (bounded by early_stopping, if set).
        n_trial = len(task.config_space)
        tuner_obj.tune(
            n_trial=n_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(n_trial, prefix=prefix),
                autotvm.callback.log_to_file(log_filename),
            ],
        )
# Use graph tuner to achieve graph level optimal schedules
# Set use_DP=False if it takes too long to finish.
def tune_graph(graph, dshape, records, opt_sch_file, use_DP=True):
    """Search graph-level layout schedules and write them to ``opt_sch_file``.

    Uses the dynamic-programming graph tuner by default; set ``use_DP=False``
    to switch to the PBQP tuner if DP takes too long to finish.
    """
    target_ops = [
        relay.op.get("nn.conv2d"),
    ]
    tuner_cls = DPTuner if use_DP else PBQPTuner
    graph_tuner = tuner_cls(graph, {input_name: dshape}, records, target_ops, target)
    graph_tuner.benchmark_layout_transform(min_exec_num=2000)
    graph_tuner.run()
    graph_tuner.write_opt_sch2record_file(opt_sch_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def evaluate_performance(lib, data_shape):
    """Run the compiled library on random input and print benchmark timings."""
    device = tvm.cpu()
    # Random input is fine here: we only measure latency, not accuracy.
    sample = tvm.nd.array(np.random.uniform(size=data_shape).astype(dtype))
    graph_module = runtime.GraphModule(lib["default"](device))
    graph_module.set_input(input_name, sample)
    print("Evaluate inference time cost...")
    print(graph_module.benchmark(device, number=100, repeat=3))
def tune_and_evaluate(tuning_opt):
    """End-to-end driver: extract conv2d tasks, tune kernels and graph layout,
    then benchmark the network compiled (a) untuned, (b) kernel-tuned only,
    and (c) with graph-level best records applied."""
    # extract workloads from relay program
    print("Extract tasks...")
    # out_shape is unused below; kept to mirror get_network's return tuple.
    mod, params, data_shape, out_shape = get_network(model_name, batch_size)
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
    )
    # run tuning tasks
    tune_kernels(tasks, **tuning_opt)
    tune_graph(mod["main"], data_shape, log_file, graph_opt_sch_file)
    # compile kernels in default mode
    print("Evaluation of the network compiled in 'default' mode without auto tune:")
    with tvm.transform.PassContext(opt_level=3):
        print("Compile...")
        lib = relay.build(mod, target=target, params=params)
        evaluate_performance(lib, data_shape)
    # compile kernels in kernel tuned only mode
    print("\nEvaluation of the network been tuned on kernel level:")
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target, params=params)
        evaluate_performance(lib, data_shape)
    # compile kernels with graph-level best records
    print("\nEvaluation of the network been tuned on graph level:")
    with autotvm.apply_graph_best(graph_opt_sch_file):
        print("Compile...")
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, target=target, params=params)
        evaluate_performance(lib, data_shape)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended.
# One sample output is listed below.
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/12] Current/Best: 598.05/2497.63 GFLOPS | Progress: (252/252) | 1357.95 s Done.
# [Task 2/12] Current/Best: 522.63/2279.24 GFLOPS | Progress: (784/784) | 3989.60 s Done.
# [Task 3/12] Current/Best: 447.33/1927.69 GFLOPS | Progress: (784/784) | 3869.14 s Done.
# [Task 4/12] Current/Best: 481.11/1912.34 GFLOPS | Progress: (672/672) | 3274.25 s Done.
# [Task 5/12] Current/Best: 414.09/1598.45 GFLOPS | Progress: (672/672) | 2720.78 s Done.
# [Task 6/12] Current/Best: 508.96/2273.20 GFLOPS | Progress: (768/768) | 3718.75 s Done.
# [Task 7/12] Current/Best: 469.14/1955.79 GFLOPS | Progress: (576/576) | 2665.67 s Done.
# [Task 8/12] Current/Best: 230.91/1658.97 GFLOPS | Progress: (576/576) | 2435.01 s Done.
# [Task 9/12] Current/Best: 487.75/2295.19 GFLOPS | Progress: (648/648) | 3009.95 s Done.
# [Task 10/12] Current/Best: 182.33/1734.45 GFLOPS | Progress: (360/360) | 1755.06 s Done.
# [Task 11/12] Current/Best: 372.18/1745.15 GFLOPS | Progress: (360/360) | 1684.50 s Done.
# [Task 12/12] Current/Best: 215.34/2271.11 GFLOPS | Progress: (400/400) | 2128.74 s Done.
# INFO Start to benchmark layout transformation...
# INFO Benchmarking layout transformation successful.
# INFO Start to run dynamic programming algorithm...
# INFO Start forward pass...
# INFO Finished forward pass.
# INFO Start backward pass...
# INFO Finished backward pass...
# INFO Finished DPExecutor run.
# INFO Writing optimal schedules to resnet-18_graph_opt.log successfully.
#
# Evaluation of the network compiled in 'default' mode without auto tune:
# Compile...
# Evaluate inference time cost...
# Mean inference time (std dev): 4.5 ms (0.03 ms)
#
# Evaluation of the network been tuned on kernel level:
# Compile...
# Evaluate inference time cost...
# Mean inference time (std dev): 3.2 ms (0.03 ms)
#
# Evaluation of the network been tuned on graph level:
# Compile...
# Config for target=llvm -keys=cpu, workload=('dense_nopack.x86', ('TENSOR', (1, 512), 'float32'), ('TENSOR', (1000, 512), 'float32'), None, 'float32') is missing in ApplyGraphBest context. A fallback configuration is used, which may bring great performance regression.
# Config for target=llvm -keys=cpu, workload=('dense_pack.x86', ('TENSOR', (1, 512), 'float32'), ('TENSOR', (1000, 512), 'float32'), None, 'float32') is missing in ApplyGraphBest context. A fallback configuration is used, which may bring great performance regression.
# Evaluate inference time cost...
# Mean inference time (std dev): 3.16 ms (0.03 ms)
| 13,571 | 41.149068 | 272 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile ONNX Models
===================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial to deploy ONNX models with Relay.
To begin, install the ONNX package:
.. code-block:: bash
%%shell
pip install onnx onnxoptimizer
Alternatively, you can refer to official site:
https://github.com/onnx/onnx
"""
import onnx
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model in onnx tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# we skip the pytorch model construction part, and download the saved onnx model
model_url = "".join(
    [
        "https://gist.github.com/zhreshold/",
        "bcda4716699ac97ea44f791c24310193/raw/",
        "93672b029103648953c4e5ad3ac3aadf346a4cdc/",
        "super_resolution_0.2.onnx",
    ]
)
model_path = download_testdata(model_url, "super_resolution.onnx", module="onnx")
# now you have super_resolution.onnx on disk
onnx_model = onnx.load(model_path)
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples! This model takes a single input image of size
# 224x224 and outputs a scaled image that is 3x greater than the input along each
# axis, a 672x672 image. Re-scale the cat image to fit this input shape then
# convert to `YCbCr`. The super resolution model will then be applied to the
# luminance (`Y`) channel.
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr") # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
# Add batch and channel axes: the Y plane becomes a (1, 1, 224, 224) array.
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
######################################################################
# Compile the model with relay
# ---------------------------------------------
# Typically ONNX models mix model input values with parameter values, with
# the input having the name `1`. This is model dependent, and you should check
# with the documentation for your model to determine the full input and
# parameter name space.
#
# Passing in the shape dictionary to the `relay.frontend.from_onnx` method
# tells relay which ONNX parameters are inputs, and which are parameters, and
# provides a static definition of the input size.
target = "llvm"
input_name = "1"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
with tvm.transform.PassContext(opt_level=1):
    executor = relay.build_module.create_executor(
        "graph", mod, tvm.cpu(0), target, params
    ).evaluate()
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = "float32"
tvm_output = executor(tvm.nd.array(x.astype(dtype))).numpy()
######################################################################
# Display results
# ---------------------------------------------
# We put input and output image neck to neck. The luminance channel, `Y` is the output
# from the model. The chroma channels `Cb` and `Cr` are resized to match with a simple
# bicubic algorithm. The image is then recombined and converted back to `RGB`.
from matplotlib import pyplot as plt
out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode="L")
# NOTE(review): ``Image.BICUBIC`` was removed in Pillow 10; newer Pillow
# requires ``Image.Resampling.BICUBIC`` — confirm the pinned Pillow version.
out_cb = img_cb.resize(out_y.size, Image.BICUBIC)
out_cr = img_cr.resize(out_y.size, Image.BICUBIC)
result = Image.merge("YCbCr", [out_y, out_cb, out_cr]).convert("RGB")
canvas = np.full((672, 672 * 2, 3), 255)
canvas[0:224, 0:224, :] = np.asarray(img)
canvas[:, 672:, :] = np.asarray(result)
plt.imshow(canvas.astype(np.uint8))
plt.show()
######################################################################
# Notes
# ---------------------------------------------
# By default, ONNX defines models in terms of dynamic shapes. The ONNX importer
# retains that dynamism upon import, and the compiler attempts to convert the model
# into a static shapes at compile time. If this fails, there may still be dynamic
# operations in the model. Not all TVM kernels currently support dynamic shapes,
# please file an issue on discuss.tvm.apache.org if you hit an error with dynamic kernels.
#
# This particular model was build using an older version of ONNX. During the import
# phase ONNX importer will run the ONNX verifier, which may throw a `Mismatched attribute type`
# warning. Because TVM supports a number of different ONNX versions, the Relay model
# will still be valid.
| 5,709 | 40.985294 | 95 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_pytorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile PyTorch Models
======================
**Author**: `Alex Wong <https://github.com/alexwong/>`_
This article is an introductory tutorial to deploy PyTorch models with Relay.
For us to begin, PyTorch should be installed.
TorchVision is also required so we can use the model zoo.
A quick solution is to install via pip:
.. code-block:: bash
%%shell
pip install torch
pip install torchvision
or please refer to official site
https://pytorch.org/get-started/locally/
PyTorch versions should be backwards compatible but should be used
with the proper TorchVision version.
Currently, TVM supports PyTorch 1.7 and 1.4. Other versions may
be unstable.
"""
import tvm
from tvm import relay
import numpy as np
from tvm.contrib.download import download_testdata
# PyTorch imports
import torch
import torchvision
######################################################################
# Load a pretrained PyTorch model
# -------------------------------
model_name = "resnet18"
# NOTE(review): torchvision has deprecated ``pretrained=True`` in favor of the
# ``weights=`` argument — confirm against the torchvision version in use.
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.eval()
# We grab the TorchScripted model via tracing
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(model, input_data).eval()
######################################################################
# Load a test image
# -----------------
# Classic cat example!
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Preprocess the image and convert to tensor
# NOTE(review): the image was already resized to 224x224 above, so the
# Resize(256)/CenterCrop(224) pair below re-scales it again — presumably
# intentional to mirror the standard ImageNet pipeline; verify.
from torchvision import transforms
my_preprocess = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)
img = my_preprocess(img)
# Prepend a batch axis so the tensor matches the traced input shape.
img = np.expand_dims(img, 0)
######################################################################
# Import the graph to Relay
# -------------------------
# Convert PyTorch graph to Relay graph. The input name can be arbitrary.
input_name = "input0"
shape_list = [(input_name, img.shape)]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
######################################################################
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target=target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the compiled model on target.
from tvm.contrib import graph_executor
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
# Set inputs
m.set_input(input_name, tvm.nd.array(img.astype(dtype)))
# Execute
m.run()
# Get outputs
tvm_output = m.get_output(0)
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
    [
        "https://raw.githubusercontent.com/Cadene/",
        "pretrained-models.pytorch/master/data/",
        "imagenet_synsets.txt",
    ]
)
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synsets = f.readlines()
synsets = [x.strip() for x in synsets]
# Each line is "<wnid> <human-readable name...>"; build a wnid -> name map.
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
class_url = "".join(
    [
        "https://raw.githubusercontent.com/Cadene/",
        "pretrained-models.pytorch/master/data/",
        "imagenet_classes.txt",
    ]
)
class_name = "imagenet_classes.txt"
class_path = download_testdata(class_url, class_name, module="data")
with open(class_path) as f:
    class_id_to_key = f.readlines()
class_id_to_key = [x.strip() for x in class_id_to_key]
# Get top-1 result for TVM
top1_tvm = np.argmax(tvm_output.numpy()[0])
tvm_class_key = class_id_to_key[top1_tvm]
# Convert input to PyTorch variable and get PyTorch result for comparison
with torch.no_grad():
    torch_img = torch.from_numpy(img)
    output = model(torch_img)
    # Get top-1 result for PyTorch
    top1_torch = np.argmax(output.numpy())
    torch_class_key = class_id_to_key[top1_torch]
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
| 5,523 | 31.116279 | 97 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_tflite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile TFLite Models
=====================
**Author**: `Zhao Wu <https://github.com/FrozenGene>`_
This article is an introductory tutorial to deploy TFLite models with Relay.
To get started, TFLite package needs to be installed as prerequisite.
.. code-block:: bash
%%shell
pip install tflite==2.1.0
or you could generate TFLite package yourself. The steps are the following:
.. code-block:: bash
# Get the flatc compiler.
# Please refer to https://github.com/google/flatbuffers for details
# and make sure it is properly installed.
flatc --version
# Get the TFLite schema.
wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs
# Generate TFLite package.
flatc --python schema.fbs
# Add current folder (which contains generated tflite module) to PYTHONPATH.
export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)
Now please check if TFLite package is installed successfully, ``python -c "import tflite"``
Below you can find an example on how to compile TFLite model using TVM.
"""
######################################################################
# Utils for downloading and extracting zip files
# ----------------------------------------------
import os
def extract(path):
    """Extract a gzip-compressed tarball into its own directory.

    Parameters
    ----------
    path : str
        Path to a ``*tgz``/``*gz`` archive; contents are extracted next to it.

    Raises
    ------
    RuntimeError
        If the file name does not end in a recognized compressed-tar suffix.
    """
    import tarfile

    if path.endswith(("tgz", "gz")):
        dir_path = os.path.dirname(path)
        # Context manager guarantees the archive handle is closed even when
        # extraction raises (the original leaked it on error).
        # NOTE(security): extractall trusts member paths (path-traversal
        # risk); only use with archives from a trusted source.
        with tarfile.open(path) as tar:
            tar.extractall(path=dir_path)
    else:
        raise RuntimeError("Could not decompress the file: " + path)
######################################################################
# Load pretrained TFLite model
# ----------------------------
# Load mobilenet V1 TFLite model provided by Google
from tvm.contrib.download import download_testdata
model_url = "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz"
# Download model tar file and extract it to get mobilenet_v1_1.0_224.tflite
model_path = download_testdata(model_url, "mobilenet_v1_1.0_224.tgz", module=["tf", "official"])
model_dir = os.path.dirname(model_path)
extract(model_path)
# Now we can open mobilenet_v1_1.0_224.tflite
tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224.tflite")
# NOTE(review): the file handle is never closed; a ``with open(...)`` block
# would be cleaner (harmless here since CPython closes it at GC).
tflite_model_buf = open(tflite_model_file, "rb").read()
# Get TFLite model from buffer
# Older tflite packages expose the API one module level deeper, hence the
# AttributeError fallback.
try:
    import tflite
    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
    import tflite.Model
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
######################################################################
# Load a test image
# -----------------
# A single cat dominates the examples!
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
image_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
image_path = download_testdata(image_url, "cat.png", module="data")
resized_image = Image.open(image_path).resize((224, 224))
plt.imshow(resized_image)
plt.show()
image_data = np.asarray(resized_image).astype("float32")
# Add a dimension to the image so that we have NHWC format layout
image_data = np.expand_dims(image_data, axis=0)
# Preprocess image as described here:
# https://github.com/tensorflow/models/blob/edb6ed22a801665946c63d650ab9a0b23d98e1b1/research/slim/preprocessing/inception_preprocessing.py#L243
# Maps each channel from [0, 255] into [-1, 1].
image_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1
image_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1
image_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1
print("input", image_data.shape)
######################################################################
# Compile the model with relay
# ----------------------------
# TFLite input tensor name, shape and type
input_tensor = "input"
input_shape = (1, 224, 224, 3)
input_dtype = "float32"
# Parse TFLite model and convert it to a Relay module
from tvm import relay, transform
mod, params = relay.frontend.from_tflite(
    tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}
)
# Build the module against x86 CPU
target = "llvm"
with transform.PassContext(opt_level=3):
    lib = relay.build(mod, target, params=params)
######################################################################
# Execute on TVM
# --------------
import tvm
from tvm import te
from tvm.contrib import graph_executor as runtime
# Create a runtime executor module
module = runtime.GraphModule(lib["default"](tvm.cpu()))
# Feed input data
module.set_input(input_tensor, tvm.nd.array(image_data))
# Run
module.run()
# Get output
tvm_output = module.get_output(0).numpy()
######################################################################
# Display results
# ---------------
# Load label file
label_file_url = "".join(
    [
        "https://raw.githubusercontent.com/",
        "tensorflow/tensorflow/master/tensorflow/lite/java/demo/",
        "app/src/main/assets/",
        "labels_mobilenet_quant_v1_224.txt",
    ]
)
label_file = "labels_mobilenet_quant_v1_224.txt"
label_path = download_testdata(label_file_url, label_file, module="data")
# List of 1001 classes
with open(label_path) as f:
    labels = f.readlines()
# Convert result to 1D data
predictions = np.squeeze(tvm_output)
# Get top 1 prediction
prediction = np.argmax(predictions)
# Convert id to class name and show the result
print("The image prediction result is: id " + str(prediction) + " name: " + labels[prediction])
| 6,309 | 31.525773 | 144 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_mxnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-from-mxnet:
Compile MXNet Models
====================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
`Kazutaka Morita <https://github.com/kazum>`_
This article is an introductory tutorial to deploy mxnet models with Relay. To begin, we must install `mxnet`:
.. code-block:: bash
%%shell
pip install mxnet
or please refer to official installation guide.
https://mxnet.apache.org/versions/master/install/index.html
"""
# some standard imports
# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
# sphinx_gallery_end_ignore
import mxnet as mx
import tvm
import tvm.relay as relay
import numpy as np
######################################################################
# Download Resnet18 model from Gluon Model Zoo
# ---------------------------------------------
# In this section, we download a pretrained imagenet model and classify an image.
from tvm.contrib.download import download_testdata
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
from matplotlib import pyplot as plt
block = get_model("resnet18_v1", pretrained=True)
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
# img_name is unused below (img_path is derived from the literal "cat.png").
img_name = "cat.png"
synset_url = "".join(
    [
        "https://gist.githubusercontent.com/zhreshold/",
        "4d0b62f3d01426887599d4f7ede23ee5/raw/",
        "596b27d23537e5a1b5751d2b0481ef172f58b539/",
        "imagenet1000_clsid_to_human.txt",
    ]
)
synset_name = "imagenet1000_clsid_to_human.txt"
img_path = download_testdata(img_url, "cat.png", module="data")
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    # NOTE(security): eval on downloaded text executes arbitrary code;
    # ast.literal_eval would be the safe way to parse this dict literal.
    synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))
plt.imshow(image)
plt.show()
def transform_image(image):
    """Normalize an HWC RGB image and return it as a NCHW batch of one.

    Subtracts the per-channel mean, divides by the per-channel std, moves
    channels first, and prepends a batch axis.
    """
    mean = np.array([123.0, 117.0, 104.0])
    std = np.array([58.395, 57.12, 57.375])
    normalized = (np.array(image) - mean) / std
    chw = normalized.transpose((2, 0, 1))
    return chw[np.newaxis, :]
x = transform_image(image)
print("x", x.shape)
######################################################################
# Compile the Graph
# -----------------
# Now we would like to port the Gluon model to a portable computational graph.
# It's as easy as several lines.
# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
shape_dict = {"data": x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
## we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# now compile the graph
# Requires a CUDA-capable GPU; change to "llvm" to run on CPU.
target = "cuda"
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(func, target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now, we would like to reproduce the same forward computation using TVM.
from tvm.contrib import graph_executor
dev = tvm.cuda(0)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("data", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.numpy()[0])
print("TVM prediction top-1:", top1, synset[top1])
######################################################################
# Use MXNet symbol with pretrained weights
# ----------------------------------------
# MXNet often use `arg_params` and `aux_params` to store network parameters
# separately, here we show how to use these weights with existing API
def block2symbol(block):
    """Trace a Gluon HybridBlock into an MXNet symbol plus parameter dicts.

    Returns ``(sym, args, auxs)``. Every collected parameter is copied into
    ``args``; ``auxs`` is left empty (matching the original behavior, which
    never populated aux params).
    """
    sym = block(mx.sym.Variable("data"))
    args = {
        name: mx.nd.array(param.data().asnumpy())
        for name, param in block.collect_params().items()
    }
    return sym, args, {}
mx_sym, args, auxs = block2symbol(block)
# usually we would save/load it as checkpoint
mx.model.save_checkpoint("resnet18_v1", 0, mx_sym, args, auxs)
# there are 'resnet18_v1-0000.params' and 'resnet18_v1-symbol.json' on disk
######################################################################
# for a normal mxnet model, we start from here
mx_sym, args, auxs = mx.model.load_checkpoint("resnet18_v1", 0)
# now we use the same API to get Relay computation graph
mod, relay_params = relay.frontend.from_mxnet(mx_sym, shape_dict, arg_params=args, aux_params=auxs)
# repeat the same steps to run this model using TVM
| 5,334 | 35.047297 | 110 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_keras.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile Keras Models
=====================
**Author**: `Yuwei Hu <https://Huyuwei.github.io/>`_
This article is an introductory tutorial to deploy Keras models with Relay.
For us to begin with, keras should be installed.
Tensorflow is also required since it's used as the default backend of keras.
A quick solution is to install via pip
.. code-block:: bash
%%shell
pip install keras tensorflow
or please refer to official site
https://keras.io/#installation
"""
# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
import keras
import tensorflow as tf
import numpy as np
######################################################################
# Load pretrained keras model
# ----------------------------
# We load a pretrained resnet-50 classification model provided by keras.
def _version_tuple(version):
    """Parse a dotted version string into a tuple of ints for safe comparison.

    Comparing version components as *strings* is lexicographic and wrong
    (e.g. "10" < "4"), so keras 2.10+ would incorrectly pick the old weights.
    Strip each of the first three components down to its digits and compare
    numerically; a component with no digits counts as 0.
    """
    parts = []
    for component in version.split(".")[:3]:
        digits = "".join(ch for ch in component if ch.isdigit())
        parts.append(int(digits) if digits else 0)
    return tuple(parts)


# Keras changed the pretrained-weights layout in 2.4.0; pick the matching file.
if _version_tuple(keras.__version__) < (2, 4, 0):
    weights_url = "".join(
        [
            "https://github.com/fchollet/deep-learning-models/releases/",
            "download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
        ]
    )
    weights_file = "resnet50_keras_old.h5"
else:
    # BUGFIX: the original first fragment began with a space, yielding an
    # invalid URL (" https://...").
    weights_url = "".join(
        [
            "https://storage.googleapis.com/tensorflow/keras-applications/",
            "resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
        ]
    )
    weights_file = "resnet50_keras_new.h5"
weights_path = download_testdata(weights_url, weights_file, module="keras")
keras_resnet50 = tf.keras.applications.resnet50.ResNet50(
include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
)
keras_resnet50.load_weights(weights_path)
######################################################################
# Load a test image
# ------------------
# A single cat dominates the examples!
from PIL import Image
from matplotlib import pyplot as plt
from tensorflow.keras.applications.resnet50 import preprocess_input
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
plt.imshow(img)
plt.show()
# input preprocess
data = np.array(img)[np.newaxis, :].astype("float32")
data = preprocess_input(data).transpose([0, 3, 1, 2])
print("input_1", data.shape)
######################################################################
# Compile the model with Relay
# ----------------------------
# convert the keras model(NHWC layout) to Relay format(NCHW layout).
shape_dict = {"input_1": data.shape}
mod, params = relay.frontend.from_keras(keras_resnet50, shape_dict)
# compile the model
target = "cuda"
dev = tvm.cuda(0)
# TODO(mbs): opt_level=3 causes nn.contrib_conv2d_winograd_weight_transform
# to end up in the module which fails memory validation on cuda most likely
# due to a latent bug. Note that the pass context only has an effect within
# evaluate() and is not captured by create_executor().
with tvm.transform.PassContext(opt_level=0):
model = relay.build_module.create_executor("graph", mod, dev, target, params).evaluate()
######################################################################
# Execute on TVM
# ---------------
dtype = "float32"
tvm_out = model(tvm.nd.array(data.astype(dtype)))
top1_tvm = np.argmax(tvm_out.numpy()[0])
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
import ast

with open(synset_path) as f:
    # The synset file is a Python dict literal downloaded from the network;
    # parse it with ast.literal_eval instead of eval() so arbitrary code in
    # the fetched file cannot execute.
    synset = ast.literal_eval(f.read())
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, synset[top1_tvm]))
# confirm correctness with keras output
keras_out = keras_resnet50.predict(data.transpose([0, 2, 3, 1]))
top1_keras = np.argmax(keras_out)
print("Keras top-1 id: {}, class name: {}".format(top1_keras, synset[top1_keras]))
| 5,079 | 34.774648 | 92 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_coreml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile CoreML Models
=====================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
`Kazutaka Morita <https://github.com/kazum>`_, \
`Zhao Wu <https://github.com/FrozenGene>`_
This article is an introductory tutorial to deploy CoreML models with Relay.
To begin, we must install coremltools:
.. code-block:: bash
%%shell
pip install coremltools
or please refer to official site
https://github.com/apple/coremltools
"""
import tvm
from tvm import te
import tvm.relay as relay
from tvm.contrib.download import download_testdata
import coremltools as cm
import numpy as np
from PIL import Image
######################################################################
# Load pretrained CoreML model
# ----------------------------
# We will download and load a pretrained mobilenet classification network
# provided by apple in this example
model_url = "https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel"
model_file = "mobilenet.mlmodel"
model_path = download_testdata(model_url, model_file, module="coreml")
# Now you have mobilenet.mlmodel on disk
mlmodel = cm.models.MLModel(model_path)
######################################################################
# Load a test image
# ------------------
# A single cat dominates the examples!
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Mobilenet.mlmodel's input is BGR format
img_bgr = np.array(img)[:, :, ::-1]
x = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
######################################################################
# Compile the model on Relay
# ---------------------------
# We should be familiar with the process right now.
target = "llvm"
shape_dict = {"image": x.shape}
# Parse CoreML model and convert into Relay computation graph
mod, params = relay.frontend.from_coreml(mlmodel, shape_dict)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
######################################################################
# Execute on TVM
# -------------------
# The process is no different from other example
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
dtype = "float32"
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("image", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.numpy()[0])
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
import ast

with open(synset_path) as f:
    # The synset file is a Python dict literal downloaded from the network;
    # parse it with ast.literal_eval instead of eval() so arbitrary code in
    # the fetched file cannot execute.
    synset = ast.literal_eval(f.read())
# You should see the following result: Top-1 id 282 class name tiger cat
print("Top-1 id", top1, "class name", synset[top1])
| 4,043 | 34.165217 | 85 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_paddle.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile PaddlePaddle Models
===========================
**Author**: `Ziyuan Ma <https://github.com/ZiyuanMa/>`_
This article is an introductory tutorial to deploy PaddlePaddle models with Relay.
To begin, we'll install PaddlePaddle>=2.1.3:
.. code-block:: bash
%%shell
pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
For more details, refer to the official install instructions at:
https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html
"""
import tarfile
import paddle
import numpy as np
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
######################################################################
# Load pretrained ResNet50 model
# ---------------------------------------------
# We load a pretrained ResNet50 provided by PaddlePaddle.
url = "https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar"
model_path = download_testdata(url, "paddle_resnet50.tar", module="model")
with tarfile.open(model_path) as tar:
names = tar.getnames()
for name in names:
tar.extract(name, "./")
model = paddle.jit.load("./paddle_resnet50/model")
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
import paddle.vision.transforms as T
transforms = T.Compose(
[
T.Resize((256, 256)),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
img = transforms(img)
img = np.expand_dims(img, axis=0)
######################################################################
# Compile the model with relay
# ---------------------------------------------
target = "llvm"
shape_dict = {"inputs": img.shape}
mod, params = relay.frontend.from_paddle(model, shape_dict)
with tvm.transform.PassContext(opt_level=3):
executor = relay.build_module.create_executor(
"graph", mod, tvm.cpu(0), target, params
).evaluate()
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = "float32"
tvm_output = executor(tvm.nd.array(img.astype(dtype))).numpy()
######################################################################
# Look up synset name
# ---------------------------------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = f.readlines()
top1 = np.argmax(tvm_output[0])
print(f"TVM prediction top-1 id: {top1}, class name: {synset[top1]}")
| 4,003 | 32.647059 | 102 | py |
tvm | tvm-main/gallery/how_to/compile_models/from_oneflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile OneFlow Models
======================
**Author**: `Xiaoyu Zhang <https://github.com/BBuf/>`_
This article is an introductory tutorial to deploy OneFlow models with Relay.
For us to begin with, OneFlow package should be installed.
A quick solution is to install via pip
.. code-block:: bash
%%shell
pip install flowvision==0.1.0
pip install -f https://release.oneflow.info oneflow==0.7.0+cpu
or please refer to official site:
https://github.com/Oneflow-Inc/oneflow
Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable.
"""
# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
# sphinx_gallery_end_ignore
import os, math
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
# oneflow imports
import flowvision
import oneflow as flow
import oneflow.nn as nn
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
######################################################################
# Load a pretrained OneFlow model and save model
# ----------------------------------------------
model_name = "resnet18"
model = getattr(flowvision.models, model_name)(pretrained=True)
model = model.eval()
model_dir = "resnet18_model"
if not os.path.exists(model_dir):
flow.save(model.state_dict(), model_dir)
######################################################################
# Load a test image
# -----------------
# Classic cat example!
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Preprocess the image and convert to tensor
from flowvision import transforms
my_preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img = my_preprocess(img)
img = np.expand_dims(img.numpy(), 0)
######################################################################
# Import the graph to Relay
# -------------------------
# Convert OneFlow graph to Relay graph. The input name can be arbitrary.
class Graph(flow.nn.Graph):
    """Static-graph wrapper around an eager OneFlow module.

    Subclassing flow.nn.Graph lets OneFlow trace/compile the wrapped module
    into a lazy graph, which relay.frontend.from_oneflow then consumes.
    """
    def __init__(self, module):
        super().__init__()
        # Keep a handle to the eager module; nn.Graph registers it so its
        # parameters become part of the compiled graph.
        self.m = module
    def build(self, x):
        # build() is the nn.Graph tracing hook -- presumably called once by
        # graph._compile() to record one forward pass as the graph body
        # (confirm against the flow.nn.Graph documentation).
        out = self.m(x)
        return out
graph = Graph(model)
_ = graph._compile(flow.randn(1, 3, 224, 224))
mod, params = relay.frontend.from_oneflow(graph, model_dir)
######################################################################
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the compiled model on target.
target = "cuda"
with tvm.transform.PassContext(opt_level=10):
intrp = relay.build_module.create_executor("graph", mod, tvm.cuda(0), target)
print(type(img))
print(img.shape)
tvm_output = intrp.evaluate()(tvm.nd.array(img.astype("float32")), **params)
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
synset_url = "".join(
[
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
"imagenet_synsets.txt",
]
)
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synsets = f.readlines()
synsets = [x.strip() for x in synsets]
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
class_url = "".join(
[
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
"imagenet_classes.txt",
]
)
class_name = "imagenet_classes.txt"
class_path = download_testdata(class_url, class_name, module="data")
with open(class_path) as f:
class_id_to_key = f.readlines()
class_id_to_key = [x.strip() for x in class_id_to_key]
# Get top-1 result for TVM
top1_tvm = np.argmax(tvm_output.numpy()[0])
tvm_class_key = class_id_to_key[top1_tvm]
# Convert input to OneFlow variable and get OneFlow result for comparison
with flow.no_grad():
torch_img = flow.from_numpy(img)
output = model(torch_img)
# Get top-1 result for OneFlow
top_oneflow = np.argmax(output.numpy())
oneflow_class_key = class_id_to_key[top_oneflow]
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print(
"OneFlow top-1 id: {}, class name: {}".format(top_oneflow, key_to_classname[oneflow_class_key])
)
| 5,796 | 30.677596 | 99 | py |
tvm | tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for x86 CPU
============================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for x86 CPU with the auto-scheduler.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
import numpy as np
import tvm
from tvm import relay, auto_scheduler
from tvm.relay import data_dep_optimization as ddo
import tvm.relay.testing
from tvm.contrib import graph_executor
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=False):
    """Build a testing network and return (mod, params, input_shape, output_shape).

    Parameters
    ----------
    name : str
        One of "resnet-N", "resnet3d-N", "mobilenet", "squeezenet_v1.1",
        "inception_v3", "mxnet" (a Gluon resnet50_v1 with softmax appended),
        or "mlp".
    batch_size : int
        Batch dimension of the generated input shape.
    layout : str
        "NHWC" (preferred by the auto-scheduler) or "NCHW".
    dtype : str
        Numeric type of the workload, e.g. "float32".
    use_sparse : bool
        When True, rewrite dense weights into block-sparse form.

    Raises
    ------
    ValueError
        For an unknown layout or network name.
    """
    # The auto-scheduler gets the best performance from NHWC layouts.
    if layout == "NHWC":
        image_shape = (224, 224, 3)
    elif layout == "NCHW":
        image_shape = (3, 224, 224)
    else:
        raise ValueError("Invalid layout: " + layout)

    input_shape = (batch_size,) + image_shape
    output_shape = (batch_size, 1000)

    if name.startswith("resnet-") or name.startswith("resnet3d-"):
        # NOTE(review): both prefixes currently build the *same* 2-D ResNet;
        # presumably "resnet3d-*" was meant to use relay.testing.resnet_3d --
        # confirm upstream intent before relying on the 3-D name.
        depth = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=depth,
            batch_size=batch_size,
            layout=layout,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(
            batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
        )
    elif name == "squeezenet_v1.1":
        assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
        mod, params = relay.testing.squeezenet.get_workload(
            version="1.1",
            batch_size=batch_size,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "inception_v3":
        # Inception-v3 uses 299x299 inputs regardless of the requested layout.
        input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "mxnet":
        # Example path for importing an external MXNet/Gluon model.
        from mxnet.gluon.model_zoo.vision import get_model

        assert layout == "NCHW"
        block = get_model("resnet50_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = mod["main"]
        # Append a softmax so the graph emits probabilities.
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        mod = tvm.IRModule.from_expr(net)
    elif name == "mlp":
        mod, params = relay.testing.mlp.get_workload(
            batch_size=batch_size, dtype=dtype, image_shape=image_shape, num_classes=1000
        )
    else:
        raise ValueError("Network not found.")

    if use_sparse:
        # Optionally convert the dense workload into a block-sparse one.
        from tvm.topi.sparse.utils import convert_model_dense_to_sparse

        mod, params = convert_model_dense_to_sparse(mod, params, bs_r=4, random_params=True)

    return mod, params, input_shape, output_shape
# Define the neural network and compilation target.
# If the target machine supports avx512 instructions, replace the
# "llvm -mcpu=core-avx2" with "llvm -mcpu=skylake-avx512"
network = "resnet-50"
use_sparse = False
batch_size = 1
layout = "NHWC"
target = tvm.target.Target("llvm -mcpu=core-avx2")
dtype = "float32"
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
# Extract tasks from the network
print("Get model...")
mod, params, input_shape, output_shape = get_network(
network,
batch_size,
layout,
dtype=dtype,
use_sparse=use_sparse,
)
print("Extract tasks...")
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
print(task.compute_dag)
#################################################################
# Begin Tuning
# ------------
# Now, we set some options for tuning and launch the search tasks
#
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`800 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 29 tasks in resnet-50, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRunner` for more parameters.
#
def run_tuning():
    """Launch the auto-scheduler search over all extracted tasks.

    Reads the module-level ``tasks``, ``task_weights``, ``use_sparse`` and
    ``log_file``; measurement records are appended to ``log_file``.
    """
    print("Begin tuning...")
    scheduler = auto_scheduler.TaskScheduler(tasks, task_weights)
    options = auto_scheduler.TuningOptions(
        # change this to 20000 to achieve the best performance
        num_measure_trials=200,
        runner=auto_scheduler.LocalRunner(repeat=10, enable_cpu_cache_flush=True),
        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    )

    extra_kwargs = {}
    if use_sparse:
        # Sparse workloads need dedicated sketch rules to generate valid
        # schedules, so build one SketchPolicy per task.
        from tvm.topi.sparse.utils import sparse_sketch_rules

        extra_kwargs["search_policy"] = [
            auto_scheduler.SketchPolicy(
                t,
                program_cost_model=auto_scheduler.XGBModel(),
                init_search_callbacks=sparse_sketch_rules(),
            )
            for t in tasks
        ]
    scheduler.tune(options, **extra_kwargs)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# run_tuning()
######################################################################
# .. note:: Explain the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.010 | 0.40 | 64 |
# | 1 | 0.087 | 47.19 | 64 |
# | 2 | 0.008 | -0.00 | 64 |
# | 3 | 0.177 | 582.07 | 64 |
# | 4 | 0.268 | 862.37 | 256 |
# | 5 | 0.166 | 621.13 | 128 |
# | 6 | 0.170 | 605.10 | 128 |
# | 7 | 0.128 | 403.20 | 64 |
# | 8 | 0.189 | 545.71 | 64 |
# | 9 | 0.231 | 1001.01 | 448 |
# | 10 | 0.155 | 664.80 | 256 |
# | 11 | 0.155 | 662.86 | 256 |
# | 12 | 0.119 | 434.08 | 64 |
# | 13 | 0.199 | 522.13 | 64 |
# | 14 | 0.235 | 986.56 | 320 |
# | 15 | 0.149 | 689.13 | 128 |
# | 16 | 0.155 | 664.80 | 192 |
# | 17 | 0.151 | 340.64 | 64 |
# | 18 | 0.176 | 597.55 | 128 |
# | 19 | 0.220 | 1054.37 | 192 |
# | 20 | 0.150 | 686.01 | 128 |
# | 21 | 0.159 | 650.88 | 128 |
# | 22 | 0.073 | 358.19 | 64 |
# | 23 | 0.031 | 70.63 | 64 |
# | 24 | 0.251 | 947.73 | 128 |
# | 25 | 0.157 | 652.47 | 128 |
# | 26 | 0.215 | 954.84 | 128 |
# | 27 | 0.237 | 868.92 | 128 |
# | 28 | 0.266 | 774.06 | 128 |
# -------------------------------------------------
# Estimated total latency: 10.016 ms Trials: 3992 Used time : 1131 s Next ID: 15
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "tvm::Error"s errors, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
# you should be able to do the compilation (the secion below).
#
#################################################################
# Compile and Evaluate
# --------------------
# After auto-tuning, we can compile the network with the best schedules we found.
# All measurement records are dumped into the log file during auto-tuning,
# so we can read the log file and load the best schedules.
# Compile with the history best
# Compile the network using the best schedule found for each task.
# ApplyHistoryBest queries the tuning log and overrides the default
# schedules; the PassContext flag tells Relay to use those records.
print("Compile...")
with auto_scheduler.ApplyHistoryBest(log_file):
    with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
        lib = relay.build(mod, target=target, params=params)
# Create graph executor on the target device and feed it random input data
# (only timing is measured here, so the input values do not matter).
dev = tvm.device(str(target), 0)
module = graph_executor.GraphModule(lib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# Evaluate: repeat=3 runs of at least 500 ms each to amortize noise.
print("Evaluate inference time cost...")
print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target CPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
| 15,076 | 42.575145 | 101 | py |
tvm | tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for NVIDIA GPU
===============================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for NVIDIA GPU with the auto-scheduler.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
import numpy as np
import tvm
from tvm import relay, auto_scheduler
import tvm.relay.testing
from tvm.contrib import graph_executor
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32"):
    """Get the symbol definition and random weight of a network.

    Parameters
    ----------
    name : str
        Network identifier: "resnet-N", "resnet3d-N", "mobilenet",
        "squeezenet_v1.1", "inception_v3", or "mxnet".
    batch_size : int
        Batch dimension of the network input.
    layout : str
        "NHWC" (preferred by the auto-scheduler) or "NCHW".
    dtype : str
        Data type of the parameters and input, e.g. "float32".

    Returns
    -------
    tuple
        ``(mod, params, input_shape, output_shape)`` where ``mod`` is the
        Relay IRModule and ``params`` its random-initialized weights.

    Raises
    ------
    ValueError
        If ``layout`` or ``name`` is not one of the supported values.
    """
    # auto-scheduler prefers NHWC layout
    if layout == "NHWC":
        image_shape = (224, 224, 3)
    elif layout == "NCHW":
        image_shape = (3, 224, 224)
    else:
        raise ValueError("Invalid layout: " + layout)
    input_shape = (batch_size,) + image_shape
    output_shape = (batch_size, 1000)
    if name.startswith("resnet-"):
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=n_layer,
            batch_size=batch_size,
            layout=layout,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name.startswith("resnet3d-"):
        # NOTE(review): this branch builds the same 2-D resnet workload as
        # the "resnet-" branch above — confirm whether a 3-D variant was
        # intended here.
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=n_layer,
            batch_size=batch_size,
            layout=layout,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(
            batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
        )
    elif name == "squeezenet_v1.1":
        assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
        mod, params = relay.testing.squeezenet.get_workload(
            version="1.1",
            batch_size=batch_size,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "inception_v3":
        input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "mxnet":
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model

        assert layout == "NCHW"
        block = get_model("resnet18_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = mod["main"]
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        mod = tvm.IRModule.from_expr(net)
    else:
        # Fix: an unrecognized name previously fell through every branch and
        # crashed with UnboundLocalError on `mod`. Fail fast with a clear
        # message, consistent with the ARM variant of this tutorial.
        raise ValueError("Network not found.")
    return mod, params, input_shape, output_shape
# Define the neural network and compilation target.
# NHWC is used because the auto-scheduler has more optimizations for it.
network = "resnet-18"
batch_size = 1
layout = "NHWC"
target = tvm.target.Target("cuda")
dtype = "float32"
# Tuning records are written here; the name encodes the configuration so
# logs from different networks/targets do not collide.
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
# Extract tasks from the network
# Extract tasks from the network. Each task is one tunable subgraph; its
# weight is how many times that subgraph appears in the whole network.
print("Extract tasks...")
mod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
    # Print each task's workload key and compute DAG for inspection.
    print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
    print(task.compute_dag)
#################################################################
# Begin Tuning
# ------------
# Now, we set some options for tuning and launch the search tasks
#
# * :code:`measure_ctx` launches a different process for measurement to
# provide isolation. It can protect the main process from GPU crashes
# during measurement and avoid other runtime conflicts.
# * :code:`min_repeat_ms` defines the minimum duration of one "repeat" in every measurement.
# This can warmup the GPU, which is necessary to get accurate measurement results.
# Typically, we recommend a value >= 300 ms.
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`900 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 24 tasks in resnet-18, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.
#
def run_tuning():
    """Tune all extracted tasks and record measurements into ``log_file``."""
    print("Begin tuning...")
    # Measurements run in a separate RPC process so a GPU crash during a
    # trial cannot take down the main search process.
    ctx = auto_scheduler.LocalRPCMeasureContext(repeat=1, min_repeat_ms=300, timeout=10)
    options = auto_scheduler.TuningOptions(
        num_measure_trials=200,  # change this to 20000 to achieve the best performance
        runner=ctx.runner,
        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    )
    # The task scheduler splits the trial budget across tasks dynamically.
    scheduler = auto_scheduler.TaskScheduler(tasks, task_weights)
    scheduler.tune(options)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# run_tuning()
######################################################################
# .. note:: Explain the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.005 | 0.88 | 64 |
# | 1 | 0.010 | 99.10 | 64 |
# | 2 | 0.006 | 0.00 | 64 |
# | 3 | 0.145 | 979.78 | 384 |
# | 4 | 0.130 | 1097.02 | 384 |
# | 5 | 0.143 | 992.69 | 384 |
# | 6 | 0.076 | 1526.86 | 192 |
# | 7 | 0.115 | 999.44 | 320 |
# | 8 | 0.079 | 1449.39 | 320 |
# | 9 | 0.122 | 938.73 | 384 |
# | 10 | 0.063 | 1832.98 | 192 |
# | 11 | 0.072 | 1763.62 | 256 |
# | 12 | 0.062 | 2036.40 | 192 |
# | 13 | 0.068 | 1874.44 | 192 |
# | 14 | 0.049 | 2346.50 | 128 |
# | 15 | 0.076 | 1694.31 | 256 |
# | 16 | 0.067 | 1933.30 | 448 |
# | 17 | 0.076 | 1680.90 | 256 |
# | 18 | 0.022 | 98.43 | 64 |
# | 19 | 0.076 | 3112.55 | 192 |
# | 20 | 0.013 | 2026.44 | 64 |
# | 21 | 0.011 | 1136.69 | 64 |
# | 22 | 0.013 | 992.47 | 64 |
# | 23 | 0.020 | 627.56 | 64 |
# -------------------------------------------------
# Estimated total latency: 1.587 ms Trials: 4992 Used time : 13296 s Next ID: 3
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "tvm::Error"s and CUDA errors, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
# you should be able to do the compilation (the section below).
#
#################################################################
# Compile and Evaluate
# --------------------
# After auto-tuning, we can compile the network with the best schedules we found.
# All measurement records are dumped into the log file during auto-tuning,
# so we can read the log file and load the best schedules.
# Compile with the history best
# Compile the network using the best schedule found for each task.
# ApplyHistoryBest queries the tuning log and overrides the default
# schedules; the PassContext flag tells Relay to use those records.
print("Compile...")
with auto_scheduler.ApplyHistoryBest(log_file):
    with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
        lib = relay.build(mod, target=target, params=params)
# Create graph executor on the target device and feed it random input data
# (only timing is measured here, so the input values do not matter).
dev = tvm.device(str(target), 0)
module = graph_executor.GraphModule(lib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# Evaluate: repeat=3 runs of at least 500 ms each to amortize noise.
print("Evaluate inference time cost...")
print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target GPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
| 14,090 | 44.163462 | 101 | py |
tvm | tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for ARM CPU
=============================================
**Author**: `Thierry Moreau <https://github.com/tmoreau89>`_, \
`Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for ARM CPU with the auto-scheduler via RPC.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
import numpy as np
import os
import tvm
from tvm import relay, auto_scheduler
from tvm.relay import data_dep_optimization as ddo
import tvm.relay.testing
from tvm.contrib import graph_executor
from tvm.contrib.utils import tempdir
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=False):
    """Get the symbol definition and random weight of a network.

    Parameters
    ----------
    name : str
        Network identifier: "resnet-N", "resnet3d-N", "mobilenet",
        "squeezenet_v1.1", "inception_v3", "mxnet", or "mlp".
    batch_size : int
        Batch dimension of the network input.
    layout : str
        "NHWC" (preferred by the auto-scheduler) or "NCHW".
    dtype : str
        Data type of the parameters and input, e.g. "float32".
    use_sparse : bool
        If True, convert dense weights to a sparse representation with
        random parameters before returning.

    Returns
    -------
    tuple
        ``(mod, params, input_shape, output_shape)``.

    Raises
    ------
    ValueError
        If ``layout`` or ``name`` is not one of the supported values.
    """
    # auto-scheduler prefers NHWC layout
    if layout == "NHWC":
        image_shape = (224, 224, 3)
    elif layout == "NCHW":
        image_shape = (3, 224, 224)
    else:
        raise ValueError("Invalid layout: " + layout)
    input_shape = (batch_size,) + image_shape
    output_shape = (batch_size, 1000)
    if name.startswith("resnet-"):
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=n_layer,
            batch_size=batch_size,
            layout=layout,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name.startswith("resnet3d-"):
        # NOTE(review): this branch builds the same 2-D resnet workload as
        # the "resnet-" branch above — confirm whether a 3-D variant was
        # intended here.
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=n_layer,
            batch_size=batch_size,
            layout=layout,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(
            batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
        )
    elif name == "squeezenet_v1.1":
        assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
        mod, params = relay.testing.squeezenet.get_workload(
            version="1.1",
            batch_size=batch_size,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "inception_v3":
        # inception_v3 uses 299x299 inputs, overriding the default shape.
        input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "mxnet":
        # an example for mxnet model (downloads pretrained weights; needs network access)
        from mxnet.gluon.model_zoo.vision import get_model

        assert layout == "NCHW"
        block = get_model("resnet50_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        net = mod["main"]
        # Append a softmax so the model outputs probabilities.
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        mod = tvm.IRModule.from_expr(net)
    elif name == "mlp":
        mod, params = relay.testing.mlp.get_workload(
            batch_size=batch_size, dtype=dtype, image_shape=image_shape, num_classes=1000
        )
    else:
        raise ValueError("Network not found.")
    if use_sparse:
        from tvm.topi.sparse.utils import convert_model_dense_to_sparse

        mod, params = convert_model_dense_to_sparse(mod, params, random_params=True)
    return mod, params, input_shape, output_shape
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# the TVM runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rasp4b-64
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install the TVM RPC APK on the android device. Make sure you can pass the android rpc test.
# Then you have already registered your device. During tuning, you have to go to developer option
#   and enable "Keep screen awake while charging" and keep your phone charging to make it stable.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 4B with 64bit OS, and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rasp4b-64 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations. Here I use a Raspberry Pi 4b 4GB board
# as example with a 64bit OS (Ubuntu 20.04). In your setting, you should modify the target
# and device_key accordingly.
# set :code:`use_ndk` to True if you use android phone.
#### DEVICE CONFIG ####
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
# FIXME(tmoreau89, merrymercy): We leave '-device=arm_cpu' out of the target string
# because we're sharing x86 op strategy.
# Cross-compilation target for a 64-bit ARM board with NEON SIMD.
target = tvm.target.Target("llvm -mtriple=aarch64-linux-gnu -mattr=+neon")
# Also replace this with the device key, rpc host and rpc port in your tracker
device_key = "rasp4b-64"
rpc_host = "127.0.0.1"
rpc_port = 9190
# Set this to True if you use ndk tools for cross compiling
# And also set the environment variable below to point to the cross compiler
use_ndk = False
# os.environ["TVM_NDK_CC"] = "/usr/bin/aarch64-linux-gnu-g++"
#### TUNING OPTION ####
network = "mobilenet"
use_sparse = False
batch_size = 1
layout = "NHWC"
dtype = "float32"
# Tuning records are written here; the name encodes the configuration so
# logs from different networks/targets do not collide.
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
# Extract tasks from the network
# Build the Relay module (optionally sparsified) for the chosen network.
print("Get model...")
mod, params, input_shape, output_shape = get_network(
    network, batch_size, layout, dtype=dtype, use_sparse=use_sparse
)
# Extract tasks from the network. Each task is one tunable subgraph; its
# weight is how many times that subgraph appears in the whole network.
print("Extract tasks...")
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for idx, task in enumerate(tasks):
    # Print each task's workload key and compute DAG for inspection.
    print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
    print(task.compute_dag)
#################################################################
# Tuning and Evaluation
# ---------------------
# Now, we set some options for tuning and launch the search tasks
#
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`800 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 29 tasks in resnet-50, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRunner` for more parameters.
#
# After auto-tuning, we can compile the network with the best schedules we found.
# All measurement records are dumped into the log file during auto-tuning,
# so we can read the log file and load the best schedules.
def tune_and_evaluate():
    """Tune all tasks over RPC, compile with the best schedules, then
    upload the compiled module to the remote ARM device and benchmark it.

    Uses the module-level globals ``tasks``, ``task_weights``, ``log_file``,
    ``target``, ``device_key``, ``rpc_host``, ``rpc_port``, ``use_ndk``,
    ``mod``, ``params``, ``input_shape`` and ``dtype``. Requires a running
    RPC tracker with the device registered.
    """
    print("Begin tuning...")
    # The task scheduler splits the trial budget across tasks dynamically.
    tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
    tune_option = auto_scheduler.TuningOptions(
        num_measure_trials=200,  # change this to 20000 to achieve the best performance
        builder=auto_scheduler.LocalBuilder(build_func="ndk" if use_ndk else "default"),
        runner=auto_scheduler.RPCRunner(
            device_key,
            host=rpc_host,
            port=rpc_port,
            timeout=30,
            repeat=1,
            min_repeat_ms=200,
            # Flushing the CPU cache between runs gives more stable timings.
            enable_cpu_cache_flush=True,
        ),
        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    )
    tuner.tune(tune_option)
    # Compile with the history best schedules recorded in the log file.
    print("Compile...")
    with auto_scheduler.ApplyHistoryBest(log_file):
        with tvm.transform.PassContext(
            opt_level=3, config={"relay.backend.use_auto_scheduler": True}
        ):
            lib = relay.build(mod, target=target, params=params)
    # Export library to a temp dir; Android needs an NDK-built shared object.
    tmp = tempdir()
    if use_ndk:
        from tvm.contrib import ndk

        filename = "net.so"
        lib.export_library(tmp.relpath(filename), ndk.create_shared)
    else:
        filename = "net.tar"
        lib.export_library(tmp.relpath(filename))
    # Upload module to device through the RPC tracker.
    print("Upload...")
    remote = auto_scheduler.utils.request_remote(device_key, rpc_host, rpc_port, timeout=10000)
    remote.upload(tmp.relpath(filename))
    rlib = remote.load_module(filename)
    # Create graph executor on the remote CPU and feed it random input
    # (only timing is measured, so input values do not matter).
    dev = remote.cpu()
    module = graph_executor.GraphModule(rlib["default"](dev))
    data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
    module.set_input("data", data_tvm)
    # Evaluate: repeat=3 runs of at least 500 ms each to amortize noise.
    print("Evaluate inference time cost...")
    print(module.benchmark(dev, repeat=3, min_repeat_ms=500))
# We do not run the tuning in our webpage server since the server doesn't have a Raspberry Pi,
# or device tracker running.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate()
######################################################################
# .. note:: Explaining the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.013 | 0.31 | 64 |
# | 1 | 0.845 | 2.43 | 448 |
# | 2 | 0.046 | -0.00 | 64 |
# | 3 | 4.194 | 24.53 | 2112 |
# | 4 | 0.109 | 9.21 | 64 |
# | 5 | 1.759 | 29.27 | 896 |
# | 6 | 0.083 | 6.01 | 64 |
# | 7 | 3.084 | 33.38 | 7680 |
# | 8 | 0.136 | 14.78 | 384 |
# | 9 | 1.349 | 38.23 | 768 |
# | 10 | 0.133 | 7.55 | 128 |
# | 11 | 2.747 | 37.56 | 1536 |
# | 12 | 0.338 | 11.87 | 192 |
# | 13 | 1.295 | 40.00 | 704 |
# | 14 | 0.482 | 4.16 | 256 |
# | 15 | 2.686 | 38.56 | 1344 |
# | 16 | 0.884 | 9.08 | 448 |
# | 17 | 1.332 | 39.18 | 704 |
# | 18 | 1.045 | 3.84 | 576 |
# | 19 | 1.391 | 38.09 | 704 |
# | 20 | 0.777 | 10.34 | 448 |
# | 21 | 0.739 | 30.97 | 448 |
# -------------------------------------------------
# Estimated total latency: 38.347 ms Trials: 19992 Used time : 19260 s Next ID: 3
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "dmlc::Error"s errors, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
# you should be able to do the compilation (the section below).
#
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target CPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
| 18,708 | 41.424036 | 101 | py |
tvm | tvm-main/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling a Neural Network for mali GPU
=============================================
**Author**: `Zhao Wu <https://github.com/FrozenGene>`_
Auto-tuning for specific devices and workloads is critical for getting the
best performance. This is a tutorial on how to tune a whole neural
network for mali GPU with the auto-scheduler.
To auto-tune a neural network, we partition the network into small subgraphs and
tune them independently. Each subgraph is treated as one search task.
A task scheduler slices the time and dynamically allocates time resources to
these tasks. The task scheduler predicts the impact of each task on the end-to-end
execution time and prioritizes the one that can reduce the execution time the most.
For each subgraph, we use the compute declaration in :code:`tvm/python/topi` to
get the computational DAG in the tensor expression form.
We then use the auto-scheduler to construct a search space of this DAG and search
for good schedules (low-level optimizations).
Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any
schedule templates. In other words, the auto-scheduler only uses the compute declarations
in :code:`tvm/python/topi` and does not use existing schedule templates.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
import numpy as np
import tvm
from tvm import relay, auto_scheduler
import tvm.relay.testing
from tvm.contrib import graph_executor
import os
#################################################################
# Define a Network
# ----------------
# First, we need to define the network with relay frontend API.
# We can load some pre-defined network from :code:`tvm.relay.testing`.
# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow
# (see :ref:`front end tutorials<tutorial-frontend>`).
#
# For convolutional neural networks, although auto-scheduler can work correctly
# with any layout, we found the best performance is typically achieved with NHWC layout.
# We also implemented more optimizations for NHWC layout with the auto-scheduler.
# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.
# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.
def get_network(name, batch_size, layout="NHWC", dtype="float32"):
    """Get the symbol definition and random weight of a network.

    Parameters
    ----------
    name : str
        Network name: "resnet-N", "resnet3d-N", "mobilenet",
        "squeezenet_v1.1", "inception_v3" or "mxnet".
    batch_size : int
        Batch dimension of the input tensor.
    layout : str
        "NHWC" or "NCHW". The auto-scheduler prefers NHWC.
    dtype : str
        Data type of weights and inputs.

    Returns
    -------
    (mod, params, input_shape, output_shape)
        Relay module, parameter dict, and the input/output tensor shapes.
    """
    # auto-scheduler prefers NHWC layout
    if layout == "NHWC":
        image_shape = (224, 224, 3)
    elif layout == "NCHW":
        image_shape = (3, 224, 224)
    else:
        raise ValueError("Invalid layout: " + layout)
    input_shape = (batch_size,) + image_shape
    output_shape = (batch_size, 1000)
    if name.startswith("resnet-"):
        n_layer = int(name.split("-")[1])
        mod, params = relay.testing.resnet.get_workload(
            num_layers=n_layer,
            batch_size=batch_size,
            layout=layout,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name.startswith("resnet3d-"):
        n_layer = int(name.split("-")[1])
        # Bug fix: this branch previously called the 2-D resnet builder,
        # making it an exact duplicate of the "resnet-" branch. The 3-D
        # variant must use the resnet_3d workload (as in the upstream TVM
        # auto-scheduler tutorials).
        mod, params = relay.testing.resnet_3d.get_workload(
            num_layers=n_layer,
            batch_size=batch_size,
            layout=layout,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(
            batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape
        )
    elif name == "squeezenet_v1.1":
        assert layout == "NCHW", "squeezenet_v1.1 only supports NCHW layout"
        mod, params = relay.testing.squeezenet.get_workload(
            version="1.1",
            batch_size=batch_size,
            dtype=dtype,
            image_shape=image_shape,
        )
    elif name == "inception_v3":
        # Inception v3 uses a 299x299 input, so override input_shape here.
        input_shape = (batch_size, 3, 299, 299) if layout == "NCHW" else (batch_size, 299, 299, 3)
        mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
    elif name == "mxnet":
        # an example for mxnet model
        from mxnet.gluon.model_zoo.vision import get_model
        assert layout == "NCHW"
        block = get_model("resnet50_v1", pretrained=True)
        mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
        # Append a softmax to the imported network so it outputs probabilities.
        net = mod["main"]
        net = relay.Function(
            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
        )
        mod = tvm.IRModule.from_expr(net)
    return mod, params, input_shape, output_shape
# Define the neural network and compilation target.
network = "mobilenet"
batch_size = 1
layout = "NHWC"
# Set this to True if you use ndk tools for cross compiling
use_ndk = True
# Path to cross compiler
os.environ["TVM_NDK_CC"] = "/usr/bin/aarch64-linux-gnu-g++"
# Device code targets the Mali GPU via OpenCL; host code is cross-compiled
# for 64-bit ARM Linux.
target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu")
dtype = "float32"
# Tuning log, named after the network/layout/batch/target so runs don't collide.
log_file = "%s-%s-B%d-%s.json" % (network, layout, batch_size, target.kind.name)
#################################################################
# Start an RPC Tracker and Register Devices to the Tracker
# --------------------------------------------------------
# Please refer to the "Start RPC Tracker" and "Register Devices to RPC Tracker" sections
# in this :ref:`tutorial <tutorials-autotvm-start-rpc-tracker>` to start an RPC tracker
# and register devices to the tracker.
# Replace this with the device key in your tracker
device_key = "rk3399"
#################################################################
# Extract Search Tasks
# --------------------
# Next, we extract the search tasks and their weights from a network.
# The weight of a task is the number of appearances of the task's subgraph
# in the whole network.
# By using the weight, we can approximate the end-to-end latency of the network
# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the
# latency of a task and :code:`weight[t]` is the weight of the task.
# The task scheduler will just optimize this objective.
# Extract tasks from the network
print("Extract tasks...")
mod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
# Print each extracted subgraph so the user can inspect what will be tuned.
for idx, task in enumerate(tasks):
    print("========== Task %d  (workload key: %s) ==========" % (idx, task.workload_key))
    print(task.compute_dag)
######################################################################
# .. note:: How to get the hardware parameters from remote device
#
# .. code-block:: python
#
# from tvm.auto_scheduler.utils import request_remote
# remote = request_remote(device_key, "127.0.0.1", 9190)
# dev = remote.cl()
# max_shared_memory_per_block = dev.max_shared_memory_per_block
#   # There is no explicit local memory limitation
# # so we can use INT32_MAX to disable the check on local_memory.
# max_local_memory_per_block = 2147483647 # INT32_MAX
# max_threads_per_block = dev.max_threads_per_block
# max_vthread_extent = int(dev.warp_size / 4) if int(dev.warp_size / 4) > 1 else dev.warp_size
# warp_size = dev.warp_size
# hardware_params = auto_scheduler.HardwareParams(-1, 16, 64,
# max_shared_memory_per_block, max_local_memory_per_block,
# max_threads_per_block, max_vthread_extent, warp_size)
#
# Now you could pass it to search task and tune
#
# .. code-block:: python
#
# tasks, task_weights = auto_scheduler.extract_tasks(
# mod["main"], params, target, hardware_params = hardware_params
# )
#
#################################################################
# Tuning and Evaluate
# -------------------
# Now, we set some options for tuning, launch the search tasks and evaluate the end-to-end performance
#
# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.
# You can set it to a small number (e.g., 200) for a fast demonstrative run.
# In practice, we recommend setting it around :code:`800 * len(tasks)`,
# which is typically enough for the search to converge.
# For example, there are 29 tasks in resnet-50, so we can set it as 20000.
# You can adjust this parameter according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions`,
# :any:`auto_scheduler.LocalRunner` for more parameters.
#
def tune_and_evaluate():
    """Run the auto-scheduler search, compile the network, deploy it to the
    remote device through the RPC tracker, and benchmark inference time."""
    print("Begin tuning...")
    # The task scheduler splits the measurement budget across all tasks.
    scheduler = auto_scheduler.TaskScheduler(tasks, task_weights)
    options = auto_scheduler.TuningOptions(
        num_measure_trials=200,  # change this to 20000 to achieve the best performance
        builder=auto_scheduler.LocalBuilder(build_func="ndk" if use_ndk else "default"),
        runner=auto_scheduler.RPCRunner(
            device_key, host="127.0.0.1", port=9190, repeat=3, timeout=50
        ),
        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    )
    scheduler.tune(options)

    # Compile the whole network using the best schedules recorded in the log.
    print("Compile...")
    with auto_scheduler.ApplyHistoryBest(log_file), tvm.transform.PassContext(
        opt_level=3, config={"relay.backend.use_auto_scheduler": True}
    ):
        compiled_lib = relay.build(mod, target, params=params)

    # Request the remote device from the tracker and deploy the library to it.
    print("=============== Request Remote ===============")
    from tvm.auto_scheduler.utils import request_remote
    from tvm.contrib import utils, ndk

    remote = request_remote(device_key, "127.0.0.1", 9190)
    dev = remote.cl()
    tmp_dir = utils.tempdir()
    lib_name = "deploy_lib.so"
    lib_path = tmp_dir.relpath(lib_name)
    compiled_lib.export_library(lib_path, ndk.create_shared)
    remote.upload(lib_path)
    loaded_lib = remote.load_module(lib_name)
    runtime_module = graph_executor.GraphModule(loaded_lib["default"](dev))

    # Feed a random input tensor of the network's expected shape.
    random_input = (np.random.uniform(size=input_shape)).astype(dtype)
    runtime_module.set_input("data", tvm.nd.array(random_input))

    # Benchmark on-device inference.
    print("Evaluate inference time cost...")
    print(runtime_module.benchmark(dev, repeat=3, min_repeat_ms=500))
# We do not run the tuning in our webpage server since server doesn't have mali gpu.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate()
######################################################################
# .. note:: Explain the printed information during tuning
#
# During the tuning, a lot of information will be printed on the console.
# They are used for debugging purposes. The most important info is the output
# of the task scheduler. The following table is a sample output.
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# ------------------------------ [ Task Scheduler ]
# ----------------------------------------------------------------------
# | ID | Latency (ms) | Speed (GFLOPS) | Trials |
# -------------------------------------------------
# | 0 | 0.010 | 0.40 | 64 |
# | 1 | 0.087 | 47.19 | 64 |
# | 2 | 0.008 | -0.00 | 64 |
# | 3 | 0.177 | 582.07 | 64 |
# | 4 | 0.268 | 862.37 | 256 |
# | 5 | 0.166 | 621.13 | 128 |
# | 6 | 0.170 | 605.10 | 128 |
# | 7 | 0.128 | 403.20 | 64 |
# | 8 | 0.189 | 545.71 | 64 |
# | 9 | 0.231 | 1001.01 | 448 |
# | 10 | 0.155 | 664.80 | 256 |
# | 11 | 0.155 | 662.86 | 256 |
# | 12 | 0.119 | 434.08 | 64 |
# | 13 | 0.199 | 522.13 | 64 |
# | 14 | 0.235 | 986.56 | 320 |
# | 15 | 0.149 | 689.13 | 128 |
# | 16 | 0.155 | 664.80 | 192 |
# | 17 | 0.151 | 340.64 | 64 |
# | 18 | 0.176 | 597.55 | 128 |
# | 19 | 0.220 | 1054.37 | 192 |
# | 20 | 0.150 | 686.01 | 128 |
# | 21 | 0.159 | 650.88 | 128 |
# | 22 | 0.073 | 358.19 | 64 |
# | 23 | 0.031 | 70.63 | 64 |
# | 24 | 0.251 | 947.73 | 128 |
# | 25 | 0.157 | 652.47 | 128 |
# | 26 | 0.215 | 954.84 | 128 |
# | 27 | 0.237 | 868.92 | 128 |
# | 28 | 0.266 | 774.06 | 128 |
# -------------------------------------------------
# Estimated total latency: 10.016 ms Trials: 3992 Used time : 1131 s Next ID: 15
#
# This table lists the latency and (estimated) speed of all tasks.
# It also lists the allocation of measurement trials for all tasks.
# The last line prints the total weighted latency of these tasks,
# which can be a rough estimation of the end-to-end execution time
# of the network.
# The last line also prints the total number of measurement trials,
# total time spent on auto-tuning and the id of the next task to tune.
#
# There will also be some "tvm::Error" messages, because the
# auto-scheduler will try some invalid schedules.
# You can safely ignore them if the tuning can continue, because these
# errors are isolated from the main process.
#
######################################################################
# .. note:: Terminate the tuning earlier
#
# You can terminate the tuning earlier by forcibly killing this process.
# As long as you get at least one valid schedule for each task in the log file,
#   you should be able to do the compilation (the section below).
#
#################################################################
# Other Tips
# ----------
# 1. During the tuning, the auto-scheduler needs to compile many programs and
#    extract features from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`
# to distill the large log file and only save the best useful records.
# 3. You can resume a search from the previous log file. You just need to
# add a new argument :code:`load_log_file` when creating the task scheduler
# in function :code:`run_tuning`. Say,
# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
# 4. If you have multiple target GPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
| 16,209 | 43.903047 | 110 | py |
tvm | tvm-main/gallery/how_to/work_with_relay/build_gcn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Building a Graph Convolutional Network
======================================
**Author**: `Yulun Yao <https://yulunyao.io/>`_, \
`Chien-Yu Lin <https://homes.cs.washington.edu/~cyulin/>`_
This article is an introductory tutorial to build a Graph Convolutional Network (GCN) with Relay.
In this tutorial, we will run our GCN on Cora dataset to demonstrate.
Cora dataset is a common benchmark for Graph Neural Networks (GNN) and frameworks that support GNN training and inference.
We directly load the dataset from DGL library to do the apples to apples comparison against DGL.
.. code-block:: bash
%%shell
pip install torch==2.0.0
pip install dgl==v1.0.0
Please refer to DGL doc for installation at
https://docs.dgl.ai/install/index.html.
Please refer to PyTorch guide for PyTorch installation at
https://pytorch.org/get-started/locally/.
"""
######################################################################
# Define GCN in DGL with PyTorch backend
# --------------------------------------
#
# DGL example: https://github.com/dmlc/dgl/tree/master/examples/pytorch/gcn
# This part reuses the code from the above example.
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import networkx as nx
from dgl.nn.pytorch import GraphConv
class GCN(nn.Module):
    """A plain multi-layer GCN: one input GraphConv, ``n_layers - 1`` hidden
    GraphConv layers, and a final GraphConv producing class scores (no
    activation on the output layer)."""

    def __init__(self, g, n_infeat, n_hidden, n_classes, n_layers, activation):
        super(GCN, self).__init__()
        self.g = g
        # Assemble the layer stack first, then register it as a ModuleList so
        # its parameters are tracked by PyTorch.
        convs = [GraphConv(n_infeat, n_hidden, activation=activation)]
        for _ in range(n_layers - 1):
            convs.append(GraphConv(n_hidden, n_hidden, activation=activation))
        convs.append(GraphConv(n_hidden, n_classes))
        self.layers = nn.ModuleList(convs)

    def forward(self, features):
        # DGL changed the GraphConv call signature after 0.3:
        # newer versions take (graph, features), older ones (features, graph).
        uses_new_api = dgl.__version__ > "0.3"
        h = features
        for conv in self.layers:
            h = conv(self.g, h) if uses_new_api else conv(h, self.g)
        return h
######################################################################
# Define the functions to load dataset and evaluate accuracy
# ----------------------------------------------------------
# You may substitute this part with your own dataset, here we load data from DGL
from dgl.data import load_data
from collections import namedtuple
def evaluate(g, logits):
    """Return test-set accuracy of ``logits`` against the labels stored on graph ``g``.

    ``g.ndata`` must provide "label" (per-node class ids) and "test_mask"
    (boolean mask selecting the test nodes).
    """
    labels = g.ndata["label"]
    mask = g.ndata["test_mask"]
    # Predicted class is the arg-max over the class axis.
    predictions = logits.argmax(axis=1)
    # torch.Tensor(...) also accepts numpy predictions, matching the original.
    correct = torch.Tensor(predictions[mask]) == labels[mask]
    return correct.float().mean()
######################################################################
# Load the data and set up model parameters
# -----------------------------------------
"""
Parameters
----------
num_layers: int
    number of hidden layers
num_hidden: int
number of the hidden units in the hidden layer
infeat_dim: int
dimension of the input features
num_classes: int
dimension of model output (Number of classes)
"""
# Load the Cora citation-graph dataset shipped with DGL (a single graph).
dataset = dgl.data.CoraGraphDataset()
dgl_g = dataset[0]
num_layers = 1
num_hidden = 16
features = dgl_g.ndata["feat"]
infeat_dim = features.shape[1]
num_classes = dataset.num_classes
######################################################################
# Set up the DGL-PyTorch model and get the golden results
# -------------------------------------------------------
#
# The weights are trained with https://github.com/dmlc/dgl/blob/master/examples/pytorch/gcn/train.py
from tvm.contrib.download import download_testdata
features = torch.FloatTensor(features)
torch_model = GCN(dgl_g, infeat_dim, num_hidden, num_classes, num_layers, F.relu)
# Download the pretrained weights
model_url = "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_cora.torch"
model_path = download_testdata(model_url, "gcn_cora.pickle", module="gcn_model")
# Load the weights into the model
torch_model.load_state_dict(torch.load(model_path))
######################################################################
# Run the DGL model and test for accuracy
# ---------------------------------------
# eval() + no_grad(): inference only, no dropout/grad bookkeeping.
torch_model.eval()
with torch.no_grad():
    logits_torch = torch_model(features)
print("Print the first five outputs from DGL-PyTorch execution\n", logits_torch[:5])
acc = evaluate(dgl_g, logits_torch.numpy())
print("Test accuracy of DGL results: {:.2%}".format(acc))
######################################################################
# Define Graph Convolution Layer in Relay
# ---------------------------------------
# To run GCN on TVM, we first need to implement Graph Convolution Layer.
# You may refer to https://github.com/dmlc/dgl/blob/master/python/dgl/nn/mxnet/conv/graphconv.py for a GraphConv Layer implemented in DGL with MXNet Backend
#
# The layer is defined with below operations, note that we apply two transposes to keep adjacency matrix on right hand side of sparse_dense operator,
# this method is temporary and will be updated in next few weeks when we have sparse matrix transpose and support for left sparse operator.
#
# .. math::
#
# \mbox{GraphConv}(A, H, W) = A * H * W
# = ((H * W)^t * A^t)^t
# = ((W^t * H^t) * A^t)^t
from tvm import relay
from tvm.contrib import graph_executor
import tvm
from tvm import te
def GraphConv(layer_name, input_dim, output_dim, adj, input, norm=None, bias=True, activation=None):
    """Build one graph-convolution layer as a Relay expression.

    Computes ``GraphConv(A, H, W) = A * H * W`` via the transposed form
    ``((W^t * H^t) * A^t)^t`` so the sparse adjacency stays on the
    right-hand side of ``sparse_dense``.

    Parameters
    ----------
    layer_name: str
        Name prefix used for this layer's weight/bias parameter variables.
    input_dim: int
        Input dimension per node feature.
    output_dim: int
        Output dimension per node feature.
    adj: namedtuple
        Adjacency matrix in CSR form (`data`, `indices`, `indptr`).
    input: relay.Expr
        Input features with shape [num_nodes, input_dim].
    norm: relay.Expr
        Optional per-node normalization applied before and after the convolution.
    bias: bool
        If True, add a learnable bias.
    activation: callable
        Optional Relay activation applied to the output.

    Returns
    -------
    tvm.relay.Expr
        Output features with shape [num_nodes, output_dim].
    """
    if norm is not None:
        input = relay.multiply(input, norm)
    weight = relay.var(layer_name + ".weight", shape=(input_dim, output_dim))
    # (W^t * H^t): dense on transposed operands.
    projected = relay.nn.dense(relay.transpose(weight), input)
    # ((W^t * H^t) * A^t)^t == A * H * W
    result = relay.transpose(relay.nn.sparse_dense(projected, adj))
    if norm is not None:
        result = relay.multiply(result, norm)
    # NOTE: `is True` (not truthiness) is deliberate, mirroring the original contract.
    if bias is True:
        _bias = relay.var(layer_name + ".bias", shape=(output_dim, 1))
        result = relay.nn.bias_add(result, _bias, axis=-1)
    return result if activation is None else activation(result)
######################################################################
# Prepare the parameters needed in the GraphConv layers
# -----------------------------------------------------
#
import numpy as np
import networkx as nx
def prepare_params(g):
    """Extract node features, CSR adjacency and degree normalization from DGL graph ``g``.

    Returns a dict with keys "infeats", "g_data", "indices", "indptr", "norm".
    """
    node_feats = g.ndata["feat"].numpy().astype("float32")
    # Generate adjacency matrix in sparse CSR form via networkx.
    adjacency = nx.to_scipy_sparse_array(dgl.to_networkx(g))
    # Normalization w.r.t. node degrees: 1/sqrt(deg) as a column vector.
    in_degrees = [g.in_degrees(v) for v in range(g.number_of_nodes())]
    norm = np.power(in_degrees, -0.5).astype("float32").reshape((-1, 1))
    return {
        "infeats": node_feats,
        "g_data": adjacency.data.astype("float32"),
        "indices": adjacency.indices.astype("int32"),
        "indptr": adjacency.indptr.astype("int32"),
        "norm": norm,
    }
params = prepare_params(dgl_g)
# Check shape of features and the validity of adjacency matrix
assert len(params["infeats"].shape) == 2
assert (
    params["g_data"] is not None and params["indices"] is not None and params["indptr"] is not None
)
# CSR invariant: one indptr entry per node plus one.
assert params["infeats"].shape[0] == params["indptr"].shape[0] - 1
######################################################################
# Put layers together
# -------------------
# Define input features, norms, adjacency matrix in Relay
infeats = relay.var("infeats", shape=features.shape)
norm = relay.Constant(tvm.nd.array(params["norm"]))
g_data = relay.Constant(tvm.nd.array(params["g_data"]))
indices = relay.Constant(tvm.nd.array(params["indices"]))
indptr = relay.Constant(tvm.nd.array(params["indptr"]))
Adjacency = namedtuple("Adjacency", ["data", "indices", "indptr"])
adj = Adjacency(g_data, indices, indptr)
# Construct the 2-layer GCN
layers = []
layers.append(
    GraphConv(
        layer_name="layers.0",
        input_dim=infeat_dim,
        output_dim=num_hidden,
        adj=adj,
        input=infeats,
        norm=norm,
        activation=relay.nn.relu,
    )
)
layers.append(
    GraphConv(
        layer_name="layers.1",
        input_dim=num_hidden,
        output_dim=num_classes,
        adj=adj,
        input=layers[-1],
        norm=norm,
        activation=None,
    )
)
# Analyze free variables and generate Relay function
output = layers[-1]
######################################################################
# Compile and run with TVM
# ------------------------
#
# Export the weights from PyTorch model to Python Dict
model_params = {}
for param_tensor in torch_model.state_dict():
    model_params[param_tensor] = torch_model.state_dict()[param_tensor].numpy()
# Layer names "layers.N.weight"/"layers.N.bias" match the relay.var names
# created in GraphConv above, so bind_params_by_name can attach them.
for i in range(num_layers + 1):
    params["layers.%d.weight" % (i)] = model_params["layers.%d.weight" % (i)]
    params["layers.%d.bias" % (i)] = model_params["layers.%d.bias" % (i)]
# Set the TVM build target
target = "llvm" # Currently only support `llvm` as target
func = relay.Function(relay.analysis.free_vars(output), output)
func = relay.build_module.bind_params_by_name(func, params)
mod = tvm.IRModule()
mod["main"] = func
# Build with Relay
with tvm.transform.PassContext(opt_level=0): # Currently only support opt_level=0
    lib = relay.build(mod, target, params=params)
# Generate graph executor
dev = tvm.device(target, 0)
m = graph_executor.GraphModule(lib["default"](dev))
######################################################################
# Run the TVM model, test for accuracy and verify with DGL
# --------------------------------------------------------
m.run()
logits_tvm = m.get_output(0).numpy()
print("Print the first five outputs from TVM execution\n", logits_tvm[:5])
acc = evaluate(dgl_g, logits_tvm)
print("Test accuracy of TVM results: {:.2%}".format(acc))
import tvm.testing
# Verify the results with the DGL model
tvm.testing.assert_allclose(logits_torch, logits_tvm, atol=1e-3)
| 11,740 | 33.031884 | 156 | py |
tvm | tvm-main/gallery/how_to/extend_tvm/bring_your_own_datatypes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Bring Your Own Datatypes to TVM
===============================
**Authors**: `Gus Smith <https://github.com/gussmith23>`_, `Andrew Liu <https://github.com/hypercubestart>`_
In this tutorial, we will show you how to utilize the Bring Your Own Datatypes framework to use your own custom datatypes in TVM.
Note that the Bring Your Own Datatypes framework currently only handles **software emulated versions of datatypes**.
The framework does not support compiling for custom accelerator datatypes out-of-the-box.
Datatype Libraries
------------------
The Bring Your Own Datatypes allows users to register their own datatype implementations alongside TVM's native datatypes (such as ``float``).
In the wild, these datatype implementations often appear as libraries.
For example:
- `libposit <https://github.com/cjdelisle/libposit>`_, a posit library
- `Stillwater Universal <https://github.com/stillwater-sc/universal>`_, a library with posits, fixed-point numbers, and other types
- `SoftFloat <https://github.com/ucb-bar/berkeley-softfloat-3>`_, Berkeley's software implementation of IEEE 754 floating-point
The Bring Your Own Datatypes enables users to plug these datatype implementations into TVM!
In this section, we will use an example library we have already implemented, located at ``3rdparty/byodt/myfloat.cc``.
This datatype, which we dubbed "myfloat", is really just an IEEE-754 float under-the-hood, but it serves a useful example
to show that any datatype can be used in the BYODT framework.
Setup
-----
Since we do not use any 3rdparty library, there is no setup needed.
If you would like to try this with your own datatype library, first bring the library's functions into the process space with ``CDLL``:
.. code-block:: python
ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
"""
######################
# A Simple TVM Program
# --------------------
#
# We'll begin by writing a simple program in TVM; afterwards, we will re-write it to use custom datatypes.
import tvm
from tvm import relay
# Our basic program: Z = X + Y
x = relay.var("x", shape=(3,), dtype="float32")
y = relay.var("y", shape=(3,), dtype="float32")
z = x + y
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
######################################################################
# Now, we create random inputs to feed into this program using numpy:
import numpy as np
np.random.seed(23) # for reproducibility
x_input = np.random.rand(3).astype("float32")
y_input = np.random.rand(3).astype("float32")
print("x: {}".format(x_input))
print("y: {}".format(y_input))
######################################################################
# Finally, we're ready to run the program:
z_output = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output))
######################################################################
# Adding Custom Datatypes
# -----------------------
# Now, we will do the same, but we will use a custom datatype for our intermediate computation.
#
# We use the same input variables ``x`` and ``y`` as above, but before adding ``x + y``, we first cast both ``x`` and ``y`` to a custom datatype via the ``relay.cast(...)`` call.
#
# Note how we specify the custom datatype: we indicate it using the special ``custom[...]`` syntax.
# Additionally, note the "32" after the datatype: this is the bitwidth of the custom datatype. This tells TVM that each instance of ``myfloat`` is 32 bits wide.
# First attempt: casting to the unregistered custom datatype is expected
# to raise a TVMError, which we demonstrate by printing its last line.
try:
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
        y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
        z_myfloat = x_myfloat + y_myfloat
        z = relay.cast(z_myfloat, dtype="float32")
except tvm.TVMError as e:
    # Print last line of error
    print(str(e).split("\n")[-1])
######################################################################
# Trying to generate this program throws an error from TVM.
# TVM does not know how to handle any custom datatype out of the box!
# We first have to register the custom type with TVM, giving it a name and a type code:
tvm.target.datatype.register("myfloat", 150)
######################################################################
# Note that the type code, 150, is currently chosen manually by the user.
# See ``TVMTypeCode::kCustomBegin`` in `include/tvm/runtime/c_runtime_api.h <https://github.com/apache/tvm/blob/main/include/tvm/runtime/data_type.h>`_.
# Now we can generate our program again:
x_myfloat = relay.cast(x, dtype="custom[myfloat]32")
y_myfloat = relay.cast(y, dtype="custom[myfloat]32")
z_myfloat = x_myfloat + y_myfloat
z = relay.cast(z_myfloat, dtype="float32")
program = relay.Function([x, y], z)
module = tvm.IRModule.from_expr(program)
module = relay.transform.InferType()(module)
######################################################################
# Now we have a Relay program that uses myfloat!
print(program)
######################################################################
# Now that we can express our program without errors, let's try running it!
# This run is expected to fail (no lowering function for Cast yet);
# if it ever succeeds, print the computed result.
try:
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
        # Bug fix: print the computed output z_output_myfloat, not the Relay
        # expression y_myfloat (the later identical snippet prints the output).
        print("z: {}".format(z_output_myfloat))
except tvm.TVMError as e:
    # Print last line of error
    print(str(e).split("\n")[-1])
######################################################################
# Now, trying to compile this program throws an error.
# Let's dissect this error.
#
# The error is occurring during the process of lowering the custom datatype code to code that TVM can compile and run.
# TVM is telling us that it cannot find a *lowering function* for the ``Cast`` operation, when casting from source type 2 (``float``, in TVM), to destination type 150 (our custom datatype).
# When lowering custom datatypes, if TVM encounters an operation over a custom datatype, it looks for a user-registered *lowering function*, which tells it how to lower the operation to an operation over datatypes it understands.
# We have not told TVM how to lower ``Cast`` operations for our custom datatypes; thus, the source of this error.
#
# To fix this error, we simply need to specify a lowering function:
# Register how to lower a float32 -> myfloat32 Cast: call the external
# library function "FloatToCustom32".
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func(
        {
            (32, 32): "FloatToCustom32", # cast from float32 to myfloat32
        }
    ),
    "Cast",
    "llvm",
    "float",
    "myfloat",
)
######################################################################
# The ``register_op(...)`` call takes a lowering function, and a number of parameters which specify exactly the operation which should be lowered with the provided lowering function.
# In this case, the arguments we pass specify that this lowering function is for lowering a ``Cast`` from ``float`` to ``myfloat`` for target ``"llvm"``.
#
# The lowering function passed into this call is very general: it should take an operation of the specified type (in this case, `Cast`) and return another operation which only uses datatypes which TVM understands.
#
# In the general case, we expect users to implement operations over their custom datatypes using calls to an external library.
# In our example, our ``myfloat`` library implements a ``Cast`` from ``float`` to 32-bit ``myfloat`` in the function ``FloatToCustom32``.
# To provide for the general case, we have made a helper function, ``create_lower_func(...)``,
# which does just this: given a dictionary, it replaces the given operation with a ``Call`` to the appropriate function name provided based on the op and the bit widths.
# It additionally removes usages of the custom datatype by storing the custom datatype in an opaque ``uint`` of the appropriate width; in our case, a ``uint32_t``.
# For more information, see `the source code <https://github.com/apache/tvm/blob/main/python/tvm/target/datatype.py>`_.
# We can now re-try running the program:
try:
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
        print("z: {}".format(z_output_myfloat))
except tvm.TVMError as e:
    # Print last line of error
    print(str(e).split("\n")[-1])
######################################################################
# This new error tells us that the ``Add`` lowering function is not found, which is good news, as it's no longer complaining about the ``Cast``!
# We know what to do from here: we just need to register the lowering functions for the other operations in our program.
#
# Note that for ``Add``, ``create_lower_func`` takes in a dict where the key is an integer.
# For ``Cast`` operations, we require a 2-tuple to specify the ``src_bit_length`` and the ``dest_bit_length``,
# while for all other operations, the bit length is the same between the operands so we only require one integer to specify ``bit_length``.
# Register the remaining lowerings: myfloat32 Add and myfloat32 -> float32 Cast.
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "Custom32Add"}),
    "Add",
    "llvm",
    "myfloat",
)
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({(32, 32): "Custom32ToFloat"}),
    "Cast",
    "llvm",
    "myfloat",
    "float",
)
# Now, we can run our program without errors.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
    z_output_myfloat = relay.create_executor(mod=module).evaluate()(x_input, y_input)
print("z: {}".format(z_output_myfloat))
print("x:\t\t{}".format(x_input))
print("y:\t\t{}".format(y_input))
print("z (float32):\t{}".format(z_output))
print("z (myfloat32):\t{}".format(z_output_myfloat))
# Perhaps as expected, the ``myfloat32`` results and ``float32`` are exactly the same!
######################################################################
# Running Models With Custom Datatypes
# ------------------------------------
#
# We will first choose the model which we would like to run with myfloat.
# In this case we use `Mobilenet <https://arxiv.org/abs/1704.04861>`_.
# We choose Mobilenet due to its small size.
# In this alpha state of the Bring Your Own Datatypes framework, we have not implemented any software optimizations for running software emulations of custom datatypes; the result is poor performance due to many calls into our datatype emulation library.
#
# First let us define two helper functions to get the mobilenet model and a cat image.
def get_mobilenet():
    """Fetch a pretrained MobileNet 0.25 from the MXNet model zoo and import it into Relay.

    Returns the ``(module, params)`` pair produced by ``relay.frontend.from_mxnet``.
    """
    input_shape = (1, 3, 224, 224)
    from mxnet.gluon.model_zoo.vision import get_model

    gluon_block = get_model("mobilenet0.25", pretrained=True)
    return relay.frontend.from_mxnet(gluon_block, {"data": input_shape})
def get_cat_image():
    """Download the standard test cat image and return it as a (1, 3, 224, 224) float32 array."""
    from tvm.contrib.download import download_testdata
    from PIL import Image

    url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png"
    local_path = download_testdata(url, "cat.png", module="data")
    resized = Image.open(local_path).resize((224, 224))
    # CoreML's standard model image format is BGR
    bgr_pixels = np.array(resized)[:, :, ::-1]
    nchw = np.transpose(bgr_pixels, (2, 0, 1))[np.newaxis, :]
    return np.asarray(nchw, dtype="float32")
module, params = get_mobilenet()
######################################################################
# It's easy to execute MobileNet with native TVM:
ex = tvm.relay.create_executor("graph", mod=module, params=params)
input = get_cat_image()
result = ex.evaluate()(input).numpy()
# print first 10 elements
print(result.flatten()[:10])
######################################################################
# Now, we would like to change the model to use myfloat internally. To do so, we need to convert the network. To do this, we first define a function which will help us convert tensors:
def convert_ndarray(dst_dtype, array):
    """Converts an NDArray into the specified datatype"""
    # Build a one-op Relay function that just casts its argument, then run it.
    src_var = relay.var("x", shape=array.shape, dtype=str(array.dtype))
    cast_fn = relay.Function([src_var], src_var.astype(dst_dtype))
    # Vectorization is not implemented for custom datatypes, so disable it.
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        return relay.create_executor("graph").evaluate(cast_fn)(array)
######################################################################
# Now, to actually convert the entire network, we have written `a pass in Relay <https://github.com/gussmith23/tvm/blob/ea174c01c54a2529e19ca71e125f5884e728da6e/python/tvm/relay/frontend/change_datatype.py#L21>`_ which simply converts all nodes within the model to use the new datatype.
from tvm.relay.frontend.change_datatype import ChangeDatatype
src_dtype = "float32"
dst_dtype = "custom[myfloat]32"
module = relay.transform.InferType()(module)
# Currently, custom datatypes only work if you run simplify_inference beforehand
module = tvm.relay.transform.SimplifyInference()(module)
# Run type inference before changing datatype
module = tvm.relay.transform.InferType()(module)
# Change datatype from float to myfloat and re-infer types
cdtype = ChangeDatatype(src_dtype, dst_dtype)
expr = cdtype.visit(module["main"])
module = tvm.relay.transform.InferType()(module)
# We also convert the parameters:
params = {k: convert_ndarray(dst_dtype, v) for k, v in params.items()}
# We also need to convert our input:
input = convert_ndarray(dst_dtype, input)
# Finally, we can try to run the converted model:
try:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = tvm.relay.create_executor("graph", mod=module).evaluate(expr)(
input, **params
)
except tvm.TVMError as e:
print(str(e).split("\n")[-1])
######################################################################
# When we attempt to run the model, we get a familiar error telling us that more functions need to be registered for myfloat.
#
# Because this is a neural network, many more operations are required.
# Here, we register all the needed functions:
# Float immediates must be converted into the custom 32-bit representation.
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "FloatToCustom32"}),
    "FloatImm",
    "llvm",
    "myfloat",
)
# if_then_else and pure extern calls get the generic lowering helpers
# shipped with the datatype framework rather than a library function.
tvm.target.datatype.register_op(
    tvm.target.datatype.lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else"
)
tvm.target.datatype.register_op(
    tvm.target.datatype.lower_call_pure_extern,
    "Call",
    "llvm",
    "myfloat",
    intrinsic_name="tir.call_pure_extern",
)
# Arithmetic operators are lowered to calls into the emulation library;
# the {32: ...} dict maps the bit length to the external symbol name.
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "Custom32Mul"}),
    "Mul",
    "llvm",
    "myfloat",
)
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "Custom32Div"}),
    "Div",
    "llvm",
    "myfloat",
)
# sqrt and exp appear as tir intrinsic calls, so they are registered on
# "Call" with the intrinsic name instead of a dedicated op node.
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "Custom32Sqrt"}),
    "Call",
    "llvm",
    "myfloat",
    intrinsic_name="tir.sqrt",
)
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "Custom32Sub"}),
    "Sub",
    "llvm",
    "myfloat",
)
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "Custom32Exp"}),
    "Call",
    "llvm",
    "myfloat",
    intrinsic_name="tir.exp",
)
tvm.target.datatype.register_op(
    tvm.target.datatype.create_lower_func({32: "Custom32Max"}),
    "Max",
    "llvm",
    "myfloat",
)
# The minimum finite representable value (needed e.g. by max-pooling) is
# also provided by an external library call.
tvm.target.datatype.register_min_func(
    tvm.target.datatype.create_min_lower_func({32: "MinCustom32"}, "myfloat"),
    "myfloat",
)
######################################################################
# Note we are making use of two new functions: ``register_min_func`` and ``create_min_lower_func``.
#
# ``register_min_func`` takes in an integer ``num_bits`` for the bit length, and should return an operation
# representing the minimum finite representable value for the custom data type with the specified bit length.
#
# Similar to ``register_op`` and ``create_lower_func``, the ``create_min_lower_func`` handles the general case
# where the minimum representable custom datatype value is implemented using calls to an external library.
#
# Now we can finally run the model:
# Vectorization is not implemented with custom datatypes.
with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
result_myfloat = relay.create_executor(mod=module).evaluate(expr)(input, **params)
result_myfloat = convert_ndarray(src_dtype, result_myfloat).numpy()
# print first 10 elements
print(result_myfloat.flatten()[:10])
# Again, note that the output using 32-bit myfloat is exactly the same as with 32-bit floats,
# because myfloat is exactly a float!
np.testing.assert_array_equal(result, result_myfloat)
| 17,598 | 41.92439 | 286 | py |
tvm | tvm-main/gallery/how_to/extend_tvm/use_pass_infra.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""
.. _tutorial-use-pass-infra:
How to Use TVM Pass Infra
=========================
**Author**: `Zhi Chen <https://github.com/zhiics>`_
As the number of optimization passes increases in Relay/tir, it becomes intractable to
execute them and maintain their dependencies manually. Therefore, we have
introduced an infrastructure to manage the optimization passes and make it
applicable to different layers of the IR in the TVM stack.
The optimizations of a Relay/tir program could be applied at various granularity,
namely function-level and module-level using :py:class:`tvm.relay.transform.FunctionPass`/
:py:class:`tvm.tir.transform.PrimFuncPass` and :py:class:`tvm.transform.ModulePass`
respectively. Or users can rely on :py:class:`tvm.transform.Sequential` to apply a sequence of passes
on a Relay/tir program where the dependencies between passes can be resolved by the
pass infra. For more details about each type of these passes, please refer to
the :ref:`pass-infra`
This tutorial mainly demonstrates how developers can use the pass infra to perform
a certain optimization and create an optimization pipeline for a Relay program.
The same approach can be used for tir as well.
"""
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
###############################################################################
# Create An Example Relay Program
# -------------------------------
# First of all, we create a simple Relay program for the tutorial. This program
# will be used by various optimizations of the examples in this tutorial.
# Similarly, users can write a tir primitive function and apply the tir passes.
def example():
    """Build a small Relay function used throughout this tutorial.

    The graph contains a conv2d plus several redundant additions so that
    constant folding, common-subexpression elimination and operator fusion
    all have something to optimize.
    """
    const_shape = (1, 64, 54, 54)
    c = relay.const(np.empty(const_shape).astype("float32"))
    weight = relay.var("weight", shape=(64, 64, 3, 3))
    x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
    conv = relay.nn.conv2d(x, weight)
    doubled = relay.multiply(relay.add(c, c), relay.const(2, "float32"))
    y = relay.add(conv, doubled)
    # Two identical additions on purpose: EliminateCommonSubexpr should
    # collapse them into one.
    z = relay.add(y, c)
    z1 = relay.add(y, c)
    return relay.Function([x, weight], relay.add(z, z1))
###############################################################################
# Optimize the Program
# --------------------
# Now we would like to optimize the program. Relay features a host of
# optimizations. We will select some of them to apply on this example program.
#
# There are multiple ways to optimize a Relay program. Below we will provide
# examples for each of them.
#
# Manually Apply Optimization Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Let's first create a relay Module which contains one or multiple Relay
# functions for optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
# Now we can apply constant folding on the module.
# fold_const here is a callback that doesn't take any parameters.
fold_const = relay.transform.FoldConstant()
# Then, we can invoke the pass on the given module. Note that the constant
# folding pass works at the function-level. That being said, each function in
# the module will be applied with the optimization. Users don't need to iterate
# through individual functions manually to apply this pass.
mod = fold_const(mod)
# We can see from the updated program that the constants are folded.
print(mod)
###############################################################################
# More optimizations can be applied in the similar manner. For instance, we can
# eliminate the common expressions that used by `z` and `z1`.
mod = relay.transform.EliminateCommonSubexpr()(mod)
print(mod)
###############################################################################
# Some optimizations, such as fusion, are parametric as well. For example,
# opt level 0 will not allow operators to be fused together. Users can pass the
# `fuse_opt_level` to enable this.
mod = relay.transform.FuseOps(fuse_opt_level=0)(mod)
# We can observe that the optimized module contains functions that only have
# a single primitive op.
print(mod)
###############################################################################
# Use Sequential to Apply a Sequence of Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Applying passes as above is actually tedious and it may require users to have
# better understanding about the dependencies between them. For example, fusion
# currently doesn't work well on let bindings. Therefore, we would not be able
# to fuse operators that were fusable if :py:func:`relay.transform.ToANormalForm` is applied before
# fusion, as this pass generates let bindings for each expression to
# canonicalize a Relay program.
#
# Relay, hence, provides :py:class:`tvm.transform.Sequential` to alleviate developers from handling
# these issues explicitly by specifying the required passes of each pass and
# packing them as a whole to execute. For example, the same passes can now be
# applied using the sequential style as the following. :py:class:`tvm.transform.Sequential` is
# similar to `torch.nn.sequential <https://pytorch.org/docs/stable/nn.html#torch.nn.Sequential>`_
# and `mxnet.gluon.block <https://mxnet.apache.org/api/python/docs/_modules/mxnet/gluon/block.html>`_.
# For example, `torch.nn.sequential` is used to contain a sequence of PyTorch
# `Modules` that will be added to build a network. It focuses on the network
# layers. Instead, the :py:class:`tvm.transform.Sequential` in our pass infra works on the optimizing
# pass.
# Now let's execute some passes through :py:class:`tvm.transform.Sequential`
f = example()
mod = tvm.IRModule.from_expr(f)
# Glob the interested passes.
seq = tvm.transform.Sequential(
[
relay.transform.FoldConstant(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(fuse_opt_level=2),
]
)
mod1 = seq(mod)
print(mod1)
###############################################################################
# From the transformed Relay program, we can see that there are still two
# identical addition operations. This is because ``EliminateCommonSubexpr``
# was not actually performed. The reason is because only the passes that have
# optimization level less or equal to 2 will be executed by default under
# :py:class:`tvm.transform.Sequential`. The pass infra,
# however, provides a configuration interface
# for users to customize the optimization level that they want to execute.
with tvm.transform.PassContext(opt_level=3):
mod2 = seq(mod)
print(mod2)
###############################################################################
# Now we can see that only one of the two identical additions is kept.
#
# In addition, users can selectively disable some passes using the
# `disabled_pass` config, which is similar to the `-fno-xxx` option used the
# general purpose compilers, such as Clang and GCC. For example, we can disable
# EliminateCommonSubexpr as following. The printed module will again show two
# identical addition operations.
with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
mod3 = seq(mod)
print(mod3)
##############################################################################
# Implement a Pass Using Python Decorator
# ------------------------------------------
# The next example illustrates how we can orchestrate a customized optimization
# pipeline through the pass infra using Python decorators. This functionality
# greatly eases the implementation of passes. For example, users can simply
# define a decorated class to do function-level optimizations as the following
# example shows. `transform_function` wraps a class to replace all constants
# with a multiple of `c`. Later on, each function in a given module will be
# visited and each constant in the function will be replaced when we invoke the
# customized pass.
@relay.transform.function_pass(opt_level=1)
class CustomPipeline:
    """Simple test function to replace one argument to another."""

    def __init__(self, multiplier):
        self.multiplier = multiplier

    # This function can define a pass.
    def transform_function(self, func, mod, ctx):
        # Capture the pass object so the mutator below can reach the multiplier.
        pipeline = self

        class ScaleConstants(tvm.relay.ExprMutator):
            def visit_constant(self, c):
                return relay.multiply(pipeline.multiplier, c)

        return ScaleConstants().visit(func)
f = example()
mod = tvm.IRModule.from_expr(f)
custom_pass = CustomPipeline(multiplier=relay.const(3, "float32"))
assert custom_pass.info.name == "CustomPipeline"
mod3 = custom_pass(mod)
print(mod3)
##############################################################################
# Debug a Pass
# ------------
# TVM provides users a plug-and-play style debugging pass that print the IR
# after a certain pass is done through a special pass (``PrintIR``) to dump the IR of the
# whole module. A slightly modified version of the sequential pass example
# could be like the following to enable IR dumping for ``FoldConstant`` optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
seq = tvm.transform.Sequential(
[
relay.transform.FoldConstant(),
tvm.transform.PrintIR(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.FuseOps(),
]
)
###############################################################################
# By inserting the ``PrintIR`` pass after ``FoldConstant``, the pass infra will
# dump out the module IR when ``FoldConstant`` is done. Users can plug in this
# pass after any pass they want to debug for viewing the optimization effect.
#
# There is a more flexible debugging mechanism. One can implement a ``PassInstrument``
# class to execute arbitrary code not only before and/or after each pass but also
# at entering/exiting ``PassContext``. See :ref:`pass_instrument_cpp_backend`
# for more details.
#
# Here we use the :py:func:`tvm.instrument.pass_instrument` decorator to implement
# a PassInstrument class printing the IR before execution of each pass:
@tvm.instrument.pass_instrument
class PrintIR:
    """Print the name of the pass, the IR, only before passes execute."""

    def run_before_pass(self, mod, info):
        # Use str.format so the pass info is actually substituted into the
        # message: print() does not apply "{}" placeholders, so the original
        # ``print("Running pass: {}", info)`` printed the literal braces and
        # the info object as a separate argument.
        print("Running pass: {}".format(info))
        print(mod)
with tvm.transform.PassContext(opt_level=3, instruments=[PrintIR()]):
with tvm.target.Target("llvm"):
# Perform the optimizations.
mod = seq(mod)
print(mod)
print("done")
##############################################################################
# Summary
# -------
# This tutorial has covered how we can write and invoke passes in TVM more
# conveniently using the pass infra. Different ways of invoking a pass are also
# discussed. Using :py:class:`tvm.transform.Sequential` can largely help
# users to ease the work of handling multiple optimization passes and their
# dependencies. In addition, an example is provided to illustrate
# how we can debug a pass using the ``PrintIR`` and tracing.
| 11,740 | 41.694545 | 102 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_model_on_adreno_tvmc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-adreno-tvmc:
Deploy the Pretrained Model on Adreno™ with tvmc Interface
==========================================================
**Author**: Siva Rama Krishna
This article is a step-by-step tutorial to deploy pretrained Keras resnet50 model on Adreno™.
Besides that, you should have TVM built for Android.
See the following instructions on how to build it and setup RPC environment.
`Deploy to Adreno GPU <https://tvm.apache.org/docs/how_to/deploy/adreno.html>`_
"""
import os
import tvm
import numpy as np
from tvm import relay
from tvm.driver import tvmc
from tvm.driver.tvmc.model import TVMCPackage
from tvm.contrib import utils
#################################################################
# Configuration
# -------------
# Specify Adreno target before compiling to generate texture
# leveraging kernels and get all the benefits of textures
# Note: This generated example running on our x86 server for demonstration.
# If running it on the Android device, we need to
# specify its instruction set. Set :code:`local_demo` to False if you want
# to run this tutorial with a real device over rpc.
local_demo = True
# by default on CPU target will execute.
# select 'llvm', 'opencl' and 'opencl -device=adreno'
target = "llvm"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target_host = "llvm -mtriple=%s-linux-android" % arch
# Auto tuning is a compute-intensive and time-consuming task, hence it is disabled for the default run. Please enable it if required.
is_tuning = False
tune_log = "adreno-resnet50.log"
# To enable OpenCLML accelerated operator library.
enable_clml = False
cross_compiler = (
os.getenv("ANDROID_NDK_HOME", "")
+ "/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android28-clang"
)
#######################################################################
# Make a Keras Resnet50 Model
# ---------------------------
from tensorflow.keras.applications.resnet50 import ResNet50
tmp_path = utils.tempdir()
model_file_name = tmp_path.relpath("resnet50.h5")
model = ResNet50(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
model.save(model_file_name)
#######################################################################
# Load Model
# ----------
# Convert a model from any framework to a tvm relay module.
# tvmc.load supports models from any framework (like tensorflow saves_model, onnx, tflite ..etc) and auto detects the filetype.
tvmc_model = tvmc.load(model_file_name)
print(tvmc_model.mod)
# tvmc_model consists of tvmc_model.mod, which is the relay module, and tvmc_model.params, which are the parameters of the module.
#######################################################################
# AutoTuning
# ----------
# Now, the below api can be used for autotuning the model for any target.
# Tuning required RPC setup and please refer to
# `Deploy to Adreno GPU <https://tvm.apache.org/docs/how_to/deploy/adreno.html>`_
rpc_tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
rpc_tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
rpc_key = "android"
rpc_tracker = rpc_tracker_host + ":" + str(rpc_tracker_port)
# Auto tuning is a compute-intensive and time-consuming task.
# It is set to False in the above configuration as this script runs on x86 for demonstration.
# Please set :code:`is_tuning` to True to enable auto tuning.
# Also, :code:`test_target` is set to :code:`llvm` as this example to make compatible for x86 demonstration.
# Please change it to :code:`opencl` or :code:`opencl -device=adreno` for RPC target in configuration above.
if is_tuning:
tvmc.tune(
tvmc_model,
target=target,
tuning_records=tune_log,
target_host=target_host,
hostname=rpc_tracker_host,
port=rpc_tracker_port,
rpc_key=rpc_key,
tuner="xgb",
repeat=30,
trials=3,
early_stopping=0,
)
#######################################################################
# Compilation
# -----------
# Compilation to produce tvm artifacts
# This generated example runs on our x86 server for demonstration.
# To deploy and run on a real target over RPC please set :code:`local_demo` to False in the above configuration section.
# OpenCLML offloading will try to accelerate supported operators by using the OpenCLML proprietary operator library.
# By default :code:`enable_clml` is set to False in above configuration section.
if not enable_clml:
if local_demo:
tvmc_package = tvmc.compile(
tvmc_model,
target=target,
)
else:
tvmc_package = tvmc.compile(
tvmc_model,
target=target,
target_host=target_host,
cross=cross_compiler,
tuning_records=tune_log,
)
else:
    # Alternatively, we can save the compilation output as a TVMCPackage.
    # This way the compiled module can be loaded later without compiling again.
target = target + ", clml"
pkg_path = tmp_path.relpath("keras-resnet50.tar")
tvmc.compile(
tvmc_model,
target=target,
target_host=target_host,
cross=cross_compiler,
tuning_records=tune_log,
package_path=pkg_path,
)
# Load the compiled package
tvmc_package = TVMCPackage(package_path=pkg_path)
# tvmc_package consists of tvmc_package.lib_path, tvmc_package.graph, tvmc_package.params
# Saved TVMPackage is nothing but tar archive with mod.so, mod.json and mod.params.
#######################################################################
# Deploy & Run
# ------------
# Deploy and run the compiled model on RPC
# Let tvmc fill inputs using random
# Run on RPC setup
if local_demo:
result = tvmc.run(tvmc_package, device="cpu", fill_mode="random")
else:
result = tvmc.run(
tvmc_package,
device="cl",
rpc_key=rpc_key,
hostname=rpc_tracker_host,
port=rpc_tracker_port,
fill_mode="random",
)
# result is a dictionary of outputs.
print("Result:", result)
| 6,849 | 33.422111 | 127 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_model_on_android.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-android:
Deploy the Pretrained Model on Android
=======================================
**Author**: `Tomohiro Kato <https://tkat0.github.io/>`_
This is an example of using Relay to compile a keras model and deploy it on Android device.
"""
import os
import numpy as np
from PIL import Image
import keras
from keras.applications.mobilenet_v2 import MobileNetV2
import tvm
from tvm import te
import tvm.relay as relay
from tvm import rpc
from tvm.contrib import utils, ndk, graph_executor as runtime
from tvm.contrib.download import download_testdata
######################################################################
# Setup Environment
# -----------------
# Since there are many required packages for Android, it is recommended to use the official Docker Image.
#
# First, to build and run Docker Image, we can run the following command.
#
# .. code-block:: bash
#
# git clone --recursive https://github.com/apache/tvm tvm
# cd tvm
# docker build -t tvm.demo_android -f docker/Dockerfile.demo_android ./docker
# docker run --pid=host -h tvm -v $PWD:/workspace \
# -w /workspace -p 9190:9190 --name tvm -it tvm.demo_android bash
#
# You are now inside the container. The cloned TVM directory is mounted on /workspace.
# At this time, mount the 9190 port used by RPC described later.
#
# .. note::
#
# Please execute the following steps in the container.
# We can execute :code:`docker exec -it tvm bash` to open a new terminal in the container.
#
# Next we build the TVM.
#
# .. code-block:: bash
#
# mkdir build
# cd build
# cmake -DUSE_LLVM=llvm-config-8 \
# -DUSE_RPC=ON \
# -DUSE_SORT=ON \
# -DUSE_VULKAN=ON \
# -DUSE_GRAPH_EXECUTOR=ON \
# ..
# make -j10
#
# After building TVM successfully, Please set PYTHONPATH.
#
# .. code-block:: bash
#
# echo 'export PYTHONPATH=/workspace/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc
# source ~/.bashrc
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses RPC session to communicate with Android device.
#
# To start an RPC tracker, run this command in the container. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python3 -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Android device to RPC Tracker
# --------------------------------------
# Now we can register our Android device to the tracker.
#
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install TVM RPC APK on the android device.
#
# Here is an example of config.mk. I enabled OpenCL and Vulkan.
#
#
# .. code-block:: bash
#
# APP_ABI = arm64-v8a
#
# APP_PLATFORM = android-24
#
# # whether enable OpenCL during compile
# USE_OPENCL = 1
#
# # whether to enable Vulkan during compile
# USE_VULKAN = 1
#
# ifeq ($(USE_VULKAN), 1)
# # Statically linking vulkan requires API Level 24 or higher
# APP_PLATFORM = android-24
# endif
#
# # the additional include headers you want to add, e.g., SDK_PATH/adrenosdk/Development/Inc
# ADD_C_INCLUDES += /work/adrenosdk-linux-5_0/Development/Inc
# ADD_C_INCLUDES =
#
# # the additional link libs you want to add, e.g., ANDROID_LIB_PATH/libOpenCL.so
# ADD_LDLIBS =
#
# .. note::
#
# At this time, don't forget to `create a standalone toolchain <https://github.com/apache/tvm/tree/main/apps/android_rpc#architecture-and-android-standalone-toolchain>`_ .
#
# for example
#
# .. code-block:: bash
#
# $ANDROID_NDK_HOME/build/tools/make-standalone-toolchain.sh \
# --platform=android-24 --use-llvm --arch=arm64 --install-dir=/opt/android-toolchain-arm64
# export TVM_NDK_CC=/opt/android-toolchain-arm64/bin/aarch64-linux-android-g++
#
# Next, start the Android application and enter the IP address and port of RPC Tracker.
# Then you have already registered your device.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python3 -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 1 Android device.
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# android 1 1 0
# ----------------------------------
#
# To confirm that you can communicate with Android, we can run following test script.
# If you use OpenCL and Vulkan, please set :code:`test_opencl` and :code:`test_vulkan` in the script.
#
# .. code-block:: bash
#
# export TVM_TRACKER_HOST=0.0.0.0
# export TVM_TRACKER_PORT=9190
#
# .. code-block:: bash
#
# cd /workspace/apps/android_rpc
# python3 tests/android_rpc_test.py
#
######################################################################
# Load pretrained keras model
# ---------------------------
# We load a pretrained MobileNetV2(alpha=0.5) classification model provided by keras.
keras.backend.clear_session() # Destroys the current TF graph and creates a new one.
weights_url = "".join(
[
"https://github.com/JonathanCMitchell/",
"mobilenet_v2_keras/releases/download/v1.1/",
"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
]
)
weights_file = "mobilenet_v2_weights.h5"
weights_path = download_testdata(weights_url, weights_file, module="keras")
keras_mobilenet_v2 = MobileNetV2(
alpha=0.5, include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
)
keras_mobilenet_v2.load_weights(weights_path)
######################################################################
# In order to test our model, here we download an image of cat and
# transform its format.
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
img_path = download_testdata(img_url, img_name, module="data")
image = Image.open(img_path).resize((224, 224))
dtype = "float32"
def transform_image(image):
    """Preprocess an HxWx3 RGB image for the MobileNetV2 model above.

    Subtracts the per-channel mean, scales by the per-channel std,
    reorders to channels-first, and prepends a batch axis of size 1.
    """
    mean = np.array([123.0, 117.0, 104.0])
    std = np.array([58.395, 57.12, 57.375])
    normalized = (np.array(image) - mean) / std
    chw = normalized.transpose((2, 0, 1))
    return chw[np.newaxis, :]
x = transform_image(image)
######################################################################
# synset is used to transform the label from number of ImageNet class to
# the word human can understand.
synset_url = "".join(
    [
        "https://gist.githubusercontent.com/zhreshold/",
        "4d0b62f3d01426887599d4f7ede23ee5/raw/",
        "596b27d23537e5a1b5751d2b0481ef172f58b539/",
        "imagenet1000_clsid_to_human.txt",
    ]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
# The synset file stores a Python dict literal mapping class id -> label.
# Parse it with ast.literal_eval rather than eval so downloaded content
# cannot execute arbitrary code.
import ast

with open(synset_path) as f:
    synset = ast.literal_eval(f.read())
######################################################################
# Compile the model with relay
# ----------------------------
# If we run the example on our x86 server for demonstration, we can simply
# set it as :code:`llvm`. If running it on the Android device, we need to
# specify its instruction set. Set :code:`local_demo` to False if you want
# to run this tutorial with a real device.
local_demo = True
# by default on CPU target will execute.
# select 'cpu', 'opencl' and 'vulkan'
test_target = "cpu"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = tvm.target.Target("llvm -mtriple=%s-linux-android" % arch)
if local_demo:
    # Local demo compiles for the host CPU instead of Android.
    target = tvm.target.Target("llvm")
elif test_target == "opencl":
    target = tvm.target.Target("opencl", host=target)
elif test_target == "vulkan":
    target = tvm.target.Target("vulkan", host=target)
input_name = "input_1"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target=target, params=params)
# After `relay.build`, you will get three return values: graph,
# library and the new parameter, since we do some optimization that will
# change the parameters but keep the result of model as the same.
# Save the library at local temporary directory.
tmp = utils.tempdir()
lib_fname = tmp.relpath("net.so")
# Cross-compile with the Android NDK only when targeting a real device.
fcompile = ndk.create_shared if not local_demo else None
lib.export_library(lib_fname, fcompile)
######################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# With RPC, you can deploy the model remotely from your host machine
# to the remote android device.
tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
key = "android"  # must match the key the device registered with on the tracker
if local_demo:
    remote = rpc.LocalSession()
else:
    tracker = rpc.connect_tracker(tracker_host, tracker_port)
    # When running a heavy model, we should increase the `session_timeout`
    remote = tracker.request(key, priority=0, session_timeout=60)
# Pick the device handle matching the compilation target chosen above.
if local_demo:
    dev = remote.cpu(0)
elif test_target == "opencl":
    dev = remote.cl(0)
elif test_target == "vulkan":
    dev = remote.vulkan(0)
else:
    dev = remote.cpu(0)
# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module("net.so")
# create the remote runtime module
module = runtime.GraphModule(rlib["default"](dev))
######################################################################
# Execute on TVM
# --------------
# set input data
module.set_input(input_name, tvm.nd.array(x.astype(dtype)))
# run a single inference
module.run()
# get output
out = module.get_output(0)
# index of the highest-scoring class, mapped to its label via synset
top1 = np.argmax(out.numpy())
print("TVM prediction top-1: {}".format(synset[top1]))
print("Evaluate inference time cost...")
print(module.benchmark(dev, number=1, repeat=10))
######################################################################
# Sample Output
# -------------
# The following is the result of 'cpu', 'opencl' and 'vulkan' using Adreno 530 on Snapdragon 820
#
# Although we can run on a GPU, it is slower than CPU.
# To speed up, we need to write and optimize the schedule according to the GPU architecture.
#
# .. code-block:: bash
#
# # cpu
# TVM prediction top-1: tiger cat
# Evaluate inference time cost...
# Mean inference time (std dev): 37.92 ms (19.67 ms)
#
# # opencl
# TVM prediction top-1: tiger cat
# Evaluate inference time cost...
# Mean inference time (std dev): 419.83 ms (7.49 ms)
#
# # vulkan
# TVM prediction top-1: tiger cat
# Evaluate inference time cost...
# Mean inference time (std dev): 465.80 ms (4.52 ms)
| 11,684 | 31.368421 | 173 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_prequantized.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Framework-prequantized Model with TVM
==============================================
**Author**: `Masahiro Masuda <https://github.com/masahi>`_
This is a tutorial on loading models quantized by deep learning frameworks into TVM.
Pre-quantized model import is one of the quantization support we have in TVM. More details on
the quantization story in TVM can be found
`here <https://discuss.tvm.apache.org/t/quantization-story/3920>`_.
Here, we demonstrate how to load and run models quantized by PyTorch, MXNet, and TFLite.
Once loaded, we can run compiled, quantized models on any hardware TVM supports.
"""
#################################################################################
# First, necessary imports
from PIL import Image
import numpy as np
import torch
from torchvision.models.quantization import mobilenet as qmobilenet
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
#################################################################################
# Helper functions to run the demo
def get_transform():
    """Build the standard ImageNet evaluation preprocessing pipeline:
    resize, center-crop to 224, tensor conversion, mean/std normalization.
    """
    import torchvision.transforms as transforms

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    steps = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(steps)
def get_real_image(im_height, im_width):
    """Download the demo cat image and return it resized to (im_height, im_width)."""
    img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    cached_path = download_testdata(img_url, "cat.png", module="data")
    size = (im_height, im_width)
    return Image.open(cached_path).resize(size)
def get_imagenet_input():
    """Return the demo cat image, preprocessed and batched (leading axis of
    size 1), as a numpy array ready for the models below.
    """
    image = get_real_image(224, 224)
    tensor = get_transform()(image)
    return tensor.numpy()[np.newaxis, :]
def get_synset():
    """Download the ImageNet class-id -> human label mapping and return it as a dict.

    The hosted file stores a Python dict literal; it is parsed with
    ast.literal_eval rather than eval so downloaded content cannot execute
    arbitrary code.
    """
    import ast

    synset_url = "".join(
        [
            "https://gist.githubusercontent.com/zhreshold/",
            "4d0b62f3d01426887599d4f7ede23ee5/raw/",
            "596b27d23537e5a1b5751d2b0481ef172f58b539/",
            "imagenet1000_clsid_to_human.txt",
        ]
    )
    synset_name = "imagenet1000_clsid_to_human.txt"
    synset_path = download_testdata(synset_url, synset_name, module="data")
    with open(synset_path) as f:
        return ast.literal_eval(f.read())
def run_tvm_model(mod, params, input_name, inp, target="llvm"):
    """Compile a Relay module for `target`, run one inference on `inp`, and
    return (output array, graph runtime) so callers can reuse the runtime.
    """
    with tvm.transform.PassContext(opt_level=3):
        compiled_lib = relay.build(mod, target=target, params=params)

    dev = tvm.device(target, 0)
    gmod = tvm.contrib.graph_executor.GraphModule(compiled_lib["default"](dev))
    gmod.set_input(input_name, inp)
    gmod.run()
    return gmod.get_output(0).numpy(), gmod
#################################################################################
# A mapping from label to class name, to verify that the outputs from models below
# are reasonable
synset = get_synset()
#################################################################################
# Everyone's favorite cat image for demonstration
inp = get_imagenet_input()
################################################################################
# Deploy a quantized PyTorch Model
# --------------------------------
# First, we demonstrate how to load deep learning models quantized by PyTorch,
# using our PyTorch frontend.
#
# Please refer to the PyTorch static quantization tutorial below to learn about
# their quantization workflow.
# https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html
#
# We use this function to quantize PyTorch models.
# In short, this function takes a floating point model and converts it to uint8.
# The model is per-channel quantized.
def quantize_model(model, inp):
    """Post-training quantize a PyTorch model in place (float32 -> uint8).

    Uses the default fbgemm (x86) qconfig; per the notes above, the model
    is per-channel quantized. `inp` is run through the model once as a
    minimal calibration pass before conversion.
    """
    model.fuse_model()
    model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
    torch.quantization.prepare(model, inplace=True)
    # Dummy calibration: one forward pass lets the observers record activation ranges.
    model(inp)
    torch.quantization.convert(model, inplace=True)
##############################################################################
# Load quantization-ready, pretrained Mobilenet v2 model from torchvision
# -----------------------------------------------------------------------
# We choose mobilenet v2 because this model was trained with quantization aware
# training. Other models require a full post training calibration.
qmodel = qmobilenet.mobilenet_v2(pretrained=True).eval()
##############################################################################
# Quantize, trace and run the PyTorch Mobilenet v2 model
# ------------------------------------------------------
# The details are out of scope for this tutorial. Please refer to the tutorials
# on the PyTorch website to learn about quantization and jit.
pt_inp = torch.from_numpy(inp)
quantize_model(qmodel, pt_inp)  # in-place: qmodel is now a quantized (uint8) model
script_module = torch.jit.trace(qmodel, pt_inp).eval()
with torch.no_grad():
    pt_result = script_module(pt_inp).numpy()  # reference output for later comparison
##############################################################################
# Convert quantized Mobilenet v2 to Relay-QNN using the PyTorch frontend
# ----------------------------------------------------------------------
# The PyTorch frontend has support for converting a quantized PyTorch model to
# an equivalent Relay module enriched with quantization-aware operators.
# We call this representation Relay QNN dialect.
#
# You can print the output from the frontend to see how quantized models are
# represented.
#
# You would see operators specific to quantization such as
# qnn.quantize, qnn.dequantize, qnn.requantize, and qnn.conv2d etc.
input_name = "input"  # the input name can be arbitrary for PyTorch frontend.
input_shapes = [(input_name, (1, 3, 224, 224))]
mod, params = relay.frontend.from_pytorch(script_module, input_shapes)
# print(mod) # comment in to see the QNN IR dump
##############################################################################
# Compile and run the Relay module
# --------------------------------
# Once we obtained the quantized Relay module, the rest of the workflow
# is the same as running floating point models. Please refer to other
# tutorials for more details.
#
# Under the hood, quantization specific operators are lowered to a sequence of
# standard Relay operators before compilation.
# Compile for host CPU; the QNN ops are lowered to standard Relay ops here.
target = "llvm"
tvm_result, rt_mod = run_tvm_model(mod, params, input_name, inp, target=target)
##########################################################################
# Compare the output labels
# -------------------------
# We should see identical labels printed.
# argsort ascends, so reverse and take the three highest-scoring class ids
pt_top3_labels = np.argsort(pt_result[0])[::-1][:3]
tvm_top3_labels = np.argsort(tvm_result[0])[::-1][:3]
print("PyTorch top3 labels:", [synset[label] for label in pt_top3_labels])
print("TVM top3 labels:", [synset[label] for label in tvm_top3_labels])
###########################################################################################
# However, due to the difference in numerics, in general the raw floating point
# outputs are not expected to be identical. Here, we print how many floating point
# output values are identical out of 1000 outputs from mobilenet v2.
print("%d in 1000 raw floating outputs identical." % np.sum(tvm_result[0] == pt_result[0]))
##########################################################################
# Measure performance
# -------------------------
# Here we give an example of how to measure performance of TVM compiled models.
n_repeat = 100  # should be bigger to make the measurement more accurate
dev = tvm.cpu(0)
# benchmark() times the compiled module in C++, avoiding Python overhead
print(rt_mod.benchmark(dev, number=1, repeat=n_repeat))
######################################################################
# .. note::
#
# We recommend this method for the following reasons:
#
# * Measurements are done in C++, so there is no Python overhead
# * It includes several warm up runs
# * The same method can be used to profile on remote devices (android etc.).
######################################################################
# .. note::
#
# Unless the hardware has special support for fast 8 bit instructions, quantized models are
# not expected to be any faster than FP32 models. Without fast 8 bit instructions, TVM does
# quantized convolution in 16 bit, even if the model itself is 8 bit.
#
# For x86, the best performance can be achieved on CPUs with AVX512 instructions set.
# In this case, TVM utilizes the fastest available 8 bit instructions for the given target.
# This includes support for the VNNI 8 bit dot product instruction (CascadeLake or newer).
#
# Moreover, the following general tips for CPU performance equally applies:
#
# * Set the environment variable TVM_NUM_THREADS to the number of physical cores
# * Choose the best target for your hardware, such as "llvm -mcpu=skylake-avx512" or
# "llvm -mcpu=cascadelake" (more CPUs with AVX512 would come in the future)
###############################################################################
# Deploy a quantized MXNet Model
# ------------------------------
# TODO
###############################################################################
# Deploy a quantized TFLite Model
# -------------------------------
# TODO
| 9,941 | 39.91358 | 93 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_model_on_adreno.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-adreno:
Deploy the Pretrained Model on Adreno™
======================================
**Author**: Daniil Barinov, Siva Rama Krishna
This article is a step-by-step tutorial to deploy pretrained Pytorch ResNet-18 model on Adreno (on different precisions).
For us to begin with, PyTorch must be installed.
TorchVision is also required since we will be using it as our model zoo.
A quick solution is to install it via pip:
.. code-block:: bash
%%shell
pip install torch
pip install torchvision
Besides that, you should have TVM builded for Android.
See the following instructions on how to build it.
`Deploy to Adreno GPU <https://tvm.apache.org/docs/how_to/deploy/adreno.html>`_
After the build section there should be two files in *build* directory «libtvm_runtime.so» and «tvm_rpc».
Let's push them to the device and run TVM RPC Server.
"""
######################################################################
# TVM RPC Server
# --------------
# To get the hash of the device use:
#
# .. code-block:: bash
#
# adb devices
#
# Set the android device to use, if you have several devices connected to your computer.
#
# .. code-block:: bash
#
# export ANDROID_SERIAL=<device-hash>
#
# Then to upload these two files to the device you should use:
#
# .. code-block:: bash
#
# adb push {libtvm_runtime.so,tvm_rpc} /data/local/tmp
#
# At this moment you will have «libtvm_runtime.so» and «tvm_rpc» on path /data/local/tmp on your device.
# Sometimes cmake can’t find «libc++_shared.so». Use:
#
# .. code-block:: bash
#
# find ${ANDROID_NDK_HOME} -name libc++_shared.so
#
# to find it and also push it with adb on the desired device:
#
# .. code-block:: bash
#
# adb push libc++_shared.so /data/local/tmp
#
# We are now ready to run the TVM RPC Server.
# Launch rpc_tracker with following line in 1st console:
#
# .. code-block:: bash
#
# python3 -m tvm.exec.rpc_tracker --port 9190
#
# Then we need to run tvm_rpc server from under the desired device in 2nd console:
#
# .. code-block:: bash
#
# adb reverse tcp:9190 tcp:9190
# adb forward tcp:5000 tcp:5000
# adb forward tcp:5002 tcp:5001
# adb forward tcp:5003 tcp:5002
# adb forward tcp:5004 tcp:5003
# adb shell LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/tvm_rpc server --host=0.0.0.0 --port=5000 --tracker=127.0.0.1:9190 --key=android --port-end=5100
#
# Before proceeding to compile and infer model, specify TVM_TRACKER_HOST and TVM_TRACKER_PORT
#
# .. code-block:: bash
#
# export TVM_TRACKER_HOST=0.0.0.0
# export TVM_TRACKER_PORT=9190
#
# check that the tracker is running and the device is available
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --port 9190
#
# For example, if we have 1 Android device,
# the output can be:
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# android 1 1 0
# ----------------------------------
#################################################################
# Configuration
# -------------
import os
import torch
import torchvision
import tvm
from tvm import te
from tvm import relay, rpc
from tvm.contrib import utils, ndk
from tvm.contrib import graph_executor
from tvm.relay.op.contrib import clml
from tvm import autotvm
# Below are set of configuration that controls the behaviour of this script like
# local run or device run, target definitions, dtype setting and auto tuning enablement.
# Change these settings as needed if required.
# Adreno devices are efficient with float16 compared to float32
# Given the expected output doesn't effect by lowering precision
# it's advisable to use lower precision.
# We have a helper API to make the precision conversion simple and
# it supports dtype with "float16" and "float16_acc32" modes.
# Let's choose "float16" for calculation and "float32" for accumulation.
calculation_dtype = "float16"
acc_dtype = "float32"
# Specify Adreno target before compiling to generate texture
# leveraging kernels and get all the benefits of textures
# Note: This generated example running on our x86 server for demonstration.
# If running it on the Android device, we need to
# specify its instruction set. Set :code:`local_demo` to False if you want
# to run this tutorial with a real device.
local_demo = True
# by default on CPU target will execute.
# select 'cpu', 'opencl' and 'opencl -device=adreno'
test_target = "cpu"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = tvm.target.Target("llvm -mtriple=%s-linux-android" % arch)
# Auto tuning is a compute-intensive and time-consuming task,
# hence disabled for the default run. Please enable it if required.
is_tuning = False
tune_log = "adreno-resnet18.log"
# To enable the OpenCLML accelerated operator library.
enable_clml = False
#################################################################
# Get a PyTorch Model
# -------------------
# Get resnet18 from torchvision models
model_name = "resnet18"
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.eval()  # inference mode
# We grab the TorchScripted model via tracing with a random dummy input
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(model, input_data).eval()
#################################################################
# Load a test image
# -----------------
# As an example we would use classical cat image from ImageNet
from PIL import Image
from tvm.contrib.download import download_testdata
from matplotlib import pyplot as plt
import numpy as np
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
plt.imshow(img)
plt.show()
# Preprocess the image and convert to tensor (standard ImageNet normalization)
from torchvision import transforms
my_preprocess = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)
img = my_preprocess(img)
# add a batch axis of size 1
img = np.expand_dims(img, 0)
#################################################################
# Convert PyTorch model to Relay module
# -------------------------------------
# TVM has frontend api for various frameworks under relay.frontend and now
# for pytorch model import we have relay.frontend.from_pytorch api.
# Input name can be arbitrary
input_name = "input0"
shape_list = [(input_name, img.shape)]
# Convert the traced TorchScript module to a Relay module plus parameter dict.
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
#################################################################
# Precisions
# ----------
# Adreno devices are efficient with float16 compared to float32
# Given the expected output doesn't effect by lowering precision
# it's advisable to use lower precision.
# TVM support Mixed Precision through ToMixedPrecision transformation pass.
# We may need to register precision rules like precision type, accumulation
# datatype ...etc. for the required operators to override the default settings.
# The below helper api simplifies the precision conversions across the module.
# Calculation dtype is set to "float16" and accumulation dtype is set to "float32"
# in configuration section above.
from tvm.driver.tvmc.transform import apply_graph_transforms

# Rewrite conv2d/dense to compute in `calculation_dtype` while accumulating
# in `acc_dtype` (both configured at the top of the script).
mod = apply_graph_transforms(
    mod,
    {
        "mixed_precision": True,
        "mixed_precision_ops": ["nn.conv2d", "nn.dense"],
        "mixed_precision_calculation_type": calculation_dtype,
        "mixed_precision_acc_type": acc_dtype,
    },
)
#################################################################
# As you can see in the IR, the architecture now contains cast operations, which are
# needed to convert to FP16 precision.
# You can also use "float16" or "float32" precisions as other dtype options.
#################################################################
# Prepare TVM Target
# ------------------
# This generated example running on our x86 server for demonstration.
# To deploy and run on a real target over RPC, please set :code:`local_demo` to False in the configuration section above.
# Also, :code:`test_target` is set to :code:`llvm` to keep this example compatible with the x86 demonstration.
# Please change it to :code:`opencl` or :code:`opencl -device=adreno` for RPC target in configuration above.
if local_demo:
    target = tvm.target.Target("llvm")
elif "opencl" in test_target:
    # Fixed condition: the original `test_target.find("opencl")` returned 0
    # (falsy) when "opencl" was a prefix and -1 (truthy) when absent, i.e.
    # the branch logic was inverted.
    target = tvm.target.Target(test_target, host=target)
##################################################################
# AutoTuning
# ----------
# The below few instructions can auto tune the relay module with xgboost being the tuner algorithm.
# The Auto Tuning process involves the stages of extracting the tasks, defining a tuning configuration and
# tuning each task for the best performing kernel configuration.
# Get RPC related settings.
rpc_tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
rpc_tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
key = "android"  # device key used when registering with the RPC tracker
# Auto tuning is compute intensive and time taking task.
# It is set to False in above configuration as this script runs in x86 for demonstration.
# Please to set :code:`is_tuning` to True to enable auto tuning.
if is_tuning:
    # Auto Tuning Stage 1: Extract tunable tasks
    tasks = autotvm.task.extract_from_program(
        mod, target=test_target, target_host=target, params=params
    )
    # Auto Tuning Stage 2: Define tuning configuration
    tmp_log_file = tune_log + ".tmp"
    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(
            build_func=ndk.create_shared, timeout=15
        ),  # Build the test kernel locally
        runner=autotvm.RPCRunner(  # The runner would be on a remote device.
            key,  # RPC Key
            host=rpc_tracker_host,  # Tracker host
            port=int(rpc_tracker_port),  # Tracker port
            number=3,  # Number of runs before averaging
            timeout=600,  # RPC Timeout
        ),
    )
    n_trial = 1024  # Number of training iterations before choosing the best kernel config
    early_stopping = False  # Can be enabled to stop tuning while the loss is not minimizing.
    # Auto Tuning Stage 3: Iterate through the tasks and tune.
    # Import every tuner we may dispatch on below. The original code referenced
    # GATuner/RandomTuner/GridSearchTuner without importing them, which raised
    # NameError whenever one of those tuner names was selected.
    from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner

    # Constructor keyword arguments for each supported XGBoost tuner variant.
    _XGB_VARIANTS = {
        "xgb": {"loss_type": "reg"},
        "xgb_knob": {"loss_type": "reg", "feature_type": "knob"},
        "xgb_itervar": {"loss_type": "reg", "feature_type": "itervar"},
        "xgb_curve": {"loss_type": "reg", "feature_type": "curve"},
        "xgb_rank": {"loss_type": "rank"},
        "xgb_rank_knob": {"loss_type": "rank", "feature_type": "knob"},
        "xgb_rank_itervar": {"loss_type": "rank", "feature_type": "itervar"},
        "xgb_rank_curve": {"loss_type": "rank", "feature_type": "curve"},
        "xgb_rank_binary": {"loss_type": "rank-binary"},
        "xgb_rank_binary_knob": {"loss_type": "rank-binary", "feature_type": "knob"},
        "xgb_rank_binary_itervar": {"loss_type": "rank-binary", "feature_type": "itervar"},
        "xgb_rank_binary_curve": {"loss_type": "rank-binary", "feature_type": "curve"},
    }
    for i, tsk in enumerate(reversed(tasks[:3])):
        print("Task:", tsk)
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        # choose tuner
        tuner = "xgb"
        # create tuner (dispatch table replaces the long elif chain)
        if tuner in _XGB_VARIANTS:
            tuner_obj = XGBTuner(tsk, **_XGB_VARIANTS[tuner])
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # Auto Tuning Stage 4: Pick the best performing configurations from the overall log.
    autotvm.record.pick_best(tmp_log_file, tune_log)
#################################################################
# Enable OpenCLML Offloading
# --------------------------
# OpenCLML offloading will try to accelerate supported operators
# by using OpenCLML proprietory operator library.
# By default :code:`enable_clml` is set to False in above configuration section.
# OpenCLML partitioning only applies to a real device run with the feature enabled.
if not local_demo and enable_clml:
    mod = clml.partition_for_clml(mod, params)
#################################################################
# Compilation
# -----------
# Use tuning cache if exists.
# Reuse auto-tuning results if a tuning log exists; otherwise build with defaults.
if os.path.exists(tune_log):
    with autotvm.apply_history_best(tune_log):
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target, params=params)
else:
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params=params)
#################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# Using RPC you can deploy the model from host
# machine to the remote Adreno device
if local_demo:
    remote = rpc.LocalSession()
else:
    tracker = rpc.connect_tracker(rpc_tracker_host, rpc_tracker_port)
    # When running a heavy model, we should increase the `session_timeout`
    remote = tracker.request(key, priority=0, session_timeout=60)

# Pick the remote device. `"opencl" in test_target` fixes the original
# `test_target.find("opencl")` check, which returns -1 (truthy) when the
# substring is absent and 0 (falsy) when it is a prefix, i.e. the branch
# logic was inverted.
if local_demo:
    dev = remote.cpu(0)
elif "opencl" in test_target:
    dev = remote.cl(0)
else:
    dev = remote.cpu(0)
# Export the compiled library, push it to the device over RPC, and load it.
temp = utils.tempdir()
dso_binary = "dev_lib_cl.so"
dso_binary_path = temp.relpath(dso_binary)
# Cross-compile with the Android NDK only when targeting a real device.
fcompile = ndk.create_shared if not local_demo else None
lib.export_library(dso_binary_path, fcompile)
# (Removed the unused `remote_path` variable: `load_module` resolves the
# uploaded file by basename, so the hard-coded path was never used.)
remote.upload(dso_binary_path)
rlib = remote.load_module(dso_binary)
m = graph_executor.GraphModule(rlib["default"](dev))
#################################################################
# Run inference
# -------------
# We now can set inputs, infer our model and get predictions as output
m.set_input(input_name, tvm.nd.array(img.astype("float32")))
m.run()
tvm_output = m.get_output(0)  # raw class scores
#################################################################
# Get predictions and performance statistic
# -----------------------------------------
# This piece of code displays the top-1 and top-5 predictions, as
# well as provides information about the model's performance
from os.path import join, isfile
from matplotlib import pyplot as plt
from tvm.contrib import download

# Download ImageNet categories
categ_url = "https://github.com/uwsampl/web-data/raw/main/vta/models/"
categ_fn = "synset.txt"
download.download(join(categ_url, categ_fn), categ_fn)
# The synset file stores a Python dict literal mapping class id -> label.
# Parse it with ast.literal_eval instead of eval so that downloaded content
# cannot execute arbitrary code; `with` also closes the previously-leaked
# file handle deterministically.
import ast

with open(categ_fn) as f:
    synset = ast.literal_eval(f.read())
top_categories = np.argsort(tvm_output.asnumpy()[0])
top5 = np.flip(top_categories, axis=0)[:5]
# Report top-1 classification result
print("Top-1 id: {}, class name: {}".format(top5[1 - 1], synset[top5[1 - 1]]))
# Report top-5 classification results
print("\nTop5 predictions: \n")
print("\t#1:", synset[top5[1 - 1]])
print("\t#2:", synset[top5[2 - 1]])
print("\t#3:", synset[top5[3 - 1]])
print("\t#4:", synset[top5[4 - 1]])
print("\t#5:", synset[top5[5 - 1]])
print("\t", top5)
ImageNetClassifier = False
for k in top_categories[-5:]:
    if "cat" in synset[k]:
        ImageNetClassifier = True
assert ImageNetClassifier, "Failed ImageNet classifier validation check"
print("Evaluate inference time cost...")
print(m.benchmark(dev, number=1, repeat=10))
| 17,091 | 35.059072 | 158 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_model_on_nano.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-nano:
Deploy the Pretrained Model on Jetson Nano
===========================================
**Author**: `BBuf <https://github.com/BBuf>`_
This is an example of using Relay to compile a ResNet model and deploy
it on Jetson Nano.
"""
# sphinx_gallery_start_ignore
# sphinx_gallery_requires_cuda = True
# sphinx_gallery_end_ignore
import tvm
from tvm import te
import tvm.relay as relay
from tvm import rpc
from tvm.contrib import utils, graph_executor as runtime
from tvm.contrib.download import download_testdata
######################################################################
# .. _build-tvm-runtime-on-jetson-nano:
#
# Build TVM Runtime on Jetson Nano
# --------------------------------
#
# The first step is to build the TVM runtime on the remote device.
#
# .. note::
#
# All instructions in both this section and next section should be
# executed on the target device, e.g. Jetson Nano. And we assume it
# has Linux running.
#
# Since we do compilation on local machine, the remote device is only used
# for running the generated code. We only need to build tvm runtime on
# the remote device.
#
# .. code-block:: bash
#
# git clone --recursive https://github.com/apache/tvm tvm
# cd tvm
# mkdir build
# cp cmake/config.cmake build
# cd build
# cmake ..
# make runtime -j4
# .. note::
#
# If we want to use Jetson Nano's GPU for inference,
# we need to enable the CUDA option in `config.cmake`,
# that is, `set(USE_CUDA ON)`
#
# After building runtime successfully, we need to set environment variables
# in :code:`~/.bashrc` file. We can edit :code:`~/.bashrc`
# using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM
# directory is in :code:`~/tvm`):
#
# .. code-block:: bash
#
# export PYTHONPATH=$PYTHONPATH:~/tvm/python
#
# To update the environment variables, execute :code:`source ~/.bashrc`.
######################################################################
# Set Up RPC Server on Device
# ---------------------------
# To start an RPC server, run the following command on your remote device
# (Which is Jetson Nano in our example).
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9091
#
# If you see the line below, it means the RPC server started
# successfully on your device.
#
# .. code-block:: bash
#
# INFO:RPCServer:bind to 0.0.0.0:9091
#
######################################################################
# Prepare the Pre-trained Model
# -----------------------------
# Back to the host machine, which should have a full TVM installed (with LLVM).
#
# We will use pre-trained model from
# `MXNet Gluon model zoo <https://mxnet.apache.org/api/python/gluon/model_zoo.html>`_.
# You can find more details about this part at tutorial :ref:`tutorial-from-mxnet`.
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
import numpy as np
# one line to get the model
block = get_model("resnet18_v1", pretrained=True)
######################################################################
# In order to test our model, here we download an image of cat and
# transform its format.
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
img_path = download_testdata(img_url, img_name, module="data")
image = Image.open(img_path).resize((224, 224))
def transform_image(image):
    """Normalize an HWC RGB image for ResNet and add a batch axis.

    Subtracts the per-channel ImageNet mean, divides by the per-channel
    standard deviation, then reorders to NCHW layout expected by the model.
    Returns a float array of shape (1, C, H, W).
    """
    mean = np.array([123.0, 117.0, 104.0])
    std = np.array([58.395, 57.12, 57.375])
    arr = (np.array(image) - mean) / std
    arr = arr.transpose((2, 0, 1))
    return arr[np.newaxis, :]
x = transform_image(image)
######################################################################
# synset is used to transform the label from number of ImageNet class to
# the word human can understand.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
# Parse the downloaded file, which contains a Python dict literal mapping
# ImageNet class id -> human-readable label.
# NOTE(review): eval() executes arbitrary code if the downloaded file is
# tampered with; ast.literal_eval would be the safe choice here.
with open(synset_path) as f:
    synset = eval(f.read())
######################################################################
# Now we would like to port the Gluon model to a portable computational graph.
# It's as easy as several lines.
# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
shape_dict = {"data": x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
# we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# Here are some basic data workload configurations.
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
######################################################################
# Compile The Graph
# -----------------
# To compile the graph, we call the :py:func:`relay.build` function
# with the graph configuration and parameters. However, you cannot
# deploy an x86 program on a device with an ARM instruction set. It means
# Relay also needs to know the compilation option of target device,
# apart from arguments :code:`net` and :code:`params` to specify the
# deep learning workload. Actually, the option matters, different option
# will lead to very different performance.
######################################################################
# If we run the example on our x86 server for demonstration, we can simply
# set it as :code:`llvm`. If running it on the Jetson Nano, we need to
# set it as :code:`nvidia/jetson-nano`. Set :code:`local_demo` to False
# if you want to run this tutorial with a real device.
local_demo = True
if local_demo:
target = tvm.target.Target("llvm")
else:
target = tvm.target.Target("nvidia/jetson-nano")
assert target.kind.name == "cuda"
assert target.attrs["arch"] == "sm_53"
assert target.attrs["shared_memory_per_block"] == 49152
assert target.attrs["max_threads_per_block"] == 1024
assert target.attrs["thread_warp_size"] == 32
assert target.attrs["registers_per_block"] == 32768
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(func, target, params=params)
# After `relay.build`, you will get three return values: graph,
# library and the new parameter, since we do some optimization that will
# change the parameters but keep the result of model as the same.
# Save the library at local temporary directory.
tmp = utils.tempdir()
lib_fname = tmp.relpath("net.tar")
lib.export_library(lib_fname)
######################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# With RPC, you can deploy the model remotely from your host machine
# to the remote device.
# obtain an RPC session from remote device.
if local_demo:
remote = rpc.LocalSession()
else:
# The following is my environment, change this to the IP address of your target device
host = "192.168.1.11"
port = 9091
remote = rpc.connect(host, port)
# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module("net.tar")
# create the remote runtime module
if local_demo:
dev = remote.cpu(0)
else:
dev = remote.cuda(0)
module = runtime.GraphModule(rlib["default"](dev))
# set input data
module.set_input("data", tvm.nd.array(x.astype("float32")))
# run
module.run()
# get output
out = module.get_output(0)
# get top1 result
top1 = np.argmax(out.numpy())
print("TVM prediction top-1: {}".format(synset[top1]))
| 8,552 | 33.768293 | 99 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile PyTorch Object Detection Models
=======================================
This article is an introductory tutorial to deploy PyTorch object
detection models with Relay VM.
For us to begin with, PyTorch should be installed.
TorchVision is also required since we will be using it as our model zoo.
A quick solution is to install via pip
.. code-block:: bash
pip install torch
pip install torchvision
or please refer to official site
https://pytorch.org/get-started/locally/
PyTorch versions should be backwards compatible but should be used
with the proper TorchVision version.
Currently, TVM supports PyTorch 1.7 and 1.4. Other versions may
be unstable.
"""
import tvm
from tvm import relay
from tvm import relay
from tvm.runtime.vm import VirtualMachine
from tvm.contrib.download import download_testdata
import numpy as np
import cv2
# PyTorch imports
import torch
import torchvision
######################################################################
# Load pre-trained maskrcnn from torchvision and do tracing
# ---------------------------------------------------------
in_size = 300
input_shape = (1, 3, in_size, in_size)
def do_trace(model, inp):
    """Run TorchScript tracing on *model* with example input *inp*.

    Returns the traced module switched to eval mode so that layers such as
    dropout and batch-norm behave deterministically at inference time.
    """
    traced = torch.jit.trace(model, inp)
    traced.eval()
    return traced
def dict_to_tuple(out_dict):
    """Flatten a torchvision detection output dict into a tuple.

    TorchScript tracing cannot handle dict outputs, so the per-image result
    dict is converted to a (boxes, scores, labels[, masks]) tuple; ``masks``
    is appended only when the model produced it (e.g. Mask R-CNN).
    """
    # Membership test directly on the dict — `in out_dict.keys()` is redundant.
    if "masks" in out_dict:
        return out_dict["boxes"], out_dict["scores"], out_dict["labels"], out_dict["masks"]
    return out_dict["boxes"], out_dict["scores"], out_dict["labels"]
class TraceWrapper(torch.nn.Module):
    """Adapter that makes a torchvision detection model traceable.

    The wrapped model returns a list of dicts (one per image), which
    TorchScript tracing cannot represent; ``forward`` therefore flattens the
    first image's result dict into a tuple via ``dict_to_tuple``.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, inp):
        detections = self.model(inp)
        return dict_to_tuple(detections[0])
model_func = torchvision.models.detection.maskrcnn_resnet50_fpn
model = TraceWrapper(model_func(pretrained=True))
model.eval()
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))
with torch.no_grad():
out = model(inp)
script_module = do_trace(model, inp)
######################################################################
# Download a test image and pre-process
# -------------------------------------
img_url = (
"https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg"
)
img_path = download_testdata(img_url, "test_street_small.jpg", module="data")
img = cv2.imread(img_path).astype("float32")
img = cv2.resize(img, (in_size, in_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img / 255.0, [2, 0, 1])
img = np.expand_dims(img, axis=0)
######################################################################
# Import the graph to Relay
# -------------------------
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(script_module, shape_list)
######################################################################
# Compile with Relay VM
# ---------------------
# Note: Currently only CPU target is supported. For x86 target, it is
# highly recommended to build TVM with Intel MKL and Intel OpenMP to get
# best performance, due to the existence of large dense operator in
# torchvision rcnn models.
# Add "-libs=mkl" to get best performance on x86 target.
# For x86 machine supports AVX512, the complete target is
# "llvm -mcpu=skylake-avx512 -libs=mkl"
target = "llvm"
with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]):
vm_exec = relay.vm.compile(mod, target=target, params=params)
######################################################################
# Inference with Relay VM
# -----------------------
dev = tvm.cpu()
vm = VirtualMachine(vm_exec, dev)
vm.set_input("main", **{input_name: img})
tvm_res = vm.run()
######################################################################
# Get boxes with score larger than 0.9
# ------------------------------------
score_threshold = 0.9
boxes = tvm_res[0].numpy().tolist()
valid_boxes = []
for i, score in enumerate(tvm_res[1].numpy().tolist()):
if score > score_threshold:
valid_boxes.append(boxes[i])
else:
break
print("Get {} valid boxes".format(len(valid_boxes)))
| 4,953 | 31.168831 | 95 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Hugging Face Pruned Model on CPU
=========================================
**Author**: `Josh Fromm <https://github.com/jwfromm>`_
This tutorial demonstrates how to take any pruned model, in this case `PruneBert
from Hugging Face
<https://huggingface.co/huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad>`_,
and use TVM to leverage the model's sparsity support to produce real speedups. Although
the primary purpose of this tutorial is to realize speedups on already pruned
models, it may also be useful to estimate how fast a model would be *if* it were
pruned. To this end, we also provide a function that takes an unpruned model and
replaces its weights
with random and pruned weights at a specified sparsity. This may be a useful
feature when trying to decide if a model is worth pruning or not.
Before we get into the code, it's useful to discuss sparsity and pruning
and dig into the two
different types of sparsity: **structured** and **unstructured**.
Pruning is a technique primarily used to reduce the parameter size of a model
by replacing weight values with 0s. Although many methods exist for choosing which
weights should be set to 0, the most straight forward is by picking the
weights with the smallest value. Typically, weights are pruned to a desired
sparsity percentage. For example, a 95% sparse model would have only 5% of
its weights non-zero. Pruning to very high sparsities often requires
fine-tuning or full retraining as it tends to be a lossy approximation.
Although parameter size benefits are quite easy to obtain from a pruned model
through simple compression, leveraging sparsity to yield runtime speedups
is more complicated.
In structured sparsity weights are pruned with the goal of clustering
pruned weights together. In other words, they are pruned using both their
value and location. The benefit of bunching up pruned weights is that it allows
an algorithm such as matrix multiplication to skip entire blocks. It turns out
that some degree of *block sparsity* is very important to realizing significant
speedups on most hardware available today.
This is because when loading memory in most CPUs or GPUs,
it doesn't save any work to skip reading a single value at a time, instead an entire
chunk or tile is read in and executed using something like vectorized instructions.
Unstructured sparse weights are those that are pruned only on the value of
the original weights. They may appear to be scattered randomly throughout
a tensor rather than in chunks like we'd see in block sparse weights.
At low sparsities, unstructured pruning techniques are difficult to
accelerate. However, at high sparsities many blocks of all 0 values
will naturally appear, making it possible to accelerate.
This tutorial interacts with both structured and unstructured sparsity.
Hugging Face's PruneBert model is unstructured but 95% sparse, allowing us
to apply TVM's block sparse optimizations to it, even if not optimally.
When generating random sparse weights for an unpruned model, we do so with structured
sparsity. A fun exercise is comparing the real speed of PruneBert with the block
sparse speed using fake weights to see the benefit of structured sparsity.
"""
###############################################################################
# Load Required Modules
# ---------------------
# Other than TVM, scipy, the latest transformers, and
# tensorflow 2.2+ are required.
import os
import tvm
import time
import itertools
import numpy as np
import tensorflow as tf
from tvm import relay, runtime
from tvm.contrib import graph_executor
from tvm.relay import data_dep_optimization as ddo
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2,
)
import scipy.sparse as sp
# Ask tensorflow to limit its GPU memory to what's actually needed
# instead of gobbling everything that's available.
# https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
# This way this tutorial is a little more friendly to sphinx-gallery.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
print("tensorflow will use experimental.set_memory_growth(True)")
except RuntimeError as e:
print("experimental.set_memory_growth option is not available: {}".format(e))
###############################################################################
# Configure Settings
# ------------------
# Let's start by defining some parameters that define the type of model
# and sparsity to run.
# The name of the transformer model to download and run.
name = "huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad"
# The number of batches in an input.
batch_size = 1
# The length of each input sequence.
seq_len = 128
# TVM platform identifier. Note that best cpu performance can be achieved by setting -mcpu
# appropriately for your specific machine. CUDA and ROCm are also supported.
target = "llvm"
# Which device to run on. Should be one of tvm.cpu() or tvm.cuda().
dev = tvm.cpu()
# If true, then a sparse variant of the network will be run and
# benchmarked.
measure_sparse = True
# The block size of structured sparsity to convert weight tensors
# into. Changing this parameter may yield speedups for some platforms.
bs_r = 1
# For models besides PruneBert (which is 95% sparse), this parameter
# determines how sparse the generated weights should be. The higher
# the sparsity, the faster the result.
sparsity = 0.85
###############################################################################
# Download and Convert Transformers Model
# ---------------------------------------
# Now we'll grab a model from the transformers module, download it,
# convert it into a TensorFlow graphdef in preparation for converting that graphdef into
# a relay graph that we can optimize and deploy.
def load_keras_model(module, name, seq_len, batch_size, report_runtime=True):
    """Instantiate a pretrained transformers Keras model and optionally time it.

    A symbolic input is pushed through the model once so Keras can infer all
    layer shapes. When *report_runtime* is true, 50 forward passes on random
    int32 input are averaged and printed as a baseline latency figure.
    """
    model = module.from_pretrained(name)
    placeholder = tf.keras.Input(shape=[seq_len], batch_size=batch_size, dtype="int32")
    model(placeholder)  # Propagate shapes through the keras model.
    if report_runtime:
        np_input = np.random.uniform(size=[batch_size, seq_len], low=0, high=seq_len).astype(
            "int32"
        )
        repeats = 50
        start = time.time()
        for _ in range(repeats):
            model(np_input)
        elapsed = time.time() - start
        print("Keras Runtime: %f ms." % (1000 * (elapsed / repeats)))
    return model
def convert_to_graphdef(model, batch_size, seq_len):
    """Freeze a Keras model into a TensorFlow GraphDef with constant weights."""
    wrapped = tf.function(lambda x: model(x))
    # The model exposes exactly one saved-model input; borrow its dtype.
    spec_dict = model._saved_model_inputs_spec
    first_spec = spec_dict[list(spec_dict.keys())[0]]
    concrete = wrapped.get_concrete_function(
        tf.TensorSpec([batch_size, seq_len], first_spec.dtype)
    )
    frozen = convert_variables_to_constants_v2(concrete)
    return frozen.graph.as_graph_def()
def download_model(name, batch_size, seq_len):
    """Download the pretrained transformers model *name* and return its frozen GraphDef."""
    import transformers

    keras_class = getattr(transformers, "TFBertForSequenceClassification")
    model = load_keras_model(keras_class, name=name, batch_size=batch_size, seq_len=seq_len)
    return convert_to_graphdef(model, batch_size, seq_len)
###############################################################################
# Convert to Relay Graph
# ----------------------
# We now have all the tooling to get a transformers model in the right format
# for relay conversion. Let's import it! In the following function we
# save the imported graph in relay's json format so that we don't have
# to reimport from tensorflow each time this script is run.
def import_graphdef(
    name,
    batch_size,
    seq_len,
    save_relay=True,
    relay_file="model.json",
    relay_params="model.params",
):
    """Import the named transformers model into Relay, caching the result on disk.

    Parameters
    ----------
    name : str
        Hugging Face model identifier to download.
    batch_size : int
        Batch dimension of the model input.
    seq_len : int
        Sequence length of the model input.
    save_relay : bool
        If True, write the converted module/params next to this script so the
        slow TensorFlow import is skipped on subsequent runs.
    relay_file, relay_params : str
        Suffixes used to build the cache file names.

    Returns
    -------
    tuple
        (relay module, plain dict of parameters, input shape dict).
    """
    abs_path = os.path.dirname(os.path.abspath(__file__))
    shape_dict = {"input_1": (batch_size, seq_len)}
    # Cache file names encode model/batch/seq_len; '/' in model names is not
    # filesystem-safe, so it is replaced with '_'.
    relay_file = ("%s_%d_%d_%s" % (name, batch_size, seq_len, relay_file)).replace("/", "_")
    relay_params = ("%s_%d_%d_%s" % (name, batch_size, seq_len, relay_params)).replace("/", "_")
    if os.path.exists(os.path.join(abs_path, relay_file)) and os.path.exists(
        os.path.join(abs_path, relay_params)
    ):
        # Cache hit: reload the previously saved Relay module and parameters.
        with open(os.path.join(abs_path, relay_file), "r") as fi:
            mod = tvm.ir.load_json(fi.read())
        with open(os.path.join(abs_path, relay_params), "rb") as fi:
            params = relay.load_param_dict(fi.read())
    else:
        # Cache miss: download/freeze the TF model and convert it to Relay.
        graph_def = download_model(name, batch_size, seq_len)
        mod, params = relay.frontend.from_tensorflow(graph_def, shape=shape_dict)
        if save_relay:
            with open(os.path.join(abs_path, relay_file), "w") as fo:
                fo.write(tvm.ir.save_json(mod))
            with open(os.path.join(abs_path, relay_params), "wb") as fo:
                fo.write(runtime.save_param_dict(params))
    return mod, dict(params.items()), shape_dict
###############################################################################
# Run the Dense Graph
# -------------------
# Let's run the default version of the imported model. Note that even if
# the weights are sparse, we won't see any speedup because we are using
# regular dense matrix multiplications on these dense (but mostly zero)
# tensors instead of sparse aware kernels.
def run_relay_graph(mod, params, shape_dict, target, dev):
    """Build *mod* for *target*, run one inference on random input, and benchmark.

    The input is random int32 token ids shaped like the model's "input_1".
    Prints the benchmark summary and returns the first model output.
    """
    # `relay.build_config` is deprecated; use tvm.transform.PassContext directly
    # (consistent with the other tutorials in this gallery).
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params=params)
    input_shape = shape_dict["input_1"]
    dummy_data = np.random.uniform(size=input_shape, low=0, high=input_shape[1]).astype("int32")
    m = graph_executor.GraphModule(lib["default"](dev))
    m.set_input(0, dummy_data)
    m.run()
    tvm_output = m.get_output(0)
    print(m.benchmark(dev, repeat=5, number=5))
    return tvm_output
def run_dense(mod, params, shape_dict, target, dev):
    """Benchmark the unmodified (dense) model and return its output."""
    print("Dense Model Benchmark:")
    return run_relay_graph(mod, params, shape_dict, target, dev)
###############################################################################
# Run the Sparse Graph
# --------------------
# Next we'll convert the graph into a sparse representation and generate
# fake sparse weights if needed. Then we'll use the same benchmarking
# script as dense to see how much faster we go! We apply a few relay passes
# to the graph to get it leveraging sparsity. First we use
# `simplify_fc_transpose` to use transposes on the weights of dense layers
# into the parameters. This makes it easier to convert to matrix multiplies
# to sparse versions. Next we apply `bsr_dense.convert` to identify all
# weight matrices that can be sparse, and automatically replace them.
#
# The `bsr_dense.convert` call below is doing the heavy lifting of identifying
# which weights in the model can be made sparse by checking if they are
# at least `sparsity_threshold` percent sparse. If so, it converts those
# weights into *Block Compressed Row Format (BSR)*. BSR is essentially
# a representation that indexes into the nonzero chunks of the tensor,
# making it easy for an algorithm to load those non-zero chunks and ignore
# the rest of the tensor. Once the sparse weights are in BSR format,
# `relay.transform.DenseToSparse` is applied to actually replace
# `relay.dense` operations with `relay.sparse_dense` calls that can be
# run faster.
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"):
    """Build a dense M x N matrix whose nonzeros form random BS_R x BS_C blocks.

    Roughly ``density`` of the entries are nonzero; the block positions are
    sampled uniformly without replacement and each chosen block is filled with
    uniform values in [-0.1, 0.1). A round trip through scipy's BSR format
    validates the block structure before the dense matrix is returned.
    """
    dense = np.zeros((M, N), dtype=dtype)
    assert M % BS_R == 0
    assert N % BS_C == 0
    nnz = int(density * M * N)
    num_blocks = int(nnz / (BS_R * BS_C)) + 1
    candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
    assert candidate_blocks.shape[0] == M // BS_R * N // BS_C
    picked = candidate_blocks[
        np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
    ]
    for row, col in picked:
        dense[row : row + BS_R, col : col + BS_C] = np.random.uniform(-0.1, 0.1, (BS_R, BS_C))
    s = sp.bsr_matrix(dense, blocksize=(BS_R, BS_C))
    assert s.data.shape == (num_blocks, BS_R, BS_C)
    assert s.data.size >= nnz
    assert s.indices.shape == (num_blocks,)
    assert s.indptr.shape == (M // BS_R + 1,)
    return s.todense()
def random_sparse_bert_params(func, params, density, BS_R, BS_C):
    """Return a copy of *params* with eligible dense weights replaced by random block-sparse ones.

    A weight qualifies when it feeds a `dense` op and both of its dimensions
    divide evenly by the requested block shape. The original *params* dict is
    left untouched.
    """

    def _snapshot(param_dic):
        # tvm.nd.array copies each value so the caller's params stay intact.
        return {key: tvm.nd.array(val.numpy()) for key, val in param_dic.items()}

    new_params = _snapshot(params)
    for item in relay.analysis.sparse_dense._search_dense_op_weight(func):
        name = str(item)
        shape = new_params[name].shape
        if shape[0] % BS_R == 0 and shape[1] % BS_C == 0:
            new_params[name] = tvm.nd.array(
                random_bsr_matrix(shape[0], shape[1], BS_R, BS_C, density)
            )
    return new_params
def run_sparse(mod, params, shape_dict, target, dev, bs_r, sparsity, gen_weights):
    """Convert the model to block-sparse form and benchmark it.

    When *gen_weights* is true the (dense) weights are replaced with random
    block-sparse ones at the requested sparsity, which is how an unpruned
    model's potential speedup is estimated.
    """
    # Fold transposes on dense-layer weights into the parameters first, which
    # makes those weights directly convertible to BSR.
    mod, params = ddo.simplify_fc_transpose.convert(mod["main"], params)
    if gen_weights:
        params = random_sparse_bert_params(mod, params, BS_R=bs_r, BS_C=1, density=1 - sparsity)
    # Convert every weight that is at least 80% sparse to BSR and rewrite the
    # matching dense ops to sparse_dense.
    mod, params = ddo.bsr_dense.convert(mod, params, (bs_r, 1), sparsity_threshold=0.8)
    print("Block Sparse Model with {blocksize}x1 blocks:".format(blocksize=bs_r))
    return run_relay_graph(mod, params, shape_dict, target, dev)
###############################################################################
# Run All the Code!
# -----------------
# And that's it! Now we'll simply call all the needed function to benchmark
# the model according to the set parameters. Note that to run this code
# you'll need to uncomment the last line first.
def benchmark():
    """Import the configured model, then benchmark its dense and (optionally) sparse variants."""
    mod, params, shape_dict = import_graphdef(name, batch_size, seq_len)
    run_dense(mod, params, shape_dict, target, dev)
    if measure_sparse:
        # PruneBert is already ~95% sparse; only generate fake sparse weights
        # for models that are not pre-pruned.
        gen_weights = "prune" not in name
        run_sparse(mod, params, shape_dict, target, dev, bs_r, sparsity, gen_weights)
# benchmark()
###############################################################################
# Sample Output
# -------------
# For reference, below is the output of the script when run on an AMD CPU
# and shows about a 2.5X speedup from using sparsity.
# Dense Model Benchmark:
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (2, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (128, 3072), 'float32'), ('TENSOR', (768, 3072), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (3072, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('dense_nopack.x86', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('batch_matmul.x86', ('TENSOR', (12, 128, 128), 'float32'), ('TENSOR', (12, 64, 128), 'float32')). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=llvm, workload=('batch_matmul.x86', ('TENSOR', (12, 128, 64), 'float32'), ('TENSOR', (12, 128, 64), 'float32')). A fallback configuration is used, which may bring great performance regression.
# Runtime: 165.26 ms (12.83 ms)
# Block Sparse Model with 1x1 blocks:
# Runtime: 67.75 ms (8.83 ms)
# Here is the output of this script on a GPU (GTX 1070) with the target "cuda -libs=cublas".
#
# Dense Model Benchmark:
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (2, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (1, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (128, 3072), 'float32'), ('TENSOR', (768, 3072), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (3072, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('dense_cublas.cuda', ('TENSOR', (128, 768), 'float32'), ('TENSOR', (768, 768), 'float32'), None, 'float32'). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('batch_matmul_cublas.cuda', ('TENSOR', (12, 128, 128), 'float32'), ('TENSOR', (12, 64, 128), 'float32'), (12, 128, 64)). A fallback configuration is used, which may bring great performance regression.
# Cannot find config for target=cuda -keys=cuda,gpu -libs=cublas -max_num_threads=1024 -thread_warp_size=32, workload=('batch_matmul_cublas.cuda', ('TENSOR', (12, 128, 64), 'float32'), ('TENSOR', (12, 128, 64), 'float32'), (12, 128, 128)). A fallback configuration is used, which may bring great performance regression.
# Runtime: 10.64 ms (0.29 ms)
# Block Sparse Model with 1x1 blocks:
# Runtime: 6.46 ms (0.05 ms)
| 19,380 | 52.244505 | 319 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_model_on_rasp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-deploy-model-on-rasp:
Deploy the Pretrained Model on Raspberry Pi
===========================================
**Author**: `Ziheng Jiang <https://ziheng.org/>`_, \
`Hiroyuki Makino <https://makihiro.github.io/>`_
This is an example of using Relay to compile a ResNet model and deploy
it on Raspberry Pi.
"""
import tvm
from tvm import te
import tvm.relay as relay
from tvm import rpc
from tvm.contrib import utils, graph_executor as runtime
from tvm.contrib.download import download_testdata
######################################################################
# .. _build-tvm-runtime-on-device:
#
# Build TVM Runtime on Device
# ---------------------------
#
# The first step is to build the TVM runtime on the remote device.
#
# .. note::
#
# All instructions in both this section and next section should be
# executed on the target device, e.g. Raspberry Pi. And we assume it
# has Linux running.
#
# Since we do compilation on local machine, the remote device is only used
# for running the generated code. We only need to build tvm runtime on
# the remote device.
#
# .. code-block:: bash
#
# git clone --recursive https://github.com/apache/tvm tvm
# cd tvm
# mkdir build
# cp cmake/config.cmake build
# cd build
# cmake ..
# make runtime -j4
#
# After building runtime successfully, we need to set environment variables
# in :code:`~/.bashrc` file. We can edit :code:`~/.bashrc`
# using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM
# directory is in :code:`~/tvm`):
#
# .. code-block:: bash
#
# export PYTHONPATH=$PYTHONPATH:~/tvm/python
#
# To update the environment variables, execute :code:`source ~/.bashrc`.
######################################################################
# Set Up RPC Server on Device
# ---------------------------
# To start an RPC server, run the following command on your remote device
# (Which is Raspberry Pi in our example).
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9090
#
# If you see the line below, it means the RPC server started
# successfully on your device.
#
# .. code-block:: bash
#
# INFO:root:RPCServer: bind to 0.0.0.0:9090
#
######################################################################
# Prepare the Pre-trained Model
# -----------------------------
# Back to the host machine, which should have a full TVM installed (with LLVM).
#
# We will use pre-trained model from
# `MXNet Gluon model zoo <https://mxnet.apache.org/api/python/gluon/model_zoo.html>`_.
# You can find more details about this part at tutorial :ref:`tutorial-from-mxnet`.
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
import numpy as np
# one line to get the model
block = get_model("resnet18_v1", pretrained=True)
######################################################################
# In order to test our model, here we download an image of cat and
# transform its format.
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
img_path = download_testdata(img_url, img_name, module="data")
image = Image.open(img_path).resize((224, 224))
def transform_image(image):
    """Preprocess an HWC RGB image for ResNet: normalize with the ImageNet
    per-channel mean/std, transpose to CHW, and add a leading batch axis.

    Returns a float array of shape (1, 3, H, W).
    """
    channel_mean = np.array([123.0, 117.0, 104.0])
    channel_std = np.array([58.395, 57.12, 57.375])
    normalized = (np.array(image) - channel_mean) / channel_std
    chw = normalized.transpose((2, 0, 1))
    return chw[np.newaxis, :]
x = transform_image(image)
######################################################################
# synset is used to transform the label from number of ImageNet class to
# the word human can understand.
synset_url = "".join(
[
"https://gist.githubusercontent.com/zhreshold/",
"4d0b62f3d01426887599d4f7ede23ee5/raw/",
"596b27d23537e5a1b5751d2b0481ef172f58b539/",
"imagenet1000_clsid_to_human.txt",
]
)
synset_name = "imagenet1000_clsid_to_human.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
synset = eval(f.read())
######################################################################
# Now we would like to port the Gluon model to a portable computational graph.
# It's as easy as several lines.
# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
shape_dict = {"data": x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
# we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# Here are some basic data workload configurations.
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
######################################################################
# Compile The Graph
# -----------------
# To compile the graph, we call the :py:func:`relay.build` function
# with the graph configuration and parameters. However, you cannot
# deploy an x86 program on a device with ARM instruction set. It means
# Relay also needs to know the compilation option of target device,
# apart from arguments :code:`net` and :code:`params` to specify the
# deep learning workload. Actually, the option matters, different option
# will lead to very different performance.
######################################################################
# If we run the example on our x86 server for demonstration, we can simply
# set it as :code:`llvm`. If running it on the Raspberry Pi, we need to
# specify its instruction set. Set :code:`local_demo` to False if you want
# to run this tutorial with a real device.
local_demo = True
if local_demo:
target = tvm.target.Target("llvm")
else:
target = tvm.target.arm_cpu("rasp3b")
# The above line is a simple form of
# target = tvm.target.Target('llvm -device=arm_cpu -model=bcm2837 -mtriple=armv7l-linux-gnueabihf -mattr=+neon')
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(func, target, params=params)
# After `relay.build`, you will get three return values: graph,
# library and the new parameter, since we do some optimization that will
# change the parameters but keep the result of model as the same.
# Save the library at local temporary directory.
tmp = utils.tempdir()
lib_fname = tmp.relpath("net.tar")
lib.export_library(lib_fname)
######################################################################
# Deploy the Model Remotely by RPC
# --------------------------------
# With RPC, you can deploy the model remotely from your host machine
# to the remote device.
# obtain an RPC session from remote device.
if local_demo:
remote = rpc.LocalSession()
else:
# The following is my environment, change this to the IP address of your target device
host = "10.77.1.162"
port = 9090
remote = rpc.connect(host, port)
# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module("net.tar")
# create the remote runtime module
dev = remote.cpu(0)
module = runtime.GraphModule(rlib["default"](dev))
# set input data
module.set_input("data", tvm.nd.array(x.astype("float32")))
# run
module.run()
# get output
out = module.get_output(0)
# get top1 result
top1 = np.argmax(out.numpy())
print("TVM prediction top-1: {}".format(synset[top1]))
| 8,144 | 34.25974 | 116 | py |
tvm | tvm-main/gallery/how_to/deploy_models/deploy_quantized.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy a Quantized Model on Cuda
================================
**Author**: `Wuwei Lin <https://github.com/vinx13>`_
This article is an introductory tutorial of automatic quantization with TVM.
Automatic quantization is one of the quantization modes in TVM. More details on
the quantization story in TVM can be found
`here <https://discuss.tvm.apache.org/t/quantization-story/3920>`_.
In this tutorial, we will import a GluonCV pre-trained model on ImageNet to
Relay, quantize the Relay model and then perform the inference.
"""
import tvm
from tvm import te
from tvm import relay
import mxnet as mx
from tvm.contrib.download import download_testdata
from mxnet import gluon
import logging
import os
# Workload configuration for this tutorial: run a pretrained ResNet-18
# one image at a time, compiled for and executed on the CUDA device.
batch_size = 1
model_name = "resnet18_v1"
target = "cuda"
dev = tvm.device(target)
###############################################################################
# Prepare the Dataset
# -------------------
# We will demonstrate how to prepare the calibration dataset for quantization.
# We first download the validation set of ImageNet and pre-process the dataset.
calibration_rec = download_testdata(
"http://data.mxnet.io.s3-website-us-west-1.amazonaws.com/data/val_256_q90.rec",
"val_256_q90.rec",
)
def get_val_data(num_workers=4):
    """Build the ImageNet validation-set iterator plus a batch-unpacking helper.

    Parameters
    ----------
    num_workers : int
        Number of preprocessing threads for the record iterator.

    Returns
    -------
    (mx.io.ImageRecordIter, callable) :
        The iterator over `calibration_rec`, and a function mapping one of its
        batches to `(data, label)` numpy arrays.
    """
    rgb_mean = [123.68, 116.779, 103.939]
    rgb_std = [58.393, 57.12, 57.375]

    def batch_fn(batch):
        # Unpack the first data/label arrays of an mx.io batch into numpy.
        return batch.data[0].asnumpy(), batch.label[0].asnumpy()

    # InceptionV3 expects 299x299 inputs; every other supported model uses 224.
    side = 299 if model_name == "inceptionv3" else 224
    val_data = mx.io.ImageRecordIter(
        path_imgrec=calibration_rec,
        preprocess_threads=num_workers,
        shuffle=False,
        batch_size=batch_size,
        resize=256,
        data_shape=(3, side, side),
        mean_r=rgb_mean[0],
        mean_g=rgb_mean[1],
        mean_b=rgb_mean[2],
        std_r=rgb_std[0],
        std_g=rgb_std[1],
        std_b=rgb_std[2],
    )
    return val_data, batch_fn
###############################################################################
# The calibration dataset should be an iterable object. We define the
# calibration dataset as a generator object in Python. In this tutorial, we
# only use a few samples for calibration.
calibration_samples = 10
def calibrate_dataset():
    """Generate calibration batches for quantization.

    Yields dicts mapping the input name "data" to a numpy batch, stopping once
    `calibration_samples` images have been produced.
    """
    loader, unpack = get_val_data()
    loader.reset()
    for index, batch in enumerate(loader):
        if index * batch_size >= calibration_samples:
            break
        images, _ = unpack(batch)
        yield {"data": images}
###############################################################################
# Import the model
# ----------------
# We use the Relay MxNet frontend to import a model from the Gluon model zoo.
def get_model():
    """Fetch the pretrained Gluon model and import it into Relay.

    Returns
    -------
    (tvm.IRModule, dict) :
        The Relay module and its parameter dict.
    """
    gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
    # InceptionV3 expects 299x299 inputs; every other supported model uses 224.
    side = 299 if model_name == "inceptionv3" else 224
    input_shapes = {"data": (batch_size, 3, side, side)}
    return relay.frontend.from_mxnet(gluon_model, input_shapes)
###############################################################################
# Quantize the Model
# ------------------
# In quantization, we need to find the scale for each weight and intermediate
# feature map tensor of each layer.
#
# For weights, the scales are directly calculated based on the value of the
# weights. Two modes are supported: `power2` and `max`. Both modes find the
# maximum value within the weight tensor first. In `power2` mode, the maximum
# is rounded down to power of two. If the scales of both weights and
# intermediate feature maps are power of two, we can leverage bit shifting for
# multiplications. This make it computationally more efficient. In `max` mode,
# the maximum is used as the scale. Without rounding, `max` mode might have
# better accuracy in some cases. When the scales are not powers of two, fixed
# point multiplications will be used.
#
# For intermediate feature maps, we can find the scales with data-aware
# quantization. Data-aware quantization takes a calibration dataset as the
# input argument. Scales are calculated by minimizing the KL divergence between
# distribution of activation before and after quantization.
# Alternatively, we can also use pre-defined global scales. This saves the time
# for calibration. But the accuracy might be impacted.
def quantize(mod, params, data_aware):
    """Quantize a Relay module.

    Parameters
    ----------
    mod : tvm.IRModule
        Module to quantize.
    params : dict
        Parameter dict for the module.
    data_aware : bool
        When True, calibrate activation scales by KL divergence over
        `calibrate_dataset()`; otherwise use a fixed global scale of 8.0.

    Returns
    -------
    tvm.IRModule :
        The quantized module.
    """
    if data_aware:
        with relay.quantize.qconfig(calibrate_mode="kl_divergence", weight_scale="max"):
            return relay.quantize.quantize(mod, params, dataset=calibrate_dataset())
    with relay.quantize.qconfig(calibrate_mode="global_scale", global_scale=8.0):
        return relay.quantize.quantize(mod, params)
###############################################################################
# Run Inference
# -------------
# We create a Relay VM to build and execute the model.
def run_inference(mod):
    """Compile `mod` with the Relay VM and run inference on a few validation batches.

    Predictions are discarded; this only demonstrates that the quantized model runs.
    """
    executor = relay.create_executor("vm", mod, dev, target).evaluate()
    loader, unpack = get_val_data()
    for index, batch in enumerate(loader):
        images, _ = unpack(batch)
        _ = executor(images)
        if index > 10:  # only run inference on a few samples in this tutorial
            break
def main():
    """Import the pretrained model, quantize it with data-aware calibration, and run it."""
    module, params = get_model()
    quantized = quantize(module, params, data_aware=True)
    run_inference(quantized)
if __name__ == "__main__":
main()
| 6,145 | 35.802395 | 88 | py |
tvm | tvm-main/python/gen_requirements.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Python requirements.txt generator.
This script generates a set of requirements.txt files (stored in `./requirements`) that describe
TVM's Python dependencies.
## Pieces
TVM can be roughly broken into these named pieces along the lines of Python dependencies:
- "core": A core piece, which is intended to be buildable with very few external dependencies. Users
can use Relay, compile models, and run autotuning with this part.
- "importer-<tool>": Model importers, which convert models defined in various other tools (i.e.
TensorFlow, PyTorch, etc) into Relay models.
- Extra features (i.e. XGBoost in AutoTVM). These enhance TVM's functionality, but aren't required
for basic operation.
## What this tool does
From these pieces, this tool builds:
- requirements/<name>.txt - Python dependencies for each named piece above, `<name>` is the same as
the quoted piece name.
- requirements/all.txt - Consolidated Python dependencies for all pieces, excluding dev below.
- requirements/dev.txt - Python dependencies needed to develop TVM, such as lint and test tools.
The data representing each piece is contained in the two maps below.
"""
import argparse
import collections
import os
import re
import sys
import textwrap
import typing
RequirementsByPieceType = typing.List[typing.Tuple[str, typing.Tuple[str, typing.List[str]]]]
# Maps named TVM piece (see description above) to a list of names of Python packages. Please use
# alphabetical order for each package list, and do not add version constraints here!
REQUIREMENTS_BY_PIECE: RequirementsByPieceType = [
# Base requirements needed to install tvm.
(
"core",
(
"Base requirements needed to install tvm",
[
"attrs",
"cloudpickle",
"decorator",
"ml_dtypes",
"numpy",
"psutil",
"scipy",
"tornado",
"typing_extensions",
],
),
),
# Provide support for Arm(R) Ethos(TM)-U NPU.
(
"ethosu",
(
"Requirements for using Arm(R) Ethos(TM)-U NPU",
[
"ethos-u-vela",
],
),
),
# Relay frontends.
(
"importer-caffe",
(
"Requirements for the Caffe importer",
[
"numpy",
"protobuf",
"scikit-image",
"six",
],
),
),
(
"importer-caffe2",
(
"Requirements for the Caffe2 importer",
[
"future", # Hidden dependency of torch.
"torch",
],
),
),
("importer-coreml", ("Requirements for the CoreML importer", ["coremltools"])),
("importer-darknet", ("Requirements for the DarkNet importer", ["opencv-python"])),
(
"importer-keras",
("Requirements for the Keras importer", ["tensorflow", "tensorflow-estimator"]),
),
(
"importer-onnx",
(
"Requirements for the ONNX importer",
[
"future", # Hidden dependency of torch.
"onnx",
"onnxoptimizer",
"onnxruntime",
"torch",
"torchvision",
],
),
),
(
"importer-paddle",
("Requirements for the PaddlePaddle importer", ["paddlepaddle"]),
),
(
"importer-pytorch",
(
"Requirements for the PyTorch importer",
[
"future", # Hidden dependency of torch.
"torch",
"torchvision",
],
),
),
(
"importer-tensorflow",
("Requirements for the TensorFlow importer", ["tensorflow", "tensorflow-estimator"]),
),
(
"importer-tflite",
("Requirements for the TFLite importer", ["tensorflow", "tensorflow-estimator", "tflite"]),
),
(
"tvmc",
(
"Requirements for the tvmc command-line tool",
[
"ethos-u-vela",
"future", # Hidden dependency of torch.
"onnx",
"onnxoptimizer",
"onnxruntime",
"paddlepaddle",
"tensorflow",
"tflite",
"torch",
"torchvision",
"xgboost",
],
),
),
# Vitis AI requirements
(
"vitis-ai",
(
"Requirements for the Vitis AI codegen",
[
"h5py",
"progressbar",
],
),
),
# XGBoost, useful for autotuning on some targets.
(
"xgboost",
(
"Requirements for XGBoost autotuning",
[
"future", # Hidden dependency of torch.
"torch",
"xgboost",
],
),
),
# Development requirements
(
"dev",
(
"Requirements to develop TVM -- lint, docs, testing, etc.",
[
"astroid", # pylint requirement, listed so a hard constraint can be included.
"autodocsumm",
"black",
"commonmark",
"cpplint",
"docutils",
"image",
"matplotlib",
"pillow",
"pylint",
"sphinx",
"sphinx_autodoc_annotation",
"sphinx_gallery",
"sphinx_rtd_theme",
"types-psutil",
],
),
),
]
ConstraintsType = typing.List[typing.Tuple[str, typing.Union[None, str]]]
# Maps a named Python package (which should appear in REQUIREMENTS_BY_PIECE above) to a
# semver or pip version constraint. Semver constraints are translated into requirements.txt-friendly
# constraints.
#
# These constraints serve only to record technical reasons why a particular version can't be used.
# They are the default install_requires used in setup.py. These can be further narrowed to restrict
# dependencies to those tested or used in CI; however, that process is not done here.
#
# Policy for constraints listed here:
# 1. Each package specified in REQUIREMENTS_BY_PIECE must be included here.
# 2. If TVM will functionally break against an old version of a dependency, specify a >= relation
# here. Include a comment linking to context or explaining why the constraint is in place.
CONSTRAINTS = [
("astroid", None),
("attrs", None),
("autodocsumm", None),
("black", "==20.8b1"),
("cloudpickle", None),
("commonmark", ">=0.7.3"), # From PR #213.
("coremltools", None),
("cpplint", None),
("decorator", None),
(
"docutils",
"<0.17",
), # Work around https://github.com/readthedocs/sphinx_rtd_theme/issues/1115
("ethos-u-vela", "==3.8.0"),
("future", None),
("h5py", "==2.10.0"),
("image", None),
("matplotlib", None),
("numpy", None),
("onnx", None),
("onnxoptimizer", None),
("onnxruntime", None),
("opencv-python", None),
("paddlepaddle", None),
("pillow", None),
("progressbar", None),
("protobuf", None),
("psutil", None),
("pylint", None),
("scikit-image", None),
("scipy", None),
("six", None),
("sphinx", None),
("sphinx_autodoc_annotation", None),
("sphinx_gallery", None),
("sphinx_rtd_theme", None),
("tensorflow", None),
("tensorflow-estimator", None),
("tflite", None),
("torch", None),
("torchvision", None),
("tornado", None),
("typing_extensions", None),
("xgboost", ">=1.1.0"), # From PR #4953 & Issue #12009
]
################################################################################
# End of configuration options.
################################################################################
# Required keys in REQUIREMENTS_BY_PIECE.
REQUIRED_PIECES: typing.List[str] = ["core", "dev"]
# Regex to validates piece names.
PIECE_REGEX: typing.Pattern = re.compile(r"^[a-z0-9][a-z0-9-]*", re.IGNORECASE)
# Regex to match a constraint specification. Multiple constraints are not supported.
CONSTRAINT_REGEX: typing.Pattern = re.compile(r"(?:\^|\<|(?:~=)|(?:<=)|(?:==)|(?:>=)|\>)[^<>=\^,]+")
# Regex for parsing semantic versions. See
# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
SEMVER_REGEX: typing.Pattern = re.compile(
r"^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"
)
def validate_requirements_by_piece() -> typing.List[str]:
    """Validate REQUIREMENTS_BY_PIECE, returning a list of problems.
    Returns
    -------
    list[str] :
        A list of strings, each one describing a distinct problem with REQUIREMENTS_BY_PIECE.
    """
    problems = []
    unseen_required_pieces = set(REQUIRED_PIECES)
    seen_pieces = set()
    # Ensure that core is listed first and dev is listed last.
    saw_core = False
    saw_dev = False
    if not isinstance(REQUIREMENTS_BY_PIECE, (list, tuple)):
        problems.append(f"must be list or tuple, see {REQUIREMENTS_BY_PIECE!r}")
        return problems
    for piece, value in REQUIREMENTS_BY_PIECE:
        # Piece names must be strings; skip further checks on a malformed entry.
        if not isinstance(piece, str):
            problems.append(f"piece {piece!r}: must be str")
            continue
        if piece in unseen_required_pieces:
            unseen_required_pieces.remove(piece)
        # Duplicate detection is case-insensitive.
        piece_lower = piece.lower()
        if piece_lower in seen_pieces:
            problems.append(f"piece {piece}: listed twice")
        seen_pieces.add(piece_lower)
        if not saw_core and piece != "core":
            problems.append(f'piece {piece}: must list after "core" (core must be first)')
        elif piece == "core":
            saw_core = True
        if saw_dev:
            # Anything appearing after "dev" is out of order.
            problems.append(f'piece {piece}: must list before "dev" (dev must be last)')
        elif piece == "dev":
            saw_dev = True
        # Each value must be a ("description", [deps...]) 2-tuple.
        if not isinstance(value, (tuple, list)) or len(value) != 2:
            problems.append(
                f'piece {piece}: should be formatted like ("{piece}", ("<requirements.txt comment>", ["dep1", "dep2", ...])). got: {value!r}'
            )
            continue
        description, deps = value
        if not isinstance(description, str):
            problems.append(f"piece {piece}: description should be a string, got {description!r}")
        if not isinstance(deps, (list, tuple)) or any(not isinstance(d, str) for d in deps):
            problems.append(f"piece {piece}: deps should be a list of strings, got {deps!r}")
            continue
        # Dependency lists must be kept in sorted order.
        if list(sorted(deps)) != list(deps):
            problems.append(
                f"piece {piece}: deps must be sorted. Correct order:\n {list(sorted(deps))!r}"
            )
        piece_deps = set()
        for d in deps:
            # Version specifiers belong in CONSTRAINTS, not here.
            if CONSTRAINT_REGEX.search(d):
                problems.append(
                    f"piece {piece}: dependency {d} should not specify a version. "
                    "Add it to CONSTRAINTS instead."
                )
            if d.lower() in piece_deps:
                problems.append(f"piece {piece}: dependency {d} listed twice")
            piece_deps.add(d.lower())
    # BUG FIX: unseen_required_pieces was maintained but never consulted, so a
    # configuration missing a required piece ("core" or "dev") silently passed
    # validation. Report any required piece that was never seen.
    for missing in sorted(unseen_required_pieces):
        problems.append(f"required piece {missing}: not specified in REQUIREMENTS_BY_PIECE")
    extras_pieces = [
        k for (k, _) in REQUIREMENTS_BY_PIECE if k not in ("dev", "core") if isinstance(k, str)
    ]
    sorted_extras_pieces = list(sorted(extras_pieces))
    if sorted_extras_pieces != list(extras_pieces):
        problems.append(
            'pieces other than "core" and "dev" must appear in alphabetical order: '
            f"{sorted_extras_pieces}"
        )
    return problems
def parse_semver(
    package: str, constraint: str, problems: typing.List[str]
) -> typing.Tuple[typing.List[str], int, int]:
    """Parse a semantic versioning constraint of the form "^X.[.Y[.Z[...]]]]".

    Parameters
    ----------
    package : str
        Name of the package specifying this constraint, for reporting problems.
    constraint : str
        The semver constraint. Must start with "^".
    problems : List[str]
        Problems encountered while validating `constraint` are appended here.

    Returns
    -------
    tuple[list[str], int, int] :
        A 3-tuple: (1) one entry per "."-separated component of the semver
        string; (2) the index of the component that must not change to satisfy
        the constraint; (3) the numeric value of the changing component (which
        can differ from the raw component when pre-release or build metadata
        is attached to the patch).
        See "Caret requirements" at https://python-poetry.org/docs/versions/.
    """
    # Strip the leading "^" before matching against the semver grammar.
    match = SEMVER_REGEX.match(constraint[1:])
    if match is None:
        problems.append(f"{package}: invalid semver constraint {constraint}")
        return [], 0, 0

    patch_text = match.group("patch")
    prerelease = match.group("prerelease")
    build_meta = match.group("buildmetadata")
    # The third component carries any pre-release / build-metadata suffixes.
    full_patch = patch_text
    if prerelease:
        full_patch += f"-{prerelease}"
    if build_meta:
        full_patch += f"+{build_meta}"
    parts = [match.group("major"), match.group("minor"), full_patch]

    # Major/minor handling is simple: the first non-zero component is pinned.
    for index in (0, 1):
        numeric = int(parts[index].strip())
        if numeric:
            return parts, index, numeric

    # For the patch component, consult only the numeric patch digits; a
    # suffix (pre-release/build metadata) also counts as "changing".
    if patch_text:
        patch_value = int(patch_text)
        if patch_value or parts[2] != patch_text:
            return parts, 2, patch_value

    # Everything was 0 (e.g. "^0.0.0").
    return parts, 0, 0
def validate_constraints() -> typing.List[str]:
    """Validate CONSTRAINTS, returning a list of problems found.
    Returns
    -------
    list[str] :
        A list of strings, each one describing a distinct problem found in CONSTRAINTS.
    """
    problems = []
    if not isinstance(CONSTRAINTS, (list, tuple)):
        problems.append(f"must be list or tuple, see: {CONSTRAINTS!r}")
        # BUG FIX: previously validation kept iterating a malformed CONSTRAINTS,
        # which could raise instead of reporting the problem. Return early, as
        # validate_requirements_by_piece does for its own top-level type check.
        return problems
    seen_packages = set()
    # Collect every dependency mentioned anywhere in REQUIREMENTS_BY_PIECE
    # (lower-cased) so we can reject constraints on unknown packages.
    all_deps = set()
    for _, (_, deps) in REQUIREMENTS_BY_PIECE:
        for d in deps:
            all_deps.add(d.lower())
    for package, constraint in CONSTRAINTS:
        if package in seen_packages:
            problems.append(f"{package}: specified twice")
        seen_packages.add(package)
        if package.lower() not in all_deps:
            problems.append(f"{package}: not specified in REQUIREMENTS_BY_PIECE")
        if constraint is None:  # None is just a placeholder that allows for comments.
            continue
        if not CONSTRAINT_REGEX.match(constraint):
            problems.append(
                f'{package}: constraint "{constraint}" does not look like a valid constraint'
            )
        # Caret constraints get an extra semver syntax check; parse_semver
        # appends any problems it finds.
        if constraint.startswith("^"):
            parse_semver(package, constraint, problems)
    # The constraints table itself must be kept alphabetically sorted.
    all_constrained_packages = [p for (p, _) in CONSTRAINTS]
    sorted_constrained_packages = list(sorted(all_constrained_packages))
    if sorted_constrained_packages != all_constrained_packages:
        problems.append(
            "CONSTRAINTS entries should be in this sorted order: " f"{sorted_constrained_packages}"
        )
    return problems
class ValidationError(Exception):
    """Raised when a validation error occurs."""

    @staticmethod
    def format_problems(config: str, problems: typing.List[str]) -> str:
        """Format a list of problems with a global config variable into human-readable output.

        Parameters
        ----------
        config : str
            Name of the global configuration variable of concern. Prepended to the output.
        problems: list[str]
            A list of strings, each one a distinct problem with that config variable.

        Returns
        -------
        str :
            A human-readable string suitable for console, listing the problems as bullet points.
        """
        bullets = []
        for problem in problems:
            assert isinstance(problem, str), f"problems element not a str: {problem}"
            # Wrap each problem to 80 columns, rendered as a " * " bullet.
            wrapped = textwrap.wrap(
                f"{config}: {problem}", width=80, initial_indent=" * ", subsequent_indent=" "
            )
            bullets.append("\n".join(wrapped))
        return "\n".join(bullets)

    def __init__(self, config: str, problems: typing.List[str]):
        """Describes an error that occurs validating one of the global config variables.

        Parameters
        ----------
        config : str
            Name of the global configuration variable of concern. Prepended to the output.
        problems: list[str]
            A list of strings, each one a distinct problem with that config variable.
        """
        super().__init__(self.format_problems(config, problems))
        self.problems = problems
def validate_or_raise():
    """Validate both configuration tables, raising ValidationError on the first failure."""
    for config_name, validator in (
        ("REQUIREMENTS_BY_PIECE", validate_requirements_by_piece),
        ("CONSTRAINTS", validate_constraints),
    ):
        found = validator()
        if found:
            raise ValidationError(config_name, found)
def semver_to_requirements(dep: str, constraint: str, joined_deps: typing.List[str]):
    """Convert a SemVer-style constraint to a setuptools-compatible constraint.

    Parameters
    ----------
    dep : str
        Name of the PyPI package to depend on.
    constraint : str
        The SemVer constraint, of the form "^<semver constraint>".
    joined_deps : list[str]
        The converted ">=min,<max" constraint line is appended to this list.
    """
    issues: typing.List[str] = []
    min_parts, pinned_index, pinned_value = parse_semver(dep, constraint, issues)
    # The constraint was already validated up front, so parsing must succeed.
    rendered_issues = "\n" + "\n".join(f" * {issue}" for issue in issues)
    assert (
        not issues
    ), f"should not happen: validated semver {constraint} parses with problems:{rendered_issues}"
    # Exclusive upper bound: bump the pinned component and zero everything after it.
    upper_parts = (
        min_parts[:pinned_index]
        + [str(pinned_value + 1)]
        + ["0" for _ in min_parts[pinned_index + 1 :]]
    )
    joined_deps.append(f'{dep}>={".".join(min_parts)},<{".".join(upper_parts)}')
def join_requirements() -> typing.Dict[str, typing.Tuple[str, typing.List[str]]]:
    """Validate, then join REQUIREMENTS_BY_PIECE against CONSTRAINTS and return the result.

    Returns
    -------
    An OrderedDict containing REQUIREMENTS_BY_PIECE, except any dependency mentioned in
    CONSTRAINTS is replaced by a setuptools-compatible constraint. An extra "all-prod"
    entry aggregates every non-dev dependency.
    """
    validate_or_raise()

    # Constraint lookup is case-insensitive on package name.
    lower_constraints = collections.OrderedDict(
        (name.lower(), spec) for (name, spec) in CONSTRAINTS
    )

    result = collections.OrderedDict()
    prod_deps = set()
    for piece, (description, deps) in REQUIREMENTS_BY_PIECE:
        pinned = []
        for dep in deps:
            spec = lower_constraints.get(dep.lower())
            if spec is None:
                # No constraint registered: pass the bare name through.
                pinned.append(dep)
            elif spec[0] == "^":
                # Caret constraints are translated to a >=min,<max pair.
                semver_to_requirements(dep, spec, pinned)
            else:
                pinned.append(f"{dep}{spec}")
        if piece != "dev":
            prod_deps.update(pinned)
        result[piece] = (description, pinned)

    result["all-prod"] = (
        "Combined dependencies for all TVM pieces, excluding dev",
        list(sorted(prod_deps)),
    )
    return result
def join_and_write_requirements(args: argparse.Namespace):
    """Resolve constraints and write requirements/<piece>.txt files next to this script.

    Exits with status 2 on configuration problems; in --lint mode exits 0 after
    validation without writing anything.
    """
    try:
        resolved = join_requirements()
    except ValidationError as e:
        print(f"ERROR: invalid requirements configuration in {__file__}:", file=sys.stderr)
        print(str(e), file=sys.stderr)
        sys.exit(2)

    # Lint mode: validation succeeded, nothing to write.
    if args.lint:
        sys.exit(0)

    output_dir = os.path.join(os.path.dirname(__file__), "requirements")
    if not os.path.isdir(output_dir):
        if os.path.exists(output_dir):
            # A non-directory is squatting on the output path; refuse to clobber it.
            print(
                f"ERROR: output directory {output_dir} exists but is not a dir. Delete it",
                file=sys.stderr,
            )
            sys.exit(2)
        os.makedirs(output_dir)

    for piece, (description, deps) in resolved.items():
        with open(os.path.join(output_dir, f"{piece}.txt"), "w") as f:
            f.write(
                f"# AUTOGENERATED by python/gen_requirements.py{os.linesep}"
                f"#{os.linesep}"
                f"# {description}{os.linesep}"
            )
            f.writelines(f"{d}{os.linesep}" for d in deps)
def parse_args() -> argparse.Namespace:
    """Parse command-line options; only ``--lint`` is supported."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--lint", action="store_true", help="Just lint dependencies, don't generate anything"
    )
    return cli.parse_args()
def main():
    """Entry point: parse CLI options, then lint or generate the requirements files."""
    join_and_write_requirements(parse_args())
if __name__ == "__main__":
main()
| 22,224 | 32.221226 | 244 | py |
tvm | tvm-main/python/tvm/testing/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unnecessary-comprehension
"""TVM testing utilities
Organization
************
This file contains functions expected to be called directly by a user
while writing unit tests. Integrations with the pytest framework
are in plugin.py.
Testing Markers
***************
We use pytest markers to specify the requirements of test functions. Currently
there is a single distinction that matters for our testing environment: does
the test require a gpu. For tests that require just a gpu or just a cpu, we
have the decorator :py:func:`requires_gpu` that enables the test when a gpu is
available. To avoid running tests that don't require a gpu on gpu nodes, this
decorator also sets the pytest marker `gpu` so we can use select the gpu subset
of tests (using `pytest -m gpu`).
Unfortunately, many tests are written like this:
.. python::
def test_something():
for target in all_targets():
do_something()
The test uses both gpu and cpu targets, so the test needs to be run on both cpu
and gpu nodes. But we still want to only run the cpu targets on the cpu testing
node. The solution is to mark these tests with the gpu marker so they will be
run on the gpu nodes. But we also modify all_targets (renamed to
enabled_targets) so that it only returns gpu targets on gpu nodes and cpu
targets on cpu nodes (using an environment variable).
Instead of using the all_targets function, future tests that would like to
test against a variety of targets should use the
:py:func:`tvm.testing.parametrize_targets` functionality. This allows us
greater control over which targets are run on which testing nodes.
If in the future we want to add a new type of testing node (for example
fpgas), we need to add a new marker in `tests/python/pytest.ini` and a new
function in this module. Then targets using this node should be added to the
`TVM_TEST_TARGETS` environment variable in the CI.
"""
import inspect
import copy
import copyreg
import ctypes
import functools
import hashlib
import itertools
import logging
import os
import pickle
import platform
import sys
import textwrap
import time
import shutil
import subprocess
from pathlib import Path
from typing import Optional, Callable, Union, List, Tuple
import pytest
import numpy as np
import tvm
import tvm.arith
import tvm.tir
import tvm.te
import tvm._ffi
from tvm.contrib import nvcc, cudnn, rocm
import tvm.contrib.hexagon._ci_env_check as hexagon
from tvm.driver.tvmc.frontends import load_model
from tvm.error import TVMError
# True when the SKIP_SLOW_TESTS environment variable holds a truthy value;
# consumed by the `slow` marker defined later in this module.
SKIP_SLOW_TESTS = os.getenv("SKIP_SLOW_TESTS", "").lower() in {"true", "1", "yes"}
# True when running under CI (the CI environment variable equals "true").
IS_IN_CI = os.getenv("CI", "") == "true"
# Skip marker for tests that cannot run against a wheel-installed TVM
# (activated by setting WHEEL_TEST to a truthy value).
skip_if_wheel_test = pytest.mark.skipif(
    os.getenv("WHEEL_TEST", "").lower() in {"true", "1", "yes"},
    reason="Test not supported in wheel.",
)
def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
    """Version of np.testing.assert_allclose with sensible default tolerances.

    The arguments are not interchangeable: the comparison performed is
    ``abs(actual - desired)`` against ``atol + rtol * abs(desired)``.
    Because ``desired`` is frequently allowed to be close to zero, a
    non-zero ``atol`` default is used.
    """
    actual_arr = np.asanyarray(actual)
    desired_arr = np.asanyarray(desired)
    # Shapes must agree before the element-wise comparison is meaningful.
    np.testing.assert_allclose(actual_arr.shape, desired_arr.shape)
    np.testing.assert_allclose(actual_arr, desired_arr, rtol=rtol, atol=atol, verbose=True)
def check_numerical_grads(
    function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1
):
    """A helper function that checks that numerical gradients of a function are
    equal to gradients computed in some different way (analytical gradients).
    Numerical gradients are computed using finite difference approximation. To
    reduce the number of function evaluations, the number of points used is
    gradually increased if the error value is too high (up to 5 points).
    Parameters
    ----------
    function
        A function that takes inputs either as positional or as keyword
        arguments (either `function(*input_values)` or `function(**input_values)`
        should be correct) and returns a scalar result. Should accept numpy
        ndarrays.
    input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
        A list of values or a dict assigning values to variables. Represents the
        point at which gradients should be computed.
    grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
        Gradients computed using a different method.
    function_value : float, optional
        Should be equal to `function(**input_values)`.
    delta : float, optional
        A small number used for numerical computation of partial derivatives.
        The default 1e-3 is a good choice for float32.
    atol : float, optional
        Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a
        gradient.
    rtol : float, optional
        Relative tolerance.
    Raises
    ------
    AssertionError
        If a gradient has the wrong shape, or differs from its numerical
        estimate beyond the combined tolerance.
    ValueError
        If NaN or infinity is encountered while estimating gradients.
    """
    # If input_values is a list then function accepts positional arguments
    # In this case transform it to a function taking kwargs of the form {"0": ..., "1": ...}
    if not isinstance(input_values, dict):
        input_len = len(input_values)
        input_values = {str(idx): val for idx, val in enumerate(input_values)}
        def _function(_input_len=input_len, _orig_function=function, **kwargs):
            return _orig_function(*(kwargs[str(i)] for i in range(input_len)))
        function = _function
        grad_values = {str(idx): val for idx, val in enumerate(grad_values)}
    if function_value is None:
        function_value = function(**input_values)
    # a helper to modify j-th element of val by a_delta
    def modify(val, j, a_delta):
        # copy first so the caller's arrays are never mutated
        val = val.copy()
        val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta
        return val
    # numerically compute a partial derivative with respect to j-th element of the var `name`
    def derivative(x_name, j, a_delta):
        modified_values = {
            n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()
        }
        return (function(**modified_values) - function_value) / a_delta
    # element-wise tolerance test: |numeric - analytic| < atol + rtol * |numeric|
    def compare_derivative(j, n_der, grad):
        der = grad.reshape(-1)[j]
        return np.abs(n_der - der) < atol + rtol * np.abs(n_der)
    for x_name, grad in grad_values.items():
        if grad.shape != input_values[x_name].shape:
            raise AssertionError(
                "Gradient wrt '{}' has unexpected shape {}, expected {} ".format(
                    x_name, grad.shape, input_values[x_name].shape
                )
            )
        ngrad = np.zeros_like(grad)
        wrong_positions = []
        # compute partial derivatives for each position in this variable
        for j in range(np.prod(grad.shape)):
            # forward difference approximation
            nder = derivative(x_name, j, delta)
            # if the derivative is not equal to the analytical one, try to use more
            # precise and expensive methods
            if not compare_derivative(j, nder, grad):
                # central difference approximation
                nder = (derivative(x_name, j, -delta) + nder) / 2
                if not compare_derivative(j, nder, grad):
                    # central difference approximation using h = delta/2
                    cnder2 = (
                        derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)
                    ) / 2
                    # five-point derivative
                    nder = (4 * cnder2 - nder) / 3
            # if the derivatives still don't match, add this position to the
            # list of wrong positions
            if not compare_derivative(j, nder, grad):
                wrong_positions.append(np.unravel_index(j, grad.shape))
            ngrad.reshape(-1)[j] = nder
        wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))
        # Euclidean distance between numeric and analytic gradients
        dist = np.sqrt(np.sum((ngrad - grad) ** 2))
        grad_norm = np.sqrt(np.sum(ngrad**2))
        if not (np.isfinite(dist) and np.isfinite(grad_norm)):
            raise ValueError(
                "NaN or infinity detected during numerical gradient checking wrt '{}'\n"
                "analytical grad = {}\n numerical grad = {}\n".format(x_name, grad, ngrad)
            )
        # we multiply atol by this number to make it more universal for different sizes
        sqrt_n = np.sqrt(float(np.prod(grad.shape)))
        if dist > atol * sqrt_n + rtol * grad_norm:
            raise AssertionError(
                "Analytical and numerical grads wrt '{}' differ too much\n"
                "analytical grad = {}\n numerical grad = {}\n"
                "{}% of elements differ, first 10 of wrong positions: {}\n"
                "distance > atol*sqrt(n) + rtol*grad_norm\n"
                "distance {} > {}*{} + {}*{}".format(
                    x_name,
                    grad,
                    ngrad,
                    wrong_percentage,
                    wrong_positions[:10],
                    dist,
                    atol,
                    sqrt_n,
                    rtol,
                    grad_norm,
                )
            )
        max_diff = np.max(np.abs(ngrad - grad))
        avg_diff = np.mean(np.abs(ngrad - grad))
        logging.info(
            "Numerical grad test wrt '%s' of shape %s passes, "
            "dist = %f, max_diff = %f, avg_diff = %f",
            x_name,
            grad.shape,
            dist,
            max_diff,
            avg_diff,
        )
def assert_prim_expr_equal(lhs, rhs):
    """Assert lhs and rhs are equal to each other.
    Parameters
    ----------
    lhs : tvm.tir.PrimExpr
        The left operand.
    rhs : tvm.tir.PrimExpr
        The right operand.
    Raises
    ------
    ValueError
        If the analyzer cannot prove the two expressions equal.
    """
    ana = tvm.arith.Analyzer()
    # Equality is decided symbolically by the arithmetic analyzer rather than
    # by structural identity of the expression trees.
    if not ana.can_prove_equal(lhs, rhs):
        raise ValueError("{} and {} are not equal".format(lhs, rhs))
def check_bool_expr_is_true(bool_expr, vranges, cond=None):
    """Check that bool_expr holds given the condition cond
    for every value of free variables from vranges.
    For example, 2x > 4y solves to x > 2y given x in (0, 10) and y in (0, 10);
    here bool_expr is x > 2y, vranges is {x: (0, 10), y: (0, 10)}, cond is 2x > 4y.
    We create iterations to check,
    for x in range(10):
        for y in range(10):
            assert !(2x > 4y) || (x > 2y)
    Parameters
    ----------
    bool_expr : tvm.ir.PrimExpr
        Boolean expression to check
    vranges: Dict[tvm.tir.expr.Var, tvm.ir.Range]
        Free variables and their ranges
    cond: tvm.ir.PrimExpr
        extra conditions needs to be satisfied.
    Raises
    ------
    AssertionError
        If the expression is false for some assignment; the message
        includes a concrete counterexample.
    """
    if cond is not None:
        # Only require bool_expr where cond holds, i.e. check (!cond || bool_expr).
        bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)
    def _run_expr(expr, vranges):
        """Evaluate expr for every value of free variables
        given by vranges and return the tensor of results.
        """
        def _compute_body(*us):
            # Grid index `u` starts at 0, so shift by each range's minimum.
            vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
            return tvm.tir.stmt_functor.substitute(expr, vmap)
        # Build and run a tensor program that evaluates expr over the full grid.
        A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
        args = [tvm.nd.empty(A.shape, A.dtype)]
        sch = tvm.te.create_schedule(A.op)
        mod = tvm.build(sch, [A])
        mod(*args)
        return args[0].numpy()
    res = _run_expr(bool_expr, vranges)
    if not np.all(res):
        # Report the first failing assignment as a counterexample.
        indices = list(np.argwhere(res == 0)[0])
        counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
        counterex = sorted(counterex, key=lambda x: x[0])
        counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
        ana = tvm.arith.Analyzer()
        raise AssertionError(
            "Expression {}\nis not true on {}\n"
            "Counterexample: {}".format(ana.simplify(bool_expr), vranges, counterex)
        )
def check_int_constraints_trans_consistency(constraints_trans, vranges=None):
    """Check IntConstraintsTransform is a bijective transformation.
    Parameters
    ----------
    constraints_trans : arith.IntConstraintsTransform
        Integer constraints transformation
    vranges: Dict[tvm.tir.Var, tvm.ir.Range]
        Free variables and their ranges
    Raises
    ------
    AssertionError
        (via check_bool_expr_is_true) if either direction of the
        transformation fails the consistency check.
    """
    if vranges is None:
        vranges = {}
    def _check_forward(constraints1, constraints2, varmap, backvarmap):
        # Verify that mapping constraints1 -> constraints2 and back is the
        # identity, and that constraints2's relations follow from constraints1's.
        ana = tvm.arith.Analyzer()
        all_vranges = vranges.copy()
        all_vranges.update({v: r for v, r in constraints1.ranges.items()})
        # Check that the transformation is injective
        cond_on_vars = tvm.tir.const(1, "bool")
        for v in constraints1.variables:
            if v in varmap:
                # variable mapping is consistent
                v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))
                cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)
        # Also we have to check that the new relations are true when old relations are true
        cond_subst = tvm.tir.stmt_functor.substitute(
            tvm.te.all(tvm.tir.const(1, "bool"), *constraints2.relations), backvarmap
        )
        # We have to include relations from vranges too
        for v in constraints2.variables:
            if v in constraints2.ranges:
                r = constraints2.ranges[v]
                range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)
                range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)
                cond_subst = tvm.te.all(cond_subst, range_cond)
        cond_subst = ana.simplify(cond_subst)
        check_bool_expr_is_true(
            tvm.te.all(cond_subst, cond_on_vars),
            all_vranges,
            cond=tvm.te.all(tvm.tir.const(1, "bool"), *constraints1.relations),
        )
    # Check both directions: src -> dst and dst -> src.
    _check_forward(
        constraints_trans.src,
        constraints_trans.dst,
        constraints_trans.src_to_dst,
        constraints_trans.dst_to_src,
    )
    _check_forward(
        constraints_trans.dst,
        constraints_trans.src,
        constraints_trans.dst_to_src,
        constraints_trans.src_to_dst,
    )
def _get_targets(target_names=None):
    """Build the target metadata table used by the testing helpers.
    Parameters
    ----------
    target_names : Optional[List[str]]
        Target strings to inspect. Defaults to _tvm_test_targets()
        (the TVM_TEST_TARGETS environment variable), falling back to
        DEFAULT_TEST_TARGETS when empty.
    Returns
    -------
    List[dict]
        One dict per target with keys "target", "target_kind",
        "is_enabled" (runtime support present in this build) and
        "is_runnable" (a matching device actually exists).
    """
    if target_names is None:
        target_names = _tvm_test_targets()
    if not target_names:
        target_names = DEFAULT_TEST_TARGETS
    targets = []
    for target in target_names:
        target_kind = target.split()[0]
        # cuda+cudnn additionally requires the cuDNN library to be present.
        if target_kind == "cuda" and "cudnn" in tvm.target.Target(target).attrs.get("libs", []):
            is_enabled = tvm.support.libinfo()["USE_CUDNN"].lower() in ["on", "true", "1"]
            is_runnable = is_enabled and cudnn.exists()
        elif target_kind == "hexagon":
            is_enabled = tvm.support.libinfo()["USE_HEXAGON"].lower() in ["on", "true", "1"]
            # If Hexagon has compile-time support, we can always fall back
            is_runnable = is_enabled and "ANDROID_SERIAL_NUMBER" in os.environ
        else:
            is_enabled = tvm.runtime.enabled(target_kind)
            is_runnable = is_enabled and tvm.device(target_kind).exist
        targets.append(
            {
                "target": target,
                "target_kind": target_kind,
                "is_enabled": is_enabled,
                "is_runnable": is_runnable,
            }
        )
    # If nothing is runnable, fall back to llvm when possible; otherwise fail.
    if all(not t["is_runnable"] for t in targets):
        if tvm.runtime.enabled("llvm"):
            logging.warning(
                "None of the following targets are supported by this build of TVM: %s."
                " Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.",
                target_names,
            )
            return _get_targets(["llvm"])
        raise TVMError(
            "None of the following targets are supported by this build of TVM: %s."
            " Try setting TVM_TEST_TARGETS to a supported target."
            " Cannot default to llvm, as it is not enabled." % target_names
        )
    return targets
# Default pool of target strings exercised by the testing helpers when the
# TVM_TEST_TARGETS environment variable does not specify its own set
# (see _tvm_test_targets / _get_targets).
DEFAULT_TEST_TARGETS = [
    "llvm",
    "cuda",
    "nvptx",
    "vulkan -from_device=0",
    "opencl",
    "opencl -device=mali,aocl_sw_emu",
    "opencl -device=intel_graphics",
    "metal",
    "rocm",
    "hexagon",
]
def device_enabled(target):
    """Check if a target should be used when testing.

    It is recommended to use :py:func:`tvm.testing.parametrize_targets`
    instead of manually checking if a target is enabled.

    This allows the user to control which devices they are testing against.
    In tests, this should be used to check if a device should be used when
    said device is an optional part of the test.

    Parameters
    ----------
    target : str
        Target string to check against

    Returns
    -------
    bool
        Whether or not the device associated with this target is enabled.

    Example
    -------
    >>> @tvm.testing.uses_gpu
    >>> def test_mytest():
    >>>     for target in ["cuda", "llvm"]:
    >>>         if device_enabled(target):
    >>>             test_body...

    Here, `test_body` will only be reached by with `target="cuda"` on gpu test
    nodes and `target="llvm"` on cpu test nodes.
    """
    assert isinstance(target, str), "device_enabled requires a target as a string"
    # A target string may carry extra flags after the kind name; compare
    # only the leading kind token.
    requested_kind = target.split(" ")[0]
    runnable_kinds = {entry["target_kind"] for entry in _get_targets() if entry["is_runnable"]}
    return requested_kind in runnable_kinds
def enabled_targets():
    """Get all enabled targets with associated devices.

    In most cases, :py:func:`tvm.testing.parametrize_targets` should be used
    instead of this function.

    In this context, "enabled" means TVM was built with support for the
    target, the target name appears in the TVM_TEST_TARGETS environment
    variable, and a suitable device for running this target exists.  If
    TVM_TEST_TARGETS is not set, it defaults to the module-level variable
    DEFAULT_TEST_TARGETS.

    Tests using this function **must** be decorated with
    :py:func:`tvm.testing.uses_gpu` (otherwise they will never be run on
    the gpu).

    Returns
    -------
    targets: list
        A list of pairs of all enabled devices and the associated context
    """
    pairs = []
    for entry in _get_targets():
        if entry["is_runnable"]:
            pairs.append((entry["target"], tvm.device(entry["target"])))
    return pairs
class Feature:
    """A feature that may be required to run a test.
    Parameters
    ----------
    name: str
        The short name of the feature. Should match the name in the
        requires_* decorator. This is applied as a mark to all tests
        using this feature, and can be used in pytests ``-m``
        argument.
    long_name: Optional[str]
        The long name of the feature, to be used in error messages.
        If None, defaults to the short name.
    cmake_flag: Optional[str]
        The flag that must be enabled in the config.cmake in order to
        use this feature.
        If None, no flag is required to use this feature.
    target_kind_enabled: Optional[str]
        The target kind that must be enabled to run tests using this
        feature. If present, the target_kind must appear in the
        TVM_TEST_TARGETS environment variable, or in
        tvm.testing.DEFAULT_TEST_TARGETS if TVM_TEST_TARGETS is
        undefined.
        If None, this feature does not require a specific target to be
        enabled.
    compile_time_check: Optional[Callable[[], Union[bool,str]]]
        A check that returns True if the feature can be used at
        compile-time. (e.g. Validating the version number of the nvcc
        compiler.) If the feature does not have support to perform
        compile-time tests, the check should returns False to display
        a generic error message, or a string to display a more
        specific error message.
        If None, no additional check is performed.
    target_kind_hardware: Optional[str]
        The target kind that must have available hardware in order to
        run tests using this feature. This is checked using
        tvm.device(target_kind_hardware).exist. If a feature requires
        a different check, this should be implemented using
        run_time_check.
        If None, this feature does not require a specific
        tvm.device to exist.
    run_time_check: Optional[Callable[[], Union[bool,str]]]
        A check that returns True if the feature can be used at
        run-time. (e.g. Validating the compute version supported by a
        GPU.) If the feature does not have support to perform
        run-time tests, the check should returns False to display a
        generic error message, or a string to display a more specific
        error message.
        If None, no additional check is performed.
    parent_features: Optional[Union[str,List[str]]]
        The short name of a feature or features that are required in
        order to use this feature. (e.g. Using cuDNN requires using
        CUDA) This feature should inherit all checks of the parent
        feature, with the exception of the `target_kind_enabled`
        checks.
        If None, this feature does not require any other parent
        features.
    """
    # Registry of every Feature instance ever constructed, keyed by short
    # name; used by Feature.require and for parent-feature lookups.
    _all_features = {}
    def __init__(
        self,
        name: str,
        long_name: Optional[str] = None,
        cmake_flag: Optional[str] = None,
        target_kind_enabled: Optional[str] = None,
        compile_time_check: Optional[Callable[[], Union[bool, str]]] = None,
        target_kind_hardware: Optional[str] = None,
        run_time_check: Optional[Callable[[], Union[bool, str]]] = None,
        parent_features: Optional[Union[str, List[str]]] = None,
    ):
        self.name = name
        self.long_name = long_name or name
        self.cmake_flag = cmake_flag
        self.target_kind_enabled = target_kind_enabled
        self.compile_time_check = compile_time_check
        self.target_kind_hardware = target_kind_hardware
        self.run_time_check = run_time_check
        # Normalize parent_features to a list of short names.
        if parent_features is None:
            self.parent_features = []
        elif isinstance(parent_features, str):
            self.parent_features = [parent_features]
        else:
            self.parent_features = parent_features
        # Register this instance in the class-wide registry.
        self._all_features[self.name] = self
    def _register_marker(self, config):
        # Declare this feature's pytest marker so `pytest -m <name>` works
        # without "unknown marker" warnings.
        config.addinivalue_line("markers", f"{self.name}: Mark a test as using {self.long_name}")
    def _uses_marks(self):
        # Yield the bare `pytest.mark.<name>` marks for this feature and all
        # of its ancestors (no skip conditions attached).
        for parent in self.parent_features:
            yield from self._all_features[parent]._uses_marks()
        yield getattr(pytest.mark, self.name)
    def _compile_only_marks(self):
        # Skip conditions that can be evaluated without target hardware.
        for parent in self.parent_features:
            yield from self._all_features[parent]._compile_only_marks()
        if self.compile_time_check is not None:
            res = self.compile_time_check()
            if isinstance(res, str):
                # A string result is a feature-specific skip explanation.
                yield pytest.mark.skipif(True, reason=res)
            else:
                yield pytest.mark.skipif(
                    not res, reason=f"Compile-time support for {self.long_name} not present"
                )
        if self.target_kind_enabled is not None:
            target_kind = self.target_kind_enabled.split()[0]
            yield pytest.mark.skipif(
                all(enabled.split()[0] != target_kind for enabled in _tvm_test_targets()),
                reason=(
                    f"{self.target_kind_enabled} tests disabled "
                    f"by TVM_TEST_TARGETS environment variable"
                ),
            )
        if self.cmake_flag is not None:
            yield pytest.mark.skipif(
                not _cmake_flag_enabled(self.cmake_flag),
                reason=(
                    f"{self.long_name} support not enabled. "
                    f"Set {self.cmake_flag} in config.cmake to enable."
                ),
            )
    def _run_only_marks(self):
        # Skip conditions that need an actual device / run-time probe.
        for parent in self.parent_features:
            yield from self._all_features[parent]._run_only_marks()
        if self.run_time_check is not None:
            res = self.run_time_check()
            if isinstance(res, str):
                # A string result is a feature-specific skip explanation.
                yield pytest.mark.skipif(True, reason=res)
            else:
                yield pytest.mark.skipif(
                    not res, reason=f"Run-time support for {self.long_name} not present"
                )
        if self.target_kind_hardware is not None:
            yield pytest.mark.skipif(
                not tvm.device(self.target_kind_hardware).exist,
                reason=f"No device exists for target {self.target_kind_hardware}",
            )
    def marks(self, support_required="compile-and-run"):
        """Return a list of marks to be used
        Parameters
        ----------
        support_required: str
            Allowed values: "compile-and-run" (default),
            "compile-only", or "optional".
            See Feature.__call__ for details.
        """
        if support_required not in ["compile-and-run", "compile-only", "optional"]:
            raise ValueError(f"Unknown feature support type: {support_required}")
        if support_required == "compile-and-run":
            marks = itertools.chain(
                self._run_only_marks(), self._compile_only_marks(), self._uses_marks()
            )
        elif support_required == "compile-only":
            marks = itertools.chain(self._compile_only_marks(), self._uses_marks())
        elif support_required == "optional":
            marks = self._uses_marks()
        else:
            raise ValueError(f"Unknown feature support type: {support_required}")
        return list(marks)
    def __call__(self, func=None, *, support_required="compile-and-run"):
        """Mark a pytest function as requiring this feature
        Can be used either as a bare decorator, or as a decorator with
        arguments.
        Parameters
        ----------
        func: Callable
            The pytest test function to be marked
        support_required: str
            Allowed values: "compile-and-run" (default),
            "compile-only", or "optional".
            If "compile-and-run", the test case is marked as using the
            feature, and is skipped if the environment lacks either
            compile-time or run-time support for the feature.
            If "compile-only", the test case is marked as using the
            feature, and is skipped if the environment lacks
            compile-time support.
            If "optional", the test case is marked as using the
            feature, but isn't skipped. This is kept for backwards
            compatibility for tests that use `enabled_targets()`, and
            should be avoided in new test code. Instead, prefer
            parametrizing over the target using the `target` fixture.
        Examples
        --------
        .. code-block:: python
            @feature
            def test_compile_and_run():
                ...
            @feature(compile_only=True)
            def test_compile_only():
                ...
        """
        if support_required not in ["compile-and-run", "compile-only", "optional"]:
            raise ValueError(f"Unknown feature support type: {support_required}")
        def wrapper(func):
            for mark in self.marks(support_required=support_required):
                func = mark(func)
            return func
        # Bare-decorator usage passes `func`; parenthesized usage does not.
        if func is None:
            return wrapper
        return wrapper(func)
    @classmethod
    def require(cls, name, support_required="compile-and-run"):
        """Returns a decorator that marks a test as requiring a feature
        Parameters
        ----------
        name: str
            The name of the feature that is used by the test
        support_required: str
            Allowed values: "compile-and-run" (default),
            "compile-only", or "optional".
            See Feature.__call__ for details.
        Examples
        --------
        .. code-block:: python
            @Feature.require("cuda")
            def test_compile_and_run():
                ...
            @Feature.require("cuda", compile_only=True)
            def test_compile_only():
                ...
        """
        # Look the feature up in the registry and delegate to its __call__.
        return cls._all_features[name](support_required=support_required)
def _any_gpu_exists():
    """Return True when any GPU runtime (CUDA/ROCm/OpenCL/Metal/Vulkan) has a device."""
    device_probes = (tvm.cuda, tvm.rocm, tvm.opencl, tvm.metal, tvm.vulkan)
    # any() short-circuits in the same probe order as the original or-chain.
    return any(probe().exist for probe in device_probes)
# ---------------------------------------------------------------------------
# Feature registry: each `requires_*` object below is a Feature instance that
# doubles as a pytest decorator (see the Feature class above).
# ---------------------------------------------------------------------------
# Mark a test as requiring llvm to run
requires_llvm = Feature(
    "llvm", "LLVM", cmake_flag="USE_LLVM", target_kind_enabled="llvm", target_kind_hardware="llvm"
)
# Mark a test as requiring a GPU to run.
requires_gpu = Feature("gpu", run_time_check=_any_gpu_exists)
# Mark to differentiate tests that use the GPU in some capacity.
#
# These tests will be run on CPU-only test nodes and on test nodes with GPUs.
# To mark a test that must have a GPU present to run, use
# :py:func:`tvm.testing.requires_gpu`.
uses_gpu = requires_gpu(support_required="optional")
# Mark a test as requiring the x86 Architecture to run.
requires_x86 = Feature(
    "x86", "x86 Architecture", run_time_check=lambda: platform.machine() == "x86_64"
)
# Mark a test as requiring the CUDA runtime.
requires_cuda = Feature(
    "cuda",
    "CUDA",
    cmake_flag="USE_CUDA",
    target_kind_enabled="cuda",
    target_kind_hardware="cuda",
    parent_features="gpu",
)
# Mark a test as requiring a tensorcore to run
requires_tensorcore = Feature(
    "tensorcore",
    "NVIDIA Tensor Core",
    run_time_check=lambda: tvm.cuda().exist and nvcc.have_tensorcore(tvm.cuda().compute_version),
    parent_features="cuda",
)
# Mark a test as requiring the cuDNN library.
requires_cudnn = Feature("cudnn", "cuDNN", cmake_flag="USE_CUDNN", parent_features="cuda")
# Mark a test as requiring the cuBLAS library.
requires_cublas = Feature("cublas", "cuBLAS", cmake_flag="USE_CUBLAS", parent_features="cuda")
# Mark a test as requiring the NVPTX compilation on the CUDA runtime
requires_nvptx = Feature(
    "nvptx",
    "NVPTX",
    target_kind_enabled="nvptx",
    target_kind_hardware="nvptx",
    parent_features=["llvm", "cuda"],
)
# Mark a test as requiring the CUDA Graph Feature
requires_cudagraph = Feature(
    "cudagraph",
    "CUDA Graph",
    target_kind_enabled="cuda",
    compile_time_check=nvcc.have_cudagraph,
    parent_features="cuda",
)
# Mark a test as requiring the OpenCL runtime
# (the hardware/gpu requirements are lifted when testing through RPC_TARGET).
requires_opencl = Feature(
    "opencl",
    "OpenCL",
    cmake_flag="USE_OPENCL",
    target_kind_enabled="opencl",
    target_kind_hardware="opencl" if "RPC_TARGET" not in os.environ else None,
    parent_features="gpu" if "RPC_TARGET" not in os.environ else None,
)
# Mark a test as requiring the rocm runtime
requires_rocm = Feature(
    "rocm",
    "ROCm",
    cmake_flag="USE_ROCM",
    target_kind_enabled="rocm",
    target_kind_hardware="rocm",
    parent_features="gpu",
)
# Mark a test as requiring a matrixcore to run
requires_matrixcore = Feature(
    "matrixcore",
    "AMD Matrix Core",
    run_time_check=lambda: tvm.rocm().exist and rocm.have_matrixcore(tvm.rocm().compute_version),
    parent_features="rocm",
)
# Mark a test as requiring the metal runtime
requires_metal = Feature(
    "metal",
    "Metal",
    cmake_flag="USE_METAL",
    target_kind_enabled="metal",
    target_kind_hardware="metal",
    parent_features="gpu",
)
# Mark a test as requiring the vulkan runtime
requires_vulkan = Feature(
    "vulkan",
    "Vulkan",
    cmake_flag="USE_VULKAN",
    target_kind_enabled="vulkan",
    target_kind_hardware="vulkan",
    parent_features="gpu",
)
# Mark a test as requiring OpenCLML support in build.
requires_openclml = Feature(
    "OpenCLML",
    "CLML",
    cmake_flag="USE_CLML",
    target_kind_enabled="opencl",
)
# Mark a test as requiring microTVM to run
requires_micro = Feature("micro", "MicroTVM", cmake_flag="USE_MICRO")
# Mark a test as requiring CUTLASS to run
requires_cutlass = Feature("cutlass", "CUTLASS", cmake_flag="USE_CUTLASS")
# Mark a test as requiring rpc to run
requires_rpc = Feature("rpc", "RPC", cmake_flag="USE_RPC")
# Mark a test as requiring Arm(R) Ethos(TM)-N to run
requires_ethosn = Feature("ethosn", "Arm(R) Ethos(TM)-N", cmake_flag="USE_ETHOSN")
# Mark a test as requiring Arm(R) Ethos(TM)-U to run
requires_ethosu = Feature("ethosu", "Arm(R) Ethos(TM)-U", cmake_flag="USE_ETHOSU")
# Mark a test as requiring libtorch to run
requires_libtorch = Feature("libtorch", "LibTorch", cmake_flag="USE_LIBTORCH")
# Mark a test as requiring Hexagon to run
requires_hexagon = Feature(
    "hexagon",
    "Hexagon",
    cmake_flag="USE_HEXAGON",
    target_kind_enabled="hexagon",
    compile_time_check=hexagon._compile_time_check,
    run_time_check=hexagon._run_time_check,
    parent_features="llvm",
)
# Mark a test as requiring the CMSIS NN library
requires_cmsisnn = Feature("cmsisnn", "CMSIS NN", cmake_flag="USE_CMSISNN")
def _corstone300_compile_time_check():
if shutil.which("arm-none-eabi-gcc") is None:
return "ARM embedded toolchain unavailable"
return True
# Mark a test as requiring the corstone300 FVP
# (presumably Arm's Corstone-300 Fixed Virtual Platform — verify with docs;
# the check itself only probes for the arm-none-eabi-gcc toolchain).
requires_corstone300 = Feature(
    "corstone300",
    "Corstone-300",
    compile_time_check=_corstone300_compile_time_check,
    parent_features="cmsisnn",
)
# Mark a test as requiring Vitis AI to run
requires_vitis_ai = Feature("vitis_ai", "Vitis AI", cmake_flag="USE_VITIS_AI")
def _arm_dot_supported():
arch = platform.machine()
if arch not in ["arm64", "aarch64"]:
return False
if sys.platform.startswith("darwin"):
cpu_info = subprocess.check_output("sysctl -a", shell=True).strip().decode()
for line in cpu_info.split("\n"):
if line.startswith("hw.optional.arm.FEAT_DotProd"):
return bool(int(line.split(":", 1)[1]))
elif sys.platform.startswith("linux"):
return True
return False
def _is_intel():
# Only linux is supported for now.
if sys.platform.startswith("linux"):
with open("/proc/cpuinfo", "r") as content:
return "Intel" in content.read()
return False
def _has_vnni():
arch = platform.machine()
# Only linux is supported for now.
if arch == "x86_64" and sys.platform.startswith("linux"):
with open("/proc/cpuinfo", "r") as content:
return "avx512_vnni" in content.read()
return False
# check avx512 intrinsic groups for SkyLake X
def _has_slavx512():
    """Detect SkyLake-X AVX512 support: CPU flags plus a new-enough LLVM."""
    # Only x86_64 Linux is supported for now.
    if platform.machine() != "x86_64" or not sys.platform.startswith("linux"):
        return False
    with open("/proc/cpuinfo", "r") as content:
        cpuinfo = content.read()
    needed_flags = ("avx512f", "avx512cd", "avx512bw", "avx512dq", "avx512vl")
    if not all(flag in cpuinfo for flag in needed_flags):
        return False
    # Additionally require LLVM major version >= 8.
    return tvm.target.codegen.llvm_version_major() >= 8
# Mark a test as requiring the ARM dot-product instructions.
requires_arm_dot = Feature("arm_dot", "ARM dot product", run_time_check=_arm_dot_supported)
# Mark a test as requiring an Intel CascadeLake-class CPU (Intel + VNNI).
requires_cascadelake = Feature(
    "cascadelake", "x86 CascadeLake", run_time_check=lambda: _has_vnni() and _is_intel()
)
# Mark a test as requiring an Intel SkyLake-X-class CPU with AVX512 support.
requires_skylake_avx512 = Feature(
    "skylake_avx512",
    "x86 SkyLake AVX512",
    run_time_check=lambda: _has_slavx512() and _is_intel(),
)
def _cmake_flag_enabled(flag):
    """Return True unless the config.cmake value for ``flag`` is explicitly off.

    Many of the flags can be library flags (paths rather than booleans), so
    anything that is not an explicit "off"/"false"/"0" counts as enabled.
    """
    value = tvm.support.libinfo()[flag].lower()
    return value not in ("off", "false", "0")
def _tvm_test_targets():
target_str = os.environ.get("TVM_TEST_TARGETS", "").strip()
if target_str:
# Use dict instead of set for de-duplication so that the
# targets stay in the order specified.
return list({t.strip(): None for t in target_str.split(";") if t.strip()})
return DEFAULT_TEST_TARGETS
def _compose(args, decs):
"""Helper to apply multiple markers"""
if len(args) > 0:
f = args[0]
for d in reversed(decs):
f = d(f)
return f
return decs
# Marker for long-running tests: such tests are skipped when the
# SKIP_SLOW_TESTS environment variable is truthy (see top of file).
slow = pytest.mark.skipif(
    SKIP_SLOW_TESTS,
    reason="Skipping slow test since the SKIP_SLOW_TESTS environment variable is 'true'",
)
def requires_nvcc_version(major_version, minor_version=0, release_version=0):
    """Mark a test as requiring at least a specific version of nvcc.

    A unit test marked with this decorator runs only when the installed
    version of NVCC is at least ``(major_version, minor_version,
    release_version)``.  The test also inherits every mark implied by
    ``requires_cuda``.

    Parameters
    ----------
    major_version: int
        The major version of the (major,minor,release) version tuple.
    minor_version: int
        The minor version of the (major,minor,release) version tuple.
    release_version: int
        The release version of the (major,minor,release) version tuple.
    """
    try:
        installed = nvcc.get_cuda_version()
    except RuntimeError:
        # nvcc is unavailable; the skipif below then always fires.
        installed = (0, 0, 0)
    threshold = (major_version, minor_version, release_version)
    version_str = ".".join(str(v) for v in threshold)
    requirements = [
        pytest.mark.skipif(installed < threshold, reason=f"Requires NVCC >= {version_str}"),
        *requires_cuda.marks(),
    ]
    def inner(func):
        return _compose([func], requirements)
    return inner
def requires_cuda_compute_version(major_version, minor_version=0):
    """Mark a test as requiring at least a compute architecture.

    A unit test marked with this decorator runs only when the CUDA compute
    architecture of the GPU is at least ``(major_version, minor_version)``.
    The test also inherits every mark implied by ``requires_cuda``.

    Parameters
    ----------
    major_version: int
        The major version of the (major,minor) version tuple.
    minor_version: int
        The minor version of the (major,minor) version tuple.
    """
    threshold = (major_version, minor_version)
    try:
        arch = tvm.contrib.nvcc.get_target_compute_version()
        present = tvm.contrib.nvcc.parse_compute_version(arch)
    except ValueError:
        # No GPU present. This test will be skipped from the
        # requires_cuda() marks as well.
        present = (0, 0)
    want = ".".join(str(v) for v in threshold)
    have = ".".join(str(v) for v in present)
    requirements = [
        pytest.mark.skipif(
            present < threshold,
            reason=f"Requires CUDA compute >= {want}, but have {have}",
        ),
        *requires_cuda.marks(),
    ]
    def inner(func):
        return _compose([func], requirements)
    return inner
def skip_if_32bit(reason):
    """Return a decorator that skips the test on 32-bit interpreter builds.

    Parameters
    ----------
    reason: str
        Message attached to the pytest skip mark.
    """
    def decorator(*args):
        # platform.architecture()[0] is e.g. "64bit" or "32bit".
        if "32bit" not in platform.architecture()[0]:
            return _compose(args, [])
        return _compose(args, [pytest.mark.skip(reason=reason)])
    return decorator
def requires_package(*packages):
    """Mark a test as requiring the given python packages.

    When any listed package cannot be imported, tests marked with
    `requires_package` show up in the pytest results as skipped.  This is
    the decorator equivalent of ``foo = pytest.importorskip('foo')`` in
    the test body.

    Parameters
    ----------
    packages : List[str]
        The python packages that should be available for the test to
        run.

    Returns
    -------
    mark: pytest mark
        The pytest mark to be applied to unit tests that require this
    """
    def has_package(package):
        # Import probe: success means the package is usable.
        try:
            __import__(package)
        except ImportError:
            return False
        return True
    skip_marks = [
        pytest.mark.skipif(not has_package(pkg), reason=f"Cannot import '{pkg}'")
        for pkg in packages
    ]
    def wrapper(func):
        # Apply the marks in declaration order, same as stacked decorators.
        for mark in skip_marks:
            func = mark(func)
        return func
    return wrapper
def parametrize_targets(*args):
    """Parametrize a test over an explicit set of targets.

    Use this decorator when a test applies only to a specific set of
    targets and devices — for example, verifying target-specific assembly
    output.  For the common case, prefer
    :py:func:`tvm.testing.exclude_targets` or
    :py:func:`tvm.testing.known_failing_targets` instead.

    When used as a bare decorator (no arguments), the test is
    parametrized over all targets in
    :py:func:`tvm.testing.enabled_targets`.  That behavior now happens
    automatically for any test accepting ``target`` or ``dev``
    arguments, so the bare form exists only for backwards compatibility.

    Parameters
    ----------
    f : function
        Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev)`:,
        where `xxxxxxxxx` is any name.
    targets : list[str], optional
        Set of targets to run against. If not supplied,
        :py:func:`tvm.testing.enabled_targets` will be used.

    Example
    -------
    >>> @tvm.testing.parametrize_targets("llvm", "cuda")
    >>> def test_mytest(target, dev):
    >>>     ...  # do something
    """
    # Bare-decorator compatibility: @parametrize_targets with no
    # arguments receives the test function itself; parametrization is
    # handled elsewhere by _auto_parametrize_target.
    sole_arg = args[0] if len(args) == 1 else None
    if callable(sole_arg):
        return sole_arg
    return pytest.mark.parametrize("target", list(args), scope="session")
def exclude_targets(*args):
    """Exclude a test from running on particular targets.

    Use this decorator for a test that should run over the usual variety
    of targets and devices, minus a few that do not support the tested
    capability.  The excluded targets are recorded on the test function
    and later turned into skip marks.

    Parameters
    ----------
    f : function
        Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev)`:,
        where `xxxxxxxxx` is any name.
    targets : list[str]
        Set of targets to exclude.

    Example
    -------
    >>> @tvm.testing.exclude_targets("cuda")
    >>> def test_mytest(target, dev):
    >>>     ...  # do something

    Or

    >>> @tvm.testing.exclude_targets("llvm", "cuda")
    >>> def test_mytest(target, dev):
    >>>     ...  # do something
    """
    def decorator(func):
        # Stash the exclusion list; consumed during test collection.
        func.tvm_excluded_targets = args
        return func
    return decorator
def known_failing_targets(*args):
    """Mark a test as a known failure on particular targets.

    Use this decorator for a test that runs over the usual variety of
    targets but is known to fail on some of them — for instance, a new
    runtime that does not yet support every tested feature.  The listed
    targets are recorded on the test function and later turned into
    xfail marks.

    Parameters
    ----------
    f : function
        Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev)`:,
        where `xxxxxxxxx` is any name.
    targets : list[str]
        Set of targets to skip.

    Example
    -------
    >>> @tvm.testing.known_failing_targets("cuda")
    >>> def test_mytest(target, dev):
    >>>     ...  # do something

    Or

    >>> @tvm.testing.known_failing_targets("llvm", "cuda")
    >>> def test_mytest(target, dev):
    >>>     ...  # do something
    """
    def decorator(func):
        # Stash the failing-target list; consumed during test collection.
        func.tvm_known_failing_targets = args
        return func
    return decorator
def parameter(*values, ids=None, by_dict=None):
    """Convenience function to define pytest parametrized fixtures.

    Declaring a variable with ``tvm.testing.parameter`` defines a
    parametrized pytest fixture usable by test functions.  It is meant
    for values with no setup cost (strings, ints, tuples, ...); use
    :py:func:`tvm.testing.fixture` when setup is expensive.

    A test accepting several such parameters runs once per combination
    of parameter values.  The definition applies to every test in the
    module; individual tests can override it with
    ``@pytest.mark.parametrize``.

    Parameters
    ----------
    values : Any
        A list of parameter values. A unit test that accepts this
        parameter as an argument will be run once for each parameter
        given.
    ids : List[str], optional
        A list of names for the parameters. If None, pytest will
        generate a name from the value. These generated names may not
        be readable/useful for composite types such as tuples.
    by_dict : Dict[str, Any]
        A mapping from parameter name to parameter value, to set both the
        values and ids.

    Returns
    -------
    function
        A function output from pytest.fixture.

    Example
    -------
    >>> size = tvm.testing.parameter(1, 10, 100)
    >>> def test_using_size(size):
    >>>     ...  # Test code here
    """
    if by_dict is not None:
        if values or ids:
            raise RuntimeError(
                "Use of the by_dict parameter cannot be used alongside positional arguments"
            )
        # Keys become the ids, values become the parameter values.
        ids = tuple(by_dict.keys())
        values = tuple(by_dict.values())
    # Optional cls parameter in case a parameter is defined inside a
    # class scope.
    @pytest.fixture(params=values, ids=ids)
    def as_fixture(*_cls, request):
        return request.param
    return as_fixture
# Monotonically increasing counter used by parameters() below: each call
# stamps its generated fixtures with a shared group id so they are
# parametrized together rather than combinatorially.
_parametrize_group = 0
def parameters(*value_sets, ids=None):
    """Convenience function to define multiple linked pytest fixtures.

    Like :py:func:`tvm.testing.parameter`, this defines parametrized
    pytest fixtures with no setup cost.  Unlike it, parameters defined
    in a single call are *linked*: a test accepting them runs once per
    value set, not once per combination.

    The definitions apply to every test in the module; individual tests
    can override them with ``@pytest.mark.parametrize``.

    Parameters
    ----------
    values : List[tuple]
        A list of parameter value sets. Each set of values represents
        a single combination of values to be tested. A unit test that
        accepts parameters defined will be run once for every set of
        parameters in the list.
    ids : List[str], optional
        A list of names for the parameter sets. If None, pytest will
        generate a name from each parameter set. These generated names may
        not be readable/useful for composite types such as tuples.

    Returns
    -------
    List[function]
        Function outputs from pytest.fixture. These should be unpacked
        into individual named parameters.

    Example
    -------
    >>> size, dtype = tvm.testing.parameters( (16,'float32'), (512,'float16') )
    >>> def test_feature_x(size, dtype):
    >>>     assert( (size,dtype) in [(16,'float32'), (512,'float16')])
    """
    global _parametrize_group
    group = _parametrize_group
    _parametrize_group += 1
    def make_fixture(param_values):
        # Optional cls parameter in case a parameter is defined inside a
        # class scope.
        def fixture_func(*_cls, request):
            return request.param
        # Attributes consumed by the tvm.testing pytest plugin, which
        # parametrizes fixtures of the same group together.
        fixture_func.parametrize_group = group
        fixture_func.parametrize_values = param_values
        fixture_func.parametrize_ids = ids
        return pytest.fixture(fixture_func)
    # zip(*value_sets) transposes row-per-test-case input into
    # column-per-parameter values.
    return [make_fixture(column) for column in zip(*value_sets)]
def fixture(func=None, *, cache_return_value=False):
    """Convenience decorator to define pytest fixtures.

    Marks a function that sets up state for tests; test functions gain
    access to the return value by accepting the fixture name as a
    parameter.  Fixture functions may themselves accept parameters
    defined with :py:func:`tvm.testing.parameter`.

    By default the setup runs once per unit test, keeping tests
    independent.  Passing ``cache_return_value=True`` makes the fixture
    run only once (or once per parameter value) with the same return
    value shared — deep-copied — across all tests that use it.  Setting
    the environment variable ``TVM_TEST_DISABLE_CACHE`` to a non-zero
    value disables this caching globally.

    Example
    -------
    >>> @tvm.testing.fixture
    >>> def cheap_setup():
    >>>     return 5  # Setup code here.
    >>>
    >>> def test_feature_x(target, dev, cheap_setup)
    >>>     assert(cheap_setup == 5)  # Run test here

    Or

    >>> @tvm.testing.fixture(cache_return_value=True)
    >>> def expensive_setup():
    >>>     time.sleep(10)  # Setup code here
    >>>     return 5
    >>>
    >>> def test_feature_x(target, dev, expensive_setup):
    >>>     assert(expensive_setup == 5)
    """
    cache_disabled = bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0")))
    use_cache = cache_return_value and not cache_disabled
    # Deliberately function scope, so that caching can track how many
    # times the fixture has been used; the cache is cleared once the
    # fixture is no longer needed.
    def decorator(fixture_func):
        if use_cache:
            fixture_func = _fixture_cache(fixture_func)
        return pytest.fixture(fixture_func, scope="function")
    # Support both @fixture and @fixture(cache_return_value=...) forms.
    if func is None:
        return decorator
    return decorator(func)
class _DeepCopyAllowedClasses(dict):
def __init__(self, allowed_class_list):
self.allowed_class_list = allowed_class_list
super().__init__()
def get(self, key, *args, **kwargs):
"""Overrides behavior of copy.deepcopy to avoid implicit copy.
By default, copy.deepcopy uses a dict of id->object to track
all objects that it has seen, which is passed as the second
argument to all recursive calls. This class is intended to be
passed in instead, and inspects the type of all objects being
copied.
Where copy.deepcopy does a best-effort attempt at copying an
object, for unit tests we would rather have all objects either
be copied correctly, or to throw an error. Classes that
define an explicit method to perform a copy are allowed, as
are any explicitly listed classes. Classes that would fall
back to using object.__reduce__, and are not explicitly listed
as safe, will throw an exception.
"""
obj = ctypes.cast(key, ctypes.py_object).value
cls = type(obj)
if (
cls in copy._deepcopy_dispatch
or issubclass(cls, type)
or getattr(obj, "__deepcopy__", None)
or copyreg.dispatch_table.get(cls)
or cls.__reduce__ is not object.__reduce__
or cls.__reduce_ex__ is not object.__reduce_ex__
or cls in self.allowed_class_list
):
return super().get(key, *args, **kwargs)
rfc_url = (
"https://github.com/apache/tvm-rfcs/blob/main/rfcs/0007-parametrized-unit-tests.md"
)
raise TypeError(
(
f"Cannot copy fixture of type {cls.__name__}. TVM fixture caching "
"is limited to objects that explicitly provide the ability "
"to be copied (e.g. through __deepcopy__, __getstate__, or __setstate__),"
"and forbids the use of the default `object.__reduce__` and "
"`object.__reduce_ex__`. For third-party classes that are "
"safe to use with copy.deepcopy, please add the class to "
"the arguments of _DeepCopyAllowedClasses in tvm.testing._fixture_cache.\n"
"\n"
f"For discussion on this restriction, please see {rfc_url}."
)
)
def _fixture_cache(func):
    """Wrap a fixture function so its return value is cached and deep-copied.

    The wrapped fixture computes its value once per distinct argument set,
    then yields a deep copy to each test so no test can mutate the cached
    value seen by another.  The cache is cleared after the last collected
    test that uses the fixture has run.
    """
    cache = {}
    # Can't use += on a bound method's property. Therefore, this is a
    # list rather than a variable so that it can be accessed from the
    # pytest_collection_modifyitems().
    num_tests_use_this_fixture = [0]
    num_times_fixture_used = 0
    # Using functools.lru_cache would require the function arguments
    # to be hashable, which wouldn't allow caching fixtures that
    # depend on numpy arrays. For example, a fixture that takes a
    # numpy array as input, then calculates uses a slow method to
    # compute a known correct output for that input. Therefore,
    # including a fallback for serializable types.
    def get_cache_key(*args, **kwargs):
        # NOTE(review): (args, kwargs) contains a dict, which is never
        # hashable (even when empty), so this fast path appears to always
        # fall through to the pickle branch — confirm whether hashing
        # e.g. (args, tuple(kwargs.items())) was intended.
        try:
            hash((args, kwargs))
            return (args, kwargs)
        except TypeError:
            pass
        try:
            return pickle.dumps((args, kwargs))
        except TypeError as e:
            raise TypeError(
                "TVM caching of fixtures requires arguments to the fixture "
                "to be either hashable or serializable"
            ) from e
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # The use counter is populated during pytest collection by the
        # tvm.testing plugin; zero means that plugin never ran.
        if num_tests_use_this_fixture[0] == 0:
            raise RuntimeError(
                "Fixture use count is 0. "
                "This can occur if tvm.testing.plugin isn't registered. "
                "If using outside of the TVM test directory, "
                "please add `pytest_plugins = ['tvm.testing.plugin']` to your conftest.py"
            )
        try:
            cache_key = get_cache_key(*args, **kwargs)
            try:
                cached_value = cache[cache_key]
            except KeyError:
                cached_value = cache[cache_key] = func(*args, **kwargs)
            # Yield a defensive deep copy so tests cannot mutate the cache.
            yield copy.deepcopy(
                cached_value,
                # allowed_class_list should be a list of classes that
                # are safe to copy using copy.deepcopy, but do not
                # implement __deepcopy__, __reduce__, or
                # __reduce_ex__.
                _DeepCopyAllowedClasses(allowed_class_list=[]),
            )
        finally:
            # Clear the cache once all tests that use a particular fixture
            # have completed.
            nonlocal num_times_fixture_used
            num_times_fixture_used += 1
            if num_times_fixture_used >= num_tests_use_this_fixture[0]:
                cache.clear()
    # Set in the pytest_collection_modifyitems(), by _count_num_fixture_uses
    wrapper.num_tests_use_this_fixture = num_tests_use_this_fixture
    return wrapper
def identity_after(x, sleep):
    """Testing helper that returns its input unchanged, optionally sleeping first.

    Parameters
    ----------
    x : int
        The input value.
    sleep : float
        The amount of time to sleep, in seconds.  Falsy values (``0``,
        ``None``) skip the sleep entirely.

    Returns
    -------
    x : object
        The original value
    """
    if not sleep:
        return x
    time.sleep(sleep)
    return x
def terminate_self():
    """Testing function that terminates the current process with exit code -1."""
    # Equivalent to sys.exit(-1): raise SystemExit so cleanup handlers run.
    raise SystemExit(-1)
def is_ampere_or_newer():
    """Check if the target environment has an NVIDIA Ampere GPU or newer."""
    compute = tvm.contrib.nvcc.get_target_compute_version()
    major_version, _minor_version = tvm.contrib.nvcc.parse_compute_version(compute)
    # Ampere GPUs report compute capability 8.x; anything newer is higher.
    return major_version >= 8
def install_request_hook(depth: int) -> None:
    """Add a wrapper around urllib.request for CI tests.

    Locates the ``tests/scripts/request_hook`` directory ``depth`` parent
    directories above this file (or the current working directory when
    ``__file__`` is unavailable), adds it to ``sys.path``, and starts the
    hook.  No-op outside of CI.
    """
    if not IS_IN_CI:
        return
    # https://sphinx-gallery.github.io/stable/faq.html#why-is-file-not-defined-what-can-i-use
    msg = ""
    try:
        base = __file__
        msg += f"found file {__file__}\n"
    except NameError:
        base = None
        msg += "no file\n"
    if base is None:
        hook_script_dir = Path.cwd().resolve()
        msg += "used path.cwd()\n"
    else:
        hook_script_dir = Path(base).resolve().parent
        msg += "used base()\n"
    msg += f"using depth {depth}\n"
    if depth <= 0:
        raise ValueError(f"depth less than 1 not supported, found: {depth}")
    # Walk up the requested number of parent directories, recording each
    # step for the error message below.
    for remaining in range(depth, 0, -1):
        msg += f"[depth={remaining}] dir={hook_script_dir}\n"
        hook_script_dir = hook_script_dir.parent
    # Ensure the resolved hook directory actually exists.
    hook_script_dir = hook_script_dir / "tests" / "scripts" / "request_hook"
    if not hook_script_dir.exists():
        raise RuntimeError(f"Directory {hook_script_dir} does not exist:\n{msg}")
    # Import the hook and start it up (it's not included here directly to avoid
    # keeping a database of URLs inside the tvm Python package
    sys.path.append(str(hook_script_dir))
    # This import is intentionally delayed since it should only happen in CI
    import request_hook  # pylint: disable=import-outside-toplevel
    request_hook.init()
def fetch_model_from_url(
    url: str,
    model_format: str,
    sha256: str,
) -> Tuple[tvm.ir.module.IRModule, dict]:
    """Testing helper that downloads a model and returns it as a Relay model.

    Downloads are cached for future re-use and verified against the
    expected SHA-256 digest.

    Parameters
    ----------
    url : str
        The URL or list of URLs to try downloading the model from.
    model_format: str
        The file extension of the model format used.
    sha256 : str
        The sha256 hex hash to compare the downloaded model against.

    Returns
    -------
    (mod, params) : object
        The Relay representation of the downloaded model.

    Raises
    ------
    FileNotFoundError
        If the downloaded file's SHA-256 digest does not match ``sha256``.
    """
    local_path = tvm.contrib.download.download_testdata(
        url, f"model_{sha256}.{model_format}", overwrite=False
    )
    # Hash the file in 16 MiB chunks to bound memory use.
    digest = hashlib.sha256()
    with open(local_path, "rb") as model_file:
        while True:
            chunk = model_file.read(2**24)
            if not chunk:
                break
            digest.update(chunk)
    if digest.hexdigest() != sha256:
        raise FileNotFoundError("SHA-256 hash for model does not match")
    tvmc_model = load_model(local_path, model_format)
    return tvmc_model.mod, tvmc_model.params
def _mark_parameterizations(*params, marker_fn, reason):
"""
Mark tests with a nodeid parameters that exactly matches one in params.
Useful for quickly marking tests as xfail when they have a large
combination of parameters.
"""
params = set(params)
def decorator(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
if "[" in request.node.name and "]" in request.node.name:
# Strip out the test name and the [ and ] brackets
params_from_name = request.node.name[len(request.node.originalname) + 1 : -1]
if params_from_name in params:
marker_fn(
reason=f"{marker_fn.__name__} on nodeid {request.node.nodeid}: " + reason
)
return func(request, *args, **kwargs)
return wrapper
return decorator
def xfail_parameterizations(*xfail_params, reason):
    """Mark the given parametrizations of a test as expected failures (xfail)."""
    return _mark_parameterizations(*xfail_params, marker_fn=pytest.xfail, reason=reason)
def skip_parameterizations(*skip_params, reason):
    """Skip the given parametrizations of a test."""
    return _mark_parameterizations(*skip_params, marker_fn=pytest.skip, reason=reason)
def main():
    """Run pytest on the calling test file, forwarding command-line arguments."""
    # sys._getframe(1) resolves to the caller's frame, i.e. the test file
    # that invoked main().
    test_file = inspect.getsourcefile(sys._getframe(1))
    sys.exit(pytest.main([test_file] + sys.argv[1:]))
class CompareBeforeAfter:
    """Utility for comparing before/after of TIR transforms

    A standard framework for writing tests that take a TIR PrimFunc as
    input, apply a transformation, then either compare against an
    expected output or assert that the transformation raised an error.

    A test should subclass CompareBeforeAfter, defining class members
    `before`, `transform`, and `expected`. CompareBeforeAfter will
    then use these members to define a test method and test fixture.

    `transform` may be one of the following.

    - An instance of `tvm.ir.transform.Pass`
    - A method that takes no arguments and returns a `tvm.ir.transform.Pass`
    - A pytest fixture that returns a `tvm.ir.transform.Pass`

    `before` may be any one of the following.

    - An instance of `tvm.tir.PrimFunc`. This is allowed, but is not
      the preferred method, as any errors in constructing the
      `PrimFunc` occur while collecting the test, preventing any other
      tests in the same file from being run.
    - An TVMScript function, without the ``@T.prim_func`` decoration.
      The ``@T.prim_func`` decoration will be applied when running the
      test, rather than at module import.
    - A method that takes no arguments and returns a `tvm.tir.PrimFunc`
    - A pytest fixture that returns a `tvm.tir.PrimFunc`

    `expected` may be any one of the following. The type of
    `expected` defines the test being performed. If `expected`
    provides a `tvm.tir.PrimFunc`, the result of the transformation
    must match `expected`. If `expected` is an exception, then the
    transformation must raise that exception type.

    - Any option supported for `before`.
    - The `Exception` class object, or a class object that inherits
      from `Exception`.
    - A method that takes no arguments and returns `Exception` or a
      class object that inherits from `Exception`.
    - A pytest fixture that returns `Exception` or an class object
      that inherits from `Exception`.

    Examples
    --------
    .. python::

        class TestRemoveIf(tvm.testing.CompareBeforeAfter):
            transform = tvm.tir.transform.Simplify()

            def before(A: T.Buffer(1, "int32")):
                if True:
                    A[0] = 42
                else:
                    A[0] = 5

            def expected(A: T.Buffer(1, "int32")):
                A[0] = 42
    """
    def __init_subclass__(cls):
        # Normalize the user-supplied `before`/`expected`/`transform`
        # class members into pytest fixtures as soon as the subclass is
        # defined.
        if hasattr(cls, "before"):
            cls.before = cls._normalize_before(cls.before)
        if hasattr(cls, "expected"):
            cls.expected = cls._normalize_expected(cls.expected)
        if hasattr(cls, "transform"):
            cls.transform = cls._normalize_transform(cls.transform)
    @classmethod
    def _normalize_ir_module(cls, func):
        # Wrap any of the accepted forms of `before`/`expected` into a
        # pytest fixture that returns a PrimFunc or IRModule.
        if isinstance(func, tvm.tir.PrimFunc):
            def inner(self):
                # pylint: disable=unused-argument
                return func
        elif cls._is_method(func):
            def inner(self):
                # pylint: disable=unused-argument
                return func(self)
        elif inspect.isclass(func):
            def inner(self):
                # pylint: disable=unused-argument
                # A class gathers several functions: parse each public
                # member into a PrimFunc and collect them into an IRModule.
                func_dict = {}
                for name, method in func.__dict__.items():
                    if name.startswith("_"):
                        pass
                    elif isinstance(method, tvm.ir.function.BaseFunc):
                        func_dict[name] = method
                    else:
                        source_code = "@T.prim_func\n" + textwrap.dedent(inspect.getsource(method))
                        prim_func = tvm.script.from_source(source_code)
                        func_dict[name] = prim_func
                return tvm.IRModule(func_dict)
        else:
            def inner(self):
                # pylint: disable=unused-argument
                # A bare function: apply @T.prim_func at test time, rather
                # than at module import.
                source_code = "@T.prim_func\n" + textwrap.dedent(inspect.getsource(func))
                return tvm.script.from_source(source_code)
        return pytest.fixture(inner)
    @classmethod
    def _normalize_before(cls, func):
        # Pass through anything that is already a pytest fixture.
        if hasattr(func, "_pytestfixturefunction"):
            return func
        else:
            return cls._normalize_ir_module(func)
    @classmethod
    def _normalize_expected(cls, func):
        # `expected` may additionally be an Exception subclass, meaning
        # the transform is expected to raise that exception type.
        if hasattr(func, "_pytestfixturefunction"):
            return func
        elif inspect.isclass(func) and issubclass(func, Exception):
            def inner(self):
                # pylint: disable=unused-argument
                return func
            return pytest.fixture(inner)
        else:
            return cls._normalize_ir_module(func)
    @classmethod
    def _normalize_transform(cls, transform):
        # Adapt the transform so it can be applied to either an IRModule
        # or a bare PrimFunc.
        def apply(module_transform):
            def inner(obj):
                if isinstance(obj, tvm.IRModule):
                    return module_transform(obj)
                elif isinstance(obj, tvm.tir.PrimFunc):
                    # Wrap the PrimFunc in a temporary module, transform
                    # it, then unwrap the result.
                    mod = tvm.IRModule({"main": obj})
                    mod = module_transform(mod)
                    return mod["main"]
                else:
                    raise TypeError(f"Expected IRModule or PrimFunc, but received {type(obj)}")
            return inner
        if hasattr(transform, "_pytestfixturefunction"):
            # Remember the original fixture once so the wrapping fixture
            # below can request it by name.
            if not hasattr(cls, "_transform_orig"):
                cls._transform_orig = transform
            def inner(self, _transform_orig):
                # pylint: disable=unused-argument
                return apply(_transform_orig)
        elif isinstance(transform, tvm.ir.transform.Pass):
            def inner(self):
                # pylint: disable=unused-argument
                return apply(transform)
        elif cls._is_method(transform):
            def inner(self):
                # pylint: disable=unused-argument
                return apply(transform(self))
        else:
            raise TypeError(
                "Expected transform to be a tvm.ir.transform.Pass, or a method returning a Pass"
            )
        return pytest.fixture(inner)
    @staticmethod
    def _is_method(func):
        # Heuristic: treat any callable declaring a `self` parameter as a
        # method of the test subclass.
        sig = inspect.signature(func)
        return "self" in sig.parameters
    def test_compare(self, before, expected, transform):
        """Unit test to compare the expected TIR PrimFunc to actual"""
        if inspect.isclass(expected) and issubclass(expected, Exception):
            with pytest.raises(expected):
                after = transform(before)
                # This portion through pytest.fail isn't strictly
                # necessary, but gives a better error message that
                # includes the before/after.
                before_str = before.script(name="before")
                after_str = after.script(name="after")
                pytest.fail(
                    msg=(
                        f"Expected {expected.__name__} to be raised from transformation, "
                        f"instead received TIR\n:{before_str}\n{after_str}"
                    )
                )
        elif isinstance(expected, (tvm.tir.PrimFunc, tvm.ir.IRModule)):
            after = transform(before)
            try:
                tvm.ir.assert_structural_equal(after, expected)
            except ValueError as err:
                before_str = before.script(name="before")
                after_str = after.script(name="after")
                expected_str = expected.script(name="expected")
                raise ValueError(
                    f"TIR after transformation did not match expected:\n"
                    f"{before_str}\n{after_str}\n{expected_str}"
                ) from err
        else:
            raise TypeError(
                f"tvm.testing.CompareBeforeAfter requires the `expected` fixture "
                f"to return either `Exception`, an `Exception` subclass, "
                f"or an instance of `tvm.tir.PrimFunc`. "
                f"Instead, received {type(expected)}."
            )
class _control_span_filling:
    """Context manager toggling the relay frontend's span filling.

    Wraps a PassContext that sets the ``relay.frontend.fill_span`` config
    flag for the duration of the ``with`` block.
    """
    def __init__(self, on=True):
        # Remember the requested state and prepare the PassContext that
        # applies it on entry.
        self._on = on
        self._pass_ctx = tvm.transform.PassContext(config={"relay.frontend.fill_span": self._on})
    def __enter__(self):
        self._pass_ctx.__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self._pass_ctx.__exit__(exc_type, exc_val, exc_tb)
class enable_span_filling(_control_span_filling):
    """Context manager that enables span filling in the relay frontend."""
    def __init__(self):
        super().__init__()
class disable_span_filling(_control_span_filling):
    """Context manager that disables span filling in the relay frontend."""
    def __init__(self):
        super().__init__(on=False)
| 70,687 | 32.109133 | 99 | py |
tvm | tvm-main/python/tvm/relay/testing/densenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long
"""
Port of MxNet version of Densenet to Relay.
https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/gluon/model_zoo/vision/densenet.py
"""
# pylint: enable=line-too-long
from tvm import relay
from . import layers
from .init import create_workload
def _make_dense_layer(data, growth_rate, bn_size, index):
    """Single densenet layer.

    Builds the BN -> ReLU -> 1x1 conv (bottleneck) -> BN -> ReLU -> 3x3
    conv sequence used inside a dense block.

    Parameters
    ----------
    data : relay.Expr
        Input feature map.
    growth_rate : int
        Number of output channels of the final 3x3 convolution.
    bn_size : int
        Bottleneck multiplier; the 1x1 conv emits bn_size * growth_rate
        channels.
    index : str
        Unique suffix used to name this layer's operators.

    Returns
    -------
    relay.Expr
        Output of the final 3x3 convolution.
    """
    bn1 = layers.batch_norm_infer(data, name=f"batch_1_{index}")
    relu1 = relay.nn.relu(bn1)
    conv1 = layers.conv2d(
        relu1, channels=bn_size * growth_rate, kernel_size=(1, 1), name=f"conv2d_1_{index}"
    )
    # f-string (rather than "batch_2_" + index) keeps naming consistent
    # with every other layer here and works even for a non-str index.
    bn2 = layers.batch_norm_infer(conv1, name=f"batch_2_{index}")
    relu2 = relay.nn.relu(bn2)
    conv2 = layers.conv2d(
        relu2, channels=growth_rate, kernel_size=(3, 3), padding=(1, 1), name=f"conv2d_2_{index}"
    )
    return conv2
def _make_dense_block(data, num_layers, bn_size, growth_rate, index):
    """Stack ``num_layers`` dense layers and concatenate their outputs."""
    outputs = []
    current = data
    for layer_idx in range(num_layers):
        current = _make_dense_layer(current, growth_rate, bn_size, f"{index}_{layer_idx}")
        outputs.append(current)
    # Concatenate every layer output along the channel axis.
    return relay.concatenate(outputs, 1)
def _make_transition(data, num_output_features, index):
    """Transition (BN -> ReLU -> 1x1 conv -> 2x2 avg-pool) between dense blocks."""
    normed = layers.batch_norm_infer(data, name=f"batch_t_{index}")
    activated = relay.nn.relu(normed)
    compressed = layers.conv2d(
        activated, channels=num_output_features, kernel_size=(1, 1), name=f"conv_t_{index}"
    )
    # Halve the spatial resolution on the way out of the block.
    return relay.nn.avg_pool2d(compressed, pool_size=(2, 2), strides=(2, 2))
def _make_dense_net(
    num_init_features, growth_rate, block_config, data_shape, data_dtype, bn_size=4, classes=1000
):
    """Builds up a densenet.

    Stem (7x7 conv + max-pool), then alternating dense blocks and
    transitions per ``block_config``, then a classification head.
    Returns a relay.Function over the network's free variables.
    """
    data = relay.Var(
        "data", relay.TensorType(data_shape, data_dtype)
    )  # e.g. (batch_size, 3, 224, 224)
    # Stem: strided 7x7 convolution followed by BN/ReLU and 3x3 max-pool.
    conv1 = layers.conv2d(
        data,
        channels=num_init_features,
        kernel_size=(7, 7),
        strides=(2, 2),
        padding=(3, 3),
        name="conv1",
    )
    bn1 = layers.batch_norm_infer(conv1, name="batch1")
    relu1 = relay.nn.relu(bn1)
    mp = relay.nn.max_pool2d(relu1, pool_size=(3, 3), strides=(2, 2), padding=(1, 1))
    num_features = num_init_features
    layer_out = mp
    # Dense blocks, each followed by a channel-halving transition except
    # after the final block.
    for i, num_layers in enumerate(block_config):
        layer_out = _make_dense_block(layer_out, num_layers, bn_size, growth_rate, i)
        num_features = num_features + num_layers * growth_rate
        if i != len(block_config) - 1:
            layer_out = _make_transition(layer_out, num_features // 2, i)
            num_features = num_features // 2
    # Classification head: BN/ReLU, global 7x7 average pool, dense layer.
    bn2 = layers.batch_norm_infer(layer_out, name="batch2")
    relu2 = relay.nn.relu(bn2)
    avg = relay.nn.avg_pool2d(relu2, pool_size=(7, 7))
    flat = relay.nn.batch_flatten(avg)
    ret = layers.dense_add_bias(flat, units=classes, name="dense")
    return relay.Function(relay.analysis.free_vars(ret), ret)
def get_workload(
    densenet_size=121, classes=1000, batch_size=4, image_shape=(3, 224, 224), dtype="float32"
):
    """Gets benchmark workload for densenet.

    Parameters
    ----------
    densenet_size : int, optional (default 121)
        Parameter for the network size. The supported sizes
        are 121, 161, 169, and 201.
    classes : int, optional (default 1000)
        The number of classes.
    batch_size : int, optional (default 4)
        The batch size for the network.
    image_shape : shape, optional (default (3, 224, 224))
        The shape of the input data.
    dtype : data type, optional (default 'float32')
        The data type of the input data.

    Returns
    -------
    mod: tvm.IRModule
        The relay module that contains a DenseNet network.
    params : dict of str to NDArray
        The benchmark parameters.

    Raises
    ------
    ValueError
        If ``densenet_size`` is not one of the supported sizes.
    """
    # (num_init_features, growth_rate, block_config) per the standard
    # DenseNet configurations.  DenseNet-169 previously listed 69 init
    # features, which was a typo for 64 (only DenseNet-161 differs, at 96).
    specs = {
        121: (64, 32, [6, 12, 24, 16]),
        161: (96, 48, [6, 12, 36, 24]),
        169: (64, 32, [6, 12, 32, 32]),
        201: (64, 32, [6, 12, 48, 32]),
    }
    if densenet_size not in specs:
        raise ValueError(
            f"Unsupported densenet_size {densenet_size}; expected one of {sorted(specs)}"
        )
    bn_size = 4
    num_init_features, growth_rate, block_config = specs[densenet_size]
    data_shape = tuple([batch_size] + list(image_shape))
    net = _make_dense_net(
        num_init_features, growth_rate, block_config, data_shape, dtype, bn_size, classes
    )
    return create_workload(net)
| 5,127 | 34.123288 | 101 | py |
tvm | tvm-main/python/tvm/relay/testing/dcgan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
Net of the generator of DCGAN
Adopted from:
https://github.com/tqchen/mxnet-gan/blob/main/mxgan/generator.py
Reference:
Radford, Alec, Luke Metz, and Soumith Chintala.
"Unsupervised representation learning with deep convolutional generative adversarial networks."
arXiv preprint arXiv:1511.06434 (2015).
"""
from tvm import relay
from . import layers
from .init import create_workload
def deconv2d(data, ishape, oshape, kshape, layout, name, stride=(2, 2)):
    """A transposed-convolution layer that enlarges the feature map.

    Padding and output_padding are chosen so the output spatial size
    matches ``oshape[-2:]`` exactly for the given kernel and stride.
    """
    out_h, out_w = oshape[-2], oshape[-1]
    pad_y = (kshape[0] - 1) // 2
    pad_x = (kshape[1] - 1) // 2
    adj_y = (out_h + 2 * pad_y - kshape[0]) % stride[0]
    adj_x = (out_w + 2 * pad_x - kshape[1]) % stride[1]
    kernel_layouts = {"NCHW": "IOHW", "NHWC": "HWOI"}
    if layout not in kernel_layouts:
        raise ValueError("Invalid layout: " + layout)
    return layers.conv2d_transpose(
        data,
        kernel_size=kshape,
        strides=stride,
        channels=oshape[0],
        padding=(pad_y, pad_x),
        output_padding=(adj_y, adj_x),
        data_layout=layout,
        kernel_layout=kernel_layouts[layout],
        name=name,
    )
def deconv2d_bn_relu(data, prefix, **kwargs):
    """Deconvolution followed by inference-mode batch norm and ReLU."""
    # Epsilon nudged slightly above 1e-5 (kept identical to the original value).
    epsilon = 1e-5 + 1e-12
    out = deconv2d(data, name=f"{prefix}_deconv", **kwargs)
    # Normalize over the channel dimension of whatever layout was requested.
    channel_axis = kwargs.get("layout", "NCHW").index("C")
    out = layers.batch_norm_infer(
        out, epsilon=epsilon, scale=False, axis=channel_axis, name=f"{prefix}_batch_norm"
    )
    return relay.nn.relu(out)
def get_net(
    batch_size,
    random_len=100,
    oshape=(3, 64, 64),
    ngf=128,
    code=None,
    layout="NCHW",
    dtype="float32",
):
    """Build the DCGAN generator dataflow.

    A dense layer projects the random code to a 4x4 feature map, which four
    transposed convolutions progressively upsample to a 64x64 image.

    Parameters
    ----------
    batch_size : int
        Batch size of the input code.
    random_len : int, optional
        Length of the random input vector.
    oshape : tuple, optional
        Output image shape; the spatial size must be 64x64.
    ngf : int, optional
        Number of final feature maps in the generator.
    code : relay.Expr, optional
        Input expression; a fresh "data" variable is created when None.
    layout : str, optional
        Layout of the transposed convolutions ("NCHW" or "NHWC").
    dtype : str, optional
        Data type of the input variable.

    Returns
    -------
    net : relay.Function
        The generator as a relay function.
    """
    assert oshape[-1] == 64, "Only support 64x64 image"
    assert oshape[-2] == 64, "Only support 64x64 image"
    code = relay.var("data", dtype=dtype, shape=(batch_size, random_len)) if code is None else code
    dense_weight = relay.var("dense_weight")
    dense = relay.nn.dense(code, weight=dense_weight, units=4 * 4 * ngf * 8)
    relu = relay.nn.relu(dense)
    # 4 x 4
    if layout == "NCHW":
        reshape = relay.reshape(relu, newshape=(-1, ngf * 8, 4, 4))
    elif layout == "NHWC":
        reshape = relay.reshape(relu, newshape=(-1, 4, 4, ngf * 8))
    else:
        raise ValueError("Invalid layout: " + layout)
    # 8 x 8
    dc8 = deconv2d_bn_relu(
        reshape,
        ishape=(ngf * 8, 4, 4),
        oshape=(ngf * 4, 8, 8),
        kshape=(4, 4),
        layout=layout,
        prefix="g2",
    )
    # 16x16
    dc16 = deconv2d_bn_relu(
        dc8,
        ishape=(ngf * 4, 8, 8),
        oshape=(ngf * 2, 16, 16),
        kshape=(4, 4),
        layout=layout,
        prefix="g3",
    )
    # 32x32
    dc32 = deconv2d_bn_relu(
        dc16,
        ishape=(ngf * 2, 16, 16),
        oshape=(ngf, 32, 32),
        kshape=(4, 4),
        layout=layout,
        prefix="g4",
    )
    # 64x64; the final layer has no batch norm so tanh sees raw activations
    dc64 = deconv2d(
        dc32,
        ishape=(ngf, 32, 32),
        oshape=oshape[-3:],
        kshape=(4, 4),
        layout=layout,
        name="g5_deconv",
    )
    tanh = relay.tanh(dc64)
    args = relay.analysis.free_vars(tanh)
    return relay.Function(args, tanh)
def get_workload(
    batch_size, oshape=(3, 64, 64), ngf=128, random_len=100, layout="NCHW", dtype="float32"
):
    """Get benchmark workload for a DCGAN generator.

    Parameters
    ----------
    batch_size : int
        The batch size used in the model.
    oshape : tuple, optional
        The shape of the output image, layout="CHW".
    ngf : int, optional
        The number of final feature maps in the generator.
    random_len : int, optional
        The length of the random input.
    layout : str, optional
        The layout of conv2d transpose.
    dtype : str, optional
        The data type.

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains a DCGAN network.
    params : dict of str to NDArray
        The parameters.
    """
    generator = get_net(
        batch_size, random_len, oshape=oshape, ngf=ngf, layout=layout, dtype=dtype
    )
    return create_workload(generator)
| 5,048 | 28.354651 | 99 | py |
tvm | tvm-main/python/tvm/relay/testing/inception_v3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Inception V3, suitable for images with around 299 x 299
Reference:
Szegedy, Christian, et al. "Rethinking the Inception Architecture for Computer Vision."
arXiv preprint arXiv:1512.00567 (2015).
Adopted from https://github.com/apache/incubator-mxnet/blob/master/
example/image-classification/symbols/inception-v3.py
"""
# pylint: disable=invalid-name,missing-docstring,unused-argument, superfluous-parens
from tvm import relay
from .init import create_workload
from . import layers
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=""):
    """Convolution -> inference batch norm -> ReLU building block."""
    conv = layers.conv2d(
        data=data,
        channels=int(num_filter),
        kernel_size=kernel,
        strides=stride,
        padding=pad,
        name=f"{name}{suffix}_conv1",
    )
    normalized = layers.batch_norm_infer(
        data=conv, epsilon=2e-5, scale=False, name=f"{name}{suffix}_bn"
    )
    return relay.nn.relu(data=normalized)
def Pooling(data, kernel, stride, pad, pool_type, name):
    """2-D pooling helper: 'max' or 'avg' (avg includes padded cells)."""
    common = dict(data=data, pool_size=kernel, strides=stride, padding=pad)
    if pool_type == "max":
        return relay.nn.max_pool2d(**common)
    if pool_type == "avg":
        return relay.nn.avg_pool2d(count_include_pad=True, **common)
    raise ValueError("Invalid pooling type: " + pool_type)
def Inception7A(
    data, num_1x1, num_3x3_red, num_3x3_1, num_3x3_2, num_5x5_red, num_5x5, pool, proj, name
):
    """Inception-A block: 1x1, 5x5, double-3x3 and pooled-projection towers,
    concatenated along the channel axis."""
    tower_1x1 = Conv(data, num_1x1, name=f"{name}_conv")
    tower_5x5 = Conv(data, num_5x5_red, name=f"{name}_tower", suffix="_conv")
    tower_5x5 = Conv(
        tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=f"{name}_tower", suffix="_conv_1"
    )
    tower_3x3 = Conv(data, num_3x3_red, name=f"{name}_tower_1", suffix="_conv")
    tower_3x3 = Conv(
        tower_3x3, num_3x3_1, kernel=(3, 3), pad=(1, 1), name=f"{name}_tower_1", suffix="_conv_1"
    )
    tower_3x3 = Conv(
        tower_3x3, num_3x3_2, kernel=(3, 3), pad=(1, 1), name=f"{name}_tower_1", suffix="_conv_2"
    )
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    cproj = Conv(pooling, proj, name=f"{name}_tower_2", suffix="_conv")
    concat = relay.concatenate((tower_1x1, tower_5x5, tower_3x3, cproj), axis=1)
    return concat
# First Downsample
def Inception7B(data, num_3x3, num_d3x3_red, num_d3x3_1, num_d3x3_2, pool, name):
    """Inception-B downsample block: stride-2 3x3 tower, stride-2 double-3x3
    tower and a stride-2 max pool, concatenated along channels."""
    tower_3x3 = Conv(data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=f"{name}_conv")
    tower_d3x3 = Conv(data, num_d3x3_red, name=f"{name}_tower", suffix="_conv")
    tower_d3x3 = Conv(
        tower_d3x3,
        num_d3x3_1,
        kernel=(3, 3),
        pad=(1, 1),
        stride=(1, 1),
        name=f"{name}_tower",
        suffix="_conv_1",
    )
    tower_d3x3 = Conv(
        tower_d3x3,
        num_d3x3_2,
        kernel=(3, 3),
        pad=(0, 0),
        stride=(2, 2),
        name=f"{name}_tower",
        suffix="_conv_2",
    )
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(2, 2),
        pad=(0, 0),
        pool_type="max",
        name=f"max_pool_{name}_pool",
    )
    concat = relay.concatenate((tower_3x3, tower_d3x3, pooling), axis=1)
    return concat
def Inception7C(
    data,
    num_1x1,
    num_d7_red,
    num_d7_1,
    num_d7_2,
    num_q7_red,
    num_q7_1,
    num_q7_2,
    num_q7_3,
    num_q7_4,
    pool,
    proj,
    name,
):
    """Inception-C block with factorized 7x7 convolutions: a 1x1 tower, a
    double 1x7/7x1 tower, a quadruple 7x1/1x7 tower and a pooled projection,
    concatenated along channels."""
    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
    tower_d7 = Conv(data=data, num_filter=num_d7_red, name=f"{name}_tower", suffix="_conv")
    tower_d7 = Conv(
        data=tower_d7,
        num_filter=num_d7_1,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower",
        suffix="_conv_1",
    )
    tower_d7 = Conv(
        data=tower_d7,
        num_filter=num_d7_2,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower",
        suffix="_conv_2",
    )
    tower_q7 = Conv(data=data, num_filter=num_q7_red, name=f"{name}_tower_1", suffix="_conv")
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_1,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_2,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower_1",
        suffix="_conv_2",
    )
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_3,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower_1",
        suffix="_conv_3",
    )
    tower_q7 = Conv(
        data=tower_q7,
        num_filter=num_q7_4,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower_1",
        suffix="_conv_4",
    )
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    cproj = Conv(
        data=pooling, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
    )
    # concat
    concat = relay.concatenate((tower_1x1, tower_d7, tower_q7, cproj), axis=1)
    return concat
def Inception7D(
    data, num_3x3_red, num_3x3, num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3, pool, name
):
    """Inception-D downsample block: stride-2 3x3 tower, factorized-7x7 plus
    stride-2 3x3 tower, and a stride-2 pool, concatenated along channels."""
    tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=f"{name}_tower", suffix="_conv")
    tower_3x3 = Conv(
        data=tower_3x3,
        num_filter=num_3x3,
        kernel=(3, 3),
        pad=(0, 0),
        stride=(2, 2),
        name=f"{name}_tower",
        suffix="_conv_1",
    )
    tower_d7_3x3 = Conv(
        data=data, num_filter=num_d7_3x3_red, name=f"{name}_tower_1", suffix="_conv"
    )
    tower_d7_3x3 = Conv(
        data=tower_d7_3x3,
        num_filter=num_d7_1,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    tower_d7_3x3 = Conv(
        data=tower_d7_3x3,
        num_filter=num_d7_2,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower_1",
        suffix="_conv_2",
    )
    tower_d7_3x3 = Conv(
        data=tower_d7_3x3,
        num_filter=num_d7_3x3,
        kernel=(3, 3),
        stride=(2, 2),
        name=f"{name}_tower_1",
        suffix="_conv_3",
    )
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(2, 2),
        pool_type=pool,
        pad=(0, 0),
        name=f"{pool}_pool_{name}_pool",
    )
    # concat
    concat = relay.concatenate((tower_3x3, tower_d7_3x3, pooling), axis=1)
    return concat
def Inception7E(
    data,
    num_1x1,
    num_d3_red,
    num_d3_1,
    num_d3_2,
    num_3x3_d3_red,
    num_3x3,
    num_3x3_d3_1,
    num_3x3_d3_2,
    pool,
    proj,
    name,
):
    """Inception-E block with expanded filter banks: the 3x3 convolutions are
    split into parallel 1x3 and 3x1 branches whose outputs are all
    concatenated along channels together with a pooled projection."""
    tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
    tower_d3 = Conv(data=data, num_filter=num_d3_red, name=f"{name}_tower", suffix="_conv")
    tower_d3_a = Conv(
        data=tower_d3,
        num_filter=num_d3_1,
        kernel=(1, 3),
        pad=(0, 1),
        name=f"{name}_tower",
        suffix="_mixed_conv",
    )
    tower_d3_b = Conv(
        data=tower_d3,
        num_filter=num_d3_2,
        kernel=(3, 1),
        pad=(1, 0),
        name=f"{name}_tower",
        suffix="_mixed_conv_1",
    )
    tower_3x3_d3 = Conv(
        data=data, num_filter=num_3x3_d3_red, name=f"{name}_tower_1", suffix="_conv"
    )
    tower_3x3_d3 = Conv(
        data=tower_3x3_d3,
        num_filter=num_3x3,
        kernel=(3, 3),
        pad=(1, 1),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    tower_3x3_d3_a = Conv(
        data=tower_3x3_d3,
        num_filter=num_3x3_d3_1,
        kernel=(1, 3),
        pad=(0, 1),
        name=f"{name}_tower_1",
        suffix="_mixed_conv",
    )
    tower_3x3_d3_b = Conv(
        data=tower_3x3_d3,
        num_filter=num_3x3_d3_2,
        kernel=(3, 1),
        pad=(1, 0),
        name=f"{name}_tower_1",
        suffix="_mixed_conv_1",
    )
    pooling = Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    cproj = Conv(
        data=pooling, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
    )
    # concat
    concat = relay.concatenate(
        (tower_1x1, tower_d3_a, tower_d3_b, tower_3x3_d3_a, tower_3x3_d3_b, cproj), axis=1
    )
    return concat
def get_net(batch_size, num_classes, image_shape, dtype):
    """Get network a Inception v3 network.

    Parameters
    ----------
    batch_size : int
        The batch size used in the model
    num_classes : int, optional
        Number of claseses
    image_shape : tuple, optional
        The input image shape
    dtype : str, optional
        The data type

    Returns
    -------
    net : relay.Function
        The dataflow.
    """
    data_shape = (batch_size,) + image_shape
    data = relay.var("data", shape=data_shape, dtype=dtype)
    # stage 1
    conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
    conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
    conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
    pool = Pooling(
        data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", pad=(0, 0), name="pool"
    )
    # stage 2
    conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
    conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
    pool1 = Pooling(
        data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", pad=(0, 0), name="pool1"
    )
    # stage 3
    in3a = Inception7A(pool1, 64, 64, 96, 96, 48, 64, "avg", 32, "mixed")
    in3b = Inception7A(in3a, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_1")
    in3c = Inception7A(in3b, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_2")
    in3d = Inception7B(in3c, 384, 64, 96, 96, "max", "mixed_3")
    # stage 4
    in4a = Inception7C(in3d, 192, 128, 128, 192, 128, 128, 128, 128, 192, "avg", 192, "mixed_4")
    in4b = Inception7C(in4a, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_5")
    in4c = Inception7C(in4b, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_6")
    in4d = Inception7C(in4c, 192, 192, 192, 192, 192, 192, 192, 192, 192, "avg", 192, "mixed_7")
    in4e = Inception7D(in4d, 192, 320, 192, 192, 192, 192, "max", "mixed_8")
    # stage 5
    in5a = Inception7E(in4e, 320, 384, 384, 384, 448, 384, 384, 384, "avg", 192, "mixed_9")
    in5b = Inception7E(in5a, 320, 384, 384, 384, 448, 384, 384, 384, "max", 192, "mixed_10")
    # pool
    pool = Pooling(
        data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", pad=(0, 0), name="global_pool"
    )
    flatten = relay.nn.batch_flatten(pool)
    fc1 = relay.nn.dense(flatten, relay.var("fc1_weight"), units=num_classes)
    # NOTE(review): the bias is named "fc2_bias" while the weight is "fc1_weight";
    # presumably this matches the reference checkpoint's parameter naming — confirm.
    fc1 = relay.nn.bias_add(fc1, relay.var("fc2_bias"), axis=-1)
    inception_v3 = relay.nn.softmax(data=fc1)
    args = relay.analysis.free_vars(inception_v3)
    return relay.Function(args, inception_v3)
def get_workload(batch_size=1, num_classes=1000, image_shape=(3, 299, 299), dtype="float32"):
    """Get benchmark workload for InceptionV3.

    Parameters
    ----------
    batch_size : int
        The batch size used in the model.
    num_classes : int, optional
        Number of classes.
    image_shape : tuple, optional
        The input image shape.
    dtype : str, optional
        The data type.

    Returns
    -------
    mod : tvm.IRModule
        The relay module that contains an Inception V3 network.
    params : dict of str to NDArray
        The parameters.
    """
    network = get_net(batch_size, num_classes, image_shape, dtype)
    return create_workload(network)
| 12,575 | 28.521127 | 98 | py |
tvm | tvm-main/python/tvm/relay/frontend/mxnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, no-else-return, too-many-lines
# pylint: disable=use-list-literal
"""MXNet symbol frontend."""
import json
import math
import numpy as np
import tvm
from tvm import relay
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import scope_builder as _scope_builder
from .common import StrAttrsDict
from .common import get_name as _get_name
from .common import infer_shape as _infer_shape
from .common import infer_type as _infer_type
from .common import infer_value as _infer_value
from .mxnet_qnn_op_utils import (
dequantize_mxnet_min_max,
get_conv_mkldnn_requantized_scale_outDtype,
get_mkldnn_int8_scale,
get_mkldnn_requantize_scale_outDtype,
get_mkldnn_uint8_scale,
quantize_conv_bias_mkldnn_from_var,
quantize_conv_weights_bias_channel_mkldnn_from_var,
quantize_mxnet_min_max,
)
from .nnvm_common import (
_arg_reduce,
_binop_scalar,
_cast,
_clip,
_elemwise_sum,
_init_op,
_rbinop_scalar,
_reduce,
_rename,
_reshape,
_softmax_op,
_transpose,
_upsampling,
_warn_not_used,
)
__all__ = ["from_mxnet"]
_activation_map = {"sigmoid": _op.sigmoid, "tanh": _op.tanh, "relu": _op.nn.relu}
def _mx_fully_connected(inputs, attrs):
    """Convert MXNet FullyConnected to relay dense (+ optional bias_add).

    Inputs higher than rank 2 are either flattened (when MXNet's `flatten`
    attribute applies) or folded into the leading dimension and reshaped back
    after the dense layer.
    """
    import mxnet as mx  # pylint: disable=import-outside-toplevel

    units = attrs.get_int("num_hidden")
    use_bias = not attrs.get_bool("no_bias", False)
    try:
        # Probe whether this MXNet version supports the `flatten` attribute.
        _ = mx.sym.FullyConnected(mx.sym.var("x"), num_hidden=1, flatten=True)
        has_flatten = True
    except mx.base.MXNetError:
        # no flatten attribute in old mxnet
        has_flatten = False
    use_flatten = attrs.get_bool("flatten", True)
    if has_flatten and use_flatten:
        inputs[0] = _op.nn.batch_flatten(inputs[0])
    data_shape = _infer_type(inputs[0]).checked_type.shape
    if len(data_shape) > 2:
        # Collapse all but the last axis so dense sees a 2-D input.
        inputs[0] = _op.reverse_reshape(inputs[0], [-1, 0])
    res = _op.nn.dense(inputs[0], inputs[1], units=units)
    if use_bias:
        assert len(inputs) == 3
        res = _op.nn.bias_add(res, inputs[2], axis=-1)
    if len(data_shape) > 2:
        # Restore the original leading dimensions with `units` as the last axis.
        new_shape = data_shape[:-1]
        new_shape.append(units)
        res = _op.reshape(res, new_shape)
    return res
def _get_channel_axis(layout, op_name):
if layout in ["NCHW", "NCDHW"]:
return 1
if layout == "NHWC":
return 3
if layout == "NDHWC":
return 4
raise tvm.error.OpAttributeInvalid(
f'Value {padding} in attribute "layout" of operator {op_name} is not valid.'
)
def _mx_activations(inputs, attrs):
    """Convert MXNet Activation; softrelu is expanded into primitive ops."""
    act_type = attrs.get_str("act_type")
    assert len(inputs) == 1
    data = inputs[0]
    if act_type == "softrelu":
        # Numerically stable softplus: log(1 + exp(-|x|)) + relu(x)
        one = _expr.const(1, dtype="float32")
        exp_neg_abs = _op.exp(_op.negative(_op.abs(data)))
        return _op.add(_op.log(_op.add(one, exp_neg_abs)), _op.nn.relu(data))
    if act_type not in _activation_map:
        raise tvm.error.OpNotImplemented(
            f"Operator {act_type} is not supported for frontend MXNet."
        )
    return _activation_map[act_type](data)
def _mx_compare(new_op, wrapper):
    """Wrap a comparison op so its result is cast back to the input's dtype."""

    def impl(inputs, attrs):
        in_dtype = _infer_type(inputs[0]).checked_type.dtype
        compared = wrapper(new_op)(inputs, attrs)
        return compared.astype(in_dtype)

    return impl
def _mx_unravel_index(inputs, attrs):
    """unravel_index with the target shape baked in as a constant expression."""
    assert len(inputs) == 1
    target_shape = _expr.const(list(attrs.get_int_tuple("shape")))
    return _op.unravel_index(inputs[0], target_shape)
def _mx_swap_axis(inputs, attrs):
    """SwapAxis -> transpose with the two requested dims exchanged."""
    assert len(inputs) == 1
    d1 = attrs.get_int("dim1")
    d2 = attrs.get_int("dim2")
    rank = len(_infer_type(inputs[0]).checked_type.shape)
    perm = list(range(rank))
    perm[d1], perm[d2] = perm[d2], perm[d1]
    return _op.transpose(inputs[0], axes=perm)
def _mx_zeros(inputs, attrs):
    """_zeros -> relay zeros; a zero-extent shape maps to None (no tensor)."""
    assert not inputs
    shape = attrs.get_int_tuple("shape")
    if 0 in shape:
        return None
    return _op.zeros(shape=shape, dtype=attrs.get_str("dtype", "float32"))
def _mx_conv(inputs, attrs):
    """Dispatch MXNet Convolution to the 1-D/2-D/3-D converter by kernel rank."""
    ndim = len(attrs.get_int_tuple("kernel"))
    handlers = {1: _mx_conv1d, 2: _mx_conv2d, 3: _mx_conv3d}
    if ndim not in handlers:
        raise tvm.error.OpAttributeInvalid(
            "1D, 2D or 3D kernels only are supported for operator Convolution"
        )
    return handlers[ndim](inputs, attrs)
def _mx_conv1d(inputs, attrs):
    """Convert a 1-D MXNet Convolution by lifting it to a conv2d with a
    dummy height dimension of size 1, then squeezing that dim back out."""
    kernel_size = attrs.get_int_tuple("kernel")
    if len(kernel_size) != 1:
        raise tvm.error.OpAttributeInvalid(
            "Non 1D or 2D kernels are not supported for operator Convolution"
        )
    data_layout = attrs.get_str("layout", "NCW")
    # MXNet Conv1D only supports 'NCW' layout for now.
    if data_layout != "NCW":
        raise tvm.error.OpAttributeInvalid('Only "NCW" data layout is supported for 1D Convolution')
    # Lift NCW -> NCHW with H == 1 so the 2-D convolution can be reused.
    data_layout = "NCHW"
    channel_axis = 1
    kernel_layout = "OIHW"
    new_attrs = {}
    new_attrs["channels"] = attrs.get_int("num_filter")
    new_attrs["kernel_size"] = (1,) + kernel_size
    new_attrs["strides"] = (1,) + attrs.get_int_tuple("stride", (1,))
    new_attrs["padding"] = (0,) + attrs.get_int_tuple("pad", (0,))
    new_attrs["dilation"] = (1,) + attrs.get_int_tuple("dilate", (1,))
    new_attrs["groups"] = attrs.get_int("num_group", 1)
    new_attrs["data_layout"] = data_layout
    new_attrs["kernel_layout"] = kernel_layout
    use_bias = not attrs.get_bool("no_bias", False)
    data = _op.expand_dims(inputs[0], axis=2)
    kernel = _op.expand_dims(inputs[1], axis=2)
    res = _op.nn.conv2d(data, kernel, **new_attrs)
    if use_bias:
        assert len(inputs) == 3
        res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
    # Drop the dummy height dimension to restore the 1-D output.
    res = _op.squeeze(res, axis=[2])
    return res
def _get_mx_conv2d_attrs(attrs):
    """Translate MXNet 2-D Convolution attributes into relay conv2d kwargs."""
    data_layout = attrs.get_str("layout", "NCHW")
    if "kernel_layout" in attrs.attrs:
        kernel_layout = attrs.get_str("kernel_layout")
    else:
        kernel_layout = "HWIO" if data_layout == "NHWC" else "OIHW"
    return {
        "channels": attrs.get_int("num_filter"),
        "kernel_size": attrs.get_int_tuple("kernel"),
        "strides": attrs.get_int_tuple("stride", (1, 1)),
        "padding": attrs.get_int_tuple("pad", (0, 0)),
        "dilation": attrs.get_int_tuple("dilate", (1, 1)),
        "groups": attrs.get_int("num_group", 1),
        "data_layout": data_layout,
        "kernel_layout": kernel_layout,
    }
def _mx_conv2d(inputs, attrs):
    """Convert a 2-D MXNet Convolution (with optional bias) to relay conv2d."""
    if len(attrs.get_int_tuple("kernel")) != 2:
        raise tvm.error.OpAttributeInvalid("Only 2D kernels are supported for operator Convolution")
    data_layout = attrs.get_str("layout", "NCHW")
    channel_axis = _get_channel_axis(data_layout, "conv2d")
    conv = _op.nn.conv2d(inputs[0], inputs[1], **_get_mx_conv2d_attrs(attrs))
    if attrs.get_bool("no_bias", False):
        return conv
    assert len(inputs) == 3
    return _op.nn.bias_add(conv, inputs[2], axis=channel_axis)
def _get_mx_conv3d_attrs(attrs):
    """Translate MXNet 3-D Convolution attributes into relay conv3d kwargs."""
    data_layout = attrs.get_str("layout", "NCDHW")
    if "kernel_layout" in attrs.attrs:
        kernel_layout = attrs.get_str("kernel_layout")
    else:
        kernel_layout = "DHWIO" if data_layout == "NDHWC" else "OIDHW"
    return {
        "channels": attrs.get_int("num_filter"),
        "kernel_size": attrs.get_int_tuple("kernel"),
        "strides": attrs.get_int_tuple("stride", (1, 1, 1)),
        "padding": attrs.get_int_tuple("pad", (0, 0, 0)),
        "dilation": attrs.get_int_tuple("dilate", (1, 1, 1)),
        "groups": attrs.get_int("num_group", 1),
        "data_layout": data_layout,
        "kernel_layout": kernel_layout,
    }
def _mx_conv3d(inputs, attrs):
    """Convert a 3-D MXNet Convolution (with optional bias) to relay conv3d."""
    if len(attrs.get_int_tuple("kernel")) != 3:
        raise tvm.error.OpAttributeInvalid("Only 3D kernels are supported for operator Convolution")
    data_layout = attrs.get_str("layout", "NCDHW")
    channel_axis = _get_channel_axis(data_layout, "conv3d")
    conv = _op.nn.conv3d(inputs[0], inputs[1], **_get_mx_conv3d_attrs(attrs))
    if attrs.get_bool("no_bias", False):
        return conv
    assert len(inputs) == 3
    return _op.nn.bias_add(conv, inputs[2], axis=channel_axis)
def _mx_conv_transpose(inputs, attrs):
    """Dispatch MXNet Deconvolution to the 1-D/2-D/3-D converter by kernel rank."""
    ndim = len(attrs.get_int_tuple("kernel"))
    handlers = {
        1: _mx_conv1d_transpose,
        2: _mx_conv2d_transpose,
        3: _mx_conv3d_transpose,
    }
    if ndim not in handlers:
        raise tvm.error.OpAttributeInvalid(
            "1D, 2D or 3D kernels only are supported for operator Convolution"
        )
    return handlers[ndim](inputs, attrs)
def _mx_conv1d_transpose(inputs, attrs):
    """Convert a 1-D MXNet Deconvolution to relay conv1d_transpose.

    Only the "NCW" data layout is supported; `target_shape` is rejected.
    """
    if "target_shape" in attrs.attrs:
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "target_shape" is not supported for operator Conv2D-transpose.'
        )
    data_layout = attrs.get_str("layout", "NCW")
    if data_layout != "NCW":
        raise tvm.error.OpAttributeInvalid('Only "NCW" data layout is supported for 1D Convolution')
    channel_axis = 1
    kernel_layout = "IOW"
    new_attrs = {}
    new_attrs["channels"] = attrs.get_int("num_filter")
    new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")
    new_attrs["strides"] = attrs.get_int_tuple("stride", (1,))
    new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0,))
    new_attrs["padding"] = attrs.get_int_tuple("pad", (0,))
    new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1,))
    new_attrs["groups"] = attrs.get_int("num_group", 1)
    new_attrs["data_layout"] = data_layout
    new_attrs["kernel_layout"] = kernel_layout
    # MXNet Deconvolution defaults to no bias.
    use_bias = not attrs.get_bool("no_bias", True)
    res = _op.nn.conv1d_transpose(inputs[0], inputs[1], **new_attrs)
    if use_bias:
        assert len(inputs) == 3
        res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
    return res
def _mx_conv2d_transpose(inputs, attrs):
    """Convert a 2-D MXNet Deconvolution to relay conv2d_transpose.

    `target_shape` is rejected; kernel layout defaults by data layout.
    """
    if "target_shape" in attrs.attrs:
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "target_shape" is not supported for operator Conv2D-transpose.'
        )
    kernel_size = attrs.get_int_tuple("kernel")
    if len(kernel_size) != 2:
        raise tvm.error.OpAttributeInvalid(
            "Non-2D kernels are not supported for operator Conv2D-transpose."
        )
    data_layout = attrs.get_str("layout", "NCHW")
    channel_axis = _get_channel_axis(data_layout, "conv2d_transpose")
    if "kernel_layout" in attrs.attrs:
        kernel_layout = attrs.get_str("kernel_layout")
    else:
        kernel_layout = "HWIO" if data_layout == "NHWC" else "IOHW"
    new_attrs = {}
    new_attrs["channels"] = attrs.get_int("num_filter")
    new_attrs["kernel_size"] = kernel_size
    new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
    new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0, 0))
    new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
    new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1))
    new_attrs["groups"] = attrs.get_int("num_group", 1)
    new_attrs["data_layout"] = data_layout
    new_attrs["kernel_layout"] = kernel_layout
    # MXNet Deconvolution defaults to no bias.
    use_bias = not attrs.get_bool("no_bias", True)
    res = _op.nn.conv2d_transpose(inputs[0], inputs[1], **new_attrs)
    if use_bias:
        assert len(inputs) == 3
        res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
    return res
def _mx_conv3d_transpose(inputs, attrs):
    """Convert a 3-D MXNet Deconvolution to relay conv3d_transpose.

    `target_shape` is rejected; kernel layout defaults by data layout.
    """
    if "target_shape" in attrs.attrs:
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "target_shape" is not supported for operator Conv3D-transpose.'
        )
    kernel_size = attrs.get_int_tuple("kernel")
    if len(kernel_size) != 3:
        raise tvm.error.OpAttributeInvalid(
            "Non-3D kernels are not supported for operator Conv3D-transpose."
        )
    data_layout = attrs.get_str("layout", "NCDHW")
    channel_axis = _get_channel_axis(data_layout, "conv3d_transpose")
    if "kernel_layout" in attrs.attrs:
        kernel_layout = attrs.get_str("kernel_layout")
    else:
        kernel_layout = "DHWIO" if data_layout == "NDHWC" else "OIDHW"
    new_attrs = {}
    new_attrs["channels"] = attrs.get_int("num_filter")
    new_attrs["kernel_size"] = kernel_size
    new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1, 1))
    new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0, 0, 0))
    new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0, 0))
    new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1, 1))
    new_attrs["groups"] = attrs.get_int("num_group", 1)
    new_attrs["data_layout"] = data_layout
    new_attrs["kernel_layout"] = kernel_layout
    # MXNet Deconvolution defaults to no bias.
    use_bias = not attrs.get_bool("no_bias", True)
    res = _op.nn.conv3d_transpose(inputs[0], inputs[1], **new_attrs)
    if use_bias:
        assert len(inputs) == 3
        res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
    return res
def _mx_pooling(inputs, attrs):
    """Convert MXNet Pooling to the matching relay 2-D/3-D max/avg pool.

    The input rank (4 vs 5) selects 2-D vs 3-D pooling; `global_pool`
    selects the global variants. MXNet's "full" pooling convention maps
    to relay's `ceil_mode`.
    """
    global_pool = attrs.get_bool("global_pool", False)
    pool_type = attrs.get_str("pool_type")

    def _pool2d(new_op, is_avg):
        # Shared 2-D pooling attribute translation.
        kernel_size = attrs.get_int_tuple("kernel")
        if len(kernel_size) != 2:
            raise tvm.error.OpAttributeInvalid("Only 2D kernels are supported for operator Pool2D.")
        new_attrs = {}
        new_attrs["pool_size"] = kernel_size
        new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
        new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
        new_attrs["ceil_mode"] = attrs.get_str("pooling_convention", "valid") == "full"
        if is_avg:
            new_attrs["count_include_pad"] = attrs.get_bool("count_include_pad", True)
        return new_op(inputs[0], **new_attrs)

    def _pool3d(new_op, is_avg):
        # Shared 3-D pooling attribute translation.
        kernel_size = attrs.get_int_tuple("kernel")
        if len(kernel_size) != 3:
            raise tvm.error.OpAttributeInvalid("Only 3D kernels are supported for operator Pool3D.")
        new_attrs = {}
        new_attrs["pool_size"] = kernel_size
        new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1, 1))
        new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0, 0))
        new_attrs["ceil_mode"] = attrs.get_str("pooling_convention", "valid") == "full"
        if is_avg:
            new_attrs["count_include_pad"] = attrs.get_bool("count_include_pad", True)
        return new_op(inputs[0], **new_attrs)

    # 3D pooling
    if len(_infer_shape(inputs[0])) == 5:
        if pool_type == "max":
            if global_pool:
                return _op.nn.global_max_pool3d(inputs[0])
            return _pool3d(_op.nn.max_pool3d, False)
        if pool_type == "avg":
            if global_pool:
                return _op.nn.global_avg_pool3d(inputs[0])
            return _pool3d(_op.nn.avg_pool3d, True)
        raise tvm.error.OpNotImplemented(
            f"Operator {pool_type.capitalize()} Pooling is not supported for frontend MXNet."
        )
    # 2D Pooling
    if pool_type == "max":
        if global_pool:
            return _op.nn.global_max_pool2d(inputs[0])
        return _pool2d(_op.nn.max_pool2d, False)
    if pool_type == "avg":
        if global_pool:
            return _op.nn.global_avg_pool2d(inputs[0])
        return _pool2d(_op.nn.avg_pool2d, True)
    raise tvm.error.OpNotImplemented(
        f"Operator {pool_type.capitalize()} Pooling is not supported for frontend MXNet."
    )
def _mx_adaptive_avg_pooling(inputs, attrs):
    """contrib AdaptiveAvgPooling2D -> relay adaptive_avg_pool2d."""
    target_size = attrs.get_int_tuple("output_size", [])
    return _op.nn.adaptive_avg_pool2d(inputs[0], target_size)
def _mx_dropout(inputs, attrs):
    """Dropout with MXNet's default probability p=0.5."""
    return _op.nn.dropout(inputs[0], rate=attrs.get_float("p", 0.5))
def _mx_BlockGrad(inputs, attrs):  # pylint: disable=unused-argument
    # Gradient blocking is irrelevant at inference; pass inputs through unchanged.
    return inputs
def _mx_batch_norm(inputs, attrs):
    """Convert MXNet BatchNorm(data, gamma, beta, moving_mean, moving_var).

    NOTE(review): relay's batch_norm yields a multi-output wrapper
    (out, moving_mean, moving_var) and this returns it unindexed —
    presumably the caller handles multi-output nodes; confirm, or index
    [0] if only the normalized output is intended.
    """
    if attrs.get_bool("output_mean_var", False):
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "output_mean_var" is not supported for operator Batch Norm.'
        )
    if attrs.get_bool("use_global_stats", False):
        _warn_not_used("use_global_stats", "batch_norm")
    new_attrs = {}
    new_attrs["axis"] = attrs.get_int("axis", 1)
    new_attrs["epsilon"] = attrs.get_float("eps", 0.001)
    new_attrs["center"] = True
    # MXNet's fix_gamma (default True) disables the learned scale.
    new_attrs["scale"] = not attrs.get_bool("fix_gamma", True)
    return _op.nn.batch_norm(*inputs, **new_attrs)
def _mx_instance_norm(inputs, attrs):
    """InstanceNorm(data, gamma, beta) -> relay instance_norm."""
    assert len(inputs) == 3
    return _op.nn.instance_norm(
        *inputs,
        axis=attrs.get_int("axis", 1),
        epsilon=attrs.get_float("eps", 1e-5),
    )
def _mx_layer_norm(inputs, attrs):
    """Convert MXNet LayerNorm(data, gamma, beta) to relay layer_norm.

    Raises
    ------
    tvm.error.OpAttributeUnImplemented
        If output_mean_var is requested (relay layer_norm is single-output).
    """
    assert len(inputs) == 3
    if attrs.get_bool("output_mean_var", False):
        # Fixed spelling: tvm.error defines OpAttributeUnImplemented (capital I),
        # as used elsewhere in this file; the old lowercase name raised
        # AttributeError instead of the intended error.
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "output_mean_var" is not supported for operator Layer Norm.'
        )
    new_attrs = {}
    new_attrs["axis"] = attrs.get_int("axis", -1)
    new_attrs["epsilon"] = attrs.get_float("eps", 1e-5)
    return _op.nn.layer_norm(*inputs, **new_attrs)
def _mx_group_norm(inputs, attrs):
    """Convert MXNet GroupNorm(data, gamma, beta) to relay group_norm.

    Raises
    ------
    tvm.error.OpAttributeUnImplemented
        If output_mean_var is requested (relay group_norm is single-output).
    """
    assert len(inputs) == 3
    if attrs.get_bool("output_mean_var", False):
        # Fixed spelling: tvm.error defines OpAttributeUnImplemented (capital I),
        # as used elsewhere in this file; the old lowercase name raised
        # AttributeError instead of the intended error.
        raise tvm.error.OpAttributeUnImplemented(
            'Attribute "output_mean_var" is not supported for operator Group Norm.'
        )
    new_attrs = {}
    new_attrs["axis"] = 1  # channel axis fixed to 1 (NCHW assumed)
    new_attrs["num_groups"] = attrs.get_int("num_groups", 1)
    new_attrs["epsilon"] = attrs.get_float("eps", 1e-5)
    return _op.nn.group_norm(*inputs, **new_attrs)
def _mx_slice(inputs, attrs):
    """Convert MXNet slice to relay strided_slice.

    Missing per-axis entries default to 0 (begin), the input extent (end),
    and 1 (stride).

    Raises
    ------
    tvm.error.OpAttributeRequired
        If the "begin" or "end" attribute is absent.
    """
    begin = attrs.get_int_tuple("begin", None)
    end = attrs.get_int_tuple("end", None)
    stride = attrs.get_int_tuple("step", None)
    # Validate BEFORE converting to lists: the old code called list(begin)
    # first, so a missing attribute raised TypeError and masked the intended
    # OpAttributeRequired error.
    if begin is None:
        raise tvm.error.OpAttributeRequired('Attribute "begin" not found in operator Slice.')
    if end is None:
        raise tvm.error.OpAttributeRequired('Attribute "end" not found in operator Slice.')
    input_shape = _infer_type(inputs[0]).checked_type.shape
    begin = [x if x is not None else 0 for x in begin]
    end = [ed if ed is not None else input_shape[i] for i, ed in enumerate(end)]
    new_attrs = {"begin": begin, "end": end}
    if stride is not None:
        new_attrs["strides"] = [x if x is not None else 1 for x in stride]
    return _op.strided_slice(inputs[0], **new_attrs)
def _mx_slice_like(inputs, attrs):
    """slice_like over the axes listed in attrs (all axes when absent)."""
    assert len(inputs) == 2
    return _op.slice_like(*inputs, axes=attrs.get_int_tuple("axes", None))
def _mx_slice_axis(inputs, attrs):
    """Convert MXNet slice_axis to a strided_slice over a single axis.

    Negative axis/begin/end values are normalized against the (static)
    input shape; "None" end means the full extent of the axis.
    """
    assert len(inputs) == 1
    expr = _infer_type(inputs[0])
    shape = expr.checked_type.shape
    axis = attrs.get_int("axis")
    ax_beg = attrs.get_int("begin")
    # "end" may literally be the string "None" in the serialized attrs.
    ax_end = attrs.get_str("end")
    if axis < 0:
        axis += len(shape)
    assert 0 <= axis < len(shape)
    if ax_end == "None":
        ax_end = int(shape[axis])
    else:
        ax_end = int(ax_end)
    if ax_beg < 0:
        ax_beg += int(shape[axis])
    if ax_end < 0:
        ax_end += int(shape[axis])
    assert 0 <= ax_beg < int(shape[axis])
    assert ax_beg < ax_end <= int(shape[axis])
    # Full extent on every axis except the sliced one.
    begin = []
    end = []
    for i, dim in enumerate(shape):
        if i != axis:
            begin.append(0)
            end.append(dim)
        else:
            begin.append(ax_beg)
            end.append(ax_end)
    return _op.strided_slice(inputs[0], begin, end)
def _mx_crop_like(inputs, attrs):
    """Convert MXNet ``Crop`` (crop-like form) to Relay.

    Only the two-input "crop like" pattern is supported: input[0] is cropped
    to the spatial size of input[1]. With zero offsets this maps to
    ``slice_like`` on the H/W axes; otherwise to a ``strided_slice`` whose
    end positions come from the like-tensor's shape plus the offsets.
    """
    if len(inputs) < 2:
        raise tvm.error.OpAttributeUnimplemented(
            "Only support crop_like pattern for operator Crop."
        )
    if attrs.get_bool("center_crop", False):
        raise tvm.error.OpAttributeUnimplemented("Center crop is not supported in operator Crop.")
    if attrs.get_int_tuple("h_w", (0, 0)) != (0, 0):
        raise tvm.error.OpAttributeUnimplemented("Doesn't support h_w in operator Crop.")
    offset = attrs.get_int_tuple("offset", (0, 0))
    new_attrs = {}
    if offset == (0, 0):
        # Fast path: crop to the like-tensor's H/W with no shift.
        new_attrs["axes"] = (2, 3)
        return _op.slice_like(*inputs, **new_attrs)
    expr = _infer_type(inputs[1])
    like_shape = expr.checked_type.shape
    # Assumes NCHW layout: offsets apply to the spatial (H, W) axes.
    new_attrs["begin"] = [0, 0, offset[0], offset[1]]
    new_attrs["end"] = [
        like_shape[0],
        like_shape[1],
        offset[0] + like_shape[2],
        offset[1] + like_shape[3],
    ]
    return _op.strided_slice(inputs[0], **new_attrs)
def _mx_split(inputs, attrs):
    """Convert MXNet ``split`` (SliceChannel) to Relay ``split``."""
    axis = attrs.get_int("axis", 1)
    sections = attrs.get_int("num_outputs")
    parts = _op.split(inputs[0], indices_or_sections=sections, axis=axis)
    if not attrs.get_bool("squeeze_axis", False):
        return parts
    # squeeze_axis drops the (now size-1) split axis from every piece.
    return tuple(_op.squeeze(part, axis=[axis]) for part in parts)
def _mx_softmax_activation(inputs, attrs):
    """Convert MXNet SoftmaxActivation to Relay softmax.

    "instance" mode normalizes over axis 0, "channel" mode over axis 1.
    """
    if attrs.get_str("mode", "instance") == "instance":
        return _op.nn.softmax(inputs[0], axis=0)
    return _op.nn.softmax(inputs[0], axis=1)
def _mx_softmax_output(inputs, attrs):
    """Convert MXNet SoftmaxOutput; only the forward softmax is kept."""
    multi_output = attrs.get_bool("multi_output", False)
    if multi_output:
        # Per-channel softmax when multi_output is set.
        return _op.nn.softmax(inputs[0], axis=1)
    return _op.nn.softmax(inputs[0])
def _mx_linear_regression_output(inputs, _):
return inputs[0]
def _mx_logistic_regression_output(inputs, _):
    """LogisticRegressionOutput applies sigmoid at inference time."""
    data = inputs[0]
    return _op.sigmoid(data)
def _mx_concat(inputs, attrs):
    """Convert MXNet ``Concat`` to Relay ``concatenate``."""
    dim = attrs.get_int("dim", 1)
    return _op.concatenate(tuple(inputs), axis=dim)
def _mx_stack(inputs, attrs):
    """Convert MXNet ``stack`` to Relay ``stack``."""
    return _op.stack(tuple(inputs), axis=attrs.get_int("axis", 0))
def _mx_expand_dims(inputs, attrs):
    """Convert MXNet ``expand_dims`` to Relay ``expand_dims``."""
    return _op.expand_dims(inputs[0], axis=attrs.get_int("axis"))
def _mx_pad(inputs, attrs):
    """Convert MXNet ``Pad`` to Relay ``nn.pad``.

    Raises OpAttributeRequired when "mode" or "pad_width" is missing, and
    OpAttributeInvalid when "mode" is unsupported or "pad_width" has holes.
    """
    pad_mode = attrs.get_str("mode", None)
    if pad_mode is None:
        raise tvm.error.OpAttributeRequired('Attribute "mode" not found in operator pad.')
    if pad_mode not in ["constant", "edge", "reflect"]:
        # Bug fix: the message previously referenced the undefined name
        # `mode`, so this path raised NameError instead of the intended
        # OpAttributeInvalid.
        raise tvm.error.OpAttributeInvalid(
            "Value " + pad_mode + ' in attribute "mode" is not valid'
        )
    pad_width = attrs.get_int_tuple("pad_width", None)
    if pad_width is None:
        raise tvm.error.OpAttributeRequired('Attribute "pad_width" not found in operator pad.')
    if None in pad_width:
        raise tvm.error.OpAttributeInvalid(
            'Value None in attribute "pad_width" of operator Slice is not valid.'
        )
    constant_value = attrs.get_float("constant_value", 0.0)
    # pad_width is flattened as (before_0, after_0, before_1, after_1, ...);
    # pair it up into per-axis (before, after) tuples.
    padding = tuple(tuple((b, a)) for b, a in zip(pad_width[::2], pad_width[1::2]))
    return _op.nn.pad(
        data=inputs[0], pad_width=padding, pad_value=constant_value, pad_mode=pad_mode
    )
def _mx_leaky_relu(inputs, attrs):
    """Convert MXNet ``LeakyReLU`` to Relay.

    Dispatches on ``act_type``: leaky, prelu, elu, rrelu (inference-mean
    approximation), or gelu (erf formulation). Unknown types raise
    OpNotImplemented.
    """
    act_type = attrs.get_str("act_type", "leaky")
    if act_type == "leaky":
        return _op.nn.leaky_relu(inputs[0], alpha=attrs.get_float("slope", 0.25))
    if act_type == "prelu":
        assert len(inputs) == 2
        return _op.nn.prelu(*inputs)
    if act_type == "elu":
        # -slope * relu(1-exp(x)) + relu(x)
        slope = attrs.get_float("slope", 0.25)
        one = _expr.const(1, dtype="float32")
        x = inputs[0]
        mslope = _op.nn.relu(_op.subtract(one, _op.exp(x)))
        mslope = _op.multiply(mslope, _expr.const(-slope, dtype="float32"))
        return _op.add(mslope, _op.nn.relu(x))
    if act_type == "rrelu":
        # NOTE this is only converted for inference.
        # At inference time rrelu uses the mean of the random slope range.
        lower_bound = attrs.get_float("lower_bound")
        upper_bound = attrs.get_float("upper_bound")
        alpha = (lower_bound + upper_bound) / 2.0
        return _op.nn.leaky_relu(inputs[0], alpha=alpha)
    if act_type == "gelu":
        # 0.5 * x * (1 + erf(x / sqrt(2)))
        sqrt2 = _expr.const(math.sqrt(2), dtype="float32")
        erf = _op.erf(_op.divide(inputs[0], sqrt2))
        one = _expr.const(1, dtype="float32")
        erf_plus_one = _op.add(one, erf)
        half = _expr.const(0.5, dtype="float32")
        half_x = _op.multiply(inputs[0], half)
        return _op.multiply(half_x, erf_plus_one)
    raise tvm.error.OpNotImplemented(f"Operator {act_type} is not supported for frontend MXNet.")
def _mx_make_power(power):
    """Build a converter that raises its single input to the fixed `power`."""
    def _impl(inputs, _):  # Note: no attrs
        assert len(inputs) == 1
        # Note: int maps to "int32", float maps to "float32"
        exponent = _expr.const(power, dtype=None)
        return _op.power(inputs[0], exponent)
    return _impl
def _mx_make_exponent(base):
    """Build a converter that multiplies its input by e**base.

    NOTE(review): the comment below states exp(b, x) = e^b * e^x, but the
    code multiplies the raw input by e^b (no e^x factor) — confirm intended
    composition at the call site before changing anything.
    """
    # exp(b, x) = e^b * e^x
    def _impl(inputs, _):  # Note: no attrs
        assert len(inputs) == 1
        scalar = _op.exp(_expr.const(base, dtype="float32"))
        return _op.multiply(inputs[0], scalar)
    return _impl
def _mx_make_logarithm(base):
    """Build a converter that divides its input by log(base).

    NOTE(review): the comment below states log(b, x) = log(x) / log(b), but
    the code divides the raw input (not its log) by log(b) — confirm whether
    the caller already applies log before changing anything.
    """
    # log(b, x) = log(x) / log(b)
    def _impl(inputs, _):  # Note: no attrs
        assert len(inputs) == 1
        scalar = _op.log(_expr.const(base, dtype="float32"))
        return _op.divide(inputs[0], scalar)
    return _impl
def _mx_expm1():
    """Return a converter for expm1: exp(x) - 1."""
    # exp_minus_1 x = exp(x) - 1
    def _impl(inputs, _):  # Note: no attrs
        assert len(inputs) == 1
        one = _expr.const(1, dtype="float32")
        # Bug fix: this previously computed log(x - 1); per the op's own
        # comment (and by analogy with _mx_log1p) it must be exp(x) - 1.
        return _op.subtract(_op.exp(inputs[0]), one)
    return _impl
def _mx_log1p():
    """Return a converter for log1p: log(x + 1)."""
    # 1_plus_log x = log(x + 1)
    def _impl(inputs, _):  # Note: no attrs
        assert len(inputs) == 1
        shifted = _op.add(inputs[0], _expr.const(1, dtype="float32"))
        return _op.log(shifted)
    return _impl
def _mx_lrn(inputs, attrs):
    """Convert MXNet ``LRN`` to Relay ``nn.lrn``."""
    assert len(inputs) == 1
    return _op.nn.lrn(
        inputs[0],
        alpha=attrs.get_float("alpha", 0.0001),
        beta=attrs.get_float("beta", 0.75),
        bias=attrs.get_float("knorm", 2),
        # NCHW format and normalization along channel axis
        axis=1,
        size=attrs.get_int("nsize"),
    )
def _mx_multibox_prior(inputs, attrs):
    """Convert MXNet ``_contrib_MultiBoxPrior`` to Relay ``multibox_prior``."""
    return _op.vision.multibox_prior(
        inputs[0],
        sizes=attrs.get_float_tuple("sizes", (1.0,)),
        steps=attrs.get_float_tuple("steps", (-1.0, -1.0)),
        offsets=attrs.get_float_tuple("offsets", (0.5, 0.5)),
        ratios=attrs.get_float_tuple("ratios", (1.0,)),
        clip=attrs.get_bool("clip", False),
    )
def _mx_multibox_detection(inputs, attrs):
    """Convert MXNet ``_contrib_MultiBoxDetection``.

    Lowers to ``multibox_transform_loc`` (decoding) followed by
    ``non_max_suppression``; inputs are (cls_prob, loc_pred, anchors).
    """
    # Attributes consumed by the box decoding stage.
    new_attrs0 = {}
    new_attrs0["clip"] = attrs.get_bool("clip", True)
    new_attrs0["threshold"] = attrs.get_float("threshold", 0.01)
    new_attrs0["variances"] = attrs.get_float_tuple("variances", (0.1, 0.1, 0.2, 0.2))
    # Attributes consumed by the NMS stage.
    new_attrs1 = {}
    new_attrs1["return_indices"] = False
    new_attrs1["iou_threshold"] = attrs.get_float("nms_threshold", 0.5)
    new_attrs1["force_suppress"] = attrs.get_bool("force_suppress", False)
    new_attrs1["top_k"] = attrs.get_int("nms_topk", -1)
    ret = _op.vision.multibox_transform_loc(inputs[0], inputs[1], inputs[2], **new_attrs0)
    return _op.vision.non_max_suppression(ret[0], ret[1], ret[1], **new_attrs1)
def _mx_dot(inputs, attrs):
    """Convert MXNet ``dot`` to a Relay 2-D matmul plus reshapes.

    Both operands are flattened to matrices, multiplied, and the result is
    reshaped back to the combined outer dimensions. ``transpose_a`` is not
    supported; ``transpose_b`` is realized as an explicit transpose.
    """
    assert len(inputs) == 2
    a = inputs[0]
    b = inputs[1]
    rank_a = len(_infer_type(a).checked_type.shape)
    rank_b = len(_infer_type(b).checked_type.shape)
    if rank_a < 1 or rank_b < 1:
        raise tvm.error.OpAttributeInvalid("Unsupported shape of input tensors.")
    transpose_a = attrs.get_bool("transpose_a", False)
    transpose_b = attrs.get_bool("transpose_b", False)
    if transpose_a is True:
        msg = f'Value {transpose_a} in attribute "transpose_a" of operator dot is not valid.'
        raise tvm.error.OpAttributeInvalid(msg)
    # When performing dot product we need to properly handle shape of result -> out_shape
    if rank_a == 1:
        # 1-D lhs is promoted to a single row; it contributes no out dims.
        out_shape = list()
        a = _op.expand_dims(a, axis=0)
    else:
        shape_a = list(_infer_type(a).checked_type.shape)
        out_shape = shape_a[:-1]
        a = _op.reshape(a, newshape=(-1, shape_a[-1]))
    if rank_b == 1:
        # 1-D rhs is promoted to a single column.
        if not out_shape:
            out_shape = [1]
        b = _op.expand_dims(b, axis=1)
    else:
        # Transpose matrix b if needed
        if transpose_b:
            # Rotate the last axis to the front so the reduction axis leads.
            trans_axes = list(range(rank_b))
            trans_axes = trans_axes[-1:] + trans_axes[:-1]
            b = _op.transpose(b, axes=trans_axes)
        shape_b = list(_infer_type(b).checked_type.shape)
        out_shape += shape_b[1:]
        b = _op.reshape(b, newshape=(shape_b[0], -1))
    out = _op.reshape(_op.nn.matmul(a, b), newshape=out_shape)
    return out
def _mx_batch_dot(inputs, attrs):
    """Convert MXNet ``batch_dot`` to Relay ``batch_matmul``.

    Leading batch dimensions beyond one are collapsed before the matmul and
    restored afterwards. ``transpose_a`` is rejected; Relay's batch_matmul
    multiplies against a pre-transposed rhs, so an extra transpose is
    inserted when ``transpose_b`` is False.
    """
    assert len(inputs) == 2
    a, b = inputs
    a_shape = _infer_type(a).checked_type.shape
    batch_shapes = None
    if len(a_shape) > 3:
        # Fold extra leading batch dims into one (restored at the end).
        batch_shapes = a_shape[:-2]
        a = _op.reverse_reshape(a, newshape=(-1, 0, 0))
    b_shape = _infer_type(b).checked_type.shape
    if len(b_shape) > 3:
        if batch_shapes is None:
            batch_shapes = b_shape[:-2]
        b = _op.reverse_reshape(b, newshape=(-1, 0, 0))
    transpose_a = attrs.get_bool("transpose_a", False)
    transpose_b = attrs.get_bool("transpose_b", False)
    if transpose_a is True:
        msg = f'Value {transpose_a} in attribute "transpose_a" of operator batch_dot is not valid.'
        raise tvm.error.OpAttributeInvalid(msg)
    if transpose_b is False:
        # batch_matmul expects b already transposed, so transpose here.
        b = _op.transpose(b, axes=[0, 2, 1])
    out = _op.nn.batch_matmul(a, b)
    if batch_shapes is not None:
        out = _op.reverse_reshape(out, newshape=tuple(batch_shapes) + (0, 0))
    return out
def _mx_arange(inputs, attrs):
    """Convert MXNet ``_arange`` to Relay ``arange``.

    Start/stop/step attributes become scalar constants of the requested
    dtype; "stop" may be the string "None" (open-ended). The "repeat"
    attribute is not supported.
    """
    assert len(inputs) == 0
    if attrs.get_int("repeat", 1) != 1:
        raise tvm.error.OpAttributeUnimplemented(
            'Attribute "repeat" is not supported in operator arange.'
        )
    dtype = attrs.get_str("dtype", "float32")
    stop = attrs.get_str("stop", "None")
    if stop == "None":
        stop = None
    else:
        stop = _expr.const(float(stop), dtype=dtype)
    new_attrs = {}
    new_attrs["start"] = _expr.const(attrs.get_float("start", 0.0), dtype=dtype)
    new_attrs["stop"] = stop
    new_attrs["step"] = _expr.const(attrs.get_float("step", 1.0), dtype=dtype)
    new_attrs["dtype"] = dtype
    return _op.arange(**new_attrs)
# pylint: disable=unused-argument
def _mx_make_loss(inputs, attrs):
# while doing inference make_loss does not have any effect
# and it should be mapped to identity
return inputs[0]
def _mx_contrib_arange_like(inputs, attrs):
    """Convert MXNet ``_contrib_arange_like`` to Relay ``arange``.

    Produces a range whose length matches either the total element count of
    the input (axis=None) or the size of one axis. Requires a static shape;
    the "repeat" attribute is not supported.
    """
    assert len(inputs) == 1
    if attrs.get_int("repeat", 1) != 1:
        raise tvm.error.OpAttributeUnimplemented(
            'Attribute "repeat" is not supported in operator arange_like.'
        )
    ty = _infer_type(inputs[0]).checked_type
    assert ty
    shape, dtype = get_const_tuple(ty.shape), ty.dtype
    axis = attrs.get_int("axis", None)
    if axis is None:
        # Range over every element; the result keeps the input's full shape.
        n_elems = 1
        for dim in shape:
            if not isinstance(dim, int):
                raise tvm.error.OpError("Don't support arange_like with symbolic input shape.")
            n_elems *= dim
    else:
        # Range over a single (possibly negative-indexed) axis; result is 1-D.
        axis = axis + len(shape) if axis < 0 else axis
        assert 0 <= axis < len(shape)
        n_elems = shape[axis]
        if not isinstance(n_elems, int):
            raise tvm.error.OpError("Don't support arange_like with symbolic input shape.")
        shape = (n_elems,)
    start = attrs.get_float("start", 0.0)
    step = attrs.get_float("step", 1.0)
    stop = start + step * n_elems
    new_attrs = {}
    new_attrs["start"] = _expr.const(start, dtype=dtype)
    new_attrs["stop"] = _expr.const(stop, dtype=dtype)
    new_attrs["step"] = _expr.const(step, dtype=dtype)
    new_attrs["dtype"] = dtype
    ret = _op.arange(**new_attrs)
    if len(shape) > 1:
        ret = _op.reshape(ret, shape)
    return ret
def _mx_repeat(inputs, attrs):
    """Convert MXNet ``repeat`` to Relay ``repeat``."""
    assert len(inputs) == 1
    repeats = attrs.get_int("repeats")
    axis = attrs.get_int("axis", 0)
    return _op.repeat(inputs[0], repeats=repeats, axis=axis)
def _mx_tile(inputs, attrs):
    """Convert MXNet ``tile`` to Relay ``tile``."""
    assert len(inputs) == 1
    return _op.tile(inputs[0], reps=attrs.get_int_tuple("reps"))
def _mx_take(inputs, attrs):
    """Convert MXNet ``take`` to Relay ``take``; "raise" mode is unsupported."""
    assert len(inputs) == 2
    mode = attrs.get_str("mode", "clip")
    if mode == "raise":
        raise tvm.error.OpAttributeUnimplemented("take with raise mode is not supported yet")
    # Relay take wants integer indices; MXNet may deliver floats.
    indices = inputs[1].astype("int32")
    return _op.take(inputs[0], indices, axis=attrs.get_int("axis", 0), mode=mode)
def _mx_gather_nd(inputs, attrs):
    """Convert MXNet ``gather_nd`` to Relay ``gather_nd``."""
    assert len(inputs) == 2
    assert len(_infer_shape(inputs[1])) > 1, "index tensor to have at least 2 dimensions"
    data, indices = inputs
    return _op.gather_nd(data, indices)
def _mx_reverse(inputs, attrs):
    """Convert MXNet ``reverse`` to Relay ``reverse``."""
    assert len(inputs) == 1
    return _op.reverse(inputs[0], axis=attrs.get_int("axis"))
def _mx_sequence_reverse(inputs, attrs):
    """Convert MXNet ``SequenceReverse``.

    Without sequence lengths it is a plain axis reversal; with lengths it
    becomes ``reverse_sequence``.
    """
    if not attrs.get_bool("use_sequence_length"):
        assert len(inputs) == 1
        return _op.reverse(inputs[0], axis=attrs.get_int("axis"))
    assert len(inputs) == 2
    # MXNet assumes batch_axis as 1.
    return _op.reverse_sequence(
        inputs[0], inputs[1], seq_axis=attrs.get_int("axis"), batch_axis=1
    )
def _mx_roi_align(inputs, attrs):
    """Convert MXNet ``_contrib_ROIAlign`` to Relay ``roi_align`` (NCHW)."""
    return _op.vision.roi_align(
        inputs[0],
        inputs[1],
        pooled_size=attrs.get_int_tuple("pooled_size"),
        spatial_scale=attrs.get_float("spatial_scale"),
        sample_ratio=attrs.get_int("sample_ratio", -1),
        layout="NCHW",
    )
def _mx_resize(inputs, attrs):
    """Convert MXNet image resize to Relay ``resize2d``.

    Output size comes from explicit height/width attributes, optionally
    overridden by scale factors applied to the input's spatial dims
    (assumes NCHW: shape[2]=H, shape[3]=W).
    """
    scale_height = attrs.get_float("scale_height", None)
    scale_width = attrs.get_float("scale_width", None)
    height = attrs.get_int("height", 1)
    width = attrs.get_int("width", 1)
    expr = _infer_type(inputs[0])
    shape = expr.checked_type.shape
    if scale_height is not None:
        height = (scale_height * shape[2]).astype("int32")
    if scale_width is not None:
        width = (scale_width * shape[3]).astype("int32")
    size = (height, width)
    return _op.image.resize2d(inputs[0], size, coordinate_transformation_mode="align_corners")
def _mx_amp_multicast(inputs, attrs):
    """Cast all inputs to a common AMP dtype (float16/float32 only).

    Narrow mode prefers float16 when any input is float16; wide mode prefers
    float32 when any input is float32; otherwise the first input's dtype wins.
    """
    cast_narrow = attrs.get_bool("cast_narrow", False)
    dtypes = [_infer_type(x).checked_type.dtype for x in inputs]
    supported_dtypes = ["float16", "float32"]
    assert all(
        [x in supported_dtypes for x in dtypes]
    ), "amp_multicast support is limited to float16 and float32 inputs only."
    dtype = dtypes[0]
    if cast_narrow and "float16" in dtypes:
        dtype = "float16"
    elif not cast_narrow and "float32" in dtypes:
        dtype = "float32"
    return [_op.cast(x, dtype) for x in inputs]
def _mx_grid_generator(inputs, attrs):
    """Convert MXNet ``GridGenerator`` (affine / warp) to Relay ``affine_grid``.

    "affine" feeds the 2x3 transform directly; "warp" builds an identity
    grid and adds the (normalized) flow field on top of it.
    """
    transform_type = attrs.get_str("transform_type")
    if transform_type == "affine":
        target_shape = attrs.get_int_tuple("target_shape")
        return _op.image.affine_grid(_op.reshape(inputs[0], (0, 2, 3)), target_shape)
    if transform_type == "warp":
        checked_type = _infer_type(inputs[0]).checked_type
        batch, _, height, width = get_const_tuple(checked_type.shape)
        dtype = checked_type.dtype
        # Identity 2x3 affine transform, broadcast over the batch.
        identity_affine = relay.const(np.array([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]], dtype=dtype))
        identity_affine = _op.broadcast_to(identity_affine, (batch, 2, 3))
        # Normalize the pixel-space flow into the [-1, 1] grid coordinates.
        normalizer = (2.0 / np.array([width - 1, height - 1])).reshape(1, -1, 1, 1).astype(dtype)
        normalized_flow = inputs[0] * relay.const(normalizer)
        grid = _op.image.affine_grid(identity_affine, (height, width))
        return grid + normalized_flow
    # Bug fix: add the missing space between the message and the value.
    raise ValueError("unknown transform type " + transform_type)
def _mx_bilinear_sampler(inputs, attrs):
    """Convert MXNet ``BilinearSampler`` to Relay ``grid_sample`` (NCHW)."""
    return _op.image.grid_sample(inputs[0], inputs[1], "bilinear", "NCHW")
def _mx_roi_pooling(inputs, attrs):
    """Convert MXNet ``ROIPooling`` to Relay ``roi_pool`` (NCHW)."""
    return _op.vision.roi_pool(
        inputs[0],
        inputs[1],
        pooled_size=attrs.get_int_tuple("pooled_size"),
        spatial_scale=attrs.get_float("spatial_scale"),
        layout="NCHW",
    )
def _mx_proposal(inputs, attrs):
    """Convert MXNet ``_contrib_Proposal`` to Relay ``vision.proposal``.

    Inputs are (cls_prob, bbox_pred, im_info); the "output_score" option is
    not supported.
    """
    new_attrs = {}
    new_attrs["scales"] = attrs.get_float_tuple("scales", (4.0, 8.0, 16.0, 32.0))
    new_attrs["ratios"] = attrs.get_float_tuple("ratios", (0.5, 1.0, 2.0))
    new_attrs["feature_stride"] = attrs.get_int("feature_stride", 16)
    new_attrs["threshold"] = attrs.get_float("threshold", 0.7)
    new_attrs["rpn_pre_nms_top_n"] = attrs.get_int("rpn_pre_nms_top_n", 6000)
    new_attrs["rpn_post_nms_top_n"] = attrs.get_int("rpn_post_nms_top_n", 300)
    new_attrs["rpn_min_size"] = attrs.get_int("rpn_min_size", 16)
    new_attrs["iou_loss"] = attrs.get_bool("iou_loss", False)
    assert not attrs.get_bool("output_score", False), "proposal doesn't support output score"
    return _op.vision.proposal(inputs[0], inputs[1], inputs[2], **new_attrs)
def _mx_box_nms(inputs, attrs):
    """Convert MXNet ``_contrib_box_nms`` to Relay.

    Lowers to ``get_valid_counts`` (filter by score threshold) followed by
    ``non_max_suppression``. Only the "corner" box format is supported for
    both input and output.
    """
    force_suppress = attrs.get_bool("force_suppress", False)
    iou_thresh = attrs.get_float("overlap_thresh", 0.5)
    top_k = attrs.get_int("topk", -1)
    valid_thresh = attrs.get_float("valid_thresh", 0)
    coord_start = attrs.get_int("coord_start", 2)
    score_index = attrs.get_int("score_index", 1)
    id_index = attrs.get_int("id_index", -1)
    in_format = attrs.get_str("in_format", "corner")
    out_format = attrs.get_str("out_format", "corner")
    if in_format != "corner":
        raise tvm.error.OpAttributeInvalid(
            'Value of attribute "in_format" must equal "corner" for operator box_nms.'
        )
    if out_format != "corner":
        raise tvm.error.OpAttributeInvalid(
            'Value of attribute "out_format" must equal "corner" for operator box_nms.'
        )
    # ret = (valid_count, sorted_data, indices)
    ret = _op.vision.get_valid_counts(
        inputs[0], score_threshold=valid_thresh, id_index=id_index, score_index=score_index
    )
    nms_out = _op.vision.non_max_suppression(
        ret[1],
        ret[0],
        ret[2],
        iou_threshold=iou_thresh,
        force_suppress=force_suppress,
        top_k=top_k,
        coord_start=coord_start,
        score_index=score_index,
        id_index=id_index,
        return_indices=False,
        invalid_to_bottom=True,
    )
    return nms_out
def _mx_box_decode(inputs, attrs):
    """Convert MXNet ``_contrib_box_decode``.

    Decodes predicted offsets (inputs[0], shape (B, N, 4)) against anchors
    (inputs[1], shape (1, N, 4)) into corner-format boxes. Anchors in
    "corner" format are first converted to center format; width/height
    deltas are optionally clipped before exponentiation.
    """
    std0 = relay.const(attrs.get_float("std0", 1), "float32")
    std1 = relay.const(attrs.get_float("std1", 1), "float32")
    std2 = relay.const(attrs.get_float("std2", 1), "float32")
    std3 = relay.const(attrs.get_float("std3", 1), "float32")
    clip = attrs.get_float("clip", -1)
    in_format = attrs.get_str("format", "corner")
    anchors = inputs[1]  # (1, N, 4) encoded in corner or center
    a = _op.split(anchors, indices_or_sections=4, axis=-1)
    # Convert to format "center".
    if in_format == "corner":
        a_width = a[2] - a[0]
        a_height = a[3] - a[1]
        a_x = a[0] + a_width * relay.const(0.5, "float32")
        a_y = a[1] + a_height * relay.const(0.5, "float32")
    else:
        a_x, a_y, a_width, a_height = a
    data = inputs[0]  # (B, N, 4) predicted bbox offset
    p = _op.split(data, indices_or_sections=4, axis=-1)
    # Decoded center and (log-space) size deltas, scaled by per-coord stds.
    ox = p[0] * std0 * a_width + a_x
    oy = p[1] * std1 * a_height + a_y
    dw = p[2] * std2
    dh = p[3] * std3
    if clip > 0:
        clip = relay.const(clip, "float32")
        dw = _op.minimum(dw, clip)
        dh = _op.minimum(dh, clip)
    dw = _op.exp(dw)
    dh = _op.exp(dh)
    ow = dw * a_width * relay.const(0.5, "float32")
    oh = dh * a_height * relay.const(0.5, "float32")
    # Emit corner format: (xmin, ymin, xmax, ymax).
    out = _op.concatenate([ox - ow, oy - oh, ox + ow, oy + oh], axis=-1)
    return out
def _mx_l2_normalize(inputs, attrs):
    """Convert MXNet ``L2Normalization`` to Relay ``l2_normalize``.

    "channel" normalizes over axis 1, "instance" over all non-batch axes,
    "spatial" over the spatial axes only.
    """
    mode = attrs.get_str("mode", "instance")
    if mode == "channel":
        axis = [1]
    elif mode in ("instance", "spatial"):
        ndim = len(_infer_type(inputs[0]).checked_type.shape)
        axis = list(range(1 if mode == "instance" else 2, ndim))
    else:
        raise tvm.error.OpAttributeInvalid(
            f'Mode "{mode}" is not supported for operator l2_normalize.'
        )
    return _op.nn.l2_normalize(inputs[0], axis=axis, eps=attrs.get_float("eps", 1e-10))
def _mx_softsign(inputs, attrs):
    """softsign(x) = x / (1 + |x|)."""
    x = inputs[0]
    return x / (_expr.const(1.0) + _op.abs(x))
def _mx_softmin(inputs, attrs):
    """softmin(x) = softmax(-x)."""
    negated = _op.negative(inputs[0])
    return _op.nn.softmax(negated, attrs.get_int("axis", -1))
def _mx_hard_sigmoid(inputs, attrs):
    """hard_sigmoid(x) = clip(0.2 * x + 0.5, 0, 1)."""
    scaled = (_expr.const(0.2) * inputs[0]) + _expr.const(0.5)
    return _op.clip(scaled, a_min=0.0, a_max=1.0)
def _mx_reciprocal(inputs, attrs):
    """reciprocal(x) = 1 / x."""
    return _expr.const(1.0) / inputs[0]
def _mx_shape_array(inputs, attrs):
    """Convert MXNet ``shape_array`` to Relay ``shape_of`` (int64).

    The lhs/rhs begin/end slicing options are not supported.
    """
    assert len(inputs) == 1
    for key in ("lhs_begin", "lhs_end", "rhs_begin", "rhs_end"):
        if attrs.get_int(key, None) is not None:
            raise tvm.error.OpAttributeUnimplemented(f"shape_array doesn't support {key}")
    return _op.shape_of(inputs[0], dtype="int64")
def _mx_full(inputs, attrs):
    """Convert MXNet ``_full`` to Relay ``full`` (constant-filled tensor)."""
    assert len(inputs) == 0
    dtype = attrs.get_str("dtype", "float32")
    fill_value = _expr.const(attrs.get_float("value"), dtype)
    return _op.full(fill_value, attrs.get_int_tuple("shape"), dtype)
def _mx_squeeze(inputs, attrs):
    """Convert MXNet ``squeeze`` to Relay ``squeeze``."""
    assert len(inputs) == 1
    return _op.squeeze(inputs[0], attrs.get_int_tuple("axis", None))
def _mx_broadcast_axis(inputs, attrs):
    """Convert MXNet ``broadcast_axis`` to Relay ``broadcast_to``.

    Each listed axis (which must currently have size 1) is expanded to the
    corresponding entry in "size"; all other axes are kept as-is.
    """
    assert len(inputs) == 1
    axis = attrs.get_int_tuple("axis", [])
    size = attrs.get_int_tuple("size", [])
    assert len(axis) == len(size)
    if len(axis) == 0:
        # Nothing to broadcast.
        return inputs[0]
    expr = _infer_type(inputs[0])
    src_shape = expr.checked_type.shape
    tgt_shape = []
    for i, dim in enumerate(src_shape):
        if i not in axis:
            tgt_shape.append(dim)
        else:
            # Only size-1 axes can be broadcast.
            assert int(dim) == 1
            idx = axis.index(i)
            tgt_shape.append(size[idx])
    return _op.broadcast_to(inputs[0], tgt_shape)
def _mx_embedding(inputs, _):
    """Embedding lookup: gather rows of the weight table by index."""
    assert len(inputs) == 2
    lookup_ids = inputs[0].astype("int32")
    table = inputs[1]
    return _op.take(table, lookup_ids, axis=0)
def _mx_smooth_l1(inputs, attrs):
    """Convert MXNet ``smooth_l1``.

    Piecewise: (s^2/2) * x^2 where |x| < 1/s^2, else |x| - 0.5/s^2, with
    s = the "scalar" attribute.
    """
    scalar = attrs.get_float("scalar", 1.0)
    scalar_sq = scalar * scalar
    mask = _op.less(inputs[0], _expr.const(1.0 / scalar_sq, dtype="float32"))
    return _op.where(
        mask,
        _expr.const(scalar_sq / 2.0, dtype="float32") * inputs[0] * inputs[0],
        # Consistency fix: give this constant an explicit float32 dtype like
        # every other constant in this expression.
        _op.abs(inputs[0]) - _expr.const(0.5 / scalar_sq, dtype="float32"),
    )
def _mx_deformable_convolution(inputs, attrs):
    """Convert MXNet ``_contrib_DeformableConvolution``.

    Inputs are (data, offset, weight[, bias]); only NCHW layout is
    supported. Bias is added separately when "no_bias" is unset.
    """
    new_attrs = {}
    new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")
    new_attrs["strides"] = attrs.get_int_tuple("stride")
    new_attrs["padding"] = attrs.get_int_tuple("pad")
    new_attrs["dilation"] = attrs.get_int_tuple("dilate")
    new_attrs["channels"] = attrs.get_int("num_filter")
    new_attrs["deformable_groups"] = attrs.get_int("num_deformable_group", 1)
    new_attrs["groups"] = attrs.get_int("num_group", 1)
    assert attrs.get_str("layout", "NCHW") == "NCHW", "Deformable conv2d only supports NCHW layout"
    use_bias = not attrs.get_bool("no_bias", False)
    res = _op.nn.deformable_conv2d(inputs[0], inputs[1], inputs[2], **new_attrs)
    if use_bias:
        assert len(inputs) == 4
        res = _op.nn.bias_add(res, inputs[3])
    return res
def _mx_argsort(inputs, attrs):
    """Convert MXNet ``argsort`` to Relay ``argsort``."""
    assert len(inputs) == 1
    return _op.argsort(
        inputs[0],
        axis=attrs.get_int("axis", -1),
        is_ascend=attrs.get_bool("is_ascend", True),
        dtype=attrs.get_str("dtype", "float32"),
    )
def _mx_topk(inputs, attrs):
    """Convert MXNet ``topk`` to Relay ``topk``.

    MXNet's "value" return type maps to Relay's "values"; "mask" is not
    supported.
    """
    assert len(inputs) == 1
    new_attrs = {}
    new_attrs["k"] = attrs.get_int("k", 1)
    new_attrs["axis"] = attrs.get_int("axis", -1)
    new_attrs["is_ascend"] = attrs.get_bool("is_ascend", False)
    ret_type = attrs.get_str("ret_typ", "indices")
    if ret_type == "mask":
        raise tvm.error.OpAttributeUnimplemented(
            "Attribute ret_type=mask is not supported in topk operator"
        )
    new_attrs["ret_type"] = "values" if ret_type == "value" else ret_type
    new_attrs["dtype"] = attrs.get_str("dtype", "float32")
    return _op.topk(inputs[0], **new_attrs)
def _mx_sequence_mask(inputs, attrs):
    """Convert MXNet ``SequenceMask``; identity when lengths are unused."""
    assert len(inputs) in (1, 2)
    if not attrs.get_bool("use_sequence_length", False):
        return inputs[0]
    return _op.sequence_mask(
        *inputs,
        mask_value=attrs.get_float("value", 0.0),
        axis=attrs.get_int("axis", 0),
    )
def _mx_contrib_div_sqrt_dim(inputs, _):
    """Convert ``_contrib_div_sqrt_dim``: divide x by sqrt(last-dim size)."""
    assert len(inputs) == 1
    # Infer the checked type once; the original called _infer_type twice
    # (once for ndim, once for dtype) on the same expression.
    ty = _infer_type(inputs[0]).checked_type
    ndim = len(ty.shape)
    # Dynamic last-dimension size, cast to the data dtype for the division.
    dim = _op.take(_op.shape_of(inputs[0]), _expr.const(ndim - 1, dtype="int32"))
    sqrt_dim = _op.sqrt(dim.astype(ty.dtype))
    out = inputs[0] / sqrt_dim
    return out
def _mx_rnn_param_concat(inputs, _):
    """Keep RNN parameters as a nested list instead of concatenating them.

    The RNN op is unravelled later, so the fused parameter blob is never
    materialized; downstream code indexes into this list directly.
    """
    # We don't need to concatenate RNN params because we will unravel the RNN op
    return [inputs]
def _mx_rnn_layer(inputs, attrs):
    """Unravel a fused MXNet ``RNN`` layer into per-timestep Relay cells.

    Supports rnn (tanh/relu), gru, and lstm modes, multiple stacked layers,
    and bidirectional execution; only TNC layout is handled. Weights arrive
    as the nested list produced by ``_mx_rnn_param_concat``. Returns the
    stacked per-timestep outputs, followed by the final states when
    "state_outputs" is set.
    """
    def _rnn_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias, activation):
        # Vanilla RNN step: act(W_i x + b_i + W_h h + b_h).
        i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
        h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
        out = _activation_map[activation](i2h + h2h)
        return out, [out]
    def _gru_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
        # GRU step; gate projections come packed as (reset, update, new).
        expr = _infer_type(data)
        dtype = expr.checked_type.dtype
        i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
        h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
        i2h_r, i2h_z, i2h = _op.split(i2h, indices_or_sections=3, axis=1)
        h2h_r, h2h_z, h2h = _op.split(h2h, indices_or_sections=3, axis=1)
        reset_gate = _activation_map["sigmoid"](i2h_r + h2h_r)
        update_gate = _activation_map["sigmoid"](i2h_z + h2h_z)
        next_h_tmp = _activation_map["tanh"](reset_gate * h2h + i2h)
        next_h = (_expr.const(1, dtype) - update_gate) * next_h_tmp + update_gate * states[0]
        return next_h, [next_h]
    def _lstm_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
        # LSTM step; gate projections come packed as (in, forget, new, out).
        i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
        h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
        gates = i2h + h2h
        slice_gates = _op.split(gates, indices_or_sections=4, axis=1)
        in_gate = _activation_map["sigmoid"](slice_gates[0])
        forget_gate = _activation_map["sigmoid"](slice_gates[1])
        in_transform = _activation_map["tanh"](slice_gates[2])
        out_gate = _activation_map["sigmoid"](slice_gates[3])
        next_c = forget_gate * states[1] + in_gate * in_transform
        next_h = out_gate * _activation_map["tanh"](next_c)
        return next_h, [next_h, next_c]
    num_layers = attrs.get_int("num_layers", 1)
    mode = attrs.get_str("mode")
    output_states = attrs.get_bool("state_outputs", False)
    if mode.startswith("rnn"):
        # "rnn_tanh" / "rnn_relu" encode the activation in the mode string.
        mode, activation = mode.split("_")
    assert mode in ["rnn", "gru", "lstm"]
    bidirectional = attrs.get_bool("bidirectional", False)
    direct = 2 if bidirectional else 1
    layout = attrs.get_str("layout", "TNC")
    if layout != "TNC":
        raise tvm.error.OpAttributeUnimplemented(
            "RNN with layout other than TNC is not supported yet"
        )
    num_states = 2 if mode == "lstm" else 1
    assert len(inputs) == num_states + 2
    seq_data = inputs[0]
    concat_weight = inputs[1]
    init_states = inputs[2:]
    expr = _infer_type(seq_data)
    data_shape = expr.checked_type.shape
    seq_len = int(data_shape[0])
    assert len(concat_weight) == num_layers * 4 * direct
    # Materialize symbolic zero-init states into concrete zeros tensors.
    for idx, state in enumerate(init_states[:]):
        if isinstance(state, dict):
            node = state
            attrs = StrAttrsDict(node.get("attrs", {}))
            op_name = node["op"]
            # by default, RNN layer uses zeros to initialize states
            assert op_name == "_zeros"
            shape = attrs.get_int_tuple("shape")
            dtype = attrs.get_str("dtype", "float32")
            init_layout = attrs.get_str("__layout__")
            new_shape = list(shape)
            for i, dim in enumerate(shape):
                if dim == 0:
                    # A zero dim is a placeholder resolved from the data shape.
                    axis = layout.find(init_layout[i])
                    assert axis >= 0
                    new_shape[i] = int(data_shape[axis])
            init_states[idx] = _op.zeros(new_shape, dtype)
    # Unpack per-layer (and per-direction) weights, biases, and states
    # from the flat concat_weight list.
    weights = []
    bias = []
    states = []
    back_weights = []
    back_bias = []
    back_states = []
    for i in range(num_layers):
        weights.append(
            [concat_weight[i * 2 * direct].args[0], concat_weight[i * 2 * direct + 1].args[0]]
        )
        bias.append(
            [
                concat_weight[(num_layers + i) * 2 * direct].args[0],
                concat_weight[(num_layers + i) * 2 * direct + 1].args[0],
            ]
        )
        s = []
        for state in init_states:
            s.append(_op.take(state, _expr.const(i * direct, "int32"), axis=0))
        states.append(s)
        if bidirectional:
            back_weights.append(
                [
                    concat_weight[i * 2 * direct + 2].args[0],
                    concat_weight[i * 2 * direct + 3].args[0],
                ]
            )
            back_bias.append(
                [
                    concat_weight[(num_layers + i) * 2 * direct + 2].args[0],
                    concat_weight[(num_layers + i) * 2 * direct + 3].args[0],
                ]
            )
            s = []
            for state in init_states:
                s.append(_op.take(state, _expr.const(i * direct + 1, "int32"), axis=0))
            back_states.append(s)
    # Slice the sequence into per-timestep tensors, then run each layer
    # over the full sequence (forward, and backward when bidirectional).
    xs = [_op.take(seq_data, _expr.const(t, "int32"), axis=0) for t in range(seq_len)]
    for l in range(num_layers):
        outputs = []
        back_outputs = []
        for x in xs:
            if mode == "rnn":
                out, new_states = _rnn_cell(x, states[l], *weights[l], *bias[l], activation)
            elif mode == "gru":
                out, new_states = _gru_cell(x, states[l], *weights[l], *bias[l])
            else:  # mode == "lstm"
                out, new_states = _lstm_cell(x, states[l], *weights[l], *bias[l])
            states[l] = new_states
            outputs.append(out)
        if bidirectional:
            for x in reversed(xs):
                if mode == "rnn":
                    out, new_states = _rnn_cell(
                        x, back_states[l], *back_weights[l], *back_bias[l], activation
                    )
                elif mode == "gru":
                    out, new_states = _gru_cell(x, back_states[l], *back_weights[l], *back_bias[l])
                else:  # mode == "lstm"
                    out, new_states = _lstm_cell(x, back_states[l], *back_weights[l], *back_bias[l])
                back_states[l] = new_states
                back_outputs.append(out)
            back_outputs.reverse()
            # Concatenate forward/backward features per timestep.
            concat_outputs = []
            for t, out in enumerate(outputs):
                new_out = _op.concatenate([out, back_outputs[t]], axis=-1)
                concat_outputs.append(new_out)
            outputs = concat_outputs
        # This layer's outputs feed the next layer.
        xs = outputs
    ret = [_op.stack(outputs, axis=0)]
    if output_states:
        # Append the final hidden (and cell, for LSTM) states per layer.
        for i in range(num_states):
            inputs = []
            for l, s in enumerate(states):
                inputs.append(s[i])
                if bidirectional:
                    inputs.append(back_states[l][i])
            ret.append(_op.stack(inputs, axis=0))
    return ret
def _mx_one_hot(inputs, attrs):
    """Convert MXNet ``one_hot`` to Relay ``one_hot`` (new axis appended last)."""
    idx = inputs[0].astype("int32")
    out_dtype = attrs.get_str("dtype", "int32")
    hot = tvm.relay.const(attrs.get_float("on_value", 1.0), out_dtype)
    cold = tvm.relay.const(attrs.get_float("off_value", 0.0), out_dtype)
    return _op.one_hot(idx, hot, cold, attrs.get_int("depth", 0), -1, out_dtype)
def _mx_depth_to_space(inputs, attrs):
    """Convert MXNet ``depth_to_space`` to Relay ``nn.depth_to_space``."""
    assert len(inputs) == 1
    return _op.nn.depth_to_space(inputs[0], block_size=attrs.get_int("block_size"))
def _mx_space_to_depth(inputs, attrs):
    """Convert MXNet ``space_to_depth`` to Relay ``nn.space_to_depth``."""
    assert len(inputs) == 1
    return _op.nn.space_to_depth(inputs[0], block_size=attrs.get_int("block_size"))
def _mx_correlation(inputs, attrs):
    """Convert MXNet ``Correlation`` to Relay ``nn.correlation`` (NCHW)."""
    assert len(inputs) == 2
    return _op.nn.correlation(
        inputs[0],
        inputs[1],
        kernel_size=attrs.get_int("kernel_size", 1),
        max_displacement=attrs.get_int("max_displacement", 1),
        stride1=attrs.get_int("stride1", 1),
        stride2=attrs.get_int("stride2", 1),
        padding=attrs.get_int("pad_size", 0),
        is_multiply=attrs.get_bool("is_multiply", True),
        layout="NCHW",
    )
def _mx_contrib_fifo_buffer(inputs, attrs):
    """Convert MXNet ``_contrib_fifo_buffer`` to Relay ``nn.fifo_buffer``."""
    return _op.nn.fifo_buffer(*inputs, axis=attrs.get_int("axis"))
def _mx_contrib_interleaved_matmul_selfatt_qk(inputs, attrs):
    """Compute scaled Q·K^T attention scores from an interleaved QKV tensor.

    Equivalent MXNet pseudo-code:
    tmp = mx.nd.reshape(queries_keys_values, shape=(0, 0, num_heads, 3, -1))
    q_proj = mx.nd.transpose(tmp[:,:,:,0,:], axes=(1, 2, 0, 3))
    q_proj = mx.nd.reshape(q_proj, shape=(-1, 0, 0), reverse=True)
    q_proj = mx.nd.contrib.div_sqrt_dim(q_proj)
    k_proj = mx.nd.transpose(tmp[:,:,:,1,:], axes=(1, 2, 0, 3))
    k_proj = mx.nd.reshape(k_proj, shape=(-1, 0, 0), reverse=True)
    output = mx.nd.batch_dot(q_proj, k_proj, transpose_b=True)
    """
    assert len(inputs) == 1
    qkv = inputs[0]
    num_heads = attrs.get_int("heads")
    # Split the packed projection into (q, k, v) along a new axis.
    qkv = _op.reshape(qkv, newshape=(0, 0, num_heads, 3, -1))
    q_proj = _op.take(qkv, _expr.const(0, "int32"), axis=3)
    q_proj = _op.transpose(q_proj, axes=[1, 2, 0, 3])
    q_proj = _op.reverse_reshape(q_proj, newshape=(-1, 0, 0))
    # Scale queries by 1/sqrt(head_dim).
    q_proj = _mx_contrib_div_sqrt_dim([q_proj], None)
    k_proj = _op.take(qkv, _expr.const(1, "int32"), axis=3)
    k_proj = _op.transpose(k_proj, axes=[1, 2, 0, 3])
    k_proj = _op.reverse_reshape(k_proj, newshape=(-1, 0, 0))
    # batch_matmul multiplies against a pre-transposed rhs, giving Q·K^T.
    ret = _op.nn.batch_matmul(q_proj, k_proj)
    return ret
def _mx_contrib_interleaved_matmul_selfatt_valatt(inputs, attrs):
    """Apply attention weights to the V slice of an interleaved QKV tensor.

    Equivalent MXNet pseudo-code:
    tmp = mx.nd.reshape(queries_keys_values, shape=(0, 0, num_heads, 3, -1))
    v_proj = mx.nd.transpose(tmp[:,:,:,2,:], axes=(1, 2, 0, 3))
    v_proj = mx.nd.reshape(v_proj, shape=(-1, 0, 0), reverse=True)
    output = mx.nd.batch_dot(attention, v_proj)
    output = mx.nd.reshape(output, shape=(-1, num_heads, 0, 0), reverse=True)
    output = mx.nd.transpose(output, axes=(2, 0, 1, 3))
    output = mx.nd.reshape(output, shape=(0, 0, -1))
    """
    assert len(inputs) == 2
    qkv, att = inputs
    num_heads = attrs.get_int("heads")
    qkv = _op.reshape(qkv, newshape=(0, 0, num_heads, 3, -1))
    # Index 2 selects the value projection out of the packed (q, k, v).
    v_proj = _op.take(qkv, _expr.const(2, "int32"), axis=3)
    v_proj = _op.transpose(v_proj, axes=(1, 2, 0, 3))
    v_proj = _op.reverse_reshape(v_proj, newshape=(-1, 0, 0))
    # Pre-transpose for batch_matmul, which multiplies by a transposed rhs.
    v_proj = _op.transpose(v_proj, axes=[0, 2, 1])
    out = _op.nn.batch_matmul(att, v_proj)
    # Unfold heads back out and restore (seq, batch, feature) layout.
    out = _op.reverse_reshape(out, newshape=(-1, num_heads, 0, 0))
    out = _op.transpose(out, axes=(2, 0, 1, 3))
    out = _op.reshape(out, newshape=(0, 0, -1))
    return out
def _mx_cond(inputs, attrs, subgraphs):
    """Convert MXNet ``cond`` control flow into a Relay if-expression.

    ``subgraphs`` holds [cond, then, else] graphs; the ``*_input_locs``
    attributes (JSON lists of indices) select which of *inputs* feed each
    subgraph.
    """
    assert len(subgraphs) == 3
    cond_input_locs = json.loads(attrs.get_str("cond_input_locs"))
    then_input_locs = json.loads(attrs.get_str("then_input_locs"))
    else_input_locs = json.loads(attrs.get_str("else_input_locs"))
    num_outputs = attrs.get_int("num_outputs")
    # Re-bind every incoming expr as a typed var so each subgraph function can
    # be constructed against known argument types.
    input_args = []
    for i, arg in enumerate(inputs):
        var = _expr.var(f"arg{i}", _infer_type(arg).checked_type)
        input_args.append(var)
    cond_args = [input_args[i] for i in cond_input_locs]
    then_args = [input_args[i] for i in then_input_locs]
    else_args = [input_args[i] for i in else_input_locs]
    cond_arg_shapes = [arg.type_annotation.shape for arg in cond_args]
    cond_arg_dtype_info = [arg.type_annotation.dtype for arg in cond_args]
    cond_func = _from_mxnet_impl(subgraphs[0], cond_arg_shapes, cond_arg_dtype_info)
    cond = _expr.Call(cond_func, cond_args).astype("bool")
    cond_shape = get_const_tuple(_infer_type(cond).checked_type.shape)
    if len(cond_shape) > 0:
        assert len(cond_shape) == 1 and cond_shape[0] == 1, "Condition is not scalar"
        # Collapse the (1,)-shaped condition to a scalar.
        # NOTE(review): index 1 is out of range for a length-1 tensor and
        # presumably relies on take's index clipping to read element 0 -- confirm.
        cond = _op.take(cond, _expr.const(1, "int"))
    sb = _scope_builder.ScopeBuilder()
    with sb.if_scope(cond):
        then_arg_shapes = [arg.type_annotation.shape for arg in then_args]
        then_arg_dtype_info = [arg.type_annotation.dtype for arg in then_args]
        then_func = _from_mxnet_impl(subgraphs[1], then_arg_shapes, then_arg_dtype_info)
        sb.ret(_expr.Call(then_func, then_args))
    with sb.else_scope():
        else_arg_shapes = [arg.type_annotation.shape for arg in else_args]
        else_arg_dtype_info = [arg.type_annotation.dtype for arg in else_args]
        else_func = _from_mxnet_impl(subgraphs[2], else_arg_shapes, else_arg_dtype_info)
        sb.ret(_expr.Call(else_func, else_args))
    # Wrap the whole if-expression in a function applied to the original inputs.
    func = _function.Function(input_args, sb.get())
    ret = _expr.Call(func, inputs)
    if num_outputs > 1:
        ret = _expr.TupleWrapper(ret, num_outputs)
    return ret
def _qnn_contrib_concat(inputs, attrs):
    """Convert MKLDNN quantized concat.

    ``inputs`` holds ``num_args`` data exprs followed by interleaved
    (min, max) pairs, one pair per input. Returns (expr, out_min, out_max).
    """
    axis = attrs.get_int("dim", 1)
    num_args = attrs.get_int("num_args", -1)
    assert num_args > 0
    input_exprs = inputs[0:num_args]
    # min/max values alternate after the data exprs.
    min_start_idx = num_args
    max_start_idx = num_args + 1
    mins = list()
    maxs = list()
    for i in range(min_start_idx, len(inputs), 2):
        mins.append(inputs[i])
    for i in range(max_start_idx, len(inputs), 2):
        maxs.append(inputs[i])
    # Check if all the input tensors have same qnn params.
    if len(set(mins)) == 1 and len(set(maxs)) == 1:
        # Identical ranges: a plain concatenate preserves the quantization.
        output_min = mins[0]
        output_max = maxs[0]
        concat = _op.concatenate(tuple(input_exprs), axis=axis)
        return concat, output_min, output_max
    else:
        # Get all dtypes. Find input and output scales, call concatenate.
        dtypes = [_infer_type(x).checked_type.dtype for x in input_exprs]
        assert all(
            [x == "uint8" for x in dtypes]
        ), "Current support is limited to uint8 inputs only."
        new_min = min(mins)
        new_max = max(maxs)
        assert new_min == 0
        output_scale = get_mkldnn_uint8_scale(new_min, new_max)
        min_max = zip(mins, maxs)
        input_scales = [get_mkldnn_uint8_scale(x, y) for (x, y) in min_max]
        input_zeros = [0] * len(input_scales)
        output_zero = 0
        input_scales_expr = [relay.const(x, "float32") for x in input_scales]
        input_zeros_expr = [relay.const(x, "int32") for x in input_zeros]
        output_scale_expr = relay.const(output_scale, "float32")
        output_zero_expr = relay.const(output_zero, "int32")
        res = relay.qnn.op.concatenate(
            input_exprs,
            input_scales_expr,
            input_zeros_expr,
            output_scale_expr,
            output_zero_expr,
            axis=axis,
        )
        return res, new_min, new_max
def _qnn_quantize(inputs, attrs):
    """Convert MXNet quantize: resolve the output dtype, then quantize using
    the recorded calibration range.

    Returns a tuple of (quantized expr, min_calib_range, max_calib_range).
    """
    requested_type = attrs.get_str("out_type")
    if requested_type != "auto":
        out_dtype = requested_type
    elif attrs.has_attr("min_calib_range") and attrs.has_attr("max_calib_range"):
        # A non-negative calibrated minimum fits in uint8; otherwise use int8.
        out_dtype = "uint8" if attrs.get_float("min_calib_range") >= 0 else "int8"
    else:
        out_dtype = "int8"
    if out_dtype not in {"int8", "uint8"}:
        raise ValueError(f"Unsupported out_dtype: {out_dtype}")
    lo = attrs.get_float("min_calib_range", 0.0)
    hi = attrs.get_float("max_calib_range", 0.0)
    quantized_output, _, _ = quantize_mxnet_min_max(
        inputs[0], min_range=lo, max_range=hi, out_dtype=out_dtype
    )
    return quantized_output, lo, hi
def _qnn_contrib_quantized_fifo_buffer(inputs, attrs, params):
    """Convert quantized fifo_buffer; the buffer becomes a zero-filled param.

    ``inputs`` = [data, buffer, min, max]; the min/max calibration values are
    passed through unchanged.
    """
    data = inputs[0]
    buffer = inputs[1]
    min_calib_range = inputs[2]
    max_calib_range = inputs[3]
    data_dtype = _infer_type(data).checked_type.dtype
    buffer_shape = _infer_shape(buffer)
    buffer_name = _get_name(buffer)
    # Seed the buffer param with zeros of the data dtype so the graph is runnable.
    params[buffer_name] = _nd.array(np.zeros(buffer_shape).astype(data_dtype))
    new_buffer = relay.var(buffer_name, relay.TensorType(buffer_shape, data_dtype))
    # Rebind the buffer input in place so downstream consumers see the new var.
    inputs[1] = new_buffer
    res = _op.nn.fifo_buffer(data=data, buffer=new_buffer, axis=attrs.get_int("axis"))
    return res, min_calib_range, max_calib_range
def _get_subgraph_op(subgraphs, op_name):
assert len(subgraphs) == 1, f"Subgraph should have 1 node but has {len(subgraphs)}"
subgraph = subgraphs[0]
nodes = subgraph["nodes"]
assert nodes is not None
for node in nodes:
if node["op"] == op_name:
return node
raise ValueError(f"Op {op_name} was not found in the subgraph")
def _qnn_conv(inputs, attrs, subgraphs, params):
    """Convert an MKLDNN fused Convolution subgraph (quantized or FP32).

    On the quantized path the result is (expr, min_calib_range,
    max_calib_range); on the FP32 path only the Relay expression is returned.
    """

    def _has_fused_activation(_attrs, _supported_activations):
        """Return True when the subgraph fuses an Activation we support."""
        has_fused_activation = False
        if attrs.get_bool("with_act", False) or attrs.get_bool("with_postsum_act", False):
            subgraph_activation_attrs = _get_subgraph_op(subgraphs, "Activation")["attrs"]
            act_type = subgraph_activation_attrs["act_type"]
            if act_type not in _supported_activations:
                raise ValueError(f"Fused activation {act_type} is not supported at this time")
            has_fused_activation = True
        return has_fused_activation

    def _get_data_scale_and_zp(_data, _inputs, _data_min_idx, _data_max_idx):
        """Finds the Qnn params for the data expr."""
        data_min = _inputs[_data_min_idx]
        data_max = _inputs[_data_max_idx]
        assert data_min <= data_max
        data_dtype = _infer_type(_data).checked_type.dtype
        assert data_dtype in {"int8", "uint8"}
        if data_min < 0.0:
            assert (
                data_dtype == "int8"
            ), "Expect int8 when data_min < 0.0, consider quantize model with int8."
        _data_scale = (
            get_mkldnn_uint8_scale(data_min, data_max)
            if data_dtype == "uint8"
            else get_mkldnn_int8_scale(data_min, data_max)
        )
        _data_zero_point = 0
        return _data_scale, _data_zero_point

    def _get_bn_alpha_coeff(_bn_gamma_idx, _bn_beta_idx, _bn_running_mean_idx, _bn_running_var_idx):
        """Extract the BN coeff. These will be use later for BN folding into convolution."""
        # Extract relevant attrs from bn.
        bn_attrs = _get_subgraph_op(subgraphs, "BatchNorm")["attrs"]
        bn_epsilon_param = float(bn_attrs["eps"])
        bn_scale_param = bn_attrs["fix_gamma"] == "False"
        bn_center_param = True
        # Extract the relevant relay expressions.
        bn_running_var = inputs[_bn_running_var_idx]
        bn_gamma = inputs[_bn_gamma_idx]
        bn_beta = inputs[_bn_beta_idx]
        bn_running_mean = inputs[_bn_running_mean_idx]
        # Get coefficient to multiply to weights.
        bn_epsilon = relay.const(bn_epsilon_param, "float32")
        denominator = relay.sqrt(relay.add(bn_running_var, bn_epsilon))
        _bn_scale = relay.divide(relay.const(1.0, "float32"), denominator)
        if bn_scale_param:
            _bn_scale = relay.multiply(bn_gamma, _bn_scale)
        # Get the shift.
        _bn_shift = relay.negative(relay.multiply(bn_running_mean, _bn_scale))
        if bn_center_param:
            _bn_shift = relay.add(bn_beta, _bn_shift)
        return _bn_scale, _bn_shift

    def _fold_bn(_bn_scale, _bn_shift, _has_bias, _has_bn):
        """Fold BN into kernel and bias. Get new kernel and bias."""
        _kernel = inputs[1]
        if _bn_scale:
            assert attrs.get_bool("with_bn", False)
            # Weights are on OIHW, and _bn_scale is in O.
            exp_bn_scale = relay.expand_dims(_bn_scale, axis=1, num_newaxis=3)
            _kernel = relay.multiply(exp_bn_scale, _kernel)
        _bias = None
        if _has_bias:
            _bias = inputs[2]
            if _has_bn:
                assert _bn_shift is not None
                assert _bn_scale is not None
                _bias = relay.add(relay.multiply(_bn_scale, _bias), _bn_shift)
        elif _has_bn:
            assert _bn_shift is not None
            assert _bn_scale is not None
            _bias = _bn_shift
        return _kernel, _bias

    def _get_quantized_kernel(_kernel, _bias, _data_scale):
        # For quantizing, we need min/max of kernel. So, we have to pre compute this expr.
        np_kernel = _infer_value(_kernel, params).numpy()
        kernel_channel_min = np.amin(np_kernel, axis=(1, 2, 3))
        kernel_channel_max = np.amax(np_kernel, axis=(1, 2, 3))
        np_bias = None
        if _bias is not None:
            np_bias = _infer_value(_bias, params).numpy()
        return quantize_conv_weights_bias_channel_mkldnn_from_var(
            _kernel, np_bias, kernel_channel_min, kernel_channel_max, _data_scale
        )

    def _get_qnn_conv2d(
        _data,
        _kernel,
        _data_zero_point,
        _kernel_zero_point,
        _data_scale,
        _kernel_vector_scale,
        _conv2d_attrs,
    ):
        """Build the qnn.conv2d call from precomputed quantization params."""
        return relay.qnn.op.conv2d(
            _data,
            _kernel,
            input_zero_point=relay.const(_data_zero_point, "int32"),
            kernel_zero_point=relay.const(_kernel_zero_point, "int32"),
            input_scale=relay.const(_data_scale, "float32"),
            kernel_scale=relay.const(_kernel_vector_scale),
            channels=_conv2d_attrs["channels"],
            groups=_conv2d_attrs["groups"],
            kernel_size=_conv2d_attrs["kernel_size"],
            strides=_conv2d_attrs["strides"],
            dilation=_conv2d_attrs["dilation"],
            padding=_conv2d_attrs["padding"],
            data_layout=_conv2d_attrs["data_layout"],
            kernel_layout=_conv2d_attrs["kernel_layout"],
        )

    def _get_requantized_op(_res, _input_scale, _output_scale, _out_dtype):
        # Requantize to get the output back
        return relay.qnn.op.requantize(
            _res,
            input_scale=relay.const(_input_scale),
            input_zero_point=relay.const(0, "int32"),
            output_scale=relay.const(_output_scale, "float32"),
            output_zero_point=relay.const(0, "int32"),
            axis=1,
            out_dtype=_out_dtype,
        )

    def _get_sum(_res, _output_scale, out_dtype):
        """Handles sum of the second quantized tensor."""
        # This is done in following steps
        # 1) rhs is the add's second operand. First rhs will be requantized to output scale with
        #    dtype int32. The int32 dtype is to keep precision high before adding.
        # 2) Call normal add
        # 3) Depending on final out_dtype, clip and cast (basically requantize).
        _output_scale = relay.const(_output_scale, "float32")
        data_sum = inputs[-5]
        data_sum_min = inputs[-2]
        data_sum_max = inputs[-1]
        data_sum_dtype = _infer_type(data_sum).checked_type.dtype
        data_sum_scale = (
            get_mkldnn_uint8_scale(data_sum_min, data_sum_max)
            if data_sum_dtype == "uint8"
            else get_mkldnn_int8_scale(data_sum_min, data_sum_max)
        )
        data_sum_scale = relay.const(data_sum_scale, "float32")
        zero_point = relay.const(0, "int32")
        # Save one requantize if the previous expr already has a requantize node. This also improves
        # little bit with accuracy.
        if isinstance(data_sum, _expr.Call) and data_sum.op.name == "qnn.requantize":
            prev_input, prev_scale, prev_zero_point = data_sum.args[0:3]
            prev_axis = data_sum.attrs.axis
            data_sum = relay.qnn.op.requantize(
                prev_input,
                input_scale=prev_scale,
                input_zero_point=prev_zero_point,
                output_scale=_output_scale,
                output_zero_point=zero_point,
                axis=prev_axis,
                out_dtype="int32",
            )
        else:
            data_sum = relay.qnn.op.requantize(
                data_sum,
                input_scale=data_sum_scale,
                input_zero_point=zero_point,
                output_scale=_output_scale,
                output_zero_point=zero_point,
                out_dtype="int32",
            )
        # 2) Add two int32 tensors.
        _res = relay.add(_res, data_sum)
        # 3) Clip/cast to change the out dtype.
        _res = relay.clip(
            _res,
            a_min=float(tvm.tir.op.min_value(out_dtype).value),
            a_max=float(tvm.tir.op.max_value(out_dtype).value),
        )
        _res = relay.cast(_res, out_dtype)
        return _res

    def _parse():
        """Dispatch between the quantized and FP32 convolution subgraphs."""
        assert len(subgraphs) == 1
        subgraph_conv_attrs = StrAttrsDict(_get_subgraph_op(subgraphs, "Convolution")["attrs"])
        is_quantized = attrs.get_bool("quantized", False)
        if is_quantized:
            # The MKLDNN has a quantized convolution subgraph. There are many different arguments
            # that are taken into account to parse the subgraph.
            #   * no_bias
            #   * with_sum
            #   * with_bn
            #   * with_postsum_relu
            #   * with_act
            #
            # Note - Relu/clip handling is not required because output min/max take care of that.
            #
            # The parsing can be broken down into following steps
            # 1) Get the input data scale and zero points.
            # 2) Extract BN params.
            # 3) Fold the BN params into kernel and bias.
            # 4) Quantize the kernel.
            # 4) Call QNN conv2d op.
            # 5) Quantize bias and call bias_add.
            # 6) Handle sum of quantized tensors if needed. Or just Requantize.
            has_bias = not subgraph_conv_attrs.get_bool("no_bias", False)
            has_sum = attrs.get_bool("with_sum", False)
            has_bn = attrs.get_bool("with_bn", False)
            ###############################################
            # 1) Get the input data scale and zero point.
            ###############################################
            # Last 2 indexes are data min and max. If the conv has a sum, then last 2 indexes are
            # for the second tensor. So, the data min max indexes are last 3 and 4
            data_min_idx = -2
            data_max_idx = -1
            if has_sum:
                data_min_idx = -4
                data_max_idx = -3
            data = inputs[0]
            data_scale, data_zero_point = _get_data_scale_and_zp(
                data, inputs, data_min_idx, data_max_idx
            )
            #############################
            # 2) Extract the BN params.
            #############################
            # Find the indexes to look at for BN.
            bn_scale = bn_shift = None
            if has_bn:
                if has_bias:
                    bn_start_idx = 3
                else:
                    bn_start_idx = 2
                bn_gamma_idx = bn_start_idx
                bn_beta_idx = bn_start_idx + 1
                bn_running_mean_idx = bn_start_idx + 2
                bn_running_var_idx = bn_start_idx + 3
                bn_scale, bn_shift = _get_bn_alpha_coeff(
                    bn_gamma_idx, bn_beta_idx, bn_running_mean_idx, bn_running_var_idx
                )
            ########################################
            # 3) Fold the BN into kernel and bias.
            ########################################
            kernel, bias = _fold_bn(bn_scale, bn_shift, has_bias, has_bn)
            #######################################################################
            # 4) Fold BN params into kernel. Get quantized kernel and QNN params.
            #######################################################################
            kernel, kernel_vector_scale, kernel_zero_point = _get_quantized_kernel(
                kernel, bias, data_scale
            )
            ##########################
            # 5) Call QNN conv2d op.
            ##########################
            conv2d_attrs = _get_mx_conv2d_attrs(subgraph_conv_attrs)
            res = _get_qnn_conv2d(
                data,
                kernel,
                data_zero_point,
                kernel_zero_point,
                data_scale,
                kernel_vector_scale,
                conv2d_attrs,
            )
            ###############################################
            # 6) Fold BN params into bias. Call bias_add.
            ###############################################
            if has_bias or has_bn:
                bias_scale = data_scale * kernel_vector_scale
                int32_bias = quantize_conv_bias_mkldnn_from_var(bias, bias_scale)
                res = _op.nn.bias_add(res, int32_bias, axis=1)
            #####################################################################
            # 7) Handle sum of quantized tensors if needed. Or just Requantize.
            #####################################################################
            min_output_range = attrs.get_float("min_calib_range")
            max_output_range = attrs.get_float("max_calib_range")
            output_scale, out_dtype = get_conv_mkldnn_requantized_scale_outDtype(
                min_output_range, max_output_range
            )
            # QNN conv2d output scale is product of data_scale and kernel_vector_scale
            input_scale = data_scale * kernel_vector_scale
            if attrs.get_bool("with_sum", False):
                # There is a second tensor that has to be added to the QNN conv2d output. Therefore,
                # the QNN conv2d is first requantized to output scale with int32 precision. The
                # second tensor will also be requantized to output scale with int32 precision,
                # followed by an add operator.
                res = _get_requantized_op(res, input_scale, output_scale, "int32")
                res = _get_sum(res, output_scale, out_dtype)
            else:
                # Get the requantized conv output
                res = _get_requantized_op(res, input_scale, output_scale, out_dtype)
            return res, min_output_range, max_output_range
        else:
            res = _mx_conv(inputs, subgraph_conv_attrs)
            has_fused_relu = _has_fused_activation(attrs, ["relu"])
            if has_fused_relu:
                res = _op.nn.relu(res)
            return res

    return _parse()
def _qnn_flatten(inputs, attrs):
    # pylint: disable=unused-argument
    """Flatten a quantized tensor; the min/max calibration values pass through."""
    data, output_min, output_max = inputs[0], inputs[1], inputs[2]
    flattened = _op.nn.batch_flatten(data)
    return flattened, output_min, output_max
def _qnn_dequantize(inputs, attrs):
    # pylint: disable=unused-argument
    """Dequantize back to FP32 using the recorded min/max range."""
    data, input_min, input_max = inputs[0], inputs[1], inputs[2]
    in_dtype = _infer_type(data).checked_type.dtype
    return dequantize_mxnet_min_max(data, input_min, input_max, in_dtype)
def _qnn_activation(inputs, attrs):
    """Quantized activation: apply relu; min/max calibration values pass through."""
    act_type = attrs.get_str("act_type")
    assert len(inputs) == 3
    assert act_type == "relu", "Currently only relu is supported"
    data, range_min, range_max = inputs
    return _op.nn.relu(data), range_min, range_max
def _qnn_pooling(inputs, attrs):
    """Quantized pooling: non-max pools accumulate in int32, then cast back."""
    data, input_min, input_max = inputs[0], inputs[1], inputs[2]
    data_dtype = _infer_type(data).checked_type.dtype
    pool_type = attrs.get_str("pool_type")
    # avg-style pools need a wider accumulator to avoid int8/uint8 overflow.
    needs_widening = data_dtype in ("int8", "uint8") and pool_type != "max"
    if needs_widening:
        data = _op.cast(data, "int32")
    res = _mx_pooling([data, input_min, input_max], attrs)
    if needs_widening:
        res = _op.cast(res, data_dtype)
    return res, input_min, input_max
def _qnn_batch_norm(inputs, attrs):
    """Convert quantized BatchNorm: dequantize -> FP32 BN -> requantize.

    Returns (expr, min_calib_range, max_calib_range).
    """
    # Perform batch norm in FP32
    data = inputs[0]
    # Dequantize the data.
    data_min_idx, data_max_idx = (-2, -1)
    data_min, data_max = inputs[data_min_idx], inputs[data_max_idx]
    data_dtype = _infer_type(data).checked_type.dtype
    data_scale = (
        get_mkldnn_uint8_scale(data_min, data_max)
        if data_dtype == "uint8"
        else get_mkldnn_int8_scale(data_min, data_max)
    )
    data_zp = 0
    data = relay.qnn.op.dequantize(
        data, relay.const(data_scale, "float32"), relay.const(data_zp, "int32")
    )
    # Run BN. The last 4 inputs are same as before.
    new_inputs = [data, *inputs[1:5]]
    res = _mx_batch_norm(new_inputs, attrs)
    # Quantize the result using the recorded calibration range.
    min_output_range = attrs.get_float("min_calib_range")
    max_output_range = attrs.get_float("max_calib_range")
    output_scale, out_dtype = get_conv_mkldnn_requantized_scale_outDtype(
        min_output_range, max_output_range
    )
    res = relay.qnn.op.quantize(
        res[0], relay.const(output_scale, "float32"), relay.const(0, "int32"), out_dtype=out_dtype
    )
    return res, min_output_range, max_output_range
def _qnn_fully_connected(inputs, attrs, subgraphs, params):
    """Convert an MKLDNN fused FullyConnected subgraph (quantized or FP32).

    The quantized path returns (expr, min_calib_range, max_calib_range) unless
    ``enable_float_output`` is set, in which case only the expr is returned.
    """

    def _get_input_scale_zp(_data_dtype, _inputs, _has_bias):
        """Scale/zero-point of the input data from its trailing min/max entries."""
        data_min_idx, data_max_idx = (3, 4) if _has_bias else (2, 3)
        data_min, data_max = _inputs[data_min_idx], _inputs[data_max_idx]
        _data_scale = (
            get_mkldnn_uint8_scale(data_min, data_max)
            if _data_dtype == "uint8"
            else get_mkldnn_int8_scale(data_min, data_max)
        )
        _data_zp = 0
        return _data_scale, _data_zp

    def _get_kernel_scale_zp_tensor_quantized(_kernel, _inputs, _has_bias):
        """Scale/zero-point for a tensor-wise quantized (int8) kernel."""
        kernel_dtype = _infer_type(_kernel).checked_type.dtype
        if kernel_dtype != "int8":
            raise tvm.error.OpNotImplemented(
                "Tensor wise quantized expects weights in int8 data type"
            )
        # If the kernel is already a qnn.quantize call, reuse its params.
        if isinstance(_kernel, tvm.relay.Call) and _kernel.op.name == "qnn.quantize":
            _kernel_scale = _kernel.args[1].data.numpy()
            _kernel_zp = _kernel.args[2].data.numpy()
            return _kernel_scale, _kernel_zp
        kernel_min_idx, kernel_max_idx = (5, 6) if _has_bias else (4, 5)
        kernel_min_name = _get_name(_inputs[kernel_min_idx])
        kernel_min = params[kernel_min_name].numpy()[0]
        kernel_max_name = _get_name(_inputs[kernel_max_idx])
        kernel_max = params[kernel_max_name].numpy()[0]
        _kernel_scale = (
            get_mkldnn_uint8_scale(kernel_min, kernel_max)
            if kernel_dtype == "uint8"
            else get_mkldnn_int8_scale(kernel_min, kernel_max)
        )
        _kernel_zp = 0
        return _kernel_scale, _kernel_zp

    def _get_kernel_scale_zp_channel_quantized(_kernel, _bias, _data_scale):
        """Channel-quantize an FP32 kernel; returns (kernel, scales, zero_point)."""
        kernel_dtype = _infer_type(_kernel).checked_type.dtype
        if kernel_dtype != "float32":
            raise tvm.error.OpNotImplemented(
                "Channel wise quantized expects weights in float32 data type"
            )
        # Get the FP32 values, calculate min/max and then channel quantize them
        np_kernel = _infer_value(_kernel, params).numpy()
        kernel_channel_min = np.amin(np_kernel, axis=(1,))
        kernel_channel_max = np.amax(np_kernel, axis=(1,))
        np_bias = None
        if _bias is not None:
            np_bias = _infer_value(_bias, params).numpy()
        return quantize_conv_weights_bias_channel_mkldnn_from_var(
            _kernel, np_bias, kernel_channel_min, kernel_channel_max, _data_scale
        )

    def _get_bias_requantize_scale(_inputs, _data_scale, _kernel_scale):
        """Scale converting the stored bias into the dense output's int32 scale."""
        _bias = _inputs[2]
        if isinstance(_bias, tvm.relay.Call) and _bias.op.name == "qnn.quantize":
            _bias_scale = _bias.args[1].data.numpy()
            _bias_requantize_scale = _bias_scale / (_data_scale * _kernel_scale)
            _bias_requantize_scale = _expr.const(_bias_requantize_scale, dtype="float32")
            return _bias_requantize_scale
        bias_min_name = _get_name(_inputs[7])
        bias_min = params[bias_min_name].numpy()[0]
        bias_max_name = _get_name(_inputs[8])
        bias_max = params[bias_max_name].numpy()[0]
        bias_scale = get_mkldnn_int8_scale(bias_min, bias_max)
        _bias_requantize_scale = bias_scale / (_data_scale * _kernel_scale)
        _bias_requantize_scale = _expr.const(_bias_requantize_scale, dtype="float32")
        return _bias_requantize_scale

    is_quantized = attrs.get_bool("quantized", False)
    with_relu = attrs.get_bool("with_relu", False)
    subgraph_dense_attrs = StrAttrsDict(_get_subgraph_op(subgraphs, "FullyConnected")["attrs"])
    if not is_quantized:
        res = _mx_fully_connected(inputs, subgraph_dense_attrs)
        if with_relu:
            res = _op.nn.relu(res)
        return res
    else:
        has_bias = not subgraph_dense_attrs.get_bool("no_bias", False)
        units = subgraph_dense_attrs.get_int("num_hidden")
        is_flatten = subgraph_dense_attrs.get_bool("flatten", True)
        enable_float_output = attrs.get_bool("enable_float_output", False)
        is_channel_quantized = attrs.get_bool("channel_wise_quantize", False)
        ########################
        # Get data, kernel, bias
        ########################
        data, kernel = inputs[0], inputs[1]
        bias = None
        if has_bias:
            bias = inputs[2]
        ##############################
        # Handle for shape of data > 2
        ##############################
        if is_flatten:
            data = _op.nn.batch_flatten(data)
        data_shape = _infer_type(data).checked_type.shape
        if len(data_shape) > 2:
            # Collapse leading dims so dense sees a 2-D input; restored below.
            data = _op.reverse_reshape(data, [-1, 0])
        ###############################
        # Get data scale and zero point
        ###############################
        data_dtype = _infer_type(data).checked_type.dtype
        data_scale, data_zp = _get_input_scale_zp(data_dtype, inputs, has_bias)
        #################################
        # Get weight scale and zero point
        #################################
        if is_channel_quantized:
            kernel, kernel_scale, kernel_zp = _get_kernel_scale_zp_channel_quantized(
                kernel, bias, data_scale
            )
        else:
            kernel_scale, kernel_zp = _get_kernel_scale_zp_tensor_quantized(
                kernel, inputs, has_bias
            )
        ################
        # Call QNN dense
        ################
        res = relay.qnn.op.dense(
            data,
            kernel,
            input_zero_point=relay.const(data_zp, "int32"),
            kernel_zero_point=relay.const(kernel_zp, "int32"),
            input_scale=relay.const(data_scale, "float32"),
            kernel_scale=relay.const(kernel_scale, "float32"),
            units=units,
        )
        #################
        # Handle bias add
        #################
        if has_bias:
            if is_channel_quantized:
                bias_scale = data_scale * kernel_scale
                int32_bias = quantize_conv_bias_mkldnn_from_var(bias, bias_scale)
                res = _op.nn.bias_add(res, int32_bias, axis=-1)
            else:
                bias_data = inputs[2]
                bias_requantize_scale = _get_bias_requantize_scale(inputs, data_scale, kernel_scale)
                multiplied_bias = _op.multiply(
                    _op.cast(bias_data, "float32"), bias_requantize_scale
                )
                rounded_bias = _op.round(multiplied_bias)
                clipped_bias = _op.clip(
                    rounded_bias,
                    a_min=tvm.tir.op.min_value("int32").value,
                    a_max=tvm.tir.op.max_value("int32").value,
                )
                requantized_bias = _op.cast(clipped_bias, "int32")
                res = _op.nn.bias_add(res, requantized_bias, axis=-1)
        ##############################################
        # Dequantize if float32 output else Requantize
        ##############################################
        if enable_float_output:
            output_scale = np.float32(data_scale * kernel_scale)
            res = relay.qnn.op.dequantize(
                res, relay.const(output_scale), input_zero_point=relay.const(0, "int32"), axis=1
            )
            if with_relu:
                res = _op.nn.relu(res)
        else:
            if is_channel_quantized:
                raise tvm.error.OpNotImplemented(
                    "Channel wise quantized dense with non float output is not supported yet"
                )
            out_dtype = "uint8" if attrs.get_bool("with_relu", False) else "int8"
            input_scale = np.float32(data_scale * kernel_scale)
            min_output_range = attrs.get_float("min_calib_range")
            max_output_range = attrs.get_float("max_calib_range")
            output_scale = get_mkldnn_requantize_scale_outDtype(
                min_output_range, max_output_range, out_dtype
            )
            res = relay.qnn.op.requantize(
                res,
                input_scale=relay.const(input_scale, "float32"),
                input_zero_point=relay.const(0, "int32"),
                output_scale=relay.const(output_scale, "float32"),
                output_zero_point=relay.const(0, "int32"),
                out_dtype=out_dtype,
            )
            if with_relu:
                res = _op.nn.relu(res)
        ##############################
        # Handle for shape of data > 2
        ##############################
        if len(data_shape) > 2:
            new_shape = data_shape[:-1]
            new_shape.append(units)
            res = _op.reshape(res, new_shape)
        if enable_float_output:
            return res
        return res, min_output_range, max_output_range
def _mx_broadcast_to(inputs, attrs):
    """broadcast_to -> relay broadcast_to using the "shape" attribute."""
    target_shape = attrs.get_int_tuple("shape", [])
    return _op.broadcast_to(inputs[0], target_shape)
def _mx_broadcast_like(inputs, attrs):
    """broadcast_like -> relay broadcast_to_like; the axes variants are unsupported."""
    assert len(inputs) == 2
    for unsupported in ("lhs_axes", "rhs_axes"):
        if unsupported in attrs.attrs:
            raise tvm.error.OpAttributeUnImplemented(
                f'Attribute "{unsupported}" is not supported for operator broadcast_like.'
            )
    return _op.broadcast_to_like(inputs[0], inputs[1])
def _mx_logical_not(inputs, input_types):
    """logical_not: compute in bool, return in the input's original dtype."""
    data = inputs[0]
    orig_dtype = _infer_type(data).checked_type.dtype
    as_bool = data if orig_dtype == "bool" else _op.cast(data, "bool")
    return _op.cast(_op.logical_not(as_bool), orig_dtype)
def _mx_broadcast_logical(logical_op):
    """Build a converter that casts both operands to bool, applies *logical_op*,
    and casts the result back to the lhs dtype."""

    def impl(inputs, input_types):
        lhs, rhs = inputs[0], inputs[1]
        lhs_type = _infer_type(lhs).checked_type.dtype
        rhs_type = _infer_type(rhs).checked_type.dtype
        if lhs_type != "bool":
            lhs = _op.cast(lhs, "bool")
        if rhs_type != "bool":
            rhs = _op.cast(rhs, "bool")
        return _op.cast(logical_op(lhs, rhs), lhs_type)

    return impl
def _mx_npi_transpose(inputs, attrs):
    """np.transpose: axes of () or (None,) means reverse all axes (Relay's None).

    Bug fix: ``attrs.get_int_tuple("axes", None)`` returns the default ``None``
    when the attribute is absent, and the previous code then crashed on
    ``len(None)``. Guard for ``None`` first.
    """
    axes = attrs.get_int_tuple("axes", None)
    # translate default case
    if axes is None or len(axes) == 0 or axes[0] is None:
        axes = None
    return _op.transpose(inputs[0], axes=axes)
def _mx_npi_pad(inputs, attrs):
    """np.pad: validate mode and pad_width, then lower to relay nn.pad.

    Bug fix: the invalid-mode error previously concatenated the undefined name
    ``mode`` (NameError at raise time); it now uses ``pad_mode``.
    """
    pad_mode = attrs.get_str("mode", None)
    if pad_mode is None:
        raise tvm.error.OpAttributeRequired('Attribute "mode" not found in operator pad.')
    if pad_mode not in ["constant", "edge", "reflect"]:
        raise tvm.error.OpAttributeInvalid(
            "Value " + pad_mode + ' in attribute "mode" is not valid'
        )
    if "pad_width" not in attrs.attrs:
        raise tvm.error.OpAttributeRequired('Attribute "pad_width" not found in operator pad.')
    # pad_width is a tuple of tuples, e.g. "((0, 0), (1, 1))"; get_int_tuple
    # cannot parse nesting, so turn the repr into JSON and parse that instead.
    pad_width = attrs.attrs["pad_width"]
    pad_width = pad_width.replace("(", "[")
    pad_width = pad_width.replace(")", "]")
    pad_width = json.loads(pad_width)
    constant_values = attrs.get_float("constant_values", 0.0)
    return _op.nn.pad(
        data=inputs[0], pad_width=pad_width, pad_value=constant_values, pad_mode=pad_mode
    )
def _mx_npi_concatenate(inputs, attrs):
    """np.concatenate: axis "None" means concatenate on axis 0 and flatten."""
    axis = attrs.get_str("axis", "0")
    parts = tuple(inputs)
    if axis != "None":
        return _op.concatenate(parts, axis=int(axis))
    return _op.reshape(_op.concatenate(parts, axis=0), (-1,))
def _mx_npi_stack(inputs, attrs):
    """np.stack: axis "None" means stack on axis 0 and flatten."""
    axis = attrs.get_str("axis", "0")
    parts = tuple(inputs)
    if axis != "None":
        return _op.stack(parts, axis=int(axis))
    return _op.reshape(_op.stack(parts, axis=0), (-1,))
def _mx_npx_reshape(inputs, attrs):
    """Convert npx.reshape, whose *newshape* supports special codes:

    -1 infer this output dim, -2 copy the current input dim, -3 skip a size-1
    input dim, -4 copy all remaining input dims, -5 merge two consecutive
    input dims, -6 split an input dim into the next two values (one of which
    may be -1). ``reverse=True`` matches dims from the right.
    """
    shape = attrs.get_int_tuple("newshape")
    reverse = attrs.get_bool("reverse", False)
    shape_list = list(shape)
    old_shape = get_const_tuple(_infer_type(inputs[0]).checked_type.shape)
    new_shape = []
    if reverse:
        # Process from the right by reversing both shapes, then flip back at the end.
        old_shape = old_shape[::-1]
        shape_list = shape_list[::-1]
    ptr = 0  # cursor into old_shape
    unknown_axis = None  # position of the single allowed -1, if any
    src_ptr = 0  # cursor into shape_list
    while src_ptr < len(shape_list):
        ele = shape_list[src_ptr]
        src_ptr += 1
        if ele > 0:
            new_shape.append(ele)
            ptr += 1
        elif ele == -1:
            new_shape.append(-1)
            if unknown_axis is not None:
                raise tvm.error.OpAttributeInvalid("Can only have one -1 in the input shape.")
            unknown_axis = len(new_shape)
            ptr += 1
        elif ele == -2:
            new_shape.append(old_shape[ptr])
            ptr += 1
        elif ele == -3:
            # Drop a dimension, which must be of size 1.
            if old_shape[ptr] != 1:
                raise tvm.error.OpAttributeInvalid(
                    f"Dimension of the original shape "
                    f"that corresponds to -3 must be 1. Received"
                    f" {old_shape[ptr]}"
                )
            ptr += 1
        elif ele == -4:
            new_shape += old_shape[ptr:]
            break
        elif ele == -5:
            # Merge two consecutive input dims into one.
            new_shape.append(old_shape[ptr] * old_shape[ptr + 1])
            ptr += 2
        elif ele == -6:
            # Split axis
            lhs = shape_list[src_ptr]
            rhs = shape_list[src_ptr + 1]
            src_ptr += 2
            if lhs == -1 and rhs == -1:
                raise tvm.error.OpAttributeInvalid("The lhs and rhs can not both be -1.")
            if lhs == -1:
                if old_shape[ptr] % rhs != 0:
                    raise tvm.error.OpAttributeInvalid(
                        "When splitting the axis, "
                        "the dimension of the split axis must "
                        "be divisible by the splitted values."
                    )
                lhs = old_shape[ptr] // rhs
            if rhs == -1:
                if old_shape[ptr] % lhs != 0:
                    raise tvm.error.OpAttributeInvalid(
                        "When splitting the axis, "
                        "the dimension of the split axis must "
                        "be divisible by the splitted values."
                    )
                rhs = old_shape[ptr] // lhs
            new_shape.append(lhs)
            new_shape.append(rhs)
            ptr += 1
        else:
            raise tvm.error.OpAttributeInvalid(f"Shape dimension {ele} is not supported")
    if reverse:
        new_shape = new_shape[::-1]
    return _op.reshape(inputs[0], newshape=new_shape)
def _mx_split_v2(inputs, attrs):
    """split_v2: split by explicit indices or into equal sections; optionally
    squeeze the split axis out of each piece."""
    axis = attrs.get_int("axis")
    indices = list(attrs.get_int_tuple("indices", []))
    # MXNet prefixes the index list with 0; Relay does not want it.
    if indices and indices[0] == 0:
        indices = indices[1:]
    sections = attrs.get_int("sections", 0)
    splits = indices if indices else sections
    pieces = _op.split(inputs[0], indices_or_sections=splits, axis=axis)
    if attrs.get_bool("squeeze_axis", False):
        pieces = tuple(_op.squeeze(piece, axis=[axis]) for piece in pieces)
    return pieces
def _mx_npi_where_rscalar(inputs, attrs):
    """Convert MXNet ``_npi_where_rscalar`` (where with scalar RHS) to Relay."""
    cond, tensor = inputs
    scalar_val = attrs.get_float("scalar")
    cond_shape = get_const_tuple(_infer_type(cond).checked_type.shape)
    tensor_shape = get_const_tuple(_infer_type(tensor).checked_type.shape)
    dtype = _infer_type(tensor).checked_type.dtype
    # Compute the broadcast shape via numpy and expand both operands to it.
    common_shape = np.broadcast(np.empty(cond_shape), np.empty(tensor_shape)).shape
    if common_shape != cond_shape:
        cond = _op.broadcast_to(cond, common_shape)
    if common_shape != tensor_shape:
        tensor = _op.broadcast_to(tensor, common_shape)
    # Materialize the scalar as a tensor shaped like `tensor` for where().
    scalar_const = _expr.const(scalar_val, dtype=dtype)
    full_scalar = _op.multiply(_op.ones_like(tensor), scalar_const)
    return _op.where(cond, tensor, full_scalar)
# Note: due to attribute conversion constraint
# ops in the identity set must be attribute free
# Each op listed here maps 1:1 to a Relay op of the same name (see the
# _convert_map.update(...) wiring below).
_identity_list = [
    "abs",
    "log",
    "exp",
    "erf",
    "sqrt",
    "floor",
    "ceil",
    "round",
    "trunc",
    "sign",
    "sigmoid",
    "negative",
    "reshape_like",
    "zeros_like",
    "ones_like",
    "cos",
    "cosh",
    "sin",
    "sinh",
    "tan",
    "tanh",
    "where",
]
# Dispatch table: MXNet op name -> converter callable. Converters are invoked
# with (children, attrs) plus optional subgraphs/params (see _get_op_params).
_convert_map = {
    "_copy": _rename(_op.copy),
    "relu": _rename(_op.nn.relu),
    "broadcast_add": _rename(_op.add),
    "broadcast_plus": _rename(_op.add),
    "broadcast_sub": _rename(_op.subtract),
    "broadcast_minus": _rename(_op.subtract),
    "broadcast_mul": _rename(_op.multiply),
    "broadcast_div": _rename(_op.divide),
    "broadcast_mod": _rename(_op.mod),
    "broadcast_maximum": _rename(_op.maximum),
    "broadcast_minimum": _rename(_op.minimum),
    "broadcast_power": _rename(_op.power),
    "arccos": _rename(_op.acos),
    "arcsin": _rename(_op.asin),
    "arctan": _rename(_op.atan),
    "arccosh": _rename(_op.acosh),
    "arcsinh": _rename(_op.asinh),
    "arctanh": _rename(_op.atanh),
    "broadcast_equal": _mx_compare(_op.equal, _rename),
    "broadcast_not_equal": _mx_compare(_op.not_equal, _rename),
    "broadcast_greater": _mx_compare(_op.greater, _rename),
    "broadcast_greater_equal": _mx_compare(_op.greater_equal, _rename),
    "broadcast_lesser": _mx_compare(_op.less, _rename),
    "broadcast_lesser_equal": _mx_compare(_op.less_equal, _rename),
    "broadcast_logical_or": _mx_broadcast_logical(_op.logical_or),
    "broadcast_logical_and": _mx_broadcast_logical(_op.logical_and),
    "broadcast_logical_xor": _mx_broadcast_logical(_op.logical_xor),
    "broadcast_to": _mx_broadcast_to,
    "broadcast_like": _mx_broadcast_like,
    "logical_not": _mx_logical_not,
    "_equal": _mx_compare(_op.equal, _rename),
    "_not_equal": _mx_compare(_op.not_equal, _rename),
    "_greater": _mx_compare(_op.greater, _rename),
    "_greater_equal": _mx_compare(_op.greater_equal, _rename),
    "_lesser": _mx_compare(_op.less, _rename),
    "_lesser_equal": _mx_compare(_op.less_equal, _rename),
    "elemwise_add": _rename(_op.add),
    "elemwise_sub": _rename(_op.subtract),
    "elemwise_mul": _rename(_op.multiply),
    "elemwise_div": _rename(_op.divide),
    "_maximum": _rename(_op.maximum),
    "_minimum": _rename(_op.minimum),
    "flatten": _rename(_op.nn.batch_flatten),
    "Flatten": _rename(_op.nn.batch_flatten),
    # scalar power
    "square": _mx_make_power(2),
    "rsqrt": _mx_make_power(-1 / 2),
    "cbrt": _mx_make_power(1 / 3),
    "rcbrt": _mx_make_power(-1 / 3),
    "__pow_scalar__": _binop_scalar(_op.power),
    "_power_scalar": _binop_scalar(_op.power),
    "__rsub_scalar__": _rbinop_scalar(_op.subtract),
    "_rminus_scalar": _rbinop_scalar(_op.subtract),
    "__rdiv_scalar__": _rbinop_scalar(_op.divide),
    "_rdiv_scalar": _rbinop_scalar(_op.divide),
    "__rpow_scalar__": _rbinop_scalar(_op.power),
    # scalar op
    "__add_scalar__": _binop_scalar(_op.add),
    "_plus_scalar": _binop_scalar(_op.add),
    "__sub_scalar__": _binop_scalar(_op.subtract),
    "_minus_scalar": _binop_scalar(_op.subtract),
    "__mul_scalar__": _binop_scalar(_op.multiply),
    "_mul_scalar": _binop_scalar(_op.multiply),
    "__div_scalar__": _binop_scalar(_op.divide),
    "_div_scalar": _binop_scalar(_op.divide),
    "log2": _mx_make_logarithm(2),
    "log10": _mx_make_logarithm(10),
    "log1p": _mx_log1p,
    "expm1": _mx_expm1,
    "_equal_scalar": _mx_compare(_op.equal, _binop_scalar),
    "_not_equal_scalar": _mx_compare(_op.not_equal, _binop_scalar),
    "_greater_scalar": _mx_compare(_op.greater, _binop_scalar),
    "_greater_equal_scalar": _mx_compare(_op.greater_equal, _binop_scalar),
    "_lesser_scalar": _mx_compare(_op.less, _binop_scalar),
    "_lesser_equal_scalar": _mx_compare(_op.less_equal, _binop_scalar),
    "_maximum_scalar": _binop_scalar(_op.maximum),
    "_minimum_scalar": _binop_scalar(_op.minimum),
    # reduction ops
    "mean": _reduce(_op.mean),
    "max": _reduce(_op.max),
    "min": _reduce(_op.min),
    "sum": _reduce(_op.sum),
    "max_axis": _reduce(_op.max),
    "min_axis": _reduce(_op.min),
    "sum_axis": _reduce(_op.sum),
    "argmax": _arg_reduce(_op.argmax),
    "argmin": _arg_reduce(_op.argmin),
    # init ops
    "_ones": _init_op(_op.ones),
    # softmax
    "softmax": _softmax_op(_op.nn.softmax),
    "log_softmax": _softmax_op(_op.nn.log_softmax),
    "Softmax": _softmax_op(_op.nn.softmax),
    "softsign": _mx_softsign,
    "softmin": _mx_softmin,
    "hard_sigmoid": _mx_hard_sigmoid,
    "reciprocal": _mx_reciprocal,
    # per op specialization
    "Reshape": _reshape,
    "reshape": _reshape,
    "Cast": _cast,
    "amp_cast": _cast,
    "amp_multicast": _mx_amp_multicast,
    "clip": _clip,
    "transpose": _transpose,
    "UpSampling": _upsampling,
    "add_n": _elemwise_sum,
    # MXNet specific implementations
    "_zeros": _mx_zeros,
    "FullyConnected": _mx_fully_connected,
    "Activation": _mx_activations,
    "Convolution": _mx_conv,
    "Convolution_v1": _mx_conv2d,
    "Deconvolution": _mx_conv_transpose,
    "Pooling": _mx_pooling,
    "Pooling_v1": _mx_pooling,
    "Dropout": _mx_dropout,
    "BatchNorm": _mx_batch_norm,
    "BatchNorm_v1": _mx_batch_norm,
    "_contrib_SyncBatchNorm": _mx_batch_norm,
    "InstanceNorm": _mx_instance_norm,
    "LayerNorm": _mx_layer_norm,
    "GroupNorm": _mx_group_norm,
    "LRN": _mx_lrn,
    "L2Normalization": _mx_l2_normalize,
    "slice": _mx_slice,
    "slice_like": _mx_slice_like,
    "slice_axis": _mx_slice_axis,
    "SliceChannel": _mx_split,
    "split": _mx_split,
    "_split_v2": _mx_split_v2,
    "SwapAxis": _mx_swap_axis,
    "expand_dims": _mx_expand_dims,
    "Concat": _mx_concat,
    "concat": _mx_concat,
    "stack": _mx_stack,
    "dot": _mx_dot,
    "batch_dot": _mx_batch_dot,
    "LeakyReLU": _mx_leaky_relu,
    "_arange": _mx_arange,
    "_full": _mx_full,
    "repeat": _mx_repeat,
    "tile": _mx_tile,
    "pad": _mx_pad,
    "Pad": _mx_pad,
    "take": _mx_take,
    "gather_nd": _mx_gather_nd,
    "reverse": _mx_reverse,
    "SequenceReverse": _mx_sequence_reverse,
    "squeeze": _mx_squeeze,
    "broadcast_axis": _mx_broadcast_axis,
    "broadcast_axes": _mx_broadcast_axis,
    "BlockGrad": _mx_BlockGrad,
    "shape_array": _mx_shape_array,
    "Embedding": _mx_embedding,
    "argsort": _mx_argsort,
    "topk": _mx_topk,
    "_unravel_index": _mx_unravel_index,
    "SequenceMask": _mx_sequence_mask,
    "SoftmaxOutput": _mx_softmax_output,
    "SoftmaxActivation": _mx_softmax_activation,
    "LinearRegressionOutput": _mx_linear_regression_output,
    "LogisticRegressionOutput": _mx_logistic_regression_output,
    "smooth_l1": _mx_smooth_l1,
    "make_loss": _mx_make_loss,
    "_contrib_div_sqrt_dim": _mx_contrib_div_sqrt_dim,
    "_contrib_arange_like": _mx_contrib_arange_like,
    "one_hot": _mx_one_hot,
    "depth_to_space": _mx_depth_to_space,
    "space_to_depth": _mx_space_to_depth,
    "Correlation": _mx_correlation,
    # vision
    "_contrib_BilinearResize2D": _mx_resize,
    "_contrib_MultiBoxPrior": _mx_multibox_prior,
    "_contrib_MultiBoxDetection": _mx_multibox_detection,
    "_contrib_ROIAlign": _mx_roi_align,
    "ROIPooling": _mx_roi_pooling,
    "_contrib_Proposal": _mx_proposal,
    "_contrib_MultiProposal": _mx_proposal,
    "_contrib_box_nms": _mx_box_nms,
    "_contrib_box_decode": _mx_box_decode,
    "_contrib_DeformableConvolution": _mx_deformable_convolution,
    "_contrib_AdaptiveAvgPooling2D": _mx_adaptive_avg_pooling,
    "GridGenerator": _mx_grid_generator,
    "BilinearSampler": _mx_bilinear_sampler,
    # NLP
    "RNN": _mx_rnn_layer,
    "_rnn_param_concat": _mx_rnn_param_concat,
    "_contrib_interleaved_matmul_selfatt_qk": _mx_contrib_interleaved_matmul_selfatt_qk,
    "_contrib_interleaved_matmul_selfatt_valatt": _mx_contrib_interleaved_matmul_selfatt_valatt,
    # control flow
    "_cond": _mx_cond,
    # Depricated:
    "Crop": _mx_crop_like,
    # List of missing operators that are present in NNVMv1
    # TODO(tvm-tvm): support all operators.
    #
    # "contrib_fifo_buffer": _mx_contrib_fifo_buffer,
    "ring_buffer": _mx_contrib_fifo_buffer,
    # Qnn ops
    "_contrib_quantize_v2": _qnn_quantize,
    "_contrib_quantized_concat": _qnn_contrib_concat,
    # "_contrib_quantized_fifo_buffer": _qnn_contrib_quantized_fifo_buffer,
    "_contrib_quantized_ring_buffer": _qnn_contrib_quantized_fifo_buffer,
    "_sg_mkldnn_conv": _qnn_conv,
    "_contrib_quantized_flatten": _qnn_flatten,
    "_contrib_dequantize": _qnn_dequantize,
    "_contrib_quantized_act": _qnn_activation,
    "_contrib_quantized_pooling": _qnn_pooling,
    "_contrib_quantized_batch_norm": _qnn_batch_norm,
    "_sg_mkldnn_fully_connected": _qnn_fully_connected,
    # numpy
    "_np_transpose": _mx_npi_transpose,
    "_npi_transpose": _mx_npi_transpose,
    "_npi_pad": _mx_npi_pad,
    "_npi_concatenate": _mx_npi_concatenate,
    "_npx_reshape": _mx_npx_reshape,
    "_np_copy": _rename(_op.copy),
    "_npi_copy": _rename(_op.copy),
    "_npi_power": _rename(_op.power),
    "_npi_power_scalar": _binop_scalar(_op.power),
    "_npi_multiply": _rename(_op.multiply),
    "_npi_multiply_scalar": _binop_scalar(_op.multiply),
    "_npi_add": _rename(_op.add),
    "_npi_add_scalar": _binop_scalar(_op.add),
    "_npi_subtract": _rename(_op.subtract),
    "_npi_subtract_scalar": _binop_scalar(_op.subtract),
    "_npi_where_rscalar": _mx_npi_where_rscalar,
    "_npi_less": _rename(_op.less),
    "_npi_less_equal": _mx_compare(_op.less_equal, _rename),
    "_npi_tanh": _rename(_op.tanh),
    "_npi_true_divide_scalar": _binop_scalar(_op.divide),
    "_npi_stack": _mx_npi_stack,
}
# set identity list
_convert_map.update({k: _rename(k) for k in _identity_list})
# Ops whose converters additionally receive subgraphs and/or the params dict
# (consumed by _get_op_params below).
_control_flow_ops = ["_cond", "_foreach", "_while_loop"]
_qnn_subgraph_ops = ["_sg_mkldnn_conv", "_sg_mkldnn_fully_connected"]
_subgraph_ops = _control_flow_ops + _qnn_subgraph_ops
_params_ops = ["_contrib_quantized_ring_buffer"]
def _get_op_params(children, attrs, op_name, node, params):
    """Assemble the argument list handed to ``op_name``'s converter."""
    args = [children, attrs]
    # Subgraph ops (control flow, fused MKLDNN ops) also get their subgraphs.
    if op_name in _subgraph_ops:
        args.append(node["subgraphs"])
    # Quantized subgraph ops and params-dependent ops additionally get params.
    if op_name in _qnn_subgraph_ops:
        args.append(params)
    if op_name in _params_ops:
        args.append(params)
    return args
def _from_mxnet_impl(symbol, shape_dict, dtype_info, params=None, mod=None):
    # pylint: disable=unused-argument
    """Convert mxnet symbol to compatible relay Function.
    Reconstruct a relay Function by traversing the mxnet symbol.
    Parameters
    ----------
    symbol : mxnet.sym.Symbol or dict
        Incompatible symbol from mxnet, or an already-parsed JSON graph dict.
        The op_name and attrs inside are not always compatible.
    shape_dict : dict, list, or tuple
        Known parameter shapes, keyed by name or positional.
    dtype_info : dict, list, tuple, or str
        Known parameter dtypes; a plain string applies to all inputs.
    mod : tvm.IRModule
        The module that contains global information. It will be used for
        converting ops that need global information, e.g. control-flow ops.
    Returns
    -------
    func : tvm.relay.Function
        Converted relay Function

    Raises
    ------
    tvm.error.OpNotImplemented
        If the graph contains any op without a registered converter.
    """
    assert symbol is not None
    if isinstance(symbol, dict):
        jgraph = symbol
    else:
        jgraph = json.loads(symbol.tojson())
    jnodes = jgraph["nodes"]
    node_map = {}
    shape_idx = 0
    # Fail early with a count of every unsupported op instead of dying on
    # the first one mid-conversion.
    unsupported = {}
    for node in jnodes:
        op_name = node["op"]
        if op_name != "null" and op_name not in _convert_map:
            if op_name not in unsupported:
                unsupported[op_name] = 0
            unsupported[op_name] += 1
    if unsupported:
        msg = "\n".join([f"{op_name}: {cnt}" for op_name, cnt in unsupported.items()])
        raise tvm.error.OpNotImplemented(
            f"One or more operators are not supported in frontend MXNet:\n{msg}"
        )
    for nid, node in enumerate(jnodes):
        children = [node_map[e[0]][e[1]] for e in node["inputs"]]
        attrs = StrAttrsDict(node.get("attrs", {}))
        node_name = node["name"]
        op_name = node["op"]
        if op_name == "null":
            # "null" nodes are graph inputs/parameters; turn them into vars.
            if isinstance(shape_dict, dict):
                shape = shape_dict[node_name] if node_name in shape_dict else None
            elif isinstance(shape_dict, (list, tuple)):
                shape = shape_dict[shape_idx]
            else:
                # Bug fix: the original built this message with
                # "..: %s" + type(shape_dict), concatenating str with a type
                # object, which raised TypeError instead of this ValueError.
                raise ValueError(f"Unknown type of shape_dict: {type(shape_dict)}")
            if isinstance(dtype_info, dict):
                dtype = dtype_info[node_name] if node_name in dtype_info else "float32"
            elif isinstance(dtype_info, (list, tuple)):
                dtype = dtype_info[shape_idx]
            else:
                dtype = dtype_info
            # Positional shape/dtype lists are consumed in input order.
            if isinstance(shape_dict, (list, tuple)):
                shape_idx += 1
            node_map[nid] = [_expr.var(node_name, shape=shape, dtype=dtype)]
        else:
            assert op_name in _convert_map
            op_params = _get_op_params(children, attrs, op_name, node, params)
            res = _convert_map[op_name](*op_params)
            if res is None:
                # defer conversion, used in RNN state initialization
                res = [node]
            elif isinstance(res, (_expr.TupleWrapper, tuple, list)):
                pass
            elif isinstance(res, _expr.Expr):
                res = [res]
            else:
                raise RuntimeError(f"unexpected type {type(res)}")
            node_map[nid] = res
    # Graph outputs ("heads") reference (node id, output index) pairs.
    outputs = [node_map[e[0]][e[1]] for e in jgraph["heads"]]
    outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
    func = _function.Function(analysis.free_vars(outputs), outputs)
    return func
def _update_shape_dtype(shape, dtype, params):
"""Update shape dtype given params information"""
shape = {} if shape is None else shape
if not params:
return shape, dtype
shape = shape.copy()
shape.update({k: v.shape for k, v in params.items()})
if isinstance(dtype, str):
for k, v in params.items():
if v.dtype != dtype:
raise ValueError(f"{k}: dtype not expected {dtype} vs {v.dtype}")
else:
dtype = dtype.copy()
dtype.update({k: str(v.dtype) for k, v in params.items()})
return shape, dtype
def from_mxnet(symbol, shape=None, dtype="float32", arg_params=None, aux_params=None):
    """Convert from MXNet's model into compatible relay Function.
    Parameters
    ----------
    symbol : mxnet.Symbol or mxnet.gluon.HybridBlock
        MXNet symbol.
    shape : dict of str to tuple, optional
        The input shape to the graph
    dtype : str or dict of str to str
        The input types to the graph
    arg_params : dict of str to mx.NDArray
        The argument parameters in mxnet. Only valid for Symbol input.
    aux_params : dict of str to mx.NDArray
        The auxiliary parameters in mxnet. Only valid for Symbol input.
    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation
    params : dict of str to tvm.nd.NDArray
        The parameter dict to be used by nnvm

    Raises
    ------
    ImportError
        If mxnet is not installed.
    ValueError
        If ``symbol`` is neither a Symbol nor a HybridBlock, or params were
        supplied alongside a HybridBlock.
    """
    try:
        import mxnet as mx  # pylint: disable=import-outside-toplevel
    except ImportError as e:
        # Chain the original error so the root cause stays visible.
        raise ImportError(f"{e}. MXNet is required to parse symbols.") from e
    mod = IRModule()
    if isinstance(symbol, mx.sym.Symbol):
        # Copy arg/aux NDArrays into TVM NDArrays keyed by name.
        params = {}
        arg_params = arg_params if arg_params else {}
        aux_params = aux_params if aux_params else {}
        for k, v in arg_params.items():
            params[k] = _nd.array(v.asnumpy())
        for k, v in aux_params.items():
            params[k] = _nd.array(v.asnumpy())
        shape, dtype = _update_shape_dtype(shape, dtype, params)
        func = _from_mxnet_impl(symbol, shape, dtype, params, mod)
    elif isinstance(symbol, mx.gluon.HybridBlock):
        if arg_params is not None or aux_params is not None:
            # Typo fix: "ae not used" -> "are not used".
            raise ValueError("arg_params and aux_params are not used when importing HybridBlock")
        params = {}
        for k, v in symbol.collect_params().items():
            params[k] = _nd.array(v.data().asnumpy())
        # Trace the block with symbolic variables to obtain a Symbol graph.
        inputs = []
        for name in shape:
            inputs.append(mx.sym.Variable(name))
        sym = symbol(*inputs)
        if isinstance(sym, (list, tuple)):
            sym = mx.sym.Group(sym)
        shape, dtype = _update_shape_dtype(shape, dtype, params)
        func = _from_mxnet_impl(sym, shape, dtype, params, mod)
    elif isinstance(symbol, mx.gluon.Block):
        raise NotImplementedError("Only Hybrid Blocks are supported now.")
    else:
        msg = f"mxnet.Symbol or gluon.HybridBlock expected, got {type(symbol)}"
        raise ValueError(msg)
    mod["main"] = func
    return mod, params
| 112,322 | 36.959784 | 100 | py |
tvm | tvm-main/python/tvm/relay/frontend/mxnet_qnn_op_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, no-else-return
"""MXNet qnn dialect helper methods for MXNet specific implementations of more
generic qnn supported ops.
"""
import numpy as np
from tvm import relay
from tvm.relay.qnn.op.qnn import quantize, dequantize
# The below values are taken from -
# https://github.com/apache/incubator-mxnet/blob/master/src/operator/quantization/quantization_utils.h#L38-L39
# Ranges used by MXNet/MKLDNN's zero-centered (symmetric) quantization scheme.
zero_centered_uint8_quantized_range = np.float32(255.5)
zero_centered_int8_quantized_range = np.float32(127.5)
def _get_mkldnn_scale(data_min, data_max, quantized_range):
"""Computes the scale as per MKLDNN specification mentioned here -
https://intel.github.io/mkl-dnn/ex_int8_simplenet.html
Parameters
----------
data_min : float32
A number representing the lower end of the tensor to be quantized.
data_max : float32
A number representing the upper end of the tensor to be quantized.
quantized_range : float32
255 for uint8 and 127 for int8. This is the data type range.
Returns
-------
scale : A floating point number which acts as the scale for quantization.
"""
real_range = np.max([np.abs(np.float32(data_min)), np.abs(np.float32(data_max))])
scale = np.divide(quantized_range, real_range)
scale_inverse = np.divide(1.0, scale)
return scale_inverse
def _quantize_scale_with_zero_centered(data, scale, zero_point, out_dtype):
    """Quantize ``data`` with a fixed scale/zero point and echo both back."""
    quantized = quantize(
        data, relay.const(scale, "float32"), relay.const(zero_point, "int32"), out_dtype=out_dtype
    )
    return quantized, scale, zero_point
def _quantize_with_zero_centered(data, data_min, data_max, quantized_range, out_dtype):
    """Quantize ``data`` symmetrically around zero, MKLDNN style.

    The scale is derived via the MKLDNN formula
    ``quantized_range / max(|data_min|, |data_max|)`` (255 for uint8,
    127 for int8) and the zero point is always 0.

    Parameters
    ----------
    data : tvm.relay.Expr
        Float32 tensor to be quantized.
    data_min, data_max : float
        Min/max to use for the data elements.
    quantized_range : float
        Range of the target integer type.
    out_dtype : str
        Output data type, "int8" or "uint8".

    Returns
    -------
    result : tvm.relay.Expr
        The computed result (plus the scale and zero point used).
    """
    inv_scale = _get_mkldnn_scale(data_min, data_max, quantized_range)
    return _quantize_scale_with_zero_centered(data, inv_scale, 0, out_dtype)
def _quantize_mkldnn_min_max_uint8(data, data_min, data_max):
    """Quantize a float32 tensor to uint8 with the MKLDNN zero-centered scheme.

    Unlike TFLite, which carries scale/zero-point in the model, MKLDNN stores
    min/max, from which the scale and zero point are derived here
    (see https://tinyurl.com/y5k6fz5w).

    Parameters
    ----------
    data : tvm.relay.Expr
        Float32 tensor to be quantized.
    data_min, data_max : float
        Min/max to use for the data elements.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _quantize_with_zero_centered(
        data, data_min, data_max, zero_centered_uint8_quantized_range, "uint8"
    )
def _quantize_mkldnn_min_max_int8(data, data_min, data_max):
    """Quantize a float32 tensor to int8 with the MKLDNN zero-centered scheme.

    Unlike TFLite, which carries scale/zero-point in the model, MKLDNN stores
    min/max, from which the scale and zero point are derived here
    (see https://tinyurl.com/y5k6fz5w).

    Parameters
    ----------
    data : tvm.relay.Expr
        Float32 tensor to be quantized.
    data_min, data_max : float
        Min/max to use for the data elements.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _quantize_with_zero_centered(
        data, data_min, data_max, zero_centered_int8_quantized_range, "int8"
    )
def get_mkldnn_int8_scale(range_min, range_max):
    """Return the MKLDNN quantization scale (float32) for an int8 target.

    Parameters
    ----------
    range_min, range_max : float32
        Lower/upper end of the tensor to be quantized.

    Returns
    -------
    scale : float32 quantization scale.
    """
    return np.float32(
        _get_mkldnn_scale(range_min, range_max, zero_centered_int8_quantized_range)
    )
def get_mkldnn_uint8_scale(range_min, range_max):
    """Return the MKLDNN quantization scale (float32) for a uint8 target.

    Parameters
    ----------
    range_min, range_max : float32
        Lower/upper end of the tensor to be quantized.

    Returns
    -------
    scale : float32 quantization scale.
    """
    return np.float32(
        _get_mkldnn_scale(range_min, range_max, zero_centered_uint8_quantized_range)
    )
def quantize_conv_weights_bias_channel_mkldnn_from_var(
    weights_var, bias, min_vector_range, max_vector_range, data_scale
):
    """Helper method to quantize the convolution kernel in prequantized model
    in MXNet with MKLDNN. The kernel is always quantized to int8 output datatype.
    The inputs are the raw weights which are floating point numbers. The min and
    max ranges are used from the weight itself. The name supplied is used to create
    a tvm.relay.var with the given name.
    Parameters
    ----------
    weights_var : tvm.relay.var
        The float32 representation of the weights.
    bias : np.array
        The float32 np array for bias.
    min_vector_range : array of float32
        A number representing the minimum of the weights per channel.
    max_vector_range : array of float32
        A number representing the maximum of the weights per channel.
    data_scale : float
        The data scale value.
    Returns
    -------
    result : tvm.relay.expr
        The quantized representation of the weights.
    """
    quantized_range = zero_centered_int8_quantized_range
    # Per-channel symmetric range: scale each output channel independently.
    real_vector_range = np.maximum(np.absolute(min_vector_range), np.absolute(max_vector_range))
    # If real_vector_range is 0, then to avoid division by 0 in scaling,
    # make real_vector INT32_max
    vector_scale = np.where(
        real_vector_range == 0,
        1.0 / float(np.iinfo(np.int32).max),
        np.divide(real_vector_range, quantized_range),
    )
    # Handle bias impact on scales as done by MxNet-MKLDNN.
    if bias is not None:
        # Lower bound on the scale so the int32-quantized bias cannot overflow.
        common = 2.0 * bias.astype("float32") * (1 / data_scale)
        vector_scale_min = np.where(
            bias > 0, common / float(np.iinfo(np.int32).max), common / float(np.iinfo(np.int32).min)
        )
        vector_scale = np.maximum(vector_scale, vector_scale_min)
    zero_point = 0
    # axis=0 requests per-output-channel quantization.
    quantized_output = quantize(
        weights_var,
        relay.const(vector_scale),
        relay.const(zero_point, "int32"),
        axis=0,
        out_dtype="int8",
    )
    return quantized_output, vector_scale, zero_point
def get_mkldnn_requantize_scale_outDtype(min_output_range, max_output_range, out_dtype):
    """Compute the MKLDNN requantize scale for the given output range and dtype."""
    if out_dtype == "int8":
        q_range = zero_centered_int8_quantized_range
    else:
        q_range = zero_centered_uint8_quantized_range
    magnitude = np.max(
        [np.abs(np.float32(min_output_range)), np.abs(np.float32(max_output_range))]
    )
    forward_scale = q_range / magnitude
    return np.float32(1 / forward_scale)
def get_conv_mkldnn_requantized_scale_outDtype(min_output_range, max_output_range):
    """Pick the conv output dtype (uint8 iff the range is non-negative) and its scale."""
    if min_output_range >= 0.0:
        out_dtype = "uint8"
    else:
        out_dtype = "int8"
    scale = get_mkldnn_requantize_scale_outDtype(min_output_range, max_output_range, out_dtype)
    return scale, out_dtype
def quantize_conv_bias_mkldnn_from_var(bias_var, bias_scale):
    """Quantize a conv2d bias tensor to int32 with a zero offset."""
    return quantize(
        data=bias_var,
        output_scale=relay.const(bias_scale),
        output_zero_point=relay.const(0, "int32"),
        axis=0,
        out_dtype="int32",
    )
def quantize_mxnet_min_max(data, min_range, max_range, out_dtype="int8"):
    """Quantizes the given `data` in float32 and the given
    min and max ranges and the output data type.
    Only `int8` and `uint8` is supported as output data types.
    The input data type is expected to be `float32`.
    Mxnet has two different flavors for quantization 1) Default 2)MKLDNN.
    To get the second one Mxnet must be built with MKLDNN during compile time.
    Users can choose either of the implementation for TVM runtime.
    The main difference between the two implementation is that MKLDNN is centered
    around 0 and the default implementation for uint8 is not.
    Parameters
    ----------
    data : tvm.relay.Expr
        The input tensor to be quantized. Can be of type float32.
    min_range : float
        The minimum to use data elements.
    max_range : float
        The maximum to use for data elements.
    out_dtype: str, optional
        The output data type, can be 'int8' or 'uint8'
    Returns
    -------
    result : tvm.relay.Expr
        The computed result.

    Raises
    ------
    ValueError
        If ``out_dtype`` is neither 'int8' nor 'uint8'.
    """
    if out_dtype == "uint8":
        return _quantize_mkldnn_min_max_uint8(data, min_range, max_range)
    elif out_dtype == "int8":
        return _quantize_mkldnn_min_max_int8(data, min_range, max_range)
    else:
        # Bug fix: the original message was missing the f-prefix, so the
        # literal text "{out_dtype}" was emitted instead of the actual value
        # (the sibling dequantize_mxnet_min_max already formats correctly).
        raise ValueError(f"Expected out_dtype to be int8 or uint8 but was {out_dtype}")
def _dequantize_zero_centered(data, data_min, data_max, quantized_range):
    """Dequantize ``data`` with a zero-centered MKLDNN scale.

    The scale is ``max(|data_min|, |data_max|) / quantized_range`` (255 for
    uint8, 127 for int8) and the zero point is fixed at 0.

    Parameters
    ----------
    data : tvm.relay.Expr
        Quantized tensor of type int8 or uint8.
    data_min, data_max : float
        Min/max to use for the data elements.
    quantized_range : float
        Range of the source integer type.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    magnitude = np.max([np.abs(np.float32(data_min)), np.abs(np.float32(data_max))])
    dequant_scale = relay.const(np.divide(magnitude, quantized_range), "float32")
    return dequantize(data, dequant_scale, relay.const(0, "int32"))
def _dequantize_mkldnn_min_max_int8(data, imin_range, imax_range):
    """Dequantize an int8 tensor to float32 with the MKLDNN zero-centered scheme.

    MKLDNN stores min/max rather than scale/zero-point; both are derived here
    from the supplied range (see https://tinyurl.com/y5k6fz5w).

    Parameters
    ----------
    data : tvm.relay.Expr
        Quantized tensor to be dequantized.
    imin_range, imax_range : float
        Min/max to use for the data elements.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _dequantize_zero_centered(
        data, imin_range, imax_range, zero_centered_int8_quantized_range
    )
def _dequantize_mkldnn_min_max_uint8(data, imin_range, imax_range):
    """Dequantize a uint8 tensor to float32 with the MKLDNN zero-centered scheme.

    MKLDNN stores min/max rather than scale/zero-point; both are derived here
    from the supplied range (see https://tinyurl.com/y5k6fz5w).

    Parameters
    ----------
    data : tvm.relay.Expr
        Quantized tensor to be dequantized.
    imin_range, imax_range : float
        Min/max to use for the data elements.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _dequantize_zero_centered(
        data, imin_range, imax_range, zero_centered_uint8_quantized_range
    )
def dequantize_mxnet_min_max(data, min_range, max_range, in_dtype="int8"):
    """Dequantize ``data`` (int8 or uint8) to float32 using min/max ranges.

    MXNet has two dequantization flavors (default and MKLDNN, the latter
    zero-centered); this follows the MKLDNN scheme, deriving the scale and
    zero point from the supplied range.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input tensor to be dequantized.
    min_range : float
        The minimum to use data elements for the output.
    max_range : float
        The maximum to use for data elements for the output.
    in_dtype : str, optional
        The input data type, can be 'int8' or 'uint8'.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.

    Raises
    ------
    ValueError
        If ``in_dtype`` is neither 'int8' nor 'uint8'.
    """
    if in_dtype == "int8":
        return _dequantize_mkldnn_min_max_int8(data, min_range, max_range)
    if in_dtype == "uint8":
        return _dequantize_mkldnn_min_max_uint8(data, min_range, max_range)
    raise ValueError(f"Expected out_dtype to be int8 or uint8 but was {in_dtype}")
| 16,337 | 35.632287 | 110 | py |
tvm | tvm-main/python/tvm/relay/frontend/keras.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, import-outside-toplevel
"""Keras frontend."""
import dis
import sys
import numpy as np
import tvm
from tvm.ir import IRModule, TensorType, TupleType
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from .common import ExprTable, new_var
__all__ = ["from_keras"]
def _check_data_format(keras_layer):
if hasattr(keras_layer, ("data_format")):
if keras_layer.data_format != "channels_last":
raise ValueError("Keras frontend currently supports data_format = channels_last only.")
def _get_pad_pair(input1d, kernel1d, stride1d):
out1d = (input1d + stride1d - 1) // stride1d
pad = np.maximum((out1d - 1) * stride1d + kernel1d - input1d, 0)
pad_before = pad // 2
pad_after = pad - pad_before
return [pad_before, pad_after]
def _get_elu(inexpr, alpha):
    """Build ELU as -alpha * relu(1 - exp(x)) + relu(x)."""
    one = _expr.const(1.0, dtype="float32")
    negative_branch = _op.negative(alpha) * _op.nn.relu(one - _op.exp(inexpr))
    return negative_branch + _op.nn.relu(inexpr)
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
def _convert_recurrent_activation(inexpr, keras_layer):
    """Apply a layer's recurrent_activation to ``inexpr`` by name."""
    activation_name = keras_layer.recurrent_activation.__name__
    return _convert_activation(inexpr, activation_name, None, None, None)
def _convert_activation(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert a Keras activation to Relay.

    *keras_layer* is either a Keras layer object or a bare activation-name
    string (the latter is used when activations are defused from other layers).
    """
    if isinstance(keras_layer, str):
        act_type = keras_layer
    elif sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type == "linear":
        if isinstance(keras_layer, str):
            return inexpr
        # A linear activation on a layer may carry an affine alpha*x + beta.
        alpha = _expr.const(getattr(keras_layer, "alpha", 1.0), dtype="float32")
        beta = _expr.const(getattr(keras_layer, "beta", 0.0), dtype="float32")
        return _op.add(_op.multiply(inexpr, alpha), beta)
    if act_type == "softmax":
        axis = 1 if data_layout == "NCHW" else -1
        return _op.nn.softmax(inexpr, axis)
    # Single-op activations dispatch through a table.
    direct_ops = {
        "sigmoid": _op.sigmoid,
        "tanh": _op.tanh,
        "relu": _op.nn.relu,
    }
    if act_type in direct_ops:
        return direct_ops[act_type](inexpr)
    if act_type == "softplus":
        return _op.log(_op.add(_op.exp(inexpr), _expr.const(1.0, dtype="float32")))
    if act_type == "elu":
        alpha = _expr.const(getattr(keras_layer, "alpha", 1.0), dtype="float32")
        return _get_elu(inexpr, alpha)
    if act_type == "selu":
        # Alpha, Gamma values obtained from https://arxiv.org/abs/1706.02515
        alpha = getattr(keras_layer, "alpha", 1.6732632423543772848170429916717)
        gamma = getattr(keras_layer, "gamma", 1.0507009873554804934193349852946)
        alpha = _expr.const(alpha, dtype="float32")
        gamma = _expr.const(gamma, dtype="float32")
        return gamma * _get_elu(inexpr, alpha)
    if act_type == "relu6":
        return _op.clip(inexpr, a_min=0.0, a_max=6.0)
    if act_type == "softsign":
        return inexpr / (_expr.const(1.0, dtype="float32") + _op.abs(inexpr))
    if act_type == "hard_sigmoid":
        shifted = (_expr.const(0.2, dtype="float32") * inexpr) + _expr.const(0.5, dtype="float32")
        return _op.clip(shifted, a_min=0.0, a_max=1.0)
    raise tvm.error.OpNotImplemented(f"Operator {act_type} is not supported in frontend Keras.")
def _convert_advanced_activation(inexpr, keras_layer, etab, data_layout, input_shape=None):
    """Convert Keras advanced-activation layers (Softmax, ReLU, LeakyReLU,
    ELU, PReLU, ThresholdedReLU) to Relay expressions."""
    act_type = type(keras_layer).__name__
    if input_shape is None:
        input_shape = keras_layer.input_shape
    if act_type == "Softmax":
        axis = keras_layer.axis
        dims = len(input_shape) if input_shape else 0
        if isinstance(axis, list):
            raise tvm.error.OpAttributeUnImplemented(f"Softmax with axes {axis} is not supported.")
        if data_layout == "NCHW":
            # Remap the Keras channels-last axis onto NCHW positions.
            if dims == 0:
                axis = 0
            elif axis == -1:
                axis = 1
            else:
                axis = axis + 1 if axis < dims - 1 else 1
        return _op.nn.softmax(inexpr, axis=axis)
    if act_type == "ReLU":
        if np.isnan(keras_layer.threshold).any():
            raise tvm.error.OpAttributeInvalid("The threshold value of a ReLU cannot be None.")
        threshold = _expr.const(keras_layer.threshold, dtype="float32")
        if keras_layer.max_value and float(keras_layer.threshold) == 0:
            # f(x) = max_value, for x >= max_value
            # f(x) = x, for threshold <= x < max_value
            return _op.clip(inexpr, a_min=0.0, a_max=float(keras_layer.max_value))
        # NOTE(review): this `if` tests the truthiness of a Relay expression
        # built at graph-construction time, not a runtime comparison of data.
        # Presumably it is always truthy whenever max_value is set — confirm
        # the intended semantics before relying on the leaky branch below.
        if keras_layer.max_value and _op.greater(threshold, inexpr).astype("float32"):
            # f(x) = negative_slope * (inexpr - threshold)
            negative_slope = _expr.const(keras_layer.negative_slope, dtype="float32")
            return _op.multiply(negative_slope, _op.subtract(inexpr, threshold))
        return _op.nn.relu(inexpr)
    if act_type == "LeakyReLU":
        if np.isnan(keras_layer.alpha).any():
            raise tvm.error.OpAttributeInvalid("The alpha value of a LeakyReLU cannot be None.")
        return _op.nn.leaky_relu(inexpr, alpha=float(keras_layer.alpha))
    if act_type == "ELU":
        if np.isnan(keras_layer.alpha).any():
            raise tvm.error.OpAttributeInvalid("The alpha value of a ELU cannot be None.")
        alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1.0
        alpha = _expr.const(alpha, dtype="float32")
        return _get_elu(inexpr, alpha)
    if act_type == "PReLU":
        assert hasattr(keras_layer, "alpha"), "alpha required for PReLU."
        _check_data_format(keras_layer)
        size = len(keras_layer.alpha.shape)
        if data_layout == "NCHW":
            # Move the channel dim of the per-channel alpha to the front.
            alpha = etab.new_const(keras_layer.get_weights()[0].transpose(np.roll(range(size), 1)))
        else:
            alpha = etab.new_const(keras_layer.get_weights()[0])
        # PReLU as -alpha * relu(-x) + relu(x).
        return _op.negative(alpha) * _op.nn.relu(_op.negative(inexpr)) + _op.nn.relu(inexpr)
    if act_type == "ThresholdedReLU":
        theta = keras_layer.theta if hasattr(keras_layer, "theta") else 1.0
        # Zero out values <= theta via a boolean mask cast to float.
        return _op.multiply(
            inexpr, _op.greater(inexpr, _expr.const(theta, dtype="float32")).astype("float32")
        )
    raise tvm.error.OpNotImplemented(f"Operator {act_type} is not supported in frontend Keras.")
def _convert_merge(
    inexpr, keras_layer, _, input_shape=None, data_layout=None
):  # pylint: disable=unused-argument
    """Convert Keras merge layers (Dot, Subtract, Add, Multiply, Minimum,
    Maximum, Average) applied to the list of inputs *inexpr*."""
    merge_type = type(keras_layer).__name__
    ret = inexpr[0]
    if merge_type == "Dot":
        axes = keras_layer.axes
        # A single int means the same contraction axis on both operands.
        if isinstance(keras_layer.axes, int):
            axes = [keras_layer.axes, keras_layer.axes]
        if isinstance(axes, list):
            if len(axes) != 2:
                raise tvm.error.OpAttributeUnImplemented(
                    f"Dot with axes {keras_layer.axes} is not supported."
                )
            for i, axis in enumerate(axes):
                # Only contraction over axis 1 or 2 of rank-3 operands is handled.
                if axis not in [1, 2]:
                    raise tvm.error.OpAttributeUnImplemented(
                        f"Dot with axes {keras_layer.axes} is not supported."
                    )
                # Transpose so the contraction axis lands where batch_matmul
                # expects it — presumably matching TVM's A x B^T convention;
                # verify against _op.nn.batch_matmul docs.
                if axes[i] == 2:
                    inexpr[i] = _op.transpose(inexpr[i], axes=[0, 2, 1])
        else:
            raise tvm.error.OpAttributeUnImplemented(
                f"Dot with axes {keras_layer.axes} is not supported."
            )
        ret_dot = _op.nn.batch_matmul(inexpr[0], inexpr[1])
        ret = _op.transpose(ret_dot, axes=[0, 2, 1])
    elif merge_type == "Subtract":
        assert len(inexpr) == 2, "Subtract merge takes 2 inputs."
        ret = _op.subtract(ret, inexpr[1])
    elif merge_type in ["Add", "Multiply", "Minimum", "Maximum"]:
        op_map = {
            "Add": _op.add,
            "Multiply": _op.multiply,
            "Minimum": _op.minimum,
            "Maximum": _op.maximum,
        }
        # Fold the elementwise op across all inputs, left to right.
        for i in range(1, len(inexpr)):
            ret = op_map[merge_type](ret, inexpr[i])
    elif merge_type == "Average":
        # Sum all inputs, then divide by the input count.
        for i in range(1, len(inexpr)):
            ret = _op.add(ret, inexpr[i])
        ret = ret / _expr.const(len(inexpr), dtype="float32")
    else:
        raise tvm.error.OpNotImplemented(
            f"Operator {merge_type} is not supported in frontend Keras."
        )
    return ret
def _convert_permute(
    inexpr, keras_layer, _, input_shape=None, data_layout=None
):  # pylint: disable=unused-argument
    """Convert Permute; Keras dims are 1-based, so the batch axis is prepended."""
    full_axes = (0,) + keras_layer.dims
    return _op.transpose(inexpr, axes=full_axes)
def _convert_embedding(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert Embedding: gather rows of the weight table by the input indices."""
    table = etab.new_const(keras_layer.get_weights()[0])
    return _op.take(table, inexpr.astype("int32"), axis=0)
def _convert_dense(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert a Keras Dense layer to relay nn.dense plus optional bias and
    defused activation."""
    weightList = keras_layer.get_weights()
    # Keras stores the kernel as (in_units, out_units); nn.dense wants (out, in).
    weight = etab.new_const(weightList[0].transpose([1, 0]))
    params = {"weight": weight, "units": weightList[0].shape[1]}
    if input_shape is None:
        input_shape = keras_layer.input_shape
    input_dim = len(input_shape)
    # In case of RNN dense, input shape will be (1, 1, n)
    if input_dim > 2:
        input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
        if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1:
            raise tvm.error.OpAttributeInvalid(
                f"Input shape {input_shape} is not valid for operator Dense."
            )
        # Drop the leading singleton so dense receives a 2-D matrix.
        inexpr = _op.squeeze(inexpr, axis=[0])
    out = _op.nn.dense(data=inexpr, **params)
    if keras_layer.use_bias:
        bias = etab.new_const(weightList[1])
        out = _op.nn.bias_add(out, bias)
    # defuse activation
    if sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type != "linear":
        out = _convert_activation(out, act_type, etab, data_layout)
    if input_dim > 2:
        # Restore the dimension squeezed away above.
        out = _op.expand_dims(out, axis=0)
    return out
def _convert_convolution1d(inexpr, keras_layer, etab, data_layout, input_shape=None):
    """Convert Conv1D / Conv1DTranspose to relay conv1d / conv1d_transpose.

    Only the NWC data layout is supported; other layouts raise
    OpAttributeUnImplemented. SAME padding is computed explicitly via
    _get_pad_pair and passed to the conv op.
    """
    is_deconv = type(keras_layer).__name__ == "Conv1DTranspose"
    if input_shape is None:
        input_shape = keras_layer.input_shape
    _check_data_format(keras_layer)
    weightList = keras_layer.get_weights()
    weight = weightList[0]
    if data_layout == "NWC":
        kernel_layout = "WIO"
        if is_deconv:
            kernel_layout = "WOI"
    else:
        # Non-NWC layouts are not implemented for Conv1D.
        kernel_layout = "OIW"
        if is_deconv:
            kernel_layout = "IOW"
        msg = (
            f"Kernel layout with {kernel_layout} is not supported for operator Convolution1D "
            f"in frontend Keras."
        )
        raise tvm.error.OpAttributeUnImplemented(msg)
    if is_deconv:
        if kernel_layout == "IOW":
            weight = weight.transpose([2, 1, 0])
        kernel_w, n_filters, _ = weight.shape
    else:
        kernel_w, _, n_filters = weight.shape
    dilation_rate = keras_layer.dilation_rate
    if isinstance(dilation_rate, (list, tuple)):
        dilation = [dilation_rate[0]]
    else:
        dilation = [dilation_rate]
    dilated_kernel_w = (kernel_w - 1) * dilation[0] + 1
    stride_w = keras_layer.strides[0]
    params = {
        "weight": etab.new_const(weight),
        "kernel_size": [kernel_w],
        "strides": [stride_w],
        "dilation": dilation,
        "padding": [0],
        "data_layout": data_layout,
        "kernel_layout": kernel_layout,
    }
    params["channels"] = n_filters
    if keras_layer.padding == "valid":
        pass
    # calculate the padding values
    elif keras_layer.padding == "same":
        in_w = input_shape[1]
        pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
        params["padding"] = [pad_w[0], pad_w[1]]
    else:
        # Bug fix: this message previously said "Convolution3D" (copy-paste
        # from the 3-D converter); corrected to name the actual operator.
        msg = (
            f"Padding with {keras_layer.padding} is not supported for operator Convolution1D "
            f"in frontend Keras."
        )
        raise tvm.error.OpAttributeUnImplemented(msg)
    if is_deconv:
        out = _op.nn.conv1d_transpose(data=inexpr, **params)
    else:
        out = _op.nn.conv1d(data=inexpr, **params)
    channel_axis = -1 if data_layout == "NWC" else 1
    if keras_layer.use_bias:
        bias = etab.new_const(weightList[1])
        out = _op.nn.bias_add(out, bias, channel_axis)
    # defuse activation
    if sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type != "linear":
        out = _convert_activation(out, act_type, etab, data_layout)
    return out
def _convert_convolution(inexpr, keras_layer, etab, data_layout, input_shape=None):
    """Convert Conv2D / Conv2DTranspose / DepthwiseConv2D to the matching
    relay conv2d op, including weight-layout transposes, explicit SAME
    padding, bias, and defused activation."""
    _check_data_format(keras_layer)
    is_deconv = type(keras_layer).__name__ == "Conv2DTranspose"
    is_depthconv = type(keras_layer).__name__ == "DepthwiseConv2D"
    weightList = keras_layer.get_weights()
    weight = weightList[0]
    if input_shape is None:
        input_shape = keras_layer.input_shape
    # Pick the kernel layout expected by relay for each (data layout, op) pair.
    if data_layout == "NHWC":
        if is_depthconv:
            kernel_layout = "HWOI"
        elif is_deconv:
            kernel_layout = "HWOI"
        else:
            kernel_layout = "HWIO"
    else:
        if is_deconv:
            kernel_layout = "IOHW"
        else:
            kernel_layout = "OIHW"
    # Keras kernels are stored channels-last; transpose where relay needs
    # a channels-first kernel layout.
    if is_deconv:
        kernel_h, kernel_w, n_filters, in_channels = weight.shape
        if kernel_layout == "IOHW":
            weight = weight.transpose([3, 2, 0, 1])
    elif is_depthconv:
        kernel_h, kernel_w, in_channels, depth_mult = weight.shape
        if kernel_layout == "OIHW":
            weight = weight.transpose([2, 3, 0, 1])
    elif data_layout == "NCHW":
        kernel_h, kernel_w, in_channels, n_filters = weight.shape
        weight = weight.transpose([3, 2, 0, 1])
    else:
        kernel_h, kernel_w, in_channels, n_filters = weight.shape
    if isinstance(keras_layer.dilation_rate, (list, tuple)):
        dilation = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
    else:
        dilation = [keras_layer.dilation_rate, keras_layer.dilation_rate]
    # Effective kernel extent under dilation, used for SAME padding below.
    dilated_kernel_h = (kernel_h - 1) * dilation[0] + 1
    dilated_kernel_w = (kernel_w - 1) * dilation[1] + 1
    stride_h, stride_w = keras_layer.strides
    params = {
        "weight": etab.new_const(weight),
        "kernel_size": [kernel_h, kernel_w],
        "strides": [stride_h, stride_w],
        "dilation": dilation,
        "padding": [0, 0],
        "data_layout": data_layout,
        "kernel_layout": kernel_layout,
    }
    if is_depthconv:
        # Depthwise: groups == input channels.
        params["channels"] = in_channels * depth_mult
        params["groups"] = in_channels
    else:
        params["channels"] = n_filters
    if is_deconv and keras_layer.output_padding:
        params["output_padding"] = keras_layer.output_padding
    if keras_layer.padding == "valid":
        pass
    # we insert a separate pad operator
    elif keras_layer.padding == "same":
        in_h = input_shape[1]
        in_w = input_shape[2]
        pad_t, pad_b = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
        pad_l, pad_r = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
        params["padding"] = (pad_t, pad_l, pad_b, pad_r)
    else:
        msg = (
            f"Padding with {keras_layer.padding} is not supported for operator Convolution "
            f"in frontend Keras."
        )
        raise tvm.error.OpAttributeUnImplemented(msg)
    if is_deconv:
        out = _op.nn.conv2d_transpose(data=inexpr, **params)
    else:
        out = _op.nn.conv2d(data=inexpr, **params)
    if keras_layer.use_bias:
        bias = etab.new_const(weightList[1])
        if data_layout == "NCHW":
            out = _op.nn.bias_add(out, bias)
        else:
            out = _op.nn.bias_add(out, bias, axis=-1)
    # defuse activation
    if sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type != "linear":
        out = _convert_activation(out, act_type, etab, data_layout)
    return out
def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=None):
    """Convert Conv3D / Conv3DTranspose to relay conv3d / conv3d_transpose.

    Only the NDHWC data layout is supported; other layouts raise
    OpAttributeUnImplemented.
    """
    _check_data_format(keras_layer)
    is_deconv = type(keras_layer).__name__ == "Conv3DTranspose"
    weightList = keras_layer.get_weights()
    weight = weightList[0]
    if input_shape is None:
        input_shape = keras_layer.input_shape
    if data_layout == "NDHWC":
        kernel_layout = "DHWIO"
        if is_deconv:
            kernel_layout = "DHWOI"
    else:
        # Non-NDHWC layouts are not implemented for Conv3D.
        kernel_layout = "OIDHW"
        if is_deconv:
            kernel_layout = "IODHW"
        msg = (
            f"Kernel layout with {kernel_layout} is not supported for operator Convolution3D "
            f"in frontend Keras."
        )
        raise tvm.error.OpAttributeUnImplemented(msg)
    if is_deconv:
        kernel_d, kernel_h, kernel_w, n_filters, _ = weight.shape
        if kernel_layout == "IODHW":
            weight = weight.transpose([4, 3, 0, 1, 2])
    else:
        kernel_d, kernel_h, kernel_w, _, n_filters = weight.shape
    dilation_rate = keras_layer.dilation_rate
    if isinstance(dilation_rate, (list, tuple)):
        dilation = [dilation_rate[0], dilation_rate[1], dilation_rate[2]]
    else:
        dilation = [dilation_rate, dilation_rate, dilation_rate]
    # Effective kernel extents under dilation, used for SAME padding below.
    dilated_kernel_d = (kernel_d - 1) * dilation[0] + 1
    dilated_kernel_h = (kernel_h - 1) * dilation[1] + 1
    dilated_kernel_w = (kernel_w - 1) * dilation[2] + 1
    stride_d, stride_h, stride_w = keras_layer.strides
    params = {
        "weight": etab.new_const(weight),
        "kernel_size": [kernel_d, kernel_h, kernel_w],
        "strides": [stride_d, stride_h, stride_w],
        "dilation": dilation,
        "padding": [0, 0, 0],
        "data_layout": data_layout,
        "kernel_layout": kernel_layout,
    }
    params["channels"] = n_filters
    if is_deconv and keras_layer.output_padding:
        params["output_padding"] = keras_layer.output_padding
    if keras_layer.padding == "valid":
        pass
    # calculate the padding values
    elif keras_layer.padding == "same":
        in_d = input_shape[1]
        in_h = input_shape[2]
        in_w = input_shape[3]
        pad_d = _get_pad_pair(in_d, dilated_kernel_d, stride_d)
        pad_h = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
        pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
        params["padding"] = [pad_d[0], pad_h[0], pad_w[0], pad_d[1], pad_h[1], pad_w[1]]
    else:
        msg = (
            f"Padding with {keras_layer.padding} is not supported for operator Convolution3D "
            f"in frontend Keras."
        )
        raise tvm.error.OpAttributeUnImplemented(msg)
    if is_deconv:
        out = _op.nn.conv3d_transpose(data=inexpr, **params)
    else:
        out = _op.nn.conv3d(data=inexpr, **params)
    channel_axis = -1 if data_layout == "NDHWC" else 1
    if keras_layer.use_bias:
        bias = etab.new_const(weightList[1])
        out = _op.nn.bias_add(out, bias, channel_axis)
    # defuse activation
    if sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type != "linear":
        out = _convert_activation(out, act_type, etab, None)
    return out
def _convert_separable_convolution(inexpr, keras_layer, etab, data_layout, input_shape=None):
    """Convert SeparableConv2D as a depthwise conv2d followed by a 1x1
    pointwise conv2d, then optional bias and defused activation."""
    _check_data_format(keras_layer)
    if data_layout == "NHWC":
        kernel_layout = "HWOI"
    else:
        kernel_layout = "OIHW"
    if input_shape is None:
        input_shape = keras_layer.input_shape
    weightList = keras_layer.get_weights()
    # depthwise conv
    kernel_h, kernel_w, in_channels, depth_mult = weightList[0].shape
    stride_h, stride_w = keras_layer.strides
    # Keras stores the depthwise kernel channels-last; transpose for OIHW.
    if kernel_layout == "OIHW":
        weight0 = weightList[0].transpose([2, 3, 0, 1])
    else:
        weight0 = weightList[0]
    if isinstance(keras_layer.dilation_rate, (list, tuple)):
        dilation = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
    else:
        dilation = [keras_layer.dilation_rate, keras_layer.dilation_rate]
    params0 = {
        "weight": etab.new_const(weight0),
        "channels": in_channels * depth_mult,
        "groups": in_channels,
        "kernel_size": [kernel_h, kernel_w],
        "strides": [stride_h, stride_w],
        "dilation": dilation,
        "padding": [0, 0],
        "data_layout": data_layout,
        "kernel_layout": kernel_layout,
    }
    if keras_layer.padding == "valid":
        pass
    # we insert a separate pad operator
    elif keras_layer.padding == "same":
        in_h = input_shape[1]
        in_w = input_shape[2]
        pad_t, pad_b = _get_pad_pair(in_h, kernel_h, stride_h)
        pad_l, pad_r = _get_pad_pair(in_w, kernel_w, stride_w)
        params0["padding"] = (pad_t, pad_l, pad_b, pad_r)
    else:
        msg = (
            f"Padding with {keras_layer.padding} is not supported for operator Separable "
            f"Convolution in frontend Keras."
        )
        raise tvm.error.OpAttributeUnImplemented(msg)
    depthconv = _op.nn.conv2d(data=inexpr, **params0)
    # pointwise conv
    if kernel_layout == "OIHW":
        weight1 = weightList[1].transpose([3, 2, 0, 1])
    else:
        weight1 = weightList[1]
        # Pointwise kernel keeps the Keras channels-last ordering in NHWC.
        kernel_layout = "HWIO"
    params1 = {
        "weight": etab.new_const(weight1),
        "channels": weightList[1].shape[3],
        "groups": 1,
        "kernel_size": [1, 1],
        "strides": [1, 1],
        "dilation": [1, 1],
        "data_layout": data_layout,
        "kernel_layout": kernel_layout,
    }
    out = _op.nn.conv2d(data=depthconv, **params1)
    if keras_layer.use_bias:
        bias = etab.new_const(weightList[2])
        if data_layout == "NCHW":
            out = _op.nn.bias_add(out, bias)
        else:
            out = _op.nn.bias_add(out, bias, axis=-1)
    # defuse activation
    if sys.version_info.major < 3:
        act_type = keras_layer.activation.func_name
    else:
        act_type = keras_layer.activation.__name__
    if act_type != "linear":
        out = _convert_activation(out, act_type, etab, data_layout)
    return out
def _convert_flatten(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Flatten to (batch, -1), first moving NCHW data back to NHWC so the
    element order matches what a following Keras Dense layer expects."""
    _check_data_format(keras_layer)
    # NCHW -> NHWC so that dense can be correctly converted
    data = inexpr
    if data_layout == "NCHW":
        data = _op.transpose(data, axes=[0, 2, 3, 1])
    return _op.nn.batch_flatten(data)
def _convert_pooling(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert 2-D pooling layers (MaxPooling2D, AveragePooling2D and their
    global variants) to the corresponding relay pooling ops."""
    _check_data_format(keras_layer)
    pool_type = type(keras_layer).__name__
    # global pool in keras = global pool + flatten in relay
    global_pool_params = {"layout": data_layout}
    if input_shape is None:
        input_shape = keras_layer.input_shape
    if pool_type == "GlobalMaxPooling2D":
        return _convert_flatten(
            _op.nn.global_max_pool2d(inexpr, **global_pool_params), keras_layer, etab, data_layout
        )
    if pool_type == "GlobalAveragePooling2D":
        global_avg_pool2d = _op.nn.global_avg_pool2d(inexpr, **global_pool_params)
        # keepdims=True in Keras shows up as equal input/output ranks.
        keep_dims = len(keras_layer.input.shape) == len(keras_layer.output.shape)
        if keep_dims:
            return global_avg_pool2d
        return _convert_flatten(global_avg_pool2d, keras_layer, etab, data_layout)
    pool_h, pool_w = keras_layer.pool_size
    stride_h, stride_w = keras_layer.strides
    params = {
        "pool_size": [pool_h, pool_w],
        "strides": [stride_h, stride_w],
        "padding": [0, 0],
        "layout": data_layout,
    }
    if keras_layer.padding == "valid":
        pass
    elif keras_layer.padding == "same":
        # Compute explicit SAME padding per spatial dim.
        in_h = input_shape[1]
        in_w = input_shape[2]
        pad_t, pad_b = _get_pad_pair(in_h, pool_h, stride_h)
        pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
        params["padding"] = [pad_t, pad_l, pad_b, pad_r]
    else:
        raise tvm.error.OpAttributeUnImplemented(
            f"Padding with {keras_layer.padding} is not supported in operator Pooling."
        )
    if pool_type == "MaxPooling2D":
        return _op.nn.max_pool2d(inexpr, **params)
    if pool_type == "AveragePooling2D":
        # Keras excludes padded cells from the average.
        params["count_include_pad"] = False
        return _op.nn.avg_pool2d(inexpr, **params)
    raise tvm.error.OpNotImplemented(f"Operator {keras_layer} is not supported for frontend Keras.")
def _convert_pooling3d(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert MaxPooling3D / AveragePooling3D; the pooling itself is done in
    NCDHW with transposes on either side."""
    _check_data_format(keras_layer)
    pool_type = type(keras_layer).__name__
    if input_shape is None:
        input_shape = keras_layer.input_shape
    if pool_type not in ["MaxPooling3D", "AveragePooling3D"]:
        raise tvm.error.OpNotImplemented(
            f"Operator {keras_layer} is not supported for frontend Keras."
        )
    pool_d1, pool_d2, pool_d3 = keras_layer.pool_size
    stride_d1, stride_d2, stride_d3 = keras_layer.strides
    params = {
        "pool_size": [pool_d1, pool_d2, pool_d3],
        "strides": [stride_d1, stride_d2, stride_d3],
        "padding": [0, 0, 0],
        "layout": data_layout,
    }
    if keras_layer.padding == "valid":
        pass
    elif keras_layer.padding == "same":
        # Explicit SAME padding, ordered (front..., back...) for relay.
        in_d1 = input_shape[1]
        in_d2 = input_shape[2]
        in_d3 = input_shape[3]
        pad_d1 = _get_pad_pair(in_d1, pool_d1, stride_d1)
        pad_d2 = _get_pad_pair(in_d2, pool_d2, stride_d2)
        pad_d3 = _get_pad_pair(in_d3, pool_d3, stride_d3)
        params["padding"] = [pad_d1[0], pad_d2[0], pad_d3[0], pad_d1[1], pad_d2[1], pad_d3[1]]
    else:
        raise tvm.error.OpAttributeUnImplemented(
            f"Padding with {keras_layer.padding} is not supported in operator Pooling3D."
        )
    # NDHWC -> NCDHW for the pooling op, then back afterwards.
    out = _op.transpose(inexpr, axes=(0, 4, 1, 2, 3))
    params["layout"] = "NCDHW"
    if pool_type == "MaxPooling3D":
        out = _op.nn.max_pool3d(out, **params)
    elif pool_type == "AveragePooling3D":
        out = _op.nn.avg_pool3d(out, **params)
    return _op.transpose(out, axes=(0, 2, 3, 4, 1))
def _convert_global_pooling3d(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert GlobalMaxPooling3D / GlobalAveragePooling3D.

    Keras global 3-D pooling collapses all spatial dims and returns a
    (batch, channels) tensor, so the relay translation is a global pool
    followed by a batch_flatten.
    """
    _check_data_format(keras_layer)
    pool_type = type(keras_layer).__name__
    global_pool_params = {"layout": data_layout}
    if pool_type == "GlobalMaxPooling3D":
        out = _op.nn.global_max_pool3d(inexpr, **global_pool_params)
    elif pool_type == "GlobalAveragePooling3D":
        out = _op.nn.global_avg_pool3d(inexpr, **global_pool_params)
    else:
        raise tvm.error.OpNotImplemented(
            f"Operator {keras_layer} is not supported for frontend Keras."
        )
    # Bug fix: arguments were previously passed as (..., etab, input_shape,
    # data_layout), which shifted input_shape into _convert_flatten's
    # data_layout parameter (cf. the correct call order used by the 2-D
    # global pools above). Pass them in the declared order.
    return _convert_flatten(out, keras_layer, etab, data_layout, input_shape)
def _convert_upsample(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert UpSampling1D / UpSampling2D to relay nn.upsampling."""
    _check_data_format(keras_layer)
    upsample_type = type(keras_layer).__name__
    params = {}
    if upsample_type == "UpSampling1D":
        # NOTE(review): the 1-D case only sets scale_h and is routed through
        # the 2-D upsampling op below — presumably relying on the 1-D input
        # being treated as a degenerate 2-D tensor; confirm with nn.upsampling.
        h = keras_layer.size
        params["scale_h"] = h
    elif upsample_type == "UpSampling2D":
        h, w = keras_layer.size
        params["scale_h"] = h
        params["scale_w"] = w
        # Older Keras versions lack the interpolation attribute.
        if hasattr(keras_layer, "interpolation"):
            interpolation = keras_layer.interpolation
            if interpolation == "nearest":
                params["method"] = "nearest_neighbor"
            else:
                params["method"] = "bilinear"
    else:
        raise tvm.error.OpNotImplemented(
            f"Operator {upsample_type} is not supported for frontend Keras."
        )
    params["layout"] = data_layout
    out = _op.nn.upsampling(inexpr, **params)
    return out
def _convert_upsample3d(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert UpSampling3D to relay nn.upsampling3d."""
    _check_data_format(keras_layer)
    scale_d, scale_h, scale_w = keras_layer.size
    params = {
        "scale_d": scale_d,
        "scale_h": scale_h,
        "scale_w": scale_w,
        "layout": data_layout,
        "coordinate_transformation_mode": "asymmetric",
    }
    return _op.nn.upsampling3d(inexpr, **params)
def _convert_cropping(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert Cropping2D into a strided_slice over the two spatial dims."""
    _check_data_format(keras_layer)
    crop_type = type(keras_layer).__name__
    if input_shape is None:
        input_shape = keras_layer.input_shape
    if crop_type != "Cropping2D":
        raise tvm.error.OpNotImplemented(
            f"Operator {crop_type} is not supported for frontend Keras."
        )
    (_, in_h, in_w, _) = input_shape
    ((crop_t, crop_b), (crop_l, crop_r)) = keras_layer.cropping
    # int32 max stands in for "to the end" on the untouched axes.
    int32_max = np.iinfo(np.int32).max
    if data_layout == "NHWC":
        begin = [0, crop_t, crop_l, 0]
        end = [int32_max, in_h - crop_b, in_w - crop_r, int32_max]
    else:
        begin = [0, 0, crop_t, crop_l]
        end = [int32_max, int32_max, in_h - crop_b, in_w - crop_r]
    return _op.strided_slice(inexpr, begin=begin, end=end)
def _convert_batchnorm(inexpr, keras_layer, etab, data_layout, input_shape=None):
    """Convert BatchNormalization using its stored moving statistics."""
    if input_shape is None:
        input_shape = keras_layer.input_shape
    # Channel axis: 1 for NCHW or low-rank inputs, 3 (channels-last) otherwise.
    axis = 1 if data_layout == "NCHW" or len(input_shape) < 4 else 3
    params = {"scale": False, "center": False, "epsilon": keras_layer.epsilon, "axis": axis}
    weights = keras_layer.get_weights()
    # Weights appear in order: [gamma?] [beta?] moving_mean moving_var.
    idx = 0
    if keras_layer.scale:
        params["scale"] = True
        params["gamma"] = etab.new_const(weights[idx])
        idx += 1
    if keras_layer.center:
        params["center"] = True
        params["beta"] = etab.new_const(weights[idx])
        idx += 1
    moving_mean = weights[idx]
    moving_var = weights[idx + 1]
    params["moving_mean"] = etab.new_const(moving_mean)
    params["moving_var"] = etab.new_const(moving_var)
    # in case beta or gamma is not defined
    if "beta" not in params:
        params["beta"] = etab.new_const(np.zeros(moving_mean.shape))
    if "gamma" not in params:
        params["gamma"] = etab.new_const(np.ones(moving_mean.shape))
    # batch_norm yields (normalized, mean, var); only the data output is used.
    result, _, _ = _op.nn.batch_norm(inexpr, **params)
    return result
def _convert_padding(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert ZeroPadding2D into an explicit nn.pad."""
    _check_data_format(keras_layer)
    padding_type = type(keras_layer).__name__
    padding = keras_layer.padding
    if padding_type != "ZeroPadding2D":
        raise tvm.error.OpNotImplemented(
            f"Operator {padding_type} is not supported in frontend Keras."
        )
    top = left = bottom = right = 0
    if isinstance(padding, int):
        # Single int: identical padding on all four sides.
        top = left = bottom = right = padding
    elif isinstance(padding, tuple) and isinstance(padding[0], int):
        # (sym_h, sym_w): symmetric padding per spatial dim.
        top, left = padding
        bottom, right = padding
    elif isinstance(padding, tuple) and isinstance(padding[0], tuple):
        # ((top, bottom), (left, right)): fully explicit.
        top, bottom = padding[0]
        left, right = padding[1]
    else:
        raise tvm.error.OpAttributeInvalid(
            f'Value {str(padding)} in attribute "padding" of operator Padding is not valid.'
        )
    if data_layout == "NCHW":
        return _op.nn.pad(data=inexpr, pad_width=((0, 0), (0, 0), (top, bottom), (left, right)))
    return _op.nn.pad(data=inexpr, pad_width=((0, 0), (top, bottom), (left, right), (0, 0)))
def _convert_padding3d(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert ZeroPadding3D into nn.pad."""
    _check_data_format(keras_layer)
    padding = keras_layer.padding
    # padding can be 'int' or 'tuple of 3 ints' or 'tuple of 3 tuples of 2 ints' or 'tuple
    # of 3 tuples of 2 ints different values'. In all these scenarios keras will send 3
    # tuples of 2 ints.
    if not (isinstance(padding, tuple) and isinstance(padding[0], tuple)):
        raise tvm.error.OpAttributeInvalid(
            f'Value {str(padding)} in attribute "padding" of operator ZeroPadding3D is not valid.'
        )
    d_pad, h_pad, w_pad = padding[0], padding[1], padding[2]
    spatial = ((d_pad[0], d_pad[1]), (h_pad[0], h_pad[1]), (w_pad[0], w_pad[1]))
    if data_layout == "NCDHW":
        pad_width = ((0, 0), (0, 0)) + spatial
    else:
        pad_width = ((0, 0),) + spatial + ((0, 0),)
    return _op.nn.pad(data=inexpr, pad_width=pad_width)
def _convert_concat(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert Concatenate, remapping the Keras axis when data is NCHW."""
    _check_data_format(keras_layer)
    if input_shape is None:
        input_shape = keras_layer.input_shape
    axis = keras_layer.axis
    if data_layout == "NCHW":  # need_transpose
        # Inputs were moved to NCHW, so shift the channels-last axis index.
        dims = len(input_shape[0])
        if axis == -1 or axis >= dims:
            axis = 1
        else:
            axis = axis + 1
    return _op.concatenate(_as_list(inexpr), axis=axis)
def _convert_reshape(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert Reshape; for NCHW data the reshape happens in NHWC order so the
    element order matches Keras semantics."""
    _check_data_format(keras_layer)
    if input_shape is None:
        input_shape = keras_layer.input_shape
    inshape = input_shape  # includes batch
    tshape = keras_layer.target_shape  # no batch
    shape = (-1,) + tshape
    if data_layout != "NCHW" or (len(inshape) <= 3 and len(tshape) <= 2):
        return _op.reshape(inexpr, newshape=shape)
    # Perform reshape in original NHWC format, then transpose back.
    nhwc = _op.transpose(inexpr, [0] + list(range(2, len(inshape))) + [1])
    reshaped = _op.reshape(nhwc, newshape=shape)
    return _op.transpose(reshaped, axes=[0, -1] + list(range(1, len(shape) - 1)))
def _convert_lstm(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Unroll a Keras LSTM layer into an explicit per-time-step Relay graph.

    Returns [output, next_h, next_c] so downstream layers can consume either
    the sequence/last output or the final cell state.
    """
    _check_data_format(keras_layer)
    if input_shape is None:
        input_shape = keras_layer.input_shape
    if not isinstance(inexpr, list):
        # No explicit initial state was provided: start from zeros.
        buf = np.zeros((1, keras_layer.units), "float32")
        c_op = etab.new_const(buf)
        h_op = etab.new_const(buf)
        inexpr = [inexpr, h_op, c_op]
    in_data = inexpr[0]
    next_h = inexpr[1]
    next_c = inexpr[2]
    weightList = keras_layer.get_weights()
    in_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
    # Keras kernels are (in, 4*units); nn.dense wants (out, in).
    kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
    recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
    if keras_layer.use_bias:
        in_bias = etab.new_const(weightList[2])
    if keras_layer.go_backwards:
        # Process the sequence in reverse time order.
        in_data = _op.reverse(in_data, axis=1)
    units = list(weightList[0].shape)[1]
    time_steps = in_shape[1]
    in_data = _op.squeeze(in_data, axis=[0])
    in_data = _op.split(in_data, indices_or_sections=time_steps, axis=0)
    # loop for the number of time_steps
    out_list = []  # store h outputs in case return_sequences is True
    for data in in_data:
        # Input and recurrent projections of all four gates at once.
        ixh1 = _op.nn.dense(data, kernel_weight, units=units)
        ixh2 = _op.nn.dense(next_h, recurrent_weight, units=units)
        if keras_layer.use_bias:
            ixh2 = _op.nn.bias_add(ixh2, bias=in_bias)
        gate = ixh1 + ixh2
        # Gate order in the fused projection: input, forget, candidate, output.
        gates = _op.split(gate, indices_or_sections=4, axis=1)
        in_gate = _convert_recurrent_activation(gates[0], keras_layer)
        in_transform = _convert_recurrent_activation(gates[1], keras_layer)
        next_c = in_transform * next_c + in_gate * _convert_activation(
            gates[2], keras_layer, etab, data_layout
        )
        out_gate = _convert_recurrent_activation(gates[3], keras_layer)
        next_h = out_gate * _convert_activation(next_c, keras_layer, etab, data_layout)
        if keras_layer.return_sequences:
            out_list.append(_op.expand_dims(next_h, axis=1))
    out = _op.concatenate(out_list, axis=1) if keras_layer.return_sequences else next_h
    out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
    out = _op.reshape(out, newshape=out_shape)
    return [out, next_h, next_c]
def _convert_simple_rnn(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert a single-step SimpleRNN cell: act(x·W + b + h·U).

    Returns [output, output] so the same value serves as both the layer
    output and the next hidden state.
    """
    _check_data_format(keras_layer)
    if not isinstance(inexpr, list):
        # No explicit previous state: start from zeros.
        buf = np.zeros((1, keras_layer.units), "float32")
        prev_op = etab.new_const(buf)
        inexpr = [inexpr, prev_op]
    in_data = inexpr[0]
    prev_op = inexpr[1]
    weightList = keras_layer.get_weights()
    # Keras kernels are (in, units); nn.dense wants (out, in).
    kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
    recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
    if keras_layer.use_bias:
        in_bias = etab.new_const(weightList[2])
    units = list(weightList[0].shape)[1]
    in_data = _op.nn.batch_flatten(in_data)
    ixh = _op.nn.dense(in_data, kernel_weight, units=units)
    if keras_layer.use_bias:
        ixh = _op.nn.bias_add(ixh, bias=in_bias)
    prev_op = _op.nn.batch_flatten(prev_op)
    ixh2 = _op.nn.dense(prev_op, recurrent_weight, units=units)
    output = ixh + ixh2
    output = _convert_activation(output, keras_layer, etab, data_layout)
    # Reshape to the layer's declared output shape (None dims become 1).
    out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
    output = _op.reshape(output, newshape=out_shape)
    return [output, output]
def _convert_gru(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert one Keras GRU step; inputs are [x_t, h_{t-1}], returns [h_t, h_t]."""
    _check_data_format(keras_layer)
    if not isinstance(inexpr, list):
        # No explicit previous state supplied: start from an all-zero hidden state.
        buf = np.zeros((1, keras_layer.units), "float32")
        h_tm1 = etab.new_const(buf)
        inexpr = [inexpr, h_tm1]
    in_data = inexpr[0]
    h_tm1_op = inexpr[1]
    # Keras weight order: kernel, recurrent kernel, optional bias; transpose for dense.
    weightList = keras_layer.get_weights()
    kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
    recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
    if keras_layer.use_bias:
        in_bias = etab.new_const(weightList[2])
    units = list(weightList[0].shape)[1]
    in_data = _op.nn.batch_flatten(in_data)
    matrix_x = _op.nn.dense(in_data, kernel_weight, units=units)
    if keras_layer.use_bias:
        matrix_x = _op.nn.bias_add(matrix_x, in_bias)
    # inputs projected by all gate matrices at once; gate order is z, r, h
    split_indices = [keras_layer.units, 2 * keras_layer.units]
    gates = _op.split(matrix_x, indices_or_sections=split_indices, axis=1)
    x_z = gates[0]
    x_r = gates[1]
    x_h = gates[2]
    # hidden state projected separately for update/reset and new
    units = 2 * keras_layer.units
    split_indices = [units]
    rec_weights = _op.split(recurrent_weight, indices_or_sections=split_indices, axis=0)
    h_tm1_op = _op.nn.batch_flatten(h_tm1_op)
    matrix_inner = _op.nn.dense(h_tm1_op, rec_weights[0], units=units)
    split_indices = [keras_layer.units]
    recurrent = _op.split(matrix_inner, indices_or_sections=split_indices, axis=1)
    recurrent_z = recurrent[0]
    recurrent_r = recurrent[1]
    # Update gate z and reset gate r via the recurrent activation (e.g. sigmoid).
    rec_act_z = _convert_recurrent_activation(x_z + recurrent_z, keras_layer)
    rec_act_r = _convert_recurrent_activation(x_r + recurrent_r, keras_layer)
    units = keras_layer.units
    # Candidate state uses the reset-gated previous hidden state.
    recurrent_h = _op.nn.dense(rec_act_r * h_tm1_op, rec_weights[1], units=units)
    act_hh = _convert_activation(x_h + recurrent_h, keras_layer, etab, data_layout)
    # previous and candidate state mixed by update gate
    output = rec_act_z * h_tm1_op + (_expr.const(1.0, dtype="float32") - rec_act_z) * act_hh
    out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
    output = _op.reshape(output, newshape=out_shape)
    return [output, output]
def _convert_repeat_vector(
    inexpr, keras_layer, etab, data_layout, input_shape=None
):  # pylint: disable=unused-argument
    """Convert Keras RepeatVector: tile the input n times along a new axis 1."""
    if input_shape is None:
        input_shape = keras_layer.input_shape
    n_repeat = keras_layer.n
    # Repeat along the batch axis, then fold the copies into a fresh time axis.
    repeated = _op.repeat(inexpr, repeats=n_repeat, axis=0)
    new_shape = [-1, n_repeat] + list(input_shape)[1:]
    return _op.reshape(repeated, new_shape)
def _convert_l2_normalize(inexpr, keras_layer, data_layout):
    """Recover the `axis` argument of a ``K.l2_normalize`` lambda by
    disassembling its bytecode, then emit the equivalent relay op."""
    l2_normalize_is_loaded = False
    param_list = []
    # Walk the lambda's bytecode, collecting the arguments passed to l2_normalize.
    for i in dis.get_instructions(keras_layer.function):
        if i.opname in ["LOAD_GLOBAL", "LOAD_DEREF"]:
            continue
        if i.opname in ["LOAD_ATTR", "LOAD_METHOD"]:
            if i.argval == "l2_normalize":
                assert not l2_normalize_is_loaded, "l2_normalize was already LOADED"
                l2_normalize_is_loaded = True
        elif i.opname in ["LOAD_CONST", "LOAD_FAST"] and l2_normalize_is_loaded:
            param_list.append(i.argval)
        elif i.opname == "BUILD_LIST":
            # Re-assemble a literal list argument (e.g. axis=[2, 3]) from its items.
            sz = i.argval
            assert len(param_list) >= sz
            new_list = param_list[-sz:]
            param_list = param_list[:-sz]
            param_list.append(new_list)
        elif i.opname in ["CALL_FUNCTION_KW", "CALL_METHOD"]:
            break
    axis = None
    is_param_list_parsed = False
    if l2_normalize_is_loaded and len(param_list) > 0:
        # last param_list item is tuple of strings means that
        # lambda uses named parameters when calling l2_normalize
        if (
            isinstance(param_list[-1], tuple)
            and len(param_list[-1]) > 0
            and isinstance(param_list[-1][0], str)
        ):
            param_names = param_list[-1]
            if len(param_names) == 1 and param_names[0] == "x":
                # lambda v: K.l2_normalize(x=v)
                axis = None
                is_param_list_parsed = True
            elif len(param_names) == 1 and param_names[0] == "axis" and len(param_list) == 3:
                # lambda x: K.l2_normalize(x, axis=(2,3))
                axis = param_list[1]
                is_param_list_parsed = True
            elif len(param_names) == 2 and len(param_list) == 3:
                # lambda x: K.l2_normalize(x=x, axis=(2,3))
                # lambda x: K.l2_normalize(axis=(2,3), x=x)
                axis = param_list[param_names.index("axis")]
                is_param_list_parsed = True
        else:
            # lambda x: K.l2_normalize(x)
            if len(param_list) == 1:
                axis = None
                is_param_list_parsed = True
            # lambda x: K.l2_normalize(x, (2,3))
            elif len(param_list) == 2:
                axis = param_list[1]
                is_param_list_parsed = True
    def is_int_or_tuple_of_ints(v):
        # Accept an int, a non-empty list of ints, or a non-empty tuple of ints
        # (the tuple case only checks the first element).
        if isinstance(v, list) and len(v) > 0:
            for i in v:
                if not isinstance(i, int):
                    return False
            return True
        if isinstance(v, tuple) and len(v) > 0:
            return isinstance(v[0], int)
        return isinstance(v, int)
    assert is_param_list_parsed and (
        axis is None or is_int_or_tuple_of_ints(axis)
    ), "Can not parse l2_normalize lambda function found in Lambda layer"
    if isinstance(axis, int):
        axis = [axis]
    if data_layout == "NCHW":
        dims = len(keras_layer.input_shape)
        # Axes were written against the channels-last layout; remap for NCHW.
        def fix_axis_for_nchw(axis):
            if axis == 0:
                return 0
            if axis in [(dims - 1), -1]:
                return 1
            return axis + 1
        axis = [fix_axis_for_nchw(x) for x in axis]
    return _op.nn.l2_normalize(inexpr, eps=1e-12, axis=axis)
def _convert_lambda(inexpr, keras_layer, _, data_layout):
    """Convert a Keras Lambda layer; only K.l2_normalize lambdas are supported."""
    fcode = keras_layer.function.__code__
    calls_l2_normalize = (
        fcode.co_name == "<lambda>"
        and len(fcode.co_names) > 0
        and fcode.co_names[-1] == "l2_normalize"
    )
    if calls_l2_normalize:
        return _convert_l2_normalize(inexpr, keras_layer, data_layout)
    raise tvm.error.OpNotImplemented(
        f"Function {fcode.co_names} used in Lambda layer is not supported in frontend Keras."
    )
def _convert_time_distributed(inexpr, keras_layer, etab, data_layout, input_shape=None):
    """Convert a Keras TimeDistributed wrapper layer."""
    # TimeDistributed: split input tensor along the second dimension (assumed to be time),
    # apply inner layer to each split individually,
    # and then combine the results
    if input_shape is None:
        input_shape = keras_layer.input_shape
    assert len(input_shape) >= 2, "Input to TimeDistributed must have at least two dimensions"
    inner_layer = keras_layer.layer
    # The inner layer sees the input shape with the time dimension removed.
    inner_input_shape = [d for (i, d) in enumerate(input_shape) if i != 1]
    # for NDHWC, inner data layout will drop the D
    inner_data_layout = data_layout
    if data_layout == "NDHWC":
        inner_data_layout = "NHWC"
    # some code duplication from keras_op_to_relay
    # but it's useful to avoid cluttering the etab
    inner_layer_op_name = type(keras_layer.layer).__name__
    if inner_layer_op_name not in _convert_map:
        raise tvm.error.OpNotImplemented(
            f"The inner layer for TimeDistributed {inner_layer_op_name} is not supported for"
            f" frontend Keras."
        )
    conversion_func = lambda expr: _convert_map[inner_layer_op_name](
        expr, inner_layer, etab, inner_data_layout, input_shape=inner_input_shape
    )
    split_dim = input_shape[1]
    split_input = _op.split(inexpr, split_dim, 1)
    # Each split keeps a singleton time dim; unknown batch dim defaults to 1.
    split_shape = list(input_shape)
    if split_shape[0] is None:
        split_shape[0] = 1
    split_shape[1] = 1
    split_var = new_var(
        "time_distributed_split",
        type_annotation=TupleType(
            [TensorType(split_shape, dtype="float32") for i in range(split_dim)]
        ),
    )
    # For each split, squeeze away the second dimension,
    # apply the inner layer.
    # Afterwards, combine the transformed splits back along
    # the second dimension using stack
    splits = [
        conversion_func(_op.squeeze(_expr.TupleGetItem(split_var, i), axis=[1]))
        for i in range(split_dim)
    ]
    return _expr.Let(split_var, split_input.astuple(), _op.stack(splits, axis=1))
def _default_skip(inexpr, keras_layer, etab, data_layout): # pylint: disable=unused-argument
"""Layers that can be skipped because they are train time only."""
return inexpr
# Dispatch table: Keras layer class name -> converter function.
# Commented-out entries are recognized layer types that are not supported yet.
_convert_map = {
    "Dense": _convert_dense,
    "Activation": _convert_activation,
    "Softmax": _convert_advanced_activation,
    "ReLU": _convert_advanced_activation,
    "LeakyReLU": _convert_advanced_activation,
    "PReLU": _convert_advanced_activation,
    "ELU": _convert_advanced_activation,
    "ThresholdedReLU": _convert_advanced_activation,
    "AveragePooling2D": _convert_pooling,
    "MaxPooling2D": _convert_pooling,
    "GlobalAveragePooling2D": _convert_pooling,
    "GlobalMaxPooling2D": _convert_pooling,
    "Conv2D": _convert_convolution,
    "Conv2DTranspose": _convert_convolution,
    "DepthwiseConv2D": _convert_convolution,
    "SeparableConv2D": _convert_separable_convolution,
    "Flatten": _convert_flatten,
    "Reshape": _convert_reshape,
    "Concatenate": _convert_concat,
    "BatchNormalization": _convert_batchnorm,
    # Specific tf.Keras terminology for batch normalization
    "BatchNormalizationV1": _convert_batchnorm,
    "Add": _convert_merge,
    "Subtract": _convert_merge,
    "Multiply": _convert_merge,
    "ZeroPadding2D": _convert_padding,
    "UpSampling2D": _convert_upsample,
    "Cropping2D": _convert_cropping,
    # 'ZeroPadding1D' : _convert_padding,
    # 'AveragePooling1D' : _convert_pooling,
    # 'MaxPooling1D' : _convert_pooling,
    # 'GlobalAveragePooling1D' : _convert_pooling,
    # 'GlobalMaxPooling1D' : _convert_pooling,
    # 'Cropping1D' : _convert_cropping,
    # 'UpSampling1D' : _convert_upsample,
    "Conv1D": _convert_convolution1d,
    # "Conv1DTranspose": _convert_convolution1d,
    "Conv3D": _convert_convolution3d,
    "Conv3DTranspose": _convert_convolution3d,
    # 'SeparableConv3D' : _convert_convolution3d,
    "MaxPooling3D": _convert_pooling3d,
    "AveragePooling3D": _convert_pooling3d,
    "GlobalMaxPooling3D": _convert_global_pooling3d,
    "GlobalAveragePooling3D": _convert_global_pooling3d,
    "UpSampling3D": _convert_upsample3d,
    "ZeroPadding3D": _convert_padding3d,
    "SimpleRNN": _convert_simple_rnn,
    "LSTM": _convert_lstm,
    "GRU": _convert_gru,
    # 'Bidirectional' : _convert_bidirectional,
    "TimeDistributed": _convert_time_distributed,
    "Average": _convert_merge,
    "Minimum": _convert_merge,
    "Maximum": _convert_merge,
    "Dot": _convert_merge,
    "Permute": _convert_permute,
    "Embedding": _convert_embedding,
    "RepeatVector": _convert_repeat_vector,
    "Lambda": _convert_lambda,
    "InputLayer": _default_skip,
    "Dropout": _default_skip,
    "AlphaDropout": _default_skip,
    "SpatialDropout2D": _default_skip,
    "SpatialDropout1D": _default_skip,
    "GaussianDropout": _default_skip,
    "GaussianNoise": _default_skip,
}
def _check_unsupported_layers(model):
    """Raise NotImplementedError if *model* uses any layer without a converter."""
    missing_ops = {type(layer).__name__ for layer in model.layers} - set(_convert_map)
    if missing_ops:
        raise NotImplementedError(f"The following operators are not implemented: {missing_ops}")
def keras_op_to_relay(inexpr, keras_layer, outname, etab, data_layout):
    """Convert a Keras layer to a Relay expression and update the expression table.
    Parameters
    ----------
    inexpr : relay.expr.Expr or a list of it
        The input Relay expression(s).
    keras_layer : keras.layers
        The Keras layer to be converted.
    outname : str
        Name of the output Relay expression.
    etab : relay.frontend.common.ExprTable
        The global expression table to be updated.
    data_layout : str
        The input data layout
    """
    op_name = type(keras_layer).__name__
    if op_name not in _convert_map:
        raise tvm.error.OpNotImplemented(f"Operator {op_name} is not supported for frontend Keras.")
    converted = _convert_map[op_name](inexpr, keras_layer, etab, data_layout)
    outs = _as_list(converted)
    # Register every output tensor under "<outname>:<tensor index>".
    for t_idx, out in enumerate(outs):
        etab.set_expr(outname + ":" + str(t_idx), out)
    return outs
def from_keras(model, shape=None, layout="NCHW"):
    """Convert keras model to relay Function.
    Parameters
    ----------
    model : keras.engine.training.Model or tensorflow.keras.models.Model
        The keras model to be converted.
    shape: dict of str to int list/tuple
        Input shapes of the model, optional
    layout: str
        One of 'NCHW' or 'NHWC', indicates how data should be arranged in
        the output model. Default layout is 'NCHW' as it in general
        performs better across TVM.
    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation.
    params : dict of str to tvm.nd.NDArray
        The parameter dict to be used by Relay.
    """
    def _check_model_is_tf_keras():
        # tf.keras models live under the tensorflow.python.keras package.
        return type(model).__module__.startswith("tensorflow.python.keras")
    def _convert_input_layer(keras_layer):
        # Register a free relay Var for a model input, honoring a user-given shape.
        input_name = keras_layer.name
        input_shape = shape[input_name] if shape is not None and input_name in shape else None
        etab.set_expr(input_name, new_var(input_name, shape=input_shape))
    def _convert_layer(keras_layer, etab, scope=""):
        # Convert one layer across all of its inbound nodes; recurses into
        # nested (container) layers.
        inbound_nodes = (
            keras_layer.inbound_nodes
            if hasattr(keras_layer, "inbound_nodes")
            else keras_layer._inbound_nodes
            if hasattr(keras_layer, "_inbound_nodes")
            else None
        )
        if inbound_nodes is None:
            raise TypeError(f"Unknown layer type or unsupported Keras version : {keras_layer}")
        outs = []
        for node_idx, node in enumerate(inbound_nodes):
            # If some nodes in imported model are not relevant to the current model,
            # skip such layers.
            # - In Keras, model._network_nodes contains keys of all nodes relevant to the
            #   current model;
            # - In tf.Keras, this is already done as part of tensorflow.keras.network.get_config
            if not is_tf_keras:
                if (
                    hasattr(model, "_node_key")
                    and not model._node_key(keras_layer, node_idx) in model._network_nodes
                ):
                    continue
            inexpr = []
            # Since Keras allows creating multiple layers from the same name instance,
            # we append node index to the expr name to make it unique.
            # The one exception is InputLayer. Changing input variable names after conversion
            # would confuse users, so we should keep them as far as possible. Fortunately,
            # they are named uniquely to input_1, input_2, input_3... by default.
            # node_indices attribute removed in tensorflow 2.3, however iterate_inbound() can
            # be used
            if hasattr(node, "node_indices"):
                zip_node = zip(
                    _as_list(node.inbound_layers),
                    _as_list(node.node_indices),
                    _as_list(node.tensor_indices),
                    _as_list(node.input_tensors),
                )
                node_attributes = zip_node
            else:
                node_attributes = node.iterate_inbound()
            for inbound_layer, n_idx, t_idx, _ in node_attributes:
                if isinstance(inbound_layer, input_layer_class):
                    expr_name = inbound_layer.name
                    _convert_input_layer(inbound_layer)
                else:
                    expr_name = scope + inbound_layer.name + ":" + str(n_idx) + ":" + str(t_idx)
                expr = etab.get_expr(expr_name)
                inexpr.append(expr)
            # Handle nested layers
            if hasattr(keras_layer, "layers"):
                input_index = 0
                for layer in keras_layer.layers:
                    if isinstance(layer, input_layer_class):
                        # Replace input layer with inbound node
                        etab.set_expr(layer.name, inexpr[input_index])
                        input_index += 1
                    else:
                        # Convert child layer. Prepend scope with parent layer name.
                        layer_outs = _convert_layer(layer, etab, keras_layer.name + "_" + scope)
                        # Get output of last child layer and mark as output of parent.
                        outname = keras_layer.name + ":" + str(node_idx)
                        for t_idx, out in enumerate(layer_outs):
                            name = outname + ":" + str(t_idx)
                            etab.set_expr(name, out)
                        outs.extend(layer_outs)
            else:
                if len(inexpr) == 1:
                    inexpr = inexpr[0]
                outs.extend(
                    keras_op_to_relay(
                        inexpr,
                        keras_layer,
                        scope + keras_layer.name + ":" + str(node_idx),
                        etab,
                        layout,
                    )
                )
        return outs
    is_tf_keras = _check_model_is_tf_keras()
    if not is_tf_keras:
        # Importing from Keras
        try:
            import keras
        except ImportError:
            raise ImportError("Keras must be installed")
        if keras.backend.backend() != "tensorflow":
            raise ValueError("Keras frontend currently supports tensorflow backend only.")
        if keras.backend.image_data_format() != "channels_last":
            raise ValueError("Keras frontend currently supports data_format = channels_last only.")
        try:
            import keras.engine as E
        except ImportError:
            try:
                import keras.src.engine as E
            except ImportError:
                raise ImportError("Cannot find Keras's engine")
        expected_model_class = E.training.Model
        if hasattr(E, "InputLayer"):
            input_layer_class = E.InputLayer
        else:
            # TFlite >=2.6
            input_layer_class = E.input_layer.InputLayer
    else:
        # Importing from Tensorflow Keras (tf.keras)
        try:
            from tensorflow import keras as tf_keras
        except ImportError:
            raise ImportError("Tensorflow must be installed")
        expected_model_class = tf_keras.models.Model
        input_layer_class = tf_keras.layers.InputLayer
    assert isinstance(model, expected_model_class)
    etab = ExprTable()
    # Set global data format.
    assert layout in [
        "NWC",
        "NCHW",
        "NHWC",
        "NDHWC",
    ], "Layout must be one of 'NWC', 'NCHW', NHWC or NDHWC"
    for keras_layer in model.layers:
        if isinstance(keras_layer, input_layer_class):
            _convert_input_layer(keras_layer)
        else:
            _convert_layer(keras_layer, etab)
    # model._output_coordinates contains out_node(oc[0]), node_index(oc[1]) and tensor_index(oc[2])
    # Get all output nodes in etab using the name made from above values.
    # The out exprs were added to etab in keras_op_to_relay using this name.
    outexpr = [
        etab.get_expr(oc[0].name + ":" + str(oc[1]) + ":" + str(oc[2]))
        for oc in model._output_coordinates
    ]
    outexpr = outexpr[0] if len(outexpr) == 1 else _expr.Tuple(outexpr)
    func = _function.Function(analysis.free_vars(outexpr), outexpr)
    params = {k: _nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()}
    return IRModule.from_expr(func), params
| 59,855 | 36.883544 | 100 | py |
tvm | tvm-main/python/tvm/relay/frontend/pytorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks
# pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable, broad-except
# pylint: disable=import-outside-toplevel, simplifiable-if-expression, cell-var-from-loop, unnecessary-lambda
# pylint: disable=missing-function-docstring, redefined-builtin, use-implicit-booleaness-not-comparison
"""PT: PyTorch frontend."""
import functools
import itertools
import math
import re
import sys
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from .. import analysis as _analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn, transform
from ..expr_functor import ExprMutator
from ..loops import while_loop
from ..prelude import Prelude, StaticTensorArrayOps
from ..ty import Any, TensorType, TupleType
from . import qnn_torch
from .common import AttrCvt, get_relay_op, gru_cell, logger, rnn_cell
from .common import infer_shape as _infer_shape
from .common import infer_value as _infer_value
from .common import infer_value_simulated as _infer_value_simulated
from .common import lstm_cell, try_infer_value, unbind, fold_constant
from .common import set_span
from .pytorch_utils import is_version_greater_than, getattr_attr_name
# Public API of this module.
__all__ = ["from_pytorch"]
# This returns a "subgraph" which puts variables whenever
# the type is known. It also records things to map the input
# nodes to the extracted graph's nodes.
# As Python objects are not round-trippable through C++, and
# our type annotations only live in Python, we need to map
# the we need to map the nodes we get in visiting to the nodes
# we used to construct the graph (they are the same in C++,
# match each other in dictionary lookups, but are not the same
# in Python) by using the hint dictionary filled as
# {node: node for node in nodes} to get the type annotations.
# https://discuss.tvm.apache.org/t/round-tripping-objects-through-the-ffi/8440
class _TypeFinder(ExprMutator):
    """Rewrites an expression, substituting a fresh typed Var for every
    sub-expression whose type is already known (recorded in ``types``).

    The collected replacement vars become the parameters of the "subgraph"
    function that is type-inferred incrementally (see the comment block above).
    """
    def __init__(self, types):
        super().__init__()
        self.counter = 0  # suffix for generated placeholder var names
        self.vars = {}  # original expr -> replacement var
        self.types = types
        self.leave = set()  # some variables are not inputs
    def visit_let(self, let):
        # Let-bound vars are definitions, not free inputs -- never replace them.
        self.leave.add(let.var)
        return super().visit_let(let)
    def visit_function(self, fn):
        # Function parameters likewise stay untouched.
        self.leave.update(fn.params)
        return super().visit_function(fn)
    def visit(self, expr):
        if expr in self.leave:
            return super().visit(expr)
        if expr in self.vars:
            # Reuse the replacement so shared sub-expressions stay shared.
            return self.vars[expr]
        if isinstance(expr, tvm.relay.Var):
            self.vars[expr] = expr
            return expr
        if expr in self.types:
            # Known type: cut the expression here with a typed placeholder var.
            ty = self.types[expr]
            v = tvm.relay.var(f"_{self.counter}", type_annotation=ty)
            self.counter += 1
            self.vars[expr] = v
            return v
        v = super().visit(expr)
        return v
def _should_construct_dynamic_list(list_construct_node):
    """Return True when a prim::ListConstruct must become a runtime List ADT."""
    # if this list is element-accessed or modified at runtime, generate List ADT
    def inplace_add_to_add(op_name):
        # Treat in-place aten::add_ the same as aten::add for this analysis.
        if op_name == "aten::add_":
            return "aten::add"
        else:
            return op_name
    uses = _get_uses(list_construct_node)
    # Follow the list through prim::Loop block inputs to catch uses inside loop bodies.
    for loop_use in filter(lambda use: use.user.kind() == "prim::Loop", uses):
        block_input_index = loop_use.offset - 1
        block = list(loop_use.user.blocks())[0]
        list_loop_var = list(block.inputs())[block_input_index]
        uses += _get_uses(list_loop_var.node())
    op_names = map(inplace_add_to_add, set(use.user.kind() for use in uses))
    list_ops = set(["aten::add", "aten::__getitem__"])
    intersect = list_ops.intersection(op_names)
    # Element access (possibly combined with add) forces the dynamic representation.
    if len(intersect) > 0 and intersect != set(["aten::add"]):
        return True
    # if add op outputs list, it is dynamic so we need to construct List ADT
    for use in filter(lambda use: use.user.kind() in ["aten::add", "aten::add_"], uses):
        output_type = _get_node_type(use.user)
        if output_type == "ListType":
            return True
    return False
def _is_int_seq(seq):
# TODO (t-vi): handle non-int constants? (like numpy.intXX)
return len(seq) > 0 and all([isinstance(i, int) for i in seq])
# operator implementation
class PyTorchOpConverter:
"""A helper class for holding PyTorch op converters."""
    def __init__(self, prelude, default_dtype, use_parser_friendly_name=False):
        """Set up converter state; *prelude* supplies the List/tensor-array ADTs."""
        self.prelude = prelude
        self.default_dtype = default_dtype
        self.create_convert_map()
        self.types = {}  # map from nodes to (Relay) type annotations
        self.source_map = {}  # map from graph node to its source name
        self.op_type_dict = {}  # map from op type to its presenting order
        self.current_op = []  # stack for recording current processing op
        self.use_parser_friendly_name = use_parser_friendly_name
# this incrementally infers the type, see the comments on the type visitor
# above.
    def infer_type(self, node, mod=None):
        """An incremental method to infer the type of a node in the relay graph."""
        if node in self.types:
            return self.types[node]
        if isinstance(node, tvm.relay.Var):
            return node.type_annotation
        # Replace already-typed sub-expressions with placeholder vars so only
        # the new part of the graph has to go through full type inference.
        tf = _TypeFinder(types=self.types)
        new_node = tf.visit(node)
        fn = _function.Function(list(tf.vars.values()), new_node)
        new_mod = IRModule({"main": fn})
        if mod is not None:
            new_mod.update(mod)
        new_mod = transform.RemoveUnusedFunctions()(new_mod)
        new_mod = transform.InferType()(new_mod)
        entry = new_mod["main"]
        ty = entry.body.checked_type
        # Memoize for subsequent queries.
        self.types[node] = ty
        return self.types[node]
def infer_type_with_prelude(self, val):
body = self.infer_type(val, self.prelude.mod)
return body
# list ADT utilities
def convert_to_list_adt(self, py_lst):
elem_tys = [self.infer_type_with_prelude(elem) for elem in py_lst]
msg = "List elements should have identical types"
assert all(map(lambda ty: ty == elem_tys[0], elem_tys)), msg
# get_type returns type_name, ctor1, ..., ctorN
# 1 is nil
_, cons, nil = self.prelude.mod.get_type("List")
adt_lst = nil()
for elem in reversed(py_lst):
adt_lst = cons(elem, adt_lst)
return adt_lst
def map_tensor_array_constructor(self, adt_lst, shape):
static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", shape)
static_tensor_array_ops.register()
tensor_create = self.prelude.get_tensor_ctor_static("tensor_constructor", "float32", shape)
return self.prelude.map(tensor_create, adt_lst)
    def convert_to_tensor_array(self, adt_lst):
        """Convert a List ADT of tensors into a static tensor array.

        Returns (tensor_array, shape) for non-empty lists.
        """
        _, cons, nil = self.prelude.mod.get_type("List")
        if self.prelude.length(adt_lst) == 0:
            # NOTE(review): the empty case returns a bare nil() rather than the
            # (tensor_array, shape) pair of the normal path -- confirm callers
            # handle (or never hit) this branch.
            return nil()
        checked_type = self.infer_type_with_prelude(self.prelude.hd(adt_lst))
        shape = checked_type.shape
        tensor_array = self.map_tensor_array_constructor(adt_lst, shape)
        return tensor_array, tuple(shape)
def infer_shape(self, inputs, mod=None):
"""A method to get the output type of an intermediate node in the graph."""
typ = self.infer_type(inputs, mod=mod)
if hasattr(typ, "shape"):
# Regular operator that outputs tensors
return get_const_tuple(typ.shape)
# The return type is not a tensor, for example List
return typ
def infer_shape_with_prelude(self, inputs):
return self.infer_shape(inputs, mod=self.prelude.mod)
def is_empty_shape(self, shape):
rank = len(shape)
if rank:
is_empty = False
for i in range(rank):
if shape[i] == 0:
is_empty = True
break
return is_empty
else:
return True
def record_output_type(self, output):
if isinstance(output, tuple):
cleaned_output = [o for o in output if o is not None]
types = self.infer_type_with_prelude(_expr.Tuple(cleaned_output))
for o, t in zip(cleaned_output, types.fields):
self.types[o] = t
elif isinstance(output, _expr.Expr):
self.infer_type_with_prelude(output)
# it can also happen that the type is int or so
def pytorch_promote_types(self, inputs, dtypes):
"""This promotes TVM inputs with TVM dtypes passed like PyTorch would"""
actual_dtypes = []
for i, inp in enumerate(inputs):
if isinstance(inp, _expr.Expr):
idt = self.infer_type(inp).dtype
actual_dtypes.append(idt)
else:
actual_dtypes.append(dtypes[i])
dtypes = actual_dtypes
tensor_dtypes = [dt for inp, dt in zip(inputs, dtypes) if not np.isscalar(inp)]
non_tensor_inputs = [inp for inp in inputs if np.isscalar(inp)]
result_type = _pytorch_result_type(tensor_dtypes, non_tensor_inputs)
results = []
for inp, dt in zip(inputs, dtypes):
if np.isscalar(inp):
results.append(_expr.const(inp, dtype=result_type))
elif dt == result_type:
results.append(inp)
else:
results.append(_op.cast(inp, result_type))
return results
def is_quantized_tensor(self, data):
# If a quantized Torch module is saved and loaded back, dtype will be dropped
# Since dtypes from Torch tensors are not reliable in such cases, we use
# Relay's type inference result to decide if an input tensor is quantized
ty = self.infer_type_with_prelude(data)
return ty.dtype == "uint8"
# Operator implementations
    def make_elemwise(self, name):
        """Build a converter for the binary elementwise relay op *name*."""
        def elemwise(inputs, input_types):
            if name == "divide":
                # https://pytorch.org/docs/stable/generated/torch.div.html#torch.div
                # None - default behavior. Performs no rounding and, if both input and
                # other are integer types, promotes the inputs to the default scalar type.
                if all(["int" in input_type for input_type in input_types[:2]]):
                    # NOTE: mutates the caller's lists in place (slice assignment).
                    input_types[:2] = ["float32"] * 2
                    cast_inputs = []
                    for inp in inputs[:2]:
                        if np.isscalar(inp):
                            cast_inputs.append(_expr.const(inp, dtype="float32"))
                        else:
                            cast_inputs.append(_op.cast(inp, "float32"))
                    inputs[:2] = cast_inputs
            data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
            return get_relay_op(name)(data0, data1)
        return elemwise
def min_max_common(self, name_elemwise, name_reduce, inputs, input_types):
if len(inputs) == 1:
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
return get_relay_op(name_reduce)(data[0])
elif len(inputs) >= 2 and isinstance(inputs[1], (list, int)):
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
dim = inputs[1]
keepdims = inputs[2] if len(inputs) > 2 else False
# also return dummy indices
return get_relay_op(name_reduce)(data[0], axis=dim, keepdims=keepdims), None
else:
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return get_relay_op(name_elemwise)(data0, data1)
def max(self, inputs, input_types):
return self.min_max_common("maximum", "max", inputs, input_types)
def min(self, inputs, input_types):
return self.min_max_common("minimum", "min", inputs, input_types)
def maximum(self, inputs, input_types):
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return _op.maximum(data0, data1)
def minimum(self, inputs, input_types):
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return _op.minimum(data0, data1)
def make_unary(self, name):
def unary(inputs, input_types):
# this is just to ensure tensor input
(data,) = self.pytorch_promote_types(inputs[:1], input_types[:1])
return get_relay_op(name)(data)
return unary
def log1p(self, inputs, input_types):
# 1_plus_log x = log(x + 1)
(dtype,) = input_types
one = _expr.const(1, dtype=dtype)
return _op.log(inputs[0] + one)
def square(self, inputs, input_types):
(dtype,) = input_types
return _op.power(inputs[0], _expr.const(2, dtype))
def lerp(self, inputs, input_types):
if len(inputs) != 3:
msg = f"Wrong number of arguments ({len(inputs)}) to parse."
raise AssertionError(msg)
start = inputs[0]
end = inputs[1]
weight = inputs[2]
return start + weight * (end - start)
    def arange(self, inputs, input_types):
        """torch.arange with PyTorch's dtype-promotion semantics."""
        def _get_value(val, dtype):
            # dtype is a tvm dtype
            if isinstance(val, _expr.Expr):
                # since "arange" op will fill expr into its attribute
                # invoke set_span here to prevent expr-rewritten occurrs in span-filling stage
                source_name = self.source_map[self.current_op[-1]]
                inp = set_span(_op.cast(val, dtype), source_name)
                ret, _ = try_infer_value(inp, lambda ret: _expr.const(ret, dtype))
            else:
                ret = _create_typed_const(val, dtype)
            return ret
        def _get_type(val, inp_type):
            # Prefer the inferred type of real expressions over the hint.
            if isinstance(val, _expr.Expr):
                dtype = str(self.infer_type(val))
                return dtype
            return inp_type
        # PyTorch arange uses the following type semantics:
        # - if a dtype is given, start, stop, step are converted to that dtype
        # - if no dtype is given and all args are integral, dtype is int64
        # - if no dtype is given and there is a float arg, dtype is float32
        if len(inputs) in {5, 6, 7}:
            # inputs look like [_,_,_,dtype,layout,device,requires_grad]
            # therefore dtype_idx is always the length of inputs minus 4
            dtype_idx = len(inputs) - 4
            types = [_get_type(inputs[i], input_types[i]) for i in range(dtype_idx)]
            if inputs[dtype_idx] is not None:
                dtype = _convert_dtype_value(inputs[dtype_idx])
            elif any([t.startswith("float") for t in types]):
                dtype = "float32"
            else:
                dtype = "int64"
            # - if len(inputs) == 5, inputs = [stop, dtype, ...]
            # - if len(inputs) == 6, inputs = [start, stop, dtype, ...]
            # - if len(inputs) == 7, inputs = [start, stop, step, dtype, ...]
            start = _get_value(inputs[0], dtype) if len(inputs) > 5 else _expr.const(0, dtype)
            stop = _get_value(inputs[1 if len(inputs) > 5 else 0], dtype)
            step = _get_value(inputs[2], dtype) if len(inputs) > 6 else _expr.const(1, dtype)
        else:
            msg = f"Unknown number of arguments ({len(inputs)}) to parse."
            raise AssertionError(msg)
        return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
def squeeze(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 1:
axis = None
else:
# TODO (t-vi): why is the cast to int needed? similarly elsewhere
inputs = [inputs[1]] if not isinstance(inputs[1], list) else inputs[1]
axis = [int(v) for v in inputs]
return _op.transform.squeeze(data, axis)
def unsqueeze(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
return _op.transform.expand_dims(data, int(axis), 1)
    def concatenate(self, inputs, input_types):
        """torch.cat: static relay concatenate, or tensor-array concat when the
        input is a dynamic List ADT rather than a Python list."""
        def tensor_array_concat(lst, axis):
            assert axis == 0, "Tensor array concat supported only for axis 0"
            tensor_array, shape = self.convert_to_tensor_array(lst)
            # The concatenated leading dim is unknown at compile time.
            concat_shape = (Any(),) + shape[1:]
            concat = self.prelude.get_global_var_static("tensor_array_concat", "float32", shape)
            concatenated = concat(tensor_array)
            static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", concat_shape)
            static_tensor_array_ops.register()
            get_tensor = self.prelude.get_global_var_static(
                "tensor_get_data", "float32", concat_shape
            )
            return get_tensor(concatenated)
        data = inputs[0]
        axis = inputs[1]
        if not isinstance(data, list):
            # Dynamic List ADT input.
            return tensor_array_concat(data, axis)
        if isinstance(data, _expr.Expr):
            data = [data]
        return _op.tensor.concatenate(data, int(axis))
def slice(self, inputs, input_types):
axis_dtype = "int64"
index_size_limit = sys.maxsize
data = inputs[0]
dshape = self.infer_shape(data)
ndim = len(dshape)
dim = int(inputs[1])
stride = inputs[4]
target_begin, is_begin_const = try_infer_value(
inputs[2], lambda ret: ret.astype(np.int).item(0)
)
target_end, is_end_const = try_infer_value(
inputs[3], lambda ret: ret.astype(np.int).item(0)
)
# A fast path when slicing is nop.
if (
isinstance(target_begin, int)
and isinstance(target_end, int)
and target_begin == 0
and target_end >= index_size_limit
and stride == 1
):
return data
if target_begin is None and target_end is None:
return data
# Process begin
begin = [0] * ndim
if target_begin is not None:
begin[dim] = target_begin
if target_begin is not None and not isinstance(begin[dim], int):
tmp = []
for b in begin:
if isinstance(b, int):
tmp.append(_op.expand_dims(_expr.const(b, axis_dtype), axis=0))
else:
tmp.append(_op.cast(_op.expand_dims(b, axis=0), axis_dtype))
begin = _op.concatenate(tmp, axis=0)
btype = self.infer_type(begin).dtype
if str(btype) != axis_dtype:
begin = _op.cast(begin, axis_dtype)
# Process end
if isinstance(target_end, int) and target_end >= index_size_limit:
target_end = dshape[dim]
if any([isinstance(d, tvm.tir.Any) for d in dshape]):
end = _op.shape_of(data)
else:
end = dshape
if isinstance(target_end, int):
if isinstance(end, list):
end[dim] = target_end
else:
all_static = True
for i, shape_dim in enumerate(dshape):
if i != dim and isinstance(shape_dim, tvm.tir.Any):
all_static = False
if all_static:
end = list(get_const_tuple(dshape))
end[dim] = target_end
else:
target_end = _expr.const(target_end)
end = _op.scatter_elements(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
else:
end = _op.cast(_op.shape_of(data), axis_dtype)
if target_end is not None and not isinstance(target_end, tvm.tir.Any):
ttype = self.infer_type(target_end).dtype
if str(ttype) != axis_dtype:
target_end = _op.cast(target_end, axis_dtype)
end = _op.scatter_elements(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
if not isinstance(end, list):
etype = self.infer_type(end).dtype
if str(etype) != axis_dtype:
end = _op.cast(end, axis_dtype)
strides = [1] * ndim
strides[dim] = stride
return _op.transform.strided_slice(
data, begin=begin, end=end, strides=strides, slice_mode="end"
)
def narrow(self, inputs, input_types):
# Inputs are:
# 0 - the tensor to narrow
# 1 - the dimension along which to narrow
# 2 - the starting dimension
# 3 - the distance to the ending dimension
# Lets find the ending dimension
end = self.add(inputs[2:4], input_types[2:4])
stride = 1
slice_input = inputs[:3] + [end, stride]
slice_types = input_types + ["int32"]
return self.slice(slice_input, slice_types)
def split(self, inputs, input_types):
data = inputs[0]
split_size = int(inputs[1])
dim = int(inputs[2])
split_index = split_size
indices = []
while split_index < self.infer_shape(data)[dim]:
indices.append(split_index)
split_index += split_size
return _op.split(data, indices, dim)
def split_with_sizes(self, inputs, input_types):
data = inputs[0]
sections = inputs[1]
dim = int(inputs[2])
if len(sections) == 1:
# a special case used in torchvision detection models
return _expr.TupleWrapper(_expr.Tuple([data]), 1)
split_index = 0
indices = []
for i in range(len(sections) - 1):
index, _ = try_infer_value(sections[i], lambda ret: int(ret))
split_index += index
indices.append(split_index)
return _op.split(data, indices, dim)
def tensor_split(self, inputs, input_types):
# Reference: https://pytorch.org/docs/stable/generated/torch.tensor_split.html
import torch
if not isinstance(inputs[1], (int, list, tuple, torch.Tensor)):
msg = (
f"indices_or_sections type {type(inputs[1])} could not be parsed in "
f"tensor_split op"
)
raise AssertionError(msg)
if isinstance(inputs[1], torch.Tensor) and not (
list(inputs[1].shape) == [] or list(inputs[1].shape) == 1
):
msg = "indices_or_sections must be a zero-dimensional or one-dimensional long tensor"
raise AssertionError(msg)
if isinstance(inputs[1], int) or (
isinstance(inputs[1], torch.Tensor) and list(inputs[1].shape) == []
):
data = inputs[0]
n = int(inputs[1])
dim = int(inputs[2])
split_size = int(self.infer_shape(data)[dim] / n)
split_rest = int(self.infer_shape(data)[dim] % n)
indices = []
split_index = split_size
if split_rest == 0:
for i in range(n - 1):
indices.append(split_index)
split_index += split_size
else:
for i in range(split_rest):
indices.append(split_index + 1)
split_index = (i + 1) * (split_index + 1)
for i in range(n - split_rest - 1):
split_index += split_size
indices.append(split_index)
return _op.split(data, indices, dim)
else:
data = inputs[0]
sections = inputs[1]
dim = int(inputs[2])
if isinstance(sections, tuple):
sections = list(sections)
elif isinstance(sections, torch.Tensor):
sections = sections.cpu().numpy().tolist()
return _op.split(data, sections, dim)
def select(self, inputs, input_types):
data = inputs[0]
dim = int(inputs[1])
index = _wrap_const(inputs[2])
return _op.transform.take(data, index, axis=dim, mode="wrap")
def take(self, inputs, input_types):
data = inputs[0]
indices = _op.cast(inputs[1], "int32")
return _op.transform.take(data, indices=indices, mode="wrap")
def topk(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[2])
is_ascend = not bool(inputs[3])
sort = bool(inputs[4])
if isinstance(inputs[1], _expr.Expr):
k, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
else:
k = inputs[1]
if not sort:
msg = "Currently supports only sorted output for topk operator."
raise AssertionError(msg)
outs = _op.topk(data, k=k, axis=axis, is_ascend=is_ascend, ret_type="both", dtype="int64")
return outs[0], outs[1]
def reciprocal(self, inputs, input_types):
data = inputs[0]
return _expr.const(1.0, dtype=input_types[0]) / data
def repeat(self, inputs, input_types):
data = inputs[0]
reps = []
for r in inputs[1]:
if isinstance(r, int):
reps.append(r)
else:
reps.append(int(_infer_value(r, {}).numpy()))
return _op.transform.tile(data, reps=reps)
def repeat_interleave(self, inputs, input_types):
data = inputs[0]
if isinstance(inputs[1], int):
repeats = inputs[1]
axis = inputs[2]
elif isinstance(inputs[1], _expr.Expr):
if isinstance(inputs[1], _expr.Constant):
repeats = int(inputs[1].data.numpy())
else:
repeats, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
axis = inputs[2]
else:
msg = "Only repeat with one value as repeat is currently supported."
raise AssertionError(msg)
if axis is None: # Flatten the data if no axis is given from torch
data = _op.transform.reshape(data, [-1])
axis = 0
return _op.transform.repeat(data, repeats=repeats, axis=axis)
def addcdiv(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 / t2))
def addcmul(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 * t2))
def where(self, inputs, input_types):
if len(inputs) == 1:
return self.nonzero([inputs[0], True], input_types)
cond = inputs[0]
x, y = self.pytorch_promote_types(inputs[1:3], input_types[1:3])
return _op.where(cond, x, y)
    def full_impl(self, data, fill_value, dtype):
        """Build a constant-filled tensor of shape `data` (a list of dims).

        Each dim may be a Python int, a relay Constant, or a symbolic Expr.
        `size` collects static dims; once any dim cannot be inferred, `size`
        becomes None and the shape is assembled dynamically, with a trailing
        reshape to `new_shape` to recover as much static shape as possible.
        """
        size = []
        need_reshape = False
        new_shape = []
        for dim in data:
            if isinstance(dim, _expr.Expr):
                if isinstance(dim, _expr.Constant):
                    # Constant dim: use its concrete int value directly.
                    dim = int(dim.data.numpy())
                    if isinstance(size, list):
                        size.append(dim)
                    new_shape.append(dim)
                else:
                    # Symbolic dim: try to fold it to an int (0 on failure).
                    dim, success = try_infer_value(dim, lambda ret: int(ret), lambda: 0)
                    new_shape.append(dim)
                    if success:
                        if isinstance(size, list):
                            size.append(dim)
                    else:
                        # Give up on a fully static shape; reshape afterwards.
                        size = None
                        need_reshape = True
            else:
                if isinstance(size, list):
                    size.append(dim)
                new_shape.append(dim)
        if size is None:
            # Fully dynamic path: concatenate per-dim int64 scalars at runtime.
            tmp = []
            for dim in data:
                tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
            size = _op.concatenate(tmp, axis=0)
        if not isinstance(fill_value, _expr.Constant):
            # Fold a symbolic fill value to a concrete constant of `dtype`.
            if isinstance(fill_value, _expr.Expr):
                fill_value = _infer_value(fill_value, {})
            fill_value = _expr.const(fill_value, dtype=dtype)
        out = _op.full(fill_value, size, dtype=dtype)
        if need_reshape:
            out = _op.reshape(out, new_shape)
        return out
def ones(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = f"Data type {type(data)} could not be parsed in ones op"
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 1, dtype)
def ones_like(self, inputs, input_types):
data = inputs[0]
out = _op.ones_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] != dtype:
out = _op.cast(out, dtype)
return out
def new_ones(self, inputs, input_types):
size = inputs[1]
import torch
if not isinstance(size, (_expr.Expr, list, tuple, torch.Size, np.ndarray)):
msg = f"Data type {type(size)} could not be parsed in ones op"
raise AssertionError(msg)
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
else:
dtype = input_types[0]
return self.full_impl(size, 1, dtype)
def zeros(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = f"Data type {type(data)} could not be parsed in zeros op"
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 0, dtype)
def zero_(self, inputs, input_types):
data = inputs[0]
return self.full_impl(self.infer_shape(data), 0, input_types[0])
def zeros_like(self, inputs, input_types):
data = inputs[0]
out = _op.zeros_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def new_zeros(self, inputs, input_types):
data = inputs[1]
import torch
if not isinstance(data, (_expr.Expr, list, tuple, torch.Size)):
msg = f"Data type {type(data)} could not be parsed in new_zeros op"
raise AssertionError(msg)
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, use the dtype of the input tensor
dtype = self.infer_type(inputs[0])
return self.full_impl(data, 0, dtype)
def full(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = f"Data type {type(data)} could not be parsed in full op"
raise AssertionError(msg)
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
return self.full_impl(data, fill_value, dtype)
def full_like(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
out = _op.full_like(data, _expr.const(fill_value))
# If the input and the output datatype is different, do a cast
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def new_full(self, inputs, input_types):
data = inputs[1]
fill_value = inputs[2]
import torch
if not isinstance(data, (_expr.Expr, list, tuple, torch.Size)):
msg = f"Data type {type(data)} could not be parsed in full op"
raise AssertionError(msg)
if inputs[3] is not None: # dtype given
dtype = _convert_dtype_value(inputs[3])
else:
# if dtype is None, use the dtype of the input tensor
dtype = self.infer_type(inputs[0])
return self.full_impl(data, fill_value, dtype)
def fill_(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
if not isinstance(fill_value, (bool, int, float, complex)):
fill_value = fold_constant(fill_value)
return self.full_impl(self.infer_shape(data), fill_value, input_types[0])
    def linspace(self, inputs, input_types):
        """Convert aten::linspace to relay arange.

        inputs: [start, stop, steps, dtype, ...]. Note inputs[2] is the
        *number of samples* (torch semantics); it is converted into the
        arange spacing below.
        """
        start = inputs[0]
        stop = inputs[1]
        step = inputs[2]

        # Find the spacing between values as step
        if step != 1:
            # spacing = (stop - start) / (steps - 1); bump `stop` one spacing
            # past the last sample so arange's half-open range includes it.
            step = (stop - start) / (step - 1)
            stop = stop + step
        else:
            # A single sample: arange over [start, start + 1) yields [start].
            stop = start + step
        if inputs[3] is None:
            import torch

            dtype = _convert_data_type(str(torch.get_default_dtype()))
        else:
            dtype = _convert_dtype_value(inputs[3])
        start = _create_typed_const(start, dtype)
        stop = _create_typed_const(stop, dtype)
        step = _create_typed_const(step, dtype)
        return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
def relu(self, inputs, input_types):
data = inputs[0]
if self.is_quantized_tensor(data):
assert len(inputs) == 3, "Input quant param not found in op inputs"
input_zero_point = _expr.const(inputs[2], dtype="int32")
return qnn_torch.quantized_relu(data, input_zero_point)
return _op.nn.relu(data)
def relu6(self, inputs, input_types):
data = inputs[0]
return _op.tensor.clip(data, 0.0, 6.0)
def prelu(self, inputs, input_types):
# Reference: https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html#torch.nn.PReLU
data = inputs[0]
dim = self.get_dims(data)
ndims = len(dim)
axis = 0 if ndims == 1 else 1
alpha = _op.broadcast_to(inputs[1], (dim[axis]))
return _op.nn.prelu(data, alpha, axis)
def leaky_relu(self, inputs, input_types):
data = inputs[0]
alpha = float(inputs[1])
return _op.nn.leaky_relu(data, alpha)
def elu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
alpha = _expr.const(-float(inputs[1]), dtype=dtype)
return alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
def celu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
alpha = _expr.const(float(inputs[1]), dtype=dtype)
zero = _op.const(0, dtype)
return alpha * _op.minimum(
zero, _op.exp(data / alpha) - _expr.const(1, dtype=dtype)
) + _op.nn.relu(data)
def gelu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
# gelu is data * normcdf(data)
# normcdf expressed as erf because we don't currently have that intrinsic
# note that there is also a fastgelu variant approximating normcdf
# with tanh and third order polynomials, but this is "true" gelu
return data * (
_expr.const(0.5, dtype=dtype)
+ _op.erf(data * _expr.const(0.5**0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)
)
def selu(self, inputs, input_types):
data = inputs[0]
# https://pytorch.org/docs/stable/nn.html#selu
dtype = input_types[0]
alpha = _expr.const(-1.6732632423543772848170429916717, dtype=dtype)
gamma = _expr.const(1.0507009873554804934193349852946, dtype=dtype)
return gamma * (
alpha * _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
)
def silu(self, inputs, input_types):
data = inputs[0]
return data * _op.tensor.sigmoid(data)
def glu(self, inputs, input_types):
"""
Applies the gated linear unit function GLU(a,b)= a * sigmoid(b)
where a is the first half of the input matrices and b is the second half.
Link: https://pytorch.org/docs/stable/generated/torch.nn.GLU.html
"""
data = inputs[0]
dim = inputs[1]
relay_tup = _op.transform.split(data, 2, dim)
return relay_tup[0] * _op.tensor.sigmoid(relay_tup[1])
def log_sigmoid(self, inputs, input_types):
data = inputs[0]
mn = _op.minimum(_op.const(0, dtype=input_types[0]), data)
z = _op.exp(-_op.abs(data))
return mn - self.log1p([z], input_types)
    def cross_entropy_loss_with_logits(self, inputs, input_types):
        """Convert aten::cross_entropy_loss.

        When input and target shapes differ, the target holds class indices
        and the loss lowers to nll_loss(log_softmax(input)). When the shapes
        match (probability targets), only mean reduction with no class
        weights, no ignore_index and no label smoothing is supported.
        """
        input = inputs[0]
        target = inputs[1]
        weights = inputs[2]
        reduction = inputs[3]
        ignore_index = inputs[4]
        label_smoothing = inputs[5]
        input_shape = self.infer_shape(input)
        target_shape = self.infer_shape(target)
        if input_shape != target_shape:
            # Hard-label path: map the integer reduction code to its name.
            if reduction == 0:
                reduction = "none"
            elif reduction == 1:
                reduction = "mean"
            else:
                reduction = "sum"
            num_class = self.infer_shape(input)[1]
            if weights is None:
                # Unweighted loss: every class weighted equally.
                weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])
            return _op.nn.nll_loss(
                _op.nn.log_softmax(input), target, weights, reduction, ignore_index
            )
        assert reduction == 1, "reduction not supported in cross_entropy_loss"
        assert ignore_index == -100, "ignore_index not supported in cross_entropy_loss"
        assert label_smoothing == 0.0, "label_smoothing not supported in cross_entropy_loss"
        assert weights is None, "weight not supported in cross_entropy_loss"
        return _op.nn.cross_entropy_with_logits(_op.nn.log_softmax(input), target)
def l1_loss(self, inputs, input_types):
assert len(inputs) == 3
[predictions, targets, reduction] = inputs
delta = _op.abs(_op.subtract(predictions, targets))
if reduction == 0:
# reduction = "none"
return delta
elif reduction == 1:
# reduction = "mean"
return _op.mean(delta)
else:
# reduction = "sum"
return _op.sum(delta)
def mse_loss(self, inputs, input_types):
assert len(inputs) == 3
[predictions, targets, reduction] = inputs
delta = _op.subtract(predictions, targets)
delta = _op.power(delta, _expr.const(2, input_types[0]))
if reduction == 0:
# reduction = "none"
return delta
elif reduction == 1:
# reduction = "mean"
return _op.mean(delta)
else:
# reduction = "sum"
return _op.sum(delta)
    def hard_sigmoid(self, inputs, input_types):
        """Convert aten::hardsigmoid: relu6(x + 3) / 6, with dequantize /
        requantize wrapping for quantized inputs."""

        def _relu6(x):
            return _op.tensor.clip(x, 0.0, 6.0)

        def func(x):
            return _relu6(x + _expr.const(3.0)) / _expr.const(6.0)

        if self.is_quantized_tensor(inputs[0]):
            input_scale = _expr.const(inputs[1])
            input_zero_point = _expr.const(inputs[2])
            # PyTorch seems to use the following output qparams, but accuracy
            # is broken if we use this.
            # TODO(masahi): Revisit this parameter choice
            #
            # Taken from src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
            # output_scale = _expr.const(0.00390625)  # 1.0 / 2^8
            # output_zero_point = _expr.const(-128)
            output_scale = input_scale
            output_zero_point = input_zero_point
            # Compute in float between dequantize and quantize.
            data = qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
            out = func(data)
            return qnn.op.quantize(out, output_scale, output_zero_point, out_dtype="uint8")
        return func(inputs[0])
def hard_swish(self, inputs, input_types):
data = inputs[0]
return data * self.hard_sigmoid(inputs, input_types)
def adaptive_avg_pool(self, op, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
for i, item in enumerate(output_size):
if isinstance(item, tvm.relay.expr.Constant):
# convert Constant to int
output_size[i] = item.data.numpy()[()]
def func(x):
return op(x, output_size=output_size)
if self.is_quantized_tensor(data):
return qnn_torch.apply_with_upcast(data, func)
return func(data)
def adaptive_max_pool(self, op, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
for i, item in enumerate(output_size):
if isinstance(item, tvm.relay.expr.Constant):
# convert Constant to int
output_size[i] = item.data.numpy()[()]
# returns dummy indices too
return op(data, output_size=output_size), None
@staticmethod
def convert_const_list(data):
if isinstance(data, list):
for i, _ in enumerate(data):
if isinstance(data[i], _expr.Expr):
data[i] = int(_infer_value_simulated(data[i], {}).numpy())
return data
def maxpool_2d(self, inputs, input_types):
data = inputs[0]
pool_size = self.convert_const_list(inputs[1])
strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool2d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout="NCHW",
ceil_mode=ceil_mode,
)
def maxpool_2d_with_indices(self, inputs, input_types):
# returns dummy indices too
return self.maxpool_2d(inputs, input_types), None
def maxpool_1d(self, inputs, input_types):
data = inputs[0]
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool1d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout="NCW",
ceil_mode=ceil_mode,
)
def maxpool_3d(self, inputs, input_types):
data = inputs[0]
need_squeeze = False
if len(self.get_dims(data)) == 4:
need_squeeze = True
data = _op.expand_dims(data, 0)
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
res = _op.nn.max_pool3d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
ceil_mode=ceil_mode,
)
return res if not need_squeeze else _op.squeeze(res, [0])
def hardtanh(self, inputs, input_types):
a = inputs[0]
tanh_min = float(inputs[1])
tanh_max = float(inputs[2])
return _op.tensor.clip(a, tanh_min, tanh_max)
    def convolution(self, inputs, input_types):
        """Convert aten::_convolution to relay conv / conv_transpose.

        inputs: [data, weight, bias, stride, padding, dilation, transposed,
        output_padding, groups, ...]. Dispatches to 1-D/2-D/3-D (optionally
        transposed) convolution based on the kernel rank and the
        `transposed` flag.
        """
        # Use transpose or normal
        use_transpose = True if inputs[6] == 1 else False
        data = inputs[0]
        weight = inputs[1]
        bias = inputs[2]
        strides = tuple(inputs[3])
        padding = tuple(inputs[4])
        dilation = tuple(inputs[5])
        if isinstance(weight, _expr.Expr):
            inferred_shape = self.infer_shape(weight)
            weight_shape = []
            for infer in inferred_shape:
                weight_shape.append(infer)
        else:
            msg = f"Data type {type(weight)} could not be parsed in conv op"
            raise AssertionError(msg)
        groups = int(inputs[8])
        if use_transpose:
            # Transposed weights are laid out (in_channels, out_ch/groups, ...).
            channels = weight_shape[1] * groups
            in_channels = weight_shape[0]
        else:
            channels = weight_shape[0]
            in_channels = weight_shape[1]
        # Check if this is depth wise convolution
        # We need to reshape weight so that Relay could recognize this is depth wise
        # weight_shape[1] is always in_channels // groups
        # For depthwise, in_channels == groups, so weight_shape[1] == 1
        # If groups > 1 but weight_shape[1] != 1, this is group convolution
        if groups > 1 and in_channels == 1:
            channel_multiplier = channels // groups
            new_weight_shape = (groups, channel_multiplier) + tuple(weight_shape[2:])
            weight = _op.transform.reshape(weight, new_weight_shape)
        kernel_size = weight_shape[2:]
        use_bias = isinstance(bias, _expr.Expr)
        # We are trying to invoke various relay operations through a single conv_op variable.
        # However the function signatures for some operations have additional attributes so we
        # pass these in along with the standard ones.
        additional_arguments = dict()
        if use_transpose:
            if len(kernel_size) == 3:
                conv_op = _op.nn.conv3d_transpose
            elif len(kernel_size) == 2:
                conv_op = _op.nn.conv2d_transpose
            else:
                conv_op = _op.nn.conv1d_transpose
            output_padding = tuple(inputs[7])
            additional_arguments["output_padding"] = output_padding
        else:
            if len(kernel_size) == 3:
                conv_op = _op.nn.conv3d
            elif len(kernel_size) == 2:
                conv_op = _op.nn.conv2d
            else:
                conv_op = _op.nn.conv1d
        if len(kernel_size) == 3:
            data_layout = "NCDHW"
            kernel_layout = "OIDHW"
            if use_transpose:
                # Transposed convolutions have IODHW layout.
                kernel_layout = "IODHW"
        elif len(kernel_size) == 2:
            data_layout = "NCHW"
            kernel_layout = "OIHW"
            if use_transpose:
                # Transposed convolutions have IOHW layout.
                kernel_layout = "IOHW"
        else:
            data_layout = "NCW"
            kernel_layout = "OIW"
            if use_transpose:
                # Transposed convolutions have IOW layout.
                kernel_layout = "IOW"
        # Conv1d does not currently support grouped convolution so we convert it to conv2d
        is_grouped_conv1d = False
        if groups > 1 and len(kernel_size) == 1 and not use_transpose:
            is_grouped_conv1d = True
            conv_op = _op.nn.conv2d
            # Insert a unit spatial axis in the kernel, strides, padding and
            # the data/weight tensors so the 1-D conv runs as a 2-D conv.
            kernel_size = [1] + kernel_size
            strides = (1,) + strides
            padding = (0,) + padding
            dilation = (1,) + dilation
            data = _op.expand_dims(data, axis=2)
            weight = _op.expand_dims(weight, axis=2)
            data_layout = "NCHW"
            kernel_layout = "OIHW"
        conv_out = conv_op(
            data,
            weight,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            channels=channels,
            kernel_size=kernel_size,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            out_layout="",
            out_dtype="",
            **additional_arguments,
        )
        if use_bias:
            res = _op.nn.bias_add(conv_out, bias)
        else:
            res = conv_out
        if is_grouped_conv1d:
            # Because we conducted grouped conv1d convolution through conv2d we must
            # squeeze the output to get the correct result.
            res = _op.squeeze(res, axis=[2])
        return res
def softmax(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
if isinstance(axis, str):
axis = int(axis)
return _op.nn.softmax(data, axis=axis)
def threshold(self, inputs, input_types):
data = inputs[0]
threshold_f = float(inputs[1])
threshold_ = _op.full_like(inputs[0], fill_value=_expr.const(threshold_f))
value_f = float(inputs[2])
value = _op.full_like(inputs[0], fill_value=_expr.const(value_f))
return _op.where(_op.greater(data, threshold_), data, value)
def contiguous(self, inputs, input_types):
return inputs[0]
    def batch_norm(self, inputs, input_types):
        """Convert aten::batch_norm (inference form, channel axis 1).

        inputs: [data, weight, bias, running_mean, running_var, training,
        momentum, eps, ...]. Missing affine parameters are replaced by
        identity gamma/beta constants.
        """
        data = inputs[0]
        data_type = input_types[0]
        channels = self.infer_shape(data)
        scale = isinstance(inputs[1], _expr.Expr)
        if scale:
            gamma = inputs[1]
        else:
            # No weight given: use gamma = 1.
            gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)
        center = isinstance(inputs[2], _expr.Expr)
        if center:
            beta = inputs[2]
        else:
            # No bias given: use beta = 0.
            beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)
        moving_mean = inputs[3]
        moving_var = inputs[4]
        epsilon = float(inputs[7])
        return _op.nn.batch_norm(
            data,
            gamma,
            beta,
            moving_mean,
            moving_var,
            axis=1,
            epsilon=epsilon,
            center=center,
            scale=scale,
        )[0]
    def instance_norm(self, inputs, input_types):
        """Convert aten::instance_norm (channel axis 1).

        inputs: [data, weight, bias, running_mean, running_var,
        use_input_stats, momentum, eps, ...]. When use_input_stats is False,
        the running statistics are used, which is exactly batch_norm.
        """
        data = inputs[0]
        data_type = input_types[0]
        channels = self.infer_shape(data)
        running_mean = inputs[3]
        running_var = inputs[4]
        use_input_stats = inputs[5]
        if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):
            scale = center = True
            weight = inputs[1]
            beta = inputs[2]
            gamma = weight
        else:
            scale = center = False
        # Fall back to identity affine parameters when none were provided.
        if not scale:
            gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)
        if not center:
            beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)
        epsilon = float(inputs[7])
        if not use_input_stats:
            return _op.nn.batch_norm(
                data,
                gamma,
                beta,
                running_mean,
                running_var,
                axis=1,
                epsilon=epsilon,
                center=center,
                scale=scale,
            )[0]
        return _op.nn.instance_norm(
            data, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale
        )
def get_dims(self, data):
import torch
if isinstance(data, _expr.Expr):
dims = self.infer_shape(data)
elif isinstance(data, list):
dims = data
elif isinstance(data, (torch.Tensor, np.ndarray)):
dims = data.shape
else:
msg = f"Data type {type(data)} could not be parsed"
raise AssertionError(msg)
return dims
def layer_norm(self, inputs, input_types):
data = inputs[0]
ndims = len(self.get_dims(inputs[1]))
assert ndims == 1, "Support only normalization over last one dimension."
return _op.nn.layer_norm(
data,
gamma=inputs[2],
beta=inputs[3],
axis=-1,
epsilon=float(inputs[4]),
center=True,
scale=True,
)
def group_norm(self, inputs, input_types):
data = inputs[0]
gamma = inputs[2]
beta = inputs[3]
num_groups = inputs[1]
epsilon = float(inputs[4])
return _op.nn.group_norm(
data,
gamma=gamma,
beta=beta,
num_groups=num_groups,
axis=1,
epsilon=epsilon,
center=True,
scale=True,
)
    def transpose(self, inputs, input_types):
        """Convert aten::t / aten::transpose / aten::permute.

        One input: 2-D transpose (swap the last two axes). Three inputs:
        swap the two given axes. Otherwise inputs[1] is a full permutation.
        """
        data = inputs[0]

        import torch

        if isinstance(data, _expr.Expr):
            ndims = len(self.infer_shape_with_prelude(data))
        elif isinstance(data, list):
            ndims = data
        elif isinstance(data, (torch.Tensor, np.ndarray)):
            ndims = data.shape
        else:
            msg = f"Data type {type(data)} could not be parsed in transpose op"
            raise AssertionError(msg)
        if isinstance(data, tvm.runtime.NDArray):
            ndims = len(data.shape)
        axes = list(range(ndims))
        num_inputs = len(inputs)
        if num_inputs == 1:
            # aten::t — swap the last two axes (identity for rank < 2).
            if ndims >= 2:
                axes[-1] = ndims - 2
                axes[-2] = ndims - 1
            if not isinstance(data, _expr.Expr):
                data = _expr.const(data)
        elif num_inputs == 3:
            # aten::transpose(dim0, dim1) — normalize negative dims, then swap.
            parse = lambda i: ndims * (i < 0) + i
            src, dst = [parse(int(inputs[i])) for i in [1, 2]]
            axes[src] = dst
            axes[dst] = src
        else:
            # aten::permute — the explicit axis order.
            axes = inputs[1]
        return _op.transform.transpose(data, axes)
def numpy_T(self, inputs, input_types):
data = inputs[0]
shape = self.infer_shape(data)
if len(shape) != 2:
logger.warning(
"The use of Tensor.T on tensors of dimensions != 2 is deprecated"
"and will be removed in a future release of PyTorch."
)
return _op.transform.transpose(data)
    def flatten(self, inputs, input_types):
        """Convert aten::flatten(start_dim, end_dim).

        Builds a reshape where 0 copies an existing dim, -1 absorbs the
        flattened range, and interior 1s (squeezed afterwards) keep the rank
        bookkeeping consistent.
        """
        data = inputs[0]
        start = int(inputs[1])
        end = int(inputs[2])
        dshape = get_const_tuple(self.infer_shape_with_prelude(data))
        ndim = len(dshape)
        # Normalize negative dims.
        if start < 0:
            start += ndim
        if end < 0:
            end += ndim
        assert start <= end, "start dim cannot come after end dim"
        new_shape = [0] * start
        new_shape.append(-1)
        squeeze_axes = []
        for i in range(start + 1, end + 1):
            new_shape.append(1)
            squeeze_axes.append(i)
        for _ in range(end + 1, ndim):
            new_shape.append(0)
        out = _op.reshape(data, new_shape)
        if squeeze_axes:
            out = _op.squeeze(out, axis=squeeze_axes)
        return out
def addmm(self, inputs, input_types):
input_mat = inputs[0]
mat1 = inputs[1]
data_type = input_types[1]
mat2 = inputs[2]
beta = inputs[3]
alpha = inputs[4]
if not isinstance(alpha, _expr.Expr) and alpha != 1:
alpha = _create_typed_const(alpha, data_type)
mat1 *= alpha
if not isinstance(beta, _expr.Expr) and beta != 1:
beta = _create_typed_const(beta, data_type)
mat2 *= beta
transposed_mat2 = _op.transform.transpose(mat2, axes=[1, 0])
units = self.infer_shape(transposed_mat2)[0]
dense_out = _op.nn.dense(mat1, transposed_mat2, units=units)
return dense_out + input_mat
def size(self, inputs, input_types):
shape = self.infer_shape_with_prelude(inputs[0])
axis = None
if len(inputs) > 1:
axis = int(inputs[1])
if any(map(lambda s: isinstance(s, tvm.tir.expr.Any), shape)):
if axis is None or isinstance(shape[axis], tvm.tir.expr.Any):
shape_dynamic = _op.shape_of(inputs[0], dtype="int32")
if axis is not None:
return _op.take(shape_dynamic, _expr.const(axis), 0)
return shape_dynamic
if axis is not None:
return _expr.const(shape[axis])
return _expr.const(shape)
    def numtotensor(self, inputs, input_types):
        """Convert prim::NumToTensor.

        Relay Exprs pass through unchanged; plain scalars are wrapped in a
        0-d numpy array (converted to a relay constant later in the pipeline).
        """
        val = inputs[0]
        dtype = input_types[0]
        if isinstance(val, _expr.Expr):
            return val
        if isinstance(val, tvm.tir.IntImm):
            val = val.__int__()
            # Use the Python int type so numpy picks its default integer dtype.
            dtype = int
        arr = val * np.ones([]).astype(dtype)
        return arr
def tensortonum(self, inputs, input_types):
return inputs[0]
    def view(self, inputs, input_types):
        """Convert aten::view.

        inputs[1] is the target shape, either a Python list (possibly holding
        relay Exprs) or a tensor whose inferred shape is used. Symbolic dims
        are folded to concrete ints via simulated inference.
        """
        data = inputs[0]
        if len(inputs) == 3:
            # Three-input form: [data, dim0, tensor]; the second target dim is
            # the leading extent of inputs[2].
            shape_inp = [inputs[1], self.infer_shape(inputs[2])[0]]
        else:
            if isinstance(inputs[1], list):
                shape_inp = inputs[1]
            else:
                shape_inp = self.infer_shape(inputs[1])
        new_shape = shape_inp
        for i, shape in enumerate(shape_inp):
            if isinstance(shape, _expr.Expr):
                # NOTE(review): new_shape aliases shape_inp here, so this also
                # mutates the caller-provided list in place.
                val = _infer_value_simulated(shape, {})
                new_shape[i] = val.numpy().item(0)
        return _op.transform.reshape(data, new_shape)
    def reshape(self, inputs, input_types):
        """Convert aten::reshape.

        Static target dims are collected as Python ints; if any dim remains
        symbolic, the whole target shape is assembled at runtime as an int64
        tensor instead.
        """
        data = inputs[0]
        new_shape = inputs[1]

        tmp_shape = []
        is_dyn = False
        for s in new_shape:
            if isinstance(s, _expr.Constant):
                tmp_shape.append(int(s.data.numpy()))
            elif isinstance(s, _expr.Expr):
                dim, success = try_infer_value(s, lambda ret: int(ret))
                tmp_shape.append(dim)
                if not success:
                    is_dyn = True
            else:
                tmp_shape.append(s)
        if is_dyn:
            # At least one dim is only known at runtime: concatenate per-dim
            # int64 scalars into a shape tensor.
            new_shape = []
            for i, s in enumerate(tmp_shape):
                if not isinstance(s, _expr.Expr):
                    s = _expr.const(s, "int64")
                else:
                    s = _op.cast(s, "int64")
                new_shape.append(_op.expand_dims(s, axis=0))
            new_shape = _op.concatenate(new_shape, axis=0)
        else:
            new_shape = tmp_shape
        return _op.transform.reshape(data, new_shape)
def reshape_as(self, inputs, input_types):
data = inputs[0]
new_shape = self.infer_shape(inputs[1])
return _op.transform.reshape(data, new_shape)
def pixel_shuffle(self, inputs, input_types):
data = inputs[0]
upscale_factor = inputs[1]
upscale_squared = upscale_factor * upscale_factor
b, c, h, w = self.infer_shape(data)
assert (
c % upscale_squared == 0
), "input channel should be divisible by square of upscale_factor"
ndims = len(self.infer_shape_with_prelude(data))
axes = list(range(ndims))
num_inputs = len(inputs)
oc = c // upscale_squared
oh = h * upscale_factor
ow = w * upscale_factor
new_shape = [b, oc, upscale_factor, upscale_factor, h, w]
out_shape = [b, oc, oh, ow]
data = _op.transform.reshape(data, new_shape)
# The data will be transposed to
# [b, oc, h, upscale_factor, w, upscale_factor]
# for further reshape
axes = [0, 1, 4, 2, 5, 3]
data = _op.transform.transpose(data, axes)
return _op.transform.reshape(data, out_shape)
def clone(self, inputs, input_types):
data = inputs[0]
return _op.tensor.copy(data)
def log_softmax(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return _op.nn.log_softmax(data, axis)
def sigmoid(self, inputs, input_types):
data = inputs[0]
def func(x):
return _op.tensor.sigmoid(x)
if self.is_quantized_tensor(data):
assert len(inputs) == 5, "Input/Ouput quant param not found in op inputs"
return qnn_torch.quantized_sigmoid(inputs)
return func(data)
def softplus(self, inputs, input_types):
dtype = input_types[0]
beta = _expr.const(float(inputs[1]), dtype=dtype)
threshold = int(inputs[2]) if inputs[2] else 20
threshold_ = _op.full_like(inputs[0], fill_value=_expr.const(threshold))
softplus_value = _op.log(_op.exp(inputs[0] * beta) + _expr.const(1.0, dtype=dtype)) / beta
return _op.where(_op.greater(inputs[0] * beta, threshold_), inputs[0], softplus_value)
    def make_avg_pool(self, dim):
        """Return a converter for aten::avg_pool{1,2,3}d, closed over *dim*.

        Quantized inputs are upcast before pooling and requantized after.
        """
        def avg_pool(inputs, input_types):
            data = inputs[0]
            pool_size = self.convert_const_list(inputs[1])
            # torch passes an empty stride list to mean "stride = kernel size"
            strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
            padding = inputs[3]
            ceil_mode = int(inputs[4])
            count_include_pad = int(inputs[5])
            def func(x):
                if dim == 1:
                    return _op.nn.avg_pool1d(
                        x,
                        pool_size=pool_size,
                        strides=strides,
                        padding=padding,
                        dilation=(1,),
                        ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad,
                    )
                elif dim == 2:
                    return _op.nn.avg_pool2d(
                        x,
                        pool_size=pool_size,
                        strides=strides,
                        padding=padding,
                        dilation=(1, 1),
                        ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad,
                    )
                elif dim == 3:
                    return _op.nn.avg_pool3d(
                        x,
                        pool_size=pool_size,
                        strides=strides,
                        padding=padding,
                        dilation=(1, 1, 1),
                        ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad,
                    )
                else:
                    msg = "Average Pooling dimension should be between 1 and 3"
                    raise RuntimeError(msg)
            if self.is_quantized_tensor(data):
                return qnn_torch.apply_with_upcast(data, func)
            return func(data)
        return avg_pool
    def linear(self, inputs, input_types):
        """Convert aten::linear: x @ w.T + bias.

        2-D x 2-D uses nn.dense directly; a 1-D weight goes straight through
        matmul; otherwise the weight is transposed and routed through the
        general (possibly batched) matmul converter.
        """
        # https://pytorch.org/docs/stable/nn.functional.html#linear
        # 0 - input
        # 1 - weight
        bias = inputs[2]
        a_shape = self.infer_shape_with_prelude(inputs[0])
        b_shape = self.infer_shape_with_prelude(inputs[1])
        if len(a_shape) == 2 and len(b_shape) == 2:
            mm_out = _op.nn.dense(inputs[0], inputs[1])
        elif len(b_shape) == 1:
            mm_out = self.matmul([inputs[0], inputs[1]], input_types[:2])
        else:
            mm_out = self.matmul(
                [inputs[0], _op.transpose(inputs[1], axes=(1, 0))], input_types[:2]
            )
        if isinstance(bias, _expr.Expr):
            bias_ndims = len(self.infer_shape_with_prelude(bias))
            if bias_ndims == 1:
                return _op.nn.bias_add(mm_out, bias, axis=-1)
            # multi-dim bias falls back to a broadcasting add
            mm_dtype = self.infer_type_with_prelude(mm_out).dtype
            return self.add([mm_out, bias], [mm_dtype, input_types[2]])
        return mm_out
def dropout(self, inputs, input_types):
data = inputs[0]
rate = float(inputs[1])
return _op.nn.dropout(data, rate)
    def make_reduce(self, name):
        """Return a converter for the reduction op *name* (sum, max, ...).

        The branch order below matters: an int axis must be checked before
        the int-sequence case, and anything else is treated as a tensor whose
        shape supplies the axis list.
        """
        def reduce(inputs, input_types):
            data = inputs[0]
            axis = None
            keepdims = False
            if len(inputs) > 2:  # default, torch have only data, axis=None, keepdims=False
                if isinstance(inputs[1], int):
                    axis = int(inputs[1])
                elif _is_int_seq(inputs[1]):
                    axis = inputs[1]
                else:
                    axis = list(self.infer_shape(inputs[1]))
                keepdims = bool(inputs[2])
            return get_relay_op(name)(data, axis=axis, keepdims=keepdims)
        return reduce
def norm(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
axis = None
keepdims = False
if len(inputs) > 3:
axis = inputs[2]
keepdims = bool(inputs[3])
order = inputs[1]
if order == np.inf:
return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims)
elif order == np.NINF:
return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims)
else:
reci_order = _expr.const(1.0 / order, dtype=dtype)
order = _expr.const(order)
return _op.power(
_op.reduce.sum(_op.power(_op.abs(data), order), axis=axis, keepdims=keepdims),
reci_order,
)
def frobenius_norm(self, inputs, input_types):
data = inputs[0]
axis = None
keepdims = False
if len(inputs) > 2:
axis = inputs[1] if len(inputs[1]) > 0 else None
keepdims = bool(inputs[2])
return _op.sqrt(_op.reduce.sum((data * data), axis=axis, keepdims=keepdims))
def std(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.std(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
def variance(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.variance(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
def mean(self, inputs, input_types):
data = inputs[0]
if inputs[1]:
axis = inputs[1]
else:
axis = None
if len(inputs) > 2 and inputs[2]:
keepdims = int(inputs[2])
else:
keepdims = False
if len(inputs) > 3 and inputs[3]:
exclude = int(inputs[3])
else:
exclude = False
def func(x):
return _op.mean(x, axis, keepdims, exclude)
if self.is_quantized_tensor(data):
assert len(inputs) == 6, "Input quant param not found in op inputs"
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
# refer to aten/src/ATen/native/quantized/cpu/qreduction.cpp
return qnn_torch.apply_with_fp32_fallback(data, input_scale, input_zero_point, func)
return func(data)
def var_mean(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
m, v = _op.reduce.mean_variance(data, axis, keepdims, False, unbiased)
return v, m
    def chunk(self, inputs, input_types):
        """Convert aten::chunk: split into num_chunks pieces along axis.

        When the dim does not divide evenly, torch yields (num_chunks - 1)
        equal pieces plus a smaller remainder; the split points below mirror
        that by using dim // (num_chunks - 1) as the uniform piece size.
        """
        data = inputs[0]
        num_chunks = int(inputs[1])
        axis = int(inputs[2])
        if isinstance(data, _expr.Expr):
            inferred_shape = self.infer_shape_with_prelude(data)
        shape = []
        for infer in inferred_shape:
            shape.append(infer)
        dim = int(shape[axis])
        if dim % num_chunks:
            unif_size = int(dim / (num_chunks - 1))
        else:
            unif_size = int(dim / num_chunks)
        indeces = []
        for i in range(unif_size, dim, unif_size):
            indeces.append(i)
        return _op.split(data, indeces, axis)
def baddbmm(self, inputs, _):
input = inputs[0]
batch1, batch2 = inputs[1:3]
beta = _expr.const(float(inputs[3]))
alpha = _expr.const(float(inputs[4]))
return beta * input + alpha * _op.nn.batch_matmul(batch1, batch2, transpose_b=False)
    def matmul(self, inputs, input_types):
        """Convert aten::matmul with full numpy-style rank/broadcast rules.

        1-D operands are temporarily promoted to 2-D (and the added axis is
        squeezed back out at the end); the 2-D x 2-D case maps to nn.dense,
        and everything else is flattened into a single batched matmul with
        explicit broadcasting of the batch dims.
        """
        assert len(inputs) == 2, "Two tensors to be multiplied are expected."
        a = inputs[0]
        b = inputs[1]
        # Need to check input shape as batch matmul must be supported.
        a_shape = self.infer_shape_with_prelude(a)
        b_shape = self.infer_shape_with_prelude(b)
        a_ndims = len(a_shape)
        b_ndims = len(b_shape)
        # Check if both tensors are at least 1D.
        if a_ndims == 0 or b_ndims == 0:
            msg = "Both arguments to matmul must be at least 1D."
            raise AssertionError(msg)
        # Check if tensors can be multiplied.
        b_mulaxis = b_shape[-2] if b_ndims > 1 else b_shape[0]
        if a_shape[-1] != b_mulaxis:
            msg = "Tensors being multiplied do not have compatible shapes."
            raise AssertionError(msg)
        # If 1D, remember axis that should be deleted at the end
        squeeze_dims = []
        if a_ndims == 1:
            a = _op.expand_dims(a, axis=0)
            squeeze_dims += [-2]
            a_ndims = 2
            a_shape = (1,) + a_shape
        if b_ndims == 1:
            b = _op.expand_dims(b, axis=1)
            squeeze_dims += [-1]
            b_ndims = 2
            b_shape = b_shape + (1,)
        # Compute result
        if a_ndims == 2 and b_ndims == 2:
            # Result is obtained using matmul
            out = _op.nn.dense(a, _op.transpose(b))
        else:
            # Result is obtained using batch_matmul
            batch_shape = [1] * (max(a_ndims, b_ndims) - 2)
            # batch dims are aligned right-to-left, numpy broadcast style
            for i, j in enumerate(reversed(a_shape[:-2])):
                batch_shape[i] = j
            for i, j in enumerate(reversed(b_shape[:-2])):
                # Need to check if axis can be broadcasted
                if batch_shape[i] == 1 or j == 1 or batch_shape[i] == j:
                    batch_shape[i] = max(batch_shape[i], j)
                else:
                    msg = "Batch dimensions are not broadcastable."
                    raise AssertionError(msg)
            batch_shape = batch_shape[::-1]
            a = _op.broadcast_to(a, batch_shape + list(a_shape[-2:]))
            b = _op.broadcast_to(b, batch_shape + list(b_shape[-2:]))
            # collapse all batch dims into one for batch_matmul, then restore
            out = _op.nn.batch_matmul(
                _op.reshape(a, [-1, *a_shape[-2:]]),
                _op.reshape(b, [-1, *b_shape[-2:]]),
                transpose_b=False,
            )
            out_shape = batch_shape + [a_shape[-2]] + [b_shape[-1]]
            out = _op.reshape(out, out_shape)
        return _op.squeeze(out, axis=squeeze_dims)
    def expand(self, inputs, input_types):
        """Convert aten::expand by repeating size-1 axes to the target sizes.

        -1 in *sizes* means "keep this dim"; only dims that are currently 1
        are repeated, matching torch's expand semantics of broadcasting
        singleton dims.
        """
        data_in = inputs[0]
        shape = list(self.infer_shape(data_in))
        ndims = len(shape)
        sizes = inputs[1]
        out = data_in
        out_dims = len(sizes)
        if ndims < out_dims:
            # prepend singleton axes so ranks match before repeating
            num_newaxis = out_dims - ndims
            out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)
            shape = [1] * num_newaxis + shape
        for i in range(out_dims):
            if sizes[i] != -1 and shape[i] == 1:
                if not isinstance(sizes[i], int):
                    sizes[i] = int(_infer_value(sizes[i], {}).numpy())
                out = _op.repeat(out, sizes[i], axis=i)
        return out
def int(self, inputs, input_types):
if isinstance(inputs[0], _expr.Expr):
return inputs[0]
return int(inputs[0])
def identity(self, inputs, input_types):
return inputs[0]
def none(self, inputs, input_types):
return None
    def pad_common(self, mode, pad_value, inputs, input_types):
        """Shared lowering for all pad variants.

        torch lists paddings innermost-dim first as (before, after) pairs;
        this maps them onto TVM's outermost-first per-dim pairs.  An all-zero
        padding returns the input untouched.
        """
        data = inputs[0]
        if isinstance(inputs[1], list):
            pad_list = inputs[1]
        else:
            pad_list = list(self.infer_shape(inputs[1]))
        # initialize paddings based on input len
        pad_len = len(self.infer_shape(data)) * 2
        paddings = [0] * pad_len
        # torch orders pads from the last dim backwards, hence the negative
        # indices: pad_list[0:2] pads the last dim, [2:4] the one before, ...
        if len(pad_list) >= 2:
            paddings[-1] = pad_list[1]
            paddings[-2] = pad_list[0]
        if len(pad_list) >= 4:
            paddings[-3] = pad_list[3]
            paddings[-4] = pad_list[2]
        if len(pad_list) >= 6:
            paddings[-5] = pad_list[5]
            paddings[-6] = pad_list[4]
        # group into tuple of 2 ints
        paddings = [paddings[i : i + 2] for i in range(0, len(paddings), 2)]
        const_paddings = []
        non_zero_found = False
        for pad in paddings:
            const_paddings.append([])
            for p in pad:
                if isinstance(p, _expr.Expr):
                    p = int(_infer_value(p, {}).numpy())
                elif not isinstance(p, int):
                    raise NotImplementedError("pad width should be int/expr")
                const_paddings[-1].append(p)
                if p != 0:
                    non_zero_found = True
        if not non_zero_found:
            return data
        elif mode == "constant":
            return _op.nn.pad(data, const_paddings, pad_value=pad_value, pad_mode=mode)
        else:
            return _op.nn.pad(data, const_paddings, pad_mode=mode)
def pad(self, inputs, input_types):
# mode: Optional default "constant"
if len(inputs) > 2 and inputs[2] is not None:
mode = inputs[2]
else:
mode = "constant"
# pad_value: Optional default 0
if len(inputs) == 4 and inputs[3] is not None:
pad_value = inputs[3]
else:
pad_value = 0
# replicate is edge in TVM's padding mode
if mode == "replicate":
mode = "edge"
elif mode == "circular":
raise ValueError("circular mode for torch.nn.functional.pad are not supported in TVM")
return self.pad_common(mode, pad_value, inputs, input_types)
    def constant_pad_nd(self, inputs, input_types):
        """Convert aten::constant_pad_nd; inputs[2] is the fill value."""
        return self.pad_common("constant", _expr.const(inputs[2]), inputs, input_types)
    def reflection_pad1d(self, inputs, input_types):
        """Convert aten::reflection_pad1d via the shared pad lowering."""
        return self.pad_common("reflect", 0, inputs, input_types)
    def reflection_pad2d(self, inputs, input_types):
        """Convert aten::reflection_pad2d via the shared pad lowering."""
        return self.pad_common("reflect", 0, inputs, input_types)
    def replication_pad1d(self, inputs, input_types):
        """Convert aten::replication_pad1d (TVM "edge" mode)."""
        return self.pad_common("edge", 0, inputs, input_types)
    def replication_pad2d(self, inputs, input_types):
        """Convert aten::replication_pad2d (TVM "edge" mode)."""
        return self.pad_common("edge", 0, inputs, input_types)
    def replication_pad3d(self, inputs, input_types):
        """Convert aten::replication_pad3d (TVM "edge" mode)."""
        return self.pad_common("edge", 0, inputs, input_types)
    def clamp_common(self, data, min=None, max=None):
        """Shared lowering for clamp / clamp_min / clamp_max.

        A missing bound falls back to the dtype's min/max so _op.clip becomes
        a one-sided clamp.
        """
        def get_v(v, default_v):
            # fold a constant/inferable expr to a float, else use as-is;
            # None falls through to the dtype default
            if isinstance(v, _expr.Constant):
                return float(v.data.numpy())
            if isinstance(v, _expr.Expr):
                infer_v, success = try_infer_value(v, lambda ret: float(ret))
                if success:
                    return infer_v
            if v is not None:
                return v
            return default_v
        dtype = self.infer_type(data).dtype
        type_info = np.finfo(dtype) if "float" in dtype else np.iinfo(dtype)
        # TODO(masahi): Properly handle inf in a one-way clamp case.
        if min is not None and max is not None:
            amin = get_v(min, type_info.min)
            amax = get_v(max, type_info.max)
        elif min is not None:
            amin = get_v(min, type_info.min)
            amax = type_info.max
        else:
            amin = type_info.min
            amax = get_v(max, type_info.max)
        return _op.clip(data, amin, amax)
def clamp(self, inputs, _):
return self.clamp_common(inputs[0], min=inputs[1], max=inputs[2])
def clamp_min(self, inputs, input_types):
return self.clamp_common(inputs[0], min=inputs[1])
def clamp_max(self, inputs, input_types):
return self.clamp_common(inputs[0], max=inputs[1])
    def to(self, inputs, input_types):
        """Convert aten::to (dtype casts only; device moves are no-ops).

        The dtype argument is a torch scalar-type code; anything outside the
        table below leaves the input unchanged.
        """
        data = inputs[0]
        dtype = inputs[1] if inputs[1] is not None and not isinstance(inputs[1], str) else inputs[2]
        # special handling for aten::to(data, 6, _, _, _) case
        # 6 means dtype = float
        # this happens when converting upsampling with scale factor
        cast_map = {5: "float16", 6: "float32", 7: "float64", 3: "int32", 4: "int64"}
        cast_func = {5: float, 6: float, 7: float, 3: int, 4: int}
        ret = data
        if isinstance(data, _expr.Expr):
            actual_dtype = str(self.infer_type(data).dtype)
            if dtype in cast_map and cast_map[dtype] != actual_dtype:
                ret = _op.cast(data, cast_map[dtype])
        elif dtype in cast_map:
            # python scalar: use the native cast instead of a Relay op
            ret = cast_func[dtype](data)
        return ret
def get_upsample_out_size(self, inputs, method):
# This assumes a static shape
out_size = []
if inputs[1] is not None:
for size in inputs[1]:
if not isinstance(size, int):
out_size.append(int(_infer_value(size, {}).numpy()))
else:
out_size.append(size)
else:
scale_index = 3 if method != "nearest_neighbor" else 2
scales = inputs[scale_index]
assert scales is not None, "neither out size nor scale provided"
assert isinstance(scales, list)
ishape = self.infer_shape(inputs[0])
for i, scale in enumerate(scales):
out_size.append(int(math.floor(float(ishape[2 + i]) * scale)))
return out_size
    def make_upsample(self, method):
        """Return a converter for 2-D upsampling using *method*.

        Quantized inputs fall back to an fp32 resize bracketed by
        dequantize/quantize, which matches torch's output.
        """
        def upsample(inputs, input_types):
            data = inputs[0]
            out_size = self.get_upsample_out_size(inputs, method)
            if len(inputs) > 2 and method != "nearest_neighbor":
                align_corners = inputs[2]
            else:
                align_corners = False
            if method == "nearest_neighbor":
                coord_trans = "asymmetric"
            elif align_corners:
                coord_trans = "align_corners"
            else:
                coord_trans = "half_pixel"
            def func(x):
                return _op.image.resize2d(
                    x, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75
                )
            if self.is_quantized_tensor(data):
                # input qparams are manually appended by us
                assert isinstance(inputs[-2], float)
                assert isinstance(inputs[-1], int)
                input_scale = _expr.const(inputs[-2])
                input_zero_point = _expr.const(inputs[-1])
                # currently piggy backs to fp32, it gets identical output as torch
                return qnn_torch.apply_with_fp32_fallback(data, input_scale, input_zero_point, func)
            return func(data)
        return upsample
def make_upsample3d(self, method):
def upsample3d(inputs, input_types):
data = inputs[0]
out_size = self.get_upsample_out_size(inputs, method)
if len(inputs) > 2 and method == "linear":
align_corners = inputs[2]
else:
align_corners = False
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
return _op.image.resize3d(data, out_size, None, "NCDHW", method, coord_trans)
return upsample3d
def expand_as(self, inputs, input_types):
target = inputs[1]
t0 = self.infer_type(inputs[0]).dtype
t1 = self.infer_type(inputs[1]).dtype
if str(t0) != str(t1):
target = _op.cast(target, t0)
return _op.broadcast_to_like(inputs[0], target)
def broadcast_tensors(self, inputs, input_types):
tensor_list = inputs[0]
import torch
infer_shape_value = [self.infer_shape(t) for t in tensor_list]
# "torch.broadcast_shapes" is available after PyTorch 1.8.0
if hasattr(torch, "broadcast_shapes"):
res_shape = list(torch.broadcast_shapes(*infer_shape_value))
else:
res_shape = list(torch.broadcast_tensors(*map(torch.empty, infer_shape_value))[0].shape)
return [_op.broadcast_to(tensor, res_shape) for tensor in tensor_list]
def Bool(self, inputs, input_types):
assert len(inputs) == 1
return inputs[0]
def Float(self, inputs, input_types):
assert len(inputs) == 1
return _op.cast(inputs[0], "float32")
def bitwise_not(self, inputs, input_types):
data = inputs[0]
# The input tensor must be of integral or Boolean types.
# For bool tensors, it computes the logical NOT
if input_types[0] == "bool":
out = _op.logical_not(_op.cast(data, "bool"))
else:
out = _op.bitwise_not(_op.cast(data, "int"))
return out
def bitwise_xor(self, inputs, input_types):
lhs = inputs[0]
rhs = inputs[1]
lhs = _op.cast(lhs, "bool") if input_types[0] == "bool" else _op.cast(lhs, "int")
rhs = _op.cast(rhs, "bool") if input_types[1] == "bool" else _op.cast(rhs, "int")
return _op.bitwise_xor(lhs, rhs)
def logical_not(self, inputs, input_types):
data = _wrap_const(inputs[0])
return _op.logical_not(_op.cast(data, "bool"))
def logical_xor(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_xor(lhs, rhs)
def list_getitem(self, inputs, input_types):
return self.prelude.nth(inputs[0], _wrap_const(inputs[1]))
def list_len(self, inputs, input_types):
return self.prelude.length(inputs[0])
def type_as(self, inputs, input_types):
assert len(inputs) == 2
assert len(input_types) == 2
return _op.cast(inputs[0], input_types[1])
def gather(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
indices = inputs[2]
return _op.gather(data, axis, indices)
def add(self, inputs, input_types):
# add_ is overloaded for tensor add and list concat
if input_types[0] == "ListType":
return self.prelude.concat(inputs[0], inputs[1])
return self.make_elemwise("add")(inputs, input_types)
    def tensor_array_stack(self, inputs, input_types):
        """Stack a dynamic (ADT) tensor list along axis 0.

        Converts the list to a static tensor array, stacks it, then extracts
        the underlying data tensor whose leading dim is Any().
        """
        dim = inputs[1]
        assert dim == 0, "stacking on a dynamic tensor list only supported on a first axis"
        tensor_array, shape = self.convert_to_tensor_array(inputs[0])
        stacked_shape = (Any(),) + shape
        stack = self.prelude.get_global_var_static("tensor_array_stack", "float32", shape)
        stacked = stack(tensor_array)
        # register ops for the stacked shape so tensor_get_data resolves
        static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", stacked_shape)
        static_tensor_array_ops.register()
        get_tensor = self.prelude.get_global_var_static("tensor_get_data", "float32", stacked_shape)
        return get_tensor(stacked)
    def stack(self, inputs, input_types):
        """Convert aten::stack for both static python lists and List ADTs."""
        if isinstance(inputs[0], list):
            # a static python list of tensors
            dim = inputs[1]
            return _op.stack(inputs[0], dim)
        else:
            # List ADT case
            assert isinstance(inputs[0], _expr.Expr)
            ty = self.infer_type_with_prelude(inputs[0])
            list_ty = self.prelude.mod.get_global_type_var("List")
            msg = "The input list is expected to be List ADT"
            assert isinstance(ty, tvm.ir.TypeCall) and ty.func == list_ty, msg
            return self.tensor_array_stack(inputs, input_types)
def sub(self, inputs, input_types):
if len(inputs) == 3:
data0, data1, alpha = self.pytorch_promote_types(inputs, input_types)
return get_relay_op("subtract")(data0, alpha * data1)
else:
data0, data1 = self.pytorch_promote_types(inputs, input_types)
return get_relay_op("subtract")(data0, data1)
def rsub(self, inputs, input_types):
data0, data1, alpha = self.pytorch_promote_types(inputs, input_types)
# note: rsub means data0 and data1 swap places
return get_relay_op("subtract")(data1, alpha * data0)
def embedding(self, inputs, input_types):
weight = inputs[0]
indices = inputs[1]
return _op.take(weight, indices.astype("int32"), axis=0)
def one_hot(self, inputs, input_types):
indices = inputs[0].astype("int32")
num_classes = inputs[1]
if num_classes == -1:
msg = "Inferring the number of classes is not yet supported."
raise NotImplementedError(msg)
dtype = "int32"
on_value = tvm.relay.const(1.0, dtype)
off_value = tvm.relay.const(0.0, dtype)
return _op.one_hot(indices, on_value, off_value, num_classes, -1, dtype)
    def index(self, inputs, input_types):
        """Convert aten::index (advanced indexing, possibly with bool masks).

        The advanced-index axes are moved to the front so adv_index can be
        applied; if those axes were consecutive in the original tensor the
        result is transposed back to torch's layout.
        """
        data = inputs[0]
        data_shape = self.infer_type(data).shape
        axes_adv_idx = [i for i, v in enumerate(inputs[1]) if v is not None]
        axes_rest = [i for i in range(len(data_shape)) if i not in axes_adv_idx]
        # check if the adv_index axes are consecutive
        # if consecutive, result must be transposed again at the end
        consecutive = True
        for curr, nxt in zip(axes_adv_idx[:-1], axes_adv_idx[1:]):
            if nxt - curr != 1:
                consecutive = False
                break
        indices_list = []
        axes_order = axes_adv_idx + axes_rest
        for i in axes_adv_idx:
            inp = inputs[1][i]
            if self.infer_type(inp).dtype == "bool":
                # adv_index does not support a mask as the index tensor (it will treat 0/1 as
                # an index rather than a flag).
                # So we use argwhere to turn the mask into indices, which will also take care
                # of the dynamism in the indexing by mask.
                indices_list.append(_op.squeeze(_op.transform.argwhere(inp), axis=[1]))
            else:
                indices_list.append(inp)
        data_after_adv_index = _op.adv_index([_op.transpose(data, axes=axes_order)] + indices_list)
        if consecutive:
            num_dims = len(self.infer_type(data_after_adv_index).shape)
            num_new_dims = num_dims - len(axes_rest)
            # rotate the new (indexed) dims into the position of the first
            # advanced-index axis to match torch semantics
            axes_final_order = list(range(num_dims))
            axes_final_order = (
                axes_final_order[num_new_dims : num_new_dims + axes_adv_idx[0]]
                + axes_final_order[:num_new_dims]
                + axes_final_order[num_new_dims + axes_adv_idx[0] :]
            )
            return _op.transpose(data_after_adv_index, axes=axes_final_order)
        else:
            return data_after_adv_index
def meshgrid(self, inputs, input_types):
data = inputs[0]
return _op.meshgrid(data, indexing="ij")
    def nms(self, inputs, input_types):
        """Convert torchvision::nms.

        TVM's NMS differs from torchvision's, so scores are shifted positive,
        scores+boxes are packed into the (1, num_anchors, 5) layout TVM
        expects, and the valid prefix of the returned indices is sliced out.
        """
        boxes = inputs[0]
        scores = inputs[1]
        iou_threshold = inputs[2]
        # TVM NMS assumes score > 0
        # - since there exists multi-comsumers for "scores", "num_boxes"
        # - invoke set_span here to prevent expr-rewritten occurrs in span-filling stage
        source_name = self.source_map[self.current_op[-1]]
        scores = set_span(scores - _op.min(scores) + _op.const(1.0), source_name)
        num_boxes = set_span(_op.shape_of(scores), source_name)
        # PyTorch NMS doesn't have score_threshold, so no need to run get_valid_count
        # - since "arange" op will fill expr into its attribute
        # - invoke set_span here to prevent expr-rewritten occurrs in span-filling stage
        indices = _op.transform.arange(set_span(_op.squeeze(num_boxes), source_name), dtype="int32")
        indices = _op.expand_dims(indices, 0, 1)
        # Generate data with shape (1, num_anchors, 5)
        scores = AttrCvt(op_name="expand_dims", extras={"axis": -1, "num_newaxis": 1})([scores], {})
        data = _op.concatenate([scores, boxes], -1)
        data = _op.expand_dims(data, 0, 1)
        # Perform Non-Maximum Suppression,
        # PyTorch NMS doesn't have parameter top_k and max_output_size
        score_index = 0
        top_k = max_out_size = -1
        nms_ret = get_relay_op("non_max_suppression")(
            data=data,
            valid_count=num_boxes,
            indices=indices,
            max_output_size=max_out_size,
            iou_threshold=iou_threshold,
            force_suppress=True,
            top_k=top_k,
            coord_start=1,
            score_index=score_index,
            id_index=-1,
            return_indices=True,
            invalid_to_bottom=False,
        )
        # squeeze the two outputs of nms for strided_slice
        size = get_relay_op("squeeze")(nms_ret[1], axis=[1])
        data_slice = get_relay_op("squeeze")(nms_ret[0], axis=[0])
        # strided slice to get the dynamic result
        ret = get_relay_op("strided_slice")(
            data_slice, begin=_expr.const([0]), end=size, slice_mode="size"
        )
        # in torchvision, indices from nms are int64
        return _op.cast(ret, "int64")
def logsumexp(self, inputs, input_types):
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
dim_list = inputs[1]
keepdim = inputs[2] if len(inputs) > 2 else False
# dim is output of prim::ListConstruct, even if it is int in python code
assert isinstance(dim_list, list), "dim is expected to be a list"
return _op.logsumexp(data[0], axis=dim_list, keepdims=keepdim)
def roi_align(self, inputs, input_types):
data = inputs[0]
boxes = inputs[1]
output_size = (inputs[3], inputs[4])
spatial_scale = inputs[2]
sample_ratio = inputs[5]
aligned = False if len(inputs) < 7 else inputs[6]
if aligned:
boxes -= _expr.const(0.5 / spatial_scale)
return _op.vision.roi_align(data, boxes, output_size, spatial_scale, sample_ratio)
    def deform_conv2d(self, inputs, input_types):
        """Convert torchvision::deform_conv2d.

        Newer torchvision passes an extra mask argument (inputs > 12), which
        shifts the positions of bias and the stride/padding/dilation args;
        the mask itself is not supported and is ignored.
        """
        data = inputs[0]
        weight = inputs[1]
        offset = inputs[2]
        if len(inputs) > 12:
            strides_offset = 5
            bias = inputs[4]
            logger.warning("mask argument in deformable conv2d is not supported and ignored")
        else:
            strides_offset = 4
            bias = inputs[3]
        strides = (inputs[strides_offset], inputs[strides_offset + 1])
        padding = (inputs[strides_offset + 2], inputs[strides_offset + 3])
        dilation = (inputs[strides_offset + 4], inputs[strides_offset + 5])
        groups = inputs[strides_offset + 6]
        deformable_groups = inputs[strides_offset + 7]
        weight_shape = self.infer_shape(weight)
        output_channels = weight_shape[0]
        kernel_size = (weight_shape[2], weight_shape[3])
        conv_out = _op.nn.deformable_conv2d(
            data,
            offset,
            weight,
            strides,
            padding,
            dilation,
            deformable_groups,
            groups,
            output_channels,
            kernel_size,
        )
        return _op.nn.bias_add(conv_out, bias)
def stft(self, inputs, input_types):
data = inputs[0]
n_fft = inputs[1]
hop_length = inputs[2]
win_length = inputs[3]
window = inputs[4]
normalized = inputs[5]
onesided = inputs[6]
return _op.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
def unbind(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return unbind(data, axis)
def shape_as_tensor(self, inputs, input_types):
is_symbolic_shape = False
input_shape = self.infer_shape(inputs[0], self.prelude.mod)
for axis in input_shape:
if not isinstance(axis, (int, tvm.tir.IntImm)):
is_symbolic_shape = True
break
if is_symbolic_shape:
ret = _op.shape_of(inputs[0], dtype="int64")
else:
ret = _expr.const(np.array(input_shape), dtype="int64")
return ret
def logical_and(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_and(lhs, rhs)
def nonzero(self, inputs, input_types, is_numpy_style=False):
data = inputs[0]
ret = _op.transform.argwhere(data)
if is_numpy_style or (len(inputs) > 1 and inputs[1]):
return unbind(ret, 1)
return ret
    def nonzero_numpy(self, inputs, input_types):
        # NOTE(review): is_numpy_style=False looks inverted given the name —
        # presumably deliberate to avoid the unsupported tuple-of-tensor
        # return; confirm against the op registration before changing.
        return self.nonzero(inputs, input_types, is_numpy_style=False)
    def scatter(self, inputs, input_types):
        """Convert aten::scatter / scatter_ (optionally with a reduce mode).

        Validates the rank/size relationships torch requires, broadcasts a
        scalar source, and maps torch's reduce names onto scatter_elements'.
        """
        assert len(inputs) == 4 or len(inputs) == 5, (
            f"scatter takes 4 or 5 inputs: data, dim, index, src, reduce (optional), "
            f"but {len(inputs)} given"
        )
        data = inputs[0]
        axis = int(inputs[1])
        index = inputs[2]
        src = inputs[3]
        if len(inputs) == 5:
            reduce = inputs[4]
        else:
            reduce = "update"
        data_shape = self.infer_shape(data)
        data_rank = len(data_shape)
        index_shape = self.infer_shape(index)
        index_rank = len(index_shape)
        # When index is empty, the operation returns data unchanged
        if self.is_empty_shape(index_shape):
            return data
        if np.isscalar(src):
            assert self.infer_type(src).dtype == "float", "Scalar source can be float only"
            src = _op.broadcast_to_like(src, data_shape)
            src_shape = data_shape
        else:
            src_shape = self.infer_shape(src)
        src_rank = len(src_shape)
        assert data_rank == index_rank, "Index rank is not the same as data rank"
        assert data_rank == src_rank, "Src rank is not the same as data rank"
        assert 0 <= axis < data_rank, "Dim is out of bounds"
        for i in range(data_rank):
            index_dim = index_shape[i]
            src_dim = src_shape[i]
            data_dim = data_shape[i]
            # Skip check for dynamic dimensions
            if not any([isinstance(index_dim, tvm.tir.Any), isinstance(src_dim, tvm.tir.Any)]):
                assert index_dim <= src_dim, "Index dim size should be less than src one"
            if i != axis and not any(
                [isinstance(index_dim, tvm.tir.Any), isinstance(data_dim, tvm.tir.Any)]
            ):
                assert index_dim <= data_dim, "Index dim size should be less than data one"
        # normalize torch reduce names for scatter_elements
        if reduce is None:
            reduce = "update"
        elif reduce == "multiply":
            reduce = "mul"
        assert reduce in [
            "update",
            "add",
            "mul",
        ], 'reduce arg is expected from "add", "multiply" or None'
        return _op.scatter_elements(data, index, src, axis, reduce)
def index_put(self, inputs, input_types):
in_tensor = inputs[0]
indices = inputs[1]
values = inputs[2]
accumulate = inputs[3]
if not accumulate:
mode = "update"
else:
mode = "add"
# Combine array of index tensors into one index tensor with shape (N,_)
index_tensor = _op.stack(indices, axis=0)
return _op.scatter_nd(in_tensor, index_tensor, values, mode)
def scalar_tensor(self, inputs, input_types):
data = inputs[0]
cast_map = {6: "float32", 7: "float64", 3: "int32", 4: "int64"}
type_key = inputs[1]
if isinstance(data, _expr.Constant):
data = data.data.numpy().tolist()
return _expr.const(data, cast_map[type_key])
def interpolate(self, inputs, input_types):
if isinstance(inputs[1], _expr.Expr):
out_size = inputs[1]
elif isinstance(inputs[1], list):
out_size = []
for i in [0, 1]:
size, _ = try_infer_value(
inputs[1][i],
lambda ret: ret.astype(np.int),
lambda: _op.expand_dims(inputs[1][i], axis=0),
)
out_size.append(size)
out_size = _op.concatenate(out_size, axis=0)
data = inputs[0]
align_corners = inputs[4]
method = inputs[3]
if method.startswith("nearest"):
method = "nearest_neighbor"
elif method[0:2] == "bi":
method = method[2:]
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
return _op.image.resize2d(
data, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75
)
def numel(self, inputs, input_types):
return _op.ndarray_size(inputs[0])
def empty(self, inputs, input_types):
shape = []
for s in inputs[0]:
if isinstance(s, _expr.Constant):
shape.append(s.data.numpy().item())
else:
assert isinstance(s, int)
shape.append(s)
return _op.zeros(shape, _convert_dtype_value(inputs[1]))
def empty_like(self, inputs, input_types):
shape = self.infer_shape(inputs[0])
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = input_types[0]
return _op.zeros(shape, dtype)
def new_empty(self, inputs, input_types):
size = inputs[1]
import torch
if not isinstance(size, (_expr.Expr, list, tuple, torch.Size, np.ndarray)):
msg = f"Data type {type(size)} could not be parsed in empty op"
raise AssertionError(msg)
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
else:
dtype = input_types[0]
return _op.zeros(size, dtype)
def randn(self, inputs, input_types):
import time # use current time as seed
shape = inputs[0]
output = _op.random.normal(_op.random.threefry_key(int(time.time())), shape)
_, values = _expr.TupleWrapper(output, 2)
return values
    def bincount(self, inputs, input_types):
        """Convert aten::bincount via a scatter-add into a zeroed histogram.

        int64 input is downcast to int32 (no int64 atomic add available);
        int32 input is cast back to int64 on output to match torch.
        """
        data = inputs[0]
        weights = inputs[1]
        input_type = self.infer_type(data).dtype
        if input_type == "int64":
            logger.warning(
                "Casting an int64 input to int32, since we do not have int64 atomic add"
                "needed for bincount yet."
            )
            data = _op.cast(data, "int32")
        # histogram length = max value + 1
        maximum = _op.max(data)
        dim = maximum + _expr.const(1, dtype="int32")
        if weights:
            weight_type = self.infer_type(weights)
            out_dtype = weight_type.dtype
            updates = weights
        else:
            out_dtype = "int32"
            updates = _op.ones_like(data)
        counts = _op.zeros(_op.reshape(dim, [1]), out_dtype)
        out = _op.scatter_elements(counts, data, updates, axis=0, reduction="add")
        if input_type == "int32":
            # Torch always outputs int64 results for bincount
            return _op.cast(out, "int64")
        return out
    def scatter_add(self, inputs, input_types):
        """Convert aten::scatter_add as scatter_elements with "add" reduction.

        Validates the rank/size relationships torch requires; an empty index
        returns the data unchanged.
        """
        assert (
            len(inputs) == 4
        ), f"scatter_add takes 4 inputs (data, dim, index, src), but {len(inputs)} given"
        data = inputs[0]
        axis = inputs[1]
        index = inputs[2]
        src = inputs[3]
        data_shape = self.infer_shape(inputs[0])
        data_rank = len(data_shape)
        index_shape = self.infer_shape(inputs[2])
        index_rank = len(index_shape)
        # When index is empty, the operation returns data unchanged
        if self.is_empty_shape(index_shape):
            return data
        src_shape = self.infer_shape(inputs[3])
        src_rank = len(src_shape)
        assert data_rank == index_rank, "Index rank is not the same as data rank"
        assert data_rank == src_rank, "Src rank is not the same as data rank"
        assert 0 <= axis < data_rank, "Dim is out of bounds"
        for i in range(data_rank):
            assert index_shape[i] <= src_shape[i], "Index dim size should be less than src one"
            if i != axis:
                assert (
                    index_shape[i] <= data_shape[i]
                ), "Index dim size should be less than data one"
        return _op.scatter_elements(data, index, src, axis=axis, reduction="add")
def scatter_reduce(self, inputs, input_types):
assert len(inputs) == 5 or len(inputs) == 6, (
f"scatter_reduce takes 5 or 6 inputs (data, dim, index, src, reduce, include_self), "
f"but {len(inputs)} given"
)
data = inputs[0]
dim = inputs[1]
index = inputs[2]
src = inputs[3]
reduce = inputs[4]
if len(inputs) == 6:
include_self = inputs[5]
# TODO(vvchernov): support include_self == False
assert include_self, "include_self=False has not been suppoted for scatter_reduce yet"
data_shape = self.infer_shape(inputs[0])
data_rank = len(data_shape)
index_shape = self.infer_shape(inputs[2])
index_rank = len(index_shape)
src_shape = self.infer_shape(inputs[3])
src_rank = len(src_shape)
assert data_rank == index_rank, "Index rank is not the same as data rank"
assert data_rank == src_rank, "Src rank is not the same as data rank"
assert 0 <= dim < data_rank, "Dim is out of bounds"
for i in range(data_rank):
assert index_shape[i] <= src_shape[i], "Index dim size should be less than src one"
if i != dim:
assert (
index_shape[i] <= data_shape[i]
), "Index dim size should be less than data one"
red_valids = ["sum", "prod", "mean", "amax", "amin"]
assert (
reduce in red_valids
), f"Only {red_valids} modes are supported, but {reduce} is gotten"
if reduce == "sum":
reduce = "add"
elif reduce == "prod":
reduce = "mul"
elif reduce == "amin":
reduce = "min"
elif reduce == "amax":
reduce = "max"
return _op.scatter_elements(data, index, src, axis=dim, reduction=reduce)
def cumsum(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
dtype = inputs[2]
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
return _op.cumsum(data, axis=dim, dtype=dtype)
def masked_fill(self, inputs, input_types):
mask = inputs[1]
value = _op.cast(_wrap_const(inputs[2]), input_types[0])
return _op.where(mask, value, inputs[0])
def masked_select(self, inputs, input_types):
mask = inputs[1]
indices = self.nonzero([mask], input_types, is_numpy_style=True)
return _op.adv_index([inputs[0]] + [indices[i] for i in range(indices.size)])
def sort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
# pytorch sort returns both sorted indices and values
indices = _op.argsort(data, dim, not is_descending)
return _op.gather(data, dim, indices), indices
def argsort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
return _op.argsort(data, dim, not is_descending)
def is_floating_point(self, inputs, input_types):
assert len(inputs) == 1
if isinstance(inputs[0], _expr.Expr):
input_type = self.infer_type(inputs[0]).dtype
else:
input_type = input_types[0]
is_float = input_type in ["float32", "float64", "float16", "bfloat16"]
return _expr.const(is_float)
def unique(self, inputs, input_types):
assert len(inputs) == 4
[data, is_sorted, return_inverse, return_counts] = inputs
if not is_sorted:
logger.warning("TVM always assumes sorted=True for torch.unique")
is_sorted = True
if return_counts:
[unique, indices, inverse_indices, num_uniq, counts] = _op.unique(
data, is_sorted=is_sorted, return_counts=True
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size")
return (unique_sliced, inverse_indices, counts_sliced)
else:
[unique, indices, inverse_indices, num_uniq] = _op.unique(
data, is_sorted=is_sorted, return_counts=False
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
return (unique_sliced, inverse_indices)
def nll_loss(self, inputs, input_types):
assert len(inputs) == 5
[predictions, targets, weights, reduction, ignore_index] = inputs
num_class = self.infer_shape(predictions)[1]
if reduction == 0:
reduction = "none"
elif reduction == 1:
reduction = "mean"
else:
reduction = "sum"
if weights is None:
weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])
return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
def flip(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
return _op.transform.reverse(data, axis=axis[0])
def bidir_rnn_cell(self, input_seqs, weights_dicts, act=_op.tanh):
"""
Bidirectional RNN cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t = rnn_cell(input_seqs, **weights_dicts[0], backwards=False, act=act)
reverse_outputs, rev_H_t = rnn_cell(input_seqs, **weights_dicts[1], backwards=True, act=act)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)
def rnn_layers(self, input_data, layer_weights_dicts, bidirectional, act, dropout_p=0.0):
"""
Methods iterates layers for Stacked RNN
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t = self.bidir_rnn_cell(input_seqs, weights_dicts, act=act)
else:
input_seqs, H_t = rnn_cell(input_seqs, **weights_dicts[0], act=act)
output_hiddens.append(H_t)
# TODO (yuanfz98): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for GRU has not been supported yet!")
output_hiddens = (
_op.concatenate(output_hiddens, 0) if bidirectional else _op.stack(output_hiddens, 0)
)
return _op.stack(input_seqs, 0), output_hiddens
def rnn(self, inputs, input_types, nonlinearity):
"""
Description of RNN in pytorch:
https://pytorch.org/docs/stable/generated/torch.nn.RNN.html#torch.nn.RNN
Description of inputs:
https://github.com/pytorch/pytorch/blob/736fb7d22cc948b739db2c35aeb5ad4d19aea4f4/torch/overrides.py#L937
"""
# TODO (yuanfz98): support dropout
assert len(inputs) == 9, "Input of size 9 is expected"
# Unpack inputs, note that if optional and not provided then value will be None.
_X = inputs[0]
# _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
hidden_state = inputs[1]
# Hidden state shape (hidden_layers_num, batch, hidden_size)
_weights = inputs[2]
# Wi layer[0] shape (hidden_size, feature_size)
# Wh layer[0] shape (hidden_size, hidden_size)
# Bi layer[0] shape (hidden_size)
# Bh layer[0] shape (hidden_size)
# Wi layer[>0] shape (hidden_size, hidden_size * num_directions)
# Wh layer[>0] shape (hidden_size, hidden_size)
# Bi layer[>0] shape (hidden_size)
# Bh layer[>0] shape (hidden_size)
# Scalar inputs
has_biases = inputs[3]
num_layers = inputs[4]
dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout
# train = inputs[6]
bidirectional = inputs[7]
batch_first = inputs[8]
num_directions = 1
if bidirectional:
num_directions = 2
rsd = len(_weights) % num_layers
assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
rsd = (len(_weights) / num_layers) % num_directions
assert (
rsd == 0
), "The number of weights in layer must be a multiple of the number of directions!"
weights_num = int(len(_weights) / num_layers / num_directions)
if has_biases:
assert weights_num == 4, "The weights number in layer is expected equal to 4"
else:
assert weights_num == 2, "The weights number in layer is expected equal to 2"
if nonlinearity == "tanh":
act = _op.tanh
elif nonlinearity == "relu":
act = _op.nn.relu
assert act, "The nonlinearity is unknown"
X = (
_op.transpose(_X, (1, 0, 2)) if batch_first else _X
) # always (seq_num, batch, feature_size)
# TODO (yuanfz98): Which data type should be used? from input or weights?
# Instead of it _infer_type(X).checked_type.dtype can be used
X_dtype = input_types[0]
X_shape = _infer_shape(X) # (seq_num, batch, feature_size)
hidden_size = int(_infer_shape(_weights[0])[0])
batch_size = X_shape[1]
# Initialize hidden states if not provided.
layers_h = []
hidden_layers_num = num_directions * num_layers
if hidden_state is None:
h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_h.append(h_0)
else:
layers_h = unbind(hidden_state, 0)
layer_weights_dicts = []
k = 0 # layer counter
if has_biases:
names = ["hidden_state", "w_inp", "w_hid", "b_inp", "b_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of RNN weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
else:
names = ["hidden_state", "w_inp", "w_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of RNN weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of RNN weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
assert (
len(layer_weights_dicts) == num_layers and k == num_layers
), "For stacked RNN number of weights sets should be the same as number of layers!"
output, out_hidden_state = self.rnn_layers(
X, layer_weights_dicts, bidirectional, act, dropout_p=dropout_p
)
# output shape = (seq_num, batch, hidden_size) or
# (seq_num, batch, 2*feature_size) for bidirectional
if batch_first:
output = _op.transpose(output, (1, 0, 2))
return (output, out_hidden_state)
def bidir_gru_cell(self, input_seqs, weights_dicts):
"""
Bidirectional GRU cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t = gru_cell(input_seqs, **weights_dicts[0])
reverse_outputs, rev_H_t = gru_cell(input_seqs, **weights_dicts[1], backwards=True)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)
def gru_layers(self, input_data, layer_weights_dicts, bidirectional, dropout_p=0.0):
"""
Methods iterates layers for Stacked GRU
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t = self.bidir_gru_cell(input_seqs, weights_dicts)
else:
input_seqs, H_t = gru_cell(input_seqs, **weights_dicts[0])
output_hiddens.append(H_t)
# TODO (vvchernov): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for GRU has not been supported yet!")
return _op.stack(input_seqs, 0), _op.stack(output_hiddens, 0)
    def gru(self, inputs, input_types):
        """
        Convert aten::gru.

        Description of GRU in pytorch:
        https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU

        Unpacks the flat weight list into per-layer (and per-direction)
        dictionaries and feeds them to gru_layers.  Returns (output, h_n).
        """
        # TODO (vvchernov): support dropout
        assert len(inputs) == 9, "Input of size 9 is expected"
        # Unpack inputs, note that if optional and not provided then value will be None.
        _X = inputs[0]
        # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
        hidden_state = inputs[1]
        # Hidden state shape (hidden_layers_num, batch, hidden_size)
        _weights = inputs[2]
        # Wi layer[0] shape (3 * hidden_size, feature_size)
        # Wh layer[0] shape (3 * hidden_size, hidden_size)
        # Bi layer[0] shape (3 * hidden_size)
        # Bh layer[0] shape (3 * hidden_size)
        # Wi layer[>0] shape (3 * hidden_size, hidden_size * num_directions)
        # Wh layer[>0] shape (3 * hidden_size, hidden_size)
        # Bi layer[>0] shape (3 * hidden_size)
        # Bh layer[>0] shape (3 * hidden_size)
        # Scalar inputs
        has_biases = inputs[3]
        num_layers = inputs[4]
        dropout_p = inputs[5]  # dropout probability, if 0.0 it means there is no dropout
        # train = inputs[6]
        bidirectional = inputs[7]
        batch_first = inputs[8]
        num_directions = 1
        if bidirectional:
            num_directions = 2
        # Sanity-check that the flat weight list factors evenly into
        # layers and directions.
        rsd = len(_weights) % num_layers
        assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
        rsd = (len(_weights) / num_layers) % num_directions
        assert (
            rsd == 0
        ), "The number of weights in layer must be a multiple of the number of directions!"
        weights_num = int(len(_weights) / num_layers / num_directions)
        if has_biases:
            assert weights_num == 4, "The weights number in layer is expected equal to 4"
        else:
            assert weights_num == 2, "The weights number in layer is expected equal to 2"
        # Canonicalize to (seq_num, batch, feature_size).
        X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
        # TODO (vvchernov): Which data type should be used? from input or weights?
        # Instead of it _infer_type(X).checked_type.dtype can be used
        X_dtype = input_types[0]
        X_shape = _infer_shape(X)  # (seq_num, batch, feature_size)
        # Wi has 3 stacked gates along dim 0, hence the division by 3.
        hidden_size = int(_infer_shape(_weights[0])[0] / 3)
        batch_size = X_shape[1]
        # Initialize hidden states if not provided.
        layers_h = []
        hidden_layers_num = num_directions * num_layers
        if hidden_state is None:
            h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
            for i in range(hidden_layers_num):
                layers_h.append(h_0)
        else:
            layers_h = unbind(hidden_state, 0)
        # Pack per-layer (and per-direction) weight dictionaries for gru_cell.
        layer_weights_dicts = []
        k = 0  # layer counter
        if has_biases:
            names = ["hidden_state", "w_inp", "w_hid", "b_inp", "b_hid"]
            if bidirectional:
                rsd = len(_weights) % (2 * weights_num)
                assert rsd == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), 2 * weights_num):
                    fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    j = i + weights_num
                    rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]
                    rev_weights_dict = dict(zip(names, rev_tensors))
                    layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
                    k += 1
            else:
                assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), weights_num):
                    fw_tensors = [layers_h[k], *_weights[i : i + 4]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    layer_weights_dicts.append([fw_weights_dict])
                    k += 1
        else:
            names = ["hidden_state", "w_inp", "w_hid"]
            if bidirectional:
                rsd = len(_weights) % (2 * weights_num)
                assert rsd == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), 2 * weights_num):
                    fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    j = i + weights_num
                    rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]
                    rev_weights_dict = dict(zip(names, rev_tensors))
                    layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
                    k += 1
            else:
                assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), weights_num):
                    fw_tensors = [layers_h[k], *_weights[i : i + 2]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    layer_weights_dicts.append([fw_weights_dict])
                    k += 1
        assert (
            len(layer_weights_dicts) == num_layers and k == num_layers
        ), "For stacked GRU number of weights sets should be the same as number of layers!"
        output, out_hidden_state = self.gru_layers(
            X, layer_weights_dicts, bidirectional, dropout_p=dropout_p
        )
        # output shape = (seq_num, batch, hidden_size) or
        # (seq_num, batch, 2*feature_size) for bidirectional
        if batch_first:
            output = _op.transpose(output, (1, 0, 2))
        return (output, out_hidden_state)
def bidir_lstm_cell(self, input_seqs, weights_dicts):
"""
Bidirectional LSTM cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t, fw_C_t = lstm_cell(input_seqs, **weights_dicts[0])
reverse_outputs, rev_H_t, rev_C_t = lstm_cell(
input_seqs, **weights_dicts[1], backwards=True
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, (fw_H_t, fw_C_t), (rev_H_t, rev_C_t)
    def lstm_layers(self, input_data, layer_weights_dicts, bidirectional, dtype, dropout_p=0.0):
        """
        Iterate layers for stacked LSTM.

        Each entry of layer_weights_dicts configures one layer; a layer's
        output sequence feeds the next.  Returns the stacked per-step outputs
        and a list of final (H, C) states (flattened for bidirectional).
        Note: the `dtype` parameter is not used in this body.
        """
        layers_num = len(layer_weights_dicts)
        # split input sequence to samples set
        input_seqs = unbind(input_data, 0)  # [seq_num, (batch, feature_size)]
        output_hiddens = []
        for i in range(layers_num):
            weights_dicts = layer_weights_dicts[i]
            # input_seqs shape = [seq_num, (batch, feature_size)] or
            # [seq_num, (batch, 2*feature_size)] for bidirectional
            if bidirectional:
                input_seqs, H_t, C_t = self.bidir_lstm_cell(input_seqs, weights_dicts)
            else:
                input_seqs, H_t, C_t = lstm_cell(input_seqs, **weights_dicts[0])
            output_hiddens.append((H_t, C_t))
            # TODO (vvchernov): in pytorch implementation train is also checked
            # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
            # /aten/src/ATen/native/RNN.cpp#L1054
            if dropout_p != 0 and i < layers_num - 1:
                # for input in input_seqs:
                #     input = _op.dropout(input, dropout_p)
                raise NotImplementedError("Dropout for LSTM has not been supported yet!")
        final_hiddens = []
        if bidirectional:
            # Flatten (fw, rev) state pairs into a single list.
            for output_hidden in output_hiddens:
                final_hiddens.append(output_hidden[0])
                final_hiddens.append(output_hidden[1])
        else:
            final_hiddens = output_hiddens
        return _op.stack(input_seqs, 0), final_hiddens
def lstm(self, inputs, input_types):
"""
Description of LSTM in pytorch:https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
Native implementation for torch version less than 1.8.0 (projection is unsupported):
https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339/aten/ \
src/ATen/native/RNN.cpp#L1396
Native implementation for torch version from 1.8.0 and higher (projection is supported):
https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L1483
"""
# TODO (vvchernov): support dropout
assert len(inputs) == 9, "Input of size 9 is expected"
# Unpack inputs, note that if optional and not provided then value will be None.
_X = inputs[0]
# _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
hidden_states = inputs[1]
assert len(hidden_states) == 2, "lstm expects two hidden states"
h_0 = hidden_states[0]
c_0 = hidden_states[1]
# H0 shape (hidden_layers_num, batch, proj_size) if projection
# else (hidden_layers_num, batch, hidden_size)
# C0 shape (hidden_layers_num, batch, hidden_size)
_weights = inputs[2]
# If no projection
# Wi layer[0] shape (4 * hidden_size, feature_size)
# Wh layer[0] shape (4 * hidden_size, hidden_size)
# Bi layer[0] shape (4 * hidden_size)
# Bh layer[0] shape (4 * hidden_size)
# Wi layer[>0] shape (4 * hidden_size, hidden_size * num_directions)
# Wh layer[>0] shape (4 * hidden_size, hidden_size)
# Bi layer[>0] shape (4 * hidden_size)
# Bh layer[>0] shape (4 * hidden_size)
# If projection
# Wi layer[0] shape (4 * hidden_size, feature_size)
# Wh layer[0] shape (4 * hidden_size, proj_size)
# Bi layer[0] shape (4 * hidden_size)
# Bh layer[0] shape (4 * hidden_size)
# P layer[0] shape (proj_size, hidden_size)
# Wi layer[>0] shape (4 * hidden_size, proj_size * num_directions)
# Wh layer[>0] shape (4 * hidden_size, proj_size)
# Bi layer[>0] shape (4 * hidden_size)
# Bh layer[>0] shape (4 * hidden_size)
# P layer[>0] shape (proj_size, hidden_size)
# Scalar inputs
has_biases = inputs[3]
num_layers = inputs[4]
dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout
# train = inputs[6]
bidirectional = inputs[7]
batch_first = inputs[8]
num_directions = 1
if bidirectional:
num_directions = 2
rsd = len(_weights) % num_layers
assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
rsd = (len(_weights) / num_layers) % num_directions
assert (
rsd == 0
), "The number of weights in layer must be a multiple of the number of directions!"
has_proj = False
proj_size = 0
weights_num = int(len(_weights) / num_layers / num_directions)
if has_biases:
if weights_num == 5:
has_proj = True
proj_size = _infer_shape(_weights[4])[0]
else:
assert weights_num == 4, "The weights number in layer is expected equal to 4"
else:
if weights_num == 3:
has_proj = True
proj_size = _infer_shape(_weights[2])[0]
else:
assert weights_num == 2, "The weights number in layer is expected equal to 2"
X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
# TODO (vvchernov): Which data type should be used? from input or weights?
# Instead of it _infer_type(X).checked_type.dtype can be used
X_dtype = input_types[0]
X_shape = _infer_shape(X) # (seq_num, batch, feature_size)
hidden_size = _infer_shape(_weights[0])[0] / 4
batch_size = X_shape[1]
# Initialize hidden states if not provided.
layers_h = []
layers_c = []
hidden_layers_num = num_directions * num_layers
if h_0 is None:
if has_proj:
h_0 = _op.zeros((batch_size, proj_size), X_dtype)
else:
h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_h.append(h_0)
else:
layers_h = unbind(h_0, 0)
if c_0 is None:
c_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_c.append(c_0)
else:
layers_c = unbind(c_0, 0)
layer_weights_dicts = []
k = 0 # layer counter
if has_biases:
names = ["hidden_state", "cell_state", "w_inp", "w_hid", "b_inp", "b_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 4]
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 4]]
rev_weights_dict = dict(zip(names, rev_tensors))
if has_proj:
rev_weights_dict["proj"] = _weights[j + 4]
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 4]
layer_weights_dicts.append([fw_weights_dict])
k += 1
else:
names = ["hidden_state", "cell_state", "w_inp", "w_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 2]
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 2]]
rev_weights_dict = dict(zip(names, rev_tensors))
if has_proj:
rev_weights_dict["proj"] = _weights[j + 2]
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 2]
layer_weights_dicts.append([fw_weights_dict])
k += 1
assert (
len(layer_weights_dicts) == num_layers and k == num_layers
), "For stacked LSTM number of weights sets should be the same as number of layers!"
outputs = self.lstm_layers(
X, layer_weights_dicts, bidirectional, dtype=X_dtype, dropout_p=dropout_p
)
# output shape = (seq_num, batch, hidden_size) or
# (seq_num, batch, 2*feature_size) for bidirectional
output = outputs[0]
hy = []
cy = []
for hidden in outputs[1]:
hy.append(hidden[0])
cy.append(hidden[1])
if batch_first:
output = _op.transpose(output, (1, 0, 2))
return (output, _op.stack(hy, 0), _op.stack(cy, 0))
def all_any_common(self, op, inputs, input_types):
if len(inputs) >= 2:
dim = inputs[1]
else:
dim = None
if len(inputs) >= 3:
keepdim = inputs[2]
else:
keepdim = False
if self.infer_type(inputs[0]).dtype != "bool":
# The input dtype can be uint8.
inp = _op.cast(inputs[0], "bool")
else:
inp = inputs[0]
return op(inp, axis=dim, keepdims=keepdim)
def searchsorted_common(
self, sorted_sequence, values, out_int32, right, side=None, out=None, sorter=None
):
assert side is None and out is None and sorter is None, "unsupported parameters"
dtype = "int32" if out_int32 else "int64"
values_shape = _infer_shape(values)
if len(values_shape) == 0:
values = _op.expand_dims(values, 0)
out = _op.searchsorted(sorted_sequence, values, right=right, dtype=dtype)
if len(values_shape) == 0:
return _op.squeeze(out)
return out
    def searchsorted(self, inputs, input_types):
        # aten::searchsorted's inputs map positionally onto
        # searchsorted_common(sorted_sequence, values, out_int32, right, ...).
        return self.searchsorted_common(*inputs)
    def bucketize(self, inputs, input_types):
        # aten::bucketize takes (input, boundaries, ...); swap the first two
        # arguments to match searchsorted_common(sorted_sequence, values, ...).
        return self.searchsorted_common(inputs[1], inputs[0], inputs[2], inputs[3])
    def roll(self, inputs, input_types):
        # Convert aten::roll by gathering with modulo-shifted indices along
        # each requested dimension in turn.
        def slide_axes(inp, shape, ax):
            # Move the last axis of `inp` into position `ax`, shifting the
            # remaining axes right.
            axes = list(range(len(shape)))
            axes = axes[:ax] + [-1] + axes[ax:-1]
            return _op.transpose(inp, axes)

        x = inputs[0]
        shifts = inputs[1]
        dims = inputs[2]
        shape = self.infer_shape(x)
        start = _expr.const(0, "int64")
        step = _expr.const(1, "int64")

        out = x
        for i, dim in enumerate(dims):
            roll_dim = _expr.const(shape[dim], "int64")
            # (arange - shift + size) % size gives the rolled source index
            # for every output position along this dimension.
            indices_1d = _op.mod(
                _op.transform.arange(start, roll_dim, step, "int64")
                - _expr.const(shifts[i], "int64")
                + roll_dim,
                roll_dim,
            )
            # First fill in the last axis with roll indices, and then do transpose to
            # bring the roll indices into the desired axis.
            indices = slide_axes(
                _op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)), shape, dim
            )
            out = _op.gather(out, dim, indices)

        return out
def einsum(self, inputs, input_types):
equation = inputs[0]
data = inputs[1]
return _op.einsum(data, equation)
def dot(self, inputs, _):
lhs, rhs = inputs
return _op.sum(_op.multiply(lhs, rhs))
def mv(self, inputs, _):
lhs, rhs = inputs
# Convert the 1D matrix (vector) into a 2D matrix with the extra
# dimension=1
rhs_matrix = _op.transform.expand_dims(rhs, 0)
# Run multiplication
dense_result = _op.nn.dense(lhs, rhs_matrix, units=None)
# Chop off the extra result dimension
return _op.transform.squeeze(dense_result)
def grid_sampler(self, inputs, input_types):
interpolate_mode = inputs[2]
padding_mode = inputs[3]
align_corners = inputs[4]
data_shape = self.infer_shape_with_prelude(inputs[0])
if len(data_shape) == 4:
layout = "NCHW"
axes = [0, 3, 1, 2]
grid = _op.transform.transpose(inputs[1], axes)
elif len(data_shape) == 5:
layout = "NCDHW"
axes = [0, 4, 1, 2, 3]
grid = _op.transform.transpose(inputs[1], axes)
else:
msg = "only 4D and 5D are supported."
raise ValueError(msg)
if interpolate_mode == 0:
interpolate_str = "bilinear"
elif interpolate_mode == 1:
interpolate_str = "nearest"
elif interpolate_mode == 2:
interpolate_str = "bicubic"
else:
msg = f"interpolation method {interpolate_mode} is not supported"
raise ValueError(msg)
if padding_mode == 0:
padding_mode_str = "zeros"
elif padding_mode == 1:
padding_mode_str = "border"
elif padding_mode == 2:
padding_mode_str = "reflection"
else:
msg = f"padding_mode {padding_mode} is not supported"
raise ValueError(msg)
return _op.image.grid_sample(
inputs[0], grid, interpolate_str, layout, padding_mode_str, align_corners
)
def trilu(self, inputs, input_types, mode):
data = inputs[0]
k = inputs[1] if inputs[1] else 0
upper = True if mode == "triu" else False
return _op.trilu(data, k, upper)
def multinomial(self, inputs, input_types):
probs = inputs[0]
num_samples = inputs[1]
replacement = inputs[2] if inputs[2] else True
assert not (
replacement is False and num_samples > 1
), "Multinomial without replacement is not yet supported."
# Ideally this seed would be generated by a previous threefry operation.
# Eventually we might want to add a global store for random keys.
seed = np.random.randint(1e6)
key = _op.random.threefry_key(seed)
output = _op.random.multinomial(key, probs, num_samples)
_, indices = _expr.TupleWrapper(output, 2)
return indices
    def weight_norm(self, inputs, input_types):
        # Convert aten::_weight_norm: reconstruct the weight from direction
        # weight_v and magnitude weight_g as w = g * v / ||v||_2, with the
        # norm reduced over all axes except `dim`.
        weight_v, weight_g = inputs[0], inputs[1]
        dim = inputs[2]
        dtype = input_types[0]
        order = 2.0
        reci_order = _expr.const(1.0 / order, dtype=dtype)
        order = _expr.const(order)

        # NOTE(review): `exclude=2` is passed where a boolean is expected
        # (truthy, so presumably intended as exclude=True to reduce over all
        # axes except `dim`) — confirm against _op.reduce.sum's signature.
        norm_v = _op.power(
            _op.reduce.sum(_op.power(_op.abs(weight_v), order), axis=dim, exclude=2, keepdims=True),
            reci_order,
        )
        return weight_g * (weight_v / norm_v)
# Operator mappings
    def create_convert_map(self):
        """Populate ``self.convert_map``: Torch IR operator name
        (e.g. "aten::relu", "torchvision::nms") -> converter callable.

        Values are bound methods on this class, or small closures produced by
        the ``make_*`` factories for families of similar ops (elementwise,
        unary, reduce, pooling, upsampling).
        """
        self.convert_map = {
            "aten::is_floating_point": self.is_floating_point,
            "aten::pixel_shuffle": self.pixel_shuffle,
            "aten::device": self.none,
            "prim::device": self.none,
            "aten::sub": self.sub,
            "aten::max": self.max,
            "aten::min": self.min,
            "aten::maximum": self.maximum,
            "aten::minimum": self.minimum,
            "aten::amax": self.max,
            "aten::amin": self.min,
            "aten::stft": self.stft,
            "aten::mul": self.make_elemwise("multiply"),
            "aten::pow": self.make_elemwise("power"),
            "aten::lerp": self.lerp,
            "aten::arange": self.arange,
            "aten::meshgrid": self.meshgrid,
            "aten::div": self.make_elemwise("divide"),
            "aten::floor_divide": self.make_elemwise("floor_divide"),
            "aten::true_divide": self.make_elemwise("divide"),
            "aten::fmod": self.make_elemwise("trunc_mod"),
            "aten::remainder": self.make_elemwise("floor_mod"),
            "aten::addcdiv": self.addcdiv,
            "aten::addcmul": self.addcmul,
            # Tensor creation / filling ops
            "aten::ones": self.ones,
            "aten::ones_like": self.ones_like,
            "aten::zeros": self.zeros,
            "aten::zero_": self.zero_,
            "aten::zeros_like": self.zeros_like,
            "aten::new_zeros": self.new_zeros,
            "aten::new_ones": self.new_ones,
            "aten::full": self.full,
            "aten::full_like": self.full_like,
            "aten::new_full": self.new_full,
            "aten::fill_": self.fill_,
            "aten::linspace": self.linspace,
            "aten::reciprocal": self.reciprocal,
            "aten::repeat": self.repeat,
            "aten::repeat_interleave": self.repeat_interleave,
            "aten::to": self.to,
            # Shape manipulation
            "aten::squeeze": self.squeeze,
            "aten::unsqueeze": self.unsqueeze,
            "aten::cat": self.concatenate,
            "aten::slice": self.slice,
            "aten::narrow": self.narrow,
            "aten::split": self.split,
            "aten::tensor_split": self.tensor_split,
            "aten::split_with_sizes": self.split_with_sizes,
            "aten::select": self.select,
            "aten::take": self.take,
            "aten::where": self.where,
            "aten::topk": self.topk,
            # Activations
            "aten::relu": self.relu,
            "aten::relu6": self.relu6,
            "aten::prelu": self.prelu,
            "aten::leaky_relu": self.leaky_relu,
            "aten::elu": self.elu,
            "aten::celu": self.celu,
            "aten::gelu": self.gelu,
            "aten::selu": self.selu,
            "aten::silu": self.silu,
            "aten::glu": self.glu,
            "aten::log_sigmoid": self.log_sigmoid,
            "aten::adaptive_avg_pool1d": functools.partial(
                self.adaptive_avg_pool, _op.nn.adaptive_avg_pool1d
            ),
            "aten::adaptive_avg_pool2d": functools.partial(
                self.adaptive_avg_pool, _op.nn.adaptive_avg_pool2d
            ),
            "aten::adaptive_avg_pool3d": functools.partial(
                self.adaptive_avg_pool, _op.nn.adaptive_avg_pool3d
            ),
            "aten::adaptive_max_pool1d": functools.partial(
                self.adaptive_max_pool, _op.nn.adaptive_max_pool1d
            ),
            "aten::adaptive_max_pool2d": functools.partial(
                self.adaptive_max_pool, _op.nn.adaptive_max_pool2d
            ),
            "aten::adaptive_max_pool3d": functools.partial(
                self.adaptive_max_pool, _op.nn.adaptive_max_pool3d
            ),
            "aten::max_pool2d": self.maxpool_2d,
            "aten::max_pool2d_with_indices": self.maxpool_2d_with_indices,
            "aten::max_pool1d": self.maxpool_1d,
            "aten::max_pool3d": self.maxpool_3d,
            "aten::hardtanh": self.hardtanh,
            "aten::_convolution": self.convolution,
            "aten::softmax": self.softmax,
            "aten::threshold": self.threshold,
            "aten::contiguous": self.contiguous,
            # Normalization layers
            "aten::batch_norm": self.batch_norm,
            "aten::instance_norm": self.instance_norm,
            "aten::layer_norm": self.layer_norm,
            "aten::group_norm": self.group_norm,
            "aten::transpose": self.transpose,
            "aten::t": self.transpose,
            "aten::numpy_T": self.numpy_T,
            "aten::flatten": self.flatten,
            "aten::addmm": self.addmm,
            "aten::size": self.size,
            "aten::view": self.view,
            "aten::reshape": self.reshape,
            "aten::reshape_as": self.reshape_as,
            "aten::clone": self.clone,
            "aten::log_softmax": self.log_softmax,
            "aten::sigmoid": self.sigmoid,
            "aten::softplus": self.softplus,
            "aten::avg_pool1d": self.make_avg_pool(1),
            "aten::avg_pool2d": self.make_avg_pool(2),
            "aten::avg_pool3d": self.make_avg_pool(3),
            "aten::linear": self.linear,
            # All dropout variants are no-ops at inference time
            "aten::dropout": self.dropout,
            "aten::feature_dropout": self.dropout,
            "aten::alpha_dropout": self.dropout,
            "aten::mean": self.mean,
            "aten::chunk": self.chunk,
            "aten::unsafe_chunk": self.chunk,
            "aten::matmul": self.matmul,
            "aten::bmm": self.matmul,
            "aten::baddbmm": self.baddbmm,
            "aten::expand": self.expand,
            "aten::Int": self.int,
            "prim::NumToTensor": self.numtotensor,
            "prim::ImplicitTensorToNum": self.tensortonum,
            "aten::ScalarImplicit": self.tensortonum,
            # Padding
            "aten::pad": self.pad,
            "aten::constant_pad_nd": self.constant_pad_nd,
            "aten::reflection_pad1d": self.reflection_pad1d,
            "aten::reflection_pad2d": self.reflection_pad2d,
            "aten::replication_pad1d": self.replication_pad1d,
            "aten::replication_pad2d": self.replication_pad2d,
            "aten::replication_pad3d": self.replication_pad3d,
            "aten::permute": self.transpose,
            # Reductions
            "aten::sum": self.make_reduce("sum"),
            "aten::prod": self.make_reduce("prod"),
            "aten::argmin": self.make_reduce("argmin"),
            "aten::argmax": self.make_reduce("argmax"),
            "aten::norm": self.norm,
            "aten::frobenius_norm": self.frobenius_norm,
            "aten::std": self.std,
            "aten::var": self.variance,
            "aten::var_mean": self.var_mean,
            # Elementwise unary math
            "aten::abs": self.make_unary("abs"),
            "aten::neg": self.make_unary("negative"),
            "aten::cos": self.make_unary("cos"),
            "aten::cosh": self.make_unary("cosh"),
            "aten::sin": self.make_unary("sin"),
            "aten::sinh": self.make_unary("sinh"),
            "aten::tan": self.make_unary("tan"),
            "aten::tanh": self.make_unary("tanh"),
            "aten::acos": self.make_unary("acos"),
            "aten::asin": self.make_unary("asin"),
            "aten::atan": self.make_unary("atan"),
            "aten::log": self.make_unary("log"),
            "aten::log2": self.make_unary("log2"),
            "aten::log10": self.make_unary("log10"),
            "aten::log1p": self.log1p,
            "aten::exp": self.make_unary("exp"),
            "aten::erf": self.make_unary("erf"),
            "aten::trunc": self.make_unary("trunc"),
            "aten::sign": self.make_unary("sign"),
            "aten::sqrt": self.make_unary("sqrt"),
            "aten::rsqrt": self.make_unary("rsqrt"),
            "aten::square": self.square,
            "aten::tril": functools.partial(self.trilu, mode="tril"),
            "aten::triu": functools.partial(self.trilu, mode="triu"),
            "aten::ceil": self.make_unary("ceil"),
            "aten::floor": self.make_unary("floor"),
            "aten::round": self.make_unary("round"),
            "aten::isfinite": self.make_unary("isfinite"),
            "aten::isinf": self.make_unary("isinf"),
            "aten::isnan": self.make_unary("isnan"),
            "aten::clamp": self.clamp,
            "aten::clamp_min": self.clamp_min,
            "aten::clamp_max": self.clamp_max,
            "aten::detach": self.identity,
            # Upsampling / interpolation
            "aten::upsample_bilinear2d": self.make_upsample("linear"),
            "aten::upsample_bicubic2d": self.make_upsample("cubic"),
            "aten::upsample_nearest2d": self.make_upsample("nearest_neighbor"),
            "aten::upsample_trilinear3d": self.make_upsample3d("linear"),
            "aten::upsample_nearest3d": self.make_upsample3d("nearest_neighbor"),
            "aten::expand_as": self.expand_as,
            "aten::broadcast_tensors": self.broadcast_tensors,
            # Comparisons and logical ops
            "aten::lt": self.make_elemwise("less"),
            "aten::gt": self.make_elemwise("greater"),
            "aten::le": self.make_elemwise("less_equal"),
            "aten::ge": self.make_elemwise("greater_equal"),
            "aten::ne": self.make_elemwise("not_equal"),
            "aten::eq": self.make_elemwise("equal"),
            "aten::logical_not": self.logical_not,
            "aten::logical_xor": self.logical_xor,
            "aten::bitwise_not": self.bitwise_not,
            "aten::bitwise_xor": self.bitwise_xor,
            "aten::Bool": self.Bool,
            "aten::Float": self.Float,
            "aten::rsub": self.rsub,
            "aten::embedding": self.embedding,
            "aten::one_hot": self.one_hot,
            "aten::mm": self.matmul,
            "aten::add": self.add,
            "aten::stack": self.stack,
            "aten::__getitem__": self.list_getitem,
            "aten::len": self.list_len,
            "aten::type_as": self.type_as,
            "aten::gather": self.gather,
            "aten::index_select": self.select,
            "aten::index": self.index,
            "torchvision::nms": self.nms,
            "aten::logsumexp": self.logsumexp,
            "torchvision::roi_align": self.roi_align,
            "torchvision::deform_conv2d": self.deform_conv2d,
            "aten::unbind": self.unbind,
            "aten::__and__": self.logical_and,
            "aten::logical_and": self.logical_and,
            "aten::_shape_as_tensor": self.shape_as_tensor,
            "aten::nonzero": self.nonzero,
            "aten::nonzero_numpy": self.nonzero_numpy,
            "aten::scatter": self.scatter,
            "aten::scatter_add": self.scatter_add,
            "aten::scatter_reduce": self.scatter_reduce,
            "aten::index_put": self.index_put,
            "aten::scalar_tensor": self.scalar_tensor,
            "aten::__interpolate": self.interpolate,
            "aten::IntImplicit": self.identity,
            "aten::tensor": self.identity,  # used for example in tensor(1.0)
            "aten::numel": self.numel,
            "aten::empty": self.empty,
            "aten::empty_like": self.empty_like,
            "aten::new_empty": self.new_empty,
            "aten::randn": self.randn,
            "aten::bincount": self.bincount,
            "aten::__not__": self.logical_not,
            "aten::hardswish": self.hard_swish,
            "aten::hardsigmoid": self.hard_sigmoid,
            "aten::cumsum": self.cumsum,
            "aten::masked_fill": self.masked_fill,
            "aten::masked_select": self.masked_select,
            "aten::argsort": self.argsort,
            "aten::sort": self.sort,
            "aten::_unique2": self.unique,
            # Losses
            "aten::nll_loss": self.nll_loss,
            "aten::nll_loss2d": self.nll_loss,
            "aten::nll_loss_nd": self.nll_loss,
            "aten::cross_entropy_loss": self.cross_entropy_loss_with_logits,
            "aten::l1_loss": self.l1_loss,
            "aten::mse_loss": self.mse_loss,
            "aten::flip": self.flip,
            # Recurrent layers
            "aten::rnn_tanh": functools.partial(self.rnn, nonlinearity="tanh"),
            "aten::rnn_relu": functools.partial(self.rnn, nonlinearity="relu"),
            "aten::gru": self.gru,
            "aten::lstm": self.lstm,
            "aten::all": functools.partial(self.all_any_common, _op.all),
            "aten::any": functools.partial(self.all_any_common, _op.any),
            "aten::searchsorted": self.searchsorted,
            "aten::bucketize": self.bucketize,
            "aten::roll": self.roll,
            "aten::einsum": self.einsum,
            "aten::dot": self.dot,
            "aten::mv": self.mv,
            "aten::grid_sampler": self.grid_sampler,
            "aten::__ior__": self.make_elemwise("bitwise_or"),
            "aten::__iand__": self.make_elemwise("bitwise_and"),
            "aten::__ixor__": self.make_elemwise("bitwise_xor"),
            "aten::__lshift__": self.make_elemwise("left_shift"),
            "aten::__rshift__": self.make_elemwise("right_shift"),
            "aten::multinomial": self.multinomial,
            "aten::_weight_norm": self.weight_norm,
        }
    def update_convert_map(self, custom_map):
        """Merge user-supplied operator converters into the default map,
        overriding any existing entries with the same operator name."""
        self.convert_map.update(custom_map)
def report_missing_conversion(self, op_names):
"""Check if all ops in an input graph are supported by TVM"""
known_ops = [
"prim::Constant",
"prim::GetAttr",
"prim::ListConstruct",
"prim::ListUnpack",
"prim::TupleConstruct",
"prim::TupleUnpack",
"prim::RaiseException",
"prim::If",
"prim::Loop",
]
known_ops += list(self.convert_map.keys())
known_ops += list(qnn_torch.convert_map.keys())
missing = []
for op_name in op_names:
# Also take care of in-place variant ops like aten::relu_
if op_name not in known_ops and not (
op_name.endswith("_") and op_name[:-1] in known_ops
):
missing.append(op_name)
if missing:
msg = f"The following operators are not implemented: {missing}"
raise NotImplementedError(msg)
def convert_block(self, block, outputs):
"""Translate Torch "Block", used for prim::If and prim::Loop"""
ops = _get_operator_nodes(
block.nodes(), self.source_map, self.op_type_dict, self.use_parser_friendly_name
)
ret_names = _get_input_names(block.returnNode())
return self.convert_operators(ops, outputs, ret_names)
def convert_if(self, if_node, outputs):
"""Translate Torch prim::If to Relay If"""
cond = outputs[if_node.inputsAt(0).debugName()]
blocks = list(if_node.blocks())
true_branch = self.convert_block(blocks[0], outputs)
false_branch = self.convert_block(blocks[1], outputs)
assert len(true_branch) == 1 and len(false_branch) == 1
return _expr.If(cond, true_branch[0], false_branch[0])
    def convert_loop(self, loop_node, outputs):
        """Translate Torch prim::Loop to Relay while_loop.

        Handles both counted for-loops and condition-driven while-loops, and
        threads free variables through as extra loop variables to avoid code
        blow-up (see the comment below). Returns one Relay expr per Torch loop
        variable.
        """

        def get_input(index):
            # Resolve loop input `index` to a Relay expr: fold prim::Constant
            # inputs directly, otherwise look the value up in `outputs`.
            ivalue = loop_node.inputsAt(index)
            inode = ivalue.node()
            if inode.kind() == "prim::Constant":
                return _expr.const(_get_constant(inode))
            var_name = ivalue.debugName()
            assert var_name in outputs
            return _wrap_const(outputs[var_name])

        # Refer to the spec for prim::Loop below
        # https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/OVERVIEW.md#loops
        # The first input: %max_trip_count
        # The second input: %initial_condition
        # The rest of input: loop variables
        max_loop_count = get_input(0)
        init_cond = get_input(1)
        num_loop_var = len(list(loop_node.inputs())) - 2
        init_vals = [get_input(i + 2) for i in range(num_loop_var)]

        # while loop has always max_loop_count being int64 max
        # max_loop_count.data (tvm.runtime.NDArray) is -1, so _get_constant again
        is_while_loop = (
            isinstance(max_loop_count, _expr.Constant)
            and _get_constant(loop_node.inputsAt(0).node()) == sys.maxsize
        )

        if is_while_loop:
            loop_iter_dtype = "bool"
            # while loop with non input dependent condition such as while i < 10:
            # init_cond is int, need to cast to bool to type check
            if isinstance(init_cond, _expr.Constant):
                init_cond = _op.cast(init_cond, "bool")
            init_loop_iter_val = init_cond
        else:
            loop_iter_dtype = "int32"
            # always count from 0
            init_loop_iter_val = _expr.const(0, dtype="int32")

        body_block = list(loop_node.blocks())[0]
        block_input_names = _get_input_names(body_block)
        num_block_inputs = len(block_input_names)
        # block input 0 is the iteration counter / condition; the rest are
        # the user loop variables, seeded with their initial values
        name_val_pairs = list(zip(block_input_names, [init_loop_iter_val] + init_vals))
        outputs.update(name_val_pairs)

        def get_var(name, val):
            # Create a Relay var for a loop variable, typed from `val` when
            # possible; zero-sized dims become Any() to stay shape-generic.
            if val:
                checked_type = self.infer_type_with_prelude(val)
                if hasattr(checked_type, "shape"):
                    shape = get_const_tuple(checked_type.shape)
                    actual_shape = []
                    for dim in shape:
                        if isinstance(dim, int) and dim == 0:
                            actual_shape.append(Any())
                        else:
                            actual_shape.append(dim)
                    expr = _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
                else:
                    expr = _expr.var(name, type_annotation=checked_type)
                return set_span(expr, val.span) if val.span else expr
            return _expr.var(name)

        source_name = self.source_map[loop_node]
        loop_iter_var = set_span(
            _expr.var(block_input_names[0], shape=(), dtype=loop_iter_dtype), span=source_name
        )
        loop_vars = set_span(
            [get_var(name, val) for name, val in name_val_pairs[1:]], span=source_name
        )

        # Add non constant free variables to loop variables to prevent code blow up
        # Without this, if there are two for loops in a row, which often happens
        # if the outer loop is unrolled, the computation corresponding to the first for loop
        # is inlined inside loop body, turning O(N) + O(N) computation into O(N^2).
        # This issue was found when converting from Stacked LSTM test. Torch does not add the
        # output of the earlier loop into loop variables of the next loop.
        # So the variable corresponding to the first loop output appears free in the second
        # loop body.
        free_vars = [
            var
            for var in _get_free_vars_from_block(body_block)
            if var in outputs
            and not isinstance(outputs[var], (_expr.Constant, int, float, str))
            and outputs[var]
        ]

        # Remember the pre-loop bindings so they can be restored afterwards
        prev_outputs = {}
        for name in free_vars:
            prev_output = outputs[name]
            new_loop_var = get_var(name, prev_output)
            prev_outputs[name] = prev_output
            outputs[name] = set_span(new_loop_var, source_name)
            loop_vars.append(new_loop_var)
            init_vals.append(prev_output)

        def cond(*current_vals):
            # Loop-continue predicate: condition flag for while, i < N for for.
            i = current_vals[0]
            if is_while_loop:
                return _op.equal(i, _expr.const(True, "bool"))
            return _op.less(i, max_loop_count)

        def body(*current_vals):
            # Update loop variables using the prev iteration outputs
            assert len(current_vals) == num_block_inputs + len(free_vars)
            for (i, val) in enumerate(current_vals):
                if i < num_block_inputs:
                    outputs[block_input_names[i]] = val
                else:
                    outputs[free_vars[i - num_block_inputs]] = val
            block_outputs = self.convert_block(body_block, outputs)
            block_outputs += [outputs[name] for name in free_vars]
            if not is_while_loop:
                # iter var increment implicit in torch, so do it manually
                # for while loop, block_outputs[0] is already a boolean,
                # the result of termination check
                incr = _expr.const(1, dtype="int32")
                block_outputs[0] = current_vals[0] + incr
            return block_outputs

        loop = while_loop(cond, [loop_iter_var] + loop_vars, body)
        loop_val = loop(init_loop_iter_val, *init_vals)

        # restore original output values for free vars
        outputs.update(prev_outputs)

        # The first element is a loop counter or boolean condition, ignore it
        return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)]
def convert_operators(self, operators, outputs, ret_names):
"""Convert each Torch IR operators to Relay equivalent"""
for node_name, op_node in operators:
operator = op_node.kind()
inputs = _get_op_inputs(op_node, outputs)
# we need to record what current operator is to provide correct source name
# for operators needed to be taken care with (e.g. nms / arange ...)
self.current_op.append(op_node)
if operator == "prim::Constant":
outputs[node_name] = _get_constant(op_node)
elif operator == "prim::ListConstruct" and _should_construct_dynamic_list(op_node):
outputs[node_name] = set_span(
self.convert_to_list_adt(inputs), self.source_map[op_node]
)
elif operator == "prim::ListConstruct":
# This assumes that no more elements will be appended to this list
# In this case, we keep the Python list
outputs[node_name] = inputs
elif operator == "prim::TupleConstruct":
def _handel_nested_input(inputs):
inputs_list = []
for i, _ in enumerate(inputs):
if isinstance(inputs[i], list):
inputs_list.append(_handel_nested_input(inputs[i]))
else:
assert isinstance(inputs[i], _expr.Expr)
inputs_list.append(inputs[i])
return _expr.Tuple(inputs_list)
outputs[node_name] = set_span(
_handel_nested_input(inputs), self.source_map[op_node]
)
elif operator in ["prim::ListUnpack", "prim::TupleUnpack"]:
assert len(inputs) == 1
if isinstance(inputs[0], (list, _expr.TupleWrapper)):
unpacked = inputs[0]
else:
unpacked = _unpack_tuple(inputs[0])
outputs.update(
zip(_get_output_names(op_node), set_span(unpacked, self.source_map[op_node]))
)
elif operator == "prim::prim::RaiseException":
logger.warning("raising exceptions is ignored")
outputs[node_name] = None
elif operator == "prim::If":
if_out = self.convert_if(op_node, outputs)
outputs[node_name] = set_span(if_out, self.source_map[op_node])
elif operator == "prim::Loop":
loop_out = self.convert_loop(op_node, outputs)
unpacked_names = _get_output_names(op_node)
assert len(loop_out) == len(unpacked_names)
outputs.update(zip(unpacked_names, set_span(loop_out, self.source_map[op_node])))
else:
if operator not in self.convert_map:
# At this point, the only possible ops that are not in convert_map are
# in-place variant of ops like aten::relu_
assert operator.endswith("_")
logger.warning(
"An in-place op %s found, the result will not be correct "
"if the model depends on side-effects by this op.",
operator,
)
relay_op = self.convert_map[operator[:-1]]
else:
relay_op = self.convert_map[operator]
self._set_parameter_source_name(op_node, outputs)
relay_out = relay_op(
# since the elements in "outputs" may change due to span-filling process
# we have to call "_get_op_inputs" again rather than use "inputs" directly
_get_op_inputs(op_node, outputs),
_get_input_types(op_node, outputs, default_dtype=self.default_dtype),
)
relay_out = set_span(relay_out, self.source_map[op_node])
self.record_output_type(relay_out)
if isinstance(relay_out, tuple):
# This is for torch operators that return multiple outputs
# See _adaptive_max_2d above for example
out_names = _get_output_names(op_node)
outputs.update(zip(out_names, relay_out))
else:
assert op_node.outputsSize() == 1
outputs[node_name] = relay_out
self.current_op.pop()
return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]
def _set_parameter_source_name(self, op_node, outputs):
"""A helper function to rewrite source_name of parameter."""
for name in _get_input_names(op_node):
expr = outputs[name]
if isinstance(expr, (_expr.Var, _expr.Constant)):
name_sep = "_" if self.use_parser_friendly_name else "."
source_name = [self.source_map[op_node]]
if isinstance(expr, _expr.Var):
# variable name should have contained node source name
# for op with attributes in convert_params stage
# e.g. "aten::batch_norm_5.running_mean"
if expr.name_hint.startswith(source_name[0]):
source_name[0] = expr.name_hint
else:
source_name.append(expr.name_hint)
new_expr = set_span(expr, name_sep.join(source_name))
outputs[name] = new_expr
def _pytorch_result_type(dtypes, non_tensor_inputs):
    """Promote a set of TVM dtype strings (plus scalar inputs) the way
    PyTorch's type-promotion rules would, returning a TVM dtype string."""
    import torch

    dtype_map = {
        "float64": torch.float64,
        "float32": torch.float32,
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
        "int64": torch.int64,
        "int32": torch.int32,
        "int16": torch.int16,
        "int8": torch.int8,
        "uint8": torch.uint8,
        "bool": torch.bool,
    }

    def _promote(current, other):
        # defer the actual promotion decision to torch.result_type
        return _convert_data_type(
            str(torch.result_type(torch.zeros((), dtype=dtype_map[current]), other))
        )

    result_type = "bool"  # the smallest type, used when no tensor dtypes exist
    if dtypes:
        result_type = dtypes[0]
        for dt in dtypes[1:]:
            # identical dtypes need no promotion (and quantized ones cannot be)
            if dt != result_type:
                result_type = _promote(result_type, torch.zeros((), dtype=dtype_map[dt]))

    for inp in non_tensor_inputs:
        result_type = _promote(result_type, inp)

    return result_type
# Helper functions for operator implementation
def _convert_dtype_value(val):
    """Convert a PyTorch numeric dtype id (as found in the IR) to a TVM dtype.

    ``None`` defaults to torch.int64.
    """
    convert_torch_dtype_map = {
        11: "torch.bool",
        7: "torch.float64",
        6: "torch.float32",
        5: "torch.float16",
        4: "torch.int64",
        3: "torch.int32",
        2: "torch.int16",
        1: "torch.int8",
        0: "torch.uint8",
        None: "torch.int64",
    }
    try:
        torch_dtype = convert_torch_dtype_map[val]
    except KeyError:
        raise NotImplementedError(f"Torch data type value {val} is not handled yet.") from None
    return _convert_data_type(torch_dtype)
def _convert_data_type(input_type, default_dtype=None):
"""converts the PyTorch scalar type input_type to a TVM dtype.
optionally, default_dtype can be a TVM dtype that is used
if input_type is None (but not when it is unknown)"""
if input_type is None and default_dtype is not None:
return default_dtype
input_type = input_type.lower()
if input_type in ["double", "float64", "torch.float64"]:
return "float64"
elif input_type in ["float", "float32", "torch.float32"]:
return "float32"
elif input_type in ["half", "float16", "torch.float16"]:
return "float16"
elif input_type in ["long", "int64", "torch.int64"]:
return "int64"
elif input_type in ["int", "int32", "torch.int32"]:
return "int32"
elif input_type in ["short", "int16", "torch.int16"]:
return "int16"
elif input_type in ["char", "int8", "torch.int8"]:
return "int8"
elif input_type in ["byte", "uint8", "torch.uint8"]:
return "uint8"
elif input_type in ["quint8", "torch.quint8"]:
return "quint8"
elif input_type in ["qint8", "torch.qint8"]:
return "qint8"
elif input_type in ["qint32", "torch.qint32"]:
return "qint32"
elif input_type in ["bool", "torch.bool"]:
return "bool"
elif input_type in ["str"]:
return "str"
else:
raise NotImplementedError(f"input_type {input_type} is not handled yet")
return "float32" # Never reached
def _create_typed_const(data, dtype):
    """create a (scalar) constant of given value and dtype.
    dtype should be a TVM dtype"""
    np_ctor = {
        "float64": np.float64,
        "float32": np.float32,
        "float16": np.float16,
        "int64": np.int64,
        "int32": np.int32,
        "int16": np.int16,
        "int8": np.int8,
        "uint8": np.uint8,
    }.get(dtype)
    if np_ctor is None:
        raise NotImplementedError(f"input_type {dtype} is not handled yet")
    return _expr.const(np_ctor(data), dtype=dtype)
def _wrap_const(c):
    """Promote a plain Python value to a Relay constant; pass exprs, lists and
    Any through unchanged."""
    if isinstance(c, (_expr.Expr, list, tvm.tir.expr.Any)):
        return c
    return _expr.const(c)
def _run_jit_passes(graph, enable_lower_all_tuples=True):
    """The inline pass is necessary to unwrap prim::CallMethod"""
    # pylint: disable=c-extension-no-member
    import torch

    # torchvision detection models (torch >= 1.6) need the ONNX substitution
    # variant, which matches _jit_pass_inline except for special handling of
    # a few ops such as aten::__interpolate()
    inline_pass = (
        torch._C._jit_pass_onnx_function_substitution
        if is_version_greater_than("1.5.1")
        else torch._C._jit_pass_inline
    )
    inline_pass(graph)

    if enable_lower_all_tuples:
        torch._C._jit_pass_lower_all_tuples(graph)
def _get_tensor_and_var(torch_tensor, name):
    """Materialize a torch tensor as a TVM NDArray plus a matching Relay var."""
    ndarray = tvm.nd.array(torch_tensor.cpu().numpy())
    relay_var = _expr.var(name, shape=ndarray.shape, dtype=ndarray.dtype)
    return ndarray, relay_var
def _get_output_name(node):
assert node.outputsSize() == 1
return node.output().debugName()
def _get_output_names(node):
return [output.debugName() for output in node.outputs()]
def _get_input_names(node_or_graph):
return [inp.debugName() for inp in node_or_graph.inputs()]
def _get_op_inputs(op_node, outputs):
    """Look up the already-converted Relay expr for each input of *op_node*."""
    return [outputs[input_name] for input_name in _get_input_names(op_node)]
def _get_node_type(node):
assert node.outputsSize() == 1
return node.output().type().kind()
def _get_uses(node):
uses = []
for output in node.outputs():
uses += output.uses()
return uses
def _get_users(node):
    """Nodes that consume any output of *node*."""
    return [u.user for u in _get_uses(node)]
def _getattr_full_name(getattrs, sep="."):
return sep.join([getattr_attr_name(node) for node in getattrs])
def _get_pytorch_value_type(typ, default_dtype="float32"):
    """Map a torch IR value type to a TVM dtype string, or to the markers
    "ListType" / "UnsupportedType" for non-scalar kinds."""
    kind = typ.kind()
    if kind == "TensorType":
        scalar_type = typ.scalarType()
        if scalar_type is None:
            # Tensor's type can be unknown if we use torch.jit.script(...)
            # Defaults can be passed in, if not it is float32
            logger.warning("Untyped Tensor found, assume it is %s", default_dtype)
            return default_dtype
        return _convert_data_type(scalar_type)
    if kind == "ListType":
        return "ListType"
    if kind in ("IntType", "FloatType", "BoolType", "StringType", "OptionalType"):
        pt_dtype = str(typ).lower()
        return pt_dtype if kind == "OptionalType" else _convert_data_type(pt_dtype)
    return "UnsupportedType"
def _get_input_types(op_node, outputs, default_dtype="float32"):
    """Returns a TVM dtype for each input nodes derived from the torch type"""
    input_types = []
    for inp in op_node.inputs():
        if inp.node().kind() != "prim::GetAttr":
            input_types.append(_get_pytorch_value_type(inp.type(), default_dtype=default_dtype))
            continue
        # GetAttr nodes always return None from scalarType(), so recover the
        # dtype from the Relay var created for the attribute instead.
        name = inp.debugName()
        assert name in outputs
        if isinstance(outputs[name], _expr.Var):
            input_types.append(outputs[name].type_annotation.dtype)
        else:
            # For quantized modules with parameters, here we would get
            # "prim::GetAttr[name="_packed_params"]". Since the dtype corresponding to
            # _packed_params is not needed by quantized ops, we return an arbitrary type.
            input_types.append(default_dtype)
    return input_types
def _get_constant(node):
    """Retrieve a constant associated with this prim::Constant node.

    Returns a Python scalar, a Relay constant (for tensors), a string, or
    None, depending on the node's output type kind.
    """
    attribute_names = node.attributeNames()
    num_attributes = len(attribute_names)

    if num_attributes == 1:
        attr_name = attribute_names[0]
        ty = node.output().type().kind()

        if ty == "IntType":
            return node.i(attr_name)
        elif ty == "BoolType":
            return bool(node.i(attr_name))
        elif ty in ["FloatType", "LongType"]:
            # NOTE(review): LongType is read via node.f() (the float accessor)
            # here — presumably torch stores these as doubles in the IR; confirm.
            return node.f(attr_name)
        elif ty in ["TensorType", "CompleteTensorType"]:
            tensor = node.t(attr_name)
            if tensor.is_cuda:
                tensor = tensor.cpu()
            if len(tensor.shape) == 0:  # tensor(0.1)
                # TODO(t-vi): When is this needed?
                return tensor.item()
            return _wrap_const(tensor.numpy())
        elif ty in ["DeviceObjType", "StringType"]:
            return node.s(attr_name)
        elif ty == "FunctionType":
            return None
        else:
            raise NotImplementedError(f"Unsupported type: {ty}")
    else:
        # attribute-less constants (e.g. prim::Constant for None) convert to None
        assert num_attributes == 0
        return None
def _rename_outputs(node, source_map, op_type_dict, use_parser_friendly_name):
"""Rewrite debug name of node outputs with its operator type"""
def _get_source_name(op_type):
op_idx = 0
if op_type in op_type_dict:
op_idx = op_type_dict[op_type] + 1
op_type_dict[op_type] = op_idx
return "_".join([op_type, str(op_idx)])
# get source name of operator and rename all of its outputs
# e.g. node.kind(): aten::adaptive_max_pool2d
# node_src_name -> aten::adaptive_max_pool2d_x
# output_1 -> aten::adaptive_max_pool2d_x_0
# output_2 -> aten::adaptive_max_pool2d_x_1
if node.kind() != "prim::GetAttr":
node_src_name = _get_source_name(node.kind())
for index, output in enumerate(node.outputs()):
output.setDebugName("_".join([node_src_name, str(index)]))
# update source map
# if use_parser_friendly_name is True: e.g. prim::Constant_0 -> prim__Constant_0
if use_parser_friendly_name:
node_src_name = re.sub(r":|\.", "_", node_src_name)
source_map[node] = node_src_name
def _debug_rename(graph, use_parser_friendly_name):
    """Returns map between node and source name"""
    source_map, op_type_dict = {}, {}
    prim_with_blocks = ("prim::If", "prim::Loop")

    def _walk(nodes):
        for node in nodes:
            if node.outputsSize() == 0:
                continue
            # recurse into nested blocks before renaming the owner node
            if node.kind() in prim_with_blocks:
                for block in node.blocks():
                    _walk(block.nodes())
            _rename_outputs(node, source_map, op_type_dict, use_parser_friendly_name)

    _walk(graph.nodes())
    return source_map
def _get_operator_nodes(nodes, source_map=None, op_type_dict=None, use_parser_friendly_name=False):
    """Returns torch IR nodes that need conversion to Relay.

    When both *source_map* and *op_type_dict* are provided, node outputs are
    renamed with operator-derived source names as a side effect.
    """
    # Rename only when both bookkeeping dicts were supplied. The previous
    # test, `all([source_map, op_type_dict]) is not None`, was always True
    # (all() returns a bool, never None), so calling with the defaults would
    # have crashed inside _rename_outputs on source_map=None.
    should_rename_graph = source_map is not None and op_type_dict is not None

    ops = []
    for node in nodes:
        if node.outputsSize() == 0:
            continue
        if should_rename_graph:
            _rename_outputs(node, source_map, op_type_dict, use_parser_friendly_name)

        if node.outputsSize() > 1:
            node_name = "_".join(_get_output_names(node))
        else:
            node_name = _get_output_name(node)

        if node.kind() != "prim::GetAttr":
            ops.append((node_name, node))

    return ops
def _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype="float32"):
    """
    Return Relay vars from input shapes and create entries based on
    expected graph inputs - to allow translation

    input_infos is a list of (name, shape) or (name, (shape, dtype)) pairs,
    matched positionally against the graph's inputs. The result maps each
    graph input's debug name to a typed Relay var carrying the user name.
    """
    graph_inputs = list(graph.inputs())
    if is_module:
        # a module has "self" as first input, which we do not need/want
        graph_inputs = graph_inputs[1:]

    if not isinstance(input_infos, list):
        msg = "Graph inputs input_infos should be a list"
        raise RuntimeError(msg)

    if len(graph_inputs) != len(input_infos):
        msg = f"PyTorch has {len(graph_inputs)} inputs and input_infos lists {len(input_infos)}."
        raise RuntimeError(msg)

    def get_relay_ty(ishape, itype, pt_type):
        # Recursively build a Relay type from the user-provided shape/dtype
        # (ishape/itype) cross-checked against the torch IR type (pt_type).
        if pt_type.kind() == "TensorType":
            if not (_is_int_seq(ishape) or len(ishape) == 0):
                msg = "Shape for Tensors must be lists of ints"
                raise RuntimeError(msg)
            # user-provided shape must agree with whatever the graph recorded
            if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or (
                pt_type.sizes() is not None
                and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)])
            ):
                msg = "Shapes of input list and information in the graph do not match"
                raise RuntimeError(msg)
            if len(ishape) > 1 and any(dim <= 0 for dim in ishape[1:]):
                msg = (
                    "Expected input's non-batch dimensions to have positive length, "
                    f"but input has a shape of {pt_type.sizes()}"
                )
                raise RuntimeError(msg)
            pt_dtype = pt_type.scalarType()
            if not pt_dtype and itype:
                pt_dtype = itype
            dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype)
            return TensorType(ishape, dtype)
        elif pt_type.kind() == "TupleType":
            if not isinstance(ishape, tuple):
                msg = "Shapes for tuples must be tuples"
                raise RuntimeError(msg)
            return TupleType(
                [get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())]
            )
        elif pt_type.kind() == "ListType":
            if not isinstance(ishape, list):
                msg = "Shapes for lists must be lists"
                raise RuntimeError(msg)
            pt_elemtype = pt_type.getElementType()
            elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape]
            if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)):
                msg = "List elements need have identical types"
                raise RuntimeError(msg)
            rlist, _, _ = prelude.mod.get_type("List")
            return rlist(elem_tys[0])
        elif pt_type.kind() == "OptionalType":
            # we do not support None yet, so we fill in the type
            return get_relay_ty(ishape, itype, pt_type.getElementType())
        # TODO: scalar inputs
        raise NotImplementedError("unsupported input type")

    input_vars = {}

    # normalize every entry to ('name', (shape, dtype))
    new_input_infos = []
    for num, inp in enumerate(input_infos):
        if not isinstance(inp, tuple):
            msg = f"Graph input {num} is not a tuple"
            raise RuntimeError(msg)
        if len(inp) != 2 or not isinstance(inp[0], str):
            msg = (
                f"Graph input {inp} is not valid,"
                f" expected ('name', shape) or ('name', (shape, dtype))"
            )
            raise RuntimeError(msg)
        if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str):
            new_input_infos.append((inp[0], (inp[1], default_dtype)))
        else:
            new_input_infos.append(inp)

    input_types = [
        (name, get_relay_ty(info[0], info[1], gi.type()))
        for (name, info), gi in zip(new_input_infos, graph_inputs)
    ]

    ir_inputs = [i.debugName() for i in graph_inputs]
    for ir_input, (name, itype) in zip(ir_inputs, input_types):
        inp = _expr.var(name, type_annotation=itype)
        # Translate from graph input to user input name
        input_vars[ir_input] = inp

    return input_vars
def _unpack_tuple(tup):
    """Expand a tuple-valued expression into per-element accessor expressions."""

    def fields_to_items(expr, count):
        return [_expr.TupleGetItem(expr, idx) for idx in range(count)]

    if isinstance(tup, _expr.Tuple):
        return fields_to_items(tup, len(tup.fields))
    if isinstance(tup.type_annotation, TupleType):
        return fields_to_items(tup, len(tup.type_annotation.fields))
    # Unreachable: callers only pass tuple-typed expressions.
    assert False
def _get_free_vars_from_block(block):
    """Collect names used inside ``block`` that are not bound within it."""
    bound = list(_get_input_names(block))
    free = set()
    for node in block.nodes():
        for name in _get_input_names(node):
            if name not in bound:
                free.add(name)
        # Outputs of this node become bound for all subsequent nodes.
        bound.extend(_get_output_names(node))
    return free
def get_use_chains(root_node, terminate=lambda _: False):
    """
    Track a chain of users of this node forward, returning a list of chains
    See get_attr_chains below for its usage
    """

    def walk(node, chain):
        users = _get_users(node)
        if not users or terminate(users):
            return [chain]
        branches = [walk(user, chain + [user]) for user in users]
        return itertools.chain.from_iterable(branches)

    return walk(root_node, [root_node])
def get_attr_chains(root_getattr_node):
    """Returns chains of attribute access starting from root_getattr_node

    For example, given attribute "block", as in "self.block" when "self" points
    to the top level torch.nn.Module, it returns lists of attribute "chains",
    e.g. ['block', '2'], ['block', '1'], ['block', '0', '_packed_params']

    These sets of attributes form full attribute accessors. For example,
    "self.block.1", "self.block.2" will return the second and third submodule,
    and "self.block.0._packed_params" will return the parameters of the first
    submodule.
    """

    def no_more_getattr(users):
        # Stop following the chain once no user is another prim::GetAttr.
        return not any(user.kind() == "prim::GetAttr" for user in users)

    return get_use_chains(root_getattr_node, no_more_getattr)
def convert_params(graph, state_dict, source_map, use_parser_friendly_name=False):
    """
    Return Relay vars and TVM NDArrays for input parameters
    A chain of prim::GetAttr nodes is processed one at a time
    """
    sep = "_" if use_parser_friendly_name else "."
    params = {}
    param_tensors = {}
    packed_param_map = {}
    param_debug_name_map = {}
    vars_by_name = {}
    visited = set()

    for node in graph.findAllNodes("prim::GetAttr", recurse=True):
        if _get_output_name(node) in visited:
            continue

        for getattrs in get_attr_chains(node):
            visited.update(map(_get_output_name, getattrs))
            full_attr = _getattr_full_name(getattrs, sep)
            node_name = _get_output_name(getattrs[-1])
            # set variable name by concatenating first consumer's name with full attribute
            # e.g. "aten::batch_norm_5.running_mean"
            consumer = source_map[_get_users(getattrs[-1])[0]]
            var_name = sep.join([consumer, full_attr.split(sep)[-1]])

            if full_attr.endswith("_packed_params"):  # for quantized models
                packed_param_map[node_name] = full_attr
            elif full_attr in state_dict:
                if var_name in vars_by_name:
                    var = vars_by_name[var_name]
                else:
                    tensor, var = _get_tensor_and_var(state_dict[full_attr], var_name)
                    param_tensors[var_name] = tensor
                    # for quantized parameters to be correctly located
                    param_debug_name_map[node_name] = var_name
                    vars_by_name[var_name] = var
                params[node_name] = var

    return params, param_tensors, packed_param_map, param_debug_name_map
def get_all_op_names(graph):
    """Return all operator names in the input graph"""
    all_nodes = list(graph.nodes())
    # prim::If / prim::Loop own nested blocks; include their inner nodes too.
    for prim in ("prim::If", "prim::Loop"):
        for prim_node in graph.findAllNodes(prim, recurse=True):
            for block in prim_node.blocks():
                all_nodes += block.nodes()
    return {node.kind() for node in all_nodes}
def export_c_graph(location, graph):
    """Convert the graph to an onnx model and export it to the location."""
    import datetime
    import os

    # Create the destination directory on demand.
    os.makedirs(location, exist_ok=True)
    stamp = datetime.datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
    target = os.path.join(location, f"tvm_exported_c_graph_{stamp}.txt")
    with open(target, "w") as out:
        out.write(str(graph))
def from_pytorch(
    script_module,
    input_infos,
    custom_convert_map=None,
    default_dtype="float32",
    use_parser_friendly_name=False,
    keep_quantized_weight=False,
    export_renamed_c_graph_path=None,
):
    """Load PyTorch model in the form of a scripted PyTorch model and convert into relay.
    The companion parameters will be handled automatically.

    Parameters
    ----------
    script_module : TopLevelTracedModule object
        TorchScripted PyTorch graph
        Note: We currently only support traces (ie: torch.jit.trace(model, input))

    input_infos : List of tuples
        Can be (input name, input shape) or (input name, (input shape, input types))
        Graph level input shape and type list
        The same input names need to be used for deployment, so choose easy to
        remember names (such as: input0, input1)
        e.g.
        [('input0', (1, 2)), ('input1', (3, 4))]
        or
        [('input0', ((1, 2), 'int')), ('input1', ((3, 4), 'float'))]

    custom_convert_map : Dictionary of str to Relay op
        A custom op conversion map in the same format as _convert_map above

    default_dtype : str
        The default dtype to use when type information is not provided by PyTorch.

    use_parser_friendly_name : bool
        When True, replace '.' with `_' in a original parameter name.
        The Relay text parser treats a variable name followed by a period as a tuple element access,
        so a variable name like "dense.weight" cannot be parsed correctly.
        Use this option when you want to run the AnnotateSpans pass on the imported module.

    keep_quantized_weight : bool
        Return quantized weights and bias, rather than float ones. PyTorch stores quantized weights
        in a custom format, so we cannot directly access 8 bit weights as Numpy arrays. We use
        a PyTorch function to unpack quantized weights into float32 arrays and quantization
        parameters. By default, we return float32 weights and rely on the QNN lowering and the
        Relay constant folding pass to quantize weights at compile time. In BYOC use cases, however,
        we cannot apply the constant folding pass on a QNN graph. If keep_quantized_weight is True,
        we quantize weights in the frontend using a function that is equivalent to
        qnn.op.quantize(...) operating on Numpy arrays.

    export_renamed_c_graph_path : str, optional
        Export the renamed torch._C.Graph to the path.
        During the conversion, variable names in torch._C.Graph will be assigned based on their op
        types. The exported text file can be the reference to spans.

    Returns
    -------
    mod : tvm.IRModule
        The module that optimizations will be performed on.

    params : dict of str to tvm.runtime.NDArray
        Dict of converted parameters stored in tvm.runtime.ndarray format
    """
    import torch

    mod = tvm.IRModule()
    prelude = Prelude(mod)
    enable_lower_all_tuples = True

    converter = PyTorchOpConverter(prelude, default_dtype, use_parser_friendly_name)

    # Work on a copy so the user's TorchScript graph is left untouched.
    graph = script_module.graph.copy()

    # Check if lower_all_tuples pass can be enabled
    graph_inputs = list(graph.inputs())
    for inp in graph_inputs:
        if inp.type().kind() == "TupleType" or inp.type().kind() == "ListType":
            enable_lower_all_tuples = False
            break

    _run_jit_passes(graph, enable_lower_all_tuples)

    if custom_convert_map:
        converter.update_convert_map(custom_convert_map)

    # Fail early if the graph contains ops with no registered converter.
    op_names = get_all_op_names(graph)
    converter.report_missing_conversion(op_names)

    is_module = isinstance(script_module, torch.jit.ScriptModule)
    params = script_module.state_dict() if is_module else {}
    outputs = _get_relay_input_vars(
        graph, input_infos, prelude, default_dtype=default_dtype, is_module=is_module
    )

    if use_parser_friendly_name:
        new_names = [key.replace(".", "_") for key in params.keys()]
        params = dict(zip(new_names, params.values()))

    # rename _C.Graph here for constructing meaningful source name of graph nodes
    # by doing so, we could Use source_map as the reference to rename model parameters
    source_map = _debug_rename(graph, use_parser_friendly_name)
    param_vars, tensors, packed_param_map, param_debug_name_map = convert_params(
        graph, params, source_map, use_parser_friendly_name
    )

    tvm_params = {k: tvm.nd.array(v) for k, v in tensors.items()}

    # Parameter vars join the name -> relay expr map used during op conversion.
    outputs.update(param_vars)

    # For quantized models
    quantized_ops = set(["aten::quantize_per_tensor", "quantized::linear_dynamic"])
    if len(quantized_ops.intersection(set(op_names))) > 0:
        weight_quant_params = qnn_torch.get_weight_quant_params(
            script_module, packed_param_map.values()
        )
        qnn_torch.inline_input_quant_params_for_fx(graph, tensors, param_debug_name_map)
        input_scales_for_bias = qnn_torch.add_input_quant_params_to_op_inputs(graph)
        qnn_torch.add_quant_params_to_outputs(
            outputs,
            packed_param_map,
            weight_quant_params,
            input_scales_for_bias,
            keep_quantized_weight,
        )
        qnn_torch.add_quant_params(tvm_params, weight_quant_params)
        converter.update_convert_map(qnn_torch.convert_map)

    operator_nodes = _get_operator_nodes(
        graph.nodes(), converter.source_map, converter.op_type_dict, use_parser_friendly_name
    )
    ret_name = _get_input_names(graph.return_node())
    outputs = converter.convert_operators(operator_nodes, outputs, ret_name)

    # ListConstruct kept original python list. Convert to tuple.
    outputs = [_expr.Tuple(output) if isinstance(output, list) else output for output in outputs]
    if len(outputs) > 1:
        ret = _expr.Tuple(outputs)
    else:
        ret = outputs[0]

    # Separate data inputs and parameters to make sure data inputs come first.
    func_args = []
    data_inputs = []
    for arg in _analysis.free_vars(ret):
        if arg.name_hint not in tvm_params.keys():
            data_inputs.append(arg)
        else:
            func_args.append(arg)

    # Ensures the order of data_input is the same as the order of inputs specified in input_info.
    order_input_infos = {
        input_info[0]: len(input_infos) - idx for idx, input_info in enumerate(input_infos)
    }
    # Inputs not mentioned in input_infos sort last (key -1).
    data_inputs = sorted(
        data_inputs,
        key=lambda data_input: order_input_infos[data_input.name_hint]
        if data_input.name_hint in order_input_infos
        else -1,
        reverse=True,
    )

    func_args = data_inputs + func_args

    mod["main"] = tvm.relay.Function(func_args, ret)

    if export_renamed_c_graph_path:
        export_c_graph(export_renamed_c_graph_path, graph)

    return transform.RemoveUnusedFunctions()(mod), tvm_params
| 194,224 | 37.529062 | 120 | py |
tvm | tvm-main/python/tvm/relay/frontend/onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
# pylint: disable=import-outside-toplevel
"""ONNX: Open Neural Network Exchange frontend for Relay."""
import copy
import math
import warnings
from typing import Optional
import numpy as np
import tvm
from tvm import relay
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import loops as _loops
from .. import op as _op
from .. import qnn as _qnn
from .. import random as _random
from .. import ty as _ty
from .. import vision as _vision
from .common import (
AttrCvt,
Renamer,
autopad,
ensure_scalar_shape,
fold_constant,
get_name,
get_relay_op,
gru_cell,
infer_channels,
infer_shape,
infer_type,
infer_value,
lstm_cell,
new_var,
rnn_cell,
shape_of,
try_resolve_var_to_const,
unbind,
set_span,
)
# Public API of this frontend module.
__all__ = ["from_onnx"]

# The default configurations of Relay ONNX frontend.
ONNX_DEFAULT_CONFIGS = {
    # By default, TVM converts qualified onnx `matmul` to `transpose(weight) + nn.batch_matmul_NT`.
    # Change this flag to False to directly convert to `nn.batch_matmul`.
    # Note that `nn.batch_matmul` with format other than NT is in experimental, it may have some
    # performance issues.
    "use_nt_batch_matmul": True
}
class onnx_input(list):
    """A helper extension to list that returns None for out of bound indices."""

    def __getitem__(self, item):
        if isinstance(item, slice):
            # Materialize the requested indices, then fetch each through this
            # class so out-of-range positions come back as None.
            stop = len(self) if item.stop is None else item.stop
            return [self[idx] for idx in range(stop)[item]]
        if isinstance(item, int):
            return list(self)[item] if item < len(self) else None
        raise TypeError(f"list indices must be integers or slices, not {type(item).__name__}")
def get_numpy(tensor_proto):
    """Grab data in TensorProto and convert to numpy array."""
    try:
        # Imported lazily so onnx is only required when actually converting models.
        from onnx.numpy_helper import to_array
    except ImportError as e:
        raise ImportError(f"Unable to import onnx which is required {e}")
    return to_array(tensor_proto)
def get_type(elem_type):
    """Converts onnx integer datatype to numpy datatype"""
    # Strings are already dtype names and need no conversion.
    if isinstance(elem_type, str):
        return elem_type

    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError as e:
        raise ImportError(f"Unable to import onnx which is required {e}")
    try:
        from onnx import TensorProto
    except ImportError as e:
        raise ImportError(f"Unable to import TensorProto from onnx {e}")

    # numpy has no bfloat16, so onnx's table maps BFLOAT16 to float16; TVM does
    # support bfloat16, hence the explicit override before the table lookup.
    if elem_type == int(TensorProto.BFLOAT16):
        return "bfloat16"
    return str(TENSOR_TYPE_TO_NP_TYPE[elem_type])
def get_info(info_proto):
    """Extract the shape from a ValueInfoProto."""
    tensor_type = info_proto.type.tensor_type
    shape = []
    shape_name = []
    for dim in tensor_type.shape.dim:
        dim_value = dim.dim_value
        if dim_value is None or dim_value == 0:
            # Unknown/symbolic dimension: use relay Any and remember its name.
            shape.append(_ty.Any())
            shape_name.append(dim.dim_param)
        else:
            shape.append(dim_value)
            shape_name.append(dim_value)

    dtype = get_type(tensor_type.elem_type) if tensor_type.elem_type else None
    return info_proto.name, shape, dtype, shape_name
def dimension_picker(prefix, suffix=""):
    """Check that dimensions are supported."""

    def _impl(attr):
        ndims = len(attr["kernel_shape"])
        if ndims in (1, 2, 3):
            # e.g. prefix "conv" with a 2D kernel gives "conv2d" + suffix.
            return f"{prefix}{ndims}d{suffix}"
        op_name = prefix + "1d/2d/3d"
        msg = f"Only 1D, 2D, and 3D kernels are supported for operator {op_name}."
        raise tvm.error.OpAttributeInvalid(msg)

    return _impl
def revert_caffe2_pad(pads):
    """Caffe2 requires two times the normal padding."""
    if len(pads) == 4:
        # Four values double up the two real pads; keep the first pair.
        return pads[:2]
    if len(pads) == 2:
        return pads
    raise tvm.error.OpAttributeInvalid("Number of pads must be either 2 or 4.")
def get_pad_pair(input1d, kernel1d, stride1d, mode):
    """infer pad size"""
    remainder = input1d % stride1d
    overlap = kernel1d - stride1d if remainder == 0 else kernel1d - remainder
    total = max(overlap, 0)
    before = total // 2
    after = total - before
    # SAME_LOWER puts the larger half (when odd) at the beginning.
    return [after, before] if "LOWER" in mode else [before, after]
def onnx_default_layout(dims, op_name):
    """Return the default channel-first layout string for a spatial rank."""
    layouts = {1: "NCW", 2: "NCHW", 3: "NCDHW"}
    if dims in layouts:
        return layouts[dims]
    msg = f"Only 1D, 2D and 3D layouts are currently supported for operator {op_name}."
    raise tvm.error.OpAttributeInvalid(msg)
def onnx_storage_order2layout(storage_order, dims, op_name):
    """converter of onnx storage order parameter to tvm storage order format"""
    if storage_order not in (0, 1):
        raise tvm.error.OpAttributeInvalid("Mode of storage_order must be either 0 or 1")

    # Index 0 is channel-first, index 1 is channel-last.
    layouts = {1: ("NCW", "NWC"), 2: ("NCHW", "NHWC"), 3: ("NCDHW", "NDHWC")}
    if dims in layouts:
        return layouts[dims][storage_order]
    msg = f"Only 1D, 2D and 3D layouts are currently supported for operator {op_name}."
    raise tvm.error.OpAttributeInvalid(msg)
def dimension_constraint():
    """Build an AttrCvt custom check accepting only 1d/2d/3d kernels."""

    def _dim_check(attrs):
        return len(attrs["kernel_shape"]) in (1, 2, 3)

    return _dim_check, "Only 1d, 2d and 3d kernel supported."
def get_scalar(x, params, dtype="float32"):
    """Helper to get a scalar value for Quantized operators."""
    # Known parameters are materialized directly as constants.
    if isinstance(x, _expr.Var) and x.name_hint in params:
        return _op.const(params[x.name_hint].numpy(), dtype)

    rank = len(infer_shape(x))
    assert rank <= 1, "scale and zero_point input must be scalars"
    # A length-one rank-1 tensor is squeezed down to a scalar.
    scalar = _op.squeeze(x, [0]) if rank == 1 else x
    return _op.cast(scalar, dtype)
def get_scalar_or_1d_tensor(x, params, dtype="float32"):
    """Helper to get a scalar value or 1D tensor for Quantized operators."""
    # Known parameters are materialized directly as constants.
    if isinstance(x, _expr.Var) and x.name_hint in params:
        return _op.const(params[x.name_hint].numpy(), dtype)

    assert len(infer_shape(x)) <= 1, "scale and zero_point input must be scalars or 1D tensors"
    return _op.cast(x, dtype)
def flatten_to_nd(x, x_shape, nd=3):
    """Flatten input tensor to nd rank"""
    ndims = infer_shape(x_shape)[0]
    if ndims == nd:
        return x
    # Collapse all leading dims into one (-1) while keeping the last nd-1 dims.
    leading = _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype)
    trailing = _op.strided_slice(x_shape, [ndims - nd + 1], [ndims])
    newshape = _op.concatenate([leading, trailing], 0)
    return _op.reshape(x, fold_constant(newshape))
def matmul_out_dtype(inputs, out_dtype):
    """Common function to handle MatMul and MatMulInteger16

    Handles the full numpy-style matmul contract: batched (>2D) operands with
    broadcasting, 1D operands (promoted then squeezed), and the plain 2D case.
    """
    a_shape = shape_of(inputs[0])
    a_rank = infer_shape(a_shape)[0]
    b_shape = shape_of(inputs[1])
    b_rank = infer_shape(b_shape)[0]
    if a_rank > 2 or b_rank > 2:
        # Determine the output batch dimension.
        # Prepend 1s to the lower-rank shape so both shapes have equal rank,
        # then take the elementwise maximum of the batch dims (broadcasting).
        new_a_shape = a_shape
        new_b_shape = b_shape
        if a_rank > b_rank:
            rank_diff = a_rank - b_rank
            new_b_shape = _op.concatenate(
                [
                    _expr.const([1] * rank_diff, dtype=infer_type(b_shape).checked_type.dtype),
                    b_shape,
                ],
                0,
            )
        elif a_rank < b_rank:
            rank_diff = b_rank - a_rank
            new_a_shape = _op.concatenate(
                [
                    _expr.const([1] * rank_diff, dtype=infer_type(a_shape).checked_type.dtype),
                    a_shape,
                ],
                0,
            )
        else:
            pass
        out_batch = _op.concatenate(
            [
                _op.maximum(
                    _op.strided_slice(new_b_shape, [i], [i + 1]),
                    _op.strided_slice(new_a_shape, [i], [i + 1]),
                )
                for i in range(max(a_rank, b_rank) - 2)
            ],
            0,
        )
        b_type = infer_type(inputs[1])
        # Convert to dense if the second matrix is 2d and non-dynamic
        if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
            a = flatten_to_nd(inputs[0], a_shape, 2)
            b = _op.transpose(inputs[1])
            output = _op.nn.dense(a, b, out_dtype=out_dtype)
        else:
            a = inputs[0]
            b = inputs[1]
            # broadcast a and b
            a_broadcasted_shape = fold_constant(
                _op.concatenate([out_batch, _op.strided_slice(a_shape, [a_rank - 2], [a_rank])], 0)
            )
            b_broadcasted_shape = fold_constant(
                _op.concatenate([out_batch, _op.strided_slice(b_shape, [b_rank - 2], [b_rank])], 0)
            )
            # Only insert broadcast_to when the shape actually changes.
            if not tvm.ir.structural_equal(a_shape, a_broadcasted_shape):
                a = _op.transform.broadcast_to(a, a_broadcasted_shape)
            if not tvm.ir.structural_equal(b_shape, b_broadcasted_shape):
                b = _op.transform.broadcast_to(b, b_broadcasted_shape)
            # Convert a and b into 3 dimensional tensors.
            a = flatten_to_nd(a, shape_of(a), 3)
            b = flatten_to_nd(b, shape_of(b), 3)
            if ONNX_DEFAULT_CONFIGS["use_nt_batch_matmul"]:
                # Transpose matrix dimensions of b.
                bt = _op.transpose(b, [0, 2, 1])
                # Perform a NT batch matmul.
                output = _op.nn.batch_matmul(a, bt, out_dtype=out_dtype)
            else:
                # Perform a NN batch matmul.
                output = _op.nn.batch_matmul(a, b, out_dtype=out_dtype, transpose_b=False)
        # Reshape output to original dimensions.
        # Final shape: broadcast batch dims + a's rows + b's columns.
        final_shape = _op.concatenate(
            [
                out_batch,
                _op.strided_slice(
                    a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]
                ),
                _op.strided_slice(
                    b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]
                ),
            ],
            0,
        )
        return _op.reshape(output, fold_constant(final_shape))
    if a_rank == 1 or b_rank == 1:
        # Promote 1D operands to matrices, multiply, then squeeze the
        # inserted axes back out (numpy matmul semantics).
        axis = []
        if a_rank == 1:
            lhs = _op.expand_dims(inputs[0], axis=0)
            axis.append(0)
        else:
            lhs = inputs[0]
        if b_rank == 1:
            rhs = _op.expand_dims(inputs[1], axis=1)
            axis.append(-1)
        else:
            rhs = inputs[1]
        return _op.squeeze(_op.nn.matmul(lhs, rhs), axis=axis)
    # Otherwise a simple dense op will get the job done.
    input_1_t = _op.transpose(inputs[1], axes=(1, 0))
    return _op.nn.dense(inputs[0], input_1_t, out_dtype=out_dtype)
def qmatmul(
    a,
    b,
    a_zp_scalar,
    b_zp_scalar,
    a_scale_scalar,
    b_scale_scalar,
    transform_num_hidden_units,
    matmul_result_dtype,
):
    """
    Helper function to handle QLinearMatMul
    It is very close to 'matmul_out_dtype' but separated due to
    differences in signatures of dense, matmul, batch_matmul of nn and qnn.
    They requre scaling and zero point arguments
    """
    a_shape = shape_of(a)
    a_rank = infer_shape(a_shape)[0]
    b_shape = shape_of(b)
    b_rank = infer_shape(b_shape)[0]
    if a_rank > 2 or b_rank > 2:
        # Determine the output batch dimension.
        # Prepend 1s to the lower-rank shape so both shapes have equal rank,
        # then take the elementwise maximum of the batch dims (broadcasting).
        new_a_shape = a_shape
        new_b_shape = b_shape
        if a_rank > b_rank:
            rank_diff = a_rank - b_rank
            new_b_shape = _op.concatenate(
                [
                    _expr.const([1] * rank_diff, dtype=infer_type(b_shape).checked_type.dtype),
                    b_shape,
                ],
                0,
            )
        elif a_rank < b_rank:
            rank_diff = b_rank - a_rank
            new_a_shape = _op.concatenate(
                [
                    _expr.const([1] * rank_diff, dtype=infer_type(a_shape).checked_type.dtype),
                    a_shape,
                ],
                0,
            )
        else:
            pass
        out_batch = _op.concatenate(
            [
                _op.maximum(
                    _op.strided_slice(new_b_shape, [i], [i + 1]),
                    _op.strided_slice(new_a_shape, [i], [i + 1]),
                )
                for i in range(max(a_rank, b_rank) - 2)
            ],
            0,
        )
        b_type = infer_type(b)
        # Convert to dense if the second matrix is 2d and non-dynamic
        if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
            a = flatten_to_nd(a, a_shape, 2)
            b = _op.transpose(b)
            output = _qnn.op.dense(
                a,
                b,
                a_zp_scalar,
                b_zp_scalar,
                a_scale_scalar,
                b_scale_scalar,
                transform_num_hidden_units,
                matmul_result_dtype,
            )
        else:
            # broadcast a and b
            a_broadcasted_shape = fold_constant(
                _op.concatenate([out_batch, _op.strided_slice(a_shape, [a_rank - 2], [a_rank])], 0)
            )
            b_broadcasted_shape = fold_constant(
                _op.concatenate([out_batch, _op.strided_slice(b_shape, [b_rank - 2], [b_rank])], 0)
            )
            # Only insert broadcast_to when the shape actually changes.
            if not tvm.ir.structural_equal(a_shape, a_broadcasted_shape):
                a = _op.transform.broadcast_to(a, a_broadcasted_shape)
            if not tvm.ir.structural_equal(b_shape, b_broadcasted_shape):
                b = _op.transform.broadcast_to(b, b_broadcasted_shape)
            # Convert a and b into 3 dimensional tensors.
            a = flatten_to_nd(a, shape_of(a), 3)
            b = flatten_to_nd(b, shape_of(b), 3)
            # Transpose matrix dimensions of b.
            bt = _op.transpose(b, [0, 2, 1])
            # Perform a NT batch matmul.
            output = _qnn.op.batch_matmul(
                a, bt, a_zp_scalar, b_zp_scalar, a_scale_scalar, b_scale_scalar, matmul_result_dtype
            )
        # Reshape output to original dimensions.
        # Final shape: broadcast batch dims + a's rows + b's columns.
        final_shape = _op.concatenate(
            [
                out_batch,
                _op.strided_slice(a_shape, [a_rank - 2], [a_rank - 1]),
                _op.strided_slice(b_shape, [b_rank - 1], [b_rank]),
            ],
            0,
        )
        return _op.reshape(output, fold_constant(final_shape))
    if a_rank == 1:
        # TODO(vvchernov): There should be qnn.matmul but it is not implemented
        # return _op.squeeze(_qnn.op.matmul(_op.expand_dims(a, axis=0),
        #                                   b,
        #                                   a_zp_scalar,
        #                                   b_zp_scalar,
        #                                   a_scale_scalar,
        #                                   b_scale_scalar,
        #                                   transform_num_hidden_units,
        #                                   matmul_result_dtype,
        #                                   ),
        #                    axis=[0]
        #                    )
        return _op.squeeze(
            _qnn.op.dense(
                _op.expand_dims(a, axis=0),
                _op.transpose(b),
                a_zp_scalar,
                b_zp_scalar,
                a_scale_scalar,
                b_scale_scalar,
                transform_num_hidden_units,
                matmul_result_dtype,
            ),
            axis=[0],
        )
    # Otherwise a simple dense op will get the job done.
    return _qnn.op.dense(
        a,
        _op.transpose(b),
        a_zp_scalar,
        b_zp_scalar,
        a_scale_scalar,
        b_scale_scalar,
        transform_num_hidden_units,
        matmul_result_dtype,
    )
def layer_norm(x, eps, gamma, beta):
    """A common function to handle layer norm.
    Use LayerNormalization for the actual onnx op.
    """
    eps_dtype = infer_type(x).checked_type.dtype
    mean, variance = _op.mean_variance(x, axis=-1, keepdims=True)
    centered = _op.subtract(x, mean)
    denom = _op.sqrt(_op.add(variance, _op.const(eps, dtype=eps_dtype)))
    scaled = _op.multiply(_op.divide(centered, denom), gamma)
    # beta (shift) is optional.
    return scaled if beta is None else _op.add(scaled, beta)
def get_source_name(node, type_dict):
    """A helper function to get source information of onnx nodes."""
    if node.name:
        return node.name
    # Unnamed node: synthesize "<op_type>_<index>" from a per-type counter.
    op_idx = type_dict.get(node.op_type, -1) + 1
    type_dict[node.op_type] = op_idx
    # rewrite name property in case any revisiting occurs to current node
    node.name = f"{node.op_type}_{op_idx}"
    return node.name
def get_source_name_from_parameter(expr, name_sep="."):
    """A helper function to get source information of graph node from parameter."""
    if not expr.span:
        return None
    source_name = expr.span.source_name.name
    # discard variable/parameter name to get span of op node
    # e.g. conv2d.w -> conv2d
    if isinstance(expr, _expr.Var):
        postfix = f"{name_sep}{expr.name_hint}"
        source_name = source_name[: -len(postfix)]
    return source_name
def make_parameter_span(source_name_list, name_sep="."):
    """Join source names into a single span identifier for a parameter."""
    return name_sep.join(source_name_list)
class OnnxOpConverter(object):
    """A helper class for holding onnx op converters."""

    @classmethod
    def get_converter(cls, opset):
        """Get converter matches given opset.

        Parameters
        ----------
        opset: int
            opset from model.

        Returns
        -------
        converter, which should be `_impl_vx`. Number x is the biggest
        number smaller than or equal to opset belongs to all support versions.
        """
        # Every implemented version is a method named _impl_v<N>.
        supported = [int(name.replace("_impl_v", "")) for name in dir(cls) if "_impl_v" in name]
        # Insert the requested opset into the sorted list and take the entry
        # just before its last occurrence: the greatest supported version
        # that does not exceed the requested opset.
        ordered = sorted(supported + [opset])
        last_idx = max(i for i, v in enumerate(ordered) if v == opset)
        version = ordered[last_idx - 1]
        impl = getattr(cls, f"_impl_v{version}", None)
        if impl is not None:
            return impl
        raise NotImplementedError(f"opset version {version} of {cls.__name__} not implemented")
class Unary(OnnxOpConverter):
    """A helper class for unary op converters."""

    # Subclasses set this to the relay op name they map to.
    name = ""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 1, f"Unary math op {cls.name} takes 1 input, {len(inputs)} given"
        return get_relay_op(cls.name)(*inputs)
class Elemwise(OnnxOpConverter):
    """A helper class for elemwise op converters."""

    # Subclasses set this to the relay op name they map to.
    name = ""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, f"Math op {cls.name} take 2 inputs, {len(inputs)} given"
        conv_ops = ["conv2d", "conv2d_transpose"]
        if attr.get("broadcast", 0) and any(op in str(inputs[0]) for op in conv_ops):
            # TODO(zhreshold): remove hard coded infershape
            axis = int(attr.get("axis", 0))
            inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
        return get_relay_op(cls.name)(*inputs)
class Pool(OnnxOpConverter):
    """A helper class for pool op converters."""

    # Subclasses set this to the relay pool op prefix, e.g. "avg_pool".
    name = ""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)

        attr_cvt, data = cls._run_calculation(inputs, attr, params)
        out = attr_cvt([data], attr, params)

        # _run_calculation may have prepended a batch axis for inputs missing
        # one; squeeze it back out here.
        if ndim - len(attr["kernel_shape"]) == 1:
            out = _op.squeeze(out, axis=[0])
        return out

    @classmethod
    def _run_calculation(cls, inputs, attr, params):
        """Helper method to return the processed input data and AttrCvt object"""
        data = inputs[0]
        input_shape = infer_shape(data)
        input_dtype = infer_type(data).checked_type.dtype
        ndim = len(input_shape)
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                if cls.name == "avg_pool":
                    # avg_pool: compute explicit per-axis pads so padded zeros
                    # are excluded via the op's own padding handling.
                    pad_tuple = []
                    for axis in range(len(input_shape) - 2):
                        axis_shape = input_shape[2 + axis]
                        stride = attr.get("strides", [1] * ndim)[axis]
                        kernel = attr["kernel_shape"][axis]
                        pad = get_pad_pair(axis_shape, kernel, stride, attr["auto_pad"])
                        pad_tuple.append(pad)
                    pad_tuple = tuple([val for pair in zip(*pad_tuple) for val in pair])
                    attr["pads"] = pad_tuple
                else:
                    # Warning: Pool does not yet support dynamic shapes,
                    # one will need to run dynamic_to_static on this model after import
                    # Pad with the dtype's minimum so max-style pooling ignores it.
                    if "int" in input_dtype:
                        pad_val = np.iinfo(np.dtype(input_dtype)).min
                    else:
                        pad_val = np.finfo(np.dtype(input_dtype)).min
                    data = autopad(
                        data,
                        attr.get("strides", [1] * (ndim - 2)),
                        attr["kernel_shape"],
                        [1] * ndim,
                        pad_value=pad_val,
                        mode=attr["auto_pad"],
                    )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = (
                    f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator {cls.name} '
                    f"is invalid."
                )
                raise tvm.error.OpAttributeInvalid(msg)
            attr.pop("auto_pad")

        if "storage_order" in attr:
            attr["layout"] = onnx_storage_order2layout(
                attr["storage_order"], dims=(len(input_shape) - 2), op_name=cls.name
            )
        else:
            # Input without a batch axis: add one; _impl_v1 squeezes it later.
            if ndim - len(attr["kernel_shape"]) == 1:
                data = _op.expand_dims(data, axis=0)
                input_shape = [1] + list(input_shape)

            attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name=cls.name)

        return (
            AttrCvt(
                op_name=dimension_picker(cls.name),
                transforms={
                    "kernel_shape": "pool_size",
                    "pads": ("padding", 0),
                    "dilations": ("dilation", 1),
                },
                ignores=["storage_order"],
                custom_check=dimension_constraint(),
            ),
            data,
        )
class Absolute(Unary):
    """Operator converter for Absolute."""

    # Dispatched to relay's elementwise `abs` via Unary._impl_v1.
    name = "abs"
class Add(Elemwise):
    """Operator converter for Add."""

    # Dispatched to relay's elementwise `add` via Elemwise._impl_v1.
    name = "add"
class AveragePool(Pool):
    """Operator converter for AveragePool."""

    # Dispatched to relay's avg_pool{1,2,3}d via Pool._impl_v1.
    name = "avg_pool"
class QLinearAveragePool(Pool):
    """Operator converter for QLinearAveragePool from Microsoft onnxruntime contrib opset."""

    name = "avg_pool"

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        in_scale = get_scalar(inputs[1], params)
        in_zero = get_scalar(inputs[2], params, dtype="int32")
        out_scale = fold_constant(get_scalar(inputs[3], params))
        out_zero = get_scalar(inputs[4], params, dtype="int32")

        attr_cvt, data = cls._run_calculation(inputs, attr, params)
        input_dtype = infer_type(data).checked_type.dtype

        # Onnxruntime doesn't actually do this op in integer, they dequantize to fp32
        # and then requantize afer (according to documentation below)
        # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearAveragePool
        pooled = attr_cvt([_qnn.op.dequantize(data, in_scale, in_zero)], attr, params)
        return _qnn.op.quantize(pooled, out_scale, out_zero, out_dtype=input_dtype)
class BatchNorm(OnnxOpConverter):
    """Operator converter for BatchNorm."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # TODO(zhreshold): 'spatial' is not properly handled here.
        # TODO(vvchernov): 'training_mode' (onnx tag) is not correctly handled, ignore for now
        cvt = AttrCvt(
            op_name="batch_norm",
            ignores=["spatial", "is_test", "consumed_inputs", "momentum", "training_mode"],
        )
        out = cvt(inputs, attr, params)
        # We only support test mode, so we return data, moving_mean, moving_var,
        # and then moving_mean and moving_var again as placeholders for
        # the expected "saved_mean", "saved_var".
        return _expr.TupleWrapper(_expr.Tuple((*out, out[1], out[2])), 5)
class InstanceNorm(OnnxOpConverter):
    """Operator converter for InstanceNorm."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return AttrCvt(op_name="instance_norm")(inputs, attr, params)
class Conv(OnnxOpConverter):
    """Operator converter for Conv."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Use shape of input to determine convolution type.
        data = inputs[0]
        kernel = inputs[1]
        input_shape = infer_shape(data)
        ndim = len(input_shape)

        kernel_type = infer_type(inputs[1])
        kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]

        # Fall back to the weight tensor's spatial dims when the attribute is absent.
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = kernel_shapes[0][2:]

        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                data = autopad(
                    data,
                    attr.get("strides", [1] * (ndim - 2)),
                    attr["kernel_shape"],
                    attr.get("dilations", [1] * (ndim - 2)),
                    mode=attr["auto_pad"],
                )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = [0 for i in range(ndim - 2)]
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = (
                    f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv '
                    f"is invalid."
                )
                raise tvm.error.OpAttributeInvalid(msg)
            # Drop auto_pad so AttrCvt does not see the unhandled attribute.
            attr.pop("auto_pad")

        # Output channels come from the weight tensor's leading dim.
        attr["channels"] = kernel_shapes[0][0]
        out = AttrCvt(
            op_name=dimension_picker("conv"),
            transforms={
                "kernel_shape": "kernel_size",
                "dilations": ("dilation", 1),
                "pads": ("padding", 0),
                "group": ("groups", 1),
            },
            custom_check=dimension_constraint(),
        )([data, kernel], attr, params)

        # Optional third input is the bias.
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class ConvTranspose(OnnxOpConverter):
    """Operator converter for ConvTranspose."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # get number of channels
        out_type = infer_type(inputs[1])
        kernel_shape = [get_const_tuple(out_type.checked_type.shape)]
        # Weight layout is (in_channels, out_channels/groups, *spatial).
        out_channels = kernel_shape[0][1] * attr.get("group", 1)
        attr["channels"] = out_channels
        groups = attr.get("group", 1)
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = kernel_shape[0][2:]
        attr["groups"] = groups
        # infer pads for auto_pad
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        if "auto_pad" in attr or "output_shape" in attr:
            if "auto_pad" in attr:
                attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if "output_shape" in attr or attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                kernel_shape = attr["kernel_shape"]
                kndim = len(kernel_shape)
                dilations = attr.get("dilations", [1] * kndim)
                output_padding = attr.get("output_padding", [0] * kndim)
                strides = attr["strides"]
                total_pad = [0] * kndim
                # https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConvTranspose
                if "output_shape" in attr:
                    # Solve the spec's output-size formula for total padding.
                    for i in range(kndim):
                        total_pad[i] = (
                            strides[i] * (input_shape[ndim - kndim + i] - 1)
                            + output_padding[i]
                            + ((kernel_shape[i] - 1) * dilations[i] + 1)
                            - attr["output_shape"][i]
                        )
                    left = [p // 2 for p in total_pad]
                    right = [total_pad[i] - left[i] for i in range(kndim)]
                    # SAME_LOWER puts the extra padding on the beginning side.
                    if "output_shape" in attr and "auto_pad" not in attr:
                        pad = right + left
                    elif "LOWER" in attr["auto_pad"]:
                        pad = left + right
                    else:
                        pad = right + left
                    attr["pads"] = pad
                else:
                    # SAME_* without output_shape: pad the data tensor itself.
                    data = autopad(
                        data,
                        attr.get("strides", [1] * (ndim - 2)),
                        attr["kernel_shape"],
                        attr.get("dilations", [1] * (ndim - 2)),
                        deconv=True,
                        mode=attr["auto_pad"],
                    )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = (
                    f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv '
                    f"is invalid."
                )
                raise tvm.error.OpAttributeInvalid(msg)
            if "auto_pad" in attr:
                attr.pop("auto_pad")
        out = AttrCvt(
            op_name=dimension_picker("conv", "_transpose"),
            transforms={
                "kernel_shape": "kernel_size",
                "dilations": ("dilation", 1),
                "pads": ("padding", 0),
                "group": ("groups", 1),
            },
            disables=["output_shape"],
            custom_check=dimension_constraint(),
        )([data, inputs[1]], attr, params)
        # Optional third input is the bias vector.
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # get number of channels
        out_type = infer_type(inputs[1])
        kernel_shape = [get_const_tuple(out_type.checked_type.shape)]
        out_channels = kernel_shape[0][1] * attr.get("group", 1)
        attr["channels"] = out_channels
        groups = attr.get("group", 1)
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = kernel_shape[0][2:]
        attr["groups"] = groups
        # infer pads for auto_pad
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        if "auto_pad" in attr or "output_shape" in attr:
            if "auto_pad" in attr:
                attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if "output_shape" in attr or attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                kernel_shape = attr["kernel_shape"]
                kndim = len(kernel_shape)
                dilations = attr.get("dilations", [1] * kndim)
                output_padding = attr.get("output_padding", [0] * kndim)
                strides = attr["strides"]
                total_pad = [0] * kndim
                # https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConvTranspose
                if "output_shape" in attr:
                    # Total padding derived from the requested output shape.
                    for i in range(kndim):
                        total_pad[i] = (
                            strides[i] * (input_shape[ndim - kndim + i] - 1)
                            + output_padding[i]
                            + ((kernel_shape[i] - 1) * dilations[i] + 1)
                            - attr["output_shape"][i]
                        )
                else:
                    # SAME_*: output spatial size equals input size * stride.
                    for i in range(kndim):
                        total_pad[i] = (
                            output_padding[i]
                            + ((kernel_shape[i] - 1) * dilations[i] + 1)
                            - strides[i]
                        )
                left = [p // 2 for p in total_pad]
                right = [total_pad[i] - left[i] for i in range(kndim)]
                # SAME_LOWER puts the extra padding on the beginning side.
                if "output_shape" in attr and "auto_pad" not in attr:
                    pad = right + left
                elif "LOWER" in attr["auto_pad"]:
                    pad = left + right
                else:
                    pad = right + left
                attr["pads"] = pad
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = (
                    f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv '
                    f"is invalid."
                )
                raise tvm.error.OpAttributeInvalid(msg)
            if "auto_pad" in attr:
                attr.pop("auto_pad")
        out = AttrCvt(
            op_name=dimension_picker("conv", "_transpose"),
            transforms={
                "kernel_shape": "kernel_size",
                "dilations": ("dilation", 1),
                "pads": ("padding", 0),
                "group": ("groups", 1),
            },
            disables=["output_shape"],
            custom_check=dimension_constraint(),
        )([data, inputs[1]], attr, params)
        # Optional third input is the bias vector.
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class GlobalAveragePool(OnnxOpConverter):
    """Operator converter for GlobalAveragePool"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Dispatch on tensor rank: 3D/4D/5D inputs map to 1d/2d/3d pooling.
        pool_by_rank = {
            3: _op.nn.global_avg_pool1d,
            4: _op.nn.global_avg_pool2d,
            5: _op.nn.global_avg_pool3d,
        }
        rank = len(infer_shape(inputs[0]))
        if rank in pool_by_rank:
            return pool_by_rank[rank](inputs[0])
        raise NotImplementedError(
            "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
            % (rank - 2)
        )
class QLinearGlobalAveragePool(OnnxOpConverter):
    "Operator converter for QLinearGlobalAveragePool from Microsoft onnxruntime contrib opset."

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        rank = len(infer_shape(inputs[0]))
        x_scale = get_scalar(inputs[1], params)
        x_zero_point = get_scalar(inputs[2], params, dtype="int32")
        y_scale = fold_constant(get_scalar(inputs[3], params))
        y_zero_point = get_scalar(inputs[4], params, dtype="int32")
        input_dtype = infer_type(inputs[0]).checked_type.dtype
        # Onnxruntime documentation does not mention that this global avg_pool should follow the
        # sequence dequantize -> float op -> quantize, but that is how QLinearAveragePool is done.
        #
        # This op also follows the same pattern since qnn op is not available right now.
        # TODO: Generate QNN op to perform quantized operation instead of dequant -> op -> quant
        dequantized = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)
        pool_by_rank = {
            3: _op.nn.global_avg_pool1d,
            4: _op.nn.global_avg_pool2d,
            5: _op.nn.global_avg_pool3d,
        }
        if rank not in pool_by_rank:
            raise NotImplementedError(
                "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
                % (rank - 2)
            )
        pooled = pool_by_rank[rank](dequantized)
        return _qnn.op.quantize(pooled, y_scale, y_zero_point, out_dtype=input_dtype)
class GlobalMaxPool(OnnxOpConverter):
    """Operator converter for GlobalMaxPool"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Dispatch on tensor rank: 3D/4D/5D inputs map to 1d/2d/3d pooling.
        pool_by_rank = {
            3: _op.nn.global_max_pool1d,
            4: _op.nn.global_max_pool2d,
            5: _op.nn.global_max_pool3d,
        }
        rank = len(infer_shape(inputs[0]))
        if rank in pool_by_rank:
            return pool_by_rank[rank](inputs[0])
        raise NotImplementedError(
            "Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
            % (rank - 2)
        )
class Div(Elemwise):
    """Operator converter for Divide."""

    # Elemwise base class implements broadcasting conversion; only the
    # relay op name differs per subclass.
    name = "divide"
class Elu(OnnxOpConverter):
    """Operator converter for Elu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # elu(x) = x for x > 0, alpha * (exp(x) - 1) for x <= 0, expressed
        # with relu as: -alpha * relu(1 - exp(x)) + relu(x).
        x = inputs[0]
        alpha = float(attr.get("alpha", 1.0))
        negative_part = _op.nn.relu(_expr.const(1.0) - _op.exp(x))
        positive_part = _op.nn.relu(x)
        return _expr.const(-alpha) * negative_part + positive_part
class Gelu(OnnxOpConverter):
    """Operator converter for Gelu from Microsoft onnxruntime contrib opset.
    gelu(x) = 0.5x(1 + erf(x/sqrt(2)))
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        x = inputs[0]
        # Constants share the input's dtype so the graph stays well-typed.
        dtype = infer_type(x).checked_type.dtype
        half_x = _op.multiply(_expr.const(0.5, dtype=dtype), x)
        erf_term = _op.erf(_op.divide(x, _expr.const(math.sqrt(2), dtype=dtype)))
        gate = _op.add(_expr.const(1.0, dtype=dtype), erf_term)
        return _op.multiply(half_x, gate)
class FastGelu(OnnxOpConverter):
    """Operator converter for FastGelu from Microsoft onnxruntime contrib opset.
    fast_gelu(x) = 0.5x(1 + tanh(sqrt(2/pi)(x + 0.044715x^3)))
                 = 0.5x(1 + tanh(c1 * x + c2 * x^3))
    where c1 = sqrt(2/pi) and c2 = 0.044715 * sqrt(2/pi).
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        x = inputs[0]
        # Optional 1D bias is folded into the input before the activation.
        if inputs[1]:
            bias = inputs[1]
            bias_shape = infer_shape(bias)
            assert len(bias_shape) == 1, "bias term must be a 1D tensor"
            x += bias
        dtype = infer_type(x).checked_type.dtype
        c1 = _expr.const(math.sqrt(2 / math.pi), dtype=dtype)
        c2 = _expr.const(0.044715 * math.sqrt(2 / math.pi), dtype=dtype)
        # tanh argument: c1 * x + c2 * x^3
        cubic = _op.multiply(c2, _op.power(x, _expr.const(3, dtype)))
        gate = _op.tanh(_op.add(_op.multiply(c1, x), cubic))
        half_x = _op.multiply(_expr.const(0.5, dtype=dtype), x)
        return _op.multiply(half_x, _op.add(_expr.const(1.0, dtype=dtype), gate))
class BiasGelu(OnnxOpConverter):
    """Operator converter for BiasGelu from Microsoft onnxruntime contrib opset.
    bias_gelu(x, b) = 0.5(x + b)(1 + erf((x + b)/sqrt(2)))
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, bias = inputs[0], inputs[1]
        assert len(infer_shape(bias)) == 1, "BiasGelu bias term must be a 1D tensor"
        # Delegate to Gelu with the bias pre-added to the input.
        return Gelu._impl_v1([_op.add(data, bias)], attr, params)
class LayerNormalization(OnnxOpConverter):
    """Operator converter for LayerNormalization from Microsoft onnxruntime contrib opset."""

    @classmethod
    def _impl_v17(cls, inputs, attr, params):
        x, gamma, beta = inputs[0], inputs[1], inputs[2]
        eps = attr.get("epsilon", 1e-5)
        # Per the ONNX spec, the scalar axis (default -1) selects the suffix of
        # axes (axis, ..., rank - 1) over which mean/variance are computed; see
        # https://github.com/onnx/onnx/blob/main/docs/Changelog.md#layernormalization-17
        rank = len(infer_shape(x))
        axis_attr = attr.get("axis", -1)
        start = axis_attr if axis_attr >= 0 else rank + axis_attr
        reduce_axes = tuple(range(start, rank))
        dtype = infer_type(x).checked_type.dtype
        mean = _op.mean(x, reduce_axes, keepdims=True)
        var = _op.variance(x, reduce_axes, keepdims=True, with_mean=mean)
        inv_stdev = _op.divide(
            _op.const(1, dtype=dtype), _op.sqrt(_op.add(var, _op.const(eps, dtype=dtype)))
        )
        normalized = _op.multiply(_op.subtract(x, mean), inv_stdev)
        ln = _op.multiply(normalized, gamma)
        if beta is not None:
            ln = _op.add(ln, beta)
        # The op returns (output, mean, inv_std_dev).
        return _expr.TupleWrapper(_expr.Tuple([ln, mean, inv_stdev]), 3)
class EmbedLayerNormalization(OnnxOpConverter):
    """Operator converter for EmbedLayerNormalization from Microsoft onnxruntime contrib opset.
    This layer embeds the input tokens, sums them, and applies layer normalization.
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Returns a 2-tuple (layer_norm_output, mask_index).

        Per the ORT contrib spec, segment_ids, segment_emb, mask and pos_ids
        are optional inputs and may be None.
        """
        input_ids = inputs[0]
        segment_ids = inputs[1]
        word_emb = inputs[2]
        pos_emb = inputs[3]
        segment_emb = inputs[4]
        gamma = inputs[5]
        beta = inputs[6]
        mask = inputs[7]
        pos_ids = inputs[8]
        eps = attr.get("epsilon", 1e-12)
        (batch_size, seq_len) = infer_shape(input_ids)
        if segment_ids:
            assert segment_emb
        # Default position ids are [0, 1, ..., seq_len - 1] for every batch row.
        if pos_ids is None:
            pos_ids = _op.const([list(range(seq_len))] * batch_size, dtype="int32")
        word_vec = _op.take(word_emb, input_ids, axis=0)
        pos_vec = _op.take(pos_emb, pos_ids, axis=0)
        vec_sum = _op.add(word_vec, pos_vec)
        # Gather segment embeddings only when segment ids were provided. The
        # previous code called _op.take(segment_emb, segment_ids) unconditionally,
        # which crashed when the optional segment inputs were absent (None).
        if segment_ids:
            segment_vec = _op.take(segment_emb, segment_ids, axis=0)
            vec_sum = _op.add(vec_sum, segment_vec)
        ln = layer_norm(vec_sum, eps, gamma, beta)
        # mask_index counts non-padding tokens per sentence; defaults to zeros.
        mask_index = _op.const(np.zeros((batch_size,), dtype="int32"))
        if mask:
            # calculate number of words per sentence
            mask_index = _op.sum(mask, axis=1)
        # TODO(@anwang2009): onnxruntime v1.10.0 requires a third output of vec_sum
        return _expr.TupleWrapper(_expr.Tuple([ln, mask_index]), 2)
class SkipLayerNormalization(OnnxOpConverter):
    """Operator converter for SkipLayerNormalization from Microsoft onnxruntime contrib opset.
    This layer sums the two input tensors (along with optional bias), and applies layer
    normalization.
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, skip = inputs[0], inputs[1]
        gamma, beta, bias = inputs[2], inputs[3], inputs[4]
        assert (
            beta is not None and bias is not None
        ), "SkipLayerNormalization import currently only supports required beta and bias"
        eps = attr.get("epsilon", 1e-12)
        summed = _op.add(data, skip)
        # Redundant under the assert above, but kept so behavior is identical
        # when asserts are stripped (python -O).
        if bias is not None:
            summed = _op.add(summed, bias)
        output = layer_norm(summed, eps, gamma, beta)
        # onnxruntime doesn't compute the other outputs, despite the documentation
        placeholder = _op.const(0, dtype="float32")
        return _expr.TupleWrapper(_expr.Tuple([output, placeholder, placeholder]), 3)
class OrtAttentionBase:
    """
    Base class for Attention and QAttention from Microsoft onnxruntime contrib opset.
    """

    @classmethod
    def _check_input_embeddings(cls, input_emb, valid_types, **kwargs):
        # Validate dtype and the 3D (batch, seq, input_hidden) layout, then
        # return the three unpacked dimensions.
        assert infer_type(input_emb).checked_type.dtype in valid_types
        assert (
            len(infer_shape(input_emb)) == 3
        ), "Input should be 3D tensor with shape (batch_size, sequence_length, input_hidden_size)"
        (batch_size, seq_len, input_hidden) = infer_shape(input_emb)
        assert input_hidden > 0, (
            "The weight tensor has (input_hidden_size, 3 * output_hidden_size) shape, so it doesn't"
            f" make sense to have ({input_hidden}, 3 * output_hidden_size) weight tensor."
        )
        assert seq_len > 0, (
            "The output tensor has (batch_size, sequence_length, hidden_size) shape,"
            f" so it doesn't make sense to have (batch_size, {seq_len}, hidden_size) output."
        )
        return batch_size, seq_len, input_hidden

    @classmethod
    def _check_weights(cls, weight, valid_types, **kwargs):
        # Validate the merged QKV weight (input_hidden, 3 * hidden) and return
        # (3 * hidden, hidden, head_size). Requires kwargs: input_hidden, num_heads.
        assert infer_type(weight).checked_type.dtype in valid_types
        assert len(infer_shape(weight)) == 2, (
            "Weight should be 2D input tensor with shape (input_hidden_size, 3 * hidden_size), "
            "hidden_size = num_heads * head_size"
        )
        (input_hidden_weight, out_hidden_x3) = infer_shape(weight)
        assert kwargs["input_hidden"] == input_hidden_weight
        assert out_hidden_x3 % 3 == 0, "output hidden shape should be divisible by 3: W_Q, W_K, W_V"
        out_hidden = out_hidden_x3 // 3
        assert (
            out_hidden % kwargs["num_heads"] == 0
        ), "output hidden size should be divisible by number of attention heads"
        head_size = out_hidden // kwargs["num_heads"]
        return out_hidden_x3, out_hidden, head_size

    @classmethod
    def _check_bias(cls, bias, valid_types, **kwargs):
        # Bias must be 1D and match the merged QKV weight's output dimension.
        assert infer_type(bias).checked_type.dtype in valid_types
        assert (
            len(infer_shape(bias)) == 1
        ), "Bias should be 1D input tensor with shape (3 * hidden_size)"
        (out_hidden_x3_bias,) = infer_shape(bias)
        assert kwargs["out_hidden_x3"] == out_hidden_x3_bias

    @classmethod
    def _check_mask_index(cls, mask_index, valid_types, **kwargs):
        # Only the (batch, past_seq + seq) raw-mask layout is supported;
        # returns the total (past + current) sequence length.
        assert infer_type(mask_index).checked_type.dtype in valid_types
        mask_index_shape = infer_shape(mask_index)
        assert (
            len(mask_index_shape) == 2
            and mask_index_shape[0] == kwargs["batch_size"]
            and mask_index_shape[1] >= kwargs["seq_len"]
        ), "currently only support (batch_size, past_sequence_len + sequence_length) mask index"
        return mask_index_shape[1]

    @classmethod
    def _check_past(cls, past, valid_types, **kwargs):
        # past is the stacked (K, V) cache:
        # (2, batch, num_heads, past_seq, head_size); returns past_seq.
        assert infer_type(past).checked_type.dtype in valid_types
        past_shape = infer_shape(past)
        assert len(past_shape) == 5, "past should be 5D tensor"
        assert (
            past_shape[0] == 2
            and past_shape[1] == kwargs["batch_size"]
            and past_shape[2] == kwargs["num_heads"]
            and past_shape[3] + kwargs["seq_len"] == kwargs["total_seq_len"]
            and past_shape[4] == kwargs["head_size"]
        )
        past_seq_len = past_shape[3]
        return past_seq_len

    @classmethod
    def _split_into_heads(cls, tensor, batch_size, seq_len, num_heads, head_size):
        """
        In the implementation of Multi-head attention we just split queries, keys, and values
        we compute for a single-head attention into several parts:
        (batch_size, num_heads, seq_len, head_size)
        """
        tensor = _op.reshape(tensor, (batch_size, seq_len, num_heads, head_size))
        # (batch_size, num_heads, seq_len, head_size)
        tensor = _op.transpose(tensor, axes=[0, 2, 1, 3])
        return tensor

    @classmethod
    def _merge_first_dimensions(cls, tensor):
        """
        nn.batch_matmul is expecting 3D tensor:
        (batch_size * num_heads, past_seq_len + seq_len, head_size)
        """
        return _op.reverse_reshape(tensor, (-1, 0, 0))

    @classmethod
    def _create_unidirectional_mask(cls, left_value, right_value, past_seq_len, seq_len, dtype):
        """
        Build a causal (lower-triangular) mask constant of shape
        (1, 1, seq_len, past_seq_len + seq_len):

        [lhs rhs rhs ... rhs rhs]
        [lhs lhs rhs ... rhs rhs]
        [lhs lhs lhs ... rhs rhs]
        .........................
        [lhs lhs lhs ... lhs rhs]
        [lhs lhs lhs ... lhs lhs]
        """
        numpy_unidirectional_mask = np.array(
            [
                np.concatenate(
                    [
                        np.full(past_seq_len + s_i + 1, left_value),
                        np.full(seq_len - s_i - 1, right_value),
                    ]
                )
                for s_i in range(seq_len)
            ]
        )
        unidirectional_mask = _op.const(numpy_unidirectional_mask, dtype=dtype)
        unidirectional_mask = _op.expand_dims(unidirectional_mask, 0, num_newaxis=2)
        return unidirectional_mask

    @classmethod
    def _compute_attention(cls, Q, K, V, mask_index, **kwargs):
        # Scaled dot-product attention: softmax(Q K^T / sqrt(head_size) + mask) V.
        # Q, K, V are merged to (batch * num_heads, ..., head_size).
        # Compute Attention scores
        att_scores = _op.nn.batch_matmul(Q, K, transpose_a=False, transpose_b=True)
        score_dtype = infer_type(att_scores).checked_type.dtype
        att_scores = _op.divide(
            att_scores,
            _op.const(
                np.sqrt(kwargs["head_size"]), dtype=infer_type(att_scores).checked_type.dtype
            ),
        )
        att_scores = _op.reshape(
            att_scores,
            (
                kwargs["batch_size"],
                kwargs["num_heads"],
                kwargs["seq_len"],
                kwargs["past_seq_len"] + kwargs["seq_len"],
            ),
        )
        # Build the attention mask
        att_mask = _op.cast(mask_index, score_dtype)
        # Attention mask has value 0 or 1. Here we convert 0 to -10000, and 1 to 0.
        att_mask = _op.subtract(_op.const(1, dtype=score_dtype), att_mask)
        att_mask = _op.multiply(att_mask, _op.const(-10000, dtype=score_dtype))
        # Expand for att_scores broadcast
        # (batch_size, past_seq_len + seq_len) -> (batch_size, 1, seq_len, past_seq_len + seq_len)
        att_mask = _op.expand_dims(att_mask, 1, num_newaxis=2)
        att_mask = _op.concatenate([att_mask] * kwargs["seq_len"], axis=2)
        if kwargs["unidirectional"]:
            # Add -10000 above the diagonal so future positions are masked out.
            att_mask = _op.add(
                att_mask,
                cls._create_unidirectional_mask(
                    0, -10000, kwargs["past_seq_len"], kwargs["seq_len"], score_dtype
                ),
            )
        # Apply the mask
        att_scores = _op.add(att_scores, att_mask)
        # TODO(agladyshev):
        # Comment from ORT source code (onnxruntime/contrib_ops/cpu/bert/attention_cpu_base.h):
        # "Fix unidirectional mask to be parity with huggingface implementation"
        if kwargs["unidirectional"]:
            att_scores = _op.multiply(
                att_scores,
                cls._create_unidirectional_mask(
                    1, 0, kwargs["past_seq_len"], kwargs["seq_len"], score_dtype
                ),
            )
            att_scores = _op.add(
                att_scores,
                _op.multiply(
                    att_mask,
                    cls._create_unidirectional_mask(
                        0, 1, kwargs["past_seq_len"], kwargs["seq_len"], score_dtype
                    ),
                ),
            )
        # Compute Softmax
        att_scores = _op.reshape(
            att_scores,
            (
                kwargs["batch_size"] * kwargs["num_heads"],
                kwargs["seq_len"],
                kwargs["past_seq_len"] + kwargs["seq_len"],
            ),
        )
        att_probs = _op.nn.softmax(att_scores, axis=-1)
        # Compute output
        output = _op.nn.batch_matmul(att_probs, V, transpose_a=False, transpose_b=False)
        # Un-merge heads and restore (batch, seq, out_hidden) layout.
        output = _op.reverse_reshape(output, (-1, kwargs["num_heads"], 0, 0))
        output = _op.transpose(output, axes=[0, 2, 1, 3])
        output = _op.reshape(output, (0, 0, kwargs["out_hidden"]))
        return output
class Attention(OrtAttentionBase, OnnxOpConverter):
    """Operator converter for Attention from Microsoft onnxruntime contrib opset.
    This is the self-attention mechanism used in transformer models.
    Returns a 2-tuple (output, present) where present is the updated K/V cache.
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # ************************* Read attrs *************************
        num_heads = attr["num_heads"]
        unidirectional = attr["unidirectional"]
        assert (
            "past_present_share_buffer" not in attr
        ), "share past and present buffers are not currently supported"
        assert (
            "qkv_hidden_sizes" not in attr
        ), "different hidden sizes for Q, K, V are not currently supported"
        # ************************* Read inputs *************************
        # (batch, seq, in_hidden)
        input_emb = inputs[0]
        # TODO(agladyshev):
        #   ORT documentation says:
        #       The weights for input projection of Q, K and V are merged.
        #       The data is stacked on the second dimension.
        #       Its shape is (input_hidden_size, hidden_size + hidden_size + v_hidden_size).
        #       Here hidden_size is the hidden dimension of Q and K, and v_hidden_size is that of V.
        #   However, in our case, we consider that hidden_size == v_hidden_size.
        # Therefore, weight has the following shape:
        # (in_hidden, 3 * out_hidden), where out_hidden = num_heads * head_size
        weight = inputs[1]
        # (3 * out_hidden,)
        bias = inputs[2]
        # 1. (    batch,              1,        max_seq, max_seq)
        # 2. (    batch, past_seq + seq,)
        # 3. (    batch,            seq, past_seq + seq,)
        # 4. (    batch,)
        # 5. (2 * batch,)
        # For now, we only support case 2.
        mask_index = inputs[3]
        # (2, batch, num_heads, past_seq, head_size)
        past = inputs[4]
        # (batch, num_heads, seq, seq)
        extra_add = inputs[5]
        assert extra_add is None, "extra add to QxK not currently supported"
        # When past_present_share_buffer is used,
        # it is required to specify past_sequence_length (could be 0)
        past_seq_len = inputs[6]
        assert past_seq_len is None, "past sequence length not currently supported"
        # ************************* Parse inputs *************************
        # Valid dtypes: t for float tensors, m for the int mask.
        t = ["float32", "float16"]
        m = ["int32"]
        # input
        batch_size, seq_len, input_hidden = cls._check_input_embeddings(input_emb, t)
        # weight
        out_hidden_x3, out_hidden, head_size = cls._check_weights(
            weight, t, num_heads=num_heads, input_hidden=input_hidden
        )
        # bias
        cls._check_bias(bias, t, out_hidden_x3=out_hidden_x3)
        # mask_index
        assert (
            mask_index is not None
        ), "Attention import currently only supports required mask_index"
        total_seq_len = cls._check_mask_index(mask_index, m, batch_size=batch_size, seq_len=seq_len)
        # past
        if past_seq_len is None:
            past_seq_len = 0
        if past is not None:
            past_seq_len = cls._check_past(
                past,
                t,
                batch_size=batch_size,
                num_heads=num_heads,
                seq_len=seq_len,
                total_seq_len=total_seq_len,
                head_size=head_size,
            )
        # split weight and biases and do the matmuls
        w_Q, w_K, w_V = _op.split(weight, 3, axis=1)
        b_Q, b_K, b_V = _op.split(bias, 3, axis=0)
        # need to merge batch dimensions since TVM matmul is 2D
        input_emb = _op.reverse_reshape(input_emb, (-1, 0))
        Q = _op.add(_op.nn.matmul(input_emb, w_Q), b_Q)
        K = _op.add(_op.nn.matmul(input_emb, w_K), b_K)
        V = _op.add(_op.nn.matmul(input_emb, w_V), b_V)
        Q = cls._split_into_heads(Q, batch_size, seq_len, num_heads, head_size)
        K = cls._split_into_heads(K, batch_size, seq_len, num_heads, head_size)
        V = cls._split_into_heads(V, batch_size, seq_len, num_heads, head_size)
        # Concatenate (past_K, past_V) with (K, V) by sequence axis:
        # (batch_size, num_heads, past_sequence_length + sequence_length, head_size)
        if past is not None and past_seq_len > 0:
            K_past, V_past = _op.split(past, 2, axis=0)
            K = _op.concatenate([_op.squeeze(K_past, axis=[0]), K], axis=2)
            V = _op.concatenate([_op.squeeze(V_past, axis=[0]), V], axis=2)
        # Prepare present state for Key and Value with shape
        # (2, batch_size, num_heads, past_sequence_length + sequence_length, head_size)
        present = _op.stack([K, V], axis=0)
        Q = cls._merge_first_dimensions(Q)
        K = cls._merge_first_dimensions(K)
        V = cls._merge_first_dimensions(V)
        # Compute Attention output
        output = cls._compute_attention(
            Q,
            K,
            V,
            mask_index,
            unidirectional=unidirectional,
            batch_size=batch_size,
            out_hidden=out_hidden,
            num_heads=num_heads,
            head_size=head_size,
            seq_len=seq_len,
            past_seq_len=past_seq_len,
        )
        return _expr.TupleWrapper(_expr.Tuple([output, present]), 2)
class QAttention(OrtAttentionBase, OnnxOpConverter):
    """Operator converter for QAttention from Microsoft onnxruntime contrib opset.
    This is the self-attention mechanism used in transformer models.
    Quantized variant: input/weight are int8/uint8 with per-tensor scale and
    zero point; the attention itself runs in float after dequantization.
    Returns a 2-tuple (output, present) where present is the updated K/V cache.
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # ************************* Read attrs *************************
        num_heads = attr["num_heads"]
        unidirectional = attr["unidirectional"]
        # ************************* Read inputs *************************
        # (batch, seq, in_hidden)
        input_emb = inputs[0]
        # (in_hidden, 3 * out_hidden), where out_hidden = num_heads * head_size
        weight = inputs[1]
        # (3 * out_hidden,)
        bias = inputs[2]
        # Scale of quantized input tensor.
        # Scalar, which means a per-tensor/layer quantization
        input_scale = inputs[3]
        # Scale of quantized weight tensor.
        # Scalar or a 1D tensor, which means a per-tensor/per-column quantization.
        # Its size should be 3 * out_hidden if it is per-column quantization
        weight_scale = inputs[4]
        # TODO(agladyshev):
        #   ORT documentation says that shape is (batch,),
        #   but in ORT source code we have following comment:
        #       1. (batch_size)
        #       2. (2 * batch_size)
        #       3. (batch_size, 1)
        #       4. (1, 1)
        #       5. (batch_size, past_sequence_length + sequence_length)
        #   In practice, for GPT-2 there shape is (batch, past_seq_length + seq_length).
        #   Currently only (batch, past_seq_length + seq_length) shape is supported.
        mask_index = inputs[5]
        # Zero point of quantized input tensor.
        # Scalar, which means a per-tensor/layer quantization
        input_zero_point = inputs[6]
        # Zero point of quantized weight tensor.
        # Scalar or a 1D tensor, which means a per-tensor/per-column quantization.
        # Its size should be 3 * out_hidden if it is per-column quantization
        weight_zero_point = inputs[7]
        # (2, batch, num_heads, past_seq, head_size)
        past = inputs[8]
        # ************************* Parse inputs *************************
        # Valid dtypes: t1 input, t2 weight, t3 float params, t4 mask.
        t1 = ["int8", "uint8"]
        t2 = ["int8", "uint8"]
        t3 = ["float32", "float16"]
        t4 = ["int32"]
        # input
        batch_size, seq_len, input_hidden = cls._check_input_embeddings(input_emb, t1)
        # weight
        out_hidden_x3, out_hidden, head_size = cls._check_weights(
            weight, t2, num_heads=num_heads, input_hidden=input_hidden
        )
        # bias
        cls._check_bias(bias, t3, out_hidden_x3=out_hidden_x3)
        # input_scale
        assert infer_type(input_scale).checked_type.dtype in t3
        input_scale = get_scalar(
            input_scale, params, dtype=infer_type(input_scale).checked_type.dtype
        )
        # weight_scale
        assert infer_type(weight_scale).checked_type.dtype in t3
        # TODO(agladyshev): now QNN Batch Matmul only supports scalar types for scale and zero_point
        weight_scale = get_scalar(
            weight_scale, params, dtype=infer_type(weight_scale).checked_type.dtype
        )
        # mask_index
        assert (
            mask_index is not None
        ), "Attention import currently only supports required mask_index"
        total_seq_len = cls._check_mask_index(
            mask_index, t4, batch_size=batch_size, seq_len=seq_len
        )
        # TODO(agladyshev): int32 required for qnn.batch_matmul (QnnBatchMatmulRel)
        zero_point_zero = _expr.const(0, "int32")
        # input_zero_point
        if input_zero_point is None:
            input_zero_point = zero_point_zero
        else:
            assert infer_type(input_zero_point).checked_type.dtype in t1
            # TODO(agladyshev): int32 required for qnn.batch_matmul (QnnBatchMatmulRel)
            input_zero_point = get_scalar(input_zero_point, params, dtype="int32")
        # weight_zero_point
        if weight_zero_point is None:
            weight_zero_point = zero_point_zero
        else:
            assert infer_type(weight_zero_point).checked_type.dtype in t2
            # TODO(agladyshev): int32 required for qnn.batch_matmul (QnnBatchMatmulRel)
            weight_zero_point = get_scalar(weight_zero_point, params, dtype="int32")
        # past (2, batch_size, num_heads, past_sequence_length, head_size)
        past_seq_len = 0
        if past is not None:
            past_seq_len = cls._check_past(
                past,
                t3,
                batch_size=batch_size,
                num_heads=num_heads,
                seq_len=seq_len,
                total_seq_len=total_seq_len,
                head_size=head_size,
            )
        # ************************* Create Relay *************************
        # Add batch dimension for QNN Batch Matmul
        weight = _op.expand_dims(weight, 0, num_newaxis=1)
        weight = _op.concatenate([weight] * batch_size, axis=0)
        # Split weight and biases and do the Matmul
        w_Q, w_K, w_V = _op.split(weight, 3, axis=-1)
        b_Q, b_K, b_V = _op.split(bias, 3, axis=-1)

        def qmatmul_dequantize_bias(
            lhs, rhs, lhs_scale, rhs_scale, lhs_zero_point, rhs_zero_point, bias
        ):
            # Quantized matmul followed by dequantization and float bias add.
            rhs_transposed = _op.transpose(rhs, axes=[0, 2, 1])  # QNN Batch Matmul do: X * Y^T
            result = _qnn.op.batch_matmul(
                lhs, rhs_transposed, lhs_zero_point, rhs_zero_point, lhs_scale, rhs_scale
            )
            # In our case zero point and scale are scalar, therefore 'axis' doesn't matter
            result = _qnn.op.dequantize(result, _op.multiply(lhs_scale, rhs_scale), zero_point_zero)
            result = _op.add(result, bias)
            return result

        Q = qmatmul_dequantize_bias(
            input_emb, w_Q, input_scale, weight_scale, input_zero_point, weight_zero_point, b_Q
        )
        K = qmatmul_dequantize_bias(
            input_emb, w_K, input_scale, weight_scale, input_zero_point, weight_zero_point, b_K
        )
        V = qmatmul_dequantize_bias(
            input_emb, w_V, input_scale, weight_scale, input_zero_point, weight_zero_point, b_V
        )
        Q = cls._split_into_heads(Q, batch_size, seq_len, num_heads, head_size)
        K = cls._split_into_heads(K, batch_size, seq_len, num_heads, head_size)
        V = cls._split_into_heads(V, batch_size, seq_len, num_heads, head_size)
        # Concatenate (past_K, past_V) with (K, V) by sequence axis:
        # (batch_size, num_heads, past_sequence_length + sequence_length, head_size)
        if past is not None and past_seq_len > 0:
            K_past, V_past = _op.split(past, 2, axis=0)
            K = _op.concatenate([_op.squeeze(K_past, axis=[0]), K], axis=2)
            V = _op.concatenate([_op.squeeze(V_past, axis=[0]), V], axis=2)
        # Prepare present state for Key and Value with shape
        # (2, batch_size, num_heads, past_sequence_length + sequence_length, head_size)
        present = _op.stack([K, V], axis=0)
        Q = cls._merge_first_dimensions(Q)
        K = cls._merge_first_dimensions(K)
        V = cls._merge_first_dimensions(V)
        # Compute Attention output
        output = cls._compute_attention(
            Q,
            K,
            V,
            mask_index,
            unidirectional=unidirectional,
            batch_size=batch_size,
            out_hidden=out_hidden,
            num_heads=num_heads,
            head_size=head_size,
            seq_len=seq_len,
            past_seq_len=past_seq_len,
        )
        return _expr.TupleWrapper(_expr.Tuple([output, present]), 2)
class Gemm(OnnxOpConverter):
    """Operator converter for Gemm.

    Computes Y = alpha * op(A) * op(B) + beta * C, where op() is an
    optional transpose controlled by the transA/transB attributes.
    """

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) in (2, 3), f"Gemm op take 2 or 3 inputs, {len(inputs)} given"
        a_type = infer_type(inputs[0])
        dtype = a_type.checked_type.dtype
        alpha = float(attr.get("alpha", 1.0))
        beta = float(attr.get("beta", 1.0))
        trans_a = int(attr.get("transA", 0))
        trans_b = int(attr.get("transB", 0))
        # nn.dense expects its second operand pre-transposed, hence `not trans_b`.
        channels = infer_channels(inputs[1], not trans_b)
        if trans_a:
            inputs[0] = _op.transpose(inputs[0], axes=(1, 0))
        if not trans_b:
            inputs[1] = _op.transpose(inputs[1], axes=(1, 0))
        # dense requires a 2D first operand; collapse higher ranks.
        if len(a_type.checked_type.shape) != 2:
            inputs[0] = _op.nn.batch_flatten(inputs[0])
        if alpha != 1.0:
            inputs[0] *= _expr.const(alpha, dtype=dtype)
        out = _op.nn.dense(inputs[0], inputs[1], units=channels)
        if len(inputs) == 3:
            # Optional C term, scaled by beta when beta != 1.
            out += inputs[2] if beta == 1.0 else _expr.const(beta, dtype=dtype) * inputs[2]
        return out
class MatMul(OnnxOpConverter):
    """Operator converter for MatMul."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, f"MatMul op take 2 inputs, {len(inputs)} given"
        # Output dtype follows the first operand; the helper handles the
        # batched case based on input shapes.
        lhs_dtype = infer_type(inputs[0]).checked_type.dtype
        return matmul_out_dtype(inputs, out_dtype=lhs_dtype)
class MatMulInteger16(OnnxOpConverter):
    """Operator converter for MatMulInteger16 from Microsoft onnxruntime contrib opset."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        assert len(inputs) == 2, f"MatMulInteger16 op take 2 inputs, {len(inputs)} given"
        a_dtype = infer_type(inputs[0]).checked_type.dtype
        b_dtype = infer_type(inputs[1]).checked_type.dtype
        # Check input data types
        assert a_dtype in ("int16", "uint16"), "MatMulInteger16: invalid dtype for first input"
        assert b_dtype in ("int16", "uint16"), "MatMulInteger16: invalid dtype for second input"
        # Accumulate in uint32 only when both operands are unsigned.
        out_dtype = "uint32" if a_dtype == "uint16" and b_dtype == "uint16" else "int32"
        return matmul_out_dtype(inputs, out_dtype)
class Mod(OnnxOpConverter):
    """Operator converter for Mod."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, f"Mod op take 2 inputs, {len(inputs)} given"
        # attr['fmod'] selects C-style fmod (1) vs python-style modulo (0).
        # The relay equivalent of np.fmod is relay.mod and np.mod is relay.floor_mod.
        op_name = "mod" if attr.get("fmod", 0) else "floor_mod"
        return AttrCvt(op_name)(inputs, {}, params)
class MaxPool(Pool):
    """Operator converter for MaxPool"""
    # All pooling logic lives in the shared Pool base class; only the
    # relay op-name prefix differs per subclass.
    name = "max_pool"
class MaxUnpool(OnnxOpConverter):
    """Operator converter for MaxUnpool"""
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # Unpack inputs and attributes
        data = inputs[0]
        data_type = infer_type(data).checked_type.dtype
        indices = inputs[1]
        output_shape = inputs[2]
        kernel_shape = attr.get("kernel_shape")
        pads = attr.get("pads", None)
        strides = attr.get("strides", [1] * len(kernel_shape))
        # Compute the proper output shape before padding.
        # The leading [1, 1] leaves the batch/channel dims unscaled; only
        # the spatial dims are multiplied by the strides.
        multiplier = _op.concatenate(
            [_expr.const([1, 1], dtype="int64"), _expr.const(list(strides), dtype="int64")], axis=0
        )
        total_output_shape = multiplier * shape_of(data, dtype="int64")
        # Add extra dimensions from kernel size and stride mismatch
        total_output_shape += _op.concatenate(
            [_expr.const([0, 0], "int64"), _expr.const(list(kernel_shape), "int64")], axis=0
        ) - _op.concatenate(
            [_expr.const([0, 0], "int64"), _expr.const(list(strides), "int64")], axis=0
        )
        # Compute padding amount if output shape is specified.
        # An explicit output_shape input overrides the computed shape entirely.
        if output_shape is not None:
            total_output_shape = output_shape
        elif pads is not None:
            # Get pads in the proper format for relay.
            pads = _op.concatenate(
                [_expr.const([0, 0, 0, 0], "int64"), _expr.const(list(pads), "int64")], axis=0
            )
            pads = _op.reshape(pads, [-1, 2])
            # Compute the total padding per axis.
            total_pad = _op.sum(pads, axis=-1)
            # Reversing maxpool means that padding actually makes our output smaller.
            total_output_shape = total_output_shape - total_pad
        # Create a tensor of zeros then scatter our data through it.
        zeros_tensor = _op.zeros(total_output_shape, data_type)
        # We need to flatten all our tensors before scattering.
        # The indices address positions in the flattened output tensor.
        flat_tensor = _op.scatter_elements(
            _op.reshape(zeros_tensor, [-1]),
            _op.reshape(indices, [-1]),
            _op.reshape(data, [-1]),
            axis=0,
        )
        # Now reshape back to prepadded shape.
        output_tensor = _op.reshape(flat_tensor, total_output_shape)
        return output_tensor
class LpPool(OnnxOpConverter):
    """A helper class for lppool op converters."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # LpPool is lowered as: (sum over window of x^p) ^ (1/p), built from
        # avg_pool(x^p) rescaled by the window size, then the p-th root.
        dtype = infer_type(inputs[0]).checked_type.dtype
        data = inputs[0]
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        num_spatial_dims = ndim - 2
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: LpPool does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                data = autopad(
                    data,
                    # this is meant to handle the field 'strides' being optional for opsets 11+
                    attr.get("strides", [1] * num_spatial_dims),
                    attr["kernel_shape"],
                    [1] * ndim,
                    mode=attr["auto_pad"],
                )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = (
                    f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator LpPool '
                    f"is invalid."
                )
                raise tvm.error.OpAttributeInvalid(msg)
            attr.pop("auto_pad")
        if "storage_order" in attr:
            attr["layout"] = onnx_storage_order2layout(
                attr["storage_order"], dims=(len(input_shape) - 2), op_name="LpPool"
            )
        else:
            attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name="LpPool")
        # p defaults to 2 (Euclidean / L2 pooling) per the ONNX spec.
        p_value = attr.get("p", 2)
        p = _expr.const(p_value, dtype)
        reci_p = _expr.const(1.0 / p_value, dtype)
        data = _op.power(data, p)
        out = AttrCvt(
            op_name=dimension_picker("avg_pool"),
            transforms={"kernel_shape": "pool_size", "pads": ("padding", 0)},
            extras={"count_include_pad": True},
            ignores=["p"],
            custom_check=dimension_constraint(),
        )([data], attr, params)
        kernels = attr["kernel_shape"]
        # Undo the averaging: multiply by the window element count to
        # recover the windowed sum before taking the p-th root.
        out = _op.abs(out) * _expr.const(np.prod(kernels).astype(dtype))
        return _op.power(out, reci_p)
class GlobalLpPool(OnnxOpConverter):
    """Operator converter for GlobalLpPool."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # TODO: GlobalLpPool does not yet support dynamic shapes
        # A global pool is just an LpPool whose window spans every spatial dim.
        spatial_dims = infer_shape(inputs[0])[2:]
        attr["kernel_shape"] = spatial_dims
        return LpPool._impl_v1(inputs, attr, params)
class Mul(Elemwise):
    """Operator converter for Multiply."""
    # Delegates to the Elemwise base; `name` selects the relay op.
    name = "multiply"
class Pad(OnnxOpConverter):
    """Operator converter for Pad."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1 carries the pad amounts in the "paddings" attribute as
        # [begin_0..begin_n, end_0..end_n]; relay wants (begin, end) pairs.
        pad_width = []
        pads = attr.pop("paddings")
        dims = int(len(pads) / 2)
        for i in range(dims):
            pad_width.append((pads[i], pads[i + dims]))
        attr["pad_width"] = pad_width
        pad_mode = attr.get("mode", b"constant").decode("utf-8")
        if pad_mode in ["constant", "edge", "reflect"]:
            attr["pad_mode"] = pad_mode
            attr.pop("mode", None)
        else:
            raise tvm.error.OpAttributeInvalid(
                "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
            )
        return AttrCvt(_op.nn.pad, transforms={"value": "pad_value"})(inputs, attr, params)
    @classmethod
    def _impl_v2(cls, inputs, attr, params):
        # Opset 2 renames the attribute to "pads"; layout is unchanged.
        pad_width = []
        pads = attr.pop("pads")
        dims = int(len(pads) / 2)
        for i in range(dims):
            pad_width.append((pads[i], pads[i + dims]))
        attr["pad_width"] = pad_width
        pad_mode = attr.get("mode", b"constant").decode("utf-8")
        if pad_mode in ["constant", "edge", "reflect"]:
            attr["pad_mode"] = pad_mode
            attr.pop("mode", None)
        else:
            raise tvm.error.OpAttributeInvalid(
                "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
            )
        return AttrCvt("pad", transforms={"value": "pad_value"})(inputs, attr, params)
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # Opset 11 moves pads (and the optional constant value) to inputs.
        pads = inputs[1]
        if len(inputs) == 3 and inputs[2] is not None:
            # Scalar fill value supplied as a tensor input.
            value = fold_constant(_op.take(inputs[2], _op.const(0)))
        else:
            value = 0.0
        # Reshape the flat [begins..., ends...] vector into per-axis pairs.
        pad_width_expr = fold_constant(_op.transpose(_op.reshape(pads, (2, -1))))
        pad_mode = attr.get("mode", b"constant").decode("utf-8")
        if not pad_mode in ["constant", "edge", "reflect"]:
            raise tvm.error.OpAttributeInvalid(
                "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
            )
        return _op.nn.pad(inputs[0], pad_width_expr, value, pad_mode=pad_mode)
class ParametricSoftPlus(OnnxOpConverter):
    """Operator converter for ParametricSoftPlus."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * ln(exp(beta * x) + 1)
        alpha = _expr.const(float(attr.get("alpha", 1.0)))
        beta = _expr.const(float(attr.get("beta", 1.0)))
        softplus = _op.log(_op.exp(beta * inputs[0]) + _expr.const(1.0))
        return softplus * alpha
class Pow(OnnxOpConverter):
    """Operator converter for Pow."""

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        base, exponent = inputs
        base_dtype = infer_type(base).checked_type.dtype
        output_type = base_dtype
        exp_dtype = infer_type(exponent).checked_type.dtype
        # Integer pow is not widely supported by backends: compute in
        # float32 and cast back to the original type afterwards.
        if not base_dtype.startswith("float"):
            base_dtype = "float32"
            base = _op.cast(base, base_dtype)
        if base_dtype != exp_dtype:
            exponent = _op.cast(exponent, base_dtype)
        # TODO: come up with good default integer pow() func for common backends
        result = _op.power(base, exponent)
        if base_dtype != output_type:
            result = _op.cast(result, output_type)
        return result
class Prelu(OnnxOpConverter):
    """Operator converter for Prelu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, f"Prelu need 2 inputs, {len(inputs)} given"
        data, slope = inputs
        data_shape = shape_of(data)
        # relay.nn.prelu wants a 1-D slope, so broadcast the slope against
        # the data, flatten both, apply prelu, then restore the shape.
        flat_slope = _op.reshape(_op.broadcast_to_like(slope, data), [-1])
        flat_out = _op.nn.prelu(_op.reshape(data, [-1]), flat_slope, axis=0)
        return _op.reshape(flat_out, data_shape)
class Reciprocal(OnnxOpConverter):
    """Operator converter for Reciprocal."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Emit 1/x with a constant matching the input's dtype.
        in_dtype = infer_type(inputs[0]).checked_type.dtype
        one = _expr.const(1.0, dtype=in_dtype)
        return one / inputs[0]
class Flatten(OnnxOpConverter):
    """Operator converter for Flatten."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Collapses dims [0, axis) into one dim and [axis, ndim) into another,
        # producing a 2-D result per the ONNX Flatten contract.
        axis = attr.get("axis", 1)
        ishape = shape_of(inputs[0])
        ndim = infer_shape(ishape)[0]
        if axis < 0:
            # Normalize negative axis against the input rank.
            axis = axis + ndim
        if axis == 1:
            # Fast path: relay has a dedicated op for the default axis.
            out = _op.nn.batch_flatten(inputs[0])
        else:
            # General case: build the 2-D target shape from the products of
            # the leading and trailing dimension groups.
            pre_shape = _op.prod(_op.strided_slice(ishape, [0], [axis], [1]), keepdims=True)
            post_shape = _op.prod(_op.strided_slice(ishape, [axis], [ndim], [1]), keepdims=True)
            newshape = fold_constant(_op.concatenate([pre_shape, post_shape], axis=0))
            out = _op.reshape(inputs[0], newshape)
        return out
class Reshape(OnnxOpConverter):
    """Operator converter for Reshape."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1 carries the target shape as an attribute.
        return _op.reshape(inputs[0], attr["shape"])

    @classmethod
    def _impl_v5(cls, inputs, attr, params):
        # From opset 5 the target shape is a second input tensor.
        allowzero = attr.get("allowzero", False)
        if get_name(inputs[1]) not in params:
            # Dynamic shape: let relay handle the shape tensor directly.
            return _op.reshape(*inputs, allowzero=allowzero)
        # Known parameter: fold the shape into a static tuple.
        static_shape = tuple(params[inputs[1].name_hint].numpy().astype("int32"))
        return _op.reshape(inputs[0], static_shape, allowzero=allowzero)
class DepthToSpace(OnnxOpConverter):
    """Operator converter for DepthToSpace."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # mode is either DCR (default) or CRD, per the ONNX spec.
        mode = attr.get("mode", b"DCR").decode("utf-8")
        return _op.nn.depth_to_space(inputs[0], int(attr["blocksize"]), mode=mode)
class SpaceToDepth(OnnxOpConverter):
    """Operator converter for SpaceToDepth."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Rearranges spatial blocks of size `blocksize` into the channel dim.
        return _op.nn.space_to_depth(inputs[0], int(attr["blocksize"]))
class Concat(OnnxOpConverter):
    """Operator converter for Concat."""
    @classmethod
    def _impl_v1(cls, inputs, args, params):
        # relay.concatenate takes the tensor list as a single tuple argument,
        # hence the extra wrapping of `inputs`.
        return AttrCvt(op_name="concatenate")((inputs,), args)
class Scale(OnnxOpConverter):
    """Operator converter for Scale."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Plain elementwise multiplication by a scalar attribute.
        factor = float(attr.get("scale", 1.0))
        return inputs[0] * _expr.const(factor)
class Selu(OnnxOpConverter):
    """Operator converter for Selu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Default alpha/gamma values match the ONNX specification.
        alpha = float(attr.get("alpha", 1.67326319217681884765625))
        gamma = float(attr.get("gamma", 1.05070102214813232421875))
        negative_part = _expr.const(-alpha) * _op.nn.relu(
            _expr.const(1.0) - _op.exp(inputs[0])
        )
        positive_part = _op.nn.relu(inputs[0])
        return _expr.const(gamma) * (negative_part + positive_part)
class ScaledTanh(OnnxOpConverter):
    """Operator converter for ScaledTanh."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * tanh(beta * x)
        alpha = float(attr.get("alpha", 1.0))
        beta = float(attr.get("beta", 1.0))
        scaled = _op.tanh(_expr.const(beta) * inputs[0])
        return scaled * _expr.const(alpha)
class Shrink(OnnxOpConverter):
    """Operator converter for Shrink."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        # y = x + bias where x < -lambd, x - bias where x > lambd, else 0.
        data = inputs[0]
        dtype = infer_type(data).checked_type.dtype
        lambd = _op.const(attr.get("lambd", 0.5), dtype=dtype)
        bias = _op.const(attr.get("bias", 0.0), dtype=dtype)
        zeros = _op.zeros_like(data)
        lower = _op.where(data < -lambd, data + bias, zeros)
        upper = _op.where(data > lambd, data - bias, zeros)
        return lower + upper
class Softsign(OnnxOpConverter):
    """Operator converter for Softsign."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = x / (1 + |x|), reusing the Absolute converter for |x|.
        return inputs[0] / (_expr.const(1.0) + Absolute.get_converter(1)(inputs, attr, params))
class Sub(Elemwise):
    """Operator converter for Subtract."""
    # Delegates to the Elemwise base; `name` selects the relay op.
    name = "subtract"
class Sum(OnnxOpConverter):
    """Operator converter for Sum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Onnx Sum Operator: fold the variadic inputs with elementwise add.
        total = inputs[0]
        for operand in inputs[1:]:
            total = _op.add(total, operand)
        return total
class Optional_(OnnxOpConverter):
    """Operator converter for Optional based on sequence construction op."""
    @classmethod
    def _impl_v15(cls, inputs, attr, params):
        # Optionals are modeled as sequences built from the inputs.
        return SequenceConstruct._impl_v11(inputs, attr, params)
class OptionalHasElement(OnnxOpConverter):
    """Operator converter for OptionalHasElement."""

    @classmethod
    def _impl_v15(cls, inputs, attr, params):
        # A non-empty inferred shape means the optional holds a value.
        has_element = bool(infer_shape(inputs[0]))
        return _op.const(has_element)
class OptionalGetElement(OnnxOpConverter):
    """Operator converter for OptionalGetElement based on sequence construction op."""
    @classmethod
    def _impl_v15(cls, inputs, attr, params):
        # Rebuild the optional as a sequence and take its first element.
        opt_as_seq = Optional_._impl_v15(inputs, attr, params)
        return _expr.TupleGetItem(opt_as_seq, 0)
class Affine(OnnxOpConverter):
    """Operator converter for Affine transformation."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = alpha * x + beta
        scale = _expr.const(attr.get("alpha", 1.0))
        shift = _expr.const(attr.get("beta", 0.0))
        return (scale * inputs[0]) + shift
class ThresholdedRelu(OnnxOpConverter):
    """Operator converter for ThresholdedRelu."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Zero out values not strictly greater than alpha by multiplying
        # with a {0, 1} mask.
        alpha = float(attr.get("alpha", 1.0))
        threshold = _op.full_like(inputs[0], fill_value=_expr.const(alpha))
        mask = _op.greater(inputs[0], threshold).astype("float32")
        return inputs[0] * mask
def _broadcast_constraint():
def _broadcast_check(attrs):
if attrs.get("axis", None):
return False
return True
return _broadcast_check, "Specifying broadcast axis not allowed."
def _fully_connected(opset):
    # Returns a converter mapping a fully-connected op to relay dense;
    # `opset` is currently unused but kept for the converter-factory contract.
    def _impl(inputs, attr, params):
        # get number of channels
        channels = infer_channels(inputs[1], params)
        attr["units"] = channels
        return AttrCvt("dense", ignores=["axis", "axis_w"])(inputs, attr)
    return _impl
class Upsample(OnnxOpConverter):
    """Operator converter for Upsample (nearest mode)."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        scales = attr.get("scales")
        input_shape = infer_shape(inputs[0])
        dims = len(input_shape)
        if not scales:
            # Here we are going to higher OPSET version.
            assert len(inputs) == 2, f"Upsample op takes 2 inputs, {len(inputs)} given"
            if get_name(inputs[1]) in params:
                scales = params[inputs[1].name_hint].numpy()
            else:
                scales = inputs[1]
        if isinstance(scales, _expr.Constant):
            scales = list(scales.data.numpy())
        if not isinstance(scales, _expr.Expr):
            # Batch and channel must not be resampled.
            assert scales[0] == 1.0 and scales[1] == 1.0

        mode = attr.get("mode")
        if mode == b"nearest":
            method = "nearest_neighbor"
        elif mode == b"linear":
            method = "trilinear" if dims == 5 else "bilinear"
        else:
            raise tvm.error.OpAttributeInvalid(
                f'Value {mode} in attribute "mode" of operator Upsample is not valid.'
            )

        # in 3d case, we use the purely static op
        if dims == 5:
            if isinstance(scales, _expr.Expr):
                # NCDHW scales layout: depth/height/width at indices 2/3/4.
                # BUGFIX: depth previously read index 1 (the channel scale),
                # disagreeing with the static branch's scales[-3] below.
                scale_d = _op.take(scales, _op.const(2))
                scale_h = _op.take(scales, _op.const(3))
                scale_w = _op.take(scales, _op.const(4))
            else:
                assert len(scales) == 5
                scale_h = scales[-2]
                scale_w = scales[-1]
                scale_d = scales[-3]
            layout = "NCDHW"
            out = _op.nn.upsampling3d(
                inputs[0],
                scale_d,
                scale_h,
                scale_w,
                layout=layout,
                method=method,
                coordinate_transformation_mode="asymmetric",
            )
        # in 2d case, use dynamic op
        else:
            if isinstance(scales, _expr.Expr):
                # NCHW scales layout: height/width at indices 2/3.
                # BUGFIX: indices 3/4 were used before; index 4 is out of
                # bounds for a length-4 scales tensor and both disagreed
                # with the static branch's scales[-2]/scales[-1].
                scale_h = _op.take(scales, _op.const(2))
                scale_w = _op.take(scales, _op.const(3))
            else:
                assert len(scales) == 4
                scale_h = scales[-2]
                scale_w = scales[-1]
            layout = "NCHW"
            out = _op.nn.upsampling(
                inputs[0], scale_h, scale_w, layout=layout, method=method, align_corners=False
            )
        return out
class Shape(OnnxOpConverter):
    """Operator converter for Shape."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # ONNX Shape always produces int64.
        return shape_of(inputs[0], "int64")

    @classmethod
    def _impl_v15(cls, inputs, attr, params):
        # Opset 15 adds optional start/end attributes slicing the shape vector.
        return shape_of(
            inputs[0],
            dtype="int64",
            start=attr.get("start"),
            end=attr.get("end"),
        )
class CumSum(OnnxOpConverter):
    """Operator converter for CumSum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, axis_input = inputs
        axis = None
        if axis_input is not None:
            # The axis arrives as a tensor input; evaluate it to a python int.
            axis = int(infer_value(axis_input, params).numpy())
        exclusive = attr.get("exclusive", 0)
        if attr.get("reverse", 0) == 0:
            return _op.cumsum(data, axis=axis, exclusive=exclusive)
        # Reverse mode: flip along the axis, accumulate, flip back.
        flipped = _op.reverse(data, axis=axis)
        summed = _op.cumsum(flipped, axis=axis, exclusive=exclusive)
        return _op.reverse(summed, axis=axis)
class Cast(OnnxOpConverter):
    """Operator converter for Cast."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1: the "to" attribute is already a dtype string relay accepts.
        return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr)
    @classmethod
    def _impl_v6(cls, inputs, attr, params):
        # Opset 6+: "to" is an ONNX TensorProto enum that must be mapped
        # to a numpy/relay dtype string first.
        try:
            from onnx import TensorProto
        except ImportError as e:
            raise ImportError(f"Unable to import TensorProto from onnx {e}")
        # If onnx mapping is used, bfloat16 gets converted to float16
        # which is not the desired behavior
        if attr["to"] == int(TensorProto.BFLOAT16):
            attr["to"] = "bfloat16"
        else:
            try:
                from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
                attr["to"] = str(TENSOR_TYPE_TO_NP_TYPE[attr["to"]])
            except ImportError as e:
                raise ImportError(f"Unable to import onnx.mapping which is required {e}")
        return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr)
class CastLike(OnnxOpConverter):
    """Operator converter for CastLike."""
    @classmethod
    def _impl_v15(cls, inputs, attr, params):
        # Casts inputs[0] to the dtype of inputs[1]; relay has a direct op.
        return AttrCvt(op_name="cast_like")(inputs, attr)
class Unsqueeze(OnnxOpConverter):
    """Operator converter for Unsqueeze."""
    @classmethod
    def run_calculation(cls, tensor, axes):
        # Insert unit dims one at a time, smallest axis first so earlier
        # insertions don't shift the later ones.
        axes = sorted(axes)
        for axis in axes:
            if axis < 0 and isinstance(tensor, _expr.Var):
                # Negative axes count from the rank of the OUTPUT tensor.
                axis = len(tensor.type_annotation.concrete_shape) + len(axes) + axis
            tensor = _op.expand_dims(tensor, axis=axis, num_newaxis=1)
        return tensor
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1: axes come from the attribute.
        return cls.run_calculation(inputs[0], attr["axes"])
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Opset 13: axes are a tensor input; fast path for constants.
        if isinstance(inputs[1], _expr.Constant):
            constant_axes = list(inputs[1].data.numpy())
            constant_axes = list(map(int, constant_axes))
            return cls.run_calculation(inputs[0], constant_axes)
        # Fully dynamic axes: sort, then expand one axis at a time with
        # negative axes normalized against the output rank.
        rank_input = len(infer_type(inputs[0]).checked_type.shape)
        num_new_axis = int(infer_type(inputs[1]).checked_type.shape[0])
        axes = relay.sort(inputs[1])
        axes = relay.split(axes, num_new_axis).astuple()
        rank_output = rank_input + num_new_axis
        result = inputs[0]
        # TODO (AndrewZhaoLuo): investigate performance issues with consecutive
        # dynamic expand_dims on non-llvm targets.
        for i in range(num_new_axis):
            axis = relay.TupleGetItem(axes, i)
            # Unpack scalar
            axis = relay.reshape(axis, [])
            axis = relay.where(
                axis >= relay.const(0, "int64"), axis, axis + relay.const(rank_output, "int64")
            )
            result = _op.expand_dims(result, axis)
        return result
class Squeeze(OnnxOpConverter):
    """Operator converter for Squeeze."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1: axes come from the attribute (None squeezes all unit dims).
        axis = attr.get("axes", None)
        return _op.squeeze(inputs[0], axis)
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Opset 13: axes are an optional tensor input.
        ishape = infer_shape(inputs[0])
        axis = inputs[1]
        if axis is None:
            # If axes is not provided, all the single dimensions will be removed from the shape.
            if not ishape: # scalar
                return inputs[0]
            axis = [i for i in range(len(ishape)) if ishape[i] == 1]
            axis = _op.const(axis)
        dtype = infer_type(axis).checked_type.dtype
        if isinstance(axis, _expr.Constant):
            # Static axes: squeeze directly with python ints.
            constant_axes = list(axis.data.numpy())
            constant_axes = list(map(int, constant_axes))
            return _op.squeeze(inputs[0], constant_axes)
        # Dynamic axes: normalize negatives by adding the input rank.
        rank = _op.shape_of(_op.shape_of(inputs[0], dtype), dtype)
        axis = _op.where(axis < _op.const(0, dtype), axis + rank, axis)
        return _op.squeeze(inputs[0], fold_constant(axis))
class Split(OnnxOpConverter):
    """Operator converter for Split."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1: split sizes come from the "split" attribute.
        splits = attr.get("split", None)
        if splits is not None and len(splits) > 1:
            # relay.split takes cumulative cut positions, not section sizes.
            indices = []
            index = 0
            for i in splits[:-1]:
                index += i
                indices.append(index)
        # When splits isnt specified divide evenly over axis.
        else:
            indices = attr["tvm_custom"]["num_outputs"]
        output = _op.split(inputs[0], indices, attr.get("axis", 0))
        # If the output of split is a single value, unpack if from the TupleWrapper
        if len(output) == 1:
            output = output[0]
        return output
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Opset 13: split sizes are an optional second input tensor.
        splits = inputs[1]
        splits_rank = None
        if splits is not None:
            splits_rank = len(infer_shape(splits))
        if splits is not None and splits_rank > 0:
            if isinstance(splits, _expr.Constant):
                # Static sizes: convert to cumulative cut positions as above.
                splits = splits.data.asnumpy()
                indices = []
                index = 0
                for i in splits[:-1]:
                    index += i
                    indices.append(index)
            else:
                raise ValueError("Dynamic Split not yet supported")
        # When splits isnt specified divide evenly over axis.
        else:
            indices = attr["tvm_custom"]["num_outputs"]
        output = _op.split(inputs[0], indices, attr.get("axis", 0))
        # If the output of split is a single value, unpack if from the TupleWrapper
        if len(output) == 1:
            output = output[0]
        return output
class Slice(OnnxOpConverter):
    """Operator converter for Slice."""
    @classmethod
    def _common(cls, starts, ends, axes):
        # Expand per-axis starts/ends (given only for `axes`) to cover every
        # axis up to max(axes); untouched axes get the full [0, INT32_MAX) range.
        N = max(axes) + 1
        new_axes = list(range(N))
        new_starts = [0] * N
        new_ends = [np.iinfo(np.int32).max] * N
        for i, axis in enumerate(axes):
            new_starts[axis] = starts[i]
            new_ends[axis] = ends[i]
        return new_starts, new_ends, new_axes
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1: starts/ends/axes are attributes; scalars are promoted
        # to 1-element tuples first.
        if isinstance(attr["starts"], int):
            attr["starts"] = (attr["starts"],)
            attr["ends"] = (attr["ends"],)
        try:
            # Update the starts and ends according to axes if required.
            if isinstance(attr["axes"], int):
                attr["axes"] = (attr["axes"],)
            new_starts, new_ends, new_axes = cls._common(attr["starts"], attr["ends"], attr["axes"])
            attr["axes"] = new_axes
            attr["starts"] = new_starts
            attr["ends"] = new_ends
        except KeyError:
            # No "axes" attribute: starts/ends already apply per-axis in order.
            pass
        begin = list(attr["starts"])
        end = list(attr["ends"])
        return _op.strided_slice(inputs[0], begin=begin, end=end)
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # Opset 10+: starts/ends/axes/steps arrive as tensor inputs.
        starts = inputs[1]
        ends = inputs[2]
        axes = inputs[3]
        steps = inputs[4]
        ishape = infer_shape(inputs[0])
        data_rank = len(ishape)
        if axes is not None:
            # Normalize for negative axes
            axes_dtype = infer_type(axes).checked_type.dtype
            axes = fold_constant(
                _op.where(
                    axes < _op.const(0, axes_dtype), axes + _op.const(data_rank, axes_dtype), axes
                )
            )
        def has_static_axes():
            # True when every slice argument folded to a compile-time constant.
            return (
                isinstance(axes, _expr.Constant)
                and isinstance(starts, _expr.Constant)
                and isinstance(ends, _expr.Constant)
                and (steps is None or isinstance(steps, _expr.Constant))
            )
        if axes is not None and has_static_axes():
            # Fast path: all arguments static, emit a plain strided_slice.
            axes_np = axes.data.numpy().astype("int64")
            begin_np = starts.data.numpy().astype("int64")
            end_np = ends.data.numpy().astype("int64")
            if steps is None:
                strides_np = np.ones_like(begin_np).astype("int64")
            else:
                strides_np = steps.data.numpy().astype("int64")
            if all([isinstance(ishape[i], int) for i in axes_np]):
                return _op.strided_slice(
                    inputs[0], list(begin_np), list(end_np), list(strides_np), axes=list(axes_np)
                )
        # Update the starts and ends according to axes if required.
        # Dynamic path: scatter the per-axis values into full-rank
        # start/end/step vectors, defaulting untouched axes to the whole dim.
        if axes is not None:
            data_shape = shape_of(inputs[0], dtype=infer_type(ends).checked_type.dtype)
            starts = _op.scatter_elements(
                _op.const([0] * data_rank, dtype=infer_type(starts).checked_type.dtype),
                axes,
                starts,
                axis=0,
            )
            ends = _op.scatter_elements(data_shape, axes, ends, axis=0)
            if steps is not None:
                steps = _op.scatter_elements(
                    _op.const([1] * data_rank, dtype=infer_type(steps).checked_type.dtype),
                    axes,
                    steps,
                    axis=0,
                )
        if steps is None:
            steps = _op.const([1] * data_rank, dtype=infer_type(starts).checked_type.dtype)
        return _op.strided_slice(
            inputs[0], fold_constant(starts), fold_constant(ends), fold_constant(steps)
        )
def normalize_gather_indices(data, indices, axis):
    """Make sure gather indices aren't negative"""
    ind_dtype = infer_type(indices).checked_type.dtype
    # Normalize the indices to a positive range
    s = _op.take(_op.shape_of(data, dtype=ind_dtype), _op.const(axis, dtype="int64"))
    cond = fold_constant(indices < _op.const(0, ind_dtype))
    if isinstance(cond, _expr.Constant):
        # Fast path: the comparison folded to a compile-time constant, so
        # we can decide the adjustment without emitting a where().
        val = cond.data.numpy()
        if val.size == 1:
            cond = val.item()
            if cond:
                indices = indices + s
            return indices
    # General path: select per-element between adjusted and raw indices.
    indices = _op.where(cond, indices + s, indices)
    return indices
class Gather(OnnxOpConverter):
    """Operator converter for Gather."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, indices = inputs
        axis = attr.get("axis", 0)
        # relay.take requires non-negative indices.
        safe_indices = normalize_gather_indices(data, indices, axis)
        return _op.take(data, safe_indices, axis)
class GatherElements(OnnxOpConverter):
    """Operator converter for GatherElements."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data, indices = inputs
        axis = attr.get("axis", 0)
        # Map negative indices into the valid range before gathering.
        safe_indices = normalize_gather_indices(data, indices, axis)
        return _op.gather(data, axis, safe_indices)
class GatherND(OnnxOpConverter):
    """Operator converter for GatherND."""
    @classmethod
    def _impl_common(cls, data, indices, batch_dims=0):
        indices_dims = len(infer_shape(indices))
        indices_shape = infer_shape(indices)
        # relay.gather_nd expects the index-tuple axis first, so move the
        # trailing ONNX index axis to the front.
        indices = _op.transpose(indices, axes=[-1] + list(range(indices_dims - 1)))
        index_rank = indices_shape[-1]
        return _op.gather_nd(data, indices, batch_dims=batch_dims, index_rank=index_rank)
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1 has no batch_dims attribute.
        return cls._impl_common(inputs[0], inputs[1])
    @classmethod
    def _impl_v12(cls, inputs, attr, params):
        # Opset 12 adds batch_dims.
        batch_dims = attr.get("batch_dims", 0)
        return cls._impl_common(inputs[0], inputs[1], batch_dims)
class Compress(OnnxOpConverter):
    """Operator converter for compress"""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        data, condition = inputs
        axis = attr.get("axis", None)
        # Change one hot tensor to indices e.g. [0, 1, 1, 0, 1] -> [1, 2, 4]
        selected = _op.reshape(_op.argwhere(condition), (-1,))
        if axis is None:
            # if axis is None, flatten input tensor before selection
            return _op.take(_op.reshape(data, (-1,)), selected, axis=0)
        return _op.take(data, selected, axis=axis)
class Scatter(OnnxOpConverter):
    """Operator converter for Scatter."""
    @classmethod
    def _args_check(cls, inputs, attr):
        # Validates the (data, indices, updates) triple and returns the
        # (possibly negative, bounds-checked) scatter axis.
        assert (
            len(inputs) == 3
        ), f"Scatter takes 3 inputs (data, indices, updates), {len(inputs)} given"
        assert infer_type(inputs[1]).checked_type.dtype in ["int32", "int64"]
        data_rank = len(infer_shape(inputs[0]))
        assert data_rank > 0, "Data rank higher than 0 is expected"
        indices_shape = infer_shape(inputs[1])
        indices_rank = len(indices_shape)
        assert indices_rank == data_rank, "Indices rank is not the same as data one"
        updates_shape = infer_shape(inputs[2])
        updates_rank = len(updates_shape)
        assert updates_rank == data_rank, "Updates rank is not the same as data one"
        for i in range(data_rank):
            assert (
                indices_shape[i] == updates_shape[i]
            ), "Indices dimension size should be the same as updates one"
        axis = attr.get("axis", 0)
        assert -data_rank <= axis < data_rank, "Axis is out of bounds"
        return axis
    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        # Deprecated ONNX Scatter == ScatterElements with "update" reduction.
        axis = cls._args_check(inputs, attr)
        return _op.scatter_elements(inputs[0], inputs[1], inputs[2], axis)
class ScatterElements(OnnxOpConverter):
    """Operator converter for ScatterElements."""
    @classmethod
    def _args_check(cls, inputs, attr, red_valids=None):
        # Validates inputs and returns [axis] or [axis, reduction] when a
        # list of valid reduction modes is supplied.
        ret = []
        assert (
            len(inputs) == 3
        ), f"ScatterElements takes 3 inputs (data, indices, updates), {len(inputs)} given"
        assert infer_type(inputs[1]).checked_type.dtype in ["int32", "int64"]
        axis = attr.get("axis", 0)
        rank = len(infer_shape(inputs[0]))
        assert rank > 0, "Data rank higher than 0 is expected"
        assert -rank <= axis < rank, "Axis is out of bounds"
        ret.append(axis)
        if red_valids:
            reduction = attr.get("reduction", None)
            if reduction is None:
                # Absent attribute defaults to plain overwrite.
                reduction = b"update"
            reduction = reduction.decode("utf-8")
            assert (
                reduction in red_valids
            ), f"Only {red_valids} modes are supported, but {reduction} is gotten"
            ret.append(reduction)
        return ret
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # Opset 11 supports only the "update" reduction.
        axis = cls._args_check(inputs, attr)[0]
        return _op.scatter_elements(inputs[0], inputs[1], inputs[2], axis, "update")
    @classmethod
    def _impl_v16(cls, inputs, attr, params):
        # Opset 16 adds add/mul reductions.
        axis, reduction = cls._args_check(inputs, attr, ["update", "add", "mul"])
        return _op.scatter_elements(inputs[0], inputs[1], inputs[2], axis, reduction)
    @classmethod
    def _impl_v18(cls, inputs, attr, params):
        # Opset 18 adds min/max reductions.
        axis, reduction = cls._args_check(inputs, attr, ["update", "add", "mul", "min", "max"])
        return _op.scatter_elements(inputs[0], inputs[1], inputs[2], axis, reduction)
class ScatterND(OnnxOpConverter):
    """Operator converter for ScatterND."""
    @classmethod
    def _inputs_check(cls, inputs):
        # Validates the (data, indices, updates) triple per the ONNX spec.
        assert (
            len(inputs) == 3
        ), f"ScatterND takes 3 inputs (data, indices, updates), {len(inputs)} given"
        assert infer_type(inputs[1]).checked_type.dtype == "int64"
        data_rank = len(infer_shape(inputs[0]))
        assert data_rank > 0, "Data rank higher than 0 is expected"
        indices_rank = len(infer_shape(inputs[1]))
        assert indices_rank > 0, "Indices rank higher than 0 is expected"
        updates_rank = len(infer_shape(inputs[2]))
        assert (
            updates_rank == data_rank + indices_rank - infer_shape(inputs[1])[-1] - 1
        ), "Updates rank should be equal to data_rank + indices_rank - indices_shape[-1] - 1"
    @classmethod
    def _reduction_check(cls, attr, red_valids=None):
        # Returns the validated reduction mode; defaults to "update".
        reduction = attr.get("reduction", None)
        if reduction is None:
            reduction = b"update"
        reduction = reduction.decode("utf-8")
        if red_valids is None:
            red_valids = ["update"]
        assert (
            reduction in red_valids
        ), f"Only {red_valids} reductions are supported, but {reduction} is gotten"
        return reduction
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        cls._inputs_check(inputs)
        # relay.scatter_nd wants the index-tuple axis first, hence the
        # rotation of the indices tensor.
        indices_dim = len(infer_shape(inputs[1]))
        axes = list(range(indices_dim))
        return _op.scatter_nd(inputs[0], _op.transpose(inputs[1], axes[-1:] + axes[:-1]), inputs[2])
    @classmethod
    def _impl_v16(cls, inputs, attr, params):
        # Opset 16 adds add/mul reductions.
        cls._inputs_check(inputs)
        reduction = cls._reduction_check(attr, ["update", "add", "mul"])
        indices_dim = len(infer_shape(inputs[1]))
        axes = list(range(indices_dim))
        return _op.scatter_nd(
            inputs[0], _op.transpose(inputs[1], axes[-1:] + axes[:-1]), inputs[2], reduction
        )
    @classmethod
    def _impl_v18(cls, inputs, attr, params):
        # Opset 18 adds min/max reductions.
        cls._inputs_check(inputs)
        reduction = cls._reduction_check(attr, ["update", "add", "mul", "min", "max"])
        indices_dim = len(infer_shape(inputs[1]))
        axes = list(range(indices_dim))
        return _op.scatter_nd(
            inputs[0], _op.transpose(inputs[1], axes[-1:] + axes[:-1]), inputs[2], reduction
        )
class EyeLike(OnnxOpConverter):
    """Operator converter for EyeLike."""
    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        # Output dtype defaults to the input's dtype when the attribute
        # is absent.
        dtype = attr.get("dtype", None)
        if dtype is None:
            in_checked_type = infer_type(inputs[0]).checked_type
            in_dtype = in_checked_type.dtype
            dtype = in_dtype
        else:
            dtype = get_type(dtype)
        node_source_name = get_source_name_from_parameter(inputs[0])
        # since there exists multi-comsumer for the same expression
        # invoke set_span here to prevent expr-rewritten in span-filling stage
        in_shape = set_span(_op.shape_of(inputs[0]), node_source_name)
        zeros = _op.zeros(in_shape, dtype)
        dim = set_span(_op.take(in_shape, _op.const(0)), node_source_name)
        indices = _op.arange(_op.const(0), dim, dtype="int32")
        ones = _op.full(_op.const(1), _op.reshape(dim, (1,)), dtype=dtype)
        # Scatter ones onto the (k-shifted) diagonal positions (i, i + k).
        k = _op.const(attr.get("k", 0), dtype="int32")
        return _op.scatter_nd(zeros, _op.stack([indices, indices + k], axis=0), ones, "update")
class LRN(OnnxOpConverter):
    """Operator converter for Local Response Normalization."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """LRN support only NCHW format
        https://github.com/onnx/onnx/blob/main/docs/Operators.md#LRN
        """
        # Defaults follow the ONNX spec; axis 1 is the channel dim of NCHW.
        new_attr = {
            "size": attr.get("size"),
            "axis": 1,
            "alpha": attr.get("alpha", 0.0001),
            "beta": attr.get("beta", 0.75),
            "bias": attr.get("bias", 1.0),
        }
        return AttrCvt("lrn")(inputs, new_attr)
class Maximum(OnnxOpConverter):
    """Operator converter for Maximum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Fold the variadic input list with pairwise elementwise maximum;
        # a single input passes through untouched.
        result = inputs[0]
        for operand in inputs[1:]:
            result = AttrCvt("maximum")([result, operand], {})
        return result
class Minimum(OnnxOpConverter):
    """Operator converter for Minimum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Fold the variadic input list with pairwise elementwise minimum;
        # a single input passes through untouched.
        result = inputs[0]
        for operand in inputs[1:]:
            result = AttrCvt("minimum")([result, operand], {})
        return result
class Mean(OnnxOpConverter):
    """Operator converter for Mean."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) == 1:
            return inputs[0]
        # avoid overflow
        stacked = _op.concatenate([_op.expand_dims(t, axis=0) for t in inputs], axis=0)
        return _op.mean(stacked, axis=0, keepdims=False)
class MeanVarianceNormalization(OnnxOpConverter):
    """Operator converter for MeanVarianceNormalization."""
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Normalize to zero mean / unit variance over `axes`, computing the
        # variance as E[x^2] - (E[x])^2.
        axis = attr.get("axes", (0, 2, 3))
        data_mean = _op.mean(inputs[0], axis=axis, keepdims=True)
        data_mean_squared = _op.power(data_mean, _expr.const(2, "float32"))
        data_squared = _op.power(inputs[0], _expr.const(2, "float32"))
        data_squared_mean = _op.mean(data_squared, axis=axis, keepdims=True)
        return (inputs[0] - data_mean) / _op.sqrt(data_squared_mean - data_mean_squared)
class HardSigmoid(OnnxOpConverter):
    """Operator converter for HardSigmoid."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # y = clip(alpha * x + beta, 0, 1)
        alpha = attr.get("alpha", 0.2)
        beta = attr.get("beta", 0.5)
        linear = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)
        return AttrCvt("clip")([linear], {"a_min": 0, "a_max": 1})
class HardSwish(OnnxOpConverter):
    """Operator converter for HardSwish."""

    @classmethod
    def _impl_v14(cls, inputs, attr, params):
        # hard_swish(x) = x * clip(alpha * x + beta, 0, 1)
        scaled = inputs[0] * _expr.const(attr.get("alpha", 1 / 6)) + _expr.const(
            attr.get("beta", 0.5)
        )
        return inputs[0] * AttrCvt("clip")([scaled], {"a_min": 0, "a_max": 1})
class Reduce(OnnxOpConverter):
    """Operator converter for reduce ops.

    Base class: concrete subclasses set ``name`` to the relay reduce op to
    emit. Handles the evolution of the ONNX spec, where axes moved from an
    attribute (opset 1) to an optional second input (opset 12/13).
    """
    # Relay reduce op name, supplied by subclasses (e.g. "sum", "max").
    name = ""
    @classmethod
    def run_calculation(cls, inputs, axis, keepdims):
        # Emit the relay reduce op selected by the subclass.
        attr = {"axis": axis, "keepdims": keepdims}
        return AttrCvt(cls.name)(inputs, attr)
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if not infer_shape(inputs[0]):  # promote scalar to 1-D tensor
            inputs[0] = _op.expand_dims(inputs[0], axis=0)
        # Opset 1: axes come from the attribute; missing axes means reduce all.
        if "axes" in attr:
            axis = attr.get("axes", 0)
        else:
            axis_len = len(infer_shape(inputs[0]))
            axis = list(range(axis_len))
        return cls.run_calculation(inputs, axis, attr.get("keepdims", True))
    @classmethod
    def _impl_v12(cls, inputs, attr, params):
        if not infer_shape(inputs[0]):  # promote scalar to 1-D tensor
            inputs[0] = _op.expand_dims(inputs[0], axis=0)
        # Opset 12: axes may arrive as a second (constant) input instead of
        # an attribute; only constant axes are supported.
        if len(inputs) == 2:
            if isinstance(inputs[1], _expr.Constant):
                # Get axis and unpack scalar
                constant_axis = int(inputs[1].data.numpy()[0])
                return cls.run_calculation([inputs[0]], constant_axis, attr.get("keepdims", True))
            raise ValueError("Dynamic Reduce is not supported yet!")
        return cls._impl_v1(inputs, attr, params)
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        if not infer_shape(inputs[0]):  # promote scalar to 1-D tensor
            inputs[0] = _op.expand_dims(inputs[0], axis=0)
        # Opset 13 adds noop_with_empty_axes: with it set and no axes given,
        # the reduce is the identity.
        noop_with_empty_axes = attr.get("noop_with_empty_axes", 0)
        num_axis = int(infer_type(inputs[1]).checked_type.shape[0]) if inputs[1] is not None else 0
        if noop_with_empty_axes and num_axis == 0:
            return inputs[0]
        if len(inputs) == 2:
            if isinstance(inputs[1], _expr.Constant):
                # Get axis and unpack scalar
                constant_axis = int(inputs[1].data.numpy()[0])
                return cls.run_calculation([inputs[0]], constant_axis, attr.get("keepdims", True))
            if num_axis > 0:
                raise ValueError("Dynamic Reduce is not supported yet!")
            # Empty axes with noop disabled: reduce over every axis.
            axis_len = len(infer_shape(inputs[0]))
            axis = list(range(axis_len))
            return cls.run_calculation([inputs[0]], axis, attr.get("keepdims", True))
        return cls._impl_v1(inputs, attr, params)
# Concrete reductions: each subclass only selects the relay reduce op via
# ``name``; all axes/keepdims handling is inherited from Reduce.
class ReduceMax(Reduce):
    """Operator converter for ReduceMax."""
    name = "max"
class ReduceMin(Reduce):
    """Operator converter for ReduceMin."""
    name = "min"
class ReduceSum(Reduce):
    """Operator converter for ReduceSum."""
    name = "sum"
class ReduceMean(Reduce):
    """Operator converter for ReduceMean."""
    name = "mean"
class ReduceProd(Reduce):
    """Operator converter for ReduceProd."""
    name = "prod"
class ReduceLogSumExp(Reduce):
    """Operator converter for ReduceLogSumExp."""
    name = "logsumexp"
class ReduceSumSquare(OnnxOpConverter):
    """Operator converter for ReduceSumSquare."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Scalars have an empty shape; lift to rank-1 so reduction is legal.
        if not infer_shape(inputs[0]):
            inputs[0] = _op.expand_dims(inputs[0], axis=0)
        if "axes" in attr:
            axes = attr.get("axes", 0)
        else:
            axes = list(range(len(infer_shape(inputs[0]))))
        # ReduceSumSquare(x) = sum(x * x) over the chosen axes.
        inputs[0] = inputs[0] * inputs[0]
        return AttrCvt("sum")(inputs, {"axis": axes, "keepdims": attr.get("keepdims", True)})
class ReduceL1(OnnxOpConverter):
    """Operator converter for ReduceL1."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Scalars have an empty shape; lift to rank-1 so reduction is legal.
        if not infer_shape(inputs[0]):
            inputs[0] = _op.expand_dims(inputs[0], axis=0)
        if "axes" in attr:
            axes = attr.get("axes", 0)
        else:
            axes = list(range(len(infer_shape(inputs[0]))))
        # L1 norm: sum(|x|) over the chosen axes.
        inputs[0] = _op.abs(inputs[0])
        return AttrCvt("sum")(inputs, {"axis": axes, "keepdims": attr.get("keepdims", True)})
class ReduceL2(OnnxOpConverter):
    """Operator converter for ReduceL2."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Scalars have an empty shape; lift to rank-1 so reduction is legal.
        if not infer_shape(inputs[0]):
            inputs[0] = _op.expand_dims(inputs[0], axis=0)
        if "axes" in attr:
            axes = attr.get("axes", 0)
        else:
            axes = list(range(len(infer_shape(inputs[0]))))
        # L2 norm: sqrt(sum(x * x)) over the chosen axes.
        inputs[0] = inputs[0] * inputs[0]
        summed = AttrCvt("sum")(inputs, {"axis": axes, "keepdims": attr.get("keepdims", True)})
        return _op.sqrt(summed)
class ReduceLogSum(OnnxOpConverter):
    """Operator converter for ReduceLogSum."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Scalars have an empty shape; lift to rank-1 so reduction is legal.
        if not infer_shape(inputs[0]):
            inputs[0] = _op.expand_dims(inputs[0], axis=0)
        if "axes" in attr:
            axes = attr.get("axes", 0)
        else:
            axes = list(range(len(infer_shape(inputs[0]))))
        # ReduceLogSum(x) = log(sum(x)) over the chosen axes.
        summed = AttrCvt("sum")(inputs, {"axis": axes, "keepdims": attr.get("keepdims", True)})
        return _op.log(summed)
class ArgMax(OnnxOpConverter):
    """Operator converter for ArgMax."""

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        new_attr = {
            "axis": attr.get("axis", 0),
            "keepdims": attr.get("keepdims", True),
            "select_last_index": attr.get("select_last_index", False),
        }
        # ONNX mandates int64 indices for ArgMax output.
        return _op.cast(AttrCvt("argmax")(inputs, new_attr), "int64")
class ArgMin(OnnxOpConverter):
    """Operator converter for ArgMin."""

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        new_attr = {
            "axis": attr.get("axis", 0),
            "keepdims": attr.get("keepdims", True),
            "select_last_index": attr.get("select_last_index", False),
        }
        # ONNX mandates int64 indices for ArgMin output.
        return _op.cast(AttrCvt("argmin")(inputs, new_attr), "int64")
class Softmax(OnnxOpConverter):
    """Operator converter for Softmax."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Before opset 13, ONNX Softmax coerces the input to 2-D: every axis
        # from ``axis`` onward is flattened together and normalized jointly.
        axis = attr.get("axis", 1)
        in_shape = infer_shape(inputs[0])
        ndim = len(in_shape)
        if axis < 0:
            axis += ndim
        if axis == 0:
            # Flatten everything into a single axis.
            reshape_shape = [-1]
        elif axis == ndim - 1:
            # Softmax over the last axis only: no reshape needed.
            return _op.nn.softmax(inputs[0], axis=axis)
        else:
            # Collapse leading axes into one dim, trailing axes into another.
            axis_val = [in_shape[i] for i in range(axis)]
            reshape_shape = [np.prod(axis_val)] + [-1]
        data_reshape = _op.reshape(inputs[0], newshape=reshape_shape)
        out = _op.nn.softmax(data_reshape, axis=-1)
        # Restore the original shape.
        out = _op.reshape(out, newshape=in_shape)
        return out
    @classmethod
    def _impl_v13(cls, inputs, attr, _):
        # Opset 13 softmax is per-axis and maps directly onto relay's op.
        axis = attr.get("axis", -1)
        ndim = len(infer_shape(inputs[0]))
        if axis < 0:
            axis += ndim
        return _op.nn.softmax(inputs[0], axis=axis)
class LogSoftmax(OnnxOpConverter):
    """Operator converter for Softmax."""
    @classmethod
    def run_calculation(cls, inputs, attr, params, opset):
        """Run the calculation for Log Softmax calculation."""
        # Reuse the opset-appropriate Softmax lowering and take its log.
        res = Softmax.get_converter(opset)(inputs, attr, params)
        return _op.log(res)
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return cls.run_calculation(inputs, attr, params, opset=1)
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        return cls.run_calculation(inputs, attr, params, opset=13)
class Hardmax(OnnxOpConverter):
    """Operator converter for Hardmax."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Pre-opset-13 Hardmax flattens the input to 2-D around ``axis`` and
        # emits a one-hot of the argmax over the flattened trailing part.
        axis = attr.get("axis", 1)
        ndim = len(infer_shape(inputs[0]))
        if axis < 0:
            axis += ndim
        dtype = infer_type(inputs[0]).checked_type.dtype
        if axis == 0:
            pre = _op.const([1], "int64")
        else:
            # Product of the leading extents (everything before ``axis``).
            pre = _op.prod(
                _op.strided_slice(shape_of(inputs[0]), [0], [axis], [1]), axis=0, keepdims=True
            )
        # Product of the trailing extents; 2147483647 (INT32_MAX) serves as
        # an open-ended slice bound.
        post = _op.prod(
            _op.strided_slice(shape_of(inputs[0]), [axis], [2147483647], [1]), axis=0, keepdims=True
        )
        newshape = _op.concatenate([pre, post], axis=0)
        x = _op.reshape(inputs[0], fold_constant(newshape))
        argmax = _op.argmax(x, axis=1)
        onehot = _op.one_hot(
            argmax,
            _op.const(1.0, dtype),
            _op.const(0.0, dtype),
            fold_constant(_op.take(shape_of(x), _op.const([1], "int64"))),
            1,
            dtype,
        )
        # Restore the original shape.
        return _op.reshape(onehot, shape_of(inputs[0]))
    @classmethod
    def _impl_v13(cls, inputs, attr, params) -> relay.Expr:
        # Opset 13: hardmax acts on a single axis, no flattening required.
        inferred_type = infer_type(inputs[0])
        dtype = inferred_type.checked_type.dtype
        ndim = len(inferred_type.checked_type.shape)
        axis = attr.get("axis", -1) % ndim
        argmax = _op.argmax(inputs[0], axis=axis)
        return _op.one_hot(
            argmax,
            _op.const(1.0, dtype),
            _op.const(0.0, dtype),
            fold_constant(_op.take(shape_of(inputs[0]), _op.const([axis], "int64"))),
            axis,
            dtype,
        )
class OneHot(OnnxOpConverter):
    """Operator converter for OneHot."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        indices, depth, values = inputs
        rank = len(infer_shape(indices))
        # ONNX packs [off_value, on_value] into one 2-element tensor.
        off_value = _op.take(values, _op.const(0))
        on_value = _op.take(values, _op.const(1))
        # The output dtype follows the values tensor.
        dtype = infer_type(on_value).checked_type.dtype
        ind_dtype = infer_type(indices).checked_type.dtype
        # Map negative indices into the [0, depth) range.
        indices = _op.where(
            indices < _op.const(0, ind_dtype), indices + _op.cast(depth, ind_dtype), indices
        )
        # Default axis is -1 when the model does not set it.
        axis = attr.setdefault("axis", -1)
        if axis < 0:
            axis += rank + 1
        return _op.one_hot(indices, on_value, off_value, depth, axis, dtype=dtype)
class ConstantOfShape(OnnxOpConverter):
    """Operator converter for ConstantOfShape."""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        # The fill value defaults to float32 zero when absent from attrs.
        if "value" in attr:
            np_value = get_numpy(attr.pop("value"))[0]
            value = _expr.const(np_value)
            dtype = np_value.dtype.name
        else:
            value = _expr.const(0)
            dtype = "float32"
        return _op.full(value, inputs[0], dtype=dtype)
class Constant(OnnxOpConverter):
    """Operator converter for Constant."""
    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        if "value" not in attr:
            raise tvm.error.OpAttributeRequired("no value in Constant")
        value = attr.pop("value")
        # Constants may rarely have string types. These are likely exported
        # from other frameworks and not actually used in TVM. We'll just use
        # a zero valued constant for compatibility.
        if isinstance(value, bytes):
            np_value = np.asarray([0]).astype("int64")
        else:
            np_value = get_numpy(value)
        dtype = np_value.dtype.name
        value = _expr.const(np_value, dtype)
        return value
class Sign(OnnxOpConverter):
    """Operator converter for Sign."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return _op.sign(inputs[0])
class Equal(Elemwise):
    """Operator converter for Equal."""
    name = "equal"
class Not(Elemwise):
    """Operator converter for Not."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Unary op: overrides the binary Elemwise default.
        return _op.logical_not(inputs[0])
class And(Elemwise):
    """Operator converter for And."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return _op.logical_and(inputs[0], inputs[1])
class Tile(Elemwise):
    """Operator converter for Tile"""
    @classmethod
    def _impl_v6(cls, inputs, attr, params):
        # inputs[1] holds the per-axis repeat counts.
        return _op.tile(inputs[0], inputs[1])
class Erf(OnnxOpConverter):
    """Operator converter for Erf"""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        return _op.erf(inputs[0])
class Where(OnnxOpConverter):
    """Operator converter for Where"""
    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        # inputs = (condition, x, y)
        return _op.where(*inputs)
class Or(Elemwise):
    """Operator converter for Or."""
    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        return _op.logical_or(inputs[0], inputs[1])
class Expand(OnnxOpConverter):
    """Operator converter for Expand."""
    @classmethod
    def _impl_v8(cls, inputs, attr, params):
        dtype = infer_type(inputs[1]).checked_type.dtype
        in_shape = shape_of(inputs[0], dtype=dtype)
        shape = inputs[1]
        # Currently 'op.broadcast_to' expect the rank of the given 'shape'
        # (the 2nd input) is always higher than that of the given 'input' (the 1st input)
        # However, ONNX Expand supports multi-directional broadcasting, which allows
        # above pattern and also some extent of 'shape' can be smaller than the corresponding
        # extent of 'input'. In this case, the extent of 'shape' must be 1.
        # https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md
        # In above cases, we cannot directly apply 'op.broadcast_to' instead of 'expand'
        # so, here we solved this problem by expanding the given 'shape' itself.
        def expand_shape(in_shape, shape):
            """A function expands the shape when the rank is lower than that of the given
            input. Also it replaces the extent of the shape with the corresponding extent
            of the input when it is 1.
            """
            in_dims = infer_shape(in_shape)[0]
            new_dims = infer_shape(shape)[0]
            # Left-pad the shorter shape with 1s so both shapes share a rank,
            # then take the element-wise maximum (valid because ONNX requires
            # mismatched extents to be 1).
            if in_dims < new_dims:
                in_shape = _op.concatenate(
                    [_expr.const([1] * (new_dims - in_dims), dtype=dtype), in_shape], axis=0
                )
            elif new_dims < in_dims:
                shape = _op.concatenate(
                    [_expr.const([1] * (in_dims - new_dims), dtype=dtype), shape], axis=0
                )
            new_shape = _op.maximum(in_shape, shape)
            return new_shape
        shape = fold_constant(expand_shape(in_shape, shape))
        return _op.broadcast_to(inputs[0], shape=shape)
class RNN(OnnxOpConverter):
    """Operator converter for RNNs such as RNN, LSTM and GRU."""
    @classmethod
    def _activation_helper(cls, activation, alpha, beta):
        # Resolve the ONNX activation converter by name (bytes) and wrap it as
        # a unary callable usable by the rnn/lstm/gru cell helpers.
        convert_map = _get_convert_map(1)
        attrs = {}
        if alpha is not None:
            attrs["alpha"] = alpha
        if beta is not None:
            attrs["beta"] = beta
        return lambda x: convert_map[activation.decode("utf-8")]([x], attrs, {})
    @classmethod
    def _activation_needs_alpha(cls, activation):
        # Activations parameterized by an ``alpha`` attribute.
        needs_alpha = ["Affine", "LeakyRelu", "ThresholdedRelu", "ScaledTanh", "HardSigmoid", "Elu"]
        return activation.decode("utf-8") in needs_alpha
    @classmethod
    def _activation_needs_beta(cls, activation):
        # Activations parameterized by a ``beta`` attribute.
        needs_beta = ["Affine", "ScaledTanh", "HardSigmoid"]
        return activation.decode("utf-8") in needs_beta
    @classmethod
    def bidir_rnn_cell(cls, input_seqs, weight_dicts, acts):
        """
        Bidirectional RNN cell
        """
        seq_len = len(input_seqs)
        forward_outputs, fw_H_t = rnn_cell(input_seqs, **weight_dicts[0], act=acts[0])
        reverse_outputs, rev_H_t = rnn_cell(
            input_seqs, **weight_dicts[1], act=acts[1], backwards=True
        )
        # Pair forward step i with reverse step (seq_len - 1 - i) so both
        # directions align on the same time index.
        final_outputs = []
        for i in range(seq_len):
            final_outputs.append(
                _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
            )
        return (_op.stack(final_outputs, axis=0), _op.stack([fw_H_t, rev_H_t], axis=0))
    @classmethod
    def _default_activations(cls, num_directions):
        # Plain RNN defaults to tanh per direction.
        return [_op.tanh] * num_directions
    @classmethod
    def _get_activations(cls, attr, multiplier, num_directions, rnn_type):
        """
        Activation functions
        """
        if "activations" in attr:
            activations = attr["activations"]
            if len(activations) != multiplier * num_directions:
                raise NotImplementedError(
                    f"{rnn_type} assumes {multiplier} * num_directions activation functions "
                    f"are provided"
                )
            # activation_alpha / activation_beta are shared flat lists; each
            # cursor advances only when an activation actually consumes one.
            alpha_loc = 0
            alphas = attr.get("activation_alpha", [])
            if isinstance(alphas, float):
                alphas = [alphas]
            beta_loc = 0
            betas = attr.get("activation_beta", [])
            if isinstance(betas, float):
                betas = [betas]
            acts = []
            for i in range(multiplier * num_directions):
                alpha = None
                beta = None
                activation = activations[i]
                if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:
                    alpha = alphas[alpha_loc]
                    alpha_loc += 1
                if cls._activation_needs_beta(activation) and len(betas) > beta_loc:
                    beta = betas[beta_loc]
                    beta_loc += 1
                acts.append(cls._activation_helper(activation, alpha, beta))
        else:
            acts = cls._default_activations(num_directions)
        return acts
    @classmethod
    def _inputs_helper(cls, inputs, layout):
        """
        Process inputs
        """
        # Unpack inputs, note that if optional and not provided then value will be None.
        X = inputs[0]
        Wp = inputs[1]
        Rp = inputs[2]
        Bp = inputs[3]
        sequence_lens = inputs[4]
        Hp_0 = inputs[5]
        num_directions = infer_shape(Wp)[0]
        if num_directions not in [1, 2]:
            raise ValueError("num_directions must be either 1 or 2!")
        if layout == 1:
            # NOTE(review): axes=(1, 0) on a 3-D batch-first input looks like it
            # swaps only the first two axes -- confirm relay transpose semantics
            # for partial axes lists.
            X = _op.transpose(X, axes=(1, 0))
        # Initialize state if not provided.
        if Hp_0 is None:
            W_dtype = infer_type(Wp).checked_type.dtype
            X_shape = infer_shape(X)
            hidden_size = infer_shape(Rp)[-1]
            batch_size = X_shape[1]
            Hp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype)
        elif layout == 1:
            Hp_0 = _op.transpose(Hp_0, axes=(1, 0))
        # TODO (vvchernov): It can be replaced by _op.split if issue #8412 is resolved
        X_steps = unbind(X, axis=0)
        H_ts = _op.split(Hp_0, num_directions)
        Ws = _op.split(Wp, num_directions)
        Rs = _op.split(Rp, num_directions)
        Bs = None
        if Bp is not None:
            Bs = _op.split(Bp, num_directions)
        return X_steps, H_ts, Ws, Rs, Bs, num_directions, sequence_lens
    @classmethod
    def _impl_common(cls, inputs, attr, layout):
        X_steps, H_ts, Ws, Rs, Bs, num_directions, _ = cls._inputs_helper(inputs, layout)
        acts = cls._get_activations(attr, 1, num_directions, "RNN")
        # Build one weights dict per direction for rnn_cell.
        weights_dicts = []
        for i in range(num_directions):
            weights_dict = {}
            weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0])
            weights_dict["w_inp"] = _op.squeeze(Ws[i], axis=[0])
            weights_dict["w_hid"] = _op.squeeze(Rs[i], axis=[0])
            if Bs is not None:
                # ONNX concatenates input and hidden biases along the last axis.
                Bi, Bh = _op.split(Bs[i], 2, -1)
                weights_dict["b_inp"] = _op.squeeze(Bi, axis=[0])
                weights_dict["b_hid"] = _op.squeeze(Bh, axis=[0])
            weights_dicts.append(weights_dict)
        if num_directions == 2:
            output, H = RNN.bidir_rnn_cell(
                input_seqs=X_steps, weight_dicts=weights_dicts, acts=acts
            )
        else:
            # outputs shape = [seqs_num, (batch_size, hidden_size)]
            outputs, H = rnn_cell(input_seqs=X_steps, **weights_dicts[0], act=acts[0])
            # output shape = (seqs_num, num_directions, batch_size, hidden_size)
            output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)
            H = _op.expand_dims(H, axis=0)
        if layout == 1:
            output = _op.transpose(output, axes=(1, 0))
            H = _op.transpose(H, axes=(1, 0))
        return _expr.TupleWrapper(_expr.Tuple((output, H)), 2)
    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        return cls._impl_common(inputs, attr, 0)
    @classmethod
    def _impl_v14(cls, inputs, attr, params):
        # Opset 14 adds the ``layout`` attribute (0: seq-major, 1: batch-major).
        layout = attr.get("layout", 0)
        return cls._impl_common(inputs, attr, layout)
class LSTM(RNN):
    """Operator converter for LSTM"""
    @classmethod
    def bidir_lstm_cell(cls, input_seqs, weight_dicts, acts):
        """
        Bidirectional LSTM cell
        """
        seq_len = len(input_seqs)
        forward_outputs, fw_H_t, fw_C_t = lstm_cell(
            input_seqs, **weight_dicts[0], f_act=acts[0], g_act=acts[1], h_act=acts[2]
        )
        reverse_outputs, rev_H_t, rev_C_t = lstm_cell(
            input_seqs,
            **weight_dicts[1],
            f_act=acts[3],
            g_act=acts[4],
            h_act=acts[5],
            backwards=True,
        )
        # Pair forward step i with reverse step (seq_len - 1 - i) so both
        # directions align on the same time index.
        final_outputs = []
        for i in range(seq_len):
            final_outputs.append(
                _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
            )
        return (
            _op.stack(final_outputs, axis=0),
            _op.stack([fw_H_t, rev_H_t], axis=0),
            _op.stack([fw_C_t, rev_C_t], axis=0),
        )
    @classmethod
    def _default_activations(cls, num_directions):
        # ONNX LSTM defaults: f=sigmoid, g=tanh, h=tanh, per direction.
        return [_op.sigmoid, _op.tanh, _op.tanh] * num_directions
    @classmethod
    def _impl_common(cls, inputs, attr, layout):
        X_steps, H_ts, Ws, Rs, Bs, num_directions, _ = cls._inputs_helper(inputs, layout)
        acts = cls._get_activations(attr, 3, num_directions, "LSTM")
        # cell state
        Cp_0 = inputs[6]
        if Cp_0 is None:
            # Zero-initialized cell state matching the hidden-state shape.
            C_ts = _expr.TupleWrapper(
                _expr.Tuple([_op.zeros_like(H_ts[i]) for i in range(num_directions)]),
                num_directions,
            )
        else:
            if layout == 1:
                Cp_0 = _op.transpose(Cp_0, axes=(1, 0))
            C_ts = _op.split(Cp_0, num_directions)
        # peepholes
        Pp = inputs[7]
        if Pp is not None:
            # ONNX packs peepholes in i-o-f order along axis 1.
            p_i, p_o, p_f = _op.split(Pp, 3, axis=1)
            p_is = _op.split(p_i, num_directions)
            p_fs = _op.split(p_f, num_directions)
            p_os = _op.split(p_o, num_directions)
        weights_dicts = []
        for i in range(num_directions):
            weights_dict = {}
            weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0])
            weights_dict["cell_state"] = _op.squeeze(C_ts[i], axis=[0])
            # Weights permutation: onnx format i-o-f-c, lstm cell format i-f-c-o
            mati, mato, matf, matc = _op.split(_op.squeeze(Ws[i], axis=[0]), 4)
            weights_dict["w_inp"] = _op.concatenate([mati, matf, matc, mato], axis=0)
            mati, mato, matf, matc = _op.split(_op.squeeze(Rs[i], axis=[0]), 4)
            weights_dict["w_hid"] = _op.concatenate([mati, matf, matc, mato], axis=0)
            if Bs is not None:
                # Input and hidden biases are concatenated along the last axis.
                Bi, Bh = _op.split(Bs[i], 2, -1)
                mati, mato, matf, matc = _op.split(_op.squeeze(Bi, axis=[0]), 4)
                weights_dict["b_inp"] = _op.concatenate([mati, matf, matc, mato], axis=0)
                mati, mato, matf, matc = _op.split(_op.squeeze(Bh, axis=[0]), 4)
                weights_dict["b_hid"] = _op.concatenate([mati, matf, matc, mato], axis=0)
            if Pp is not None:
                weights_dict["p_i"] = _op.squeeze(p_is[i], axis=[0])
                weights_dict["p_f"] = _op.squeeze(p_fs[i], axis=[0])
                weights_dict["p_o"] = _op.squeeze(p_os[i], axis=[0])
            weights_dicts.append(weights_dict)
        if num_directions == 2:
            output, H, C = LSTM.bidir_lstm_cell(
                input_seqs=X_steps, weight_dicts=weights_dicts, acts=acts
            )
        else:
            # outputs shape = [seqs_num, (batch_size, hidden_size)]
            outputs, H, C = lstm_cell(
                input_seqs=X_steps, **weights_dicts[0], f_act=acts[0], g_act=acts[1], h_act=acts[2]
            )
            # output shape = (seqs_num, num_directions, batch_size, hidden_size)
            output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)
            H = _op.expand_dims(H, axis=0)
            C = _op.expand_dims(C, axis=0)
        if layout == 1:
            output = _op.transpose(output, axes=(1, 0))
            H = _op.transpose(H, axes=(1, 0))
            C = _op.transpose(C, axes=(1, 0))
        return _expr.TupleWrapper(_expr.Tuple((output, H, C)), 3)
class GRU(RNN):
    """Operator convert for GRU"""
    @classmethod
    def bidir_gru_cell(cls, input_seqs, weight_dicts, acts, sequence_lens=None):
        """
        Bidirectional GRU cell
        """
        seq_len = len(input_seqs)
        forward_outputs, fw_H_t = gru_cell(
            input_seqs,
            **weight_dicts[0],
            rz_act=acts[0],
            n_act=acts[1],
            sequence_lens=sequence_lens,
        )
        reverse_outputs, rev_H_t = gru_cell(
            input_seqs,
            **weight_dicts[1],
            rz_act=acts[2],
            n_act=acts[3],
            backwards=True,
            sequence_lens=sequence_lens,
        )
        # Pair forward step i with reverse step (seq_len - 1 - i) so both
        # directions align on the same time index.
        final_outputs = []
        for i in range(seq_len):
            final_outputs.append(
                _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
            )
        return (_op.stack(final_outputs, axis=0), _op.stack([fw_H_t, rev_H_t], axis=0))
    @classmethod
    def _default_activations(cls, num_directions):
        # ONNX GRU defaults: f=sigmoid (r/z gates), g=tanh (n gate).
        return [_op.sigmoid, _op.tanh] * num_directions
    @classmethod
    def _impl_common(cls, inputs, attr, layout):
        X_steps, H_ts, Ws, Rs, Bs, num_directions, sequence_lens = cls._inputs_helper(
            inputs, layout
        )
        acts = cls._get_activations(attr, 2, num_directions, "GRU")
        linear_before_reset = attr.get("linear_before_reset", 0)
        weights_dicts = []
        for i in range(num_directions):
            weights_dict = {}
            weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0])
            weights_dict["linear_before_reset"] = linear_before_reset
            # Weights permutation: ONNX GRU gate order is z-r-n; gru_cell expects r-z-n.
            matz, matr, matn = _op.split(_op.squeeze(Ws[i], axis=[0]), 3)
            weights_dict["w_inp"] = _op.concatenate([matr, matz, matn], axis=0)
            matz, matr, matn = _op.split(_op.squeeze(Rs[i], axis=[0]), 3)
            weights_dict["w_hid"] = _op.concatenate([matr, matz, matn], axis=0)
            if Bs is not None:
                # Input and hidden biases are concatenated along the last axis.
                Bi, Bh = _op.split(Bs[i], 2, -1)
                matz, matr, matn = _op.split(_op.squeeze(Bi, axis=[0]), 3)
                weights_dict["b_inp"] = _op.concatenate([matr, matz, matn], axis=0)
                matz, matr, matn = _op.split(_op.squeeze(Bh, axis=[0]), 3)
                weights_dict["b_hid"] = _op.concatenate([matr, matz, matn], axis=0)
            weights_dicts.append(weights_dict)
        if num_directions == 2:
            output, H = GRU.bidir_gru_cell(
                input_seqs=X_steps,
                weight_dicts=weights_dicts,
                acts=acts,
                sequence_lens=sequence_lens,
            )
        else:
            # outputs shape = [seqs_num, (batch_size, hidden_size)]
            outputs, H = gru_cell(
                input_seqs=X_steps,
                **weights_dicts[0],
                rz_act=acts[0],
                n_act=acts[1],
                sequence_lens=sequence_lens,
            )
            # output shape = (seqs_num, num_directions, batch_size, hidden_size)
            output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)
            H = _op.expand_dims(H, axis=0)
        if layout == 1:
            output = _op.transpose(output, axes=(1, 0))
            H = _op.transpose(H, axes=(1, 0))
        return _expr.TupleWrapper(_expr.Tuple((output, H)), 2)
class Resize(OnnxOpConverter):
    """Operator converter for Resize"""
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # Opset 10: only a scales input; output size = input shape * scales.
        mode = attr.get("mode").decode("ascii")
        if mode == "nearest":
            method = "nearest_neighbor"
        elif mode == "linear":
            method = "linear"
        elif mode == "cubic":
            method = "cubic"
        else:
            raise tvm.error.OpAttributeInvalid(
                f'Value {mode} in attribute "mode" of operator Resize is not valid.'
            )
        scale = inputs[1]
        size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
        ndims = len(infer_shape(inputs[0]))
        out = None
        # Spatial dims start at index 2 (after N and C).
        if ndims == 3:
            out_size = fold_constant(_op.strided_slice(size, [2], [3]))
            out = _op.image.resize1d(inputs[0], out_size, None, "NCW", method, "asymmetric")
        elif ndims == 4:
            out_size = fold_constant(_op.strided_slice(size, [2], [4]))
            out = _op.image.resize2d(inputs[0], out_size, None, "NCHW", method, "asymmetric")
        elif ndims == 5:
            out_size = fold_constant(_op.strided_slice(size, [2], [5]))
            out = _op.image.resize3d(inputs[0], out_size, None, "NCDHW", method, "asymmetric")
        else:
            raise NotImplementedError("Resize only supports 3, 4, or 5 dims")
        return out
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # Opset 11: inputs are (data, roi, scales[, sizes]); exactly one of
        # scales/sizes must be meaningful.
        scale = inputs[2]
        scale_shape = infer_shape(scale)
        if len(inputs) == 4:
            assert (
                len(scale_shape) == 0 or scale_shape[0] == 0
            ), "One of scale or size should be passed, not both."
            size = inputs[3]
        else:
            assert len(scale_shape) != 0, "One of scale or size should be passed."
            size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
        return cls.v11_13_common(inputs, size, attr, params)
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        scale = inputs[2]
        size = inputs[3]
        # Some versions of onnx exporters produce an opset 13 model with the opset 11
        # resize op, handle that edge case
        if scale is not None and size is not None:
            return cls._impl_v11(inputs, attr, params)
        if size is not None:
            assert scale is None, "One of scale or size should be passed, not both."
        else:
            # Derive the output size from the scales input.
            scale_type = infer_type(scale)
            scale_shape = scale_type.checked_type.shape
            scale_dtype = scale_type.checked_type.dtype
            assert len(scale_shape) != 0, "One of scale or size should be passed."
            size = _op.cast(shape_of(inputs[0]), scale_dtype) * scale
        return cls.v11_13_common(inputs, size, attr, params)
    @classmethod
    def v11_13_common(cls, inputs, size, attr, params):
        """
        Resize v11 and Resize v13 are identical except in how
        they handle the passing of scale and size. This utility
        provides the implementation for both
        """
        roi = inputs[1]
        # An empty roi tensor means "no roi".
        if roi is not None and infer_shape(roi)[0] == 0:
            roi = None
        ndims = len(infer_shape(inputs[0]))
        mode = attr.get("mode").decode("ascii")
        if mode == "nearest":
            method = "nearest_neighbor"
        elif mode == "linear":
            method = "linear"
        elif mode == "cubic":
            method = "cubic"
        else:
            raise tvm.error.OpAttributeInvalid(
                f'Value {mode} in attribute "mode" of operator Resize is not valid.'
            )
        coord_trans = attr.get("coordinate_transformation_mode", b"half_pixel").decode("ascii")
        nearest_mode = attr.get("nearest_mode", b"round_prefer_floor").decode("ascii")
        alpha = attr.get("cubic_coeff_a", -0.75)
        exclude = attr.get("exclude_outside", 0)
        extrapolation_value = attr.get("extrapolation_value", 0.0)
        if roi is not None:
            # ONNX roi is [start_1..start_n, end_1..end_n]; keep only the
            # spatial axes and interleave starts with ends for relay.
            roi = fold_constant(
                _op.concatenate(
                    [
                        _op.strided_slice(roi, [2], [ndims]),
                        _op.strided_slice(roi, [ndims + 2], [2 * ndims]),
                    ],
                    axis=0,
                )
            )
        # Spatial extents only (drop the N and C entries).
        out_size = fold_constant(_op.strided_slice(size, [2], [ndims]))
        out = None
        if ndims == 3:
            out = _op.image.resize1d(
                inputs[0],
                out_size,
                roi,
                "NCW",
                method,
                coord_trans,
                nearest_mode,
                alpha,
                exclude,
                extrapolation_value,
            )
        elif ndims == 4:
            out = _op.image.resize2d(
                inputs[0],
                out_size,
                roi,
                "NCHW",
                method,
                coord_trans,
                nearest_mode,
                alpha,
                exclude,
                extrapolation_value,
            )
        elif ndims == 5:
            out = _op.image.resize3d(
                inputs[0],
                out_size,
                roi,
                "NCDHW",
                method,
                coord_trans,
                nearest_mode,
                alpha,
                exclude,
                extrapolation_value,
            )
        else:
            raise NotImplementedError("Resize only supports 3, 4, or 5 dims")
        return out
class NonZero(OnnxOpConverter):
    """Operator converter for NonZero"""

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        if len(inputs) > 1:
            raise ValueError("Expect 1 input only")
        # argwhere yields (num_nonzero, ndim); ONNX NonZero wants the
        # transposed (ndim, num_nonzero) layout with int64 indices.
        coords = AttrCvt(op_name="argwhere")(inputs, attr, params)
        return _op.transpose(_op.cast(coords, "int64"), axes=(1, 0))
class ReverseSequence(OnnxOpConverter):
    """Operator converter for ReverseSequence"""
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # inputs[1] holds per-batch sequence lengths; relay has a direct match.
        return _op.reverse_sequence(inputs[0], inputs[1], attr["time_axis"], attr["batch_axis"])
class TopK(OnnxOpConverter):
    """Operator converter for TopK"""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) != 2:
            raise ValueError("Expect 2 input only")
        axis = attr.get("axis", -1)
        largest = attr.get("largest", 1)
        if largest == 0:
            # TODO(mbrookhart): optimize this by adding a smallest attribute to topi if this
            # ever becomes a bottleneck
            # Emulate "smallest k": fully sort ascending along ``axis`` and
            # slice off the first k entries (k comes from the second input).
            ndim = len(infer_shape(inputs[0]))
            if axis < 0:
                axis += ndim
            sort = _op.sort(inputs[0], axis=axis)
            argsort = _op.argsort(inputs[0], axis=axis, dtype="int64")
            begin = [0] * ndim
            stride = [1] * ndim
            # End bounds: int64 max everywhere except k on the sorted axis.
            end = _op.concatenate(
                [
                    _op.const([np.iinfo(np.int64).max] * axis, dtype="int64"),
                    inputs[1],
                    _op.const([np.iinfo(np.int64).max] * (ndim - axis - 1), dtype="int64"),
                ],
                axis=0,
            )
            return _expr.TupleWrapper(
                _expr.Tuple(
                    [
                        _op.strided_slice(sort, begin, end, stride),
                        _op.strided_slice(argsort, begin, end, stride),
                    ]
                ),
                2,
            )
        # Largest k maps directly onto relay's topk.
        return _op.topk(inputs[0], inputs[1], axis=axis, dtype="int64")
class Range(OnnxOpConverter):
    """Operator converter for Range"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) != 3:
            raise ValueError("Expect 3 input only")
        # inputs = (start, limit, delta); output dtype follows start.
        start, limit, delta = inputs
        return _op.arange(start, limit, delta, dtype=infer_type(start).checked_type.dtype)
class IsInf(OnnxOpConverter):
    """Operator converter for IsInf"""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        data = inputs[0]
        dtype = infer_type(data).checked_type.dtype
        result = _op.isinf(data)
        # Mask out whichever sign(s) the caller asked to ignore.
        if not attr.get("detect_negative", 1):
            result = result * (data > _op.const(0, dtype))
        if not attr.get("detect_positive", 1):
            result = result * (data < _op.const(0, dtype))
        return result
class Celu(OnnxOpConverter):
    """Operator converter for celu"""

    @classmethod
    def _impl_v12(cls, inputs, attr, params):
        # celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
        data = inputs[0]
        dtype = infer_type(data).checked_type.dtype
        alpha = _op.const(attr.get("alpha", 1.0), dtype)
        zero = _op.const(0, dtype)
        one = _op.const(1, dtype)
        negative_part = _op.minimum(zero, alpha * (_op.exp(data / alpha) - one))
        return _op.maximum(zero, data) + negative_part
class MaxRoiPool(OnnxOpConverter):
    """Operator converter for MaxRoiPool."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Lower ONNX MaxRoiPool to relay vision.roi_pool.

        inputs[0] is the feature map, inputs[1] the ROI tensor.
        """
        # Fixed typo in the assertion message ("MMaxRoiPool" -> "MaxRoiPool").
        assert len(inputs) == 2, f"MaxRoiPool op take 2 inputs, {len(inputs)} given"
        data = inputs[0]
        rois = inputs[1]
        pooled_shape = attr.get("pooled_shape")
        spatial_scale = attr.get("spatial_scale", 1.0)
        return _vision.roi_pool(data, rois, pooled_shape, spatial_scale)
class RoiAlign(OnnxOpConverter):
    """Operator converter for RoiAlign."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) != 3:
            raise ValueError("Expect 3 inputs only")
        x = inputs[0]
        rois = inputs[1]
        batch_indices = inputs[2]
        mode = attr.get("mode", b"avg")
        if mode not in (b"avg", b"max"):
            raise NotImplementedError("RoiAlign in Relay only uses avg and max modes")
        output_height = attr.get("output_height", 1)
        output_width = attr.get("output_width", 1)
        sampling_ratio = attr.get("sampling_ratio", 0)
        spatial_scale = attr.get("spatial_scale", 1.0)
        # Relay's roi_align expects the batch index prepended as the first
        # column of each ROI row, so fuse the two ONNX inputs.
        batch_indices = _op.expand_dims(batch_indices, axis=1, num_newaxis=1)
        batch_indices = _op.cast(batch_indices, infer_type(rois).checked_type.dtype)
        rois = _op.concatenate([batch_indices, rois], 1)
        return _vision.roi_align(
            x, rois, [output_height, output_width], spatial_scale, sampling_ratio, mode=mode
        )
class Clip(OnnxOpConverter):
    """Operator converter for Clip."""
    @staticmethod
    def convert_attributes(inputs, attr, params):
        # Map ONNX min/max attributes onto relay clip's a_min/a_max.
        convert = AttrCvt("clip", transforms={"min": "a_min", "max": "a_max"})
        return convert(inputs, attr, params)
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1: bounds are attributes; missing bounds default to +/-inf.
        if "min" not in attr:
            attr["min"] = -np.inf
        if "max" not in attr:
            attr["max"] = np.inf
        return Clip.convert_attributes(inputs, attr, params)
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # Opset 11: bounds are optional inputs. Fold constant bounds back
        # into attributes so a single relay clip can be emitted.
        if len(inputs) == 3 and isinstance(inputs[2], _expr.Constant):
            attr["max"] = inputs[2].data.numpy().item()
            inputs = inputs[0:2]
        if len(inputs) >= 2 and isinstance(inputs[1], _expr.Constant):
            attr["min"] = inputs[1].data.numpy().item()
            inputs = inputs[0:1]
        if "min" in attr and "max" in attr:
            return Clip.convert_attributes(inputs, attr, params)
        assert len(inputs) <= 3, "Clip-11 takes up to 3 inputs, input, min, max"
        # Dynamic bounds: lower to maximum(input, min), then minimum(.., max).
        result = inputs[0]
        for i, op in enumerate([_op.tensor.maximum, _op.tensor.minimum]):
            if i < len(inputs) - 1:
                if inputs[i + 1] is not None:
                    result = op(result, inputs[i + 1])
        return result
class Softplus(OnnxOpConverter):
    """Operator converter for Softplus."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Lower Softplus: log(exp(x) + 1).

        NOTE(review): this direct form can overflow exp() for large inputs;
        confirm whether a numerically stable variant is needed.
        """
        x = inputs[0]
        one = _expr.const(1, dtype=infer_type(x).checked_type.dtype)
        return _op.log(_op.exp(x) + one)
class Loop(OnnxOpConverter):
    """Operator converter for Loop"""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Convert an ONNX Loop node into a relay while_loop.

        ONNX Loop inputs are [trip_count (M), termination condition, carried
        dependencies...]. The subgraph in attr["body"] is converted with a
        cloned GraphProto scope, and scan outputs are accumulated by
        concatenation along a new leading axis on every iteration.
        """
        max_loop_count = inputs[0]
        cond = inputs[1]
        loop_deps = inputs[2:]
        num_deps = len(loop_deps)
        # Create a copy of the body function to prevent the original
        # from being modified.
        body = copy.copy(attr["body"])
        iter_dtype = infer_type(max_loop_count).checked_type.dtype
        # Determine what condition mode we're in.
        # Either the trip count, the condition, or both may drive termination.
        assert cond is not None or max_loop_count is not None
        is_for_loop = max_loop_count is not None and cond is None
        is_condition_for_loop = cond is not None and max_loop_count is not None
        # Loop inputs will be packed as
        # [iter_count, max_count, condition, loop_deps, scan_outputs]
        def cond_fn(*loop_inputs):
            # Continue while the condition holds and/or i < max_count,
            # depending on which termination mode is active.
            i = loop_inputs[0]
            max_count = loop_inputs[1]
            w = loop_inputs[2]
            if cond is not None:
                out_while = _op.equal(w, _expr.const(True, "bool"))
            if max_loop_count is not None:
                out_loop = _op.less(i, max_count)
            if is_condition_for_loop:
                return _op.logical_and(out_while, out_loop)
            if is_for_loop:
                return out_loop
            return out_while
        # Get the current graph proto and create a clone for the subgraph
        graph_scope = GraphProto.current
        subgraph_scope = GraphProto(
            graph_scope._shape,
            graph_scope._dtype,
            graph_scope._freeze_params,
            graph_scope._op_type_dict,
        )
        # Load nodes from outer graph into inner graph.
        subgraph_scope._nodes = graph_scope._nodes.copy()
        # Create a list of variables for each value updated in the loop.
        def get_var(name, val, scan=False):
            # Build a relay var matching val's inferred type; dims of size 0
            # are treated as dynamic (Any), and scan vars gain a leading
            # dynamic axis for the accumulated iterations.
            checked_type = infer_type(val)
            if hasattr(checked_type, "type_annotation"):
                checked_type = checked_type.type_annotation
            if hasattr(checked_type, "checked_type"):
                checked_type = checked_type.checked_type
            shape = get_const_tuple(checked_type.shape)
            actual_shape = []
            for dim in shape:
                if isinstance(dim, int) and dim == 0:
                    actual_shape.append(_ty.Any())
                else:
                    actual_shape.append(dim)
            if scan:
                return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype)
            return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
        loop_vars = [
            _expr.var(body.input[0].name, shape=(), dtype=iter_dtype),  # iteration count
            _expr.var("max_count", shape=(), dtype=iter_dtype),  # iteration count
            get_var(body.input[1].name, cond),  # exit condition
        ]
        loop_vars += [get_var(body.input[i + 2].name, v) for i, v in enumerate(loop_deps)]
        loop_var_names = [v.name_hint for v in loop_vars]
        # get span information of loop body
        body_source_name = get_source_name(body, subgraph_scope._op_type_dict)
        # set span to inputs of loop body
        for i, v in enumerate(loop_vars):
            loop_vars[i] = set_span(v, make_parameter_span([v.name_hint, body_source_name]))
        num_scan_outputs = len(body.output) - (1 + num_deps)
        # Construct variables and initial empty tensors for any scan outputs.
        # To do this, we'll figure out the output shapes of the body subgraph by importing
        # it and doing type inference.
        scan_output_vars = []
        scan_output_init = []
        if num_scan_outputs > 0:
            with subgraph_scope:
                loop_outputs = subgraph_scope.from_onnx(
                    body, graph_scope.opset, get_output_expr=True
                )
            loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output))
            for i in range(num_scan_outputs):
                name, _, _, _ = get_info(body.output[i + 1 + num_deps])
                output_node = infer_type(loop_outputs[i + 1 + num_deps])
                shape = get_const_tuple(output_node.checked_type.shape)
                dtype = output_node.checked_type.dtype
                scan_output_vars.append(
                    _expr.var(name, shape=([_ty.Any()] * (len(shape) + 1)), dtype=dtype)
                )
                # Start with an empty (length-0) tensor of the right rank.
                scan_output_init.append(
                    _op.reshape(_expr.const(np.array([]).astype(dtype)), [0] + [1] * len(shape))
                )
        # Now we can remove loop iter variables from our inner loop's inputs.
        # This is kind of a hack since we have graph inputs that we don't
        # want to treat as actual inputs.
        while len(body.input) != 0:
            body.input.pop(0)
        # Define the loop body, in this function we need to unpack loop inputs,
        # convert the loop subgraph, and pack outputs for the next iteration.
        def body_fn(*loop_inputs):
            # Unpack inputs
            loop_count = loop_inputs[0]
            max_count = loop_inputs[1]
            cond = loop_inputs[2]
            current_vars = list(loop_inputs[3 : (3 + num_deps)])
            scan_outputs = loop_inputs[(3 + num_deps) :]
            # Prepare body inputs by adding them to node dictionary.
            new_inputs = [loop_count, max_count, cond] + current_vars
            for i, inp in enumerate(new_inputs):
                subgraph_scope._nodes[loop_var_names[i]] = inp
            # Get the output of the current loop using the updated inputs.
            with subgraph_scope:
                loop_outputs = subgraph_scope.from_onnx(
                    body, graph_scope.opset, get_output_expr=True
                )
            # Unpack the body outputs and prepare variables for next iteration.
            new_cond = loop_outputs[0]
            new_loop_vars = [loop_outputs[i] for i in range(1, 1 + num_deps)]
            new_scan_outputs = [loop_outputs[i] for i in range(1 + num_deps, len(loop_outputs))]
            # Add new scan outputs to tracking
            combined_scan_outputs = []
            for i, scan in enumerate(scan_outputs):
                rank = len(infer_shape(scan)) - 1
                new_scan = new_scan_outputs[i]
                expand_scan = _op.expand_dims(new_scan, axis=0)
                # For non scalar outputs we need to broadcast the initial value.
                if rank > 0:
                    new_scan_shape = shape_of(new_scan, dtype=iter_dtype)
                    scan_broadcast = _op.concatenate(
                        [_op.reshape(loop_count, [1]), new_scan_shape], axis=0
                    )
                    scan = _op.broadcast_to(scan, scan_broadcast)
                combined_scan = _op.concatenate([scan, expand_scan], axis=0)
                combined_scan_outputs.append(combined_scan)
            # Increment counter.
            if max_loop_count is not None:
                incr = _expr.const(1, dtype=iter_dtype)
                loop_count = loop_count + incr
            # Pack loop outputs for next iteration
            # [iter_count, cond, loop_deps, loop_scans]
            return [loop_count, max_count, new_cond] + new_loop_vars + combined_scan_outputs
        # Create the loop function.
        loop = fold_constant(_loops.while_loop(cond_fn, loop_vars + scan_output_vars, body_fn))
        # Now need to run initial values through the graph.
        init_count = _expr.const(0, dtype=iter_dtype)
        loop_vals = loop(init_count, max_loop_count, cond, *loop_deps, *scan_output_init)
        # Extract final iteration outputs.
        # Skip the first 3 packed loop vars (iter_count, max_count, cond).
        if num_deps + num_scan_outputs == 1:
            outputs = _expr.TupleGetItem(loop_vals, 3)
        else:
            outputs = _expr.TupleWrapper(
                _expr.Tuple(
                    [
                        _expr.TupleGetItem(loop_vals, i + 3)
                        for i in range(num_deps + num_scan_outputs)
                    ]
                ),
                num_deps + num_scan_outputs,
            )
        # Update outer graph with constants found in the subgraph.
        free_vars = analysis.free_vars(loop)
        graph_scope._params.update(subgraph_scope._params)
        graph_scope._nodes.update(subgraph_scope._nodes)
        for var in free_vars:
            graph_scope._nodes.update({var.name_hint: var})
        return outputs
class If(OnnxOpConverter):
    """Operator converter for If"""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Convert an ONNX If node into a relay If expression.

        Both branch subgraphs are converted in cloned GraphProto scopes and
        their params/nodes are merged back into the outer graph.
        """
        cond = inputs[0]
        # Convert array to bool if needed.
        if len(infer_shape(cond)) > 0:
            cond = _op.take(cond, _expr.const(0, dtype="int64"))
        then_branch = attr.get("then_branch", None)
        else_branch = attr.get("else_branch", None)
        assert then_branch is not None and else_branch is not None
        # Create graph converters for both branches.
        graph_scope = GraphProto.current
        then_graph = GraphProto(
            graph_scope._shape,
            graph_scope._dtype,
            graph_scope._freeze_params,
            graph_scope._op_type_dict,
        )
        then_graph._nodes = graph_scope._nodes.copy()
        else_graph = GraphProto(
            graph_scope._shape,
            graph_scope._dtype,
            graph_scope._freeze_params,
            graph_scope._op_type_dict,
        )
        else_graph._nodes = graph_scope._nodes.copy()
        # Convert each branch to a relay expression.
        with then_graph:
            then_expr = then_graph.from_onnx(then_branch, graph_scope.opset, get_output_expr=True)
        with else_graph:
            else_expr = else_graph.from_onnx(else_branch, graph_scope.opset, get_output_expr=True)
        # Add constants from both branches to parent graph.
        graph_scope._params.update(then_graph._params)
        graph_scope._nodes.update(then_graph._nodes)
        then_free_vars = analysis.free_vars(then_expr)
        for var in then_free_vars:
            graph_scope._nodes.update({var.name_hint: var})
        graph_scope._params.update(else_graph._params)
        graph_scope._nodes.update(else_graph._nodes)
        else_free_vars = analysis.free_vars(else_expr)
        for var in else_free_vars:
            graph_scope._nodes.update({var.name_hint: var})
        # Sometimes pytorch to onnx will insert silly if statements that produce dynamic ranks.
        # Often these dont contribute anything. If we see a dynamic rank output, try to unify
        # them so we can continue without breaking.
        if not isinstance(then_expr, _expr.Tuple) and not isinstance(else_expr, _expr.Tuple):
            then_shape = infer_shape(then_expr)
            else_shape = infer_shape(else_expr)
            if len(then_shape) != len(else_shape):
                warning_msg = (
                    "If statement produced outputs with different rank. "
                    "Attempting to unify ranks but this may produce incorrect results."
                )
                warnings.warn(warning_msg)
                # Broadcast the lower-rank branch up to the higher-rank one.
                if len(then_shape) < len(else_shape):
                    then_expr = _op.broadcast_to_like(then_expr, else_expr)
                else:
                    else_expr = _op.broadcast_to_like(else_expr, then_expr)
        # Now we can construct the relay if statement and return.
        ret = _expr.If(cond, then_expr, else_expr)
        if len(then_branch.output) > 1:
            ret = _expr.TupleWrapper(ret, len(then_branch.output))
        return ret
class Scan(OnnxOpConverter):
    """Operator converter for Scan"""

    @classmethod
    def _impl_v8(cls, inputs, attr, params):
        """Opset-8 Scan carries an explicit batch axis: run the opset-9
        conversion once per batch element and stack the results."""
        # inputs[0] is the optional sequence_lens input of opset 8 and is
        # dropped here; the remaining inputs carry the batch dimension first.
        new_inputs = inputs[1:]
        batch_num = infer_shape(inputs[1])[0]
        out = []
        for i in range(batch_num):
            v9_inputs = [
                _op.take(new_inputs[j], _expr.const(i), axis=0) for j in range(len(new_inputs))
            ]
            results = cls._impl_v9(v9_inputs, attr, params)
            results = [_op.expand_dims(results[j], axis=0) for j in range(len(results))]
            if i == 0:
                out = results
            else:
                out = [_op.concatenate([out[j], results[j]], axis=0) for j in range(len(results))]
        out = _expr.TupleWrapper(_expr.Tuple(out), len(out))
        return out

    @classmethod
    def _impl_v9(cls, inputs, attr, params):
        """Convert an ONNX Scan node into a relay while_loop.

        Scan outputs are pre-allocated as zero tensors of the final size and
        filled via concatenate + strided_slice each iteration (a rolling
        append/prepend depending on scan_output_directions).
        """
        body = attr.get("body")
        num_scan_inputs = attr.get("num_scan_inputs")
        num_all_inputs = len(inputs)
        num_state_inputs = len(body.input) - num_scan_inputs
        num_state_outputs = num_state_inputs
        num_all_outputs = len(body.output)
        num_scan_outputs = num_all_outputs - num_state_outputs
        scan_input_axes = attr.get("scan_input_axes", [0] * num_scan_inputs)
        scan_input_directions = attr.get("scan_input_directions", [0] * num_scan_inputs)
        scan_output_axes = list(attr.get("scan_output_axes", [0] * num_scan_outputs))
        scan_output_directions = attr.get("scan_output_directions", [0] * num_scan_outputs)
        # loop count are the same for all scan inputs, so get loop count by first input scan
        # strided_slice not support dynamic axes, so assume input shape are static
        max_loop_count = infer_shape(inputs[num_state_inputs])[scan_input_axes[0]]
        # Create a copy of the body function to prevent the original
        # from being modified.
        body = copy.copy(attr["body"])
        # Loop inputs will be packed as
        # [iter_count, loop_deps, scan_outputs]
        def cond_fn(*loop_inputs):
            i = loop_inputs[0]
            return _op.less(i, relay.const(max_loop_count, "int32"))
        # Get the current graph proto and create a clone for the subgraph
        graph_scope = GraphProto.current
        subgraph_scope = GraphProto(
            graph_scope._shape,
            graph_scope._dtype,
            graph_scope._freeze_params,
            graph_scope._op_type_dict,
        )
        # Load nodes from outer graph into inner graph.
        subgraph_scope._nodes = graph_scope._nodes.copy()
        # Create a list of variables for each value updated in the loop.
        def get_var(name, val, scan=False):
            # Build a relay var matching val's inferred type; dims of size 0
            # are treated as dynamic (Any).
            checked_type = infer_type(val)
            if hasattr(checked_type, "type_annotation"):
                checked_type = checked_type.type_annotation
            if hasattr(checked_type, "checked_type"):
                checked_type = checked_type.checked_type
            shape = get_const_tuple(checked_type.shape)
            actual_shape = []
            for dim in shape:
                if isinstance(dim, int) and dim == 0:
                    actual_shape.append(_ty.Any())
                else:
                    actual_shape.append(dim)
            if scan:
                return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype)
            return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
        # Construct variables and initial empty tensors for any scan outputs.
        # To do this, we'll figure out the output shapes of the body subgraph by importing
        # it and doing type inference.
        scan_output_vars = []
        scan_output_init = []
        if num_scan_outputs > 0:
            with subgraph_scope:
                loop_outputs = subgraph_scope.from_onnx(
                    body, graph_scope.opset, get_output_expr=True
                )
            loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output))
            for i in range(num_scan_outputs):
                name, _, _, _ = get_info(body.output[i + num_state_outputs])
                output_node = infer_type(loop_outputs[i + num_state_outputs])
                shape = list(get_const_tuple(output_node.checked_type.shape))
                # Normalize negative scan output axes against the rank of the
                # accumulated (one-higher-rank) output.
                if scan_output_axes[i] < 0:
                    scan_output_axes[i] = len(shape) + scan_output_axes[i] + 1
                shape.insert(scan_output_axes[i], max_loop_count)
                dtype = output_node.checked_type.dtype
                scan_output_vars.append(_expr.var(name, shape=shape, dtype=dtype))
                scan_output_init.append(_op.zeros(shape, dtype))
        # loop vars = [iter_count, scan_state, scan_out]
        loop_vars = [_expr.var("iter", shape=(), dtype="int32")]  # iteration count
        loop_vars += [
            get_var(body.input[i].name, v) for i, v in enumerate(inputs) if i < num_state_inputs
        ]
        # get span information of scan body
        body_source_name = get_source_name(body, subgraph_scope._op_type_dict)
        # set span to inputs of scan body
        for i, v in enumerate(loop_vars):
            loop_vars[i] = set_span(v, make_parameter_span([v.name_hint, body_source_name]))
        loop_vars += scan_output_vars
        body_input_var_names = ["iter"] + [body.input[i].name for i in range(len(body.input))]
        # # Now we can remove loop iter variables from our inner loop's inputs.
        # # This is kind of a hack since we have graph inputs that we don't
        # # want to treat as actual inputs.
        while len(body.input) != 0:
            body.input.pop(0)
        # Define the loop body, in this function we need to unpack loop inputs,
        # convert the loop subgraph, and pack outputs for the next iteration.
        def body_fn(*loop_inputs):
            # Unpack inputs
            loop_count = loop_inputs[0]
            state_vars = list(loop_inputs[1 : 1 + num_state_inputs])
            scan_vars = list(loop_inputs[1 + num_state_inputs :])
            # body take scan graph scan inputs as original input
            input_scan_exprs = []
            for i in range(num_state_inputs, num_all_inputs):
                # direction != 0 means iterate the scan input in reverse.
                if scan_input_directions[i - num_state_inputs] != 0:
                    input_scan_exprs.append(
                        relay.take(
                            inputs[i],
                            relay.const(max_loop_count - 1, "int32") - loop_count,
                            axis=scan_input_axes[i - num_state_inputs],
                        )
                    )
                else:
                    input_scan_exprs.append(
                        relay.take(
                            inputs[i], loop_count, axis=scan_input_axes[i - num_state_inputs]
                        )
                    )
            # Prepare body inputs by adding them to node dictionary.
            body_inputs = [loop_count] + state_vars + input_scan_exprs
            for i, inp in enumerate(body_inputs):
                subgraph_scope._nodes[body_input_var_names[i]] = inp
            # Get the output of the current loop using the updated inputs.
            with subgraph_scope:
                loop_outputs = subgraph_scope.from_onnx(
                    body, graph_scope.opset, get_output_expr=True
                )
            # Unpack the body outputs and prepare variables for next iteration.
            new_state_vars = [loop_outputs[i] for i in range(num_state_outputs)]
            new_scan_vars = [loop_outputs[i] for i in range(num_state_outputs, num_all_outputs)]
            # Add new scan outputs to tracking
            combined_scan_outputs = []
            for i in range(num_scan_outputs):
                if scan_output_directions[i] == 0:
                    # append new scan output
                    combined_scan = _op.concatenate(
                        [scan_vars[i], _op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i])],
                        axis=scan_output_axes[i],
                    )
                    # pop head scan output
                    combined_scan = _op.strided_slice(
                        combined_scan,
                        begin=[1],
                        end=[max_loop_count + 1],
                        strides=[1],
                        axes=[scan_output_axes[i]],
                    )
                else:
                    # prepend new scan output
                    combined_scan = _op.concatenate(
                        [_op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i]), scan_vars[i]],
                        axis=scan_output_axes[i],
                    )
                    # pop tail scan output
                    combined_scan = _op.strided_slice(
                        combined_scan,
                        begin=[0],
                        end=[max_loop_count],
                        strides=[1],
                        axes=[scan_output_axes[i]],
                    )
                combined_scan_outputs.append(combined_scan)
            incr = _expr.const(1, dtype="int32")
            loop_count = loop_count + incr
            # Pack loop outputs for next iteration
            # [iter_count, state_var, scan_var]
            return [loop_count] + new_state_vars + combined_scan_outputs
        # Create the loop function.
        loop = fold_constant(_loops.while_loop(cond_fn, loop_vars, body_fn))
        # Now need to run initial values through the graph.
        init_count = _expr.const(0, dtype="int32")
        input_states = [inputs[i] for i in range(num_state_inputs)]
        loop_vals = loop(init_count, *input_states, *scan_output_init)
        # Skip the iteration counter (index 0) when extracting outputs.
        outputs = _expr.TupleWrapper(
            _expr.Tuple([_expr.TupleGetItem(loop_vals, i + 1) for i in range(num_all_outputs)]),
            num_all_outputs,
        )
        # Update outer graph with constants found in the subgraph.
        free_vars = analysis.free_vars(loop)
        graph_scope._params.update(subgraph_scope._params)
        graph_scope._nodes.update(subgraph_scope._nodes)
        for var in free_vars:
            graph_scope._nodes.update({var.name_hint: var})
        return outputs
class LinearRegressor(OnnxOpConverter):
    """Operator converter for LinearRegressor."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        """Lower the ONNX-ML LinearRegressor to a dense (+ bias) op.

        Coefficients come in flat and are reshaped to
        (targets, features); extra trailing input features beyond the
        coefficient count are dropped.
        """
        data = inputs[0]
        num_targets = attr.get("targets", 1)
        coefficients = _expr.const(list(attr.get("coefficients", 0)), dtype="float32")
        num_features = infer_shape(coefficients)[0] // num_targets
        coefficients = _op.reshape(coefficients, (num_targets, num_features))
        if num_features < infer_shape(data)[-1]:
            data = _op.split(data, [num_features], -1)[0]
        mm_out = _op.nn.dense(data, coefficients)
        if "intercepts" not in attr:
            return mm_out
        intercepts = _expr.const(list(attr.get("intercepts", 0)), dtype="float32")
        if num_targets == 1:
            return _op.nn.bias_add(mm_out, intercepts, axis=-1)
        return get_relay_op("add")(mm_out, intercepts)
class DFT(OnnxOpConverter):
    """Operator converter for discrete Fourier transform (DFT)."""

    @classmethod
    def _impl_v17(cls, inputs, attr, params):
        """Lower ONNX DFT-17 onto relay's dft op.

        The input's last axis holds the real/imag components (size 1 or 2).
        The transform axis is swapped to the last data axis, transformed,
        then swapped back; `onesided` keeps only the first n//2 + 1 bins.
        """
        # ************************* Read attrs *************************
        axis = attr.get("axis")
        inverse = attr.get("inverse")
        onesided = attr.get("onesided")
        # ************************* Read inputs ************************
        input_tensor = inputs[0]
        dft_length = inputs[1]
        # ************************* Parse inputs ***********************
        t1 = ["float16", "float32", "float64"]
        t2 = ["int32", "int64"]
        # input
        assert infer_type(input_tensor).checked_type.dtype in t1
        input_shape = infer_shape(input_tensor)
        assert len(input_shape) >= 3
        if axis < 0:
            axis = len(input_shape) + axis
        # Axis 0 (batch) and the trailing component axis are not valid targets.
        assert 1 <= axis <= len(input_shape) - 1, "axis is out of bounds"
        # dft_length
        if dft_length is None:
            dft_length = input_shape[axis]
        else:
            dft_length_dtype = infer_type(dft_length).checked_type.dtype
            assert dft_length_dtype in t2
            # dft_length must be resolvable to a constant at import time.
            dft_length = int(infer_value(dft_length, params).numpy())
        # ************************
        input_tensor = cls._maybe_crop_or_pad(input_tensor, axis, dft_length)
        swap_axis = -1
        re_input_tensor, im_input_tensor = cls._split_real_and_imag_parts(input_tensor)
        re_input_tensor = cls._swap_axes(re_input_tensor, axis, swap_axis)
        im_input_tensor = cls._swap_axes(im_input_tensor, axis, swap_axis)
        re_input_tensor, im_input_tensor = _op.dft(re_input_tensor, im_input_tensor, inverse)
        re_input_tensor = cls._swap_axes(re_input_tensor, axis, swap_axis)
        im_input_tensor = cls._swap_axes(im_input_tensor, axis, swap_axis)
        if onesided:
            re_input_tensor = cls._crop_onesided(re_input_tensor, axis)
            im_input_tensor = cls._crop_onesided(im_input_tensor, axis)
        return cls._merge_real_and_imag_parts(re_input_tensor, im_input_tensor)

    @classmethod
    def _crop_axis(cls, tensor, axis, new_dim):
        # Slice `axis` down to new_dim, leaving all other axes untouched.
        shape = infer_shape(tensor)
        slices = [slice(0, a, 1) for a in shape]
        slices[axis] = slice(0, new_dim, 1)
        return _op.strided_slice(
            tensor,
            begin=[s.start for s in slices],
            end=[s.stop for s in slices],
            strides=[s.step for s in slices],
            axes=list(range(len(shape))),
        )

    @classmethod
    def _maybe_crop_or_pad(cls, input_tensor, axis, n_fft):
        # Force the transform axis to length n_fft: crop when too long,
        # zero-pad at the end when too short.
        shape = infer_shape(input_tensor)
        if shape[axis] != n_fft:
            if shape[axis] > n_fft:
                return cls._crop_axis(input_tensor, axis, n_fft)
            else:
                pad_width = [(0, 0)] * len(shape)
                pad_width[axis] = (0, n_fft - shape[axis])
                return _op.nn.pad(input_tensor, pad_width)
        return input_tensor

    @classmethod
    def _swap_axes(cls, tensor, axis1, axis2):
        # Transpose with axis1 and axis2 exchanged.
        permutation = list(range(len(infer_shape(tensor))))
        permutation[axis1] = axis2
        permutation[axis2] = axis1
        return _op.transpose(tensor, permutation)

    @classmethod
    def _split_real_and_imag_parts(cls, tensor):
        # Last axis of size 1 means pure real input: synthesize zero imag.
        shape = infer_shape(tensor)
        dtype = infer_type(tensor).checked_type.dtype
        if shape[-1] == 1:
            re = tensor
            im = _op.const(np.zeros(shape), dtype=dtype)
        else:
            re, im = _op.split(tensor, 2, -1)
        return _op.squeeze(re, -1), _op.squeeze(im, -1)

    @classmethod
    def _merge_real_and_imag_parts(cls, re, im):
        # Re-pack real/imag as a trailing axis of size 2.
        re = _op.expand_dims(re, axis=-1)
        im = _op.expand_dims(im, axis=-1)
        return _op.concatenate([re, im], axis=-1)

    @classmethod
    def _crop_onesided(cls, tensor, axis):
        # Keep only the non-redundant half of the spectrum (n//2 + 1 bins).
        shape = infer_shape(tensor)
        return cls._crop_axis(tensor, axis, shape[axis] // 2 + 1)
class NonMaxSuppression(OnnxOpConverter):
    """Operator converter for NonMaxSuppression."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        """Lower ONNX NMS onto relay's all_class_non_max_suppression.

        Handles the center_point_box layout by converting boxes to corner
        form, defaults missing thresholds to 0, and squeezes 1-D threshold
        tensors down to scalars as relay requires.
        """
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold = inputs[:5]
        boxes_dtype = infer_type(boxes).checked_type.dtype

        # center_point_box != 0: boxes are (xc, yc, w, h); convert to corners.
        if attr.get("center_point_box", 0) != 0:
            xc, yc, w, h = _op.split(boxes, 4, axis=2)
            two = _expr.const(2.0, boxes_dtype)
            half_w = w / two
            half_h = h / two
            boxes = _op.concatenate(
                [yc - half_h, xc - half_w, yc + half_h, xc + half_w], axis=2
            )

        if iou_threshold is None:
            iou_threshold = _expr.const(0.0, dtype="float32")
        if score_threshold is None:
            score_threshold = _expr.const(0.0, dtype="float32")

        def as_scalar(x):
            # relay wants scalar thresholds; squeeze a rank-1 tensor of size 1.
            rank = len(infer_shape(x))
            assert rank <= 1, "nms thresholds must be scalars"
            return _op.squeeze(x, [0]) if rank == 1 else x

        nms_out = _op.vision.all_class_non_max_suppression(
            boxes,
            scores,
            as_scalar(max_output_boxes_per_class),
            as_scalar(iou_threshold),
            as_scalar(score_threshold),
        )
        # nms_out[1] holds the valid count; slice the indices down to it.
        return _op.strided_slice(nms_out[0], _op.const([0], dtype="int64"), nms_out[1])
class ATen(OnnxOpConverter):
    """Operator converter for Pytorch ATen ops."""

    @classmethod
    def _op_dispatch(cls, operator, inputs, attr, params):
        # Dispatch table from the ATen operator name to its converter.
        op_map = {
            "size": cls._size,
            "arange": cls._arange,
            "index_put": cls._index_put,
            "reshape": cls._reshape,
            "embedding_bag": cls._embedding_bag,
        }
        assert operator in op_map, f"Operator {operator} is not supported."
        return op_map[operator](inputs, attr, params)

    @classmethod
    def _size(cls, inputs, attr, params):
        # Return the size of the last dimension of the input.
        return _op.take(
            _op.shape_of(inputs[0], dtype="int64"),
            _expr.const(-1, dtype="int64"),
            axis=0,
            mode="wrap",
        )

    @classmethod
    def _arange(cls, inputs, attr, params):
        # inputs are (start, stop, step).
        return _op.arange(inputs[0], inputs[1], inputs[2], dtype="int64")

    @classmethod
    def _check_index(cls, indices, values):
        """Flatten/broadcast indices and values for scatter_nd when the
        values tensor is not already one-dimensional."""

        def unfolding_indices(indices, values):
            n = len(indices)
            flatten_indices = []
            slices_size = []
            for index in indices:
                flatten_indices.append(_op.reshape(index, _op.const([-1])))
                slices_size.append(infer_shape(flatten_indices[-1])[0])
            repeat_size = [1]
            tile_size = [1]
            for i in range(1, n):
                repeat_size.append(slices_size[-i] * repeat_size[-1])
                tile_size.append(slices_size[i - 1] * tile_size[-1])
            repeat_size.reverse()
            # Fixed typo: "unflod" -> "unfold".
            unfold_slices = []
            for i in range(n):
                unfold_slices.append(
                    fold_constant(
                        _op.repeat(_op.tile(flatten_indices[i], (tile_size[i],)), repeat_size[i], 0)
                    )
                )
            return unfold_slices, _op.reshape(values, _op.const([-1]))

        values_shape = infer_shape(values)
        if len(values_shape) != 1:
            return unfolding_indices(indices, values)
        return indices, values

    @classmethod
    def _index_put(cls, inputs, attr, params):
        # inputs: [tensor, index_0, ..., index_n, values, accumulate_flag]
        in_tensor = inputs[0]
        indices, values = cls._check_index(inputs[1 : len(inputs) - 2], inputs[len(inputs) - 2])
        # Use .numpy() for consistency with the rest of this file
        # (.asnumpy() is the deprecated alias).
        accumulate = inputs[len(inputs) - 1].data.numpy() != 0
        if not accumulate:
            mode = "update"
        else:
            mode = "add"
        index_tensor = _op.stack(indices, axis=0)
        return _op.scatter_nd(in_tensor, index_tensor, values, mode)

    @classmethod
    def _reshape(cls, inputs, attr, params):
        return _op.reshape(inputs[0], inputs[1])

    @classmethod
    def _embedding_bag(cls, inputs, attr, params):
        # mode attribute: 0 = sum, 1 = mean (default), 2 = max.
        mode_map = {0: _op.sum, 1: _op.mean, 2: _op.max}
        mode = attr.get("mode", 1)
        reduction_fn = mode_map[mode]
        weights, indices, offsets = inputs[0], inputs[1], inputs[2]
        # Reshape flat indices to (num_bags, -1) using the offsets length.
        offsets_shape = _op.shape_of(offsets, dtype="int64")
        indices_shape = _op.stack(
            [
                _op.take(offsets_shape, _expr.const(0, dtype="int64")),
                _expr.const(-1, dtype="int64"),
            ],
            axis=0,
        )
        indices = _op.reshape(indices, indices_shape)
        embedding = _op.take(weights, indices.astype("int64"), axis=0)
        rembedding = reduction_fn(embedding, axis=1)
        # EmbeddingBag has 4 outputs for some reason despite only one ever being used.
        # Fill the rest with 0s.
        unused_output = _expr.const(0, dtype="float32")
        return _expr.TupleWrapper(
            _expr.Tuple((rembedding, unused_output, unused_output, unused_output)), 4
        )

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # The ATen operator name arrives as bytes in the "operator" attribute.
        operator = attr.get("operator", None).decode("utf-8")
        assert operator, "ATen Operator not found"
        return cls._op_dispatch(operator, inputs, attr, params)
class QuantizeLinear(OnnxOpConverter):
    """Operator converter for QuantizeLinear."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # Opset 10: per-tensor quantization only (axis fixed at 0).
        data, scale, zero_point = inputs
        target_dtype = infer_type(zero_point).checked_type.dtype
        return _qnn.op.quantize(data, scale, _op.cast(zero_point, "int32"), 0, target_dtype)

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Opset 13 adds per-axis quantization via the "axis" attribute.
        data, scale, zero_point = inputs
        target_dtype = infer_type(zero_point).checked_type.dtype
        # For 0-D/1-D data there is no channel axis to quantize along.
        quant_axis = attr.get("axis", 1) if len(infer_shape(data)) >= 2 else 0
        return _qnn.op.quantize(
            data, scale, _op.cast(zero_point, "int32"), quant_axis, target_dtype
        )
class DequantizeLinear(OnnxOpConverter):
    """Operator converter for DequantizeLinear."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # Opset 10: per-tensor dequantization only (axis fixed at 0).
        data, scale, zp = inputs
        return _qnn.op.dequantize(data, scale, _op.cast(zp, "int32"), 0)

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        # Opset 13 adds per-axis dequantization via the "axis" attribute.
        data, scale, zp = inputs
        axis = attr.get("axis", 1)
        # For 0-D/1-D data there is no channel axis to dequantize along.
        if len(infer_shape(data)) <= 1:
            axis = 0
        return _qnn.op.dequantize(data, scale, _op.cast(zp, "int32"), axis)
class DynamicQuantizeLinear(OnnxOpConverter):
    """Operator converter for DynamicQuantizeLinear."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """This op is deprecated and only supports uint8"""
        data = inputs[0]
        data_dtype = infer_type(data).checked_type.dtype
        zero = _op.const(0, dtype=data_dtype)
        # Range must include 0 so that 0 is exactly representable.
        maximum = _op.maximum(zero, _op.max(data))
        minimum = _op.minimum(zero, _op.min(data))
        # uint8 range is [0, 255].
        scale = (maximum - minimum) / _op.const(255, dtype=data_dtype)
        zp = zero - _op.min(data) / scale
        zp = _op.cast(_op.round(_op.clip(zp, 0, 255)), "uint8")
        # Returns (quantized data, scale, zero point) as per the ONNX spec.
        return _expr.TupleWrapper(
            _expr.Tuple(
                [_qnn.op.quantize(data, scale, _op.cast(zp, "int32"), 0, "uint8"), scale, zp]
            ),
            size=3,
        )
class QLinearConv(OnnxOpConverter):
    """Operator converter for QLinearConv."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        """Lower QLinearConv to qnn.conv2d plus a requantize (or
        dequantize/quantize when the output scale is not constant).

        Inputs follow the ONNX layout:
        [x, x_scale, x_zp, w, w_scale, w_zp, y_scale, y_zp, (bias)].
        Weight scale/zero-point may be scalars or 1-D tensors for
        per-output-channel quantization.
        """
        data = inputs[0]
        x_scale = get_scalar(inputs[1], params)
        x_zero_point = get_scalar(inputs[2], params, "int32")
        weight = inputs[3]
        w_scale = get_scalar_or_1d_tensor(inputs[4], params)
        w_zero_point = get_scalar_or_1d_tensor(inputs[5], params, "int32")
        y_scale = fold_constant(get_scalar(inputs[6], params))
        y_zero_point = get_scalar(inputs[7], params, "int32")
        # Check shapes for per channel quantization
        w_scale_shape = infer_shape(w_scale)
        w_zero_point_shape = infer_shape(w_zero_point)
        if len(w_scale_shape) == 1 or len(w_zero_point_shape) == 1:
            m = infer_shape(weight)[0]
            if m != w_scale_shape[0] or m != w_zero_point_shape[0]:
                raise tvm.error.OpAttributeInvalid(
                    "The number of elements should be equal to the number of output channels"
                )
        input_shape = infer_shape(data)
        ndim = len(input_shape)
        kernel_type = infer_type(weight)
        kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = kernel_shapes[0][2:]
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                # Pad with the input zero point so padding is quantized "zero".
                zp = fold_constant(x_zero_point)
                assert isinstance(zp, relay.Constant), "Zero point expected to be a constant"
                data = autopad(
                    data,
                    attr.get("strides", [1] * (ndim - 2)),
                    attr["kernel_shape"],
                    attr.get("dilations", [1] * (ndim - 2)),
                    pad_value=zp.data,
                    mode=attr["auto_pad"],
                )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = (
                    f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv '
                    f"is invalid."
                )
                raise tvm.error.OpAttributeInvalid(msg)
            attr.pop("auto_pad")
        out_channels = kernel_shapes[0][0]
        dilation = attr.get("dilations", [1] * (ndim - 2))
        strides = attr.get("strides", [1] * (ndim - 2))
        padding = attr["pads"] if "pads" in attr else 0
        groups = attr["group"] if "group" in attr else 1
        if ndim != 4:
            raise tvm.error.OpAttributeInvalid(
                "Only 2D kernels are supported for operator QLinearConv."
            )
        out = _qnn.op.conv2d(
            data,
            weight,
            x_zero_point,
            w_zero_point,
            x_scale,
            w_scale,
            kernel_size=attr["kernel_shape"],
            channels=out_channels,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
        )
        # Optional 9th input is the (already-quantized int32) bias.
        use_bias = len(inputs) == 9
        if use_bias:
            out = _op.nn.bias_add(out, inputs[8])
        out_dtype = infer_type(inputs[7]).checked_type.dtype
        # conv output is in scale x_scale * w_scale with zero point 0.
        requantize_scale = _op.multiply(x_scale, w_scale)
        # requantize requires y_scale to be constant,
        # if y_scale is not constant, doing dequantize -> quantize
        if isinstance(y_scale, _expr.Constant):
            out = _qnn.op.requantize(
                out,
                requantize_scale,
                _op.const(0, dtype="int32"),
                y_scale,
                y_zero_point,
                out_dtype=out_dtype,
                axis=1,
            )
        else:
            out = _qnn.op.dequantize(out, requantize_scale, _op.const(0, dtype="int32"), axis=1)
            out = _qnn.op.quantize(out, y_scale, y_zero_point, axis=1, out_dtype=out_dtype)
        return out
class QGemm(OnnxOpConverter):
    """Operator converter for QGemm.

    Computes alpha * A' * B' + C in the quantized domain via qnn.dense,
    then requantizes to the output scale/zero-point (or leaves the result
    in float32 when no output scale is given).
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.QGemm
        a = inputs[0]
        a_scale = get_scalar(inputs[1], params)
        a_zp = get_scalar(inputs[2], params, "int32")
        b = inputs[3]
        # must be a scalar or 1D tensor which means a per-tensor or per-column quantization
        # If 1-D tensor, number of elements should be equal to columns elements of input B
        b_scale = get_scalar_or_1d_tensor(inputs[4], params)
        b_zp = get_scalar_or_1d_tensor(inputs[5], params, "int32")
        # note that if optional and not provided then value will be None.
        C = inputs[6]
        # must be null or a scalar or 1D tensor of size 1
        y_scale = inputs[7]
        # must be null or a scalar or 1D tensor of size 1
        y_zp = get_scalar(inputs[8], params, "int32")
        # QGemm is defined only for rank-2 operands.
        assert len(infer_shape(a)) == 2
        assert len(infer_shape(b)) == 2
        # zero point and scale of input b should have same shape size
        assert infer_shape(b_scale) == infer_shape(b_zp)
        alpha = float(attr.get("alpha", 1.0))
        transA = int(attr.get("transA", 0))
        transB = int(attr.get("transB", 0))
        # get number of channels
        channels = infer_channels(b, not transB)
        a_dtype = infer_type(a).checked_type.dtype
        # qnn.dense expects B already transposed, hence the inverted transB handling.
        if transA:
            a = _op.transpose(a, axes=(1, 0))
        if not transB:
            b = _op.transpose(b, axes=(1, 0))
        result = _qnn.op.dense(a, b, a_zp, b_zp, a_scale, b_scale, channels)
        # C (bias) is optional; when absent (None) the add is skipped.
        if C:
            result = _op.add(result, C)
        # The accumulator's effective scale is a_scale * b_scale; fold alpha into it
        # so the requantize applies the alpha multiplier for free.
        requantize_scale = _op.multiply(a_scale, b_scale)
        if alpha != 1.0:
            requantize_scale *= _expr.const(alpha, dtype="float32")
        requantize_zp = _op.const(0, dtype="int32")
        if y_scale:
            # requantize requires y_scale to be constant,
            # if y_scale is not constant, doing dequantize -> quantize
            if isinstance(y_scale, _expr.Constant):
                y = _qnn.op.requantize(
                    result,
                    requantize_scale,
                    requantize_zp,
                    y_scale,
                    y_zp,
                    axis=-1,
                    rounding="TONEAREST",
                    out_dtype=a_dtype,
                )
            else:
                result_deq = _qnn.op.dequantize(result, requantize_scale, requantize_zp, axis=0)
                y = _qnn.op.quantize(result_deq, y_scale, y_zp, axis=0, out_dtype=a_dtype)
        else:
            # No output scale provided: return the dequantized (float32) result.
            y = _op.multiply(_op.cast(result, "float32"), requantize_scale)
        return y
class QLinearAdd(OnnxOpConverter):
    """Operator converter for QLinearAdd from Microsoft onnxruntime contrib opset."""
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # onnxruntime does not perform this op in the integer domain; it
        # dequantizes both operands to fp32, adds, then requantizes:
        # https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qladd.cpp
        a_scale = get_scalar(inputs[1], params)
        a_zero_point = get_scalar(inputs[2], params, "int32")
        b_scale = get_scalar(inputs[4], params)
        b_zero_point = get_scalar(inputs[5], params, "int32")
        c_scale = get_scalar(inputs[6], params)
        c_zero_point = get_scalar(inputs[7], params, "int32")
        out_dtype = infer_type(inputs[0]).checked_type.dtype
        lhs = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)
        rhs = _qnn.op.dequantize(inputs[3], b_scale, b_zero_point)
        fp32_sum = _op.add(lhs, rhs)
        return _qnn.op.quantize(fp32_sum, c_scale, c_zero_point, out_dtype=out_dtype)
class QLinearMatMul(OnnxOpConverter):
    """
    Operator converter for QLinearMatMul from Microsoft onnxruntime contrib opset.

    Limitations:
    - Not guaranteed to meet the integer-overflow behavior stipulated in the
      ONNX documentation for this operator.

    The QLinearMatMul converter is re-used for MatMulInteger and is adapted for
    the latter with the optional `expected_out_dtypes` argument.
    """
    @classmethod
    def _impl_v10(cls, inputs, attr, params, expected_out_dtypes=None):
        if expected_out_dtypes is None:
            # The default QLinearMatMul converter is expected to have one of
            # these output dtypes.
            expected_out_dtypes = ["int8", "uint8"]
        # Some of the ops used below take scalar-like inputs, and may require either
        # of the following:
        #
        # - the input is Const node (not merely an expression that *could* be reduced
        # to a single Const at graph-compilation time)
        #
        # - the input has a specific dtype
        #
        # This function attempts to present 'x' in a form that meets both of those
        # requirements.
        def try_resolve_to_const(x, dtype_override=None):
            """Best-effort fold of `x` into a scalar-shaped Constant, optionally cast
            to `dtype_override`."""
            x2 = try_resolve_var_to_const(x, params)
            num_elem = np.prod(infer_shape(x))
            if num_elem == 1:
                x2 = ensure_scalar_shape(x2)
            x_dtype = infer_type(x).checked_type.dtype
            if (dtype_override is not None) and (dtype_override != x_dtype):
                x2 = _op.cast(x2, dtype_override)
            x3 = fold_constant(x2)
            return x3
        # Unpack the inputs and obtain some type info...
        a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp = inputs
        a_type = infer_type(a).checked_type  # 'T1' in ONNX doc for this op
        a_scale_type = infer_type(a_scale).checked_type
        a_zp_type = infer_type(a_zp).checked_type
        b_type = infer_type(b).checked_type  # 'T2' in ONNX doc for this op
        b_scale_type = infer_type(b_scale).checked_type
        b_zp_type = infer_type(b_zp).checked_type
        y_scale_type = infer_type(y_scale).checked_type
        y_zp_type = infer_type(y_zp).checked_type  # 'T3' in ONNX doc for this op
        # Verify type assumptions, based on the ONNX doc for this op...
        assert a_type.dtype in ["int8", "uint8"]
        assert a_scale_type.dtype == "float32"
        assert a_zp_type.dtype == a_type.dtype
        assert b_type.dtype in ["int8", "uint8"]
        assert b_scale_type.dtype == "float32"
        assert b_zp_type.dtype == b_type.dtype
        assert y_scale_type.dtype == "float32"
        assert y_zp_type.dtype in expected_out_dtypes
        # _qnn.op.dense requires the zero-point values to have dtype int32.
        a_scale_scalar = try_resolve_to_const(a_scale)
        a_zp_scalar = try_resolve_to_const(a_zp, "int32")
        b_scale_scalar = try_resolve_to_const(b_scale)
        b_zp_scalar = try_resolve_to_const(b_zp, "int32")
        y_scale_scalar = try_resolve_to_const(y_scale)
        y_zp_scalar = try_resolve_to_const(y_zp, "int32")
        # TODO: Confirm that we're using 'num_hidden_units' correctly / as intended with
        # the '_qnn.op.dense' instance below.
        num_hidden_units = infer_shape(b)[-1]
        # - Specify the matmul result dtype as int32, so that hopefully the matmul will use
        # a 32-bit accumulator as seems to be required by the ONNX op's documentation.
        #
        # TL;DR:
        # The ONNX documentation for this op is clear about acceptable overflow
        # behavior during the matmul operation:
        # - The scalar multiplication ops MAY NOT overflow.
        # - The scalar addition ops, which sum the results of the scalar multiplication,
        # MAY overflow, but if they do so, it must behave as one would expect during
        # 32-bit integer-addition overflow.
        # As of this writing, Relay's qnn.op.dense operator doesn't expose a way for us to
        # express these constraints.
        #
        # TODO: Extend TVM / Relay / TIR / etc. to allow this kind of constraint to be
        # expressed in a Relay graph. And then update this importer and various TVM
        # backends accordingly.
        matmul_result_dtype = "int32"
        # TODO(vvchernov): possibly it is better to use unsigned type for result
        # if input types are unsigned:
        # if a_type.dtype == "uint8" and b_type.dtype == "uint8":
        # matmul_result_dtype = "uint32"
        matmul_result = qmatmul(
            a,
            b,
            a_zp_scalar,
            b_zp_scalar,
            a_scale_scalar,
            b_scale_scalar,
            num_hidden_units,
            matmul_result_dtype,
        )
        # This information might only be found in the C++ code-comments for the
        # dense.matmul op, but the quantized tensor returned by _qnn.op.dense
        # has scale==(a_scale_scalar * b_scale_scalar), and zero_point==0.
        #
        # 'matmul_result_zp_scalar' has type 'int32' to satisfy input requirements
        # of the [de/re]quantize ops below.
        matmul_result_scale_scalar = fold_constant(_op.multiply(a_scale_scalar, b_scale_scalar))
        matmul_result_zp_scalar = _op.const(0, dtype="int32")
        if "int32" in expected_out_dtypes:
            # This is the adaptation of the QLinearMatMul converter for MatMulInteger,
            # in the MatMulInteger case we skip the unnecessary requantization step.
            return matmul_result
        # requantize requires y_scale to be constant,
        # if y_scale is not constant, doing dequantize -> quantize
        if isinstance(y_scale_scalar, _expr.Constant):
            y = _qnn.op.requantize(
                matmul_result,
                matmul_result_scale_scalar,
                matmul_result_zp_scalar,
                y_scale_scalar,
                y_zp_scalar,
                axis=-1,
                rounding="TONEAREST",
                out_dtype=y_zp_type.dtype,
            )
        else:
            matmul_result_deq = _qnn.op.dequantize(
                matmul_result, matmul_result_scale_scalar, matmul_result_zp_scalar, axis=0
            )
            y = _qnn.op.quantize(
                matmul_result_deq, y_scale_scalar, y_zp_scalar, axis=0, out_dtype=y_zp_type.dtype
            )
        return y
class MatMulInteger(OnnxOpConverter):
    """Operator converter for MatMulInteger."""
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # MatMulInteger is QLinearMatMul with unit scales and an int32 result,
        # so validate the inputs and delegate to the QLinearMatMul converter.
        lhs, rhs = inputs[0], inputs[1]
        lhs_dtype = infer_type(lhs).checked_type.dtype
        rhs_dtype = infer_type(rhs).checked_type.dtype
        assert lhs_dtype in ("int8", "uint8"), "MatMulInteger: invalid dtype for first input"
        assert rhs_dtype in ("int8", "uint8"), "MatMulInteger: invalid dtype for second input"
        assert lhs_dtype == rhs_dtype, "MatMulInteger: input dtypes must match"
        lhs_scale = _op.const(1.0, dtype="float32")
        rhs_scale = _op.const(1.0, dtype="float32")
        out_scale = _op.const(1.0, dtype="float32")
        # Zero points default to 0; they may be overridden by optional inputs below.
        lhs_zp = _op.const(0.0, dtype=lhs_dtype)
        rhs_zp = _op.const(0.0, dtype=rhs_dtype)
        out_zp = _op.const(0.0, dtype="int32")
        if len(inputs) == 4:
            lhs_zp, rhs_zp = inputs[2], inputs[3]
            lhs_zp_dtype = infer_type(lhs_zp).checked_type.dtype
            rhs_zp_dtype = infer_type(rhs_zp).checked_type.dtype
            assert (
                lhs_zp_dtype == lhs_dtype and rhs_zp_dtype == rhs_dtype
            ), "MatMulInteger: input dtype doesn't match zero point dtype"
        elif len(inputs) != 2:
            raise AssertionError(f"MatMulInteger op takes 2 or 4 inputs, {len(inputs)} given")
        qmatmul_inputs = [lhs, lhs_scale, lhs_zp, rhs, rhs_scale, rhs_zp, out_scale, out_zp]
        return QLinearMatMul.get_converter(10)(
            qmatmul_inputs, attr, params, expected_out_dtypes=["int32"]
        )
class QLinearMul(OnnxOpConverter):
    """Operator converter for QLinearMul from Microsoft onnxruntime contrib opset."""
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # onnxruntime does not perform this op in the integer domain; it
        # dequantizes both operands to fp32, multiplies, then requantizes:
        # https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qlmul.cpp
        a_scale = get_scalar(inputs[1], params)
        a_zero_point = get_scalar(inputs[2], params, "int32")
        b_scale = get_scalar(inputs[4], params)
        b_zero_point = get_scalar(inputs[5], params, "int32")
        y_scale = fold_constant(get_scalar(inputs[6], params))
        y_zero_point = get_scalar(inputs[7], params, "int32")
        out_dtype = infer_type(inputs[0]).checked_type.dtype
        lhs = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)
        rhs = _qnn.op.dequantize(inputs[3], b_scale, b_zero_point)
        fp32_product = _op.multiply(lhs, rhs)
        return _qnn.op.quantize(fp32_product, y_scale, y_zero_point, out_dtype=out_dtype)
class QLinearLeakyRelu(OnnxOpConverter):
    """Operator converter for QLinearLeakyRelu from Microsoft onnxruntime contrib opset."""
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # Per the onnxruntime docs, the activation is computed in fp32 and
        # requantized afterwards:
        # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearLeakyRelu
        x_scale = get_scalar(inputs[1], params)
        x_zero_point = get_scalar(inputs[2], params, "int32")
        y_scale = fold_constant(get_scalar(inputs[3], params))
        y_zero_point = get_scalar(inputs[4], params, "int32")
        negative_slope = float(attr.get("alpha", 1.0))
        out_dtype = infer_type(inputs[0]).checked_type.dtype
        dequantized = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)
        activated = _op.nn.leaky_relu(dequantized, negative_slope)
        return _qnn.op.quantize(activated, y_scale, y_zero_point, out_dtype=out_dtype)
class QLinearSigmoid(OnnxOpConverter):
    """Operator converter for QLinearSigmoid from Microsoft onnxruntime contrib opset."""
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        # onnxruntime evaluates this op in fp32 and requantizes the result:
        # https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/
        # providers/dml/DmlExecutionProvider/src/GraphTransformer.cpp#L245
        x_scale = get_scalar(inputs[1], params)
        x_zero_point = get_scalar(inputs[2], params, "int32")
        y_scale = fold_constant(get_scalar(inputs[3], params))
        y_zero_point = get_scalar(inputs[4], params, "int32")
        out_dtype = infer_type(inputs[0]).checked_type.dtype
        dequantized = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)
        activated = _op.sigmoid(dequantized)
        return _qnn.op.quantize(activated, y_scale, y_zero_point, out_dtype=out_dtype)
class QLinearSoftmax(OnnxOpConverter):
    """Operator converter for QLinearSoftmax from Microsoft onnxruntime contrib opset."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Dequantize, run a regular softmax over the requested axis, requantize.
        reduce_axis = attr["axis"]
        x_scale = get_scalar(inputs[1], params)
        x_zero_point = get_scalar(inputs[2], params, "int32")
        y_scale = fold_constant(get_scalar(inputs[3], params))
        y_zero_point = get_scalar(inputs[4], params, "int32")
        out_dtype = infer_type(inputs[0]).checked_type.dtype
        dequantized = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)
        softmax_out = _op.nn.softmax(dequantized, reduce_axis)
        return _qnn.op.quantize(softmax_out, y_scale, y_zero_point, out_dtype=out_dtype)
class QLinearConcat(OnnxOpConverter):
    """Operator converter for QLinearConcat from Microsoft onnxruntime contrib opset."""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # which axis to concat on
        axis = attr["axis"]
        y_scale = fold_constant(get_scalar(inputs[0], params))
        y_zero_point = get_scalar(inputs[1], params, "int32")
        # After the output scale/zero-point, inputs arrive as
        # (tensor, scale, zero_point) triples.
        assert (
            len(inputs) % 3 == 2
        ), "Additional input count must be a multiple of 3 -- tensor/scale/zero_point tuples"
        triple_starts = range(2, len(inputs), 3)
        tensors = [inputs[i] for i in triple_starts]
        scales = [get_scalar(inputs[i + 1], params) for i in triple_starts]
        zero_points = [get_scalar(inputs[i + 2], params, "int32") for i in triple_starts]
        return _qnn.op.concatenate(tensors, scales, zero_points, y_scale, y_zero_point, axis)
class ConvInteger(OnnxOpConverter):
    """Operator converter for ConvInteger.

    ConvInteger is a quantized convolution with zero-points but no scales;
    it is lowered to qnn.conv2d with unit (1.0) scales and an int32 output.
    """
    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        data = inputs[0]
        weight = inputs[1]
        # Both zero-point inputs are optional and default to 0.
        data_zp = inputs[2]
        weight_zp = inputs[3]
        if data_zp is None:
            data_zp = _expr.const(0, "int32")
        if weight_zp is None:
            weight_zp = _expr.const(0, "int32")
        input_type = infer_type(data)
        input_shape = get_const_tuple(input_type.checked_type.shape)
        ndim = len(input_shape)
        kernel_type = infer_type(weight)
        kernel_shape = get_const_tuple(kernel_type.checked_type.shape)
        # kernel_shape attribute is optional in ONNX; infer it from the weight tensor.
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = kernel_shape[2:]
        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                # Padding must use the data zero-point so padded elements dequantize to 0.
                data = autopad(
                    data,
                    attr.get("strides", [1] * (ndim - 2)),
                    attr["kernel_shape"],
                    attr.get("dilations", [1] * (ndim - 2)),
                    pad_value=data_zp,
                    mode=attr["auto_pad"],
                )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = (
                    f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv '
                    f"is invalid."
                )
                raise tvm.error.OpAttributeInvalid(msg)
            attr.pop("auto_pad")
        out_channels = kernel_shape[0]
        dilation = attr.get("dilations", [1] * (ndim - 2))
        strides = attr.get("strides", [1] * (ndim - 2))
        padding = attr["pads"] if "pads" in attr else 0
        groups = attr["group"] if "group" in attr else 1
        if ndim != 4:
            raise tvm.error.OpAttributeInvalid(
                "Only 2D kernels are supported for operator ConvInteger."
            )
        # Unit scales: ConvInteger has no scale inputs, so the raw int32
        # accumulator is the op's output.
        return _qnn.op.conv2d(
            data,
            weight,
            _op.cast(data_zp, "int32"),
            _op.cast(weight_zp, "int32"),
            _expr.const(1.0, "float32"),
            _expr.const(1.0, "float32"),
            kernel_size=attr["kernel_shape"],
            channels=out_channels,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
        )
class BitwiseBase(OnnxOpConverter):
    """Base class of operator converter for Bitwise operations"""
    name = ""
    @classmethod
    def check_inputs(cls, inputs, num=2, use_int=True):
        """Validate input arity and that every input has an allowed integer dtype."""
        assert len(inputs) == num, f"{cls.name} takes {num} inputs, {len(inputs)} given"
        valid_types = ["uint8", "uint16", "uint32", "uint64"]
        if use_int:
            valid_types += ["int8", "int16", "int32", "int64"]
        for i, operand in enumerate(inputs):
            in_dtype = infer_type(operand).checked_type.dtype
            assert in_dtype in valid_types, f"Wrong dtype of the {i}-th input: {in_dtype}"
class BitShift(BitwiseBase):
    """Operator converter for BitShift"""
    name = "BitShift"
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # BitShift is defined for unsigned integers only.
        cls.check_inputs(inputs, use_int=False)
        direction = attr.get("direction", "LEFT").decode("ascii")
        shift_ops = {"LEFT": _op.left_shift, "RIGHT": _op.right_shift}
        if direction not in shift_ops:
            raise ValueError("Unsupported Shift Direction: " + direction)
        return shift_ops[direction](*inputs)
class BitwiseAnd(BitwiseBase):
    """Operator converter for BitwiseAnd"""
    name = "BitwiseAnd"
    @classmethod
    def _impl_v18(cls, inputs, attr, params):
        # Element-wise AND over two integer tensors.
        cls.check_inputs(inputs)
        return _op.bitwise_and(inputs[0], inputs[1])
class BitwiseNot(BitwiseBase):
    """Operator converter for BitwiseNot"""
    name = "BitwiseNot"
    @classmethod
    def _impl_v18(cls, inputs, attr, params):
        # Unary element-wise complement of one integer tensor.
        cls.check_inputs(inputs, num=1)
        return _op.bitwise_not(inputs[0])
class BitwiseOr(BitwiseBase):
    """Operator converter for BitwiseOr"""
    name = "BitwiseOr"
    @classmethod
    def _impl_v18(cls, inputs, attr, params):
        # Element-wise OR over two integer tensors.
        cls.check_inputs(inputs)
        return _op.bitwise_or(inputs[0], inputs[1])
class BitwiseXor(BitwiseBase):
    """Operator converter for BitwiseXor"""
    name = "BitwiseXor"
    @classmethod
    def _impl_v18(cls, inputs, attr, params):
        # Element-wise XOR over two integer tensors.
        cls.check_inputs(inputs)
        return _op.bitwise_xor(inputs[0], inputs[1])
class Unique(OnnxOpConverter):
    """Operator converter for unique.

    Lowers ONNX Unique to relay's unique op. Only 1D data is supported;
    when the `axis` attribute is absent, the input is flattened first
    (matching ONNX semantics).
    """
    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        if len(inputs) != 1:
            raise ValueError("Unique expects 1 input")
        data = inputs[0]
        axis = attr.get("axis", None)
        if axis is None:  # If axis is None, flatten the input before calling unique
            data = _op.reshape(data, _op.const([-1]))
        else:
            data_shape = infer_shape(data)
            if len(data_shape) != 1:
                raise ValueError("TVM only supports 1D Unique operator.")
        is_sorted = attr.get("sorted", 1)  # sorted is 0 or 1, 1 by default
        # ONNX documentation lists return_counts as optional but there is no input to specify
        # whether it is returned. Therefore we'll just always return it.
        unique = _op.unique(data, is_sorted=(is_sorted == 1), return_counts=True)
        num_unique = unique[3]

        # Fixed: the former `lambda input: ...` both bound a lambda to a name
        # (PEP 8 E731) and shadowed the builtin `input`.
        def _trim_to_num_unique(tensor):
            # Keep only the first num_unique entries of the result tensor.
            return _op.strided_slice(tensor, _op.const([0]), num_unique)

        unique_vals = _trim_to_num_unique(unique[0])
        indices = _op.cast(_trim_to_num_unique(unique[1]), "int64")  # ONNX always returns int64
        inverse_indices = _op.cast(unique[2], "int64")  # ONNX always returns int64
        counts = _op.cast(_trim_to_num_unique(unique[4]), "int64")  # ONNX always returns int64
        # ONNX unique returns unique, indices, inverse_indices, (optional) counts
        return _expr.TupleWrapper(_expr.Tuple([unique_vals, indices, inverse_indices, counts]), 4)
class Einsum(OnnxOpConverter):
    """Operator converter for Einsum"""
    @classmethod
    def _impl_v12(cls, inputs, attr, params):
        # The equation attribute arrives as bytes; relay expects a str.
        return _op.einsum(inputs, attr["equation"].decode("utf-8"))
class Trilu(OnnxOpConverter):
    """Operator converter for Trilu"""
    @classmethod
    def _impl_v14(cls, inputs, attr, params):
        # upper selects the upper (default) or lower triangular part.
        upper = attr.get("upper", True)
        data = inputs[0]
        # The diagonal offset k is an optional second input, defaulting to 0.
        k = inputs[1] if len(inputs) == 2 else 0
        return _op.trilu(data, k, upper)
class GridSample(OnnxOpConverter):
    """Operator converter for GridSample"""
    @classmethod
    def _impl_v16(cls, inputs, attr, params):
        # onnx grid is of shape (N, H, W, 2) which should be transposed to
        # (N, 2, H, W) for relay
        sample_grid = _op.transform.transpose(inputs[1], axes=(0, 3, 1, 2))
        interp_method: str = attr.get("mode", b"bilinear").decode("utf-8")
        pad_mode: str = attr.get("padding_mode", b"zeros").decode("utf-8")
        # onnx default is 0 which should be changed to False in relay
        align = attr.get("align_corners", 0) != 0
        return _op.image.grid_sample(
            inputs[0], sample_grid, interp_method, padding_mode=pad_mode, align_corners=align
        )
class Bernoulli(OnnxOpConverter):
    """Operator converter for Bernoulli"""
    @classmethod
    def _impl_v15(cls, inputs, attr, params):
        # Draw uniform noise and threshold it against the input probabilities:
        # output element is 1 exactly when noise < p.
        in_dtype = infer_type(inputs[0]).checked_type.dtype
        assert in_dtype in ["float32", "float64"], "Only float input tensor is currently supported."
        # The data type for the elements of the output tensor.
        # if not specified, we will use the data type of the input tensor
        out_dtype = attr.get("dtype", None)
        out_dtype = in_dtype if out_dtype is None else get_type(out_dtype)
        seed = attr.get("seed", None)
        seed = np.random.randint(1e6) if seed is None else int(seed)
        key = _random.threefry_key(seed)
        uniform_out = _op.random.uniform(key, infer_shape(inputs[0]), in_dtype)
        _, noise = _expr.TupleWrapper(uniform_out, 2)
        return _op.cast(_op.less(noise, inputs[0]), out_dtype)
class RandomNormal(OnnxOpConverter):
    """Operator converter for random_normal"""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # All generation parameters come from attributes; the op has no inputs.
        out_dtype = get_type(attr.get("dtype", 1))
        mean = attr.get("mean", 0.0)
        scale = attr.get("scale", 1.0)
        seed = attr.get("seed", None)
        out_shape = attr["shape"]
        assert out_dtype in [
            "float32",
            "float64",
        ], "Only float random value generation is currently supported."
        # A missing seed means a fresh random one per import.
        seed = np.random.randint(1e6) if seed is None else int(seed)
        key = _random.threefry_key(seed)
        normal_out = _op.random.normal(key, out_shape, dtype=out_dtype, mean=mean, scale=scale)
        _, vals = _expr.TupleWrapper(normal_out, 2)
        return vals
class RandomNormalLike(OnnxOpConverter):
    """Operator converter for random_normal_like"""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # The output shape mirrors the reference input; the dtype falls back
        # to the reference input's dtype when the attribute is absent.
        out_dtype = attr.get("dtype", None)
        scale = attr.get("scale", 1.0)
        mean = attr.get("mean", 0.0)
        seed = attr.get("seed", None)
        out_shape = infer_shape(inputs[0])
        if out_dtype is None:
            out_dtype = infer_type(inputs[0]).checked_type.dtype
        else:
            out_dtype = get_type(out_dtype)
        assert out_dtype in [
            "float32",
            "float64",
        ], "Only float random value generation is currently supported."
        seed = np.random.randint(1e6) if seed is None else int(seed)
        key = _random.threefry_key(seed)
        normal_out = _op.random.normal(key, out_shape, dtype=out_dtype, mean=mean, scale=scale)
        _, vals = _expr.TupleWrapper(normal_out, 2)
        return vals
class RandomUniform(OnnxOpConverter):
    """Operator converter for random_uniform"""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # All generation parameters come from attributes; the op has no inputs.
        out_dtype = get_type(attr.get("dtype", 1))
        high = attr.get("high", 1.0)
        low = attr.get("low", 0.0)
        seed = attr.get("seed", None)
        out_shape = attr["shape"]
        assert out_dtype in [
            "float32",
            "float64",
        ], "Only float random value generation is currently supported."
        # A missing seed means a fresh random one per import.
        seed = np.random.randint(1e6) if seed is None else int(seed)
        key = _random.threefry_key(seed)
        uniform_out = _op.random.uniform(key, out_shape, dtype=out_dtype, low=low, high=high)
        _, vals = _expr.TupleWrapper(uniform_out, 2)
        return vals
class RandomUniformLike(OnnxOpConverter):
    """Operator converter for random_uniform_like"""
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # The output shape mirrors the reference input; the dtype falls back
        # to the reference input's dtype when the attribute is absent.
        out_dtype = attr.get("dtype", None)
        high = attr.get("high", 1.0)
        low = attr.get("low", 0.0)
        seed = attr.get("seed", None)
        out_shape = infer_shape(inputs[0])
        if out_dtype is None:
            out_dtype = infer_type(inputs[0]).checked_type.dtype
        else:
            out_dtype = get_type(out_dtype)
        assert out_dtype in [
            "float32",
            "float64",
        ], "Only float random value generation is currently supported."
        seed = np.random.randint(1e6) if seed is None else int(seed)
        key = _random.threefry_key(seed)
        uniform_out = _op.random.uniform(key, out_shape, dtype=out_dtype, low=low, high=high)
        _, vals = _expr.TupleWrapper(uniform_out, 2)
        return vals
class Multinomial(OnnxOpConverter):
    """Operator converter for multinomial.

    Draws `sample_size` class indices per row from the (unnormalized)
    probabilities in the input, then casts the indices to the requested dtype.
    """
    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        # NOTE(review): the sibling random converters pass the ONNX integer
        # dtype enum to get_type(); here the default is the string "int64" --
        # confirm get_type handles both forms.
        dtype = attr.get("dtype", "int64")
        sample_size = attr.get("sample_size", 1)
        # NOTE(review): unlike the other random converters, a provided seed is
        # not cast with int() here -- confirm attr seeds are always integers.
        seed = attr.get("seed", None)
        if seed is None:
            seed = np.random.randint(1e6)
        key = _op.random.threefry_key(seed)
        output = _op.random.multinomial(key, inputs[0], sample_size)
        # multinomial returns (new_key, indices); only the indices are the op output.
        _, indices = _expr.TupleWrapper(output, 2)
        return _op.cast(indices, get_type(dtype))
class NegativeLogLikelihoodLoss(OnnxOpConverter):
    """Operator converter for NegativeLogLikelihoodLoss"""
    # Reductions accepted by the ONNX spec for this op.
    VALID_REDUCTIONS = {"mean", "sum", "none"}
    @classmethod
    def run_calculation(
        cls: "NegativeLogLikelihoodLoss",
        input_tensor: relay.Expr,
        target_tensor: relay.Expr,
        weight_tensor: Optional[relay.Expr],
        ignore_index: int,
    ):
        """Run calculation for NegativeLogLikelihood, returning output tensor and
        weight tensor used for mean-style reductions.

        Also reused by SoftmaxCrossEntropyLoss, which feeds in log-softmax values.
        """
        # Convert negative indices --> positive indices for gather ops, note we have to
        # use the original target tensor to interact with ignore_index to have proper behavior.
        normalized_target_tensor = normalize_gather_indices(input_tensor, target_tensor, 1)
        if weight_tensor is None:
            # No class weights given: treat every class weight as 1.
            channels = infer_shape(input_tensor)[1]
            weight_tensor = relay.ones(
                [channels], dtype=infer_type(input_tensor).checked_type.dtype
            )
        # Pick out -input[n][target[n]] along the class axis (axis 1).
        loss = -relay.gather(
            input_tensor, axis=1, indices=relay.expand_dims(normalized_target_tensor, 1)
        )
        loss = relay.squeeze(loss, axis=[1])
        # Gather the per-element class weight for each target and scale the loss by it.
        expanded_normalized_target_tensor = relay.expand_dims(normalized_target_tensor, 0)
        expanded_normalized_target_tensor = relay.nn.batch_flatten(
            expanded_normalized_target_tensor
        )
        flattened_weights = relay.gather_nd(weight_tensor, expanded_normalized_target_tensor)
        select_weights = relay.reshape_like(flattened_weights, loss)
        loss *= select_weights
        if ignore_index is not None:
            # "Ignore" values whose target is the ignore_index
            mask_tensor = relay.equal(
                target_tensor, relay.const(ignore_index, dtype=target_tensor.type_annotation.dtype)
            )
            mask_tensor = relay.const(1, dtype="int8") - relay.cast(mask_tensor, "int8")
            loss = relay.where(
                mask_tensor, loss, relay.const(0, infer_type(loss).checked_type.dtype)
            )
            # This is not explained super clearly in the onnx spec, but masked values don't
            # contribute toward the final value in reduction
            select_weights *= relay.cast_like(mask_tensor, select_weights)
        # Denominator for the "mean" reduction: sum of the (possibly masked) weights.
        weight_total = relay.sum(select_weights)
        return loss, weight_total
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        ignore_index = attr.get("ignore_index", None)
        reduction = attr.get("reduction", b"mean").decode("utf-8")
        if reduction not in cls.VALID_REDUCTIONS:
            raise ValueError(
                f"Unknown reduction type {reduction}, choices are {cls.VALID_REDUCTIONS}"
            )
        input_tensor, target_tensor = inputs[0], inputs[1]
        # The per-class weight tensor is an optional third input.
        if len(inputs) == 3:
            weight_tensor = inputs[2]
        else:
            weight_tensor = None
        loss, weight_total = cls.run_calculation(
            input_tensor, target_tensor, weight_tensor=weight_tensor, ignore_index=ignore_index
        )
        if reduction == "mean":
            # Weighted mean: divide by the total of the selected weights, not the count.
            return relay.sum(loss) / weight_total
        if reduction == "sum":
            return relay.sum(loss)
        # Case reduction == 'none'
        return loss
class SoftmaxCrossEntropyLoss(OnnxOpConverter):
    """Operator converter for SCE_loss.

    Implemented as log-softmax over the class axis followed by the
    NegativeLogLikelihoodLoss calculation, per the ONNX definition.
    """
    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        ignore_index = attr.get("ignore_index", None)
        reduction = attr.get("reduction", b"mean").decode("utf-8")
        input_tensor, target_tensor = inputs[0], inputs[1]
        # The per-class weight tensor is an optional third input.
        if len(inputs) == 3:
            weight_tensor = inputs[2]
        else:
            weight_tensor = None
        # The op optionally has a second output (the log-probabilities);
        # the importer records the requested output count in tvm_custom.
        get_log_prob = attr["tvm_custom"]["num_outputs"] == 2
        log_softmax_attr = {"axis": 1}
        log_softmax_tensor = LogSoftmax.get_converter(13)([input_tensor], log_softmax_attr, None)
        loss, weight_total = NegativeLogLikelihoodLoss.run_calculation(
            log_softmax_tensor, target_tensor, weight_tensor, ignore_index=ignore_index
        )
        if reduction == "mean":
            loss = relay.sum(loss) / weight_total
        elif reduction == "sum":
            loss = relay.sum(loss)
        if get_log_prob:
            return relay.TupleWrapper(relay.Tuple((loss, log_softmax_tensor)), 2)
        return loss
class Adagrad(OnnxOpConverter):
    """Operator converter for adagrad op.

    Inputs are (R, T, x_1..x_k, g_1..g_k, h_1..h_k) per the ONNX training
    spec: R and T are shared by all tensors, then k parameter tensors, their
    gradients, and their accumulated squared gradients. Outputs the updated
    parameters followed by the updated accumulators.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        decay_factor = attr.get("decay_factor", 0.0)
        epsilon = attr.get("epsilon", 0.0)
        norm_coefficient = attr.get("norm_coefficient", 0.0)
        R = inputs[0]
        T = inputs[1]
        # convert attributes to constants, proper types
        dtype_inputs = infer_type(inputs[3]).checked_type.dtype
        decay_factor = relay.const(decay_factor, dtype=dtype_inputs)
        epsilon = relay.const(epsilon, dtype=dtype_inputs)
        norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)
        T = relay.cast_like(T, inputs[3])
        assert (
            len(inputs) - 2
        ) % 3 == 0, f"Expect triplets for remaining inputs, found {len(inputs) - 2}"
        # Remaining inputs are:
        # [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... x_1_sq_g, x_2_sq_g...]
        num_input_tensors = (len(inputs) - 2) // 3
        output_tensors = []
        output_accumulated_squared_gradients = []
        for i in range(num_input_tensors):
            x = inputs[i + 2]
            gradient = inputs[i + 2 + num_input_tensors]
            accumulated_squared_gradient = inputs[i + 2 + 2 * num_input_tensors]
            # Decayed learning rate: r = R / (1 + T * decay_factor).
            r = R / (relay.const(1.0, dtype=dtype_inputs) + T * decay_factor)
            # L2-regularized gradient.
            g_regularized = norm_coefficient * x + gradient
            new_accumulated_squared_gradient = (
                accumulated_squared_gradient + g_regularized * g_regularized
            )
            # Per-element adaptive denominator: sqrt(accumulator) + epsilon.
            h_adaptive = relay.sqrt(new_accumulated_squared_gradient) + epsilon
            x_new = x - r * g_regularized / h_adaptive
            output_tensors.append(x_new)
            output_accumulated_squared_gradients.append(new_accumulated_squared_gradient)
        # append lists together, momentums come after result tensors
        result = output_tensors + output_accumulated_squared_gradients
        return _expr.TupleWrapper(_expr.Tuple(result), len(result))
class Adam(OnnxOpConverter):
    """Operator converter for Adam op.

    Inputs are (R, T, x_1..x_k, g_1..g_k, v_1..v_k, h_1..h_k): learning rate,
    step count, then k parameter tensors with their gradients, first-moment
    accumulators and second-moment accumulators. Outputs the updated
    parameters followed by the updated v and h accumulators.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        alpha = attr.get("alpha", 0.9)
        beta = attr.get("beta", 0.999)
        # Note in the docs epsilon default is 0.0 but in the tests it is set to 1e-2:
        # https://git.io/Ju5C4
        epsilon = attr.get("epsilon", 1e-2)
        norm_coefficient = attr.get("norm_coefficient", 0.0)
        norm_coefficient_post = attr.get("norm_coefficient_post", 0.0)
        R = inputs[0]
        T = inputs[1]
        assert (
            len(inputs) - 2
        ) % 4 == 0, f"Expect 4-lets for remaining inputs, found {len(inputs) - 2}"
        # convert attributes to constants, proper types
        dtype_inputs = infer_type(inputs[3]).checked_type.dtype
        inverse_alpha = relay.const(1 - alpha, dtype=dtype_inputs)
        alpha = relay.const(alpha, dtype=dtype_inputs)
        inverse_beta = relay.const(1 - beta, dtype=dtype_inputs)
        beta = relay.const(beta, dtype=dtype_inputs)
        epsilon = relay.const(epsilon, dtype=dtype_inputs)
        norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)
        norm_coefficient_post = relay.const(norm_coefficient_post, dtype=dtype_inputs)
        one = relay.const(1, dtype=dtype_inputs)
        T = relay.cast_like(T, inputs[3])
        # Remaining inputs are:
        # [x_1, x_2 ..., x_1_grad, x_2_grad, ... x_1_g_accum, x_2_g_accum..., x_1_g_sq_accum, ...]
        num_input_tensors = (len(inputs) - 2) // 4
        output_tensors = []
        output_accumulated_gradients = []
        output_accumulated_squared_gradients = []
        for i in range(num_input_tensors):
            x = inputs[i + 2]
            g = inputs[i + 2 + num_input_tensors]
            v = inputs[i + 2 + 2 * num_input_tensors]
            h = inputs[i + 2 + 3 * num_input_tensors]
            # L2-regularized gradient, then exponential moving averages of the
            # gradient (v, coefficient alpha) and squared gradient (h, coefficient beta).
            g_regularized = norm_coefficient * x + g
            v_new = alpha * v + inverse_alpha * g_regularized
            h_new = beta * h + inverse_beta * g_regularized * g_regularized
            h_sqrt = relay.sqrt(h_new) + epsilon
            # Bias-corrected learning rate, applied only after the first step (T > 0).
            true_branch = R * relay.sqrt(one - relay.power(beta, T)) / (one - relay.power(alpha, T))
            R_adjusted = relay.If(T > relay.const(0, dtype=dtype_inputs), true_branch, R)
            x_new = x - R_adjusted * (v_new / h_sqrt)
            # Optional post-update shrinkage of the parameters.
            x_result = (one - norm_coefficient_post) * x_new
            output_tensors.append(x_result)
            output_accumulated_gradients.append(v_new)
            output_accumulated_squared_gradients.append(h_new)
        # append lists together to get final result
        result = (
            output_tensors + output_accumulated_gradients + output_accumulated_squared_gradients
        )
        return _expr.TupleWrapper(_expr.Tuple(result), len(result))
class Momentum(OnnxOpConverter):
    """Operator converter for Momentum op.

    Inputs are (R, T, x_1..x_k, g_1..g_k, m_1..m_k): learning rate, step
    count, then k parameter tensors, their gradients and their momentum
    accumulators. Outputs the updated parameters followed by the updated
    momentums. Supports both "standard" and "nesterov" modes.
    """
    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        alpha = attr["alpha"]
        beta = attr["beta"]
        mode = attr["mode"].decode("utf-8")
        norm_coefficient = attr["norm_coefficient"]
        assert mode in ["nesterov", "standard"], f"Unknown momentum mode {mode}"
        R = inputs[0]
        T = inputs[1]
        assert (
            len(inputs) - 2
        ) % 3 == 0, f"Expect triplets for remaining inputs, found {len(inputs) - 2}"
        # Remaining inputs are:
        # [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... x_1_momentum, x_2_momentum...]
        num_input_tensors = (len(inputs) - 2) // 3
        # convert attributes to constants
        dtype_inputs = infer_type(inputs[3]).checked_type.dtype
        alpha = relay.const(alpha, dtype=dtype_inputs)
        beta = relay.const(beta, dtype=dtype_inputs)
        norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)
        default_beta = relay.const(1.0, dtype=dtype_inputs)
        # Calculate updated values for every input
        output_tensors = []
        output_momentums = []
        for i in range(num_input_tensors):
            x = inputs[i + 2]
            gradient = inputs[i + 2 + num_input_tensors]
            momentum = inputs[i + 2 + 2 * num_input_tensors]
            # L2-regularized gradient.
            g_regularized = norm_coefficient * x + gradient
            # beta is only applied after the first step (T > 0); the very first
            # update uses a coefficient of 1.0.
            beta_adjusted = relay.If(T > relay.const(0, dtype="int64"), beta, default_beta)
            new_momentum = alpha * momentum + beta_adjusted * g_regularized
            if mode == "standard":
                x_output = x - R * new_momentum
            else:
                # mode == 'nesterov'
                x_output = x - R * (g_regularized + alpha * new_momentum)
            output_tensors.append(x_output)
            output_momentums.append(new_momentum)
        # append lists together, momentums come after result tensors
        result = output_tensors + output_momentums
        return _expr.TupleWrapper(_expr.Tuple(result), len(result))
class Round(OnnxOpConverter):
    """Operator converter for round op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Emulate ONNX Round, which uses banker's rounding (ties go to even)."""
        data = inputs[0]
        dtype = infer_type(data).checked_type.dtype
        half = _expr.const(0.5, dtype=dtype)
        one = _expr.const(1, dtype=dtype)
        two = _expr.const(2, dtype=dtype)
        # Round with .5 ties taken downward: ceil(x - 0.5).
        half_down = _op.ceil(data - half)
        # 1 exactly when x is a .5 tie (x + 0.5 is an integer), otherwise 0.
        tie_mask = one - (_op.ceil(data + half) - _op.floor(data + half))
        # 1 when the half-down result is odd and must be bumped up to the even value.
        odd = _op.abs(_op.mod(half_down, two))
        return half_down + tie_mask * odd
class SequenceConstruct(OnnxOpConverter):
    """Operator converter for sequence construction op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """A sequence is modeled as a relay Tuple of its member tensors."""
        members = list(inputs)
        return _expr.Tuple(members)
class SequenceEmpty(OnnxOpConverter):
    """Operator converter for sequence empty op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """An empty sequence is simply a zero-element relay Tuple."""
        return _expr.Tuple([])
class SequenceErase(OnnxOpConverter):
    """Operator converter for sequence erase op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Remove the tensor at ``position`` (default: last) from a sequence Tuple."""
        sequence = inputs[0]
        if len(inputs) != 2:
            # No position given: erase the final element.
            position = -1
        else:
            position = inputs[1]
            # Only compile-time constant positions can be resolved here.
            if isinstance(position, _expr.Constant):
                position = position.data.numpy()
            elif position.name_hint in params:
                position = params[position.name_hint].numpy()
            else:
                raise NotImplementedError("Position must be a constant.")
        length = len(sequence)
        assert -length <= position < length, "Position is out of bounds"
        if position < 0:
            position += length
        # Rebuild the Tuple with every member except the erased one.
        kept = [sequence[i] for i in range(length) if i != position]
        return _expr.Tuple(kept)
class SequenceInsert(OnnxOpConverter):
    """Operator converter for sequence insert op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Insert a tensor into a sequence Tuple at ``position`` (default: append)."""
        sequence = inputs[0]
        tensor = inputs[1]
        if len(inputs) != 3:
            # No position given: append at the end.
            position = -1
        else:
            position = inputs[2]
            # Only compile-time constant positions can be resolved here.
            if isinstance(position, _expr.Constant):
                position = position.data.numpy()
            elif position.name_hint in params:
                position = params[position.name_hint].numpy()
            else:
                raise NotImplementedError("Position must be a constant.")
        if position < 0:
            position += len(sequence) + 1
        # Rebuild the member list with the new tensor spliced in.
        members = [sequence[i] for i in range(len(sequence))]
        members.insert(position, tensor)
        return _expr.Tuple(members)
class SequenceLength(OnnxOpConverter):
    """Operator converter for sequence length op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """The sequence is a relay Tuple, so its length is known at compile time."""
        length = len(inputs[0])
        return _expr.const(length, dtype="int64")
class ConcatFromSequence(OnnxOpConverter):
    """Operator converter for sequence concatenation op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Join a sequence's tensors along ``axis``; stack onto a new axis if requested."""
        axis = attr.get("axis", 0)
        if attr.get("new_axis", 0) == 1:
            # new_axis == 1: create a fresh dimension rather than concatenating.
            return _op.stack(inputs[0], axis=axis)
        return _op.concatenate(inputs[0], axis=axis)
class SplitToSequence(OnnxOpConverter):
    """Operator converter for split to sequence op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Split a tensor along ``axis`` into a sequence (relay Tuple) of slices."""
        axis = attr.get("axis", 0)
        keepdims = attr.get("keepdims", 1)
        data = inputs[0]
        split = inputs[1]
        if split is None:
            # No split sizes supplied: emit one slice per element along the axis.
            num_slices = infer_shape(data)[axis]
            slices = _op.split(data, num_slices, axis=axis)
        else:
            # Only compile-time constant split values are supported.
            assert isinstance(
                split, _expr.Constant
            ), "Only constant split supported for SplitToSequence"
            split = split.data.numpy()
            if len(split.shape) == 1 and split.shape[0] > 1:
                # A 1D size tensor must become cumulative indices for relay;
                # the final cumulative value is out of range and is dropped.
                split = np.cumsum(split)[:-1]
            else:
                # Scalar split: interpret as number of equal chunks.
                split = int(split)
            slices = _op.split(data, split, axis=axis)
        # keepdims == 0 removes the split axis. This diverges from the ONNX
        # spec but is required for pytorch exporter compatibility.
        if not keepdims:
            slices = [_op.squeeze(piece, axis=[axis]) for piece in slices]
        return _expr.Tuple(list(slices))
class SequenceAt(OnnxOpConverter):
    """Operator converter for sequence at op."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """Index a sequence Tuple with a compile-time constant position."""
        position = inputs[1]
        assert isinstance(
            position, _expr.Constant
        ), "Only constant position supported for SequenceAt"
        index = int(position.data.numpy())
        return inputs[0][index]
# Operators that are compatible as-is and do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor (callable):
# - for 1 to 1 mapping, use Renamer if nothing but the name is different
# - use AttrCvt if attributes need to be converted
# - for 1 to N mapping (composed), use custom callable functions
# - N to 1 mapping is currently not supported
def _get_convert_map(opset):
    """Return the mapping from ONNX operator name to its converter callable
    for the given *opset* version. Each value is called as
    ``converter(inputs, attr, params)`` by ``GraphProto._convert_operator``.
    """
    return {
        # defs/experimental
        "Identity": Renamer("copy"),
        "Optional": Optional_.get_converter(opset),
        "OptionalHasElement": OptionalHasElement.get_converter(opset),
        "OptionalGetElement": OptionalGetElement.get_converter(opset),
        "Affine": Affine.get_converter(opset),
        # Bitwise operators
        "BitShift": BitShift.get_converter(opset),
        "BitwiseAnd": BitwiseAnd.get_converter(opset),
        "BitwiseNot": BitwiseNot.get_converter(opset),
        "BitwiseOr": BitwiseOr.get_converter(opset),
        "BitwiseXor": BitwiseXor.get_converter(opset),
        "ThresholdedRelu": ThresholdedRelu.get_converter(opset),
        "ScaledTanh": ScaledTanh.get_converter(opset),
        "ParametricSoftplus": ParametricSoftPlus.get_converter(opset),
        "Constant": Constant.get_converter(opset),
        "ConstantOfShape": ConstantOfShape.get_converter(opset),
        # 'GivenTensorFill'
        "FC": AttrCvt("dense", ignores=["axis", "axis_w"]),
        "Scale": Scale.get_converter(opset),
        # 'GRUUnit'
        # 'ATen'
        # 'ImageScaler'
        "MeanVarianceNormalization": MeanVarianceNormalization.get_converter(opset),
        # 'Crop'
        # 'Embedding'
        "Upsample": Upsample.get_converter(opset),
        "SpatialBN": BatchNorm.get_converter(opset),
        # defs/generator
        # defs/logical
        # defs/math
        "Add": Add.get_converter(opset),
        "Sub": Sub.get_converter(opset),
        "Mul": Mul.get_converter(opset),
        "Div": Div.get_converter(opset),
        "Neg": Renamer("negative"),
        "Abs": Absolute.get_converter(opset),
        "Reciprocal": Reciprocal.get_converter(opset),
        "Floor": Renamer("floor"),
        "Ceil": Renamer("ceil"),
        "Round": Round.get_converter(opset),
        "IsInf": IsInf.get_converter(opset),
        "IsNaN": Renamer("isnan"),
        "Sqrt": Renamer("sqrt"),
        "Relu": Renamer("relu"),
        "Celu": Celu.get_converter(opset),
        "LeakyRelu": Renamer("leaky_relu"),
        "Selu": Selu.get_converter(opset),
        "Elu": Elu.get_converter(opset),
        "Gelu": Gelu.get_converter(opset),
        "FastGelu": FastGelu.get_converter(opset),
        "BiasGelu": BiasGelu.get_converter(opset),
        "LayerNormalization": LayerNormalization.get_converter(opset),
        # TODO: We need a better way to handle different domains, in case
        # of name collisions. EmbedLayerNormalization, SkipLayerNormalization, and Attention
        # are in the `com.microsoft` domain.
        "EmbedLayerNormalization": EmbedLayerNormalization.get_converter(opset),
        "SkipLayerNormalization": SkipLayerNormalization.get_converter(opset),
        "Attention": Attention.get_converter(opset),
        "QAttention": QAttention.get_converter(opset),
        "Exp": Renamer("exp"),
        "Greater": Renamer("greater"),
        "GreaterOrEqual": Renamer("greater_equal"),
        "Less": Renamer("less"),
        "LessOrEqual": Renamer("less_equal"),
        "Log": Renamer("log"),
        "Acos": Renamer("acos"),
        "Acosh": Renamer("acosh"),
        "Asin": Renamer("asin"),
        "Asinh": Renamer("asinh"),
        "Atan": Renamer("atan"),
        "Atanh": Renamer("atanh"),
        "Cos": Renamer("cos"),
        "Cosh": Renamer("cosh"),
        "Sin": Renamer("sin"),
        "Sinh": Renamer("sinh"),
        "Tan": Renamer("tan"),
        "Tanh": Renamer("tanh"),
        "Pow": Pow.get_converter(opset),
        "PRelu": Prelu.get_converter(opset),
        "Sigmoid": Renamer("sigmoid"),
        "HardSigmoid": HardSigmoid.get_converter(opset),
        "HardSwish": HardSwish.get_converter(opset),
        "Max": Maximum.get_converter(opset),
        "Min": Minimum.get_converter(opset),
        "Sum": Sum.get_converter(opset),
        "Mean": Mean.get_converter(opset),
        "Clip": Clip.get_converter(opset),
        "Softplus": Softplus.get_converter(opset),
        # softmax default axis is different in onnx
        "Softmax": Softmax.get_converter(opset),
        "LogSoftmax": LogSoftmax.get_converter(opset),
        "OneHot": OneHot.get_converter(opset),
        "Hardmax": Hardmax.get_converter(opset),
        "Shrink": Shrink.get_converter(opset),
        "Softsign": Softsign.get_converter(opset),
        "Gemm": Gemm.get_converter(opset),
        "MatMul": MatMul.get_converter(opset),
        "MatMulInteger": MatMulInteger.get_converter(opset),
        "MatMulInteger16": MatMulInteger16.get_converter(opset),
        "Mod": Mod.get_converter(opset),
        "Xor": Renamer("logical_xor"),
        # defs/nn
        "AveragePool": AveragePool.get_converter(opset),
        "LpPool": LpPool.get_converter(opset),
        "GlobalLpPool": GlobalLpPool.get_converter(opset),
        "MaxPool": MaxPool.get_converter(opset),
        "MaxUnpool": MaxUnpool.get_converter(opset),
        "Conv": Conv.get_converter(opset),
        "ConvTranspose": ConvTranspose.get_converter(opset),
        "GlobalAveragePool": GlobalAveragePool.get_converter(opset),
        "GlobalMaxPool": GlobalMaxPool.get_converter(opset),
        "BatchNormalization": BatchNorm.get_converter(opset),
        "InstanceNormalization": InstanceNorm.get_converter(opset),
        # 'LpNormalization'
        "Dropout": AttrCvt("dropout", {"ratio": "rate"}, ignores=["is_test"]),
        "Flatten": Flatten.get_converter(opset),
        "LRN": LRN.get_converter(opset),
        # Recurrent Layers
        "RNN": RNN.get_converter(opset),
        "LSTM": LSTM.get_converter(opset),
        "GRU": GRU.get_converter(opset),
        # defs/vision
        "MaxRoiPool": MaxRoiPool.get_converter(opset),
        "RoiAlign": RoiAlign.get_converter(opset),
        "NonMaxSuppression": NonMaxSuppression.get_converter(opset),
        # defs/reduction
        "ReduceMax": ReduceMax.get_converter(opset),
        "ReduceMin": ReduceMin.get_converter(opset),
        "ReduceSum": ReduceSum.get_converter(opset),
        "ReduceMean": ReduceMean.get_converter(opset),
        "ReduceProd": ReduceProd.get_converter(opset),
        "ReduceLogSumExp": ReduceLogSumExp.get_converter(opset),
        "ReduceLogSum": ReduceLogSum.get_converter(opset),
        "ReduceSumSquare": ReduceSumSquare.get_converter(opset),
        "ReduceL1": ReduceL1.get_converter(opset),
        "ReduceL2": ReduceL2.get_converter(opset),
        # defs/sorting
        "ArgMax": ArgMax.get_converter(opset),
        "ArgMin": ArgMin.get_converter(opset),
        "TopK": TopK.get_converter(opset),
        # defs/tensor
        "Cast": Cast.get_converter(opset),
        "CastLike": CastLike.get_converter(opset),
        "Reshape": Reshape.get_converter(opset),
        "Expand": Expand.get_converter(opset),
        "Concat": Concat.get_converter(opset),
        "Split": Split.get_converter(opset),
        "Slice": Slice.get_converter(opset),
        "Transpose": AttrCvt("transpose", {"perm": "axes"}),
        "DepthToSpace": DepthToSpace.get_converter(opset),
        "SpaceToDepth": SpaceToDepth.get_converter(opset),
        "Gather": Gather.get_converter(opset),
        "GatherElements": GatherElements.get_converter(opset),
        "GatherND": GatherND.get_converter(opset),
        "Compress": Compress.get_converter(opset),
        "Size": AttrCvt("ndarray_size", extras={"dtype": "int64"}),
        "Scatter": Scatter.get_converter(opset),
        "ScatterElements": ScatterElements.get_converter(opset),
        "ScatterND": ScatterND.get_converter(opset),
        "EyeLike": EyeLike.get_converter(opset),
        "Squeeze": Squeeze.get_converter(opset),
        "Unsqueeze": Unsqueeze.get_converter(opset),
        "Pad": Pad.get_converter(opset),
        "Shape": Shape.get_converter(opset),
        "Sign": Sign.get_converter(opset),
        "Equal": Equal.get_converter(opset),
        "Not": Not.get_converter(opset),
        "And": And.get_converter(opset),
        "Tile": Tile.get_converter(opset),
        "Erf": Erf.get_converter(opset),
        "Where": Where.get_converter(opset),
        "Or": Or.get_converter(opset),
        "Resize": Resize.get_converter(opset),
        "NonZero": NonZero.get_converter(opset),
        "Range": Range.get_converter(opset),
        "CumSum": CumSum.get_converter(opset),
        "Unique": Unique.get_converter(opset),
        "Einsum": Einsum.get_converter(opset),
        "Trilu": Trilu.get_converter(opset),
        "GridSample": GridSample.get_converter(opset),
        # defs/control_flow
        "Loop": Loop.get_converter(opset),
        "If": If.get_converter(opset),
        # Torch ATen Dispatcher.
        "ATen": ATen.get_converter(opset),
        # Quantization
        "QuantizeLinear": QuantizeLinear.get_converter(opset),
        "DequantizeLinear": DequantizeLinear.get_converter(opset),
        "DynamicQuantizeLinear": DynamicQuantizeLinear.get_converter(opset),
        "ReverseSequence": ReverseSequence.get_converter(opset),
        "QGemm": QGemm.get_converter(opset),
        "QLinearConv": QLinearConv.get_converter(opset),
        "QLinearConcat": QLinearConcat.get_converter(opset),
        "QLinearAdd": QLinearAdd.get_converter(opset),
        "QLinearMatMul": QLinearMatMul.get_converter(opset),
        "QLinearMul": QLinearMul.get_converter(opset),
        "QLinearSigmoid": QLinearSigmoid.get_converter(opset),
        "QLinearSoftmax": QLinearSoftmax.get_converter(opset),
        "ConvInteger": ConvInteger.get_converter(opset),
        "QLinearAveragePool": QLinearAveragePool.get_converter(opset),
        "QLinearGlobalAveragePool": QLinearGlobalAveragePool.get_converter(opset),
        "QLinearLeakyRelu": QLinearLeakyRelu.get_converter(opset),
        # Random number generation.
        "Bernoulli": Bernoulli.get_converter(opset),
        "RandomNormal": RandomNormal.get_converter(opset),
        "RandomNormalLike": RandomNormalLike.get_converter(opset),
        "RandomUniform": RandomUniform.get_converter(opset),
        "RandomUniformLike": RandomUniformLike.get_converter(opset),
        "Multinomial": Multinomial.get_converter(opset),
        # Loss functions / training
        "NegativeLogLikelihoodLoss": NegativeLogLikelihoodLoss.get_converter(opset),
        "SoftmaxCrossEntropyLoss": SoftmaxCrossEntropyLoss.get_converter(opset),
        "Adagrad": Adagrad.get_converter(opset),
        "Adam": Adam.get_converter(opset),
        "Momentum": Momentum.get_converter(opset),
        "Scan": Scan.get_converter(opset),
        # ML
        "LinearRegressor": LinearRegressor.get_converter(opset),
        "DFT": DFT.get_converter(opset),
        # Sequence operators
        "SequenceConstruct": SequenceConstruct.get_converter(opset),
        "SequenceEmpty": SequenceEmpty.get_converter(opset),
        "SequenceErase": SequenceErase.get_converter(opset),
        "SequenceInsert": SequenceInsert.get_converter(opset),
        "SequenceLength": SequenceLength.get_converter(opset),
        "ConcatFromSequence": ConcatFromSequence.get_converter(opset),
        "SplitToSequence": SplitToSequence.get_converter(opset),
        "SequenceAt": SequenceAt.get_converter(opset),
    }
class GraphProto:
    """A helper class for handling Relay expression copying from pb2.GraphProto.
    Definition: https://github.com/onnx/onnx/blob/main/onnx/onnx.proto
    Parameters
    ----------
    shape : dict of str to tuple, optional
        The input shape to the graph
    dtype : str or dict of str to str
        The input types to the graph
    freeze_params: bool
        If this parameter is true, the importer will take any provided
        onnx input values (weights, shapes, etc) and embed them into the relay model
        as Constants instead of variables. This allows more aggressive optimizations
        at compile time and helps in making models static if certain inputs represent
        attributes relay would traditionally consider compile-time constants.
    op_type_dict: Dict[str, int]
        Dictionary for span filling usage. If the name property of op was not set
        op_type_dict will provide an alternative by combining literal op type with
        its presenting order
    """
    # The innermost GraphProto being converted; managed by __enter__/__exit__
    # so nested (subgraph) conversions can reach the enclosing scope.
    current = None
    def __init__(self, shape, dtype, freeze_params=False, op_type_dict=None):
        self._nodes = {}  # output name -> relay expression for every produced value
        self._params = {}  # initializer name -> tvm.nd.NDArray (unless frozen)
        self._inputs = {}  # graph input name -> relay Var
        self._renames = {}  # onnx name -> renamed name, consulted on node inputs
        self._num_input = 0  # count of real graph inputs seen
        self._num_param = 0  # count of inputs that were actually parameters
        self._shape = shape.copy() if shape else {}
        self._input_names = []
        self._dtype = dtype
        self.opset = None  # set in from_onnx
        self._freeze_params = freeze_params
        self._op_type_dict = op_type_dict
    def __enter__(self):
        """Enter this graph's scope, remembering the previously active one."""
        self._old_manager = GraphProto.current
        GraphProto.current = self
        return self
    def __exit__(self, ptype, value, trace):
        """Restore the previously active graph scope."""
        GraphProto.current = self._old_manager
    def freeze(self, func, params):
        """Bind *params* into *func* as constants; return the rebound
        function and an empty params dict."""
        bind_map = {}
        for name in params.keys():
            if name in self._nodes.keys():
                bind_map[self._nodes[name]] = _expr.const(params[name])
        body = _expr.bind(func.body, bind_map)
        fn = _function.Function(analysis.free_vars(body), body)
        return fn, {}
    def from_onnx(self, graph, opset, get_output_expr=False):
        """Construct Relay expression from ONNX graph.
        Onnx graph is a python protobuf object.
        The companion parameters will be handled automatically.
        However, the input names from onnx graph is vague, mixing inputs and
        network weights/bias such as "1", "2"...
        For convenience, we rename the `real` input names to "input_0",
        "input_1"... And renaming parameters to "param_0", "param_1"...
        Parameters
        ----------
        graph : onnx protobuf object
            The loaded onnx graph
        opset : opset version
        get_output_expr: bool
            If set to true, this conversion will return each output expression rather
            than a packaged module. This can be useful when converting subgraphs to
            relay.
        Returns
        -------
        mod : tvm.IRModule
            The returned relay module
        params : dict
            A dict of name: tvm.nd.array pairs, used as pretrained weights
        """
        self.opset = opset
        self._parse_graph_initializers(graph)
        self._parse_graph_input(graph)
        self._check_user_inputs_in_outermost_graph_scope()
        self._check_for_unsupported_ops(graph)
        self._construct_nodes(graph)
        # now return the outputs
        outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output]
        outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
        # If requested, directly return the converted expressions.
        if get_output_expr:
            return outputs
        ## Maintain the order of inputs and parameters from the ONNX graph, but only include
        ## those parameters that are needed to execute the relay graph
        free_vars = analysis.free_vars(outputs)
        nodes = {v: k for k, v in self._nodes.items()}
        free_vars = [nodes[var] for var in free_vars]
        for i_name in self._params:
            if i_name in free_vars and i_name not in self._inputs:
                self._inputs[i_name] = self._nodes[i_name]
        # Create a function from our output expression and all input variables.
        func = _function.Function([v for k, v in self._inputs.items()], outputs)
        return IRModule.from_expr(func), self._params
    def _parse_graph_initializers(self, graph):
        """Parse network inputs to relay, aka parameters."""
        for init_tensor in graph.initializer:
            if not init_tensor.name.strip():
                raise ValueError("Tensor's name is required.")
            array = self._parse_array(init_tensor)
            if self._freeze_params:
                # Embed the weight directly as a relay Constant.
                self._nodes[init_tensor.name] = _expr.const(array)
            else:
                # Keep the weight as a named parameter bound to a Var.
                self._params[init_tensor.name] = array
                self._nodes[init_tensor.name] = new_var(
                    init_tensor.name,
                    shape=self._params[init_tensor.name].shape,
                    dtype=self._params[init_tensor.name].dtype,
                )
    def _parse_graph_input(self, graph):
        """Create relay Vars for graph inputs, distinguishing real inputs
        from initializer-backed parameters."""
        for i in graph.input:
            # from onnx v0.2, GraphProto.input has type ValueInfoProto,
            # and the name is 'i.name'
            i_name, i_shape, d_type, i_shape_name = get_info(i)
            if i_name in self._params:
                # i is a param instead of input
                self._num_param += 1
                self._nodes[i_name] = new_var(
                    i_name, shape=self._params[i_name].shape, dtype=self._params[i_name].dtype
                )
            elif i_name in self._nodes:
                continue
            else:
                self._num_input += 1
                self._input_names.append(i_name)
                if i_name in self._shape:
                    # A user-provided shape overrides the one in the model.
                    i_shape = self._shape[i_name]
                else:
                    if "?" in str(i_shape):
                        warning_msg = (
                            "Input %s has unknown dimension shapes: %s. "
                            "Specifying static values may improve performance"
                            % (i_name, str(i_shape_name))
                        )
                        warnings.warn(warning_msg)
                if isinstance(self._dtype, dict):
                    dtype = self._dtype[i_name] if i_name in self._dtype else d_type
                else:
                    dtype = d_type
                self._nodes[i_name] = new_var(i_name, shape=i_shape, dtype=dtype)
            self._inputs[i_name] = self._nodes[i_name]
    def _check_user_inputs_in_outermost_graph_scope(self):
        """Only check user inputs in the outer-most graph scope."""
        if self._old_manager is None:
            assert all(
                [name in self._input_names for name in self._shape.keys()]
            ), "User specified the shape for inputs that weren't found in the graph: " + str(
                self._shape
            )
    def _check_for_unsupported_ops(self, graph):
        """Raise OpNotImplemented listing every op in *graph* that has no converter."""
        convert_map = _get_convert_map(self.opset)
        unsupported_ops = set()
        for node in graph.node:
            op_name = node.op_type
            if (
                op_name not in convert_map
                and op_name != "Constant"
                and op_name not in _identity_list
            ):
                unsupported_ops.add(op_name)
        if unsupported_ops:
            msg = "The following operators are not supported for frontend ONNX: "
            msg += ", ".join(unsupported_ops)
            raise tvm.error.OpNotImplemented(msg)
    def _construct_nodes(self, graph):
        """Nodes are stored as directed acyclic graph."""
        for node in graph.node:
            op_name = node.op_type
            attr = self._parse_attr(node.attribute)
            # Fill in span of inputs
            node_source_name = get_source_name(node, self._op_type_dict)
            self._set_parameter_span(node, node_source_name)
            # Create and populate input list.
            inputs = onnx_input()
            for i in node.input:
                if i != "":
                    inputs.append(self._nodes[self._renames.get(i, i)])
                else:
                    # Empty name marks an omitted optional input.
                    inputs.append(None)
            i_name = self._parse_value_proto(node)
            node_output = self._fix_outputs(op_name, node.output)
            attr["tvm_custom"] = {}
            attr["tvm_custom"]["name"] = i_name
            attr["tvm_custom"]["num_outputs"] = len(node_output)
            op = self._convert_operator(op_name, inputs, attr, self.opset)
            if not isinstance(op, _expr.TupleWrapper):
                outputs_num = 1
            else:
                outputs_num = len(op)
            if outputs_num == 1:
                op = fold_constant(op)
            else:
                op = _expr.TupleWrapper(fold_constant(op.astuple()), len(op))
            op = set_span(op, node_source_name)
            if outputs_num > 1:
                # ONNX supports optional outputs for some nodes.
                # This block searches for missing outputs in the ONNX graph
                # and removes any unneeded ops
                valid_outputs = [False] * outputs_num
                for i, output in enumerate(node_output):
                    if output != "":
                        valid_outputs[i] = True
                # If we have outputs ONNX isn't expecting, we need to drop them
                if not all(valid_outputs):
                    tup = op.astuple()
                    # TupleWrapper can also wrap ops with TupleType outputs
                    if isinstance(tup, _expr.Tuple):
                        # For tuples, we extract the fields instead of using GetTupleItem
                        outputs = [tup.fields[i] for i, valid in enumerate(valid_outputs) if valid]
                    else:
                        # For call nodes, we need to GetTupleItem
                        outputs = [op[i] for i, valid in enumerate(valid_outputs) if valid]
                    # Create the new op with valid outputs
                    if len(outputs) == 1:
                        op = outputs[0]
                    elif len(outputs) != outputs_num:
                        op = _expr.TupleWrapper(_expr.Tuple(outputs), len(outputs))
                    # Drop invalid outputs for the onnx node
                    outputs_num = len(outputs)
                    node_output = [output for output in node_output if output != ""]
            assert (
                len(node_output) == outputs_num
            ), f"Number of output mismatch {len(node_output)} vs {outputs_num} in {op_name}."
            if outputs_num == 1:
                self._nodes[node_output[0]] = op
            else:
                for k, i in zip(list(node_output), range(len(node_output))):
                    self._nodes[k] = op[i]
    def _set_parameter_span(self, node, node_source_name):
        """Attach a source span to every Var/Constant input of *node*."""
        for i in node.input:
            if i != "":
                name = self._renames.get(i, i)
                expr = self._nodes.get(name)
                # relay.Var -> inputs / params
                # relay.Constant -> freezed params / built-in constants
                if isinstance(expr, (relay.Var, relay.Constant)):
                    expr_with_span = set_span(expr, make_parameter_span([node_source_name, name]))
                    self._nodes[name] = expr_with_span
                    if name in self._inputs:
                        self._inputs[name] = expr_with_span
    def _parse_value_proto(self, value_proto):
        """Parse ValueProto or raw str."""
        try:
            name = value_proto.name
        except AttributeError:
            name = value_proto
        return name
    def _parse_array(self, tensor_proto):
        """Convert an ONNX TensorProto into a tvm.nd.NDArray."""
        np_array = get_numpy(tensor_proto).reshape(tuple(tensor_proto.dims))
        return _nd.array(np_array)
    def _parse_attr(self, attr_proto):
        """Convert a list of AttributeProto to a dict, with names as keys."""
        attrs = {}
        for a in attr_proto:
            for f in ["f", "i", "s", "g"]:
                if a.HasField(f):
                    attrs[a.name] = getattr(a, f)
            for f in ["floats", "ints", "strings"]:
                if list(getattr(a, f)):
                    assert a.name not in attrs, "Only one type of attr is allowed"
                    attrs[a.name] = tuple(getattr(a, f))
            for f in ["t"]:
                if a.HasField(f):
                    attrs[a.name] = getattr(a, f)
            for f in ["tensors"]:
                if list(getattr(a, f)):
                    assert a.name not in attrs, "Only one type of attr is allowed"
                    attrs[a.name] = tuple(getattr(a, f))
            for f in ["graphs"]:
                if list(getattr(a, f)):
                    raise NotImplementedError(f"Field {f} is not supported in relay.")
            if a.name not in attrs:
                raise ValueError(f"Cannot parse attribute: \n{a}\n.")
        return attrs
    def _convert_operator(self, op_name, inputs, attrs, opset):
        """Convert ONNX operator into a Relay operator.
        The converter must specify conversions explicitly for incompatible name, and
        apply handlers to operator attributes.
        Parameters
        ----------
        op_name : str
            Operator name, such as Convolution, FullyConnected
        inputs : list of tvm.relay.function.Function
            List of inputs.
        attrs : dict
            Dict of operator attributes
        opset : int
            Opset version
        Returns
        -------
        sym : tvm.relay.function.Function
            Converted relay function
        """
        convert_map = _get_convert_map(opset)
        if op_name in _identity_list:
            sym = get_relay_op(op_name)(*inputs, **attrs)
        elif op_name in convert_map:
            sym = convert_map[op_name](inputs, attrs, self._params)
        else:
            raise NotImplementedError(f"Operator {op_name} not implemented.")
        return sym
    def _fix_outputs(self, op_name, outputs):
        """A hack to handle dropout or similar operator that have more than one out
        in ONNX.
        """
        if op_name == "Dropout":
            if len(outputs) == 1:
                return outputs
            # TODO(zhreshold): support dropout mask?
            outputs = outputs[:-1]
        return outputs
def export_model(location, graph):
    """Convert the graph to an onnx model and export it to the location."""
    import datetime
    import os
    from onnx import save, helper

    if not os.path.exists(location):
        os.makedirs(location)
    stamp = datetime.datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
    file_name = f"tvm_exported_model_{stamp}.onnx"
    save(helper.make_model(graph), os.path.join(location, file_name))
def from_onnx(
    model,
    shape=None,
    dtype="float32",
    opset=None,
    freeze_params=True,
    convert_config=None,
    export_node_renamed_model_path=None,
):
    """Convert a ONNX model into an equivalent Relay Function.
    ONNX graphs are represented as Python Protobuf objects.
    The companion parameters will be handled automatically.
    However, the input names from onnx graph is vague, mixing inputs and
    network weights/bias such as "1", "2"...
    For convenience, we rename the `real` input names to "input_0",
    "input_1"... And renaming parameters to "param_0", "param_1"...
    By default, ONNX defines models in terms of dynamic shapes. The ONNX importer
    retains that dynamism upon import, and the compiler attempts to convert the
    model into a static shapes at compile time. If this fails, there may still
    be dynamic operations in the model. Not all TVM kernels currently support
    dynamic shapes, please file an issue on discuss.tvm.apache.org
    if you hit an error with dynamic kernels.
    Parameters
    ----------
    model : protobuf object
        ONNX ModelProto after ONNX v1.1.0
    shape : dict of str to tuple, optional
        The input shape to the graph
    dtype : str or dict of str to str
        The input types to the graph
    opset : int, optional
        Override to autodetected opset.
        This can be helpful for some testing.
    freeze_params: bool
        If this parameter is true, the importer will take any provided
        onnx input values (weights, shapes, etc) and embed them into the relay model
        as Constants instead of variables. This allows more aggressive optimizations
        at compile time and helps in making models static if certain inputs represent
        attributes relay would traditionally consider compile-time constants.
    convert_config : Optional[Dict[str, Any]]
        Default config:
            use_nt_batch_matmul : bool = True
                True to convert qualified onnx `matmul` to `nn.batch_matmul` strict to NT format
                (transpose_a=False, transpose_b=True).
    export_node_renamed_model_path : str, optional
        Export the node renamed onnx model to the path.
        Some models do not contain names in their nodes. During the conversion, if names of nodes
        are empty, new names will be assigned based on their op types. The exported model can be the
        reference to spans.
    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation
    params : dict of str to tvm.nd.NDArray
        The parameter dict to be used by relay
    """
    global ONNX_DEFAULT_CONFIGS
    if convert_config is not None:
        ONNX_DEFAULT_CONFIGS.update(convert_config)
    try:
        import onnx
        if hasattr(onnx.checker, "check_model"):
            # try use onnx's own model checker before converting any model
            try:
                onnx.checker.check_model(model)
            except Exception as e:  # pylint: disable=c-extension-no-member, broad-except
                # the checker is a bit violent about errors, so simply print warnings here
                warnings.warn(str(e))
    except ImportError:
        pass
    g = GraphProto(shape, dtype, freeze_params, op_type_dict={})
    graph = model.graph
    try:
        opset_in_model = 1
        if model.opset_import:
            # TODO: for now we only really support ai.onnx op set
            # TODO: handle other namespaces well see https://github.com/apache/tvm/issues/10950
            for opset_identifier in model.opset_import:
                # As per https://github.com/onnx/onnx/blob/main/docs/IR.md
                # All operator sets except the default one must specify the operator version
                if str(opset_identifier.domain) in ["ai.onnx", ""]:
                    opset_in_model = opset_identifier.version
                    break
    except AttributeError:
        opset_in_model = 1
    if opset is None:
        opset = opset_in_model
    elif opset < opset_in_model:
        # Fixed: removed a stray empty string literal and the "overwritting" typo
        # from the user-facing warning message.
        warnings.warn(
            f"You are overwriting original opset ver = {opset_in_model} by lower ver = {opset}. "
            "That might cause model conversion errors."
        )
    # Use the graph proto as a scope so that ops can access other nodes if needed.
    with g:
        mod, params = g.from_onnx(graph, opset)
    if export_node_renamed_model_path:
        export_model(export_node_renamed_model_path, graph)
    if freeze_params:
        # Attempt to make the (possibly dynamic) imported model static.
        mod = relay.transform.DynamicToStatic()(mod)
    return mod, params
| 268,754 | 36.311537 | 120 | py |
tvm | tvm-main/python/tvm/relay/frontend/coreml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, unused-argument, unused-variable, no-else-return
# pylint: disable=inconsistent-return-statements, import-outside-toplevel
"""CoreML frontend."""
import math
import numpy as np
import tvm
from tvm.ir import IRModule
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from ..._ffi import base as _base
from .common import ExprTable
from .common import infer_shape as _infer_shape
__all__ = ["from_coreml"]
def _NeuralNetworkImageScaler(op, inexpr, etab):
    """Apply the image-scaler preprocessing: out = in * channelScale + per-channel bias."""
    # TODO: we need to support more colorspace, such as rgb.
    # this changes the symbol
    scale = _expr.const(op.channelScale, dtype="float32")
    bias_arr = np.array([op.blueBias, op.greenBias, op.redBias]).reshape([3, 1, 1])
    return _op.add(_op.multiply(inexpr, scale), etab.new_const(bias_arr))
def _NeuralNetworkMeanImage(op, inexpr, etab):
    """Subtract the preprocessing mean image from the input."""
    # this changes the symbol
    mean = _expr.const(op.meanImage, dtype="float32")
    return _op.subtract(inexpr, mean)
def _ConvolutionLayerParams(op, inexpr, etab):
    """Convert a CoreML convolution / deconvolution layer.

    Weights are reshaped to (kernelChannels, outputChannels, kH, kW) for
    deconvolution and (outputChannels, kernelChannels, kH, kW) otherwise,
    then dispatched to relay conv2d / conv2d_transpose with the layer's
    strides, dilation, groups and padding.
    """
    if op.isDeconvolution:
        weights = etab.new_const(
            np.array(list(op.weights.floatValue)).reshape(
                tuple([op.kernelChannels, op.outputChannels] + list(op.kernelSize))
            )
        )
    else:
        weights = etab.new_const(
            np.array(list(op.weights.floatValue)).reshape(
                tuple([op.outputChannels, op.kernelChannels] + list(op.kernelSize))
            )
        )
    dilation = list(op.dilationFactor)
    if not dilation:
        dilation = [1, 1]
    N, C, H, W = _infer_shape(inexpr)
    params = {
        "channels": op.outputChannels,
        "kernel_size": list(op.kernelSize),
        "strides": list(op.stride),
        "dilation": dilation,
        "groups": op.nGroups,
    }
    if op.WhichOneof("ConvolutionPaddingType") == "valid":
        valid = op.valid
        if valid.paddingAmounts.borderAmounts:
            assert len(valid.paddingAmounts.borderAmounts) == 2
            pad_t = valid.paddingAmounts.borderAmounts[0].startEdgeSize
            pad_l = valid.paddingAmounts.borderAmounts[1].startEdgeSize
            pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize
            pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize
            if not all(v == 0 for v in (pad_t, pad_l, pad_b, pad_r)):
                params["padding"] = (pad_t, pad_l, pad_b, pad_r)
    elif op.WhichOneof("ConvolutionPaddingType") == "same":
        assert op.same.asymmetryMode == 0, (
            "Only support BOTTOM_RIGHT_HEAVY mode, " "which is used by tf/caffe and so on"
        )
        kernel = params["kernel_size"]
        strides = params["strides"]
        pad_t, pad_b = get_pad_value(H, kernel[0], strides[0])
        pad_l, pad_r = get_pad_value(W, kernel[1], strides[1])
        params["padding"] = (pad_t, pad_l, pad_b, pad_r)
    else:
        # Fix: the previous message read "Valid/Same convolution padding
        # implemented", which inverted the intended meaning.
        raise NotImplementedError("Only Valid/Same convolution padding is implemented")
    if op.isDeconvolution:
        ret = _op.nn.conv2d_transpose(data=inexpr, weight=weights, **params)
    else:
        ret = _op.nn.conv2d(data=inexpr, weight=weights, **params)
    if op.hasBias:
        biases = etab.new_const(list(op.bias.floatValue))
        ret = _op.nn.bias_add(ret, biases)
    return ret
def _BatchnormLayerParams(op, inexpr, etab):
    """Convert a CoreML batch-norm layer; returns only the normalized data."""
    # this changes the symbol
    if op.instanceNormalization:
        raise tvm.error.OpNotImplemented(
            'Operator "instance normalization" is not supported in frontend CoreML.'
        )
    return _op.nn.batch_norm(
        data=inexpr,
        gamma=etab.new_const(list(op.gamma.floatValue)),
        beta=etab.new_const(list(op.beta.floatValue)),
        moving_mean=etab.new_const(list(op.mean.floatValue)),
        moving_var=etab.new_const(list(op.variance.floatValue)),
        epsilon=op.epsilon,
    )[0]
def _ActivationParams(op, inexpr, etab):
    """Convert a CoreML activation layer to the corresponding relay op.

    Fixes relative to the previous revision:
    * ``softsign`` called ``op.nn.relu`` — ``op`` is the protobuf message and
      has no ``nn`` attribute; the relay ``_op.nn.relu`` was intended.
    * ``thresholdedReLU`` called the non-existent ``as_type``; relay
      expressions expose ``astype``.
    * ``parametricSoftplus`` read ``par.alpha.floatValue`` for *beta*; it now
      reads ``par.beta.floatValue``.
    """
    whichActivation = op.WhichOneof("NonlinearityType")
    par = getattr(op, whichActivation)
    if whichActivation == "linear":
        alpha = _expr.const(par.alpha, dtype="float32")
        beta = _expr.const(par.beta, dtype="float32")
        return _op.add(_op.multiply(inexpr, alpha), beta)
    if whichActivation == "ReLU":
        return _op.nn.relu(inexpr)
    if whichActivation == "leakyReLU":
        return _op.nn.leaky_relu(inexpr, alpha=par.alpha)
    if whichActivation == "thresholdedReLU":
        alpha_tensor = _op.full_like(inexpr, fill_value=_expr.const(par.alpha, dtype="float32"))
        return _op.multiply(inexpr, _op.greater(inexpr, alpha_tensor).astype("float32"))
    if whichActivation == "PReLU":
        return _op.nn.prelu(inexpr, alpha=_expr.const(par.alpha, dtype="float32"))
    if whichActivation == "tanh":
        return _op.tanh(inexpr)
    if whichActivation == "scaledTanh":
        alpha = _expr.const(par.alpha, dtype="float32")
        beta = _expr.const(par.beta, dtype="float32")
        return _op.multiply(_op.tanh(_op.multiply(inexpr, beta)), alpha)
    if whichActivation == "sigmoid":
        return _op.sigmoid(inexpr)
    if whichActivation == "sigmoidHard":
        alpha = _expr.const(par.alpha, dtype="float32")
        beta = _expr.const(par.beta, dtype="float32")
        transformX = (alpha * inexpr) + beta
        return _op.clip(transformX, a_min=0.0, a_max=1.0)
    if whichActivation == "ELU":
        # NOTE(review): this computes alpha * (exp(x) - 1) for every x; a true
        # ELU is the identity for x > 0 — confirm against the CoreML spec
        # before changing, as existing models depend on current behavior.
        return _op.multiply(
            _op.add(_op.exp(inexpr), _expr.const(-1, dtype="float32")),
            _expr.const(par.alpha, dtype="float32"),
        )
    if whichActivation == "softsign":
        # relu(x) + relu(-x) == |x|
        return inexpr / (
            _expr.const(1, dtype="float32")
            + (_op.nn.relu(inexpr) + _op.nn.relu(_op.negative(inexpr)))
        )
    if whichActivation == "softplus":
        return _op.log(_op.add(_op.exp(inexpr), _expr.const(1, dtype="float32")))
    if whichActivation == "parametricSoftplus":
        alpha = list(par.alpha.floatValue)
        beta = list(par.beta.floatValue)
        if len(alpha) == 1:
            return _op.multiply(
                _op.log(_op.add(_op.exp(inexpr), _expr.const(beta[0], dtype="float32"))),
                _expr.const(alpha[0], dtype="float32"),
            )
        alpha = np.array(alpha).reshape((len(alpha), 1, 1))
        beta = np.array(beta).reshape((len(beta), 1, 1))
        alpha_expr = etab.new_const(alpha)
        beta_expr = etab.new_const(beta)
        return _op.multiply(_op.log(_op.add(_op.exp(inexpr), beta_expr)), alpha_expr)
    raise tvm.error.OpNotImplemented(
        f"Operator {whichActivation} is not supported in frontend CoreML."
    )
def _ScaleLayerParams(op, inexpr, etab):
    """Convert a CoreML scale layer: out = in * scale (+ bias if present)."""
    scale_arr = np.array(list(op.scale.floatValue)).reshape(
        tuple(list(op.shapeScale) + [1, 1])
    )
    out = _op.multiply(inexpr, etab.new_const(scale_arr))
    if op.hasBias:
        bias_arr = np.array(list(op.bias.floatValue)).reshape(
            tuple(list(op.shapeBias) + [1, 1])
        )
        out = _op.add(out, etab.new_const(bias_arr))
    return out
def _PoolingLayerParams(op, inexpr, etab):
    """Convert a CoreML pooling layer (max / average, global or windowed)."""
    if op.globalPooling:
        if op.type == 0:
            return _op.nn.global_max_pool2d(inexpr)
        if op.type == 1:
            return _op.nn.global_avg_pool2d(inexpr)
        raise tvm.error.OpNotImplemented(
            "Only Max and Average Pooling are supported in frontend CoreML."
        )
    attrs = {"pool_size": list(op.kernelSize), "strides": list(op.stride)}
    padding_kind = op.WhichOneof("PoolingPaddingType")
    if padding_kind == "valid":
        amounts = op.valid.paddingAmounts.borderAmounts
        if amounts:
            assert len(amounts) == 2
            pad_t, pad_b = amounts[0].startEdgeSize, amounts[0].endEdgeSize
            pad_l, pad_r = amounts[1].startEdgeSize, amounts[1].endEdgeSize
            if any(v != 0 for v in (pad_t, pad_l, pad_b, pad_r)):
                attrs["padding"] = [pad_t, pad_l, pad_b, pad_r]
    elif padding_kind == "includeLastPixel":
        # I don't know if this is correct
        attrs["padding"] = list(op.includeLastPixel.paddingAmounts)
        attrs["ceil_mode"] = True
    else:
        msg = f"PoolingPaddingType {padding_kind} is not supported in operator Pooling."
        raise tvm.error.OpAttributeUnImplemented(msg)
    if op.type == 0:
        return _op.nn.max_pool2d(inexpr, **attrs)
    if op.type == 1:
        return _op.nn.avg_pool2d(inexpr, **attrs)
    raise tvm.error.OpNotImplemented("Only Max and Average Pooling are supported in CoreML.")
def _SoftmaxLayerParams(op, inexpr, etab):
    """Flatten the input to 2-D, then apply softmax."""
    flattened = _op.nn.batch_flatten(inexpr)
    return _op.nn.softmax(flattened)
def _InnerProductLayerParams(op, inexpr, etab):
    """Convert a CoreML fully-connected (inner-product) layer."""
    weight = etab.new_const(
        np.array(op.weights.floatValue).reshape((op.outputChannels, op.inputChannels))
    )
    dense = _op.nn.dense(data=inexpr, weight=weight, units=op.outputChannels)
    if not op.hasBias:
        return dense
    return _op.nn.bias_add(dense, etab.new_const(np.array(op.bias.floatValue)))
def _AddLayerParams(op, inexpr, etab):
    """Convert a CoreML elementwise add layer.

    Sums all inputs, then adds the scalar ``alpha`` offset when it is
    non-zero.  Fix: the previous guard ``op.alpha > 0`` silently dropped
    negative offsets; ``!= 0`` keeps the skip-when-identity optimization
    (mirroring ``!= 1`` in ``_MultiplyLayerParams``) without losing them.
    """
    if not isinstance(inexpr, list):
        inexpr = [inexpr]
    ret = inexpr[0]
    for operand in inexpr[1:]:
        ret = _op.add(ret, operand)
    if op.alpha != 0:
        ret = _op.add(ret, _expr.const(op.alpha, dtype="float32"))
    return ret
def _MultiplyLayerParams(op, inexpr, etab):
    """Convert a CoreML elementwise multiply layer (optional scalar alpha)."""
    operands = inexpr if isinstance(inexpr, list) else [inexpr]
    ret = operands[0]
    for term in operands[1:]:
        ret = _op.multiply(ret, term)
    # alpha == 1 is the multiplicative identity and can be skipped.
    if op.alpha != 1:
        ret = _op.multiply(ret, _expr.const(op.alpha, dtype="float32"))
    return ret
def _ConcatLayerParams(op, inexpr, etab):
    """Concatenate all inputs along axis 1 (the channel axis in NCHW)."""
    if op.sequenceConcat:
        raise tvm.error.OpNotImplemented(
            "Operator Sequence Concat is not supported in frontend CoreML."
        )
    if not isinstance(inexpr, list):
        inexpr = [inexpr]
    return _op.concatenate(inexpr, axis=1)
def _FlattenLayerParams(op, inexpr, etab):
    """Flatten the input to (N, -1)."""
    data = inexpr
    if op.mode == 1:
        # mode 1: move the channel axis last before flattening.
        data = _op.transpose(_op.reshape(data, newshape=(0, 0, -1)), axes=(0, 2, 1))
    return _op.nn.batch_flatten(data)
def _PaddingLayerParams(op, inexpr, etab):
    """Convert a CoreML padding layer; only zero-valued constant padding is handled."""
    if op.WhichOneof("PaddingType") != "constant":
        raise tvm.error.OpNotImplemented(
            "Non-constant padding is not supported in frontend CoreML."
        )
    if op.constant.value != 0:
        raise tvm.error.OpAttributeUnImplemented(
            f"{op.constant.value} is not supported in operator Padding."
        )
    amounts = op.paddingAmounts.borderAmounts
    pad_t, pad_b = amounts[0].startEdgeSize, amounts[0].endEdgeSize
    pad_l, pad_r = amounts[1].startEdgeSize, amounts[1].endEdgeSize
    return _op.nn.pad(data=inexpr, pad_width=((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
def _PermuteLayerParams(op, inexpr, etab):
    """Transpose the input according to the layer's axis order."""
    return _op.transpose(inexpr, axes=tuple(op.axis))
def _UpsampleLayerParams(op, inexpr, etab):
    """Convert a CoreML upsample layer (equal height/width scales only).

    Fix: the failure path raised ``tvm.error.OpAttributeUnimplemented``, which
    is mis-cased — the class is ``OpAttributeUnImplemented`` (as used elsewhere
    in this file), so the intended error surfaced as an AttributeError instead.
    """
    if op.scalingFactor[0] != op.scalingFactor[1]:
        raise tvm.error.OpAttributeUnImplemented("Upsample height and width must be equal.")
    # mode 0 is nearest-neighbor; anything else maps to bilinear.
    interpolationMode = "nearest_neighbor" if op.mode == 0 else "bilinear"
    return _op.nn.upsampling(
        inexpr, scale_h=op.scalingFactor[0], scale_w=op.scalingFactor[1], method=interpolationMode
    )
def _L2NormalizeLayerParams(op, inexpr, etab):
    """L2-normalize across the channel axis with the layer's epsilon."""
    return _op.nn.l2_normalize(inexpr, eps=op.epsilon, axis=[1])
def _LRNLayerParams(op, inexpr, etab):
    """Convert a CoreML local-response-normalization layer."""
    return _op.nn.lrn(
        data=inexpr,
        size=op.localSize,
        bias=op.k,
        alpha=op.alpha,
        beta=op.beta,
        axis=1,  # default layout is nchw
    )
def _AverageLayerParams(op, inexpr, etab):
    """Elementwise mean over all inputs (at least two required)."""
    if not isinstance(inexpr, list) or len(inexpr) < 2:
        raise ValueError("Expect minimum 2 inputs")
    total = inexpr[0]
    for operand in inexpr[1:]:
        total = _op.add(total, operand)
    return total / _expr.const(len(inexpr), dtype="float32")
def _MaxLayerParams(op, inexpr, etab):
    """Elementwise maximum over all inputs (at least two required)."""
    if not isinstance(inexpr, list) or len(inexpr) < 2:
        raise ValueError("Expect minimum 2 inputs")
    result = inexpr[0]
    for operand in inexpr[1:]:
        result = _op.maximum(result, operand)
    return result
def _MinLayerParams(op, inexpr, etab):
    """Elementwise minimum over all inputs (at least two required)."""
    if not isinstance(inexpr, list) or len(inexpr) < 2:
        raise ValueError("Expect minimum 2 inputs")
    result = inexpr[0]
    for operand in inexpr[1:]:
        result = _op.minimum(result, operand)
    return result
def _UnaryFunctionLayerParams(op, inexpr, etab):
    """Convert a CoreML unary function layer to the matching relay op."""
    kind = op.type
    if kind == op.SQRT:
        return _op.sqrt(inexpr)
    if kind == op.RSQRT:
        # epsilon stabilizes the reciprocal near zero.
        return _op.rsqrt(inexpr + _expr.const(op.epsilon))
    if kind == op.INVERSE:
        return _expr.const(1.0) / (inexpr + _expr.const(op.epsilon))
    if kind == op.POWER:
        return _op.power(inexpr, _expr.const(op.alpha))
    if kind == op.EXP:
        return _op.exp(inexpr)
    if kind == op.LOG:
        return _op.log(inexpr)
    if kind == op.ABS:
        return _op.abs(inexpr)
    if kind == op.THRESHOLD:
        return _op.maximum(inexpr, _expr.const(op.alpha))
    msg = f"Unary Op type value {kind} is not supported in frontend CoreML."
    raise tvm.error.OpAttributeUnImplemented(msg)
def _ReduceLayerParams(op, inexpr, etab):
    """Convert a CoreML reduce layer via axis/mode lookup tables."""
    axis_table = {
        op.CHW: [-3, -2, -1],
        op.HW: [-2, -1],
        op.C: -3,
        op.H: -2,
        op.W: -1,
    }
    if op.axis not in axis_table:
        raise tvm.error.OpAttributeUnImplemented(
            f"Reduce axis value {op.axis} is not supported in frontend CoreML."
        )
    axis = axis_table[op.axis]
    mode_table = {
        op.SUM: _op.sum,
        op.AVG: _op.mean,
        op.PROD: _op.prod,
        op.MIN: _op.min,
        op.MAX: _op.max,
        op.ARGMAX: _op.argmax,
    }
    if op.mode not in mode_table:
        raise tvm.error.OpAttributeUnImplemented(
            f"Reduce mode value {op.mode} is not supported in frontend CoreML."
        )
    return mode_table[op.mode](inexpr, axis=axis, keepdims=True)
def _ReshapeLayerParams(op, inexpr, etab):
    """Reshape the input to the layer's target shape."""
    return _op.reshape(inexpr, op.targetShape)
def _SplitLayerParams(op, inexpr, etab):
    """Split the input into nOutputs equal pieces along the channel axis."""
    return _op.split(inexpr, op.nOutputs, axis=-3)
# Dispatch table: CoreML layer-parameter protobuf class name -> converter
# function above.  coreml_op_to_relay looks up type(op).__name__ here.
_convert_map = {
    "NeuralNetworkMeanImage": _NeuralNetworkMeanImage,
    "NeuralNetworkImageScaler": _NeuralNetworkImageScaler,
    "ConvolutionLayerParams": _ConvolutionLayerParams,
    "BatchnormLayerParams": _BatchnormLayerParams,
    "ActivationParams": _ActivationParams,
    "ScaleLayerParams": _ScaleLayerParams,
    "PoolingLayerParams": _PoolingLayerParams,
    "SoftmaxLayerParams": _SoftmaxLayerParams,
    "InnerProductLayerParams": _InnerProductLayerParams,
    "AddLayerParams": _AddLayerParams,
    "MultiplyLayerParams": _MultiplyLayerParams,
    "FlattenLayerParams": _FlattenLayerParams,
    "ConcatLayerParams": _ConcatLayerParams,
    "PaddingLayerParams": _PaddingLayerParams,
    "PermuteLayerParams": _PermuteLayerParams,
    "UpsampleLayerParams": _UpsampleLayerParams,
    "L2NormalizeLayerParams": _L2NormalizeLayerParams,
    "LRNLayerParams": _LRNLayerParams,
    "AverageLayerParams": _AverageLayerParams,
    "MaxLayerParams": _MaxLayerParams,
    "MinLayerParams": _MinLayerParams,
    "UnaryFunctionLayerParams": _UnaryFunctionLayerParams,
    "ReduceLayerParams": _ReduceLayerParams,
    "ReshapeLayerParams": _ReshapeLayerParams,
    "SplitLayerParams": _SplitLayerParams,
}
# SAME padding: https://www.tensorflow.org/api_guides/python/nn
def get_pad_value(data, kernel, stride):
    """Get the pad tuple of value for SAME padding
    Parameters
    ----------
    data:
        1D input data
    kernel:
        1D input kernel
    stride:
        1D input stride
    Returns
    -------
    pad tuple of value
    """
    out_size = math.ceil(data / stride)
    total_pad = max(0, (out_size - 1) * stride + kernel - data)
    # Odd totals put the extra pixel after the data (bottom-right heavy).
    pad_before = total_pad // 2
    return pad_before, total_pad - pad_before
def coreml_op_to_relay(op, inname, outnames, etab):
    """Convert one CoreML layer to Relay and record the result(s) in *etab*.

    Parameters
    ----------
    op: a coreml protobuf bit
    inname : str or list of str
        Name of the input Relay expression.
    outnames : str or list of str
        Name of the output Relay expression.
    etab : relay.frontend.common.ExprTable
        The global expression table to be updated.
    """
    classname = type(op).__name__
    if classname not in _convert_map:
        raise tvm.error.OpNotImplemented(
            f"Operator {classname} is not supported in frontend CoreML."
        )
    if isinstance(inname, _base.string_types):
        insym = etab.get_expr(inname)
    else:
        insym = [etab.get_expr(i) for i in inname]
    outs = _convert_map[classname](op, insym, etab)
    if not outnames:
        return
    single = isinstance(outnames, _base.string_types)
    if single or len(outnames) == 1:
        etab.set_expr(outnames if single else outnames[0], outs, force_override=True)
    else:
        # the number of outputs from model op and tvm relay must be same
        assert len(outnames) == len(outs)
        for name, out in zip(outnames, outs):
            etab.set_expr(name, out, force_override=True)
def from_coreml(model, shape=None):
    """Convert from coreml model into Relay Function.
    Parameters
    ----------
    model:
        coremltools.models.MLModel of a NeuralNetworkClassifier
    shape : dict of str to int list/tuple, optional
        The input shapes
    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation.
    params : dict of str to tvm.nd.NDArray
        The parameter dict to be used by Relay.
    """
    try:
        import coremltools as cm
    except ImportError:
        raise ImportError("The coremltools package must be installed")
    assert isinstance(model, cm.models.MLModel)
    spec = model.get_spec()
    modeltype = spec.WhichOneof("Type")
    assert modeltype in ["neuralNetworkClassifier", "neuralNetwork", "neuralNetworkRegressor"]
    cc = getattr(spec, modeltype)
    etab = ExprTable()
    # Declare one relay var per model input (shape taken from *shape* if given).
    for i in spec.description.input:
        input_shape = list(shape[i.name]) if shape is not None and i.name in shape else None
        etab.set_expr(i.name, _expr.var(i.name, shape=input_shape))
    # Apply preprocessing layers; each one rebinds the input name it touches.
    for pp in cc.preprocessing:
        whichpp = pp.WhichOneof("preprocessor")
        ppmethod = getattr(pp, whichpp)
        if whichpp == "scaler":
            # Be careful we maybe only preprocess one input when we have multi inputs
            # which is stored in pp.featureName. See unit testing verify_image_scaler
            # in test_forward.py for CoreML.
            for i in spec.description.input:
                # we have multi inputs
                if len(spec.description.input) > 1:
                    assert pp.featureName != ""
                    if i.name == pp.featureName:
                        coreml_op_to_relay(ppmethod, i.name, i.name, etab)
                else:
                    assert pp.featureName == ""
                    coreml_op_to_relay(ppmethod, i.name, i.name, etab)
        else:
            coreml_op_to_relay(ppmethod, pp.featureName, pp.featureName, etab)
    # Convert the network layers in order; each output name gets an expression.
    for l in cc.layers:
        layertype = l.WhichOneof("layer")
        layerop = getattr(l, layertype)
        if len(l.input) == 1:
            coreml_op_to_relay(layerop, l.input[0], l.output, etab)
        else:
            coreml_op_to_relay(layerop, list(l.input), l.output, etab)
    # Model outputs not produced by any layer become free vars.
    outexpr = [
        etab.get_expr(o.name) if o.name in etab.exprs else _expr.var(o.name)
        for o in spec.description.output
    ]
    # check there are multiple outputs in the model and all are there in etab
    multi_out = all([bool(o.name in etab.exprs) for o in spec.description.output])
    outexpr = _expr.Tuple(outexpr) if multi_out else outexpr[0]
    func = _function.Function(analysis.free_vars(outexpr), outexpr)
    params = {k: _nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()}
    return IRModule.from_expr(func), params
| 22,188 | 35.555189 | 98 | py |
tvm | tvm-main/python/tvm/relay/frontend/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=broad-except
"""Common utilities"""
from __future__ import absolute_import as _abs
import logging
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from ..expr_functor import ExprMutator
from .. import expr as _expr
from .. import function as _function
from .. import transform as _transform
from .. import op as _op
from .. import ty as _ty
from .. import analysis
class DuplicateFilter:
    """A log filter that lets each distinct message through only once."""

    def __init__(self):
        # Messages that have already been emitted.
        self.msgs = set()

    def filter(self, record):
        """Return True the first time ``record.msg`` is seen, False afterwards.

        Fix: the previous implementation added the message to the set *before*
        the membership test, so every record — including the first occurrence —
        was suppressed.
        """
        if record.msg in self.msgs:
            return False
        self.msgs.add(record.msg)
        return True
# pylint: disable=invalid-name
# Module-level logger shared by the relay frontends; DuplicateFilter is
# attached to de-duplicate repeated messages.
logger = logging.getLogger("Frontend")
logger.addFilter(DuplicateFilter())
# Uncomment below line to print all debug msgs
# logger.setLevel(logging.DEBUG)
class RequiredAttr(object):
    """Dummy sentinel class used as a default value to mark an attribute as required."""
class StrAttrsDict(object):
    """Helper class to parse attrs stored as Dict[str, str].

    Parameters
    ----------
    attrs : Dict[str, str]
        The attributes to be used.
    """

    def __init__(self, attrs):
        self.attrs = attrs

    def _fallback(self, key, default):
        # Shared miss handler: a RequiredAttr default marks the key mandatory.
        if isinstance(default, RequiredAttr):
            raise AttributeError(f"Required attribute {key} not found.")
        return default

    def has_attr(self, key):
        """Return True if *key* is present in the attribute map."""
        return key in self.attrs

    def get_float(self, key, default=RequiredAttr()):
        """Return the attribute parsed as a float, or *default* when absent."""
        if key in self.attrs:
            return float(self.attrs[key])
        return self._fallback(key, default)

    def get_int(self, key, default=RequiredAttr()):
        """Return the attribute parsed as an int; the string "None" maps to None."""
        if key in self.attrs:
            raw = self.attrs[key]
            return None if raw == "None" else int(raw)
        return self._fallback(key, default)

    def get_str(self, key, default=RequiredAttr()):
        """Return the raw string attribute, or *default* when absent."""
        if key in self.attrs:
            return self.attrs[key]
        return self._fallback(key, default)

    def get_int_tuple(self, key, default=RequiredAttr()):
        """Return the attribute parsed as a tuple of ints (non-numeric entries -> None)."""
        if key in self.attrs:
            raw = self.attrs[key]
            return tuple(
                int(x) if x.strip("- ").isdigit() else None
                for x in raw.strip("()[]").split(",")
                if x
            )
        return self._fallback(key, default)

    def get_float_tuple(self, key, default=RequiredAttr()):
        """Return the attribute parsed as a tuple of floats."""
        if key in self.attrs:
            raw = self.attrs[key]
            return tuple(float(x.strip()) for x in raw.strip("()[]").split(","))
        return self._fallback(key, default)

    def get_tuple_tuple_int(self, key, default=RequiredAttr()):
        """Return the attribute parsed as a tuple of int tuples."""
        if key in self.attrs:
            raw = self.attrs[key]
            groups = []
            for chunk in raw.strip("()").split("),"):
                chunk = chunk.strip("[]()")
                groups.append(tuple(int(x.strip("( ")) for x in chunk.split(",")))
            return tuple(groups)
        return self._fallback(key, default)

    def get_int_list(self, key, default=RequiredAttr()):
        """Return the attribute parsed as a tuple of ints."""
        if key in self.attrs:
            raw = self.attrs[key]
            return tuple(int(x.strip()) for x in raw.strip("[]()").split(","))
        return self._fallback(key, default)

    def get_bool(self, key, default=RequiredAttr()):
        """Return the attribute interpreted as a boolean."""
        if key in self.attrs:
            return self.attrs[key].strip().lower() in ["true", "1", "t", "y", "yes"]
        return self._fallback(key, default)
def get_relay_op(op_name):
    """Get the callable function from Relay based on operator name.
    Parameters
    ----------
    op_name : str
        The Relay operator name.
    """
    if "." in op_name:
        # Dotted names are resolved segment by segment from the _op root.
        op = _op
        try:
            for part in op_name.split("."):
                op = getattr(op, part)
        except AttributeError:
            op = None
    else:
        # Bare names are searched across the common operator namespaces.
        op = None
        for namespace in (_op, _op.nn, _op.image, _op.vision, _op.contrib):
            op = getattr(namespace, op_name, None)
            if op is not None:
                break
    if not op:
        raise tvm.error.OpNotImplemented(f"Unable to map op_name {op_name} to relay")
    return op
class ExprTable(object):
    """Table storing Relay expressions by names."""

    def __init__(self):
        self.exprs = {}  # name -> relay expression
        self.params = {}  # name -> constant value backing a generated var
        self.const_ctr = 1  # counter for unique generated parameter names
        self.in_padding = False

    def new_const(self, value, shape=None, dtype="float32", source_name=None):
        """Create a var expr for *value*, register it, and return it."""
        name = f"_param_{self.const_ctr}"
        self.const_ctr += 1
        if hasattr(value, "shape"):
            shape = value.shape
        self.params[name] = value
        var = _expr.var(name_hint=name, shape=shape, dtype=dtype)
        if source_name:
            var = set_span(var, source_name)
        self.exprs[name] = var
        return var

    def get_expr(self, name):
        """Look up the expression registered under *name*."""
        return self.exprs[name]

    def set_expr(self, name, expr, force_override=False):
        """Register *expr* under *name*.

        Existing entries win unless force_override is set: some frontends
        (e.g. CoreML preprocessing) rebind an input name to a new expression,
        while the keras frontend relies on first-write-wins.
        """
        assert isinstance(expr, _expr.Expr)
        if force_override or name not in self.exprs:
            self.exprs[name] = expr

    def has_expr(self, name):
        """Return True when *name* has a registered expression."""
        return name in self.exprs

    def set_padding(self, paddings):
        """Record explicit paddings to be consumed by a following op."""
        self.paddings = paddings
        self.in_padding = True

    def clear_padding(self):
        """Mark that no explicit padding is pending."""
        self.in_padding = False
class AttrCvt(object):
    """Common attribute converter. An AttrConverter instance is a callable:
    ```
    attr_converter = AttrConverter(op_name, transforms={'a':'b', 'c':('d', 1)})
    new_op_name, new_attr = attr_converter(attrs)
    ```
    Parameters
    ----------
    op_name : str or callable
        If set as str, returned operator name is the str.
        If set as callable, returned operator is the str returned by calling:
        `op_name = func(attr)`
    transforms : dict of `new_name, or (new_name, default_value, transform function)`
        If only a new_name is provided, it's like renaming the attribute name.
        If default_value if provided, then the attribute is considered as optional.
        If transform function is provided, the original attribute value is handled
        by transform function.
    excludes : list
        A list of excluded attributes that should `NOT` appear.
        Raise NotImplementedError if occurred.
    disables : list
        A list of attributes that is disabled in relay. Log warnings.
    ignores : list
        A list of attributes that is ignored in relay. Debug level logging.
    extras : dict
        A series of additional attributes should be added anyway to the returned
        attribute dict.
    custom_check : callable
        A custom function takes attribute, and return True/False.
        Raise RuntimeError if not bool(True) returned.
    """

    def __init__(
        self,
        op_name,
        transforms=None,
        excludes=None,
        disables=None,
        ignores=None,
        extras=None,
        custom_check=None,
    ):
        self._op_name = op_name
        self._transforms = transforms if transforms else {}
        self._excludes = excludes if excludes else []
        self._disables = disables if disables else []
        self._ignores = ignores if ignores else []
        self._extras = extras if extras else {}
        self._custom_check = custom_check

    def __call__(self, inputs, attrs, *args):
        """Convert *attrs* and call the resolved relay operator on *inputs*."""
        # Attributes common to converted frameworks that relay never consumes.
        self._ignores.append("_output_shapes")
        self._ignores.append("_input_shapes")
        self._ignores.append("T")
        self._ignores.append("use_cudnn_on_gpu")
        self._ignores.append("_node_name")
        self._ignores.append("is_training")
        self._ignores.append("_target_layout")
        # apply custom check
        if self._custom_check:
            func, msg = self._custom_check
            if not func(attrs):
                raise RuntimeError(f"Check failed: {msg}")
        # get new op_name
        if isinstance(self._op_name, str):
            op_name = self._op_name
        else:
            assert callable(self._op_name), "op_name can either be string or callable"
            op_name = self._op_name(attrs)
        # ignore 'tvm_custom' always
        self._ignores.append("tvm_custom")
        # convert attributes
        new_attrs = {}
        for k in attrs.keys():
            if k in self._excludes:
                # Fix: the message used to be passed as ("fmt" , k, op_name),
                # which left the exception carrying an unformatted tuple.
                raise NotImplementedError(
                    f"Attribute {k} in operator {op_name} is not supported."
                )
            if k in self._disables:
                logger.debug("Attribute %s is disabled in relay.sym.%s", k, op_name)
            elif k in self._ignores:
                if k != "tvm_custom":
                    logger.debug("Attribute %s is ignored in relay.sym.%s", k, op_name)
            elif k in self._transforms:
                new_name, defaults, transform = self._parse_default(self._transforms[k])
                if defaults is None:
                    new_attr = self._required_attr(attrs, k)
                else:
                    new_attr = attrs.get(k, None)
                if new_attr is None:
                    new_attrs[new_name] = defaults
                else:
                    new_attrs[new_name] = transform(new_attr)
            else:
                # copy
                new_attrs[k] = attrs[k]
        # add extras
        new_attrs.update(self._extras)
        return get_relay_op(op_name)(*inputs, **new_attrs)

    def _parse_default(self, target):
        """Helper function to parse default values."""
        if not isinstance(target, (list, tuple)):
            k, v, t = target, None, lambda x: x
        elif len(target) == 1:
            k, v, t = target[0], None, lambda x: x
        elif len(target) == 2:
            k, v, t = target[0], target[1], lambda x: x
        elif len(target) > 2:
            k, v, t = target[0], target[1], target[2]
        else:
            k = None  # should raise
        if not isinstance(k, str):
            msg = f"{target} is not a valid target, (name, default) expected."
            raise ValueError(msg)
        return k, v, t

    def _parse_bool(self, value):
        """Helper function to parse default boolean values."""
        if isinstance(value, str):
            return value.strip().lower() in ["true", "1", "t", "y", "yes"]
        return bool(value)

    def _required_attr(self, attr, key):
        """Wrapper for getting required attributes."""
        assert isinstance(attr, dict)
        if key not in attr:
            raise AttributeError(f"Required attribute {key} not found.")
        return attr[key]
def get_name(node):
    """Return *node*'s ``name_hint`` attribute, or an empty string if it has none."""
    return getattr(node, "name_hint", "")
def infer_type(node, mod=None):
    """A method to infer the type of an intermediate node in the relay graph."""
    if isinstance(mod, IRModule):
        # Reuse the caller's module so definitions it already contains stay visible.
        mod["main"] = _function.Function(tvm.relay.analysis.free_vars(node), node)
        typed = _transform.InferType()(mod)
        return typed["main"].body
    scratch = IRModule.from_expr(node)
    if mod is not None:
        scratch.update(mod)
    typed_main = _transform.InferType()(scratch)["main"]
    # A Function input yields the typed function itself, otherwise its body.
    return typed_main if isinstance(node, _function.Function) else typed_main.body
def fold_constant(node, mod=None):
    """Fold constant subexpressions of *node*, using *mod* (or a fresh module) for context."""
    return _transform.FoldConstantExpr(node, mod if mod is not None else IRModule())
def infer_channels(inputs, transpose=False):
    """A hack for getting 'channels' or 'units' since caffe2 does not provide
    these attributes. We check the shape of weights provided to get the number.
    """
    weight_shape = get_const_tuple(infer_type(inputs).checked_type.shape)
    return weight_shape[1] if transpose else weight_shape[0]
def infer_shape(inputs, mod=None):
    """A method to get the output type of an intermediate node in the graph."""
    checked = infer_type(inputs, mod=mod).checked_type
    if hasattr(checked, "shape"):
        # Regular operator that outputs tensors.
        return get_const_tuple(checked.shape)
    # Not a tensor type (e.g. a List); return the type itself.
    return checked
def infer_value(input_val, params, mod=None):
    """A hack for getting the value of an expression by evaluating a
    portion of the relay graph. This is often needed for functions
    whose output shape depends on the value of a tensor.
    """
    # Check that all free variables have associated parameters.
    assert all(
        var.name_hint in params.keys() for var in analysis.free_vars(input_val)
    ), "All inputs to infer must be available in params."
    assert tvm.runtime.enabled("llvm"), "LLVM must be enabled to infer value."
    try:
        # Fast path: build the expression with the graph executor at opt_level=0.
        # TODO(kevinthesun): Use VM for all cases.
        # pylint: disable=import-outside-toplevel
        from tvm.contrib import graph_executor
        func = _function.Function(analysis.free_vars(input_val), input_val)
        with tvm.transform.PassContext(opt_level=0):
            lib = tvm.relay.build(func, target="llvm", params=params)
        dev = tvm.cpu(0)
        m = graph_executor.GraphModule(lib["default"](dev))
        m.run()
        return m.get_output(0)
    except Exception:
        # Fallback: if building/running with the graph executor fails for any
        # reason, evaluate with the (slower) debug interpreter instead.
        if isinstance(mod, IRModule):
            mod["main"] = _function.Function(analysis.free_vars(input_val), input_val)
        else:
            mod = IRModule.from_expr(input_val)
        inputs = []
        for param in mod["main"].params:
            inputs.append(params[param.name_hint])
        result = tvm.relay.create_executor(
            "debug", mod=mod, device=tvm.cpu(), target="llvm"
        ).evaluate()(*inputs)
        return result
def infer_value_simulated(input_val, params):
    """Extension to infer_value that can be used when some input
    values are missing. This function creates dummy inputs with the same
    shape and random values then calls infer_value. This is helpful when
    implementing certain onnx operators where we need to evaluate the graph
    to determine a static shape.
    """
    injected = []
    # Bind a random dummy tensor for every free variable not in params.
    for var in analysis.free_vars(input_val):
        if var.name_hint in params:
            continue
        ann = var.type_annotation
        dummy = np.random.rand(*[dim.value for dim in ann.shape]).astype(ann.dtype)
        params[var.name_hint] = tvm.nd.array(dummy)
        injected.append(var)
    result = infer_value(input_val, params)
    # Restore params to its original contents.
    for var in injected:
        params.pop(var.name_hint, None)
    return result
def try_infer_value(val, on_success=None, on_failure=None, parameters=None):
    """Try running infer_value on the input val, and if successful, return the inferred value or
    pass it to on_success callback if provided. Otherwise, run on_failure callback if it is
    provided, or return the input val as output. In each case, the second return value
    indicates whether infer_value has succeeded or not.
    """
    try:
        inferred = infer_value(val, {} if parameters is None else parameters).numpy()
        # A raising on_success callback also counts as a failure.
        return (on_success(inferred), True) if on_success else (inferred, True)
    except Exception:
        if on_failure:
            return on_failure(), False
        return val, False
def shape_of(x, dtype="int64", start=None, end=None):
"""Get shape of a tensor."""
ttype = infer_type(x).checked_type
if not _ty.is_dynamic(ttype):
shape = list(ttype.shape)
start = start or 0 # default to first
end = end or len(shape) # default to last
shape_sliced = shape[start:end]
return _expr.const(shape_sliced, dtype)
return _op.shape_of(x, dtype)
def new_var(name_hint, type_annotation=None, shape=None, dtype="float32"):
    """Create a fresh relay variable with the given name and type information."""
    return _expr.var(name_hint, type_annotation=type_annotation, shape=shape, dtype=dtype)
class Renamer(object):
    """Maps a frontend operator onto a relay operator with a different name.

    Parameters
    ----------
    new_name : str
        The new name for the operator
    """

    def __init__(self, new_name):
        self._new_name = new_name

    def __call__(self, inputs, attrs, *args):
        # Strip frontend-internal bookkeeping before dispatching to relay.
        attrs.pop("tvm_custom", None)
        return get_relay_op(self._new_name)(*inputs, **attrs)
def to_int_list(np_array):
    """Convert a np array to a python int list.

    Note: This function converts np.int32 to python's int.
    If we don't do this conversion, numpy's automatic upcast will make
    the shape / parameters be converted to int64 IntImm in relay and
    cause problems in relay/TOPI.
    """
    return list(map(int, np_array))
def unbind(data, axis=0):
    """
    Unbind was taken from Pytorch frontend. The operation removes a tensor dimension
    and returns a tuple of all slices along a given dimension, with specified axis removed.
    TODO (vvchernov): It needs such operation on relay side to reduce time consumption
    on squeeze operation.

    Parameters
    ----------
    data : relay.Expr
        Input tensor
    axis : int
        Axis along which tensor is split.

    Returns
    -------
    result : List[relay.Expr]
        The sequence of computed tensors
    """
    shape = infer_shape(data)
    if axis >= len(shape):
        msg = "Please check input dim, it shouldn't be greater than or equal to rank."
        raise AttributeError(msg)
    num_slices = shape[axis]
    pieces = _op.split(data, num_slices, axis)
    # Drop the split axis from every slice.
    squeezed = [_op.squeeze(pieces[i], axis=[axis]) for i in range(num_slices)]
    return _expr.TupleWrapper(_expr.Tuple(squeezed), num_slices)
def rnn_cell(
    input_seqs, hidden_state, w_inp, w_hid, b_inp=None, b_hid=None, backwards=False, act=_op.tanh
):
    """
    Common implementation of RNN cell for all frontends of TVM

    Parameters
    ----------
    input_seqs : List[relay.Expr]
        The sequence of input tensors
        Input tensor should be 2d while issue #8412 is not resolved
        Shape = (batch, feature_size)
    hidden_state : relay.Expr
        Hidden state. shape = (batch_size, hidden_size)
    w_inp, w_hid: relay.Expr
        weight matrices. shape = (hidden_size, feature_size), (hidden_size, feature_size)
    b_inp, b_hid : relay.Expr
        bias matrices. The same order of internal parts as for weights. shape = (1 * hidden_size)
    backwards : bool
        Flag for reverse pass of RNN
    act : relay.op
        activation function. It is tanh by default.

    Returns
    -------
    result : List[relay.Expr], relay.Expr
        The sequence of computed result and the final hidden state
    """
    has_bias = b_inp is not None and b_hid is not None
    ordered_inputs = reversed(input_seqs) if backwards else input_seqs
    outputs = []
    for step_input in ordered_inputs:
        inp_proj = _op.nn.dense(step_input, w_inp)
        hid_proj = _op.nn.dense(hidden_state, w_hid)
        if has_bias:
            inp_proj = inp_proj + b_inp
            hid_proj = hid_proj + b_hid
        # h_t = act(W_i x_t + b_i + W_h h_{t-1} + b_h)
        hidden_state = act(inp_proj + hid_proj)
        outputs.append(hidden_state)  # [seq_num, (batch, hidden_size)]
    return outputs, hidden_state
def gru_cell(
    input_seqs,
    hidden_state,
    w_inp,
    w_hid,
    b_inp=None,
    b_hid=None,
    rz_act=_op.sigmoid,
    n_act=_op.tanh,
    backwards=False,
    linear_before_reset=True,
    sequence_lens=None,
):
    """
    Common implementation of GRU cell for all frontends of TVM
    TODO(vvchernov): currently it is used by pytorch and ONNX. Extend for other frontends

    Parameters
    ----------
    input_seqs : List[relay.Expr]
        The sequence of input tensors
        Input tensor should be 2d while issue #8412 is not resolved
        Shape = (batch, feature_size)
    hidden_state : relay.Expr
        Hidden state. shape = (batch_size, hidden_size)
    w_inp, w_hid : relay.Expr
        weight matrices. wi shape = (3 * hidden_size, feature_size)
        wh shape = (3 * hidden_size, hidden_size)
        NOTE: wi = (w_ir|w_iz|w_in) for reset, update and new gates.
        The order is important for correct GRU calculation!
    b_inp, b_hid : relay.Expr
        bias matrices. The same order of internal parts as for weights. shape = (3 * hidden_size)
    rz_act : relay.op
        activation function for reset and update gates. it is sigmoid by default
    n_act : relay.op
        activation function for new gate. it is tanh by default
    backwards : bool
        Flag for reverse pass of GRU
    linear_before_reset : bool
        Flag for applying the linear transformation before multiplying by the output of the reset
        gate.
    sequence_lens : relay.op
        Tensor specifying lengths of the sequences in a batch.
        Shape = (batch_size)

    Returns
    -------
    result : List[relay.Expr], relay.Expr
        The sequence of computed result and the final hidden state
    """
    outputs_list = []
    seq_len = len(input_seqs)
    input_dtype = infer_type(input_seqs[0]).checked_type.dtype
    if sequence_lens is not None:
        # Build per-timestep masks so that padded positions (t >= seq_len of
        # that batch element) do not pollute the hidden state or the output.
        shape = infer_shape(sequence_lens)
        dtype = infer_type(sequence_lens).checked_type.dtype
        arange = _op.arange(_op.const(0), _op.const(seq_len), dtype=dtype)
        arange = _op.expand_dims(arange, 1)
        sequence_lens = _op.broadcast_to(sequence_lens, [seq_len, shape[0]])
        # cast to data dtype
        # mask[t, b] == 1 while t is within sequence b, else 0
        mask = _op.less(arange, sequence_lens)
        mask = _op.cast(mask, dtype=input_dtype)
        mask = _op.expand_dims(mask, 2)
        mask_seqs = unbind(mask)
        # res_mask is the complement: 1 once sequence b has ended
        res_mask = _op.greater_equal(arange, sequence_lens)
        res_mask = _op.cast(res_mask, dtype=input_dtype)
        res_mask = _op.expand_dims(res_mask, 2)
        res_mask_seqs = unbind(res_mask)
        if backwards:
            # need a mask to keep intial_h_B correct: on the reverse pass the
            # initial hidden state must be re-injected at each sequence's
            # last valid timestep.
            initial_h = hidden_state
            initial_h_mask = _op.equal(arange, sequence_lens)
            initial_h_mask = _op.cast(initial_h_mask, dtype=input_dtype)
            initial_h_mask = _op.expand_dims(initial_h_mask, 2)
            initial_h_mask_seqs = unbind(initial_h_mask)
    output = _op.zeros(infer_shape(hidden_state), input_dtype)
    for i in range(seq_len) if not backwards else reversed(range(seq_len)):
        x_t = input_seqs[i]
        xwt = _op.nn.dense(x_t, w_inp)
        if linear_before_reset:
            # ONNX/PyTorch variant: h_n uses W_hn h + b_hn computed before
            # the reset gate multiplies it.
            hwt = _op.nn.dense(hidden_state, w_hid)
            if b_inp is not None and b_hid is not None:
                xwt += b_inp
                hwt += b_hid
            i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)
            h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)
            r_gate = rz_act(i_r + h_r)
            z_gate = rz_act(i_z + h_z)
            n_gate = n_act(i_n + r_gate * h_n)
        else:
            # Classic variant: reset gate is applied to hidden_state before
            # the W_hn projection.
            i_r, i_z, i_n = _op.split(xwt, 3, axis=1)
            w_hr, w_hz, w_hn = _op.split(w_hid, 3, axis=0)
            r_gate = i_r + _op.nn.dense(hidden_state, w_hr)
            z_gate = i_z + _op.nn.dense(hidden_state, w_hz)
            if b_inp is not None and b_hid is not None:
                b_ir, b_iz, b_in = _op.split(b_inp, 3, axis=-1)
                b_hr, b_hz, b_hn = _op.split(b_hid, 3, axis=-1)
                r_gate += b_ir + b_hr
                r_gate = rz_act(r_gate)
                z_gate += b_iz + b_hz
                i_n += b_in
                h_n = _op.nn.dense((r_gate * hidden_state), w_hn) + b_hn
            else:
                r_gate = rz_act(r_gate)
                h_n = _op.nn.dense((r_gate * hidden_state), w_hn)
            z_gate = rz_act(z_gate)
            n_gate = n_act(i_n + h_n)
        # h_t = (1 - z) * n + z * h_{t-1}, rewritten as (h - n) * z + n
        hidden_state = (hidden_state - n_gate) * z_gate + n_gate
        if sequence_lens is not None:
            # Zero the hidden state for batch elements past their length.
            hidden_state = hidden_state * mask_seqs[i]
        outputs_list.append(hidden_state)  # [seq_num, (batch, hidden_size)]
        if sequence_lens is not None:
            # Keep the last valid hidden state per batch element.
            output = output * res_mask_seqs[i] + hidden_state
        else:
            output = hidden_state
        # make sure initial_h_B correct
        if backwards and sequence_lens is not None:
            hidden_state = hidden_state + initial_h * initial_h_mask_seqs[i]
    return outputs_list, output
def lstm_cell(
    input_seqs,
    hidden_state,
    cell_state,
    w_inp,
    w_hid,
    b_inp=None,
    b_hid=None,
    proj=None,
    p_i=None,
    p_f=None,
    p_o=None,
    f_act=_op.sigmoid,
    g_act=_op.tanh,
    h_act=_op.tanh,
    backwards=False,
):
    """
    Common implementation of LSTM cell for all frontends of TVM
    TODO (vvchernov): currently it is used by onnx and pytorch. Extend for other frontends

    Parameters
    ----------
    input_seqs : List[relay.Expr]
        The sequence of input tensors
        Input tensor should be 2d while issue #8412 is not resolved
        Shape = (batch, feature_size)
    hidden_state : relay.Expr
        Hidden state. shape = (batch, hidden_size)
    cell_state : relay.Expr
        Cell state. shape = (batch, hidden_size)
    w_inp, w_hid : relay.Expr
        weight matrices. wi shape = (4 * hidden_size, feature_size)
        wh shape = (4 * hidden_size, hidden_size or proj_size)
        NOTE: wi = (w_ii|w_if|w_ig|w_io) for input, forget, cell and output gates.
        The order is important for correct LSTM calculation!
    b_inp, b_hid : relay.Expr
        bias matrices. The same order of internal parts as for weights. shape = (4 * hidden_size)
    proj : relay.Expr
        projection matrix. shape = (proj_size, hidden_size)
    p_i, p_f, p_o : relay.Expr
        peephole LSTM matrices. shape = (batch, hidden_size)
    f_act, g_act, h_act : relay.op
        activation functions
    backwards : bool
        Flag for reverse pass of LSTM

    Returns
    -------
    result : List[relay.Expr], relay.Expr, relay.Expr
        The sequence of computed result, final hidden and cell state
    """
    outputs_list = []
    for x_t in input_seqs if not backwards else reversed(input_seqs):
        # x_t shape = (batch, feature size), step shape = (batch, feature size + hidden_size)
        step = _op.concatenate([x_t, hidden_state], axis=1)
        cat_w = _op.concatenate([w_inp, w_hid], axis=1)
        # Instead of nn.dense(x_t, w_inp) + nn.dense(hidden_state, w_hid)
        # nn.dense(step, cat_w) is used: one fused matmul per timestep.
        # gates shape = (batch, 4 * hidden_size)
        gates = _op.nn.dense(step, cat_w)
        # Add biases
        if b_inp is not None:
            gates += b_inp
        if b_hid is not None:
            gates += b_hid
        # any gate shape = (batch, hidden_size)
        inp_gate, fgt_gate, cell_gate, otp_gate = _op.split(gates, 4, axis=-1)
        if p_i is not None and p_f is not None:
            # Peephole connections read the *previous* cell state here.
            inp_gate = f_act(inp_gate + p_i * cell_state)
            fgt_gate = f_act(fgt_gate + p_f * cell_state)
        else:
            inp_gate = f_act(inp_gate)
            fgt_gate = f_act(fgt_gate)
        cell_gate = g_act(cell_gate)
        # c_t = f * c_{t-1} + i * g
        cell_state = fgt_gate * cell_state + inp_gate * cell_gate
        if p_o is not None:
            # The output peephole reads the *updated* cell state.
            otp_gate = f_act(otp_gate + p_o * cell_state)
        else:
            otp_gate = f_act(otp_gate)
        # h_t = o * h_act(c_t)
        hidden_state = otp_gate * h_act(cell_state)
        if proj is not None:
            # Optional projection reduces hidden_size to proj_size.
            hidden_state = _op.nn.dense(hidden_state, proj)
        outputs_list.append(hidden_state)  # [seq_num, (batch, hidden_size)]
    return outputs_list, hidden_state, cell_state
def autopad(
    data,
    strides,
    kernel_shape,
    dilations=(1, 1),
    pad_type="constant",
    deconv=False,
    mode="SAME_UPPER",
    pad_value=0.0,
):
    """
    Perform autopadding with dynamic input shapes.

    Computes SAME-style padding symbolically (so it works when spatial
    dimensions are unknown at compile time) and applies it with nn.pad.
    ``mode`` containing "LOWER" puts the larger half of an odd pad before
    the data; otherwise after. ``deconv`` adjusts the total pad for
    transposed convolutions.
    """
    # get attributes as constants
    strides = _op.const(np.array(strides), dtype="int64")
    dilated_kernel_shape = _op.const(
        np.array(
            [(kernel - 1) * dilation + 1 for kernel, dilation in zip(kernel_shape, dilations)]
        ),
        dtype="int64",
    )
    # get input shape (spatial dims only: skip N and C)
    ndim = len(infer_shape(data))
    shape = _op.strided_slice(shape_of(data, dtype="int64"), [2], [ndim])
    # set up integer constants
    zero = _op.const(0, dtype="int64")
    one = _op.const(1, dtype="int64")
    two = _op.const(2, dtype="int64")
    # Calculate total padding
    mod = _op.mod(shape, strides)
    left = _op.maximum(dilated_kernel_shape - strides, zero)
    right = _op.maximum(dilated_kernel_shape - mod, zero)
    total_pad = _op.where(_op.equal(mod, zero), left, right)
    if deconv:
        total_pad = _op.const(np.array(kernel_shape), dtype="int64") - one - total_pad
    # split total padding into before and after
    pad_before = _op.floor_divide(total_pad, two)
    pad_after = total_pad - pad_before
    # combine into an (ndim-2, 2) pad-width matrix
    if "LOWER" in mode:
        pad = _op.concatenate(
            [_op.reshape(pad_after, [-1, 1]), _op.reshape(pad_before, [-1, 1])], axis=1
        )
    else:
        pad = _op.concatenate(
            [_op.reshape(pad_before, [-1, 1]), _op.reshape(pad_after, [-1, 1])], axis=1
        )
    # pad N and C with zeros
    pad = _op.concatenate([_op.const(np.zeros([2, 2], dtype="int64"), dtype="int64"), pad], axis=0)
    if isinstance(pad_value, (float, int)):
        pad_value = _op.const(pad_value)
    return _op.nn.pad(data, fold_constant(pad), pad_value, pad_type)
def ensure_scalar_shape(x):
    """
    Assume that `x` is a tensor with one element (regardless of tensor rank).
    Return a version of that tensor with rank 0.
    """
    original_shape = infer_shape(x)
    if len(original_shape) == 0:
        # Already a scalar.
        return x
    element_count = np.prod(original_shape)
    assert element_count == 1, f"Cannot squeeze tensor shape {original_shape} to scalar form."
    return _op.squeeze(x)
def try_resolve_var_to_const(x, graph_params):
    """
    Try to resolve the value of tensor `x` to a specific value.
    If successful, return a Const op with that value.
    If unsuccessful, simply return `x`.
    """
    if not isinstance(x, _expr.Var):
        return x
    if x.name_hint not in graph_params:
        return x
    value = graph_params[x.name_hint].numpy()
    return _op.const(value, infer_type(x).checked_type.dtype)
class _SpanFiller(ExprMutator):
    """Expression mutator that tags every untagged sub-expression with a span.

    The span may be given as an existing relay Span, or as a str/bytes source
    name (wrapped into a Span with zeroed positions).
    """

    def __init__(self, span):
        ExprMutator.__init__(self)
        if isinstance(span, tvm.relay.Span):
            self._span = span
        elif isinstance(span, str):
            self._span = tvm.relay.Span(tvm.relay.SourceName(span), 0, 0, 0, 0)
        elif isinstance(span, bytes):
            self._span = tvm.relay.Span(tvm.relay.SourceName(span.decode("utf-8")), 0, 0, 0, 0)
        else:
            assert False, f"unsupported span type: {type(span)}"

    def visit(self, expr):
        # Stop recursion at expressions that already carry a span.
        if hasattr(expr, "span") and expr.span:
            return expr
        return super().visit(expr)

    def visit_function(self, fn):
        # Rebuild the function with the span attached, preserving all fields.
        new_params = [self.visit(x) for x in fn.params]
        new_body = self.visit(fn.body)
        return _function.FunctionWithFields(
            fn, list(new_params), new_body, fn.ret_type, fn.type_params, fn.attrs, None, self._span
        )

    def visit_let(self, let):
        new_variable = self.visit(let.var)
        new_value = self.visit(let.value)
        new_body = self.visit(let.body)
        return _expr.LetWithFields(let, new_variable, new_value, new_body, None, self._span)

    def visit_call(self, call):
        new_args = [self.visit(arg) for arg in call.args]
        # call.op might be RelayExpr or Op type
        # ExprMutator will return directly if subject belongs to Op type
        new_op = self.visit(call.op)
        return _expr.CallWithFields(
            call, new_op, new_args, call.attrs, call.type_args, None, self._span
        )

    def visit_var(self, var):
        return _expr.VarWithFields(var, var.vid, var.type_annotation, None, self._span)

    def visit_if(self, ite):
        return _expr.IfWithFields(
            ite,
            self.visit(ite.cond),
            self.visit(ite.true_branch),
            self.visit(ite.false_branch),
            None,
            self._span,
        )

    def visit_tuple(self, tup):
        return _expr.TupleWithFields(
            tup, [self.visit(field) for field in tup.fields], None, self._span
        )

    def visit_tuple_getitem(self, op):
        return _expr.TupleGetItemWithFields(
            op, self.visit(op.tuple_value), op.index, None, self._span
        )

    def visit_constant(self, const):
        return _expr.ConstantWithFields(const, const.data, None, self._span)

    # TODO: Frontend model translation could not use following relay expressions so far,
    # enable them when new models/impls leverage these kinds of relay expressions.
    def visit_ref_create(self, _):
        raise NotImplementedError()

    def visit_ref_write(self, _):
        raise NotImplementedError()

    def visit_ref_read(self, _):
        raise NotImplementedError()

    def visit_match(self, _):
        raise NotImplementedError()

    def fill(self, sym):
        """Fill span to sym when it is an expr, or return it without change

        Parameters
        ----------
        sym :
            A symbol which is generated from the conversion of a frontend operator.

        Returns
        -------
        sym:
            A expr with span-filled or the original sym.
        """
        if isinstance(sym, _expr.TupleWrapper):
            return _expr.TupleWrapper(self.visit(sym.tuple_value), sym.size)
        elif isinstance(sym, _expr.RelayExpr):
            return self.visit(sym)
        elif isinstance(sym, list):
            assert all(
                isinstance(expr, _expr.RelayExpr) for expr in sym
            ), f"unexpected relay expressions in {sym}"
            return [self.visit(expr) for expr in sym]
        elif isinstance(sym, tuple):
            # some op conversion may return dummy elements
            # e.g. op in frontend/pytorch.py: min_max_common
            assert all(
                isinstance(expr, (_expr.RelayExpr, type(None))) for expr in sym
            ), f"unexpected relay expressions in {sym}"
            return tuple(self.visit(expr) if expr else None for expr in sym)
        elif isinstance(sym, (float, int)):
            return sym
        elif isinstance(sym, np.ndarray):
            return sym
        elif not sym:
            # some op conversion may return None
            # e.g. op in frontend/pytorch.py: prim::device
            return sym
        raise RuntimeError(f"unsupported type {type(sym)}")
def set_span(sym, span):
    """
    Recursively tag the span to the symbol. Stop when it encounters a span-tagged expr. Disabled
    when setting the "relay.frontend.fill_span" as False to the config of PassContext

    Parameters
    ----------
    sym :
        A symbol is generated from the conversion of a frontend operator. Raise an error when the
        type of the symbol is not supported.
    span : String, Span, or bytes
        The source information of the corresponding symbol.

    Returns
    -------
    result :
        The symbol tagged with span.

    Examples
    --------
    .. code-block:: python

        x = set_span(relay.var("x", shape=(1, 64, 56, 56)), "x_var")
        w = relay.const(np.ones([64, 64, 3, 3]), dtype="int64")
        y = set_span(
            relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1)), "conv2d"
        )
        print(relay.Function([x], y))
        #fn (%x: Tensor[(1, 64, 56, 56), float32] /* span=x_var:0:0 */) {
        #  nn.conv2d(%x, meta[relay.Constant][0] /* span=conv2d:0:0 */, ...) /* span=conv2d:0:0 */
        #}
    """
    fill_enabled = tvm.transform.PassContext.current().config.get("relay.frontend.fill_span", True)
    if not fill_enabled:
        return sym
    return _SpanFiller(span).fill(sym)
| 40,194 | 32.000821 | 99 | py |
tvm | tvm-main/python/tvm/relay/frontend/qnn_torch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-outside-toplevel
""" Functions to convert quantized torch models to QNN """
import numpy as np
import tvm
from tvm import relay
from tvm.relay import expr as _expr
from tvm.relay import op as _op
from tvm.relay.frontend.common import infer_shape
from .common import logger
from .pytorch_utils import is_version_greater_than, getattr_attr_name
class QNNParam(object):
    """A placeholder for weight quantization parameters.

    Holds the float weight array, an optional numpy bias, and the
    (scale, zero_point) pair as relay constants.
    """

    def __init__(self, weight, bias, scale, zero_point):
        self.weight = weight
        if bias is None:
            self.bias = None
        else:
            # Detach from the torch autograd graph before converting.
            self.bias = bias.detach().numpy()
        self.scale = _expr.const(scale)
        self.zero_point = _expr.const(zero_point, dtype="int32")
class ConvPackedParam(QNNParam):
    """A placeholder for quantized conv2d op attributes
    As of PyTorch 1.6, attributes of quantized conv2d ops, like
    stride, padding etc are stored in ConvPackedParams objects,
    together with weights and quantization parameters
    """

    def __init__(
        self, weight_np, bias, scale, zero_point, stride, padding, dilation, groups, output_padding
    ):
        super().__init__(weight_np, bias, scale, zero_point)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        # Used only for conv_transpose2d
        self.output_padding = output_padding
def _get_quant_params(qweight):
import torch
weight_np = qweight.dequantize().numpy()
if qweight.qscheme() == torch.per_tensor_affine:
return weight_np, qweight.q_scale(), int(qweight.q_zero_point())
scales = qweight.q_per_channel_scales().numpy()
zero_points = qweight.q_per_channel_zero_points().numpy()
# This is an assumption posed by QNN
msg = "The values of zero points should be all zero for per channel"
assert np.all(zero_points == 0), msg
return weight_np, scales, 0
def make_qnn_param(qweight, bias):
    """Build a QNNParam from a quantized torch weight tensor and optional bias."""
    weight_np, scale, zero_point = _get_quant_params(qweight)
    return QNNParam(weight_np, bias, scale, zero_point)
def make_conv_packed_param(qweight, bias, packed_params):
    """Build a ConvPackedParam from a quantized torch weight, optional bias,
    and the torch ConvPackedParams object carrying the conv attributes."""
    weight_np, scale, zero_point = _get_quant_params(qweight)
    return ConvPackedParam(
        weight_np,
        bias,
        scale,
        zero_point,
        packed_params.stride(),
        packed_params.padding(),
        packed_params.dilation(),
        packed_params.groups(),
        packed_params.output_padding(),
    )
def get_weight_quant_params(script_module, packed_param_names):
    """Retrieve and unpack weight parameters from quantized modules.

    Walks the TorchScript module tree, finds quantized Conv / Linear
    submodules whose packed-param key is in ``packed_param_names``, unpacks
    their weights via torch.ops.quantized.*_unpack, and returns a dict
    mapping the packed-param key to a QNNParam / ConvPackedParam.
    """
    import torch

    param_name = "_packed_params"
    quant_params = {}

    def filter_func(named_module):
        # Keep only quantized conv modules and LinearPackedParams holders.
        m = named_module[1]
        return isinstance(m, torch.jit.RecursiveScriptModule) and (
            ("Conv" in m.original_name) or (m.original_name == "LinearPackedParams")
        )

    for name, m in filter(filter_func, script_module.named_modules()):
        key = name + "." + param_name
        state_dict = m.state_dict()
        if key not in packed_param_names:
            continue
        if len(state_dict) == 0 and not hasattr(m, param_name):
            # for v1.6 and above
            # This case seems to happen if a model is serialized
            # and loaded back
            # This module can be safely ignored
            continue
        if len(state_dict) == 0 and hasattr(m, param_name):
            # for v1.6 and above
            packed_params = m._packed_params
        else:
            assert len(state_dict) == 1
            packed_params = list(state_dict.values())[0]
        if "Conv" in m.original_name and len(state_dict) == 0:
            # v1.6+ conv: packed_params also carries stride/padding/etc.
            qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
            quant_params[key] = make_conv_packed_param(qweight, bias, packed_params)
        elif "Conv" in m.original_name:
            qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
            quant_params[key] = make_qnn_param(qweight, bias)
        elif m.original_name == "LinearPackedParams":
            qweight, bias = torch.ops.quantized.linear_unpack(packed_params)
            quant_params[key] = make_qnn_param(qweight, bias)
    return quant_params
def quantize_numpy(weight, scale, zero_point, out_dtype_np):
    """Quantize a float numpy array: round(zero_point + weight / scale),
    clipped to the range of ``out_dtype_np``.

    ``scale`` may be a scalar (per-tensor) or a 1-D array of per-channel
    scales aligned with the first axis of ``weight``.
    """
    dtype_info = np.iinfo(out_dtype_np)
    if len(scale.shape) > 0:
        # Per-channel: broadcast scales along the output-channel axis.
        scale = np.reshape(scale, [weight.shape[0]] + [1] * (len(weight.shape) - 1))
    quantized = np.round(zero_point + weight / scale)
    return np.clip(quantized, dtype_info.min, dtype_info.max).astype(out_dtype_np)
def add_quant_params_to_outputs(
    outputs, packed_param_map, quant_params, input_scales_for_bias, keep_quantized_weight=False
):
    """
    Add quant params to outputs so that they can be referenced by other
    ops later. Weights are quantized here.

    For each packed-param node, stores in ``outputs`` the list
    [qweight, scale, zero_point, qbias] (plus conv attributes for
    ConvPackedParam). With ``keep_quantized_weight`` the weights/biases are
    pre-quantized as numpy int arrays; otherwise float vars wrapped in
    qnn.quantize ops are emitted. NOTE: mutates ``quant_params`` entries
    in place (adds weight_var/bias_var, may replace weight/bias arrays).
    """
    for node_name, packed_param_name in packed_param_map.items():
        qparam = quant_params[packed_param_name]
        weight_scale = _get_numpy(qparam.scale)
        param_prefix = packed_param_name[: -len("._packed_params")]
        if keep_quantized_weight:
            # Quantize the weight now and feed it as an int8 variable.
            qparam.weight_var = _expr.var(
                param_prefix + "_weight", shape=qparam.weight.shape, dtype="int8"
            )
            qparam.weight = quantize_numpy(
                qparam.weight, weight_scale, _get_numpy(qparam.zero_point), np.int8
            )
            qweight = qparam.weight_var
        else:
            # Keep float weights and quantize them inside the relay graph.
            qparam.weight_var = _expr.var(
                param_prefix + "_weight", shape=qparam.weight.shape, dtype="float32"
            )
            qweight = relay.qnn.op.quantize(
                qparam.weight_var, qparam.scale, qparam.zero_point, out_dtype="int8", axis=0
            )
        if qparam.bias is not None:
            float_bias_var = _expr.var(
                param_prefix + "_bias", shape=qparam.bias.shape, dtype="float32"
            )
            if node_name not in input_scales_for_bias:
                # This case is for dynamic quantization, where the input activation scale is
                # unknown until runtime.
                qparam.bias_var = float_bias_var
                qbias = qparam.bias_var
            elif keep_quantized_weight:
                # Bias scale is input_scale * weight_scale, per QNN convention.
                qparam.bias_var = _expr.var(
                    param_prefix + "_bias", shape=qparam.bias.shape, dtype="int32"
                )
                qparam.bias = quantize_numpy(
                    qparam.bias, input_scales_for_bias[node_name] * weight_scale, 0, np.int32
                )
                qbias = qparam.bias_var
            else:
                qparam.bias_var = float_bias_var
                qbias = relay.qnn.op.quantize(
                    qparam.bias_var,
                    _expr.const(input_scales_for_bias[node_name] * weight_scale),
                    _expr.const(0, "int32"),
                    out_dtype="int32",
                    axis=0,
                )
        else:
            qbias = None
        quant_params[packed_param_name] = qparam
        params = [qweight, qparam.scale, qparam.zero_point, qbias]
        if isinstance(quant_params[packed_param_name], ConvPackedParam):
            # Conv packed params additionally carry the conv attributes.
            params += [
                qparam.stride,
                qparam.padding,
                qparam.dilation,
                qparam.groups,
                qparam.output_padding,
            ]
        outputs[node_name] = params
def _get_quant_param_for_input(input_value):
    """
    We want to know the input scale and zp of this input_value, since
    input quant params are not explicitly passed around in torch (they
    are embedded in a QTensor data structure, not visible statically).
    We know that it is quantized using output scale and zp
    of some previous quantized op. The purpose of this function
    is to find that pair of parameters.

    Returns (scale_node, zp_node) as torch JIT values, or (None, None)
    if the producer chain never reaches a quantized op.
    """
    # Indices for output scale and zp
    # For example, in quantized::conv2d(%input, %1, %2, %3, %4, %5, %6, %7),
    # 6th and 7th arg are output scale and zp respectively.

    # PyTorch 1.6 changed qconv API
    if is_version_greater_than("1.5.1"):
        qconv_indices = (2, 3)
    else:
        qconv_indices = (6, 7)

    # Map op kind -> positions of (output scale, output zero point) among
    # that op's inputs.
    output_quant_param_indices = {
        "aten::quantize_per_tensor": (1, 2),
        "quantized::conv2d": qconv_indices,
        "quantized::conv2d_relu": qconv_indices,
        "quantized::linear": (2, 3),
        "quantized::linear_relu": (2, 3),
        "quantized::add_relu": (2, 3),
        "quantized::add": (2, 3),
        "quantized::mul_relu": (2, 3),
        "quantized::mul": (2, 3),
        "quantized::cat": (2, 3),
        "quantized::mul_scalar": (2, 3),
        "quantized::add_scalar": (2, 3),
        "quantized::hardswish": (1, 2),
        "quantized::conv_transpose2d": qconv_indices,
        "quantized::leaky_relu": (3, 4),
        "aten::sigmoid": (1, 2),
    }

    def dfs(current_node):
        # trace back to find the producer of this input value
        current_op = current_node.kind()
        if current_op in output_quant_param_indices:
            indices = output_quant_param_indices[current_op]
            scale = current_node.inputsAt(indices[0])
            zp = current_node.inputsAt(indices[1])
            return scale, zp

        # Trace back eariler nodes, dfs order
        # Assume quantized tensor comes earlier in the args
        # (only the first input is followed; the loop returns immediately)
        for arg in current_node.inputs():
            return dfs(arg.node())

        # If input_value is not quantized, we reach here.
        return None, None

    return dfs(input_value.node())
def _get_add_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::add_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
s = input_scale
z = input_zero_point
c = scalar
c_q = round(c / s)
if q_min > z - c_q:
s_prime = (float(q_max) - (z - c_q)) / (float(q_max) - q_min) * s
z_prime = q_min
elif q_max < z - c_q:
s_prime = (float(z - c_q) - q_min) / (float(q_max) - q_min) * s
z_prime = q_max
else:
s_prime = s
z_prime = z - c_q
return s_prime, z_prime
def _get_mul_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::mul_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
self_scale = input_scale
self_zero_point = input_zero_point
other_val = scalar
if other_val > 0.0:
s_prime = other_val * self_scale
z_prime = self_zero_point
elif other_val == 0.0:
s_prime = 1.0
z_prime = 0
else:
s_prime = abs(other_val) * self_scale
z_prime = q_max - (self_zero_point - q_min)
return s_prime, z_prime
def _add_output_quant_params_to_scalar_op(node, graph, input_scale, input_zero_point, scalar):
    """
    The output scale and zp of {add,mul}_scalar op are not explicit in the IR
    They are required for _get_quant_param_for_input above to work correctly
    So calculate these params using the same way torch does, and make new
    constant nodes in the input IR. Also add these params to the inputs of
    scalar op.

    For example,
       %6 : float = prim::Constant[value=3.]()
       %input : QUInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6)
    becomes
       %6 : float = prim::Constant[value=3.]()
       %7 : float = prim::Constant[value=0.015686161816120148]()
       %8 : int = prim::Constant[value=0]()
       %input : UInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6, %7, %8)

    %7 and %8 are newly created output scale and zp constant nodes
    """
    # pylint: disable=c-extension-no-member
    import torch

    operator = node.kind()
    # Compute the output quant params with the same formulas torch uses.
    if operator == "quantized::mul_scalar":
        out_scale, out_zero_point = _get_mul_scalar_output_quant_param(
            input_scale, input_zero_point, scalar
        )
    elif operator == "quantized::add_scalar":
        out_scale, out_zero_point = _get_add_scalar_output_quant_param(
            input_scale, input_zero_point, scalar
        )
    else:
        raise NotImplementedError(f"unsupported scalar op: {operator}")

    # create new constant nodes and add them to graph
    out_scale_node = graph.create("prim::Constant")
    out_zero_point_node = graph.create("prim::Constant")
    # Constants must be inserted before their (only) user.
    out_scale_node.insertBefore(node)
    out_zero_point_node.insertBefore(node)
    out_scale_node.f_("value", out_scale)
    out_zero_point_node.i_("value", out_zero_point)
    out_scale_node.output().setType(torch._C.FloatType.get())
    out_zero_point_node.output().setType(torch._C.IntType.get())
    # Append them as extra inputs of the scalar op.
    node.addInput(out_scale_node.output())
    node.addInput(out_zero_point_node.output())
def _add_output_quant_params_to_sigmoid_op(node, graph):
    """
    Refer to aten/src/ATen/native/quantized/cpu/qsigmoid.cpp,
    the output scale and zp of sigmoid op are two fixed numbers.
    So we need to make two new constant nodes in the input IR and
    add these params to the inputs of sigmoid op.
    """
    # pylint: disable=c-extension-no-member
    import torch

    # suppose scale_type is uint8: sigmoid output is in [0, 1), quantized
    # over 256 levels with zero point 0.
    out_scale = 1.0 / 256
    out_zero_point = 0

    # create new constant nodes and add them to graph
    out_scale_node = graph.create("prim::Constant")
    out_zero_point_node = graph.create("prim::Constant")
    # Constants must be inserted before their (only) user.
    out_scale_node.insertBefore(node)
    out_zero_point_node.insertBefore(node)
    out_scale_node.f_("value", out_scale)
    out_zero_point_node.i_("value", out_zero_point)
    out_scale_node.output().setType(torch._C.FloatType.get())
    out_zero_point_node.output().setType(torch._C.IntType.get())
    # Append them as extra inputs of the sigmoid op.
    node.addInput(out_scale_node.output())
    node.addInput(out_zero_point_node.output())
def add_input_quant_params_to_op_inputs(graph):
    """
    In Torch, input quant params are not explicitly passed around
    Instead, they are stored in QTensor data structure, and retrieved
    at runtime by each quantized ops.
    However, they need to be known statically for QNN translation.
    To workaround and simplify the translation of inputs, we manually add
    input quant params to inputs of Torch quantized operators listed below.
    See _quantized_conv2d() below for example of why this is helpful.
    For example,
      %input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435)
    becomes
      %395 : float = prim::Constant[value=0.036212071776390076]()
      %396 : int = prim::Constant[value=0]()
      %430 : float = prim::Constant[value=0.16080744564533234]()
      %431 : int = prim::Constant[value=42]()
      %input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435,
                                                     %430, %431, %395, %396)
    %434, %435 are output scale and zp of quantized::add op
    %430, %431, %395, %396 are two pairs of input (scale, zp) for two tensors
    added by this function

    Returns a dict mapping the debug name of each qconv/qlinear weight input
    to its input scale (a float), used later for quantizing bias.
    """
    # How many quantized tensors each op takes as inputs?
    # A pair of (scale, zp) for each input quantized tensor will be added
    # to the input nodes
    num_quantized_inputs = {
        "quantized::conv2d": 1,
        "quantized::conv2d_relu": 1,
        "quantized::linear": 1,
        "quantized::linear_relu": 1,
        "quantized::add_relu": 2,
        "quantized::add": 2,
        "quantized::mul_relu": 2,
        "quantized::mul": 2,
        "aten::dequantize": 1,
        "aten::mean": 1,
        "aten::sigmoid": 1,
        "aten::upsample_nearest2d": 1,
        "aten::upsample_bilinear2d": 1,
        "aten::relu_": 1,
        "aten::relu": 1,
        "quantized::add_scalar": 1,
        "quantized::mul_scalar": 1,
        "quantized::relu6": 1,
        "quantized::hardswish": 1,
        "aten::hardsigmoid": 1,
        "quantized::conv_transpose2d": 1,
        "quantized::leaky_relu": 1,
    }
    need_input_quant_param = set(num_quantized_inputs.keys())
    # cat takes a variable number of inputs, handled separately below
    need_input_quant_param.add("quantized::cat")
    input_scales_for_bias = {}
    for node in graph.nodes():
        operator = node.kind()
        if operator not in need_input_quant_param:
            continue
        input_scales = []
        input_zero_points = []
        if operator == "quantized::cat":
            # the number of inputs to concat is not constant
            # so handle it separately
            inputs = node.inputsAt(0).node().inputs()
            for inp in inputs:
                scale, zp = _get_quant_param_for_input(inp)
                input_scales.append(scale)
                input_zero_points.append(zp)
        else:
            for i in range(num_quantized_inputs[operator]):
                scale, zp = _get_quant_param_for_input(node.inputsAt(i))
                if scale is not None and zp is not None:
                    input_scales.append(scale)
                    input_zero_points.append(zp)
        if operator in ["quantized::add_scalar", "quantized::mul_scalar"]:
            # scalar ops need their output qparams computed from the input
            # qparams and the scalar; see _add_output_quant_params_to_scalar_op
            scalar = node.inputsAt(1).node().f("value")
            inp_scale = input_scales[0].node().f("value")
            inp_zero_point = input_zero_points[0].node().i("value")
            # see the comments in this function above
            _add_output_quant_params_to_scalar_op(node, graph, inp_scale, inp_zero_point, scalar)
        if operator == "aten::sigmoid":
            # sigmoid output qparams are fixed constants (see helper above)
            _add_output_quant_params_to_sigmoid_op(node, graph)
        # append (scale, zp) pairs after the node's existing inputs
        for scale, zp in zip(input_scales, input_zero_points):
            node.addInput(scale)
            node.addInput(zp)
        if "quantized::conv" in operator or "quantized::linear" in operator:
            # This is required for quantizing the bias
            assert len(input_scales) == 1, "One quantized parameter expected for qconv or qlinear."
            input_scales_for_bias[node.inputsAt(1).debugName()] = input_scales[0].node().f("value")
    return input_scales_for_bias
def add_quant_params(params, quant_params):
    """Add quant parameters (weights and optional biases) to TVM param map."""
    for qp in quant_params.values():
        params[qp.weight_var.name_hint] = tvm.nd.array(qp.weight)
        if qp.bias is None:
            continue
        params[qp.bias_var.name_hint] = tvm.nd.array(qp.bias)
def inline_input_quant_params_for_fx(graph, params, param_debug_name_map):
    """
    Canonicalize input scale and zero point access for FX-quantized graphs.
    We expect input qparams to aten::quantize_per_tensor to be prim::Constant, but that's
    not the case for FX-based quantized models as shown below.
    We replace prim::GetAttr with prim::Constant so that FX-based quantized models can be
    converted in the same way as eager-mode based quantized models.
    Before:
    %pan_input_zero_point_1 : Tensor = prim::GetAttr[name="pan_input_zero_point_1"](%backbone)
    %pan_input_scale_1 : Tensor = prim::GetAttr[name="pan_input_scale_1"](%backbone)
    ...
    %quantize_per_tensor_2 ... = aten::quantize_per_tensor(...,
                                     %pan_input_scale_1, %pan_input_zero_point_1, ...)
    After:
    %2402 : int = prim::Constant[value=0]()
    %2403 : float = prim::Constant[value=1.]()
    %quantize_per_tensor_2 ... = aten::quantize_per_tensor(..., %2403, %2402, ...)
    """
    # pylint: disable=c-extension-no-member
    import torch

    def get_full_attr_name(current):
        # walk a chain of prim::GetAttr nodes and build the dotted path
        current_attr = getattr_attr_name(current)
        inputs = list(current.inputs())
        if len(inputs) == 1 and inputs[0].node().kind() == "prim::GetAttr":
            return get_full_attr_name(inputs[0].node()) + "." + current_attr
        return current_attr

    for node in graph.findAllNodes("prim::GetAttr", recurse=True):
        out_name = node.output().debugName()
        # only scale / zero-point attributes are inlined as constants
        if "_scale" in out_name or "_zero_point" in out_name:
            full_attr = param_debug_name_map[get_full_attr_name(node)]
            assert full_attr in params, f"{full_attr} not found in param dict."
            param_np = params[full_attr].numpy()
            new_const_node = graph.create("prim::Constant")
            new_const_node.insertBefore(node)
            if "_scale" in out_name:
                # NOTE(review): param_np is a numpy array here, not a Python
                # float — presumably pybind accepts a 0-d/1-element array for
                # f_; confirm against the zero-point branch which uses .item()
                new_const_node.f_("value", param_np)
                new_const_node.output().setType(torch._C.FloatType.get())
            else:
                new_const_node.i_("value", param_np.item())
                new_const_node.output().setType(torch._C.IntType.get())
            node.replaceAllUsesWith(new_const_node)
def apply_with_upcast(data, func):
    """Run *func* on int32-upcast data, then cast the result back to uint8."""
    upcast = _op.cast(data, dtype="int32")
    return _op.cast(func(upcast), "uint8")
def apply_with_fp32_fallback(data, input_scale, input_zero_point, func_fp32):
    """Dequantize, run a fp32 op, and requantize with the same input qparams."""
    fp32_result = func_fp32(relay.qnn.op.dequantize(data, input_scale, input_zero_point))
    return relay.qnn.op.quantize(
        fp32_result, input_scale, input_zero_point, out_dtype="uint8", axis=1
    )
def quantized_relu(data, input_zero_point):
    """Quantized ReLU: clamp values at the zero point.

    refer to aten/src/ATen/native/quantized/cpu/qrelu.cpp
    """
    return _op.tensor.maximum(data, _op.cast(input_zero_point, dtype="uint8"))
def quantized_sigmoid(inputs):
    """Lower quantized sigmoid; inputs = [data, out_scale, out_zp, in_scale, in_zp]."""
    data = inputs[0]
    out_scale, out_zp = _expr.const(inputs[1]), _expr.const(inputs[2])
    in_scale, in_zp = _expr.const(inputs[3]), _expr.const(inputs[4])
    return relay.qnn.op.sigmoid(data, in_scale, in_zp, out_scale, out_zp)
def _quantize_per_tensor():
    """Return the converter for aten::quantize_per_tensor."""

    def _impl(inputs, _):
        # quantize along the channel axis (1) for >1-D data, axis 0 otherwise
        axis = 1 if len(infer_shape(inputs[0])) > 1 else 0
        return relay.qnn.op.quantize(
            inputs[0], _expr.const(inputs[1]), _expr.const(inputs[2]), out_dtype="uint8", axis=axis
        )

    return _impl
def _dequantize():
    """Return the converter for aten::dequantize."""

    def _impl(inputs, _):
        assert len(inputs) == 3, "Input quant params not found in op inputs"
        scale, zero_point = _expr.const(inputs[1]), _expr.const(inputs[2])
        return relay.qnn.op.dequantize(inputs[0], scale, zero_point)

    return _impl
def _get_numpy(relay_const_scalar):
return relay_const_scalar.data.numpy()
def _get_scalar(relay_const_scalar):
return _get_numpy(relay_const_scalar).item(0)
def _do_bias_and_requantize(
    output, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
):
    """Output processing for conv and linear: bias add, requantize, clip to uint8."""
    # this is a vector for per channel case
    requant_input_scale = _expr.const(_get_numpy(input_scale) * _get_numpy(weight_scale))
    # Torch does bias add and requantize scale in fp32
    # refer to third_party/fbgemm/include/fbgemm/OutputProcessing-inl.h
    # Instead, we do bias add in int32 and use qnn requantize, which needs
    # integer input.
    # We observed no loss in accuracy in doing this way, and it is better
    # for tvm because bias quantization can be done at compile time
    # Instead, the torch way requires rounding of activation at runtime
    if bias is not None:
        requantize_input = _op.nn.bias_add(output, bias)
    else:
        requantize_input = output
    requantized = relay.qnn.op.requantize(
        requantize_input,
        requant_input_scale,
        relay.const(0, "int32"),
        output_scale,
        output_zero_point,
        out_dtype="int32",
        axis=1,
    )
    clip_min = 0
    if with_relu:
        # fused relu: clamp at the output zero point rather than at 0
        clip_min = _get_scalar(output_zero_point)
    clip = _op.tensor.clip(requantized, clip_min, 255.0)
    return _op.cast(clip, dtype="uint8")
def _quantized_conv2d(with_relu=False):
    """Return the converter for quantized::conv2d (optionally with fused relu)."""

    def _impl(inputs, _):
        # refer to src/ATen/native/quantized/cpu/qconv.cpp
        # inputs[0]: input tensor
        # inputs[1]: (weight, scale, zero_point, bias)
        # inputs[2-5]: stride, padding, dilation, groups
        # inputs[6]: output_scale
        # inputs[7]: output_zero_point
        # inputs[8]: input_scale (added manually by frontend)
        # inputs[9]: input_zero_point (added manually by frontend)
        conv_params = inputs[1]
        weight = conv_params[0]
        weight_scale = conv_params[1]
        weight_zero_point = conv_params[2]
        bias = conv_params[3]
        if len(conv_params) > 4:
            # Torch 1.6 or newer case: conv params also pack the geometry
            strides = conv_params[4]
            padding = conv_params[5]
            dilation = conv_params[6]
            groups = conv_params[7]
            output_scale = _expr.const(inputs[2])
            output_zero_point = _expr.const(inputs[3])
            assert len(inputs) == 6, "Input quant params not found in op inputs"
            # These are manually added by add_input_quant_params_to_op_inputs above
            # In torch, they are retrieved from QTensor data structure at runtime
            input_scale = _expr.const(inputs[4])
            input_zero_point = _expr.const(inputs[5])
        else:
            # pre-1.6 layout: geometry arrives as separate op inputs
            strides = inputs[2]
            padding = inputs[3]
            dilation = inputs[4]
            groups = inputs[5]
            output_scale = _expr.const(inputs[6])
            output_zero_point = _expr.const(inputs[7])
            assert len(inputs) == 10, "Input quant params not found in op inputs"
            input_scale = _expr.const(inputs[8])
            input_zero_point = _expr.const(inputs[9])
        weight_shape = infer_shape(weight)
        kernel_size = (weight_shape[2], weight_shape[3])
        out_channels = weight_shape[0]
        if padding[0] != 0 or padding[1] != 0:
            # pad explicitly with the input zero point as fill value, since
            # conv padding below is forced to (0, 0)
            pad_val = _get_scalar(input_zero_point)
            inp = _op.nn.pad(
                inputs[0],
                pad_width=((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])),
                pad_value=float(pad_val),
            )
        else:
            inp = inputs[0]
        # padding is (0, 0) because we did explicit pad op with
        # pad value being zero point above
        conv_out = relay.qnn.op.conv2d(
            inp,
            weight,
            input_zero_point,
            weight_zero_point,
            input_scale,
            weight_scale,
            kernel_size=kernel_size,
            dilation=dilation,
            strides=strides,
            padding=(0, 0),
            groups=groups,
            channels=out_channels,
        )
        return _do_bias_and_requantize(
            conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
        )

    return _impl
def _linear(with_relu=False):
    """Return the converter for quantized::linear (optionally with fused relu).

    Similar to conv: unpack the packed (weight, scale, zp, bias) params,
    build qnn.dense, then do bias add + requantize.
    """

    def _impl(inputs, _):
        packed = inputs[1]
        weight, weight_scale, weight_zero_point = packed[0], packed[1], packed[2]
        output_scale = _expr.const(inputs[2])
        output_zero_point = _expr.const(inputs[3])
        assert len(inputs) == 6, "Input quant params not found in op inputs"
        # Manually added by add_input_quant_params_to_op_inputs above
        input_scale = _expr.const(inputs[4])
        input_zero_point = _expr.const(inputs[5])
        out_units = infer_shape(weight)[0]
        dense = relay.qnn.op.dense(
            inputs[0],
            weight,
            input_zero_point,
            weight_zero_point,
            input_scale,
            weight_scale,
            units=out_units,
        )
        bias_var = packed[3]
        return _do_bias_and_requantize(
            dense, bias_var, input_scale, weight_scale, output_scale, output_zero_point, with_relu
        )

    return _impl
def _binop(relay_op, with_relu=False, fp32_piggy_back=False):
    """Return a converter for quantized binary ops (add/mul, optional fused relu).

    Two lowering strategies: native qnn (qnn_impl) or Torch-style fp32
    piggy-backing (torch_impl), selected by fp32_piggy_back.
    """

    def qnn_impl(
        lhs,
        rhs,
        input_scale_lhs,
        input_zero_point_lhs,
        input_scale_rhs,
        input_zero_point_rhs,
        output_scale,
        output_zero_point,
    ):
        qnn_out = relay_op(
            lhs,
            rhs,
            input_scale_lhs,
            input_zero_point_lhs,
            input_scale_rhs,
            input_zero_point_rhs,
            output_scale,
            output_zero_point,
        )
        if with_relu:
            # fused relu clamps at the output zero point
            clip_min = _get_scalar(output_zero_point)
            return _op.tensor.clip(qnn_out, clip_min, 255)
        return qnn_out

    # refer to aten/src/ATen/native/quantized/cpu/{qadd, qmul}.cpp
    # they piggy backs to fp32 math by dequantize -> fp32 math -> quantize
    def torch_impl(
        lhs,
        rhs,
        input_scale_lhs,
        input_zero_point_lhs,
        input_scale_rhs,
        input_zero_point_rhs,
        output_scale,
        output_zero_point,
    ):
        # if an operand is itself a quantize, reuse its fp32 input directly
        if isinstance(lhs, _expr.Call) and lhs.op.name == "qnn.quantize":
            lhs = lhs.args[0]
        else:
            lhs = relay.qnn.op.dequantize(lhs, input_scale_lhs, input_zero_point_lhs)
        if isinstance(rhs, _expr.Call) and rhs.op.name == "qnn.quantize":
            rhs = rhs.args[0]
        else:
            rhs = relay.qnn.op.dequantize(rhs, input_scale_rhs, input_zero_point_rhs)
        fp32_out = relay_op(lhs, rhs)
        if with_relu:
            fp32_out = _op.nn.relu(fp32_out)
        return relay.qnn.op.quantize(
            fp32_out, output_scale, output_zero_point, axis=-1, out_dtype="uint8"
        )

    def _impl(inputs, _):
        lhs = inputs[0]
        rhs = inputs[1]
        output_scale = _expr.const(inputs[2])
        output_zero_point = _expr.const(inputs[3])
        assert len(inputs) == 8, "Input quant params not found in op inputs"
        # Manually added by add_input_quant_params_to_op_inputs above
        input_scale_lhs = _expr.const(inputs[4])
        input_zero_point_lhs = _expr.const(inputs[5])
        input_scale_rhs = _expr.const(inputs[6])
        input_zero_point_rhs = _expr.const(inputs[7])
        if fp32_piggy_back:
            logger.info("Piggy backing to FP32 op (PyTorch way)")
            return torch_impl(
                lhs,
                rhs,
                input_scale_lhs,
                input_zero_point_lhs,
                input_scale_rhs,
                input_zero_point_rhs,
                output_scale,
                output_zero_point,
            )
        return qnn_impl(
            lhs,
            rhs,
            input_scale_lhs,
            input_zero_point_lhs,
            input_scale_rhs,
            input_zero_point_rhs,
            output_scale,
            output_zero_point,
        )

    return _impl
def _cat(fp32_piggy_back=False):
    """Return the converter for quantized::cat.

    refer to aten/src/ATen/native/quantized/cpu/qconcat.cpp:
    Torch itself piggy-backs concat to fp32 (dequantize -> concat -> quantize).
    """

    def torch_impl(inputs, input_scales, input_zero_points, output_scale, output_zero_point, axis):
        dequantized = [
            relay.qnn.op.dequantize(inp, scale, zp)
            for inp, scale, zp in zip(inputs, input_scales, input_zero_points)
        ]
        concat = _op.tensor.concatenate(dequantized, axis=axis)
        return relay.qnn.op.quantize(
            concat, output_scale, output_zero_point, axis=axis, out_dtype="uint8"
        )

    def _impl(inputs, _):
        axis = inputs[1]
        output_scale = _expr.const(inputs[2])
        output_zero_point = _expr.const(inputs[3])
        # qparams of the N inputs are appended pairwise after the first 4 args
        num_inputs = (len(inputs) - 4) // 2
        input_scales = [_expr.const(inputs[4 + 2 * i]) for i in range(num_inputs)]
        input_zero_points = [_expr.const(inputs[4 + 2 * i + 1]) for i in range(num_inputs)]
        if fp32_piggy_back:
            return torch_impl(
                inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
            )
        return relay.qnn.op.concatenate(
            inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
        )

    return _impl
def _add_scalar():
    """Return the converter for quantized::add_scalar (used for MobileNet V3)."""

    # this is used for mobilenet v3
    def _impl(inputs, _):
        # refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
        assert len(inputs) == 6, "Input quant params not found in op inputs"
        s = inputs[4]  # input scale
        z = inputs[5]  # input zero point
        c = inputs[1]  # the float scalar being added
        c_q = round(c / s)  # the scalar expressed in quantized units
        q_min = 0
        q_max = 255
        # math for calculating output scale and zp are already done
        # during _add_output_quant_params_to_scalar_op above
        out_scale = _expr.const(inputs[2])
        out_zp = _expr.const(inputs[3])
        if q_min > z - c_q or q_max < z - c_q:
            # the shifted zero point would leave [q_min, q_max]:
            # fall back to dequantize -> fp32 add -> quantize
            # TODO(masahi): Replace this with integer only compute
            dequant = relay.qnn.op.dequantize(inputs[0], _expr.const(s), _expr.const(z))
            dequantized_add = _op.tensor.add(dequant, _expr.const(c_q * s))
            return relay.qnn.op.quantize(
                dequantized_add, out_scale, out_zp, axis=1, out_dtype="uint8"
            )
        # only scale change
        return inputs[0]

    return _impl
def quantize_scalar(data, scale, zero_point):
    """Affine-quantize a float scalar into the uint8 range [0, 255].

    Used e.g. to quantize the constant 6.0 in MobileNet V3.
    """
    q_value = round(zero_point + data / scale)
    if q_value < 0:
        return 0
    return min(q_value, 255)
def _relu6():
    """Return the converter for quantized::relu6.

    refer to src/ATen/native/quantized/cpu/qrelu.cpp
    """

    def _impl(inputs, _):
        assert len(inputs) == 4, "Input quant params not found in op inputs"
        scale, zero_point = inputs[2], inputs[3]
        # clip between the quantized representations of 0 (the zp) and 6.0
        quantized_six = quantize_scalar(6.0, scale, zero_point)
        return _op.tensor.clip(inputs[0], zero_point, quantized_six)

    return _impl
def _leaky_relu(fp32_piggy_back=False):
    """Return the converter for quantized::leaky_relu.

    refer to src/ATen/native/quantized/cpu/qrelu.cpp
    """

    def _unpack_qparams(inputs):
        # (output_scale, output_zp, input_scale, input_zp) as relay consts
        return (
            _expr.const(inputs[3]),
            _expr.const(inputs[4]),
            _expr.const(inputs[5]),
            _expr.const(inputs[6]),
        )

    def _impl_fp32(inputs, _):
        out_scale, out_zp, in_scale, in_zp = _unpack_qparams(inputs)
        dequant = relay.qnn.op.dequantize(inputs[0], in_scale, in_zp)
        dequantized = _op.nn.leaky_relu(dequant, inputs[1])
        return relay.qnn.op.quantize(dequantized, out_scale, out_zp, out_dtype="uint8")

    def _impl_int8(inputs, _):
        out_scale, out_zp, in_scale, in_zp = _unpack_qparams(inputs)
        return relay.qnn.op.leaky_relu(inputs[0], inputs[1], in_scale, in_zp, out_scale, out_zp)

    def _impl(inputs, _):
        assert len(inputs) == 7, "Input quant params not found in op inputs"
        chosen = _impl_fp32 if fp32_piggy_back else _impl_int8
        return chosen(inputs, _)

    return _impl
def _mul_scalar():
    """Return the converter for quantized::mul_scalar (used for MobileNet V3)."""

    # this is used for mobilenet v3
    def _impl(inputs, _):
        # refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
        # math for calculating output scale and zp are already done
        # during _add_output_quant_params_to_scalar_op above
        assert len(inputs) == 6, "Input quant params not found in op inputs"
        other_val = inputs[1]  # scalar
        if other_val > 0.0:
            # only scale change
            return inputs[0]
        if other_val == 0.0:
            # multiplying by zero: result is an all-zero uint8 tensor
            shape = infer_shape(inputs[0])
            return _op.full(_expr.const(0), shape, dtype="uint8")
        # negative scale case: flip quantized values across the range,
        # i.e. q -> (q_max + q_min) - q
        q_min = 0
        q_max = 255
        # NOTE(review): q_max + q_min == 255 does not fit in signed int8;
        # presumably the const/relay arithmetic wraps as intended — confirm.
        bias = _expr.const(q_max + q_min, dtype="int8")
        int8 = bias - _op.cast(inputs[0], "int8")
        return _op.cast(int8, "uint8")

    return _impl
def _hswish(fp32_piggy_back=False):
    """Return the converter for quantized::hardswish."""

    def _impl_fp32(inputs):
        # refer to src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
        # They fallback to fp32
        def relu6(x):
            return _op.tensor.clip(x, 0.0, 6.0)

        def hardsigmoid(x):
            dtype = "float32"
            return relu6(x + _expr.const(3.0, dtype=dtype)) / _expr.const(6.0, dtype=dtype)

        output_scale = _expr.const(inputs[1])
        output_zero_point = _expr.const(inputs[2])
        input_scale = _expr.const(inputs[3])
        input_zero_point = _expr.const(inputs[4])
        # hardswish(x) = x * hardsigmoid(x), computed in fp32
        dequant = relay.qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
        dequantized_hswish = dequant * hardsigmoid(dequant)
        return relay.qnn.op.quantize(
            dequantized_hswish, output_scale, output_zero_point, out_dtype="uint8"
        )

    def _impl_int8(inputs):
        # native qnn lowering, no fp32 round trip
        output_scale = _expr.const(inputs[1])
        output_zero_point = _expr.const(inputs[2])
        input_scale = _expr.const(inputs[3])
        input_zero_point = _expr.const(inputs[4])
        return relay.qnn.op.hardswish(
            inputs[0], input_scale, input_zero_point, output_scale, output_zero_point
        )

    def _impl(inputs, _):
        assert len(inputs) == 5, "Input quant params not found in op inputs"
        if fp32_piggy_back:
            return _impl_fp32(inputs)
        return _impl_int8(inputs)

    return _impl
def _linear_dynamic():
    """Return the converter for quantized::linear_dynamic.

    The input is quantized at runtime with qparams computed from its own
    min/max, then a qnn dense is applied and the result dequantized.
    """

    def _calculate_qparam(inp):
        # reference ATen/native/quantized/cpu/qlinear_dynamic.cpp
        # ChooseQuantizationParams function
        mn = _op.min(inp)
        mx = _op.max(inp)
        # Ensure that the interval contains 0
        mn = _op.minimum(mn, _op.const(0.0, dtype="float32"))
        mx = _op.maximum(mx, _op.const(0.0, dtype="float32"))
        qmax = 255
        # reduce_range became True in v1.6
        if is_version_greater_than("1.5.1"):
            qmax = 127
        scale = (mx - mn) / _expr.const(qmax, dtype="float32")
        zero_point_from_min = -(mn / scale)
        zero_point = _op.cast(_op.round(_op.clip(zero_point_from_min, 0.0, qmax)), "int32")
        return scale, zero_point

    def _impl(inputs, _):
        weight = inputs[1][0]
        weight_scale = inputs[1][1]
        weight_zero_point = inputs[1][2]
        inp = inputs[0]
        input_scale, input_zero_point = _calculate_qparam(inp)
        qinp = relay.qnn.op.quantize(inp, input_scale, input_zero_point, out_dtype="uint8")
        data_shape = infer_shape(inp)
        if len(data_shape) > 2:
            # flatten leading dims so dense sees a 2-D input
            qinp = _op.reverse_reshape(qinp, [-1, 0])
        weight_shape = infer_shape(weight)
        units = weight_shape[0]
        dense = relay.qnn.op.dense(
            qinp,
            weight,
            input_zero_point,
            weight_zero_point,
            input_scale,
            weight_scale,
            units=units,
        )
        bias_var = inputs[1][3]
        dequant_scale = input_scale * weight_scale
        dense_out = relay.qnn.op.dequantize(
            dense, dequant_scale, input_zero_point=relay.const(0, "int32"), axis=1
        )
        if len(data_shape) > 2:
            # restore the original leading dims
            new_shape = list(data_shape[:-1])
            new_shape.append(units)
            dense_out = _op.reshape(dense_out, new_shape)
        if bias_var is not None:
            return dense_out + bias_var
        return dense_out

    return _impl
def _quantized_conv_transpose2d(with_relu=False):
    """Return the converter for quantized::conv_transpose2d."""

    def _impl(inputs, _):
        # Refer to aten/src/ATen/native/quantized/cpu/qconv.cpp
        # Supported in Torch 1.7 or newer
        conv_params = inputs[1]
        weight = conv_params[0]
        weight_scale = conv_params[1]
        weight_zero_point = conv_params[2]
        bias = conv_params[3]
        strides = conv_params[4]
        padding = conv_params[5]
        dilation = conv_params[6]
        groups = conv_params[7]
        output_padding = conv_params[8]
        output_scale = _expr.const(inputs[2])
        output_zero_point = _expr.const(inputs[3])
        assert len(inputs) == 6, "Input quant params not found in op inputs"
        # These are manually added by add_input_quant_params_to_op_inputs above
        # In torch, they are retrieved from QTensor data structure at runtime
        input_scale = _expr.const(inputs[4])
        input_zero_point = _expr.const(inputs[5])
        weight_shape = list(infer_shape(weight))
        kernel_size = (weight_shape[2], weight_shape[3])
        # transposed conv weight layout is IOHW, so dim 1 holds out channels
        out_channels = weight_shape[1]
        conv_out = relay.qnn.op.conv2d_transpose(
            inputs[0],
            weight,
            input_zero_point,
            weight_zero_point,
            input_scale,
            weight_scale,
            kernel_size=kernel_size,
            dilation=dilation,
            strides=strides,
            padding=padding,
            groups=groups,
            channels=out_channels,
            output_padding=output_padding,
            out_dtype="int32",
            kernel_layout="IOHW",
        )
        return _do_bias_and_requantize(
            conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
        )

    return _impl
# Dispatch table mapping Torch quantized op names to their Relay converters.
convert_map = {
    "aten::quantize_per_tensor": _quantize_per_tensor(),
    "quantized::conv2d_relu": _quantized_conv2d(with_relu=True),
    "aten::dequantize": _dequantize(),
    "quantized::conv2d": _quantized_conv2d(),
    "quantized::add_relu": _binop(relay.qnn.op.add, with_relu=True),
    "quantized::add": _binop(relay.qnn.op.add),
    "quantized::mul_relu": _binop(relay.qnn.op.mul, with_relu=True),
    "quantized::mul": _binop(relay.qnn.op.mul),
    "quantized::linear": _linear(),
    "quantized::linear_relu": _linear(with_relu=True),
    "quantized::cat": _cat(),
    "quantized::add_scalar": _add_scalar(),
    "quantized::mul_scalar": _mul_scalar(),
    "quantized::relu6": _relu6(),
    "quantized::leaky_relu": _leaky_relu(),
    "quantized::linear_dynamic": _linear_dynamic(),
    "quantized::hardswish": _hswish(fp32_piggy_back=False),
    "quantized::conv_transpose2d": _quantized_conv_transpose2d(),
}
| 42,792 | 34.366116 | 99 | py |
tvm | tvm-main/python/tvm/relay/frontend/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Frontends for constructing Relay programs.
Contains the model importers currently defined
for Relay.
"""
from .mxnet import from_mxnet
from .mxnet_qnn_op_utils import quantize_conv_bias_mkldnn_from_var
from .keras import from_keras
from .oneflow import from_oneflow
from .onnx import from_onnx
from .tflite import from_tflite
from .coreml import from_coreml
from .caffe2 import from_caffe2
from .tensorflow import from_tensorflow
from .darknet import from_darknet
from .pytorch import from_pytorch
from .caffe import from_caffe
from .paddlepaddle import from_paddle
from .change_datatype import ChangeDatatype
| 1,400 | 36.864865 | 66 | py |
tvm | tvm-main/python/tvm/relay/frontend/caffe.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
# pylint: disable=no-else-return, no-else-continue, use-list-literal
"""Caffe frontend."""
import numpy as np
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .common import ExprTable
from .common import infer_shape as _infer_shape
__all__ = ["from_caffe"]
class OperatorConverter(object):
"""Operator Converted for converting Caffe ops to Relay ops"""
    def __init__(self, init_layer_dict, predict_layer, exp_tab):
        # layer name -> layer (holding trained blobs) from the caffemodel
        self.init_layer_dict = init_layer_dict
        # layers of the prototxt predict net, in order
        self.predict_layer = predict_layer
        # expression table mapping tensor names to relay expressions
        self.exp_tab = exp_tab
        # fused BN params (mean, var, eps, gamma, beta) keyed by layer name
        self.new_bn = {}
        self.changed_layers = None
        # dispatch table: caffe layer type -> converter method
        self.convert_map = {
            "BatchNorm": self.convert_batch_norm,
            "Concat": self.convert_concat,
            "Convolution": self.convert_conv,
            "Crop": self.convert_crop,
            "Deconvolution": self.convert_deconv,
            "Dropout": self.convert_dropout,
            "Eltwise": self.convert_eltwise,
            "Embed": self.convert_embed,
            "Flatten": self.convert_flatten,
            "InnerProduct": self.convert_innerproduct,
            "Input": None,
            "LRN": self.convert_lrn,
            "Permute": self.convert_permute,
            "Pooling": self.convert_pooling,
            "Power": self.convert_power,
            "PReLU": self.convert_prelu,
            "ReLU": self.convert_relu,
            "Reshape": self.convert_reshape,
            "Scale": self.convert_scale,
            "Sigmoid": self.convert_sigmoid,
            "Slice": self.convert_slice,
            "Softmax": self.convert_softmax,
            "TanH": self.convert_tanh,
            "Reduction": self.convert_reduction,
        }
def convert_flatten(self, op):
"""Convert Flatten layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
flatten_params = op.flatten_param.axis
assert flatten_params == 1, "flatten axis should be 1"
out = _op.nn.batch_flatten(in_expr)
return out
    def convert_eltwise(self, op):
        """Convert Eltwise layer (PROD/SUM/MAX over two or more inputs)."""
        inputs = op.bottom
        assert len(inputs) >= 2, "input tensors length should be larger than 2"
        # gethering initial 2 input expressions
        lhs_expr = self.exp_tab.get_expr(inputs[0])
        rhs_expr = self.exp_tab.get_expr(inputs[1])
        lhs_shape = _infer_shape(lhs_expr)
        rhs_shape = _infer_shape(rhs_expr)
        assert lhs_shape == rhs_shape, "input tensors shape should be equal"
        eltwise_params = op.eltwise_param
        # operation is an enum index into this table
        eltwise_type_dict = ["PROD", "SUM", "MAX"]
        eltwise_type = eltwise_params.operation
        # coeff applies per-input scaling, only meaningful for SUM
        coeff = list(eltwise_params.coeff)
        if eltwise_type_dict[eltwise_type] == "PROD":
            out = _op.multiply(lhs_expr, rhs_expr)
            # for rest inputs
            for i in range(len(inputs) - 2):
                extra_expr = self.exp_tab.get_expr(inputs[i + 2])
                assert _infer_shape(out) == _infer_shape(extra_expr)
                out = _op.multiply(out, extra_expr)
        elif eltwise_type_dict[eltwise_type] == "SUM":
            if coeff:
                # weighted sum: scale each operand by its coefficient
                left_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[0], np.float32))
                right_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[1], np.float32))
                lhs_expr_scale = _op.multiply(lhs_expr, left_coeff_expr)
                rhs_expr_scale = _op.multiply(rhs_expr, right_coeff_expr)
                out = _op.add(lhs_expr_scale, rhs_expr_scale)
            else:
                out = _op.add(lhs_expr, rhs_expr)
            # for rest inputs
            for i in range(len(inputs) - 2):
                extra_expr = self.exp_tab.get_expr(inputs[i + 2])
                assert _infer_shape(out) == _infer_shape(extra_expr)
                if coeff:
                    coeff_expr = self.exp_tab.new_const(np.asarray(coeff[i + 2], np.float32))
                    extra_expr_scale = _op.multiply(extra_expr, coeff_expr)
                    out = _op.add(out, extra_expr_scale)
                else:
                    out = _op.add(out, extra_expr)
        elif eltwise_type_dict[eltwise_type] == "MAX":
            out = _op.maximum(lhs_expr, rhs_expr)
            # for rest inputs
            for i in range(len(inputs) - 2):
                extra_expr = self.exp_tab.get_expr(inputs[i + 2])
                assert _infer_shape(out) == _infer_shape(extra_expr)
                out = _op.maximum(out, extra_expr)
        else:
            raise tvm.error.OpNotImplemented(
                f"eltwise_type {eltwise_type} is not supported for frontend Caffe."
            )
        return out
def _parse_conv_params(self, op):
"""Parse the parameters of Convolution and Deconvolution layer"""
nonzone = lambda val, pos, dflt: val[pos] if pos < len(val) else dflt
conv_params = op.convolution_param
params = dict()
# parse kernel size
if conv_params.kernel_h > 0 or conv_params.kernel_w > 0:
params["kernel_size"] = (conv_params.kernel_h, conv_params.kernel_w)
else:
ksize_h = nonzone(conv_params.kernel_size, 0, 1)
ksize_w = nonzone(conv_params.kernel_size, 1, ksize_h)
params["kernel_size"] = (ksize_h, ksize_w)
# parse padding size
if conv_params.pad_h > 0 or conv_params.pad_w > 0:
params["padding"] = (conv_params.pad_h, conv_params.pad_w)
else:
pad_h = nonzone(conv_params.pad, 0, 0)
pad_w = nonzone(conv_params.pad, 1, pad_h)
params["padding"] = (pad_h, pad_w)
# parse stride size
if conv_params.stride_h > 0 or conv_params.stride_w > 0:
params["strides"] = (conv_params.stride_h, conv_params.stride_w)
else:
stride_h = nonzone(conv_params.stride, 0, 1)
stride_w = nonzone(conv_params.stride, 1, stride_h)
params["strides"] = (stride_h, stride_w)
# parse dilation size
if hasattr(conv_params, "dilation") and len(conv_params.dilation) > 0:
dilation = " ".join(str(d) for d in conv_params.dilation)
dilation = tuple(map(int, dilation.split(" ")))
params["dilation"] = dilation
if len(dilation) == 1:
params["dilation"] = (dilation[0], dilation[0])
params["kernel_layout"] = "OIHW"
params["data_layout"] = "NCHW"
params["groups"] = conv_params.group
params["channels"] = conv_params.num_output
return params
    def convert_batch_norm(self, op):
        """Convert BatchNorm layer"""
        inputs = op.bottom
        in_expr = self.exp_tab.get_expr(inputs[0])
        n, c, h, w = _infer_shape(in_expr)
        if op.name in self.new_bn:
            # BN params were pre-fused (e.g. with a following Scale layer)
            mean, var, eps, gamma, beta = self.new_bn[op.name]
            mean_expr = self.exp_tab.new_const(mean, dtype="float32")
            var_expr = self.exp_tab.new_const(var, dtype="float32")
            gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
            beta_expr = self.exp_tab.new_const(beta, dtype="float32")
            out = _op.nn.batch_norm(
                in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=eps, scale=True
            )
        else:
            weight_bias_blobs = self.init_layer_dict[op.name].blobs
            mean = np.asarray(weight_bias_blobs[0].data, np.float32)
            var = np.asarray(weight_bias_blobs[1].data, np.float32)
            if len(weight_bias_blobs) == 2:
                # only two blobs: treat them as per-channel multiply/add
                # factors broadcast to the full input shape
                mean = np.repeat(mean, h * w).reshape((c, h, w))
                mean = np.expand_dims(mean, 0).repeat(n, axis=0)
                mean_expr = self.exp_tab.new_const(mean, dtype="float32")
                var = np.repeat(var, h * w).reshape((c, h, w))
                var = np.expand_dims(var, 0).repeat(n, axis=0)
                var_expr = self.exp_tab.new_const(var, dtype="float32")
                tmp_out = _op.multiply(in_expr, mean_expr)
                out = _op.add(tmp_out, var_expr)
                return out
            else:
                # third blob is the moving-average scale factor
                scale = np.asarray(weight_bias_blobs[2].data, np.float32)
                # NOTE(review): truthiness of a numpy array is only valid for
                # size-1 arrays — presumably this blob is a single value; confirm
                if scale:
                    scale = 1 / scale
                mean_expr = self.exp_tab.new_const(mean * scale, dtype="float32")
                var_expr = self.exp_tab.new_const(var * scale, dtype="float32")
            # caffe bn layer not support scale
            gamma_expr = self.exp_tab.new_const(
                np.ones(mean.shape, dtype=np.float32), dtype="float32"
            )
            beta_expr = self.exp_tab.new_const(
                np.zeros(mean.shape, dtype=np.float32), dtype="float32"
            )
            bn_params = op.batch_norm_param.eps
            out = _op.nn.batch_norm(
                in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=bn_params, scale=False
            )
        # batch_norm returns a tuple; index 0 is the normalized output
        return out[0]
def convert_scale(self, op):
    """Convert Scale layer: channel-wise affine ``y = x * gamma + beta``."""
    bottoms = op.bottom
    data = self.exp_tab.get_expr(bottoms[0])
    blobs = self.init_layer_dict[op.name].blobs

    params = dict()
    params["bias"] = op.scale_param.bias_term
    params["axis"] = op.scale_param.axis

    gamma = np.asarray(blobs[0].data, np.float32)
    gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
    if params["bias"]:
        beta = np.asarray(blobs[1].data, np.float32)
        beta_expr = self.exp_tab.new_const(beta, dtype="float32")
    else:
        # No bias blob in the model: substitute zeros of the gamma shape.
        beta_expr = self.exp_tab.new_const(
            np.zeros(gamma.shape, dtype=np.float32), dtype="float32"
        )

    # Reshape both params to (1, C, 1, 1) so they broadcast over NCHW input.
    _, channels, _, _ = _infer_shape(data)
    gamma_expr = _op.reshape(gamma_expr, newshape=(1, channels, 1, 1))
    beta_expr = _op.reshape(beta_expr, newshape=(1, channels, 1, 1))
    scaled = _op.multiply(data, gamma_expr)
    return _op.add(scaled, beta_expr)
def convert_concat(self, op):
    """Convert Concat layer to a relay concatenate.

    Parameters
    ----------
    op : caffe_pb2.LayerParameter
        The Caffe Concat layer to convert.

    Returns
    -------
    out : relay.Expr
        The converted relay expression.
    """
    inputs = op.bottom
    # Materialize the inputs as a list: a generator can only be consumed
    # once and is not accepted everywhere a sequence is expected by
    # relay's Tuple/concatenate construction.
    in_expr = [self.exp_tab.get_expr(inputs[i]) for i in range(len(inputs))]
    c_params = dict()
    c_params["axis"] = op.concat_param.axis

    out = _op.concatenate(in_expr, axis=c_params["axis"])
    return out
def convert_reshape(self, op):
    """Convert Reshape layer.

    Implements Caffe's Reshape semantics: only the axes in
    ``[axis, axis + num_axes)`` are reshaped; a dim of 0 copies the input
    dim at that position and -1 is inferred from the remaining elements.
    """
    inputs = op.bottom
    input_name = inputs[0]

    reshape_param = op.reshape_param
    dims = list(reshape_param.shape.dim)

    in_expr = self.exp_tab.get_expr(input_name)
    input_shape = list(_infer_shape(in_expr))

    start_axis = int(reshape_param.axis)
    if start_axis < 0:
        start_axis = len(input_shape) + start_axis + 1
    num_axes = int(reshape_param.num_axes)
    end_axis = len(input_shape)
    if num_axes != -1:
        end_axis = start_axis + num_axes

    # Split the input shape into the untouched left/right parts and the
    # center part that is actually reshaped.
    left_shape = input_shape[:start_axis]
    if end_axis == len(input_shape):
        center_shape = input_shape[start_axis:]
        right_shape = []
    else:
        center_shape = input_shape[start_axis:end_axis]
        right_shape = input_shape[end_axis:]

    # A dim of 0 means "keep the corresponding input dim".
    for idx, dim in enumerate(dims):
        if dim == 0:
            dims[idx] = center_shape[idx]

    # Let numpy resolve a possible -1 in `dims`. np.empty is used instead of
    # np.random.rand: only the shape matters, so there is no need to spend
    # time generating random values.
    tmp = np.empty(center_shape)
    tmp = np.reshape(tmp, dims)
    center_shape = list(tmp.shape)

    newshape = left_shape + center_shape + right_shape

    out = _op.reshape(in_expr, newshape=newshape)
    return out
def convert_softmax(self, op):
    """Convert Softmax layer, honoring the configured axis."""
    bottoms = op.bottom
    assert len(bottoms) == 1, "input tensors length should be 1"
    data = self.exp_tab.get_expr(bottoms[0])
    return _op.nn.softmax(data, axis=op.softmax_param.axis)
def convert_conv(self, op):
    """Convert Convolution layer.

    Weights are reshaped to OIHW with the input-channel dim inferred (-1);
    an optional bias blob becomes a trailing bias_add.
    """
    params = self._parse_conv_params(op)
    weight_bias_blobs = self.init_layer_dict[op.name].blobs
    conv_params = op.convolution_param
    inputs = op.bottom
    # process weight and bias blobs
    weight, bias = None, None
    if len(weight_bias_blobs) > 1:
        weight = weight_bias_blobs[0]
        bias = weight_bias_blobs[1]
    else:
        weight = weight_bias_blobs[0]
    # NOTE(review): `if weight:` relies on protobuf message truthiness
    # (empty blobs being falsy) — confirm with the caffe proto bindings.
    if weight:
        kh, kw = params["kernel_size"]
        weight_shape = [conv_params.num_output, -1, kh, kw]
        weight_value = np.asarray(weight.data, np.float32)
        weight_value = np.reshape(weight_value, weight_shape)
    else:
        raise Exception(f"No weight value of layer {op.name} in caffemodel")

    weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
    in_expr = self.exp_tab.get_expr(inputs[0])
    out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)
    if bias:
        bias_value = np.asarray(bias.data, np.float32)
        bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
        out = _op.nn.bias_add(out, bias_expr)
    return out
def convert_pooling(self, op):
    """Convert Pooling layer.

    Handles MAX / AVE pooling, their global variants, and the two-output
    MAX form (pooled values plus argmax locations). STOCHASTIC pooling
    is rejected.
    """
    inputs = op.bottom
    input_name = inputs[0]

    pool_params = op.pooling_param
    # index -> caffe pool enum name
    pool_type_dict = ["MAX", "AVE", "STOCHASTIC"]

    params = dict()
    # parse pool type: 0: MAX, 1: AVE, 2: STOCHASTIC
    pool_type = pool_params.pool
    # parse kernel size; per-dimension fields override the scalar one
    if pool_params.kernel_h > 0 or pool_params.kernel_w > 0:
        params["pool_size"] = (pool_params.kernel_h, pool_params.kernel_w)
    else:
        params["pool_size"] = (pool_params.kernel_size, pool_params.kernel_size)

    # parse padding size
    if pool_params.pad_h > 0 or pool_params.pad_w > 0:
        params["padding"] = (pool_params.pad_h, pool_params.pad_w)
    else:
        params["padding"] = (pool_params.pad, pool_params.pad)

    # parse stride size
    if pool_params.stride_h > 0 or pool_params.stride_w > 0:
        params["strides"] = (pool_params.stride_h, pool_params.stride_w)
    else:
        params["strides"] = (pool_params.stride, pool_params.stride)

    params["ceil_mode"] = True
    if hasattr(pool_params, "round_mode"):
        # NOTE(review): round_mode is a proto enum (an int in Python), so
        # comparing it with the string "CEIL" is always False here — confirm
        # whether this should compare against the enum value instead.
        params["ceil_mode"] = pool_params.round_mode == "CEIL"

    in_expr = self.exp_tab.get_expr(input_name)

    if pool_type_dict[pool_type] == "MAX":
        if pool_params.global_pooling:
            out = _op.nn.global_max_pool2d(in_expr)
        else:
            # NOTE(review): `out` is left unbound if a MAX pool layer has more
            # than two tops — confirm that cannot occur in valid prototxts.
            if len(op.top) == 1:
                out = _op.nn.max_pool2d(in_expr, **params)
            elif len(op.top) == 2:
                # Two tops: values and argmax locations, returned as a tuple.
                out1 = _op.nn.max_pool2d_with_argmax(in_expr, **params)
                out2 = _op.vision.max_pool2d_location(in_expr, **params)
                return _expr.Tuple((out1, out2))

    elif pool_type_dict[pool_type] == "AVE":  # AVE
        if pool_params.global_pooling:
            out = _op.nn.global_avg_pool2d(in_expr)
        else:
            params["count_include_pad"] = True
            out = _op.nn.avg_pool2d(in_expr, **params)
    else:
        raise tvm.error.OpNotImplemented(
            f"Operator {pool_type_dict[pool_type]} pool is not supported for frontend Caffe."
        )

    return out
def convert_lrn(self, op):
    """Convert LRN (local response normalization) layer."""
    lrn = op.lrn_param
    data = self.exp_tab.get_expr(op.bottom[0])
    return _op.nn.lrn(
        data,
        size=lrn.local_size,
        bias=lrn.k,
        alpha=lrn.alpha,
        beta=lrn.beta,
    )
def convert_innerproduct(self, op):
    """Convert InnerProduct (fully connected) layer.

    The input is flattened to 2D (batch, features) before the dense matmul;
    only axis == 1 is supported.
    """
    inputs = op.bottom
    weight_bias_blobs = self.init_layer_dict[op.name].blobs
    dense_params = op.inner_product_param

    params = dict()
    params["num_output"] = dense_params.num_output
    params["bias"] = dense_params.bias_term
    params["axis"] = dense_params.axis
    if params["axis"] != 1:
        raise Exception("Only support 2D InnerProduct")

    # process weight and bias blobs
    weight, bias = None, None
    if params["bias"]:
        weight = weight_bias_blobs[0]
        bias = weight_bias_blobs[1]
    else:
        weight = weight_bias_blobs[0]

    if weight:
        weight_value = np.asarray(weight.data, np.float32)
        # relay dense expects (units, in_features)
        weight_value = np.reshape(weight_value, (params["num_output"], -1))
        weight_shape = weight_value.shape
    else:
        raise Exception(f"No weight value of layer {op.name} in caffemodel")

    weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")

    in_expr = self.exp_tab.get_expr(inputs[0])
    in_reshape = _op.reshape(data=in_expr, newshape=(-1, weight_shape[-1]))

    out = _op.nn.dense(data=in_reshape, weight=weight_expr)

    if bias:
        bias_value = np.asarray(bias.data, np.float32)
        bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
        out = _op.nn.bias_add(out, bias_expr, axis=params["axis"])
    return out
def convert_dropout(self, op):
    """Convert Dropout layer using the model's dropout ratio."""
    data = self.exp_tab.get_expr(op.bottom[0])
    return _op.nn.dropout(data, rate=op.dropout_param.dropout_ratio)
def convert_relu(self, op):
    """Convert ReLU layer; a non-zero negative_slope maps to leaky_relu."""
    data = self.exp_tab.get_expr(op.bottom[0])
    slope = op.relu_param.negative_slope
    if slope:
        return _op.nn.leaky_relu(data, slope)
    return _op.nn.relu(data)
def convert_prelu(self, op):
    """Convert PReLU layer; per-channel slopes come from the model blobs."""
    data = self.exp_tab.get_expr(op.bottom[0])
    slopes = np.asarray(self.init_layer_dict[op.name].blobs[0].data, np.float32)
    alpha = self.exp_tab.new_const(slopes, dtype="float32")
    # Slopes are applied along axis 1 (the channel axis for NCHW data).
    return _op.nn.prelu(data, alpha, axis=1)
def convert_deconv(self, op):
    """Convert Deconvolution layer.

    For grouped deconvolution with groups == channels the inputs and
    weights are split per group and converted as individual
    conv2d_transpose ops, concatenated back in chunks of at most 16 to
    avoid oversized Concat layers.
    """
    params = self._parse_conv_params(op)
    weight_bias_blobs = self.init_layer_dict[op.name].blobs
    conv_params = op.convolution_param
    inputs = op.bottom

    # process weight and bias blobs
    weight, bias = None, None
    if len(weight_bias_blobs) > 1:
        weight = weight_bias_blobs[0]
        bias = weight_bias_blobs[1]
    else:
        weight = weight_bias_blobs[0]
    if weight:
        kh, kw = params["kernel_size"]
        weight_shape = [-1, conv_params.num_output, kh, kw]
        if not weight.data:
            # No stored weights: fall back to the constant weight_filler value.
            if conv_params.weight_filler:
                _filler = conv_params.weight_filler.value
                weight_value = np.full(weight.shape.dim, _filler, np.float32)
            else:
                raise tvm.error.OpAttributeInvalid("At least weight_filler must be given")
        else:
            weight_value = np.asarray(weight.data, np.float32)
        weight_value = np.reshape(weight_value, weight_shape)

        # weight shape is in relay's IOHW format rn, we need it to be OIHW
        weight_value = np.transpose(weight_value, [1, 0, 2, 3])
    else:
        raise tvm.error.OpAttributeRequired(f"No weight value of layer {op.name} in caffemodel")

    weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
    in_expr = self.exp_tab.get_expr(inputs[0])

    groups = params["groups"]
    channels = params["channels"]

    if bias:
        bias_value = np.asarray(bias.data, np.float32)
        bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")

    if groups > channels:
        raise tvm.error.OpAttributeInvalid(
            "Groups cannot be larger than the number of input channels"
        )

    if groups == channels:
        # Depthwise case: one conv2d_transpose per group.
        inputs_expr = _op.split(in_expr, groups, axis=1)
        # changing split axis to 0, according to PR #9336
        weights_expr = _op.split(weight_expr, groups, axis=0)
        # Preventing to create Concat layer with too many tensors(> 16)
        q = groups >> 4       # number of full chunks of 16 groups
        r = groups % 16       # remaining groups in the last partial chunk

        params["groups"] = 1
        params["channels"] = 1
        out = []
        for lc in range(q):
            _outputs = []
            _inputs = [inputs_expr[i] for i in range(lc << 4, (lc << 4) + 16)]
            _weights = [weights_expr[i] for i in range(lc << 4, (lc << 4) + 16)]
            for (i, w) in zip(_inputs, _weights):
                _out = _op.nn.conv2d_transpose(data=i, weight=w, **params)
                if bias:
                    _out = _op.nn.bias_add(_out, bias_expr)
                _outputs.append(_out)
            out.append(_op.concatenate(_outputs, axis=1))
        if r != 0:
            _outputs = []
            _inputs = [inputs_expr[i] for i in range(groups - r, groups)]
            _weights = [weights_expr[i] for i in range(groups - r, groups)]
            for (i, w) in zip(_inputs, _weights):
                _out = _op.nn.conv2d_transpose(data=i, weight=w, **params)
                if bias:
                    _out = _op.nn.bias_add(_out, bias_expr)
                _outputs.append(_out)
            out.append(_op.concatenate(_outputs, axis=1))
        out = _op.concatenate(out, axis=1)
    elif groups == 1:
        out = _op.nn.conv2d_transpose(data=in_expr, weight=weight_expr, **params)
        if bias:
            out = _op.nn.bias_add(out, bias_expr)
    else:
        # 1 < groups < channels is not handled.
        raise tvm.error.OpAttributeInvalid("Unable to handle.")
    return out
def convert_slice(self, op):
    """Convert Slice layer to a relay split."""
    data = self.exp_tab.get_expr(op.bottom[0])
    n_outputs = len(op.top)

    slice_params = op.slice_param
    split_axis = int(slice_params.axis)
    points = [int(p) for p in slice_params.slice_point]
    # Without explicit slice points, split evenly into one piece per output.
    sections = sorted(points) if points else n_outputs

    return _op.split(data, indices_or_sections=sections, axis=split_axis)
def convert_sigmoid(self, op):
    """Convert Sigmoid layer."""
    data = self.exp_tab.get_expr(op.bottom[0])
    return _op.sigmoid(data)
def convert_tanh(self, op):
    """Convert TanH layer."""
    data = self.exp_tab.get_expr(op.bottom[0])
    return _op.tanh(data)
def convert_reduction(self, op):
    """Convert Reduction layer (SUM / ASUM / SUMSQ / MEAN).

    Caffe reduces over all "tail" axes starting at ``axis``; tail axes past
    ``axis`` are pre-summed one by one, then the final reduction runs
    along ``axis``. The optional coeff scales the result.
    """
    reduction_dic = ["NOP", "SUM", "ASUM", "SUMSQ", "MEAN"]

    inputs = op.bottom
    in_expr = self.exp_tab.get_expr(inputs[0])
    method = op.reduction_param.operation
    axis = op.reduction_param.axis
    coeff = op.reduction_param.coeff
    coeff_expr = self.exp_tab.new_const(np.asarray(coeff, np.float32))
    num_axes = len(_infer_shape(in_expr))

    # Currently, only reduction along ALL "tail" axes is supported in Caffe;
    # reduction of axis M through N, where N < num_axes - 1, is unsupported.
    if 0 < axis < (num_axes - 1):
        for _axis in reversed(range(axis + 1, num_axes)):
            in_expr = _op.sum(in_expr, axis=_axis)
        in_expr = _op.squeeze(in_expr)

    if reduction_dic[method] == "SUM":
        out = _op.sum(in_expr, axis=axis)
    elif reduction_dic[method] == "MEAN":
        out = _op.mean(in_expr, axis=axis)
    elif reduction_dic[method] == "ASUM":
        # sum of absolute values
        in_expr = _op.abs(in_expr)
        out = _op.sum(in_expr, axis=axis)
    elif reduction_dic[method] == "SUMSQ":
        # sum of squares
        in_expr = _op.multiply(in_expr, in_expr)
        out = _op.sum(in_expr, axis=axis)
    else:
        raise tvm.error.OpAttributeInvalid(
            f"reduction method:{method} is invalid in Caffe frontend."
        )

    if float(coeff) != 1.0:
        out = _op.multiply(out, coeff_expr)
    return out
def convert_crop(self, op):
    """Convert Crop layer.

    Shifts input A by the per-axis offsets via strided_slice, then crops
    it to the shape of reference input B with slice_like.
    """
    inputs = op.bottom
    assert len(inputs) == 2, "Need two inputs of Crop layer"
    in_expr_a = self.exp_tab.get_expr(inputs[0])
    in_expr_b = self.exp_tab.get_expr(inputs[1])

    # parse crop params
    crop_params = op.crop_param
    axis = int(getattr(crop_params, "axis", 2))
    offset = list(getattr(crop_params, "offset", 0))

    # expand offset to (offset1, offset2, ...): a single offset applies to
    # every cropped axis, an empty one means zero offsets.
    in_a_shape = _infer_shape(in_expr_a)
    num_to_crop = len(in_a_shape) - axis
    if not offset:
        offset = [0] * num_to_crop
    if len(offset) == 1:
        offset = offset * num_to_crop
    elif len(offset) != num_to_crop:
        raise tvm.error.OpAttributeInvalid("No matching the number between axis and offset!")

    slice_end = in_a_shape
    slice_start = [0] * len(in_a_shape)
    for i in range(num_to_crop):
        slice_start[i + axis] = offset[i]

    to_crop_axis = list(range(len(in_a_shape)))
    to_crop_axis = to_crop_axis[axis:]

    # secondly, crop in_expr_a by in_expr_b
    in_expr_a_stride = _op.strided_slice(in_expr_a, slice_start, slice_end)
    out = _op.slice_like(in_expr_a_stride, in_expr_b, axes=to_crop_axis)
    return out
def convert_permute(self, op):
    """Convert Permute layer to a relay transpose."""
    data = self.exp_tab.get_expr(op.bottom[0])
    order = list(getattr(op.permute_param, "order", 0))
    return _op.transpose(data, order)
def convert_embed(self, op):
    """Convert Embed layer.

    The input indices are cast to int32 and used to gather rows of the
    (input_dim, num_output) embedding table; an optional bias is added
    after flattening, and the original index shape is restored with
    num_output appended.
    """
    inputs = op.bottom
    embed_param = op.embed_param
    num_output = embed_param.num_output
    input_dim = embed_param.input_dim
    bias_term = embed_param.bias_term
    weight_bias_blobs = self.init_layer_dict[op.name].blobs
    weight, bias = None, None
    if bias_term:
        weight = weight_bias_blobs[0]
        bias = weight_bias_blobs[1]
        assert weight and bias
    else:
        weight = weight_bias_blobs[0]
        assert weight
    weight_value = np.asarray(weight.data, np.float32)
    weight_value = np.reshape(weight_value, [input_dim, num_output])
    weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
    in_expr = self.exp_tab.get_expr(inputs[0])
    input_shape = _infer_shape(in_expr)
    # total number of indices, used to flatten before the bias add
    input_count = 1
    for dim in input_shape:
        input_count *= dim

    index = _op.cast(in_expr, "int32")
    out = _op.take(weight_expr, index, axis=0)

    if bias_term:
        bias_value = np.asarray(bias.data, np.float32)
        bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
        out = _op.reshape(out, [input_count, num_output])
        out = _op.add(out, bias_expr)

    out_shape = list(input_shape)
    out_shape.append(num_output)
    out = _op.reshape(out, out_shape)

    return out
def convert_power(self, op):
    """Convert Power layer: ``out = (shift + scale * x) ** power``."""
    data = self.exp_tab.get_expr(op.bottom[0])
    power_param = op.power_param
    power = _expr.const(power_param.power)
    scale = _expr.const(power_param.scale)
    shift = _expr.const(power_param.shift)

    scaled = _op.multiply(data, scale)
    shifted = _op.add(scaled, shift)
    return _op.power(shifted, power)
def check_unsupported_ops(self):
    """Check unsupported Caffe ops in our converter.

    Raises
    ------
    tvm.error.OpNotImplemented
        If any layer type in the prototxt has no registered converter.
    """
    # Note: the per-type occurrence counting dict previously built here was
    # never read, so it has been removed as dead code.
    unsupported_ops_set = {
        pl.type for pl in self.predict_layer if pl.type not in self.convert_map
    }

    if unsupported_ops_set:
        msg = "The following operators are not supported in frontend " "Caffe: {}"
        ops = str(list(unsupported_ops_set)).strip("[,]")
        raise tvm.error.OpNotImplemented(msg.format(ops))
def fuse_op(self, layers):
    """Fusing the BatchNorm and Scale layer.

    Merges the Scale layer's gamma/beta into the preceding BatchNorm's
    statistics, records the merged params in ``self.new_bn`` keyed by the
    BatchNorm layer name, and returns the BatchNorm layer.
    """
    bn, scale = layers["bn"], layers["scale"]

    # bn params: blob[2] is the moving-average scale factor to divide out
    bn_weight_bias_blobs = self.init_layer_dict[bn.name].blobs
    bn_scale = np.asarray(bn_weight_bias_blobs[2].data, np.float32)
    # NOTE(review): truthiness of a size-1 numpy array — relies on the blob
    # holding exactly one element.
    if bn_scale:
        bn_scale = 1 / bn_scale
    bn_mean = np.asarray(bn_weight_bias_blobs[0].data, np.float32) * bn_scale
    bn_var = np.asarray(bn_weight_bias_blobs[1].data, np.float32) * bn_scale
    bn_eps = bn.batch_norm_param.eps

    # scale params
    scale_weight_bias_blobs = self.init_layer_dict[scale.name].blobs
    scale_gamma = np.asarray(scale_weight_bias_blobs[0].data, np.float32)
    scale_bias = scale.scale_param.bias_term
    if scale_bias:
        scale_beta = np.asarray(scale_weight_bias_blobs[1].data, np.float32)
    else:
        scale_beta = np.zeros(scale_gamma.shape, dtype=np.float32)

    # new params consumed later by convert_batch_norm
    self.new_bn[bn.name] = [bn_mean, bn_var, bn_eps, scale_gamma, scale_beta]
    return bn
def op_fuse(self):
    """fuse bn and scale

    Walks the layer list once; when a BatchNorm is immediately followed by
    a Scale, the pair is replaced by the fused BatchNorm (see ``fuse_op``)
    and later references to the Scale's output are rewired to the
    BatchNorm's name via ``changed_layers``.
    """
    new_layers = []
    temp_layers = {}        # holds a pending {bn, scale} pair
    changed_layers = {}     # scale layer name -> fused bn layer name

    for index, pl in enumerate(self.predict_layer):
        op_type = pl.type
        if op_type == "Input":
            new_layers.append(pl)
            continue
        elif op_type == "BatchNorm":
            # Defer the bn if the next layer is a Scale (fusion candidate).
            if (index != len(self.predict_layer) - 1) and (
                self.predict_layer[index + 1].type == "Scale"
            ):
                temp_layers["bn"] = pl
                continue
            else:
                new_layers.append(pl)
                temp_layers.clear()
        elif op_type == "Scale":
            if self.predict_layer[index - 1].type == "BatchNorm":
                temp_layers["scale"] = pl
            else:
                new_layers.append(pl)
                temp_layers.clear()
        else:
            temp_layers.clear()

        if len(temp_layers) == 2:
            # Both halves collected: emit the fused layer.
            layer = self.fuse_op(temp_layers)
            new_layers.append(layer)
            changed_layers[temp_layers["scale"].name] = temp_layers["bn"].name

        # Rewire this layer's bottoms if they referenced a fused-away Scale.
        for idx, plt in enumerate(pl.bottom):
            if plt in changed_layers:
                pl.bottom[idx] = changed_layers[plt]

        if op_type not in ["BatchNorm", "Scale"]:
            new_layers.append(pl)

    self.predict_layer = new_layers
    self.changed_layers = changed_layers
def convert_op_to_relay(self):
    """Convert every non-Input Caffe layer to relay and record its outputs."""
    for layer in self.predict_layer:
        kind = layer.type
        if kind == "Input":
            continue
        tops = layer.top
        converted = self.convert_map[kind](layer)
        if len(tops) == 1:
            self.exp_tab.set_expr(tops[0], converted)
        else:
            # Multi-output layer: register each top with its tuple element.
            for i, top_name in enumerate(tops):
                self.exp_tab.set_expr(top_name, converted[i])
def _rebuild_layers(predict_layer):
    """Rebuild caffe layer. If the caffe net include in-place layers, repalce its top
    with its name and update the bottom of other layer that is related to it.
    """
    # dict of input name that will be changed to new name
    changed_top_dict = dict()

    for pl in predict_layer:
        if pl.type == "Input":
            continue
        # if current layer has single input and output and input equals to output
        # it means that the layer does "in-place"
        if len(pl.top) == 1 and len(pl.bottom) == 1:
            if pl.top[0] == pl.bottom[0]:
                # change current layer's input firstly
                if pl.bottom[0] in changed_top_dict:
                    pl.bottom[0] = changed_top_dict[pl.bottom[0]]
                # update "change" dict
                changed_top_dict[pl.top[0]] = pl.name
                # change current layer's output to its name
                pl.top[0] = pl.name
            else:
                # not in-place, but its input may have been renamed earlier
                if pl.bottom[0] in changed_top_dict:
                    pl.bottom[0] = changed_top_dict[pl.bottom[0]]
        # if the layer does not
        else:
            for index, plt in enumerate(pl.bottom):
                if plt in changed_top_dict:
                    pl.bottom[index] = changed_top_dict[plt]
def _get_inputs_outputs(predict_layer):
"""Obtain Caffe model's inputs and outpus"""
# model inputs / outputs
model_inputs = list()
model_outputs = list()
# The bottoms of every layer can not be as outputs
not_outputs = set()
for pl in predict_layer:
if pl.type == "Input":
assert len(pl.top) == 1, "The number of Input layer's output is more than 1."
model_inputs.append(pl.top[0])
for i in pl.bottom:
not_outputs.add(i)
for pl in predict_layer:
if len(pl.bottom) > 0:
for t in pl.top:
if t not in not_outputs:
model_outputs.append(t)
return model_inputs, model_outputs
def from_caffe(init_net, predict_net, shape_dict, dtype_dict):
    """Convert from caffe model into compatible relay Function.

    Parameters
    ----------
    init_net : caffe_pb2.NetParameter
        caffemodel
    predict_net : caffe_pb2.NetParameter
        caffe prototxt
    shape_dict : dict of str to int list/tuple
        Input shapes of the model.
    dtype_dict : dict of str to str
        Input types of the model.

    Returns
    -------
    mod : tvm.IRModule
        The relay module for compilation.

    params : dict of str to tvm.NDArray
        The parameter dict to be used by relay
    """
    # Old-style prototxts declare inputs via the top-level `input` field
    # rather than dedicated Input layers.
    old_caffe = False
    if len(predict_net.input) != 0:  # old caffe version
        old_caffe = True
        model_inputs = list(predict_net.input)
    predict_layer = predict_net.layer

    # replace layer's top with its name and update other layers'bottoms
    _rebuild_layers(predict_layer)
    # obtain inputs and outputs of Net
    if old_caffe:
        _, model_outputs = _get_inputs_outputs(predict_layer)
    else:
        model_inputs, model_outputs = _get_inputs_outputs(predict_layer)

    exp_tab = ExprTable()
    # Register a relay var for every model input (float32 if no dtype hint).
    for in_name in model_inputs:
        shape = shape_dict[in_name] if in_name in shape_dict else None
        dtype = dtype_dict[in_name] if in_name in dtype_dict else "float32"
        exp_tab.set_expr(in_name, _expr.var(in_name, shape=shape, dtype=dtype))
    # `layer` vs `layers` differs between caffe proto versions.
    if list(init_net.layer):
        init_layer = init_net.layer
    else:
        init_layer = init_net.layers
    init_layer_dict = {il.name: il for il in init_layer}
    # op code in model
    op_converter = OperatorConverter(init_layer_dict, predict_layer, exp_tab)
    op_converter.check_unsupported_ops()
    op_converter.op_fuse()
    op_converter.convert_op_to_relay()

    # params and outputs
    params = {k: _nd.array(np.array(v)) for k, v in exp_tab.params.items()}
    outputs = list()
    for n in model_outputs:
        # Outputs of fused-away Scale layers were renamed during op_fuse.
        if n in op_converter.changed_layers:
            n = op_converter.changed_layers[n]
        outputs.append(exp_tab.get_expr(n))
    outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
    func = _function.Function(analysis.free_vars(outputs), outputs)
    mod = IRModule.from_expr(func)

    return mod, params
| 37,920 | 36.845309 | 100 | py |
tvm | tvm-main/python/tvm/relay/frontend/caffe2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, line-too-long, unused-argument
"""Caffe2 frontend"""
import tvm
from tvm.ir import IRModule
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_channels
__all__ = ["from_caffe2"]
def dimension_picker(prefix, surfix=""):
    """Return a callable mapping a 2D-kernel attr dict to a relay op name."""

    def _impl(attr):
        kernel = attr["kernel_shape"]
        if len(kernel) != 2:
            # Only 2D convolution/pooling kernels are handled.
            raise tvm.error.OpAttributeUnImplemented(
                f"Non-2D kernels are not supported for operator {prefix}2d"
            )
        return f"{prefix}2d{surfix}"

    return _impl
def revert_caffe2_pad(pads):
    """Caffe2 requires two times the normal padding."""
    n_pads = len(pads)
    if n_pads == 4:
        # Caffe2 lists (t, l, b, r); keep only the leading pair.
        return pads[:2]
    if n_pads == 2:
        return pads
    raise tvm.error.OpAttributeInvalid("Number of pads must equal 2 or 4.")
def dimension_constraint():
    """Custom-check factory accepting only 2D kernels."""

    def _dim_check(args):
        return len(args["kernel_shape"]) == 2

    return _dim_check, "Only 2d kernel supported."
def _clean_up_pool_args(args):
"""A helper function to clean up common arguments in conv and pooling ops."""
assert isinstance(args, dict)
if "stride_h" in args and "stride_w" in args:
assert "stride" not in args and "strides" not in args
args["strides"] = [args["stride_h"], args["stride_w"]]
args.pop("stride_h")
args.pop("stride_w")
elif "stride" in args:
args["strides"] = [args["stride"], args["stride"]]
args.pop("stride")
# rename 'kernel', 'kernels', to 'kernel_shape'
if "kernel_h" in args and "kernel_w" in args:
assert "kernel" not in args and "kernels" not in args
args["kernel_shape"] = [args["kernel_h"], args["kernel_w"]]
args.pop("kernel_h")
args.pop("kernel_w")
elif "kernel" in args:
args["kernel_shape"] = [args["kernel"], args["kernel"]]
args.pop("kernel")
elif "kernels" in args:
args["kernel_shape"] = args["kernels"]
args.pop("kernels")
if "pad_t" in args and "pad_l" in args and "pad_b" in args and "pad_r" in args:
assert "pad" not in args and "pads" not in args
args["pads"] = [args["pad_t"], args["pad_l"], args["pad_b"], args["pad_r"]]
for pad in ["pad_t", "pad_l", "pad_b", "pad_r"]:
args.pop(pad)
elif "pad" in args:
args["pads"] = [args["pad"], args["pad"]]
args.pop("pad")
if "dilation_h" in args and "dilation_w" in args:
assert "dilation" not in args and "dilations" not in args
args["dilations"] = [args["dilation_h"], args["dilation_w"]]
args.pop("dilation_h")
args.pop("dilation_w")
elif "dilation" in args:
args["dilations"] = [args["dilation"], args["dilation"]]
args.pop("dilation")
return args
class Caffe2OpConverter(object):
    """A helper class for holding Caffe2 op converters."""

    @classmethod
    def get_converter(cls):
        """Get converter.

        :return: converter, which should be `_impl`.
        """
        impl = getattr(cls, "_impl", None)
        if impl is None:
            raise tvm.error.OpNotImplemented(
                f"Operator {cls.__name__} is not supported in frontend Caffe2."
            )
        return impl
# Caffe2-internal / training-time attributes that carry no meaning for
# inference conversion; AttrCvt is configured to ignore them.
_caffe2_internal_args = [
    # nnpack args
    "algo",
    "convolution_transform_strategy",
    "float16_compute",
    "shared_buffer",
    # training args
    "init_params",
    "cudnn_exhaustive_search",
    "exhaustive_search",
    # training args
    "adj",
    "hwgq",
    # args that we don't care
    "legacy_pad",
]
class Elemwise(Caffe2OpConverter):
    """A helper class for elemwise op converters."""

    # Relay op name; overridden by concrete subclasses (Add, Mul).
    name = ""

    @classmethod
    def _impl(cls, inputs, args, params):
        assert len(inputs) == 2, f"Math op take 2 inputs, {len(inputs)} given"
        op_name = cls.name
        conv_ops = ["conv2d", "conv2d_transpose"]
        if args.get("broadcast", 0) and any(x in str(inputs[0]) for x in conv_ops):
            # TODO(zhreshold): remove hard coded infershape
            # NOTE(review): detecting a conv producer by substring-matching the
            # printed expression is fragile — confirm no better signal exists.
            axis = int(args.get("axis", 0))
            inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
        return get_relay_op(op_name)(*inputs)
class Add(Elemwise):
    """Operator converter for Add."""

    # Relay op name consumed by Elemwise._impl.
    name = "add"
class Mul(Elemwise):
    """Operator converter for Mul."""

    # Relay op name consumed by Elemwise._impl.
    name = "multiply"
class Pool(Caffe2OpConverter):
    """A helper class for pool op converters."""

    # Relay op name prefix ("avg_pool" / "max_pool"), set by subclasses.
    name = ""

    @classmethod
    def _impl(cls, inputs, args, params):
        _clean_up_pool_args(args)
        if "global_pooling" in args and args["global_pooling"] == 1:
            # Global pooling maps to the dedicated global_* relay op.
            op_name = dimension_picker("global_" + cls.name)
            return get_relay_op(op_name(args))(*inputs)

        return AttrCvt(
            op_name=dimension_picker(cls.name),
            transforms={
                "kernel_shape": "pool_size",
                "pads": ("padding", (0, 0), revert_caffe2_pad),
                "strides": "strides",
            },
            ignores=["dilations", "order", "legacy_pad", "global_pooling"],
            extras={"ceil_mode": False},
            custom_check=dimension_constraint(),
        )(inputs, args, params)
class AveragePool(Pool):
    """Operator converter for AveragePool."""

    name = "avg_pool"
class MaxPool(Pool):
    """Operator converter for MaxPool."""

    name = "max_pool"
class Conv(Caffe2OpConverter):
    """Operator converter for Conv."""

    @classmethod
    def _impl(cls, inputs, args, params):
        # get number of channels from the weight tensor
        channels = infer_channels(inputs[1])
        args["channels"] = channels
        _clean_up_pool_args(args)
        out = AttrCvt(
            op_name=dimension_picker("conv"),
            transforms={
                "group": ("groups", 1),
                "kernel_shape": "kernel_size",
                "pads": ("padding", (0, 0), revert_caffe2_pad),
                "strides": "strides",
                "dilations": ("dilation", (1, 1)),
                # caffe2 "order" attr may be bytes; normalize to str
                "order": (
                    "data_layout",
                    ("NCHW"),
                    lambda x: x if isinstance(x, str) else x.decode("UTF-8"),
                ),
            },
            excludes=[],
            ignores=_caffe2_internal_args,
            custom_check=dimension_constraint(),
        )(inputs[:2], args, params)
        # a third input, when present, is the bias
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class ConvTranspose(Caffe2OpConverter):
    """Operator converter for ConvTranspose."""

    @classmethod
    def _impl(cls, inputs, args, params):
        # get number of channels (transposed weight layout)
        channels = infer_channels(inputs[1], True)
        args["channels"] = channels
        _clean_up_pool_args(args)
        out = AttrCvt(
            op_name=dimension_picker("conv", "_transpose"),
            transforms={
                "kernel_shape": "kernel_size",
                "pads": ("padding", (0, 0), revert_caffe2_pad),
                "dilations": ("dilation", (1, 1)),
                # caffe2 "order" attr may be bytes; normalize to str
                "order": (
                    "data_layout",
                    ("NCHW"),
                    lambda x: x if isinstance(x, str) else x.decode("UTF-8"),
                ),
            },
            excludes=[],
            ignores=_caffe2_internal_args,
            custom_check=dimension_constraint(),
        )(inputs[:2], args, params)
        # a third input, when present, is the bias
        use_bias = len(inputs) == 3
        if use_bias:
            out = _op.nn.bias_add(out, inputs[2])
        return out
class Concat(Caffe2OpConverter):
    """Operator converter for Concat."""

    @classmethod
    def _impl(cls, inputs, args, params):
        def _get_axis_from_order_str(order):
            # caffe2 "order" attr may be bytes; map layout to channel axis
            order = order if isinstance(order, str) else order.decode("UTF-8")
            if order == "NCHW":
                return 1
            if order == "NHWC":
                return 3
            raise tvm.error.OpAttributeUnImplemented(
                f"Order {order} is not supported in operator Concat."
            )

        return AttrCvt(
            op_name="concatenate",
            transforms={"order": ("axis", (1), _get_axis_from_order_str)},
            excludes=["add_axis"],
        )((inputs,), args, params)
class NormalizePlanarYUV(Caffe2OpConverter):
    """Operator converter for NormalizePlanarYUV.
    caffe2 definition: https://github.com/pytorch/pytorch/blob/master/caffe2/operators/norm_planar_yuv_op.cc
    """

    @classmethod
    def _impl(cls, inputs, args, params):
        assert len(inputs) == 3
        # Broadcast the per-channel mean/std over the two spatial dims.
        mean_b = _op.expand_dims(inputs[1], axis=2, num_newaxis=2)
        std_b = _op.expand_dims(inputs[2], axis=2, num_newaxis=2)
        centered = _op.subtract(inputs[0], mean_b)
        return _op.divide(centered, std_b)
class ResizeNearest(Caffe2OpConverter):
    """Operator converter for Upsample (nearest mode)."""

    @classmethod
    def _impl(cls, inputs, args, params):
        width_scale = args.get("width_scale", 1)
        height_scale = args.get("height_scale", 1)
        # Only uniform scaling is supported.
        assert width_scale == height_scale
        factor = int(width_scale)

        return _op.nn.upsampling(
            inputs[0], scale_h=factor, scale_w=factor, method="NEAREST_NEIGHBOR"
        )
class Sum(Caffe2OpConverter):
    """Operator converter for Sum."""

    @classmethod
    def _impl(cls, inputs, args, params):
        # Fold the inputs pairwise left-to-right; intermediate partial sums
        # are stored back into the inputs list (matching historic behavior).
        for idx in range(1, len(inputs)):
            inputs[idx] = _op.add(inputs[idx - 1], inputs[idx])
        return inputs[-1]
class Softmax(Caffe2OpConverter):
    """Operator converter for Softmax."""

    @classmethod
    def _impl(cls, inputs, args, params):
        # Caffe2 defaults the softmax axis to 1 when unspecified.
        args.setdefault("axis", 1)
        return AttrCvt("softmax", transforms={"axis": ("axis", args["axis"])})(
            inputs, args, params
        )
class FC(Caffe2OpConverter):
    """Operator converter for FC."""

    @classmethod
    def _impl(cls, inputs, args, params):
        # Flatten everything past the batch dim before the dense matmul.
        inputs[0] = _op.nn.batch_flatten(inputs[0])
        units = infer_channels(inputs[1])
        out = _op.nn.dense(inputs[0], inputs[1], units=units)
        if len(inputs) == 3:
            # third input is the bias
            out = _op.nn.bias_add(out, inputs[2])
        return out
class SpatialBN(Caffe2OpConverter):
    """Operator converter for SpatialBN."""

    @classmethod
    def _impl(cls, inputs, args, params):
        # momentum is a training-only attr; the rest carry no inference meaning.
        ignored = ["order", "spatial", "is_test", "consumed_inputs", "num_batches"]
        converter = AttrCvt(op_name="batch_norm", disables=["momentum"], ignores=ignored)
        return converter(inputs, args, params)
# compatible operators that do NOT require any conversion.
# (Currently empty: every supported op goes through _get_convert_map.)
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)

# Minimal set of ops for squeezenet and resnet50
def _get_convert_map():
    """Return the caffe2-op-name -> converter-callable dispatch table."""
    return {
        # caffe2 common operators
        "Add": Add.get_converter(),
        "Sum": Sum.get_converter(),
        "Mul": Mul.get_converter(),
        "Softmax": Softmax.get_converter(),
        # nn
        "AveragePool": AveragePool.get_converter(),
        "MaxPool": MaxPool.get_converter(),
        "Conv": Conv.get_converter(),
        "ConvTranspose": ConvTranspose.get_converter(),
        "Concat": Concat.get_converter(),
        "FC": FC.get_converter(),
        "SpatialBN": SpatialBN.get_converter(),
        "ResizeNearest": ResizeNearest.get_converter(),
        "Relu": AttrCvt("relu", {}, ignores=["order"]),
        "Sigmoid": Renamer("sigmoid"),
        "Dropout": AttrCvt("dropout", {"ratio": "rate"}, ignores=["is_test"]),
        # c2 image preprocessing ops
        "NormalizePlanarYUV": NormalizePlanarYUV.get_converter(),
    }
class Caffe2NetDef(object):
    """A helper class for handling Relay expression copying from pb2.GraphProto.

    Definition: https://github.com/pytorch/pytorch/blob/master/caffe2/proto/caffe2.proto
    """

    def __init__(self, shape, dtype):
        # blob name -> converted Relay expression
        self._nodes = {}
        # blob name -> tvm.nd.NDArray weight fetched from the caffe2 workspace
        self._params = {}
        # blobs currently being converted; used by _get_node to detect cycles
        self._visited_nodes = set()
        # output blob name -> the caffe2 op that produces it
        self._ops = {}
        # user-supplied input shapes (dict) and dtype (str or per-blob dict)
        self._shape = shape
        self._dtype = dtype
        self._mod = IRModule({})

    def from_caffe2(self, init_net, predict_net):
        """Construct Relay expression from caffe2 graph.

        Parameters
        ----------
        init_net : protobuf object
        predict_net : protobuf object

        Returns
        -------
        mod : tvm.IRModule
            The module that optimizations will be performed on.

        params : dict
            A dict of name: tvm.nd.array pairs, used as pretrained weights
        """
        # pylint: disable=import-outside-toplevel
        from caffe2.python import workspace

        # Running init_net populates the caffe2 workspace with all weight blobs.
        workspace.RunNetOnce(init_net)

        # Input
        input_name = predict_net.op[0].input[0]

        # Params: keep only workspace blobs that are actually consumed by some
        # op and are not the graph input itself.
        self._params = {}
        used_blobs = set()
        for c2_op in predict_net.op:
            for i in c2_op.input:
                used_blobs.add(i)
        for blob in workspace.Blobs():
            if blob in used_blobs and blob != input_name:
                self._params[blob] = _nd.array(workspace.FetchBlob(blob))

        # Variables: params become vars with known shape/dtype; other external
        # inputs fall back to the user-provided shape/dtype ("float32" default).
        self._nodes = {}
        for blob in predict_net.external_input:
            if blob in self._params:
                self._nodes[blob] = new_var(
                    blob, shape=self._params[blob].shape, dtype=self._params[blob].dtype
                )
            else:
                shape = self._shape[blob] if blob in self._shape else ()
                if isinstance(self._dtype, dict) and blob in self._dtype:
                    dtype = str(self._dtype[blob])
                elif isinstance(self._dtype, str):
                    dtype = self._dtype
                else:
                    dtype = "float32"
                self._nodes[blob] = new_var(blob, shape=shape, dtype=dtype)

        # Ops: index producers first so _get_node can convert on demand, then
        # convert every op in graph order.
        for c2_op in predict_net.op:
            for blob in c2_op.output:
                self._ops[blob] = c2_op
        for c2_op in predict_net.op:
            self._process_op(c2_op)

        # Outputs
        out = []
        for blob in predict_net.external_output:
            out.append(self._nodes[blob])

        # A single output stays a plain expression; multiple become a Tuple.
        if len(out) > 1:
            outputs = _expr.Tuple(out)
        else:
            outputs = out[0]

        func = _function.Function(analysis.free_vars(outputs), outputs)
        self._mod["main"] = func

        return self._mod, self._params

    def _get_node(self, blob):
        """Get the Symbol of blob and detect cyclic dependency in the graph."""
        if blob in self._nodes:
            return self._nodes[blob]

        # Re-entering a blob that is still being converted means a cycle.
        assert blob not in self._visited_nodes, f"Cyclic dependency in the graph (in {blob})"
        self._visited_nodes.add(blob)

        self._process_op(self._ops[blob])
        return self._nodes[blob]

    def _process_op(self, c2_op):
        # Convert one caffe2 op; its inputs are converted recursively on demand.
        op_type = c2_op.type
        args = self._parse_arg(c2_op.arg)
        inputs = [self._get_node(i) for i in c2_op.input]
        tvm_op = self._convert_operator(op_type, inputs, args)

        if not isinstance(tvm_op, _expr.TupleWrapper):
            self._nodes[c2_op.output[0]] = tvm_op
        else:
            # Multi-output op: bind each output blob to its tuple element.
            for k, i in zip(list(c2_op.output), range(len(tvm_op))):
                self._nodes[k] = tvm_op[i]

    def _parse_arg(self, arg):
        """Convert a list of Argument to a dict, with names as keys."""
        args = {}
        for a in arg:
            # Scalar fields: float, int, string.
            for f in ["f", "i", "s"]:
                if a.HasField(f):
                    args[a.name] = getattr(a, f)
            # Repeated fields become tuples; an Argument may carry only one kind.
            for f in ["floats", "ints", "strings"]:
                if list(getattr(a, f)):
                    assert a.name not in args, "Only one type of attr is allowed"
                    args[a.name] = tuple(getattr(a, f))
            # Nested-net arguments are not representable in Relay.
            for f in ["n"]:
                if a.HasField(f):
                    raise NotImplementedError(f"Field {f} is not supported in relay.")
            for f in ["nets"]:
                if list(getattr(a, f)):
                    raise NotImplementedError(f"Field {f} is not supported in relay.")
            if a.name not in args:
                raise ValueError(f"Cannot parse attribute: \n{a}\n.")
        return args

    def _convert_operator(self, op_type, inputs, args, identity_list=None, convert_map=None):
        """Convert from Caffe2 operator to Relay operator.

        The converter must specify conversions explicitly for incompatible name, and
        apply handlers to operator attributes.

        Parameters
        ----------
        op_type : str
            Operator name, such as Convolution, FullyConnected
        inputs : list of tvm.relay.function.Function
            List of input inputs.
        args : dict
            Dict of operator attributes
        identity_list : list
            List of operators that don't require conversion
        convert_map : dict
            Dict of name : callable, where name is the op's name that
            require conversion to relay, callable are functions which
            take args and return (new_op_type, new_args)

        Returns
        -------
        func : tvm.relay.function.Function
            Converted relay function
        """
        identity_list = identity_list if identity_list else _identity_list
        convert_map = convert_map if convert_map else _get_convert_map()
        if op_type in identity_list:
            func = get_relay_op(op_type)(*inputs, **args)
        elif op_type in convert_map:
            # Add a sanitizing step to convert all byte strings in args to strings
            func = convert_map[op_type](inputs, args, self._params)
        else:
            raise tvm.error.OpNotImplemented(
                f"Operator {op_type} is not supported in frontend Caffe2."
            )
        return func
def from_caffe2(init_net, predict_net, shape=None, dtype="float32"):
    """Load caffe2 graph which contains init_net and predict_net into Relay Function.

    Parameters
    ----------
    init_net : protobuf object
        Caffe2 NetDef containing the weights

    predict_net : protobuf object
        Caffe2 NetDef containing the graph

    shape : dict of str to tuple
        The input shape to the graph

    dtype : str or dict of str to str
        The input types to the graph

    Returns
    -------
    mod : tvm.IRModule
        The module that optimizations will be performed on.

    params : dict of str to tvm.nd.NDArray
        Dict of converted parameters stored in tvm.nd.NDArray format
    """
    # Delegate the whole conversion to the stateful helper class.
    converter = Caffe2NetDef(shape, dtype)
    return converter.from_caffe2(init_net, predict_net)
| 19,850 | 31.81157 | 108 | py |
tvm | tvm-main/python/tvm/relay/frontend/pytorch_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel, unused-argument, invalid-name
""" Common utilities used by PyTorch frontend """
from .. import expr
from .. import op
from ..dataflow_pattern import (
wildcard,
is_constant,
is_op,
rewrite,
is_tuple,
is_tuple_get_item,
is_if,
DFPatternCallback,
)
def is_version_greater_than(ver):
    """
    Returns True if the local PyTorch version is greater
    than the one given as an argument.

    Parameters
    ----------
    ver : str
        The version string to compare against, e.g. "1.8.0".
    """
    import torch
    from distutils.version import LooseVersion

    torch_ver = torch.__version__
    # PyTorch version strings can carry a PEP 440 local-version suffix such as
    # "+cu117", "+cpu" or "+rocm5.2"; strip any of them before comparing,
    # otherwise LooseVersion would compare the local tag as well. (The old
    # code only handled the "+cu..." case, so e.g. "1.13.1+cpu" compared as
    # greater than "1.13.1".) str.split is a no-op when there is no "+".
    torch_ver = torch_ver.split("+")[0]
    return LooseVersion(torch_ver) > ver
def getattr_attr_name(node):
    """Return the string value of the single attribute carried by *node*."""
    names = node.attributeNames()
    # A GetAttr-style node is expected to carry exactly one attribute.
    assert len(names) == 1
    return node.s(names[0])
def dyn_strided_slice_pattern(inp, end):
    """A pattern to detect dynamic strided slice op."""
    # Normalization of a possibly-negative begin index:
    # where(begin < 0, begin + shape, begin)
    zero_like = is_op("cast_like")(is_constant(), is_constant())
    is_negative = is_op("less")(is_constant(), zero_like)
    shape_like = is_op("cast_like")(is_op("shape_of")(inp), is_constant())
    wrapped = is_op("add")(is_constant(), shape_like)
    begin = is_op("where")(is_negative, wrapped, is_constant())
    return is_op("dyn.strided_slice")(inp, begin, end, is_constant())
def batched_nms_pattern(boxes, scores, idxs, iou_threshold, num_boxes, indices):
    """A pattern to detect batched_nms function in torchvision

    The inputs to this function, boxes, scores, idxs, iou_threshold are wildcard
    patterns which can be used later in the rewriting to extract matched Relay fragments.

    We want to detect the following PyTorch code snippet:

    def batched_nms(boxes, scores, idxs, iou_threshold):
        max_coordinate = boxes.max()
        offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
        boxes_for_nms = boxes + offsets[:, None]
        keep = nms(boxes_for_nms, scores, iou_threshold)
        return keep

    Here is how PyTorch frontend lowers above PyTorch code. For simplicity, Relay ops for
    dealing with dynamic strided_slice are omitted. %num_boxes, %indices are complex
    expressions, but since we can use the wildcard part for them, we do not need to construct
    their patterns.

    %2 = expand_dims(%scores, axis=-1);
    %3 = cast(%idxs, dtype="float32");
    %4 = max(%boxes);
    %5 = add(%4, 1f);
    %6 = multiply(%3, %5);
    %7 = strided_slice(%6, begin=[0], end=[4507], strides=[1]);
    %8 = expand_dims(%7, axis=1);
    %9 = add(%boxes, %8);
    %10 = (%2, %9);
    %11 = concatenate(%10, axis=-1);
    %12 = expand_dims(%11, axis=0);
    ...
    ...
    %17 = vision.non_max_suppression(%12, %num_boxes, %indices, -1, 0.7f, ...);
    """
    one = is_constant()

    # offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
    idxs_float = is_op("cast")(idxs)
    offset_scale = is_op("add")(is_op("max")(boxes), one)
    offsets = is_op("multiply")(idxs_float, offset_scale)
    # NOTE(review): the shape_of/cast chain below is built but never referenced
    # in the final pattern; it is kept to mirror the original construction.
    is_op("cast")(is_op("shape_of")(offsets))

    # boxes_for_nms = boxes + offsets[:, None]
    boxes_for_nms = is_op("add")(boxes, is_op("expand_dims")(offsets))

    # The remaining pattern mirrors the PyTorch frontend conversion of
    # torchvision::nms: concat scores and boxes, add a batch axis, run NMS.
    scores_col = is_op("expand_dims")(scores)
    packed = is_op("concatenate")(is_tuple([scores_col, boxes_for_nms]))
    nms_input = is_op("expand_dims")(packed)
    return is_op("vision.non_max_suppression")(
        nms_input, num_boxes, indices, is_constant(), iou_threshold
    )
def topk_after_batch_nms_pattern(cond, true_branch, data, valid_count, indices, iou_threshold):
    """
    Detect the following pattern used in torchvision detection models.

    def batched_nms(...):
        if boxes.numel() == 0:
            return torch.empty((0,), dtype=torch.int64, device=boxes.device)
        else:
            ...
            return nms(boxes_for_nms, scores, iou_threshold)

    keep = batched_nms(boxes, scores, lvl, self.nms_thresh)
    keep = keep[:post_nms_top_k] # keep only topk scoring predictions

    An equivalent Relay subgraph:

    %1184 = if (%1117) {
      ...
    } else {
      ...
      %1172 = vision.non_max_suppression(%1167, %1168, %1171, -1, 0.7f, ...);
      ...
      %1183 = dyn.strided_slice(%1174, %1180, %1182, ...);
      cast(%1183, dtype="int64")
    };
    %1185 = strided_slice(%1184, begin=[0], end=[1000], strides=[1]);
    """
    nms = is_op("vision.non_max_suppression")(
        data, valid_count, indices, is_constant(), iou_threshold
    )
    # NMS returns (indices, count); both are squeezed and the indices are
    # dynamically sliced down to the surviving count before the int64 cast.
    kept_indices = is_op("squeeze")(is_tuple_get_item(nms, 0))
    kept_count = is_op("squeeze")(is_tuple_get_item(nms, 1))
    false_branch = is_op("cast")(dyn_strided_slice_pattern(kept_indices, kept_count))
    batched_nms_result = is_if(cond, true_branch, false_branch)
    return is_op("strided_slice")(batched_nms_result)
class MulticlassNMSRewrite(DFPatternCallback):
    """A callback to rewrite nms and restore batched nms."""

    def __init__(self):
        super().__init__()
        # Wildcards capture the Relay sub-expressions feeding the matched NMS.
        self.boxes = wildcard()
        self.scores = wildcard()
        self.idxs = wildcard()
        self.iou_threshold = wildcard()
        self.num_boxes = wildcard()
        self.indices = wildcard()
        self.pattern = batched_nms_pattern(
            self.boxes,
            self.scores,
            self.idxs,
            self.iou_threshold,
            self.num_boxes,
            self.indices,
        )

    def convert_batched_nms(self, boxes, scores, idxs, iou_thres, num_boxes, indices):
        """Restore class-aware NMS using extracted class indices"""
        # Pack (class_id, score, box) rows and add a batch axis for the NMS op.
        scores_col = op.expand_dims(scores, axis=-1, num_newaxis=1)
        idxs_col = op.cast(op.expand_dims(idxs, axis=-1, num_newaxis=1), "float32")
        data = op.expand_dims(op.concatenate([idxs_col, scores_col, boxes], -1), 0, 1)
        # id_index=0 makes suppression class-aware; -1 disables the size caps.
        out = op.vision.non_max_suppression(
            data=data,
            valid_count=num_boxes,
            indices=indices,
            max_output_size=-1,
            iou_threshold=iou_thres,
            force_suppress=False,
            top_k=-1,
            coord_start=2,
            score_index=1,
            id_index=0,
            return_indices=True,
            invalid_to_bottom=False,
        )
        return out.tuple_value

    def callback(self, pre, post, node_map):
        def extract(pattern):
            return node_map[pattern][0]

        return self.convert_batched_nms(
            extract(self.boxes),
            extract(self.scores),
            extract(self.idxs),
            extract(self.iou_threshold),
            extract(self.num_boxes),
            extract(self.indices),
        )
class PostNMSTopKRewrite(DFPatternCallback):
    """A callback to rewrite nms to exploit max_out_size parameter."""

    def __init__(self):
        super().__init__()
        self.cond = wildcard()
        self.true_branch = wildcard()
        self.data = wildcard()
        self.valid_count = wildcard()
        self.indices = wildcard()
        self.iou_threshold = wildcard()
        self.pattern = topk_after_batch_nms_pattern(
            self.cond,
            self.true_branch,
            self.data,
            self.valid_count,
            self.indices,
            self.iou_threshold,
        )

    def rewrite_batch_nms_with_max_out_size(
        self, cond, true_branch, data, valid_count, indices, iou_threshold, post_nms_topk
    ):
        """Use the detected post NMS topk parameter in NMS op."""
        # Same NMS as the matched graph, but with the top-k budget folded into
        # max_output_size so the op itself caps the result.
        nms_ret = op.vision.non_max_suppression(
            data=data,
            valid_count=valid_count,
            indices=indices,
            max_output_size=post_nms_topk,
            iou_threshold=iou_threshold,
            force_suppress=False,
            top_k=-1,
            coord_start=2,
            score_index=1,
            id_index=0,
            return_indices=True,
            invalid_to_bottom=False,
        )
        # Slice out exactly the surviving rows, then restore the int64 dtype
        # the surrounding graph expects.
        kept_count = op.squeeze(nms_ret[1], axis=[1])
        kept_rows = op.squeeze(nms_ret[0], axis=[0])
        sliced = op.strided_slice(kept_rows, begin=expr.const([0]), end=kept_count, slice_mode="size")
        false_branch = op.cast(sliced, "int64")
        return expr.If(cond, true_branch, false_branch)

    def callback(self, pre, post, node_map):
        # The matched strided_slice's end attribute carries the top-k value.
        post_nms_topk = post.attrs.end[0].value
        return self.rewrite_batch_nms_with_max_out_size(
            node_map[self.cond][0],
            node_map[self.true_branch][0],
            node_map[self.data][0],
            node_map[self.valid_count][0],
            node_map[self.indices][0],
            node_map[self.iou_threshold][0],
            post_nms_topk,
        )
def scatter_roi_align_result_pattern(levels, roi_align_results, num_scales):
    """Detect the Relay subgraph corresponding to the following PyTorch code

    first_result = roi_align_results[0]
    dtype, device = first_result.dtype, first_result.device
    res = torch.zeros((levels.size(0), first_result.size(1),
                       first_result.size(2), first_result.size(3)),
                      dtype=dtype, device=device)
    for level in range(len(roi_align_results)):
        index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
        index = index.expand(index.size(0),
                             roi_align_results[level].size(1),
                             roi_align_results[level].size(2),
                             roi_align_results[level].size(3))
        res = res.scatter(0, index, roi_align_results[level])
    return res
    """

    def level_indices(lvls, _):
        # torch.where(levels == level)[0]
        idx = is_op("argwhere")(is_op("equal")(lvls, is_constant()))
        idx = is_tuple_get_item(is_op("split")(idx), 0)
        idx = is_op("squeeze")(idx)
        return is_tuple_get_item(is_tuple([idx]), 0)

    scatter_res = wildcard()
    for level in range(num_scales):
        # index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
        index = is_op("reshape")(level_indices(levels, level))
        # index.expand(...) over the three trailing feature dimensions.
        for _ in range(3):
            index = is_op("repeat")(index)
        scatter_res = is_op("scatter_elements")(scatter_res, index, roi_align_results[level])
    return is_op("reshape")(scatter_res)
class ScatterRewrite(DFPatternCallback):
    """A callback to rewrite repeated scatters with a batched gather."""

    def __init__(self, num_scales):
        super().__init__()
        self.num_scales = num_scales
        self.levels = wildcard()
        self.roi_align_results = [wildcard() for _ in range(num_scales)]
        self.pattern = scatter_roi_align_result_pattern(
            self.levels, self.roi_align_results, num_scales
        )

    def convert_scatter_to_gather(self, levels, roi_align_results):
        """Replace the detected scatter loop with the following PyTorch code

        indices_per_level = []
        for level in range(num_scales):
            idx_in_level = torch.where(levels == level)[0]
            indices_per_level.append(idx_in_level)

        stacked_features = torch.cat(roi_align_results, dim=0)
        stacked_indices = torch.cat(indices_per_level, dim=0)
        argsort_indices = torch.argsort(stacked_indices)
        return stacked_features[argsort_indices, :]
        """
        # Collect the row indices that belong to each scale, then concat them.
        indices_per_level = []
        for level in range(self.num_scales):
            is_level = op.equal(levels, expr.const(level, dtype="int64"))
            rows = op.argwhere(is_level)
            first_col = op.split(rows, indices_or_sections=1, axis=1)[0]
            flat = op.squeeze(first_col, axis=[1])
            indices_per_level.append(op.cast(flat, dtype="int64"))
        indices_concat = op.concatenate(indices_per_level, 0)

        # Concat the per-level roi_align outputs and permute their rows by the
        # argsorted indices — equivalent to the original scatter sequence.
        features_concat = op.concatenate(roi_align_results, 0)
        order = op.cast(op.argsort(indices_concat), dtype="int64")
        permuted = op.take(features_concat, order, axis=0)
        return op.reshape(permuted, [0, -1, 1, 1])

    def callback(self, pre, post, node_map):
        levels = node_map[self.levels][0]
        roi_align_results = [node_map[feat][0] for feat in self.roi_align_results]
        return self.convert_scatter_to_gather(levels, roi_align_results)
def rewrite_nms_to_batched_nms(mod):
    """Rewrite the input graph to replace non maximum suppression
    in torchvision that does not take class id into account with the one
    that avoids IOU tests between different classes.
    """
    rewriter = MulticlassNMSRewrite()
    mod["main"] = rewrite(rewriter, mod["main"])
    return mod
def rewrite_batched_nms_with_max_out_size(mod):
    """Rewrite the input graph to detect slicing after batched nms and
    use the slicing size as the parameter max_out_size in NMS.
    """
    rewriter = PostNMSTopKRewrite()
    mod["main"] = rewrite(rewriter, mod["main"])
    return mod
def rewrite_scatter_to_gather(mod, num_scales):
    """Rewrite the input graph to replace a repeated scatter loop with
    a batched gather. The scatter loop is used in torchvision MultiScaleRoIAlign
    to merge roi_align results for all scales. The scatter is used to emulate
    inplace updates.
    """
    rewriter = ScatterRewrite(num_scales)
    mod["main"] = rewrite(rewriter, mod["main"])
    return mod
| 15,098 | 34.864608 | 99 | py |
tvm | tvm-main/python/tvm/relay/op/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel
"""Transform operators."""
from ...tir import expr as _expr
from ..expr import Constant, Expr, Tuple, TupleWrapper, const
from . import _make
from .dyn import _make as _dyn_make
from .tensor import shape_of
def sliding_window(data, axis, window_shape, strides):
    """Slide a window over the data tensor.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axis : int
        The axis at which the window begins sliding. The window slides over
        this axis and all following axes, so both `window_shape` and `strides`
        must have length `data.ndim - axis`.

    window_shape : List[int]
        The window shape to form over the input.

    strides : List[int]
        How to stride the window along each dimension.

    Returns
    -------
    result : relay.Expr
        The resulting tensor: the untouched leading axes, then one axis per
        window placement, then the window dimensions themselves.

    Examples
    --------
    .. code-block:: python

        # Slide a window of shape (3, 4, 5) over the x tensor, beginning with
        # dimension 1, with strides (1, 2, 3).
        x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
        y = relay.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])
        data = np.random.rand(2, 3, 32, 32).astype("float32")
        result = create_executor().evaluate(y, {x: relay.const(data)}).numpy()

        # Batch size 2 is preserved. (1, 15, 10) counts the window placements
        # along each slid axis; the trailing (3, 4, 5) are the windows.
        assert result.shape == (2, 1, 15, 10, 3, 4, 5)
        assert np.array_equal(result[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
        assert np.array_equal(result[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
        assert np.array_equal(result[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])
    """
    from .. import _ffi_api

    return _ffi_api.sliding_window(data, axis, window_shape, strides)
def cast(data, dtype):
    """Cast input tensor to data type.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    dtype: str
        The target data type.

    Returns
    -------
    result : relay.Expr
        The casted result.
    """
    from .. import _ffi_api

    return _ffi_api.cast(data, dtype)
def cast_like(data, dtype_like):
    """Cast input tensor to data type of another tensor.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    dtype_like: relay.Expr
        The tensor to cast to.

    Returns
    -------
    result : relay.Expr
        The casted result.
    """
    from .. import _ffi_api

    return _ffi_api.cast_like(data, dtype_like)
def reinterpret(data, dtype):
    """Reinterpret input tensor to data type.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    dtype: str
        The target data type.

    Returns
    -------
    result : relay.Expr
        The reinterpreted result.
    """
    # Note: uses the parent package's _make, not this module's sibling _make.
    from .. import _make as _parent_make

    return _parent_make.reinterpret(data, dtype)
def expand_dims(data, axis, num_newaxis=1):
    """Insert `num_newaxis` axes at the position given by `axis`.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axis : Union[int, Expr]
        The axis at which the input array is expanded.
        Should lie in range `[-data.ndim - 1, data.ndim]`.
        If `axis < 0`, it is the first axis inserted;
        If `axis >= 0`, it is the last axis inserted in Python's negative indexing.

    num_newaxis : int, optional
        Number of axes to be inserted. Should be >= 0.

    Returns
    -------
    result : relay.Expr
        The reshaped result.
    """
    if isinstance(axis, Expr):
        # Dynamic axis: route to the dynamic variant of the op.
        # TODO (AndrewZhaoLuo): investigate performance issues with consecutive
        # dynamic expand_dims on non-llvm targets.
        return _dyn_make.expand_dims(data, axis, num_newaxis)
    if isinstance(axis, int):
        return _make.expand_dims(data, axis, num_newaxis)
    raise ValueError(f"Unknown type for axis: {type(axis)}")
def transpose(data, axes=None):
    """Permutes the dimensions of an array.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axes : None or List[int]
        The target axes order, reverse order if not specified.

    Returns
    -------
    result : relay.Expr
        The transposed result.
    """
    # Normalize any iterable of axes to a plain list; None means reverse.
    axes = list(axes) if axes is not None else None
    return _make.transpose(data, axes)
def squeeze(data, axis=None):
    """Squeeze axes in the array.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    axis : Union[None, int, Tuple[int], List[int]] or Expr
        The set of axes to remove.
        If axis = None, remove all axes of dimension 1.
        If any specified axis has dimension that does not equal 1, it is an error.

    Returns
    -------
    result : relay.Expr
        The squeezed result.
    """
    if isinstance(axis, Constant):
        # A constant tensor of axes: unpack to a plain Python list. Scalar
        # constants have an empty shape and need .item().
        if axis.data.shape:
            axis = list(axis.data.numpy())
        else:
            axis = [axis.data.numpy().item()]
    if isinstance(axis, Expr):
        # Non-constant axes are only known at runtime: use the dynamic op.
        return _dyn_make.squeeze(data, axis)
    if isinstance(axis, int):
        axis = [axis]
    if isinstance(axis, (tuple, list)):
        # Normalize each entry to a plain int (entries may be IntImm).
        tempaxis = []
        for tmpax in axis:
            if isinstance(tmpax, _expr.IntImm):
                tempaxis.append(tmpax.value)
            else:
                try:
                    tempaxis.append(int(tmpax))
                except ValueError as err:
                    # Chain the original error so the root cause stays visible
                    # in the traceback (PEP 3134).
                    raise RuntimeError(f"Unrecognized axis type: {err}") from err
        axis = tempaxis
    return _make.squeeze(data, axis)
def reshape(data, newshape, allowzero=False):
    """Reshape the input array.

    To give user more convenience in without doing manual shape inference,
    some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}.
    The significance of each is explained below:

    ``0`` copy this dimension from the input to the output shape.

        .. code-block:: python

            data.shape = (2,3,4), newshape = (4,0,2), result.shape = (4,3,2)
            data.shape = (2,3,4), newshape = (2,0,0), result.shape = (2,3,4)

    Note: If the parameter allowzero is manually set to true, it specifies a
    special case where 0 actually means a true empty tensor.

    ``-1`` infers the dimension of the output shape by using the remainder of
    the input dimensions keeping the size of the new array same as that of the input array.
    At most one dimension of shape can be -1.

        .. code-block:: python

            data.shape = (2,3,4), newshape = (6,1,-1), result.shape = (6,1,4)
            data.shape = (2,3,4), newshape = (3,-1,8), result.shape = (3,1,8)
            data.shape = (2,3,4), newshape = (-1,), result.shape = (24,)

    ``-2`` copy all/remainder of the input dimensions to the output shape.

        .. code-block:: python

            data.shape = (2,3,4), newshape = (-2,), result.shape = (2,3,4)
            data.shape = (2,3,4), newshape = (2,-2), result.shape = (2,3,4)
            data.shape = (2,3,4), newshape = (-2,1,1), result.shape = (2,3,4,1,1)

    ``-3`` use the product of two consecutive dimensions of the input shape
    as the output dimension.

        .. code-block:: python

            data.shape = (2,3,4), newshape = (-3,4), result.shape = (6,4)
            data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20)
            data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12)
            data.shape = (2,3,4), newshape = (-3,-2), result.shape = (6,4)

    ``-4`` split one dimension of the input into two dimensions passed subsequent
    to -4 in shape (can contain -1).

        .. code-block:: python

            data.shape = (2,3,4), newshape = (-4,1,2,-2), result.shape = (1,2,3,4)
            data.shape = (2,3,4), newshape = (2,-4,-1,3,-2), result.shape = (2,1,3,4)

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    newshape : Union[int, Tuple[int], List[int]] or relay.Expr
        The new shape. Should be compatible with the original shape.

    allowzero : Bool, optional
        If true, then treat zero as true empty tensor rather than a copy instruction.

    Returns
    -------
    result : relay.Expr
        The reshaped result.
    """
    if isinstance(newshape, Constant):
        newshape = list(newshape.data.numpy())
    if isinstance(newshape, Expr):
        # A non-constant shape is only known at runtime: use the dynamic op.
        return _dyn_make.reshape(data, newshape, allowzero)
    if isinstance(newshape, int):
        newshape = [newshape]
    if isinstance(newshape, (tuple, list)):
        # Normalize each entry to a plain int (entries may be IntImm).
        tempshape = []
        for shape in newshape:
            if isinstance(shape, _expr.IntImm):
                tempshape.append(shape.value)
            else:
                try:
                    tempshape.append(int(shape))
                except ValueError as err:
                    # Chain the original error so the root cause stays visible
                    # in the traceback (PEP 3134).
                    raise RuntimeError(f"Unrecognized shape type: {err}") from err
        newshape = tempshape
    return _make.reshape(data, list(newshape), allowzero)
def argwhere(condition):
    """Find the indices of elements of a tensor that are
    non-zero.

    Parameters
    ----------
    condition : relay.Expr
        The input condition tensor.

    Returns
    -------
    result : relay.Expr
        Tensor with the indices of elements that are non-zero.

    Examples
    --------
    .. code-block:: python

        condition = [[True, False], [False, True]]
        relay.argwhere(condition) = [[0, 0], [1, 1]]
    """
    return _make.argwhere(condition)
def scatter_elements(data, indices, updates, axis=0, reduction="update"):
    """Scatter elements with updating data by reduction of values in updates
    at positions defined by indices.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    indices : relay.Expr
        The index locations to update.

    updates : relay.Expr
        The values to update.

    axis : int
        The axis to scatter elements on. It is zero by default.

    reduction : string, optional
        The reduction mode for scatter. Choice is from
        ["update", "add", "mul", "mean", "min", "max"].
        If update, the update values will replace the input data
        If add, the update values will be added to the input data
        If mul, the input data will be multiplied on the update values
        If mean, the input data will be mean between the update values and the input data
        If min, there is choice of minimal between the update values and the input data
        If max, there is choice of maximal between the update values and the input data
        It is "update" by default

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    return _make.scatter_elements(data, indices, updates, axis, reduction)
def scatter_nd(data, indices, updates, mode="update"):
    """Scatter values from an array and update.

    See :py:func:`tvm.topi.scatter` for how data is scattered.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    indices : relay.Expr
        The index locations to update.

    updates : relay.Expr
        The values to update.

    mode : string, optional
        The accumulation mode for scatter: "update", "add", "mul", "min" or "max".
        If update, the update values will replace the input data
        If add, the update values will be added to the input data
        If mul, the update values will be multiply to the input data
        If min, there is choice of minimal between the update values and the input data
        If max, there is choice of maximal between the update values and the input data
        It is "update" by default

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    return _make.scatter_nd(data, indices, updates, mode)
def reshape_like(data, shape_like, lhs_begin=0, lhs_end=None, rhs_begin=0, rhs_end=None):
    """Reshapes the input tensor by the size of another tensor.

    For an input tensor with shape ``(d0, d1, ..., d(k-1))``, `reshape_like` operation reshapes
    the input tensor into an output tensor with the same shape as the second input tensor,
    in particular reshaping the dimensions of `data` in `[lhs_begin, lhs_end)` using the
    dimensions from `shape_like` in `[rhs_begin, rhs_end)`.

    .. note::
        Sizes for `data` and the output tensor should be compatible.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    shape_like : relay.Expr
        The tensor to reshape data like. Should be compatible with the original shape on the
        reshaped dimensions.

    lhs_begin : int, optional
        The axis of data to begin reshaping. Default is 0.

    lhs_end : int or None, optional
        The axis of data where reshaping should stop, exclusive. Default is None which
        reshapes to the end.

    rhs_begin : int, optional
        The axis of shape_like where the target shape begins. Default is 0.

    rhs_end : int or None, optional
        The axis of shape_like where the target shape ends, exclusive. Default is None which
        extends to the end.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        data.shape == (1, 2, 3, 4)
        shape_like.shape == (6, 2, 2, 3)

        ret = relay.reshape_like(data, shape_like, lhs_begin=1, rhs_end=3)
        ret.shape == (1, 6, 2, 2)
    """
    return _make.reshape_like(data, shape_like, lhs_begin, lhs_end, rhs_begin, rhs_end)
def take(data, indices, axis=None, batch_dims=0, mode="clip"):
    """Take elements from an array along an axis.

    Parameters
    ----------
    data : relay.Expr
        The source array.

    indices : relay.Expr
        The indices of the values to extract.

    axis : int, optional
        The axis over which to select values. By default,
        the flattened input array is used.

    batch_dims : int, optional
        The number of batch dimensions. By default is 0.

    mode : str, optional
        Specifies how out-of-bound indices will behave [clip, wrap, fast].
        clip: clip to the range (default).
        wrap: wrap around the indices.
        fast: no clip or wrap around (user must make sure indices are in-bound).

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    # NOTE: the FFI signature takes batch_dims before axis, unlike this
    # Python-level signature.
    return _make.take(data, indices, batch_dims, axis, mode)
def full(fill_value, shape=(), dtype=""):
    """Fill array with scalar value.

    Parameters
    ----------
    fill_value : relay.Expr
        The value to fill. Must be a scalar.
    shape : tuple of int or relay.Expr, optional
        The shape of the target.
    dtype : data type, optional (defaults to data type of the fill value)
        The data type of the target.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    if isinstance(shape, Constant):
        # A constant shape tensor is static: unpack it into a Python list.
        shape = list(shape.data.numpy())
    if isinstance(shape, Expr):
        # Symbolic shape: dispatch to the dynamic variant of the op.
        return _dyn_make.full(fill_value, shape, dtype)
    if isinstance(shape, int):
        shape = [shape]
    elif isinstance(shape, (list, tuple)):
        shape = list(shape)
    return _make.full(fill_value, shape, dtype)
def full_like(data, fill_value):
    """Return an array of the same shape and type as ``data``, filled with a
    scalar value.

    Parameters
    ----------
    data : relay.Expr
        The input tensor providing the output shape and type.
    fill_value : relay.Expr
        The scalar value to fill.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    return _make.full_like(data, fill_value)
def arange(start, stop=None, step=None, dtype="float32"):
    """Return evenly spaced values within a given interval.

    .. note::
        Similar to ``numpy.arange``. When only one argument is given, it is
        used as `stop` while `start` takes the default value 0.
        Warning: behavior is undefined when dtype is incompatible with
        start/stop/step, and may differ from numpy, MXNet, pytorch, etc.

    Parameters
    ----------
    start : relay.Expr, optional
        Start of interval (inclusive). Defaults to 0.
    stop : relay.Expr
        Stop of interval (exclusive).
    step : relay.Expr, optional
        Spacing between values. Defaults to 1.
    dtype : str, optional
        The target data type.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.

    Examples
    --------
    .. code-block:: python

        relay.arange(5) = [0, 1, 2, 3, 4]
        relay.arange(1, 5) = [1, 2, 3, 4]
        relay.arange(1, 5, 1.5) = [1, 2.5, 4]
    """
    if stop is None:
        # Single-argument form: arange(stop) with an implicit start of 0.
        start, stop = const(0, dtype=dtype), start
    if step is None:
        step = const(1, dtype=dtype)
    return _make.arange(start, stop, step, dtype)
def meshgrid(data, indexing="ij"):
    """Create coordinate matrices from coordinate vectors.

    .. note::
        Similar to ``numpy.meshgrid``.

    Parameters
    ----------
    data : Union(List[relay.Expr], Tuple[relay.Expr])
        A list of tensors, which must be either scalars or 1-D vectors.
    indexing : str, optional
        Indexing mode: "ij" for matrix indexing or "xy" for Cartesian
        indexing.

    Returns
    -------
    ret : relay.Tuple([relay.Expr, relay.Expr])
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [1, 2, 3]
        y = [4, 5]
        gx, gy = relay.meshgrid([x, y])
        gx = [[1., 1.],
              [2., 2.],
              [3., 3.]]
        gy = [[4., 5.],
              [4., 5.],
              [4., 5.]]
    """
    tensors = list(data)
    # One output grid per input vector.
    return TupleWrapper(_make.meshgrid(Tuple(tensors), indexing), len(tensors))
def repeat(data, repeats, axis):
    """Repeats elements of an array.

    By default, repeat flattens the input array into 1-D and then repeats
    its elements.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.
    repeats : int
        The number of repetitions for each element.
    axis : int
        The axis along which to repeat values. Negative numbers count from
        the back. By default the flattened input array is used and a flat
        output array is returned.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[1, 2], [3, 4]]
        relay.repeat(x, repeats=2) = [1., 1., 2., 2., 3., 3., 4., 4.]
        relay.repeat(x, repeats=2, axis=1) = [[1., 1., 2., 2.],
                                              [3., 3., 4., 4.]]
    """
    return _make.repeat(data, repeats, axis)
def tile(data, reps):
    """Repeats the whole array multiple times.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    reps : tuple of int or relay.Expr
        The number of times to repeat the tensor data.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[1, 2], [3, 4]]
        relay.tile(x, reps=(2,3)) = [[1., 2., 1., 2., 1., 2.],
                                     [3., 4., 3., 4., 3., 4.],
                                     [1., 2., 1., 2., 1., 2.],
                                     [3., 4., 3., 4., 3., 4.]]
        relay.tile(x, reps=(2,)) = [[1., 2., 1., 2.],
                                    [3., 4., 3., 4.]]

    Notes
    -----
    Each dim size of reps must be a positive integer. If reps has length d,
    the result has dimension max(d, data.ndim); if data.ndim < d, data is
    promoted to d dimensions by prepending new axes. If data.ndim >= d,
    reps is promoted to data.ndim by prepending 1's.
    """
    if isinstance(reps, Constant):
        # Static constant reps: unpack into a Python list.
        reps = list(reps.data.numpy())
    if isinstance(reps, Expr):
        # Symbolic reps: use the dynamic variant of the op.
        return _dyn_make.tile(data, reps)
    return _make.tile(data, reps)
def reverse(data, axis):
    """Reverses the order of elements along a given axis while preserving
    the array shape.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    axis : int
        The axis along which to reverse elements.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[1., 2.], [3., 4.]]
        relay.reverse(x, axis=0) = [[3., 4.], [1., 2.]]
        relay.reverse(x, axis=1) = [[2., 1.], [4., 3.]]
    """
    return _make.reverse(data, axis)
def reverse_sequence(data, seq_lengths, seq_axis=1, batch_axis=0):
    """Reverse the tensor for variable length slices.

    The input is first sliced along ``batch_axis``, then the elements of
    each slice are reversed along ``seq_axis``.

    Parameters
    ----------
    data : relay.Expr
        The tensor to be reversed.
    seq_lengths : relay.Expr
        A 1-D tensor of length data.dims[batch_axis], of type int32 or
        int64. Entries greater than data.dims[seq_axis] are rounded down to
        it; entries below 1 are rounded up to 1.
    seq_axis : int, optional
        The axis along which elements are reversed. Default is 1.
    batch_axis : int, optional
        The axis along which the tensor is sliced. Default is 0.

    Returns
    -------
    ret : relay.Expr
        The computed result, with the same shape and type as the input.

    Examples
    --------
    .. code-block:: python

        x = [[0, 1, 2, 3],
             [4, 5, 6, 7],
             [8, 9, 10, 11],
             [12, 13, 14, 15]]
        relay.reverse(x, [1, 2, 3, 4], 0, 1) = [[0, 5, 10, 15],
                                                [4, 1, 6, 11],
                                                [8, 9, 2, 7],
                                                [12, 13, 14, 3]]
        relay.reverse(x, [1, 2, 3, 4], 1, 0) = [[0, 1, 2, 3],
                                                [5, 4, 6, 7],
                                                [10, 9, 8, 11],
                                                [15, 14, 13, 12]]
    """
    return _make.reverse_sequence(data, seq_lengths, seq_axis, batch_axis)
def where(condition, x, y):
    """Select elements from either x or y depending on the value of the
    condition.

    .. note::
        Shapes of condition, x, and y must be broadcastable to a common
        shape. Semantics follow the numpy where function:
        https://numpy.org/doc/stable/reference/generated/numpy.where.html

    Parameters
    ----------
    condition : relay.Expr
        Where True, yield x; otherwise yield y.
    x : relay.Expr
        The first array or scalar to be selected.
    y : relay.Expr
        The second array or scalar to be selected.

    Returns
    -------
    result : relay.Expr
        The selected array. The output shape is the broadcasted shape from
        condition, x, and y.

    Examples
    --------
    .. code-block:: python

        x = [[1, 2], [3, 4]]
        y = [[5, 6], [7, 8]]
        condition = [[0, 1], [-1, 0]]
        relay.where(condition, x, y) = [[5, 2], [3, 8]]
        condition = [[1], [0]]
        relay.where(condition, x, y) = [[1, 2], [7, 8]]
    """
    return _make.where(condition, x, y)
def broadcast_to(data, shape):
    """Return an array of the same type as the input, broadcasted to the
    provided shape.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.
    shape : tuple of int or relay.Expr
        The shape to broadcast to.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    if isinstance(shape, Constant):
        # A constant shape tensor is static: unpack it into a Python list.
        shape = list(shape.data.numpy())
    if isinstance(shape, Expr):
        # Symbolic shape: dispatch to the dynamic variant of the op.
        return _dyn_make.broadcast_to(data, shape)
    if isinstance(shape, int):
        shape = [shape]
    elif isinstance(shape, (list, tuple)):
        shape = list(shape)
    return _make.broadcast_to(data, shape)
def broadcast_to_like(data, broadcast_type):
    """Broadcast the input tensor to the shape of a second tensor.

    The original docstring claimed the result has "the same shape and type
    as the input array" (copied from full_like); in fact the result takes
    the shape of ``broadcast_type``.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.
    broadcast_type : relay.Expr
        The tensor whose shape the result is broadcast to.

    Returns
    -------
    result : relay.Expr
        The resulting tensor, with the shape of ``broadcast_type``.
    """
    return _make.broadcast_to_like(data, broadcast_type)
def collapse_sum_like(data, collapse_type):
    """Sum the input tensor down to the shape of a second tensor.

    The original docstring claimed the result has "the same shape and type
    as the input array" (copied from full_like); in fact the result is a
    summation of ``data`` collapsed to the shape of ``collapse_type``.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.
    collapse_type : relay.Expr
        The tensor whose shape the result is collapsed to.

    Returns
    -------
    result : relay.Expr
        The resulting tensor, with the shape of ``collapse_type``.
    """
    return _make.collapse_sum_like(data, collapse_type)
def collapse_sum_to(data, shape):
    """Return a summation of data to the specified shape.

    Parameters
    ----------
    data : relay.Expr
        The input tensor.
    shape : relay.Expr
        Shape to collapse to.

    Returns
    -------
    result : relay.Expr
        The resulting tensor.
    """
    if isinstance(shape, (list, tuple)):
        # Wrap Python shapes as int32 constant tensors for the FFI call.
        shape = const(list(shape), "int32")
    return _make.collapse_sum_to(data, shape)
def split(data, indices_or_sections, axis=0):
    """Split input tensor along axis by sections or indices.

    If indices_or_sections is an integer, the input is divided equally
    along the given axis (an error is raised if an equal split is not
    possible). If it is a tuple of sorted integers, its entries indicate
    where along the axis the array is split.

    Parameters
    ----------
    data : relay.Expr
        The source array.
    indices_or_sections : int or tuple of int
        Indices or sections to split into. Accepts an int or a tuple.
    axis : int, optional
        The axis over which to split.

    Returns
    -------
    ret : relay.Tuple([relay.Expr, relay.Expr])
        The computed result.
    """
    # n sections produce n outputs; k split points produce k + 1 outputs.
    num_outputs = (
        indices_or_sections
        if isinstance(indices_or_sections, int)
        else len(indices_or_sections) + 1
    )
    return TupleWrapper(_make.split(data, indices_or_sections, axis), num_outputs)
def strided_slice(data, begin, end, strides=None, axes=None, slice_mode="end"):
    """Strided slice of an array.
    Parameters
    ----------
    data : relay.Expr
        The source array to be sliced.
    begin : relay.Expr, Tuple[int], or List[int]
        The indices to begin with in the slicing.
    end : relay.Expr, Tuple[int], or List[int]
        Indices indicating end of the slice.
    strides : relay.Expr, Tuple[int], or List[int], optional
        Specifies the stride values. It can be negative. In that case,
        the input tensor will be reversed in that particular axis.
    axes : Tuple[int] or List[int], optional
        Axes along which slicing is applied. When it is specified, the length of begin, end,
        strides, and axes must be equal. Moreover, begin, end, strides, and axes must be
        static (cannot be relay.Expr). Axes argument for dynamic parameter slicing is
        not supported yet.
    slice_mode : str, optional
        The slice mode [end, size].
        end: The ending indices for the slice [default].
        size: The input strides will be ignored. Input end in this mode indicates
        the size of a slice starting at the location specified by begin. If end[i]
        is -1, all remaining elements in that dimension are included in the slice.
    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    strides = strides or [1]
    # Constant tensors are static: unpack them into Python lists so the
    # static path below can be taken.
    if isinstance(begin, Constant):
        begin = list(begin.data.numpy())
    if isinstance(end, Constant):
        end = list(end.data.numpy())
    if isinstance(strides, Constant):
        strides = list(strides.data.numpy())
    # If any of begin/end/strides remains symbolic, take the dynamic path.
    if isinstance(begin, Expr) or isinstance(end, Expr) or isinstance(strides, Expr):
        # Promote any remaining Python lists to constant tensors so all
        # three arguments are relay expressions.
        if isinstance(begin, (tuple, list)):
            begin = const(list(begin))
        if isinstance(end, (tuple, list)):
            end = const(list(end))
        if isinstance(strides, (tuple, list)):
            strides = const(list(strides))
        ishape = cast_like(shape_of(data), begin)
        ishape_slice = slice_like(ishape, begin)
        # Normalize negative begin indices (add the dim size), then clamp
        # begin to the dim size. Order matters: negatives must be resolved
        # before clamping.
        begin = _make.where(begin < cast_like(const(0), begin), begin + ishape_slice, begin)
        begin = _make.where(begin >= ishape_slice, ishape_slice, begin)
        # TODO(masahi): Support axes argument in dynamic strided slice
        assert axes is None, "Axes argument for dynamic parameter slicing is not supported yet."
        return _dyn_make.strided_slice(data, begin, end, strides, slice_mode)
    return _make.strided_slice(data, begin, end, strides, slice_mode, axes)
def strided_set(data, v, begin, end, strides=None):
    """Strided set of an array.

    Parameters
    ----------
    data : relay.Expr
        The source array to be sliced.
    v : relay.Expr
        The data to be set.
    begin : relay.Expr, Tuple[int], or List[int]
        The indices to begin with in the slicing.
    end : relay.Expr, Tuple[int], or List[int]
        Indices indicating end of the slice.
    strides : relay.Expr, Tuple[int], or List[int], optional
        Specifies the stride values. It can be negative; in that case the
        input tensor will be reversed in that particular axis.

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """

    def _as_expr(value):
        # Promote Python tuples/lists to constant tensors; pass relay
        # expressions through unchanged.
        return const(list(value)) if isinstance(value, (tuple, list)) else value

    strides = strides or const([1], dtype="int32")
    return _make.strided_set(data, v, _as_expr(begin), _as_expr(end), _as_expr(strides))
def slice_like(data, shape_like, axes=None):
    """Slice the first input with respect to the second input.

    For an input array with shape ``(d1, d2, ..., dk)``, `slice_like`
    slices the input array corresponding to the size of the second array.
    By default it slices on all axes.

    Parameters
    ----------
    data : relay.Expr
        The source array.
    shape_like : relay.Expr
        An array from whose shape the result shape is computed.
    axes : Tuple[int] or List[int], optional
        Axes on which the input data is sliced according to the
        corresponding size of the second input. By default all axes are
        sliced. Negative axes count in reverse.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.slice_like(data, shape_like, axes)
def layout_transform(data, src_layout, dst_layout):
    """Transform the layout of a tensor.

    Parameters
    ----------
    data : relay.Expr
        The source tensor to be transformed.
    src_layout : str
        The source layout (e.g. NCHW).
    dst_layout : str
        The destination layout (e.g. NCHW16c).

    Returns
    -------
    ret : relay.Expr
        The transformed tensor.
    """
    return _make.layout_transform(data, src_layout, dst_layout)
def reverse_reshape(data, newshape):
    """Reshape the input array, inferring special values from right to left.

    The special values have the same semantics as
    :py:class:`tvm.relay.reshape`; the difference is that they are inferred
    from right to left rather than left to right:

    .. code-block:: python

        data.shape = (10,5,4), newshape = (-1,0), reshape results in (40,5)
        data.shape = (10,5,4), newshape = (-1,0), reverse_reshape results in (50,4)

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    newshape : Union[int, Tuple[int], List[int]]
        The new shape. Should be compatible with the original shape.

    Returns
    -------
    result : relay.Expr
        The reshaped result.
    """
    # Normalize a bare int to a single-element shape.
    shape = [newshape] if isinstance(newshape, int) else newshape
    return _make.contrib_reverse_reshape(data, list(shape))
def gather(data, axis, indices):
    """Gather values along given axis from given indices.

    E.g. for a 3D tensor, output is computed as:

    .. code-block:: python

        out[i][j][k] = data[indices[i][j][k]][j][k]  # if axis == 0
        out[i][j][k] = data[i][indices[i][j][k]][k]  # if axis == 1
        out[i][j][k] = data[i][j][indices[i][j][k]]  # if axis == 2

    ``indices`` must have the same shape as ``data``, except at dimension
    ``axis`` which must just be not null. Output will have the same shape
    as ``indices``.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    axis : int
        The axis along which to index. Negative axis is supported.
    indices : relay.Expr
        The indices of values to gather.

    Returns
    -------
    ret : relay.Expr
        The computed result, with the same shape as ``indices``.

    Examples
    --------
    .. code-block:: python

        data = [[1, 2], [3, 4]]
        axis = 1
        indices = [[0, 0], [1, 0]]
        relay.gather(data, axis, indices) = [[1, 1], [4, 3]]
    """
    return _make.gather(data, axis, indices)
def gather_nd(data, indices, batch_dims=0, index_rank=None):
    """Gather elements or slices from data into a tensor whose shape is
    defined by indices.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    indices : relay.Expr
        The shape of output tensor.
    batch_dims : int, optional
        The number of batch dimensions.
    index_rank : int, optional
        The size of an indexing tuple, a fixed value equal to
        indices.shape[0]. Only needed when other dimensions of indices are
        dynamic.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        data = [[0, 1], [2, 3]]
        indices = [[1, 1, 0], [0, 1, 0]]
        relay.gather_nd(data, indices) = [2, 3, 0]

        data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
        indices = [[0, 1], [1, 0]]
        relay.gather_nd(data, indices) = [[3, 4], [5, 6]]

        data = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
        indices = [[1, 0]]
        relay.gather_nd(data, indices, batch_dims=1) = [[2, 3],[4, 5]]
    """
    return _make.gather_nd(data, indices, batch_dims, index_rank)
def sequence_mask(data, valid_length, mask_value=0, axis=0):
    """Set all elements outside the expected length of each sequence to a
    constant value.

    This function takes an n-dimensional input array of the form
    [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...] and
    returns an array of the same shape.

    Parameters
    ----------
    data : relay.Expr
        The input data.
    valid_length : relay.Expr
        The expected (valid) length of each sequence in the tensor.
    mask_value : float, optional
        The masking value.
    axis : int, optional
        The axis of the length dimension.

    Returns
    -------
    ret : relay.Expr
        The computed result.

    Examples
    --------
    .. code-block:: python

        x = [[[  1.,   2.,   3.], [  4.,   5.,   6.]],
             [[  7.,   8.,   9.], [ 10.,  11.,  12.]],
             [[ 13.,  14.,  15.], [ 16.,  17.,  18.]]]
        relay.sequence_mask(x, valid_length=[1, 1]) =
             [[[  1.,   2.,   3.], [  4.,   5.,   6.]],
              [[  0.,   0.,   0.], [  0.,   0.,   0.]],
              [[  0.,   0.,   0.], [  0.,   0.,   0.]]]
        relay.sequence_mask(x, valid_length=[2, 3], mask_value=0.1) =
             [[[  1.,   2.,   3.], [  4.,   5.,   6.]],
              [[  7.,   8.,   9.], [ 10.,  11.,  12.]],
              [[ 0.1,  0.1,  0.1], [ 16.,  17.,  18.]]]
    """
    return _make.sequence_mask(data, valid_length, mask_value, axis)
def one_hot(indices, on_value, off_value, depth, axis, dtype):
    """Return a one-hot tensor where the locations represented by indices
    take value on_value and all other locations take value off_value.

    The final dimension is
    <indices outer dimensions> x depth x <indices inner dimensions>.

    Parameters
    ----------
    indices : relay.Expr
        Locations to set to on_value.
    on_value : relay.Expr
        Value to fill at indices.
    off_value : relay.Expr
        Value to fill at all other positions besides indices.
    depth : int or relay.Expr
        Depth of the one-hot dimension.
    axis : int
        Axis to fill.
    dtype : str
        Data type of the output tensor.

    Returns
    -------
    ret : relay.Expr
        The one-hot tensor.

    Examples
    --------
    .. code-block:: python

        indices = [0, 1, 2]
        relay.one_hot(indices, 3) =
            [[1, 0, 0],
             [0, 1, 0],
             [0, 0, 1]]
    """
    if isinstance(depth, Constant):
        # A constant depth is static: unpack the scalar value.
        depth = depth.data.numpy().item()
    # Symbolic depth dispatches to the dynamic variant of the op.
    maker = _dyn_make.one_hot if isinstance(depth, Expr) else _make.one_hot
    return maker(indices, on_value, off_value, depth, axis, dtype)
def unravel_index(indices, shape):
    """Convert a flat index or array of flat indices into a tuple of
    coordinate arrays.

    Parameters
    ----------
    indices : relay.Expr
        An integer array containing indices.
    shape : relay.Expr
        The shape of the array.

    Returns
    -------
    result : relay.Expr
        The tuple of coordinate arrays.

    Examples
    --------
    .. code-block:: python

        relay.unravel_index([22, 41, 37], [7, 6]) =
            [[3, 6, 6],
             [4, 5, 1]]
    """
    return _make.unravel_index(indices, shape)
def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0):
    """Convert a sparse representation into a dense tensor.

    Parameters
    ----------
    sparse_indices : relay.Expr
        A 0-D, 1-D, or 2-D tensor of integers containing the locations of
        sparse values.
    output_shape : relay.Expr
        A list of integers: the shape of the dense output tensor.
    sparse_values : relay.Expr
        A 0-D or 1-D tensor containing the sparse values for the sparse
        indices.
    default_value : relay.Expr, optional
        A 0-D tensor containing the default value for the remaining
        locations. Defaults to 0.

    Returns
    -------
    result : relay.Expr
        Dense tensor of shape output_shape, with the same type as
        sparse_values.

    Examples
    --------
    .. code-block:: python

        relay.sparse_to_dense([[0, 0], [1, 1]], [2, 2], [3, 3], 0) =
            [[3, 0],
             [0, 3]]
    """
    if default_value == 0:
        # Promote the Python default (0) to a relay constant for the FFI.
        default_value = const(0)
    if isinstance(output_shape, Expr):
        # Symbolic output shape: use the dynamic variant of the op.
        return _dyn_make.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value)
    return _make.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value)
def matrix_set_diag(data, diagonal, k=0, align="RIGHT_LEFT"):
    """Return a tensor with the diagonals of the input tensor replaced by
    the provided diagonal values.

    Parameters
    ----------
    data : relay.Expr
        Input tensor.
    diagonal : relay.Expr
        Values to be filled in the diagonal.
    k : int or tuple of int, optional
        Diagonal offset(s): the diagonal or range of diagonals to set
        (0 by default). Positive means superdiagonal, 0 the main diagonal,
        negative means subdiagonals. k can be a single integer (one
        diagonal) or a pair giving the low and high ends of a matrix band;
        k[0] must not be larger than k[1].
    align : string, optional
        Some diagonals are shorter than max_diag_len and need padding.
        align specifies how superdiagonals and subdiagonals are aligned,
        respectively; one of "RIGHT_LEFT" (default), "LEFT_RIGHT",
        "LEFT_LEFT", "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to
        the right (left-pads the row) and subdiagonals to the left
        (right-pads the row) — the packing format LAPACK uses. cuSPARSE
        uses the opposite, "LEFT_RIGHT".

    Returns
    -------
    result : relay.Expr
        New tensor with the given diagonal values.

    Examples
    --------
    .. code-block:: python

        data = [[[7, 7, 7, 7],
                 [7, 7, 7, 7],
                 [7, 7, 7, 7]],
                [[7, 7, 7, 7],
                 [7, 7, 7, 7],
                 [7, 7, 7, 7]]]
        diagonal = [[1, 2, 3],
                    [4, 5, 6]]
        relay.matrix_set_diag(input, diagonal) =
            [[[1, 7, 7, 7],
              [7, 2, 7, 7],
              [7, 7, 3, 7]],
             [[4, 7, 7, 7],
              [7, 5, 7, 7],
              [7, 7, 6, 7]]]
    """
    # Normalize k to a (low, high) band; a single-element sequence or a
    # bare int denotes one diagonal.
    if isinstance(k, (tuple, list)):
        k_low = k[0]
        k_high = k[1] if len(k) >= 2 else k[0]
    else:
        k_low = k_high = k
    # The first/last five characters of align encode the super-/subdiagonal
    # alignment respectively.
    super_right = align[:5] == "RIGHT"
    sub_right = align[-5:] == "RIGHT"
    return _make.matrix_set_diag(data, diagonal, k_low, k_high, super_right, sub_right)
def adv_index(inputs):
    """Numpy-style advanced indexing: index with a list of tensors.

    Parameters
    ----------
    inputs : Union(List[relay.Expr], Tuple[relay.Expr])
        Input tensor and indices. The first tensor is the input data and
        the rest are the indices.

    Returns
    -------
    result : relay.Expr
        Output tensor.
    """
    return _make.adv_index(Tuple(inputs))
def sparse_fill_empty_rows(sparse_indices, sparse_values, dense_shape, default_value):
    """Fill rows in a sparse matrix that do not contain any values. Values
    are placed in the first column of empty rows. The sparse array is in
    COO format. Returns a TupleWrapper with 3 outputs.

    Parameters
    ----------
    sparse_indices : relay.Expr
        A 2-D tensor[N, ndims] of integers containing the locations of
        sparse values, where N is the number of sparse values and n_dim is
        the number of dimensions of the dense_shape. The first column of
        this parameter must be sorted in ascending order.
    sparse_values : relay.Expr
        A 1-D tensor[N] containing the sparse values for the sparse
        indices.
    dense_shape : relay.Expr
        A 1-D tensor[ndims] which contains the shape of the dense output
        tensor.
    default_value : relay.Expr
        A 1-D tensor[1] containing the default value for the remaining
        locations.

    Returns
    -------
    new_sparse_indices : relay.Expr
        A 2-D tensor[?, ndims] of integers containing the locations of the
        new sparse indices. The first column of the output must be sorted
        in ascending order.
    new_sparse_values : relay.Expr
        A 1-D tensor[?] containing the sparse values for the sparse
        indices.
    empty_row_indicator : relay.Expr
        A 1-D tensor[dense_shape[0]] of zeros and ones indicating whether
        each row is empty or full respectively.

    Note
    ----
    This op exactly follows the documentation here:
    https://www.tensorflow.org/api_docs/python/tf/sparse/fill_empty_rows
    There are two exceptions:
    1. Input sparse indices are expected to be in row-major order.
    2. Empty Row Indicator has int64 output type with 1 (True) and
       0 (False).

    Examples
    --------
    .. code-block:: python

        sparse_indices = [[0, 1],
                          [0, 3],
                          [2, 0],
                          [3, 1]]
        sparse_values = [1, 2, 3, 4]
        default_value = [10]
        dense_shape = [5, 6]
        new_sparse_indices, empty_row_indicator, new_sparse_values =
            relay.sparse_fill_empty_rows(
                sparse_indices,
                sparse_values,
                default_value,
                dense_shape)
        new_sparse_indices = [[0, 1],
                              [0, 3],
                              [1, 0],
                              [2, 0],
                              [3, 1],
                              [4, 0]]
        empty_row_indicator = [False, True, False, False, True]
        new_sparse_values = [1, 2, 10, 3, 4, 10]
    """
    raw_indices, raw_values, raw_indicator = TupleWrapper(
        _make.sparse_fill_empty_rows(sparse_indices, sparse_values, dense_shape, default_value), 3
    )
    # Cast the outputs back to the callers' input dtypes; the indicator is
    # exposed as a boolean mask.
    return Tuple(
        (
            cast_like(raw_indices, sparse_indices),
            cast_like(raw_values, sparse_values),
            cast(raw_indicator, "bool"),
        )
    )
def sparse_reshape(sparse_indices, prev_shape, new_shape):
    """Reshape a sparse tensor. The sparse array is in COO format.

    Parameters
    ----------
    sparse_indices : relay.Expr
        A 2-D tensor[N, n_dim] of integers containing the locations of
        sparse values, where N is the number of sparse values and n_dim is
        the number of dimensions of the dense_shape.
    prev_shape : relay.Expr
        A 1-D tensor containing the previous shape of the dense tensor.
    new_shape : relay.Expr
        A 1-D tensor containing the new shape of the dense tensor.

    Returns
    -------
    result : relay.Expr
        Output tensor.

    Examples
    --------
    .. code-block:: python

        sparse_indices = [[0, 0, 0],
                          [0, 0, 1],
                          [0, 1, 0],
                          [1, 0, 0],
                          [1, 2, 3]]
        prev_shape = [2, 3, 6]
        new_shape = [9, -1]
        new_sparse_indices, new_shape = relay.sparse_reshape(sparse_indices,
                                                             prev_shape,
                                                             new_shape)
        new_sparse_indices = [[0, 0],
                              [0, 1],
                              [1, 2],
                              [4, 2],
                              [8, 1]]
        new_shape = [9, 4]
    """
    # Two outputs: the remapped indices and the resolved new shape.
    return TupleWrapper(_make.sparse_reshape(sparse_indices, prev_shape, new_shape), 2)
def segment_sum(data, segment_ids, num_segments=None):
    """Computes the sum along segment_ids along axis 0. If multiple segment_ids reference the same
    location their contributions add up.
    result[index, j, k, ...] = Σi... data[i, j, k,..] where index = segment_ids[i]
    This op is much better understood with visualization articulated in the following links and
    examples at the end of this docstring.
    https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum
    https://caffe2.ai/docs/sparse-operations.html#null__unsorted-segment-reduction-ops
    Parameters
    ----------
    data : relay.Expr
        Input tensor. It can be of any type and multi-dimensional.
    segment_ids : relay.Expr
        A 1-D int32/int64 tensor containing the segment_ids of the rows to calculate the output
        sum upon. It defines a mapping from the zeroth dimension of data onto segment_ids. The
        segment_ids tensor should be the size of the first dimension, d0, with consecutive IDs
        in the range 0 to k, where k<d0. In particular, a segmentation of a matrix tensor is a
        mapping of rows to segments. This tensor doesn't need to be sorted.
    num_segments : int, optional
        An integer describing the shape of the zeroth dimension. If unspecified, it is calculated
        equivalent to the number of unique segment_ids.
    Returns
    -------
    result : relay.Expr
        Output tensor.
    Examples
    --------
    .. code-block:: python
        data = [[1, 2, 3, 4],
                [4, -3, 2, -1],
                [5, 6, 7, 8]]
        segment_ids = [0, 0, 1]
        relay.segment_sum(data, segment_ids) = [[5, -1, 5, 3],
                                                [5, 6, 7, 8]]
        data = [[1, 2, 3, 4],
                [4, -3, 2, -1],
                [5, 6, 7, 8]]
        segment_ids = [2, 0, 0]
        num_segments = 3
        segment_sum(data, segment_ids, num_segments) = [[9, 3, 9, 7],
                                                        [0, 0, 0, 0],
                                                        [1, 2, 3, 4]]
    """
    # Scalar 1 in the dtype of segment_ids, reused for shape arithmetic.
    one_tensor = cast_like(const([1]), segment_ids)
    # Resolve the number of output rows (max_segments).
    if num_segments:
        if isinstance(num_segments, int):
            max_segments = const([num_segments])
            max_segments = cast_like(max_segments, segment_ids)
        else:
            max_segments = cast_like(num_segments, segment_ids)
    else:
        # Not supplied: infer as max(segment_ids) + 1.
        max_segments = _make.add(reshape(_make.max(segment_ids, [0], False, False), -1), one_tensor)
    # Shape of data with the leading (row) dimension dropped.
    data_offrow_shape = strided_slice(_make.shape_of(data, "int32"), [1], [-1], slice_mode="size")
    data_offrow_shape = cast_like(data_offrow_shape, max_segments)
    # Output shape: [max_segments, *data.shape[1:]].
    new_shape = _make.concatenate(Tuple([max_segments, data_offrow_shape]), 0)
    # Tile reps that expand segment_ids to cover every off-row element of
    # data (reversed off-row shape, then 1 for the id axis itself).
    segment_ids_tiled_shape = _make.concatenate(
        Tuple([reverse(data_offrow_shape, 0), one_tensor]), 0
    )
    expanded_segment_ids = tile(segment_ids, segment_ids_tiled_shape)
    # Transpose so the segment-id axis lines up with axis 0 of data.
    scatter_add_segment_ids = transpose(expanded_segment_ids)
    # Zero-initialized accumulator in data's dtype.
    src = cast_like(_dyn_make.zeros(new_shape, "float64"), data)
    # Scatter-add each row of data into its segment's row.
    return scatter_elements(src, scatter_add_segment_ids, data, axis=0, reduction="add")
def cumsum(data, axis=None, dtype=None, exclusive=None):
    """Numpy-style cumsum op: return the cumulative inclusive sum of the
    elements along a given axis.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    axis : int, optional
        Axis along which the cumulative sum is computed. The default
        (None) computes the cumsum over the flattened array.
    dtype : string, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. Defaults to the dtype of data.
    exclusive : bool, optional
        If true, return an exclusive sum in which the first element is not
        included: the j-th output element is the sum of the first (j-1)
        elements. Otherwise it is the sum of the first j elements.

    Returns
    -------
    result : relay.Expr
        Same size as data, and same shape as data if axis is not None.
        If axis is None, the result is a 1-d array.

    Examples
    --------
    .. code-block:: python

        a = [[1, 2, 3], [4, 5, 6]]

        cumsum(a)  # if axis is not provided, cumsum is done over the flattened input.
        -> [ 1,  3,  6, 10, 15, 21]

        cumsum(a, dtype="float32")
        -> [  1.,   3.,   6.,  10.,  15.,  21.]

        cumsum(a, axis=0)  # sum over rows for each of the 3 columns
        -> [[1, 2, 3],
            [5, 7, 9]]

        cumsum(a, axis=1)
        -> [[ 1,  3,  6],
            [ 4,  9, 15]]

        a = [1, 0, 1, 0, 1, 1, 0]  # a is a boolean array
        cumsum(a, dtype=int32)  # dtype should be provided to get the expected results
        -> [1, 1, 2, 2, 3, 4, 4]
    """
    return _make.cumsum(data, axis, dtype, exclusive)
def cumprod(data, axis=None, dtype=None, exclusive=None):
    """Numpy-style cumprod op: return the cumulative inclusive product of
    the elements along a given axis.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    axis : int, optional
        Axis along which the cumulative product is computed. The default
        (None) computes the cumprod over the flattened array.
    dtype : string, optional
        Type of the returned array and of the accumulator in which the
        elements are multiplied. Defaults to the dtype of data.
    exclusive : bool, optional
        If true, return an exclusive product in which the first element is
        not included: the j-th output element is the product of the first
        (j-1) elements. Otherwise it is the product of the first j
        elements. The product of zero elements is 1.

    Returns
    -------
    result : relay.Expr
        Same size as data, and same shape as data if axis is not None.
        If axis is None, the result is a 1-d array.

    Examples
    --------
    .. code-block:: python

        a = [[1, 2, 3], [4, 5, 6]]

        cumprod(a)  # if axis is not provided, cumprod is done over the flattened input.
        -> [  1,   2,   6,  24, 120, 720]

        cumprod(a, dtype="float32")
        -> [  1.,   2.,   6.,  24., 120., 720.]

        cumprod(a, axis=0)  # multiply over rows for each of the 3 columns
        -> [[ 1,  2,  3],
            [ 4, 10, 18]]

        cumprod(a, axis=1)
        -> [[  1,   2,   6],
            [  4,  20, 120]]

        a = [1, 1, 1, 0, 1, 1, 0]  # a is a boolean array
        cumprod(a, dtype=int32)  # dtype should be provided to get the expected results
        -> [1, 1, 1, 0, 0, 0, 0]
    """
    return _make.cumprod(data, axis, dtype, exclusive)
def unique(data, is_sorted=True, return_counts=False):
    """Find the unique elements of a 1-D tensor.

    Note that ``output`` and ``counts`` are padded to the length of
    ``data``; any element with index >= num_unique[0] holds an undefined
    value.

    Parameters
    ----------
    data : relay.Expr
        A 1-D tensor of integers.
    is_sorted : bool, optional
        Whether to sort the unique elements in ascending order before
        returning as output.
    return_counts : bool, optional
        Whether to return the count of each unique element.

    Returns
    -------
    unique : relay.Expr
        A 1-D tensor containing the unique elements of the input data tensor.
    indices : relay.Expr
        A 1-D tensor containing the indices of the first occurrence of each
        unique value in the input tensor.
    inverse_indices : relay.Expr
        A 1-D tensor. For each entry in data, it contains the index of that
        data element in the unique array.
    num_unique : relay.Expr
        A 1-D tensor with size=1 containing the number of unique elements
        in the input data tensor.
    counts : relay.Expr, optional
        A 1-D tensor containing the count of each unique element in the
        output (only returned when ``return_counts`` is true).

    Examples
    --------
    .. code-block:: python

        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5],
                                                                False,
                                                                False)
        output          =  [4, 5, 1, 2, 3, _, _, _]
        indices         =  [0, 1, 2, 3, 4, _, _, _]
        inverse_indices =  [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique      =  [5]

        [output, indices, inverse_indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5],
                                                                        False,
                                                                        True)
        output          =  [4, 5, 1, 2, 3, _, _, _]
        indices         =  [0, 1, 2, 3, 4, _, _, _]
        inverse_indices =  [0, 1, 2, 3, 4, 4, 0, 1]
        num_unique      =  [5]
        counts          =  [2, 2, 1, 1, 2, _, _, _]

        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
        output          =  [1, 2, 3, 4, 5, _, _, _]
        indices         =  [2, 3, 4, 0, 1, _, _, _]
        inverse_indices =  [3, 4, 0, 1, 2, 2, 3, 4]
        num_unique      =  [5]
    """
    # The op yields one extra output (counts) when return_counts is set.
    num_outputs = 5 if return_counts else 4
    return TupleWrapper(_make.unique(data, is_sorted, return_counts), num_outputs)
def invert_permutation(data):
    """Compute the inverse of an index permutation.

    Given a 1-D integer tensor ``x`` that describes a permutation of a
    zero-based array, produce ``y`` such that::

        y[x[i]] = i   for i in [0, 1, ..., len(x) - 1]

    Parameters
    ----------
    data : relay.Expr
        The source data to be invert permuted.

    Returns
    -------
    ret : relay.Expr
        Invert permuted data. Has the same type as data.

    Examples
    --------
    .. code-block:: python

        data = [3, 4, 0, 2, 1]
        relay.invert_permutation(data) = [2, 4, 3, 0, 1]
    """
    return _make.invert_permutation(data)
def stft(
    data, n_fft, hop_length=None, win_length=None, window=None, normalized=False, onesided=True
):
    """Short-time Fourier transform of the input signal.

    Computes the Fourier transform of short overlapping windows of the
    input, exposing the frequency components of the signal as they change
    over time.

    Parameters
    ----------
    data : relay.Expr
        Either a 1-D tensor or a 2-D batch tensor.
    n_fft : int
        The size of Fourier transform.
    hop_length : int, optional
        The distance between neighboring sliding window frames. Defaults
        to ``floor(n_fft / 4)`` when omitted.
    win_length : int, optional
        The size of window frame and STFT filter. Defaults to ``n_fft``
        when omitted.
    window : relay.Expr, optional
        A 1-D tensor window frame. When omitted, a window filled with 1
        everywhere is used.
    normalized : bool, optional
        Whether to return the normalized STFT results. Default value is False.
    onesided : bool, optional
        Whether to return onesided result or fill with conjugate symmetry.
        Default value is True.

    Returns
    -------
    output : relay.Expr
        Tensor containing the STFT result with shape [batch, N, T, 2],
        where N is the number of frequencies where STFT is applied and T
        is the total number of frames used.

    Examples
    --------
    .. code-block:: python

        data = [1, 2, 3, 4, 5, 6]
        window = [4, 3, 2]
        [n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
        relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
        -> [[[16.0000, 0.0000], [43.0000, 0.0000]], [[ -2.0000, 0.0000], [ 2.5000, -2.5981]]]
    """
    # Fill in the documented defaults before handing off to the op maker.
    hop_length = n_fft // 4 if hop_length is None else hop_length
    win_length = n_fft if win_length is None else win_length
    if window is None:
        # Rectangular window of ones spanning the whole FFT size.
        window = _make.ones([n_fft], "int32")
    return _make.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
def dft(re_data, im_data, inverse=False):
    """Discrete Fourier transform of the input (along its last axis).

    This gives frequency components of the signal as they change over time.

    Parameters
    ----------
    re_data : relay.Expr
        N-D tensor holding the real part of the input signal.
    im_data : relay.Expr
        N-D tensor holding the imaginary part of the input signal.
        For a purely real signal, this tensor is all zeros.
    inverse : bool
        Whether to perform the inverse discrete Fourier transform.

    Returns
    -------
    re_output : relay.Expr
        The Fourier Transform of the input (real part).
    im_output : relay.Expr
        The Fourier Transform of the input (imaginary part).
    """
    return TupleWrapper(_make.dft(re_data, im_data, inverse), 2)
def trilu(data, k, upper=True):
    """Given a 2-D matrix or batches of 2-D matrices, returns the
    upper or lower triangular part of the tensor.

    Parameters
    ----------
    data : relay.Expr
        The tensor that trilu will be applied to. Must be either
        a 2D matrix or a tensor of batches of 2D matrices.
    k : int
        The number of diagonals above or below the main diagonal
        to exclude or include.
    upper : bool, optional
        If True, only upper triangular values of input are kept,
        if False, the lower triangular values are kept.

    Returns
    -------
    ret : relay.Expr
        The new tensor with appropriate diagonals set to zero.

    Examples
    --------
    .. code-block:: python

        x = [[0, 1, 2],
             [3, 4, 5],
             [6, 7, 8]]

        relay.trilu(x, 0, True) =
            [[0, 1, 2],
             [0, 4, 5],
             [0, 0, 8]]
    """
    # The C++ op expects k as a relay expression; wrap plain Python ints.
    if not isinstance(k, Expr):
        k = const(k, dtype="int32")
    return _make.trilu(data, k, upper)
| 62,500 | 30.141505 | 100 | py |
tvm | tvm-main/python/tvm/relay/op/image/image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Image operations."""
from . import _make
from ..dyn.image import _make as _dyn_make
from ...expr import Expr, Constant, const
def resize1d(
    data,
    size,
    roi=None,
    layout="NCW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    cubic_alpha=-0.5,
    cubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
):
    """Image resize1d operator.

    This operator takes data as input and does 1D scaling to the given
    scale factor. In the default case, where the data_layout is `NCW`
    with data of shape (n, c, w), out will have a shape (n, c, size[0]).

    method indicates the algorithm to be used while calculating the out
    value and method can be one of ("linear", "nearest_neighbor", "cubic").

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    size : Tuple of Int or Expr
        The out size to which the image will be resized.
    roi : Tuple of Float or Expr, optional
        The region of interest for cropping the input image. Expected to be
        of size 2, and format [start_w, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    layout : str, optional
        Layout of the input.
    method : str, optional
        Scale method to used [nearest_neighbor, linear, cubic].
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor. Definitions can be found
        in topi/image/resize.py.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method : string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    cubic_alpha : float
        Spline Coefficient for cubic interpolation
    cubic_exclude : int
        Flag to exclude exterior of the image during cubic interpolation
    extrapolation_value : float
        Fill value to use when roi is outside of the image
    out_dtype : str, optional
        Type to return. If left None returns the same type as input.

    Returns
    -------
    result : relay.Expr
        The resized result.
    """
    if roi is None:
        roi = [0.0] * 2
    if isinstance(size, Constant):
        size = list(size.data.numpy().astype("int32"))
    if isinstance(roi, Constant):
        # roi holds fractional crop coordinates; casting to int would
        # truncate them. Use float32, consistent with resize2d.
        roi = list(roi.data.numpy().astype("float32"))
    if isinstance(size, Expr) or isinstance(roi, Expr):
        raise NotImplementedError(
            "dyn.resize1d is not yet implemented, got size", size, "and roi", roi
        )
    return _make.resize1d(
        data,
        size,
        roi,
        layout,
        method,
        coordinate_transformation_mode,
        rounding_method,
        cubic_alpha,
        cubic_exclude,
        extrapolation_value,
        out_dtype,
    )
def resize2d(
    data,
    size,
    roi=None,
    layout="NCHW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    cubic_alpha=-0.5,
    cubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
):
    """Image resize2d operator.

    This operator takes data as input and does 2D scaling to the given
    scale factor. In the default case, where the data_layout is `NCHW`
    with data of shape (n, c, h, w), out will have a shape
    (n, c, size[0], size[1]).

    method indicates the algorithm to be used while calculating the out
    value and method can be one of ("linear", "nearest_neighbor", "cubic").

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    size : Tuple of Int or Expr
        The out size to which the image will be resized.
    roi : Tuple of Float or Expr, optional
        The region of interest for cropping the input image. Expected to be
        of size 4, and format [start_h, start_w, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    layout : str, optional
        Layout of the input.
    method : str, optional
        Scale method to used [nearest_neighbor, linear, cubic].
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor. Definitions can be found
        in topi/image/resize.py.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method : string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    cubic_alpha : float
        Spline Coefficient for bicubic interpolation
    cubic_exclude : int
        Flag to exclude exterior of the image during bicubic interpolation
    extrapolation_value : float
        Fill value to use when roi is outside of the image
    out_dtype : str, optional
        Type to return. If left None returns the same type as input.

    Returns
    -------
    result : relay.Expr
        The resized result.
    """
    if roi is None:
        roi = [0.0] * 4
    # Fold constant expressions back to plain Python lists.
    if isinstance(size, Constant):
        size = list(size.data.numpy().astype("int32"))
    if isinstance(roi, Constant):
        roi = list(roi.data.numpy().astype("float32"))

    # When either argument stays symbolic, dispatch to the dynamic op and
    # lift the remaining static argument to a constant expression.
    dynamic = isinstance(size, Expr) or isinstance(roi, Expr)
    if dynamic:
        if not isinstance(size, Expr):
            size = const(size, "int64")
        if not isinstance(roi, Expr):
            roi = const(roi, "float32")

    maker = _dyn_make.resize2d if dynamic else _make.resize2d
    return maker(
        data,
        size,
        roi,
        layout,
        method,
        coordinate_transformation_mode,
        rounding_method,
        cubic_alpha,
        cubic_exclude,
        extrapolation_value,
        out_dtype,
    )
def resize3d(
    data,
    size,
    roi=None,
    layout="NCDHW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    cubic_alpha=-0.5,
    cubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
):
    """Image resize3d operator.

    This operator takes data as input and does 3D scaling to the given
    scale factor. In the default case, where the data_layout is `NCDHW`
    with data of shape `(n, c, d, h, w)`, out will have a shape
    `(n, c, size[0], size[1], size[2])`.

    method indicates the algorithm to be used while calculating the out
    value and method can be one of ("linear", "nearest_neighbor", "cubic").

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    size : Tuple of Int or Expr
        The out size to which the image will be resized.
    roi : Tuple of Float or Expr, optional
        The region of interest for cropping the input image. Expected to be
        of size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    layout : str, optional
        Layout of the input.
    method : str, optional
        Scale method to used [nearest_neighbor, linear, cubic].
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor. Definitions can be found
        in topi/image/resize.py.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method : string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    cubic_alpha : float
        Spline Coefficient for cubic interpolation
    cubic_exclude : int
        Flag to exclude exterior of the image during cubic interpolation
    extrapolation_value : float
        Fill value to use when roi is outside of the image
    out_dtype : str, optional
        Type to return. If left None returns the same type as input.

    Returns
    -------
    result : relay.Expr
        The resized result.
    """
    if roi is None:
        roi = [0.0] * 6
    if isinstance(size, Constant):
        size = list(size.data.numpy().astype("int32"))
    if isinstance(roi, Constant):
        # roi holds fractional crop coordinates; casting to int would
        # truncate them. Use float32, consistent with resize2d.
        roi = list(roi.data.numpy().astype("float32"))
    if isinstance(size, Expr) or isinstance(roi, Expr):
        raise NotImplementedError(
            "dyn.resize3d is not yet implemented, got size", size, "and roi", roi
        )
    return _make.resize3d(
        data,
        size,
        roi,
        layout,
        method,
        coordinate_transformation_mode,
        rounding_method,
        cubic_alpha,
        cubic_exclude,
        extrapolation_value,
        out_dtype,
    )
def crop_and_resize(
    data,
    boxes,
    box_indices,
    crop_size,
    layout,
    method="bilinear",
    extrapolation_value=0,
    out_dtype=None,
):
    """Crop input images and resize the crops to a common size.

    method indicates the algorithm to be used while calculating the out
    value and method can be either "bilinear" or "nearest_neighbor".

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    boxes : relay.Expr
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor
        specifies the coordinates of a box.
    box_indices : relay.Expr
        A 1-D tensor of shape [num_boxes], box_ind[i] specifies the data
        that the i-th box refers to.
    crop_size : Tuple of PrimExpr
        The target size to which each box will be resized.
    layout : str, optional
        Layout of the input.
    method : str, optional
        Scale method, it can be either "nearest_neighbor" or "bilinear".
    extrapolation_value : float, optional
        Value used for extrapolation, when applicable.
    out_dtype : str, optional
        Type to return. If left None returns the same type as input.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.crop_and_resize(
        data, boxes, box_indices, crop_size, layout, method, extrapolation_value, out_dtype
    )
def dilation2d(
    data,
    weight,
    strides=(1, 1),
    padding=(0, 0),
    dilations=(1, 1),
    data_layout="NCHW",
    kernel_layout="IHW",
    out_dtype="",
):
    r"""Morphological Dilation 2D.

    This operator takes the weight as the dilation kernel and dilates it with
    data to produce an output. In the default case, where the data_layout is `NCHW`
    and kernel_layout is `OIHW`, dilation2d takes in a data Tensor with shape
    `(batch_size, in_channels, height, width)`, and a weight Tensor with shape
    `(channels, kernel_height, kernel_width)` to produce an output Tensor
    with the following rule:

    .. math::
        \mbox{out}[b, c, y, x] = \max_{dy, dx}
           \mbox{data}[b, c, \mbox{strides}[0] * y  + dy, \mbox{strides}[1] * x + dx] +
           \mbox{weight}[c, dy, dx]

    Padding and dilation are applied to data and weight respectively before the computation.
    This operator accepts data layout specification. Semantically, the operator
    will convert the layout to the canonical layout
    (`NCHW` for data and `IHW` for weight) and perform the computation.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.
    weight : tvm.relay.Expr
        The weight expressions.
    strides : Optional[Tuple[int]]
        The strides of convolution.
    padding : Optional[Tuple[int]]
        The padding of convolution on both sides of inputs before convolution.
    dilations : Optional[Tuple[int]]
        Specifies the dilation rate to be used for dilated convolution.
    data_layout : Optional[str]
        Layout of the input.
    kernel_layout : Optional[str]
        Layout of the weight.
    out_dtype : Optional[str]
        Specifies the output data type.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _make.dilation2d(
        data, weight, strides, padding, dilations, data_layout, kernel_layout, out_dtype
    )
def affine_grid(data, target_shape=None):
    """affine_grid operator that generates a 2D sampling grid.

    This operation is described in https://arxiv.org/pdf/1506.02025.pdf.
    It generates a uniform sampling grid within the target shape,
    normalizes it to [-1, 1], and then applies the provided affine
    transformation to the sampling grid.

    Parameters
    ----------
    data : tvm.Tensor
        3-D with shape [batch, 2, 3]. The affine matrix.
    target_shape : list/tuple of two int
        Specifies the output shape (H, W).

    Returns
    -------
    Output : tvm.Tensor
        4-D with shape [batch, 2, target_height, target_width]
    """
    return _make.affine_grid(data, target_shape)
def grid_sample(
    data, grid, method="bilinear", layout="NCHW", padding_mode="zeros", align_corners=True
):
    """Applies grid sampling to the input feature map.

    Given :math:`data` and :math:`grid`, for the 4-D case the output is
    computed by

    .. math::

        x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\
        y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\
        output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}])

    :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in
    :math:`output`, and :math:`G()` denotes the interpolation function.

    The out-boundary points will be padded with zeros if padding_mode is
    "zeros", with the border pixel value if padding_mode is "border", or
    with the inner pixel value if padding_mode is "reflection".

    The left-top corner (-1, -1) and right-bottom corner (1, 1) in grid
    will be mapped to (0, 0) and (h - 1, w - 1) of data if align_corners
    is "True", or to (-0.5, -0.5) and (h - 0.5, w - 0.5) of data if
    align_corners is "False".

    The shape of the output will be
    4-D (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]), or
    5-D (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3], grid.shape[4]).

    The operator assumes that :math:`grid` has been normalized to [-1, 1].
    grid_sample often cooperates with affine_grid, which generates sampling
    grids for grid_sample.

    Parameters
    ----------
    data : tvm.Tensor
        4-D with shape [batch, in_channel, in_height, in_width], or
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]
    grid : tvm.Tensor
        4-D with shape [batch, 2, out_height, out_width], or
        5-D with shape [batch, 3, out_depth, out_height, out_width]
    method : str
        The interpolation method; 4-D "nearest", "bilinear", "bicubic" and
        5-D "nearest", "bilinear"("trilinear") are supported.
    layout : str
        The layout of input data and the output.
    padding_mode : str
        The padding mode for outside grid values; "zeros", "border",
        "reflection" are supported.
    align_corners : bool
        Geometrically, we consider the pixels of the input as squares
        rather than points. If set to "True", the extrema ("-1" and "1")
        are considered as referring to the center points of the input
        corner pixels. If set to "False", they are instead considered as
        referring to the corner points of the input corner pixels, making
        the sampling more resolution agnostic.

    Returns
    -------
    Output : tvm.Tensor
        4-D with shape [batch, in_channel, out_height, out_width], or
        5-D with shape [batch, in_channel, out_depth, out_height, out_width]
    """
    return _make.grid_sample(data, grid, method, layout, padding_mode, align_corners)
| 16,683 | 30.839695 | 97 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/libtorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, no-else-return, E1102
"""Torch codegen operators"""
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
def torchop(script_fn, *params):
    """Insert an operation executed in the PyTorch JIT.

    The inserted operation is wrapped in compiler_begin/compiler_end
    annotations targeting the "torch" backend. Currently, only tensors
    are supported. The shape inference assumes that input shapes (and
    not values) determine output shapes.
    """
    # Mark every input as belonging to the torch-offloaded region.
    annotated_inputs = [compiler_begin(p, "torch") for p in params]
    torch_call = relay.op._make.torchop(annotated_inputs, script_fn.save_to_buffer())
    return compiler_end(torch_call, "torch")
| 1,427 | 37.594595 | 84 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Contrib modules."""
from .register import get_pattern_table, register_pattern_table
from .arm_compute_lib import *
from .dnnl import *
from .bnns import *
from .coreml import *
from .ethosn import *
from .libtorch import *
from .tensorrt import *
from .cutlass import *
from .clml import *
| 1,113 | 36.133333 | 63 | py |
tvm | tvm-main/python/tvm/driver/tvmc/frontends.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support to parse models from different frameworks into Relay networks.
Frontend classes do lazy-loading of modules on purpose, to reduce time spent on
loading the tool.
"""
import logging
import os
import sys
import re
import importlib
from abc import ABC
from abc import abstractmethod
from typing import Optional, List, Dict
from pathlib import Path
import numpy as np
from tvm import relay
from tvm import parser
from tvm.driver.tvmc import TVMCException, TVMCImportError
from tvm.driver.tvmc.model import TVMCModel
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
class Frontend(ABC):
    """Base class for command line driver frontends.

    A frontend provides a unified way to import models (as files) and to
    perform any preprocessing required to create a TVM module from them.
    """

    @staticmethod
    @abstractmethod
    def name():
        """Frontend name"""

    @staticmethod
    @abstractmethod
    def suffixes():
        """File suffixes (extensions) used by this frontend"""

    @abstractmethod
    def load(self, path, shape_dict=None, **kwargs):
        """Load a model from a given path.

        Parameters
        ----------
        path : str
            Path to a file
        shape_dict : dict, optional
            Mapping from input names to their shapes.

        Returns
        -------
        mod : tvm.IRModule
            The produced relay module.
        params : dict
            The parameters (weights) for the relay module.
        """
def lazy_import(pkg_name, from_pkg_name=None, hide_stderr=False):
    """Lazily import a frontend package or subpackage.

    Parameters
    ----------
    pkg_name : str
        Name of the module to import (may be relative when
        ``from_pkg_name`` is given).
    from_pkg_name : str, optional
        Anchor package for relative imports.
    hide_stderr : bool, optional
        Temporarily silence stderr while importing, to suppress noisy
        framework banners.

    Returns
    -------
    module
        The imported module.

    Raises
    ------
    TVMCImportError
        If the package cannot be imported.
    """
    # Save the current stream so it can be restored in the finally block;
    # the previous implementation referenced an undefined `stderr` name,
    # raising NameError whenever hide_stderr was True.
    stderr = sys.stderr
    if hide_stderr:
        sys.stderr = open(os.devnull, "w", encoding="utf-8")
    try:
        return importlib.import_module(pkg_name, package=from_pkg_name)
    except ImportError as error:
        raise TVMCImportError(pkg_name) from error
    finally:
        if hide_stderr:
            sys.stderr.close()
            sys.stderr = stderr
class KerasFrontend(Frontend):
    """Keras frontend for TVMC: imports HDF5 Keras models via tensorflow.keras."""

    @staticmethod
    def name():
        return "keras"

    @staticmethod
    def suffixes():
        return ["h5"]

    def load(self, path, shape_dict=None, **kwargs):
        """Load a Keras .h5 model and convert it to a Relay module.

        Parameters
        ----------
        path : str
            Path to the HDF5 model file.
        shape_dict : dict, optional
            Mapping from input names to shapes; entries override the
            shapes inferred from the model's input layers.

        Returns
        -------
        mod, params
            The Relay module and its parameter dict.
        """
        # pylint: disable=C0103
        tf = lazy_import("tensorflow")
        keras = lazy_import("keras", from_pkg_name="tensorflow")

        # tvm build currently imports keras directly instead of tensorflow.keras
        try:
            model = keras.models.load_model(path)
        except ValueError as err:
            raise TVMCException(str(err))

        # There are two flavours of keras model, sequential and
        # functional, TVM expects a functional model, so convert
        # if required:
        if self.is_sequential_p(model):
            model = self.sequential_to_functional(model)

        # Collect one shape tuple per input layer, substituting 1 for any
        # unknown (None) dimension such as the batch axis. Eager and graph
        # modes expose dims differently (int vs Dimension objects).
        in_shapes = []
        for layer in model._input_layers:
            if tf.executing_eagerly():
                in_shapes.append(tuple(dim if dim is not None else 1 for dim in layer.input.shape))
            else:
                in_shapes.append(
                    tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape)
                )

        # Dummy inputs are only used to pair input names with shapes;
        # user-supplied shapes from shape_dict take precedence.
        inputs = [np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes]
        input_shapes = {name: x.shape for (name, x) in zip(model.input_names, inputs)}
        if shape_dict is not None:
            input_shapes.update(shape_dict)
        kwargs.setdefault("layout", "NHWC")
        return relay.frontend.from_keras(model, input_shapes, **kwargs)

    def is_sequential_p(self, model):
        """Return True when *model* is a keras Sequential model."""
        keras = lazy_import("keras", from_pkg_name="tensorflow")
        return isinstance(model, keras.models.Sequential)

    def sequential_to_functional(self, model):
        """Rebuild a Sequential model as an equivalent functional Model."""
        keras = lazy_import("keras", from_pkg_name="tensorflow")
        assert self.is_sequential_p(model)
        input_layer = keras.layers.Input(batch_shape=model.layers[0].input_shape)
        prev_layer = input_layer
        for layer in model.layers:
            prev_layer = layer(prev_layer)
        model = keras.models.Model([input_layer], [prev_layer])
        return model
class OnnxFrontend(Frontend):
    """ONNX frontend for TVMC: imports .onnx protobuf models."""

    @staticmethod
    def name():
        return "onnx"

    @staticmethod
    def suffixes():
        return ["onnx"]

    def load(self, path, shape_dict=None, **kwargs):
        """Parse an .onnx file and convert it to a Relay module."""
        onnx = lazy_import("onnx")

        # pylint: disable=E1101
        loaded_model = onnx.load(path)
        return relay.frontend.from_onnx(loaded_model, shape=shape_dict, **kwargs)
class TensorflowFrontend(Frontend):
    """TensorFlow frontend for TVMC: imports frozen GraphDef (.pb) models."""

    @staticmethod
    def name():
        return "pb"

    @staticmethod
    def suffixes():
        return ["pb"]

    def load(self, path, shape_dict=None, **kwargs):
        """Read a frozen graph and convert it to a Relay module."""
        tf = lazy_import("tensorflow")
        tf_testing = lazy_import("tvm.relay.testing.tf")

        with tf.io.gfile.GFile(path, "rb") as tf_graph:
            serialized_graph = tf_graph.read()

        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(serialized_graph)
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        logger.debug("parse TensorFlow model and convert into Relay computation graph")
        return relay.frontend.from_tensorflow(graph_def, shape=shape_dict, **kwargs)
class TFLiteFrontend(Frontend):
    """TFLite frontend for TVMC: imports flatbuffer .tflite models."""

    @staticmethod
    def name():
        return "tflite"

    @staticmethod
    def suffixes():
        return ["tflite"]

    def load(self, path, shape_dict=None, **kwargs):
        """Parse a .tflite flatbuffer and convert it to a Relay module."""
        model = lazy_import("tflite.Model")

        with open(path, "rb") as flatbuffer_file:
            raw_bytes = flatbuffer_file.read()

        # tflite.Model.Model is tflite.Model in 1.14 and 2.1.0
        try:
            parsed = model.Model.GetRootAsModel(raw_bytes, 0)
        except AttributeError:
            parsed = model.GetRootAsModel(raw_bytes, 0)

        try:
            version = parsed.Version()
            logger.debug("tflite version %s", version)
        except Exception:
            raise TVMCException("input file not tflite")

        if version != 3:
            raise TVMCException("input file not tflite version 3")

        logger.debug("parse TFLite model and convert into Relay computation graph")
        return relay.frontend.from_tflite(parsed, shape_dict=shape_dict, **kwargs)
class PyTorchFrontend(Frontend):
    """PyTorch frontend for TVMC: imports TorchScript models."""

    @staticmethod
    def name():
        return "pytorch"

    @staticmethod
    def suffixes():
        # Torch Script is a zip file, but can be named pth
        return ["pth", "zip"]

    def load(self, path, shape_dict=None, **kwargs):
        """Load a traced/scripted model and convert it to a Relay module."""
        torch = lazy_import("torch")

        if shape_dict is None:
            raise TVMCException("--input-shapes must be specified for %s" % self.name())

        scripted_model = torch.jit.load(path)
        scripted_model.eval()  # Switch to inference mode

        # The PyTorch importer expects input shapes as an ordered list of pairs.
        shape_list = list(shape_dict.items())

        logger.debug("parse Torch model and convert into Relay computation graph")
        return relay.frontend.from_pytorch(
            scripted_model, shape_list, keep_quantized_weight=True, **kwargs
        )
class PaddleFrontend(Frontend):
    """PaddlePaddle frontend for TVMC: imports Paddle inference models."""

    @staticmethod
    def name():
        return "paddle"

    @staticmethod
    def suffixes():
        return ["pdmodel"]

    def load(self, path, shape_dict=None, **kwargs):
        """Load a Paddle inference program and convert it to a Relay module."""
        # pylint: disable=C0415
        import paddle

        paddle.enable_static()
        paddle.disable_signal_handler()

        if not os.path.exists(path):
            raise TVMCException("File {} is not exist.".format(path))
        if not path.endswith(".pdmodel"):
            raise TVMCException("Path of model file should be endwith suffixes '.pdmodel'.")

        # The companion weight file shares the model file's prefix.
        prefix = "".join(path.strip().split(".")[:-1])
        params_file_path = prefix + ".pdiparams"
        if not os.path.exists(params_file_path):
            raise TVMCException("File {} is not exist.".format(params_file_path))

        # pylint: disable=E1101
        executor = paddle.static.Executor(paddle.CPUPlace())
        prog, _, _ = paddle.static.load_inference_model(prefix, executor)
        return relay.frontend.from_paddle(prog, shape_dict=shape_dict, **kwargs)
class RelayFrontend(Frontend):
    """Relay frontend for TVMC: imports models from Relay text format."""

    @staticmethod
    def name():
        return "relay"

    @staticmethod
    def suffixes():
        return ["relay"]

    def load(self, path, shape_dict=None, **kwargs):
        """Parse a .relay text file into a Relay module.

        Any ``main`` parameter not named in ``shape_dict`` is populated
        with random data, so only the declared inputs remain true inputs.
        """
        with open(path, "r", encoding="utf-8") as relay_text:
            text = relay_text.read()

        if shape_dict is None:
            logger.warning(
                "Specify --input-shapes to ensure that model inputs "
                "will not be considered as constants."
            )

        def _validate_text(text):
            """Check the provided file contents.
            The relay.txt artifact contained in the MLF is missing the version header and
            the metadata which is required to use meta[relay.Constant]."""
            if re.compile(r".*\#\[version\.*").match(text) is None:
                raise TVMCException(
                    "The relay model does not include the required version information."
                )
            if re.compile(r".*meta\[.+\].*", re.DOTALL).match(text):
                if "#[metadata]" not in text:
                    raise TVMCException(
                        "The relay model does not include the required #[metadata] section. "
                        "Use ir_mod.astext(show_meta_data=True) to export compatible code."
                    )

        _validate_text(text)
        ir_mod = parser.fromtext(text)

        # Parameters named in shape_dict are genuine model inputs and must
        # not be replaced with random constants below.
        if shape_dict:
            input_names = shape_dict.keys()
        else:
            input_names = []

        def _gen_params(ir_mod, skip_names=None):
            """Generate random data for every main-function parameter,
            skipping any parameter named in *skip_names*."""
            main_func = ir_mod["main"]
            shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
            type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
            params = {}
            for name, shape in shape_dict.items():
                if skip_names and name in skip_names:
                    continue
                if "int" in type_dict[name]:
                    data = np.random.randint(128, size=shape, dtype=type_dict[name])
                else:
                    data = np.random.uniform(-1, 1, size=shape).astype(type_dict[name])
                params[name] = data
            return params

        params = _gen_params(ir_mod, skip_names=input_names)

        return ir_mod, params
# Registry of every frontend TVMC knows about.  Order matters for
# `guess_frontend`: the first frontend whose suffix list matches wins.
ALL_FRONTENDS = [
    KerasFrontend,
    OnnxFrontend,
    TensorflowFrontend,
    TFLiteFrontend,
    PyTorchFrontend,
    PaddleFrontend,
    RelayFrontend,
]
def get_frontend_names():
    """Return the names of all supported frontends.

    Returns
    -------
    list : list of str
        A list of frontend names as strings
    """
    names = []
    for frontend_cls in ALL_FRONTENDS:
        names.append(frontend_cls.name())
    return names
def get_frontend_by_name(name: str):
    """Look up a frontend by its registered name and instantiate it.

    Parameters
    ----------
    name : str
        the name of a given frontend

    Returns
    -------
    frontend : tvm.driver.tvmc.Frontend
        An instance of the frontend registered under ``name``.

    Raises
    ------
    TVMCException
        If no frontend is registered under ``name``.
    """
    match = next((cls for cls in ALL_FRONTENDS if cls.name() == name), None)
    if match is not None:
        return match()
    raise TVMCException(
        "unrecognized frontend '{0}'. Choose from: {1}".format(name, get_frontend_names())
    )
def guess_frontend(path: str):
    """Infer which framework produced the model at ``path``.

    The decision is based purely on the file extension.

    Parameters
    ----------
    path : str
        The path to the model file.

    Returns
    -------
    frontend : tvm.driver.tvmc.Frontend
        An instance of the first registered frontend claiming the suffix.

    Raises
    ------
    TVMCException
        If no registered frontend claims the suffix.
    """
    # PurePath.suffix keeps the leading dot; drop it before matching.
    suffix = Path(path).suffix.lower().lstrip(".")

    for frontend_cls in ALL_FRONTENDS:
        if suffix in frontend_cls.suffixes():
            return frontend_cls()

    raise TVMCException("failed to infer the model format. Please specify --model-format")
def load_model(
    path: str,
    model_format: Optional[str] = None,
    shape_dict: Optional[Dict[str, List[int]]] = None,
    **kwargs,
):
    """Load a model from a supported framework and convert it
    into an equivalent relay representation.

    Parameters
    ----------
    path : str
        The path to the model file.
    model_format : str, optional
        The underlying framework used to create the model.
        If not specified, this will be inferred from the file type.
    shape_dict : dict, optional
        Mapping from input names to their shapes.

    Returns
    -------
    tvmc_model : TVMCModel
        The produced model package.
    """
    # Explicit format wins; otherwise fall back to extension sniffing.
    if model_format is None:
        frontend = guess_frontend(path)
    else:
        frontend = get_frontend_by_name(model_format)

    mod, params = frontend.load(path, shape_dict, **kwargs)
    return TVMCModel(mod, params)
| 14,237 | 28.724426 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/relay_workload.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Workloads in Relay IR"""
# pylint: disable=import-outside-toplevel
import logging
import multiprocessing
import os
import pickle
from typing import Any, Dict, List, Optional, Tuple
import tvm
import tvm.relay.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm.ir import IRModule
from tvm.runtime import NDArray, load_param_dict, save_param_dict
from tvm.target import Target
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _load_vision_network(
    name: str, input_shape: List[int], layout: Optional[str]
) -> Tuple[IRModule, Dict[str, Any], Tuple[str, List[int], str]]:
    """Import a torchvision model, trace it, and convert it to Relay.

    Models are imported as NCHW (PyTorch's native layout); unless
    ``layout == "NCHW"`` they are then converted to NHWC.
    """
    import torch  # type: ignore
    from torchvision import models  # type: ignore

    assert layout is None or layout in ["NCHW", "NHWC"]

    params: Dict[str, Any] = {}
    if name in ["resnet_18", "resnet_50"]:
        model = getattr(models, name.replace("_", ""))
    elif name == "wide_resnet_50":
        model = getattr(models, "wide_resnet50_2")
    elif name == "resnext_50":
        model = getattr(models, "resnext50_32x4d")
    elif name == "mobilenet_v2":
        model = getattr(models, name)
    elif name == "mobilenet_v3":
        model = getattr(models, name + "_large")
    elif name == "inception_v3":
        model = getattr(models, name)
        params["aux_logits"] = False
    elif name == "densenet_121":
        model = getattr(models, name.replace("_", ""))
    elif name == "resnet3d_18":
        model = models.video.r3d_18
    elif name == "vgg_16":
        model = getattr(models, name.replace("_", ""))

    # Newer torchvision takes `weights=`; older releases only accept the
    # (now removed) `pretrained=` keyword.
    try:
        model = model(**params, weights=None)
    except TypeError:
        model = model(**params, pretrained=False)

    dtype = "float32"
    input_data = torch.randn(input_shape).type(  # pylint: disable=no-member
        {
            "float32": torch.float32,  # pylint: disable=no-member
        }[dtype]
    )
    scripted_model = torch.jit.trace(model, input_data).eval()  # type: ignore
    input_name = "input0"
    shape_list = [(input_name, input_shape)]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
    passes = [relay.transform.RemoveUnusedFunctions()]
    if layout is None or layout == "NHWC":
        # PyTorch is imported as NCHW by default; convert when NHWC is
        # requested (or when no layout was given, making NHWC the default).
        passes.append(
            relay.transform.ConvertLayout(
                {
                    "nn.conv2d": ["NHWC", "default"],
                    "nn.conv3d": ["NDHWC", "default"],
                    "nn.max_pool2d": ["NHWC", "default"],
                    "nn.avg_pool2d": ["NHWC", "default"],
                }
            )
        )
    with tvm.transform.PassContext(opt_level=3):
        mod = tvm.transform.Sequential(passes)(mod)
    return mod, params, (input_name, input_shape, dtype)


def _load_bert_network(
    name: str, input_shape: List[int], layout: Optional[str]
) -> Tuple[IRModule, Dict[str, Any], Tuple[str, List[int], str]]:
    """Build a HuggingFace BERT variant, trace it, and convert it to Relay."""
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    # pip3 install transformers==3.5 torch==1.7
    import torch  # type: ignore
    import transformers  # type: ignore

    assert layout is None
    config_dict = {
        "bert_tiny": transformers.BertConfig(
            num_hidden_layers=6,
            hidden_size=512,
            intermediate_size=2048,
            num_attention_heads=8,
            return_dict=False,
        ),
        "bert_base": transformers.BertConfig(
            num_hidden_layers=12,
            hidden_size=768,
            intermediate_size=3072,
            num_attention_heads=12,
            return_dict=False,
        ),
        "bert_medium": transformers.BertConfig(
            num_hidden_layers=12,
            hidden_size=1024,
            intermediate_size=4096,
            num_attention_heads=16,
            return_dict=False,
        ),
        "bert_large": transformers.BertConfig(
            num_hidden_layers=24,
            hidden_size=1024,
            intermediate_size=4096,
            num_attention_heads=16,
            return_dict=False,
        ),
    }
    model = transformers.BertModel(config_dict[name])
    input_name = "input_ids"
    input_dtype = "int64"
    a = torch.randint(10000, input_shape)  # pylint: disable=no-member
    model.eval()
    scripted_model = torch.jit.trace(model, [a], strict=False)  # type: ignore
    shape_list = [(input_name, input_shape)]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
    mod = relay.transform.FastMath()(mod)
    mod = relay.transform.CombineParallelBatchMatmul()(mod)
    return mod, params, (input_name, input_shape, input_dtype)


def _load_dcgan_network(
    input_shape: List[int], layout: Optional[str]
) -> Tuple[IRModule, Dict[str, Any], Tuple[str, List[int], str]]:
    """Build the relay.testing DCGAN generator workload.

    ``input_shape`` here is the *output* image shape; the generator's real
    input is a 100-dim latent vector named "data".
    """
    assert layout is None
    output_shape = input_shape
    batch_size = output_shape[0]
    oshape = output_shape[1:]
    mod, params = relay.testing.dcgan.get_workload(
        batch_size=batch_size,
        oshape=oshape,
        layout="NHWC",
    )
    return mod, params, ("data", [100], "float32")


def _get_network(
    args: Tuple[str, List[int], Optional[str]]
) -> Tuple[IRModule, bytearray, Tuple[str, List[int], str]]:
    """Build the Relay module for one named workload.

    This runs in a worker subprocess (see ``get_network``) so the heavy
    framework imports (torch, transformers) don't pollute the parent.

    Parameters
    ----------
    args : Tuple[str, List[int], Optional[str]]
        Network name, input shape, and layout (the string "None" is
        normalized to ``None``).

    Returns
    -------
    mod, params_bytearray, inputs
        The Relay module, the serialized parameter dict, and the
        (input name, shape, dtype) triple.

    Raises
    ------
    ValueError
        If the network name is not recognized.
    """
    name: str
    input_shape: List[int]
    layout: Optional[str]
    name, input_shape, layout = args
    if layout == "None":
        layout = None

    if name in [
        "resnet_18",
        "resnet_50",
        "wide_resnet_50",
        "resnext_50",
        "mobilenet_v2",
        "mobilenet_v3",
        "inception_v3",
        "densenet_121",
        "resnet3d_18",
        "vgg_16",
    ]:
        mod, params, inputs = _load_vision_network(name, input_shape, layout)
    elif name in ["bert_tiny", "bert_base", "bert_medium", "bert_large"]:
        mod, params, inputs = _load_bert_network(name, input_shape, layout)
    elif name == "dcgan":
        mod, params, inputs = _load_dcgan_network(input_shape, layout)
    else:
        raise ValueError("Invalid name: " + name)

    # Serialize params so the result can cross the process boundary cheaply.
    params_bytearray: bytearray = save_param_dict(params)
    return mod, params_bytearray, inputs
def _load_cache(cache_dir: Optional[str], filename: str) -> Optional[List[Any]]:
    """Fetch a previously pickled cache entry.

    Returns ``None`` when caching is disabled (``cache_dir`` is None) or
    when no cache file exists yet.
    """
    if cache_dir is None:
        return None
    cache_path = os.path.join(os.path.expanduser(cache_dir), filename)
    if not os.path.exists(cache_path):
        return None
    logger.info("Loaded from cached: %s", cache_path)
    with open(cache_path, "rb") as cache_file:
        return pickle.load(cache_file)
def _save_cache(cache_dir: Optional[str], filename: str, objects: List[Any]) -> None:
    """Pickle ``objects`` under ``cache_dir/filename``; no-op when caching is off."""
    if cache_dir is None:
        return
    target = os.path.join(os.path.expanduser(cache_dir), filename)
    with open(target, "wb") as out_file:
        pickle.dump(objects, out_file)
def get_network(
    name: str,
    input_shape: List[int],
    *,
    layout: Optional[str] = None,
    cache_dir: Optional[str] = None,
) -> Tuple[IRModule, Dict[str, NDArray], Tuple[str, List[int], str]]:
    """Get the symbol definition and random weight of a network.

    Parameters
    ----------
    name : str
        The name of the network.
    input_shape : List[int]
        The shape of the input tensor.
    layout : Optional[str]
        The layout of the input tensor. For vision models, the layout is by default NHWC.
    cache_dir : Optional[str], optional
        The directory to cache the generated network.
        If not specified, the cache will be disabled.

    Returns
    -------
    mod : IRModule
        The IRModule representing the network.
    params : Dict[str, NDArray]
        The parameters of the networks.
    inputs : Tuple[str, List[int], str]
        The name, shape and dtype of the input tensor.
    """
    filename = f'relay-{name}-{layout}-{",".join(str(i) for i in input_shape)}.json'
    cached = _load_cache(cache_dir, filename)
    if cached is None:
        # Build in a throwaway subprocess so heavyweight framework state
        # (torch, transformers) never leaks into this process.
        with multiprocessing.Pool(processes=1) as pool:
            ((mod, params_bytearray, inputs),) = pool.map(
                _get_network, [(name, input_shape, layout)]
            )
        cached = [mod, params_bytearray, inputs]
        _save_cache(cache_dir, filename, cached)
    mod, params_bytearray, inputs = cached
    return mod, load_param_dict(params_bytearray), inputs
def extract_from_relay(
    mod: IRModule,
    target: Target,
    params: Optional[Dict[str, NDArray]],
    name: str,
    input_shape: List[int],
    *,
    cache_dir: Optional[str] = None,
) -> List[ms.ExtractedTask]:
    """Extract the tuning tasks from a network, caching the result on disk.

    Parameters
    ----------
    mod : IRModule
        The IRModule representing the network.
    target : Target
        The target that the network will be deployed to.
    params : Optional[Dict[str, NDArray]]
        The parameters of the networks.
    name : str
        The name of the network.
    input_shape : List[int]
        The shape of the input tensor.
    cache_dir : Optional[str]
        The directory to cache the generated network.
        If not specified, the cache will be disabled.

    Returns
    -------
    extracted_tasks : List[ExtractedTask]
        The extracted tasks.
    """
    filename = f'tasks-{target.kind.name}-{name}-{",".join(str(i) for i in input_shape)}.json'
    extracted_tasks = _load_cache(cache_dir, filename)
    if extracted_tasks is None:
        extracted_tasks = list(
            ms.relay_integration.extract_tasks(mod=mod, target=target, params=params)
        )
        _save_cache(cache_dir, filename, extracted_tasks)
    return extracted_tasks
# Workload names accepted by `get_network`, grouped by the source used to
# build each one in `_get_network`.
SUPPORTED = [
    # TorchVision
    "resnet_18",
    "resnet_50",
    "mobilenet_v2",
    "mobilenet_v3",
    "wide_resnet_50",
    "resnext_50",
    "resnet3d_18",
    "inception_v3",
    "densenet_121",
    "vgg_16",
    # Transformer
    "bert_tiny",
    "bert_base",
    "bert_medium",
    "bert_large",
    # Relay testing
    "dcgan",
]
| 10,611 | 32.371069 | 94 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/torchbench/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, pointless-exception-statement
"""
Helper functions for running TorchBench through the benchmark functions
from TorchDynamo.
"""
import functools
import os
import sys
from dataclasses import dataclass
from enum import Enum
from typing import Set
import torch # type: ignore
class DisallowedOperator(Enum):
    """Operators that must be kept out of the fx graphs TorchDynamo captures,
    as a workaround for limitations in TVM's PyTorch frontend.

    - INPLACE_COPY: aten::copy_ as inplace assign A[...] = ..., or method call A.copy_(...)
    - EINSUM: torch.functional.einsum
    - MULTIHEAD_ATTENTION: torch.nn.MultiheadAttention
    - AS_STRIDE: Tensor.as_stride
    """

    INPLACE_COPY = "inplace_copy"
    EINSUM = "einsum"
    MULTIHEAD_ATTENTION = "multihead_attention"
    AS_STRIDE = "as_stride"
def find_torchdynamo() -> str:
    """Locate a checkout of the TorchDynamo repository.

    The benchmark runner that ships inside TorchDynamo is not designed to
    be used as an importable Python package, so we search a few nearby
    directories for a checkout instead.
    """
    for candidate in ("torchdynamo", "../torchdynamo", "../../torchdynamo"):
        if os.path.exists(f"{candidate}/benchmarks"):
            return candidate
    raise RuntimeError(
        """
        Cannot find directory for torchdynamo.
        You need to clone https://github.com/pytorch/torchdynamo to the parent directory of cwd.
        """
    )
# Make the TorchDynamo checkout (and its bundled benchmark code) importable.
DYNAMO_DIR = find_torchdynamo()
sys.path.insert(
    0, DYNAMO_DIR
)  # opacus_cifar10 depends on opacus, which installs a package called 'benchmarks'
sys.path.append(f"{DYNAMO_DIR}/benchmarks")

# These imports require the sys.path manipulation above, so they cannot sit
# at the top of the file.
# pylint: disable=wrong-import-position, unused-import
import torchdynamo  # type: ignore
from benchmarks.common import same, timed  # type: ignore
from torchbench import TorchBenchmarkRunner  # type: ignore

# pylint: disable=wrong-import-position, unused-import
def _disallow_operators(disallowed_ops: Set[DisallowedOperator]):
    """
    Keep the given operators out of the fx graphs TorchDynamo produces.

    Two mechanisms are combined:
    1. ``torchdynamo.disallow_in_graph``, which only covers free function calls.
    2. Patching ``TensorVariable.call_method``, which covers method calls
       on ``torch.Tensor``.
    """
    blocked_methods: Set[str] = set()

    if DisallowedOperator.INPLACE_COPY in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.Tensor.copy_)
        blocked_methods.update({"copy_", "__setitem__"})
    if DisallowedOperator.EINSUM in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.functional.einsum)
    if DisallowedOperator.MULTIHEAD_ATTENTION in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.nn.MultiheadAttention)
    if DisallowedOperator.AS_STRIDE in disallowed_ops:
        blocked_methods.add("as_stride")

    tensor_variable_cls = torchdynamo.variables.tensor.TensorVariable
    original_call_method = tensor_variable_cls.call_method

    @functools.wraps(original_call_method)
    def patched_call_method(self, translator, name, args, kwargs):
        # Reject blocked Tensor methods so TorchDynamo falls back to eager.
        if name in blocked_methods:
            raise torchdynamo.exc.Unsupported(f"Tensor.{name} not supported by TVM.")
        return original_call_method(self, translator, name, args, kwargs)

    tensor_variable_cls.call_method = patched_call_method
def load_torchdynamo_benchmark_runner(
    is_cuda: bool,
    cosine_similarity: bool = False,
    float32: bool = False,
    disallowed_operators: Set[DisallowedOperator] = None,
) -> TorchBenchmarkRunner:
    """
    Build a configured benchmark runner from TorchDynamo.

    Configures an inference-mode (eval) runner for the performance test,
    optionally blocking operators that TVM's frontend cannot handle and
    wiring CUDA synchronization into the timing harness.
    """

    @dataclass
    class RunnerArgs:
        """
        Stand-in for the parsed argparse namespace the TorchDynamo
        benchmark code expects.
        """

        ci: bool = False  # Whether runs in CI mode. pylint: disable=invalid-name
        training: bool = False  # Whether it benchmarks training workload.
        use_eval_mode: bool = True  # Whether the model should be in eval mode.
        dynamic_shapes: bool = False  # Whether runs the model in dynamic shape mode.
        float16: bool = False  # Whether to cast model and inputs to float16
        float32: bool = False  # Whether to cast model and inputs to float32
        accuracy: bool = False  # Whether to perform a accuracy test
        performance: bool = True  # Whether to perform a performance test
        cosine: bool = False  # Whether to use consine similarity to check if output is correct.

    runner_args = RunnerArgs(cosine=cosine_similarity, float32=float32)

    runner = TorchBenchmarkRunner()
    runner.args = runner_args
    runner.model_iter_fn = runner.forward_pass

    if disallowed_operators:
        _disallow_operators(disallowed_operators)

    if is_cuda:
        # pylint: disable=import-outside-toplevel
        import benchmarks.common  # type: ignore

        # pylint: enable=import-outside-toplevel
        benchmarks.common.synchronize = torch.cuda.synchronize

    return runner
| 5,837 | 33.75 | 96 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.