repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
TREMBA | TREMBA-master/imagenet_model/Resnet.py | import torch.nn as nn
import torch
import math
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution; padding equals dilation so spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution with no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style).

    Computes relu(bn2(conv2(relu(bn1(conv1(x))))) + identity), where identity
    is x, optionally passed through `downsample` to match shape/stride.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when provided) carry the spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Apply the residual transform and return the activated sum."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Three-layer bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand.

    Output channel count is planes * expansion; the identity path goes through
    `downsample` when the shape changes.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt-style widening).
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, when provided) carry the spatial stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Apply the 1x1-3x3-1x1 residual transform and return relu(sum)."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class Denoise(nn.Module):
    """Non-local-style feature denoising layer.

    Computes an affinity-weighted aggregation over all spatial positions of
    the input feature map, passes it through a 1x1 conv + BatchNorm, and adds
    the result back to the input as a residual.

    Args:
        channel: number of input (and output) channels.
        embed: if True, compute affinities between 1x1-conv embeddings
            (theta/phi with channel//2 channels); otherwise between raw features.
        softmax: if True, softmax-normalize the scaled dot-product affinity;
            otherwise use a plain dot product divided by H*W.
    """
    def __init__(self, channel, embed=True, softmax=True):
        super().__init__()
        self.embed = embed
        self.softmax = softmax
        self.channel = channel
        if self.embed:
            self.conv_theta = nn.Conv2d(channel, channel//2, kernel_size=1, stride=1, padding=0, bias=False)
            self.conv_phi = nn.Conv2d(channel, channel//2, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv = nn.Conv2d(channel, channel, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(channel)
    def forward(self, x):
        if self.embed:
            theta = self.conv_theta(x)
            phi = self.conv_phi(x)
        else:
            theta = x
            phi = x
        # n_in = channels of x; H, W = spatial size.
        n_in, H, W = list(x.size())[1:]
        # Full (H*W) x (H*W) spatial affinity path: used when softmax is
        # requested, or when there are more channels than spatial positions.
        if n_in > H*W or self.softmax:
            f = torch.einsum('niab,nicd->nabcd', theta,phi)
            if self.softmax:
                shape = f.size()
                f = f.view(-1, shape[2]*shape[3], shape[2]*shape[3])
                # Scaled dot-product before softmax over the last axis.
                f = f / math.sqrt(self.channel/2)
                f = nn.functional.softmax(f, dim=-1)
                f = f.view(shape)
            # Aggregate values from the raw input x (not the embeddings).
            f = torch.einsum('nabcd,nicd->niab', f, x)
        else:
            # Equivalent dot-product form via a channel-sized gram matrix,
            # which avoids materializing the (H*W)^2 affinity tensor.
            f = torch.einsum('nihw,njhw->nij', phi, x)
            f = torch.einsum('nij,nihw->njhw', f, theta)
        if not self.softmax:
            # Normalize the dot-product affinity by the number of positions.
            f = f / (H*W)
        y = self.bn(self.conv(f))
        # Residual connection back onto the input features.
        return x + y
class DenoiseBottleneck(nn.Module):
    """Bottleneck residual block followed by a dot-product Denoise layer.

    Identical to Bottleneck except that each block's activated output is
    passed through Denoise(planes * expansion, embed=False, softmax=False).
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(DenoiseBottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt-style widening).
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, when provided) carry the spatial stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Non-embedded, non-softmax denoising on the block output.
        self.denoise = Denoise(planes * self.expansion, False, False)
    def forward(self, x):
        """Residual transform, activation, then feature denoising."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.denoise(self.relu(out))
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone with optional non-local denoising.

    Args:
        block: residual block class (e.g. BasicBlock, Bottleneck, DenoiseBottleneck).
        layers: number of residual blocks in each of the four stages.
        num_classes: output size of the final fully-connected classifier.
        zero_init_residual: if True, zero-init the last BN weight of every
            residual branch so each block starts as an identity mapping.
        groups, width_per_group: grouped-convolution (ResNeXt) configuration.
        replace_stride_with_dilation: three flags; where True, the matching
            stage keeps spatial resolution and dilates instead of striding.
        norm_layer: normalization layer factory; defaults to nn.BatchNorm2d.
        denoise: if True, append a stand-alone Denoise layer after every stage.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, denoise=False):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.denoise = denoise
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + 3x3 stride-2 max-pool (4x total downsampling).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He init for convs; unit/zero init for norm-layer affine parameters.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage of `blocks` residual blocks as an nn.Sequential.

        The first block may stride (or dilate) and receives a 1x1 downsample
        projection when the shape changes; remaining blocks preserve shape.
        Appends a Denoise layer at the end of the stage when self.denoise is set.
        Updates self.inplanes/self.dilation as a side effect, so stages must be
        built in order.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Keep resolution: convert this stage's stride into dilation.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        if self.denoise:
            layers.append(Denoise(self.inplanes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return class logits of shape (batch, num_classes) for RGB input."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet152():
    """Construct a plain ResNet-152 (Bottleneck blocks, stages 3-8-36-3)."""
    return ResNet(Bottleneck, [3, 8, 36, 3])
def resnet152_denoise():
    """Construct a ResNet-152 with a Denoise layer appended after every stage."""
    return ResNet(Bottleneck, [3, 8, 36, 3], denoise=True)
def resnet101_denoise():
    # ResNeXt-101 32x8d configuration built from DenoiseBottleneck blocks (each
    # block ends with its own non-local denoising layer); denoise=False means no
    # additional stand-alone Denoise layers are appended after each stage.
    model = ResNet(DenoiseBottleneck, [3, 4, 23, 3], denoise=False, width_per_group=8, groups=32)
    return model | 10,508 | 35.113402 | 108 | py |
SemFormer | SemFormer-main/inference_rw.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import math
from tqdm import tqdm
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.puzzle_utils import *
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
parser.add_argument('--start', default=0.0, type=float)
parser.add_argument('--end', default=1.0, type=float)
###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='resnet50', type=str)
###############################################################################
# Inference parameters
###############################################################################
parser.add_argument('--model_name', default='', type=str)
parser.add_argument('--cam_dir', default='', type=str)
parser.add_argument('--domain', default='train', type=str)
parser.add_argument('--beta', default=10, type=int)
parser.add_argument('--exp_times', default=8, type=int)
parser.add_argument('--image_size', default=512, type=int)
parser.add_argument('--clear_cache', default=False, type=str2bool)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
experiment_name = args.model_name
if 'train' in args.domain:
experiment_name += '@train'
else:
experiment_name += '@val'
experiment_name += '@beta=%d'%args.beta
experiment_name += '@exp_times=%d'%args.exp_times
experiment_name += '@rw'
cam_dir = f'./experiments/predictions/{args.cam_dir}/'
pred_dir = create_directory(f'./experiments/predictions/{experiment_name}/')
model_path = './experiments/models/' + f'{args.model_name}.pth'
set_seed(args.seed)
log_func = lambda string='': print(string)
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
meta_dic = read_json('./data/VOC_2012.json')
if args.domain == 'test':
dataset = VOC_Dataset_For_Evaluation(args.data_dir, args.domain)
else:
dataset = VOC_Dataset_For_Making_CAM(args.data_dir, args.domain)
###################################################################################
# Network
###################################################################################
stride = 4
path_index = PathIndex(radius=10, default_size=(args.image_size // stride, args.image_size // stride))
model = AffinityNet(args.architecture, path_index)
model = model.cuda()
model.eval()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
try:
use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
use_gpu = '0'
the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
model = nn.DataParallel(model)
load_model(model, model_path, parallel=the_number_of_gpu > 1)
#################################################################################################
# Evaluation
#################################################################################################
eval_timer = Timer()
print('rw output dir: {}'.format(pred_dir))
print('total number: {}'.format(len(dataset)))
with torch.no_grad():
dataset_len = len(dataset)
start = int(dataset_len * args.start)
end = int(dataset_len * args.end)
length = end - start
for item_id in tqdm(
range(start, end),
total=length,
dynamic_ncols=True,
):
item = dataset.__getitem__(item_id)
if args.domain == 'test':
ori_image, image_id, gt_mask = item # (gt_mask is None)
else:
ori_image, image_id, label, gt_mask = item
ori_w, ori_h = ori_image.size
npy_path = pred_dir + image_id + '.npy'
if os.path.isfile(npy_path) and (not args.clear_cache):
continue
# preprocessing
image = np.asarray(ori_image)
image = normalize_fn(image)
image = image.transpose((2, 0, 1))
image = torch.from_numpy(image)
flipped_image = image.flip(-1)
images = torch.stack([image, flipped_image])
images = images.cuda()
edge = model.get_edge(images, image_size=(512, 512), stride=stride)
# postprocessing
cam_dict = np.load(cam_dir + image_id + '.npy', allow_pickle=True).item()
cams = cam_dict['cam']
if isinstance(cams, np.ndarray):
cams = torch.from_numpy(cams)
cam_downsized_values = cams.cuda()
rw = propagate_to_edge(cam_downsized_values, edge, beta=args.beta, exp_times=args.exp_times, radius=5)
rw_up = F.interpolate(rw, scale_factor=stride, mode='bilinear', align_corners=False)[..., 0, :ori_h, :ori_w]
rw_up = rw_up / torch.max(rw_up)
np.save(npy_path, {"keys": cam_dict['keys'], "rw": rw_up.cpu().numpy()})
print()
print("python evaluate.py --experiment_name {} --domain {}".format(experiment_name, args.domain)) | 6,873 | 34.802083 | 120 | py |
SemFormer | SemFormer-main/inference_classification.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='resnet50', type=str)
parser.add_argument('--mode', default='normal', type=str)
###############################################################################
# Inference parameters
###############################################################################
parser.add_argument('--tag', default='', type=str)
parser.add_argument('--domain', default='train', type=str)
parser.add_argument('--scales', default='0.5,1.0,1.5,2.0', type=str)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
experiment_name = args.tag
if 'train' in args.domain:
experiment_name += '@train'
else:
experiment_name += '@val'
experiment_name += '@scale=%s'%args.scales
pred_dir = create_directory(f'./experiments/predictions/{experiment_name}/')
model_path = './experiments/models/' + f'{args.tag}.pth'
set_seed(args.seed)
log_func = lambda string='': print(string)
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
# for mIoU
meta_dic = read_json('./data/VOC_2012.json')
dataset = VOC_Dataset_For_Making_CAM(args.data_dir, args.domain)
###################################################################################
# Network
###################################################################################
model = Classifier(args.architecture, meta_dic['classes'], mode=args.mode)
model = model.cuda()
model.eval()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
try:
use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
use_gpu = '0'
the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
model = nn.DataParallel(model)
load_model(model, model_path, parallel=the_number_of_gpu > 1)
#################################################################################################
# Evaluation
#################################################################################################
eval_timer = Timer()
scales = [float(scale) for scale in args.scales.split(',')]
model.eval()
eval_timer.tik()
    def get_cam(ori_image, scale):
        """Compute class activation maps for one image at the given scale.

        Runs the classifier on the rescaled image and its horizontal flip and
        sums the two (un-flipped) CAM tensors. Uses `ori_w`, `ori_h`,
        `normalize_fn` and `model` from the enclosing scope; returns a
        (num_classes, h, w) tensor of non-negative activations.
        """
        # preprocessing
        image = copy.deepcopy(ori_image)
        # NOTE(review): PIL.Image.CUBIC is deprecated in newer Pillow releases —
        # presumably equivalent to Image.BICUBIC; verify against installed Pillow.
        image = image.resize((round(ori_w*scale), round(ori_h*scale)), resample=PIL.Image.CUBIC)
        image = normalize_fn(image)
        image = image.transpose((2, 0, 1))  # HWC -> CHW for torch
        image = torch.from_numpy(image)
        flipped_image = image.flip(-1)
        # Batch of two: original + horizontally flipped.
        images = torch.stack([image, flipped_image])
        images = images.cuda()
        # inferenece
        _, features = model(images, with_cam=True)
        # postprocessing
        cams = F.relu(features)
        # Flip the mirrored CAMs back before merging with the originals.
        cams = cams[0] + cams[1].flip(-1)
        return cams
stride1 = 4
stride2 = 8 if '38' in args.architecture else 16
with torch.no_grad():
length = len(dataset)
for step, (ori_image, image_id, label, gt_mask) in enumerate(dataset):
ori_w, ori_h = ori_image.size
npy_path = pred_dir + image_id + '.npy'
if os.path.isfile(npy_path):
continue
strided_size = get_strided_size((ori_h, ori_w), stride1)
strided_up_size = get_strided_up_size((ori_h, ori_w), stride2)
cams_list = [get_cam(ori_image, scale) for scale in scales]
strided_cams_list = [resize_for_tensors(cams.unsqueeze(0), strided_size)[0] for cams in cams_list]
strided_cams = torch.sum(torch.stack(strided_cams_list), dim=0)
hr_cams_list = [resize_for_tensors(cams.unsqueeze(0), strided_up_size)[0] for cams in cams_list]
hr_cams = torch.sum(torch.stack(hr_cams_list), dim=0)[:, :ori_h, :ori_w]
keys = torch.nonzero(torch.from_numpy(label))[:, 0]
strided_cams = strided_cams[keys]
strided_cams /= F.adaptive_max_pool2d(strided_cams, (1, 1)) + 1e-5
hr_cams = hr_cams[keys]
hr_cams /= F.adaptive_max_pool2d(hr_cams, (1, 1)) + 1e-5
# save cams
keys = np.pad(keys + 1, (1, 0), mode='constant')
np.save(npy_path, {"keys": keys, "cam": strided_cams.cpu(), "hr_cam": hr_cams.cpu().numpy()})
sys.stdout.write('\r# Make CAM [{}/{}] = {:.2f}%, ({}, {})'.format(step + 1, length, (step + 1) / length * 100, (ori_h, ori_w), hr_cams.size()))
sys.stdout.flush()
print()
if args.domain == 'train_aug':
args.domain = 'train'
print("python evaluate.py --experiment_name {} --domain {}".format(experiment_name, args.domain)) | 6,793 | 34.202073 | 156 | py |
SemFormer | SemFormer-main/make_affinity_labels.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import joblib
import multiprocessing
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.puzzle_utils import *
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
parser.add_argument('--n_jobs', default=multiprocessing.cpu_count(), type=int)
###############################################################################
# Inference parameters
###############################################################################
parser.add_argument('--experiment_name', default='resnet50@seed=0@bs=16@ep=5@nesterov@train@scale=0.5,1.0,1.5,2.0', type=str)
parser.add_argument('--domain', default='train', type=str)
parser.add_argument('--fg_threshold', default=0.11, type=float)
parser.add_argument('--bg_threshold', default=0.15, type=float)
parser.add_argument('--clear_cache', default=False, type=str2bool)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
experiment_name = args.experiment_name
pred_dir = f'./experiments/predictions/{experiment_name}/'
aff_dir = create_directory('./experiments/predictions/{}@aff_fg={:.2f}_bg={:.2f}/'.format(experiment_name, args.fg_threshold, args.bg_threshold))
set_seed(args.seed)
log_func = lambda string='': print(string)
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
meta_dic = read_json('./data/VOC_2012.json')
dataset = VOC_Dataset_For_Making_CAM(args.data_dir, args.domain)
#################################################################################################
# Convert
#################################################################################################
eval_timer = Timer()
length = len(dataset)
    # Process per sample
    def process(i):
        """Build the affinity pseudo-label for dataset sample `i` and save it as PNG.

        Output pixel values: class index for confident foreground, 0 for
        confident background, 255 for uncertain regions to be ignored.
        Reads CAMs from `pred_dir` and writes into `aff_dir` (enclosing scope).
        """
        ori_image, image_id, _, _ = dataset.__getitem__(i)
        png_path = aff_dir + image_id + '.png'
        # load
        image = np.asarray(ori_image)
        cam_dict = np.load(pred_dir + image_id + '.npy', allow_pickle=True).item()
        ori_h, ori_w, c = image.shape
        keys = cam_dict['keys']
        cams = cam_dict['hr_cam']
        # 1. find confident fg & bg
        # Prepend a constant "background score" channel; argmax then yields 0
        # wherever no class CAM exceeds the threshold, and CRF refines the hard
        # label map against the RGB image. The higher bg_threshold gives a more
        # conservative foreground estimate.
        fg_cam = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.fg_threshold)
        fg_cam = np.argmax(fg_cam, axis=0)
        fg_conf = keys[crf_inference_label(image, fg_cam, n_labels=keys.shape[0])]
        bg_cam = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.bg_threshold)
        bg_cam = np.argmax(bg_cam, axis=0)
        bg_conf = keys[crf_inference_label(image, bg_cam, n_labels=keys.shape[0])]
        # 2. combine confident fg & bg
        conf = fg_conf.copy()
        conf[fg_conf == 0] = 255  # not confidently foreground -> ignore
        conf[bg_conf + fg_conf == 0] = 0  # background under both thresholds -> bg
        imageio.imwrite(png_path, conf.astype(np.uint8))
# make affinity label with multi-process
joblib.Parallel(n_jobs=args.n_jobs, verbose=10, pre_dispatch="all")(
[joblib.delayed(process)(i) for i in range(len(dataset))]
)
print('Finish making affinity labels. Affinity labels dir: {}'.format(aff_dir))
| 4,537 | 34.732283 | 149 | py |
SemFormer | SemFormer-main/train_classification.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
###############################################################################
# Network
###############################################################################
# parser.add_argument('--architecture', default='resnet50', type=str)
parser.add_argument('--architecture', default='resnet38', type=str)
parser.add_argument('--mode', default='normal', type=str) # fix
###############################################################################
# Hyperparameter
###############################################################################
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--max_epoch', default=20, type=int)
parser.add_argument('--lr', default=0.1, type=float)
parser.add_argument('--wd', default=5e-4, type=float)
parser.add_argument('--nesterov', default=True, type=str2bool)
parser.add_argument('--image_size', default=448, type=int)
parser.add_argument('--min_image_size', default=224, type=int)
parser.add_argument('--max_image_size', default=896, type=int)
parser.add_argument('--print_ratio', default=0.1, type=float)
parser.add_argument('--tag', default='', type=str)
parser.add_argument('--augment', default='colorjitter', type=str)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
log_dir = create_directory(f'./experiments/logs/')
data_dir = create_directory(f'./experiments/data/')
model_dir = create_directory('./experiments/models/')
tensorboard_dir = create_directory(f'./experiments/tensorboards/{args.tag}/')
log_path = log_dir + f'{args.tag}.txt'
data_path = data_dir + f'{args.tag}.json'
model_path = model_dir + f'{args.tag}.pth'
set_seed(args.seed)
log_func = lambda string='': log_print(string, log_path)
log_func('[i] {}'.format(args.tag))
log_func()
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
train_transforms = [
RandomResize(args.min_image_size, args.max_image_size),
RandomHorizontalFlip(),
]
if 'colorjitter' in args.augment:
train_transforms.append(transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1))
if 'randaugment' in args.augment:
train_transforms.append(RandAugmentMC(n=2, m=10))
train_transform = transforms.Compose(train_transforms + \
[
Normalize(imagenet_mean, imagenet_std),
RandomCrop(args.image_size),
Transpose()
]
)
test_transform = transforms.Compose([
Normalize_For_Segmentation(imagenet_mean, imagenet_std),
Top_Left_Crop_For_Segmentation(args.image_size),
Transpose_For_Segmentation()
])
meta_dic = read_json('./data/VOC_2012.json')
class_names = np.asarray(meta_dic['class_names'])
train_dataset = VOC_Dataset_For_Classification(args.data_dir, 'train_aug', train_transform)
train_dataset_for_seg = VOC_Dataset_For_Testing_CAM(args.data_dir, 'train', test_transform)
valid_dataset_for_seg = VOC_Dataset_For_Testing_CAM(args.data_dir, 'val', test_transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, drop_last=True)
train_loader_for_seg = DataLoader(train_dataset_for_seg, batch_size=args.batch_size, num_workers=1, drop_last=True)
valid_loader_for_seg = DataLoader(valid_dataset_for_seg, batch_size=args.batch_size, num_workers=1, drop_last=True)
log_func('[i] mean values is {}'.format(imagenet_mean))
log_func('[i] std values is {}'.format(imagenet_std))
log_func('[i] The number of class is {}'.format(meta_dic['classes']))
log_func('[i] train_transform is {}'.format(train_transform))
log_func('[i] test_transform is {}'.format(test_transform))
log_func()
val_iteration = len(train_loader)
log_iteration = int(val_iteration * args.print_ratio)
max_iteration = args.max_epoch * val_iteration
log_func('[i] log_iteration : {:,}'.format(log_iteration))
log_func('[i] val_iteration : {:,}'.format(val_iteration))
log_func('[i] max_iteration : {:,}'.format(max_iteration))
###################################################################################
# Network
###################################################################################
model = Classifier(args.architecture, meta_dic['classes'], mode=args.mode)
param_groups = model.get_parameter_groups(print_fn=None)
model = model.cuda()
model.train()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
try:
use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
use_gpu = '0'
the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
model = nn.DataParallel(model)
load_model_fn = lambda: load_model(model, model_path, parallel=the_number_of_gpu > 1)
save_model_fn = lambda: save_model(model, model_path, parallel=the_number_of_gpu > 1)
###################################################################################
# Loss, Optimizer
###################################################################################
class_loss_fn = nn.MultiLabelSoftMarginLoss(reduction='none').cuda()
log_func('[i] The number of pretrained weights : {}'.format(len(param_groups[0])))
log_func('[i] The number of pretrained bias : {}'.format(len(param_groups[1])))
log_func('[i] The number of scratched weights : {}'.format(len(param_groups[2])))
log_func('[i] The number of scratched bias : {}'.format(len(param_groups[3])))
optimizer = PolyOptimizer([
{'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wd},
{'params': param_groups[1], 'lr': 2*args.lr, 'weight_decay': 0},
{'params': param_groups[2], 'lr': 10*args.lr, 'weight_decay': args.wd},
{'params': param_groups[3], 'lr': 20*args.lr, 'weight_decay': 0},
], lr=args.lr, momentum=0.9, weight_decay=args.wd, max_step=max_iteration, nesterov=args.nesterov)
#################################################################################################
# Train
#################################################################################################
data_dic = {
'train' : [],
'validation' : []
}
train_timer = Timer()
eval_timer = Timer()
train_meter = Average_Meter(['loss', 'class_loss'])
best_train_mIoU = -1
thresholds = list(np.arange(0.10, 0.50, 0.05))
def evaluate(loader):
    """Search CAM background thresholds on `loader`; return (best_th, best_mIoU).

    Relies on the enclosing scope's `model`, `writer`, `iteration`,
    `thresholds`, `eval_timer`, and the ImageNet normalization constants.
    Puts the model back into train() mode before returning.
    """
    model.eval()
    eval_timer.tik()

    # One mIoU accumulator per candidate background threshold.
    meter_dic = {th: Calculator_For_mIoU('./data/VOC_2012.json') for th in thresholds}

    with torch.no_grad():
        length = len(loader)
        for step, (images, labels, gt_masks) in enumerate(loader):
            images = images.cuda()
            labels = labels.cuda()

            _, features = model(images, with_cam=True)

            # Zero out CAM channels of classes absent from the image.
            mask = labels.unsqueeze(2).unsqueeze(3)
            cams = (make_cam(features) * mask)

            # TensorBoard overlays from the first few batches only.
            if step < 3:
                obj_cams = cams.max(dim=1)[0]

                # BUGFIX: the original iterated `range(8)` unconditionally and
                # crashed with an index error whenever a batch held < 8 images
                # (e.g. a final partial batch).
                for b in range(min(8, images.size(0))):
                    image = get_numpy_from_tensor(images[b])
                    cam = get_numpy_from_tensor(obj_cams[b])

                    image = denormalize(image, imagenet_mean, imagenet_std)[..., ::-1]
                    h, w, c = image.shape

                    cam = (cam * 255).astype(np.uint8)
                    cam = cv2.resize(cam, (w, h), interpolation=cv2.INTER_LINEAR)
                    cam = colormap(cam)

                    image = cv2.addWeighted(image, 0.5, cam, 0.5, 0)[..., ::-1]
                    image = image.astype(np.float32) / 255.

                    writer.add_image('CAM/{}'.format(b + 1), image, iteration, dataformats='HWC')

            # Accumulate per-threshold predictions against the ground truth.
            for batch_index in range(images.size()[0]):
                # c, h, w -> h, w, c
                cam = get_numpy_from_tensor(cams[batch_index]).transpose((1, 2, 0))
                gt_mask = get_numpy_from_tensor(gt_masks[batch_index])

                h, w, c = cam.shape
                gt_mask = cv2.resize(gt_mask, (w, h), interpolation=cv2.INTER_NEAREST)

                for th in thresholds:
                    # Prepend a constant background plane; argmax picks bg
                    # wherever every class activation falls below `th`.
                    bg = np.ones_like(cam[:, :, 0]) * th
                    pred_mask = np.argmax(np.concatenate([bg[..., np.newaxis], cam], axis=-1), axis=-1)
                    meter_dic[th].add(pred_mask, gt_mask)

            sys.stdout.write('\r# Evaluation [{}/{}] = {:.2f}%'.format(step + 1, length, (step + 1) / length * 100))
            sys.stdout.flush()
    print(' ')
    model.train()

    # Pick the threshold with the highest mIoU.
    best_th = 0.0
    best_mIoU = 0.0
    for th in thresholds:
        mIoU, mIoU_foreground = meter_dic[th].get(clear=True)
        if best_mIoU < mIoU:
            best_th = th
            best_mIoU = mIoU

    return best_th, best_mIoU
writer = SummaryWriter(tensorboard_dir)
# Iterator wraps the DataLoader so batches can be drawn indefinitely.
train_iterator = Iterator(train_loader)
for iteration in range(max_iteration):
images, labels = train_iterator.get()
images, labels = images.cuda(), labels.cuda()
#################################################################################################
logits = model(images)
# Multi-label classification loss; per-sample values averaged to a scalar.
class_loss = class_loss_fn(logits, labels).mean()
loss = class_loss
#################################################################################################
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_meter.add({
'loss' : loss.item(),
'class_loss' : class_loss.item()
})
#################################################################################################
# For Log
#################################################################################################
if (iteration + 1) % log_iteration == 0:
# `get(clear=True)` returns the running means and resets the meter.
loss, class_loss = train_meter.get(clear=True)
learning_rate = float(get_learning_rate_from_optimizer(optimizer))
data = {
'iteration' : iteration + 1,
'learning_rate' : learning_rate,
'loss' : loss,
'class_loss' : class_loss,
'time' : train_timer.tok(clear=True),
}
data_dic['train'].append(data)
write_json(data_path, data_dic)
log_func('[i] \
iteration={iteration:,}, \
learning_rate={learning_rate:.4f}, \
loss={loss:.4f}, \
class_loss={class_loss:.4f}, \
time={time:.0f}sec'.format(**data)
)
writer.add_scalar('Train/loss', loss, iteration)
writer.add_scalar('Train/class_loss', class_loss, iteration)
writer.add_scalar('Train/learning_rate', learning_rate, iteration)
#################################################################################################
# Evaluation
#################################################################################################
if (iteration + 1) % val_iteration == 0:
# Threshold search on the training segmentation split; the checkpoint is
# kept whenever train mIoU improves.
threshold, mIoU = evaluate(train_loader_for_seg)
if best_train_mIoU == -1 or best_train_mIoU < mIoU:
best_train_mIoU = mIoU
save_model_fn()
log_func('[i] save model')
data = {
'iteration' : iteration + 1,
'threshold' : threshold,
'train_mIoU' : mIoU,
'best_train_mIoU' : best_train_mIoU,
'time' : eval_timer.tok(clear=True),
}
data_dic['validation'].append(data)
write_json(data_path, data_dic)
log_func('[i] \
iteration={iteration:,}, \
threshold={threshold:.2f}, \
train_mIoU={train_mIoU:.2f}%, \
best_train_mIoU={best_train_mIoU:.2f}%, \
time={time:.0f}sec'.format(**data)
)
writer.add_scalar('Evaluation/threshold', threshold, iteration)
writer.add_scalar('Evaluation/train_mIoU', mIoU, iteration)
writer.add_scalar('Evaluation/best_train_mIoU', best_train_mIoU, iteration)
write_json(data_path, data_dic)
writer.close()
print(args.tag) | 14,549 | 38.754098 | 132 | py |
SemFormer | SemFormer-main/train_segmentation.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
# Command-line interface for the segmentation training script.
parser = argparse.ArgumentParser()

# BUGFIX: the original indexed os.environ['CUDA_VISIBLE_DEVICES'] directly and
# raised KeyError whenever the variable was unset. Later in this same file the
# GPU count falls back to '0' via try/except, so do the same here.
devices = os.environ.get('CUDA_VISIBLE_DEVICES', '0')

###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
parser.add_argument('--val_split', default='val', type=str)

###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='DeepLabv3+', type=str)
parser.add_argument('--backbone', default='resnest269', type=str)
parser.add_argument('--mode', default='fix', type=str)
parser.add_argument('--use_gn', default=True, type=str2bool)
parser.add_argument('--dropout_ratios', default=[0.5, 0.1], type=float, nargs=2)

###############################################################################
# Hyperparameter
###############################################################################
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--max_epoch', default=50, type=int)
parser.add_argument('--lr', default=0.007, type=float)
parser.add_argument('--wd', default=4e-5, type=float)
parser.add_argument('--nesterov', default=True, type=str2bool)

parser.add_argument('--image_size', default=512, type=int)
parser.add_argument('--min_image_size', default=256, type=int)
parser.add_argument('--max_image_size', default=1024, type=int)

parser.add_argument('--print_ratio', default=0.1, type=float)

parser.add_argument('--tag', default='', type=str)
parser.add_argument('--augment', default='', type=str)
# Name of the pseudo-label experiment whose predictions are used as training masks.
parser.add_argument('--label_name', required=True, type=str)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
log_dir = create_directory(f'./experiments/logs/')
data_dir = create_directory(f'./experiments/data/')
model_dir = create_directory('./experiments/models/')
tensorboard_dir = create_directory(f'./experiments/tensorboards/{args.tag}/')
# Directory holding the pseudo labels produced by the earlier pipeline stage.
pred_dir = './experiments/predictions/{}/'.format(args.label_name)
log_path = log_dir + f'{args.tag}.txt'
data_path = data_dir + f'{args.tag}.json'
model_path = model_dir + f'{args.tag}.pth'
set_seed(args.seed)
log_func = lambda string='': log_print(string, log_path)
log_func('[i] {}'.format(args.tag))
log_func('[i] {}'.format(args))
log_func()
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
train_transforms = [
RandomResize_For_Segmentation(args.min_image_size, args.max_image_size),
RandomHorizontalFlip_For_Segmentation(),
]
if 'colorjitter' in args.augment:
train_transforms.append(ColorJitter_For_Segmentation(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1))
train_transforms = train_transforms + [
Normalize_For_Segmentation(imagenet_mean, imagenet_std),
RandomCrop_For_Segmentation(args.image_size),
]
train_transform = transforms.Compose(train_transforms + [Transpose_For_Segmentation()])
test_transform = transforms.Compose([
Normalize_For_Segmentation(imagenet_mean, imagenet_std),
Top_Left_Crop_For_Segmentation(args.image_size),
Transpose_For_Segmentation()
])
meta_dic = read_json('./data/VOC_2012.json')
class_names = np.asarray(meta_dic['class_names'])
# Training uses pseudo labels from pred_dir; validation uses real GT masks.
train_dataset = VOC_Dataset_For_WSSS(args.data_dir, 'train_aug', pred_dir, train_transform)
valid_dataset = VOC_Dataset_For_Segmentation(args.data_dir, args.val_split, test_transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, drop_last=True)
# NOTE(review): drop_last=True on the *validation* loader silently skips the
# final partial batch, biasing the reported mIoU — confirm this is intended.
valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, num_workers=1, shuffle=False, drop_last=True)
log_func('[i] mean values is {}'.format(imagenet_mean))
log_func('[i] std values is {}'.format(imagenet_std))
log_func('[i] The number of class is {}'.format(meta_dic['classes']))
log_func('[i] train_transform is {}'.format(train_transform))
log_func()
# Validate once per epoch; log `print_ratio` times per epoch.
val_iteration = len(train_loader)
log_iteration = int(val_iteration * args.print_ratio)
max_iteration = args.max_epoch * val_iteration
log_func('[i] log_iteration : {:,}'.format(log_iteration))
log_func('[i] val_iteration : {:,}'.format(val_iteration))
log_func('[i] max_iteration : {:,}'.format(max_iteration))
###################################################################################
# Network
###################################################################################
# +1 output channel for the background class.
model = DeepLabv3_Plus(args.backbone, num_classes=meta_dic['classes'] + 1,
mode=args.mode, use_group_norm=args.use_gn, dropout_ratios=args.dropout_ratios)
param_groups = model.get_parameter_groups(None)
params = [
{'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wd},
{'params': param_groups[1], 'lr': 2*args.lr, 'weight_decay': 0},
{'params': param_groups[2], 'lr': 10*args.lr, 'weight_decay': args.wd},
{'params': param_groups[3], 'lr': 20*args.lr, 'weight_decay': 0},
]
model = model.cuda()
model.train()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
# Fall back to GPU '0' when CUDA_VISIBLE_DEVICES is not exported.
try:
use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
use_gpu = '0'
the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
model = nn.DataParallel(model)
load_model_fn = lambda: load_model(model, model_path, parallel=the_number_of_gpu > 1)
save_model_fn = lambda: save_model(model, model_path, parallel=the_number_of_gpu > 1)
save_model_fn_for_backup = lambda: save_model(model, model_path.replace('.pth', f'_backup.pth'), parallel=the_number_of_gpu > 1)
###################################################################################
# Loss, Optimizer
###################################################################################
# 255 marks "void" pixels in VOC masks; they are excluded from the loss.
class_loss_fn = nn.CrossEntropyLoss(ignore_index=255).cuda()
log_func('[i] The number of pretrained weights : {}'.format(len(param_groups[0])))
log_func('[i] The number of pretrained bias : {}'.format(len(param_groups[1])))
log_func('[i] The number of scratched weights : {}'.format(len(param_groups[2])))
log_func('[i] The number of scratched bias : {}'.format(len(param_groups[3])))
optimizer = PolyOptimizer(params, lr=args.lr, momentum=0.9, weight_decay=args.wd, max_step=max_iteration, nesterov=args.nesterov)
#################################################################################################
# Train
#################################################################################################
data_dic = {
'train' : [],
'validation' : [],
}
train_timer = Timer()
eval_timer = Timer()
train_meter = Average_Meter(['loss'])
best_valid_mIoU = -1
def evaluate(loader):
    """Compute segmentation mIoU of `model` over `loader`.

    Uses the enclosing scope's `model`, `writer`, `iteration`, `eval_timer`,
    `train_dataset` and normalization constants; logs mask overlays for the
    first batch, then returns the accumulated meter result.
    """
    model.eval()
    eval_timer.tik()

    meter = Calculator_For_mIoU('./data/VOC_2012.json')

    with torch.no_grad():
        total = len(loader)
        for step, (images, labels) in enumerate(loader):
            images, labels = images.cuda(), labels.cuda()

            predictions = torch.argmax(model(images), dim=1)

            # Visualize prediction overlays for the very first batch only.
            if step == 0:
                for index in range(len(images)):
                    rgb = denormalize(get_numpy_from_tensor(images[index]), imagenet_mean, imagenet_std)[..., ::-1]
                    h, w, c = rgb.shape

                    colored = decode_from_colormap(get_numpy_from_tensor(predictions[index]), train_dataset.colors)
                    colored = cv2.resize(colored, (w, h), interpolation=cv2.INTER_NEAREST)

                    overlay = cv2.addWeighted(rgb, 0.5, colored, 0.5, 0)[..., ::-1]
                    overlay = overlay.astype(np.float32) / 255.
                    writer.add_image('Mask/{}'.format(index + 1), overlay, iteration, dataformats='HWC')

            # Accumulate IoU statistics, resizing GT to the prediction's shape.
            for index in range(images.size()[0]):
                pred_mask = get_numpy_from_tensor(predictions[index])
                gt_mask = get_numpy_from_tensor(labels[index])

                h, w = pred_mask.shape
                gt_mask = cv2.resize(gt_mask, (w, h), interpolation=cv2.INTER_NEAREST)
                meter.add(pred_mask, gt_mask)

            sys.stdout.write('\r# Evaluation [{}/{}] = {:.2f}%'.format(step + 1, total, (step + 1) / total * 100))
            sys.stdout.flush()
    print(' ')

    model.train()
    return meter.get(clear=True)
writer = SummaryWriter(tensorboard_dir)
train_iterator = Iterator(train_loader)
# NOTE(review): anomaly detection slows every backward pass noticeably;
# confirm it is meant to stay enabled outside of debugging.
torch.autograd.set_detect_anomaly(True)
for iteration in range(max_iteration):
images, labels = train_iterator.get()
images, labels = images.cuda(), labels.cuda()
#################################################################################################
# Inference
#################################################################################################
logits = model(images)
###############################################################################
# The part is to calculate losses.
###############################################################################
# Only 'Seg'-style architectures output a smaller logit map that needs the
# labels downscaled to match; the default 'DeepLabv3+' skips this branch.
if 'Seg' in args.architecture:
labels = resize_for_tensors(labels.type(torch.FloatTensor).unsqueeze(1), logits.size()[2:], 'nearest', None)[:, 0, :, :]
labels = labels.type(torch.LongTensor).cuda()
loss = class_loss_fn(logits, labels)
#################################################################################################
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_meter.add({
'loss' : loss.item(),
})
#################################################################################################
# For Log
#################################################################################################
if (iteration + 1) % log_iteration == 0:
loss = train_meter.get(clear=True)[0]
learning_rate = float(get_learning_rate_from_optimizer(optimizer))
# Estimate remaining wall-clock time from the last logging window.
t = train_timer.tok(clear=True)
left_sec = (max_iteration - (iteration + 1)) * t / log_iteration
left_min = int(left_sec // 60)
left_sec = int(left_sec - (left_min * 60))
data = {
'iteration' : iteration + 1,
'max_iteration': max_iteration,
'learning_rate' : learning_rate,
'loss' : loss,
'time' : t,
'left_min' : left_min,
'left_sec' : left_sec
}
data_dic['train'].append(data)
write_json(data_path, data_dic)
log_func('[i] {} devices: {}'.format(args.tag, devices))
log_func('[i] \
iteration={iteration:,}/{max_iteration:,}, \
learning_rate={learning_rate:.4f}, \
loss={loss:.4f}, \
time={time:.0f}sec, \
left_time={left_min:d}:{left_sec:d}'.format(**data)
)
writer.add_scalar('Train/loss', loss, iteration)
writer.add_scalar('Train/learning_rate', learning_rate, iteration)
#################################################################################################
# Evaluation
#################################################################################################
if (iteration + 1) % val_iteration == 0:
# Validate once per epoch; keep the checkpoint with the best val mIoU.
mIoU, _ = evaluate(valid_loader)
if best_valid_mIoU == -1 or best_valid_mIoU < mIoU:
best_valid_mIoU = mIoU
save_model_fn()
log_func('[i] save model')
data = {
'iteration' : iteration + 1,
'mIoU' : mIoU,
'best_valid_mIoU' : best_valid_mIoU,
'time' : eval_timer.tok(clear=True),
}
data_dic['validation'].append(data)
write_json(data_path, data_dic)
log_func('[i] \
iteration={iteration:,}, \
mIoU={mIoU:.2f}%, \
best_valid_mIoU={best_valid_mIoU:.2f}%, \
time={time:.0f}sec'.format(**data)
)
writer.add_scalar('Evaluation/mIoU', mIoU, iteration)
writer.add_scalar('Evaluation/best_valid_mIoU', best_valid_mIoU, iteration)
write_json(data_path, data_dic)
writer.close()
print(args.tag) | 14,851 | 39.249322 | 133 | py |
SemFormer | SemFormer-main/train_affinitynet.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
# CLI for AffinityNet training (random-walk affinity refinement stage).
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='resnet50', type=str)
###############################################################################
# Hyperparameter
###############################################################################
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--max_epoch', default=3, type=int)
parser.add_argument('--lr', default=0.1, type=float)
parser.add_argument('--wd', default=1e-4, type=float)
parser.add_argument('--nesterov', default=True, type=str2bool)
parser.add_argument('--image_size', default=512, type=int)
parser.add_argument('--min_image_size', default=320, type=int)
parser.add_argument('--max_image_size', default=640, type=int)
parser.add_argument('--print_ratio', default=0.1, type=float)
parser.add_argument('--tag', default='', type=str)
parser.add_argument('--augment', default='colorjitter', type=str)
# Directory/experiment providing the CAM-based pseudo labels used as affinity supervision.
parser.add_argument('--pred_dir', default='./experiments/predictions/', type=str)
parser.add_argument('--label_name', required=True, type=str)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
log_dir = create_directory(f'./experiments/logs/')
data_dir = create_directory(f'./experiments/data/')
model_dir = create_directory('./experiments/models/')
tensorboard_dir = create_directory(f'./experiments/tensorboards/{args.tag}/')
log_path = log_dir + f'{args.tag}.txt'
data_path = data_dir + f'{args.tag}.json'
model_path = model_dir + f'{args.tag}.pth'
set_seed(args.seed)
log_func = lambda string='': log_print(string, log_path)
log_func('[i] {}'.format(args.tag))
log_func()
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
# Affinity labels live on a feature map downscaled by this output stride.
stride = 4
train_transform = [
RandomResize_For_Segmentation(args.min_image_size, args.max_image_size),
RandomHorizontalFlip_For_Segmentation(),
]
if 'colorjitter' in args.augment:
train_transform.append(ColorJitter_For_Segmentation(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1))
train_transform = transforms.Compose(train_transform + [
Normalize_For_Segmentation(imagenet_mean, imagenet_std),
RandomCrop_For_Segmentation(args.image_size),
Transpose_For_Segmentation(),
Resize_For_Mask(args.image_size // stride),
])
meta_dic = read_json('./data/VOC_2012.json')
class_names = np.asarray(meta_dic['class_names'])
# Pixel-pair index within a radius of 10 on the stride-4 grid.
path_index = PathIndex(radius=10, default_size=(args.image_size // stride, args.image_size // stride))
train_dataset = VOC_Dataset_For_Affinity(args.data_dir, 'train_aug', path_index=path_index,
label_dir=args.pred_dir + '{}/'.format(args.label_name), transform=train_transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, drop_last=True)
log_func('[i] mean values is {}'.format(imagenet_mean))
log_func('[i] std values is {}'.format(imagenet_std))
log_func('[i] The number of class is {}'.format(meta_dic['classes']))
log_func('[i] train_transform is {}'.format(train_transform))
log_func()
val_iteration = len(train_loader)
log_iteration = int(val_iteration * args.print_ratio)
max_iteration = args.max_epoch * val_iteration
log_func('[i] log_iteration : {:,}'.format(log_iteration))
log_func('[i] val_iteration : {:,}'.format(val_iteration))
log_func('[i] max_iteration : {:,}'.format(max_iteration))
###################################################################################
# Network
###################################################################################
# BUGFIX: the original unconditionally read args.resolution and args.patch_size,
# but this script's parser registers neither option, so startup always crashed
# with AttributeError. Guard the lookups with getattr; `pos_embed_size` is not
# used anywhere below and is kept only for parity with related scripts.
if args.image_size != getattr(args, 'resolution', args.image_size):
    pos_embed_size = args.image_size // args.patch_size
else:
    pos_embed_size = None

model = AffinityNet(args.architecture, path_index)

# Only the newly added edge layers are optimized in this stage.
param_groups = list(model.edge_layers.parameters())

model = model.cuda()
model.train()

log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()

# Fall back to GPU '0' when CUDA_VISIBLE_DEVICES is not exported.
try:
    use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
    use_gpu = '0'

the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
    log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
    model = nn.DataParallel(model)

# Checkpoint helpers; `parallel` handles the DataParallel 'module.' prefix.
load_model_fn = lambda: load_model(model, model_path, parallel=the_number_of_gpu > 1)
save_model_fn = lambda: save_model(model, model_path, parallel=the_number_of_gpu > 1)
save_model_fn_for_backup = lambda: save_model(model, model_path.replace('.pth', f'_backup.pth'), parallel=the_number_of_gpu > 1)

###################################################################################
# Loss, Optimizer
###################################################################################
optimizer = PolyOptimizer([
    {'params': param_groups, 'lr': args.lr, 'weight_decay': args.wd},
], lr=args.lr, momentum=0.9, weight_decay=args.wd, max_step=max_iteration, nesterov=args.nesterov)
#################################################################################################
# Train
#################################################################################################
data_dic = {
'train' : [],
}
train_timer = Timer()
train_meter = Average_Meter([
'loss',
'bg_loss', 'fg_loss', 'neg_loss',
])
writer = SummaryWriter(tensorboard_dir)
train_iterator = Iterator(train_loader)
# NOTE(review): anomaly detection slows every backward pass noticeably;
# confirm it is meant to stay enabled outside of debugging.
torch.autograd.set_detect_anomaly(True)
def cal_loss(bg_pos_label, fg_pos_label, neg_label, aff):
    """Return (bg_pos, fg_pos, pos, neg) affinity losses for one batch.

    `aff` holds predicted pairwise affinities in (0, 1); the three label
    tensors are binary masks selecting background-positive, foreground-positive
    and negative pairs. Each term is a masked mean of the corresponding
    log-likelihood penalty. NOTE: this helper is currently unused — the
    training loop below re-implements the same computation inline.
    """
    eps = 1e-5
    # Penalty when the pair should have HIGH affinity.
    attract = -torch.log(aff + eps)
    # Penalty when the pair should have LOW affinity.
    repel = -torch.log(1. + eps - aff)

    bg_pos_aff_loss = (bg_pos_label * attract).sum() / (bg_pos_label.sum() + eps)
    fg_pos_aff_loss = (fg_pos_label * attract).sum() / (fg_pos_label.sum() + eps)
    pos_aff_loss = bg_pos_aff_loss / 2 + fg_pos_aff_loss / 2
    neg_aff_loss = (neg_label * repel).sum() / (neg_label.sum() + eps)

    return bg_pos_aff_loss, fg_pos_aff_loss, pos_aff_loss, neg_aff_loss
for iteration in range(max_iteration):
images, labels = train_iterator.get()
images = images.cuda()
# labels is a triple of binary pair masks on the stride-4 grid.
bg_pos_label = labels[0].cuda(non_blocking=True)
fg_pos_label = labels[1].cuda(non_blocking=True)
neg_label = labels[2].cuda(non_blocking=True)
#################################################################################################
# Affinity Matrix
#################################################################################################
aff = model(images, with_affinity=True)
###############################################################################
# The part is to calculate losses.
###############################################################################
# NOTE(review): the lines below duplicate cal_loss() defined above, which is
# therefore dead code; consider calling it instead.
pos_aff_loss = (-1) * torch.log(aff + 1e-5)
neg_aff_loss = (-1) * torch.log(1. + 1e-5 - aff)
bg_pos_aff_loss = torch.sum(bg_pos_label * pos_aff_loss) / (torch.sum(bg_pos_label) + 1e-5)
fg_pos_aff_loss = torch.sum(fg_pos_label * pos_aff_loss) / (torch.sum(fg_pos_label) + 1e-5)
pos_aff_loss = bg_pos_aff_loss / 2 + fg_pos_aff_loss / 2
neg_aff_loss = torch.sum(neg_label * neg_aff_loss) / (torch.sum(neg_label) + 1e-5)
loss = (pos_aff_loss + neg_aff_loss) / 2
#################################################################################################
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_meter.add({
'loss' : loss.item(),
'bg_loss' : bg_pos_aff_loss.item(),
'fg_loss' : fg_pos_aff_loss.item(),
'neg_loss' : neg_aff_loss.item(),
})
#################################################################################################
# For Log
#################################################################################################
if (iteration + 1) % log_iteration == 0:
loss, bg_loss, fg_loss, neg_loss = train_meter.get(clear=True)
learning_rate = float(get_learning_rate_from_optimizer(optimizer))
# Estimate remaining wall-clock time from the last logging window.
t = train_timer.tok(clear=True)
left_sec = (max_iteration - (iteration + 1)) * t / log_iteration
left_min = int(left_sec // 60)
left_sec = int(left_sec - (left_min * 60))
data = {
'iteration' : iteration + 1,
'learning_rate' : learning_rate,
'loss' : loss,
'bg_loss' : bg_loss,
'fg_loss' : fg_loss,
'neg_loss' : neg_loss,
'time' : t,
'left_min' : left_min,
'left_sec' : left_sec
}
data_dic['train'].append(data)
write_json(data_path, data_dic)
log_func('[i] iteration={iteration:,}, learning_rate={learning_rate:.4f}, loss={loss:.4f}, \n\
\r bg_loss={bg_loss:.4f}, fg_loss={fg_loss:.4f}, neg_loss={neg_loss:.4f}, \n\
\r time={time:.0f}sec, left_time={left_min:d}:{left_sec:d}'.format(**data)
)
writer.add_scalar('Train/loss', loss, iteration)
writer.add_scalar('Train/bg_loss', bg_loss, iteration)
writer.add_scalar('Train/fg_loss', fg_loss, iteration)
writer.add_scalar('Train/neg_loss', neg_loss, iteration)
writer.add_scalar('Train/learning_rate', learning_rate, iteration)
#################################################################################################
# Evaluation
#################################################################################################
# No validation here: the checkpoint is simply saved once per epoch...
if (iteration + 1) % val_iteration == 0:
save_model_fn()
# ...and once more after training ends (redundant with the per-epoch save
# on the final iteration, but harmless).
save_model_fn()
write_json(data_path, data_dic)
writer.close()
print(args.tag) | 12,118 | 38.865132 | 132 | py |
SemFormer | SemFormer-main/make_pseudo_labels.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import joblib
import multiprocessing
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.puzzle_utils import *
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
# CLI for converting saved CAM/random-walk score maps into PNG pseudo labels.
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
# Worker count for the joblib fan-out below; defaults to half the CPUs.
parser.add_argument('--n_jobs', default=multiprocessing.cpu_count() // 2, type=int)
###############################################################################
# Inference parameters
###############################################################################
parser.add_argument('--experiment_name', default='', type=str)
parser.add_argument('--domain', default='train', type=str)
# Background score: pixels whose class scores all fall below this become background.
parser.add_argument('--threshold', default=0.25, type=float)
parser.add_argument('--crf_iteration', default=1, type=int)
parser.add_argument('--clear_cache', default=False, type=str2bool)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
cam_dir = f'./experiments/predictions/{args.experiment_name}/'
pred_dir = create_directory(f'./experiments/predictions/{args.experiment_name}@crf={args.crf_iteration}/')
set_seed(args.seed)
log_func = lambda string='': print(string)
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
dataset = VOC_Dataset_For_Making_CAM(args.data_dir, args.domain)
#################################################################################################
# Evaluation
#################################################################################################
eval_timer = Timer()
print('Pseudo label dir: {}'.format(pred_dir))
with torch.no_grad():
length = len(dataset)
# Process per sample
def process(i):
# Convert one image's saved score maps into a PNG pseudo label.
ori_image, image_id, label, gt_mask = dataset.__getitem__(i)
png_path = pred_dir + image_id + '.png'
ori_w, ori_h = ori_image.size
predict_dict = np.load(cam_dir + image_id + '.npy', allow_pickle=True).item()
keys = predict_dict['keys']
# Prefer random-walk-refined scores when present, else high-res CAMs.
if 'rw' in predict_dict.keys():
cams = predict_dict['rw']
else:
cams = predict_dict['hr_cam']
# Prepend a constant background plane at the threshold value, then argmax.
cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.threshold)
cams = np.argmax(cams, axis=0)
# Optional dense-CRF refinement of the hard label map.
if args.crf_iteration > 0:
cams = crf_inference_label(np.asarray(ori_image), cams, n_labels=keys.shape[0], t=args.crf_iteration)
# Map local channel indices back to VOC class ids and save.
conf = keys[cams]
imageio.imwrite(png_path, conf.astype(np.uint8))
# make pseudo label with multi-process
joblib.Parallel(n_jobs=args.n_jobs, verbose=10, pre_dispatch="all")(
[joblib.delayed(process)(i) for i in range(len(dataset))]
)
print("python evaluate.py --experiment_name {} --mode png".format(args.experiment_name + f'@crf={args.crf_iteration}'))
| 4,297 | 34.520661 | 123 | py |
SemFormer | SemFormer-main/inference_segmentation.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
from tqdm import tqdm
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.puzzle_utils import *
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
# --start/--end select a fractional slice of the dataset (for sharded inference).
parser.add_argument('--start', default=0.0, type=float)
parser.add_argument('--end', default=1.0, type=float)
###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='DeepLabv3+', type=str)
parser.add_argument('--backbone', default='resnest101', type=str)
parser.add_argument('--mode', default='fix', type=str)
parser.add_argument('--use_gn', default=True, type=str2bool)
###############################################################################
# Inference parameters
###############################################################################
parser.add_argument('--tag', default='', type=str)
parser.add_argument('--domain', default='val', type=str)
parser.add_argument('--save_type', default='png', type=str)
# Comma-separated multi-scale factors used for test-time augmentation.
parser.add_argument('--scales', default='0.5,1.0,1.5,2.0', type=str)
# CRF iterations for post-processing (0 disables CRF).
parser.add_argument('--iteration', default=0, type=int)
# NOTE(review): indentation was flattened in this dump; nesting below is inferred.
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
model_dir = create_directory('./experiments/models/')
model_path = model_dir + f'{args.tag}.pth'
# The tag is extended with domain/scale/iteration so each configuration
# writes to its own prediction directory.
if 'train' in args.domain:
args.tag += '@train'
else:
args.tag += '@' + args.domain
args.tag += '@scale=%s'%args.scales
args.tag += '@iteration=%d'%args.iteration
pred_dir = create_directory('./experiments/predictions/{}/'.format(args.tag))
set_seed(args.seed)
log_func = lambda string='': print(string)
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
meta_dic = read_json('./data/VOC_2012.json')
dataset = VOC_Dataset_For_Evaluation(args.data_dir, args.domain)
###################################################################################
# Network
###################################################################################
# Select the segmentation head; all variants use #classes + 1 (background).
if args.architecture == 'DeepLabv3+':
model = DeepLabv3_Plus(args.backbone, num_classes=meta_dic['classes'] + 1, mode=args.mode, use_group_norm=args.use_gn)
elif args.architecture == 'Seg_Model':
model = Seg_Model(args.backbone, num_classes=meta_dic['classes'] + 1)
elif args.architecture == 'CSeg_Model':
model = CSeg_Model(args.backbone, num_classes=meta_dic['classes'] + 1)
model = model.cuda()
model.eval()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
load_model(model, model_path, parallel=False)
#################################################################################################
# Evaluation
#################################################################################################
eval_timer = Timer()
scales = [float(scale) for scale in args.scales.split(',')]
model.eval()
eval_timer.tik()
def inference(images, image_size):
    # Forward a [image, hflipped-image] pair through the enclosing-scope model
    # and fuse the two predictions at the requested (h, w) resolution.
    batch = images.cuda()
    logits = resize_for_tensors(model(batch), image_size)
    # Un-flip the second prediction before summing with the first.
    return logits[0] + logits[1].flip(-1)
# Multi-scale + horizontal-flip test-time augmentation over the selected
# dataset slice; predictions are fused by summation, softmaxed, optionally
# CRF-refined, and saved as PNG and/or NPY.
log_func('[i] pred_dir: {}'.format(pred_dir))
with torch.no_grad():
dataset_len = len(dataset)
start = int(dataset_len * args.start)
end = int(dataset_len * args.end)
length = end - start
for item_id in tqdm(
range(start, end),
total=length,
dynamic_ncols=True,
):
item = dataset.__getitem__(item_id)
ori_image, image_id, gt_mask = item
ori_w, ori_h = ori_image.size
cams_list = []
# One forward pass per scale; each pass internally also handles the flip.
for scale in scales:
image = copy.deepcopy(ori_image)
image = image.resize((round(ori_w*scale), round(ori_h*scale)), resample=PIL.Image.CUBIC)
image = normalize_fn(image)
image = image.transpose((2, 0, 1))
image = torch.from_numpy(image)
flipped_image = image.flip(-1)
images = torch.stack([image, flipped_image])
cams = inference(images, (ori_h, ori_w))
cams_list.append(cams)
preds = torch.stack(cams_list, dim=0) # (#n_scale, c, h, w)
preds = torch.sum(preds, dim=0) # (c, h, w)
preds = F.softmax(preds, dim=0) # (c, h, w)
preds = preds.cpu() # (c, h, w)
if args.iteration > 0:
preds = preds.numpy() # (c, h, w)
preds = crf_inference(np.asarray(ori_image), preds, t=args.iteration)
if 'png' in args.save_type:
if isinstance(preds, torch.Tensor):
preds = preds.numpy() # (c, h, w)
pred_mask = np.argmax(preds, axis=0)
# 'test' submissions are color-encoded; otherwise raw class-index PNGs.
if args.domain == 'test':
pred_mask = decode_from_colormap(pred_mask, dataset.colors)[..., ::-1]
imageio.imwrite(pred_dir + image_id + '.png', pred_mask.astype(np.uint8))
elif 'colorful' in args.save_type:
pred_mask = decode_from_colormap(pred_mask, dataset.colors)[..., ::-1]
imageio.imwrite(pred_dir + image_id + '_decode.png', pred_mask.astype(np.uint8))
else:
imageio.imwrite(pred_dir + image_id + '.png', pred_mask.astype(np.uint8))
if 'npy' in args.save_type:
if isinstance(preds, np.ndarray):
preds = torch.from_numpy(preds).cuda()
# Save a 4x-downsampled copy alongside the full-resolution probabilities.
downsample_preds = F.interpolate(preds[None, ...], size=(ori_h // 4, ori_w // 4), mode='bilinear') # (1, c, h, w)
downsample_preds = downsample_preds[0] # (c, h, w)
np.save(pred_dir + image_id + '.npy',
{"cam": downsample_preds.cpu(), "hr_cam": preds.cpu().numpy()})
print()
if args.domain == 'val':
print("python evaluate.py --experiment_name {} --domain {} --mode {}".format(args.tag, args.domain, args.save_type))
SemFormer | SemFormer-main/train_semformer.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
devices = os.environ.get('CUDA_VISIBLE_DEVICES', '0')
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=6, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='deit', type=str)
parser.add_argument('--version', default='small', type=str)
parser.add_argument('--patch_size', default=16, type=int)
parser.add_argument('--resolution', default=224, type=int)
parser.add_argument('--in21k', default=False, type=str2bool)
parser.add_argument('--class_dim', default=256, type=int)
parser.add_argument('--reduction', default='sum', type=str)
# ae_* options configure the frozen class-aware auto-encoder used as teacher.
parser.add_argument('--ae_decoder_width', default=768, type=int)
parser.add_argument('--ae_decoder_depth', default=8, type=int)
parser.add_argument('--ae_architecture', default='deit_distilled', type=str)
parser.add_argument('--ae_version', default='base', type=str)
parser.add_argument('--ae_patch_size', default=16, type=int)
parser.add_argument('--ae_resolution', default=224, type=int)
parser.add_argument('--ae_in21k', default=False, type=str2bool)
###############################################################################
# Hyperparameter
###############################################################################
parser.add_argument('--batch_size', default=8, type=int)
parser.add_argument('--max_epoch', default=20, type=int)
parser.add_argument('--lr', default=0.005, type=float)
parser.add_argument('--wd', default=5e-4, type=float)
parser.add_argument('--nesterov', default=True, type=str2bool)
# Per-loss weights combined in the total training objective.
parser.add_argument('--seg_sim_weight', default=1.0, type=float)
parser.add_argument('--cls_fg_weight', default=1.0, type=float)
parser.add_argument('--cls_bg_weight', default=1.0, type=float)
parser.add_argument('--act_supp_weight', default=0.075, type=float)
parser.add_argument('--act_cplt_weight', default=1.0, type=float)
parser.add_argument('--image_size', default=448, type=int)
parser.add_argument('--min_image_size', default=224, type=int)
parser.add_argument('--max_image_size', default=896, type=int)
parser.add_argument('--ae_image_size', default=224, type=int)
parser.add_argument('--print_ratio', default=0.1, type=float)
parser.add_argument('--tag', default='', type=str)
parser.add_argument('--augment', default='colorjitter,randomexpand', type=str)
parser.add_argument('--ae_tag', default='', type=str)
parser.add_argument('--stuck', action='store_true')
###############################################################################
# Categories
###############################################################################
# VOC 2012 class names, index 0 = background.
categories = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
def cal_act_cplt_loss(mask, labels, grad_masks=None):
    """Activation-completeness loss.

    For every class present in an image, its activation map plus the max over
    the *other* present classes' maps should sum to 1 at each pixel; the loss
    is the mean squared deviation from that target.  Images whose label has
    fewer than two present classes contribute nothing.

    Args:
        mask: (B, C, H, W) activation maps.
        labels: (B, C) multi-hot presence labels.
        grad_masks: optional (B, 1, H, W) 0/1 mask restricting which pixels
            contribute (both to the error and to the normalizer).

    Returns:
        Scalar tensor; a zero tensor when no image has two or more classes.
    """
    assert (mask.dim() == 4) and (labels.dim() == 2)
    assert mask.shape[:2] == labels.shape[:2]
    B, C, H, W = mask.shape
    use_grad_mask = grad_masks is not None
    if use_grad_mask:
        valid_pixels = grad_masks[:, 0, :, :].bool()
    total = 0.
    denom = 0
    for b in range(B):
        for c in labels[b].nonzero(as_tuple=False).view(-1):
            remaining = labels[b].clone()
            remaining[c] = 0
            other_ids = remaining.nonzero(as_tuple=False).view(-1)
            # A class with no co-occurring classes has no completeness target.
            if len(other_ids) < 1:
                continue
            denom += grad_masks[b].sum().item() if use_grad_mask else H * W
            rival = mask[b, other_ids, :, :].max(dim=0)[0]
            pixel_err = (mask[b, c, :, :] + rival - 1) ** 2
            if use_grad_mask:
                total = total + pixel_err[valid_pixels[b]].sum()
            else:
                total = total + pixel_err.sum()
    if denom == 0:
        return mask.new_zeros([1]).mean()
    return total.sum() / denom
def cal_act_supp_loss(masks, labels):
    """Activation-suppression loss.

    Pushes each object channel (index >= 1) toward 0 activation while pushing
    the background channel (index 0) toward 1.  Only the object channels are
    suppressed: the background class legitimately covers much larger regions
    than any object class, so suppressing it as well hurts performance.

    `labels` is unused; it is kept for signature parity with the other losses.
    """
    object_term = masks[:, 1:, :, :].mean()
    background_term = 1 - masks[:, 0, :, :].mean()
    return object_term + background_term
# NOTE(review): indentation was flattened in this dump; nesting below is inferred.
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
log_dir = create_directory(f'./experiments/logs/')
data_dir = create_directory(f'./experiments/data/')
model_dir = create_directory('./experiments/models/')
tensorboard_dir = create_directory(f'./experiments/tensorboards/{args.tag}/')
log_path = log_dir + f'{args.tag}.txt'
data_path = data_dir + f'{args.tag}.json'
model_path = model_dir + f'{args.tag}.pth'
class_model_path = model_dir + f'{args.tag}@classifier.pth'
set_seed(args.seed)
log_func = lambda string='': log_print(string, log_path)
log_func('[i] {}'.format(args))
log_func('[i] {}'.format(args.tag))
log_func()
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
train_transforms = []
if 'colorjitter' in args.augment:
train_transforms.append(transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1))
train_transforms += [
RandomResize(args.min_image_size, args.max_image_size),
RandomHorizontalFlip(),
]
# Training crops track their bounding box so grad_masks can be built later.
train_transform = transforms.Compose(train_transforms + \
[
Normalize(imagenet_mean, imagenet_std),
RandomCrop(args.image_size, with_bbox=True),
Transpose_with_BBox()
]
)
test_transform = transforms.Compose([
Normalize_For_Segmentation(imagenet_mean, imagenet_std),
Top_Left_Crop_For_Segmentation(args.image_size),
Transpose_For_Segmentation()
])
meta_dic = read_json('./data/VOC_2012.json')
class_names = np.asarray(meta_dic['class_names'])
train_dataset = VOC_Dataset_For_Classification_DetachPadding(args.data_dir, 'train_aug', train_transform)
train_dataset_for_seg = VOC_Dataset_For_Testing_CAM(args.data_dir, 'train', test_transform)
valid_dataset_for_seg = VOC_Dataset_For_Testing_CAM(args.data_dir, 'val', test_transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, drop_last=True)
train_loader_for_seg = DataLoader(train_dataset_for_seg, batch_size=args.batch_size, num_workers=1, drop_last=True)
valid_loader_for_seg = DataLoader(valid_dataset_for_seg, batch_size=args.batch_size, num_workers=1, drop_last=True)
log_func('[i] mean values is {}'.format(imagenet_mean))
log_func('[i] std values is {}'.format(imagenet_std))
log_func('[i] The number of class is {}'.format(meta_dic['classes']))
log_func('[i] train_transform is {}'.format(train_transform))
log_func('[i] test_transform is {}'.format(test_transform))
log_func()
# One evaluation pass per epoch; logging every print_ratio of an epoch.
val_iteration = len(train_loader)
log_iteration = int(val_iteration * args.print_ratio)
max_iteration = args.max_epoch * val_iteration
# val_iteration = log_iteration
log_func('[i] log_iteration : {:,}'.format(log_iteration))
log_func('[i] val_iteration : {:,}'.format(val_iteration))
log_func('[i] max_iteration : {:,}'.format(max_iteration))
###################################################################################
# Network
###################################################################################
num_classes = meta_dic['classes'] + 1
# Resize the AE's positional embedding only when training size differs from
# its pre-training resolution.
if args.ae_image_size != args.ae_resolution:
ae_pos_embed_size = args.ae_image_size // args.ae_patch_size
else:
ae_pos_embed_size = None
ae = ClassAwareAutoEncoder(
decoder_width=args.ae_decoder_width,
decoder_depth=args.ae_decoder_depth,
num_classes=num_classes,
class_dim=args.class_dim,
reduction=args.reduction,
model_name=args.ae_architecture,
version=args.ae_version,
patch_size=args.ae_patch_size,
resolution=args.ae_resolution,
in21k=args.ae_in21k,
pos_embed_size=ae_pos_embed_size
)
ae.load_state_dict(torch.load('./experiments/models/{}.pth'.format(args.ae_tag)), strict=True)
if args.image_size != args.resolution:
pos_embed_size = args.image_size // args.patch_size
else:
pos_embed_size = None
model = SemFormer(
class_dim=args.class_dim,
model_name=args.architecture,
num_classes=num_classes,
version=args.version,
patch_size=args.patch_size,
resolution=args.resolution,
in21k=args.in21k,
pos_embed_size=pos_embed_size
)
ae.eval()
# Collect parameter groups BEFORE attaching the frozen AE so its weights
# are excluded from the optimizer.
param_groups = model.get_parameter_groups(print_fn=None)
model.ae = ae
for param in model.ae.parameters():
param.requires_grad = False
model = model.cuda()
model.train()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
try:
use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
use_gpu = '0'
the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
model = nn.DataParallel(model)
load_model_fn = lambda: load_model(model, model_path, ignore_modules=['ae'], parallel=the_number_of_gpu > 1)
save_model_fn = lambda: save_model(model, model_path, ignore_modules=['ae'], parallel=the_number_of_gpu > 1)
###################################################################################
# Loss, Optimizer
###################################################################################
log_func('[i] The number of pretrained weights : {}'.format(len(param_groups[0])))
log_func('[i] The number of pretrained bias : {}'.format(len(param_groups[1])))
log_func('[i] The number of scratched weights : {}'.format(len(param_groups[2])))
log_func('[i] The number of scratched bias : {}'.format(len(param_groups[3])))
# Standard 1x/2x/10x/20x learning-rate multipliers for pretrained/scratch
# weights and biases, with polynomial decay.
optimizer = PolyOptimizer([
{'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wd},
{'params': param_groups[1], 'lr': 2*args.lr, 'weight_decay': 0},
{'params': param_groups[2], 'lr': 10*args.lr, 'weight_decay': args.wd},
{'params': param_groups[3], 'lr': 20*args.lr, 'weight_decay': 0},
], lr=args.lr, momentum=0.9, weight_decay=args.wd, max_step=max_iteration, nesterov=args.nesterov)
#################################################################################################
# Train
#################################################################################################
data_dic = {
'train' : [],
'validation' : []
}
train_timer = Timer()
eval_timer = Timer()
loss_names = [
'loss',
'seg_sim_loss',
'cls_fg_loss',
'cls_bg_loss',
'act_supp_loss',
'act_cplt_loss'
]
train_meter = Average_Meter(loss_names)
best_train_mIoU = -1
# Background-threshold sweep used when scoring CAMs as segmentations.
thresholds = list(np.arange(0.01, 1.00, 0.01))
# Score the current model's CAMs against ground-truth masks over `loader`,
# sweeping all background thresholds and returning the best one.
# Also logs CAM overlays for the first 3 batches to TensorBoard.
# Returns (best_threshold, best_mIoU, per-class IoU dict, FP, FN).
def evaluate(loader):
model.eval()
eval_timer.tik()
meter_dic = {th : Calculator_For_mIoU_CUDA('./data/VOC_2012.json') for th in thresholds}
with torch.no_grad():
length = len(loader)
for step, (images, labels, gt_masks) in enumerate(loader):
images = images.cuda()
labels = labels.cuda()
_, features = model(images)
# Crop padded borders, then drop channel 0 (background) before scoring.
features = features[..., :images.shape[-2], :images.shape[-1]]
features = features[:, 1:, :, :]
# Zero out channels for classes absent from the image-level labels.
mask = labels.unsqueeze(2).unsqueeze(3)
cams = (make_cam(features) * mask)
# for visualization
if step < 3:
obj_cams = cams.max(dim=1)[0]
for b in range(images.shape[0]):
image = get_numpy_from_tensor(images[b])
cam = get_numpy_from_tensor(obj_cams[b])
all_cam = get_numpy_from_tensor(cams[b])
label = get_numpy_from_tensor(labels[b])
image = denormalize(image, imagenet_mean, imagenet_std)[..., ::-1]
h, w, c = image.shape
cam = (cam * 255).astype(np.uint8)
if cam.shape[-2] != h or cam.shape[-1] != w:
cam = cv2.resize(cam, (w, h), interpolation=cv2.INTER_LINEAR)
cam = colormap(cam)
image_obj = cv2.addWeighted(image, 0.5, cam, 0.5, 0)[..., ::-1]
image_obj = image_obj.astype(np.float32) / 255.
writer.add_image('CAM-obj/{}-{}'.format(step, b + 1), image_obj, iteration, dataformats='HWC')
# for each class
for cls_idx in range(label.shape[0]):
if label[cls_idx] > 0:
cam_cls = all_cam[cls_idx]
cam_cls = (cam_cls * 255).astype(np.uint8)
if cam_cls.shape[-2] != h or cam_cls.shape[-1] != w:
cam_cls = cv2.resize(cam_cls, (w, h), interpolation=cv2.INTER_LINEAR)
cam_cls = colormap(cam_cls)
image_cls = cv2.addWeighted(image, 0.5, cam_cls, 0.5, 0)[..., ::-1]
image_cls = image_cls.astype(np.float32) / 255.
writer.add_image('CAM-{}-{}/{}-{}'.format(cls_idx + 1, categories[cls_idx + 1], step, b + 1), image_cls, iteration, dataformats='HWC')
gt_masks = gt_masks.cuda()
if gt_masks.shape[-2:] != cams.shape[-2:]:
gt_masks = F.interpolate(gt_masks[:, None].float(), size=cams.shape[-2:], mode='nearest').long()[:, 0]
for th in thresholds:
# Prepend a constant background plane valued `th`, then argmax per pixel.
cam = F.pad(cams, (0, 0, 0, 0, 1, 0), mode='constant', value=th)
pred_mask = torch.argmax(cam, dim=1)
meter_dic[th].add(pred_mask, gt_masks)
sys.stdout.write('\r# {} Evaluation [{}/{}] = {:.2f}%'.format(args.tag, step + 1, length, (step + 1) / length * 100))
sys.stdout.flush()
print(' ')
model.train()
# Pick the threshold with the highest mIoU.
best_th = 0.0
best_mIoU = 0.0
IoU_dic = {}
FP = 0.0
FN = 0.0
for th in thresholds:
mIoU, mIoU_foreground, IoU_dic_, FP_, FN_ = meter_dic[th].get(detail=True, clear=True)
if best_mIoU < mIoU:
best_th = th
best_mIoU = mIoU
IoU_dic = IoU_dic_
FP = FP_
FN = FN_
return best_th, best_mIoU, IoU_dic, FP, FN
writer = SummaryWriter(tensorboard_dir)
train_iterator = Iterator(train_loader)
for iteration in range(max_iteration):
images, labels, crop_regions = train_iterator.get()
images, labels, crop_regions = images.cuda(), labels.cuda(), crop_regions.cuda()
# Map the crop bounding boxes into AE resolution and build a 0/1 validity
# mask of non-padded pixels; shape becomes (B, 1, H, W).
grad_masks = create_mask(
(crop_regions * (args.ae_image_size / args.image_size)).floor().long(),
(args.ae_image_size, args.ae_image_size))[:, None, :, :]
#################################################################################################
if args.ae_image_size != args.image_size:
ae_images = F.interpolate(images, size=(args.ae_image_size, args.ae_image_size), mode='bilinear',
align_corners=True)
else:
ae_images = images
# Prepend an always-present background label to the multi-hot labels.
bg_labels = labels.new_ones([labels.shape[0], 1])
labels = torch.cat([bg_labels, labels], dim=1)
results = model(images, ae_images, labels, grad_masks, stage='forward_train')
masks, seg_sim, cls_fg_pull_sim, cls_fg_push_sim, cls_bg_pull_sim, cls_bg_push_sim = results
seg_sim_loss = F.binary_cross_entropy(seg_sim, labels)
act_supp_loss = cal_act_supp_loss(masks, labels)
act_cplt_loss = cal_act_cplt_loss(masks, labels, grad_masks)
# Pull similarities toward 1, push similarities toward -1.
cls_fg_pull_loss = 1 - nanmean(cls_fg_pull_sim)
cls_fg_push_loss = 1 + nanmean(cls_fg_push_sim)
cls_fg_loss = (cls_fg_pull_loss + cls_fg_push_loss) / 2.
cls_bg_pull_loss = 1 - nanmean(cls_bg_pull_sim)
cls_bg_push_loss = 1 + nanmean(cls_bg_push_sim)
cls_bg_loss = (cls_bg_pull_loss + cls_bg_push_loss) / 2.
loss = args.seg_sim_weight * seg_sim_loss + \
args.cls_fg_weight * cls_fg_loss + \
args.cls_bg_weight * cls_bg_loss + \
args.act_supp_weight * act_supp_loss + \
args.act_cplt_weight * act_cplt_loss
#################################################################################################
optimizer.zero_grad()
loss.backward()
optimizer.step()
# NOTE(review): eval(k) resolves each loss name against the local scope;
# it works only because every name in loss_names is a local variable here.
# An explicit dict literal would be safer and clearer.
loss_dict = {k: float(eval(k)) for k in loss_names}
train_meter.add(loss_dict)
#################################################################################################
# For Log
#################################################################################################
if (iteration + 1) % log_iteration == 0:
losses = train_meter.get(keys=loss_names, clear=True)
learning_rate = float(get_learning_rate_from_optimizer(optimizer))
t = train_timer.tok(clear=True)
left_sec = (max_iteration - (iteration + 1)) * t / log_iteration
left_min = int(left_sec // 60)
left_sec = int(left_sec - (left_min * 60))
data = {
'iteration' : iteration + 1,
'max_iteration': max_iteration,
'learning_rate' : learning_rate,
'time' : t,
'left_min' : left_min,
'left_sec' : left_sec
}
data.update({loss_names[i]: losses[i] for i in range(len(loss_names))})
data_dic['train'].append(data)
write_json(data_path, data_dic)
format_string = nice_format(data)
log_func(format_string)
for loss_name in loss_names:
writer.add_scalar('Train/{}'.format(loss_name), data[loss_name], iteration)
writer.add_scalar('Train/learning_rate', learning_rate, iteration)
#################################################################################################
# Evaluation
#################################################################################################
# Evaluate once per epoch; keep only the best-mIoU checkpoint.
if (iteration + 1) % val_iteration == 0:
threshold, mIoU, IoU_dic, FP, FN = evaluate(train_loader_for_seg)
if best_train_mIoU == -1 or best_train_mIoU < mIoU:
best_train_mIoU = mIoU
save_model_fn()
log_func('[i] save model')
data = {
'iteration' : iteration + 1,
'threshold' : threshold,
'train_mIoU' : mIoU,
'best_train_mIoU' : best_train_mIoU,
'FP': FP,
'FN': FN,
'time' : eval_timer.tok(clear=True),
}
data_dic['validation'].append(data)
write_json(data_path, data_dic)
log_func('[i] {} devices: {}'.format(args.tag, devices))
log_func('[i] iteration={iteration:,}, threshold={threshold:.2f}, train_mIoU={train_mIoU:.2f}%, best_train_mIoU={best_train_mIoU:.2f}%, FP={FP:.2f}, FN={FN:.2f}, time={time:.0f}sec'.format(**data)
)
writer.add_scalar('Evaluation/threshold', threshold, iteration)
writer.add_scalar('Evaluation/train_mIoU', mIoU, iteration)
writer.add_scalar('Evaluation/best_train_mIoU', best_train_mIoU, iteration)
writer.add_scalar('Evaluation/FP', FP, iteration)
writer.add_scalar('Evaluation/FN', FN, iteration)
write_json(data_path, data_dic)
writer.close()
print(args.tag)
SemFormer | SemFormer-main/inference_semformer.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
from tqdm import tqdm
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
# --start/--end select a fractional slice of the dataset (for sharded inference).
parser.add_argument('--start', default=0.0, type=float)
parser.add_argument('--end', default=1.0, type=float)
###############################################################################
# Network
###############################################################################
parser.add_argument('--architecture', default='deit', type=str)
parser.add_argument('--version', default='small', type=str)
parser.add_argument('--patch_size', default=16, type=int)
parser.add_argument('--resolution', default=224, type=int)
parser.add_argument('--in21k', default=False, type=str2bool)
parser.add_argument('--train_img_size', default=448, type=int)
parser.add_argument('--cra_layers', default=4, type=int)
parser.add_argument('--class_dim', default=256, type=int)
parser.add_argument('--with_cra', default=True, type=str2bool)
###############################################################################
# Inference parameters
###############################################################################
parser.add_argument('--tag', default='', type=str)
parser.add_argument('--domain', default='train', type=str)
parser.add_argument('--scales', default='0.5,1.0,1.5,2.0', type=str)
# How multi-scale CAMs are fused; must name a torch reduction (sum/mean/max).
parser.add_argument('--reduction', default='sum', type=str)
parser.add_argument('--clear_cache', default=False, type=str2bool)
# NOTE(review): indentation was flattened in this dump; nesting below is inferred.
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
experiment_name = args.tag
if 'train' in args.domain:
experiment_name += '@train'
else:
experiment_name += '@val'
experiment_name += '@scale=%s'%args.scales
pred_dir = create_directory(f'./experiments/predictions/{experiment_name}/')
model_path = './experiments/models/' + f'{args.tag}.pth'
set_seed(args.seed)
log_func = lambda string='': print(string)
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
meta_dic = read_json('./data/VOC_2012.json')
dataset = VOC_Dataset_For_Making_CAM(args.data_dir, args.domain)
###################################################################################
# Network
###################################################################################
model = SemFormer(
class_dim=args.class_dim,
model_name=args.architecture,
num_classes=meta_dic['classes'] + 1,
version=args.version,
patch_size=args.patch_size,
resolution=args.resolution,
in21k=args.in21k,
pos_embed_size=args.train_img_size // args.patch_size
)
model = model.cuda()
model.eval()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
try:
use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
use_gpu = '0'
the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
model = nn.DataParallel(model)
load_model(model, model_path, ignore_modules=['ae'], parallel=the_number_of_gpu > 1)
#################################################################################################
# Evaluation
#################################################################################################
eval_timer = Timer()
scales = [float(scale) for scale in args.scales.split(',')]
model.eval()
eval_timer.tik()
# Resolve e.g. torch.sum / torch.max from the --reduction string.
reduction_func = getattr(torch, args.reduction)
is_trans = ('vit' in args.architecture) or ('deit' in args.architecture)
# Produce CAMs for one image at one scale: returns [cam, unflipped-cam-of-flip].
# Uses enclosing-scope names: ori_w/ori_h (current sample size), model, args,
# normalize_fn, is_trans.
def get_cam(ori_image, scale):
# preprocessing
image = copy.deepcopy(ori_image)
if scale > 20: # for specific size
image = image.resize((int(scale), int(scale)), resample=PIL.Image.CUBIC)
else: # for scaling with float scalar
# Transformer backbones need sides divisible by the patch size.
if is_trans:
new_w, new_h = make_divisible(round(ori_w*scale), args.patch_size), make_divisible(round(ori_h*scale), args.patch_size)
image = image.resize((new_w, new_h), resample=PIL.Image.CUBIC)
else:
image = image.resize((round(ori_w*scale), round(ori_h*scale)), resample=PIL.Image.CUBIC)
image = normalize_fn(image)
image = image.transpose((2, 0, 1))
image = torch.from_numpy(image)
flipped_image = image.flip(-1)
images = torch.stack([image, flipped_image])
images = images.cuda()
# inference
# NOTE(review): indentation was flattened in this dump — the exact pairing of
# the second `if args.with_cra:` and the `else:` below is inferred; confirm
# against the repository source before refactoring.
if args.with_cra:
_, features, cra_list = model(images, return_cra=True)
features = F.relu(features)
if args.with_cra:
# Aggregate class-related attention from the last --cra_layers layers and
# use the min-max-normalized map to reweight the features.
cra_list = cra_list[-args.cra_layers:]
cra_list = [cra.mean(dim=1) for cra in cra_list]
cra = torch.stack(cra_list, dim=0).sum(dim=0)
cra = min_max_norm(cra, n_last_dim=1)
cra = cra.view(*features.shape)
features = features * cra
else:
_, features = model(images)
# Drop channel 0 (background) when CRA is disabled.
features = features[:, 1:, :, :]
# postprocessing
cams = F.relu(features)
cams = [cams[0], cams[1].flip(-1)]
return cams
# Two output resolutions are saved per image: a stride-4 "cam" and a
# full-resolution "hr_cam".
stride1 = 4
stride2 = 8 if '38' in args.architecture else 16
log_func(f'[i] stride1={stride1}, stride2={stride2}')
with torch.no_grad():
dataset_len = len(dataset)
start = int(dataset_len * args.start)
end = int(dataset_len * args.end)
length = end - start
for item_id in tqdm(
range(start, end),
total=length,
dynamic_ncols=True,
):
ori_image, image_id, label, gt_mask = dataset.__getitem__(item_id)
ori_w, ori_h = ori_image.size
npy_path = pred_dir + image_id + '.npy'
# Skip samples already processed unless --clear_cache is set.
if os.path.isfile(npy_path) and (not args.clear_cache):
continue
tensor_label = torch.from_numpy(label)
keys = torch.nonzero(tensor_label)[:, 0]
strided_size = get_strided_size((ori_h, ori_w), stride1)
strided_up_size = get_strided_up_size((ori_h, ori_w), stride2)
cams_list = []
for scale in scales:
cams_list += get_cam(ori_image, scale)
cams_list = [cams.unsqueeze(0) for cams in cams_list]
strided_cams_list = [resize_for_tensors(cams, strided_size)[0] for cams in cams_list]
strided_cams = reduction_func(torch.stack(strided_cams_list), dim=0)
# return tuple when reduction is `max`
if isinstance(strided_cams, (list, tuple)):
strided_cams = strided_cams[0]
hr_cams_list = [resize_for_tensors(cams, strided_up_size)[0] for cams in cams_list]
hr_cams = reduction_func(torch.stack(hr_cams_list), dim=0)
# return tuple when reduction is `max`
if isinstance(hr_cams, (list, tuple)):
hr_cams = hr_cams[0]
hr_cams = hr_cams[:, :ori_h, :ori_w]
# Keep only present classes and max-normalize each map into [0, 1].
strided_cams = strided_cams[keys]
strided_cams /= F.adaptive_max_pool2d(strided_cams, (1, 1)) + 1e-5
hr_cams = hr_cams[keys]
hr_cams /= F.adaptive_max_pool2d(hr_cams, (1, 1)) + 1e-5
# save cams
# Shift class ids by +1 and prepend 0 for the background slot.
keys = np.pad(keys + 1, (1, 0), mode='constant')
save_dict = dict(keys=keys, cam=strided_cams.cpu().numpy(), hr_cam=hr_cams.cpu().numpy())
np.save(npy_path, save_dict)
print()
if args.domain == 'train_aug':
args.domain = 'train'
print("python evaluate.py --experiment_name {} --domain {}".format(experiment_name, args.domain))
SemFormer | SemFormer-main/train_caae.py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import math
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
parser = argparse.ArgumentParser()
devices = os.environ.get('CUDA_VISIBLE_DEVICES', '0')
categories = [
'background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=16, type=int)
parser.add_argument('--data_dir', default='../VOC2012/', type=str)
###############################################################################
# Network
###############################################################################
parser.add_argument('--class_dim', default=256, type=int)
parser.add_argument('--reduction', default='sum', type=str)
parser.add_argument('--decoder_width', default=768, type=int)
parser.add_argument('--decoder_depth', default=8, type=int)
parser.add_argument('--architecture', default='deit_distilled', type=str)
parser.add_argument('--version', default='base', type=str)
parser.add_argument('--patch_size', default=16, type=int)
parser.add_argument('--resolution', default=224, type=int)
parser.add_argument('--in21k', default=False, type=str2bool)
parser.add_argument('--sim_weight', default=1.0, type=float)
parser.add_argument('--recon_weight', default=1.0, type=float)
###############################################################################
# Hyperparameter
###############################################################################
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--max_epoch', default=200, type=int)
parser.add_argument('--lr', default=0.025, type=float)
parser.add_argument('--wd', default=5e-4, type=float)
parser.add_argument('--nesterov', default=True, type=str2bool)
parser.add_argument('--image_size', default=224, type=int)
parser.add_argument('--print_ratio', default=0.1, type=float)
parser.add_argument('--tag', default='', type=str)
parser.add_argument('--augment', default='colorjitter,randomexpand', type=str)
if __name__ == '__main__':
###################################################################################
# Arguments
###################################################################################
args = parser.parse_args()
log_dir = create_directory(f'./experiments/logs/')
data_dir = create_directory(f'./experiments/data/')
model_dir = create_directory('./experiments/models/')
tensorboard_dir = create_directory(f'./experiments/tensorboards/{args.tag}/')
log_path = log_dir + f'{args.tag}.txt'
data_path = data_dir + f'{args.tag}.json'
model_path = model_dir + f'{args.tag}.pth'
class_model_path = model_dir + f'{args.tag}@classifier.pth'
set_seed(args.seed)
log_func = lambda string='': log_print(string, log_path)
log_func('[i] {}'.format(args))
log_func('[i] {}'.format(args.tag))
log_func()
###################################################################################
# Transform, Dataset, DataLoader
###################################################################################
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_fn = Normalize(imagenet_mean, imagenet_std)
train_transforms = []
if 'colorjitter' in args.augment:
train_transforms.append(transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1))
if 'randomexpand' in args.augment:
train_transforms.append(RandomExpand(scales=(1.0, 3.0)))
train_transforms += [
ResizedRandomCrop(args.image_size),
RandomHorizontalFlip()
]
train_transform = transforms.Compose(train_transforms + \
[
Normalize(imagenet_mean, imagenet_std),
Transpose()
]
)
test_transform = transforms.Compose([
Resize_For_Segmentation(args.image_size),
Normalize_For_Segmentation(imagenet_mean, imagenet_std),
Transpose_For_Segmentation()
])
meta_dic = read_json('./data/VOC_2012.json')
class_names = np.asarray(meta_dic['class_names'])
train_dataset = VOC_Dataset_For_Classification(args.data_dir, 'train_aug', train_transform)
train_dataset_for_seg = VOC_Dataset_For_Testing_CAM(args.data_dir, 'train', test_transform)
valid_dataset_for_seg = VOC_Dataset_For_Testing_CAM(args.data_dir, 'val', test_transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers,
shuffle=True, drop_last=True)
train_loader_for_seg = DataLoader(train_dataset_for_seg, batch_size=args.batch_size, num_workers=1, drop_last=True)
valid_loader_for_seg = DataLoader(valid_dataset_for_seg, batch_size=args.batch_size, num_workers=1, drop_last=True)
log_func('[i] mean values is {}'.format(imagenet_mean))
log_func('[i] std values is {}'.format(imagenet_std))
log_func('[i] The number of class is {}'.format(meta_dic['classes']))
log_func('[i] train_transform is {}'.format(train_transform))
log_func('[i] test_transform is {}'.format(test_transform))
log_func()
len_train_loader = val_iteration = len(train_loader)
log_iteration = int(len_train_loader * args.print_ratio)
max_iteration = args.max_epoch * len_train_loader
log_func('[i] log_iteration : {:,}'.format(log_iteration))
log_func('[i] val_iteration : {:,}'.format(val_iteration))
log_func('[i] max_iteration : {:,}'.format(max_iteration))
###################################################################################
# Network
###################################################################################
if args.image_size != args.resolution:
pos_embed_size = args.image_size // args.patch_size
else:
pos_embed_size = None
model = ClassAwareAutoEncoder(
decoder_width=args.decoder_width,
decoder_depth=args.decoder_depth,
num_classes=meta_dic['classes'] + 1,
class_dim=args.class_dim,
reduction=args.reduction,
model_name=args.architecture,
version=args.version,
patch_size=args.patch_size,
resolution=args.resolution,
in21k=args.in21k,
pos_embed_size=pos_embed_size
)
param_groups = model.get_parameter_groups(print_fn=None)
model = model.cuda()
model.train()
log_func('[i] Architecture is {}'.format(args.architecture))
log_func('[i] Total Params: %.2fM'%(calculate_parameters(model)))
log_func()
try:
use_gpu = os.environ['CUDA_VISIBLE_DEVICES']
except KeyError:
use_gpu = '0'
the_number_of_gpu = len(use_gpu.split(','))
if the_number_of_gpu > 1:
log_func('[i] the number of gpu : {}'.format(the_number_of_gpu))
model = nn.DataParallel(model)
load_model_fn = lambda: load_model(model, model_path, parallel=the_number_of_gpu > 1)
save_model_fn = lambda: save_model(model, model_path, parallel=the_number_of_gpu > 1)
###################################################################################
# Loss, Optimizer
###################################################################################
log_func('[i] The number of pretrained weights : {}'.format(len(param_groups[0])))
log_func('[i] The number of pretrained bias : {}'.format(len(param_groups[1])))
log_func('[i] The number of scratched weights : {}'.format(len(param_groups[2])))
log_func('[i] The number of scratched bias : {}'.format(len(param_groups[3])))
optimizer = PolyOptimizer([
{'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wd},
{'params': param_groups[1], 'lr': 2*args.lr, 'weight_decay': 0},
{'params': param_groups[2], 'lr': 10*args.lr, 'weight_decay': args.wd},
{'params': param_groups[3], 'lr': 20*args.lr, 'weight_decay': 0},
], lr=args.lr, momentum=0.9, weight_decay=args.wd, max_step=max_iteration, nesterov=args.nesterov)
#################################################################################################
# Train
#################################################################################################
data_dic = {
'train' : [],
'validation' : []
}
total_timer = Timer()
train_timer = Timer()
eval_timer = Timer()
loss_names = [
'loss', 'sim_loss', 'recon_loss'
]
train_meter = Average_Meter(loss_names)
best_loss = 1e8
best_train_mAP = -1
thresholds = list(np.arange(0.01, 1.00, 0.01))
def evaluate(loader):
# Sweeps every candidate threshold over class-region similarity scores and
# returns (best_threshold, best_mAP). Uses closed-over names from the
# enclosing script: model, eval_timer, thresholds, args.
model.eval()
eval_timer.tik()
# One mAP meter per candidate threshold.
meter_dic = {th : Calculator_For_mAP('./data/VOC_2012.json') for th in thresholds}
with torch.no_grad():
length = len(loader)
for step, (images, labels, gt_masks) in enumerate(loader):
images = images.cuda()
labels = labels.cuda()
# Prepend an always-on background column to the label matrix.
bg_labels = labels.new_ones([labels.shape[0], 1])
labels = torch.cat([bg_labels, labels], dim=1)
label_info = get_label_info(labels, return_type='dict')
# NOTE(review): classes_total is computed but never used below.
classes_total = label_info['classes_total']
cre = model(images, stage='get_cre')
crs = model(images, stage='get_crs')
sim = cosine_similarity(cre, crs, is_aligned=True)
# Binarize the similarity at every threshold and feed each meter.
for th in thresholds:
sim_th = sim.clone()
sim_th[sim_th > th] = 1.0
sim_th[sim_th <= th] = 0.0
meter_dic[th].add(sim_th, labels)
sys.stdout.write('\r# {} Evaluation [{}/{}] = {:.2f}%'.format(args.tag, step + 1, length, (step + 1) / length * 100))
sys.stdout.flush()
print(' ')
model.train()
# Keep the threshold whose meter reports the highest mAP.
best_th = 0.0
best_mAP = 0.0
for th in thresholds:
mAP = meter_dic[th].get(clear=True)
if best_mAP < mAP:
best_th = th
best_mAP = mAP
return best_th, best_mAP
writer = SummaryWriter(tensorboard_dir)
train_iterator = Iterator(train_loader)
for iteration in range(max_iteration):
images, labels = train_iterator.get()
images = images.cuda()
labels = labels.cuda()
bg_labels = labels.new_ones([labels.shape[0], 1])
labels = torch.cat([bg_labels, labels], dim=1)
sim, output = model(images, labels)
sim_loss = F.binary_cross_entropy(sim, labels)
recon_loss = F.mse_loss(output, images)
loss = args.sim_weight * sim_loss + \
args.recon_weight * recon_loss
#################################################################################################
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_dict = {k: float(eval(k)) for k in loss_names}
train_meter.add(loss_dict)
#################################################################################################
# For Log
#################################################################################################
if (iteration + 1) % log_iteration == 0:
losses = train_meter.get(keys=loss_names, clear=True)
learning_rate = float(get_learning_rate_from_optimizer(optimizer))
iter_time = train_timer.tok(clear=True)
iter_time_str = get_str_time(iter_time)
elapsed_time = total_timer.tok(clear=False)
elapsed_time_str = get_str_time(elapsed_time)
left_time = (max_iteration - (iteration + 1)) * iter_time / log_iteration
left_time_str = get_str_time(left_time)
data = {
'iteration': iteration + 1,
'epoch': (iteration // len_train_loader) + 1,
'max_iteration': max_iteration,
'learning_rate': learning_rate,
'iteration_time' : iter_time_str,
'elapsed_time': elapsed_time_str,
'left_time' : left_time_str,
}
data.update({loss_names[i]: losses[i] for i in range(len(loss_names))})
data_dic['train'].append(data)
write_json(data_path, data_dic)
format_string = nice_format(data)
log_func(format_string)
for loss_name in loss_names:
writer.add_scalar('Train/{}'.format(loss_name), data[loss_name], iteration)
writer.add_scalar('Train/learning_rate', learning_rate, iteration)
#################################################################################################
# Evaluation
#################################################################################################
if (iteration + 1) % val_iteration == 0:
threshold, train_mAP = evaluate(train_loader_for_seg)
epoch = iteration // len_train_loader
if (best_train_mAP == -1) or (best_train_mAP < train_mAP):
best_train_mAP = train_mAP
best_epoch = epoch
save_model_fn()
log_func('[i] save model')
data = {
'iteration' : iteration + 1,
'best_epoch': best_epoch + 1,
'epoch': epoch + 1,
'threshold' : threshold,
'train_mAP': train_mAP,
'best_train_mAP' : best_train_mAP,
'eval_time' : get_str_time(eval_timer.tok(clear=True)),
}
data_dic['validation'].append(data)
write_json(data_path, data_dic)
log_func('[i] {} devices: {}'.format(args.tag, devices))
format_string = nice_format(data)
log_func(format_string)
write_json(data_path, data_dic)
writer.close()
print(args.tag) | 15,231 | 36.517241 | 133 | py |
SemFormer | SemFormer-main/tools/ai/augment_utils.py | import cv2
import random
import numpy as np
from torchvision.transforms import transforms
from torchvision.transforms import functional as TF
import torch.nn.functional as F
from PIL import Image
def convert_OpenCV_to_PIL(image):
    """Convert a BGR numpy image (OpenCV layout) into an RGB PIL image."""
    rgb = image[..., ::-1]
    return Image.fromarray(rgb)
def convert_PIL_to_OpenCV(image):
    """Convert an RGB PIL image into a BGR numpy array (OpenCV layout)."""
    bgr = np.asarray(image)[..., ::-1]
    return bgr
def resized_crop(x, i, j, h, w, size, interpolation=Image.BILINEAR):
    """Crop box (i, j, h, w) out of PIL image *x*, then resize the crop to *size*."""
    cropped = TF.crop(x, i, j, h, w)
    return cropped.resize(size, interpolation)
class RandomExpand:
    """Upscale a PIL image by a factor drawn uniformly from *scales*."""

    def __init__(self, scales, interpolation=Image.BILINEAR):
        assert isinstance(scales, (list, tuple))
        assert len(scales) == 2
        assert 0 < scales[0] < scales[1]
        self.scales = scales
        self.interpolation = interpolation

    def __call__(self, image):
        w, h = image.size
        factor = np.random.uniform(self.scales[0], self.scales[1])
        target = (int(factor * w), int(factor * h))
        return image.resize(target, self.interpolation)
class RandomExpand_For_Segmentation:
    """Jointly upscale an image/mask pair by a factor drawn uniformly from *scales*.

    Expects ``data`` to be a dict holding PIL images under ``'image'`` and
    ``'mask'``; the mask is resized with nearest-neighbour so label ids are
    preserved.
    """

    def __init__(self, scales, interpolation=Image.BILINEAR):
        assert isinstance(scales, (list, tuple))
        assert len(scales) == 2
        assert 0 < scales[0] < scales[1]
        self.scales = scales
        self.interpolation = interpolation

    def __call__(self, data):
        # BUG FIX: the original read ``image.size`` but never defined ``image``
        # (NameError at runtime); read the size from data['image'] instead.
        width, height = data['image'].size
        scale = np.random.uniform(self.scales[0], self.scales[1])
        new_size = (int(scale * width), int(scale * height))
        data['image'] = data['image'].resize(new_size, self.interpolation)
        data['mask'] = data['mask'].resize(new_size, Image.NEAREST)
        return data
class ResizedRandomCrop(transforms.RandomResizedCrop):
    """Plain alias of torchvision's RandomResizedCrop, kept for naming symmetry."""
class ResizedRandomCrop_For_Segmentation(transforms.RandomResizedCrop):
    """RandomResizedCrop applied consistently to an image/mask dict.

    One crop box is sampled from the image and applied to both entries; the
    mask is resampled with nearest-neighbour so label ids are preserved.
    (The dead ``resize_crop`` stub, which returned None and was never used,
    has been removed.)
    """

    def __call__(self, data):
        image, mask = data['image'], data['mask']
        i, j, h, w = self.get_params(image, self.scale, self.ratio)
        data['image'] = TF.resized_crop(image, i, j, h, w, self.size, self.interpolation)
        # BUG FIX: the original cropped ``image`` into data['mask'] as well,
        # so the mask was silently replaced by a nearest-resampled copy of
        # the image; crop the actual mask instead.
        data['mask'] = TF.resized_crop(mask, i, j, h, w, self.size, Image.NEAREST)
        return data
class RandomResizedRandomCrop(transforms.RandomResizedCrop):
    """RandomResizedCrop whose output side length is itself drawn from a range."""

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
        assert isinstance(size, (list, tuple)) and len(size) == 2
        assert size[1] >= size[0]
        self.size = size
        self.interpolation = interpolation
        self.scale = scale
        self.ratio = ratio

    def __call__(self, image):
        box = self.get_params(image, self.scale, self.ratio)
        side = random.randint(self.size[0], self.size[1])
        return TF.resized_crop(image, *box, side, self.interpolation)
class RandomResizedRandomCrop_For_Segmentation(transforms.RandomResizedCrop):
    """Image/mask variant of RandomResizedRandomCrop.

    One crop box and one randomly drawn output side length are applied to
    both entries; the mask uses nearest-neighbour resampling.
    """

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
        assert isinstance(size, (list, tuple)) and len(size) == 2
        # CONSISTENCY FIX: allow size[1] == size[0] (a fixed output side),
        # matching the `>=` check used by RandomResizedRandomCrop above.
        assert size[1] >= size[0]
        self.size = size
        self.interpolation = interpolation
        self.scale = scale
        self.ratio = ratio

    def __call__(self, data):
        image, mask = data['image'], data['mask']
        i, j, h, w = self.get_params(image, self.scale, self.ratio)
        image_size = random.randint(*self.size)
        data['image'] = TF.resized_crop(image, i, j, h, w, image_size, self.interpolation)
        data['mask'] = TF.resized_crop(mask, i, j, h, w, image_size, Image.NEAREST)
        return data
class ColorJitter:
    """Thin wrapper around torchvision's ColorJitter that records its settings."""

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
        self.colorjitter = transforms.ColorJitter(brightness, contrast, saturation, hue)

    def __call__(self, image):
        return self.colorjitter(image)
class ColorJitter_For_Segmentation:
    """ColorJitter applied to data['image'] only; the mask is left untouched."""

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
        self.colorjitter = transforms.ColorJitter(brightness, contrast, saturation, hue)

    def __call__(self, data):
        data['image'] = self.colorjitter(data['image'])
        return data
class Resize:
    """Resize a PIL image to a square of side *size*."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = (size, size)
        self.interpolation = interpolation

    def __call__(self, image):
        # self.size is square, so reversing (h, w) -> (w, h) is a no-op here.
        return image.resize(self.size[::-1], self.interpolation)
class Resize_For_Segmentation:
    """Resize image and mask to a square of side *size* (mask: nearest-neighbour)."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = (size, size)
        self.interpolation = interpolation

    def __call__(self, data):
        # self.size is square, so reversing (h, w) -> (w, h) is a no-op here.
        data['image'] = data['image'].resize(self.size[::-1], self.interpolation)
        data['mask'] = data['mask'].resize(self.size[::-1], Image.NEAREST)
        return data
class RandomResize:
    """Rescale so the longer side equals a random length in [min, max]."""

    def __init__(self, min_image_size, max_image_size):
        self.min_image_size = min_image_size
        self.max_image_size = max_image_size
        self.modes = [Image.BICUBIC, Image.NEAREST]

    def __call__(self, image, mode=Image.BICUBIC):
        target = random.randint(self.min_image_size, self.max_image_size)
        w, h = image.size
        # Scale relative to the longer side.
        scale = target / h if w < h else target / w
        size = (int(round(w * scale)), int(round(h * scale)))
        if size == (w, h):
            return image
        return image.resize(size, mode)
class RandomResize_For_Segmentation:
    """Jointly rescale image/mask so the longer side equals a random length."""

    def __init__(self, min_image_size, max_image_size):
        self.min_image_size = min_image_size
        self.max_image_size = max_image_size
        self.modes = [Image.BICUBIC, Image.NEAREST]

    def __call__(self, data):
        image, mask = data['image'], data['mask']
        target = random.randint(self.min_image_size, self.max_image_size)
        w, h = image.size
        # Scale relative to the longer side.
        scale = target / h if w < h else target / w
        size = (int(round(w * scale)), int(round(h * scale)))
        if size != (w, h):
            data['image'] = image.resize(size, Image.BICUBIC)
            data['mask'] = mask.resize(size, Image.NEAREST)
        return data
class RandomResizeMinShort_For_Segmentation:
    """Jointly rescale image/mask so the *shorter* side equals a random length."""

    def __init__(self, min_image_size, max_image_size):
        self.min_image_size = min_image_size
        self.max_image_size = max_image_size
        self.modes = [Image.BICUBIC, Image.NEAREST]

    def __call__(self, data):
        image, mask = data['image'], data['mask']
        target = random.randint(self.min_image_size, self.max_image_size)
        w, h = image.size
        # Scale relative to the shorter side.
        scale = target / w if w < h else target / h
        size = (int(round(w * scale)), int(round(h * scale)))
        if size != (w, h):
            data['image'] = image.resize(size, Image.BICUBIC)
            data['mask'] = mask.resize(size, Image.NEAREST)
        return data
class RandomHorizontalFlip:
    """Mirror a PIL image left-right with probability 0.5."""

    def __call__(self, image):
        if random.getrandbits(1):
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
        return image
class RandomHorizontalFlip_For_Segmentation:
    """Mirror image and mask together left-right with probability 0.5."""

    def __call__(self, data):
        if random.getrandbits(1):
            data['image'] = data['image'].transpose(Image.FLIP_LEFT_RIGHT)
            data['mask'] = data['mask'].transpose(Image.FLIP_LEFT_RIGHT)
        return data
class Normalize:
    """Scale uint8 RGB to [0, 1] and standardize per channel: (x/255 - mean) / std."""

    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self.mean = mean
        self.std = std

    def __call__(self, image):
        pixels = np.asarray(image)
        out = np.empty_like(pixels, np.float32)
        for c in range(3):
            out[..., c] = (pixels[..., c] / 255. - self.mean[c]) / self.std[c]
        return out
class Normalize_For_Segmentation:
    """Standardize data['image'] per channel and cast data['mask'] to int64."""

    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self.mean = mean
        self.std = std

    def __call__(self, data):
        pixels = np.asarray(data['image'], dtype=np.float32)
        labels = np.asarray(data['mask'], dtype=np.int64)
        out = np.empty_like(pixels, np.float32)
        for c in range(3):
            out[..., c] = (pixels[..., c] / 255. - self.mean[c]) / self.std[c]
        data['image'] = out
        data['mask'] = labels
        return data
class ToNumpy:
    """Convert the input (e.g. a PIL image) into a float32 numpy array."""

    def __call__(self, image):
        return np.asarray(image, dtype=np.float32)
class ToNumpy_For_Segmentation:
    """Convert data['image'] to float32 and data['mask'] to int64 numpy arrays."""

    def __call__(self, data):
        data['image'] = np.asarray(data['image'], dtype=np.float32)
        data['mask'] = np.asarray(data['mask'], dtype=np.int64)
        return data
class Top_Left_Crop:
    """Crop (or zero-pad) the top-left corner of an HWC array to a fixed square."""

    def __init__(self, crop_size, channels=3):
        self.bg_value = 0
        self.crop_size = crop_size
        self.crop_shape = (crop_size, crop_size, channels)

    def __call__(self, image):
        h, w, c = image.shape
        ch, cw = min(self.crop_size, h), min(self.crop_size, w)
        out = np.full(self.crop_shape, self.bg_value, dtype=image.dtype)
        out[:ch, :cw] = image[:ch, :cw]
        return out
class Top_Left_Crop_For_Segmentation:
    """Top-left crop of image and mask; mask padding uses the ignore label 255."""

    def __init__(self, crop_size, channels=3):
        self.bg_value = 0
        self.crop_size = crop_size
        self.crop_shape = (crop_size, crop_size, channels)
        self.crop_shape_for_mask = (crop_size, crop_size)

    def __call__(self, data):
        image, mask = data['image'], data['mask']
        h, w, c = image.shape
        ch, cw = min(self.crop_size, h), min(self.crop_size, w)
        out_image = np.full(self.crop_shape, self.bg_value, dtype=image.dtype)
        out_image[:ch, :cw] = image[:ch, :cw]
        out_mask = np.full(self.crop_shape_for_mask, 255, dtype=mask.dtype)
        out_mask[:ch, :cw] = mask[:ch, :cw]
        data['image'] = out_image
        data['mask'] = out_mask
        return data
class RandomCrop:
    """Random fixed-size crop of an HWC array.

    Images smaller than the crop are placed at a random offset inside a
    zero-filled canvas. With ``with_bbox=True`` the crop boxes used are
    returned alongside the crop so they can be replayed via ``bbox_dic``.
    """

    def __init__(self, crop_size, channels=3, with_bbox=False):
        self.bg_value = 0
        self.with_bbox = with_bbox
        self.crop_size = crop_size
        self.crop_shape = (crop_size, crop_size, channels)

    def get_random_crop_box(self, image):
        """Sample destination/source boxes for a random crop (or random padding)."""
        h, w, c = image.shape
        ch = min(self.crop_size, h)
        cw = min(self.crop_size, w)
        w_space = w - self.crop_size
        h_space = h - self.crop_size
        # When the image exceeds the crop we shift the source window,
        # otherwise we shift where the (smaller) image lands in the canvas.
        if w_space > 0:
            cont_left, img_left = 0, random.randrange(w_space + 1)
        else:
            cont_left, img_left = random.randrange(-w_space + 1), 0
        if h_space > 0:
            cont_top, img_top = 0, random.randrange(h_space + 1)
        else:
            cont_top, img_top = random.randrange(-h_space + 1), 0
        dst_bbox = {'xmin': cont_left, 'ymin': cont_top,
                    'xmax': cont_left + cw, 'ymax': cont_top + ch}
        src_bbox = {'xmin': img_left, 'ymin': img_top,
                    'xmax': img_left + cw, 'ymax': img_top + ch}
        return dst_bbox, src_bbox

    def __call__(self, image, bbox_dic=None):
        if bbox_dic is None:
            dst_bbox, src_bbox = self.get_random_crop_box(image)
        else:
            dst_bbox, src_bbox = bbox_dic['dst_bbox'], bbox_dic['src_bbox']
        cropped = np.full(self.crop_shape, self.bg_value, dtype=image.dtype)
        cropped[dst_bbox['ymin']:dst_bbox['ymax'], dst_bbox['xmin']:dst_bbox['xmax']] = \
            image[src_bbox['ymin']:src_bbox['ymax'], src_bbox['xmin']:src_bbox['xmax']]
        if self.with_bbox:
            return cropped, {'dst_bbox': dst_bbox, 'src_bbox': src_bbox}
        return cropped


class RandomCrop_For_Segmentation(RandomCrop):
    """RandomCrop applying one crop box to both image and mask.

    The mask's padded region gets the ignore label 255.
    """

    def __init__(self, crop_size):
        super().__init__(crop_size)
        self.crop_shape_for_mask = (self.crop_size, self.crop_size)

    def __call__(self, data):
        image, mask = data['image'], data['mask']
        dst_bbox, src_bbox = self.get_random_crop_box(image)
        dy = slice(dst_bbox['ymin'], dst_bbox['ymax'])
        dx = slice(dst_bbox['xmin'], dst_bbox['xmax'])
        sy = slice(src_bbox['ymin'], src_bbox['ymax'])
        sx = slice(src_bbox['xmin'], src_bbox['xmax'])
        cropped_image = np.full(self.crop_shape, self.bg_value, dtype=image.dtype)
        cropped_image[dy, dx] = image[sy, sx]
        cropped_mask = np.full(self.crop_shape_for_mask, 255, dtype=mask.dtype)
        cropped_mask[dy, dx] = mask[sy, sx]
        data['image'] = cropped_image
        data['mask'] = cropped_mask
        return data
class Transpose:
    """Reorder an HWC array to CHW."""

    def __call__(self, image):
        return image.transpose((2, 0, 1))
class Transpose_with_BBox:
    """Transpose the image of an (image, bbox_dict) pair to CHW; boxes pass through."""

    def __call__(self, input):
        image, bbox_dict = input
        return image.transpose((2, 0, 1)), bbox_dict
class Transpose_For_Segmentation:
    """Reorder data['image'] from HWC to CHW; the mask keeps its HW layout."""

    def __call__(self, data):
        data['image'] = data['image'].transpose((2, 0, 1))
        return data
class Resize_For_Mask:
    """Nearest-neighbour resize of data['mask'] to a square of side *size*."""

    def __init__(self, size):
        self.size = (size, size)

    def __call__(self, data):
        mask = Image.fromarray(data['mask'].astype(np.uint8))
        mask = mask.resize(self.size, Image.NEAREST)
        # NOTE(review): uint64 differs from the int64 used elsewhere in this
        # file -- presumably unintentional, but kept for compatibility.
        data['mask'] = np.asarray(mask, dtype=np.uint64)
        return data
SemFormer | SemFormer-main/tools/ai/optim_utils.py | import torch
from .torch_utils import *
class PolyOptimizer(torch.optim.SGD):
    """SGD whose learning rate follows a polynomial decay schedule.

    Every param group's lr is scaled by ``(1 - step / max_step) ** power``
    each step, where the decay power is taken from ``momentum`` (0.9 here,
    which is also the conventional poly-decay exponent).
    """

    def __init__(self, params, lr, weight_decay, max_step, momentum=0.9, nesterov=False):
        # BUG FIX: the original called SGD(params, lr, weight_decay, ...),
        # which passed weight_decay into SGD's positional *momentum* slot and
        # silently dropped both the intended momentum and the global weight
        # decay. Pass both by keyword instead.
        super().__init__(params, lr, momentum=momentum,
                         weight_decay=weight_decay, nesterov=nesterov)
        self.global_step = 0
        self.max_step = max_step
        # Re-used below as the poly-decay exponent (kept for compatibility).
        self.momentum = momentum
        self.__initial_lr = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        # Rescale every group's lr from its initial value before stepping.
        if self.global_step < self.max_step:
            lr_mult = (1 - self.global_step / self.max_step) ** self.momentum
            for i in range(len(self.param_groups)):
                self.param_groups[i]['lr'] = self.__initial_lr[i] * lr_mult
        super().step(closure)
        self.global_step += 1
| 770 | 31.125 | 89 | py |
SemFormer | SemFormer-main/tools/ai/torch_utils.py | import cv2
import math
import torch
import random
import numpy as np
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
def make_divisible(x, divisor, rounding='ceil'):
    """Round *x* to a multiple of *divisor* using the math function named by *rounding*."""
    assert divisor != 0, 'divisor must be nonzero'
    round_fn = getattr(math, rounding)
    return round_fn(x / divisor) * divisor
def set_seed(seed):
    """Seed the python, numpy and torch RNGs (plus all CUDA devices if present)."""
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
def rotation(x, k):
    """Rotate a CHW tensor by k*90 degrees in the spatial (H, W) plane."""
    return torch.rot90(x, k, dims=(1, 2))
def interleave(x, size):
    """Interleave consecutive groups of *size* samples along the batch dim."""
    shape = list(x.shape)
    grouped = x.reshape([-1, size] + shape[1:])
    return grouped.transpose(0, 1).reshape([-1] + shape[1:])
def de_interleave(x, size):
    """Invert interleave(): regroup an interleaved batch into contiguous groups."""
    shape = list(x.shape)
    grouped = x.reshape([size, -1] + shape[1:])
    return grouped.transpose(0, 1).reshape([-1] + shape[1:])
def resize_for_tensors(tensors, size, mode='bilinear', align_corners=False):
    """Spatially resize an NCHW tensor batch via F.interpolate."""
    resized = F.interpolate(tensors, size, mode=mode, align_corners=align_corners)
    return resized
def L1_Loss(A_tensors, B_tensors):
    """Elementwise absolute difference |A - B| (no reduction)."""
    return (A_tensors - B_tensors).abs()
def L2_Loss(A_tensors, B_tensors):
    """Elementwise squared difference (A - B)^2 (no reduction)."""
    return (A_tensors - B_tensors) ** 2
def Online_Hard_Example_Mining(values, ratio=0.2):
    """Keep only the hardest `ratio` fraction of per-pixel values per sample.

    values: (B, C, H, W) loss map. Returns the (B, k) largest values with
    k = int(C*H*W*ratio); e.g. ratio=0.2 keeps the top 20%.
    """
    b, c, h, w = values.size()
    k = int(c * h * w * ratio)
    return torch.topk(values.reshape(b, -1), k=k, dim=-1)[0]
def shannon_entropy_loss(logits, activation=torch.sigmoid, epsilon=1e-5):
    """Mean per-sample Shannon entropy of activation(logits), summed over dim 1."""
    probs = activation(logits)
    entropy = -torch.sum(probs * torch.log(probs + epsilon), dim=1)
    return entropy.mean()
def expand_dim(input, dim, times=1):
    """Unsqueeze *input* at *dim* repeatedly, *times* times."""
    out = input
    for _ in range(times):
        out = out.unsqueeze(dim)
    return out
def min_max_norm(input, n_last_dim=2, eps=1e-5):
    """Min-max normalize over the last *n_last_dim* dims (jointly) to ~[0, 1]."""
    flat = input.view(*input.shape[:-n_last_dim], -1)
    lo = flat.min(dim=-1)[0]
    hi = flat.max(dim=-1)[0]
    # Re-expand the reduced stats so they broadcast against *input*.
    for _ in range(n_last_dim):
        lo = lo.unsqueeze(-1)
        hi = hi.unsqueeze(-1)
    return (input - lo) / (hi - lo + eps)
def make_cam(x, epsilon=1e-5):
    """Clamp activations at zero and normalize per (sample, channel) to form a CAM."""
    x = F.relu(x)
    b, c, h, w = x.size()
    # Per-channel spatial maximum, kept broadcastable as (b, c, 1, 1).
    peak = x.view(b, c, h * w).max(dim=-1)[0].view(b, c, 1, 1)
    return F.relu(x - epsilon) / (peak + epsilon)
def one_hot_embedding(label, classes):
    """Multi-hot encode a sequence of class indices.

    Args:
        label: sequence of int class indices (may be empty).
        classes: total number of classes.

    Returns:
        float32 numpy vector of length *classes* with 1.0 at each given index.
    """
    vector = np.zeros(classes, dtype=np.float32)
    if len(label) > 0:
        vector[label] = 1.
    return vector
def calculate_parameters(model):
    """Total number of parameters in *model*, in millions."""
    return sum(p.numel() for p in model.parameters()) / 1e6
def get_learning_rate_from_optimizer(optimizer):
    """Learning rate of the optimizer's first param group."""
    first_group = optimizer.param_groups[0]
    return first_group['lr']
def get_numpy_from_tensor(tensor):
    """Detach *tensor* from the autograd graph and return it as a CPU numpy array."""
    return tensor.detach().cpu().numpy()
def contain_keys(string, keys):
    """True if any of *keys* occurs as a substring of *string*."""
    return any(key in string for key in keys)
def filter_state_dict(state_dict, ignore_keys):
    """Copy *state_dict*, dropping entries whose key contains any ignore pattern."""
    return {
        key: value
        for key, value in state_dict.items()
        if not any(pattern in key for pattern in ignore_keys)
    }
def load_model(model, model_path, ignore_modules=[], parallel=False):
    """Load weights from *model_path* into *model*, skipping ignored modules.

    With parallel=True the checkpoint is loaded into ``model.module``
    (the DataParallel-wrapped network).
    """
    target = model.module if parallel else model
    state = filter_state_dict(torch.load(model_path), ignore_modules)
    target.load_state_dict(state)
def save_model(model, model_path, ignore_modules=[], parallel=False):
    """Save *model*'s weights to *model_path*, skipping ignored modules.

    With parallel=True the state is taken from ``model.module``
    (the DataParallel-wrapped network).
    """
    source = model.module if parallel else model
    torch.save(filter_state_dict(source.state_dict(), ignore_modules), model_path)
def transfer_model(pretrained_model, model):
    """Copy every weight whose name also exists in *model* from *pretrained_model*."""
    model_dict = model.state_dict()
    shared = {k: v for k, v in pretrained_model.state_dict().items() if k in model_dict}
    model_dict.update(shared)
    model.load_state_dict(model_dict)
def get_learning_rate(optimizer):
    """List of learning rates, one per param group."""
    return [group['lr'] for group in optimizer.param_groups]
def get_cosine_schedule_with_warmup(optimizer,
                                    warmup_iteration,
                                    max_iteration,
                                    cycles=7./16.
                                    ):
    """LambdaLR schedule: linear warmup followed by a truncated cosine decay."""

    def _lr_lambda(current_iteration):
        # Linear ramp from 0 to 1 over the warmup phase.
        if current_iteration < warmup_iteration:
            return float(current_iteration) / float(max(1, warmup_iteration))
        # Cosine decay over the remaining iterations, clamped at 0.
        progress = float(current_iteration - warmup_iteration) / \
            float(max(1, max_iteration - warmup_iteration))
        return max(0., math.cos(math.pi * cycles * progress))

    return LambdaLR(optimizer, _lr_lambda, -1)
SemFormer | SemFormer-main/tools/ai/evaluate_utils.py | import numpy as np
import torch
from sklearn.metrics import average_precision_score
from tools.general.json_utils import read_json
from core.functional import cosine_similarity
def calculate_for_tags(pred_tags, gt_tags):
    """Precision / recall / F1 (in percent) between two tag lists.

    Both inputs are lists of strings. Two empty lists count as a perfect
    match (100, 100, 100); exactly one empty list scores (0, 0, 0).
    """
    if len(pred_tags) == 0 and len(gt_tags) == 0:
        return 100, 100, 100
    if len(pred_tags) == 0 or len(gt_tags) == 0:
        return 0, 0, 0

    pred = np.asarray(pred_tags)
    gt = np.asarray(gt_tags)

    # Fraction of predicted tags found in gt / fraction of gt tags predicted.
    precision = np.sum(pred[:, np.newaxis] == gt[np.newaxis, :]) / len(pred) * 100
    recall = np.sum(gt[:, np.newaxis] == pred[np.newaxis, :]) / len(gt) * 100

    if precision == 0 and recall == 0:
        f1_score = 0
    else:
        f1_score = 2 * ((precision * recall) / (precision + recall))

    return precision, recall, f1_score
def calculate_mIoU(pred_mask, gt_mask):
    """Binary IoU (in percent) between two boolean/0-1 numpy masks."""
    inter = np.logical_and(pred_mask, gt_mask)
    union = np.logical_or(pred_mask, gt_mask)
    eps = 1e-5  # guards against empty masks (0/0)
    return 100 * (np.sum(inter) + eps) / (np.sum(union) + eps)
class Calculator_For_mIoU:
    """Accumulates per-class P/T/TP pixel counts and reports mIoU over a dataset.

    Class 0 is always 'background'; ground-truth pixels labelled 255 are
    ignored. Class names come either from a VOC-style json file or from the
    explicit *class_names* list.
    """

    def __init__(self, json_path=None, class_names=None):
        if class_names is None:
            class_names = read_json(json_path)['class_names']
        self.class_names = ['background'] + class_names
        self.classes = len(self.class_names)
        self.clear()

    def get_data(self, pred_mask, gt_mask):
        """Per-class (P, T, TP) counts for one prediction, without accumulating."""
        obj_mask = gt_mask < 255
        correct_mask = (pred_mask == gt_mask) * obj_mask
        P_list, T_list, TP_list = [], [], []
        for i in range(self.classes):
            P_list.append(np.sum((pred_mask == i) * obj_mask))
            T_list.append(np.sum((gt_mask == i) * obj_mask))
            TP_list.append(np.sum((gt_mask == i) * correct_mask))
        return (P_list, T_list, TP_list)

    def add_using_data(self, data):
        """Accumulate (P, T, TP) counts previously produced by get_data()."""
        P_list, T_list, TP_list = data
        for i in range(self.classes):
            self.P[i] += P_list[i]
            self.T[i] += T_list[i]
            self.TP[i] += TP_list[i]

    def add(self, pred_mask, gt_mask):
        """Accumulate counts for one prediction / ground-truth pair."""
        self.add_using_data(self.get_data(pred_mask, gt_mask))

    def get(self, detail=False, clear=True):
        """Return (mIoU, foreground mIoU) or, with detail, also the per-class
        IoU dict plus mean FP (over-activation) and FN (under-activation)."""
        IoU_dic = {}
        IoU_list, FP_list, FN_list = [], [], []
        for i in range(self.classes):
            denom = self.T[i] + self.P[i] - self.TP[i] + 1e-10
            IoU = self.TP[i] / denom * 100
            FP_list.append((self.P[i] - self.TP[i]) / denom)
            FN_list.append((self.T[i] - self.TP[i]) / denom)
            IoU_dic[self.class_names[i]] = IoU
            IoU_list.append(IoU)
        mIoU = np.mean(np.asarray(IoU_list))
        mIoU_foreground = np.mean(np.asarray(IoU_list)[1:])
        FP = np.mean(np.asarray(FP_list))
        FN = np.mean(np.asarray(FN_list))
        if clear:
            self.clear()
        if detail:
            return mIoU, mIoU_foreground, IoU_dic, FP, FN
        return mIoU, mIoU_foreground

    def clear(self):
        """Reset all accumulated counts to zero."""
        self.TP = [0] * self.classes
        self.P = [0] * self.classes
        self.T = [0] * self.classes
def calculate_mIoU_cuda(pred_mask, gt_mask):
    """Return the intersection-over-union (in percent) of two binary torch masks.

    Works on any device; masks must be boolean or integer tensors.
    """
    eps = 1e-5  # avoids division by zero when both masks are empty
    intersection = torch.sum(pred_mask & gt_mask)
    union = torch.sum(pred_mask | gt_mask)
    ratio = (intersection + eps) / (union + eps)
    return ratio.item() * 100
class Calculator_For_mIoU_CUDA:
    """Torch-tensor counterpart of Calculator_For_mIoU: accumulates per-class
    pixel counts (as Python ints via .item()) and reports mean IoU.
    """

    def __init__(self, json_path=None, class_names=None):
        if class_names is None:
            class_names = read_json(json_path)['class_names']
        self.class_names = ['background'] + class_names
        self.classes = len(self.class_names)
        self.clear()

    def get_data(self, pred_mask, gt_mask):
        """Return per-class (P, T, TP) counts for one mask pair without accumulating."""
        valid = gt_mask < 255  # 255 = ignore label
        correct = (pred_mask == gt_mask) * valid
        P_list, T_list, TP_list = [], [], []
        for c in range(self.classes):
            P_list.append(torch.sum((pred_mask == c) * valid).item())
            T_list.append(torch.sum((gt_mask == c) * valid).item())
            TP_list.append(torch.sum((gt_mask == c) * correct).item())
        return (P_list, T_list, TP_list)

    def add_using_data(self, data):
        """Accumulate counts previously produced by get_data()."""
        P_list, T_list, TP_list = data
        for c in range(self.classes):
            self.P[c] += P_list[c]
            self.T[c] += T_list[c]
            self.TP[c] += TP_list[c]

    def add(self, pred_mask, gt_mask):
        """Accumulate counts for one (prediction, ground-truth) mask pair."""
        self.add_using_data(self.get_data(pred_mask, gt_mask))

    def get(self, detail=False, clear=True):
        """Return (mIoU, foreground mIoU); with detail, also per-class IoU and FP/FN rates."""
        IoU_dic = {}
        IoU_list, FP_list, FN_list = [], [], []
        for c in range(self.classes):
            union = self.T[c] + self.P[c] - self.TP[c] + 1e-10
            IoU = self.TP[c] / union * 100
            FP_list.append((self.P[c] - self.TP[c]) / union)  # over-activation rate
            FN_list.append((self.T[c] - self.TP[c]) / union)  # under-activation rate
            IoU_dic[self.class_names[c]] = IoU
            IoU_list.append(IoU)
        mIoU = np.mean(np.asarray(IoU_list))
        mIoU_foreground = np.mean(np.asarray(IoU_list)[1:])
        FP = np.mean(np.asarray(FP_list))
        FN = np.mean(np.asarray(FN_list))
        if clear:
            self.clear()
        if detail:
            return mIoU, mIoU_foreground, IoU_dic, FP, FN
        return mIoU, mIoU_foreground

    def clear(self):
        """Reset all accumulated counts to zero."""
        self.TP = [0] * self.classes
        self.P = [0] * self.classes
        self.T = [0] * self.classes
def compute_AP(labels, outputs):
    """Return the per-sample average precision for multi-label predictions.

    Args:
        labels: (N, C) binary ground truth, torch tensor or numpy array.
        outputs: (N, C) prediction scores, torch tensor or numpy array.

    Returns:
        List of N average-precision values (one per sample).
    """
    # Normalize both inputs to numpy before handing them to scikit-learn.
    if isinstance(labels, torch.Tensor):
        labels = labels.cpu().numpy()
    if isinstance(outputs, torch.Tensor):
        outputs = outputs.cpu().numpy()
    return [average_precision_score(labels[i], outputs[i])
            for i in range(labels.shape[0])]
class Calculator_For_mAP:
    """Accumulates per-sample average precision and reports mAP in percent."""

    def __init__(self, json_path=None, class_names=None):
        if class_names is None:
            class_names = read_json(json_path)['class_names']
        self.class_names = ['background'] + class_names
        self.classes = len(self.class_names)
        self.clear()

    def get_data(self, logits, labels, topk=None):
        """Return the mean AP of one batch without accumulating; topk is unused."""
        return np.mean(compute_AP(labels, logits))

    def add_using_data(self, data):
        """Accumulate a pre-computed list of per-sample AP values."""
        self.ap_list += data

    def add(self, logits, labels, topk=None):
        """Accumulate per-sample AP for one batch; topk is unused."""
        self.ap_list += compute_AP(labels, logits)

    def get(self, detail=False, clear=True):
        """Return the accumulated mAP in percent; detail is unused."""
        mAP = 100 * np.mean(self.ap_list)
        if clear:
            self.clear()
        return mAP

    def clear(self):
        """Reset the accumulated AP list."""
        self.ap_list = []
SemFormer | SemFormer-main/tools/ai/randaugment.py | # code in this file is adapted from
# https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py
# https://github.com/google-research/fixmatch/blob/master/third_party/auto_augment/augmentations.py
# https://github.com/google-research/fixmatch/blob/master/libml/ctaugment.py
import logging
import random
import numpy as np
import PIL
import PIL.ImageOps
import PIL.ImageEnhance
import PIL.ImageDraw
from PIL import Image
logger = logging.getLogger(__name__)
PARAMETER_MAX = 10
def AutoContrast(img, **kwarg):
    """Maximize image contrast via PIL autocontrast; extra kwargs are ignored."""
    return PIL.ImageOps.autocontrast(img)
def Brightness(img, v, max_v, bias=0):
    """Adjust brightness by a factor scaled from magnitude v (plus bias)."""
    factor = _float_parameter(v, max_v) + bias
    enhancer = PIL.ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
def Color(img, v, max_v, bias=0):
    """Adjust colour saturation by a factor scaled from magnitude v (plus bias)."""
    factor = _float_parameter(v, max_v) + bias
    enhancer = PIL.ImageEnhance.Color(img)
    return enhancer.enhance(factor)
def Contrast(img, v, max_v, bias=0):
    """Adjust contrast by a factor scaled from magnitude v (plus bias)."""
    factor = _float_parameter(v, max_v) + bias
    enhancer = PIL.ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)
def Cutout(img, v, max_v, bias=0):
    """Cut out a square whose side is a scaled fraction of the shorter image side."""
    if v == 0:
        return img
    fraction = _float_parameter(v, max_v) + bias
    side = int(fraction * min(img.size))
    return CutoutAbs(img, side)
def CutoutAbs(img, v, **kwarg):
    """Paint a black v-by-v square at a uniformly random location on a copy of img."""
    w, h = img.size
    cx = np.random.uniform(0, w)
    cy = np.random.uniform(0, h)
    # Clamp the box to the image; the centre may land near a border.
    x0 = int(max(0, cx - v / 2.))
    y0 = int(max(0, cy - v / 2.))
    x1 = int(min(w, x0 + v))
    y1 = int(min(h, y0 + v))
    black = (0, 0, 0)  # fill colour (gray (127,127,127) was the alternative)
    patched = img.copy()
    PIL.ImageDraw.Draw(patched).rectangle((x0, y0, x1, y1), black)
    return patched
def Equalize(img, **kwarg):
    """Equalize the image histogram; extra kwargs are ignored."""
    return PIL.ImageOps.equalize(img)
def Identity(img, **kwarg):
    """Return the image unchanged (no-op augmentation); extra kwargs are ignored."""
    return img
def Invert(img, **kwarg):
    """Invert (negate) all image pixels; extra kwargs are ignored."""
    return PIL.ImageOps.invert(img)
def Posterize(img, v, max_v, bias=0):
    """Reduce the number of bits kept per colour channel."""
    bits = _int_parameter(v, max_v) + bias
    return PIL.ImageOps.posterize(img, bits)
def Rotate(img, v, max_v, bias=0):
    """Rotate by a scaled integer angle (degrees), with a random sign flip."""
    angle = _int_parameter(v, max_v) + bias
    if random.random() < 0.5:
        angle = -angle
    return img.rotate(angle)
def Sharpness(img, v, max_v, bias=0):
    """Adjust sharpness by a factor scaled from magnitude v (plus bias)."""
    factor = _float_parameter(v, max_v) + bias
    enhancer = PIL.ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
def ShearX(img, v, max_v, bias=0):
    """Shear horizontally by a scaled magnitude, with a random sign flip."""
    magnitude = _float_parameter(v, max_v) + bias
    sign = -1 if random.random() < 0.5 else 1
    shear = sign * magnitude
    return img.transform(img.size, PIL.Image.AFFINE, (1, shear, 0, 0, 1, 0))
def ShearY(img, v, max_v, bias=0):
    """Shear vertically by a scaled magnitude, with a random sign flip."""
    magnitude = _float_parameter(v, max_v) + bias
    sign = -1 if random.random() < 0.5 else 1
    shear = sign * magnitude
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, shear, 1, 0))
def Solarize(img, v, max_v, bias=0):
    """Invert all pixels above a threshold derived from the magnitude."""
    amount = _int_parameter(v, max_v) + bias
    return PIL.ImageOps.solarize(img, 256 - amount)
def SolarizeAdd(img, v, max_v, bias=0, threshold=128):
    """Add a randomly-signed offset to every pixel, clip to [0, 255], then solarize.

    Args:
        img: PIL image.
        v: augmentation magnitude in [0, PARAMETER_MAX].
        max_v: maximum additive offset.
        bias: constant added to the scaled offset.
        threshold: solarize threshold; pixels at or above it are inverted.
    """
    offset = _int_parameter(v, max_v) + bias
    if random.random() < 0.5:
        offset = -offset
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the concrete np.int64 dtype so the addition cannot overflow uint8.
    img_np = np.array(img).astype(np.int64)
    img_np = img_np + offset
    img_np = np.clip(img_np, 0, 255)
    img_np = img_np.astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def TranslateX(img, v, max_v, bias=0):
    """Translate horizontally by a scaled fraction of the width, random sign."""
    fraction = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        fraction = -fraction
    shift = int(fraction * img.size[0])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, shift, 0, 1, 0))
def TranslateY(img, v, max_v, bias=0):
    """Translate vertically by a scaled fraction of the height, random sign."""
    fraction = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        fraction = -fraction
    shift = int(fraction * img.size[1])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, shift))
def _float_parameter(v, max_v):
    """Linearly map v in [0, PARAMETER_MAX] to a float in [0, max_v]."""
    scaled = float(v) * max_v
    return scaled / PARAMETER_MAX
def _int_parameter(v, max_v):
    """Linearly map v in [0, PARAMETER_MAX] to an int in [0, max_v] (truncating)."""
    scaled = v * max_v / PARAMETER_MAX
    return int(scaled)
def fixmatch_augment_pool():
    """Return the (op, max_v, bias) triples used by FixMatch's RandAugment."""
    return [
        (AutoContrast, None, None),
        (Brightness, 0.9, 0.05),
        (Color, 0.9, 0.05),
        (Contrast, 0.9, 0.05),
        (Equalize, None, None),
        (Identity, None, None),
        (Posterize, 4, 4),
        (Rotate, 30, 0),
        (Sharpness, 0.9, 0.05),
        (ShearX, 0.3, 0),
        (ShearY, 0.3, 0),
        (Solarize, 256, 0),
        (TranslateX, 0.3, 0),
        (TranslateY, 0.3, 0),
    ]
def my_augment_pool():
    """Return an extended (op, max_v, bias) pool used for experimentation."""
    return [
        (AutoContrast, None, None),
        (Brightness, 1.8, 0.1),
        (Color, 1.8, 0.1),
        (Contrast, 1.8, 0.1),
        (Cutout, 0.2, 0),
        (Equalize, None, None),
        (Invert, None, None),
        (Posterize, 4, 4),
        (Rotate, 30, 0),
        (Sharpness, 1.8, 0.1),
        (ShearX, 0.3, 0),
        (ShearY, 0.3, 0),
        (Solarize, 256, 0),
        (SolarizeAdd, 110, 0),
        (TranslateX, 0.45, 0),
        (TranslateY, 0.45, 0),
    ]
class RandAugmentPC(object):
    """RandAugment variant: fixed magnitude, per-op random apply probability,
    always followed by a 16-pixel CutoutAbs.
    """

    def __init__(self, n, m):
        """n: number of ops sampled per call; m: magnitude in [1, 10]."""
        assert n >= 1
        assert 1 <= m <= 10
        self.n = n
        self.m = m
        self.augment_pool = my_augment_pool()

    def __call__(self, img):
        chosen = random.choices(self.augment_pool, k=self.n)
        for op, max_v, bias in chosen:
            prob = np.random.uniform(0.2, 0.8)
            # Apply the op with probability `prob`.
            if random.random() + prob >= 1:
                img = op(img, v=self.m, max_v=max_v, bias=bias)
        return CutoutAbs(img, int(32 * 0.5))
class RandAugmentMC(object):
    """RandAugment variant: random per-op magnitude in [1, m), 50% apply chance,
    always followed by a 16-pixel CutoutAbs.
    """

    def __init__(self, n, m):
        """n: number of ops sampled per call; m: magnitude upper bound in [1, 10]."""
        assert n >= 1
        assert 1 <= m <= 10
        self.n = n
        self.m = m
        self.augment_pool = fixmatch_augment_pool()

    def __call__(self, img):
        chosen = random.choices(self.augment_pool, k=self.n)
        for op, max_v, bias in chosen:
            v = np.random.randint(1, self.m)
            if random.random() < 0.5:
                img = op(img, v=v, max_v=max_v, bias=bias)
        return CutoutAbs(img, int(32 * 0.5))
| 5,864 | 24.951327 | 99 | py |
SemFormer | SemFormer-main/core/aff_utils.py | import torch
import torch.nn.functional as F
import numpy as np
class PathIndex:
    """Precomputes, for a fixed map size, the flat pixel indices of all straight
    line paths from each pixel to its neighbours within `radius` (used by
    AffinityNet-style random-walk affinity computation).

    Attributes:
        search_paths: list of arrays of path coordinates, grouped by path length.
        search_dst: destination offset (y, x) of every path.
        path_indices / src_indices / dst_indices: flat-index versions of the
            above, valid for maps of `default_size`.
    """

    def __init__(self, radius, default_size):
        self.radius = radius
        # Largest integer offset strictly inside the radius.
        self.radius_floor = int(np.ceil(radius) - 1)
        self.search_paths, self.search_dst = self.get_search_paths_dst(self.radius)
        self.path_indices, self.src_indices, self.dst_indices = self.get_path_indices(default_size)

    def get_search_paths_dst(self, max_radius=5):
        """Enumerate Bresenham-like pixel paths to every neighbour in the
        lower half-disc of radius `max_radius`, grouped by path length."""
        coord_indices_by_length = [[] for _ in range(max_radius * 4)]
        # Only half the disc is enumerated (y >= 0, plus the positive x-axis);
        # the opposite directions are covered by symmetry of the affinity.
        search_dirs = []
        for x in range(1, max_radius):
            search_dirs.append((0, x))
        for y in range(1, max_radius):
            for x in range(-max_radius + 1, max_radius):
                if x * x + y * y < max_radius ** 2:
                    search_dirs.append((y, x))
        for dir in search_dirs:
            length_sq = dir[0] ** 2 + dir[1] ** 2
            path_coords = []
            min_y, max_y = sorted((0, dir[0]))
            min_x, max_x = sorted((0, dir[1]))
            for y in range(min_y, max_y + 1):
                for x in range(min_x, max_x + 1):
                    # Squared distance of (y, x) from the line 0 -> dir;
                    # pixels closer than 1 belong to the path.
                    dist_sq = (dir[0] * x - dir[1] * y) ** 2 / length_sq
                    if dist_sq < 1:
                        path_coords.append([y, x])
            # Sort so the path endpoint (farthest pixel) comes first.
            path_coords.sort(key=lambda x: -abs(x[0]) - abs(x[1]))
            path_length = len(path_coords)
            coord_indices_by_length[path_length].append(path_coords)
        # Drop empty length buckets; each remaining bucket becomes one array.
        path_list_by_length = [np.asarray(v) for v in coord_indices_by_length if v]
        path_destinations = np.concatenate([p[:, 0] for p in path_list_by_length], axis=0)
        return path_list_by_length, path_destinations

    def get_path_indices(self, size):
        """Translate the relative path coordinates into flat pixel indices for a
        (H, W) = `size` map, cropped so every path stays inside the map."""
        full_indices = np.reshape(np.arange(0, size[0] * size[1], dtype=np.int64), (size[0], size[1]))
        # Valid source region: paths extend downwards by radius_floor and
        # sideways by radius_floor in both directions.
        cropped_height = size[0] - self.radius_floor
        cropped_width = size[1] - 2 * self.radius_floor
        path_indices = []
        for paths in self.search_paths:
            path_indices_list = []
            for p in paths:
                coord_indices_list = []
                for dy, dx in p:
                    coord_indices = full_indices[dy:dy + cropped_height,
                                                 self.radius_floor + dx:self.radius_floor + dx + cropped_width]
                    coord_indices = np.reshape(coord_indices, [-1])
                    coord_indices_list.append(coord_indices)
                path_indices_list.append(coord_indices_list)
            path_indices.append(np.array(path_indices_list))
        src_indices = np.reshape(full_indices[:cropped_height, self.radius_floor:self.radius_floor + cropped_width], -1)
        # First pixel of each path is its destination.
        dst_indices = np.concatenate([p[:, 0] for p in path_indices], axis=0)
        return path_indices, src_indices, dst_indices
def edge_to_affinity(edge, paths_indices):
    """Convert an edge map into sparse pairwise affinities along precomputed paths.

    The affinity of a pixel pair is 1 minus the maximum edge strength along the
    path connecting them, so any strong edge on the path kills the affinity.

    Args:
        edge: (N, H, W)-shaped edge tensor (flattened internally).
        paths_indices: PathIndex.path_indices; numpy arrays are converted to
            CUDA tensors in place. NOTE: requires a GPU.

    Returns:
        (N, num_pairs) affinity tensor.
    """
    aff_list = []
    edge = edge.view(edge.size(0), -1)
    # Move all index arrays to the GPU once, mutating the passed-in list.
    for i in range(len(paths_indices)):
        if isinstance(paths_indices[i], np.ndarray):
            paths_indices[i] = torch.from_numpy(paths_indices[i])
        paths_indices[i] = paths_indices[i].cuda(non_blocking=True)
    for ind in paths_indices:
        ind_flat = ind.view(-1)
        # Gather the edge value at every pixel of every path.
        dist = torch.index_select(edge, dim=-1, index=ind_flat)
        dist = dist.view(dist.size(0), ind.size(0), ind.size(1), ind.size(2))
        # Max-pool over the path dimension: affinity = 1 - max edge on path.
        aff = torch.squeeze(1 - F.max_pool2d(dist, (dist.size(2), 1)), dim=2)
        aff_list.append(aff)
    aff_cat = torch.cat(aff_list, dim=1)
    return aff_cat
def affinity_sparse2dense(affinity_sparse, ind_from, ind_to, n_vertices):
    """Expand sparse pairwise affinities into a dense symmetric matrix.

    Args:
        affinity_sparse: affinity values for each (from, to) pair.
        ind_from: flat source-pixel indices (numpy).
        ind_to: flat destination-pixel indices (numpy).
        n_vertices: total number of pixels (matrix is n_vertices x n_vertices).

    Returns:
        Dense (n_vertices, n_vertices) CUDA tensor, symmetric with unit diagonal.
    """
    ind_from = torch.from_numpy(ind_from)
    ind_to = torch.from_numpy(ind_to)
    affinity_sparse = affinity_sparse.view(-1).cpu()
    ind_from = ind_from.repeat(ind_to.size(0)).view(-1)
    ind_to = ind_to.view(-1)
    # Forward pairs, transposed pairs (symmetry), and the identity diagonal.
    indices = torch.stack([ind_from, ind_to])
    indices_tp = torch.stack([ind_to, ind_from])
    indices_id = torch.stack([torch.arange(0, n_vertices).long(), torch.arange(0, n_vertices).long()])
    affinity_dense = torch.sparse.FloatTensor(torch.cat([indices, indices_id, indices_tp], dim=1),
                                              torch.cat([affinity_sparse, torch.ones([n_vertices]), affinity_sparse])).to_dense().cuda()
    return affinity_dense
def to_transition_matrix(affinity_dense, beta, times):
    """Turn a dense affinity matrix into a random-walk transition matrix.

    The affinity is sharpened by exponent `beta`, column-normalized, then
    squared `times` times so a single matmul propagates 2**times walk steps.
    """
    sharpened = affinity_dense ** beta
    trans_mat = sharpened / sharpened.sum(dim=0, keepdim=True)
    for _ in range(times):
        trans_mat = trans_mat @ trans_mat
    return trans_mat
def propagate_to_edge(x, edge, radius=5, beta=10, exp_times=8):
    """Propagate score maps `x` by a random walk that is blocked by edges.

    Args:
        x: score maps whose trailing two dims are (H, W).
        edge: (H, W) edge map in [0, 1]; high values stop the walk.
        radius: neighbourhood radius used when building pixel affinities.
        beta: exponent sharpening the affinity before normalization.
        exp_times: the transition matrix is squared this many times,
            i.e. 2**exp_times walk steps are applied.

    Returns:
        (N, 1, H, W) propagated maps. NOTE: requires a GPU (via edge_to_affinity).
    """
    height, width = x.shape[-2:]
    # Pad so that border pixels have full neighbourhoods.
    hor_padded = width+radius*2
    ver_padded = height+radius
    path_index = PathIndex(radius=radius, default_size=(ver_padded, hor_padded))
    # Pad with edge value 1.0 so walks can never leave the image.
    edge_padded = F.pad(edge, (radius, radius, 0, radius), mode='constant', value=1.0)
    sparse_aff = edge_to_affinity(torch.unsqueeze(edge_padded, 0),
                                  path_index.path_indices)
    dense_aff = affinity_sparse2dense(sparse_aff, path_index.src_indices,
                                      path_index.dst_indices, ver_padded * hor_padded)
    dense_aff = dense_aff.view(ver_padded, hor_padded, ver_padded, hor_padded)
    # Crop the padding off both the source and destination axes.
    dense_aff = dense_aff[:-radius, radius:-radius, :-radius, radius:-radius]
    dense_aff = dense_aff.reshape(height * width, height * width)
    trans_mat = to_transition_matrix(dense_aff, beta=beta, times=exp_times)
    # Suppress scores on edge pixels before walking.
    x = x.view(-1, height, width) * (1 - edge)
    rw = torch.matmul(x.view(-1, height * width), trans_mat)
    rw = rw.view(rw.size(0), 1, height, width)
    return rw
class GetAffinityLabelFromIndices():
    """Builds affinity supervision from a segmentation map: for every precomputed
    pixel pair, labels it background-positive, foreground-positive, or negative.
    """

    def __init__(self, indices_from, indices_to):
        self.indices_from = indices_from
        self.indices_to = indices_to

    def __call__(self, segm_map):
        flat = np.reshape(segm_map, -1)
        label_from = np.expand_dims(flat[self.indices_from], axis=0)
        label_to = flat[self.indices_to]

        # A pair is supervised only when both endpoints carry a real class
        # id (< 21); 255 marks ignored pixels.
        valid = np.less(label_from, 21) & np.less(label_to, 21)
        same = np.equal(label_from, label_to)

        positive = same & valid
        bg_positive = (positive & np.equal(label_from, 0)).astype(np.float32)
        fg_positive = (positive & np.greater(label_from, 0)).astype(np.float32)
        negative = (~same & valid).astype(np.float32)

        return (torch.from_numpy(bg_positive),
                torch.from_numpy(fg_positive),
                torch.from_numpy(negative))
| 6,785 | 36.910615 | 133 | py |
SemFormer | SemFormer-main/core/utils.py | import torch
import torch.nn.functional as F
def grad_enable(model, ignore_param_names=None):
    """Enable gradients on all of `model`'s parameters, skipping any whose
    fully-qualified name appears in `ignore_param_names`."""
    skip = ignore_param_names
    for name, param in model.named_parameters():
        if skip is not None and name in skip:
            continue
        param.requires_grad = True
def grad_disable(model, ignore_param_names=None):
    """Disable gradients on all of `model`'s parameters, skipping any whose
    fully-qualified name appears in `ignore_param_names`."""
    skip = ignore_param_names
    for name, param in model.named_parameters():
        if skip is not None and name in skip:
            continue
        param.requires_grad = False
def no_grad_forward(model, *args, ignore_param_names=None, **kwargs):
    """Run model(*args, **kwargs) with parameter gradients temporarily disabled.

    Robustness fix: gradients are re-enabled in a finally block, so an
    exception inside the forward pass can no longer leave the model frozen.

    Args:
        model: module whose parameters are toggled.
        ignore_param_names: parameter names excluded from both toggles.

    Returns:
        Whatever the model's forward returns.
    """
    grad_disable(model, ignore_param_names=ignore_param_names)
    try:
        out = model(*args, **kwargs)
    finally:
        grad_enable(model, ignore_param_names=ignore_param_names)
    return out
def no_grad_wrapper(old_func, ignore_param_names=None):
    """Wrap `old_func(model, ...)` so it runs with parameter gradients disabled.

    Robustness fix: gradients are re-enabled in a finally block, so an
    exception inside `old_func` can no longer leave the model frozen.

    Args:
        old_func: callable taking the model as its first argument.
        ignore_param_names: parameter names excluded from both toggles.

    Returns:
        The wrapped callable.
    """
    def new_func(model, *args, **kwargs):
        grad_disable(model, ignore_param_names=ignore_param_names)
        try:
            out = old_func(model, *args, **kwargs)
        finally:
            grad_enable(model, ignore_param_names=ignore_param_names)
        return out
    return new_func
def get_label_info(labels, return_type='dict'):
    """Decompose a (B, C) multi-hot label tensor into per-(sample, class) index sets.

    Args:
        labels: (B, C) tensor; labels[b, c] > 0 marks class c as present ("seen")
            in sample b.
        return_type: 'dict' or 'list' — controls how the outputs are packaged.

    Returns:
        Flat index/class tensors and masks for seen and unseen (sample, class)
        pairs, one-hot label rows for each seen pair, "remaining" label rows with
        that single class removed, and the same constructions over all B*C cells.

    Raises:
        ValueError: if return_type is neither 'list' nor 'dict'.
    """
    B, C = labels.shape
    # (B, C) grids: each cell holds its own sample index / class index.
    indexes_total = torch.arange(B)[:, None].repeat(1, C).to(labels.device)
    classes_total = torch.arange(C)[None, :].repeat(B, 1).to(labels.device)
    seen_mask_total = labels > 0
    unseen_mask_total = ~seen_mask_total
    # Flatten into 1-D lists of (sample, class) pairs.
    seen_indexes = indexes_total[seen_mask_total]
    seen_classes = classes_total[seen_mask_total]
    unseen_indexes = indexes_total[unseen_mask_total]
    unseen_classes = classes_total[unseen_mask_total]
    num_seens = seen_mask_total.sum().item()
    range_seens = range(num_seens)
    # One-hot row per seen pair: exactly that single class set.
    seen_labels = labels.new_zeros([num_seens, C])
    seen_labels[range_seens, seen_classes] = 1
    # Complement row per seen pair: the sample's labels with that class zeroed.
    re_seen_labels = labels.clone()[seen_indexes]
    re_seen_labels[range_seens, seen_classes] = 0
    # Same two constructions over every (sample, class) cell, seen or not.
    indexes_total = indexes_total.reshape(-1)
    classes_total = classes_total.reshape(-1)
    labels_total = labels.new_zeros([B * C, C])
    labels_total[range(B * C), classes_total] = labels.reshape(-1)
    re_labels_total = labels[indexes_total].clone()
    re_labels_total[range(B * C), classes_total] = 0
    if return_type == 'list':
        return (seen_indexes, seen_classes, seen_mask_total,
                unseen_indexes, unseen_classes, unseen_mask_total,
                seen_labels, re_seen_labels,
                indexes_total, classes_total, labels_total, re_labels_total)
    elif return_type == 'dict':
        return dict(
            seen_indexes=seen_indexes, seen_classes=seen_classes, seen_mask_total=seen_mask_total,
            unseen_indexes=unseen_indexes, unseen_classes=unseen_classes, unseen_mask_total=unseen_mask_total,
            seen_labels=seen_labels, re_seen_labels=re_seen_labels,
            indexes_total=indexes_total, classes_total=classes_total,
            labels_total=labels_total, re_labels_total=re_labels_total
        )
    else:
        raise ValueError('unsupported return_type: {}'.format(return_type))
def create_mask(regions, image_size):
    """Rasterize (xmin, ymin, xmax, ymax) boxes into binary float masks.

    Args:
        regions: (N, 4) integer tensor of boxes in pixel coordinates.
        image_size: (H, W) of the output masks.

    Returns:
        (N, H, W) float tensor with 1 inside each box and 0 elsewhere.
    """
    H, W = image_size
    masks = torch.zeros([regions.shape[0], H, W], dtype=torch.float, device=regions.device)
    for i, (xmin, ymin, xmax, ymax) in enumerate(regions):
        masks[i, ymin:ymax, xmin:xmax] = 1.
    return masks
SemFormer | SemFormer-main/core/networks_legacy.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
from .arch_resnet import resnet, resnet38
from .arch_resnest import resnest
from .arch_vgg import vgg
from .deeplab_utils import ASPP, Decoder
from .aff_utils import PathIndex
from tools.ai.torch_utils import resize_for_tensors
from .module import FixedBatchNorm, Interpolate
from .abc_modules import ABC_Model
from .module import *
from .functional import *
from .models import *
from .utils import *
#######################################################################
class Backbone(nn.Module, ABC_Model):
    """Shared backbone builder.

    Instantiates a pre-trained ResNet / ResNet-38 / ResNeSt and exposes it
    either as `self.model` (ResNet-38) or as five sequential stages
    `self.stage1`..`self.stage5` (all other backbones).
    """

    def __init__(self, model_name, num_classes=20, mode='fix', segmentation=False):
        """
        Args:
            model_name: e.g. 'resnet50', 'resnet38', or a ResNeSt variant name.
            num_classes: kept for subclasses; unused here.
            mode: 'fix' selects FixedBatchNorm (frozen BN statistics),
                anything else selects regular nn.BatchNorm2d.
            segmentation: ResNeSt only — use heavier dilation for dense prediction.
        """
        super().__init__()
        self.mode = mode
        if self.mode == 'fix':
            self.norm_fn = FixedBatchNorm
        else:
            self.norm_fn = nn.BatchNorm2d
        if 'resnet' in model_name:
            if '38' in model_name:
                # ResNet-38 weights are converted from the original MXNet release.
                model = resnet38.ResNet38()
                state_dict = resnet38.convert_mxnet_to_torch()
                model.load_state_dict(state_dict)
            else:
                model = resnet.ResNet(resnet.Bottleneck, resnet.layers_dic[model_name], strides=(2, 2, 2, 1), batch_norm_fn=self.norm_fn)
                # ImageNet weights; the fc head is dropped since only
                # convolutional features are used downstream.
                state_dict = model_zoo.load_url(resnet.urls_dic[model_name])
                state_dict.pop('fc.weight')
                state_dict.pop('fc.bias')
                model.load_state_dict(state_dict)
        else:
            # ResNeSt family, constructor resolved by name.
            if segmentation:
                dilation, dilated = 4, True
            else:
                dilation, dilated = 2, False
            model = eval("resnest." + model_name)(pretrained=True, dilated=dilated, dilation=dilation, norm_layer=self.norm_fn)
            del model.avgpool
            del model.fc
        if 'resnet38' in model_name:
            self.model = model
        else:
            self.stage1 = nn.Sequential(model.conv1,
                                        model.bn1,
                                        model.relu,
                                        model.maxpool)  # stride = 4
            self.stage2 = nn.Sequential(model.layer1)  # stride = 4
            self.stage3 = nn.Sequential(model.layer2)  # stride = 8
            self.stage4 = nn.Sequential(model.layer3)  # stride = 16
            self.stage5 = nn.Sequential(model.layer4)  # stride = 16
class Classifier(Backbone):
    """Multi-label image classifier on top of Backbone, with optional CAM output."""

    def __init__(self, model_name, num_classes=20, mode='fix'):
        super().__init__(model_name, num_classes, mode)
        self.model_name = model_name
        # ResNet-38 features are 4096-d; the other backbones emit 2048-d.
        if model_name == 'resnet38':
            self.classifier = nn.Sequential(
                nn.Dropout2d(0.5),
                nn.Conv2d(4096, num_classes, 1, bias=False)
            )
        else:
            self.classifier = nn.Conv2d(2048, num_classes, 1, bias=False)
        self.num_classes = num_classes
        self.initialize([self.classifier])

    def forward(self, x, with_cam=False):
        """Return class logits; with_cam=True also returns the class activation maps.

        Args:
            x: input image batch.
            with_cam: if True, classify per-pixel first and pool afterwards,
                returning (logits, feature maps); otherwise pool first and
                return logits only.
        """
        if '38' in self.model_name:
            x = self.model(x)
        else:
            x = self.stage1(x)
            x = self.stage2(x)
            x = self.stage3(x)
            x = self.stage4(x)
            x = self.stage5(x)
        if with_cam:
            # 1x1-conv per-pixel classification => the feature maps ARE the CAMs.
            features = self.classifier(x)
            logits = self.global_average_pooling_2d(features)
            return logits, features
        else:
            x = self.global_average_pooling_2d(x, keepdims=True)
            logits = self.classifier(x).view(-1, self.num_classes)
            return logits
class DeepLabv3_Plus(Backbone):
    """DeepLab v3+ semantic segmentation head (ASPP + decoder) on a Backbone."""

    def __init__(self, model_name, num_classes=21, mode='fix', use_group_norm=False, dropout_ratios=(0.5, 0.1)):
        super().__init__(model_name, num_classes, mode, segmentation=False)
        self.model_name = model_name
        # Extra modules (ASPP/decoder) can use group norm instead of the
        # backbone's batch-norm flavour.
        if use_group_norm:
            norm_fn_for_extra_modules = group_norm
        else:
            norm_fn_for_extra_modules = self.norm_fn
        if '38' in model_name:
            # ResNet-38: 4096-d features at output stride 8.
            inplanes = 4096
            self.aspp = ASPP(output_stride=8, norm_fn=norm_fn_for_extra_modules, inplanes=inplanes)
            self.decoder = Decoder(num_classes, 256, norm_fn_for_extra_modules, dropout_ratios=dropout_ratios)
        else:
            # Other backbones: 2048-d features at output stride 16.
            inplanes = 2048
            self.aspp = ASPP(output_stride=16, norm_fn=norm_fn_for_extra_modules, inplanes=inplanes)
            self.decoder = Decoder(num_classes, 256, norm_fn_for_extra_modules, dropout_ratios=dropout_ratios)

    def forward(self, x, with_cam=False):
        """Return per-pixel class logits at the input resolution; with_cam is unused."""
        inputs = x
        x1 = self.stage1(x)
        x2 = self.stage2(x1)
        # Low-level features feed the decoder's skip connection; ResNet-38
        # taps stage1, the rest tap stage2.
        x_low_level = x1 if '38' in self.model_name else x2
        x = self.stage3(x2)
        x = self.stage4(x)
        x = self.stage5(x)
        x = self.aspp(x)
        x = self.decoder(x, x_low_level)
        # Upsample the logits back to the input spatial size.
        x = resize_for_tensors(x, inputs.size()[2:], align_corners=True)
        return x
SemFormer | SemFormer-main/core/networks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
from .arch_resnet import resnet, resnet38
from .arch_resnest import resnest
from .arch_vgg import vgg
from .deeplab_utils import ASPP, Decoder
from .aff_utils import PathIndex
from tools.ai.torch_utils import resize_for_tensors
from .module import FixedBatchNorm, Interpolate
from .abc_modules import ABC_Model
from .module import *
from .functional import *
from .models import *
from .utils import *
from .networks_legacy import Backbone, Classifier, DeepLabv3_Plus
from .affinitynet import AffinityNet
| 651 | 24.076923 | 65 | py |
SemFormer | SemFormer-main/core/datasets.py | import os
import cv2
import glob
import torch
import copy
import torchvision.datasets as dset
import math
import imageio
import numpy as np
from PIL import Image
from core.aff_utils import *
from tools.ai.augment_utils import *
from tools.ai.torch_utils import one_hot_embedding
from tools.general.xml_utils import read_xml
from tools.general.json_utils import read_json
from tools.dataset.voc_utils import get_color_map_dic
class Iterator:
    """Endlessly cycles over a data loader, restarting it upon exhaustion."""

    def __init__(self, loader):
        self.loader = loader
        self.init()

    def init(self):
        """(Re)create the underlying iterator from the loader."""
        self.iterator = iter(self.loader)

    def get(self):
        """Return the next item, transparently restarting after StopIteration."""
        try:
            return next(self.iterator)
        except StopIteration:
            self.init()
            return next(self.iterator)
class VOC_Dataset(torch.utils.data.Dataset):
    """Base Pascal VOC 2012 dataset.

    Reads the image-id list for `domain` from ./data/<domain>.txt and yields,
    per item, the RGB image plus — in this fixed order, when requested — the
    image id, the XML object tags, and the segmentation mask.
    """

    def __init__(self, root_dir, domain, with_id=False, with_tags=False, with_mask=False):
        self.root_dir = root_dir
        self.image_dir = self.root_dir + 'JPEGImages/'
        self.xml_dir = self.root_dir + 'Annotations/'
        self.mask_dir = self.root_dir + 'SegmentationClass/'
        self.image_id_list = [image_id.strip() for image_id in open('./data/%s.txt'%domain).readlines()]
        self.with_id = with_id
        self.with_tags = with_tags
        self.with_mask = with_mask

    def __len__(self):
        return len(self.image_id_list)

    def get_image(self, image_id):
        """Load the JPEG image for `image_id` as a PIL RGB image."""
        image = Image.open(self.image_dir + image_id + '.jpg').convert('RGB')
        return image

    def get_mask(self, image_id):
        """Load the segmentation mask, or None — only the segmentation subset has masks."""
        mask_path = self.mask_dir + image_id + '.png'
        if os.path.isfile(mask_path):
            mask = Image.open(mask_path)
        else:
            mask = None
        return mask

    def get_tags(self, image_id):
        """Read the object-class tags from the image's annotation XML."""
        _, tags = read_xml(self.xml_dir + image_id + '.xml')
        return tags

    def __getitem__(self, index):
        image_id = self.image_id_list[index]
        data_list = [self.get_image(image_id)]
        # Optional extras are appended in a fixed order: id, tags, mask.
        if self.with_id:
            data_list.append(image_id)
        if self.with_tags:
            data_list.append(self.get_tags(image_id))
        if self.with_mask:
            data_list.append(self.get_mask(image_id))
        return data_list
class VOC_Dataset_For_Classification(VOC_Dataset):
    """VOC images with multi-hot classification labels derived from XML tags."""

    def __init__(self, root_dir, domain, transform=None):
        super().__init__(root_dir, domain, with_tags=True)
        self.transform = transform
        meta = read_json('./data/VOC_2012.json')
        self.class_dic = meta['class_dic']
        self.classes = meta['classes']

    def __getitem__(self, index):
        image, tags = super().__getitem__(index)
        if self.transform is not None:
            image = self.transform(image)
        class_ids = [self.class_dic[tag] for tag in tags]
        return image, one_hot_embedding(class_ids, self.classes)
class VOC_Dataset_For_Classification_DetachPadding(VOC_Dataset):
    """Classification dataset whose transform also reports where the valid
    (non-padded) image region ends up inside the output tensor.
    """

    def __init__(self, root_dir, domain, transform=None, region_to_mask=False):
        super().__init__(root_dir, domain, with_tags=True)
        # NOTE(review): `transform` is expected to return (image, bbox_dict)
        # with bbox_dict['dst_bbox'] holding xmin/ymin/xmax/ymax — confirm
        # against the transform implementation.
        self.transform = transform
        self.region_to_mask = region_to_mask
        data = read_json('./data/VOC_2012.json')
        self.class_dic = data['class_dic']
        self.classes = data['classes']

    def __getitem__(self, index):
        image, tags = super().__getitem__(index)
        crop_region = None  # stays None when no transform is configured
        if self.transform is not None:
            image, bbox_dict = self.transform(image)
            dst_bbox = bbox_dict['dst_bbox']
            if self.region_to_mask:
                # Binary mask: 1 inside the valid region, 0 over padding.
                mask = np.zeros(image.shape[-2:], dtype=np.float32)
                mask[dst_bbox['ymin']:dst_bbox['ymax'], dst_bbox['xmin']:dst_bbox['xmax']] = 1
                crop_region = mask
            else:
                # Box form: (xmin, ymin, xmax, ymax).
                crop_region = np.array([dst_bbox['xmin'], dst_bbox['ymin'], dst_bbox['xmax'], dst_bbox['ymax']])
        label = one_hot_embedding([self.class_dic[tag] for tag in tags], self.classes)
        return image, label, crop_region
class VOC_Dataset_For_Classification_MultiView(VOC_Dataset):
    """Yields several independently-augmented views of each VOC image plus its
    multi-hot label, as [view_1, ..., view_n, label]."""

    def __init__(self, root_dir, domain, transform=None, num_views=1):
        super().__init__(root_dir, domain, with_tags=True)
        self.transform = transform
        self.num_views = num_views
        meta = read_json('./data/VOC_2012.json')
        self.class_dic = meta['class_dic']
        self.classes = meta['classes']

    def __getitem__(self, index):
        image, tags = super().__getitem__(index)
        if self.transform is not None:
            views = [self.transform(image) for _ in range(self.num_views)]
        else:
            views = [copy.deepcopy(image) for _ in range(self.num_views)]
        label = one_hot_embedding([self.class_dic[tag] for tag in tags], self.classes)
        return views + [label]
class VOC_Dataset_For_Segmentation(VOC_Dataset):
    """VOC images paired with their ground-truth segmentation masks."""

    def __init__(self, root_dir, domain, transform=None):
        super().__init__(root_dir, domain, with_mask=True)
        self.transform = transform
        cmap_dic, _, class_names = get_color_map_dic()
        self.colors = np.asarray([cmap_dic[name] for name in class_names])

    def __getitem__(self, index):
        image, mask = super().__getitem__(index)
        if self.transform is not None:
            # Joint transform keeps image and mask geometrically aligned.
            sample = self.transform({'image': image, 'mask': mask})
            image, mask = sample['image'], sample['mask']
        return image, mask
class VOC_Dataset_For_Evaluation(VOC_Dataset):
    """VOC images with ids and ground-truth masks, for segmentation evaluation."""

    def __init__(self, root_dir, domain, transform=None):
        super().__init__(root_dir, domain, with_id=True, with_mask=True)
        self.transform = transform
        cmap_dic, _, class_names = get_color_map_dic()
        self.colors = np.asarray([cmap_dic[name] for name in class_names])

    def __getitem__(self, index):
        image, image_id, mask = super().__getitem__(index)
        if self.transform is not None:
            # Joint transform keeps image and mask geometrically aligned.
            sample = self.transform({'image': image, 'mask': mask})
            image, mask = sample['image'], sample['mask']
        return image, image_id, mask
class VOC_Dataset_For_WSSS(VOC_Dataset):
    """VOC images paired with pseudo-label masks loaded from `pred_dir`."""

    def __init__(self, root_dir, domain, pred_dir, transform=None):
        super().__init__(root_dir, domain, with_id=True)
        self.pred_dir = pred_dir
        self.transform = transform
        cmap_dic, _, class_names = get_color_map_dic()
        self.colors = np.asarray([cmap_dic[name] for name in class_names])

    def __getitem__(self, index):
        image, image_id = super().__getitem__(index)
        # Pseudo labels are stored as PNGs named by image id.
        mask = Image.open(self.pred_dir + image_id + '.png')
        if self.transform is not None:
            sample = self.transform({'image': image, 'mask': mask})
            image, mask = sample['image'], sample['mask']
        return image, mask
class VOC_Dataset_For_Testing_CAM(VOC_Dataset):
    """VOC images with multi-hot labels and ground-truth masks, for CAM evaluation."""

    def __init__(self, root_dir, domain, transform=None):
        super().__init__(root_dir, domain, with_tags=True, with_mask=True)
        self.transform = transform
        cmap_dic, _, class_names = get_color_map_dic()
        self.colors = np.asarray([cmap_dic[name] for name in class_names])
        meta = read_json('./data/VOC_2012.json')
        self.class_dic = meta['class_dic']
        self.classes = meta['classes']

    def __getitem__(self, index):
        image, tags, mask = super().__getitem__(index)
        if self.transform is not None:
            # Joint transform keeps image and mask geometrically aligned.
            sample = self.transform({'image': image, 'mask': mask})
            image, mask = sample['image'], sample['mask']
        label = one_hot_embedding([self.class_dic[tag] for tag in tags], self.classes)
        return image, label, mask
class VOC_Dataset_For_Testing_CAM_MultiView(VOC_Dataset):
    """Yields several independently-augmented (image, mask) views plus the
    multi-hot label, ordered as [img_1..img_n, label, mask_1..mask_n]."""

    def __init__(self, root_dir, domain, transform=None, num_views=1):
        super().__init__(root_dir, domain, with_tags=True, with_mask=True)
        self.transform = transform
        self.num_views = num_views
        cmap_dic, _, class_names = get_color_map_dic()
        self.colors = np.asarray([cmap_dic[class_name] for class_name in class_names])
        data = read_json('./data/VOC_2012.json')
        self.class_dic = data['class_dic']
        self.classes = data['classes']

    def __getitem__(self, index):
        image, tags, mask = super().__getitem__(index)
        if self.transform is not None:
            input_dic = {'image':image, 'mask':mask}
            images = []
            masks = []
            for i in range(self.num_views):
                # deepcopy so each view is transformed from the pristine pair
                # with independent randomness.
                output_dic = self.transform(copy.deepcopy(input_dic))
                image = output_dic['image']
                mask = output_dic['mask']
                images.append(image)
                masks.append(mask)
        else:
            images = [copy.deepcopy(image) for i in range(self.num_views)]
            masks = [copy.deepcopy(mask) for i in range(self.num_views)]
        label = one_hot_embedding([self.class_dic[tag] for tag in tags], self.classes)
        return images + [label] + masks
class VOC_Dataset_For_Making_CAM(VOC_Dataset):
    """Yields (image, id, label, mask) tuples used when dumping CAMs to disk."""

    def __init__(self, root_dir, domain):
        super().__init__(root_dir, domain, with_id=True, with_tags=True, with_mask=True)
        cmap_dic, _, class_names = get_color_map_dic()
        self.colors = np.asarray([cmap_dic[name] for name in class_names])
        meta = read_json('./data/VOC_2012.json')
        # The 20 foreground class names (background excluded).
        self.class_names = np.asarray(class_names[1:21])
        self.class_dic = meta['class_dic']
        self.classes = meta['classes']

    def __getitem__(self, index):
        image, image_id, tags, mask = super().__getitem__(index)
        label = one_hot_embedding([self.class_dic[tag] for tag in tags], self.classes)
        return image, image_id, label, mask
class VOC_Dataset_For_Affinity(VOC_Dataset):
    """Dataset for AffinityNet training: yields an image and pixel-pair
    affinity targets derived from pseudo-label PNGs in `label_dir`."""

    def __init__(self, root_dir, domain, path_index, label_dir, transform=None):
        super().__init__(root_dir, domain, with_id=True)
        data = read_json('./data/VOC_2012.json')
        self.class_dic = data['class_dic']
        self.classes = data['classes']
        self.transform = transform
        self.label_dir = label_dir
        self.path_index = path_index
        # Converts a transformed label map into (bg-pos, fg-pos, neg) targets
        # for the pixel pairs enumerated by path_index.
        self.extract_aff_lab_func = GetAffinityLabelFromIndices(self.path_index.src_indices, self.path_index.dst_indices)

    def __getitem__(self, idx):
        image, image_id = super().__getitem__(idx)
        label = imageio.imread(self.label_dir + image_id + '.png')
        label = Image.fromarray(label)
        # Joint transform keeps image and label geometrically aligned.
        output_dic = self.transform({'image':image, 'mask':label})
        image, label = output_dic['image'], output_dic['mask']
        return image, self.extract_aff_lab_func(label)
class VOC_Dataset_For_Affinity_MS(VOC_Dataset):
    """Multi-scale variant of the affinity dataset: one affinity-label extractor
    per path index, all applied to the same transformed pseudo-label."""
    # NOTE(review): `scales=[0.5, 0.75]` is a mutable default argument; it is
    # never mutated here, but a tuple would be safer.
    def __init__(self, root_dir, domain, path_indexes, label_dir, scales=[0.5, 0.75], transform=None):
        super().__init__(root_dir, domain, with_id=True)
        data = read_json('./data/VOC_2012.json')
        self.class_dic = data['class_dic']
        self.classes = data['classes']
        self.transform = transform
        self.label_dir = label_dir
        self.path_indexes = path_indexes
        self.scales = scales
        # One affinity-label extractor per supplied path index.
        self.extract_aff_lab_funcs = [GetAffinityLabelFromIndices(path_index.src_indices, path_index.dst_indices)
                                        for path_index in self.path_indexes]
    def __getitem__(self, idx):
        image, image_id = super().__getitem__(idx)
        # Pseudo segmentation label produced by an earlier stage, stored as PNG.
        label = imageio.imread(self.label_dir + image_id + '.png')
        label = Image.fromarray(label)
        output_dic = self.transform({'image':image, 'mask':label})
        image, label = output_dic['image'], output_dic['mask']
        labs = []
        # Concatenate the affinity labels from every extractor into one flat list.
        for f in self.extract_aff_lab_funcs:
            labs += f(label)
        return image, labs | 12,191 | 32.772853 | 121 | py |
SemFormer | SemFormer-main/core/deeplab_utils.py | # Copyright (C) 2021 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import torch
import torch.nn as nn
import torch.nn.functional as F
class ASPPModule(nn.Module):
    """One ASPP branch: atrous (dilated) conv -> norm -> ReLU."""
    def __init__(self, inplanes, planes, kernel_size, padding, dilation, norm_fn=None):
        super().__init__()
        self.atrous_conv = nn.Conv2d(
            inplanes, planes, kernel_size=kernel_size, stride=1,
            padding=padding, dilation=dilation, bias=False)
        self.bn = norm_fn(planes)
        self.relu = nn.ReLU(inplace=True)
        self.initialize([self.atrous_conv, self.bn])
    def forward(self, x):
        return self.relu(self.bn(self.atrous_conv(x)))
    def initialize(self, modules):
        # Kaiming init for convs; identity affine (weight=1, bias=0) for batch norms.
        for module in modules:
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: four parallel dilated-conv branches plus
    a global-average-pooling branch, concatenated and fused by a 1x1 conv."""
    def __init__(self, output_stride, norm_fn, inplanes=2048):
        super().__init__()
        # Dilation rates depend on the backbone's output stride.
        if output_stride == 16:
            dilations = [1, 6, 12, 18]
        elif output_stride == 8:
            dilations = [1, 12, 24, 36]
        # Branch 1 is a 1x1 conv; branches 2-4 are 3x3 atrous convs whose
        # padding equals their dilation (preserves spatial size).
        kernel_sizes = [1, 3, 3, 3]
        paddings = [0] + dilations[1:]
        for idx, (k, p, d) in enumerate(zip(kernel_sizes, paddings, dilations), start=1):
            setattr(self, 'aspp{}'.format(idx),
                    ASPPModule(inplanes, 256, k, padding=p, dilation=d, norm_fn=norm_fn))
        self.global_avg_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
            norm_fn(256),
            nn.ReLU(inplace=True),
        )
        # 5 branches x 256 channels = 1280 in.
        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = norm_fn(256)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(0.5)
        self.initialize([self.conv1, self.bn1] + list(self.global_avg_pool.modules()))
    def forward(self, x):
        branches = [self.aspp1(x), self.aspp2(x), self.aspp3(x), self.aspp4(x)]
        pooled = self.global_avg_pool(x)
        # Broadcast the 1x1 pooled descriptor back to the branch resolution.
        pooled = F.interpolate(pooled, size=branches[-1].size()[2:], mode='bilinear', align_corners=True)
        branches.append(pooled)
        out = torch.cat(branches, dim=1)
        return self.dropout(self.relu(self.bn1(self.conv1(out))))
    def initialize(self, modules):
        # Kaiming init for convs; identity affine for batch norms.
        for module in modules:
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
class Decoder(nn.Module):
    """DeepLab-v3+-style decoder: projects low-level features to 48 channels,
    upsamples the high-level features to match, concatenates both, and runs a
    small conv classifier head.

    Args:
        num_classes: number of output channels of the final 1x1 conv.
        low_level_inplanes: channel count of the low-level feature input.
        norm_fn: normalization layer constructor (e.g. BatchNorm2d).
        dropout_ratios: two dropout probabilities used inside the classifier.
    """
    def __init__(self, num_classes, low_level_inplanes, norm_fn, dropout_ratios=(0.5, 0.1)):
        super().__init__()
        assert isinstance(dropout_ratios, (list, tuple)) and len(dropout_ratios) == 2
        self.dropout_ratios = dropout_ratios
        # 1x1 projection of low-level features down to 48 channels.
        self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = norm_fn(48)
        self.relu = nn.ReLU(inplace=True)
        # 304 = 48 (projected low-level) + 256 (high-level input, e.g. ASPP output).
        self.classifier = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
            norm_fn(256),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout_ratios[0]),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            norm_fn(256),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout_ratios[1]),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1)
        )
        self.initialize([self.conv1, self.bn1] + list(self.classifier.modules()))
    def forward(self, x, x_low_level):
        # Project + normalize the low-level features.
        x_low_level = self.conv1(x_low_level)
        x_low_level = self.bn1(x_low_level)
        x_low_level = self.relu(x_low_level)
        # Upsample high-level features to the low-level spatial size, then fuse.
        x = F.interpolate(x, size=x_low_level.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x, x_low_level), dim=1)
        x = self.classifier(x)
        return x
    def initialize(self, modules):
        # Kaiming init for convs; identity affine (weight=1, bias=0) for batch norms.
        for m in modules:
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() | 4,572 | 34.449612 | 137 | py |
SemFormer | SemFormer-main/core/affinitynet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
from .arch_resnet import resnet, resnet38
from .arch_resnest import resnest
from .arch_vgg import vgg
from .models.transformer_backbone import ViTBackbone
from . import functional as _F
from .deeplab_utils import ASPP, Decoder
from .aff_utils import PathIndex
from tools.ai.torch_utils import resize_for_tensors
from .module import FixedBatchNorm, Interpolate
from .abc_modules import ABC_Model
from .module import *
from .functional import *
from .models import *
from .utils import *
from .networks_legacy import Backbone
def _make_fc_edge_layer(in_channels, out_channels, num_groups=4, scale_factor=None):
layers = [
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.GroupNorm(num_groups, out_channels)
]
if (scale_factor is not None) and (scale_factor != 1):
layers += [Interpolate(scale_factor=scale_factor, mode='bilinear', align_corners=True)]
layers += [nn.ReLU(inplace=True)]
return nn.Sequential(*layers)
class AffinityNet(Backbone):
    """Edge/affinity head built on top of a classification backbone.

    Five per-stage ``fc_edge`` heads project intermediate backbone features to
    32 channels at a common 1/4-resolution grid; ``fc_edge6`` fuses them into a
    single-channel edge map. When a ``path_index`` is supplied, the edge map
    can be converted to pairwise affinities along pre-computed pixel paths.
    """
    def __init__(self, model_name, path_index=None):
        # 'fix' mode — presumably keeps backbone batch-norm frozen; see Backbone.
        super().__init__(model_name, None, 'fix')
        self.model_name = model_name
        # Per-stage channel counts / strides differ between ResNet-38 and the
        # torchvision-style ResNets.
        if '38' in model_name:
            self.fc_edge_features_list = [128, 256, 512, 1024, 4096]
            self.strides = [2, 4, 8, 8, 8]
        else:
            self.fc_edge_features_list = [64, 256, 512, 1024, 2048]
            self.strides = [4, 4, 8, 16, 16]
        # Each edge head upsamples its stage to stride-4 resolution.
        for i in range(5):
            self.add_module(
                'fc_edge{}'.format(i + 1),
                _make_fc_edge_layer(self.fc_edge_features_list[i], 32, scale_factor=self.strides[i] / 4))
        self.fc_edge6 = nn.Conv2d(32 * 5, 1, 1, bias=True)
        if '38' in model_name:
            self.backbone = self.model
        else:
            self.backbone = nn.ModuleList([self.stage1, self.stage2, self.stage3, self.stage4, self.stage5])
        if '38' in model_name:
            self.edge_layers = nn.ModuleList([self.fc_edge2, self.fc_edge3, self.fc_edge4, self.fc_edge5, self.fc_edge6])
        else:
            self.edge_layers = nn.ModuleList([self.fc_edge1, self.fc_edge2, self.fc_edge3, self.fc_edge4, self.fc_edge5, self.fc_edge6])
        if path_index is not None:
            self.path_index = path_index
            self.n_path_lengths = len(self.path_index.path_indices)
            # Register each path-index array as a buffer so it moves with the model.
            for i, pi in enumerate(self.path_index.path_indices):
                self.register_buffer("path_indices_" + str(i), torch.from_numpy(pi))
    def train(self, mode=True):
        # The backbone is always kept in eval mode, even while edge layers train.
        super().train(mode)
        self.backbone.eval()
    def forward(self, x, with_affinity=False):
        if '38' in self.model_name:
            # ResNet-38 backbone returns all stages at once; gradients between
            # stages are cut by the backbone itself.
            x1, x2, x3, x4, x5 = self.model(x, return_stages=True, detach_between_stages=True)
            edge1 = self.fc_edge1(x1)
            edge2 = self.fc_edge2(x2)
            # Crop every head to edge2's spatial size before fusing.
            edge1 = edge1[..., :edge2.size(2), :edge2.size(3)]
            edge3 = self.fc_edge3(x3)[..., :edge2.size(2), :edge2.size(3)]
            edge4 = self.fc_edge4(x4)[..., :edge2.size(2), :edge2.size(3)]
            edge5 = self.fc_edge5(x5)[..., :edge2.size(2), :edge2.size(3)]
            edge = self.fc_edge6(torch.cat([edge1, edge2, edge3, edge4, edge5], dim=1))
        else:
            # Detach after every stage: only the edge heads receive gradients.
            x1 = self.stage1(x).detach()
            x2 = self.stage2(x1).detach()
            x3 = self.stage3(x2).detach()
            x4 = self.stage4(x3).detach()
            x5 = self.stage5(x4).detach()
            edge1 = self.fc_edge1(x1)
            edge2 = self.fc_edge2(x2)
            edge3 = self.fc_edge3(x3)[..., :edge2.size(2), :edge2.size(3)]
            edge4 = self.fc_edge4(x4)[..., :edge2.size(2), :edge2.size(3)]
            edge5 = self.fc_edge5(x5)[..., :edge2.size(2), :edge2.size(3)]
            edge = self.fc_edge6(torch.cat([edge1, edge2, edge3, edge4, edge5], dim=1))
        if with_affinity:
            return self.to_affinity(torch.sigmoid(edge))
        else:
            return edge
    def get_edge(self, x, image_size=(512, 512), stride=4):
        """Edge map for inference: pad to ``image_size``, run the net, crop back.

        NOTE(review): averages edge_out[0] with the horizontally flipped
        edge_out[1] — assumes the batch is (image, hflipped image); confirm
        against the caller.
        """
        feat_size = (x.size(2)-1)//stride+1, (x.size(3)-1)//stride+1
        x = F.pad(x, [0, image_size[1]-x.size(3), 0, image_size[0]-x.size(2)])
        edge_out = self.forward(x)
        edge_out = edge_out[..., :feat_size[0], :feat_size[1]]
        edge_out = torch.sigmoid(edge_out[0]/2 + edge_out[1].flip(-1)/2)
        return edge_out
    def to_affinity(self, edge):
        """Convert a sigmoid edge map to per-path affinities.

        For each path length, gathers edge strengths along every pre-computed
        path; affinity = 1 - max edge strength on the path.
        """
        aff_list = []
        edge = edge.view(edge.size(0), -1)
        for i in range(self.n_path_lengths):
            ind = self._buffers["path_indices_" + str(i)]
            ind_flat = ind.view(-1)
            dist = torch.index_select(edge, dim=-1, index=ind_flat)
            dist = dist.view(dist.size(0), ind.size(0), ind.size(1), ind.size(2))
            # Max-pool over the path dimension, then invert: strong edge => low affinity.
            aff = torch.squeeze(1 - F.max_pool2d(dist, (dist.size(2), 1)), dim=2)
            aff_list.append(aff)
        aff_cat = torch.cat(aff_list, dim=1)
        return aff_cat | 5,071 | 37.424242 | 136 | py |
SemFormer | SemFormer-main/core/abc_modules.py |
import math
import torch
import torch.nn as nn
from abc import ABC
class BaseModule(nn.Module):
    """``nn.Module`` whose ``forward`` dispatches to a named method ("stage").

    ``stage`` is either a single method name, or a list/tuple of names that are
    applied in sequence. In the chained case each stage must return a tuple,
    which is unpacked as the positional arguments of the next stage; the final
    stage's return value is returned as-is.
    """
    def forward(self, *x, stage='forward_x', **kwargs):
        if isinstance(stage, (list, tuple)):
            output = x
            for s in stage:
                # Bug fix: look up each stage by its own name. The original
                # used `getattr(self, stage)` (the whole list), which raised
                # TypeError and made chained stages unusable.
                func = getattr(self, s)
                output = func(*tuple(output), **kwargs)
            return output
        else:
            func = getattr(self, stage)
            return func(*x, **kwargs)
class ABC_Model(ABC):
    """Mixin with shared model utilities: pooling, weight init, and parameter
    grouping for optimizers with separate pretrained/scratch learning rates."""
    @property
    def pretrained_modules(self):
        return self._pretrained_modules
    @property
    def scratched_modules(self):
        return self._scratched_modules
    def global_average_pooling_2d(self, x, keepdims=False):
        """Average a (B, C, H, W) tensor over its spatial dims to (B, C),
        or (B, C, 1, 1) when ``keepdims`` is set."""
        x = torch.mean(x.view(x.size(0), x.size(1), -1), -1)
        if keepdims:
            x = x.view(x.size(0), x.size(1), 1, 1)
        return x
    def initialize(self, modules):
        # Kaiming init for convs; identity affine (weight=1, bias=0) for batch norms.
        for m in modules:
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def get_parameter_groups(self, print_fn=print, modules=None):
        """Split parameters into 4 groups:
        [0] pretrained weights, [1] pretrained biases/others,
        [2] scratched weights, [3] scratched biases/others.
        'Pretrained' means the parameter name contains 'model', 'stage' or 'backbone'.
        """
        def get_param_groups(module):
            groups = [[], [], [], []]
            for name, value in module.named_parameters():
                # pretrained weights (name-based heuristic)
                if ('model' in name) or ('stage' in name) or ('backbone' in name):
                    if 'weight' in name:
                        groups[0].append(value)
                    else:
                        groups[1].append(value)
                # scratched (newly initialized) weights
                else:
                    if 'weight' in name:
                        if print_fn is not None:
                            print_fn(f'scratched weights : {name}')
                        groups[2].append(value)
                    else:
                        if print_fn is not None:
                            print_fn(f'scratched bias : {name}')
                        groups[3].append(value)
            return groups
        if modules is None:
            return get_param_groups(self)
        else:
            # Merge group lists across all given modules, position-wise.
            groups = [[], [], [], []]
            for module in modules:
                gs = get_param_groups(module)
                for i in range(4):
                    groups[i] = groups[i] + gs[i]
            return groups | 2,492 | 29.777778 | 82 | py |
SemFormer | SemFormer-main/core/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
    """Plain namespace object; one instance is shared across all replicas of a
    given sub-module so replication callbacks can exchange state through it."""
    pass


def execute_replication_callbacks(modules):
    """Invoke ``__data_parallel_replicate__(ctx, copy_id)`` on every sub-module
    (of every replica) that defines it.

    All replicas are isomorphic, so the j-th sub-module of every replica
    receives the same shared :class:`CallbackContext`. The master copy
    (``modules[0]``, ``copy_id == 0``) is always called before any slave copy.
    """
    nr_modules = len(list(modules[0].modules()))
    shared_ctxs = [CallbackContext() for _ in range(nr_modules)]
    for copy_id, replica in enumerate(modules):
        for ctx, submodule in zip(shared_ctxs, replica.modules()):
            if hasattr(submodule, '__data_parallel_replicate__'):
                submodule.__data_parallel_replicate__(ctx, copy_id)
class DataParallelWithCallback(DataParallel):
    """``DataParallel`` variant that fires replication callbacks.

    After the stock ``replicate`` builds the per-device copies, every
    sub-module that defines ``__data_parallel_replicate__(ctx, copy_id)`` is
    invoked on each copy.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """
    def replicate(self, module, device_ids):
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
def patch_replication_callback(data_parallel):
    """
    Monkey-patch an existing `DataParallel` object. Add the replication callback.
    Useful when you have customized `DataParallel` implementation.
    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)
    old_replicate = data_parallel.replicate
    @functools.wraps(old_replicate)
    def new_replicate(module, device_ids):
        # Run the original replicate, then fire per-submodule callbacks.
        modules = old_replicate(module, device_ids)
        execute_replication_callbacks(modules)
        return modules
    # Rebind on this instance only; other DataParallel objects are untouched.
    data_parallel.replicate = new_replicate | 3,218 | 35.579545 | 115 | py |
SemFormer | SemFormer-main/core/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import numpy as np
from torch.autograd import Variable
def as_numpy(v):
    """Convert a tensor or ``Variable`` to a NumPy array on the CPU."""
    tensor = v.data if isinstance(v, Variable) else v
    return tensor.cpu().numpy()
class TorchTestCase(unittest.TestCase):
    """``unittest.TestCase`` with a tensor-comparison helper."""
    def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
        """Assert ``a`` and ``b`` are element-wise close within ``atol``/``rtol``.

        Bug fix: ``rtol`` was accepted but never forwarded to ``np.allclose``,
        which therefore silently used its own default (1e-5) regardless of the
        value the caller passed.
        """
        npa, npb = as_numpy(a), as_numpy(b)
        self.assertTrue(
            np.allclose(npa, npb, atol=atol, rtol=rtol),
            'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
        )
| 834 | 26.833333 | 157 | py |
SemFormer | SemFormer-main/core/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dementions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
# Messages exchanged while synchronizing batch statistics across devices:
# each slave sends (sum, squared-sum, element count); the master replies with
# the reduced statistics.
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
    """Base class for the synchronized BN variants.

    In data-parallel training mode the per-device sums/squared-sums are reduced
    on the master replica (via ``SyncMaster``/slave pipes registered in
    ``__data_parallel_replicate__``) so the normalization statistics cover the
    whole mini-batch; otherwise it falls back to ``F.batch_norm``.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        self._sync_master = SyncMaster(self._data_parallel_master)
        # Set by __data_parallel_replicate__ when wrapped in a callback-aware DataParallel.
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None
    def forward(self, input):
        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
        if not (self._is_parallel and self.training):
            return F.batch_norm(
                input, self.running_mean, self.running_var, self.weight, self.bias,
                self.training, self.momentum, self.eps)
        # Resize the input to (B, C, -1).
        input_shape = input.size()
        input = input.view(input.size(0), self.num_features, -1)
        # Compute the sum and square-sum.
        sum_size = input.size(0) * input.size(2)
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft(input ** 2)
        # Reduce-and-broadcast the statistics: the master replica reduces all
        # device partial sums; slaves block until the result comes back.
        if self._parallel_id == 0:
            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # Compute the output.
        if self.affine:
            # MJY:: Fuse the multiplication for speed.
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
        else:
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
        # Reshape it.
        return output.view(input_shape)
    def __data_parallel_replicate__(self, ctx, copy_id):
        """Replication callback: replica 0 publishes its SyncMaster on the
        shared ctx; every other replica registers itself as a slave."""
        self._is_parallel = True
        self._parallel_id = copy_id
        # parallel_id == 0 means master device.
        if self._parallel_id == 0:
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)
    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
        # Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to:: Tete Xiao (http://tetexiao.com/)
        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i] # flatten
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        # Pair each (mean, inv_std) copy back with its originating message id.
        outputs = []
        for i, rec in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))
        return outputs
    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device.

        Note: the running variance tracks the *unbiased* estimate, while the
        inv_std returned for normalizing the current batch uses the *biased*
        variance (clamped at eps).
        """
        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = sum_ / size
        sumvar = ssum - sum_ * mean
        unbias_var = sumvar / (size - 1)
        bias_var = sumvar / size
        self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
        self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 2d or 3d input (a mini-batch of
    1d inputs, optionally with an extra temporal dimension).

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch ``BatchNorm1d``, the mean and
    standard-deviation are reduced across all devices during training, so the
    statistics are computed over every sample of the (multi-GPU) mini-batch
    rather than per device. On a single GPU or CPU it behaves exactly like the
    built-in implementation.

    gamma and beta are learnable parameter vectors of size C (the input size).
    A running estimate of mean/variance is kept with a default momentum of 0.1
    and used for normalization during evaluation.

    Args:
        num_features: num_features from an expected input of size
            `batch_size x num_features [x width]`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable
            affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm1d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm1d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 4d input (a mini-batch of 2d
    inputs with a channel dimension), a.k.a. Spatial BatchNorm.

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch ``BatchNorm2d``, the mean and
    standard-deviation are reduced across all devices during training, so the
    statistics cover every sample of the (multi-GPU) mini-batch instead of a
    single device's share. On a single GPU or CPU it behaves exactly like the
    built-in implementation.

    gamma and beta are learnable parameter vectors of size C (the input size).
    A running estimate of mean/variance is kept with a default momentum of 0.1
    and used for normalization during evaluation. Statistics are computed over
    the `(N, H, W)` slices for each channel.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x height x width
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable
            affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm2d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm2d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        dims = input.dim()
        if dims != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(dims))
        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
    of 4d inputs
    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
    This module differs from the built-in PyTorch BatchNorm3d as the mean and
    standard-deviation are reduced across all devices during training.
    For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalize the tensor on each device using
    the statistics only on that device, which accelerated the computation and
    is also easy to implement, but the statistics might be inaccurate.
    Instead, in this synchronized version, the statistics will be computed
    over all training samples distributed on multiple devices.
    Note that, for one-GPU or CPU-only case, this module behaves exactly same
    as the built-in PyTorch implementation.
    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and gamma and beta are learnable parameter vectors
    of size C (where C is the input size).
    During training, this layer keeps a running estimate of its computed mean
    and variance. The running sum is kept with a default momentum of 0.1.
    During evaluation, this running mean/variance is used for normalization.
    Because the BatchNorm is done over the `C` dimension, computing statistics
    on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
    or Spatio-temporal BatchNorm
    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x depth x height x width
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer learnable
            affine parameters. Default: ``True``
    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)
    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        # Reject anything that is not (N, C, D, H, W) before deferring to the base class.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input) | 12,932 | 44.861702 | 116 | py |
SemFormer | SemFormer-main/core/models/caae.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import Mlp
from ..module import SeparateLinear
from .modules import Token2Embed, Embed2Token
from .transformer_backbone import ViTBackbone
from ..functional import cosine_similarity
from ..arch_transformer.vit import VIT_NET_CFG
from ..arch_transformer.vit import Block as ViTBlock
from ..abc_modules import BaseModule, ABC_Model
class ViTEncoder(BaseModule):
    """Thin wrapper exposing a ViT backbone's feature extraction as BaseModule stages."""
    def __init__(self, model_name, with_last_norm=True, **kwargs):
        super().__init__()
        self.vit_backbone = ViTBackbone(model_name, with_last_norm=with_last_norm, **kwargs)
    def forward_x_with_outer_token(self, x, outer_token, outer_posembed):
        # Variant that injects externally supplied tokens (with their positional embeddings).
        return self.vit_backbone.forward_features_with_outer_token(x, outer_token, outer_posembed)
    def forward_x(self, x):
        return self.vit_backbone.forward_features(x)
class ViTDecoder(BaseModule):
    """Stack of ViT blocks followed by LayerNorm and a linear head that maps
    each token to a flattened ``out_dim * patch_size**2`` patch."""
    def __init__(self, width, depth, num_heads, patch_size, out_dim=3, **kwargs):
        super().__init__()
        self.width = width
        self.depth = depth
        self.num_heads = num_heads
        self.patch_size = patch_size
        self.out_dim = out_dim
        blocks = []
        for _ in range(depth):
            blocks.append(ViTBlock(
                dim=width, num_heads=num_heads, mlp_ratio=4., qkv_bias=True,
                drop=0., attn_drop=0., drop_path=0.,
                norm_layer=nn.LayerNorm, act_layer=nn.GELU))
        self.blocks = nn.ModuleList(blocks)
        self.norm = nn.LayerNorm(width)
        self.mlp = nn.Linear(width, out_dim * (patch_size ** 2), bias=True)
    def forward_x_without_outer_token(self, x, num_outer_tokens):
        for blk in self.blocks:
            x = blk(x)
        # Drop the leading outer tokens before projecting tokens back to pixels.
        return self.mlp(self.norm(x[:, num_outer_tokens:, :]))
    def forward_x(self, x):
        for blk in self.blocks:
            x = blk(x)
        return self.mlp(self.norm(x))
class ViTAutoEncoder(BaseModule, ABC_Model):
    """ViT encoder plus a lightweight ViT decoder that reconstructs the input
    image from patch tokens via a linear head and ``F.fold``."""
    def __init__(self, decoder_width, decoder_depth, out_dim=3,
            model_name=None, with_last_norm=True, **kwargs):
        super().__init__()
        self.decoder_width = decoder_width
        self.decoder_depth = decoder_depth
        self.out_dim = out_dim
        self.model_name = model_name
        # Pin `version`/`patch_size` into kwargs so the encoder and the decoder
        # are guaranteed to be built with the same values.
        version = kwargs.setdefault('version', 'base')
        patch_size = kwargs.setdefault('patch_size', 16)
        # Kept on self for convenient access in forward_x.
        self.patch_size = patch_size
        num_heads = VIT_NET_CFG[version]['num_heads']
        self.num_heads = num_heads
        self.encoder = ViTEncoder(model_name=model_name, with_last_norm=with_last_norm, **kwargs)
        # Projects encoder tokens down/up to the decoder width.
        self.mlp = nn.Linear(self.encoder.vit_backbone.model.embed_dim, decoder_width, bias=True)
        self.decoder = ViTDecoder(
            width=decoder_width, depth=decoder_depth,
            num_heads=num_heads, out_dim=out_dim, **kwargs)
        self.initialize([self.decoder, self.mlp])
    def forward_x(self, x):
        token = self.mlp(self.encoder(x))
        # Decoder yields one flattened patch per token; move the patch
        # dimension first: (B, L, out_dim*P*P) -> (B, out_dim*P*P, L).
        patches = self.decoder(token).transpose(1, 2)
        # Stitch non-overlapping patches back into an image of the input size.
        output = F.fold(
            patches,
            output_size=x.shape[-2:], kernel_size=self.patch_size,
            dilation=1, padding=0, stride=self.patch_size)
        return token, output
class ClassAwareAutoEncoder(ViTAutoEncoder):
    """Autoencoder with a class-aware bottleneck.

    Encoder tokens are mapped to per-class embeddings; their reduction over the
    token dimension (the "cre") is compared by cosine similarity against a
    learned per-class representation table (the "crs"), while the image is
    reconstructed from the re-projected embeddings.
    """
    def __init__(self, *args, num_classes, class_dim, reduction='sum', **kwargs):
        super().__init__(*args, **kwargs)
        self.num_classes = num_classes
        self.class_dim = class_dim
        # Resolved to e.g. torch.sum / torch.mean / torch.max by name.
        self.reduction = getattr(torch, reduction)
        self.embed_dim = self.encoder.vit_backbone.model.embed_dim
        # Token embedding -> concatenated per-class embeddings (K * E).
        self.convert = nn.Sequential(
            nn.Linear(self.embed_dim, num_classes * self.class_dim, bias=False),
            nn.ReLU(True)
        )
        # Per-class embeddings -> token embedding, for reconstruction.
        self.re_convert = nn.Sequential(
            nn.Linear(num_classes * self.class_dim, self.embed_dim, bias=False),
            nn.LayerNorm(self.embed_dim)
        )
        # Learned class-representation table: one class_dim vector per class.
        self.crs = nn.Sequential(
            nn.Embedding(num_classes, self.class_dim),
            nn.ReLU(True)
        )
        self.initialize([self.convert, self.re_convert])
    def get_crs(self, x):
        """Class-representation vectors tiled per batch element: (B, K, E)."""
        index = torch.arange(self.num_classes)[None, :].repeat(x.shape[0], 1).to(x.device)
        crs = self.crs(index)
        return crs
    def get_cre(self, x, grid_embed=False):
        """Per-class embeddings of ``x``: (B, L, K, E) when ``grid_embed``,
        otherwise reduced over tokens to (B, K, E)."""
        # (B, L, C)
        token = self.encoder(x)
        # (B, L, KE)
        cls_embed = self.convert(token)
        if grid_embed:
            # (B, L, K, E)
            cre = cls_embed.view(cls_embed.shape[0], cls_embed.shape[1], self.num_classes, -1)
        else:
            # (B, K, E)
            cre = self.reduction(cls_embed, dim=1)
            # Reductions like torch.max return (values, indices); keep values only.
            if isinstance(cre, (list, tuple)):
                cre = cre[0]
            cre = cre.reshape(
                x.shape[0], self.num_classes, self.class_dim)
        return cre
    def forward_x(self, x, labels):
        # (B, L, C)
        token = self.encoder(x)
        # (B, L, KE)
        cls_embed = self.convert(token)
        # (B, K, E)
        cre = self.reduction(cls_embed, dim=1)
        if isinstance(cre, (list, tuple)):
            cre = cre[0]
        cre = cre.reshape(
            x.shape[0], self.num_classes, self.class_dim)
        # Reconstruct the image from the re-projected class embeddings.
        # (B, L, C)
        re_token = self.re_convert(cls_embed)
        re_token = self.mlp(re_token)
        output = self.decoder(re_token)
        # (B, 3 x P x P, L)
        output = output.transpose(1, 2)
        output = F.fold(
            output,
            output_size=x.shape[-2:], kernel_size=self.patch_size,
            dilation=1, padding=0, stride=self.patch_size)
        crs = self.get_crs(x)
        # Detach crs rows for classes absent from the image: only present
        # classes propagate gradients into the representation table.
        mask = labels[:, :, None]
        crs = (crs * mask) + crs.detach() * (1 - mask)
        sim = cosine_similarity(cre, crs, is_aligned=True)
        return sim, output | 6,160 | 30.433673 | 97 | py |
SemFormer | SemFormer-main/core/models/modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import Mlp, DropPath
from ..arch_transformer.vit import Attention as SelfAttention
from ..arch_transformer.vit import Block as ViTBlock
class ResBlock(nn.Module):
    """Two-convolution residual block with explicit 'same' padding.

    Mirrors a torchvision BasicBlock, but uses ``SamePad2d`` layers so the
    spatial size stays consistent for any kernel/dilation combination.
    The shortcut is projected (1x1 conv + BN) only when the channel count
    or stride changes.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, stride=1, groups=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.stride = stride
        self.groups = groups
        self.pad1 = SamePad2d(kernel_size, stride, dilation)
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, kernel_size, dilation=dilation, stride=stride,
            padding=0, bias=False, groups=groups)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, kernel_size, dilation=dilation, stride=1,
            padding=0, bias=False, groups=groups)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.pad2 = SamePad2d(kernel_size, stride=1, dilation=dilation)
        self.relu = nn.ReLU(True)
        # Project the identity path only when shapes would otherwise differ.
        needs_projection = (in_channels != out_channels) or (stride != 1)
        if needs_projection:
            self.identity_transform = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False, groups=groups),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.identity_transform = None

    def forward(self, x):
        shortcut = x if self.identity_transform is None else self.identity_transform(x)
        y = self.relu(self.bn1(self.conv1(self.pad1(x))))
        y = self.bn2(self.conv2(self.pad2(y)))
        y = y + shortcut
        return self.relu(y)
class ResBlockBottleNeck(nn.Module):
    """Bottleneck residual block (1x1 -> kxk -> 1x1) with 'same' padding.

    NOTE: ``stride`` only affects the shortcut projection -- all convolutions
    in the main path run with stride 1 (as written in the original).
    """

    def __init__(self, in_channels, inter_channels, out_channels, kernel_size=3, dilation=1, stride=1, groups=1):
        super().__init__()
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.stride = stride
        self.groups = groups
        self.conv1 = nn.Conv2d(in_channels, inter_channels, 1, groups=groups)
        self.bn1 = nn.BatchNorm2d(inter_channels)
        self.pad = SamePad2d(kernel_size, stride=1, dilation=dilation)
        self.conv2 = nn.Conv2d(
            inter_channels, inter_channels, kernel_size, dilation=dilation, stride=1,
            padding=0, bias=False, groups=groups)
        self.bn2 = nn.BatchNorm2d(inter_channels)
        self.conv3 = nn.Conv2d(inter_channels, out_channels, 1, groups=groups)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.pad2 = SamePad2d(kernel_size, stride=1, dilation=dilation)
        self.relu = nn.ReLU(True)
        needs_projection = (in_channels != out_channels) or (stride != 1)
        if needs_projection:
            self.identity_transform = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False, groups=groups),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.identity_transform = None

    def forward(self, x):
        shortcut = x if self.identity_transform is None else self.identity_transform(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(self.pad(y))))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
class PixelDecoder(nn.Module):
    """Fuses multi-level CNN features into one token sequence.

    Each selected level (start_level..end_level) is reduced to
    ``out_channels`` by a 1x1 conv+BN+ReLU, upsampled to the spatial size of
    the finest selected level, channel-concatenated, merged back to
    ``out_channels`` and finally projected token-wise.

    Returns:
        Tensor of shape (B, H*W, out_channels), where (H, W) is the spatial
        size of the ``start_level`` feature map.
    """
    def __init__(self, in_channels, out_channels, start_level=0, end_level=-1):
        super().__init__()
        assert isinstance(in_channels, (list, tuple))
        self.num_levels = len(in_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        assert start_level < self.num_levels
        self.start_level = start_level
        if end_level < 0:
            end_level += self.num_levels
        assert self.start_level < end_level < self.num_levels
        self.end_level = end_level
        # One channel-reduction branch per level; unused levels keep an
        # Identity so ModuleList indices line up with level indices.
        self.conv_reduce = nn.ModuleList()
        for i in range(self.num_levels):
            if self.start_level <= i <= self.end_level:
                self.conv_reduce.append(nn.Sequential(
                    nn.Conv2d(in_channels[i], out_channels, 1, bias=False),
                    nn.BatchNorm2d(out_channels),
                    nn.ReLU(True)
                ))
            else:
                self.conv_reduce.append(nn.Identity())
        self.conv_merge = nn.Sequential(
            nn.Conv2d(out_channels * (self.end_level - self.start_level + 1), out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        )
        self.out_feat = nn.Sequential(
            nn.Linear(out_channels, out_channels, bias=False),
            nn.LayerNorm(out_channels),
            nn.GELU()
        )
    def forward(self, x):
        assert isinstance(x, (list, tuple)) and len(x) == self.num_levels
        reduce_x = [self.conv_reduce[i](x[i]) for i in range(self.num_levels)]
        size = reduce_x[self.start_level].shape[-2:]
        # BUG FIX: the original concatenated the *raw* inputs -- reduce_x was
        # computed but never used -- which breaks conv_merge's expected
        # channel count whenever in_channels[i] != out_channels.
        mlvl_feats = [reduce_x[self.start_level]]
        for i in range(self.start_level + 1, self.end_level + 1):
            mlvl_feats.append(F.interpolate(reduce_x[i], size=size, mode='bilinear', align_corners=True))
        mlvl_feats = torch.cat(mlvl_feats, dim=1)
        merge_feat = self.conv_merge(mlvl_feats)
        # (B, C, H, W) -> (B, H*W, C). BUG FIX: ``x`` is a list here, so the
        # original ``x.shape[0]`` raised AttributeError -- use the tensor.
        merge_feat = merge_feat.view(merge_feat.shape[0], self.out_channels, -1).transpose(1, 2)
        out = self.out_feat(merge_feat)
        return out
class SemeanticDecoder(nn.Module):
    """Projects a CNN feature map to tokens and refines them with a stack of
    ViT blocks, ending in LayerNorm + GELU and a token-wise projection.

    Returns a tensor of shape (B, H*W, out_channels).
    """

    def __init__(self, in_channels, out_channels, depth):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depth = depth
        self.conv_feat = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        )
        self.blocks = nn.ModuleList([
            ViTBlock(dim=out_channels, num_heads=12, mlp_ratio=4., qkv_bias=True, drop=0.,
                     attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, act_layer=nn.GELU)
            for _ in range(depth)
        ])
        self.norm = nn.LayerNorm(out_channels)
        self.act = nn.GELU()
        self.out_feat = nn.Sequential(
            nn.Linear(out_channels, out_channels, bias=False),
            nn.LayerNorm(out_channels),
            nn.GELU()
        )

    def forward(self, x):
        feat = self.conv_feat(x)
        # (B, C, H, W) -> (B, H*W, C)
        tokens = feat.view(feat.shape[0], self.out_channels, -1).transpose(1, 2)
        for blk in self.blocks:
            tokens = blk(tokens)
        tokens = self.act(self.norm(tokens))
        return self.out_feat(tokens)
# class PixelDecoder(nn.Module):
# def __init__(self, in_channels, out_channels, start_level=0, end_level=-1):
# super().__init__()
# assert isinstance(in_channels, (list, tuple))
# self.num_levels = len(in_channels)
# self.in_channels = in_channels
# self.out_channels = out_channels
# assert start_level < self.num_levels
# self.start_level = start_level
# if end_level < 0:
# end_level += self.num_levels
# assert self.start_level < end_level < self.num_levels
# self.end_level = end_level
# self.conv_reduce = nn.ModuleList()
# for i in range(self.num_levels):
# if self.start_level <= i <= self.end_level:
# self.conv_reduce.append(nn.Sequential(
# nn.Conv2d(in_channels[i], out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(True)
# ))
# else:
# self.conv_reduce.append(nn.Identity())
# self.conv_merge = nn.Sequential(
# nn.Conv2d(out_channels * (self.end_level - self.start_level + 1), out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(True)
# )
# self.out_feat = nn.Sequential(
# nn.Linear(out_channels, out_channels, bias=False),
# nn.LayerNorm(out_channels),
# nn.GELU()
# )
# def forward(self, x):
# assert isinstance(x, (list, tuple)) and len(x) == self.num_levels
# reduce_x = [self.conv_reduce[i](x[i]) for i in range(self.num_levels)]
# size = x[self.start_level].shape[-2:]
# mlvl_feats = [x[self.start_level]]
# for i in range(self.start_level + 1, self.end_level + 1):
# mlvl_feats.append(F.interpolate(x[i], size=size, mode='bilinear', align_corners=True))
# mlvl_feats = torch.cat(mlvl_feats, dim=1)
# merge_feat = self.conv_merge(mlvl_feats)
# merge_feat = merge_feat.view(x.shape[0], self.out_channels, -1).transpose(1, 2)
# out = self.out_feat(merge_feat)
# return out
# class KeyGenerator(nn.Module):
# def __init__(self, in_channels, out_channels=256, start_level=0, end_level=-1):
# super().__init__()
# assert isinstance(in_channels, (list, tuple))
# self.num_levels = len(in_channels)
# self.in_channels = in_channels
# self.out_channels = out_channels
# assert start_level < self.num_levels
# self.start_level = start_level
# if end_level < 0:
# end_level += self.num_levels
# assert self.start_level < end_level < self.num_levels
# self.end_level = end_level
# self.lateral_convs = nn.ModuleList()
# self.conv_outs = nn.ModuleList()
# for i in range(self.num_levels):
# if self.start_level <= i <= self.end_level:
# self.lateral_convs.append(
# nn.Sequential(
# nn.Conv2d(in_channels[i], out_channels, 1, bias=False),
# nn.GroupNorm(num_features=out_channels, num_groups=32)
# )
# self.conv_outs.append(
# nn.Sequential(
# nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
# nn.GroupNorm(num_features=out_channels, num_groups=32),
# nn.ReLU(True))
# )
# else:
# self.lateral_convs.append(nn.Identity())
# self.conv_outs.append(nn.Identity())
# self.key_embed = nn.Conv2d(out_channels, out_channels, 1)
# def forward(self, inputs):
# assert isinstance(inputs, (list, tuple)) and len(inputs) == self.num_levels
# outputs = [self.lateral_convs[i](inputs[i]) for i in range(self.num_levels)]
# for i in range(self.end_level, self.start_level - 1, -1):
# outputs[i - 1] += F.interpolate(
# outputs[i], size=outputs[i - 1].shape[-2:], mode='bilinear', align_corners=True)
# outputs[i - 1] = self.conv_outs[i - 1](outputs[i - 1])
# # outputs = [self.conv_outs[i](outputs[i]) for i in range(self.num_levels)]
# key_embed = self.key_embed(outputs[self.start_level])
# return key_embed
class CrossAttention(nn.Module):
    """Multi-head attention where queries come from one token set and
    keys/values from another (``query`` attends over ``key``)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        self.scale = (dim // num_heads) ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim*2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, query, key):
        batch, num_q, dim = query.shape
        num_k = key.shape[1]
        head_dim = dim // self.num_heads
        # (B, heads, Nq, head_dim)
        q = self.q(query).view(batch, num_q, self.num_heads, head_dim).transpose(1, 2)
        # (2, B, heads, Nk, head_dim)
        kv = self.kv(key).view(batch, num_k, 2, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]
        # (B, heads, Nq, Nk): scaled dot-product scores, then softmax+dropout
        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        # (B, Nq, C): heads merged back together
        context = torch.matmul(weights, v).transpose(1, 2).reshape(batch, num_q, dim)
        return self.proj_drop(self.proj(context))
class HybridAttention(nn.Module):
    """Sum of self-attention over the query tokens and cross-attention from
    query to key tokens, with a shared head configuration."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.scale = (dim // num_heads) ** -0.5
        self.qkv_bias = qkv_bias
        self.attn_drop = attn_drop
        self.proj_drop = proj_drop
        self.self_attn = SelfAttention(dim, num_heads, qkv_bias, attn_drop, proj_drop)
        self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, attn_drop, proj_drop)

    def forward(self, query, key):
        # Both branches consume the same queries; their outputs are summed.
        return self.self_attn(query) + self.cross_attn(query, key)
class HybridBlock(nn.Module):
    """Pre-norm transformer block whose attention is a HybridAttention
    (self-attention over queries + cross-attention to keys), followed by a
    standard MLP. Residual connections wrap both sub-layers, as in ViT.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1_q = norm_layer(dim)
        self.norm1_k = norm_layer(dim)
        self.attn = HybridAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, query, key):
        """Return the updated query tokens; ``key`` is only attended to."""
        query = query + self.drop_path(self.attn(self.norm1_q(query), self.norm1_k(key)))
        query = query + self.drop_path(self.mlp(self.norm2(query)))
        # BUG FIX: the original returned the undefined name ``x`` (NameError).
        return query
class KeyGenerator(nn.Module):
    """Refines multi-level features coarse-to-fine with cross-attention.

    Each selected level is flattened to tokens, linearly projected to
    ``out_channels``, then level i attends (HybridBlock) to the coarser
    level i+1, working downward. The finest refined level is returned as
    (B, H*W, out_channels).
    """
    def __init__(self, in_channels, out_channels=256, start_level=0, end_level=-1):
        super().__init__()
        assert isinstance(in_channels, (list, tuple))
        self.num_levels = len(in_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        assert start_level < self.num_levels
        self.start_level = start_level
        if end_level < 0:
            end_level += self.num_levels
        assert self.start_level < end_level < self.num_levels
        self.end_level = end_level
        # BUG FIX: projs[i] is applied to the i-th *selected* level, whose
        # channel count is in_channels[start_level + i] (the original indexed
        # in_channels[i], wrong whenever start_level > 0).
        self.projs = nn.ModuleList([
            nn.Sequential(
                nn.Linear(in_channels[self.start_level + i], out_channels, bias=False),
                nn.LayerNorm(out_channels))
            for i in range(self.end_level - self.start_level + 1)
        ])
        # BUG FIX: ``norm_layer`` was an undefined name here; use nn.LayerNorm.
        self.blocks = nn.ModuleList([
            HybridBlock(dim=out_channels, num_heads=12, mlp_ratio=4., qkv_bias=True, drop=0.,
                        attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, act_layer=nn.GELU)
            for i in range(self.end_level - self.start_level)
        ])
    def forward(self, inputs):
        assert isinstance(inputs, (list, tuple)) and len(inputs) == self.num_levels
        # (B, C_i, H, W) -> (B, H*W, C_i) for the selected levels only
        mlvl_feats = [
            x.flatten(-2).transpose(1, 2) for x in inputs[self.start_level:self.end_level + 1]]
        mlvl_feats = [self.projs[i](mlvl_feats[i]) for i in range(len(mlvl_feats))]
        # Refine from coarse to fine: each level attends to the next coarser.
        for i in range(len(mlvl_feats) - 2, -1, -1):
            mlvl_feats[i] = self.blocks[i](mlvl_feats[i], mlvl_feats[i + 1])
        # BUG FIX: mlvl_feats is already sliced to start at start_level, so
        # the finest refined level is index 0 (the original indexed it with
        # self.start_level, wrong whenever start_level > 0).
        out = mlvl_feats[0]
        return out
class QueryGenerator(nn.Module):
    """Projects a CNN feature map to tokens and refines them with ``depth``
    ViT blocks. Returns (B, H*W, out_channels).
    """
    def __init__(self, in_channels, out_channels, depth):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depth = depth
        # BUG FIX: the original passed a stray positional ``1`` to nn.Linear,
        # colliding with the keyword ``bias`` (TypeError at construction).
        self.proj = nn.Sequential(
            nn.Linear(in_channels, out_channels, bias=False),
            nn.LayerNorm(out_channels))
        # BUG FIX: ``norm_layer`` was an undefined name here; use nn.LayerNorm.
        self.blocks = nn.ModuleList([
            ViTBlock(dim=out_channels, num_heads=12, mlp_ratio=4., qkv_bias=True, drop=0.,
                     attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, act_layer=nn.GELU)
            for i in range(depth)
        ])
    def forward(self, x):
        # (B, C, H, W) -> (B, H*W, C)
        x = x.flatten(-2).transpose(1, 2)
        x = self.proj(x)
        for block in self.blocks:
            x = block(x)
        return x
# class DynamicTokenSelection(nn.Module):
# def __init__(self, )
class Token2Embed(nn.Module):
    """Single-head attention that pools token features into embeddings:
    each embedding queries over all tokens.
    """

    def __init__(self, dim, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.scale = dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim*2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, token, embed):
        """token: (B, Nt, C); embed: (B, Ne, C) -> returns (B, Ne, C)."""
        B, Nt, C = token.shape
        # (B, Ne, C)
        q = self.q(embed)
        # (B, Nt, 2C) -> (B, Nt, 2, C) -> (2, B, Nt, C)
        kv = self.kv(token).reshape(B, Nt, 2, C).permute(2, 0, 1, 3)
        # BUG FIX: the original unbound from the undefined name ``kn``.
        k, v = kv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
        # (B, Ne, C) @ (B, C, Nt) -> (B, Ne, Nt)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        # (B, Ne, Nt) @ (B, Nt, C) -> (B, Ne, C)
        x = attn @ v
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Embed2Token(nn.Module):
    """Single-head attention that redistributes embedding features back onto
    tokens: each token queries over all embeddings.
    """

    def __init__(self, dim, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.scale = dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim*2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, token, embed):
        """token: (B, Nt, C); embed: (B, Ne, C) -> returns (B, Nt, C)."""
        B, Nt, C = token.shape
        Ne = embed.shape[1]
        # (B, Nt, C)
        q = self.q(token)
        # (B, Ne, 2C) -> (B, Ne, 2, C) -> (2, B, Ne, C)
        # BUG FIX: the original reshaped with Nt (token length) instead of Ne,
        # which fails whenever Nt != Ne.
        kv = self.kv(embed).reshape(B, Ne, 2, C).permute(2, 0, 1, 3)
        # BUG FIX: the original unbound from the undefined name ``kn``.
        k, v = kv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
        # (B, Nt, C) @ (B, C, Ne) -> (B, Nt, Ne)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        # (B, Nt, Ne) @ (B, Ne, C) -> (B, Nt, C)
        x = attn @ v
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SemanticCorrelationModule(nn.Module):
    """Augments CAMs with feature-space correlations.

    A class term (CAM-weighted pooled features) is matched against a pixel
    term (self-similarity-smoothed features) to produce an additive CAM
    refinement. The incoming CAM is detached, so gradients flow only
    through ``feat``.
    """

    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        # 1x1 projection to a quarter of the input channels.
        self.proj_feat = nn.Conv2d(in_channels, in_channels // 4, 1)

    def forward(self, feat, cam):
        batch, channels, height, width = feat.shape
        num_cls = cam.shape[1]
        # (B, C/4, H, W) -> (B, HW, C/4)
        proj = self.proj_feat(feat)
        proj = proj.view(batch, channels // 4, -1).transpose(1, 2)
        # (B, K, HW); detached so the correlation does not train the CAM head
        cam_flat = cam.detach().view(batch, num_cls, -1)
        # class branch: (B, K, HW) @ (B, HW, C/4) -> (B, K, C/4)
        class_term = cam_flat @ proj
        # pixel branch: softmax-normalised self-similarity, (B, HW, HW)
        affinity = (proj @ proj.transpose(1, 2)).softmax(dim=-1)
        # (B, HW, HW) @ (B, HW, C/4) -> (B, HW, C/4)
        pixel_term = affinity @ proj
        # (B, K, C/4) @ (B, C/4, HW) -> (B, K, HW) -> (B, K, H, W)
        aug_cam = class_term @ pixel_term.transpose(1, 2)
        aug_cam = aug_cam.view(batch, self.num_classes, height, width)
        return cam + aug_cam
| 21,397 | 33.737013 | 117 | py |
SemFormer | SemFormer-main/core/models/base_backbone.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import re
from ..module import FixedBatchNorm
from ..arch_resnet import resnet, resnet38
from ..arch_resnest import resnest
from ..arch_vgg import vgg
from ..abc_modules import ABC_Model
class BaseBackboneVGG(nn.Module, ABC_Model):
    """VGG feature backbone split into five stages (one per pooling block).

    The pretrained ``features`` sequential of a torchvision-style VGG is
    partitioned at max-pool boundaries into ``stage1``..``stage5``.
    """
    ARCH_CFG = {
        '11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
        '13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
        '16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
        '19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    }
    def __init__(self, model_name, num_classes=20):
        super().__init__()
        self.model_name = model_name
        self.num_classes = num_classes
        # per-stage output channels, stage1 - stage5
        self.num_features = [64, 128, 256, 512, 512]
        # BUG FIX: the original looked the constructor up on the undefined
        # name ``models``; the project's ``vgg`` module (imported at the top
        # of this file) provides the constructors.
        vgg_model = getattr(vgg, model_name)
        model = vgg_model(pretrained=True)
        stages = BaseBackboneVGG.build_stages(model_name, model.features)
        for i in range(len(stages)):
            self.add_module('stage{}'.format(i + 1), stages[i])
    @staticmethod
    def count_indices(cfg, batch_norm=False):
        """Return the layer indices in ``features`` where each stage starts/ends.

        Each conv entry expands to conv+ReLU (plus BN when ``batch_norm``);
        every 'M' (max-pool) closes a stage.
        """
        indices = [0]
        counter = 0
        for v in cfg:
            if v == 'M':
                counter += 1
                indices.append(counter)
            else:
                if batch_norm:
                    counter += 3
                else:
                    counter += 2
        return indices
    @staticmethod
    def build_stages(model_name, model):
        """Slice the VGG ``features`` sequential into per-stage sub-sequentials."""
        depth = re.findall(r'\d+', model_name)[0]
        cfg = BaseBackboneVGG.ARCH_CFG[depth]
        bn = 'bn' in model_name
        indices = BaseBackboneVGG.count_indices(cfg, bn)
        assert indices[-1] == len(model), 'indices: {}, model length: {}'.format(indices, len(model))
        stages = []
        for i in range(1, len(indices)):
            stages.append(model[indices[i - 1]:indices[i]])
        return stages
    def forward(self, x):
        stage1 = self.stage1(x)      # stride = 2
        stage2 = self.stage2(stage1) # stride = 4
        stage3 = self.stage3(stage2) # stride = 8
        stage4 = self.stage4(stage3) # stride = 16
        stage5 = self.stage5(stage4) # stride = 32
        return [stage1, stage2, stage3, stage4, stage5]
class BaseBackbone(nn.Module, ABC_Model):
    """Five-stage ResNet / ResNet-38 / ResNeSt feature backbone.

    Pretrained weights are loaded at construction time. ``mode='fix'``
    swaps BatchNorm for the project's FixedBatchNorm (frozen statistics).
    ResNet-38 keeps the whole model (its forward returns stages itself);
    other variants are split into ``stage1``..``stage5``.
    """
    def __init__(self, model_name, num_classes=20, mode='fix',
                 segmentation=False, strides=(2, 2, 2, 1), dilations=(1, 1, 1, 1)):
        super().__init__()
        self.model_name = model_name
        self.num_classes = num_classes
        self.mode = mode
        self.segmentation = segmentation
        self.strides = strides
        if self.mode == 'fix':
            self.norm_fn = FixedBatchNorm
        else:
            self.norm_fn = nn.BatchNorm2d
        if 'resnet' in model_name:
            if '38' in model_name:
                model = resnet38.ResNet38(strides=strides)
                state_dict = resnet38.convert_mxnet_to_torch()
                model.load_state_dict(state_dict)
            else:
                model = resnet.ResNet(resnet.Bottleneck, resnet.layers_dic[model_name], strides=strides,
                                      dilations=dilations, batch_norm_fn=self.norm_fn)
                # Drop the classification head weights before loading.
                state_dict = model_zoo.load_url(resnet.urls_dic[model_name])
                state_dict.pop('fc.weight')
                state_dict.pop('fc.bias')
                model.load_state_dict(state_dict)
        else:
            # ResNeSt variants; dilation is increased for segmentation use.
            if segmentation:
                dilation, dilated = 4, True
            else:
                dilation, dilated = 2, False
            # IDIOM/SECURITY FIX: look the constructor up with getattr instead
            # of eval() on a string built from ``model_name``.
            model = getattr(resnest, model_name)(pretrained=True, dilated=dilated, dilation=dilation, norm_layer=self.norm_fn)
            del model.avgpool
            del model.fc
        if 'resnet38' in model_name:
            self.model = model
        else:
            self.stage1 = nn.Sequential(model.conv1,
                                        model.bn1,
                                        model.relu,
                                        model.maxpool)
            self.stage2 = nn.Sequential(model.layer1)
            self.stage3 = nn.Sequential(model.layer2)
            self.stage4 = nn.Sequential(model.layer3)
            self.stage5 = nn.Sequential(model.layer4)
    def forward(self, x):
        if '38' in self.model_name:
            # ResNet-38 returns its own stage list.
            return self.model(x, return_stages=True)
        stage1 = self.stage1(x)      # stride = 4 / 2
        stage2 = self.stage2(stage1) # stride = 4 / 4
        stage3 = self.stage3(stage2) # stride = 8 / 8
        stage4 = self.stage4(stage3) # stride = 16 / 8
        stage5 = self.stage5(stage4) # stride = 16 / 8
        return [stage1, stage2, stage3, stage4, stage5]
class ReturnLastLayerBaseBackboneVGG(BaseBackboneVGG):
    """Variant of ``BaseBackboneVGG`` that yields only the deepest stage."""

    def forward(self, x):
        stages = super().forward(x)
        return stages[-1]
class ReturnLastLayerBaseBackbone(BaseBackbone):
    """Variant of ``BaseBackbone`` that yields only the deepest stage."""

    def forward(self, x):
        stages = super().forward(x)
        return stages[-1]
| 5,212 | 33.296053 | 127 | py |
SemFormer | SemFormer-main/core/models/transformer_segmentor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .. import functional as _F
from ..module import SeparateLinear
from .transformer_backbone import ViTBackbone
from ..abc_modules import ABC_Model
class SemFormerSegmentor(nn.Module, ABC_Model):
    """ViT-based segmentor that prepends one learnable class token per class.

    The backbone's single pretrained [CLS] token (and its position embedding)
    is replicated ``num_classes`` times; after the transformer, the class
    tokens feed a per-class embedding head and the grid tokens feed the
    per-pixel segmentation head.
    """
    def __init__(self,
                 model_name, num_classes, class_dim, version='base', patch_size=16,
                 resolution=224, in21k=False, pos_embed_size=None):
        super().__init__()
        self.model_name = model_name
        self.backbone = ViTBackbone(
            model_name, version=version,
            patch_size=patch_size,
            resolution=resolution, in21k=in21k,
            pos_embed_size=pos_embed_size,
            with_cls_token=True, with_posembed=True)
        self.num_classes = num_classes
        self.class_dim = class_dim
        self.with_dist = 'dist' in model_name
        self.patch_size = self.backbone.model.patch_size
        # One class token per class, initialised from the pretrained [CLS].
        self.cls_token = nn.Parameter(
            self.backbone.model.cls_token.data.clone().repeat(1, num_classes, 1))
        # Matching position embeddings, initialised from the [CLS] pos-embed.
        self.cls_posembed = nn.Parameter(
            self.backbone.model.pos_embed.data[:, :1, :].clone().repeat(1, num_classes, 1))
        self.cls_head = SeparateLinear(self.backbone.model.embed_dim, class_dim, groups=num_classes)
        self.seg_head = nn.Sequential(
            nn.Dropout2d(0.5),
            nn.Linear(self.backbone.model.embed_dim, self.num_classes)
        )
        self.initialize([self.cls_head, self.seg_head])
    def forward(self, x, return_cra=False):
        # Pad so height/width become multiples of the patch size.
        iH, iW = x.shape[-2:]
        x = _F.patchable_pad2d(x, self.patch_size)
        input_shape = x.shape
        H, W = [s // self.backbone.model.patch_size for s in input_shape[-2:]]
        if return_cra:
            x, attn_list = self.backbone.forward_features_with_outer_token(
                x, self.cls_token, self.cls_posembed, return_attn=True)
            # CRA: (B, H, K, N) -- class-token attention over grid tokens,
            # sliced out of each block's full attention map.
            cra_list = [
                attn[:, :, :self.num_classes, self.num_classes:] for attn in attn_list]
        else:
            x = self.backbone.forward_features_with_outer_token(x, self.cls_token, self.cls_posembed)
        # Tokens after index num_classes are the image grid tokens.
        grid_token = x[:, self.num_classes:, :]
        seg_logits = self.seg_head(grid_token)
        # (B, L, K) -> (B, K, H, W)
        seg_logits = seg_logits.transpose(1, 2).view(x.shape[0], self.num_classes, H, W)
        if self.training:
            # Per-class embeddings from the class tokens (non-negative via ReLU).
            cls_token = x[:, :self.num_classes, :]
            cls_pred = self.cls_head(cls_token)
            cls_pred = F.relu(cls_pred, inplace=True)
            return x, cls_pred, seg_logits
        else:
            if return_cra:
                return x, seg_logits, cra_list
            else:
                return x, seg_logits | 2732 | 35.44 | 101 | py |
SemFormer | SemFormer-main/core/models/transformer_backbone.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from ..arch_transformer import vit
from ..abc_modules import ABC_Model
class ViTBackbone(nn.Module, ABC_Model):
    """Wraps a timm-style ViT/DeiT as a dense feature backbone.

    The classification head and pre-logits are stripped, and the grid
    (patch) position embedding is copied into a standalone, bilinearly
    resizable parameter so arbitrary input resolutions can be handled.

    Args:
        model_name: must contain 'vit' or 'deit' (optionally 'distilled');
            with the kwargs 'version', 'patch_size', 'resolution', 'in21k'
            it selects a constructor from ``arch_transformer.vit``.
        with_last_norm: keep and apply the ViT's final norm layer.
        with_posembed: keep ``model.pos_embed`` (otherwise deleted; only the
            copied ``grid_posembed`` remains).
        with_cls_token: keep ``model.cls_token`` (otherwise deleted).
        img_size: forwarded to the ViT constructor.
    """
    def __init__(self, model_name, with_last_norm=True,
                 with_posembed=False, with_cls_token=False, img_size=224, **kwargs):
        super().__init__()
        self.model_name = model_name
        version = kwargs.pop('version', 'base')
        patch_size = kwargs.pop('patch_size', 16)
        resolution = kwargs.pop('resolution', 224)
        in21k = kwargs.pop('in21k', False)
        pos_embed_size = kwargs.pop('pos_embed_size', None)
        print('{} pos_embed_size: {}'.format(self.__class__.__name__, pos_embed_size))
        # Resolve the pretrained constructor by name, e.g. vit_base_patch16_224.
        if 'vit' in model_name:
            model_fn = 'vit_{}_patch{}_{}'.format(version, patch_size, resolution)
            if in21k:
                model_fn += '_in21k'
            model_fn = getattr(vit, model_fn)
            model = model_fn(pretrained=True, img_size=img_size, **kwargs)
        elif 'deit' in model_name:
            if 'distilled' in model_name:
                model_fn = 'deit_{}_distilled_patch{}_{}'.format(version, patch_size, resolution)
            else:
                model_fn = 'deit_{}_patch{}_{}'.format(version, patch_size, resolution)
            if in21k:
                model_fn += '_in21k'
            model_fn = getattr(vit, model_fn)
            model = model_fn(pretrained=True, img_size=img_size, **kwargs)
        else:
            raise ValueError('unspported model name {} for {}'.format(model_name, self.__class__.__name__))
        self.num_patches = model.patch_embed.num_patches
        # Copy the grid part of the pretrained position embedding (skipping
        # the cls/dist tokens) into a standalone parameter, optionally
        # resized to ``pos_embed_size``.
        if pos_embed_size is not None:
            new_size = pos_embed_size
            if not isinstance(new_size, (list, tuple)):
                new_size = [new_size, new_size]
            pretrained_grid_posembed = model.pos_embed[:, model.num_tokens:, :].data.clone()
            pretrained_grid_posembed = self.__class__.resize_pos_meb(pretrained_grid_posembed, new_size)
            self.grid_posembed = nn.Parameter(pretrained_grid_posembed)
        else:
            self.grid_posembed = nn.Parameter(model.pos_embed[:, model.num_tokens:, :].data.clone())
        print('self.grid_posembed:', self.grid_posembed.shape)
        # Strip the parts of the pretrained model that are not needed.
        if not with_posembed:
            del model.pos_embed
        if not with_cls_token:
            del model.cls_token
        del model.pre_logits
        del model.head
        self.with_last_norm = with_last_norm
        if (not with_last_norm) and hasattr(model, 'norm'):
            del model.norm
        self.model = model
    @staticmethod
    def resize_pos_meb(posemb_grid, new_size=()):
        """Bicubically resize a (1, N, C) grid position embedding to
        ``new_size`` (H, W); code modified from timm. Assumes the stored
        grid is square (side sqrt(N)). Returns it unchanged if already
        the requested size."""
        posemb_grid = posemb_grid[0]
        gs_old = int(math.sqrt(len(posemb_grid)))
        if len(new_size) < 1:  # backwards compatibility
            new_size = [gs_old] * 2
        assert len(new_size) >= 2
        if (new_size[0] == gs_old) and (new_size[1] == gs_old):
            return posemb_grid[None, ...]
        posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
        posemb_grid = F.interpolate(posemb_grid, size=new_size, mode='bicubic', align_corners=False)
        posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, new_size[0] * new_size[1], -1)
        return posemb_grid
    def forward_features(self, x, return_attn=False):
        """Run the patch embed + transformer blocks, adding the (resized)
        grid position embedding; optionally collect per-block attention."""
        height = x.shape[-2] // self.model.patch_embed.patch_size[0]
        x = self.model.patch_embed(x)
        width = x.shape[1] // height
        pos_embed = self.__class__.resize_pos_meb(self.grid_posembed, (height, width))
        x = self.model.pos_drop(x + pos_embed)
        if return_attn:
            attn_list = []
        for block in self.model.blocks:
            block_result = block(x, return_attn=return_attn)
            if return_attn:
                x, attns = block_result
                attn_list.append(attns)
            else:
                x = block_result
        if self.with_last_norm:
            x = self.model.norm(x)
        if return_attn:
            return x, attn_list
        return x
    def forward_features_with_outer_token(self, x, outer_token, outer_posembed, return_attn=False):
        """Like ``forward_features`` but prepends externally supplied tokens
        (with their own position embeddings) before the grid tokens."""
        height = x.shape[-2] // self.model.patch_embed.patch_size[0]
        x = self.model.patch_embed(x)
        width = x.shape[1] // height
        pos_embed = self.__class__.resize_pos_meb(self.grid_posembed, (height, width))
        pos_embed = torch.cat([outer_posembed, pos_embed], dim=1)
        outer_token = outer_token.expand(x.shape[0], -1, -1)
        x = torch.cat([outer_token, x], dim=1)
        x = self.model.pos_drop(x + pos_embed)
        if return_attn:
            attn_list = []
        for block in self.model.blocks:
            block_result = block(x, return_attn=return_attn)
            if return_attn:
                x, attns = block_result
                attn_list.append(attns)
            else:
                x = block_result
        if self.with_last_norm:
            x = self.model.norm(x)
        if return_attn:
            return x, attn_list
        return x
    def forward(self, x):
        return self.forward_features(x) | 5250 | 38.780303 | 107 | py |
SemFormer | SemFormer-main/core/models/semformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from .transformer_segmentor import SemFormerSegmentor
from ..abc_modules import BaseModule, ABC_Model
from ..functional import cosine_similarity
from ..utils import get_label_info
class SemFormer(BaseModule, ABC_Model):
    """Semantic-aware transformer for weakly supervised segmentation.

    Wraps a SemFormerSegmentor and, during training, scores its predicted
    masks with a frozen auto-encoder (``self.ae``, attached externally by
    BaseModule machinery -- TODO confirm) via class-representation
    similarities.
    """
    def __init__(self, model_name, num_classes, class_dim,
                 version='base', patch_size=16, resolution=224, in21k=False, in22k=False,
                 pos_embed_size=None):
        # NOTE(review): ``in22k`` is accepted but never used in this class.
        super().__init__()
        self.class_dim = class_dim
        self.segmentor = SemFormerSegmentor(
            model_name=model_name, num_classes=num_classes, class_dim=class_dim,
            version=version, patch_size=patch_size,
            resolution=resolution, in21k=in21k, pos_embed_size=pos_embed_size)
    def train(self, mode=True):
        """Set train mode for self while keeping the auto-encoder (when
        attached) permanently in eval mode."""
        super().train(mode)
        if hasattr(self, 'ae'):
            self.ae.train(False)
    def forward_x(self, x, **kwargs):
        # Plain inference path: delegate to the segmentor.
        return self.segmentor(x, **kwargs)
    def forward_train(self, images, ae_images, labels, grad_masks):
        """Training forward pass.

        Args:
            images: segmentor inputs.
            ae_images: the same images at the auto-encoder's resolution.
            labels: image-level class labels (consumed by get_label_info).
            grad_masks: optional spatial mask; outside it predicted masks are
                zeroed (and receive no gradient).

        Returns:
            masks and five similarity tensors used by the training losses
            (segmentation similarity plus foreground/background pull/push
            similarities against the class representations).
        """
        # Per-class bookkeeping for which (image, class) pairs are present.
        label_info = get_label_info(labels)
        seen_masks = label_info['seen_mask_total']
        seen_labels = label_info['seen_labels']
        re_seen_labels = label_info['re_seen_labels']
        seen_indexes = label_info['seen_indexes']
        seen_classes = label_info['seen_classes']
        mask_feat, seg_cre, ori_mask_logits = self.segmentor(images)
        # Crop the padding added inside the segmentor, then match AE size.
        ori_mask_logits = ori_mask_logits[..., :images.shape[-2], :images.shape[-1]]
        if ori_mask_logits.shape[-2:] != ae_images.shape[-2:]:
            mask_logits = F.interpolate(ori_mask_logits, size=ae_images.shape[-2:], mode='bilinear', align_corners=True)
        else:
            mask_logits = ori_mask_logits
        masks = mask_logits.sigmoid()
        if grad_masks is not None:
            if grad_masks.dim() == 3:
                grad_masks = grad_masks[:, None, :, :]
            masks = (grad_masks * masks) + (1 - grad_masks) * 0.
        # Build per-(image, class) foreground / background crops for the AE.
        images_for_spec_cls = ae_images[seen_indexes, :, :, :]
        cls_fg_masks = masks[seen_masks, :, :][:, None, :, :]
        cls_fg_images = images_for_spec_cls * cls_fg_masks
        cls_bg_masks = 1 - cls_fg_masks
        cls_bg_images = images_for_spec_cls * cls_bg_masks
        # Class representations / embeddings from the frozen auto-encoder.
        crs = self.ae(ae_images, stage='get_crs')
        crs_repeat = self.ae(cls_fg_images, stage='get_crs')
        cs_embed = self.ae(cls_fg_images, stage='get_cre')
        re_cs_embed = self.ae(cls_bg_images, stage='get_cre')
        seg_sim = cosine_similarity(seg_cre, crs, is_aligned=True)
        # Pull/push selections -- exact semantics come from get_label_info;
        # presumably pull pairs are encouraged toward their class
        # representation and push pairs away (TODO confirm against losses).
        pull_mask = seen_labels > 0
        push_mask = re_seen_labels > 0
        re_pull_mask = push_mask
        re_push_mask = pull_mask
        cls_fg_pull_sim = cosine_similarity(
            cs_embed[pull_mask, :], crs_repeat[pull_mask, :], is_aligned=True)
        cls_fg_push_sim = cosine_similarity(
            cs_embed[push_mask, :], crs_repeat[push_mask, :], is_aligned=True)
        cls_bg_pull_sim = cosine_similarity(
            re_cs_embed[re_pull_mask, :], crs_repeat[re_pull_mask, :], is_aligned=True)
        cls_bg_push_sim = cosine_similarity(
            re_cs_embed[re_push_mask, :], crs_repeat[re_push_mask, :], is_aligned=True)
        return masks, seg_sim, cls_fg_pull_sim, cls_fg_push_sim, cls_bg_pull_sim, cls_bg_push_sim
| 3,397 | 37.613636 | 120 | py |
SemFormer | SemFormer-main/core/models/base_segmentor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
from ..module import SMDConv2d
from .modules import SemanticCorrelationModule
from .base_backbone import (BaseBackboneVGG,
BaseBackbone,
ReturnLastLayerBaseBackboneVGG,
ReturnLastLayerBaseBackbone)
from ..abc_modules import ABC_Model
class BaseClassifier(nn.Module, ABC_Model):
    """Multi-label classifier head on top of the last backbone stage.

    With ``with_cam=True`` the 1x1 classifier is applied spatially to
    produce class-activation logits instead of pooled logits.
    """
    def __init__(self, model_name, num_classes=20, mode='fix', strides=(2, 2, 2, 1)):
        super().__init__()
        self.backbone = ReturnLastLayerBaseBackbone(model_name, num_classes, mode, strides)
        self.num_classes = num_classes
        # stage1 - stage5 output channel counts
        if '38' in model_name:
            self.num_features = [128, 256, 512, 1024, 4096]
        else:
            self.num_features = [64, 256, 512, 1024, 2048]
        self.gap = nn.AdaptiveAvgPool2d(1)
        # NOTE(review): the classifier emits num_classes + 1 channels
        # (presumably an extra background channel -- confirm), yet forward()
        # reshapes the pooled logits to num_classes; that view's element
        # count will not match. Likely a latent bug -- verify with callers.
        self.classifier = nn.Sequential(
            nn.Dropout2d(0.5),
            nn.Conv2d(self.num_features[-1], num_classes + 1, 1, bias=False)
        )
        self.initialize([self.classifier])
    def forward(self, x, with_cam=False):
        stage5 = self.backbone(x)
        if with_cam:
            # Per-pixel class logits (CAMs), shape (B, num_classes + 1, H, W).
            cam_logits = self.classifier(stage5)
            return stage5, cam_logits
        else:
            gap = self.gap(stage5)
            logits = self.classifier(gap).view(x.shape[0], self.num_classes)
            return logits | 1470 | 30.978261 | 91 | py |
SemFormer | SemFormer-main/core/arch_resnet/resnet.py | import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
# Download URLs for the ImageNet-pretrained torchvision ResNet checkpoints.
urls_dic = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
# Number of residual blocks per stage (layer1..layer4) for each ResNet depth.
layers_dic = {
    'resnet18' : [2, 2, 2, 2],
    'resnet34' : [3, 4, 6, 3],
    'resnet50' : [3, 4, 6, 3],
    'resnet101' : [3, 4, 23, 3],
    'resnet152' : [3, 8, 36, 3]
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Build a bias-free 3x3 convolution whose padding equals its dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Build a bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet-18/34 style)."""

    expansion: int = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, batch_norm_fn=nn.BatchNorm2d):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = batch_norm_fn(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = batch_norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is either the input itself or its (strided) projection.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Three-conv (1x1 -> 3x3 -> 1x1) residual bottleneck block."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, batch_norm_fn=nn.BatchNorm2d):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = batch_norm_fn(planes)
        # The middle 3x3 conv carries both the stride and the dilation.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = batch_norm_fn(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = batch_norm_fn(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Generic ResNet backbone with configurable per-stage strides/dilations.

    Fix: ``forward`` referenced ``self.avgpool`` and ``self.fc`` which had been
    commented out in ``__init__``, so calling ``forward`` raised
    AttributeError. They are now created (adaptive pooling so any input
    resolution works; 1000-way head matching the pretrained checkpoints).
    """

    def __init__(self, block, layers, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1), batch_norm_fn=nn.BatchNorm2d):
        self.batch_norm_fn = batch_norm_fn
        print('dilations:', dilations)
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=strides[0], padding=3,  # 2
                               bias=False)
        self.bn1 = self.batch_norm_fn(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # 2
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, dilation=dilations[0])  # 1
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])  # 2
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])  # 2
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3])  # 1
        self.inplanes = 1024  # leftover reset kept for external readers
        # Classification head used by forward(); adaptive pooling keeps the
        # head valid for arbitrary input sizes.
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, 1000)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual blocks; add a projection shortcut when the
        shape changes. The first block keeps dilation=1 (it carries the
        stride); the remaining blocks use the requested dilation."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.batch_norm_fn(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample, dilation=1, batch_norm_fn=self.batch_norm_fn)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, batch_norm_fn=self.batch_norm_fn))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
| 5,537 | 33.830189 | 114 | py |
SemFormer | SemFormer-main/core/arch_resnet/resnet38.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
class ResBlock(nn.Module):
    """Pre-activation residual block used by ResNet38 (BN + ReLU before convs)."""

    def __init__(self, in_channels, mid_channels, out_channels, stride=1, first_dilation=None, dilation=1):
        super(ResBlock, self).__init__()
        self.same_shape = (in_channels == out_channels and stride == 1)
        if first_dilation is None:
            first_dilation = dilation
        self.bn_branch2a = nn.BatchNorm2d(in_channels)
        self.conv_branch2a = nn.Conv2d(in_channels, mid_channels, 3, stride,
                                       padding=first_dilation, dilation=first_dilation, bias=False)
        self.bn_branch2b1 = nn.BatchNorm2d(mid_channels)
        self.conv_branch2b1 = nn.Conv2d(mid_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False)
        if not self.same_shape:
            # 1x1 projection so the skip path matches the residual path.
            self.conv_branch1 = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False)

    def forward(self, x, get_x_bn_relu=False, x_bn_relu_detach=False):
        pre = F.relu(self.bn_branch2a(x))
        if x_bn_relu_detach:
            pre = pre.detach()
        x_bn_relu = pre
        skip = x if self.same_shape else self.conv_branch1(pre)
        res = self.conv_branch2a(pre)
        res = self.conv_branch2b1(F.relu(self.bn_branch2b1(res)))
        out = skip + res
        if get_x_bn_relu:
            return out, x_bn_relu
        return out
class ResBlock_bot(nn.Module):
    """Pre-activation bottleneck residual block with dropout (ResNet38)."""

    def __init__(self, in_channels, out_channels, stride=1, dilation=1, dropout=0.):
        super(ResBlock_bot, self).__init__()
        self.same_shape = (in_channels == out_channels and stride == 1)
        self.bn_branch2a = nn.BatchNorm2d(in_channels)
        self.conv_branch2a = nn.Conv2d(in_channels, out_channels // 4, 1, stride, bias=False)
        self.bn_branch2b1 = nn.BatchNorm2d(out_channels // 4)
        self.dropout_2b1 = torch.nn.Dropout2d(dropout)
        self.conv_branch2b1 = nn.Conv2d(out_channels // 4, out_channels // 2, 3, padding=dilation, dilation=dilation, bias=False)
        self.bn_branch2b2 = nn.BatchNorm2d(out_channels // 2)
        self.dropout_2b2 = torch.nn.Dropout2d(dropout)
        self.conv_branch2b2 = nn.Conv2d(out_channels // 2, out_channels, 1, bias=False)
        if not self.same_shape:
            self.conv_branch1 = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False)

    def forward(self, x, get_x_bn_relu=False, x_bn_relu_detach=False):
        pre = F.relu(self.bn_branch2a(x))
        if x_bn_relu_detach:
            pre = pre.detach()
        x_bn_relu = pre
        # NOTE: unlike ResBlock, the identity path also starts from the
        # pre-activated tensor when shapes already match.
        skip = pre if self.same_shape else self.conv_branch1(pre)
        res = self.conv_branch2a(pre)
        res = self.dropout_2b1(F.relu(self.bn_branch2b1(res)))
        res = self.conv_branch2b1(res)
        res = self.dropout_2b2(F.relu(self.bn_branch2b2(res)))
        res = self.conv_branch2b2(res)
        out = skip + res
        if get_x_bn_relu:
            return out, x_bn_relu
        return out
class ResNet38(nn.Module):
    """Wide ResNet38 backbone; ``strides`` controls stages b2/b3/b4/b5."""

    # Attribute names of the trunk, in execution order (bn7+relu applied after).
    _TRUNK = ('conv1a',
              'b2', 'b2_1', 'b2_2',
              'b3', 'b3_1', 'b3_2',
              'b4', 'b4_1', 'b4_2', 'b4_3', 'b4_4', 'b4_5',
              'b5', 'b5_1', 'b5_2',
              'b6', 'b7')
    # Blocks whose pre-activation output is collected as a stage feature.
    _STAGE_HEADS = ('b3', 'b4', 'b5', 'b6')

    def __init__(self, strides=[2, 2, 2, 1]):
        super().__init__()
        print('ResNet38 strides:', strides)
        self.strides = strides
        self.conv1a = nn.Conv2d(3, 64, 3, padding=1, bias=False)
        self.b2 = ResBlock(64, 128, 128, stride=strides[0])
        self.b2_1 = ResBlock(128, 128, 128)
        self.b2_2 = ResBlock(128, 128, 128)
        self.b3 = ResBlock(128, 256, 256, stride=strides[1])
        self.b3_1 = ResBlock(256, 256, 256)
        self.b3_2 = ResBlock(256, 256, 256)  # get
        self.b4 = ResBlock(256, 512, 512, stride=strides[2])
        self.b4_1 = ResBlock(512, 512, 512)
        self.b4_2 = ResBlock(512, 512, 512)
        self.b4_3 = ResBlock(512, 512, 512)
        self.b4_4 = ResBlock(512, 512, 512)
        self.b4_5 = ResBlock(512, 512, 512)  # get
        self.b5 = ResBlock(512, 512, 1024, stride=strides[3], first_dilation=1, dilation=2)
        self.b5_1 = ResBlock(1024, 512, 1024, dilation=2)
        self.b5_2 = ResBlock(1024, 512, 1024, dilation=2)  # get
        self.b6 = ResBlock_bot(1024, 2048, stride=1, dilation=4, dropout=0.3)  # get
        self.b7 = ResBlock_bot(2048, 4096, dilation=4, dropout=0.5)
        self.bn7 = nn.BatchNorm2d(4096)

    def forward(self, x, return_stages=False, detach_between_stages=False):
        """Run the trunk; either return the final feature map or per-stage features."""
        if return_stages:
            return self.forward_stages(x, detach_between_stages=detach_between_stages)
        for name in self._TRUNK:
            x = getattr(self, name)(x)
        return F.relu(self.bn7(x))

    def forward_stages(self, x, detach_between_stages=False):
        """Return [stage1..stage5]: the pre-activation maps of b3/b4/b5/b6 plus
        the final bn7+relu output."""
        stages = []
        for name in self._TRUNK:
            layer = getattr(self, name)
            if name in self._STAGE_HEADS:
                x, pre = layer(x, get_x_bn_relu=True, x_bn_relu_detach=detach_between_stages)
                stages.append(pre)
            else:
                x = layer(x)
        stages.append(F.relu(self.bn7(x)))
        return stages
def convert_mxnet_to_torch(filename='../weight/ilsvrc-cls_rna-a1_cls1000_ep-0001.params'):
    """Convert an MXNet ResNet38 checkpoint into a PyTorch-style state dict.

    Parameter names such as ``res3a_branch2a_weight`` are remapped onto the
    ``bN[_k].{conv,bn}_branchX.*`` attribute names used by :class:`ResNet38`.
    The 1000-way classifier weights are dropped.
    """
    import mxnet
    save_dict = mxnet.nd.load(filename)
    renamed_dict = dict()
    # Mapping from MXNet batch-norm parameter suffixes to PyTorch ones.
    bn_param_mx_pt = {'beta': 'bias', 'gamma': 'weight', 'mean': 'running_mean', 'var': 'running_var'}
    for k, v in save_dict.items():
        v = torch.from_numpy(v.asnumpy())
        toks = k.split('_')
        if 'conv1a' in toks[0]:
            # Stem convolution maps directly.
            renamed_dict['conv1a.weight'] = v
        elif 'linear1000' in toks[0]:
            # Skip the ImageNet classification head.
            pass
        elif 'branch' in toks[1]:
            pt_name = []
            # e.g. 'res3a' -> 'b3', 'res3b1' -> 'b3_1' (last char selects sub-block).
            if toks[0][-1] != 'a':
                pt_name.append('b' + toks[0][-3] + '_' + toks[0][-1])
            else:
                pt_name.append('b' + toks[0][-2])
            if 'res' in toks[0]:
                layer_type = 'conv'
                last_name = 'weight'
            else: # 'bn' in toks[0]:
                layer_type = 'bn'
                last_name = bn_param_mx_pt[toks[-1]]
            pt_name.append(layer_type + '_' + toks[1])
            pt_name.append(last_name)
            torch_name = '.'.join(pt_name)
            renamed_dict[torch_name] = v
        else:
            # Remaining parameters belong to the final bn7 layer.
            last_name = bn_param_mx_pt[toks[-1]]
            renamed_dict['bn7.' + last_name] = v
    return renamed_dict
| 7,556 | 29.844898 | 125 | py |
SemFormer | SemFormer-main/core/module/pooling.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class GlobalSumPool2d(nn.Module):
    """Sum over the two spatial dimensions, keeping a trailing 1x1 map."""

    def forward(self, x):
        spatial_flat = x.view(*x.shape[:-2], -1)
        return spatial_flat.sum(dim=-1)[..., None, None]
SemFormer | SemFormer-main/core/module/aspp.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class CustomASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: parallel dilated conv branches plus a
    globally-pooled branch, fused by a 1x1 convolution."""

    def __init__(self, in_channels, out_channels, dilations=[1, 3, 6, 12], act_last=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dilations = dilations
        self.act_last = act_last
        self.aspp_module = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(True),
                nn.Conv2d(out_channels, out_channels, 3, dilation=d, padding=d, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(True),
            )
            for d in dilations
        )
        self.aspp_global = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True),
            nn.AdaptiveAvgPool2d(1),
        )
        self.conv = nn.Conv2d(out_channels * (len(dilations) + 1), out_channels, 1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        branches = [branch(x) for branch in self.aspp_module]
        # Broadcast the 1x1 global descriptor back to the spatial resolution.
        pooled = self.aspp_global(x).expand_as(branches[0])
        fused = self.bn(self.conv(torch.cat(branches + [pooled], dim=1)))
        return self.relu(fused) if self.act_last else fused
SemFormer | SemFormer-main/core/module/activation.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from .. import functional as FN
class SMU(nn.Module):
    """Activation with a learnable smoothing factor ``miu``; the exact form is
    defined by the project's ``FN.smu``."""

    def __init__(self, miu=1e6):
        super().__init__()
        # Learnable smoothing parameter, stored as a float32 scalar tensor.
        self.miu = nn.Parameter(torch.tensor(miu, dtype=torch.float))

    def forward(self, x):
        return FN.smu(x, self.miu)
class SMUG(nn.Module):
    """Parameter-free activation; delegates to the project's ``FN.smug``."""

    def forward(self, x):
        return FN.smug(x)
class SMUL(nn.Module):
    """Activation with a fixed slope ``alpha`` and a learnable smoothing factor
    ``miu``; the exact form is defined by the project's ``FN.smul``."""

    def __init__(self, alpha=0.25, miu=4.352665993287951e-9):
        super().__init__()
        self.alpha = alpha  # fixed coefficient, not learned
        self.miu = nn.Parameter(torch.tensor(miu, dtype=torch.float))

    def forward(self, x):
        return FN.smul(x, self.alpha, self.miu)
| 778 | 18.475 | 69 | py |
SemFormer | SemFormer-main/core/module/non_local.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class NonLocal2d(nn.Module):
    """Embedding-free non-local block: softmax self-attention over spatial
    positions (query = key = value = the input) with a residual connection."""

    def forward(self, x):
        B, C, H, W = x.shape
        keys = x.view(B, C, -1)            # (B, C, HW)
        queries = keys.transpose(1, 2)     # (B, HW, C)
        # (B, HW, C) @ (B, C, HW) -> (B, HW, HW) attention over positions.
        attn = torch.softmax(queries @ keys, dim=-1)
        # (B, HW, HW) @ (B, HW, C) -> (B, HW, C), back to (B, C, H, W).
        aggregated = (attn @ queries).transpose(1, 2).view(B, C, H, W)
        return aggregated + x
| 598 | 20.392857 | 50 | py |
SemFormer | SemFormer-main/core/module/convolution.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class MultiDilatedConv2d(nn.Conv2d):
    """Conv2d that applies one shared weight at several dilation rates.

    ``forward`` returns a list with one output per dilation; padding is set
    equal to the dilation so each branch preserves spatial size (for odd
    kernel sizes with stride 1).

    Fix: for non-'zeros' padding modes, ``F.pad`` was given a 2-tuple, which
    pads only the last (width) dimension and shrinks the output height. It now
    receives the full (left, right, top, bottom) tuple, matching how
    ``nn.Conv2d`` itself handles non-zero padding modes.
    """

    def __init__(self, *args, dilations=[1], **kwargs):
        super().__init__(*args, **kwargs)
        self.dilations = dilations
        self.num_branch = len(dilations)

    def forward(self, x):
        outputs = []
        for dilation in self.dilations:
            padding = _pair(dilation)
            if self.padding_mode != 'zeros':
                # F.pad wants per-side amounts for the last two dims:
                # (w_left, w_right, h_top, h_bottom).
                expanded = (padding[1], padding[1], padding[0], padding[0])
                out = F.conv2d(F.pad(x, expanded, mode=self.padding_mode),
                               self.weight, self.bias, self.stride,
                               _pair(0), dilation, self.groups)
            else:
                out = F.conv2d(x, self.weight, self.bias, self.stride,
                               padding, dilation, self.groups)
            outputs.append(out)
        return outputs
# SwitchableMultiDilatedConv2d
class SMDConv2d(MultiDilatedConv2d):
    """Multi-dilated conv whose branch outputs are fused by learned soft gates.

    Gates are predicted either per pixel (a single 5x5 conv) or per image
    (squeeze-excite style MLP over globally pooled features), optionally with
    Gumbel-softmax for hard branch selection.

    Fix: the zero-initialization indexed ``self.conv_gate[-1]``, which crashed
    with TypeError when ``pixel_wise=True`` (the gate is then a plain Conv2d,
    not a Sequential). The final gate layer is now resolved for both
    configurations before initialization.
    """

    def __init__(self, *args, tau=1.0, pixel_wise=False, ratio=1. / 16., gumbel=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.tau = tau
        self.pixel_wise = pixel_wise
        self.ratio = ratio
        self.gumbel = gumbel
        if pixel_wise:
            self.conv_gate = nn.Conv2d(self.in_channels, len(self.dilations), 5, padding=2)
            last_gate = self.conv_gate
        else:
            self.inter_channels = int(self.in_channels * self.ratio)
            self.conv_gate = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(self.in_channels, self.inter_channels, 1),
                nn.LayerNorm([self.inter_channels, 1, 1]),
                nn.ReLU(True),
                nn.Conv2d(self.inter_channels, len(self.dilations), 1)
            )
            last_gate = self.conv_gate[-1]
        # Zero logits -> uniform gates, so training starts as a plain average.
        nn.init.constant_(last_gate.weight, val=0)
        nn.init.constant_(last_gate.bias, val=0)

    def forward(self, x):
        logits = self.conv_gate(x)
        if self.gumbel:
            gates = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=1)
        else:
            if self.tau != 1:
                logits = logits / self.tau
            gates = F.softmax(logits, dim=1)
        gates = torch.split(gates, dim=1, split_size_or_sections=1)
        branch_outs = super().forward(x)
        # Weighted sum of the per-dilation branch outputs.
        return sum(g * o for g, o in zip(gates, branch_outs))
| 2,389 | 33.637681 | 97 | py |
SemFormer | SemFormer-main/core/module/linear.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
import math
class SeparateLinear(nn.Module):
    """Group-wise linear layer: each of ``groups`` slots along dim -2 has its
    own (in_channels x out_channels) weight and its own bias.

    Input of shape (..., groups, in_channels) maps to (..., groups, out_channels).

    Fix: removed a dead expanded copy of the weight that was built on every
    forward pass but never used — the matmul already broadcasts ``self.weight``
    across the leading dimensions. The redundant bias expansion was dropped for
    the same reason (plain addition broadcasts correctly).
    """

    def __init__(self, in_channels, out_channels, groups=1, bias=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.groups = groups
        self.weight = nn.Parameter(torch.Tensor(groups, in_channels, out_channels))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(groups, out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            # Mirror nn.Linear's bias init: uniform in +-1/sqrt(fan_in),
            # where fan_in is the per-group input width.
            fan_in = self.weight.size(1)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, x):
        n_dim = x.dim()
        assert n_dim > 2, '{} requires input with ndim > 2'.format(self.__class__.__name__)
        # (..., groups, 1, in) @ (groups, in, out) -> (..., groups, 1, out)
        out = torch.matmul(x[..., None, :], self.weight)[..., 0, :]
        if self.bias is not None:
            out = out + self.bias  # (groups, out) broadcasts over leading dims
        return out
SemFormer | SemFormer-main/core/module/ops.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class Flatten(nn.Module):
    """Module wrapper around ``torch.flatten`` with a configurable dim range."""

    def __init__(self, start_dim=0, end_dim=-1):
        super().__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, x):
        return x.flatten(self.start_dim, self.end_dim)
class Permute(nn.Module):
    """Module wrapper around ``Tensor.permute`` with a fixed dimension order."""

    def __init__(self, *permutation):
        super().__init__()
        self.permutation = permutation

    def forward(self, inputs):
        return inputs.permute(*self.permutation)
class Transpose(nn.Module):
    """Module wrapper around ``Tensor.transpose`` with two fixed dims."""

    def __init__(self, *transpose):
        super().__init__()
        self.transpose = transpose

    def forward(self, inputs):
        return inputs.transpose(*self.transpose)
class Cat(nn.Module):
    """Module wrapper around ``torch.cat`` along a fixed dimension."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, inputs):
        return torch.cat(inputs, dim=self.dim)
| 1,050 | 19.211538 | 61 | py |
SemFormer | SemFormer-main/core/module/normalization.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from ..sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class FixedBatchNorm(nn.BatchNorm2d):
    """BatchNorm2d that always normalizes with its frozen running statistics,
    even when the surrounding model is in training mode."""

    def forward(self, x):
        return F.batch_norm(
            x,
            self.running_mean,
            self.running_var,
            self.weight,
            self.bias,
            training=False,
            eps=self.eps,
        )
def group_norm(features):
    """Return a GroupNorm layer over ``features`` channels using 4 groups."""
    num_groups = 4
    return nn.GroupNorm(num_groups, features)
SemFormer | SemFormer-main/core/module/padding.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from .. import functional as FN
class SamePad2d(nn.Module):
    """Pad an input so a following conv with this geometry keeps spatial size
    ('same' padding); delegates to the project's ``FN.same_pad2d``."""

    def __init__(self, kernel_size, stride=1, dilation=1, pad_mode='around'):
        super().__init__()
        # Normalize every geometry argument to an (h, w) pair.
        self.kernel_size, self.stride, self.dilation = (
            _pair(kernel_size), _pair(stride), _pair(dilation))
        self.pad_mode = pad_mode

    def forward(self, x):
        return FN.same_pad2d(x, self.kernel_size, self.stride, self.dilation, self.pad_mode)
class AdaptivePad2d(nn.Module):
    """Pad an input up to a fixed output size; delegates to the project's
    ``FN.adaptive_pad2d``."""

    def __init__(self, output_size, pad_mode='corner'):
        super().__init__()
        self.output_size = _pair(output_size)  # normalized to (h, w)
        self.pad_mode = pad_mode

    def forward(self, x):
        return FN.adaptive_pad2d(x, self.output_size, self.pad_mode)
SemFormer | SemFormer-main/core/module/interpolate.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class Interpolate(nn.Module):
    """Module wrapper around ``F.interpolate`` with a fixed target ``size`` or
    ``scale_factor``.

    Fix: ``recompute_scale_factor=True`` was passed unconditionally, but
    current PyTorch raises a ValueError when it is combined with an explicit
    ``size``; it is now only used when resizing by ``scale_factor``.
    """

    def __init__(self, size=None, scale_factor=None, mode='bilinear', align_corners=True):
        super().__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, input):
        # recompute_scale_factor is only meaningful for scale-factor resizing.
        recompute = True if self.size is None else None
        return F.interpolate(input, self.size, self.scale_factor, self.mode,
                             self.align_corners, recompute_scale_factor=recompute)
SemFormer | SemFormer-main/core/arch_vgg/vgg.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
# Download URLs for the ImageNet-pretrained torchvision VGG checkpoints
# (plain and batch-normalized variants).
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
    """VGG classifier that also exposes the 4096-d penultimate embedding.

    ``forward`` returns ``(img_embedding, logits)``, where the embedding is the
    activation produced by classifier layer ``img_embedding_index``.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        # Index within self.classifier whose output is taken as the embedding
        # (the ReLU after the second 4096-d linear layer).
        self.img_embedding_index = 4
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        img_embedding = None
        for idx, layer in enumerate(self.classifier):
            x = layer(x)
            if idx == self.img_embedding_index:
                img_embedding = x
        return img_embedding, x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He-style init scaled by the conv's fan-out.
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list (channel counts and 'M' pool markers) into
    an ``nn.Sequential`` feature extractor starting from 3 input channels."""
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(nn.ReLU(inplace=True))
        in_channels = v
    return nn.Sequential(*layers)
# VGG layer configurations: integers are conv output channels, 'M' marks a
# 2x2 max-pool. A/B/D/E correspond to VGG-11/13/16/19 respectively.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11(pretrained=False, **kwargs):
    """VGG 11-layer model (configuration "A").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A']), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))
    return model
def vgg11_bn(pretrained=False, **kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
    return model
def vgg13(pretrained=False, **kwargs):
    """VGG 13-layer model (configuration "B").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['B']), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
    return model
def vgg13_bn(pretrained=False, **kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))
    return model
def vgg16(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['D']), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
    return model
def vgg16_bn(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
    return model
def vgg19(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['E']), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
    return model
def vgg19_bn(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E") with batch normalization.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
    return model
| 6,475 | 31.218905 | 113 | py |
SemFormer | SemFormer-main/core/arch_resnest/resnet.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""ResNet variants"""
import math
import torch
import torch.nn as nn
from .splat import SplAtConv2d
__all__ = ['ResNet', 'Bottleneck']
class DropBlock2D(object):
    """Placeholder for DropBlock regularization; instantiation is unsupported."""

    def __init__(self, *args, **kwargs):
        raise NotImplementedError
class GlobalAvgPool2d(nn.Module):
    """Average over the input's spatial dimensions, returning (N, C)."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        pooled = nn.functional.adaptive_avg_pool2d(inputs, 1)
        return pooled.view(inputs.size(0), -1)
class Bottleneck(nn.Module):
    """ResNeSt bottleneck block (1x1 -> split-attention 3x3 -> 1x1).

    Configurable variants:
      * ``radix >= 1``: the 3x3 conv is a SplAtConv2d (split-attention).
      * ``radix == 0`` with ``rectified_conv``: rectified 3x3 conv (RFConv2d).
      * ``radix == 0`` otherwise: plain grouped 3x3 conv.
      * ``avd``: average-pool downsampling applied before or after the 3x3
        conv (``avd_first``) instead of a strided conv.
    """
    # pylint: disable=unused-argument
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 radix=1, cardinality=1, bottleneck_width=64,
                 avd=False, avd_first=False, dilation=1, is_first=False,
                 rectified_conv=False, rectify_avg=False,
                 norm_layer=None, dropblock_prob=0.0, last_gamma=False):
        super(Bottleneck, self).__init__()
        # Width of the middle (grouped) conv, scaled by bottleneck_width/cardinality.
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = norm_layer(group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first
        if self.avd:
            # Pooling takes over the stride; the 3x3 conv then runs at stride 1.
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1
        if dropblock_prob > 0.0:
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            if radix == 1:
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)
        if radix >= 1:
            # Split-attention conv applies its own normalization internally.
            self.conv2 = SplAtConv2d(
                group_width, group_width, kernel_size=3,
                stride=stride, padding=dilation,
                dilation=dilation, groups=cardinality, bias=False,
                radix=radix, rectify=rectified_conv,
                rectify_avg=rectify_avg,
                norm_layer=norm_layer,
                dropblock_prob=dropblock_prob)
        elif rectified_conv:
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False,
                average_mode=rectify_avg)
            self.bn2 = norm_layer(group_width)
        else:
            self.conv2 = nn.Conv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False)
            self.bn2 = norm_layer(group_width)
        self.conv3 = nn.Conv2d(
            group_width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes*4)
        if last_gamma:
            # Zero-init of the last BN gamma so the block starts as identity.
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock1(out)
        out = self.relu(out)
        if self.avd and self.avd_first:
            out = self.avd_layer(out)
        out = self.conv2(out)
        # bn2/relu only exist on the non-split-attention (radix == 0) path.
        if self.radix == 0:
            out = self.bn2(out)
            if self.dropblock_prob > 0.0:
                out = self.dropblock2(out)
            out = self.relu(out)
        if self.avd and not self.avd_first:
            out = self.avd_layer(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
"""ResNet Variants
Parameters
----------
block : Block
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
classes : int, default 1000
Number of classification classes.
dilated : bool, default False
Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
typically used in Semantic Segmentation.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
for Synchronized Cross-GPU BachNormalization).
Reference:
- He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
- Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
"""
# pylint: disable=unused-variable
def __init__(self, block, layers, radix=1, groups=1, bottleneck_width=64,
num_classes=1000, dilated=False, dilation=1,
deep_stem=False, stem_width=64, avg_down=False,
rectified_conv=False, rectify_avg=False,
avd=False, avd_first=False,
final_drop=0.0, dropblock_prob=0,
last_gamma=False, norm_layer=nn.BatchNorm2d):
self.cardinality = groups
self.bottleneck_width = bottleneck_width
# ResNet-D params
self.inplanes = stem_width*2 if deep_stem else 64
self.avg_down = avg_down
self.last_gamma = last_gamma
# ResNeSt params
self.radix = radix
self.avd = avd
self.avd_first = avd_first
super(ResNet, self).__init__()
self.rectified_conv = rectified_conv
self.rectify_avg = rectify_avg
if rectified_conv:
from rfconv import RFConv2d
conv_layer = RFConv2d
else:
conv_layer = nn.Conv2d
conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
if deep_stem:
self.conv1 = nn.Sequential(
conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False, **conv_kwargs),
norm_layer(stem_width),
nn.ReLU(inplace=True),
conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
norm_layer(stem_width),
nn.ReLU(inplace=True),
conv_layer(stem_width, stem_width*2, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
)
else:
self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3,
bias=False, **conv_kwargs)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer, is_first=False)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
if dilated or dilation == 4:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilation=2, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=4, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
elif dilation==2:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilation=1, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=2, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
else:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.avgpool = GlobalAvgPool2d()
self.drop = nn.Dropout(final_drop) if final_drop > 0.0 else None
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, norm_layer):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None,
                dropblock_prob=0.0, is_first=True):
    """Build one residual stage of ``blocks`` units and advance ``self.inplanes``.

    Args:
        block: residual block class; its ``expansion`` fixes the output width.
        planes: base channel count for the stage.
        blocks: number of residual units in the stage.
        stride: stride of the first unit (remaining units use stride 1).
        dilation: stage dilation; only 1, 2 and 4 are supported.
        norm_layer: normalization layer constructor applied throughout.
        dropblock_prob: DropBlock probability forwarded to every unit.
        is_first: forwarded to the first unit (stem-adjacent handling).

    Raises:
        RuntimeError: if ``dilation`` is not 1, 2 or 4.
    """
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        # Shortcut needs a projection whenever spatial size or channels change.
        down_layers = []
        if self.avg_down:
            # ResNet-D style shortcut: the average pool absorbs the stride,
            # then a stride-1 1x1 conv matches the channel count.
            if dilation == 1:
                down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                                ceil_mode=True, count_include_pad=False))
            else:
                down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
                                                ceil_mode=True, count_include_pad=False))
            down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                         kernel_size=1, stride=1, bias=False))
        else:
            # Plain strided 1x1 projection shortcut.
            down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                         kernel_size=1, stride=stride, bias=False))
        down_layers.append(norm_layer(planes * block.expansion))
        downsample = nn.Sequential(*down_layers)
    layers = []
    if dilation == 1 or dilation == 2:
        # First unit carries the stride/downsample; with stage dilation 2 the
        # first unit still uses dilation 1 so the receptive field grows gradually.
        layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                            radix=self.radix, cardinality=self.cardinality,
                            bottleneck_width=self.bottleneck_width,
                            avd=self.avd, avd_first=self.avd_first,
                            dilation=1, is_first=is_first, rectified_conv=self.rectified_conv,
                            rectify_avg=self.rectify_avg,
                            norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                            last_gamma=self.last_gamma))
    elif dilation == 4:
        # Stage dilation 4: first unit uses dilation 2 (half the stage dilation).
        layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                            radix=self.radix, cardinality=self.cardinality,
                            bottleneck_width=self.bottleneck_width,
                            avd=self.avd, avd_first=self.avd_first,
                            dilation=2, is_first=is_first, rectified_conv=self.rectified_conv,
                            rectify_avg=self.rectify_avg,
                            norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                            last_gamma=self.last_gamma))
    else:
        raise RuntimeError("=> unknown dilation size: {}".format(dilation))
    self.inplanes = planes * block.expansion
    # Remaining units: stride 1, no downsample, full stage dilation.
    for i in range(1, blocks):
        layers.append(block(self.inplanes, planes,
                            radix=self.radix, cardinality=self.cardinality,
                            bottleneck_width=self.bottleneck_width,
                            avd=self.avd, avd_first=self.avd_first,
                            dilation=dilation, rectified_conv=self.rectified_conv,
                            rectify_avg=self.rectify_avg,
                            norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                            last_gamma=self.last_gamma))
    return nn.Sequential(*layers)
def forward(self, x):
    """Full forward pass: stem, four residual stages, pooled linear classifier."""
    # Stem and the four residual stages, applied in order.
    for stage in (self.conv1, self.bn1, self.relu, self.maxpool,
                  self.layer1, self.layer2, self.layer3, self.layer4):
        x = stage(x)
    feats = self.avgpool(x)
    feats = torch.flatten(feats, 1)
    # Optional final dropout (self.drop is None when final_drop == 0).
    if self.drop:
        feats = self.drop(feats)
    return self.fc(feats)
| 13,241 | 41.854369 | 162 | py |
SemFormer | SemFormer-main/core/arch_resnest/resnest.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""ResNeSt models"""
import torch
from .resnet import ResNet, Bottleneck
__all__ = ['resnest50', 'resnest101', 'resnest200', 'resnest269']
_url_format = 'https://github.com/zhanghang1989/ResNeSt/releases/download/weights_step1/{}-{}.pth'

# Released checkpoints: model name -> sha256 prefix embedded in the download URL.
_model_sha256 = {
    'resnest50': '528c19ca',
    'resnest101': '22405ba7',
    'resnest200': '75117900',
    'resnest269': '0cc87c48',
}


def short_hash(name):
    """Return the 8-character checksum prefix for a released ResNeSt model name."""
    if name in _model_sha256:
        return _model_sha256[name][:8]
    raise ValueError('Pretrained model for {name} is not available.'.format(name=name))


# Fully-resolved download URL per released model.
resnest_model_urls = {
    name: _url_format.format(name, short_hash(name)) for name in _model_sha256
}
def resnest50(pretrained=False, root='~/.encoding/models', **kwargs):
    """ResNeSt-50: stage layout (3, 4, 6, 3), 32-channel deep stem.

    When ``pretrained`` is True the released ImageNet checkpoint is downloaded
    via torch.hub and loaded into the model.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3],
                   radix=2, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=32, avg_down=True,
                   avd=True, avd_first=False, **kwargs)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50'], progress=True, check_hash=True)
        model.load_state_dict(state)
    return model
def resnest101(pretrained=False, root='~/.encoding/models', **kwargs):
    """ResNeSt-101: stage layout (3, 4, 23, 3), 64-channel deep stem.

    When ``pretrained`` is True the released ImageNet checkpoint is downloaded
    via torch.hub and loaded into the model.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3],
                   radix=2, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=64, avg_down=True,
                   avd=True, avd_first=False, **kwargs)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest101'], progress=True, check_hash=True)
        model.load_state_dict(state)
    return model
def resnest200(pretrained=False, root='~/.encoding/models', **kwargs):
    """ResNeSt-200: stage layout (3, 24, 36, 3), 64-channel deep stem.

    When ``pretrained`` is True the released ImageNet checkpoint is downloaded
    via torch.hub and loaded into the model.
    """
    model = ResNet(Bottleneck, [3, 24, 36, 3],
                   radix=2, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=64, avg_down=True,
                   avd=True, avd_first=False, **kwargs)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest200'], progress=True, check_hash=True)
        model.load_state_dict(state)
    return model
def resnest269(pretrained=False, root='~/.encoding/models', **kwargs):
    """ResNeSt-269: stage layout (3, 30, 48, 8), 64-channel deep stem.

    When ``pretrained`` is True the released ImageNet checkpoint is downloaded
    via torch.hub and loaded into the model.
    """
    model = ResNet(Bottleneck, [3, 30, 48, 8],
                   radix=2, groups=1, bottleneck_width=64,
                   deep_stem=True, stem_width=64, avg_down=True,
                   avd=True, avd_first=False, **kwargs)
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest269'], progress=True, check_hash=True)
        model.load_state_dict(state)
    return model
| 2,938 | 39.819444 | 98 | py |
SemFormer | SemFormer-main/core/arch_resnest/splat.py | """Split-Attention"""
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Conv2d, Module, Linear, BatchNorm2d, ReLU
from torch.nn.modules.utils import _pair
__all__ = ['SplAtConv2d']
class SplAtConv2d(Module):
    """Split-Attention Conv2d

    ResNeSt split-attention convolution: the input is convolved into ``radix``
    groups of ``channels`` feature maps; the groups are summed, squeezed with a
    global average pool and two grouped 1x1 convs, and re-weighted by a
    radix-wise softmax before being summed back together.
    """
    def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
                 dilation=(1, 1), groups=1, bias=True,
                 radix=2, reduction_factor=4,
                 rectify=False, rectify_avg=False, norm_layer=None,
                 dropblock_prob=0.0, **kwargs):
        super(SplAtConv2d, self).__init__()
        padding = _pair(padding)
        # Rectified conv is only used when there is spatial padding.
        self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
        self.rectify_avg = rectify_avg
        # Bottleneck width of the squeeze MLP, floored at 32 channels.
        inter_channels = max(in_channels*radix//reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            # Optional third-party rectified convolution (import deferred so
            # the dependency is only required when rectify is enabled).
            from rfconv import RFConv2d
            self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
                                 groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs)
        else:
            self.conv = Conv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
                               groups=groups*radix, bias=bias, **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = norm_layer(channels*radix)
        self.relu = ReLU(inplace=True)
        # Squeeze-and-attend 1x1 convs, grouped by cardinality.
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(inter_channels)
        self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality)
        if dropblock_prob > 0.0:
            # NOTE(review): DropBlock2D is neither imported nor defined in this
            # module, so constructing with dropblock_prob > 0.0 raises
            # NameError here — confirm the intended DropBlock dependency.
            self.dropblock = DropBlock2D(dropblock_prob, 3)
        self.rsoftmax = rSoftMax(radix, groups)

    def forward(self, x):
        # Grouped conv produces radix * channels feature maps.
        x = self.conv(x)
        if self.use_bn:
            x = self.bn0(x)
        if self.dropblock_prob > 0.0:
            x = self.dropblock(x)
        x = self.relu(x)
        batch, rchannel = x.shape[:2]
        if self.radix > 1:
            # Split out the radix groups; torch < 1.5 needs an int split size.
            if torch.__version__ < '1.5':
                splited = torch.split(x, int(rchannel//self.radix), dim=1)
            else:
                splited = torch.split(x, rchannel//self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = x
        # Squeeze: per-channel global context, then two grouped 1x1 convs.
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        # Radix-softmax attention weights, reshaped for broadcasting.
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            if torch.__version__ < '1.5':
                attens = torch.split(atten, int(rchannel//self.radix), dim=1)
            else:
                attens = torch.split(atten, rchannel//self.radix, dim=1)
            # Attention-weighted sum of the radix splits.
            out = sum([att*split for (att, split) in zip(attens, splited)])
        else:
            out = atten * x
        return out.contiguous()
class rSoftMax(nn.Module):
    """Radix-wise softmax used by split attention.

    For radix > 1 the logits are viewed as (batch, cardinality, radix, -1),
    the radix axis is moved forward and softmax-normalized, then flattened
    back to (batch, -1). For radix == 1 a plain sigmoid is applied instead.
    """

    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        if self.radix <= 1:
            # Single split: gate each channel independently.
            return torch.sigmoid(x)
        n = x.size(0)
        grouped = x.view(n, self.cardinality, self.radix, -1).transpose(1, 2)
        normalized = F.softmax(grouped, dim=1)
        return normalized.reshape(n, -1)
| 3,620 | 35.21 | 101 | py |
SemFormer | SemFormer-main/core/arch_transformer/vit.py | """ Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https://arxiv.org/abs/2106.10270
The official jax code is released and available at https://github.com/google-research/vision_transformer
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2021 Ross Wightman
"""
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv
# from timm.models.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_
from timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_
from timm.models.registry import register_model
from .layers import PatchEmbed
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
    """Build a default pretrained-config dict; keyword args override defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
        'first_conv': 'patch_embed.proj', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained checkpoint configurations for every supported ViT/DeiT variant,
# keyed by model name; each entry is a _cfg() dict (url, input size, norm stats,
# classifier name, ...).
default_cfgs = {
    # patch models (weights from official Google JAX impl)
    'vit_tiny_patch16_224': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
    'vit_tiny_patch16_384': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
        input_size=(3, 384, 384), crop_pct=1.0),
    'vit_small_patch32_224': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
    'vit_small_patch32_384': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
        input_size=(3, 384, 384), crop_pct=1.0),
    'vit_small_patch16_224': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
    'vit_small_patch16_384': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
        input_size=(3, 384, 384), crop_pct=1.0),
    'vit_base_patch32_224': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'),
    'vit_base_patch32_384': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
        input_size=(3, 384, 384), crop_pct=1.0),
    'vit_base_patch16_224': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'),
    'vit_base_patch16_384': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz',
        input_size=(3, 384, 384), crop_pct=1.0),
    'vit_base_patch8_224': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'),
    'vit_large_patch32_224': _cfg(
        url='',  # no official model weights for this combo, only for in21k
        ),
    'vit_large_patch32_384': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth',
        input_size=(3, 384, 384), crop_pct=1.0),
    'vit_large_patch16_224': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz'),
    'vit_large_patch16_384': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/'
            'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz',
        input_size=(3, 384, 384), crop_pct=1.0),

    # patch models, imagenet21k (weights from official Google JAX impl)
    'vit_tiny_patch16_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz',
        num_classes=21843),
    'vit_small_patch32_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz',
        num_classes=21843),
    'vit_small_patch16_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz',
        num_classes=21843),
    'vit_base_patch32_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz',
        num_classes=21843),
    'vit_base_patch16_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz',
        num_classes=21843),
    'vit_base_patch8_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz',
        num_classes=21843),
    'vit_large_patch32_224_in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth',
        num_classes=21843),
    'vit_large_patch16_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz',
        num_classes=21843),
    'vit_huge_patch14_224_in21k': _cfg(
        url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz',
        hf_hub='timm/vit_huge_patch14_224_in21k',
        num_classes=21843),

    # SAM trained models (https://arxiv.org/abs/2106.01548)
    'vit_base_patch32_sam_224': _cfg(
        url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz'),
    'vit_base_patch16_sam_224': _cfg(
        url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz'),

    # deit models (FB weights)
    'deit_tiny_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'deit_small_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'deit_base_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
    'deit_base_patch16_384': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0),
    'deit_tiny_distilled_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')),
    'deit_small_distilled_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')),
    'deit_base_distilled_patch16_224': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')),
    'deit_base_distilled_patch16_384': _cfg(
        url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0,
        classifier=('head', 'head_dist')),

    # ViT ImageNet-21K-P pretraining by MILL
    'vit_base_patch16_224_miil_in21k': _cfg(
        url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth',
        mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
    ),
    'vit_base_patch16_224_miil': _cfg(
        url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm'
            '/vit_base_patch16_224_1k_miil_84_4.pth',
        mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear',
    ),
}
# class Attention(nn.Module):
# def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
# super().__init__()
# self.num_heads = num_heads
# head_dim = dim // num_heads
# self.scale = head_dim ** -0.5
# self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
# self.attn_drop = nn.Dropout(attn_drop)
# self.proj = nn.Linear(dim, dim)
# self.proj_drop = nn.Dropout(proj_drop)
# def forward(self, x):
# B, N, C = x.shape
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
# attn = (q @ k.transpose(-2, -1)) * self.scale
# attn = attn.softmax(dim=-1)
# attn = self.attn_drop(attn)
# x = (attn @ v).transpose(1, 2).reshape(B, N, C)
# x = self.proj(x)
# x = self.proj_drop(x)
# return x
class Attention(nn.Module):
    """Multi-head self-attention that can optionally expose its attention map.

    When ``return_attn`` is True, `forward` also returns the post-softmax,
    pre-dropout attention weights of shape (B, num_heads, N, N).
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, return_attn=False):
        B, N, C = x.shape
        head_dim = C // self.num_heads
        # Fused qkv projection -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        weights = scores.softmax(dim=-1)  # (B, heads, N, N); kept pre-dropout for callers
        dropped = self.attn_drop(weights)
        out = (dropped @ v).transpose(1, 2).reshape(B, N, C)
        out = self.proj_drop(self.proj(out))
        return (out, weights) if return_attn else out
# class Block(nn.Module):
# def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
# drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
# super().__init__()
# self.norm1 = norm_layer(dim)
# self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
# # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
# self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# self.norm2 = norm_layer(dim)
# mlp_hidden_dim = int(dim * mlp_ratio)
# self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
# def forward(self, x):
# x = x + self.drop_path(self.attn(self.norm1(x)))
# x = x + self.drop_path(self.mlp(self.norm2(x)))
# return x
class Block(nn.Module):
    """Pre-norm transformer encoder block: attention + MLP, each with residual.

    ``forward(..., return_attn=True)`` additionally returns the attention map
    produced by the block's Attention module.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth; identity when the drop-path rate is zero.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden, act_layer=act_layer, drop=drop)

    def forward(self, x, return_attn=False):
        attn = None
        if return_attn:
            branch, attn = self.attn(self.norm1(x), return_attn=True)
        else:
            branch = self.attn(self.norm1(x))
        x = x + self.drop_path(branch)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return (x, attn) if return_attn else x
# Canonical ViT capacity presets: (embed_dim, depth, num_heads) per named size.
VIT_NET_CFG = {
    name: {'embed_dim': embed_dim, 'depth': depth, 'num_heads': num_heads}
    for name, (embed_dim, depth, num_heads) in {
        'tiny': (192, 12, 3),
        'small': (384, 12, 6),
        'base': (768, 12, 12),
        'large': (1024, 24, 16),
        'huge': (1280, 32, 16),
    }.items()
}
class VisionTransformer(nn.Module):
    """ Vision Transformer

    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929

    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
        - https://arxiv.org/abs/2012.12877
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init=''):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.norm_layer = norm_layer
        act_layer = act_layer or nn.GELU
        self.act_layer = act_layer
        self.patch_size = patch_size

        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        # Keep hyper-parameters around for downstream code that introspects the model.
        self.embed_dim = embed_dim
        self.depth = depth
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.drop_rate = drop_rate
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.dpr = dpr
        # A ModuleList (rather than nn.Sequential) so callers can query single
        # blocks for their attention maps via Block.forward(..., return_attn=True).
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        # Representation layer
        if representation_size and not distilled:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()

        # Classifier head(s)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

        self.init_weights(weight_init)

    def init_weights(self, mode=''):
        """Initialize tokens, positional embedding and all sub-modules.

        mode: '' (default trunc-normal init), 'jax'/'jax_nlhb' (match the JAX
        impl), 'nlhb' (negative log head bias).
        """
        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
        trunc_normal_(self.pos_embed, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if mode.startswith('jax'):
            # leave cls token as zeros to match jax impl
            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        """Load weights from an official JAX .npz checkpoint."""
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters that should be excluded from weight decay.
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head(s) for a new number of classes."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        """Embed patches, run the transformer stack and return token feature(s)."""
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = self.pos_drop(x + self.pos_embed)
        # BUGFIX: self.blocks is an nn.ModuleList, which is not callable, so the
        # previous `x = self.blocks(x)` raised at runtime. Apply each block in turn.
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        if self.dist_token is None:
            return self.pre_logits(x[:, 0])
        else:
            return x[:, 0], x[:, 1]

    def forward(self, x):
        x = self.forward_features(x)
        if self.head_dist is not None:
            x, x_dist = self.head(x[0]), self.head_dist(x[1])  # x must be a tuple
            if self.training and not torch.jit.is_scripting():
                # during inference, return the average of both classifier predictions
                return x, x_dist
            else:
                return (x + x_dist) / 2
        else:
            x = self.head(x)
        return x
def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
    """ ViT weight initialization
    * When called without n, head_bias, jax_impl args it will behave exactly the same
      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
    """
    if isinstance(module, nn.Linear):
        if name.startswith('head'):
            # Classifier head: zero weights, configurable (possibly nlhb) bias.
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
        elif name.startswith('pre_logits'):
            # Representation layer mirrors the JAX pre-logits initialization.
            lecun_normal_(module.weight)
            nn.init.zeros_(module.bias)
        else:
            if jax_impl:
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    if 'mlp' in name:
                        # JAX impl uses a tiny normal bias inside MLP blocks.
                        nn.init.normal_(module.bias, std=1e-6)
                    else:
                        nn.init.zeros_(module.bias)
            else:
                # Default (DeiT-compatible) init for all other linear layers.
                trunc_normal_(module.weight, std=.02)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
    elif jax_impl and isinstance(module, nn.Conv2d):
        # NOTE conv was left to pytorch default in my original init
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
        # Norm layers: unit scale, zero shift.
        nn.init.zeros_(module.bias)
        nn.init.ones_(module.weight)
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation

    Maps every Flax parameter name in the .npz file onto the corresponding
    PyTorch module of ``model`` and copies the (transposed) tensors in place.
    """
    import numpy as np

    def _n2p(w, t=True):
        # numpy -> torch, squeezing 1x1x1 leading dims and (optionally)
        # transposing from Flax (HWIO / column-major) to PyTorch layout.
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    if not prefix and 'opt/target/embedding/kernel' in w:
        # Checkpoint saved with optimizer state: parameters live under opt/target/.
        prefix = 'opt/target/'
    if hasattr(model.patch_embed, 'backbone'):
        # hybrid
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'
                    for r in range(3):
                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
    else:
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
    # Only copy the classifier when the class count matches the checkpoint.
    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
        # Flax stores q/k/v separately; concatenate into the fused qkv projection.
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
    # Rescale the grid of position embeddings when loading from state_dict. Adapted from
    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    # posemb: pretrained embedding (1, num_tokens + gs_old**2, C); posemb_new: target
    # parameter whose token count fixes the new grid; gs_new: target (H, W) grid,
    # inferred as a square when empty.
    _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
    ntok_new = posemb_new.shape[1]
    if num_tokens:
        # split off the class/distillation tokens, which are carried over unchanged
        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
        ntok_new -= num_tokens
    else:
        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
    gs_old = int(math.sqrt(len(posemb_grid)))  # assumes the pretrained grid is square
    if not len(gs_new):  # backwards compatibility
        gs_new = [int(math.sqrt(ntok_new))] * 2
    assert len(gs_new) >= 2
    _logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
    # bicubic-resize the grid portion as a 2D feature map, then flatten back to tokens
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb
def checkpoint_filter_fn(state_dict, model):
    """ convert patch embedding weight from manual patchify + linear proj to conv"""
    # deit checkpoints nest the weights under a 'model' key
    if 'model' in state_dict:
        state_dict = state_dict['model']
    filtered = {}
    for key, value in state_dict.items():
        if 'patch_embed.proj.weight' in key and len(value.shape) < 4:
            # legacy checkpoints stored the patch projection as a linear weight;
            # reshape it into the conv kernel layout the current model expects
            out_c, _, k_h, k_w = model.patch_embed.proj.weight.shape
            value = value.reshape(out_c, -1, k_h, k_w)
        elif key == 'pos_embed' and value.shape != model.pos_embed.shape:
            # To resize pos embedding when using model at different size from pretrained weights
            value = resize_pos_embed(
                value, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
        filtered[key] = value
    return filtered
def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
    """Build a VisionTransformer variant, optionally loading pretrained weights.

    variant: key into ``default_cfgs`` used when ``default_cfg`` is not supplied.
    Remaining kwargs are forwarded to the model; ``representation_size`` is
    popped here and dropped when fine-tuning to a different class count.
    """
    default_cfg = default_cfg or default_cfgs[variant]
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    # NOTE this extra code to support handling of repr size for in21k pretrained models
    default_num_classes = default_cfg['num_classes']
    num_classes = kwargs.get('num_classes', default_num_classes)
    repr_size = kwargs.pop('representation_size', None)
    if repr_size is not None and num_classes != default_num_classes:
        # Remove representation layer if fine-tuning. This may not always be the desired action,
        # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    model = build_model_with_cfg(
        VisionTransformer, variant, pretrained,
        default_cfg=default_cfg,
        representation_size=repr_size,
        pretrained_filter_fn=checkpoint_filter_fn,
        # original google checkpoints ship as .npz and need the custom loader
        pretrained_custom_load='npz' in default_cfg['url'],
        **kwargs)
    return model
@register_model
def vit_tiny_patch16_224(pretrained=False, **kwargs):
    """ ViT-Tiny (Vit-Ti/16)
    """
    return _create_vision_transformer(
        'vit_tiny_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
@register_model
def vit_tiny_patch16_384(pretrained=False, **kwargs):
    """ ViT-Tiny (Vit-Ti/16) @ 384x384.
    """
    return _create_vision_transformer(
        'vit_tiny_patch16_384', pretrained=pretrained,
        patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
@register_model
def vit_small_patch32_224(pretrained=False, **kwargs):
    """ ViT-Small (ViT-S/32)
    """
    return _create_vision_transformer(
        'vit_small_patch32_224', pretrained=pretrained,
        patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def vit_small_patch32_384(pretrained=False, **kwargs):
    """ ViT-Small (ViT-S/32) at 384x384.
    """
    return _create_vision_transformer(
        'vit_small_patch32_384', pretrained=pretrained,
        patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
    """ ViT-Small (ViT-S/16)
    NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
    """
    return _create_vision_transformer(
        'vit_small_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def vit_small_patch16_384(pretrained=False, **kwargs):
    """ ViT-Small (ViT-S/16)
    NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
    """
    return _create_vision_transformer(
        'vit_small_patch16_384', pretrained=pretrained,
        patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_base_patch32_224', pretrained=pretrained,
        patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
    """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_base_patch32_384', pretrained=pretrained,
        patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_base_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
    """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_base_patch16_384', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_base_patch8_224(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_base_patch8_224', pretrained=pretrained,
        patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
    """
    return _create_vision_transformer(
        'vit_large_patch32_224', pretrained=pretrained,
        patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_large_patch32_384', pretrained=pretrained,
        patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_large_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
    """
    return _create_vision_transformer(
        'vit_large_patch16_384', pretrained=pretrained,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
@register_model
def vit_base_patch16_sam_224(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/16) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548
    """
    # NOTE original SAM weights release worked with representation_size=768
    return _create_vision_transformer(
        'vit_base_patch16_sam_224', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs)
@register_model
def vit_base_patch32_sam_224(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/32) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548
    """
    # NOTE original SAM weights release worked with representation_size=768
    return _create_vision_transformer(
        'vit_base_patch32_sam_224', pretrained=pretrained,
        patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs)
@register_model
def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs):
    """ ViT-Tiny (Vit-Ti/16).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    return _create_vision_transformer(
        'vit_tiny_patch16_224_in21k', pretrained=pretrained,
        patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
@register_model
def vit_small_patch32_224_in21k(pretrained=False, **kwargs):
    """ ViT-Small (ViT-S/16)
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    return _create_vision_transformer(
        'vit_small_patch32_224_in21k', pretrained=pretrained,
        patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def vit_small_patch16_224_in21k(pretrained=False, **kwargs):
    """ ViT-Small (ViT-S/16)
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    return _create_vision_transformer(
        'vit_small_patch16_224_in21k', pretrained=pretrained,
        patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
    """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    return _create_vision_transformer(
        'vit_base_patch32_224_in21k', pretrained=pretrained,
        patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
    """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    return _create_vision_transformer(
        'vit_base_patch16_224_in21k', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_base_patch8_224_in21k(pretrained=False, **kwargs):
    """ ViT-Base model (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    return _create_vision_transformer(
        'vit_base_patch8_224_in21k', pretrained=pretrained,
        patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights
    """
    return _create_vision_transformer(
        'vit_large_patch32_224_in21k', pretrained=pretrained,
        patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
    """
    return _create_vision_transformer(
        'vit_large_patch16_224_in21k', pretrained=pretrained,
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
    """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights
    """
    return _create_vision_transformer(
        'vit_huge_patch14_224_in21k', pretrained=pretrained,
        patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs)
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
    """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_tiny_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
    """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_small_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
    """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_base_patch16_224', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
    """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_base_patch16_384', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
    """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True,
        patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
    """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True,
        patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
    """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
    """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    return _create_vision_transformer(
        'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
@register_model
def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
    """
    return _create_vision_transformer(
        'vit_base_patch16_224_miil_in21k', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
@register_model
def vit_base_patch16_224_miil(pretrained=False, **kwargs):
    """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
    """
    return _create_vision_transformer(
        'vit_base_patch16_224_miil', pretrained=pretrained,
        patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs)
| 48,791 | 46.187621 | 140 | py |
SemFormer | SemFormer-main/core/arch_transformer/layers.py | """ Image to Patch Embedding using Conv2d
A convolution based approach to patchifying a 2D image w/ embedding projection.
Based on the impl in https://github.com/google-research/vision_transformer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
import timm
from timm.models.layers.helpers import to_2tuple
from timm.models.layers.trace_utils import _assert
class PatchEmbed(nn.Module):
    """ 2D Image to Patch Embedding

    Splits an image into non-overlapping ``patch_size`` patches via a strided
    Conv2d projection to ``embed_dim`` channels, optionally flattening the
    spatial grid into a token sequence (B, N, C).
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # patches per spatial dimension at the nominal input size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        # patchify + linear projection fused into a single strided convolution
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
    def forward(self, x):
        B, C, H, W = x.shape
        # size asserts deliberately disabled so inputs may differ from img_size
        # _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
        # _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x
| 1,542 | 36.634146 | 111 | py |
SemFormer | SemFormer-main/core/functional/math.py | import torch
import torch.nn.functional as F
import math
from .utils import nanmean, nansum
def scale_thresed_sigmoid(x, scale=1.0, thres=0.0):
    """Sigmoid of ``x`` shifted by ``thres`` and sharpened by ``scale``."""
    shifted = x - thres
    return torch.sigmoid(scale * shifted)
def scale_2sigmoid(x, scale=1.0):
    """Map ``x`` into (-1, 1) with a scaled sigmoid (tanh-like squashing)."""
    return torch.sigmoid(scale * x) * 2.0 - 1.0
def fast_softmax(x, dim, eps=1e-12):
    """ReLU-based normalization ("softmax without exp"): negatives are clamped
    to zero and the result is divided by its sum along ``dim`` so that each
    slice sums to 1 (or stays 0 when everything is non-positive).

    Fixes vs the original:
    - the sum is taken with ``keepdim=True`` so the division broadcasts
      correctly for any ``dim`` (without it, reducing the last dim either
      errored or silently mis-broadcast);
    - the ReLU is no longer in-place, so the caller's tensor is not mutated.
    """
    x = F.relu(x)
    denom = x.sum(dim=dim, keepdim=True).clamp(min=eps)
    return x / denom
def info_entropy(x, dim=-1, eps=1e-12):
    """Mean Shannon entropy (natural log) of distributions along ``dim``."""
    p = x.clamp(min=eps)
    per_slice = -(p * p.log()).sum(dim=dim)
    return per_slice.mean()
def kl_divergence(x, y, dim=-1, eps=1e-12):
    """Mean KL(x || y) along ``dim``; inputs are clamped away from zero."""
    assert x.dim() == y.dim(), 'input dim must be the same'
    p = x.clamp(min=eps)
    q = y.clamp(min=eps)
    per_slice = (p * (p.log() - q.log())).sum(dim=dim)
    return per_slice.mean()
def js_divergence(x, y, dim=-1, eps=1e-12):
    """Mean Jensen-Shannon divergence between x and y along ``dim``."""
    assert x.dim() == y.dim(), 'input dim must be the same'
    p = x.clamp(min=eps)
    q = y.clamp(min=eps)
    m = (p + q) / 2.
    kl_pm = (p * (p.log() - m.log())).sum(dim=dim)
    kl_qm = (q * (q.log() - m.log())).sum(dim=dim)
    return ((kl_pm + kl_qm) / 2.).mean()
def dot_product(x, y, is_aligned=False):
    """Batched dot products: per-row dots (*, N) when aligned, otherwise the
    full pairwise Gram matrix (*, N, N)."""
    assert x.dim() == y.dim(), 'input dimension must be the same'
    if not is_aligned:
        return torch.matmul(x, y.transpose(-2, -1))
    return torch.matmul(x[..., None, :], y[..., :, None])[..., 0, 0]
def jsd_mutual_information(joint_samples, marginal_samples, T=dot_product,
                           joint_aligned=True, marginal_aligned=False,
                           joint_mask=None, marginal_mask=None):
    """implementation of JSD-based MI estimator:
        I(X, Y) >= I_{JSD}(X, Y) = E_{p(x, y)}(-softplus(-T(x, y))) - E_{p(x)p(y)}(softplus(T(x, y)))
    Args:
        joint_samples: (x1, y1)
            x1: (N, C)
            y1: (N, C)
        marginal_samples: (x2, y2)
            x2: (N1, C)
            y2: (N2, C)
        T: critic scoring sample pairs; defaults to dot_product.
        joint_aligned/marginal_aligned: forwarded to T's ``is_aligned``.
        joint_mask/marginal_mask: optional index/boolean masks selecting which
            pair scores enter each expectation.
    """
    assert isinstance(joint_samples, (list, tuple)) and len(joint_samples) == 2
    assert isinstance(marginal_samples, (list, tuple)) and len(marginal_samples) == 2
    x1, y1 = joint_samples
    x2, y2 = marginal_samples
    # joint term: -softplus(-T) over paired samples
    pxy = -F.softplus(-T(x1, y1, is_aligned=joint_aligned))
    if joint_mask is not None:
        pxy = pxy[joint_mask]
    # marginal term: softplus(T) over cross pairs (product of marginals)
    px_py = F.softplus(T(x2, y2, is_aligned=marginal_aligned))
    if marginal_mask is not None:
        px_py = px_py[marginal_mask]
    # jsd_mi = pxy.mean() - px_py.mean()
    # nanmean guards the estimate against NaN scores from the critic/masking
    jsd_mi = nanmean(pxy) - nanmean(px_py)
    return jsd_mi
def fast_cosine_similarity(query, key, eps=1e-8):
    """Pairwise cosine similarity over channel vectors.

    4D (B, C, H, W) inputs -> (B, Hq*Wq, Hk*Wk) similarities between all
    spatial positions; 2D (N, C) inputs -> (N, N) row similarities.
    """
    assert query.dim() == key.dim(), 'input dims must be the same'
    ndim = query.dim()
    if ndim == 4:
        batch, channels = query.shape[:2]
        q = query.view(batch, channels, -1)
        q = F.normalize(q, p=2, dim=1, eps=eps).transpose(1, 2)
        k = F.normalize(key.view(batch, channels, -1), p=2, dim=1, eps=eps)
        return torch.matmul(q, k)
    if ndim == 2:
        q = F.normalize(query, p=2, dim=1, eps=eps)
        k = F.normalize(key, p=2, dim=1, eps=eps).transpose(0, 1)
        return torch.matmul(q, k)
    raise ValueError('only 2D or 4D inputs is supported')
def cosine_similarity(x, y, dim=-1, is_aligned=False, normalize=True, eps=1e-8):
    """Cosine similarity along ``dim``.

    is_aligned=True: elementwise similarity over paired vectors -> (*, N).
    is_aligned=False: all-pairs similarity matrix -> (*, N, N); ``dim`` must
    be one of the last two axes and names the embedding axis.
    normalize=False skips L2 normalization (plain dot products, still clamped).
    """
    # assert x.shape == y.shape, 'input must be with the same shape'
    if not is_aligned:
        assert x.shape[:-2] == y.shape[:-2], 'input must be with the same shape when not aligned'
    if normalize:
        x = F.normalize(x, p=2, dim=dim, eps=eps)
        y = F.normalize(y, p=2, dim=dim, eps=eps)
    if is_aligned:  # cos_sim: (*, N)
        cos_sim = (x * y).sum(dim=dim)
    # dim == -2:
    #   x: (*, E, N)
    #   y: (*, E, N)
    # or dim == -1:
    #   x: (*, N, E)
    #   y: (*, N, E)
    else:  # cos_sim: (*, N, N)
        # accept both negative and positive spellings of the last two axes
        candidate_dims = [-2, -1, x.dim() - 2, x.dim() - 1]
        assert dim in candidate_dims, 'when is_aligned=False, dim must be one of {}, but got {}'.format(candidate_dims, dim)
        if dim in candidate_dims[0::2]:
            # embedding axis is second-to-last: move it last before matmul
            x_t = x.transpose(dim, dim + 1)  # (*, N, E)
            cos_sim = torch.matmul(x_t, y)  # (*, N, N)
        else:
            # embedding axis is last: transpose y instead
            y_t = y.transpose(dim - 1, dim)  # (*, E, N)
            cos_sim = torch.matmul(x, y_t)  # (*, N, N)
    # clamp slightly inside (-1, 1) so downstream acos/log stay finite
    cos_sim = cos_sim.clamp(min=-1. + 1e-4, max=1. - 1e-4)
    return cos_sim
"""
Implementation of `SMU: Smooth Activation Function for Deep Networks using Smoothing Maximum Technique`
"""
def general_smu(x1, x2, miu):
    """Smooth maximum of x1 and x2 (SMU); ``miu`` controls erf sharpness."""
    diff = x1 - x2
    return ((x1 + x2) + diff * torch.erf(miu * diff)) / 2.
"""
`smu approximate ReLU`
"""
def smu(x, miu):
    """Smooth ReLU approximation: SMU with the second operand fixed at 0."""
    return (x + x * torch.erf(miu * x)) / 2.
"""
`smug approximate GeLU`
"""
def smug(x):
    """erf-based GELU: x * Phi(x) with Phi the standard normal CDF."""
    phi = (1. + torch.erf(x / math.sqrt(2))) / 2.
    return x * phi
"""
`smul approximate Leaky ReLU`
"""
def smul(x, alpha, miu):
    """Smooth Leaky-ReLU approximation with negative slope ``alpha``."""
    pos = (1 + alpha) * x
    neg = (1 - alpha) * x
    return (pos + neg * torch.erf(miu * neg)) / 2.
| 5,067 | 29.902439 | 124 | py |
SemFormer | SemFormer-main/core/functional/utils.py | import torch
import torch.nn.functional as F
def check_all(x, func):
    """True iff the elementwise predicate ``func`` holds for every entry."""
    mask = func(x)
    return torch.all(mask)
def all_in(x, min, max):
    """True iff every element lies in the closed interval [min, max]."""
    return check_all(x, lambda v: (v >= min) & (v <= max))
def all_pos(x):
    """True iff every element is strictly positive."""
    return check_all(x, lambda v: v > 0)
def all_neg(x):
    """True iff every element is strictly negative."""
    return check_all(x, lambda v: v < 0)
def all_not_neg(x):
    """True iff every element is non-negative."""
    return check_all(x, lambda v: v >= 0)
def all_not_pos(x):
    """True iff every element is non-positive."""
    return check_all(x, lambda v: v <= 0)
def check_any(x, func):
    """True iff the elementwise predicate ``func`` holds for some entry."""
    mask = func(x)
    return torch.any(mask)
def any_in(x, min, max):
    """True iff some element lies in the closed interval [min, max]."""
    return check_any(x, lambda v: (v >= min) & (v <= max))
def any_pos(x):
    """True iff some element is strictly positive."""
    return check_any(x, lambda v: v > 0)
def any_neg(x):
    """True iff some element is strictly negative."""
    return check_any(x, lambda v: v < 0)
def any_not_neg(x):
    """True iff some element is non-negative."""
    return check_any(x, lambda v: v >= 0)
def any_not_pos(x):
    """True iff some element is non-positive."""
    return check_any(x, lambda v: v <= 0)
def filter_tensor(filter_func, x, reverse=False, return_zero=True):
    """Select elements of ``x`` where ``filter_func`` is True (False when
    ``reverse``); if nothing is selected and ``return_zero`` is set, return a
    scalar zero on x's device/dtype instead of an empty tensor."""
    keep = filter_func(x)
    if reverse:
        keep = ~keep
    if return_zero and keep.sum().item() == 0:
        return x.new_zeros([1]).mean()
    return x[keep]
def filter_nan(x):
    """Drop NaN entries from ``x`` (scalar zero when all entries are NaN)."""
    return filter_tensor(torch.isnan, x, reverse=True)
def filter_inf(x):
    """Drop +/-inf entries from ``x`` (scalar zero when all entries are inf)."""
    return filter_tensor(torch.isinf, x, reverse=True)
def replace_nan(x, value=0.):
    """Return a copy of ``x`` with NaN entries replaced by ``value``.

    Fix: the original mask-arithmetic form ``(1 - mask) * x + mask * value``
    evaluates ``0 * nan == nan`` exactly at the entries being replaced, so the
    NaNs were never removed. ``masked_fill`` substitutes the value directly.
    """
    mask = torch.isnan(x)
    return x.masked_fill(mask, value)
def replace_inf(x, value=0.):
    """Return a copy of ``x`` with +/-inf entries replaced by ``value``.

    Fix: the original mask-arithmetic form ``(1 - mask) * x + mask * value``
    evaluates ``0 * inf == nan`` at the entries being replaced, producing NaN
    instead of the replacement. ``masked_fill`` substitutes the value directly.
    """
    mask = torch.isinf(x)
    return x.masked_fill(mask, value)
def replace_nonnum(x, value=0.):
    """Return a copy of ``x`` with every non-finite entry (NaN, +/-inf)
    replaced by ``value``.

    Two fixes vs the original: the mask combines isinf/isnan with OR (the
    original AND is never True, since no value is both), and ``masked_fill``
    avoids the ``0 * inf == nan`` artifact of the mask-arithmetic form.
    """
    mask = torch.isinf(x) | torch.isnan(x)
    return x.masked_fill(mask, value)
def nansum(x, dim=None):
    """Sum of ``x`` ignoring NaNs; full reduction when ``dim`` is None,
    otherwise NaNs are zeroed and a per-dim sum is returned."""
    if dim is not None:
        return replace_nan(x).sum(dim=dim)
    return filter_tensor(torch.isnan, x, reverse=True, return_zero=True).sum()
def infsum(x, dim=None):
    """Sum of ``x`` ignoring +/-inf; full reduction when ``dim`` is None,
    otherwise infs are zeroed and a per-dim sum is returned."""
    if dim is not None:
        return replace_inf(x).sum(dim=dim)
    return filter_tensor(torch.isinf, x, reverse=True, return_zero=True).sum()
def nanmean(x, dim=None):
    """Mean of ``x`` ignoring NaNs when ``dim`` is None; with ``dim``, NaNs
    are zeroed first and still count toward the denominator."""
    if dim is not None:
        return replace_nan(x).mean(dim=dim)
    return filter_tensor(torch.isnan, x, reverse=True, return_zero=True).mean()
def infmean(x, dim=None):
    """Mean of ``x`` ignoring +/-inf when ``dim`` is None; with ``dim``, infs
    are zeroed first and still count toward the denominator."""
    if dim is not None:
        return replace_inf(x).mean(dim=dim)
    return filter_tensor(torch.isinf, x, reverse=True, return_zero=True).mean()
def safesum(x, dim=None):
    """Sum of ``x`` ignoring all non-finite entries (NaN and +/-inf).

    Fixes vs the original: the filter mask combines isinf and isnan with OR
    (the original AND is never True), and the ``dim`` branch actually reduces
    over ``dim`` instead of summing everything.
    """
    if dim is None:
        return filter_tensor(lambda t: torch.isinf(t) | torch.isnan(t),
                             x, reverse=True, return_zero=True).sum()
    # non-finite entries are zeroed, then a per-dim sum is taken
    x = replace_nonnum(x)
    return x.sum(dim=dim)
def safemean(x, dim=None):
    """Mean of ``x`` ignoring all non-finite entries (NaN and +/-inf).

    Fixes vs the original: the filter mask combines isinf and isnan with OR
    (the original AND is never True), and the ``dim`` branch actually reduces
    over ``dim`` instead of averaging everything. Note that with ``dim`` the
    zeroed entries still count toward the denominator.
    """
    if dim is None:
        return filter_tensor(lambda t: torch.isinf(t) | torch.isnan(t),
                             x, reverse=True, return_zero=True).mean()
    x = replace_nonnum(x)
    return x.mean(dim=dim)
| 2,843 | 23.101695 | 83 | py |
SemFormer | SemFormer-main/core/functional/convolution.py | import torch
import torch.nn.functional as F
def dynamic_conv2d(self, x, weight, bias=None, stride=1, dilation=1, groups=1, padding=0, return_unview=False):
    """Per-sample ("dynamic") conv2d: each batch element is convolved with its
    own filters by folding the batch into the channel axis and running one
    grouped convolution.

    x: (B, C, H, W); weight: (C_out, C_in, kH, kW) with C_in == B * C and
    C_out divisible by B. Returns (B, C_out // B, H', W'), or the raw
    (1, C_out, H', W') tensor when ``return_unview`` is True.

    Fixes vs the original: F.conv2d was called with the misspelled keyword
    ``dialtion`` bound to the undefined name ``dialtion`` (NameError at
    runtime); and the final view now uses the actual conv output size rather
    than assuming the input (H, W), so stride/padding other than "same" work.
    NOTE(review): ``self`` is unused -- this looks like a method body pasted
    as a free function; kept for signature compatibility.
    """
    B, C, H, W = x.shape
    C_out, C_in, *kernel_size = weight.shape
    assert B * C == C_in
    assert C_out % B == 0
    # padding = ((K_h - 1) // 2, (K_w - 1) // 2)
    # x = same_pad2d(x, kernel_size, stride=stride, dilation=dilation, pad_mode='around')
    x = x.view(1, B * C, H, W)
    out = F.conv2d(x, weight, bias=bias, stride=stride, dilation=dilation,
                   padding=padding, groups=B * groups)
    if return_unview:
        # (1, C_out, H', W')
        return out
    out = out.view(B, C_out // B, out.shape[-2], out.shape[-1])
    return out
SemFormer | SemFormer-main/core/functional/padding.py | import torch
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
import math
# code modified from mmcv
def same_pad2d(x, kernel_size, stride=1, dilation=1, pad_mode='corner'):
    """TF-style "SAME" padding for a conv with the given geometry.

    Pads ``x`` so a convolution with ``kernel_size``/``stride``/``dilation``
    yields ceil(size / stride) outputs per spatial dim. pad_mode 'corner'
    puts all padding on the bottom/right; anything else splits it around.
    """
    kernel_size = _pair(kernel_size)
    stride = _pair(stride)
    dilation = _pair(dilation)
    in_h, in_w = x.size()[-2:]
    out_h = math.ceil(in_h / stride[0])
    out_w = math.ceil(in_w / stride[1])
    pad_h = max((out_h - 1) * stride[0] + (kernel_size[0] - 1) * dilation[0] + 1 - in_h, 0)
    pad_w = max((out_w - 1) * stride[1] + (kernel_size[1] - 1) * dilation[1] + 1 - in_w, 0)
    if pad_h or pad_w:
        if pad_mode == 'corner':
            pad = [0, pad_w, 0, pad_h]
        else:
            pad = [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
        x = F.pad(x, pad)
    return x
def adaptive_pad2d(x, output_size, pad_mode='corner'):
    """Pad so each spatial dim becomes splittable into ``output_size`` equal
    patches: new size = ceil(size / n) * n per dimension."""
    out_h, out_w = _pair(output_size)
    h, w = x.shape[-2:]
    pad_h = math.ceil(h / out_h) * out_h - h
    pad_w = math.ceil(w / out_w) * out_w - w
    if pad_h or pad_w:
        if pad_mode == 'corner':
            pad = [0, pad_w, 0, pad_h]
        else:
            pad = [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
        x = F.pad(x, pad)
    return x
def patchable_pad2d(x, patch_size, pad_mode='corner'):
    """Pad so both spatial dims are exact multiples of ``patch_size``."""
    p_h, p_w = _pair(patch_size)
    h, w = x.shape[-2:]
    pad_h = math.ceil(h / p_h) * p_h - h
    pad_w = math.ceil(w / p_w) * p_w - w
    if pad_h or pad_w:
        if pad_mode == 'corner':
            pad = [0, pad_w, 0, pad_h]
        else:
            pad = [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
        x = F.pad(x, pad)
    return x
SemFormer | SemFormer-main/core/functional/fold.py | import torch
import torch.nn.functional as F
def unfold_w_center(x, kernel_size, dilation):
    """Gather the k*k dilated neighborhood (center included) of every pixel:
    (B, C, H, W) -> (B, C, k*k, H, W), with SAME padding so spatial size is
    preserved. ``kernel_size`` must be odd."""
    assert x.dim() == 4
    assert kernel_size % 2 == 1
    # SAME padding for an odd dilated kernel
    pad = (kernel_size + (dilation - 1) * (kernel_size - 1)) // 2
    cols = F.unfold(x, kernel_size=kernel_size, padding=pad, dilation=dilation)
    B, C, H, W = x.shape
    return cols.view(B, C, -1, H, W)
def unfold_wo_center(x, kernel_size, dilation):
    """Gather the k*k dilated neighborhood of every pixel EXCLUDING the center
    pixel itself: (B, C, H, W) -> (B, C, k*k - 1, H, W), SAME padding."""
    assert x.dim() == 4
    assert kernel_size % 2 == 1
    # SAME padding for an odd dilated kernel
    pad = (kernel_size + (dilation - 1) * (kernel_size - 1)) // 2
    cols = F.unfold(x, kernel_size=kernel_size, padding=pad, dilation=dilation)
    B, C, H, W = x.shape
    cols = cols.view(B, C, -1, H, W)
    # drop the center tap (flat index k*k // 2)
    n_taps = kernel_size ** 2
    keep = [i for i in range(n_taps) if i != n_taps // 2]
    index = torch.tensor(keep, dtype=torch.long, device=x.device)
    return cols[:, :, index, :, :]
tensorflow-onnx | tensorflow-onnx-main/tools/profile_conversion_time.py | # SPDX-License-Identifier: Apache-2.0
# coding: utf-8
"""
Profiles the conversion of a Keras model.
"""
import sys
import cProfile
from pstats import SortKey, Stats
import io
import argparse
import tensorflow as tf
from tensorflow.keras.applications import MobileNet, EfficientNetB2
from tf2onnx import tfonnx
try:
from pyinstrument import Profiler
except ImportError:
Profiler = None
def spy_model(name):
    """Build the requested Keras model inside a fresh TF1 session and freeze it.

    :param name: "MobileNet" or "EfficientNetB2"
    :return: (frozen GraphDef with variables folded into constants, the Keras model)
    :raises ValueError: for any other model name
    """
    with tf.compat.v1.Session(graph=tf.Graph()) as session:
        if name == "MobileNet":
            model = MobileNet()
        elif name == "EfficientNetB2":
            model = EfficientNetB2()
        else:
            raise ValueError("Unknown model name %r." % name)
        # Freeze: replace variables with constants so the GraphDef is self-contained.
        graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess=session,
            input_graph_def=session.graph_def,
            output_node_names=[model.output.op.name])
    return graph_def, model
def spy_convert(graph_def, model):
    """Convert the frozen *graph_def* to ONNX with tf2onnx.

    The nested function exists so the actual conversion call shows up as a
    distinct frame in profiler output.
    """
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def=graph_def, name='')
        def spy_convert_in():
            return tfonnx.process_tf_graph(
                tf_graph=graph, input_names=[model.input.name],
                output_names=[model.output.name])
        spy_convert_in()
def create(name):
    """Build and freeze the named model; returns (graph_def, model)."""
    return spy_model(name)
def convert(graph_def, model):
    """Run the ONNX conversion for the frozen graph (see spy_convert)."""
    spy_convert(graph_def, model)
def profile(profiler="none", name="MobileNet", show_all=False):
    """
    Profiles the conversion of a model.

    :param profiler: one among none, spy, pyinstrument, cProfile
    :param name: model to profile, MobileNet, EfficientNetB2
    :param show_all: used by pyinstrument to show all functions
    """
    print("create(%r, %r)" % (profiler, name))
    graph_def, model = create(name)
    print("profile(%r, %r)" % (profiler, name))
    if profiler == "none":
        convert(graph_def, model)
    elif profiler == "spy":
        # py-spy record -r 10 -o profile.svg -- python conversion_time.py spy
        convert(graph_def, model)
    elif profiler == "pyinstrument":
        if Profiler is None:
            raise ImportError("pyinstrument is not installed")
        # use a distinct local name so the `profiler` argument is not shadowed
        prof = Profiler(interval=0.0001)
        prof.start()
        convert(graph_def, model)
        prof.stop()
        print(prof.output_text(unicode=False, color=False, show_all=show_all))
    elif profiler == "cProfile":
        pr = cProfile.Profile()
        pr.enable()
        convert(graph_def, model)
        pr.disable()
        s = io.StringIO()
        sortby = SortKey.CUMULATIVE
        ps = Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print(s.getvalue())
    else:
        raise ValueError("Unknown profiler %r." % profiler)
def main(args):
    """Parse command-line *args* and run the requested profiling."""
    def str2bool(value):
        # argparse's type=bool is a trap: bool("False") is True, so any
        # explicit value on the command line used to enable the flag.
        return str(value).lower() in ("true", "1", "yes")

    parser = argparse.ArgumentParser(
        description='Profiles the conversion of a Keras model.')
    parser.add_argument('--profiler', default='none',
                        choices=['none', 'spy', 'pyinstrument', 'cProfile'],
                        help='a profiler')
    parser.add_argument('--name', default="MobileNet",
                        choices=['MobileNet', 'EfficientNetB2'],
                        help="a model")
    parser.add_argument('--showall', type=str2bool, default=False,
                        help="used by pyinstrument to show all functions")
    res = parser.parse_args(args)
    profile(res.profiler, res.name, res.showall)
# Script entry point: forward the CLI args (minus the program name) to main().
if __name__ == '__main__':
    print('Begin profiling with', sys.argv[1:])
    main(sys.argv[1:])
    print('Profile complete.')
| 3,675 | 29.890756 | 82 | py |
tensorflow-onnx | tensorflow-onnx-main/examples/end2end_tfhub.py | # SPDX-License-Identifier: Apache-2.0
"""
This example retrieves a model from tensorflowhub.
It is converted into ONNX. Predictions are compared to
the predictions from tensorflow to check there is no
discrepencies. Inferencing time is also compared between
*onnxruntime*, *tensorflow* and *tensorflow.lite*.
"""
from onnxruntime import InferenceSession
import os
import sys
import subprocess
import timeit
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Input
try:
import tensorflow_hub as tfhub
except ImportError:
# no tensorflow_hub
print("tensorflow_hub not installed.")
sys.exit(0)
########################################
# Downloads the model.
hub_layer = tfhub.KerasLayer(
"https://tfhub.dev/google/efficientnet/b0/classification/1")
model = keras.Sequential()
model.add(Input(shape=(224, 224, 3), dtype=tf.float32))
model.add(hub_layer)
print(model.summary())
########################################
# Saves the model.
if not os.path.exists("efficientnetb0clas"):
os.mkdir("efficientnetb0clas")
tf.keras.models.save_model(model, "efficientnetb0clas")
input_names = [n.name for n in model.inputs]
output_names = [n.name for n in model.outputs]
print('inputs:', input_names)
print('outputs:', output_names)
########################################
# Testing the model.
input = np.random.randn(2, 224, 224, 3).astype(np.float32)
expected = model.predict(input)
print(expected)
########################################
# Run the command line.
proc = subprocess.run(
'python -m tf2onnx.convert --saved-model efficientnetb0clas '
'--output efficientnetb0clas.onnx --opset 12'.split(),
capture_output=True)
print(proc.returncode)
print(proc.stdout.decode('ascii'))
print(proc.stderr.decode('ascii'))
########################################
# Runs onnxruntime.
session = InferenceSession("efficientnetb0clas.onnx")
got = session.run(None, {'input_1': input})
print(got[0])
########################################
# Measures the differences.
print(np.abs(got[0] - expected).max())
########################################
# Measures processing time.
print('tf:', timeit.timeit('model.predict(input)',
number=10, globals=globals()))
print('ort:', timeit.timeit("session.run(None, {'input_1': input})",
number=10, globals=globals()))
| 2,389 | 29.641026 | 68 | py |
tensorflow-onnx | tensorflow-onnx-main/examples/end2end_tfkeras.py | # SPDX-License-Identifier: Apache-2.0
"""
This example builds a simple model without training.
It is converted into ONNX. Predictions are compared to
the predictions from tensorflow to check there is no
discrepencies. Inferencing time is also compared between
*onnxruntime*, *tensorflow* and *tensorflow.lite*.
"""
from onnxruntime import InferenceSession
import os
import subprocess
import timeit
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input
########################################
# Creates the model.
model = keras.Sequential()
model.add(Input((4, 4)))
model.add(layers.SimpleRNN(8))
model.add(layers.Dense(2))
print(model.summary())
input_names = [n.name for n in model.inputs]
output_names = [n.name for n in model.outputs]
print('inputs:', input_names)
print('outputs:', output_names)
########################################
# Training
# ....
# Skipped.
########################################
# Testing the model.
input = np.random.randn(2, 4, 4).astype(np.float32)
expected = model.predict(input)
print(expected)
########################################
# Saves the model.
if not os.path.exists("simple_rnn"):
os.mkdir("simple_rnn")
tf.keras.models.save_model(model, "simple_rnn")
########################################
# Run the command line.
proc = subprocess.run('python -m tf2onnx.convert --saved-model simple_rnn '
'--output simple_rnn.onnx --opset 12'.split(),
capture_output=True)
print(proc.returncode)
print(proc.stdout.decode('ascii'))
print(proc.stderr.decode('ascii'))
########################################
# Runs onnxruntime.
session = InferenceSession("simple_rnn.onnx")
got = session.run(None, {'input_1': input})
print(got[0])
########################################
# Measures the differences.
print(np.abs(got[0] - expected).max())
########################################
# Measures processing time.
print('tf:', timeit.timeit('model.predict(input)',
number=100, globals=globals()))
print('ort:', timeit.timeit("session.run(None, {'input_1': input})",
number=100, globals=globals()))
| 2,202 | 29.178082 | 75 | py |
tensorflow-onnx | tensorflow-onnx-main/examples/getting_started.py | # SPDX-License-Identifier: Apache-2.0
"""
This example shows how to convert tf functions and keras models using the Python API.
It also demonstrates converting saved_models from the command line.
"""
import tensorflow as tf
import tf2onnx
import numpy as np
import onnxruntime as ort
import os
##################### tf function #####################
@tf.function
def f(a, b):
return a + b
input_signature = [tf.TensorSpec([2, 3], tf.float32), tf.TensorSpec([2, 3], tf.float32)]
onnx_model, _ = tf2onnx.convert.from_function(f, input_signature, opset=13)
a_val = np.ones([2, 3], np.float32)
b_val = np.zeros([2, 3], np.float32)
print("Tensorflow result")
print(f(a_val, b_val).numpy())
print("ORT result")
sess = ort.InferenceSession(onnx_model.SerializeToString())
res = sess.run(None, {'a': a_val, 'b': b_val})
print(res[0])
##################### Keras Model #####################
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(4, activation="relu"))
input_signature = [tf.TensorSpec([3, 3], tf.float32, name='x')]
onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=13)
x_val = np.ones((3, 3), np.float32)
print("Keras result")
print(model(x_val).numpy())
print("ORT result")
sess = ort.InferenceSession(onnx_model.SerializeToString())
res = sess.run(None, {'x': x_val})
print(res[0])
##################### Saved Model #####################
model.save("savedmodel")
os.system("python -m tf2onnx.convert --saved-model savedmodel --output model.onnx --opset 13")
print("ORT result")
sess = ort.InferenceSession("model.onnx")
res = sess.run(None, {'dense_input': x_val})
print(res[0])
print("Conversion succeeded") | 1,673 | 25.15625 | 94 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/tf_utils.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.tf_utils - misc utilities for tf2onnx that interface with tensorflow
"""
import collections
from packaging.version import Version
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import types_pb2, tensor_pb2, graph_pb2
from tensorflow.python.framework import tensor_util
from onnx import helper, onnx_pb, numpy_helper
from tf2onnx.utils import make_sure, is_tf_const_op, port_name, map_onnx_to_numpy_type
from . import logging
logger = logging.getLogger(__name__)
#
# mapping dtypes from tensorflow to onnx
#
TF_TO_ONNX_DTYPE = {
types_pb2.DT_FLOAT: onnx_pb.TensorProto.FLOAT,
types_pb2.DT_HALF: onnx_pb.TensorProto.FLOAT16,
types_pb2.DT_BFLOAT16: onnx_pb.TensorProto.FLOAT16,
types_pb2.DT_DOUBLE: onnx_pb.TensorProto.DOUBLE,
types_pb2.DT_INT32: onnx_pb.TensorProto.INT32,
types_pb2.DT_INT16: onnx_pb.TensorProto.INT16,
types_pb2.DT_INT8: onnx_pb.TensorProto.INT8,
types_pb2.DT_UINT8: onnx_pb.TensorProto.UINT8,
types_pb2.DT_UINT16: onnx_pb.TensorProto.UINT16,
types_pb2.DT_UINT32: onnx_pb.TensorProto.UINT32,
types_pb2.DT_UINT64: onnx_pb.TensorProto.UINT64,
types_pb2.DT_INT64: onnx_pb.TensorProto.INT64,
types_pb2.DT_STRING: onnx_pb.TensorProto.STRING,
types_pb2.DT_COMPLEX64: onnx_pb.TensorProto.COMPLEX64,
types_pb2.DT_COMPLEX128: onnx_pb.TensorProto.COMPLEX128,
types_pb2.DT_BOOL: onnx_pb.TensorProto.BOOL,
types_pb2.DT_RESOURCE: onnx_pb.TensorProto.INT64, # TODO: hack to allow processing on control flow
types_pb2.DT_VARIANT: onnx_pb.TensorProto.UNDEFINED,
types_pb2.DT_QUINT8: onnx_pb.TensorProto.UINT8,
}
def tf_to_onnx_tensor(tensor, name=""):
    """Convert a tensorflow TensorProto to an onnx TensorProto.

    String tensors arrive as numpy object arrays of bytes and are decoded to
    str before handing them to numpy_helper.from_array.
    """
    np_data = get_tf_tensor_data(tensor)
    if np_data.dtype == object:
        # assume np_data is string, numpy_helper.from_array accepts ndarray,
        # in which each item is of str while the whole dtype is of object.
        try:
            # Faster but fails on Unicode
            np_data = np_data.astype(str).astype(object)
        except UnicodeDecodeError:
            decode = np.vectorize(lambda x: x.decode('UTF-8'))
            np_data = decode(np_data).astype(object)
        except Exception as ex:
            # was a bare `except:` — keep the original error chained instead of
            # discarding it (and stop catching SystemExit/KeyboardInterrupt)
            raise RuntimeError("Not support type: {}".format(type(np_data.flat[0]))) from ex
    return numpy_helper.from_array(np_data, name=name)
def get_tf_tensor_data(tensor):
    """Extract the value of a TF TensorProto as a numpy ndarray."""
    make_sure(isinstance(tensor, tensor_pb2.TensorProto), "Require TensorProto")
    np_data = tensor_util.MakeNdarray(tensor)
    make_sure(isinstance(np_data, np.ndarray), "%r isn't ndarray", np_data)
    return np_data
def get_tf_const_value(op, as_list=True):
    """
    If as_list=True, return the array as a (possibly nested) list.
    Otherwise, return data of type np.ndarray.
    If a tensor is a scalar having value 1,
    when as_list=False, return np.array(1), type is <class 'numpy.ndarray'>
    when as_list=True, return 1, type is <class 'int'>.
    """
    make_sure(is_tf_const_op(op), "%r isn't a const op", op.name)
    value = get_tf_tensor_data(op.get_attr("value"))
    if as_list:
        # ndarray.tolist() turns a 0-d array into a plain Python scalar
        value = value.tolist()
    return value
def get_tf_shape_attr(node):
    """Get shape from tensorflow attr "shape"; None if absent or unknown rank."""
    dims = None
    try:
        shape = get_tf_node_attr(node, "shape")
        if not shape.unknown_rank:
            dims = [int(d.size) for d in shape.dim]
    except Exception:  # attr missing/malformed; was a bare except (caught KeyboardInterrupt too)
        pass
    return dims
def get_tf_tensor_shape(tensor):
    """Return the tensor's shape as a list (entries may be None), or None if unknown."""
    try:
        return tensor.get_shape().as_list()
    except Exception:  # pylint: disable=broad-except
        return None
def map_tf_dtype(dtype):
    """Map a tensorflow dtype enum to the matching onnx dtype; falsy input passes through."""
    if not dtype:
        return dtype
    return TF_TO_ONNX_DTYPE[dtype]
def get_tf_node_attr(node, name):
    """Fetch attribute *name* from a tensorflow node."""
    return node.get_attr(name)
def get_tf_version():
    """Return the installed tensorflow version as a packaging.version.Version."""
    return Version(tf.__version__)
def compress_graph_def(graph_def):
    """
    Remove large const values from graph. This lets us import the graph and run shape inference without TF crashing.
    """
    const_node_values = {}
    for node_def in list(graph_def.node):
        if node_def.op != 'Const':
            continue
        tensor = node_def.attr["value"].tensor
        # Small constants are sometimes used to store shape information and must be maintained
        if len(tensor.tensor_content) > 1000:
            make_sure(node_def.name not in const_node_values, "Two nodes in graph have same name %s", node_def.name)
            const_node_values[node_def.name] = tensor.tensor_content
            tensor.tensor_content = b''
    return const_node_values
def get_index_from_strided_slice_of_shape(node, outputs_to_values):
    """Returns the index of the dimension that the strided slice is reading from the shape node or None"""
    # Only the simplest slice form is recognized: shape[i:i+1] with
    # shrink_axis_mask set and every other mask clear.
    attr_vals = {
        'shrink_axis_mask': 1,
        'ellipsis_mask': 0,
        'begin_mask': 0,
        'new_axis_mask': 0,
        'end_mask': 0
    }
    for a in node.node_def.attr:
        if a in attr_vals:
            i = get_tf_node_attr(node, a)
            if i != attr_vals[a]:
                return None
    # begin/end/strides must all be known, one-element constant values
    i1 = outputs_to_values.get(node.inputs[1].name)
    i2 = outputs_to_values.get(node.inputs[2].name)
    i3 = outputs_to_values.get(node.inputs[3].name)
    if i1 is None or i2 is None or i3 is None:
        return None
    if i1.shape != (1,) or i2.shape != (1,) or i3.shape != (1,):
        return None
    i1, i2, i3 = i1[0], i2[0], i3[0]
    # require the slice [i1, i1+1) with stride 1, i.e. a single-dimension read
    if i1 + 1 != i2 or i3 != 1:
        return None
    return i1
def compute_const_folding_using_tf(g, const_node_values, graph_outputs):
    """Find nodes with constant inputs and compute their values using TF.

    :param g: tf Graph to scan
    :param const_node_values: optional map node name -> raw tensor_content that
        was stripped earlier by compress_graph_def (may be None)
    :param graph_outputs: names of the graph outputs; never folded away
    :return: (outputs_to_values, outputs_to_dtypes) for the folded outputs
    """
    if const_node_values is None:
        const_node_values = {}
    graph_outputs = set(graph_outputs)
    from tf2onnx.tf_loader import tf_session, tf_placeholder  # pylint: disable=import-outside-toplevel
    ops = g.get_operations()
    outputs_to_values = {}
    outputs_to_dtypes = {}
    outputs_to_shapes = {}
    shape_node_outputs = {}

    def is_small_shape(x):
        # np.prod: np.product was deprecated and removed in numpy 2.0
        return np.prod(x) <= 1000

    def is_huge_shape(x):
        return np.prod(x) >= 1000000

    for node in ops:
        # Load values of constants. Use const_node_values if possible
        if node.type in ["Const", "ConstV2"]:
            tensor = node.node_def.attr["value"].tensor
            if node.name in const_node_values:
                tensor.tensor_content = const_node_values[node.name]
            outputs_to_values[node.outputs[0].name] = get_tf_tensor_data(tensor)
            outputs_to_dtypes[node.outputs[0].name] = node.outputs[0].dtype
        for out in node.outputs:
            outputs_to_shapes[out.name] = get_tf_tensor_shape(out)
    for node in ops:
        if node.type == "Shape":
            shape = outputs_to_shapes.get(node.inputs[0].name)
            if shape is not None:
                shape_node_outputs[node.outputs[0].name] = shape
    unneeded_outputs = set()
    progress = True
    while progress:
        progress = False
        for node in ops:
            # Find ops with constant inputs and compute their values
            input_names = [i.name for i in node.inputs]
            output_names = [i.name for i in node.outputs]
            if node.type == 'StridedSlice' and input_names[0] in shape_node_outputs \
                    and output_names[0] not in outputs_to_values \
                    and output_names[0] not in unneeded_outputs:
                shape = shape_node_outputs[input_names[0]]
                i = get_index_from_strided_slice_of_shape(node, outputs_to_values)
                if i is not None and 0 <= i < len(shape) and shape[i] is not None:
                    np_dtype = map_onnx_to_numpy_type(map_tf_dtype(node.outputs[0].dtype))
                    outputs_to_values[output_names[0]] = np.array(shape[i], dtype=np_dtype)
                    outputs_to_dtypes[node.outputs[0].name] = node.outputs[0].dtype
                    progress = True
            can_fold = node.type not in ['Enter', 'Placeholder', 'PlaceholderWithDefault', 'Switch', 'Merge',
                                         'NextIteration', 'Exit', 'QuantizeAndDequantizeV2', 'QuantizeAndDequantizeV3',
                                         'QuantizeAndDequantizeV4']
            can_fold = can_fold and not node.type.startswith('Random')
            can_fold = can_fold and len(input_names) > 0 and all(inp in outputs_to_values for inp in input_names)
            # We can only fold nodes with a single output
            can_fold = can_fold and len(output_names) == 1 and output_names[0] not in outputs_to_values
            # Skip if value already computed, used, and discarded
            can_fold = can_fold and output_names[0] not in unneeded_outputs and output_names[0] not in graph_outputs
            if can_fold:
                # Make a mini graph containing just the node to fold
                g2 = tf.Graph()
                with g2.as_default():
                    for inp in input_names:
                        tf_placeholder(outputs_to_dtypes[inp], name=inp.split(':')[0])
                    mini_graph_def = g2.as_graph_def()
                    mini_graph_def.node.append(node.node_def)
                g3 = tf.Graph()
                with g3.as_default():
                    feed_dict = {}
                    inp_shapes = []
                    for inp in input_names:
                        inp_np = outputs_to_values[inp]
                        feed_dict[inp] = inp_np
                        inp_shapes.append(inp_np.shape)
                    try:
                        with tf_session() as sess:
                            tf.import_graph_def(mini_graph_def, name='')
                            results = sess.run(output_names, feed_dict=feed_dict)
                            if is_huge_shape(results[0].shape) and all(is_small_shape(inp) for inp in inp_shapes):
                                logger.debug("Skipping folding of node %s since result shape %s is much larger "
                                             "than input shapes %s", node.name, results[0].shape, inp_shapes)
                            else:
                                outputs_to_values[output_names[0]] = results[0]
                                outputs_to_dtypes[output_names[0]] = node.outputs[0].dtype
                                progress = True
                    except Exception:  # pylint: disable=broad-except
                        logger.debug("Could not fold node %s", node.name)
        # NOTE: the pruning passes below run inside the while loop, once per sweep.
        unneeded_outputs.update(outputs_to_values.keys())
        for node in ops:
            # Mark values we need to keep
            input_names = [i.name for i in node.inputs]
            output_names = [i.name for i in node.outputs]
            if len(output_names) == 1 and output_names[0] in outputs_to_values:
                continue
            for i in input_names:
                if i in unneeded_outputs:
                    unneeded_outputs.remove(i)
        for node in unneeded_outputs:
            # Remove unneeded values to prevent memory usage explosion
            if node in outputs_to_values:
                del outputs_to_values[node]
                del outputs_to_dtypes[node]
        for node in ops:
            # We don't need the constants any more
            if node.type in ["Const", "ConstV2"] and node.outputs[0].name in outputs_to_values:
                del outputs_to_values[node.outputs[0].name]
                del outputs_to_dtypes[node.outputs[0].name]
    logger.info("Computed %d values for constant folding", len(outputs_to_values))
    return outputs_to_values, outputs_to_dtypes
class HashTableInfo:
    """Plain record describing a hash table (or a table lookup) found in a TF graph."""
    def __init__(self, shared_name, key_dtype, val_dtype, resource_input=None):
        # shared_name: bytes name of the table; None for LookupTableFindV2 entries
        self.shared_name = shared_name
        self.key_dtype = key_dtype
        self.val_dtype = val_dtype
        # resource_input: name of the table resource tensor read by a
        # LookupTableFindV2 node; None for table declarations
        self.resource_input = resource_input
def get_hash_table_info(nodes_or_graph_def):
    """
    Return a list of HashTableInfo for all hash tables (and LookupTableFindV2
    lookups) declared in the graph_def or list of nodes.
    """
    if isinstance(nodes_or_graph_def, graph_pb2.GraphDef):
        nodes = nodes_or_graph_def.node
    else:
        nodes = nodes_or_graph_def
    info = []
    for n in nodes:
        if n.op == "LookupTableFindV2":
            # lookup nodes carry no shared_name; record the resource they read
            info.append(HashTableInfo(None, n.attr['Tin'].type, n.attr['Tout'].type, n.input[0]))
        if n.op in ["HashTableV2", "MutableHashTableV2"]:
            if all(k in n.attr for k in ['shared_name', 'key_dtype', 'value_dtype']):
                name = n.attr['shared_name'].s
                if name != b'':
                    info.append(HashTableInfo(name, n.attr['key_dtype'].type, n.attr['value_dtype'].type))
    return info
def replace_placeholders_with_tables(graph_def, placeholder_to_table_info):
    """
    Given a graph_def and a map from placeholder names to HashTableInfo objects,
    replaces the matching Placeholder ops in the graph_def with HashTableV2 ops
    (mutates graph_def in place).
    """
    for n in graph_def.node:
        if n.op == "Placeholder" and n.name in placeholder_to_table_info:
            info = placeholder_to_table_info[n.name]
            # drop all placeholder attrs (dtype/shape) before rewriting the op
            for a in list(n.attr):
                del n.attr[a]
            n.op = "HashTableV2"
            n.attr['shared_name'].s = info.shared_name
            n.attr['key_dtype'].type = info.key_dtype
            n.attr['value_dtype'].type = info.val_dtype
def read_tf_node_def_attrs(node_def, input_dtypes, input_shapes):
    """Given a tf node def, returns a dict of attribute names to values"""
    # NOTE: mutates node_def in place — clears its inputs, renames it to "node",
    # and rewires it to freshly created placeholders.
    from tf2onnx.tf_loader import tf_session, tf_placeholder  # pylint: disable=import-outside-toplevel
    del node_def.input[:]
    node_def.name = "node"
    # read_tf_node_attrs uses some tf methods that require the node to be loaded into a valid TF graph
    g = tf.Graph()
    with g.as_default():
        for i, (dtype, shape) in enumerate(zip(input_dtypes, input_shapes)):
            inp = "input" + str(i)
            tf_placeholder(dtype, name=inp, shape=shape)
            node_def.input.append(inp)
        mini_graph_def = g.as_graph_def()
        mini_graph_def.node.append(node_def)
    g2 = tf.Graph()
    with g2.as_default():
        with tf_session() as sess:
            tf.import_graph_def(mini_graph_def, name='')
            node = sess.graph.get_operation_by_name("node")
            return read_tf_node_attrs(node)
# ignore the following attributes
TF_IGNORED_NODE_ATTRS = {
"T", "unknown_rank", "_class", "Tshape", "use_cudnn_on_gpu", "Index", "Tpaddings",
"TI", "Tparams", "Tindices", "Tlen", "Tdim", "Tin", "dynamic_size", "Tmultiples",
"Tblock_shape", "Tcrops", "index_type", "Taxis", "U", "maxval",
"Tout", "Tlabels", "Tindex", "element_shape", "Targmax", "Tperm", "Tcond",
"T_threshold", "shape_type", "_lower_using_switch_merge",
"parallel_iterations", "_num_original_outputs", "output_types", "output_shapes",
"key_dtype", "value_dtype", "Tin", "Tout", "capacity", "component_types", "shapes",
"Toutput_types", "dense_shapes", "Tdense", "Tsegmentids", "Tshift", "Tnumsegments", "SrcT",
"Tcomplex", "Treal", # For RFFT, Tcomplex is ignored because
# onnx.helper.make_node fails,
# TODO: it should be added back.
}
TF_SUBGRAPH_ATTRS = {
"body", "cond", "then_branch", "else_branch", "f"
}
def read_tf_node_attrs(node):
    """Given a tf Node, return (dict of attr name -> converted value, Counter of attr names seen)."""
    attr = {}
    attr_cnt = collections.Counter()
    for a in node.node_def.attr:
        attr_cnt[a] += 1
        value = get_tf_node_attr(node, a)
        # ignored/subgraph/tensor attrs are handled elsewhere
        if a in TF_IGNORED_NODE_ATTRS or a in TF_SUBGRAPH_ATTRS or isinstance(value, tensor_pb2.TensorProto):
            pass
        elif a == "shape":
            shape = get_tf_shape_attr(node)
            if shape is not None:
                attr[a] = shape
        elif a == "DstT":
            # Cast's destination dtype maps onto the onnx "to" attribute
            attr["to"] = map_tf_dtype(value)
        elif isinstance(value, tf.DType):
            attr[a] = map_tf_dtype(value)
        elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], tf.DType):
            attr[a] = [map_tf_dtype(v) for v in value]
        else:
            attr[a] = get_tf_node_attr(node, a)
    return attr, attr_cnt
def tflist_to_onnx(g, shape_override, const_node_values=None, ignore_default=None, use_default=None):
    """
    Convert the tf-node list into an onnx graph with minimal rewrites so
    we can use the onnx graph as intermediate graph.
    """
    node_list = g.get_operations()
    functions = {}
    # some stats
    op_cnt = collections.Counter()
    attr_cnt = collections.Counter()
    onnx_nodes = []
    output_shapes = {}
    dtypes = {}
    # find outputs
    ops = node_list
    # create dict with output to shape mappings
    for node in ops:
        for out in node.outputs:
            shape = shape_override.get(out.name)
            if shape is None:
                shape = get_tf_tensor_shape(out)
            dtypes[out.name] = map_tf_dtype(out.dtype)
            output_shapes[out.name] = shape
    for node in ops:
        attr, new_attr_cnt = read_tf_node_attrs(node)
        attr_cnt += new_attr_cnt
        # takeit is always True here; kept to preserve the original structure
        takeit = True
        op_cnt[node.type] += 1
        for a in node.node_def.attr:
            attr_cnt[a] += 1
            value = get_tf_node_attr(node, a)
            if a == "T":
                if value and not isinstance(value, list):
                    dtypes[node.name] = map_tf_dtype(value)
            elif a in TF_SUBGRAPH_ATTRS:
                # subgraph attrs (body/cond/branches) are recorded by name only
                input_shapes = [inp.get_shape() for inp in node.inputs]
                nattr = get_tf_node_attr(node, a)
                attr[a] = nattr.name
                functions[nattr.name] = input_shapes
            elif isinstance(value, tensor_pb2.TensorProto):
                # restore tensor content stripped earlier by compress_graph_def
                if const_node_values and node.name in const_node_values:
                    value.tensor_content = const_node_values[node.name]
                onnx_tensor = tf_to_onnx_tensor(value, name=port_name(node.name))
                attr[a] = onnx_tensor
        node_type = node.type
        input_names = [i.name for i in node.inputs]
        output_names = [i.name for i in node.outputs]
        if node_type == 'PlaceholderWithDefault':
            if ignore_default and node.name in ignore_default:
                node_type = 'Placeholder'
                input_names = []
            elif use_default and node.name in use_default:
                node_type = 'Identity'
            elif node.name.endswith('keras_learning_phase'):
                logger.warning("Removing optional input %s that appears to be a keras learning phase parameter. "
                               "Use --ignore_default to force this into an input.", node.name)
                node_type = 'Identity'
        if takeit:
            try:
                onnx_node = helper.make_node(node_type, input_names, output_names, name=node.name, **attr)
                onnx_nodes.append(onnx_node)
            except Exception as ex:
                logger.error("pass1 convert failed for %s, ex=%s", node, ex)
                raise
    return onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, functions
def tensorflow_to_onnx(graph, shape_override, const_node_values=None, ignore_default=None, use_default=None):
    """Load a tensorflow graph and convert it to the intermediate onnx node list."""
    return tflist_to_onnx(
        graph, shape_override, const_node_values=const_node_values,
        ignore_default=ignore_default, use_default=use_default)
| 19,760 | 40.514706 | 120 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/constants.py | # SPDX-License-Identifier: Apache-2.0
"""
common constants
"""
from onnx import helper
TF2ONNX_PACKAGE_NAME = __name__.split('.')[0]
# Built-in supported domains
ONNX_DOMAIN = ""
AI_ONNX_ML_DOMAIN = "ai.onnx.ml"
MICROSOFT_DOMAIN = "com.microsoft"
CONTRIB_OPS_DOMAIN = "ai.onnx.contrib"
# Default opset version for onnx domain.
# The current update policy is that the default should be set to
# the latest released version as of 18 months ago.
# Opset 15 was released in ONNX v1.10.0 (Jul, 2021).
PREFERRED_OPSET = 15
# Default opset for custom ops
TENSORFLOW_OPSET = helper.make_opsetid("ai.onnx.converters.tensorflow", 1)
# Built-in supported opset
AI_ONNX_ML_OPSET = helper.make_opsetid(AI_ONNX_ML_DOMAIN, 2)
# Target for the generated onnx graph. It possible targets:
# onnx-1.1 = onnx at v1.1 (winml in rs4 is based on this)
# caffe2 = include some workarounds for caffe2 and winml
TARGET_RS4 = "rs4"
TARGET_RS5 = "rs5"
TARGET_RS6 = "rs6"
TARGET_CAFFE2 = "caffe2"
TARGET_TENSORRT = "tensorrt"
TARGET_CHANNELS_LAST = "nhwc"
TARGET_CHANNELS_FIRST = "nchw"
POSSIBLE_TARGETS = [TARGET_RS4, TARGET_RS5, TARGET_RS6, TARGET_CAFFE2, TARGET_TENSORRT, TARGET_CHANNELS_LAST]
DEFAULT_TARGET = []
NCHW_TO_NHWC = [0, 2, 3, 1]
NHWC_TO_NCHW = [0, 3, 1, 2]
NDHWC_TO_NCDHW = [0, 4, 1, 2, 3]
NCDHW_TO_NDHWC = [0, 2, 3, 4, 1]
HWCN_TO_NCHW = [3, 2, 0, 1]
NCHW_TO_HWCN = [2, 3, 1, 0]
# Environment variables
ENV_TF2ONNX_DEBUG_MODE = "TF2ONNX_DEBUG_MODE"
ENV_TF2ONNX_CATCH_ERRORS = "TF2ONNX_CATCH_ERRORS"
# Mapping opset to IR version.
# Note: opset 7 and opset 8 came out with IR3 but we need IR4 because of PlaceholderWithDefault
# Refer from https://github.com/onnx/onnx/blob/main/docs/Versioning.md#released-versions
OPSET_TO_IR_VERSION = {
1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7, 13: 7, 14: 7, 15: 8, 16: 8, 17: 8, 18: 8
}
| 1,860 | 30.016667 | 119 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/graph.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.graph - class to manage graph manipulation on top of onnx
"""
import collections
import copy
import logging
import six
import numpy as np
from onnx import helper, numpy_helper, shape_inference, AttributeProto, TensorProto
from tf2onnx import utils, __version__, git_version
from tf2onnx.utils import make_name, port_name, find_opset
from tf2onnx import optimizer
from tf2onnx.schemas import get_schema, infer_onnx_shape_dtype
from tf2onnx import constants
logger = logging.getLogger(__name__)
# todo(pengwa): remove protected-access later
# pylint: disable=broad-except,protected-access
class ExternalTensorStorage():
    """Passed into graph and node methods to accumulate tensors to save externally"""
    def __init__(self):
        # tensor name -> raw bytes to be written to the external data file
        self.name_to_tensor_data = {}
        # suffix counter used to build unique external tensor names
        self.name_counter = 0
        # tensors with more elements than this are moved to external storage
        self.external_tensor_size_threshold = 1024
        # cache of Node -> already-externalized "value" AttributeProto
        self.node_to_modified_value_attr = {}
class Node(object):
"""A Node - wrapper around onnx nodes that we use for graph manipulations."""
    def __init__(self, node, graph, skip_conversion=False):
        """Create Node.
        Args:
            node: Onnx node in NodeProto
            graph: Graph() we are part of
            skip_conversion: if True, the node is carried through without op conversion
        """
        self._op = node
        self.graph = graph
        # keep local copies of the input/output lists so they can be rewritten
        self._input = list(node.input)
        self._output = list(node.output)
        self._attr = {}
        graph.set_node_by_name(self)
        # dict to original attributes
        for a in node.attribute:
            self._attr[a.name] = a
        self._skip_conversion = skip_conversion
@property
def input(self):
return self._input
@input.setter
def input(self, val):
# The setter can catch that all inputs are change
# but it cannot catch that one input is changed.
# That's method replace_input and replace_inputs must
# be used to change inputs to let the graph instance
# update its internal indices.
self._input = copy.deepcopy(val)
@property
def output(self):
return self._output
    @output.setter
    def output(self, val):
        """Set op output. Output should be updated explicitly,
        changing it would require output mapping changed.
        """
        self._graph_check()
        # drop the old output -> node mappings before installing the new list
        for o in self._output:
            del self.graph._output_to_node_name[o]
        self._output = val.copy()
        for o in self._output:
            utils.make_sure(o not in self.graph._output_to_node_name, "output %s already in output mapping", o)
            self.graph._output_to_node_name[o] = self.name
@property
def inputs(self):
"""Input node objects."""
self._graph_check()
val = [self.graph.get_node_by_output(n) for n in self._input]
return val
@property
def attr(self):
return self._attr
def get_value_attr(self, external_tensor_storage=None):
"""Return onnx attr for value property of node.
Attr is modified to point to external tensor data stored in external_tensor_storage, if included.
"""
a = self._attr["value"]
if external_tensor_storage is not None and self in external_tensor_storage.node_to_modified_value_attr:
return external_tensor_storage.node_to_modified_value_attr[self]
if external_tensor_storage is None or a.type != AttributeProto.TENSOR:
return a
if np.product(a.t.dims) > external_tensor_storage.external_tensor_size_threshold:
a = copy.deepcopy(a)
tensor_name = self.name.strip() + "_" + str(external_tensor_storage.name_counter)
for c in '~"#%&*:<>?/\\{|}':
tensor_name = tensor_name.replace(c, '_')
external_tensor_storage.name_counter += 1
external_tensor_storage.name_to_tensor_data[tensor_name] = a.t.raw_data
external_tensor_storage.node_to_modified_value_attr[self] = a
a.t.raw_data = b''
a.t.ClearField("raw_data")
location = a.t.external_data.add()
location.key = "location"
location.value = tensor_name
a.t.data_location = TensorProto.EXTERNAL
return a
    def get_onnx_attrs(self, external_tensor_storage=None):
        """Return onnx valid attributes.
        Attrs point to external tensor data stored in external_tensor_storage, if included."""
        schema = get_schema(self.type, self.graph.opset, self.domain)
        if schema is None and not (self.is_const() or self.is_graph_input()):
            logger.debug("Node %s uses non-stardard onnx op <%s, %s>, skip attribute check",
                         self.name, self.domain, self.type)
        onnx_attrs = {}
        for a in self._attr.values():
            if a.name == "value":
                onnx_attrs[a.name] = self.get_value_attr(external_tensor_storage)
            elif schema is None or schema.has_attribute(a.name):
                # attributes the schema does not know about are dropped
                onnx_attrs[a.name] = a
        return onnx_attrs
@property
def name(self):
    """Node name, taken from the wrapped NodeProto."""
    return self._op.name
def child_name(self):
    """Return a fresh unique name derived from this node's name."""
    return utils.make_name(self.name)
@property
def op(self):
    """TODO: have a better interface for this."""
    return self._op
@property
def type(self):
    """Return Op type."""
    return self._op.op_type
@type.setter
def type(self, val):
    """Set Op type."""
    self._op.op_type = val
@property
def domain(self):
    """Return Op domain."""
    return self._op.domain
@domain.setter
def domain(self, val):
    """Set Op domain."""
    self._op.domain = val
@property
def data_format(self):
    """Return data_format attribute as a str; "unkown" when the attr is absent.
    NOTE(review): "unkown" is misspelled, but it is a runtime value callers may
    compare against — do not change without auditing call sites."""
    attr_str = self.get_attr_value("data_format")
    return "unkown" if attr_str is None else attr_str.decode("utf-8")
@data_format.setter
def data_format(self, val):
    """Set data_format."""
    self.set_attr("data_format", val)
def is_nhwc(self):
    """Return True if node is in NHWC format."""
    # Formats containing 'D' (e.g. NDHWC) indicate 3 spatial dims; this helper
    # only supports the 2-spatial-dim case, hence the assertion.
    utils.make_sure('D' not in self.data_format, "is_nhwc called on %s with spatial=2 but data_format=%s",
                    self.name, self.data_format)
    return self.data_format == "NHWC"
def is_const(self):
    """Return True if node is a constant."""
    return self.type in ["Const", "ConstV2"]
def is_scalar(self):
    """Return True if node is a constant with a scalar value."""
    if not self.is_const():
        return False
    t = self.get_attr("value", default=None)
    if t is None:
        return False
    t = numpy_helper.to_array(helper.get_attribute_value(t))
    # A scalar numpy array has an empty shape tuple.
    return t.shape == tuple()
def is_graph_input(self):
    """Return True if node is one of the Placeholder variants used as a graph input."""
    return self.type in ["Placeholder", "PlaceholderWithDefault", "PlaceholderV2"]
def is_graph_input_default_const(self):
    """Return True if this const only feeds the default of a PlaceholderWithDefault."""
    return self.is_const() and any(
        out.is_graph_input() for out in self.graph.find_output_consumers(self.output[0])
    )
def is_while(self):
    """Return True for while-loop op variants (TF While/StatelessWhile or ONNX Loop)."""
    return self.type in ["While", "StatelessWhile", "Loop"]
def __str__(self):
    # Full protobuf text of the wrapped NodeProto.
    return str(self._op)
def __repr__(self):
    # Short one-line form for debugging/logging.
    return "<onnx op type='%s' name=%s>" % (self.type, self._op.name)
@property
def summary(self):
    """Return node summary information: op type, name, inputs and outputs
    with their shapes and dtypes, one item per line."""
    lines = []
    lines.append("OP={}".format(self.type))
    lines.append("Name={}".format(self.name))
    g = self.graph
    if self.input:
        lines.append("Inputs:")
        for name in self.input:
            node = g.get_node_by_output(name)
            # input may come from outside the visible graph; show N/A then
            op = node.type if node else "N/A"
            lines.append("\t{}={}, {}, {}".format(name, op, g.get_shape(name), g.get_dtype(name)))
    if self.output:
        # FIX: the header was appended inside the loop (once per output) and
        # misspelled "Outpus:"; emit it once, like the "Inputs:" header above.
        lines.append("Outputs:")
        for name in self.output:
            lines.append("\t{}={}, {}".format(name, g.get_shape(name), g.get_dtype(name)))
    return '\n'.join(lines)
def get_attr(self, name, default=None):
    """Get raw attribute value (AttributeProto), or *default* if absent."""
    attr = self.attr.get(name, default)
    return attr
def get_attr_value(self, name, default=None):
    """Get attribute value decoded via onnx helper; *default* if attr is absent."""
    attr = self.get_attr(name)
    if attr:
        return helper.get_attribute_value(attr)
    return default
def get_attr_int(self, name):
    """Get attribute value as int; asserts the attribute exists and is an int."""
    attr_int = self.get_attr_value(name)
    utils.make_sure(
        attr_int is not None and isinstance(attr_int, int),
        "attribute %s is None", name
    )
    return attr_int
def get_attr_str(self, name, encoding="utf-8"):
    """Get attribute value as string; asserts the attribute exists and is bytes."""
    attr_str = self.get_attr_value(name)
    utils.make_sure(
        attr_str is not None and isinstance(attr_str, bytes),
        "attribute %s is None", name
    )
    return attr_str.decode(encoding)
def set_attr(self, name, value):
    # helper.make_attribute infers the AttributeProto type from the python value.
    self.attr[name] = helper.make_attribute(name, value)
def set_attr_onnx(self, value):
    # value is an already-built AttributeProto; keyed by its own name.
    self.attr[value.name] = value
@property
def skip_conversion(self):
    """Whether this node should be skipped during TF->ONNX conversion."""
    return self._skip_conversion
@skip_conversion.setter
def skip_conversion(self, val):
    self._skip_conversion = val
# If some Node is created as onnx_node, then we don't need convert it
def need_skip(self):
    return self._skip_conversion
@property
def output_shapes(self):
    """Get output shapes."""
    self._graph_check()
    val = [self.graph.get_shape(n) for n in self._output]
    return val
@property
def output_dtypes(self):
    """Get output dtypes."""
    self._graph_check()
    val = [self.graph.get_dtype(n) for n in self._output]
    return val
def get_tensor_value(self, as_list=True):
    """Get value for onnx tensor.
    Args:
        as_list: whether return numpy ndarray in list.
    Returns:
        If as_list=True, return the array as a (possibly nested) list.
        Otherwise, return data of type np.ndarray.
        If a tensor is a scalar having value 1,
        when as_list=False, return np.array(1), type is <class 'numpy.ndarray'>
        when as_list=True, return 1, type is <class 'int'>.
    Raises:
        ValueError: if this node is not a Const.
    """
    if not self.is_const():
        raise ValueError("get tensor value: '{}' must be Const".format(self.name))
    t = self.get_attr("value")
    if t:
        t = numpy_helper.to_array(helper.get_attribute_value(t))
        if as_list is True:
            t = t.tolist()  # t might be scalar after tolist()
    return t
def scalar_to_dim1(self):
    """Return the dims of the const's value tensor, promoting a scalar to dims [1]."""
    if not self.is_const():
        raise ValueError("get tensor value: {} must be Const".format(self.name))
    t = self.get_attr("value")
    if t:
        t = helper.get_attribute_value(t)
        if not t.dims:
            # scalar tensors have empty dims; treat them as rank-1 of length 1
            t.dims.extend([1])
    return t.dims
def set_tensor_value(self, new_val):
    """Set new value for existing onnx tensor.
    Args:
        new_val: value of type numpy ndarray
    Raises:
        ValueError: if node is not a Const or has no "value" attribute.
    """
    if not self.is_const():
        raise ValueError("set tensor value: {} must be Const".format(self.name))
    t = self.get_attr("value")
    if not t:
        raise ValueError("set tensor value: {} is None".format(self.name))
    t = helper.get_attribute_value(t)
    # reuse the old tensor's name so output references stay valid
    onnx_tensor = numpy_helper.from_array(new_val, t.name)
    del t
    self.set_attr("value", onnx_tensor)
    # track shapes in _output_shapes
    self._graph_check()
    self.graph.set_shape(onnx_tensor.name, list(onnx_tensor.dims))
def get_body_graphs(self):
    """Return dict of {attr_name: Graph} subgraphs attached to this node, or None."""
    self._graph_check()
    return self.graph.contained_graphs.get(self.name, None)
def set_body_graph_as_attr(self, attr_name, graph):
    """Attach *graph* as the subgraph for attribute *attr_name* (e.g. Loop "body")."""
    self._graph_check()
    if self.name not in self.graph.contained_graphs:
        self.graph.contained_graphs[self.name] = {}
    self.graph.contained_graphs[self.name].update({attr_name: graph})
    graph.parent_graph = self.graph
def update_proto(self, external_tensor_storage=None):
    """Update protobuf from internal structure."""
    # repeated protobuf fields cannot be assigned; clear then re-extend
    nodes = list(self._op.input)
    for node in nodes:
        self._op.input.remove(node)
    self._op.input.extend(self.input)
    nodes = list(self._op.output)
    for node in nodes:
        self._op.output.remove(node)
    self._op.output.extend(self.output)
    # update attributes to proto
    del self._op.attribute[:]
    # check attribute of type GraphProto
    attr_graphs = self.get_body_graphs()
    if attr_graphs:
        for attr_name, sub_graph in attr_graphs.items():
            # serialize each subgraph recursively before attaching it as an attr
            graph_proto = sub_graph.make_graph("graph for " + self.name + " " + attr_name,
                                               external_tensor_storage=external_tensor_storage)
            self.set_attr(attr_name, graph_proto)
    attr = list(self.get_onnx_attrs(external_tensor_storage).values())
    if attr:
        self._op.attribute.extend(attr)
def get_implicit_inputs(self, recursive=True):
    """Get implicit inputs if the node has attributes being GraphProto.

    Returns the tensor names consumed inside this node's body graphs that are
    not produced inside those graphs (i.e. captured from an outer scope).
    """
    output_available_in_cur_graph = set()
    all_node_inputs = set()
    graphs = []
    body_graphs = self.get_body_graphs()
    if body_graphs:
        graphs.extend(body_graphs.values())
    # breadth-first walk over nested body graphs
    while graphs:
        graph = graphs.pop()
        for n in graph.get_nodes():
            output_available_in_cur_graph |= set(n.output)
            for i in n.input:
                all_node_inputs.add(i)
            if recursive:
                b_graphs = n.get_body_graphs()
                if b_graphs:
                    graphs.extend(b_graphs.values())
    # anything consumed but never produced locally must come from outside
    outer_scope_node_input_ids = all_node_inputs - output_available_in_cur_graph
    return list(outer_scope_node_input_ids)
def _graph_check(self):
    """Assert this node is still attached to a graph."""
    utils.make_sure(self.graph is not None, "Node %s not belonging any graph",
                    self.name)
def maybe_cast_input(self, supported, type_map):
    """Insert Cast ops on inputs whose dtype is not supported.
    Args:
        supported: list of supported types for inputs
        type_map: dict type to supported type mapping
    Returns:
        True if any Cast was inserted.
    """
    did_cast = False
    for i, name in enumerate(self.input):
        dtype = self.graph.get_dtype(name)
        if dtype not in supported[i]:
            tdtype = type_map.get(dtype)
            if tdtype is None:
                raise RuntimeError("don't know how to cast type {} on node {}".format(dtype, name))
            shape = self.graph.get_shape(name)
            cast_node = self.graph.insert_new_node_on_input(
                self, "Cast", name, to=tdtype)
            # Cast preserves shape; only the dtype changes
            self.graph.set_dtype(cast_node.output[0], tdtype)
            self.graph.set_shape(cast_node.output[0], shape)
            did_cast = True
    return did_cast
class Graph(object):
""""Class that provides graph manipulation and matching."""
def __init__(self, nodes, output_shapes=None, dtypes=None, target=None, opset=None, extra_opset=None,
             input_names=None, output_names=None, is_subgraph=False, graph_name=None):
    """Create Graph.
    Args:
        nodes: list of Node()
        output_shapes: dict of tensorflow output shapes
        dtypes: dict of tensorflow dtype
        target: list of workarounds to apply for specific target platforms
        opset: onnx opset number to use
        extra_opset: list of extra opset-ids (non-default domains)
        input_names: names of graph inputs; placeholders are created for missing ones
        output_names: names of graph outputs
        is_subgraph: True when this graph is the body of another node
        graph_name: optional name; autogenerated when omitted
    """
    if target is None:
        target = []
    self._nodes = []
    self._nodes_by_name = {}
    self._output_to_node_name = {}
    self._output_to_consumers = {}
    self._input_to_graph = {}
    self.shapes = {}
    self.graph_name = graph_name or utils.make_name("tf2onnx")
    self._is_subgraph = is_subgraph
    self.ta_reads = []
    # A list of index, output tuples of potential scan outputs in this graph
    # Used by the tflite while loop handler
    self.scan_outputs = []
    # Used by lstm_tf2_rewriter to indicate this subgraph is an LSTM cell
    self.lstm_rewriter_context = None
    self.gru_rewriter_context = None
    self.func_inputs = []
    self.ragged_variant_list_reads = []
    self.ragged_variant_list_writes = []
    self._dtypes = dtypes
    self._output_shapes = output_shapes
    self.set_config(target, opset, extra_opset)
    self.outputs = output_names if output_names is not None else []
    self.parent_graph = None
    self.contained_graphs = {}  # {node_name: {node_attribute_name: Graph}}
    ops = [Node(node, self) for node in nodes]
    if input_names is not None:
        input_names_set = set(input_names)
        for n in ops:
            for i, out in enumerate(n.output):
                if out in input_names_set and not n.is_graph_input():
                    # A requested input is produced by a non-placeholder node:
                    # reroute that output and create a real Placeholder for it.
                    # NOTE: ops grows while iterating; the appended Placeholders
                    # are themselves graph inputs, so the inner check skips them.
                    n.output[i] = utils.make_name("@@ALLOC")
                    ops.append(Node(helper.make_node("Placeholder", [], outputs=[out], name=out), self))
                    logger.info("Created placeholder for input %s", out)
    input_nodes = {n.output[0]: n for n in ops if n.is_graph_input()}
    if input_names is not None:
        self.inputs = [input_nodes[n] for n in input_names]
    else:
        self.inputs = list(input_nodes.values())
    self.reset_nodes(ops)
    # add identity node after each output, in case it is renamed during conversion.
    for o in self.outputs:
        n = self.get_node_by_output_in_current_graph(o)
        if n.is_graph_input():
            # Don't add identity if the node is also an input. We want to keep input names the same.
            continue
        new_output_name = port_name(n.name + "_" + utils.make_name("raw_output_"))
        n_shapes = n.output_shapes
        n_dtypes = n.output_dtypes
        o_shape = self.get_shape(o)
        o_dtype = self.get_dtype(o)
        # detach body graphs before removing, then re-attach to the re-made node
        body_graphs = n.graph.contained_graphs.pop(n.name, None)
        self.remove_node(n.name)
        new_outputs = [output if output != o else new_output_name for output in n.output]
        # domain should be passed to new node
        branches = {}
        if body_graphs:
            for attr_name, body_graph in body_graphs.items():
                body_graph.parent_graph = self
                branches[attr_name] = body_graph
        _ = self.make_node(n.type, n.input, outputs=new_outputs, attr=n.attr, name=n.name,
                           skip_conversion=n._skip_conversion, dtypes=n_dtypes, shapes=n_shapes,
                           domain=n.domain, branches=branches)
        self.replace_all_inputs(o, new_output_name, ops=self.get_nodes())
        self.make_node("Identity", [new_output_name], outputs=[o], op_name_scope=n.name + "_" + "graph_outputs",
                       dtypes=[o_dtype], shapes=[o_shape])
        self.copy_shape(new_output_name, o)
        self.copy_dtype(new_output_name, o)
def create_new_graph_with_same_config(self):
    """Create a clean graph inheriting current graph's configuration."""
    return Graph([], output_shapes={}, dtypes={}, target=self._target, opset=self._opset,
                 extra_opset=self.extra_opset, output_names=[])
def set_config(self, target=None, opset=None, extra_opset=None):
    """Set graph fields containing conversion options"""
    if target is None:
        target = constants.DEFAULT_TARGET
    self._opset = find_opset(opset)
    self._target = set(target)
    if extra_opset is not None:
        utils.make_sure(isinstance(extra_opset, list), "invalid extra_opset")
    self._extra_opset = extra_opset
@property
def input_names(self):
    """Placeholder node outputs"""
    return [node.output[0] for node in self.inputs]
@property
def opset(self):
    """ONNX opset number targeted by this conversion."""
    return self._opset
@property
def extra_opset(self):
    """Extra opset-ids (non-default domains), or None."""
    return self._extra_opset
def is_target(self, *names):
    """Return True if target platform contains any name."""
    return any(name in self._target for name in names)
def make_consts(self, values, np_type=np.int64, skip_conversion=False, raw=True):
    """Create one const node per entry of *values*, all cast to *np_type*.

    Returns the list of created Const nodes, in input order.
    """
    return [
        self.make_const(utils.make_name("const"),
                        np.array(value).astype(np_type),
                        skip_conversion, raw)
        for value in values
    ]
def make_const(self, name, np_val, skip_conversion=False, raw=True):
    """Make a new constant in the graph.
    Args:
        name: const node name, must be unique.
        np_val: value of type numpy ndarray.
        skip_conversion: bool, indicate whether this created node would be mapped during conversion.
        raw: whether to store data at field of raw_data or the specific field according to its dtype
    Returns:
        the created Const Node.
    """
    np_val_flat = np_val.flatten()
    # bytes (string tensors) cannot be stored via raw_data; fall back to typed fields
    is_bytes = np_val.dtype == object and len(np_val_flat) > 0 and isinstance(np_val_flat[0], bytes)
    if raw and not is_bytes:
        onnx_tensor = numpy_helper.from_array(np_val, name)
    else:
        onnx_tensor = helper.make_tensor(name, utils.map_numpy_to_onnx_dtype(np_val.dtype),
                                         np_val.shape, np_val_flat, raw=False)
    dtype = onnx_tensor.data_type
    # infer_shape_dtype=False: shape/dtype are set explicitly right below
    node = self.make_node("Const", [], outputs=[name], name=name, attr={"value": onnx_tensor},
                          skip_conversion=skip_conversion, dtypes=[dtype], infer_shape_dtype=False)
    self.set_shape(name, np_val.shape)
    self.set_dtype(name, utils.map_numpy_to_onnx_dtype(np_val.dtype))
    return node
def copy_const(self, node, name=None):
    """Copy a const node, using name if specified"""
    # TODO: support attr copy starting at opset 12
    new_name = utils.make_name(node.name) if name is None else name
    tensor_value = node.get_tensor_value(as_list=False)
    return self.make_const(new_name, tensor_value)
def make_node(self, op_type, inputs, attr=None, output_count=1, outputs=None, skip_conversion=True,
              op_name_scope=None, name=None, shapes=None, dtypes=None, domain=constants.ONNX_DOMAIN,
              infer_shape_dtype=True, branches=None):
    """Make a new onnx node in the graph.
    Args:
        op_type: onnx op type
        inputs: list of input tensor names
        attr: dict of attributes; values may be raw python values or AttributeProto
        output_count: number of outputs to generate when *outputs* is None
        outputs: explicit output tensor names (overrides output_count)
        skip_conversion: mark node as already-converted (forced False for If/Loop/Scan)
        op_name_scope: optional prefix for the generated node name
        name: node name; autogenerated from op_type when omitted
        shapes/dtypes: per-output shape/dtype to register
        domain: op domain
        infer_shape_dtype: run shape/dtype inference when shapes/dtypes not given
        branches: dict of {attr_name: Graph} body graphs to attach
    Returns:
        the created Node.
    """
    if attr is None:
        attr = {}
    if shapes is None:
        shapes = []
    if dtypes is None:
        dtypes = []
    if branches is None:
        branches = {}
    if name is None:
        name = utils.make_name(op_type)
    if op_name_scope:
        name = "_".join([op_name_scope, name])
    logger.debug("Making node: Name=%s, OP=%s", name, op_type)
    if outputs is None:
        outputs = [name + ":" + str(i) for i in range(output_count)]
    output_count = len(outputs)
    # split attrs into already-built AttributeProtos and raw python values
    raw_attr = {}
    onnx_attrs = []
    for a, v in attr.items():
        if isinstance(v, AttributeProto):
            onnx_attrs.append(v)
        else:
            raw_attr[a] = v
    n = self.get_node_by_name(name)
    utils.make_sure(n is None, "name %s already exists in node: \n%s", name, n)
    for o in outputs:
        n = self.get_node_by_output_in_current_graph(o)
        utils.make_sure(n is None, "output tensor named %s already exists in node: \n%s", o, n)
    onnx_node = helper.make_node(op_type, inputs, outputs, name=name, domain=domain, **raw_attr)
    for name2 in onnx_node.input:
        self._register_input_name(name2, onnx_node)
    if op_type in ["If", "Loop", "Scan"]:
        # we force the op containing inner graphs not skipped during conversion.
        skip_conversion = False
    node = Node(onnx_node, self, skip_conversion=skip_conversion)
    if onnx_attrs:
        _ = [node.set_attr_onnx(a) for a in onnx_attrs]
    for branch, body in branches.items():
        node.set_body_graph_as_attr(branch, body)
    if shapes:
        utils.make_sure(len(shapes) == output_count,
                        "output shape count %s not equal to output count %s", len(shapes), output_count)
        for i in range(output_count):
            self.set_shape(node.output[i], shapes[i])
    if dtypes:
        utils.make_sure(len(dtypes) == output_count,
                        "output dtypes count %s not equal to output count %s", len(dtypes), output_count)
        for i in range(output_count):
            self.set_dtype(node.output[i], dtypes[i])
    if (not shapes or not dtypes) and infer_shape_dtype:
        self.update_node_shape_dtype(node, override=False)
    logger.debug("Made node: %s\n%s", node.name, node.summary)
    self._nodes.append(node)
    return node
def append_node(self, node):
    """Add a node to the graph, registering its outputs, shapes, dtypes and inputs."""
    output_shapes = node.output_shapes
    output_dtypes = node.output_dtypes
    node.graph = self
    self._nodes.append(node)
    self._nodes_by_name[node.name] = node
    for i, name in enumerate(node.output):
        self._output_to_node_name[name] = node.name
        self.set_dtype(name, output_dtypes[i])
        self.set_shape(name, output_shapes[i])
    for name in node.input:
        self._register_input_name(name, node)
def remove_node(self, node_name):
    """Remove node in current graph, unregistering all its bookkeeping entries."""
    utils.make_sure(node_name in self._nodes_by_name, "node %s not in current graph, cannot remove", node_name)
    node = self.get_node_by_name(node_name)
    del self._nodes_by_name[node_name]
    if node_name in self.contained_graphs:
        del self.contained_graphs[node_name]
    if node in self.inputs:
        self.inputs.remove(node)
    for op_output in node.output:
        # empty output names denote unused optional outputs
        if op_output == "":
            continue
        del self._output_to_node_name[op_output]
        if op_output in self._output_shapes:
            del self._output_shapes[op_output]
        if op_output in self._dtypes:
            del self._dtypes[op_output]
    for op_input in node.input:
        if op_input == "":
            continue
        utils.make_sure(
            op_input in self._output_to_consumers,
            "Input %r of node %r not found.", op_input, node_name)
        self._unregister_input_name(op_input, node)
    self._nodes.remove(node)
    node.graph = None
def reset_nodes(self, ops):
    """Reset the graph with node list.

    Rebuilds all name/output indices from *ops*, keeping only the dtype,
    shape and subgraph entries whose producers are still present.
    Raises ValueError if a graph input or output no longer exists.
    """
    remained_dtypes = {}
    remained_shapes = {}
    remained_sub_graphs = {}
    for op in ops:
        for op_output in op.output:
            # this check should be removed once we make sure all output tensors have dtype/shape.
            if op_output in self._dtypes:
                remained_dtypes[op_output] = self._dtypes[op_output]
            if op_output in self._output_shapes:
                remained_shapes[op_output] = self._output_shapes[op_output]
        if op.name in self.contained_graphs:
            remained_sub_graphs[op.name] = self.contained_graphs[op.name]
    self._nodes = ops
    self.contained_graphs = remained_sub_graphs
    self._nodes_by_name = {op.name: op for op in ops}
    self._output_to_node_name = {}
    self._output_to_consumers = {}
    for op in ops:
        for op_output in op.output:
            self._output_to_node_name[op_output] = op.name
        inps = op.input
        for op_input in inps:
            self._register_input_name(op_input, op)
    for n in self.inputs:
        if n not in ops:
            raise ValueError("graph input '" + n.name + "' not exist")
    for o in self.outputs:
        if o not in self._output_to_node_name:
            # FIX: o is a tensor name (str), not a Node; the original "o.name"
            # raised AttributeError instead of the intended ValueError.
            raise ValueError("graph output '" + o + "' not exist")
    self._dtypes = remained_dtypes
    self._output_shapes = remained_shapes
def is_empty_input(self, name):
    # in ONNX, operation may have optional input and an empty string may be used
    # in the place of an actual argument's name to indicate a missing argument
    return name == utils.ONNX_EMPTY_INPUT
def check_integrity(self):
    """
    Check graph integrity. Every node's input needs to associate with a node.
    Return broken outputs.
    """
    # collect every referenced input that has no producer and is not an
    # intentionally-empty optional input
    broken = {
        inp
        for node in self.get_nodes()
        for inp in node.input
        if not self.is_empty_input(inp) and self.get_node_by_output(inp) is None
    }
    return list(broken)
def update_node_shape_dtype(self, node, override=False):
    """Try the best to infer shapes and dtypes for outputs of the node,
    by default, we respect TF shapes and dtypes.

    Args:
        node: Node whose outputs should be inferred.
        override: when True, inferred values replace already-known ones.
    """
    if node.is_const() or node.is_graph_input():
        return
    # NOTE: only support onnx node for now
    if not utils.is_onnx_domain(node.domain):
        return
    logger.debug("Infer shape and dtype for [%s]", node.name)
    # NOTE: shape inference for some ops need the input values of the op, e.g., Reshape
    # op needs the "Shape" value to infer output shape.
    initializers = []
    for i, inp in enumerate(node.inputs):
        if inp is None:
            if not self.is_empty_input(node.input[i]):
                # NOTE(review): guard checks INFO level but logs at WARNING — confirm intent
                if logger.isEnabledFor(logging.INFO):
                    logger.warning(
                        "[%s] infer a inexistent node: [%s], please check the code",
                        node.name, node.input[i]
                    )
            continue
        if inp.is_const():
            t = inp.get_attr("value")
            tensor = helper.get_attribute_value(t)
            tensor.name = inp.output[0]
            initializers.append(tensor)
    input_shapes = [self.get_shape(i) for i in node.input]
    input_dtypes = [self.get_dtype(i) for i in node.input]
    shapes, dtypes = infer_onnx_shape_dtype(node, self._opset, input_shapes, input_dtypes, initializers)
    if not shapes or not dtypes:
        return
    for output, shape, dtype in zip(node.output, shapes, dtypes):
        if dtype == TensorProto.UNDEFINED:
            logger.debug("Inferred dtype for [%s, type: %s] is UNDEFINED, SKIP", node.name, node.type)
        else:
            existing_dtype = self.get_dtype(output)
            # keep the existing dtype unless override was requested
            if existing_dtype is not None and existing_dtype != dtype and not override:
                dtype = existing_dtype
            self.set_dtype(output, dtype)
            logger.debug("Set dtype of [%s] to %s", output, dtype)
        if shape is None:
            logger.debug("Inferred shape for [%s, type: %s] is None, SKIP", node.name, node.type)
        else:
            existing_shape = self.get_shape(output)
            # keep the existing shape unless override was requested
            if existing_shape is not None and not utils.are_shapes_equal(existing_shape, shape) and not override:
                shape = existing_shape
            self.set_shape(output, shape)
            logger.debug("Set shape of [%s] to %s", output, shape)
def update_proto(self, external_tensor_storage=None):
    """Update the onnx protobuf from out internal Node structure."""
    for node in self._nodes:
        node.update_proto(external_tensor_storage)
def get_nodes(self):
    """Get node list."""
    return self._nodes
def get_node_by_output(self, output, search_in_parent_graphs=True):
    """Get node by node output id recursively going through nested graphs.
    Args:
        output: tensor name to look up
        search_in_parent_graphs: search in all parent graphs
    Returns:
        the producing Node, or None if not found.
    """
    graph = self
    while graph:
        found = graph.get_node_by_output_in_current_graph(output)
        if found:
            return found
        if not search_in_parent_graphs:
            return None
        # walk outward: subgraphs may reference tensors from enclosing scopes
        graph = graph.parent_graph
    return None
def get_node_by_output_in_current_graph(self, output):
    """Get node by node output id, searching only this graph (no parents)."""
    name = self._output_to_node_name.get(output)
    ret = None
    if name:
        ret = self._nodes_by_name.get(name)
    return ret
def get_node_by_name(self, name):
    """Get node by name, or None if absent."""
    ret = self._nodes_by_name.get(name)
    return ret
def set_node_by_name(self, node):
    """Set node by name, registering its outputs and inputs in the indices."""
    self._nodes_by_name[node.name] = node
    for op_output in node.output:
        self._output_to_node_name[op_output] = node.name
    for name in node.input:
        self._register_input_name(name, node)
def is_const(self, output):
    """Return True if the producer of *output* is a constant node."""
    return self.get_node_by_output(output).is_const()
def get_tensor_value(self, output, as_list=True):
    """Return the constant value of tensor *output* (see Node.get_tensor_value)."""
    return self.get_node_by_output(output).get_tensor_value(as_list)
def rename_tensors(self, tensors_to_rename):
    """Replace tensor names within nodes and graph inputs/outputs
    Args:
        tensors_to_rename: dict mapping old tensor name -> new tensor name
    """
    def rename_list(l):
        # names not in the map are kept unchanged
        return [tensors_to_rename.get(t, t) for t in l]
    def rename_keys(d):
        return {tensors_to_rename.get(k, k): v for k, v in d.items()}
    self._output_to_node_name = rename_keys(self._output_to_node_name)
    self._output_to_consumers = rename_keys(self._output_to_consumers)
    self._dtypes = rename_keys(self._dtypes)
    self._output_shapes = rename_keys(self._output_shapes)
    self.outputs = rename_list(self.outputs)
    for node in self._nodes:
        # mutate private fields directly so no re-registration logic runs
        node._input = rename_list(node._input)
        node._output = rename_list(node._output)
def change_node_name(self, node, new_name):
    """Rename *node* by removing it and re-creating it under *new_name*.
    Graph outputs and all consumers are updated to the new output names.
    Returns the new Node.
    """
    utils.make_sure(new_name not in self._nodes_by_name, "node %s not unique ", new_name)
    dtypes = node.output_dtypes
    shapes = node.output_shapes
    self.remove_node(node.name)
    new_node = self.make_node(node.type, node.input, output_count=len(node.output),
                              attr=node.attr, dtypes=dtypes, shapes=shapes, name=new_name)
    for i, old_output in enumerate(node.output):
        new_output = port_name(new_name, i)
        for j, k in enumerate(self.outputs):
            if k == old_output:
                self.outputs[j] = new_output
                break
        self.replace_all_inputs(old_output, new_output, ops=self.get_nodes())
    return new_node
def add_graph_input(self, name, dtype=None, shape=None):
    """Add placeholder node as graph's input. Order matters only for subgraph.
    Placeholders in original graph are assumed for main graph, order not matters.
    """
    if dtype is None:
        dtype = self.get_dtype(name)
    if shape is None:
        shape = self.get_shape(name)
    new_node = self.make_node("Placeholder", [], outputs=[name], dtypes=[dtype], shapes=[shape])
    self.inputs.append(new_node)
def add_graph_input_with_default(self, name, default_const, dtype=None, shape=None):
    """Add placeholderwithdefault.
    NOTE: *default_const* is mutated — its output is renamed to feed the placeholder.
    """
    if dtype is None:
        dtype = self.get_dtype(name)
    if shape is None:
        shape = self.get_shape(name)
    default_const_name = port_name(make_name("{}_default".format(name)))
    default_const.output = [default_const_name]
    new_node = self.make_node("PlaceholderWithDefault", [default_const_name], outputs=[name],
                              dtypes=[dtype], shapes=[shape])
    self.inputs.append(new_node)
def add_graph_output(self, name, dtype=None, shape=None):
    """Add node output as graph's output.
    Raises ValueError if *name* is already a graph output.
    """
    utils.make_sure(name in self._output_to_node_name, "output %s not exist in the graph", name)
    if dtype is None:
        dtype = self.get_dtype(name)
    if shape is None:
        shape = self.get_shape(name)
    if name not in self.outputs:
        utils.make_sure(shape is not None, "shape for output %s should not be None", name)
        utils.make_sure(dtype is not None, "dtype for output %s should not be None", name)
        self.outputs.append(name)
        self.set_shape(name, shape)
        self.set_dtype(name, dtype)
    else:
        raise ValueError("graph output " + name + " already exists")
def get_dtype(self, name):
    """Get dtype for node output *name*, searching parent graphs; None if unknown."""
    node = self.get_node_by_output(name, search_in_parent_graphs=True)
    # dtype is stored on the graph that actually owns the producing node
    return node.graph._dtypes.get(name) if node else None
def set_dtype(self, name, dtype):
    """Set dtype for node output *name* on its owning graph."""
    node = self.get_node_by_output(name, search_in_parent_graphs=True)
    node.graph._dtypes[name] = dtype
def copy_dtype(self, src_name, dst_name):
    """Copy dtype from another node."""
    dtype = self.get_dtype(src_name)
    self.set_dtype(dst_name, dtype)
def get_shape(self, name):
    """Get shape for node output *name*; unknown dims are normalized to -1."""
    utils.make_sure(isinstance(name, six.text_type), "get_shape name is invalid type: %s", name)
    node = self.get_node_by_output(name, search_in_parent_graphs=True)
    shape = node.graph._output_shapes.get(name) if node else None
    if shape:
        for i, v in enumerate(shape):
            if v is None:
                # pylint: disable=unsupported-assignment-operation
                shape[i] = -1
        # hack to allow utils.ONNX_UNKNOWN_DIMENSION to override batchsize if needed.
        # default is -1.
        if shape[0] == -1:
            # pylint: disable=unsupported-assignment-operation
            shape[0] = utils.ONNX_UNKNOWN_DIMENSION
        return shape
    return shape
def get_rank(self, name):
    """Returns len(get_shape(name)) or None if shape is None"""
    shape = self.get_shape(name)
    if shape is None:
        return None
    return len(shape)
def set_shape(self, name, val):
    """Set new shape of node output *name* (list; ndarray/tuple are converted)."""
    if isinstance(val, np.ndarray):
        val = val.tolist()
    if isinstance(val, tuple):
        val = list(val)
    node = self.get_node_by_output(name, search_in_parent_graphs=True)
    utils.make_sure(node is not None, "cannot find node by output id %s", name)
    node.graph._output_shapes[name] = val
def copy_shape(self, input_name, output_name):
    """Copy shape from another node."""
    shape = self.get_shape(input_name)
    # assert shape is not None
    if shape is not None:
        self.set_shape(output_name, shape)
def topological_sort(self, ops):
    """Topological sort of graph.

    Builds a producer->consumer adjacency over *ops*, runs an iterative DFS
    with cycle detection, and resets the graph's node list in sorted order.
    """
    # sort by name, the result will be reversed alphabeta
    ops.sort(key=lambda op: op.name)
    def _push_stack(stack, node, in_stack):
        stack.append(node)
        if node in in_stack:
            # revisiting a node currently on the DFS stack means a cycle
            raise ValueError('Graph has cycles, node.name=%r.' % ops[node].name)
        in_stack[node] = True
    def _get_unvisited_child(g, node, not_visited):
        for child in g[node]:
            if child in not_visited:
                return child
        return -1
    n = len(ops)
    g = [[] for _ in range(n)]
    op_name_to_index = {}
    for i, op in enumerate(ops):
        op_name_to_index[op.name] = i
    for i, op in enumerate(ops):
        all_input = set(op.input)
        # body-graph captures count as dependencies too
        implicit_inputs = op.get_implicit_inputs()
        all_input |= set(implicit_inputs)
        # remove those empty inputs
        all_input = list(filter(lambda a: a != '', all_input))
        for inp in sorted(all_input):
            j = self.get_node_by_output(inp)
            utils.make_sure(j is not None, "Cannot find node with output %r in graph %r", inp, self.graph_name)
            if self.parent_graph and j.name not in op_name_to_index:
                # there might be some outer-scoped inputs for an inner Graph.
                pass
            else:
                # edge producer(j) -> consumer(i)
                g[op_name_to_index[j.name]].append(i)
    # label for each op. highest = sink nodes.
    label = [-1 for _ in range(n)]
    stack = []
    in_stack = dict()
    not_visited = dict.fromkeys(range(n))
    label_counter = n - 1
    while not_visited:
        node = list(not_visited.keys())[0]
        _push_stack(stack, node, in_stack)
        while stack:
            node = _get_unvisited_child(g, stack[-1], not_visited)
            if node != -1:
                _push_stack(stack, node, in_stack)
            else:
                # all children visited: assign label in reverse post-order
                node = stack.pop()
                in_stack.pop(node)
                not_visited.pop(node)
                label[node] = label_counter
                label_counter -= 1
    # labels are unique, so the tuple sort never compares ops themselves
    ret = [x for _, x in sorted(zip(label, ops))]
    self.reset_nodes(ret)
def make_graph(self, doc, graph_name=None, external_tensor_storage=None):
    """
    Create GraphProto for onnx from internal graph.
    Args:
        doc: text for doc string of the graph
        graph_name: optional name override for the GraphProto
        external_tensor_storage: when set, large tensors are stored externally
    """
    graph_name = graph_name or self.graph_name
    self.delete_unused_nodes(self.outputs)
    self.topological_sort(self.get_nodes())
    self.update_proto(external_tensor_storage)
    # TODO: we'd want to do something like this so that transpose optimizer is active
    # for all (unit) tests
    # if optimize:
    #     from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
    #     optimizer = TransposeOptimizer(self, False)
    #     optimizer.optimize()
    # partition nodes: consts become initializers, placeholders become inputs
    ops = []
    const_ops = []
    graph_inputs = self.inputs.copy()
    for op in self.get_nodes():
        if op.is_const():
            const_ops.append(op)
        elif op.is_graph_input():
            if op not in graph_inputs:
                graph_inputs.append(op)
        else:
            ops.append(op)
    # create initializers for placeholder with default nodes
    initializers = []
    placeholder_default_const_ops = []
    for op in graph_inputs:
        if op.type == "PlaceholderWithDefault":
            utils.make_sure(op.inputs[0] is not None, "Cannot find node with output {}".format(op.input[0]))
            utils.make_sure(op.inputs[0].is_const(),
                            "non-const default value for PlaceholderWithDefault node '%s' is not supported. "
                            "Use the --use_default or --ignore_default flags to convert this node.", op.name)
            # copy the tensor value, set its name to current node's output, add as initializer
            value = op.inputs[0].get_tensor_value(as_list=False)
            tensor = numpy_helper.from_array(value, op.output[0])
            initializers.append(tensor)
            placeholder_default_const_ops.append(op.inputs[0])
    # create initializers for constant nodes
    const_ops = [op for op in const_ops if op not in placeholder_default_const_ops]
    for op in const_ops:
        # not to use numpy_helper.from_array to create a new tensor
        # because sometimes onnx will have a bug that only check the tensor data in specific field
        # such as at upsample it only checks the float_data field.
        t = op.get_value_attr(external_tensor_storage)
        tensor = helper.get_attribute_value(t)
        tensor.name = op.output[0]
        initializers.append(tensor)
    # create input_tensor_values
    input_ids = [op.output[0] for op in graph_inputs]
    # onnx with IR version below 4 requires initializer should be in inputs.
    # here we check opset version rather than IR version for the reason:
    # https://github.com/onnx/tensorflow-onnx/pull/557
    # opset 9 come with IR 4.
    if self.opset < 9:
        input_ids += [op.output[0] for op in const_ops]
    input_tensor_values = self.make_onnx_graph_io(input_ids)
    # create output_tensor_values
    output_tensor_values = self.make_onnx_graph_io(self.outputs)
    tensor_value_info = []
    for op in ops:
        if op.domain in [constants.ONNX_DOMAIN, constants.AI_ONNX_ML_DOMAIN]:
            continue
        # We still don't 100% trust the accuracy of all the shapes in graph.py, but for custom ops they are
        # almost certainly accurate and onnx has no other way of knowing them.
        for out in op.output:
            if out == '' or out in self.outputs:
                continue
            dtype = self.get_dtype(out)
            shape = self.get_shape(out)
            v = utils.make_onnx_inputs_outputs(out, dtype, shape)
            tensor_value_info.append(v)
    # create graph proto
    graph = helper.make_graph([op.op for op in ops],
                              graph_name,
                              input_tensor_values,
                              output_tensor_values,
                              initializer=initializers,
                              doc_string=doc,
                              value_info=tensor_value_info)
    return graph
def make_model(self, graph_doc, optimize=False, graph_name="tf2onnx", external_tensor_storage=None, **kwargs):
"""
Create final ModelProto for onnx from internal graph.
Args:
optimize: optimize graph via onnx
doc: text for doc string of the model
"""
graph = self.make_graph(graph_doc, graph_name, external_tensor_storage)
if "producer_name" not in kwargs:
kwargs = {
"producer_name": "tf2onnx",
"producer_version": __version__ + " " + git_version[:6]
}
if "opset_imports" not in kwargs:
opsets = [helper.make_opsetid(constants.ONNX_DOMAIN, self._opset)]
opsets.append(constants.AI_ONNX_ML_OPSET)
if self.extra_opset is not None:
opsets.extend(self.extra_opset)
kwargs["opset_imports"] = opsets
model_proto = helper.make_model(graph, **kwargs)
utils.make_sure(self.opset in constants.OPSET_TO_IR_VERSION,
"Opset %s is not supported yet. Please use a lower opset" % self.opset)
# set the IR version based on opset
try:
model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.opset, model_proto.ir_version)
except: # pylint: disable=bare-except
logger.error("ir_version override failed - install the latest onnx version")
# optimize the model proto.
# TODO: this is disabled by default because of bugs in fuse_consecutive_transposes
if optimize:
model_proto = optimizer.optimize(model_proto)
return model_proto
def make_onnx_graph_io(self, ids):
    """Create tensor_value_info for passed input/output ids."""
    def _value_info(name):
        # Build one ValueInfoProto; dtype is mandatory, shape may be unknown.
        dtype = self.get_dtype(name)
        shape = self.get_shape(name)
        utils.make_sure(dtype is not None, "missing output dtype for " + name)
        # TODO: allow None output shape or not? e.g. shape=(?,)
        if shape is None:
            logger.warning("missing output shape for %s", name)
        return utils.make_onnx_inputs_outputs(name, dtype, shape)

    return [_value_info(name) for name in ids]
def dump_graph(self):
    """Dump graph with shapes (helpful for debugging)."""
    for node in self.get_nodes():
        # render each input as "name[shape]" for the debug line
        shaped_inputs = ("{}{}".format(inp, self.get_shape(inp)) for inp in node.input)
        out_shape = self.get_shape(node.output[0])
        logger.debug("%s %s %s %s", node.type, out_shape, node.name, ", ".join(shaped_inputs))
def follow_inputs(self, node, num, space=""):
    """Follow inputs for (helpful for debugging)."""
    val = []
    # top is True only on the outermost call; decides whether to print or return
    top = space == ""
    if num == 0:
        return []
    val.append("{}{} {} {}".format(space, node.type, node.name, self.get_shape(port_name(node.name))))
    space += " "
    # recurse into producers, decrementing the remaining depth budget
    for j in node.inputs:
        val.extend(self.follow_inputs(j, num - 1, space))
    if top:
        # outermost call prints the collected trace bottom-up and returns nothing
        print("\n".join(reversed(val)))
        print()
        return []
    return val
def dump_node_statistics(self, include_attrs=False, include_subgraphs=True):
    """Return a counter of op types (and optionally attribute names) within the graph"""
    op_counter = collections.Counter()
    attr_counter = collections.Counter()
    for node in self.get_nodes():
        op_counter[node.type] += 1
        attr_counter.update(node.attr.keys())
        subgraphs = node.get_body_graphs()
        if subgraphs and include_subgraphs:
            # fold the statistics of every control-flow body graph into ours
            for sub_graph in subgraphs.values():
                sub_ops, sub_attrs = sub_graph.dump_node_statistics(include_attrs=True, include_subgraphs=True)
                op_counter.update(sub_ops)
                attr_counter.update(sub_attrs)
    if include_attrs:
        return op_counter, attr_counter
    return op_counter
def remove_input(self, node, to_be_removed, input_index=None):
    """Remove input from Node.
    Args:
        node: the node we expect the input on
        to_be_removed: the node name we want to remove
        input_index: if not None, index of the input to be removed,
            the method is more efficient if *input_index* is specified,
            otherwise, it has to look for every input named *old_input*.
    """
    assert isinstance(node, Node) and isinstance(to_be_removed, six.text_type)
    if input_index is not None:
        # fast path: the caller told us exactly which input slot to drop
        assert node.input[input_index] == to_be_removed
        if node.input[input_index] in self._output_to_consumers:
            to_ops = self._output_to_consumers[node.input[input_index]]
            if node.name in to_ops:
                to_ops.remove(node.name)
        del node.input[input_index]
        return
    # slow path: scan for the first input with the given name
    for i, name in enumerate(node.input):
        if name == to_be_removed:
            utils.make_sure(
                node.input.count(node.input[i]) <= 1,
                "Node %r takes multiple times the same input %r. This case is not handled.",
                node.name, node.input[i])
            self._unregister_input_name(node.input[i], node)
            del node.input[i]
            break
    # don't remove output from parent since others might depend on it
def insert_new_node_on_input(self, node, op_type, input_name, name=None, domain=None, input_index=None, **kwargs):
    """Create and insert a new node into the graph.
    Args:
        node: we want to replace the input for this node
        op_type: type for new operation
        input_name: the name(s) of the outputs above us
            if scalar, new node placed above input_name
            if list, new node placed above input_name[0]. list is inputs into new node
        name: the name of the new op
        kwargs: attributes of the new node
    Returns:
        node that was inserted
    """
    if name is None:
        name = utils.make_name(node.name)
    new_output = port_name(name)
    if not isinstance(input_name, list):
        input_name = [input_name]
    new_node = self.make_node(op_type, input_name, attr=kwargs, outputs=[new_output], name=name, domain=domain)
    if input_index is None:
        # rewire the first input of *node* that matches input_name[0]
        for i, n in enumerate(node.input):
            if n == input_name[0]:
                self.replace_input(node, node.input[i], new_output, i)
                break
    else:
        self.replace_input(node, node.input[input_index], new_output, input_index)
    return new_node
def insert_node_on_output(self, node, output_name=None):
    """
    The inserted node takes the *output_name* as input and produces a
    new output. The function goes through every node taking *output_name*
    and replaces it by the new output name.
    """
    if output_name is None:
        # default to the node's first input, i.e. the tensor being intercepted
        output_name = node.input[0]
    new_output = node.output[0]
    # rewire every consumer of output_name except the inserted node itself
    to_replace = [self.get_node_by_name(n) for n in self._output_to_consumers[output_name]]
    to_replace = [n for n in to_replace if n != node]
    self.replace_all_inputs(output_name, new_output, ops=to_replace)
    return node
def insert_new_node_on_output(self, op_type, output_name=None, name=None, inputs=None, domain=None, **kwargs):
    """Create and insert a new node into the graph.
    It then calls insert_node_on_output.
    Args:
        op_type: type for new operation
        output_name: the names of the outputs above us
        name: the name of the new op
        kwargs: attributes of the new node
    Returns:
        node that was inserted
    """
    utils.make_sure(isinstance(output_name, six.text_type), "output_name's type is not expected: %s",
                    type(output_name))
    utils.make_sure(isinstance(op_type, six.text_type), "op_type's type is not expected: %s",
                    type(op_type))
    utils.make_sure(output_name is not None, "output_name cannot be None for op_type=%r.", op_type)
    if inputs is None:
        # by default the new node consumes the tensor it is being inserted on
        inputs = [output_name]
    if name is None:
        name = utils.make_name(op_type)
    new_output = port_name(name)
    new_node = self.make_node(op_type, inputs, attr=kwargs, outputs=[new_output], name=name, domain=domain)
    return self.insert_node_on_output(new_node, output_name)
def find_output_consumers(self, output_name):
    """Find all nodes consuming a given output."""
    if output_name in self._output_to_consumers:
        # resolve registered consumer names to Node objects
        ops = self._output_to_consumers[output_name]
        ops = [self.get_node_by_name(n) for n in ops]
    else:
        ops = []  # self.get_nodes()
    nodes = []
    for node in ops:
        if node is None:
            continue
        # double-check the node really still lists output_name as an input
        if output_name in node.input:
            nodes.append(node)
    # find consumers in sub graphs
    if output_name in self._input_to_graph:
        for g in self._input_to_graph[output_name].values():
            nodes.extend(g.find_output_consumers(output_name))
    return nodes
def _register_input_name(self, input_name, node, only_graph=False):
    "Register node taking a specific input."
    if not only_graph:
        if input_name not in self._output_to_consumers:
            self._output_to_consumers[input_name] = set()
        self._output_to_consumers[input_name].add(node.name)
    if self.parent_graph is not None:
        # propagate upwards so the parent knows this subgraph consumes input_name
        if input_name not in self.parent_graph._input_to_graph:
            self.parent_graph._input_to_graph[input_name] = {}
        self.parent_graph._input_to_graph[input_name][id(self)] = self
        self.parent_graph._register_input_name(input_name, node, only_graph=True)
def _unregister_input_name(self, input_name, node, only_graph=False):
"Unregister node taking a specific input."
node_name = node.name
if not only_graph:
if input_name in self._output_to_consumers[input_name]:
if node_name in self._output_to_consumers[input_name]:
self._output_to_consumers[input_name].remove(node_name)
if (self.parent_graph is not None and
input_name in self.parent_graph._input_to_graph and
id(self) in self.parent_graph._input_to_graph[input_name]):
del self.parent_graph._input_to_graph[input_name][id(self)]
self.parent_graph._unregister_input_name(input_name, node, only_graph=True)
def replace_all_inputs(self, old_input, new_input, ops=None):
    """
    Replace all inputs pointing to old_input with new_input.
    *ops* is used if defined, otherwise `_output_to_consumers`
    is used to determine the impacted nodes.
    """
    if old_input == new_input:
        return
    if new_input not in self._output_to_consumers:
        # make sure the new name has a consumer set before registering into it
        self._output_to_consumers[new_input] = set()
    if ops is not None:
        keep_ops = True
    elif old_input in self._output_to_consumers:
        # look up the impacted nodes from the consumer map, dropping stale names
        ops = list(
            filter(lambda a: a is not None,
                   map(self.get_node_by_name, self._output_to_consumers[old_input])))
        keep_ops = False
    else:
        ops = []
        keep_ops = False
    for node in ops:
        assert node is not None
        if old_input in node.input and new_input in node.output:
            raise RuntimeError("creating a circle in the graph is not allowed: " + node.name)
        self._register_input_name(new_input, node)
        for i, input_name in enumerate(node.input):
            if input_name == old_input:
                self.replace_input(node, node.input[i], new_input, i)
    # modify references in sub graphs
    if old_input in self._input_to_graph:
        for g in self._input_to_graph[old_input].values():
            g.replace_all_inputs(old_input, new_input,
                                 ops=g.get_nodes() if keep_ops else None)
def replace_input(self, node, old_input, new_input, input_index=None):
    """
    Replace one input in a node.
    The method is more efficient if *input_index* is specified.
    Otherwise, it renames every output named *old_input*.
    """
    assert isinstance(node, Node) and isinstance(old_input, six.text_type) and isinstance(new_input, six.text_type)
    is_replaced = False
    if input_index is None:
        # scan path: replace every occurrence of old_input
        for i, input_name in enumerate(node.input):
            if input_name == old_input:
                node.input[i] = new_input
                is_replaced = True
    elif node.input[input_index] == old_input:
        node.input[input_index] = new_input
        is_replaced = True
    else:
        raise RuntimeError("Unable to replace input %r into %r for node %r." % (old_input, new_input, node.name))
    # keep the consumer bookkeeping in sync with the rename
    to_ops = self._output_to_consumers.get(old_input, None)
    if to_ops is not None:
        if node.name in to_ops:
            # A node may take twice the same entry.
            to_ops.remove(node.name)
    self._register_input_name(new_input, node)
    return is_replaced
def replace_inputs(self, node, new_inputs):
    """Replace node inputs.

    Unregisters *node* as a consumer of each of its current inputs,
    registers it as a consumer of every name in *new_inputs*, and swaps
    the input list. Always returns True.
    """
    assert isinstance(node, Node) and isinstance(new_inputs, list)
    for old_input in node.input:
        to_ops = self._output_to_consumers.get(old_input, None)
        # Fix: remove the consumer *node name* from the set. The old code
        # removed `old_input` (a tensor name, never a member of the
        # node-name set), leaving stale consumer entries behind.
        if to_ops is not None and node.name in to_ops:
            # To avoid issues when a node
            # takes twice the same entry.
            to_ops.remove(node.name)
    for input_name in new_inputs:
        assert isinstance(input_name, six.text_type)
        self._register_input_name(input_name, node)
    node.input = new_inputs
    return True
def _extract_sub_graph_nodes(self, dest_node, input_checker=None):
    """Return nodes of subgraph ending with dest_node.
    Args:
        dest_node: output node of the subgraph to find
        input_checker: customized input check function: bool func(node)
    Return:
        a set of nodes
    """
    res_set = set()
    if not dest_node or (input_checker and input_checker(dest_node) is False):
        return res_set
    # worklist walk backwards over explicit and implicit (subgraph) inputs
    processing_set = set([dest_node])
    while processing_set:
        top_node = processing_set.pop()
        res_set.add(top_node)
        all_inputs = top_node.input + list(top_node.get_implicit_inputs())
        for input_id in all_inputs:
            # we don't care about nested graph here, just handle current graph cropping.
            node = self.get_node_by_output(input_id, search_in_parent_graphs=False)
            if not node:
                # some nodes (for example Scan) have optional inputs, which
                # might have empty input.
                # subgraph might have input defined in outer graph
                continue
            if node not in res_set:
                if input_checker and input_checker(node) is False:
                    continue
                processing_set.add(node)
    return res_set
def extract_sub_graph_nodes(self, outputs_name, input_checker=None, remove_unused_inputs=True):
    """Return nodes of subgraph having output_ids as outputs.
    Args:
        output_ids: output node output id of the subgraph to find
        input_checker: customized input check function: bool func(node)
        remove_unused_inputs: bool, indicates whether unused placeholder inputs will be removed
            in the resulting nodes.
    Return:
        a list of nodes
    """
    res_set = set()
    outputs_to_keep = list(outputs_name)
    if not remove_unused_inputs:
        # add placeholder nodes even if they are not connected to outputs.
        # placeholder nodes with defaults can have inputs themselves
        outputs_to_keep += [inp.output[0] for inp in self.inputs]
    # union the backward slice from every requested output
    for output in outputs_to_keep:
        node = self.get_node_by_output(output, search_in_parent_graphs=False)
        res_set = res_set.union(self._extract_sub_graph_nodes(node, input_checker))
    return list(res_set)
def delete_unused_nodes(self, outputs_name):
    """Delete nodes not in subgraph ending with output_names."""
    if not outputs_name:
        logger.debug("Outputs not specified, delete_unused_nodes not taking effect.")
        return
    # we need keep those placeholders that are used as input of Loop's body graph.
    # some of them are not used in the graph, but still need be there to keep the graph complete.
    related_nodes = self.extract_sub_graph_nodes(outputs_name, remove_unused_inputs=False)
    for node in related_nodes:
        attr_body_graphs = node.get_body_graphs()
        if attr_body_graphs:
            # recursively prune each control-flow body graph as well
            for body_graph in attr_body_graphs.values():
                body_graph.delete_unused_nodes(body_graph.outputs)
    self.reset_nodes(related_nodes)
def safe_to_remove_nodes(self, to_delete):
    """ List of nodes that safe to delete (i.e. outputs not consumed by other nodes.)"""
    delete_set = set(to_delete)
    removable = []
    for candidate in delete_set:
        consumers = set()
        for out in candidate.output:
            consumers.update(self.find_output_consumers(out))
        # only safe when every consumer is itself slated for deletion
        if consumers <= delete_set:
            removable.append(candidate)
    return removable
# TODO(tomwildenhain): Remove this function
def safe_remove_nodes(self, to_delete):
    """Delete nodes in `to_delete` without third-party node consuming it."""
    delete_set = set(to_delete)
    for n in delete_set:
        out_consumers = set()
        for out in n.output:
            out_consumers |= set(self.find_output_consumers(out))
        # only remove when every consumer is itself being deleted
        if out_consumers.issubset(delete_set):
            self.remove_node(n.name)
def is_safe_to_remove_nodes(self, to_delete, outputs_to_ignore=None):
    """Returns true if the outputs of all the nodes in to_delete have no third-party nodes consuming them."""
    delete_set = set(to_delete)
    ignored = set(outputs_to_ignore) if outputs_to_ignore else set()
    for candidate in delete_set:
        for out in candidate.output:
            if out in ignored:
                continue
            # any consumer outside the deletion set makes removal unsafe
            if not set(self.find_output_consumers(out)) <= delete_set:
                return False
    return True
class GraphUtil(object):
    """Utilities for Graph manipulation."""

    @staticmethod
    def optimize_graph(graph, catch_errors=True, optimizers=None):
        """Run the tf2onnx optimizer pipeline over an internal Graph."""
        return optimizer.optimize_graph(graph, catch_errors, optimizers=optimizers)

    @staticmethod
    def optimize_model_proto(onnx_model_proto, catch_errors=True, return_graph=False,
                             optimizers=None):
        """Optimize the model proto, for example: eliminating all useless Transpose pairs.
        Returns:
            model proto (and possibly graph) after optimization, if optimizer run successfully
            or onnx_model_proto, if exceptions happens
        """
        try:
            kwargs = GraphUtil.get_onnx_model_properties(onnx_model_proto)
            graph = GraphUtil.create_graph_from_onnx_model(onnx_model_proto)
            graph = GraphUtil.optimize_graph(graph, catch_errors, optimizers=optimizers)
            model_proto = graph.make_model(onnx_model_proto.graph.doc_string,
                                           graph_name=onnx_model_proto.graph.name, **kwargs)
            # carry over user metadata from the original model
            if onnx_model_proto.metadata_props:
                metadata_props = {p.key: p.value for p in onnx_model_proto.metadata_props}
                helper.set_model_props(model_proto, metadata_props)
            if return_graph:
                return model_proto, graph
            return model_proto
        except Exception as e:
            if not catch_errors:
                raise e
            # sometimes, onnx shape inference will fail for some reason,
            # return onnx_model_proto for this case
            logger.warning("Failed to optimize model proto", exc_info=1)
            if return_graph:
                return onnx_model_proto, None
            return onnx_model_proto

    @staticmethod
    def get_onnx_model_properties(onnx_model_proto):
        """Get ModelProto properties."""
        kwargs = {}
        # copy only the fields explicitly set on the proto
        if onnx_model_proto.HasField('ir_version'):
            kwargs["ir_version"] = onnx_model_proto.ir_version
        if onnx_model_proto.HasField('producer_name'):
            kwargs["producer_name"] = onnx_model_proto.producer_name
        if onnx_model_proto.HasField('producer_version'):
            kwargs["producer_version"] = onnx_model_proto.producer_version
        if onnx_model_proto.HasField('domain'):
            kwargs["domain"] = onnx_model_proto.domain
        if onnx_model_proto.HasField('model_version'):
            kwargs["model_version"] = onnx_model_proto.model_version
        if onnx_model_proto.HasField('doc_string'):
            kwargs["doc_string"] = onnx_model_proto.doc_string
        kwargs["opset_imports"] = onnx_model_proto.opset_import
        return kwargs

    @staticmethod
    def create_graph_from_onnx_model(onnx_model_proto, target=None):
        """Create Graph loading onnx model proto."""
        # apply shape inference on the model
        inferred_model = shape_inference.infer_shapes(onnx_model_proto)
        utils.initialize_name_counter(inferred_model)
        graph_proto = inferred_model.graph
        opset_version = None
        extra_opset = []
        for opset in onnx_model_proto.opset_import:
            if not opset.domain:
                # domain field is None or empty means it is onnx domain
                opset_version = opset.version
            else:
                extra_opset.append(opset)
        utils.make_sure(opset_version is not None, "opset version is not specified for onnx domain")
        main_graph = GraphUtil.create_graph_from_onnx_graph(graph_proto, opset_version, extra_opset, target)
        return main_graph

    @staticmethod
    def create_graph_from_onnx_graph(graph_proto, opset_version=None, extra_opset=None, target=None):
        """Create Graph loading onnx graph proto."""
        output_shapes = {}
        output_dtypes = {}
        # collect shapes/dtypes from both intermediate value_info and graph outputs
        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.value_info)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)
        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.output)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)
        nodes_to_append = []
        for n in graph_proto.node:
            if n.op_type == "Constant":
                # tf2onnx internally uses "Const" for constant nodes
                n.op_type = "Const"
            # some pytorch model had empty names - make one up
            if not n.name:
                n.name = utils.make_name("was_empty")
            nodes_to_append.append(n)
        output_names = []
        for n in graph_proto.output:
            output_names.append(n.name)
        g = Graph(nodes_to_append, output_shapes, output_dtypes, target, opset_version, extra_opset, None, output_names)
        const_nodes = GraphUtil._parse_graph_initializer(g, graph_proto)
        GraphUtil._parse_graph_input(g, graph_proto, [n.name for n in const_nodes])
        # recursively convert subgraph attributes (e.g. Loop/If bodies)
        for n in g.get_nodes():
            for attr_name, attr_val in n.attr.items():
                if attr_val.HasField('g'):
                    # it was assumed that the a.g has inferred shapes/dtypes.
                    sub_g = GraphUtil.create_graph_from_onnx_graph(attr_val.g, opset_version, extra_opset)
                    n.set_body_graph_as_attr(attr_name, sub_g)
        return g

    @staticmethod
    def get_node_count_from_onnx_graph(graph_proto):
        """Return a Counter of op types appearing in the onnx graph proto."""
        op_cnt = collections.Counter()
        for n in graph_proto.node:
            op_cnt[n.op_type] += 1
        return op_cnt

    @staticmethod
    def _parse_shape_and_type_from_value_infos(value_infos):
        """Get nodes output shapes and types from value infos."""
        output_shapes = {}
        output_dtypes = {}
        for shape_info in value_infos:
            type_proto = shape_info.type
            elem_type = type_proto.tensor_type.elem_type
            output_dtypes[shape_info.name] = elem_type
            if not type_proto.tensor_type.HasField("shape"):
                output_shapes[shape_info.name] = None
                continue
            shape = type_proto.tensor_type.shape
            tuned_shape = []
            for d in shape.dim:
                if d.HasField('dim_param'):
                    # symbolic dimension -> unknown, represented as -1
                    tuned_shape.append(-1)
                elif d.HasField('dim_value'):
                    tuned_shape.append(d.dim_value)
                else:
                    # it is found, some unknown dims is missing after inference.
                    tuned_shape.append(-1)
            output_shapes[shape_info.name] = tuned_shape
        return output_shapes, output_dtypes

    @staticmethod
    def _parse_graph_initializer(g, graph_proto):
        """Get graph initializers and put into Graph object."""
        const_nodes = []
        for initializer in graph_proto.initializer:
            np_val = numpy_helper.to_array(initializer)
            const_nodes.append(g.make_const(initializer.name, np_val))
        return const_nodes

    @staticmethod
    def _parse_graph_input(g, graph_proto, const_node_names):
        """Get graph inputs not defined as initializers and put into Graph object."""
        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.input)
        # make sure the input is added in order we read from graph_proto,
        # because for subgraphs, the input orders matter.
        for graph_input in graph_proto.input:
            name = graph_input.name
            const_initializer_node = g.get_node_by_output_in_current_graph(name)
            if const_initializer_node is None:  # is actual input rather than initializer
                shape = shapes[name]
                dtype = dtypes[name]
                if name not in const_node_names:
                    g.add_graph_input(name, dtype, shape)
                else:
                    # input shadowed by an initializer: keep the const as its default
                    g.add_graph_input_with_default(name, g.get_node_by_name(name), dtype, shape)
| 73,318 | 39.687569 | 120 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/tfjs_utils.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.tfjs_utils - utilities for parsing tfjs files into onnx graphs
Main functions of interest are graphs_from_tfjs and read_tfjs_graph
"""
import json
import os
import base64
import gzip
import struct
import logging
from onnx import numpy_helper, helper
import numpy as np
from google.protobuf.json_format import ParseDict
import tensorflow as tf
from tensorflow.python.framework import c_api_util
from tensorflow.core.framework import types_pb2, node_def_pb2
from tf2onnx import utils
from tf2onnx.graph import Graph
from tf2onnx import tf_utils
logger = logging.getLogger(__name__)
tf_api_def_map = c_api_util.ApiDefMap()
def read_tfjs_attr(attr, tf_dtypes=False):
    """
    Reads the value of a single tfjs node attribute. If tf_dtypes is True, tensorflow dtypes are returned instead of
    onnx dtypes
    """
    # an attr dict has a single entry: {type_key: value}
    type_key, value = next(iter(attr.items()))
    return read_tfjs_attr_helper(type_key, value, tf_dtypes)
def fix_string_attr(tfjs_node):
    """
    Older tfjs models store strings as lists of ints (representing byte values). This function finds and replaces
    those strings, so protobuf can correctly decode the json.
    """
    def _as_b64(value):
        # int-list form -> base64 text; anything else is left untouched
        if isinstance(value, list):
            return base64.encodebytes(bytes(value)).decode()
        return value

    if 'attr' not in tfjs_node:
        return
    for attr_value in tfjs_node['attr'].values():
        if 's' in attr_value:
            attr_value['s'] = _as_b64(attr_value['s'])
        if 'list' in attr_value and 's' in attr_value['list']:
            strings = attr_value['list']['s']
            for idx in range(len(strings)):
                strings[idx] = _as_b64(strings[idx])
def read_tfjs_attr_helper(k, v, tf_dtypes=False):
    """
    A tfjs attribute value is itself a dict with a single key specifying the type and a value with the actual data
    like { axis: { i: -1 }} or { pads: { list: { i: [1, 2, 3, 4] } } }. This helper takes the key specifying the
    type (like 'i' or 'list') and the value and decodes the attribute value.
    """
    supported_types = ['func', 'shape', 'type', 'list', 's', 'i', 'f', 'b']
    utils.make_sure(k in supported_types, "Unrecognized tfjs attribute type %s", k)
    if k == 'list':
        # a list attr holds per-type sublists; at most one of them is non-empty
        non_empty_keys = [k2 for k2, v2 in v.items() if len(v2) > 0]
        if len(non_empty_keys) == 0:
            return []
        k2 = non_empty_keys[0]
        return [read_tfjs_attr_helper(k2, v2, tf_dtypes) for v2 in v[k2]]
    if k == 'type':
        dtype = v
        if not isinstance(dtype, int):
            # dtype may be spelled as an enum name such as "DT_FLOAT"
            dtype = getattr(types_pb2, dtype)
        if not tf_dtypes:
            dtype = tf_utils.map_tf_dtype(dtype)
        return dtype
    if k == 'func':
        return v['name']
    if k == 'shape':
        return [int(d['size']) for d in v.get('dim', [])]
    if k == 's':
        # strings are base64 encoded in the json
        return base64.decodebytes(v.encode())
    if k == 'i':
        # ints are stored in the tfjs json as strings
        return int(v)
    return v
def tfjs_node_to_tf_node_def(node):
    """Converts a tfjs node to a tf node_def for use in tf shape inferencing"""
    # the tfjs json layout matches NodeDef, so protobuf can parse it directly
    proto = node_def_pb2.NodeDef()
    ParseDict(node, proto)
    return proto
def resolve_output(output, op_info, func_name=None):
    """
    Given an output name from a tfjs model and an op_info dict containing info about the nodes available, (and the
    function name if this is a subgraph), returns the canonical name to use as the output name in the onnx model.
    The resulting string is always "node_name:port_number"
    """
    cnt = output.count(':')
    # outputs in the tfjs model can use one of 3 different formats interchangably.
    if cnt == 0:
        # If no port is specified, it is referring to port 0
        if output in op_info:
            return output + ':0'
        # Output isn't from an op and may be an input (no port number)
        return output
    if cnt == 1:
        # Already in our standard format
        return output
    # Format is node_name:output_name:subindex
    node, output_arg_name, index = output.split(':')
    if node not in op_info and func_name is not None:
        # In very rare cases, tfjs prepends the func_name to a node but forgets to fix the outputs
        long_node_name = func_name + "/" + node
        if long_node_name in op_info:
            node = long_node_name
    op_type, tf_attr, inp_dtypes = op_info[node]
    # translate the named output arg + subindex into a flat port number
    names, _ = get_output_names_and_dtypes(op_type, tf_attr, inp_dtypes)
    idx = names.index(output_arg_name) + int(index)
    return node + ':' + str(idx)
def get_output_names_and_dtypes(op_type, tf_attr, inp_dtypes):
    """Parses the tf documentation to determine the names and dtypes of the outputs of the op"""
    # TODO: ['Prelu', 'Conv1D', 'DepthwiseConv2d', 'FusedDepthwiseConv2dNative', 'Ones', 'Zeros']
    if op_type == 'Prelu':
        # Prelu is tfjs-specific and absent from the tf api def map
        return ['activations'], [inp_dtypes[0]]
    try:
        tf_op_def = tf_api_def_map.get_op_def(op_type)
    except ValueError:
        raise ValueError("Failed to determine dtypes for op type %s. May be an unsupported op type." % op_type)
    dtypes = []
    names = []
    for arg in tf_op_def.output_arg:
        num_copies = 1
        if arg.type_list_attr:
            # heterogeneous output list: dtypes come from a type-list attr
            dtypes += tf_attr[arg.type_list_attr]
            num_copies = len(tf_attr[arg.type_list_attr])
        else:
            if arg.type_attr:
                dtype = tf_attr[arg.type_attr]
            else:
                dtype = arg.type
            if arg.number_attr:
                # homogeneous output list repeated number_attr times
                dtypes += [dtype] * tf_attr[arg.number_attr]
                num_copies = tf_attr[arg.number_attr]
            else:
                dtypes.append(dtype)
        # repeat the arg name so names and dtypes stay index-aligned
        names += [arg.name] * num_copies
    return names, dtypes
def get_output_dtypes(op_type, tf_attr, inp_dtypes):
    """Returns a list of the tf dtypes for the op's outputs"""
    # only the dtype half of the (names, dtypes) pair is needed here
    return get_output_names_and_dtypes(op_type, tf_attr, inp_dtypes)[1]
def get_output_shapes(node_def, input_dtypes, input_shapes, inp_consts):
    """Returns a list of the output shapes of an op. input_dtypes should be tf dtypes."""
    from tf2onnx.tf_loader import tf_session, tf_placeholder  # pylint: disable=import-outside-toplevel
    # a few ops are handled specially instead of going through tf shape inference
    if node_def.op in ["Prelu", "Enter"]:
        return [input_shapes[0]]
    if node_def.op == "Merge":
        # Find the first non-None shape (if it exists) and return it
        non_none = ([t for t in input_shapes if t is not None] + [None])[0]
        # The second output of merge is a scalar int indicating which input was selected
        return [non_none, []]
    if node_def.op == "Placeholder":
        shape = None
        if 'shape' in node_def.attr:
            shape = [d.size for d in node_def.attr['shape'].shape.dim]
            shape = [None if d == -1 else d for d in shape]
            if len(shape) == 0:
                # According to TF docs, "If the shape has 0 dimensions, the shape is unconstrained."
                shape = None
        return [shape]
    # general case: rebuild the node in a scratch tf graph and let tf infer shapes
    del node_def.input[:]
    node_def.name = "node"
    if "_class" in node_def.attr:
        # Remove colocation information (list of nodes tf wants computed on same device)
        del node_def.attr["_class"]
    g = tf.Graph()
    with g.as_default():
        # recreate each input as a placeholder (unknown value) or constant (known value)
        for i, (dtype, shape, const) in enumerate(zip(input_dtypes, input_shapes, inp_consts)):
            inp = "input" + str(i)
            if const is None:
                if shape is not None and -1 in shape:
                    shape = [d if d != -1 else None for d in shape]
                tf_placeholder(dtype, name=inp, shape=shape)
            else:
                tf.constant(const, dtype, name=inp)
            node_def.input.append(inp)
    mini_graph_def = g.as_graph_def()
    mini_graph_def.node.append(node_def)
    g2 = tf.Graph()
    with g2.as_default():
        with tf_session() as sess:
            # importing triggers tf's shape inference for the appended node
            tf.import_graph_def(mini_graph_def, name='')
            node = sess.graph.get_operation_by_name("node")
            outputs_shapes = [tf_utils.get_tf_tensor_shape(out) for out in node.outputs]
            return outputs_shapes
def sort_tfjs_functions(funcs):
    """Topologically sorts a list of tfjs functions"""
    name_to_func = {}
    dependencies = {}
    for func in funcs:
        func_name = func['signature']['name']
        name_to_func[func_name] = func
        dependencies[func_name] = get_tfjs_func_dependencies(func)
    # dependencies first, dependents later
    return [name_to_func[func_name] for func_name in utils.topological_sort(dependencies)]
def get_tfjs_func_dependencies(func):
    """Returns a list of names of functions the provided tfjs func depends on"""
    # every 'func'-typed attr on any node names another function this one calls
    deps = {
        read_tfjs_attr(attr_value)
        for node in func.get('nodeDef', [])
        for attr_value in node.get('attr', {}).values()
        if next(iter(attr_value)) == 'func'
    }
    return list(deps)
def read_model_json(model_path):
    """Given the path to a model.json file, parses the json and returns a dict (and flag indicating if the weights are
    compressed)"""
    with open(model_path, "rb") as f:
        raw = f.read()
    # gzip streams begin with the magic bytes 1f 8b; models from tfhub are
    # sometimes gzip compressed without any other indication
    if raw[:2] == b'\x1f\x8b':
        return json.loads(gzip.decompress(raw)), True
    return json.loads(raw), False
def graphs_from_tfjs(model_path, input_names=None, output_names=None, shape_override=None,
                     ignore_default=None, use_default=None):
    """Given the path to a model.json file, parses the model into onnx graphs and returns the main graph and a
    topologically sorted list of subgraphs."""
    model, zip_compressed = read_model_json(model_path)
    model_format = model['modelTopology'].get('format')
    if model_format is None:
        # older models omit 'format'; a keras_version marker implies a layers model
        if 'keras_version' in model['modelTopology']:
            model_format = 'layers-model'
        else:
            model_format = 'graph-model'
    utils.make_sure(model_format == 'graph-model', "tf2onnx only supports conversion from tfjs graph models, "
                    "not format %s. Use Google's tfjs converter to convert to a graph model, then try again.",
                    model_format)
    weights_manifest = model['weightsManifest'][0]
    # weights are split across shard files; concatenate them in manifest order
    sharded_data = []
    for path in weights_manifest["paths"]:
        with open(os.path.join(os.path.dirname(model_path), path), "rb") as f:
            shard_bytes = f.read()
            if zip_compressed:
                shard_bytes = gzip.decompress(shard_bytes)
            sharded_data.append(shard_bytes)
    weights_data = b''.join(sharded_data)
    weights = {}
    i = 0
    # decode each weight sequentially from the concatenated buffer
    for weight in weights_manifest['weights']:
        weight_name, np_arr, num_bytes = read_tfjs_weight(weight, weights_data, offset=i)
        weights[weight_name] = np_arr
        i += num_bytes
    utils.make_sure(len(weights_data) == i, "Total weight bytes %d doesn't match read bytes %d", len(weights_data), i)
    topology = model['modelTopology']
    tensors_to_rename = {}
    if output_names is None and 'signature' in model:
        # pick up input/output names (and friendly renames) from the signature
        outputs = model['signature'].get('outputs')
        inputs = model['signature'].get('inputs')
        if outputs is not None:
            output_names = [v['name'] for v in outputs.values()]
            tensors_to_rename.update({v['name']: k for k, v in outputs.items()})
        if inputs is not None:
            tensors_to_rename.update({v['name']: k for k, v in inputs.items()})
    main_g = read_tfjs_graph(topology['node'], weights, None, input_names, output_names, shape_override,
                             ignore_default, use_default)
    main_g.rename_tensors(tensors_to_rename)
    subgraphs = []
    # convert library functions in dependency order so callees exist first
    funcs = sort_tfjs_functions(topology.get('library', {}).get('function', []))
    for func in funcs:
        sub_g = read_tfjs_graph(func.get('nodeDef', []), weights, func, None, None, shape_override,
                                ignore_default, use_default)
        subgraphs.append(sub_g)
    return main_g, subgraphs
def read_tfjs_weight(weight, weights_data, offset):
    """Returns the name, numpy array, and number of bytes for a tfjs weight

    Args:
        weight: manifest entry with 'name', 'shape', 'dtype' (and optional 'quantization')
        weights_data: concatenated binary weight buffer
        offset: byte offset of this weight within weights_data
    """
    name = weight['name']
    # Fix: np.product is deprecated (removed in NumPy 2.0); use np.prod,
    # consistent with the string branch below.
    count = np.prod(weight['shape'], dtype=np.int64)
    if weight['dtype'] == 'string':
        num_strings = np.prod(weight['shape'], dtype=np.int64)
        string_list, num_bytes = read_string_weight(weights_data, offset, num_strings)
        np_arr = np.array(string_list).reshape(weight['shape'])
        return name, np_arr, num_bytes
    np_dtype = np.dtype(weight['dtype'])
    if 'quantization' in weight:
        # quantized weights are stored in a smaller dtype and (optionally)
        # expanded via an affine scale/min mapping
        q_info = weight['quantization']
        q_dtype = np.dtype(q_info['dtype'])
        np_arr = np.frombuffer(weights_data, dtype=q_dtype, count=count, offset=offset)
        num_bytes = np_arr.nbytes
        if 'scale' in q_info:
            np_arr = np_arr.astype(np_dtype) * q_info['scale'] + q_info['min']
        else:
            np_arr = np_arr.astype(np_dtype)
    else:
        np_arr = np.frombuffer(weights_data, dtype=np_dtype, count=count, offset=offset)
        num_bytes = np_arr.nbytes
    np_arr = np_arr.reshape(weight['shape'])
    return name, np_arr, num_bytes
def read_string_weight(weights_data, offset, num_strings):
    """Decodes binary weight data for a tfjs string"""
    strings = []
    pos = offset
    for _ in range(num_strings):
        # each string is a little-endian uint32 byte length followed by the raw bytes
        (length,) = struct.unpack_from('<I', weights_data, pos)
        pos += 4
        strings.append(weights_data[pos:pos + length])
        pos += length
    return strings, pos - offset
def read_tfjs_function(func):
"""Parses properties of a tfjs function."""
tf_dtypes = {}
output_shapes = {}
signature = func['signature']
inputs = []
for i, inp in enumerate(signature['inputArg']):
inp_name = inp['name']
inputs.append(inp_name)
tf_dtypes[inp_name] = getattr(types_pb2, inp['type'])
out_shapes_attr = func.get('argAttr', {}).get(str(i), {}).get('attr', {}).get('_output_shapes')
if out_shapes_attr is not None:
output_shapes[inp_name] = read_tfjs_attr(out_shapes_attr)[0]
else:
output_shapes[inp_name] = None
ret_map = func['ret']
outputs = [ret_map[out['name']] for out in signature['outputArg']]
name = signature['name']
return tf_dtypes, output_shapes, inputs, outputs, name
def read_tfjs_graph(nodes, weights, func=None, graph_inputs=None, graph_outputs=None, shape_override=None,
                    ignore_default=None, use_default=None):
    """Creates an onnx graph from the provided tfjs nodes.

    Args:
        nodes: list of tfjs node dicts (from the parsed model topology).
        weights: dict mapping node names to numpy arrays (Const values).
        func: optional tfjs function dict; when given, io names/dtypes/shapes
            come from its signature and Placeholder nodes are synthesized.
        graph_inputs: optional explicit input tensor names; inferred from
            Placeholder-style ops when None (and func is None).
        graph_outputs: optional explicit output tensor names; inferred from
            outputs of otherwise-unconsumed nodes when None.
        shape_override: dict of tensor name -> shape that takes precedence
            over computed shapes.
        ignore_default: names of PlaceholderWithDefault nodes to turn into
            plain Placeholders.
        use_default: names of PlaceholderWithDefault nodes to turn into
            Identity ops using their default value.

    Returns:
        A tf2onnx Graph.
    """
    if shape_override is None:
        shape_override = {}
    onnx_nodes = []
    output_shapes = {}
    tf_dtypes = {}
    # op_info maps node name -> (op type, tf attrs, input dtypes); consumed by resolve_output.
    op_info = {}
    graph_name = 'tfjs_model'
    func_name = None
    def update_shapes(new_shapes):
        # Record shapes, letting shape_override win over computed values.
        if isinstance(new_shapes, dict):
            new_shapes = new_shapes.items()
        for k, v in new_shapes:
            output_shapes[k] = shape_override.get(k, v)
    if func is not None:
        tf_dtypes, fn_input_shapes, graph_inputs, graph_outputs, func_name = read_tfjs_function(func)
        update_shapes(fn_input_shapes)
        graph_name = func_name
        # Function inputs have no nodes of their own; synthesize Placeholders for them.
        for inp in graph_inputs:
            onnx_nodes.append(helper.make_node("Placeholder", [], outputs=[inp], name=inp))
    if graph_inputs is None:
        placeholder_ops = ["Placeholder", "PlaceholderWithDefault", "PlaceholderV2"]
        graph_inputs = [n['name'] + ':0' for n in nodes if n['op'] in placeholder_ops]
    for node in nodes:
        if node['op'] == "NextIteration":
            # NextIteration nodes can violate the topological sort with cyclic dependencies, so we do them first.
            node_name = node['name']
            output_name = node_name + ':0'
            output_shapes[output_name] = None
            tf_dtypes[output_name] = read_tfjs_attr(node['attr']['T'], tf_dtypes=True)
            op_info[node_name] = (node['op'], {'dtype': tf_dtypes[output_name]}, [tf_dtypes[output_name]])
    for node in nodes:
        op_type = node['op']
        node_name = node['name']
        if op_type == "Const":
            np_arr = weights[node_name]
            out_name = node_name + ':0'
            tf_dtype = read_tfjs_attr(node['attr']['dtype'], tf_dtypes=True)
            onnx_dtype = tf_utils.map_tf_dtype(tf_dtype)
            # The dtype of a Const in tfjs can differ from that of the weight used to get its value
            np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
            onnx_tensor = numpy_helper.from_array(np_arr.astype(np_dtype), out_name)
            onnx_node = helper.make_node("Const", [], outputs=[out_name], name=node_name, value=onnx_tensor)
            onnx_nodes.append(onnx_node)
            output_shapes[out_name] = shape_override.get(out_name, list(np_arr.shape))
            tf_dtypes[out_name] = tf_dtype
            op_info[node_name] = (op_type, {'dtype': tf_dtypes[out_name]}, [])
            continue
        tf_attr = {}
        onnx_attr = {}
        fix_string_attr(node)
        node_def = tfjs_node_to_tf_node_def(node)
        for k, v in node.get('attr', {}).items():
            tf_attr[k] = read_tfjs_attr(v, tf_dtypes=True)
            if k in tf_utils.TF_IGNORED_NODE_ATTRS:
                continue
            if k == 'DstT':
                # Cast's destination dtype attr is named 'to' in ONNX.
                k = 'to'
            onnx_attr[k] = read_tfjs_attr(v)
        if op_type == "FusedDepthwiseConv2dNative":
            # This op isn't in tensorflow but can be converted to a TF op
            op_type = "_FusedDepthwiseConv2dNative"
            # NOTE(review): message text looks garbled ("for supported") — likely meant "not supported".
            err_msg = "explicit_paddings for supported for _FusedDepthwiseConv2dNative"
            if "explicit_paddings" in tf_attr:
                utils.make_sure(len(tf_attr['explicit_paddings']) == 0, err_msg)
                del tf_attr['explicit_paddings']
                del onnx_attr['explicit_paddings']
                del node_def.attr['explicit_paddings']
            node_def.op = op_type
        # Inputs starting with '^' are control dependencies; drop them.
        input_names = [inp for inp in node.get('input', []) if not inp.startswith('^')]
        # resolve_output (defined elsewhere in this module) presumably canonicalizes
        # tensor references, e.g. through Identity/function returns — TODO confirm.
        input_names = [resolve_output(inp, op_info, func_name) for inp in input_names]
        inp_dtypes = [tf_dtypes[inp] for inp in input_names]
        inp_shapes = [output_shapes[inp] for inp in input_names]
        inp_consts = [weights.get(inp.split(':')[0]) for inp in input_names]
        out_dtypes = get_output_dtypes(op_type, tf_attr, inp_dtypes)
        out_shapes = get_output_shapes(node_def, inp_dtypes, inp_shapes, inp_consts)
        op_info[node_name] = (op_type, tf_attr, inp_dtypes)
        output_names = [node_name + ":" + str(i) for i in range(len(out_dtypes))]
        tf_dtypes.update(zip(output_names, out_dtypes))
        update_shapes(zip(output_names, out_shapes))
        if op_type == "PlaceholderWithDefault":
            remove = False
            if ignore_default and node_name in ignore_default:
                op_type = 'Placeholder'
                input_names = []
            elif use_default and node_name in use_default:
                remove = True
            elif node_name.endswith('keras_learning_phase'):
                logger.warning("Removing optional input %s that appears to be a keras learning phase parameter. "
                               "Use --ignore_default to force this into an input.", node_name)
                remove = True
            if remove:
                # Replace the placeholder by an Identity of its default value and
                # drop it from the graph inputs.
                op_type = 'Identity'
                graph_inputs = [inp for inp in graph_inputs if inp != node_name + ":0"]
        onnx_node = helper.make_node(op_type, input_names, output_names, name=node_name, **onnx_attr)
        onnx_nodes.append(onnx_node)
    for inp in graph_inputs:
        if output_shapes[inp] is None:
            logger.warning("Input %s has unknown shape. Specify shape with --inputs flag.", inp)
    dtypes = {k: tf_utils.map_tf_dtype(v) for k, v in tf_dtypes.items()}
    if graph_outputs is None:
        # Infer outputs: any node whose outputs are never consumed contributes its outputs.
        output_to_node = {out: node.name for node in onnx_nodes for out in node.output}
        node_to_outputs = {node.name: list(node.output) for node in onnx_nodes}
        used_nodes = set(output_to_node[out] for node in onnx_nodes for out in node.input)
        unused_nodes = [node for node in onnx_nodes if node.name not in used_nodes]
        graph_outputs = [out for node in unused_nodes for out in node_to_outputs[node.name]]
    graph_outputs_mapped = [resolve_output(out, op_info, func_name) for out in graph_outputs]
    g = Graph(onnx_nodes, output_shapes, dtypes, input_names=graph_inputs, output_names=graph_outputs_mapped,
              is_subgraph=func is not None, graph_name=graph_name)
    # Restore the caller-requested output names over the resolved internal names.
    g.rename_tensors(dict(zip(graph_outputs_mapped, graph_outputs)))
    return g
| 20,879 | 40.428571 | 118 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/convert.py | # SPDX-License-Identifier: Apache-2.0
"""
python -m tf2onnx.convert : api and commandline tool to convert a tensorflow model to onnx
"""
# pylint: disable=unused-argument,unused-import,ungrouped-imports,wrong-import-position
import argparse
import os
import sys
from packaging.version import Version
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
import tensorflow as tf
from tf2onnx.tfonnx import process_tf_graph
from tf2onnx import constants, logging, utils, optimizer
from tf2onnx import tf_loader
from tf2onnx.graph import ExternalTensorStorage
from tf2onnx.tf_utils import compress_graph_def, get_tf_version
# pylint: disable=unused-argument
# Epilog appended verbatim to the argparse --help output (RawDescriptionHelpFormatter).
_HELP_TEXT = """
Usage Examples:
python -m tf2onnx.convert --saved-model saved_model_dir --output model.onnx
python -m tf2onnx.convert --input frozen_graph.pb --inputs X:0 --outputs output:0 --output model.onnx
python -m tf2onnx.convert --checkpoint checkpoint.meta --inputs X:0 --outputs output:0 --output model.onnx
For help and additional information see:
https://github.com/onnx/tensorflow-onnx
If you run into issues, open an issue here:
https://github.com/onnx/tensorflow-onnx/issues
"""
def get_args():
    """Parse the commandline.

    Returns the argparse namespace after post-processing: comma-separated
    options are split into lists, --inputs may carry shape overrides
    (stored in args.shape_override), and option combinations are validated
    (parser.error exits on invalid combinations).
    """
    parser = argparse.ArgumentParser(description="Convert tensorflow graphs to ONNX.",
                                     formatter_class=argparse.RawDescriptionHelpFormatter, epilog=_HELP_TEXT)
    parser.add_argument("--input", help="input from graphdef")
    parser.add_argument("--graphdef", help="input from graphdef")
    parser.add_argument("--saved-model", help="input from saved model")
    parser.add_argument("--tag", help="tag to use for saved_model")
    parser.add_argument("--signature_def", help="signature_def from saved_model to use")
    parser.add_argument("--concrete_function", type=int, default=None,
                        help="For TF2.x saved_model, index of func signature in __call__ (--signature_def is ignored)")
    parser.add_argument("--checkpoint", help="input from checkpoint")
    parser.add_argument("--keras", help="input from keras model")
    parser.add_argument("--tflite", help="input from tflite model")
    parser.add_argument("--tfjs", help="input from tfjs model")
    parser.add_argument("--large_model", help="use the large model format (for models > 2GB)", action="store_true")
    parser.add_argument("--output", help="output model file")
    parser.add_argument("--inputs", help="model input_names (optional for saved_model, keras, and tflite)")
    parser.add_argument("--outputs", help="model output_names (optional for saved_model, keras, and tflite)")
    parser.add_argument("--ignore_default", help="comma-separated list of names of PlaceholderWithDefault "
                                                 "ops to change into Placeholder ops")
    parser.add_argument("--use_default", help="comma-separated list of names of PlaceholderWithDefault ops to "
                                              "change into Identity ops using their default value")
    parser.add_argument("--rename-inputs", help="input names to use in final model (optional)")
    parser.add_argument("--rename-outputs", help="output names to use in final model (optional)")
    parser.add_argument("--use-graph-names", help="(saved model only) skip renaming io using signature names",
                        action="store_true")
    parser.add_argument("--opset", type=int, default=None, help="opset version to use for onnx domain")
    parser.add_argument("--dequantize", help="remove quantization from model. Only supported for tflite currently.",
                        action="store_true")
    parser.add_argument("--custom-ops", help="comma-separated map of custom ops to domains in format OpName:domain. "
                                             "Domain 'ai.onnx.converters.tensorflow' is used by default.")
    parser.add_argument("--extra_opset", default=None,
                        help="extra opset with format like domain:version, e.g. com.microsoft:1")
    parser.add_argument("--load_op_libraries",
                        help="comma-separated list of tf op library paths to register before loading model")
    parser.add_argument("--target", default=",".join(constants.DEFAULT_TARGET), choices=constants.POSSIBLE_TARGETS,
                        help="target platform")
    parser.add_argument("--continue_on_error", help="continue_on_error", action="store_true")
    parser.add_argument("--verbose", "-v", help="verbose output, option is additive", action="count")
    parser.add_argument("--debug", help="debug mode", action="store_true")
    parser.add_argument("--output_frozen_graph", help="output frozen tf graph to file")
    # experimental
    parser.add_argument("--inputs-as-nchw", help="transpose inputs as from nhwc to nchw")
    parser.add_argument("--outputs-as-nchw", help="transpose outputs as from nhwc to nchw")
    args = parser.parse_args()
    args.shape_override = None
    if args.input:
        # for backward compatibility
        args.graphdef = args.input
    # graphdef/checkpoint formats carry no signature, so io names are mandatory.
    if args.graphdef or args.checkpoint:
        if not args.inputs or not args.outputs:
            parser.error("graphdef and checkpoint models need to provide inputs and outputs")
    if not any([args.graphdef, args.checkpoint, args.saved_model, args.keras, args.tflite, args.tfjs]):
        parser.print_help()
        sys.exit(1)
    if args.inputs:
        # --inputs may embed shapes (split_nodename_and_shape separates names from overrides).
        args.inputs, args.shape_override = utils.split_nodename_and_shape(args.inputs)
    if args.outputs:
        args.outputs = args.outputs.split(",")
    if args.ignore_default:
        args.ignore_default = args.ignore_default.split(",")
    if args.use_default:
        args.use_default = args.use_default.split(",")
    if args.rename_outputs:
        args.rename_outputs = args.rename_outputs.split(",")
    if args.rename_inputs:
        args.rename_inputs = args.rename_inputs.split(",")
    if args.inputs_as_nchw:
        args.inputs_as_nchw = args.inputs_as_nchw.split(",")
    if args.outputs_as_nchw:
        args.outputs_as_nchw = args.outputs_as_nchw.split(",")
    if args.target:
        args.target = args.target.split(",")
    if args.signature_def:
        # Downstream loaders expect a list of signature names.
        args.signature_def = [args.signature_def]
    if args.dequantize:
        if not args.tflite:
            parser.error("dequantize flag is currently only supported for tflite")
    if args.extra_opset:
        # Each entry has the form "domain:version"; converted to onnx OperatorSetId.
        all_extra_opsets = args.extra_opset.split(',')
        extra_opset_list = []
        for extra_opset in all_extra_opsets:
            tokens = extra_opset.split(':')
            if len(tokens) != 2:
                parser.error("invalid extra_opset argument")
            extra_opset_list.append(utils.make_opsetid(tokens[0], int(tokens[1])))
        args.extra_opset = extra_opset_list
    if args.load_op_libraries:
        args.load_op_libraries = args.load_op_libraries.split(",")
    return args
def make_default_custom_op_handler(domain):
    """Return a custom-op handler that stamps *domain* onto the node it receives."""
    def _set_domain(ctx, node, name, args):
        # The node is passed through unchanged except for its ONNX domain.
        node.domain = domain
        return node
    return _set_domain
def _convert_common(frozen_graph, name="unknown", large_model=False, output_path=None,
                    output_frozen_graph=None, custom_ops=None, custom_op_handlers=None, optimizers=None, **kwargs):
    """Common processing for conversion.

    Args:
        frozen_graph: frozen tf GraphDef (may be None for tflite/tfjs inputs).
        name: model name recorded in the ONNX model's doc string.
        large_model: use the ONNX external tensor storage format (> 2GB models).
        output_path: if set, save the resulting model here (zip when large_model).
        output_frozen_graph: if set, also save the frozen tf graph here.
        custom_ops: dict {op name: domain}; each gets a default handler.
        custom_op_handlers: dict of custom op handlers, merged with custom_ops.
        optimizers: optional subset of tf2onnx optimizers to apply.
        **kwargs: forwarded to process_tf_graph (tflite_path, tfjs_path, ...).

    Returns:
        (model_proto, external_tensor_storage) — storage is None unless large_model.
    """
    model_proto = None
    external_tensor_storage = None
    const_node_values = None
    if custom_ops is not None:
        if custom_op_handlers is None:
            custom_op_handlers = {}
        custom_op_handlers.update(
            {op: (make_default_custom_op_handler(domain), []) for op, domain in custom_ops.items()})
    with tf.Graph().as_default() as tf_graph:
        if large_model:
            # Strip large constants out of the graphdef so importing it stays under limits.
            const_node_values = compress_graph_def(frozen_graph)
            external_tensor_storage = ExternalTensorStorage()
        if output_frozen_graph:
            utils.save_protobuf(output_frozen_graph, frozen_graph)
        if not kwargs.get("tflite_path") and not kwargs.get("tfjs_path"):
            # tflite/tfjs paths bypass the tf graph import; process_tf_graph reads them directly.
            tf.import_graph_def(frozen_graph, name='')
        g = process_tf_graph(tf_graph, const_node_values=const_node_values,
                             custom_op_handlers=custom_op_handlers, **kwargs)
        if constants.ENV_TF2ONNX_CATCH_ERRORS in os.environ:
            # BUG FIX: compare the environment variable's *value*, not its name.
            # The original compared constants.ENV_TF2ONNX_CATCH_ERRORS (the name) to
            # "TRUE", which is always False regardless of what the user set.
            catch_errors = os.environ[constants.ENV_TF2ONNX_CATCH_ERRORS].upper() == "TRUE"
        else:
            # By default only catch optimizer errors for normal-sized models.
            catch_errors = not large_model
        onnx_graph = optimizer.optimize_graph(g, catch_errors, optimizers=optimizers)
        model_proto = onnx_graph.make_model("converted from {}".format(name),
                                            external_tensor_storage=external_tensor_storage)
    if output_path:
        if large_model:
            utils.save_onnx_zip(output_path, model_proto, external_tensor_storage)
        else:
            utils.save_protobuf(output_path, model_proto)
    return model_proto, external_tensor_storage
def main():
    """Commandline entry point: load the model, convert to ONNX, report results."""
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)
    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)
    extra_opset = args.extra_opset or []
    tflite_path = None
    tfjs_path = None
    custom_op_handlers = {}
    initialized_tables = None
    tensors_to_rename = {}
    if args.custom_ops:
        # Entries are "OpName:domain"; ops without a domain go into the tf namespace.
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_op_handlers[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)
    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN for opset in extra_opset):
        # contrib ops may require tensorflow_text kernels to deserialize the model.
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning("tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used.")
    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    graph_def = None
    inputs = None
    outputs = None
    model_path = None
    if not utils.is_cpp_protobuf():
        logger.warning("***IMPORTANT*** Installed protobuf is not cpp accelerated. Conversion will be extremely slow. "
                       "See https://github.com/onnx/tensorflow-onnx/issues/1557")
    if args.load_op_libraries:
        for op_filepath in args.load_op_libraries:
            # change relative path to absolute path to satisfy the tf.load_op_library().
            if not os.path.isabs(op_filepath):
                op_filepath = os.getcwd() + "/" + op_filepath
            tf.load_op_library(op_filepath)
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables, tensors_to_rename = tf_loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.tag, args.signature_def, args.concrete_function,
            args.large_model, return_initialized_tables=True, return_tensors_to_rename=True,
            use_graph_names=args.use_graph_names)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
        # Optional, but used to cut graph if provided.
        inputs = args.inputs
        outputs = args.outputs
        tflite_path = args.tflite
        model_path = tflite_path
    if args.tfjs:
        # tfjs models are parsed directly by _convert_common; no graph_def here.
        inputs = args.inputs
        outputs = args.outputs
        tfjs_path = args.tfjs
        model_path = tfjs_path
    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)
    if args.rename_inputs:
        tensors_to_rename.update(zip(inputs, args.rename_inputs))
    if args.rename_outputs:
        tensors_to_rename.update(zip(outputs, args.rename_outputs))
    # Conversion is pinned to CPU to avoid placing ops on accelerators.
    with tf.device("/cpu:0"):
        model_proto, _ = _convert_common(
            graph_def,
            name=model_path,
            continue_on_error=args.continue_on_error,
            target=args.target,
            opset=args.opset,
            custom_op_handlers=custom_op_handlers,
            extra_opset=extra_opset,
            shape_override=args.shape_override,
            input_names=inputs,
            output_names=outputs,
            inputs_as_nchw=args.inputs_as_nchw,
            outputs_as_nchw=args.outputs_as_nchw,
            large_model=args.large_model,
            tensors_to_rename=tensors_to_rename,
            ignore_default=args.ignore_default,
            use_default=args.use_default,
            tflite_path=tflite_path,
            dequantize=args.dequantize,
            tfjs_path=tfjs_path,
            initialized_tables=initialized_tables,
            output_frozen_graph=args.output_frozen_graph,
            output_path=args.output)
    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    logger.info("Model inputs: %s", [n.name for n in model_proto.graph.input])
    logger.info("Model outputs: %s", [n.name for n in model_proto.graph.output])
    if args.output:
        if args.large_model:
            logger.info("Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.", args.output)
        else:
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")
def tensor_names_from_structed(concrete_func, input_names, output_names):
    """Map graph tensor names to the structured signature names of *concrete_func*.

    Inputs are matched positionally against the flattened structured input
    signature; dict-style structured outputs map tensor name -> output key.
    *output_names* is accepted for interface compatibility but not read.
    """
    flat_specs = tf.nest.flatten(concrete_func.structured_input_signature)
    spec_names = [spec.name for spec in flat_specs if isinstance(spec, tf.TensorSpec)]
    rename_map = dict(zip(input_names, spec_names))
    structured_outputs = concrete_func.structured_outputs
    if isinstance(structured_outputs, dict):
        for out_key, out_tensor in structured_outputs.items():
            rename_map[out_tensor.name] = out_key
    return rename_map
def _rename_duplicate_keras_model_names(model):
"""
In very rare cases, keras has a bug where it will give multiple outputs the same name.
We must edit the model or the TF trace will fail. Returns old_out_names (or None if no edit was made).
IMPORTANT: model may be edited. Assign model.output_names to old_out_names to restore.
"""
old_out_names = None
if model.output_names and len(set(model.output_names)) != len(model.output_names):
# In very rare cases, keras has a bug where it will give multiple outputs the same name
# We must edit the model or the TF trace will fail
old_out_names = model.output_names
used_names = set()
new_out_names = []
for name in model.output_names:
new_name = name
i = 0
while new_name in used_names:
i += 1
new_name = name + "_" + str(i)
used_names.add(new_name)
new_out_names.append(new_name)
model.output_names = new_out_names
return old_out_names
def _is_legacy_keras_model(model):
    """Return True when *model* is a standalone (legacy) keras model, False for tf.keras.

    Logs a warning (and returns False) when the model matches neither, or when
    standalone keras cannot be imported.
    """
    log = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)
    warn_msg = "model is not instance of tf.keras.Model or keras.Model"
    if isinstance(model, tf.keras.Model):
        return False
    try:
        import keras  # pylint: disable=import-outside-toplevel
    except ImportError:
        log.warning(warn_msg)
        return False
    if isinstance(model, keras.Model):
        return True
    log.warning(warn_msg)
    return False
def _from_keras_tf1(model, opset=None, custom_ops=None, custom_op_handlers=None, custom_rewriter=None,
                    inputs_as_nchw=None, outputs_as_nchw=None, extra_opset=None, shape_override=None,
                    target=None, large_model=False, output_path=None):
    """from_keras for tf 1.15.

    Freezes the keras backend session for *model* and converts the frozen
    graph; io tensors are renamed back to the keras input/output names.
    Returns (model_proto, external_tensor_storage).
    """
    input_names = [t.name for t in model.inputs]
    output_names = [t.name for t in model.outputs]
    # Deduplicate output names if needed; restored after the rename map is built.
    old_out_names = _rename_duplicate_keras_model_names(model)
    tensors_to_rename = dict(zip(input_names, model.input_names))
    tensors_to_rename.update(zip(output_names, model.output_names))
    if old_out_names is not None:
        model.output_names = old_out_names
    if _is_legacy_keras_model(model):
        import keras  # pylint: disable=import-outside-toplevel
        sess = keras.backend.get_session()
    else:
        sess = tf.keras.backend.get_session(model.outputs)
    with tf.device("/cpu:0"):
        frozen_graph, initialized_tables = tf_loader.freeze_session(sess, input_names, output_names, get_tables=True)
        with tf.Graph().as_default():
            tf.import_graph_def(frozen_graph, name="")
            frozen_graph = tf_loader.tf_optimize(input_names, output_names, frozen_graph)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=model.name,
            continue_on_error=True,
            target=target,
            opset=opset,
            custom_ops=custom_ops,
            custom_op_handlers=custom_op_handlers,
            custom_rewriter=custom_rewriter,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            outputs_as_nchw=outputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)
    return model_proto, external_tensor_storage
def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,
               custom_rewriter=None, inputs_as_nchw=None, outputs_as_nchw=None, extra_opset=None, shape_override=None,
               target=None, large_model=False, output_path=None, optimizers=None):
    """Returns a ONNX model_proto for a tf.keras model.
    Args:
        model: the tf.keras model we want to convert
        input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
        opset: the opset to be used for the ONNX model, default is the latest
        custom_ops: if a model contains ops not recognized by onnx runtime,
            you can tag these ops with a custom op domain so that the
            runtime can still open the model. Type is a dictionary `{op name: domain}`.
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opset's, for example the opset's used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nhwc to nchw
        outputs_as_nchw: transpose outputs in list from nhwc to nchw
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path
        optimizers: list (subset) of tf2onnx optimizers if applying all optimizers is not desired.
    Returns:
        An ONNX model_proto and an external_tensor_storage dict.
    """
    if get_tf_version() < Version("2.0"):
        # tf1 has no tf.function tracing; use the session-based path.
        return _from_keras_tf1(model, opset, custom_ops, custom_op_handlers, custom_rewriter, inputs_as_nchw,
                               outputs_as_nchw, extra_opset, shape_override, target, large_model, output_path)
    old_out_names = _rename_duplicate_keras_model_names(model)
    from tensorflow.python.keras.saving import saving_utils as _saving_utils # pylint: disable=import-outside-toplevel
    # let tensorflow do the checking if model is a valid model
    function = _saving_utils.trace_model_call(model, input_signature)
    try:
        concrete_func = function.get_concrete_function()
    except TypeError as e:
        # Legacy keras models don't accept the training arg tf provides so we hack around it
        if "got an unexpected keyword argument 'training'" not in str(e):
            raise e
        # Temporarily wrap model.call to swallow the 'training' kwarg, then retrace.
        model_call = model.call
        def wrap_call(*args, training=False, **kwargs):
            return model_call(*args, **kwargs)
        model.call = wrap_call
        function = _saving_utils.trace_model_call(model, input_signature)
        try:
            # Legacy keras get make TF erroneously enter eager mode when it should be making symbolic tensors
            import tensorflow_core  # pylint: disable=import-outside-toplevel
            old_get_learning_phase = tensorflow_core.python.keras.backend.learning_phase
            tensorflow_core.python.keras.backend.learning_phase = \
                tensorflow_core.python.keras.backend.symbolic_learning_phase
        except ImportError:
            old_get_learning_phase = None
        try:
            concrete_func = function.get_concrete_function()
        finally:
            # Put everything back
            model.call = model_call
            if old_get_learning_phase is not None:
                tensorflow_core.python.keras.backend.learning_phase = old_get_learning_phase
    # These inputs will be removed during freezing (includes resources, etc.)
    graph_captures = concrete_func.graph._captures # pylint: disable=protected-access
    captured_inputs = [t_name.name for t_val, t_name in graph_captures.values()]
    input_names = [input_tensor.name for input_tensor in concrete_func.inputs
                   if input_tensor.name not in captured_inputs]
    output_names = [output_tensor.name for output_tensor in concrete_func.outputs
                    if output_tensor.dtype != tf.dtypes.resource]
    tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names)
    reverse_lookup = {v: k for k, v in tensors_to_rename.items()}
    if model.output_names:
        # model.output_names is an optional field of Keras models indicating output order. It is None if unused.
        output_names = [reverse_lookup[out] for out in model.output_names]
    elif isinstance(concrete_func.structured_outputs, dict):
        # Other models specify output order using the key order of structured_outputs
        output_names = [reverse_lookup[out] for out in concrete_func.structured_outputs.keys()]
    if old_out_names is not None:
        # Restore original (possibly duplicated) names after the rename map is built.
        model.output_names = old_out_names
    with tf.device("/cpu:0"):
        frozen_graph, initialized_tables = \
            tf_loader.from_trackable(model, concrete_func, input_names, output_names, large_model)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=model.name,
            continue_on_error=True,
            target=target,
            opset=opset,
            custom_ops=custom_ops,
            custom_op_handlers=custom_op_handlers,
            optimizers=optimizers,
            custom_rewriter=custom_rewriter,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            outputs_as_nchw=outputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)
    return model_proto, external_tensor_storage
def from_function(function, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,
                  custom_rewriter=None, inputs_as_nchw=None, outputs_as_nchw=None, extra_opset=None,
                  shape_override=None, target=None, large_model=False, output_path=None):
    """Returns a ONNX model_proto for a tf.function.
    Args:
        function: the tf.function we want to convert
        input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
        opset: the opset to be used for the ONNX model, default is the latest
        custom_ops: if a model contains ops not recognized by onnx runtime,
            you can tag these ops with a custom op domain so that the
            runtime can still open the model. Type is a dictionary `{op name: domain}`.
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opset's, for example the opset's used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nhwc to nchw
        outputs_as_nchw: transpose outputs in list from nhwc to nchw
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path
    Returns:
        An ONNX model_proto and an external_tensor_storage dict.

    Raises:
        NotImplementedError: when running under tf 1.x.
        ValueError: when input_signature is missing.
    """
    if get_tf_version() < Version("2.0"):
        raise NotImplementedError("from_function requires tf-2.0 or newer")
    if input_signature is None:
        raise ValueError("from_function requires input_signature")
    concrete_func = function.get_concrete_function(*input_signature)
    # Resource tensors (variables, tables) are excluded from the io lists.
    input_names = [input_tensor.name for input_tensor in concrete_func.inputs
                   if input_tensor.dtype != tf.dtypes.resource]
    output_names = [output_tensor.name for output_tensor in concrete_func.outputs
                    if output_tensor.dtype != tf.dtypes.resource]
    initialized_tables = None
    tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names)
    with tf.device("/cpu:0"):
        frozen_graph = tf_loader.from_function(concrete_func, input_names, output_names, large_model=large_model)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=concrete_func.name,
            continue_on_error=True,
            target=target,
            opset=opset,
            custom_ops=custom_ops,
            custom_op_handlers=custom_op_handlers,
            custom_rewriter=custom_rewriter,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            outputs_as_nchw=outputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)
    return model_proto, external_tensor_storage
def from_graph_def(graph_def, name=None, input_names=None, output_names=None, opset=None, custom_ops=None,
                   custom_op_handlers=None, custom_rewriter=None, inputs_as_nchw=None, outputs_as_nchw=None,
                   extra_opset=None, shape_override=None, target=None, large_model=False,
                   tensors_to_rename=None, output_path=None):
    """Returns a ONNX model_proto for a tensorflow graphdef.
    Args:
        graph_def: the graphdef we want to convert
        input_names: list of input names
        output_names: list of output names
        name: A name for the graph
        opset: the opset to be used for the ONNX model, default is the latest
        custom_ops: if a model contains ops not recognized by onnx runtime,
            you can tag these ops with a custom op domain so that the
            runtime can still open the model. Type is a dictionary `{op name: domain}`.
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opset's, for example the opset's used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nhwc to nchw
        outputs_as_nchw: transpose outputs in list from nhwc to nchw
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path
    Returns:
        An ONNX model_proto and an external_tensor_storage dict.

    Raises:
        ValueError: when input_names or output_names is missing.
    """
    if not input_names:
        raise ValueError("input_names needs to be provided")
    if not output_names:
        raise ValueError("output_names needs to be provided")
    if not name:
        name = "unknown"
    initialized_tables = None
    with tf.device("/cpu:0"):
        with tf.Graph().as_default() as tf_graph:
            with tf_loader.tf_session(graph=tf_graph) as sess:
                tf.import_graph_def(graph_def, name='')
                frozen_graph = tf_loader.freeze_session(sess, input_names=input_names, output_names=output_names)
                input_names = tf_loader.inputs_without_resource(sess, input_names)
                # BUG FIX: optimize the *frozen* graph. The original passed graph_def
                # here, silently discarding freeze_session's result (a dead store),
                # so graphs whose variables needed freezing were converted unfrozen.
                frozen_graph = tf_loader.tf_optimize(input_names, output_names, frozen_graph)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=name,
            continue_on_error=True,
            target=target,
            opset=opset,
            custom_ops=custom_ops,
            custom_op_handlers=custom_op_handlers,
            custom_rewriter=custom_rewriter,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            outputs_as_nchw=outputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)
    return model_proto, external_tensor_storage
def from_tflite(tflite_path, input_names=None, output_names=None, opset=None, custom_ops=None, custom_op_handlers=None,
                custom_rewriter=None, inputs_as_nchw=None, outputs_as_nchw=None, extra_opset=None, shape_override=None,
                target=None, large_model=False, output_path=None):
    """Convert a tflite model file to an ONNX model_proto.

    Args:
        tflite_path: full path of the tflite model file (required)
        input_names: list of input names
        output_names: list of output names
        opset: opset to use for the ONNX model, defaults to the latest
        custom_ops: dictionary `{op name: domain}` tagging ops unknown to onnx
            runtime with a custom domain so the runtime can still open the model
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        inputs_as_nchw: transpose inputs in list from nhwc to nchw
        outputs_as_nchw: transpose outputs in list from nhwc to nchw
        extra_opset: list of extra opsets, e.g. the opsets used by custom ops
        shape_override: dict with inputs overriding the shapes given by tensorflow
        target: list of workarounds applied to help certain platforms
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path

    Returns:
        An ONNX model_proto and an external_tensor_storage dict.
    """
    if not tflite_path:
        raise ValueError("tflite_path needs to be provided")
    # Use the file's base name (without extension) as the model name.
    model_name = os.path.splitext(os.path.basename(tflite_path))[0]
    # Pin conversion work to CPU; GPU placement is not needed for conversion.
    with tf.device("/cpu:0"):
        conversion_result = _convert_common(
            None,
            tflite_path=tflite_path,
            name=model_name,
            continue_on_error=True,
            target=target,
            opset=opset,
            custom_ops=custom_ops,
            custom_op_handlers=custom_op_handlers,
            custom_rewriter=custom_rewriter,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            outputs_as_nchw=outputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=None,
            initialized_tables=None,
            output_path=output_path)
    return conversion_result
# Command-line entry point: run the converter when executed as a script.
if __name__ == "__main__":
    main()
| 32,900 | 45.274262 | 119 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/keras2onnx_api.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.keras2onnx_api - Ease migration from keras2onnx to tf2onnx.
Use tf2onnx.keras2onnx_api.convert_keras instead of deprecated keras2onnx.convert_keras
"""
# pylint: disable=unused-argument,missing-docstring
from onnx import mapping, defs
import tensorflow as tf
import tf2onnx
from tf2onnx.constants import OPSET_TO_IR_VERSION
def to_tf_tensor_spec(onnx_type, name=None, unknown_dim=1):
    """Build a tf.TensorSpec from a keras2onnx-style type object.

    String (symbolic) dimensions in the type's shape are replaced by
    ``unknown_dim``; the element dtype is mapped through onnx's
    TENSOR_TYPE_TO_NP_TYPE table.
    """
    shape = []
    for dim in onnx_type.shape:
        # Symbolic dims come through as strings; substitute the placeholder value.
        shape.append(unknown_dim if isinstance(dim, str) else dim)
    elem_type = onnx_type.to_onnx_type().tensor_type.elem_type
    np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type]
    return tf.TensorSpec(shape, np_dtype, name=name)
def _process_initial_types(initial_types, unknown_dim=1):
    """Translate a keras2onnx ``initial_types`` list into tf.TensorSpec objects.

    The list may interleave optional string names with type entries, e.g.
    ``[name0, type0, type1, name2, type2]`` — a string names the type entry
    that immediately follows it. Returns None when ``initial_types`` is None.
    """
    if initial_types is None:
        return None
    input_specs = []
    idx = 0
    count = len(initial_types)
    while idx < count:
        entry = initial_types[idx]
        if isinstance(entry, str):
            # A string entry names the type entry right after it; consume both.
            spec = to_tf_tensor_spec(initial_types[idx + 1], entry, unknown_dim)
            idx += 2
        else:
            spec = to_tf_tensor_spec(entry, None, unknown_dim)
            idx += 1
        input_specs.append(spec)
    return input_specs
def get_maximum_opset_supported():
    """Return the highest ONNX opset both tf2onnx and the installed onnx package support."""
    converter_max = max(OPSET_TO_IR_VERSION.keys())
    onnx_max = defs.onnx_opset_version()
    return min(converter_max, onnx_max)
def convert_keras(model, name=None, doc_string='', target_opset=None, initial_types=None,
                  channel_first_inputs=None, debug_mode=False, custom_op_conversions=None):
    """keras2onnx-compatible wrapper that converts a Keras model via tf2onnx.

    :param model: keras model
    :param name: the converted onnx model internal name
    :param doc_string: doc string
    :param target_opset: the targeted onnx model opset
    :param initial_types: the overridden input type for the target ONNX model.
    :param channel_first_inputs: A list of channel first input
    :param debug_mode: ignored
    :param custom_op_conversions: ignored
    :return an ONNX ModelProto
    """
    opset = target_opset if target_opset is not None else get_maximum_opset_supported()
    # unknown_dim=None keeps symbolic dims dynamic in the resulting TensorSpecs.
    input_signature = _process_initial_types(initial_types, unknown_dim=None)
    model_name = name or model.name
    onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset,
                                               inputs_as_nchw=channel_first_inputs)
    onnx_model.graph.name = model_name
    onnx_model.graph.doc_string = doc_string
    return onnx_model
| 2,350 | 35.169231 | 109 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/tf_loader.py | # SPDX-License-Identifier: Apache-2.0
"""Methods to load tensorflow graph from graphdef, checkpoint or saved_model."""
import logging
import uuid
from packaging.version import Version
import tensorflow as tf
import numpy as np
from google.protobuf.message import DecodeError
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.ops import lookup_ops
from tensorflow.python.util import compat
from tf2onnx import utils
from tf2onnx.tf_utils import (get_tf_version, tflist_to_onnx, get_hash_table_info, replace_placeholders_with_tables,
HashTableInfo)
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,unused-import,no-value-for-parameter,unexpected-keyword-arg,ungrouped-imports
# pylint: disable=missing-function-docstring,import-outside-toplevel,useless-import-alias,missing-docstring
def is_tf2():
    """True when the installed TensorFlow is a 2.x release."""
    return tf.__version__[:2] == "2."
def _not_implemented_tf_placeholder(name):
    """Creates a placeholder function for missing Tensorflow imports"""
    def not_implemented_tf_placeholder(*args, **kwargs):
        # Any invocation fails loudly, naming the missing symbol and the
        # installed TF version so the user knows which version to try.
        message = (f'Tensorflow verison {tf.__version__} does not implement '
                   f'`{name}`, try converting your model with a different version.')
        raise NotImplementedError(message)
    return not_implemented_tf_placeholder
# Optional TF internals: when an import is unavailable in the installed TF
# version, fall back to a stub that raises on use, or to an empty tuple so
# that isinstance checks against these names are simply always false.
try:
    from tensorflow.python.framework.function_def_to_graph import function_def_to_graph
except ImportError:
    function_def_to_graph = _not_implemented_tf_placeholder('function_def_to_graph')
try:
    # pylint: disable=protected-access
    from tensorflow.python.saved_model.load import _RestoredResource as TfRestoredResourceType
    from tensorflow.python.ops.lookup_ops import StaticHashTable as TfStaticHashTableType
    from tensorflow.python.training.tracking.base import Trackable as TfTrackableType
except ImportError:
    TfRestoredResourceType = tuple()  # isinstance(x, tuple()) is always false
    TfStaticHashTableType = tuple()
    TfTrackableType = tuple()
# Alias version-dependent TF symbols once, so the rest of the module is
# version-agnostic. Three cases: TF 2.x, TF 1.13+ (which introduced the
# compat.v1 namespace), and older TF 1.x. Note the branches differ in
# tf_gfile: tf.io.gfile on 2.x vs tf.gfile on 1.x.
if is_tf2():
    convert_variables_to_constants = tf.compat.v1.graph_util.convert_variables_to_constants
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
else:
    from tensorflow.python.framework.graph_util import convert_variables_to_constants
    convert_variables_to_constants_v2 = _not_implemented_tf_placeholder('convert_variables_to_constants_v2')
if is_tf2():
    tf_reset_default_graph = tf.compat.v1.reset_default_graph
    tf_global_variables = tf.compat.v1.global_variables
    tf_session = tf.compat.v1.Session  # pylint: disable=invalid-name
    tf_graphdef = tf.compat.v1.GraphDef
    tf_import_meta_graph = tf.compat.v1.train.import_meta_graph
    tf_gfile = tf.io.gfile
    tf_placeholder = tf.compat.v1.placeholder
    tf_placeholder_with_default = tf.compat.v1.placeholder_with_default
elif Version(tf.__version__) >= Version("1.13"):
    # 1.13 introduced the compat namespace
    tf_reset_default_graph = tf.compat.v1.reset_default_graph
    tf_global_variables = tf.compat.v1.global_variables
    tf_session = tf.compat.v1.Session  # pylint: disable=invalid-name
    tf_graphdef = tf.compat.v1.GraphDef
    tf_import_meta_graph = tf.compat.v1.train.import_meta_graph
    tf_gfile = tf.gfile
    tf_placeholder = tf.compat.v1.placeholder
    tf_placeholder_with_default = tf.compat.v1.placeholder_with_default
else:
    # older than 1.13
    tf_reset_default_graph = tf.reset_default_graph
    tf_global_variables = tf.global_variables
    tf_session = tf.Session  # pylint: disable=invalid-name
    tf_graphdef = tf.GraphDef
    tf_import_meta_graph = tf.train.import_meta_graph
    tf_gfile = tf.gfile
    tf_placeholder = tf.placeholder
    tf_placeholder_with_default = tf.placeholder_with_default
def inputs_without_resource(sess, input_names):
    """Filter out input tensors with dtype resource.

    Best-effort: if the graph lookup fails for any reason, the caller's
    list is returned unchanged.

    Args:
        sess: a tf session whose graph contains the named tensors.
        input_names: list of tensor names (e.g. "x:0").

    Returns:
        input_names with resource-dtype tensors removed, or the original
        list when the lookup fails.
    """
    try:
        new_input_names = []
        for n in input_names:
            t = sess.graph.get_tensor_by_name(n)
            if t.dtype != tf.dtypes.resource:
                new_input_names.append(n)
        input_names = new_input_names
    except Exception:  # pylint: disable=broad-except
        # Deliberate best-effort fallback. A broad Exception (instead of the
        # previous bare except) so KeyboardInterrupt/SystemExit still propagate.
        pass
    return input_names
def convert_variables_to_constants_large_model(func):
    """Freeze a ConcreteFunction's variables into constants without building
    the result graph, sidestepping the 2GB protobuf limit.

    Uses TF-internal APIs that differ per TF version (2.0/2.1, 2.2, and 2.3+),
    so each version gets its own code path below. Returns a GraphDef; the
    returned graph may still need fixing by the caller (see fix_freezing_errors*).
    """
    # For large models we use internal tf methods as a hack
    if tf.__version__.startswith("2.1.") or tf.__version__.startswith("2.0."):
        # TF 2.0/2.1 path: temporarily replace _construct_concrete_function so
        # the frozen GraphDef is returned directly instead of being re-loaded
        # (re-loading crashes on >2GB graphs).
        from tensorflow.python.framework import convert_to_constants
        orig_fn = convert_to_constants._construct_concrete_function  # pylint: disable=protected-access
        def fake_construct_fn(func, output_graph_def, converted_input_indices):
            # Return graph_def without loading it to avoid crash. Will fix errors in graph_def later.
            return output_graph_def
        convert_to_constants._construct_concrete_function = fake_construct_fn  # pylint: disable=protected-access
        try:
            frozen_graph_def = convert_to_constants.convert_variables_to_constants_v2(func, lower_control_flow=False)
        finally:
            # Always restore the patched internal, even on failure.
            convert_to_constants._construct_concrete_function = orig_fn  # pylint: disable=protected-access
        return frozen_graph_def
    if tf.__version__.startswith("2.2."):
        # TF 2.2 path: the _impl variant returns the GraphDef without loading it.
        try:
            from tensorflow.python.framework.convert_to_constants import \
                _convert_variables_to_constants_v2_impl  # pylint: disable=protected-access
        except ImportError:
            _not_implemented_tf_placeholder("_convert_variables_to_constants_v2_impl")()
        frozen_graph_def, _ = \
            _convert_variables_to_constants_v2_impl(func, lower_control_flow=False, aggressive_inlining=True)
        return frozen_graph_def
    # TF 2.3+ path: use the converter-data API.
    try:
        from tensorflow.python.framework.convert_to_constants import \
            _FunctionConverterData, _replace_variables_by_constants  # pylint: disable=protected-access
    except ImportError:
        _not_implemented_tf_placeholder("_replace_variables_by_constants")()
    from tensorflow.python.framework import tensor_util, tensor_shape
    make_tensor_proto_original = tensor_util.make_tensor_proto
    # Hack to avoid 2GB check
    def make_tensor_proto_wrapped(values, dtype=None, shape=None, verify_shape=False, allow_broadcast=False):
        # Delegate to the real implementation; on the "too large" ValueError,
        # build the TensorProto manually with raw tensor_content bytes.
        try:
            return make_tensor_proto_original(values, dtype, shape, verify_shape, allow_broadcast)
        except ValueError:
            if dtype is None:
                dtype = tf.dtypes.as_dtype(values.dtype).as_datatype_enum
            tensor_proto = tensor_pb2.TensorProto(
                dtype=dtype,
                tensor_shape=tensor_shape.as_shape(values.shape).as_proto())
            tensor_proto.tensor_content = values.tobytes()
            return tensor_proto
    tensor_util.make_tensor_proto = make_tensor_proto_wrapped
    try:
        # NOTE(review): this first assignment is dead code — both branches of
        # the if/else below reassign function_converter.
        function_converter = _FunctionConverterData
        if Version(tf.__version__) >= Version("2.6.0"):
            # TF 2.6 split the converter into eager/graph-mode variants.
            from tensorflow.python.eager import context
            from tensorflow.python.framework.convert_to_constants import _FunctionConverterDataInEager, \
                _FunctionConverterDataInGraph
            if context.executing_eagerly():
                function_converter = _FunctionConverterDataInEager
            else:
                function_converter = _FunctionConverterDataInGraph
        else:
            function_converter = _FunctionConverterData
        converter_data = function_converter(func=func, lower_control_flow=False, aggressive_inlining=True)
        frozen_graph_def, _ = _replace_variables_by_constants(converter_data=converter_data)
    finally:
        # Always undo the make_tensor_proto monkey-patch.
        tensor_util.make_tensor_proto = make_tensor_proto_original
    return frozen_graph_def
def fix_freezing_errors(graph_def):
    """Remove AssignVariableOp/AssignSubVariableOp nodes left behind by a
    failed TF freeze, along with control-dependency references to them.

    Args:
        graph_def: a GraphDef, mutated in place.

    Returns:
        The same graph_def, with assignment ops and their "^name" control
        inputs removed.
    """
    assign_var_ops = []
    # Iterate in reverse so pop(i) does not shift indices we have yet to visit.
    for i in reversed(range(len(graph_def.node))):
        if graph_def.node[i].op in ["AssignVariableOp", "AssignSubVariableOp"]:
            # Capture the node before popping: indexing graph_def.node[i] after
            # pop(i) would read the *next* node (or raise IndexError when the
            # removed node was last), logging the wrong op name.
            removed = graph_def.node.pop(i)
            assign_var_ops.append(removed.name)
            logger.warning("Removed %s %s", removed.op, removed.name)
    names_to_remove = set(assign_var_ops)
    # Strip control-dependency inputs ("^name") that referenced removed nodes.
    for n in graph_def.node:
        for i in reversed(range(len(n.input))):
            if n.input[i].startswith("^") and n.input[i][1:] in names_to_remove:
                n.input.pop(i)
    return graph_def
def fix_freezing_errors_part2(graph_def):
    """Rewrite leftover ResourceGather nodes in subgraph functions as Gather.

    TF freezing sometimes fails to convert ResourceGather ops inside the
    function library (https://github.com/tensorflow/tensorflow/issues/51488).
    Freezing will already have replaced the resource with a constant, so a
    standard Gather works; only batch_dims == 0 is supported.
    """
    for library_func in graph_def.library.function:
        for node in library_func.node_def:
            if node.op != "ResourceGather":
                continue
            node.op = "Gather"
            # Gather takes its params dtype via Tparams; drop the resource dtype attr.
            node.attr['Tparams'].type = node.attr['dtype'].type
            del node.attr['dtype']
            if 'batch_dims' in node.attr:
                batch_dims = node.attr['batch_dims'].i
                utils.make_sure(batch_dims == 0,
                                "Unsupported batch_dims value of ResourceGather %d", batch_dims)
                del node.attr['batch_dims']
    return graph_def
def from_trackable(trackable, concrete_func, inputs, outputs, large_model):
    """Freeze a trackable (e.g. a loaded SavedModel) into a GraphDef and
    extract the contents of any hash tables it holds.

    Returns (frozen_graph, initialized_tables) where initialized_tables maps
    a table's shared_name to its exported (keys, values) numpy arrays.
    """
    err_large_model = "model exceeds maximum protobuf size of 2GB. Try setting large_model."
    # Avoid errors due to bug in TF freezing
    removed_resource_to_placeholder, placeholder_to_resource, graph_captures_copy, func_captures_copy = \
        _remove_non_variable_resources_from_captures(concrete_func)
    try:
        frozen_graph = from_function(concrete_func, inputs, outputs, large_model)
    except ValueError as e:
        # Re-raise size errors with a hint to use the large_model flag.
        if any(msg in str(e) for msg in ["exceeds maximum protobuf size of 2GB", "string too long"]):
            raise ValueError(err_large_model)
        raise e
    # We might be returning the concrete_func so let's put it back in working order
    _restore_captured_resources(concrete_func, graph_captures_copy, func_captures_copy)
    table_info = get_hash_table_info(frozen_graph)
    placeholder_to_table_info = {}
    _get_hash_table_info_from_trackable(trackable, table_info,
                                        removed_resource_to_placeholder, placeholder_to_table_info)
    initialized_tables = {}
    for info in table_info:
        if info.shared_name is not None:
            # Table has a shared_name: reconstruct a handle for it directly.
            h = lookup_ops.hash_table_v2(info.key_dtype, info.val_dtype, shared_name=info.shared_name)
            n = info.shared_name
        elif info.resource_input in placeholder_to_resource and info.resource_input not in placeholder_to_table_info:
            # We found a lookup op with no corresponding HashTable op, but we can associate the placeholder input
            # from the op with the resource handle from graph captures and make up a shared_name
            h = placeholder_to_resource[info.resource_input]
            n = str(uuid.uuid4()).encode()
            info.shared_name = n
            placeholder_to_table_info[info.resource_input] = info
        else:
            # Found a lookup op but the corresponding HashTable op has already been found and processed.
            continue
        try:
            # Export the table contents eagerly so the converter can embed them.
            k, v = lookup_ops.lookup_table_export_v2(h, info.key_dtype, info.val_dtype)
            initialized_tables[n] = (k.numpy(), v.numpy())
        except Exception: # pylint: disable=broad-except
            logger.warning("Could not initialize table with shared_name = %r", n)
    for placeholder in removed_resource_to_placeholder.values():
        if placeholder not in placeholder_to_table_info:
            logger.error("Could not find table resource to replace placeholder %s", placeholder)
    replace_placeholders_with_tables(frozen_graph, placeholder_to_table_info)
    return frozen_graph, initialized_tables
def from_function(func, input_names, output_names, large_model=False):
    """Freeze a ConcreteFunction into an optimized GraphDef.

    When large_model is set, the external-storage freezing path is used and
    the GraphDef is returned unoptimized. Otherwise the standard TF freezing
    runs, with a fallback repair path when freezing hits the known
    "incompatible with expected resource" failure.
    """
    if large_model:
        return convert_variables_to_constants_large_model(func)
    try:
        # aggressive_inlining is only available from TF 2.2 on.
        if get_tf_version() < Version("2.2"):
            frozen_func = convert_variables_to_constants_v2(func, lower_control_flow=False)
        else:
            frozen_func = convert_variables_to_constants_v2(func, lower_control_flow=False, aggressive_inlining=True)
    except ValueError as e:
        if "incompatible with expected resource" in str(e):
            # Known TF freezing bug: re-freeze via the large-model path and
            # strip the broken assignment ops from the result.
            bad_graph_def = convert_variables_to_constants_large_model(func)
            logger.warning("TF freezing failed. Attempting to fix freezing errors.")
            graph_def = fix_freezing_errors(bad_graph_def)
        else:
            raise e
    else:
        graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
    graph_def = fix_freezing_errors_part2(graph_def)
    # output_names = [i.name for i in frozen_func.outputs]
    # Re-import into a fresh graph so grappler can optimize the frozen def.
    with tf.Graph().as_default() as tf_graph:
        with tf_session(graph=tf_graph) as sess:
            tf.import_graph_def(graph_def, name='')
            input_names = inputs_without_resource(sess, input_names)
            graph_def = tf_optimize(input_names, output_names, graph_def)
    return graph_def
def freeze_session(sess, input_names=None, output_names=None, get_tables=False):
    """Freezes the state of a session into a pruned computation graph.

    Args:
        sess: the tf session to freeze.
        input_names: tensor names ("name:0") whose nodes must be kept.
        output_names: tensor names to keep as outputs.
        get_tables: when True, also export hash-table contents and return
            (graph_def, initialized_tables) instead of just graph_def.
    """
    # Tensor names like "x:0" -> node names like "x".
    output_node_names = [i.split(':')[0] for i in output_names]
    keep_var_names = [i.split(':')[0] for i in input_names]
    with sess.graph.as_default():
        output_node_names = output_node_names or []
        # Keep all global variables and the requested inputs alive through freezing.
        output_node_names += [v.op.name for v in tf_global_variables()]
        output_node_names += keep_var_names
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        # Clear device placements so the frozen graph is device-independent.
        for node in graph_def.node:
            node.device = ""
        graph_def = convert_variables_to_constants(sess, graph_def, output_node_names)
        table_info = get_hash_table_info(graph_def)
        if get_tables:
            initialized_tables = {}
            tf.tables_initializer().run(session=sess)
            for info in table_info:
                if info.shared_name is None:
                    continue
                # Rebuild a handle by shared_name and export the table contents.
                h = lookup_ops.hash_table_v2(info.key_dtype, info.val_dtype, shared_name=info.shared_name)
                n = info.shared_name
                try:
                    k, v = lookup_ops.lookup_table_export_v2(h, info.key_dtype, info.val_dtype)
                    k, v = sess.run([k, v])
                    initialized_tables[n] = (k, v)
                except Exception: # pylint: disable=broad-except
                    logger.warning("Could not initialize table with shared_name = %r", n)
            return graph_def, initialized_tables
    return graph_def
def remove_redundant_inputs(frozen_graph, input_names):
    """Remove redundant inputs not in frozen graph.

    Args:
        frozen_graph: a GraphDef whose node names define which inputs exist.
        input_names: list of tensor names (e.g. "x:0") requested as inputs.

    Returns:
        The subset of input_names whose underlying node is present in
        frozen_graph; dropped names are logged with a warning.
    """
    # get inputs in frozen graph (removed dead `frozen_inputs = []` that was
    # immediately overwritten by the comprehension below)
    node_names = set(n.name for n in frozen_graph.node)
    frozen_inputs = [inp for inp in input_names if utils.node_name(inp) in node_names]
    deleted_inputs = list(set(input_names) - set(frozen_inputs))
    if deleted_inputs:
        logger.warning("inputs [%s] is not in frozen graph, delete them", ",".join(deleted_inputs))
    return frozen_inputs
def from_graphdef(model_path, input_names, output_names):
    """Load tensorflow graph from graphdef.

    Reads a binary GraphDef file (also tolerating a SavedModel protobuf),
    freezes it against a fresh session, prunes inputs not present in the
    frozen graph, and runs the TF optimizer on the result.

    Returns (frozen_graph, input_names, output_names).
    """
    # make sure we start with clean default graph
    tf_reset_default_graph()
    with tf_session() as sess:
        graph_def = tf_graphdef()
        with tf_gfile.GFile(model_path, 'rb') as f:
            try:
                content = f.read()
            except Exception as e:
                raise OSError(
                    "Unable to load file '{}'.".format(model_path)) from e
            try:
                graph_def.ParseFromString(content)
            except DecodeError:
                # Not a bare GraphDef; retry as a serialized SavedModel and
                # take the graph from its first meta graph.
                content_as_bytes = compat.as_bytes(content)
                saved_model = saved_model_pb2.SavedModel()
                saved_model.ParseFromString(content_as_bytes)
                graph_def = saved_model.meta_graphs[0].graph_def
            except Exception as e:
                raise RuntimeError(
                    "Unable to parse file '{}'.".format(model_path)) from e
        tf.import_graph_def(graph_def, name='')
        input_names = inputs_without_resource(sess, input_names)
        frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
        input_names = remove_redundant_inputs(frozen_graph, input_names)
    # Optimize in a fresh session/graph to avoid leaking state.
    tf_reset_default_graph()
    with tf_session() as sess:
        input_names = inputs_without_resource(sess, input_names)
        frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names
def from_checkpoint(model_path, input_names, output_names):
    """Load tensorflow graph from checkpoint.

    model_path points at the ".meta" file of a TF checkpoint; the weights
    are restored from the matching checkpoint prefix. Returns
    (frozen_graph, input_names, output_names).
    """
    # make sure we start with clean default graph
    tf_reset_default_graph()
    # model_path = checkpoint/checkpoint.meta
    with tf.device("/cpu:0"):
        with tf_session() as sess:
            saver = tf_import_meta_graph(model_path, clear_devices=True)
            # restore from model_path minus the ".meta"
            saver.restore(sess, model_path[:-5])
            input_names = inputs_without_resource(sess, input_names)
            frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
            input_names = remove_redundant_inputs(frozen_graph, input_names)
        # Optimize in a fresh session so checkpoint state doesn't leak in.
        tf_reset_default_graph()
        with tf_session() as sess:
            frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names
def _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signature_names, use_graph_names):
    """Load tensorflow graph from saved_model.

    TF1-style loader: pulls input/output tensor names from the requested
    signature defs (all non-internal signatures when none are given), freezes
    the session, and exports any hash tables.

    Returns (frozen_graph, input_names, output_names, initialized_tables,
    tensors_to_rename) where tensors_to_rename maps graph tensor names to
    their structured signature names (empty when use_graph_names is set).
    """
    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tags = []"
    wrn_empty_sig = "'--signature_def' not provided. Using all signatures."
    if tag is None:
        tag = [tf.saved_model.tag_constants.SERVING]
        logger.warning(wrn_no_tag)
    if not signature_names:
        logger.warning(wrn_empty_sig)
    if tag == '':
        tag = []
        logger.warning(wrn_empty_tag)
    if not isinstance(tag, list):
        tag = [tag]
    imported = tf.saved_model.loader.load(sess, tag, model_path)
    signatures = []
    for k in imported.signature_def.keys():
        # Without explicit signature_names, take every signature that is not
        # internal (internal ones start with "_").
        if k in signature_names or (not signature_names and not k.startswith("_")):
            signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k]
    tensors_to_rename = {}
    if input_names is None:
        input_names = []
        for k in signatures:
            inputs_tensor_info = get_signature_def(imported, k).inputs
            for structured_name, input_tensor in inputs_tensor_info.items():
                if input_tensor.name not in input_names:
                    input_names.append(input_tensor.name)
                    if not use_graph_names:
                        tensors_to_rename[input_tensor.name] = structured_name
    if output_names is None:
        output_names = []
        for k in signatures:
            outputs_tensor_info = get_signature_def(imported, k).outputs
            for structured_name, output_tensor in outputs_tensor_info.items():
                if output_tensor.name not in output_names:
                    output_names.append(output_tensor.name)
                    if not use_graph_names:
                        tensors_to_rename[output_tensor.name] = structured_name
    frozen_graph, initialized_tables = \
        freeze_session(sess, input_names=input_names, output_names=output_names, get_tables=True)
    return frozen_graph, input_names, output_names, initialized_tables, tensors_to_rename
def _get_hash_table_info_from_trackable(trackable, table_info,
                                        removed_resource_to_placeholder, placeholder_to_table_info):
    """Walk a trackable object graph collecting hash-table metadata.

    Appends HashTableInfo entries to table_info for every StaticHashTable or
    restored table resource found, and fills placeholder_to_table_info for
    table handles that were previously swapped out for placeholders
    (see _remove_non_variable_resources_from_captures).
    Mutates table_info and placeholder_to_table_info in place; returns None.
    """
    # pylint: disable=protected-access
    # Iterative DFS over the checkpoint-dependency graph.
    stack = [trackable]
    visited = set()
    while stack:
        r = stack.pop()
        visited.add(id(r))
        try:
            for trackable_ref in r._checkpoint_dependencies:
                if id(trackable_ref.ref) not in visited:
                    if isinstance(trackable_ref.ref, TfTrackableType):
                        stack.append(trackable_ref.ref)
        except Exception: # pylint: disable=broad-except
            # Objects without usable _checkpoint_dependencies are skipped entirely.
            continue
        # Case 1: a StaticHashTable held directly as an attribute.
        for t in r.__dict__.values() if hasattr(r, '__dict__') else []:
            if isinstance(t, TfStaticHashTableType) and hasattr(t, '_shared_name'):
                info = HashTableInfo(t._shared_name.encode(), t.key_dtype.as_datatype_enum,
                                     t.value_dtype.as_datatype_enum)
                table_info.append(info)
                table_handle = id(t.resource_handle)
                if table_handle in removed_resource_to_placeholder:
                    placeholder_to_table_info[removed_resource_to_placeholder[table_handle]] = info
        # Case 2: a restored resource whose _create_resource function builds the table.
        if isinstance(r, TfRestoredResourceType) and hasattr(r, '_create_resource'):
            try:
                table_handle = id(r.resource_handle)
            except Exception: # pylint: disable=broad-except
                continue
            initializer = r._create_resource.concrete_functions[0].function_def
            new_table_info = get_hash_table_info(initializer.node_def)
            table_info.extend(new_table_info)
            # Only associate the placeholder when the mapping is unambiguous.
            if table_handle in removed_resource_to_placeholder and len(new_table_info) == 1:
                placeholder_to_table_info[removed_resource_to_placeholder[table_handle]] = new_table_info[0]
def _remove_non_variable_resources_from_captures(concrete_func):
    """
    Removes all non-variable resources (such as tables) from a function's captured inputs to prevent tf from
    raising a 'cannot convert dtype resource to numpy' error while freezing the graph.

    Returns (resource_id_to_placeholder, placeholder_to_resource,
    graph_captures_copy, func_captures_copy); the two copies let the caller
    undo the mutation via _restore_captured_resources.
    """
    # pylint: disable=protected-access
    resource_id_to_placeholder = {}
    placeholder_to_resource = {}
    graph_captures_copy = None
    func_captures_copy = None
    if hasattr(concrete_func.graph, '_captures') and hasattr(concrete_func, '_captured_inputs'):
        # Snapshot both capture structures so the caller can restore them later.
        graph_captures_copy = concrete_func.graph._captures.copy()
        func_captures_copy = concrete_func._captured_inputs.copy()
        variable_handles = {id(v.handle) for v in concrete_func.graph.variables}
        for k, v in list(concrete_func.graph._captures.items()):
            val_tensor, name_tensor = v
            if val_tensor.dtype == tf.resource and id(val_tensor) not in variable_handles:
                # Non-variable resource: remember it and drop it from the captures.
                resource_id_to_placeholder[id(val_tensor)] = name_tensor.name.split(':')[0]
                placeholder_to_resource[name_tensor.name.split(':')[0]] = val_tensor
                del concrete_func.graph._captures[k]
                for i in reversed(range(len(concrete_func._captured_inputs))):
                    if concrete_func._captured_inputs[i] is val_tensor:
                        concrete_func._captured_inputs.pop(i)
            elif val_tensor.dtype != tf.resource:
                npval = val_tensor.numpy()
                if not hasattr(npval, 'dtype'):
                    # Hack around a TF bug until PR is merged: https://github.com/tensorflow/tensorflow/pull/45610
                    arr = np.array(npval)
                    val_tensor.numpy = lambda arr=arr: arr
    else:
        logger.warning(
            "Could not search for non-variable resources. Concrete function internal representation may have changed.")
    return resource_id_to_placeholder, placeholder_to_resource, graph_captures_copy, func_captures_copy
def _restore_captured_resources(concrete_func, graph_captures_copy, func_captures_copy):
"""Undoes effect of _remove_non_variable_resources_from_captures on concrete_func"""
# pylint: disable=protected-access
if hasattr(concrete_func.graph, '_captures') and hasattr(concrete_func, '_captured_inputs'):
concrete_func.graph._captures = graph_captures_copy
concrete_func._captured_inputs = func_captures_copy
def _from_saved_model_v2(model_path, input_names, output_names, tag, signature_def,
                         concrete_function_index, large_model, use_graph_names):
    """Load tensorflow graph from saved_model.

    TF2-style loader: selects a concrete function either by index into
    __call__.concrete_functions, by a single signature name, or defaults to
    the first non-internal signature, then freezes it via from_trackable.

    Returns (frozen_graph, inputs, outputs, concrete_func, imported,
    initialized_tables, tensors_to_rename).
    """
    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"
    wrn_sig_1 = "'--signature_def' not specified, using first signature: %s"
    err_many_sig = "Cannot load multiple signature defs in TF2.x: %s"
    err_no_call = "Model doesn't contain usable concrete functions under __call__. Try --signature-def instead."
    err_index = "Invalid concrete_function value: %i. Valid values are [0 to %i]"
    err_no_sig = "No signatures found in model. Try --concrete_function instead."
    err_sig_nomatch = "Specified signature not in model %s"
    if tag is None:
        tag = ['serve']
        logger.warning(wrn_no_tag)
    if tag == '':
        tag = [[]]
        logger.warning(wrn_empty_tag)
    utils.make_sure(len(signature_def) < 2, err_many_sig, str(signature_def))
    imported = tf.saved_model.load(model_path, tags=tag) # pylint: disable=no-value-for-parameter
    all_sigs = imported.signatures.keys()
    # Signatures starting with "_" are internal and are not offered to the user.
    valid_sigs = [s for s in all_sigs if not s.startswith("_")]
    logger.info("Signatures found in model: %s", "[" + ",".join(valid_sigs) + "].")
    concrete_func = None
    if concrete_function_index is not None:
        # Explicit concrete-function selection by index.
        utils.make_sure(hasattr(imported, "__call__"), err_no_call)
        utils.make_sure(concrete_function_index < len(imported.__call__.concrete_functions),
                        err_index, concrete_function_index, len(imported.__call__.concrete_functions) - 1)
        args, kwargs = imported.__call__.concrete_functions[concrete_function_index].structured_input_signature
        concrete_func = imported.__call__.get_concrete_function(*args, **kwargs)
    elif signature_def:
        utils.make_sure(signature_def[0] in valid_sigs, err_sig_nomatch, signature_def[0])
        concrete_func = imported.signatures[signature_def[0]]
    else:
        # Default: first non-internal signature.
        utils.make_sure(len(valid_sigs) > 0, err_no_sig)
        logger.warning(wrn_sig_1, valid_sigs[0])
        concrete_func = imported.signatures[valid_sigs[0]]
    tensors_to_rename = {}
    if input_names is None:
        inputs = [tensor.name for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource]
        # Captured tensors are internal state, not user-facing inputs.
        graph_captures = concrete_func.graph._captures # pylint: disable=protected-access
        captured_inputs = [t_name.name for _, t_name in graph_captures.values()]
        inputs = [inp for inp in inputs if inp not in captured_inputs]
        if concrete_func.structured_input_signature is not None and not use_graph_names:
            flat_structured_inp = tf.nest.flatten(concrete_func.structured_input_signature)
            structured_inputs = [t.name for t in flat_structured_inp if isinstance(t, tf.TensorSpec)]
            tensors_to_rename.update(zip(inputs, structured_inputs))
    else:
        inputs = input_names
    if output_names is None:
        outputs = [tensor.name for tensor in concrete_func.outputs if tensor.dtype != tf.dtypes.resource]
        if isinstance(concrete_func.structured_outputs, dict) and not use_graph_names:
            # outputs are sorted, sort structured_outputs the same way
            structured_outputs = sorted(concrete_func.structured_outputs.keys())
            tensors_to_rename.update(zip(outputs, structured_outputs))
            logger.info("Output names: %r", structured_outputs)
        else:
            logger.info("Output names: %r", outputs)
    else:
        outputs = output_names
    frozen_graph, initialized_tables = from_trackable(imported, concrete_func, inputs, outputs, large_model)
    return frozen_graph, inputs, outputs, concrete_func, imported, initialized_tables, tensors_to_rename
def from_saved_model(model_path, input_names, output_names, tag=None,
                     signatures=None, concrete_function=None, large_model=False,
                     return_concrete_func=False, return_initialized_tables=False,
                     return_tensors_to_rename=False, use_graph_names=False):
    """Load tensorflow graph from saved_model.

    Dispatches to the TF2 or TF1 loader depending on the installed TF.
    Always returns [frozen_graph, input_names, output_names]; the return_*
    flags append further items in the order: concrete_func+imported (TF2
    only), initialized_tables, tensors_to_rename.
    """
    if signatures is None:
        signatures = []
    tf_reset_default_graph()
    with tf.device("/cpu:0"):
        if is_tf2():
            frozen_graph, input_names, output_names, concrete_func, imported, initialized_tables, tensors_to_rename = \
                _from_saved_model_v2(model_path, input_names, output_names,
                                     tag, signatures, concrete_function, large_model, use_graph_names)
            result = [frozen_graph, input_names, output_names]
            if return_concrete_func:
                result += [concrete_func, imported]
            if return_initialized_tables:
                result += [initialized_tables]
            if return_tensors_to_rename:
                result += [tensors_to_rename]
        else:
            with tf_session() as sess:
                frozen_graph, input_names, output_names, initialized_tables, tensors_to_rename = \
                    _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signatures, use_graph_names)
                result = [frozen_graph, input_names, output_names]
                if return_initialized_tables:
                    result += [initialized_tables]
                if return_tensors_to_rename:
                    result += [tensors_to_rename]
    tf_reset_default_graph()
    return result
def from_keras(model_path, input_names, output_names):
    """Load keras model - experimental for now.

    Loads a Keras model from model_path and freezes it; takes the eager path
    (trace + freeze a concrete function) or the graph-mode path (freeze the
    Keras backend session) depending on the current execution mode.
    Returns (frozen_graph, input_names, output_names).
    """
    from tensorflow.python import keras as _keras
    from tensorflow.python.eager import context
    from tensorflow.python.keras.saving import saving_utils as _saving_utils
    # Handles Keras when Eager mode is enabled.
    custom_objects = None
    with tf.device("/cpu:0"):
        if context.executing_eagerly():
            _keras.backend.clear_session()
            _keras.backend.set_learning_phase(False)
            keras_model = _keras.models.load_model(model_path, custom_objects)
            # Trace the model into a concrete function we can freeze.
            function = _saving_utils.trace_model_call(keras_model)
            concrete_func = function.get_concrete_function()
            # allow to pass inputs and outputs from caller if we don't want all of them
            input_names = [input_tensor.name for input_tensor in concrete_func.inputs
                           if input_tensor.dtype != tf.dtypes.resource]
            output_names = [output_tensor.name for output_tensor in concrete_func.outputs
                            if output_tensor.dtype != tf.dtypes.resource]
            frozen_graph = from_function(concrete_func, input_names, output_names)
        else:
            # Handles Keras when Eager mode is disabled.
            _keras.backend.clear_session()
            _keras.backend.set_learning_phase(False)
            keras_model = _keras.models.load_model(model_path, custom_objects)
            # allow to pass inputs and outputs from caller if we don't want all of them
            input_names = keras_model.inputs
            output_names = keras_model.outputs
            sess = _keras.backend.get_session()
            input_names = inputs_without_resource(sess, input_names)
            frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
            # Optimize in a fresh session after freezing.
            tf_reset_default_graph()
            with tf_session() as sess:
                frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names
def tf_optimize_grappler(input_names, output_names, graph_def):
    """Optimize graph_def with TF's grappler (constant folding + function
    inlining), keeping the given input/output nodes alive via a fetch
    collection. Returns the optimized GraphDef.
    """
    from tensorflow.core.protobuf import meta_graph_pb2 as meta_graph_pb2, config_pb2, rewriter_config_pb2
    from tensorflow.python.grappler import tf_optimizer as tf_opt
    config = config_pb2.ConfigProto()
    rewrite_options = config.graph_options.rewrite_options
    config.graph_options.infer_shapes = True
    # TODO: if we turn on pruning, grappler removes some identities that the tf-1.x lstm rewriter
    # depends on so for now don't turn this on, constfold is always enabled now.
    rewrite_options.optimizers[:] = [
        # 'pruning', 'constfold', 'arithmetic', 'dependency', 'function',
        'constfold', 'function'
    ]
    if is_tf2():
        # add for tf2.x lstm optimization.
        rewrite_options.optimizers.append('dependency')
    if Version(tf.__version__) >= Version("2.5"):
        # This flag disables folding QDQ nodes around constants in the network (eg: around conv/FC weights)
        rewrite_options.experimental_disable_folding_quantization_emulation = True
    meta_graph = tf.compat.v1.train.export_meta_graph(graph_def=graph_def)
    # Register inputs/outputs as fetches so grappler does not prune them away.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for t in input_names + output_names:
        fetch_collection.node_list.value.append(t)
    meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
    graph_def = tf_opt.OptimizeGraph(config, meta_graph)
    return graph_def
def tf_optimize(input_names, output_names, graph_def):
    """Extract inference subgraph and optimize graph."""
    assert isinstance(input_names, list)
    assert isinstance(output_names, list)
    # grappler is available on tf >= 1.15 and on every tf2 build
    if is_tf2() or Version(tf.__version__) >= Version("1.15"):
        return tf_optimize_grappler(input_names, output_names, graph_def)
    # the older transform path
    from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=redefined-outer-name
    transforms = [
        "fold_constants(ignore_errors=true)",
        "remove_attribute(attribute_name=_class)", # remove node colocation attributes
        "fold_batch_norms",
        "fold_old_batch_norms",
    ]
    return TransformGraph(graph_def, input_names, output_names, transforms)
def tf_reload_graph(tf_graph):
    """Invoke tensorflow cpp shape inference by reloading graph_def."""
    if get_tf_version() < Version("1.8"):
        # pre-1.8 the python graph builder does not run C++ shape inference by default
        logger.debug(
            "On TF < 1.8, graph is constructed by python API, "
            "which doesn't invoke shape inference, please set "
            "TF_C_API_GRAPH_CONSTRUCTION=1 to enable it"
        )
    serialized = tf_graph.as_graph_def(add_shapes=True)
    with tf.Graph().as_default() as reloaded_graph:
        tf.import_graph_def(serialized, name="")
    return reloaded_graph
def is_function(g):
    """Return True when *g* is a tf2 FuncGraph, detected via its type name."""
    if not is_tf2():
        return False
    return 'tensorflow.python.framework.func_graph.FuncGraph' in str(type(g))
# Module-level cache: function name -> resolved tf function graph.
# Populated by resolve_functions/set_function, read via find_function.
_FUNCTIONS = {}
def resolve_functions(tf_graph):
    """Resolve all tf functions reachable from *tf_graph*.

    Converts each FunctionDef to a function graph (using the caller's input
    shapes when available), caches it in _FUNCTIONS, and returns the graphs
    topologically sorted so that callees come before their callers.
    """
    def toposort(data):
        # Kahn-style topological sort over {name: set(callee names)};
        # yields batches of names whose dependencies were already emitted.
        while True:
            ordered = set(item for item, dep in data.items() if not dep)
            if not ordered:
                break
            yield ordered
            data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered}
    # collect per-function input shapes observed at the call sites in the main graph
    _, _, _, _, _, functions = tflist_to_onnx(tf_graph, {})
    data = {}
    for k, fdef in tf_graph._functions.items(): # pylint: disable=protected-access
        input_shapes = functions.get(k)
        fdef = fdef.definition
        # the caller may pass more tensors than the signature declares; truncate
        if input_shapes and len(fdef.signature.input_arg) < len(input_shapes):
            input_shapes = input_shapes[:len(fdef.signature.input_arg)]
        try:
            func = function_def_to_graph(fdef, input_shapes=input_shapes)
        except: # pylint: disable=bare-except
            # if there is a mismatch between caller and function use the functions shape
            logger.warning("shape mismatch between caller and function: %s", k)
            func = function_def_to_graph(fdef)
        _FUNCTIONS[k] = func
        # record which functions this function calls, for the toposort below
        _, _, _, _, _, tfunctions = tflist_to_onnx(func, {})
        functions.update(tfunctions)
        data[k] = set(tfunctions.keys())
    result = []
    for d in toposort(data):
        result.extend(list(d))
    return [_FUNCTIONS[k] for k in result]
def set_function(name, func):
    """Register (or overwrite) *func* under *name* in the module-level function cache."""
    _FUNCTIONS[name] = func
def find_function(name):
    """Return the cached function graph for *name*, or None if not registered."""
    return _FUNCTIONS.get(name)
def clear_functions():
    """Empty the module-level function-graph cache."""
    _FUNCTIONS.clear()
# SPDX-License-Identifier: Apache-2.0
"""
math
"""
import logging
import numpy as np
from onnx import onnx_pb
from tf2onnx import constants, utils
from tf2onnx.handler import tf_op
from tf2onnx.onnx_opset import common
from tf2onnx.graph_builder import GraphBuilder
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring
@tf_op(["Add", "AddV2", "Div", "Mul", "Sub"])
class BroadcastOp(common.BroadcastOp):
    """Elementwise binary ops; conversion handled entirely by common.BroadcastOp."""
    pass
@tf_op(["RealDiv", "TruncateDiv"], onnx_op="Div")
class RealDiv(common.BroadcastOp):
    """tf RealDiv/TruncateDiv map to onnx Div; broadcasting via common.BroadcastOp."""
    pass
@tf_op(["LeakyRelu", "Softplus", "Softsign"])
class DirectOpSinceOpset1:
    """Ops mapped 1:1 onto the identically named onnx op since opset 1."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # no rewrite performed; the op name carries over unchanged
        pass
@tf_op(["Abs", "Ceil", "Elu", "Exp", "Floor", "Log", "Neg", "Relu", "Sigmoid", "Sqrt",
        "Tanh", "Reciprocal"])
class DirectOp:
    """Unary ops with a direct onnx counterpart (plus a Log-on-double workaround)."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # direct mapping; nothing to rewrite
        pass
    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        if node.type == "Log":
            # ORT doesn't implement Log on doubles
            double_to_float = {onnx_pb.TensorProto.DOUBLE: onnx_pb.TensorProto.FLOAT}
            dtypes = node.output_dtypes
            if node.maybe_cast_input([[onnx_pb.TensorProto.FLOAT]], double_to_float):
                # input was down-cast; cast the Log result back to the original dtype
                cast_back_node = ctx.insert_new_node_on_output(
                    "Cast", node.output[0], name=utils.make_name(node.name + "_castback"),
                    to=dtypes[0])
                ctx.set_dtype(cast_back_node.output[0], dtypes[0])
                ctx.copy_shape(node.name, cast_back_node.output[0])
                ctx.copy_dtype(node.input[0], node.output[0])
@tf_op(["Acos", "Asin", "Atan", "Cos", "Sin", "Tan"])
class TrigOpSinceOpset7:
    """Trig ops available natively in onnx since opset 7."""
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # direct mapping; nothing to rewrite
        pass
@tf_op(["Acosh", "Asinh", "Atanh", "Cosh", "Sinh"])
class TrigOpSinceOpset9:
    """Hyperbolic/inverse-hyperbolic ops available natively in onnx since opset 9."""
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # direct mapping; nothing to rewrite
        pass
@tf_op(["Prelu"], onnx_op="PRelu")
class Prelu:
    """tf Prelu maps directly to onnx PRelu."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # direct mapping; nothing to rewrite
        pass
def make_min_or_max_op(ctx, op_type, inputs, outputs,
                       output_shapes=None, output_dtypes=None):
    """Build an onnx Min/Max over *inputs*.

    Works around two old-opset limitations: non-float inputs are cast to float
    and the result cast back, and for opset < 8 (no broadcasting in Min/Max)
    inputs are explicitly broadcast via a zero-tensor Add trick.
    Returns the final (Identity) node wired to *outputs*.
    """
    # support more dtype
    supported_dtypes = [
        onnx_pb.TensorProto.FLOAT,
        onnx_pb.TensorProto.FLOAT16,
        onnx_pb.TensorProto.DOUBLE
    ]
    target_dtype = onnx_pb.TensorProto.FLOAT
    need_cast = False
    cast_inputs = []
    for inp in inputs:
        dtype = ctx.get_dtype(inp)
        utils.make_sure(dtype is not None, "dtype of {} is None".format(inp))
        if dtype not in supported_dtypes:
            cast_inp = ctx.make_node("Cast", [inp], attr={"to": target_dtype})
            cast_inputs.append(cast_inp.output[0])
            need_cast = True
        else:
            cast_inputs.append(inp)
    node = ctx.make_node(op_type, cast_inputs, shapes=output_shapes)
    actual_outputs = node.output
    if need_cast:
        # cast the result back to the original dtype (first input, unless overridden)
        origin_dtype = ctx.get_dtype(inputs[0])
        if output_dtypes is not None:
            origin_dtype = output_dtypes[0]
        ctx.set_dtype(node.output[0], target_dtype)
        cast_name = utils.make_name(node.name)
        cast_node = ctx.insert_new_node_on_output("Cast", node.output[0], name=cast_name, to=origin_dtype)
        ctx.set_dtype(cast_node.output[0], origin_dtype)
        ctx.copy_shape(node.output[0], cast_node.output[0])
        actual_outputs = cast_node.output
    final_node = ctx.make_node("Identity", actual_outputs, outputs=outputs,
                               shapes=output_shapes, dtypes=output_dtypes)
    # tensorflow minimum/maximum does support broadcast, onnx < opset 8 does not.
    # handle this by doing something like:
    # y = min(x1, add(x2, sub(x1, x1))), where x1, x2 are the inputs and x2 is a scalar
    # this will create a tensor of zeros of the shape of x1, adds x2 to it (which broadcasts) and use that for min.
    shapeo = ctx.get_shape(node.output[0])
    needs_broadcast_op = []
    has_correct_shape = []
    if ctx.opset < 8:
        for i, input_name in enumerate(node.input):
            if ctx.get_shape(input_name) != shapeo:
                needs_broadcast_op.append(i)
            else:
                has_correct_shape.append(input_name)
    if needs_broadcast_op:
        has_correct_shape = has_correct_shape[0]
        for i in needs_broadcast_op:
            input_node = node.inputs[i]
            # get a tensor with zeros (since there is no Fill op as of opset8)
            sub_node = ctx.make_node("Sub", [has_correct_shape, has_correct_shape],
                                     op_name_scope=input_node.name)
            # use add as 'broadcast' op
            add_node = ctx.make_node("Add", [input_node.output[0], sub_node.output[0]],
                                     op_name_scope=input_node.name)
            ctx.replace_input(node, node.input[i], add_node.output[0], i)
    return final_node
@tf_op("Minimum", onnx_op="Min")
@tf_op("Maximum", onnx_op="Max")
class MinMaxOp:
    """tf Minimum/Maximum -> onnx Min/Max, with old-opset workarounds."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # rebuild the node through the helper which inserts casts/broadcast fixes
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        make_min_or_max_op(ctx, node.type, node.input, node.output, shapes, dtypes)
    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        pass # support all numeric types and broadcasting
@tf_op("ClipByValue")
class ClipByValueOp:
    # in tf-1.8 there was a ClipByValue op which in later versions was replaced by max(min(x, a), b)
    # To support models generated with tf-1.8 rewrite the tf ClipByValue op to max(min(x, a), b)
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        # Min/Max at this opset only handle float types; cast data/min/max as needed
        supported = [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]
        # fetch those upfront since they are not accessible once we remove 'node'
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        input_dtype = ctx.get_dtype(node.input[0])
        name = node.name
        min_node = node.input[1]
        if ctx.get_dtype(min_node) not in supported:
            # cast min if needed
            min_node = ctx.insert_new_node_on_input(node, "Cast", min_node, to=onnx_pb.TensorProto.FLOAT).output[0]
        max_node = node.input[2]
        if ctx.get_dtype(max_node) not in supported:
            # cast max if needed
            max_node = ctx.insert_new_node_on_input(node, "Cast", max_node, to=onnx_pb.TensorProto.FLOAT).output[0]
        ctx.remove_node(name)
        # max(x, clip_min) ...
        new_node = ctx.make_node("Max", [node.input[0], min_node], outputs=[node.output[0]],
                                 shapes=shapes, dtypes=dtypes)
        if input_dtype not in supported:
            # cast the data tensor if needed
            ctx.insert_new_node_on_input(new_node, "Cast", new_node.input[0], to=onnx_pb.TensorProto.FLOAT)
        # ... then min(result, clip_max)
        new_node = ctx.insert_new_node_on_output("Min", new_node.output[0], name=utils.make_name(name))
        new_node.input.append(max_node)
        # copy shape and type
        ctx.set_dtype(new_node.output[0], dtypes[0])
        ctx.set_shape(new_node.output[0], shapes[0])
        if dtypes[0] not in supported:
            # cast output if needed
            new_node = ctx.insert_new_node_on_output("Cast", new_node.output[0],
                                                     name=utils.make_name(name), to=dtypes[0])
            # copy shape and type
            ctx.set_dtype(new_node.output[0], dtypes[0])
            ctx.set_shape(new_node.output[0], shapes[0])
    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        node.type = 'Clip' # clip supports all types now
@tf_op(["LogSoftmax", "Softmax"])
class Softmax:
    """tf (Log)Softmax -> onnx; tf reduces over the last axis, onnx defaults differ."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # T output = Softmax(T logits). The axis softmax would be performed on is always on -1.
        # T output = Softmax(T input, @int axis). Default axis is 1.
        axis = node.get_attr_value("axis")
        if axis is None:
            # by default use the last dim
            axis = len(ctx.get_shape(node.input[0])) - 1
        node.set_attr("axis", axis)
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        cls.version_1(ctx, node, **kwargs)
    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Default axis is now -1.
        pass
@tf_op("Square")
class Square:
    """Square(x) -> Mul(x, x); onnx has no Square op."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        node.type = "Mul"
        # feed the single input twice so the Mul squares it
        node.input.append(node.input[0])
@tf_op("Relu6")
class Relu6:
    """Relu6 -> Clip(x, 0, 6)."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # relu6 = min(max(features, 0), 6)
        # pre-opset-11 Clip carries min/max as attributes
        node.type = "Clip"
        node.set_attr("min", 0.0)
        node.set_attr("max", 6.0)
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # since opset 11 Clip takes min and max as inputs, typed like the data input
        node.type = "Clip"
        onnx_dtype = ctx.get_dtype(node.input[0])
        np_dtype = utils.ONNX_TO_NUMPY_DTYPE[onnx_dtype]
        clip_min = ctx.make_const(utils.make_name("{}_min".format(node.name)), np.array(0.0, dtype=np_dtype))
        clip_max = ctx.make_const(utils.make_name("{}_max".format(node.name)), np.array(6.0, dtype=np_dtype))
        node.input.append(clip_min.output[0])
        node.input.append(clip_max.output[0])
@tf_op("Rsqrt")
class Rsqrt:
    """Rsqrt(x) -> Reciprocal(Sqrt(x))."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        node.type = "Sqrt"
        op_name = utils.make_name(node.name)
        # append a Reciprocal consuming the Sqrt result
        reciprocal = ctx.insert_new_node_on_output("Reciprocal", node.output[0], name=op_name)
        ctx.copy_shape(node.output[0], reciprocal.output[0])
@tf_op("SquaredDifference")
class SquaredDifference:
    """SquaredDifference(x, y) -> Mul(Sub(x, y), Sub(x, y))."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        node.type = "Sub"
        op_name = utils.make_name(node.name)
        node_out = node.output[0]
        # square the difference by multiplying the Sub output with itself
        ctx.insert_new_node_on_output("Mul", node_out, inputs=[node_out, node_out], name=op_name)
@tf_op("Sign")
class Sign:
    """tf Sign; emulated pre-opset-9 as cast(x > 0) - cast(x < 0)."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Sign op."""
        # T sign = Sign(T Input)
        node_dtype = ctx.get_dtype(node.output[0])
        utils.make_sure(node_dtype, "Dtype of {} is None".format(node.name))
        if node_dtype in [onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]:
            raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now")
        zero_name = utils.make_name("{}_zero".format(node.name))
        ctx.make_const(zero_name, np.array(0, dtype=np.float32))
        if node_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]:
            # Greater/Less need float inputs at this opset; compare in float
            cast_node_0 = ctx.make_node("Cast", [node.input[0]], {"to": onnx_pb.TensorProto.FLOAT})
            greater_node = ctx.make_node("Greater", [cast_node_0.output[0], zero_name])
            less_node = ctx.make_node("Less", [cast_node_0.output[0], zero_name])
        else:
            greater_node = ctx.make_node("Greater", [node.input[0], zero_name])
            less_node = ctx.make_node("Less", [node.input[0], zero_name])
        # bool -> original dtype, then (x>0) - (x<0) gives -1/0/+1
        cast_node_1 = ctx.make_node("Cast", [greater_node.output[0]], {"to": node_dtype})
        cast_node_2 = ctx.make_node("Cast", [less_node.output[0]], {"to": node_dtype})
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node("Sub", [cast_node_1.output[0], cast_node_2.output[0]], outputs=[node.output[0]],
                      shapes=shapes, dtypes=dtypes)
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # native onnx Sign; only reject dtypes onnx cannot handle
        node_dtype = ctx.get_dtype(node.output[0])
        utils.make_sure(node_dtype, "dtype of {} is None".format(node.name))
        if node_dtype in [onnx_pb.TensorProto.BOOL, onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]:
            raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now")
@tf_op("Pow")
class Pow:
    """tf Pow -> onnx Pow, with an exp(log(a)*b) fallback for the caffe2 target."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        if ctx.is_target(constants.TARGET_CAFFE2):
            # workaround a bug in caffe2 pre Feb2018, pow(a, b) becomes np.exp(np.log(a) * b)
            node.type = "Log"
            b = node.input[1]
            ctx.remove_input(node, node.input[1], 1)
            op_name = utils.make_name(node.name)
            mul_op = ctx.insert_new_node_on_output("Mul", node.output[0], name=op_name)
            mul_op.input.append(b)
            op_name = utils.make_name(node.name)
            exp_op = ctx.insert_new_node_on_output("Exp", mul_op.output[0], name=op_name)
            ctx.copy_shape(node.output[0], exp_op.output[0])
            # let the shared broadcast logic fix up the Mul we just inserted
            BroadcastOp.version_1(ctx, mul_op, **kwargs)
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # direct mapping; nothing to rewrite
        pass
@tf_op("DivNoNan")
class DivNoNan:
    """DivNoNan(x, y): x / y, but 0 wherever y == 0."""
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        node.type = "Div"
        np_dtype = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[1]))
        zero_const = ctx.make_const(utils.make_name("const_zero"), np.array(0, np_dtype)).output[0]
        is_zero = ctx.make_node("Equal", [node.input[1], zero_const]).output[0]
        # select 0 wherever the divisor was zero, otherwise keep the Div result
        where_node = ctx.make_node("Where", [is_zero, zero_const, node.output[0]])
        ctx.insert_node_on_output(where_node, node.output[0])
@tf_op("LRN")
class LRN:
    """tf LRN -> onnx LRN: rescale alpha and wrap with NHWC<->NCHW transposes."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # ONNX: Each input value is divided by (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta
        # TF: sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
        # output = input / (bias + alpha * sqr_sum) ** beta
        # by default, depth_radius is 5 in tensorflow
        size = node.get_attr_value("depth_radius", 5) * 2 + 1
        node.set_attr("size", size)
        # onnx divides alpha by size (see formula above); pre-multiply to compensate
        node.set_attr("alpha", size * node.get_attr("alpha").f)
        shapes = node.output_shapes[0]
        dtypes = node.output_dtypes[0]
        # onnx LRN works on NCHW; transpose in and back out
        ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=constants.NHWC_TO_NCHW)
        ctx.update_node_shape_dtype(node, override=True)
        op_name = utils.make_name(node.name)
        ctx.insert_new_node_on_output("Transpose", node.output[0], perm=constants.NCHW_TO_NHWC,
                                      name=op_name, shapes=shapes, dtypes=dtypes)
@tf_op(["MatMul", "BatchMatMul", "BatchMatMulV2", "BatchMatMulV3"])
class MatMul:
    """tf (Batch)MatMul -> onnx MatMul, inserting Transpose nodes for the
    transpose/adjoint attributes tf allows on either operand."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # tensorflow allows transpose and conjugated. If found, insert the required transpose.
        # We could use Gemm as well but tensorflow does not pass bias in matmul.
        if node.type != "MatMulInteger":
            node.type = "MatMul"

        # transpose_* come from MatMul, adjoint_* / adj_* from the BatchMatMul variants
        attrs = ["transpose_a", "transpose_b", "adjoint_a", "adjoint_b", "adj_x", "adj_y"]
        attrs_val = [node.get_attr(attr) for attr in attrs]
        attrs_val = [0 if val is None else val.i for val in attrs_val]

        dtype = ctx.get_dtype(node.output[0])
        if any(attrs_val[2:]):
            # conjugation operation on complex data not supported in onnx for now
            # so if it's complex then raise exception
            if dtype not in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE]:
                # BUGFIX: dtype is an int enum value; the old "dtype " + dtype raised
                # TypeError instead of the intended ValueError
                raise ValueError("dtype " + str(dtype) + " is not supported in onnx matmul for now")

        # for real dtypes adjoint == transpose, so an odd number of
        # transpose-requests on an operand means it must be transposed
        transpose_a = (attrs_val[0] + attrs_val[2] + attrs_val[4]) % 2
        transpose_b = (attrs_val[1] + attrs_val[3] + attrs_val[5]) % 2

        for input_index, need_transpose in enumerate([transpose_a, transpose_b]):
            if need_transpose != 0:
                shape = ctx.get_shape(node.input[input_index])
                if shape:
                    # swap the two innermost axes, keep any batch dims in place
                    perm = list(range(0, len(shape)))
                    perm[-1], perm[-2] = perm[-2], perm[-1]
                    ctx.insert_new_node_on_input(node, "Transpose", node.input[input_index],
                                                 input_index=input_index, perm=perm)

        # sparse matmul has no onnx equivalent
        unsupported = ["a_is_sparse", "b_is_sparse"]
        for i in unsupported:
            val = node.get_attr(i)
            if val is not None and val.i != 0:
                raise ValueError(node.type + " attribute " + i + " is not supported")

    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        # int8/uint8 inputs with int32 output map to MatMulInteger (opset 10),
        # which additionally takes per-operand zero points (0 here)
        if (ctx.get_dtype(node.input[0]) in [onnx_pb.TensorProto.INT8, onnx_pb.TensorProto.UINT8] and
                ctx.get_dtype(node.input[1]) in [onnx_pb.TensorProto.INT8, onnx_pb.TensorProto.UINT8] and
                ctx.get_dtype(node.output[0]) == onnx_pb.TensorProto.INT32):
            node.type = "MatMulInteger"
            zpdata_a = np.zeros(1, dtype=utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0])))
            zero_point_node_a = ctx.make_const(utils.make_name("zero_point_a"), zpdata_a)
            zpdata_b = np.zeros(1, dtype=utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[1])))
            zero_point_node_b = ctx.make_const(utils.make_name("zero_point_b"), zpdata_b)
            ctx.replace_inputs(node, [node.input[0], node.input[1],
                                      zero_point_node_a.output[0], zero_point_node_b.output[0]])
        cls.version_1(ctx, node, **kwargs)
@tf_op("Erf")
class Erf:
    """tf Erf; emulated before opset 9 with the Abramowitz & Stegun 7.1.26
    polynomial approximation (see the formula sketch in version_1)."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Error function."""
        # constant names
        a1 = "erf_a1"
        a2 = "erf_a2"
        a3 = "erf_a3"
        a4 = "erf_a4"
        a5 = "erf_a5"
        p = "erf_p"
        one = "erf_one"
        null = "erf_null"
        n = node.name
        output_name = node.output[0]
        # the polynomial coefficients are shared graph-wide under fixed names
        erf_a1_node = ctx.get_node_by_output("erf_a1")
        if erf_a1_node is None:
            # insert the constants for erf once
            ctx.make_const(a1, np.array(0.254829592, dtype=np.float32))
            ctx.make_const(a2, np.array(-0.284496736, dtype=np.float32))
            ctx.make_const(a3, np.array(1.421413741, dtype=np.float32))
            ctx.make_const(a4, np.array(-1.453152027, dtype=np.float32))
            ctx.make_const(a5, np.array(1.061405429, dtype=np.float32))
            ctx.make_const(p, np.array(0.3275911, dtype=np.float32))
            ctx.make_const(one, np.array(1., dtype=np.float32))
            ctx.make_const(null, np.array(0., dtype=np.float32))
        x = node.input[0]
        # erf(x):
        # sign = 1 if x >= 0 else -1
        # x = abs(x)
        # # A&S formula 7.1.26
        # t = 1.0 / (1.0 + p * x)
        # y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * math.exp(-x * x)
        # return sign * y # erf(-x) = -erf(x)
        x_node = ctx.make_node("Abs", [x], op_name_scope=node.name, name="x")
        negx_node = ctx.make_node("Sub", [null, x], op_name_scope=node.name, name="negx")
        # sign(x) computed as cast(x > 0) - cast(x < 0)
        is_positive_node = ctx.make_node("Greater", [x, null], op_name_scope=node.name, name="isPositive")
        is_positive_value_node = ctx.make_node("Cast", is_positive_node.output, op_name_scope=node.name,
                                               name="isPositiveValue", attr={"to": onnx_pb.TensorProto.FLOAT})
        is_neg_node = ctx.make_node("Less", [x, null], op_name_scope=node.name, name="isNeg")
        ig_neg_value_node = ctx.make_node("Cast", is_neg_node.output, op_name_scope=node.name, name="isNegValue",
                                          attr={"to": onnx_pb.TensorProto.FLOAT})
        sign0_node = ctx.make_node("Sub", [is_positive_value_node.output[0], ig_neg_value_node.output[0]],
                                   op_name_scope=node.name, name="sign0")
        sign_add_one_node = ctx.make_node("Add", [sign0_node.output[0], one], op_name_scope=node.name,
                                          name="signAddOne")
        non_zero_node = ctx.make_node("Abs", sign0_node.output, op_name_scope=node.name, name="nonZero")
        # maps sign(0) to +1 so erf(0) comes out as 0 with a defined sign
        sign_node = ctx.make_node("Sub", [sign_add_one_node.output[0], non_zero_node.output[0]],
                                  op_name_scope=node.name, name="sign")
        # t = 1 / (1 + p * |x|)
        num_4_node = ctx.make_node("Mul", [x_node.output[0], p], op_name_scope=node.name, name="4")
        num_5_node = ctx.make_node("Add", [num_4_node.output[0], one], op_name_scope=node.name, name="5")
        t_node = ctx.make_node("Div", [one, num_5_node.output[0]], op_name_scope=node.name, name="t")
        # exp(-x*x) * t
        xsq_node = ctx.make_node("Mul", [x, negx_node.output[0]], op_name_scope=node.name, name="xsq")
        num_6_node = ctx.make_node("Exp", xsq_node.output, op_name_scope=node.name, name="6")
        num_7_node = ctx.make_node("Mul", [num_6_node.output[0], t_node.output[0]], op_name_scope=node.name, name="7")
        # Horner evaluation of the degree-5 polynomial in t
        num_8_node = ctx.make_node("Mul", [t_node.output[0], a5], op_name_scope=node.name, name="8")
        num_9_node = ctx.make_node("Add", [num_8_node.output[0], a4], op_name_scope=node.name, name="9")
        num_10_node = ctx.make_node("Mul", [num_9_node.output[0], t_node.output[0]], op_name_scope=node.name, name="10")
        num_11_node = ctx.make_node("Add", [num_10_node.output[0], a3], op_name_scope=node.name, name="11")
        num_12_node = ctx.make_node("Mul", [num_11_node.output[0], t_node.output[0]], op_name_scope=node.name,
                                    name="12")
        num_13_node = ctx.make_node("Add", [num_12_node.output[0], a2], op_name_scope=node.name, name="13")
        num_14_node = ctx.make_node("Mul", [num_13_node.output[0], t_node.output[0]], op_name_scope=node.name,
                                    name="14")
        num_15_node = ctx.make_node("Add", [num_14_node.output[0], a1], op_name_scope=node.name, name="15")
        num_16_node = ctx.make_node("Mul", [num_15_node.output[0], num_7_node.output[0]], op_name_scope=node.name,
                                    name="16")
        num_17_node = ctx.make_node("Sub", [one, num_16_node.output[0]], op_name_scope=node.name, name="17")
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        # final result: sign * y, replacing the original Erf node
        ctx.make_node("Mul", [num_17_node.output[0], sign_node.output[0]], outputs=[output_name], name=n,
                      shapes=shapes, dtypes=dtypes)
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # native onnx Erf since opset 9; nothing to rewrite
        pass
@tf_op("FloorDiv")
class FloorDiv:
    """FloorDiv -> Div (+ Floor for float types)."""
    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        # T output = FloorDiv(T x, T y)
        # NOTE(review): for signed integer inputs onnx Div truncates toward zero
        # while tf FloorDiv floors, so negative quotients may differ -- confirm.
        node.type = "Div"
        dtype = ctx.get_dtype(node.input[0])
        if dtype in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE]:
            # float path: divide then floor the result
            new_node_name = utils.make_name("floor_div_res")
            floor_res = ctx.insert_new_node_on_output(op_type="Floor", output_name=node.output[0],
                                                      name=new_node_name)
            ctx.copy_dtype(node.output[0], floor_res.output[0])
            ctx.copy_shape(node.output[0], floor_res.output[0])
@tf_op("FloorMod")
class FloorMod:
    """FloorMod(x, y) -> x - floor(x / y) * y (Floor only for float types)."""
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # T output = FloorMod(T x, T y)
        div = ctx.make_node(op_type="Div", inputs=node.input)
        dtype = ctx.get_dtype(node.input[0])
        if dtype in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE]:
            div = ctx.make_node(op_type="Floor", inputs=div.output)
        mul = ctx.make_node(op_type="Mul", inputs=[div.output[0], node.input[1]])
        # res node will take over shape&dtype&output connection info of original "node"
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node(op_type="Sub", inputs=[node.input[0], mul.output[0]],
                      name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
@tf_op("Selu")
class Selu:
    """tf Selu maps directly onto the identically named onnx op."""
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # no rewrite performed
        pass
@tf_op("Cumsum", onnx_op="CumSum")
class CumSum:
    """tf Cumsum -> onnx CumSum (native since opset 11)."""
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # no rewrite performed
        pass
@tf_op("Cumprod")
class CumProd:
    """tf Cumprod, emulated with an onnx Loop since onnx has no native CumProd.

    Each loop iteration shifts the input along the cumulated axis (Slice then
    Pad with ones) and multiplies it into an accumulator; the `exclusive` and
    `reverse` attributes are handled by cropping/padding the data and by the
    slicing direction respectively.
    """
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Replace the Cumprod node with a Loop subgraph (needs opset >= 10)."""
        # opset 10 required for Slice to support starts/ends/axes/steps as inputs
        axis_node = node.inputs[1]
        is_axis_const = axis_node.is_const()
        if is_axis_const:  # we can compute axis value right now
            axis = axis_node.get_tensor_value()
            axis_node = ctx.make_const(utils.make_name("axis"), np.array([axis], dtype=np.int64))
        else:
            axis_node = ctx.make_node("Cast", inputs=[axis_node.output[0]], attr={"to": onnx_pb.TensorProto.INT64},
                                      op_name_scope=node.name, outputs=[utils.make_name("axis")])
            axis_node = GraphBuilder(ctx).make_unsqueeze({'data': axis_node.output[0], 'axes': [0]}, return_node=True)
            axis = axis_node.output[0]
        input_rank = len(ctx.get_shape(node.input[0]))
        cond_true_node = ctx.make_const(utils.make_name("cond_in"), np.ones((), dtype=bool))
        # number of loop iterations = length of the cumulated axis - 1
        input_shape_node = ctx.make_node("Shape", inputs=[node.input[0]], op_name_scope=node.name,
                                         outputs=[utils.make_name("input_shape")])
        axis_length_node = ctx.make_node("Gather", inputs=[input_shape_node.output[0], node.input[1]],
                                         op_name_scope=node.name, outputs=[utils.make_name("axis_length")])
        one_node = ctx.make_const(utils.make_name("one"), np.array([1], "int64"))
        axis_length_plus_one_node = ctx.make_node("Add", inputs=[axis_length_node.output[0], one_node.output[0]],
                                                  op_name_scope=node.name,
                                                  outputs=[utils.make_name("axis_length_plus_one")])
        num_iter_node = ctx.make_node("Sub", inputs=[axis_length_node.output[0], one_node.output[0]],
                                      op_name_scope=node.name, outputs=[utils.make_name("num_iter")])
        if node.get_attr_value("exclusive"):  # one iter less, crop the input, then pad the output
            num_iter_node = ctx.make_node("Sub", inputs=[num_iter_node.output[0], one_node.output[0]],
                                          op_name_scope=node.name, outputs=[utils.make_name("num_iter")])
            zero_node = ctx.make_const(utils.make_name("zero"), np.array([0], "int64"))
            if node.get_attr_value("reverse"):
                # drop the first element along the axis; pad the result at the end
                pad_axis = [0, 1]
                start_slice = one_node.output[0]
                end_slice = axis_length_plus_one_node.output[0]
            else:
                # drop the last element along the axis; pad the result at the front
                minus_one_node = ctx.make_const(utils.make_name("minus_one"), np.array([-1], "int64"))
                pad_axis = [1, 0]
                start_slice = zero_node.output[0]
                end_slice = minus_one_node.output[0]
            pads_node = cls.get_pads_node(ctx, pad_axis, axis, input_rank, node.name)
            slice_shape = [-1] * len(ctx.get_shape(node.input[0]))
            inputs_node = ctx.make_node("Slice", inputs=[node.input[0], start_slice, end_slice, axis_node.output[0]],
                                        op_name_scope=node.name, outputs=[utils.make_name("slice")],
                                        shapes=[slice_shape], dtypes=[ctx.get_dtype(node.input[0])])
            inputs = inputs_node.output[0]
        else:
            inputs = node.input[0]
        loop_graph = cls.make_loop_graph(ctx, node, inputs, input_rank, axis)
        loop_graph.parent_graph = ctx
        loop_inputs = [num_iter_node.output[0], cond_true_node.output[0], inputs,
                       axis_length_plus_one_node.output[0], inputs]
        loop_outputs = [utils.make_name("loop_inputs_out"), utils.make_name("loop_axis_length_plus_one_out"),
                        utils.make_name("loop_accumulator_out")]
        if not is_axis_const:  # axis is a tensor, we need to feed it to the loop graph
            loop_inputs.append(axis)
            loop_outputs.append(utils.make_name("loop_axis_out"))
        loop_outputs_shapes = [loop_graph.get_shape(o) for o in loop_graph.outputs[1:]]
        loop_outputs_dtypes = [loop_graph.get_dtype(o) for o in loop_graph.outputs[1:]]
        loop_node = ctx.make_node("Loop", inputs=loop_inputs, branches={"body": loop_graph}, outputs=loop_outputs,
                                  shapes=loop_outputs_shapes, dtypes=loop_outputs_dtypes, op_name_scope=node.name)
        if node.get_attr_value("exclusive"):  # pad the output
            # pad value is 1 (the multiplicative identity), cast to the data dtype
            if ctx.get_dtype(loop_node.output[2]) != ctx.get_dtype(one_node.output[0]):
                pad_const_node = ctx.make_node("Cast", inputs=[one_node.output[0]],
                                               attr={"to": ctx.get_dtype(loop_node.output[2])},
                                               op_name_scope=node.name, outputs=[utils.make_name("pad_const")])
            else:
                pad_const_node = one_node
            output_node = ctx.make_node("Pad", op_name_scope=node.name, outputs=[utils.make_name("cumprod_out")],
                                        inputs=[loop_node.output[2], pads_node.output[0], pad_const_node.output[0]])
            output = output_node.output[0]
        else:
            output = loop_node.output[2]
        output_node = ctx.make_node("Identity", inputs=[output], outputs=[utils.make_name("cumprod_out")],
                                    shapes=[ctx.get_shape(node.input[0])], dtypes=[ctx.get_dtype(node.input[0])])
        ctx.insert_node_on_output(output_node, node.output[0])
        ctx.remove_node(node.name)

    @classmethod
    def make_loop_graph(cls, ctx, node, inputs_tensor, input_rank, axis):
        """Build the Loop body graph.

        The body slices the input shifted by (iteration + 1) along *axis*, pads
        the vacated positions with 1 and multiplies the result into the
        accumulator that is carried between iterations.
        """
        inputs_tensor_shape = ctx.get_shape(inputs_tensor)
        inputs_tensor_dtype = ctx.get_dtype(inputs_tensor)
        graph = ctx.create_new_graph_with_same_config()
        # Loop body signature: iteration_num, condition, then loop-carried deps
        graph.add_graph_input(utils.make_name("iteration_num"), onnx_pb.TensorProto.INT64, [])
        graph.add_graph_input(utils.make_name("condition_in"), onnx_pb.TensorProto.BOOL, [])
        graph.add_graph_input(utils.make_name("inputs"), inputs_tensor_dtype, inputs_tensor_shape)
        graph.add_graph_input(utils.make_name("axis_length_plus_one"), onnx_pb.TensorProto.INT64, [1])
        graph.add_graph_input(utils.make_name("accumulator"), inputs_tensor_dtype, inputs_tensor_shape)
        if not isinstance(axis, int):  # axis is a tensor, we need to feed it to the loop graph
            graph.add_graph_input(utils.make_name("axis"), onnx_pb.TensorProto.INT64, [1])
            axis = graph.input_names[-1]
            axis_node = graph.get_node_by_output(axis)
        else:
            axis_node = graph.make_const(utils.make_name("axis"), np.array([axis], "int64"))
        # main loop graph
        loop_name = node.name + "/loop"
        iter_num = GraphBuilder(graph).make_unsqueeze({'data': graph.input_names[0], 'axes': [0]})
        one_node = graph.make_const(utils.make_name("one"), np.array(1, "int64"))
        zero_node = graph.make_const(utils.make_name("zero"), np.array([0], "int64"))
        # shift amount for this iteration = iteration_num + 1
        add_node = graph.make_node("Add", inputs=[iter_num, one_node.output[0]],
                                   outputs=[utils.make_name("add")], op_name_scope=loop_name)
        if node.get_attr_value("reverse"):
            pad_axis = [zero_node.output[0], add_node.output[0]]
            start_slice = add_node.output[0]
            end_slice = graph.input_names[3]
        else:
            neg_node = graph.make_node("Neg", inputs=[add_node.output[0]],
                                       outputs=[utils.make_name("neg")], op_name_scope=loop_name)
            pad_axis = [add_node.output[0], zero_node.output[0]]
            start_slice = zero_node.output[0]
            end_slice = neg_node.output[0]
        pads_node = cls.get_pads_node(graph, pad_axis, axis, input_rank, is_pad_axis_const=False, base_name=loop_name)
        slice_node = graph.make_node("Slice", op_name_scope=loop_name, outputs=[utils.make_name("slice")],
                                     inputs=[graph.input_names[2], start_slice, end_slice, axis_node.output[0]])
        # pad with 1 (multiplicative identity), cast to the data dtype if needed
        if graph.get_dtype(slice_node.output[0]) != graph.get_dtype(one_node.output[0]):
            pad_const_node = graph.make_node("Cast", inputs=[one_node.output[0]],
                                             attr={"to": graph.get_dtype(slice_node.output[0])},
                                             op_name_scope=loop_name, outputs=[utils.make_name("pad_const")])
        else:
            pad_const_node = one_node
        pad_node = graph.make_node("Pad", inputs=[slice_node.output[0], pads_node.output[0], pad_const_node.output[0]],
                                   op_name_scope=loop_name, outputs=[utils.make_name("pad")])
        mul_node = graph.make_node("Mul", inputs=[graph.input_names[4], pad_node.output[0]],
                                   op_name_scope=loop_name, outputs=[utils.make_name("mul")],
                                   shapes=[inputs_tensor_shape], dtypes=[inputs_tensor_dtype])
        # manage loop outputs
        output_cond_node = graph.make_node("Identity", inputs=[graph.input_names[1]], op_name_scope=loop_name,
                                           outputs=[utils.make_name("condition_out")])
        output_inp_node = graph.make_node("Identity", inputs=[graph.input_names[2]], op_name_scope=loop_name,
                                          outputs=[utils.make_name("inputs_out")])
        output_axis_length_plus_one_node = graph.make_node("Identity", inputs=[graph.input_names[3]],
                                                           op_name_scope=loop_name,
                                                           outputs=[utils.make_name("axis_length_plus_one_out")])
        output_acc_node = graph.make_node("Identity", inputs=[mul_node.output[0]], op_name_scope=loop_name,
                                          outputs=[utils.make_name("accumulator_out")])
        graph.add_graph_output(output_cond_node.output[0])  # 1 condition output
        graph.add_graph_output(output_inp_node.output[0])  # N loop carried dependencies outputs
        graph.add_graph_output(output_axis_length_plus_one_node.output[0])  # N loop carried dependencies outputs
        graph.add_graph_output(output_acc_node.output[0])  # N loop carried dependencies outputs
        if not isinstance(axis, int):  # axis is a tensor, we need to feed it to the loop graph
            output_axis_node = graph.make_node("Identity", inputs=[axis], op_name_scope=loop_name,
                                               outputs=[utils.make_name("axis_out")])
            graph.add_graph_output(output_axis_node.output[0])  # N loop carried dependencies outputs
        return graph

    @classmethod
    def get_pads_node(cls, graph, pad_axis, axis, rank, is_pad_axis_const=True, base_name=""):
        """Build the `pads` input of onnx Pad: zeros everywhere except the two
        *pad_axis* values placed at position *axis* (begin-pads then end-pads).

        *axis* and the pad amounts may each be either python ints or tensors;
        constant cases are folded at conversion time, the rest at runtime.
        """
        if isinstance(axis, int):  # axis is a const, we directly compute padding values
            pre_pad = np.zeros(axis, "int64")
            post_pad = np.zeros(rank - axis - 1, "int64")
            if is_pad_axis_const:  # pylint: disable=R1705
                pads = np.concatenate([pre_pad, pad_axis[0:1], post_pad,
                                       pre_pad, pad_axis[1:2], post_pad])
                pads_node = graph.make_const(utils.make_name("pads"), pads)
                return pads_node
            else:
                pre_pad_node = graph.make_const(utils.make_name("pre_pad"), pre_pad)
                post_pad_node = graph.make_const(utils.make_name("post_pad"), post_pad)
        else:  # axis is a tensor, we need to compute padding values at runtime
            if is_pad_axis_const:
                pad_axis = [graph.make_const(utils.make_name("pad"),
                                             np.array([pad], "int64")).output[0] for pad in pad_axis]
            rank_tensor = graph.make_const(utils.make_name("rank"), np.array([rank], "int64")).output[0]
            zero_node = graph.make_const(utils.make_name("zero"), np.array([0], "int64"))
            # BUGFIX: this const holds the value 1 but was misleadingly named "zero"
            one_node = graph.make_const(utils.make_name("one"), np.array([1], "int64"))
            # number of trailing zeros = rank - axis - 1
            post_repeat_node = graph.make_node("Sub", inputs=[rank_tensor, axis],
                                               outputs=[utils.make_name("post_repeat")], op_name_scope=base_name)
            post_repeat_node = graph.make_node("Sub", inputs=[post_repeat_node.output[0], one_node.output[0]],
                                               outputs=[utils.make_name("post_repeat")], op_name_scope=base_name)
            pre_pad_node = graph.make_node("Tile", inputs=[zero_node.output[0], axis], op_name_scope=base_name,
                                           attr={"axis": 0}, outputs=[utils.make_name("pre_pad")])
            post_pad_node = graph.make_node("Tile", inputs=[zero_node.output[0], post_repeat_node.output[0]],
                                            attr={"axis": 0}, outputs=[utils.make_name("post_pad")],
                                            op_name_scope=base_name)
        pads_node = graph.make_node("Concat", attr={"axis": 0}, outputs=[utils.make_name("pads")],
                                    op_name_scope=base_name,
                                    inputs=[pre_pad_node.output[0], pad_axis[0], post_pad_node.output[0],
                                            pre_pad_node.output[0], pad_axis[1], post_pad_node.output[0]])
        return pads_node
@tf_op("Round")
class Round:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Emulate Round for old opsets as floor(x + 0.5).

        Not round-half-to-even, but close enough when a native Round is unavailable.
        """
        np_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0]))
        half = ctx.make_const(utils.make_name("const_half"), np.array(0.5, np_type)).output[0]
        shifted = ctx.make_node("Add", [node.input[0], half], op_name_scope=node.name).output[0]
        # Rewrite the original node into Floor applied to x + 0.5.
        node.type = "Floor"
        ctx.replace_inputs(node, [shifted])
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        """ONNX has a native Round from opset 11; direct mapping, nothing to rewrite."""
@tf_op("Rint", onnx_op="Round")
class Rint:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        """Direct mapping of TF Rint to ONNX Round (opset 11); no graph rewrite needed."""
        # Same with tf round, two different people just happened to write the function.
        # https://github.com/tensorflow/tensorflow/issues/709
        pass
@tf_op("MatrixDeterminant", onnx_op="Det")
class Det:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # TF MatrixDeterminant maps 1:1 to ONNX Det (opset 11); nothing to rewrite.
        pass
@tf_op(["LeftShift", "RightShift"])
class BitShift:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        """Map TF LeftShift/RightShift onto ONNX BitShift (opset 11)."""
        # ONNX encodes the shift direction as an attribute instead of two separate ops.
        dir_map = {"LeftShift": "LEFT", "RightShift": "RIGHT"}
        direction = dir_map[node.type]
        # ONNX BitShift only accepts unsigned integer inputs.
        supported = [onnx_pb.TensorProto.UINT8, onnx_pb.TensorProto.UINT16,
                     onnx_pb.TensorProto.UINT32, onnx_pb.TensorProto.UINT64]
        # Signed inputs are cast to an unsigned type and the result cast back below.
        # NOTE(review): INT16->UINT32 and INT32->UINT64 widen beyond the matching
        # width — presumably deliberate to preserve values; confirm against tests.
        type_map = {onnx_pb.TensorProto.INT8: onnx_pb.TensorProto.UINT8,
                    onnx_pb.TensorProto.INT16: onnx_pb.TensorProto.UINT32,
                    onnx_pb.TensorProto.INT32: onnx_pb.TensorProto.UINT64}
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        # Re-create the node as an ONNX-domain BitShift carrying the direction attribute.
        ctx.remove_node(node.name)
        node = ctx.make_node("BitShift", inputs=node.input, outputs=node.output, name=node.name,
                             shapes=shapes, dtypes=dtypes, domain=constants.ONNX_DOMAIN, attr={'direction': direction})
        if node.maybe_cast_input([supported, supported], type_map):
            # Inputs were cast to unsigned; cast the output back to the original dtype.
            cast_back_node = ctx.insert_new_node_on_output(
                "Cast", node.output[0], name=utils.make_name(node.name) + "_castback",
                to=dtypes[0])
            ctx.set_dtype(cast_back_node.output[0], dtypes[0])
            ctx.copy_shape(node.name, cast_back_node.output[0])
        # Keep the (possibly cast) node's output dtype in sync with its input dtype.
        ctx.copy_dtype(node.input[0], node.output[0])
@tf_op("BitwiseAnd")
@tf_op("BitwiseOr")
@tf_op("BitwiseXor")
@tf_op("Invert", onnx_op="BitwiseNot")
class BitwiseOps:
    @classmethod
    def version_18(cls, ctx, node, **kwargs):
        # Opset 18 adds native BitwiseAnd/Or/Xor/Not; direct mapping, nothing to rewrite.
        pass
@tf_op("SquaredDistance", onnx_op="MeanSquaredDistance")
class SquaredDistance:
    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        # Request per-element output from MeanSquaredDistance (no reduction),
        # matching TF's elementwise squared distance.
        node.attr["reduction"] = "none"
@tf_op("Einsum")
class Einsum:
    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        """Map TF Einsum to ONNX Einsum, rewriting to MatMul when profitable."""
        del node.attr["N"]
        # ONNX requires lower-case subscript letters in the equation.
        node.attr["equation"].s = node.attr["equation"].s.lower()
        def should_replace_with_matmul():
            # True is 2nd inp is const and eqn is ...ik,kj->...ij (possibly transpose 2nd inp)
            # When the 2nd input is const, ort pre-packs the Matmul but not Einsum so this is faster
            if not node.inputs[1].is_const():
                return False, None
            eqn = node.get_attr_value("equation").decode()
            parts = eqn.split('->')
            if len(parts) != 2:
                # Implicit-output equations (no '->') are left to Einsum itself.
                # (Previously rhs became a *list* of terms here, so the character
                # comparisons below silently never matched.)
                return False, None
            lhs, rhs = parts
            terms = lhs.split(',')
            if len(terms) != 2:
                return False, None
            t1, t2 = terms
            # No repeat vars and all terms have >= 2 vars
            if any(len(set(t)) < len(t) or len(t) < 2 for t in [t1, t2, rhs]):
                return False, None
            if len(t2) != 2:
                return False, None
            i = rhs[-2]
            j = rhs[-1]
            if t2[0] == j:
                k = t2[1]
                transpose_t2 = True
            elif t2[1] == j:
                k = t2[0]
                transpose_t2 = False
            else:
                return False, None
            return t1.endswith(i + k) and t1[:-2] == rhs[:-2], transpose_t2
        should_replace, transpose_t2 = should_replace_with_matmul()
        if should_replace:
            if transpose_t2:
                # kj -> jk so a plain MatMul computes ...ik,kj.
                inp_trans = ctx.make_node("Transpose", [node.input[1]], attr={'perm': [1, 0]}).output[0]
                ctx.replace_inputs(node, [node.input[0], inp_trans])
            node.type = "MatMul"
            del node.attr["equation"]
@tf_op("IsFinite")
class IsFinite:
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Lower IsFinite as Not(Or(IsInf(x), IsNaN(x)))."""
        out_shapes = node.output_shapes
        bool_dtypes = [onnx_pb.TensorProto.BOOL] * len(node.output_dtypes)
        orig_outputs = node.output
        ctx.remove_node(node.name)
        is_inf = ctx.make_node("IsInf", inputs=node.input, name=utils.make_name(node.name),
                               shapes=out_shapes, dtypes=bool_dtypes)
        is_nan = ctx.make_node("IsNaN", inputs=node.input, name=utils.make_name(node.name),
                               shapes=out_shapes, dtypes=bool_dtypes)
        either = ctx.make_node("Or", inputs=[is_inf.output[0], is_nan.output[0]], name=utils.make_name(node.name),
                               shapes=out_shapes, dtypes=bool_dtypes)
        # Final Not reuses the original node name and outputs so consumers are untouched.
        _ = ctx.make_node("Not", inputs=either.output, name=node.name, outputs=orig_outputs,
                          shapes=out_shapes, dtypes=bool_dtypes)
@tf_op("Atan2")
class Atan2Op:
    # support more dtype
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        """
        Decompose atan2 into primitive ops available at opset 9.
        Obtained with a linear regression.
        ::
            def atan2(y, x):
                sx = numpy.sign(x)
                sy = numpy.sign(y)
                pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi/2)
                atan_part = numpy.arctan(y / (x + (1 - sx ** 2))) * sx ** 2
                return atan_part + pi_part
        """
        supported_dtypes = [
            onnx_pb.TensorProto.FLOAT,
            onnx_pb.TensorProto.FLOAT16,
            onnx_pb.TensorProto.DOUBLE
        ]
        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.")
        shape = ctx.get_shape(node.input[0])
        np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
        # sign part -- node.input[0] is y, node.input[1] is x (tf.atan2(y, x) order).
        sign_x_node = ctx.make_node(
            "Sign", inputs=node.input[1:],
            name=utils.make_name(node.name + 'signx'))
        sign_y_node = ctx.make_node(
            "Sign", inputs=node.input[:1],
            name=utils.make_name(node.name + 'signy'))
        sx_node = ctx.make_node(
            "Cast", sign_x_node.output[:1], attr={"to": onnx_dtype},
            name=utils.make_name(node.name + 'csignx'))
        sy_node = ctx.make_node(
            "Cast", sign_y_node.output[:1], attr={"to": onnx_dtype},
            name=utils.make_name(node.name + 'csigny'))
        # cst
        one_node = ctx.make_const(
            utils.make_name("{}_one".format(node.name)),
            np.array([1], dtype=np_dtype))
        pib2_node = ctx.make_const(
            utils.make_name("{}_pi".format(node.name)),
            np.array(- np.pi / 2, dtype=np_dtype))
        # pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi/2)
        sxm1_node = ctx.make_node(
            "Sub", [sx_node.output[0], one_node.output[0]],
            name=utils.make_name(node.name + 'sxm1'))
        sy2_node = ctx.make_node(
            "Mul", [sy_node.output[0], sy_node.output[0]],
            name=utils.make_name(node.name + 'sy2'))
        sy2m1_node = ctx.make_node(
            "Sub", [sy2_node.output[0], one_node.output[0]],
            name=utils.make_name(node.name + 'sy2m1'))
        sxsy2m1_node = ctx.make_node(
            "Mul", [sx_node.output[0], sy2m1_node.output[0]],
            name=utils.make_name(node.name + 'sxsy2m1'))
        sysxsy2m1_node = ctx.make_node(
            "Add", [sy_node.output[0], sxsy2m1_node.output[0]],
            name=utils.make_name(node.name + 'sysxsy2m1'))
        m1_node = ctx.make_node(
            "Mul", [sysxsy2m1_node.output[0], sxm1_node.output[0]],
            name=utils.make_name(node.name + 'm1'))
        pi_part = ctx.make_node(
            "Mul", [m1_node.output[0], pib2_node.output[0]],
            name=utils.make_name(node.name + 'pip'))
        # atan -- (1 - sx**2) makes the denominator 1 when x == 0, avoiding a
        # division by zero; the atan term is then zeroed out by the sx**2 factor.
        sx2_node = ctx.make_node(
            "Mul", [sx_node.output[0], sx_node.output[0]],
            name=utils.make_name(node.name + 'sx2'))
        sx2m1_node = ctx.make_node(
            "Sub", [sx2_node.output[0], one_node.output[0]],
            name=utils.make_name(node.name + 'sx2m1'))
        xsx2m1_node = ctx.make_node(
            "Add", [node.input[1], sx2m1_node.output[0]],
            name=utils.make_name(node.name + 'xsx2m1'))
        div_node = ctx.make_node(
            "Div", inputs=[node.input[0], xsx2m1_node.output[0]],
            name=utils.make_name(node.name + 'div'))
        atan0_node = ctx.make_node(
            "Atan", inputs=[div_node.output[0]],
            name=utils.make_name(node.name + 'atan0'))
        atan_node = ctx.make_node(
            "Mul", inputs=[sx2_node.output[0], atan0_node.output[0]],
            name=utils.make_name(node.name + 'atan'))
        # final: replace the original node with atan_part + pi_part.
        ctx.remove_node(node.name)
        last_node = ctx.make_node(
            "Add", inputs=[atan_node.output[0], pi_part.output[0]],
            op_name_scope=node.name + 'all',
            shapes=[shape], dtypes=[onnx_dtype])
        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()
@tf_op("InvertPermutation")
class InvertPermutationOp:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        """Invert a permutation via TopK: the indices of an argsort give the inverse."""
        supported_dtypes = [onnx_pb.TensorProto.INT32, onnx_pb.TensorProto.INT64]
        in_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(in_dtype in supported_dtypes, "InvertPermutation only applies on INT32, INT64.")
        in_shape = ctx.get_shape(node.input[0])
        # k for TopK is the element count, taken from the runtime shape.
        length = ctx.make_node(
            "Shape", inputs=node.input, name=utils.make_name(node.name + '_shape'))
        negated = ctx.make_node(
            "Neg", inputs=node.input, name=utils.make_name(node.name + '_neg'))
        # TopK on the negated values sorts ascending; output[1] holds the inverse permutation.
        sorted_pair = ctx.make_node(
            "TopK", inputs=[negated.output[0], length.output[0]],
            name=utils.make_name(node.name + '_topk'), output_count=2)
        ctx.remove_node(node.name)
        result = ctx.make_node(
            "Identity", inputs=sorted_pair.output[1:], name=utils.make_name(node.name + '_indices'),
            shapes=[in_shape], dtypes=[in_dtype])
        ctx.replace_all_inputs(node.output[0], result.output[0])  # ops=ctx.get_nodes()
@tf_op(["HardSwish"])
class HardSwish:
    # Note: this doesn't really exist in tensorflow but it does in tflite
    @classmethod
    def version_14(cls, ctx, node, **kwargs):
        # ONNX gains a native HardSwish at opset 14; direct mapping, nothing to rewrite.
        pass
@tf_op(["L2Normalization"], onnx_op="LpNormalization")
class L2Normalization:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Map L2Normalization to LpNormalization with p=2, defaulting axis to the last dim."""
        axis = node.get_attr_value("axis")
        node.set_attr("axis", -1 if axis is None else axis)
        node.set_attr("p", 2)
| 49,158 | 45.158685 | 120 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/onnx_opset/nn.py | # SPDX-License-Identifier: Apache-2.0
"""
nn
"""
import logging
import numpy as np
from onnx import onnx_pb, helper
from onnx.onnx_pb import TensorProto
from tf2onnx import constants, utils
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.handler import tf_op
from tf2onnx.onnx_opset import common, controlflow, tensor
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring,unused-variable
def spatial_map(shape, perm):
    """Return a copy of *shape* with shape[perm[i]] placed at index i, for every i in perm."""
    remapped = shape[:]
    for target in perm:
        remapped[target] = shape[perm[target]]
    return remapped
def is_channels_last(node):
    """Returns whether node is channels last, so (N, ..., C)."""
    fmt = node.data_format
    return not fmt.startswith("NC")
def make_shape_channels_first(shape):
    """Makes a (N, ..., C) shape into (N, C, ...)."""
    batch = shape[:1]
    channels = shape[-1:]
    spatial_dims = shape[1:-1]
    return batch + channels + spatial_dims
def make_shape_channels_last(shape):
    """Makes a (N, C, ...) shape into (N, ..., C).

    Inverse of make_shape_channels_first: the channel dim (index 1) moves to the
    end and the spatial dims shape[2:] move forward.
    (Fixed: previously returned shape[:1] + shape[1:-1] + shape[1:2], which kept
    the channel dim in front and duplicated it at the end.)
    """
    return shape[:1] + shape[2:] + shape[1:2]
def get_channels_first_permutation(spatial):
    """Returns a permutation to make a (N, ..., C) array into (N, C, ...)."""
    perm = [0, spatial + 1]
    perm.extend(range(1, spatial + 1))
    return perm
def get_channels_last_permutation(spatial):
    """Returns a permutation to make a (N, C, ...) array into (N, ..., C)."""
    return [0, *range(2, spatial + 2), 1]
def conv_convert_inputs(ctx, node, with_kernel=False, new_kernel_shape=None,
                        input_indices=None, output_indices=None, spatial=2,
                        quantization_axis=0):
    """Convert input and kernel from tensorflow to onnx. This may be required to
    insert transpose ops for input, kernel, and output unless they are constants
    and we can transpose the constant.
    We transpose inputs if they are in NHWC. We always transpose the kernel from
    HWNC to NCHW. Outputs are transposed if the format is NHWC.
    Some convolutions like depthwise_conv2d require a reshape of the kernel.
    Mutates the graph in place and finally marks the node as "NCHW"; returns None.
    Args:
        ctx: The parent graph.
        node: Node of the convolution op.
        with_kernel: Transpose the kernel.
        new_kernel_shape: Pass to reshape the kernel.
        input_indices: Indices that define the inputs.
        output_indices: Indices that define the outputs.
        quantization_axis: Axis for the inserted QDQ nodes
    """
    if input_indices is None:
        input_indices = [0]
    if output_indices is None:
        output_indices = [0]
    # Transpose inputs if needed.
    if is_channels_last(node):
        # Get the channels-first permutation.
        permutation = get_channels_first_permutation(spatial)
        # Transpose input if needed, no need to record shapes on input
        for idx in input_indices:
            # If input is a constant, transpose that one if we are the only consumer.
            input_node = node.inputs[idx]
            input_name = node.input[idx]
            if input_node.is_const() and len(ctx.find_output_consumers(input_name)) == 1:
                # Transpose constant to make it channels first.
                val = input_node.get_tensor_value(as_list=False)
                val = np.transpose(val, permutation)
                input_node.set_tensor_value(val)
            else:
                # Insert transpose op.
                transpose = ctx.insert_new_node_on_input(node, "Transpose", input_name)
                transpose.set_attr("perm", permutation)
                transpose.skip_conversion = True
                shape = ctx.get_shape(input_name)
                if shape is not None:
                    new_shape = make_shape_channels_first(shape)
                    ctx.set_shape(transpose.output[0], new_shape)
    # Transpose kernel if needed.
    if with_kernel:
        # Some ONNX convolution ops require to reshape the kernel (ie. depthwise_conv2d).
        if new_kernel_shape:
            kernel_name = node.input[1]
            if ctx.opset < 5:
                # Old reshape takes new shape as attribute.
                reshape = ctx.insert_new_node_on_input(node, "Reshape", kernel_name)
                reshape.set_attr("shape", new_kernel_shape)
                reshape.skip_conversion = True
            else:
                # New reshape takes new shape as input[1].
                shape_name = utils.make_name(node.name)
                ctx.make_const(shape_name, np.array(new_kernel_shape, dtype=np.int64))
                reshape = ctx.make_node("Reshape", [kernel_name, shape_name])
                ctx.replace_input(node, kernel_name, reshape.output[0], 1)
                reshape.skip_conversion = True
            ctx.set_shape(reshape.output[0], new_kernel_shape)
        # Get kernel (may have be changed to a reshape above).
        kernel_node = node.inputs[1]
        kernel_name = node.input[1]
        # Transpose kernel from (..., C_in, C_out) to (C_out, C_in, ...)
        permutation = [spatial + 1, spatial] + list(range(spatial))
        # If kernel is a constant, transpose that one if we are the only consumer.
        need_transpose = True
        if (kernel_node.is_const() or kernel_node.op.op_type == "DequantizeLinear") \
                and len(ctx.find_output_consumers(kernel_name)) == 1:
            if kernel_node.op.op_type == 'DequantizeLinear':
                # Assuming the model was trained in NHWC in TF,
                # the weights would be in [fH, fW, C_in, C_out].
                # orig_conv_weights -> Q -> DQ -> new_conv_weights -> conv
                # Transpose the raw weights feeding the Q node instead of inserting a Transpose.
                weights_node = kernel_node.inputs[0].inputs[0]
                val = weights_node.get_tensor_value(as_list=False)
                val = np.transpose(val, permutation)
                weights_node.set_tensor_value(val)
                need_transpose = False
                # Change the quantization axis for Q and DQ node accordingly
                kernel_node.set_attr("axis", quantization_axis)  # DQ node
                kernel_node.inputs[0].set_attr("axis", quantization_axis)  # Q node
            else:
                val = kernel_node.get_tensor_value(as_list=False)
                val = np.transpose(val, permutation)
                kernel_node.set_tensor_value(val)
                need_transpose = False
        if need_transpose:
            transpose = ctx.insert_new_node_on_input(node, "Transpose", kernel_name)
            transpose.set_attr("perm", permutation)
            transpose.skip_conversion = True
            new_shape = spatial_map(ctx.get_shape(kernel_name), permutation)
            ctx.set_shape(transpose.output[0], new_shape)
    # Transpose outputs back if needed.
    if is_channels_last(node):
        for idx in output_indices:
            # Make output channels last again by transposing.
            output_name = node.output[idx]
            output_shape = ctx.get_shape(node.output[idx])
            permutation = get_channels_last_permutation(spatial)
            op_name = utils.make_name(node.name)
            transpose = ctx.insert_new_node_on_output("Transpose", output_name, name=op_name)
            transpose.set_attr("perm", permutation)
            transpose.skip_conversion = True
            # Set tensorflow channels last shape as the transpose node shape.
            ctx.set_shape(transpose.output[0], output_shape)
            # Make the current ONNX convolution output shape channels first.
            ctx.set_shape(output_name, make_shape_channels_first(output_shape))
    # NOTE: Not strictly correct as it can also be NCW or NCDHW for example.
    # NOTE: Generally speaking it's channels first.
    node.data_format = "NCHW"
def add_padding(ctx, node, kernel_shape, strides, dilations=None, spatial=2):
    """Translate the TF 'padding' attribute (SAME / VALID / EXPLICIT) into ONNX pads.

    Mutates *node*: sets the 'pads' attribute, or 'auto_pad' = SAME_UPPER when
    input/output dims are unknown at conversion time. Raises ValueError for an
    unexpected padding string or tensor rank.
    """
    padding = node.get_attr("padding")
    if not padding:
        return
    if dilations is None:
        dilations = [1] * spatial
    padding = padding.s.decode("utf-8")
    if padding == "SAME":
        # Initialize with all zeros.
        # Paddings are in (x_begin, y_begin, ..., x_end, y_end, ...) order.
        pads = [0] * (spatial * 2)
        # Get shapes and check whether valid.
        input_shape = ctx.get_shape(node.input[0])
        output_shape = ctx.get_shape(node.output[0])
        if len(input_shape) != spatial + 2:
            raise ValueError(
                "node {} output needs to be rank {}, is {}".format(
                    node.name, spatial + 2, len(input_shape)
                )
            )
        if len(output_shape) != spatial + 2:
            raise ValueError(
                "node {} output needs to be rank {}, is {}".format(
                    node.name, spatial + 2, len(output_shape)
                )
            )
        # Transpose to channels first if not so.
        if is_channels_last(node):
            input_shape = make_shape_channels_first(input_shape)
            output_shape = make_shape_channels_first(output_shape)
        # Check for unknown input/output dimensions. Fall back to auto padding if so.
        if any(input_shape[i + 2] == -1 or output_shape[i + 2] == -1 for i in range(spatial)):
            logger.debug(
                "node %s has unknown dim for pads calculation, fallback to auto_pad: "
                "input_shape=%s, output_shape=%s",
                node.name,
                input_shape,
                output_shape,
            )
            node.set_attr("auto_pad", "SAME_UPPER")
            return
        # Calculate paddings.
        for i in range(spatial):
            # Total padding needed so the strided/dilated kernel covers the input;
            # the extra element (if odd) goes to the end pad, matching SAME_UPPER.
            pad = (
                (output_shape[i + 2] - 1) * strides[i]
                + dilations[i] * (kernel_shape[i] - 1) + 1
                - input_shape[i + 2]
            )
            pad = max(pad, 0)
            pads[i] = pad // 2
            pads[i + spatial] = pad - pad // 2
        node.set_attr("pads", pads)
    elif padding == "VALID":
        # VALID means no padding at all; ONNX default pads of 0 already match.
        pass
    elif padding == "EXPLICIT":
        # TF stores explicit pads per dim as (begin, end) pairs; pick the spatial ones.
        pads = node.get_attr_value("explicit_paddings")
        start_pads = []
        end_pads = []
        d = 1 if is_channels_last(node) else 2
        for i in range(spatial):
            start_pads.append(pads[(d + i) * 2])
            end_pads.append(pads[(d + i) * 2 + 1])
        node.set_attr("pads", start_pads + end_pads)
    else:
        raise ValueError("invalid padding value: {}".format(padding))
def parse_dims_attr(node, dims, spatial):
    """Reduce a per-dimension attribute to just its *spatial* entries."""
    if is_channels_last(node):
        # Layout (N, ..., C): strip leading batch and trailing channel entries,
        # unless dims already holds only the spatial part.
        return dims if len(dims) == spatial else dims[1:-1]
    # Layout (N, C, ...): strip leading batch and channel entries.
    return dims if len(dims) == spatial else dims[2:]
def conv_dims_attr(node, name, new_name=None, spatial=2):
    """Fetch attribute *name*, keep only its spatial part, store it under
    *new_name* (defaults to *name*) and return it; None if the attr is absent."""
    target_name = new_name if new_name is not None else name
    attr = node.get_attr(name)
    if not attr:
        return None
    spatial_dims = parse_dims_attr(node, attr.ints, spatial)
    node.set_attr(target_name, spatial_dims)
    return spatial_dims
def conv_kernel_shape(ctx, node, input_idx, spatial=2):
    """Extract the spatial kernel dims from input *input_idx* and record them
    as the node's 'kernel_shape' attribute when fully known."""
    # Kernel shape is (..., C_in, C_out).
    full_shape = ctx.get_shape(node.input[input_idx])
    if len(full_shape) != spatial + 2:
        raise ValueError("kernel rank must be spatial+2")
    spatial_part = full_shape[:spatial]
    # Only set the attribute when every spatial dim is known.
    if all(dim > 0 for dim in spatial_part):
        node.set_attr("kernel_shape", spatial_part)
    return spatial_part
def build_dynamic_target_size(ctx, transposed_intput, target_hw):
    """
    Build the target tensor shape for the Resize op.
    Args:
        - ctx: the graph context
        - transposed_intput: A tensor of rank 4 of shape [n c h w]
        - target_hw: tensor of rank 2 containing the target size for a resize: [nh nw]
    Returns:
        A tensor of rank 2 containing [n c nh nw] (the Concat node producing it)
    """
    # We get the first half [n c] of the target shape from the runtime input shape.
    shape_of_transposed_input = ctx.make_node("Shape", [transposed_intput])
    first_half_of_shape = GraphBuilder(ctx).make_slice(
        {"data": shape_of_transposed_input.output[0], "ends": [2], "starts": [0]})
    # Shape outputs int64; make sure target_hw matches before concatenating.
    if ctx.get_dtype(target_hw) != TensorProto.INT64:
        target_hw = ctx.make_node("Cast", [target_hw], attr={'to': TensorProto.INT64}).output[0]
    # We build a tensor containing [n c nh nw]
    final_target_size = ctx.make_node("Concat", [first_half_of_shape, target_hw], {'axis': 0})
    return final_target_size
@tf_op(["Conv1D", "Conv2D", "Conv3D"])
class ConvOp:
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Convert TF Conv1D/2D/3D to ONNX Conv (shared across opsets)."""
        # ONNX specification:
        #
        # T output = Conv2D(T input, T filter, @list(int) strides, @bool use_cudnn_on_gpu,
        #                   @string padding, @string data_format)
        #
        # T Y = Conv(T X, T W, T B, @AttrType.STRING auto_pad, @AttrType.INTS dilations, @AttrType.INT group,
        #            @AttrType.INTS kernel_shape, @AttrType.INTS pads, @AttrType.INTS strides)
        #
        # Determine number of spatial dimensions from the digit in the op name ("Conv2D"[-2] == '2').
        spatial = int(node.type[-2])
        # Make it a convolution node.
        node.type = "Conv"
        # Determine kernel spatial shape, strides and dilations.
        kernel_shape = conv_kernel_shape(ctx, node, 1, spatial=spatial)
        strides = conv_dims_attr(node, "strides", spatial=spatial)
        dilations = conv_dims_attr(node, "dilations", spatial=spatial)
        # prefix with batch dim of [1] to satisfy rank requirements
        input_shape = ctx.get_shape(node.input[0])
        if input_shape is not None and len(input_shape) == spatial + 1:
            gb = GraphBuilder(ctx)
            usq_node = gb.make_unsqueeze({"axes": [0], 'data': node.input[0]}, return_node=True)
            ctx.replace_inputs(node, [usq_node.output[0]] + node.input[1:])
        # Set padding.
        add_padding(
            ctx, node, kernel_shape, strides, dilations=dilations, spatial=spatial
        )
        # Infer the group count (grouped conv) from input channels / filter input channels.
        groups = int(1)
        data_format = str(node.attr["data_format"].s, encoding="utf8")
        shape_dim = -1
        if data_format == "NHWC":
            shape_dim = ctx.get_shape(node.input[0])[-1]
        elif data_format == "NCHW":
            shape_dim = ctx.get_shape(node.input[0])[1]
        if shape_dim != -1:
            # Filter layout is (..., C_in, C_out), so C_in sits at index -2.
            filter_in_channels = ctx.get_shape(node.input[1])[-2]
            if filter_in_channels != -1:
                groups = shape_dim // filter_in_channels
        node.set_attr("group", groups)
        # Convert input and filters.
        conv_convert_inputs(ctx, node, with_kernel=True, spatial=spatial)
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        cls.any_version(1, ctx, node, **kwargs)
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # No change.
        cls.any_version(11, ctx, node, **kwargs)
    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Signature change for operator Unsqueeze.
        cls.any_version(13, ctx, node, **kwargs)
def get_shape_from_const_or_concat(ctx, node):
    """Try to read a static shape from *node*: either a constant, or a Concat of
    unsqueezed scalar constants (the converted form of a TF Pack). Returns None
    when the shape cannot be determined."""
    if node.is_const():
        return node.get_tensor_value()
    if node.type != 'Concat':
        return None
    # Every concat input must be a single element for this to be a shape vector.
    if any(ctx.get_shape(inp) != [1] for inp in node.input):
        return None
    dims = []
    for idx, producer in enumerate(node.inputs):
        # The concat is converted from a Pack. Conversion adds an unsqueeze to the inputs.
        if producer.type == 'Unsqueeze' and producer.inputs[0].is_scalar():
            dims.append(producer.inputs[0].get_tensor_value())
        elif idx == 0:
            # For the batch dimension we don't care if it is unknown
            dims.append(-1)
        else:
            return None
    return dims
@tf_op(["Conv2DBackpropInput", "Conv3DBackpropInputV2"])
class ConvTranspose:
@classmethod
def version_1(cls, ctx, node, **kwargs):
# T output = Conv2DBackpropInput(int32 input_sizes, T filter, T out_backprop,
# @list(int) strides, @bool use_cudnn_on_gpu, @string padding, @string data_format, @list(int) dilations)
# T Y = ConvTranspose(T X, T W, T B, @STRING auto_pad, @INTS dilations,
# @INT group, @INTS kernel_shape, @INTS output_shape, @INTS pads, @INTS strides)
if node.type == "Conv3DBackpropInputV2":
spatial = 3
else:
spatial = 2
node.type = "ConvTranspose"
# Note: inputs are reversed from what one would expect.
conv_kernel_shape(ctx, node, 1, spatial=spatial)
input_shape = ctx.get_shape(node.input[2])
input_batch_dim = input_shape[0]
output_c_dim = ctx.get_shape(node.input[1])[-2]
if is_channels_last(node):
input_dims = input_shape[1:1+spatial]
else:
input_dims = input_shape[2:2+spatial]
output_shape_orig = node.output_shapes
# output_shape is explicitly specified here and then converted to explicit pads.
output_shape = get_shape_from_const_or_concat(ctx, node.inputs[0])
if output_shape is not None:
if is_channels_last(node):
new_output_shape = [output_shape[1], output_shape[2]]
if spatial == 3:
new_output_shape.append(output_shape[3])
else:
new_output_shape = [output_shape[2], output_shape[3]]
if spatial == 3:
new_output_shape.append(output_shape[4])
utils.make_sure(new_output_shape.count(-1) <= 0, "output dims need to be known")
utils.make_sure(all(new_output_shape[i] >= input_dims[i] for i in range(spatial)),
"output dims cannot be smaller than input dims.")
if -1 in input_dims:
node.set_attr("output_shape", new_output_shape)
else:
if "strides" in node.attr:
strides = parse_dims_attr(node, node.get_attr("strides").ints, spatial)
else:
strides = [1] * spatial
if "dilations" in node.attr:
dilations = parse_dims_attr(node, node.get_attr("dilations").ints, spatial)
else:
dilations = [1] * spatial
kernel_shape = parse_dims_attr(node, node.get_attr("kernel_shape").ints, spatial)
total_padding = [-1] * spatial
pads = [1] * (spatial * 2)
for i in range(spatial):
total_padding[i] = (strides[i] * (input_dims[i] - 1)
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- new_output_shape[i])
start_i = i
end_i = i + spatial
pads[start_i] = int(total_padding[i] / 2)
pads[end_i] = total_padding[i] - pads[start_i]
node.set_attr("pads", pads)
node.set_attr("auto_pad", "NOTSET")
else:
utils.make_sure(ctx.opset >= 10, "Opset 10 needed for Conv Backprop Input with non-constant shape")
strides = parse_dims_attr(node, node.get_attr('strides').ints, spatial)
if 'dilations' in node.attr:
dilations = parse_dims_attr(node, node.get_attr('dilations').ints, spatial)
else:
dilations = [1] * spatial
kernel_shape = parse_dims_attr(node, node.get_attr('kernel_shape').ints, spatial)
new_dims = [-1] * spatial
for i in range(spatial):
new_dims[i] = strides[i] * (input_dims[i] - 1) + ((kernel_shape[i] - 1) * dilations[i] + 1)
if is_channels_last(node):
new_shape = [input_batch_dim] + new_dims + [output_c_dim]
else:
new_shape = [input_batch_dim, output_c_dim] + new_dims
ctx.set_shape(node.output[0], new_shape)
use_strides_workaround = any(d > 1 for d in strides)
if use_strides_workaround and ctx.opset < 12:
# When strides > 1, ONNX and TF have an implementation difference in ConvTranspose. ONNX outputs a
# slightly smaller tensor which must be padded with a row of 0s. Pad with dynamic shape requires
# opset >= 11 and Max of int64 needs opset >= 12. Depending on the output_shape, this row of 0s might
# be shaved off, in which case TF and ONNX agree. When output_shape is dynamic it is impossible to
# know at conversion time whether this is the case and the workaround is needed.
logger.warning("Conv Backprop Input with strides > 1 and non-constant shape has known bug. "
"Workaround requires opset 12.")
use_strides_workaround = False
input_shape = ctx.make_node("Cast", [node.input[0]], attr={'to': TensorProto.INT64})
output_shape = ctx.make_node("Shape", [node.output[0]])
output_h = GraphBuilder(ctx).make_slice(
{"data": output_shape.output[0], "ends": [2], "starts": [1], "axes": [0]})
output_w = GraphBuilder(ctx).make_slice(
{"data": output_shape.output[0], "ends": [3], "starts": [2], "axes": [0]})
expect_h = GraphBuilder(ctx).make_slice(
{"data": input_shape.output[0], "ends": [2], "starts": [1], "axes": [0]})
expect_w = GraphBuilder(ctx).make_slice(
{"data": input_shape.output[0], "ends": [3], "starts": [2], "axes": [0]})
diff_h = ctx.make_node("Sub", [output_h, expect_h])
diff_w = ctx.make_node("Sub", [output_w, expect_w])
nonneg_diff_h = diff_h
nonneg_diff_w = diff_w
if use_strides_workaround:
const_zero = ctx.make_const(utils.make_name(node.name + "_const_zero"), np.array([0], dtype=np.int64))
nonneg_diff_h = ctx.make_node("Max", [diff_h.output[0], const_zero.output[0]])
nonneg_diff_w = ctx.make_node("Max", [diff_w.output[0], const_zero.output[0]])
const_two = ctx.make_const(utils.make_name(node.name + "_const_two"), np.array([2], dtype=np.int64))
start_h = ctx.make_node("Div", [nonneg_diff_h.output[0], const_two.output[0]])
start_w = ctx.make_node("Div", [nonneg_diff_w.output[0], const_two.output[0]])
end_h = ctx.make_node("Add", [start_h.output[0], expect_h])
end_w = ctx.make_node("Add", [start_w.output[0], expect_w])
if spatial == 3:
output_d = GraphBuilder(ctx).make_slice(
{"data": output_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
expect_d = GraphBuilder(ctx).make_slice(
{"data": input_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
diff_d = ctx.make_node("Sub", [output_d, expect_d])
nonneg_diff_d = diff_d
if use_strides_workaround:
nonneg_diff_d = ctx.make_node("Max", [diff_d.output[0], const_zero.output[0]])
start_d = ctx.make_node("Div", [nonneg_diff_d.output[0], const_two.output[0]])
end_d = ctx.make_node("Add", [start_d.output[0], expect_d])
starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0], start_d.output[0]],
attr={"axis": 0})
ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0], end_d.output[0]], attr={"axis": 0})
slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
np.array([1, 2, 3], dtype=np.int64))
else:
starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0]], attr={"axis": 0})
ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0]], attr={"axis": 0})
slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
np.array([1, 2], dtype=np.int64))
slice_node = ctx.make_node("Slice",
[node.output[0], starts.output[0], ends.output[0], slice_axes.output[0]],
shapes=output_shape_orig)
final_node = slice_node
if use_strides_workaround:
cz = const_zero.output[0]
neg_diff_h = ctx.make_node("Neg", [diff_h.output[0]])
shrink_h_by = ctx.make_node("Max", [neg_diff_h.output[0], const_zero.output[0]])
shb = shrink_h_by.output[0]
neg_diff_w = ctx.make_node("Neg", [diff_w.output[0]])
shrink_w_by = ctx.make_node("Max", [neg_diff_w.output[0], const_zero.output[0]])
swb = shrink_w_by.output[0]
if spatial == 3:
neg_diff_d = ctx.make_node("Neg", [diff_d.output[0]])
shrink_d_by = ctx.make_node("Max", [neg_diff_d.output[0], const_zero.output[0]])
sdb = shrink_d_by.output[0]
pads = ctx.make_node("Concat", [cz, cz, cz, cz, cz, cz, shb, swb, sdb, cz], attr={"axis": 0})
padded_node = ctx.make_node("Pad", [slice_node.output[0], pads.output[0]])
else:
pads = ctx.make_node("Concat", [cz, cz, cz, cz, cz, shb, swb, cz], attr={"axis": 0})
padded_node = ctx.make_node("Pad", [slice_node.output[0], pads.output[0]])
final_node = padded_node
downstream_nodes = ctx.find_output_consumers(node.output[0])
downstream_nodes.remove(output_shape)
downstream_nodes.remove(slice_node)
ctx.replace_all_inputs(node.output[0], final_node.output[0], ops=downstream_nodes)
conv_dims_attr(node, "strides", spatial=spatial)
conv_dims_attr(node, "dilations", spatial=spatial)
# remove output_shapes input
ctx.remove_input(node, node.input[0], 0)
# swap data and kernel
t = node.input[0]
ctx.replace_input(node, node.input[0], node.input[1], 0)
ctx.replace_input(node, node.input[1], t, 1)
conv_convert_inputs(ctx, node, with_kernel=True, spatial=spatial, quantization_axis=1)
@classmethod
def version_11(cls, ctx, node, **kwargs):
    # Opset 11 requires no changes relative to the opset-1 conversion; delegate.
    cls.version_1(ctx, node, **kwargs)
@tf_op(["DepthwiseConv2d", "DepthwiseConv2dNative"])
class DepthwiseConv2d:
@classmethod
def version_1(cls, ctx, node, **kwargs):
# T output = DepthwiseConv2dNative(T input, T filter, @list(int) strides, @string padding, @string data_format)
# T Y = ConvTranspose(T X, T W, T B, @AttrType.STRING auto_pad, @AttrType.INTS dilations, @AttrType.INT group,
# @AttrType.INTS kernel_shape, @AttrType.INTS output_shape, @AttrType.INTS pads, @AttrType.INTS strides)
#
# this is not documented well in onnx, the hint comes from pytorch documentation:
# http://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
# The configuration when groups == in_channels and out_channels = K * in_channels
# where K is a positive integer is termed in literature as depthwise convolution.
# In other words, for an input of size (N,Cin,Hin,Win),
# if you want a depthwise convolution with a depthwise multiplier K,
# then you use the constructor arguments (in_channels=Cin,out_channels=Cin*K,...,groups=Cin)
#
node.type = "Conv"
input_shape = ctx.get_shape(node.input[0])
if len(input_shape) != 4:
raise ValueError("only Conv2D is supported")
kernel_shape = ctx.get_shape(node.input[1])
if len(kernel_shape) != 4:
raise ValueError("only Conv2D is supported")
k_h, k_w, k_input_channels, k_channel_multiplier = kernel_shape
if "depth_multiplier" in node.attr:
depth_multiplier = node.get_attr_int("depth_multiplier")
k_input_channels //= depth_multiplier
k_channel_multiplier *= depth_multiplier
if k_input_channels < 1:
raise ValueError("input channel must be positive")
k_output_channels = k_input_channels * k_channel_multiplier
if k_h > 0 and k_w > 0:
node.set_attr("kernel_shape", [k_h, k_w])
strides = conv_dims_attr(node, "strides")
dilations = conv_dims_attr(node, "dilations")
node.set_attr("group", k_input_channels)
add_padding(ctx, node, kernel_shape, strides, dilations)
new_kernel_shape = [k_h, k_w, 1, k_output_channels]
conv_convert_inputs(ctx, node, with_kernel=True, new_kernel_shape=new_kernel_shape)
@tf_op(["AvgPool", "AvgPool3D"], onnx_op="AveragePool")
@tf_op(["MaxPool", "MaxPoolV2", "MaxPool3D"], onnx_op="MaxPool")
class PoolOp:
@classmethod
def version_1(cls, ctx, node, **kwargs):
cls._convert(ctx, node, **kwargs)
@classmethod
def version_10(cls, ctx, node, **kwargs):
cls._convert(ctx, node, **kwargs)
@classmethod
def version_11(cls, ctx, node, **kwargs):
# no change
cls._convert(ctx, node, **kwargs)
@classmethod
def _convert(cls, ctx, node, **kwargs):
# T output = MaxPool(T input, @list(int) ksize, @list(int) strides, @string padding, @string data_format)
# T Y = MaxPool(T X, @AttrType.STRING auto_pad, @AttrType.INTS kernel_shape, @AttrType.INTS pads,
# @AttrType.INTS strides)
# above seems wrong - input[1] is ksize, input[2] is strides
# stride and ksize in tf is not always NHWC, so watch out when converting into onnx's NCHW
if kwargs["tf_op"] in ["AvgPool3D", "MaxPool3D"]:
spatial = 3
else:
spatial = 2
origin_dtype = ctx.get_dtype(node.output[0])
if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]:
# the onnx spec doesn't allow int types for pool ops
input_shapes = [ctx.get_shape(node.input[0])]
output_shapes = [ctx.get_shape(node.output[0])]
cast_node = ctx.make_node("Cast", [node.input[0]], dtypes=[onnx_pb.TensorProto.FLOAT], shapes=input_shapes,
name=node.name + "_cast", attr={"to": onnx_pb.TensorProto.FLOAT})
_ = ctx.insert_node_on_output(cast_node, node.inputs[0].output[0])
cast_back_node = ctx.make_node("Cast", [node.output[0]], dtypes=[origin_dtype], shapes=output_shapes,
name=node.name + "_castback", attr={"to": origin_dtype})
_ = ctx.insert_node_on_output(cast_back_node, node.output[0])
ctx.set_dtype(node.output[0], onnx_pb.TensorProto.FLOAT)
if len(node.input) < 3:
kernel_shape_tf = node.get_attr("ksize").ints
strides_tf = node.get_attr("strides").ints
else:
kernel_shape_tf = node.inputs[1].get_tensor_value()
strides_tf = node.inputs[2].get_tensor_value()
ctx.remove_input(node, node.input[2], 2)
ctx.remove_input(node, node.input[1], 1)
kernel_shape_hw = parse_dims_attr(node, kernel_shape_tf, spatial)
strides_hw = parse_dims_attr(node, strides_tf, spatial)
node.set_attr("kernel_shape", kernel_shape_hw)
node.set_attr("strides", strides_hw)
dilations = conv_dims_attr(node, "dilations", spatial=spatial)
add_padding(ctx, node, kernel_shape_hw, strides_hw, dilations=dilations, spatial=spatial)
conv_convert_inputs(ctx, node, with_kernel=False, spatial=spatial)
@tf_op(["MaxPoolWithArgmax"], onnx_op="MaxPool")
class MaxPoolWithArgmaxOp:
@classmethod
def version_8(cls, ctx, node, **kwargs):
# T output = MaxPool(T input, @list(int) ksize, @list(int) strides, @string padding, @string data_format)
# Set kernel_shape attribute
kernel_shape = node.get_attr("ksize").ints
kernel_shape = [kernel_shape[1], kernel_shape[2]]
node.set_attr("kernel_shape", kernel_shape)
# Set strides attribute
strides = node.get_attr("strides").ints
strides = [strides[1], strides[2]]
node.set_attr("strides", strides)
# The input data_format is NHWC for TF MaxPoolWithArgmax
node.set_attr("data_format", "NHWC")
# Convert indices from NCHW to NHWC format
input_shape = ctx.make_node("Shape", [node.input[0]]).output[0]
input_shape_guess = ctx.get_shape(node.input[0])
n, h, w, c = ctx.make_node("Split", [input_shape], attr={'axis': 0, 'num_outputs': 4}, output_count=4).output
hw = ctx.make_node("Mul", [h, w]).output[0]
chw = ctx.make_node("Mul", [hw, c]).output[0]
consumers = ctx.find_output_consumers(node.output[1])
if ctx.opset >= 10:
xy = ctx.make_node("Mod", [node.output[1], hw]).output[0]
else:
xy_div = ctx.make_node("Div", [node.output[1], hw]).output[0]
xy_mul = ctx.make_node("Mul", [xy_div, hw]).output[0]
xy = ctx.make_node("Sub", [node.output[1], xy_mul]).output[0]
xy_scale_c = ctx.make_node("Mul", [xy, c]).output[0]
const_zero = ctx.make_const(utils.make_name("const_zero"), np.array(0, np.int64)).output[0]
const_one = ctx.make_const(utils.make_name("const_one"), np.array(1, np.int64)).output[0]
if input_shape_guess is not None and input_shape_guess[3] > 0:
c_range_np = np.arange(input_shape_guess[3], dtype=np.int64)
c_range = ctx.make_const(utils.make_name("c_range"), c_range_np).output[0]
else:
utils.make_sure(ctx.opset >= 11, "opset 11 required for MaxPoolWithArgmax with non-const num channels")
c_sq = GraphBuilder(ctx).make_squeeze({'data': c, 'axes': [0]})
c_range = ctx.make_node("Range", [const_zero, c_sq, const_one]).output[0]
xyc = ctx.make_node("Add", [xy_scale_c, c_range]).output[0]
single_batch = input_shape_guess is not None and input_shape_guess[0] == 1
# Documentation says include_batch_in_index has default False, but tf 1.13 excludes it and assumes True
if node.get_attr_value('include_batch_in_index', True) and not single_batch:
utils.make_sure(ctx.opset >= 11, "opset 11 required for MaxPoolWithArgmax with include_batch_in_index")
n_sq = GraphBuilder(ctx).make_squeeze({'data': n, 'axes': [0]})
n_range = ctx.make_node("Range", [const_zero, n_sq, const_one]).output[0]
n_range_unsq = GraphBuilder(ctx).make_unsqueeze({'data': n_range, 'axes': [1, 2, 3]})
n_range_scale = ctx.make_node("Mul", [n_range_unsq, chw]).output[0]
result = ctx.make_node("Add", [xyc, n_range_scale]).output[0]
else:
result = xyc
ctx.replace_all_inputs(node.output[1], result, ops=consumers)
add_padding(ctx, node, kernel_shape, strides)
conv_convert_inputs(ctx, node, with_kernel=False, input_indices=[0], output_indices=[0, 1])
@tf_op(["BiasAdd", "BiasAddV1"])
class BiasAdd:
@classmethod
def version_1(cls, ctx, node, **kwargs):
# T output = BiasAdd(T value, T bias, @string data_format)
# T output = BiasAddV1(T value, T bias)
# TODO: for now use add. We may need to convert to NCHW.
node.type = "Add"
common.BroadcastOp.version_1(ctx, node, **kwargs)
@classmethod
def version_7(cls, ctx, node, **kwargs):
# T output = BiasAdd(T value, T bias, @string data_format)
# T output = BiasAddV1(T value, T bias)
# According TF bias_add definition, the input dim is always only 1.
node.type = "Add"
common.BroadcastOp.version_6(ctx, node, **kwargs)
# on NHWC, bias will broadcast from largest dim, which is default onnx Add op broadcast behavior.
if not node.is_nhwc():
# however, in NCHW, bias should be at 2nd dim, which by default onnx Add op has no way to know,
# so it needs being reshaped into 3-dim tensor before add
shape0 = ctx.get_shape(node.input[0])
shape1 = ctx.get_shape(node.input[1])
if node.inputs[1].type == 'Const' and len(shape1) == 1:
new_broadcast_shape = [shape1[0]] + [1] * (len(shape0) - 2)
shape_name = utils.make_name(node.name)
ctx.make_const(shape_name, np.array(new_broadcast_shape, dtype=np.int64))
op_name = node.input[1]
reshape_node = ctx.make_node("Reshape", [op_name, shape_name])
ctx.replace_input(node, op_name, reshape_node.output[0], 1)
ctx.set_shape(reshape_node.output[0], new_broadcast_shape)
@tf_op(["Pad", "PadV2", "MirrorPad"], onnx_op="Pad")
class Pad:
@classmethod
def convert_symmetric_pads(cls, ctx, node):
"""Currently there isn't a symmetric padding mode in ONNX so we add a dummy row then use the reflect mode
and remove the dummy row with compress. Ex: 1234 -> 012340 -> 2101234043 -> 21123443. Only do this to
dims with non-zero pads (if pads are constant)"""
rank = ctx.get_rank(node.input[0])
utils.make_sure(rank is not None, "Cannot convert pad with symmetric mode and unknown rank")
utils.make_sure(ctx.opset >= 9, "opset 9 required for symmetric padding mode")
node.set_attr("mode", "reflect")
const_pads = None
consumers = ctx.find_output_consumers(node.output[0])
output_shape = ctx.get_shape(node.output[0])
if ctx.opset < 11:
const_pads = node.get_attr_value("pads")
elif node.inputs[1].is_const():
const_pads = node.inputs[1].get_tensor_value()
non_zero_axes = list(range(rank))
if const_pads is not None:
non_zero_axes = []
for i in range(rank):
if const_pads[i] != 0 or const_pads[i + rank] != 0:
non_zero_axes.append(i)
inc_pads = [0] * (rank * 2)
for a in non_zero_axes:
inc_pads[a] = 1
inc_pads[a + rank] = 1
if ctx.opset < 11:
padded_inp = ctx.make_node("Pad", [node.input[0]], attr={'mode': 'constant', 'pads': inc_pads}).output[0]
else:
pad1_pads_const = ctx.make_const(utils.make_name("pad1_pads"), np.array(inc_pads, np.int64)).output[0]
padded_inp = ctx.make_node("Pad", [node.input[0], pad1_pads_const], attr={'mode': 'constant'}).output[0]
ctx.replace_input(node, node.input[0], padded_inp, 0)
ctx.update_node_shape_dtype(node, override=True)
output = node.output[0]
shape = ctx.make_node("Shape", [output]).output[0]
dims = ctx.make_node("Split", [shape], attr={'num_outputs': rank}, output_count=rank).output
two_false = ctx.make_const(utils.make_name("two_false"), np.array([False, False], bool)).output[0]
inv_second = ctx.make_const(utils.make_name("inv_second"), np.array([1, -1], np.int64)).output[0]
dec_second = ctx.make_const(utils.make_name("dec_second"), np.array([0, 1], np.int64)).output[0]
for a in non_zero_axes:
one_tensor = helper.make_tensor("value", onnx_pb.TensorProto.BOOL, dims=[1], vals=[1])
ones_of_shape = ctx.make_node("ConstantOfShape", [dims[a]], attr={'value': one_tensor}).output[0]
if const_pads is not None:
to_remove_val = [const_pads[a], -1 - const_pads[a + rank]]
to_remove = ctx.make_const(utils.make_name("to_remove"), np.array(to_remove_val, np.int64)).output[0]
else:
pads_idx = ctx.make_const(utils.make_name("pads_idx"), np.array([a, a + rank], np.int64)).output[0]
pads_vals = ctx.make_node("Gather", [node.input[1], pads_idx]).output[0]
pads_inv_second = ctx.make_node("Mul", [pads_vals, inv_second]).output[0]
to_remove = ctx.make_node("Sub", [pads_inv_second, dec_second]).output[0]
scatter_op = "ScatterElements" if ctx.opset >= 11 else "Scatter"
dims_to_keep = ctx.make_node(scatter_op, [ones_of_shape, to_remove, two_false]).output[0]
compress = ctx.make_node("Compress", [output, dims_to_keep], attr={'axis': a})
output = compress.output[0]
ctx.replace_all_inputs(node.output[0], output, consumers)
ctx.set_shape(output, output_shape)
@classmethod
def version_1(cls, ctx, node, **kwargs):
node.type = "Pad"
# T output = Pad(T input, int32 paddings, @type Tpaddings), CONST model using default value
# or PadV2(T input, int32 paddings, T constant_value, @type Tpaddings), CONST mode - default value specified
# or MirrorPad(T input, int32 paddings, @type Tpaddings, @STRING mode), other mode.
# T output = Pad(T data, @STRING mode, @INTS pads, @FLOAT value)
paddings = np.array(node.inputs[1].get_tensor_value()).transpose().flatten()
mode = node.get_attr("mode")
if mode:
mode = mode.s.decode("utf-8").lower()
node.set_attr("mode", mode)
if mode not in [None, "symmetric", "constant", "reflect"]:
raise ValueError(mode + " pad mode is not supported")
if mode in [None, "constant"] and len(node.input) == 3:
const_val = node.inputs[2].get_tensor_value()
node.set_attr("value", const_val)
ctx.remove_input(node, node.input[2], 2)
ctx.remove_input(node, node.input[1], 1)
node.set_attr("pads", paddings)
origin_dtype = ctx.get_dtype(node.output[0])
if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT,
onnx_pb.TensorProto.DOUBLE]:
cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.FLOAT)
ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT)
ctx.copy_shape(node.name, cast_node.output[0])
cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
name=utils.make_name(node.name) + "_castback",
to=origin_dtype)
ctx.set_dtype(cast_back_node.output[0], origin_dtype)
ctx.copy_shape(node.name, cast_back_node.output[0])
if mode == "symmetric":
cls.convert_symmetric_pads(ctx, node)
@classmethod
def version_11(cls, ctx, node, **kwargs):
mode = node.get_attr("mode")
if mode:
mode = mode.s.decode("utf-8").lower()
node.set_attr("mode", mode)
if mode not in [None, "symmetric", "constant", "reflect"]:
raise ValueError(mode + " pad mode is not supported")
if not node.inputs[1].is_const():
# pads must be int64.
if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64:
ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64)
ctx.insert_new_node_on_input(node, "Transpose", node.input[1])
shape_const = ctx.make_const(utils.make_name(node.name), np.array([-1]).astype(np.int64))
ctx.insert_new_node_on_input(node, "Reshape", [node.input[1], shape_const.name])
else:
paddings = node.inputs[1].get_tensor_value(as_list=False).astype(np.int64).transpose().flatten()
pad_const = ctx.make_const(utils.make_name("pad_const"), paddings)
ctx.replace_input(node, node.input[1], pad_const.output[0], 1)
origin_dtype = ctx.get_dtype(node.output[0])
if origin_dtype not in [TensorProto.FLOAT, TensorProto.DOUBLE,
TensorProto.INT32, TensorProto.INT64]:
cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=TensorProto.FLOAT)
ctx.set_dtype(cast_node.output[0], TensorProto.FLOAT)
ctx.copy_shape(node.name, cast_node.output[0])
cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
name=utils.make_name(node.name) + "_castback",
to=origin_dtype)
ctx.set_dtype(cast_back_node.output[0], origin_dtype)
ctx.copy_shape(node.name, cast_back_node.output[0])
if mode == "symmetric":
cls.convert_symmetric_pads(ctx, node)
@tf_op(["FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3"])
class BatchNorm:
@classmethod
def version_6(cls, ctx, node, **kwargs):
tf_type = node.type
input_rank = len(ctx.get_shape(node.input[0]))
if input_rank == 4:
spatial = 2
elif input_rank == 5:
spatial = 3
else:
raise ValueError("node input must be 4 or 5-dimensional, is {} now".format(input_rank))
node.type = "BatchNormalization"
# tf inputs: x, scale, bias, mean, variance
# tf outputs: y, batch_mean, batch_var
# a: data_format, epsilon, is_training
# onnx inputs: X, scale, B, mean, variance, attributes: epsilon, momentum=0.9, spatial : 1
# output: y, mean, var, savedmean, savedvar,
# detach unused outputs. While we could let the unused outputs dangle,
# some runtimes like pytorch/caffe2 do complain about it.
# onnx batchnorm requires same T for all inputs
mean_type = ctx.get_dtype(node.input[3])
x_dtype = ctx.get_dtype(node.input[0])
if x_dtype != mean_type:
# TODO: this works but more efficient would be to flip the other inputs. We'd need to check
# TODO: first if this works with the onnx implementation so its a later for now
ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=mean_type)
# casting the input[0] will change the output dtype of bn so we need to cast back
cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
name=utils.make_name(node.name) + "_castback",
to=x_dtype)
ctx.set_dtype(cast_back_node.output[0], x_dtype)
ctx.copy_shape(node.name, cast_back_node.output[0])
ctx.set_dtype(node.output[0], mean_type)
consumers = [ctx.find_output_consumers(output_name) for output_name in node.output[1:]]
if not any(consumers):
new_output = [node.output[0]]
# the setter makes a copy of new_output
node.output = new_output
conv_convert_inputs(ctx, node, with_kernel=False, spatial=spatial)
inp_shape = ctx.get_shape(node.input[0])
inp_rank = len(inp_shape) if inp_shape is not None else None
scale_shape = ctx.get_shape(node.input[1])
mean_shape = ctx.get_shape(node.input[3])
var_shape = ctx.get_shape(node.input[4])
val_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[1]))
is_training = node.get_attr_value('is_training', True)
if is_training and node.get_attr_value('exponential_avg_factor', 1.0) == 1.0:
# Sometimes TF uses a BatchNorm op with training = True and exponential_avg_factor = 1.0
# to perform layer mean/variance normalization. In such cases, the mean/var are computed from the input.
# TF allows mean/variance to be excluded only if is_training and exponential_avg_factor == 1.0
utils.make_sure(inp_rank is not None, "Cannot convert node %s of type %s with input of unknown rank.",
node.name, tf_type)
dims = [0] + list(range(2, inp_rank))
avg = GraphBuilder(ctx).make_reduce_mean({"data": node.input[0], "axes": dims, "keepdims": True})
avg_squeezed = GraphBuilder(ctx).make_squeeze({"data": avg, "axes": dims})
sub = ctx.make_node("Sub", [node.input[0], avg]).output[0]
var_squeezed = GraphBuilder(ctx).make_reduce_sum_square({"data": sub, "axes": dims, "keepdims": False})
inp_shape = ctx.make_node("Shape", [node.input[0]]).output[0]
dims_const = ctx.make_const(utils.make_name("axes_const"), np.array(dims, dtype=np.int64)).output[0]
reduce_dims = ctx.make_node("Gather", [inp_shape, dims_const]).output[0]
dims_product = GraphBuilder(ctx).make_reduce_prod({"data": reduce_dims, "axes": [0], "keepdims": False})
cnt_float = ctx.make_node("Cast", [dims_product], attr={'to': ctx.get_dtype(node.input[0])})
pop_var_squeezed = ctx.make_node("Div", [var_squeezed, cnt_float.output[0]]).output[0]
ctx.replace_inputs(node, node.input[:3] + [avg_squeezed, pop_var_squeezed])
elif is_training:
logger.warning("Node %s of type %s has is_training set to true, which is not supperted. "
"Please re-save the model with training set to false.",
node.name, tf_type)
# As long as the mean/variance estimates are provided, we should be OK
is_training = False
if not is_training and mean_shape != scale_shape and all(d >= 0 for d in scale_shape):
new_mean_value = np.array(np.resize(node.inputs[3].get_tensor_value(as_list=False), scale_shape),
dtype=val_type)
new_mean_node_name = utils.make_name(node.name)
ctx.make_const(new_mean_node_name, new_mean_value)
ctx.replace_input(node, node.input[3], new_mean_node_name, 3)
if not is_training and var_shape != scale_shape and all(d >= 0 for d in scale_shape):
new_var_value = np.array(np.resize(node.inputs[4].get_tensor_value(as_list=False), scale_shape),
dtype=val_type)
new_val_node_name = utils.make_name(node.name)
ctx.make_const(new_val_node_name, new_var_value)
ctx.replace_input(node, node.input[4], new_val_node_name, 4)
@classmethod
def version_9(cls, ctx, node, **kwargs):
# is_test was removed - no change for us
cls.version_6(ctx, node, **kwargs)
@tf_op(["SpaceToDepth"])
class SpaceToDepth:
@classmethod
def version_1(cls, ctx, node, **kwargs):
block_size = node.get_attr("block_size")
node.set_attr("blocksize", block_size.i)
conv_convert_inputs(ctx, node, with_kernel=False)
@tf_op(["DepthToSpace"])
class DepthToSpace:
@classmethod
def version_1(cls, ctx, node, **kwargs):
block_size = node.get_attr("block_size")
node.set_attr("blocksize", block_size.i)
conv_convert_inputs(ctx, node, with_kernel=False)
@classmethod
def version_11(cls, ctx, node, **kwargs):
# Onnx-11 CRD mode added. No change for tf2onnx
cls.version_1(ctx, node, **kwargs)
@tf_op(["SampleDistortedBoundingBox", "SampleDistortedBoundingBoxV2"])
class SampleDistortedBoundingBox:
@classmethod
def version_9(cls, ctx, node, **kwargs):
# See tensorflow sample_distorted_bounding_box_op.cc
image_size, bounding_boxes, min_object_covered = node.input
seed = node.get_attr_value("seed", 0)
seed2 = node.get_attr_value("seed2", 0)
onnx_seed = utils.combine_seeds(seed, seed2)
rand_attr = {}
if onnx_seed is not None:
rand_attr['seed'] = onnx_seed
min_aspect_ratio, max_aspect_ratio = node.get_attr_value("aspect_ratio_range", [0.75, 1.33])
ratio_range = max_aspect_ratio - min_aspect_ratio
min_area, max_area = node.get_attr_value("area_range", [0.05, 1.0])
max_attempts = node.get_attr_value("max_attempts", 100)
use_image_if_no_bounding_boxes = node.get_attr_value("use_image_if_no_bounding_boxes", 0)
min_area_node = ctx.make_const(utils.make_name("min_area"), np.array(min_area, np.float32)).output[0]
max_area_node = ctx.make_const(utils.make_name("max_area"), np.array(max_area, np.float32)).output[0]
min_ratio_node = ctx.make_const(utils.make_name("min_ratio"), np.array(min_aspect_ratio, np.float32)).output[0]
ratio_range_node = ctx.make_const(utils.make_name("max_ratio"), np.array(ratio_range, np.float32)).output[0]
boxes_tensor_shape = ctx.get_shape(bounding_boxes)
if boxes_tensor_shape is not None and 0 in boxes_tensor_shape and use_image_if_no_bounding_boxes:
no_boxes = True
min_area_node = ctx.make_node("Max", [min_object_covered, min_area_const]).output[0]
else:
no_boxes = False
rand_attr['shape'] = [max_attempts, 4]
random_nums = ctx.make_node("RandomUniform", [], attr=rand_attr, op_name_scope=node.name).output[0]
r1, r2, r3, r4 = ctx.make_node("Split", [random_nums], attr={'axis': 1, 'num_outputs': 4},
output_count=4).output
# Use r1 to sample the aspect ratio
scaled_r1 = ctx.make_node("Mul", [r1, ratio_range_node]).output[0]
aspect_ratio = ctx.make_node("Add", [scaled_r1, min_ratio_node]).output[0]
image_size_float = ctx.make_node("Cast", [image_size], attr={'to': TensorProto.FLOAT}).output[0]
img_height = GraphBuilder(ctx).make_slice({"data": image_size_float, "starts": [0], "ends": [1], "axes": [0]})
img_width = GraphBuilder(ctx).make_slice({"data": image_size_float, "starts": [1], "ends": [2], "axes": [0]})
img_aspect_ratio = ctx.make_node("Div", [img_width, img_height]).output[0]
adjusted_aspect_ratio = ctx.make_node("Div", [aspect_ratio, img_aspect_ratio]).output[0]
# Use r2 to sample height
const_one = ctx.make_const(utils.make_name("const_one"), np.array(1, np.float32)).output[0]
min_height_squared = ctx.make_node("Div", [min_area_node, adjusted_aspect_ratio]).output[0]
max_height_squared = ctx.make_node("Div", [max_area_node, adjusted_aspect_ratio]).output[0]
min_height = ctx.make_node("Sqrt", [min_height_squared]).output[0]
max_height = ctx.make_node("Sqrt", [max_height_squared]).output[0]
max_allowed_height = ctx.make_node("Div", [img_aspect_ratio, aspect_ratio]).output[0]
max_allowed_height2 = ctx.make_node("Min", [max_allowed_height, const_one]).output[0]
max_height2 = ctx.make_node("Min", [max_height, max_allowed_height2]).output[0]
min_height2 = ctx.make_node("Min", [min_height, max_height2]).output[0]
height_range = ctx.make_node("Sub", [max_height2, min_height2]).output[0]
scaled_r2 = ctx.make_node("Mul", [r2, height_range]).output[0]
box_height = ctx.make_node("Add", [scaled_r2, min_height2]).output[0]
box_width = ctx.make_node("Mul", [box_height, adjusted_aspect_ratio]).output[0]
# Use r3 and r4 to get x and y pos
max_shift_x = ctx.make_node("Sub", [const_one, box_width]).output[0]
max_shift_y = ctx.make_node("Sub", [const_one, box_height]).output[0]
x1 = ctx.make_node("Mul", [r3, max_shift_x]).output[0]
y1 = ctx.make_node("Mul", [r4, max_shift_y]).output[0]
x2 = ctx.make_node("Add", [x1, box_width]).output[0]
y2 = ctx.make_node("Add", [y1, box_height]).output[0]
all_boxes = ctx.make_node("Concat", [y1, x1, y2, x2], attr={'axis': 1}).output[0]
area = ctx.make_node("Mul", [box_height, box_width]).output[0]
area_too_large = ctx.make_node("Greater", [area, max_area_node]).output[0]
area_too_small = ctx.make_node("Less", [area, min_area_node]).output[0]
area_out_of_bounds = ctx.make_node("Or", [area_too_large, area_too_small]).output[0]
area_in_bounds = ctx.make_node("Not", [area_out_of_bounds]).output[0]
acceptable = area_in_bounds
if not no_boxes:
boxes_shape = ctx.make_const(utils.make_name("reshape_const"), np.array([-1, 4, 1], np.int64)).output[0]
bounding_boxes_flat = ctx.make_node("Reshape", [bounding_boxes, boxes_shape]).output[0]
box_y1, box_x1, box_y2, box_x2 = \
ctx.make_node("Split", [bounding_boxes_flat], attr={'axis': 1, 'num_outputs': 4}, output_count=4).output
combined_max_y = ctx.make_node("Min", [y2, box_y2]).output[0]
combined_max_x = ctx.make_node("Min", [x2, box_x2]).output[0]
combined_min_y = ctx.make_node("Max", [y1, box_y1]).output[0]
combined_min_x = ctx.make_node("Max", [x1, box_x1]).output[0]
box_height = ctx.make_node("Sub", [box_y2, box_y1]).output[0]
box_width = ctx.make_node("Sub", [box_x2, box_x1]).output[0]
box_area = ctx.make_node("Mul", [box_height, box_width]).output[0]
const_zero = ctx.make_const(utils.make_name("const_zero"), np.array(0, np.float32)).output[0]
overlap_height = ctx.make_node("Sub", [combined_max_y, combined_min_y]).output[0]
non_neg_height = ctx.make_node("Max", [overlap_height, const_zero]).output[0]
overlap_width = ctx.make_node("Sub", [combined_max_x, combined_min_x]).output[0]
non_neg_width = ctx.make_node("Max", [overlap_width, const_zero]).output[0]
overlap_area = ctx.make_node("Mul", [non_neg_height, non_neg_width]).output[0]
overlap_ratio = ctx.make_node("Div", [overlap_area, box_area]).output[0]
overlap_bad = ctx.make_node("Less", [overlap_ratio, min_object_covered]).output[0]
overlap_ok = ctx.make_node("Not", [overlap_bad]).output[0]
overlap_ok_fp = ctx.make_node("Cast", [overlap_ok], attr={'to': TensorProto.FLOAT}).output[0]
num_ok = GraphBuilder(ctx).make_reduce_sum(
{"data": overlap_ok_fp, "axes": [0], "keepdims": 0, "noop_with_empty_axes": 1})
any_ok = ctx.make_node("Greater", [num_ok, const_zero]).output[0]
acceptable = ctx.make_node("And", [acceptable, any_ok]).output[0]
acceptable_sq = GraphBuilder(ctx).make_squeeze({'data': acceptable, 'axes': [1]})
boxes_shape = ctx.get_shape(all_boxes)
filtered_shape = [-1] + boxes_shape[1:] if boxes_shape is not None else None
filtered = ctx.make_node("Compress", [all_boxes, acceptable_sq], attr={'axis': 0},
dtypes=[ctx.get_dtype(all_boxes)], shapes=[filtered_shape]).output[0]
default_box = np.array([0.0, 0.0, 1.0, 1.0], np.float32).reshape([1, 4])
const_default_box = ctx.make_const(utils.make_name("default_box"), default_box).output[0]
filtered_non_empty = ctx.make_node("Concat", [filtered, const_default_box], attr={'axis': 0}).output[0]
first_valid_box = GraphBuilder(ctx).make_slice(
{"data": filtered_non_empty, "starts": [0], "ends": [1], "axes": [0]})
first_valid_box_sq = GraphBuilder(ctx).make_squeeze({'data': first_valid_box, 'axes': [0]})
int_dtype = ctx.get_dtype(image_size)
np_int_dtype = utils.map_onnx_to_numpy_type(int_dtype)
scale = ctx.make_node("Concat", [img_height, img_width, img_height, img_width], attr={'axis': 0}).output[0]
box_scaled = ctx.make_node("Mul", [first_valid_box_sq, scale]).output[0]
if ctx.opset >= 11:
box_rounded = ctx.make_node("Round", [box_scaled]).output[0]
else:
box_rounded = box_scaled # Close enough
box_cast = ctx.make_node("Cast", [box_rounded], attr={'to': int_dtype}).output[0]
bb_begin, bb_end = ctx.make_node("Split", [box_cast], attr={'axis': 0, 'num_outputs': 2}, output_count=2).output
bb_size = ctx.make_node("Sub", [bb_end, bb_begin]).output[0]
const_zero_int = ctx.make_const(utils.make_name("const_zero"), np.array([0], np_int_dtype)).output[0]
const_neg_one_int = ctx.make_const(utils.make_name("const_neg_one"), np.array([-1], np_int_dtype)).output[0]
begin = ctx.make_node("Concat", [bb_begin, const_zero_int], attr={'axis': 0},
op_name_scope=node.name).output[0]
size = ctx.make_node("Concat", [bb_size, const_neg_one_int], attr={'axis': 0},
op_name_scope=node.name).output[0]
bboxes = GraphBuilder(ctx).make_unsqueeze({'data': first_valid_box, 'axes': [0]})
ctx.replace_all_inputs(node.output[0], begin)
ctx.replace_all_inputs(node.output[1], size)
ctx.replace_all_inputs(node.output[2], bboxes)
ctx.remove_node(node.name)
@tf_op(["CropAndResize"])
class CropAndResize:
@classmethod
def version_10(cls, ctx, node, **kwargs):
    """Convert TF CropAndResize to ONNX RoiAlign (opset 10).

    Requires const boxes and crop_size: the normalized TF [y1, x1, y2, x2]
    boxes are rewritten at conversion time to the pixel-space (x1, y1, x2, y2)
    corners RoiAlign expects, widened by half a sampling step per side.
    """
    utils.make_sure(node.inputs[1].type == "Const", "boxes input must be a Const")
    # BUGFIX: this check is for crop_size (input 3), but the message previously
    # said "boxes input must be a Const" due to a copy-paste.
    utils.make_sure(node.inputs[3].type == "Const", "crop_size input must be a Const")
    name = node.name
    output_height = node.inputs[3].get_tensor_value()[0]
    output_width = node.inputs[3].get_tensor_value()[1]
    rois = node.inputs[1].get_tensor_value()
    rois_shape = ctx.get_shape(node.input[1])
    img_shape = ctx.get_shape(node.input[0])  # NHWC (transposed to NCHW below)
    transform_rois = np.zeros(list(rois_shape), dtype=np.float32)
    for i in range(rois_shape[0]):
        # TF boxes are normalized [y1, x1, y2, x2]; scale into pixel coordinates
        y1, x1, y2, x2 = rois[i]
        y1 = y1 * (img_shape[1] - 1)
        y2 = y2 * (img_shape[1] - 1)
        x1 = x1 * (img_shape[2] - 1)
        x2 = x2 * (img_shape[2] - 1)
        spacing_h = (y2 - y1)
        spacing_w = (x2 - x1)
        # widen each box by half a grid step so sampling positions match TF
        b1 = y1 - 0.5 * spacing_h / (output_height - 1)
        a1 = x1 - 0.5 * spacing_w / (output_width - 1)
        b2 = y2 + 0.5 * spacing_h / (output_height - 1)
        a2 = x2 + 0.5 * spacing_w / (output_width - 1)
        # RoiAlign expects (x1, y1, x2, y2)
        transform_rois[i][0] = a1
        transform_rois[i][1] = b1
        transform_rois[i][2] = a2
        transform_rois[i][3] = b2
    # RoiAlign requires int64 batch indices
    cast_node = ctx.make_node("Cast", [node.input[2]], attr={"to": onnx_pb.TensorProto.INT64})
    bbox_node = ctx.make_const(utils.make_name("bbox"), transform_rois)
    dtypes = [ctx.get_dtype(node.output[0])]
    shapes = [ctx.get_shape(node.output[0])]
    # RoiAlign operates on NCHW: transpose in, and back to NHWC on the way out
    input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": [0, 3, 1, 2]},
                               name=utils.make_name(node.name))
    crop_and_resize = ctx.make_node("RoiAlign", inputs=[input_nchw.output[0], bbox_node.output[0],
                                                        cast_node.output[0]],
                                    attr={"output_height": output_height, "output_width": output_width,
                                          "spatial_scale": 1.0, "sampling_ratio": 1},
                                    name=utils.make_name(node.name))
    ctx.remove_node(name)
    ctx.make_node("Transpose", crop_and_resize.output, {"perm": [0, 2, 3, 1]},
                  name=name, outputs=node.output, shapes=shapes, dtypes=dtypes)
@classmethod
def any_version_after11(cls, opset, ctx, node, **kwargs):
# create loop of resize to cater to tensorflow CropAndResize, one box one iteration
mode = "nearest" if node.get_attr("method") is not None and node.get_attr(
"method").s == b"nearest" else "linear"
extrapolation_value = float(node.get_attr("extrapolation_value", "0").f)
input_x = node.input[0]
x_shape = ctx.make_node("Shape", [input_x]).output[0]
num_channels = GraphBuilder(ctx).make_slice({"data": x_shape, "starts": [3], "ends": [4], "axes": [0]})
boxes = node.input[1]
box_ind = node.input[2]
crop_size = node.input[3]
if ctx.get_dtype(crop_size) != TensorProto.INT64:
crop_size = ctx.make_node("Cast", [crop_size], attr={'to': TensorProto.INT64}).output[0]
trip_name = utils.make_name(node.name + "_i")
cond_name = utils.make_name(node.name + "_cond")
cond_out_name = utils.make_name(node.name + "cond_out")
g = ctx.create_new_graph_with_same_config()
g.add_graph_input(trip_name, TensorProto.INT64, [1])
g.add_graph_input(cond_name, TensorProto.BOOL, [])
g.parent_graph = ctx
const_zero = g.make_const(utils.make_name(node.name + "_const_zero"), np.array([0], dtype=np.int32))
const_zero_long = g.make_const(utils.make_name(node.name + "_const_zero_long"), np.array([0], dtype=np.int64))
const_one = g.make_const(utils.make_name(node.name + "_const_one"), np.array([1], dtype=np.int32))
const_one_long = g.make_const(utils.make_name(node.name + "_const_one_long"), np.array([1], dtype=np.int64))
index_end = g.make_node("Add", [trip_name, const_one_long.output[0]])
box_index_from = g.make_node("Slice", [box_ind, trip_name, index_end.output[0]], name="Slice_a")
box_index_to = g.make_node("Add", [box_index_from.output[0], const_one.output[0]])
target_x = g.make_node("Slice", [input_x, box_index_from.output[0], box_index_to.output[0],
const_zero.output[0]], name="Slice_b")
transposed_x = g.make_node("Transpose", [target_x.output[0]], attr={'perm': constants.NHWC_TO_NCHW})
const_zero_zero = g.make_const(utils.make_name(node.name + "_const_zero_zero"),
np.array([0, 0], dtype=np.float32))
const_one_one = g.make_const(utils.make_name(node.name + "_const_one_one"),
np.array([1, 1], dtype=np.float32))
const_four = g.make_const(utils.make_name(node.name + "_const_four"), np.array([4], dtype=np.int64))
const_empty_float = g.make_const(utils.make_name("const_empty_float"), np.array([], dtype=np.float32))
box = g.make_node("Slice", [boxes, trip_name, index_end.output[0], const_zero_long.output[0]],
name="Slice_c")
roi_raw = g.make_node("Reshape", [box.output[0], const_four.output[0]])
roi_raw_first_half = GraphBuilder(g).make_slice({"data": roi_raw.output[0], "ends": [2], "starts": [0]})
roi_raw_second_half = GraphBuilder(g).make_slice({"data": roi_raw.output[0], "ends": [4], "starts": [2]})
roi_concat_1 = g.make_node("Concat", [const_zero_zero.output[0], roi_raw_first_half], attr={'axis': 0})
roi_concat_2 = g.make_node("Concat", [const_one_one.output[0], roi_raw_second_half], attr={'axis': 0})
final_roi = g.make_node("Concat", [roi_concat_1.output[0], roi_concat_2.output[0]], attr={'axis': 0})
final_crop_size = build_dynamic_target_size(g, transposed_x.output[0], crop_size)
resized_x = g.make_node("Resize", [transposed_x.output[0], final_roi.output[0], const_empty_float.output[0],
final_crop_size.output[0]],
attr={"mode": mode, "extrapolation_value": extrapolation_value,
"coordinate_transformation_mode": "tf_crop_and_resize"})
recovered_x = g.make_node("Transpose", [resized_x.output[0]], attr={'perm': constants.NCHW_TO_NHWC})
squeeze_x = GraphBuilder(g).make_squeeze({'data': recovered_x.output[0], 'axes': [0]}, return_node=True)
g.make_node("Identity", [cond_name], outputs=[cond_out_name])
g.add_graph_output(cond_out_name, TensorProto.BOOL, [])
g.add_graph_output(squeeze_x.output[0], ctx.get_dtype(node.input[0]), [-1, -1, -1])
trip_node = ctx.make_node("Size", [box_ind])
cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=bool))
ctx.remove_node(node.name)
branches = {"body": g}
inner_loop = ctx.make_node("Loop", [trip_node.output[0], cond_const.output[0]], name=node.name,
outputs=node.output, branches=branches)
const_neg_one = ctx.make_const(utils.make_name("const_neg_one"), np.array([-1], np.int64)).output[0]
final_shape = ctx.make_node("Concat", [const_neg_one, crop_size, num_channels], attr={'axis': 0}).output[0]
# This reshape fixes the case when there are no iterations and the scan output is empty.
ctx.insert_new_node_on_output("Reshape", inner_loop.output[0], inputs=[inner_loop.output[0], final_shape])
@classmethod
def version_11(cls, ctx, node, **kwargs):
cls.any_version_after11(11, ctx, node, **kwargs)
@classmethod
def version_13(cls, ctx, node, **kwargs):
# Signature of operator Squeeze changed.
cls.any_version_after11(13, ctx, node, **kwargs)
@tf_op(["ResizeBilinear", "ResizeNearestNeighbor", "ResizeBicubic"])
class Resize:
@classmethod
def version_7(cls, ctx, node, **kwargs):
utils.make_sure(node.type != "ResizeBicubic", "Opset 11 is required for bicubic interpolation for node %s",
node.name)
mode = "linear" if node.type == "ResizeBilinear" else "nearest"
node.type = "Upsample"
shape = ctx.get_shape(node.input[0])
target_shape = node.inputs[1].get_tensor_value()
# https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor
# wants the input to be NHWC - adjust target_shape to this.
n, h, w, c = shape
nh, nw = target_shape
utils.make_sure(all(i != -1 for i in [nh, nw]), "h and w need to be known")
# scaler is nchw
scaler = [1., 1., float(nh) / h, float(nw) / w]
node.set_attr("scales", scaler)
node.set_attr("mode", mode)
ctx.remove_input(node, node.input[1], 1)
node.data_format = "NHWC"
conv_convert_inputs(ctx, node, with_kernel=False)
@classmethod
def version_9(cls, ctx, node, **kwargs):
cls._convert_since_9(ctx, node, op_type="Upsample")
@classmethod
def version_10(cls, ctx, node, **kwargs):
cls._convert_since_9(ctx, node, op_type="Resize")
@classmethod
def version_11(cls, ctx, node, **kwargs):
cubic_coeff_a = None
exclude_outside = False
if node.type == "ResizeBilinear":
mode = "linear"
elif node.type == "ResizeBicubic":
mode = "cubic"
cubic_coeff_a = -0.5
exclude_outside = True
else:
mode = "nearest"
roi = ctx.make_const(utils.make_name("roi"), np.array([]).astype(np.float32))
input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": constants.NHWC_TO_NCHW})
shape = ctx.get_shape(node.input[0])
if shape and shape[2] != -1 and shape[1] != -1 and node.inputs[1].is_const():
target_shape = node.inputs[1].get_tensor_value()
n, h, w, c = shape
nh, nw = target_shape
if "sizes" in node.attr:
sizes_val = np.array([1.0, 1.0, nh, nw]).astype(np.int64)
resize_params = ctx.make_const(utils.make_name("sizes"), sizes_val, raw=False)
else: # scales
scale_val = np.array([1.0, 1.0, float(nh) / h, float(nw) / w]).astype(np.float32)
resize_params = ctx.make_const(utils.make_name("scales"), scale_val, raw=False)
resize_inputs = [
input_nchw.output[0],
roi.output[0],
resize_params.output[0]
]
else:
const_zero = ctx.make_const(utils.make_name("const_zero"), np.array([0]).astype(np.int64))
const_two = ctx.make_const(utils.make_name("const_two"), np.array([2]).astype(np.int64))
const_empty_float = ctx.make_const(utils.make_name("const_empty_float"), np.array([]).astype(np.float32))
shape_input = ctx.make_node("Shape", [input_nchw.output[0]])
sliced_shape = ctx.make_node("Slice", [shape_input.output[0], const_zero.output[0], const_two.output[0]])
size_int64 = ctx.make_node("Cast", [node.input[1]], attr={"to": onnx_pb.TensorProto.INT64})
concat_shape = ctx.make_node("Concat", [sliced_shape.output[0], size_int64.output[0]], {'axis': 0})
resize_inputs = [
input_nchw.output[0],
roi.output[0],
const_empty_float.output[0],
concat_shape.output[0]
]
transformation_mode = "asymmetric"
nearest_mode = "floor"
if "align_corners" in node.attr and node.attr["align_corners"].i:
transformation_mode = "align_corners"
nearest_mode = "round_prefer_ceil"
if "half_pixel_centers" in node.attr and node.attr["half_pixel_centers"].i:
if node.type == "ResizeNearestNeighbor" and not ctx.is_target(constants.TARGET_TENSORRT):
# TensorRT only supports nearest_mode = "floor" for mode = "nearest"
transformation_mode = "tf_half_pixel_for_nn"
else:
transformation_mode = "half_pixel"
attr = {"mode": mode, "nearest_mode": nearest_mode, "coordinate_transformation_mode": transformation_mode,
"exclude_outside": exclude_outside}
if cubic_coeff_a is not None:
attr["cubic_coeff_a"] = cubic_coeff_a
resize = ctx.make_node("Resize", resize_inputs, attr=attr)
shapes = node.output_shapes
dtypes = node.output_dtypes
ctx.remove_node(node.name)
ctx.make_node("Transpose", resize.output, {"perm": constants.NCHW_TO_NHWC},
name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
@classmethod
def _convert_since_9(cls, ctx, node, op_type, use_target_size=False):
# float32 out = ResizeBilinear/ResizeNearestNeighbor(T images, int size)
# https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor
# wants the input to be NHWC - adjust target_shape to this.
utils.make_sure(node.type != "ResizeBicubic", "Opset 11 is required for bicubic interpolation for node %s",
node.name)
if "align_corners" in node.attr:
utils.make_sure(not node.attr["align_corners"].i,
"Opset 11 is required for align_corners=True for node %s", node.name)
if "half_pixel_centers" in node.attr:
utils.make_sure(not node.attr["half_pixel_centers"].i,
"Opset 11 is required for half_pixel_centers=True for node %s", node.name)
mode = "linear" if node.type == "ResizeBilinear" else "nearest"
# because onnxruntime only supports to scale the last two dims so transpose is inserted
input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": constants.NHWC_TO_NCHW})
if use_target_size:
final_target_size = build_dynamic_target_size(ctx, input_nchw.output[0], node.input[1])
roi = ctx.make_const(utils.make_name("roi"), np.array([]).astype(np.float32))
const_empty_float = ctx.make_const(utils.make_name("const_empty_float"), np.array([], dtype=np.float32))
resize_inputs = [
input_nchw.output[0],
roi.output[0],
const_empty_float.output[0],
final_target_size.output[0]
]
upsample = ctx.make_node("Resize", resize_inputs,
attr={"mode": mode, "nearest_mode": "floor",
"coordinate_transformation_mode": "asymmetric"})
else:
# first create "scales" info for onnx upsample
# if shape of input and output known then "scale" is calculated statically and set as a const node
shape = ctx.get_shape(node.input[0])
if shape and shape[2] != -1 and shape[1] != -1 and node.inputs[1].is_const():
target_shape = node.inputs[1].get_tensor_value()
n, h, w, c = shape
nh, nw = target_shape
# scales is nchw
# the reason not storing data at raw field is because of the bug:
# https://github.com/onnx/onnx/issues/1852
scale_val = np.array([1.0, 1.0, float(nh) / h, float(nw) / w]).astype(np.float32)
scales = ctx.make_const(utils.make_name("scales"), scale_val, raw=False)
else:
ori_shape = ctx.make_node("Shape", [node.input[0]])
attr = {"axes": [0], "starts": [1], "ends": [3]}
inputs_map = {"data": ori_shape.output[0], **attr}
ori_shape_hw = GraphBuilder(ctx).make_slice(inputs_map)
ori_shape_hw_float = ctx.make_node("Cast", [ori_shape_hw], attr={"to": onnx_pb.TensorProto.FLOAT})
target_hw = node.inputs[1]
target_hw_float = ctx.make_node("Cast", target_hw.output, attr={"to": onnx_pb.TensorProto.FLOAT})
scales_hw = ctx.make_node("Div", [target_hw_float.output[0], ori_shape_hw_float.output[0]])
const_one_array = ctx.make_const(utils.make_name("one"), np.array([1.0, 1.0]).astype(np.float32))
# scales is nchw
scales = ctx.make_node("Concat", [const_one_array.output[0], scales_hw.output[0]], {"axis": 0})
upsample = ctx.make_node(op_type, [input_nchw.output[0], scales.output[0]], attr={"mode": mode})
shapes = node.output_shapes
dtypes = node.output_dtypes
ctx.remove_node(node.name)
ctx.make_node("Transpose", upsample.output, {"perm": constants.NCHW_TO_NHWC},
name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
@tf_op("AdjustContrastv2")
class AdjustContrastv2:
@classmethod
def version_1(cls, ctx, node, **kwargs):
images, contrast_factor = node.input
dtype = ctx.get_dtype(images)
if ctx.get_dtype(contrast_factor) != dtype:
contrast_factor = ctx.make_node("Cast", [dtype], attr={'to': dtype}).output[0]
rank = ctx.get_rank(images)
utils.make_sure(rank is not None, "AdjustContrastv2 requires input of known rank")
# Reduce height and width only
axes_to_reduce = list(range(rank))[-3:-1]
mean = GraphBuilder(ctx).make_reduce_mean({"data": images, "axes": axes_to_reduce, "keepdims": True},
op_name_scope=node.name)
diff = ctx.make_node("Sub", [images, mean], op_name_scope=node.name).output[0]
scaled = ctx.make_node("Mul", [diff, contrast_factor], op_name_scope=node.name).output[0]
result = ctx.make_node("Add", [scaled, mean], op_name_scope=node.name).output[0]
ctx.replace_all_inputs(node.output[0], result)
ctx.remove_node(node.name)
@tf_op("AdjustSaturation")
class AdjustSaturation:
@classmethod
def version_11(cls, ctx, node, **kwargs):
images, factor = node.input
dtype = ctx.get_dtype(images)
np_dtype = utils.map_onnx_to_numpy_type(dtype)
k = ctx.make_const(utils.make_name("three"), np.array([3], np.int64)).output[0]
ordered, indices = ctx.make_node("TopK", [images, k], attr={'axis': -1}, output_count=2).output
# Sorted and separated into channels
max_c, mid_c, min_c = ctx.make_node("Split", [ordered], attr={'axis': -1, 'num_outputs': 3},
output_count=3).output
delta = ctx.make_node("Sub", [max_c, min_c]).output[0]
scaled_delta = ctx.make_node("Mul", [delta, factor], op_name_scope=node.name).output[0]
new_delta = ctx.make_node("Min", [scaled_delta, max_c]).output[0]
new_min = ctx.make_node("Sub", [max_c, new_delta]).output[0]
delta2 = ctx.make_node("Sub", [mid_c, min_c]).output[0]
const_zero = ctx.make_const(utils.make_name("zero"), np.array(0, np_dtype)).output[0]
delta_z = ctx.make_node("Equal", [delta, const_zero]).output[0]
delta_z_cast = ctx.make_node("Cast", [delta_z], attr={'to': dtype}).output[0]
delta_nz = ctx.make_node("Add", [delta, delta_z_cast]).output[0]
delta2_scale = ctx.make_node("Div", [new_delta, delta_nz]).output[0]
new_delta2 = ctx.make_node("Mul", [delta2, delta2_scale], op_name_scope=node.name).output[0]
new_mid = ctx.make_node("Add", [new_min, new_delta2]).output[0]
new_ordered = ctx.make_node("Concat", [max_c, new_mid, new_min], attr={'axis': -1}).output[0]
# Now put it back in order
result = ctx.make_node("GatherElements", [new_ordered, indices], attr={'axis': -1}).output[0]
ctx.replace_all_inputs(node.output[0], result)
ctx.remove_node(node.name)
@tf_op("AdjustHue")
class AdjustHue:
@classmethod
def version_11(cls, ctx, node, **kwargs):
images, angle_delta = node.input
dtype = ctx.get_dtype(images)
np_dtype = utils.map_onnx_to_numpy_type(dtype)
const_one = ctx.make_const(utils.make_name("const_one"), np.array(1.0, np_dtype)).output[0]
const_six = ctx.make_const(utils.make_name("const_six"), np.array(6.0, np_dtype)).output[0]
const_half = ctx.make_const(utils.make_name("const_half"), np.array(0.5, np_dtype)).output[0]
const_zero = ctx.make_const(utils.make_name("zero"), np.array(0, np_dtype)).output[0]
k = ctx.make_const(utils.make_name("three"), np.array([3], np.int64)).output[0]
ordered, indices = ctx.make_node("TopK", [images, k], attr={'axis': -1},
output_count=2, op_name_scope=node.name).output
# Sorted and separated into channels
max_c, mid_c, min_c = ctx.make_node("Split", [ordered], attr={'axis': -1, 'num_outputs': 3},
output_count=3).output
delta = ctx.make_node("Sub", [max_c, min_c]).output[0]
delta2 = ctx.make_node("Sub", [mid_c, min_c]).output[0]
delta_z = ctx.make_node("Equal", [delta, const_zero]).output[0]
delta_z_cast = ctx.make_node("Cast", [delta_z], attr={'to': dtype}).output[0]
delta_nz = ctx.make_node("Add", [delta, delta_z_cast]).output[0]
progress_within_sector = ctx.make_node("Div", [delta2, delta_nz]).output[0]
# Compute HSV angle
sector_lookup = np.zeros((3, 3, 3, 1), np_dtype)
parity_lookup = np.zeros((3, 3, 3, 1), np_dtype)
sector_lookup[0, 1, 2, 0] = 0.0
sector_lookup[1, 0, 2, 0] = 2 / 6
sector_lookup[1, 2, 0, 0] = 2 / 6
sector_lookup[2, 1, 0, 0] = 4 / 6
sector_lookup[2, 0, 1, 0] = 4 / 6
sector_lookup[0, 2, 1, 0] = 1.0
parity_lookup[0, 1, 2, 0] = 1 / 6
parity_lookup[1, 0, 2, 0] = -1 / 6
parity_lookup[1, 2, 0, 0] = 1 / 6
parity_lookup[2, 1, 0, 0] = -1 / 6
parity_lookup[2, 0, 1, 0] = 1 / 6
parity_lookup[0, 2, 1, 0] = -1 / 6
sector_lookup_const = ctx.make_const(utils.make_name("hue_sector_lookup"), sector_lookup).output[0]
parity_lookup_const = ctx.make_const(utils.make_name("hue_parity_lookup"), parity_lookup).output[0]
sectors = ctx.make_node("GatherND", [sector_lookup_const, indices]).output[0]
parities = ctx.make_node("GatherND", [parity_lookup_const, indices]).output[0]
angle_offset = ctx.make_node("Mul", [progress_within_sector, parities]).output[0]
initial_angle = ctx.make_node("Add", [angle_offset, sectors]).output[0]
# Add angle delta
angle_delta_pos = ctx.make_node("Add", [angle_delta, const_one]).output[0]
new_angle = ctx.make_node("Add", [initial_angle, angle_delta_pos]).output[0]
# Convert to RGB
sector_to_rgb = ctx.make_const(utils.make_name("sector_to_rgb_const"), np.array([0, 4/6, 2/6], np_dtype))
add_node = ctx.make_node("Add", [new_angle, sector_to_rgb.output[0]]).output[0]
mod_node = ctx.make_node("Mod", [add_node, const_one], attr={'fmod': 1}).output[0]
sub_node = ctx.make_node("Sub", [mod_node, const_half]).output[0]
abs_node = ctx.make_node("Abs", [sub_node]).output[0]
mul_node = ctx.make_node("Mul", [abs_node, const_six]).output[0]
sub_node_2 = ctx.make_node("Sub", [mul_node, const_one]).output[0]
clip_node = ctx.make_node("Clip", [sub_node_2, const_zero, const_one]).output[0]
scaled_node = ctx.make_node("Mul", [clip_node, delta], op_name_scope=node.name).output[0]
offset_node = ctx.make_node("Add", [scaled_node, min_c], op_name_scope=node.name).output[0]
ctx.replace_all_inputs(node.output[0], offset_node)
ctx.remove_node(node.name)
@tf_op("MatrixBandPart")
class MatrixBandPart:
@classmethod
def version_7(cls, ctx, node, **kwargs):
# T output = MatrixBandPart(T input, int num_lower, int num_upper)
# data-flow: first generate mask matrix and then use element-wise mul op
input_rank = len(ctx.get_shape(node.input[0]))
utils.make_sure(input_rank == 2, error_msg="MatrixBandPart op: only rank 2 is supported")
bandpart = [node.inputs[ind].get_tensor_value() for ind in [1, 2]]
utils.make_sure(bandpart in [[-1, 0], [0, -1]], "only support Lower/Upper triangular for opset < 11")
# methods to generate mask matrix: if lower triangular is needed, then generate column one by one
# otherwise row is generated one by one.
axis, counter_axis, squeeze_axis = (1, 0, 2) if bandpart == [-1, 0] else (0, 1, 1)
# 1: subgraph to implement tf.onelike(input[:, 0]),
# no need to worry about the dtype, because bool type is needed as Xor only support bool
node_name = utils.make_name("const_zero")
const_zero = ctx.make_const(name=node_name, np_val=np.array([0]).astype(np.int32))
first_col_or_row = ctx.make_node(op_type="Gather", inputs=[node.input[0], const_zero.output[0]],
attr={"axis": axis})
first_col_or_row_casted = ctx.make_node(op_type="Cast", inputs=first_col_or_row.output,
attr={"to": onnx_pb.TensorProto.BOOL})
# line means one col or one row
zero_line = ctx.make_node(op_type="Xor", inputs=first_col_or_row_casted.output * 2)
one_line = ctx.make_node(op_type="Not", inputs=zero_line.output)
# 2: "loop" to generate mask matrix: generate col or row of matrix one by one
g = ctx.create_new_graph_with_same_config()
node_name = utils.make_name("const_zero_bool")
const_zero_bool = g.make_const(name=node_name, np_val=np.array([[0]]).astype(bool))
g.set_dtype(const_zero_bool.output[0], onnx_pb.TensorProto.BOOL)
g.add_graph_input("trip", onnx_pb.TensorProto.INT64, [])
g.add_graph_input("cond", onnx_pb.TensorProto.BOOL, [])
g.add_graph_input("line", onnx_pb.TensorProto.BOOL, [-1, -1])
# shift right the line and add zero at the left.
new_line = g.make_node(op_type="Concat", inputs=[const_zero_bool.output[0], "line"],
attr={"axis": counter_axis},
dtypes=[onnx_pb.TensorProto.BOOL])
attr = {"axes": [counter_axis], "starts": [0], "ends": [-1]}
inputs_map = {"data": new_line.output[0], **attr}
slice_node = GraphBuilder(g).make_slice(inputs_map)
g.make_node("Identity", ["cond"], outputs=["cond_out"])
g.make_node("Identity", ["line"], outputs=["res"])
g.make_node("Identity", [slice_node], outputs=["line_out"])
g.add_graph_output("cond_out", onnx_pb.TensorProto.BOOL, [])
g.add_graph_output("line_out", onnx_pb.TensorProto.BOOL, [-1, -1])
g.add_graph_output("res", onnx_pb.TensorProto.BOOL, [-1, -1])
# initial value of body vars
shape = ctx.make_node(op_type="Shape", inputs=[node.input[0]]) # dtype of result is int64
node_name = utils.make_name("line_num_index")
col_or_row_num_index = ctx.make_const(name=node_name, np_val=np.array(axis).astype(np.int32))
line_num = ctx.make_node(op_type="Gather", inputs=[shape.output[0], col_or_row_num_index.output[0]])
trip_cnt = line_num.output[0]
node_name = utils.make_name("true")
cond = ctx.make_const(name=node_name, np_val=np.array(1).astype(bool))
col_init = one_line.output[0]
branches = {"body": g}
loop_node = ctx.make_node(op_type="Loop", inputs=[trip_cnt, cond.output[0], col_init],
output_count=2, branches=branches)
# convert generated mask matrix from bool to right shape and data type
squeeze = GraphBuilder(ctx).make_squeeze(
{'data': loop_node.output[1], 'axes': [squeeze_axis]}, return_node=True)
cast1 = ctx.make_node(op_type="Cast", inputs=squeeze.output, attr={"to": onnx_pb.TensorProto.FLOAT})
if axis == 1:
mask_matrix = ctx.make_node(op_type="Transpose", inputs=cast1.output)
else:
mask_matrix = squeeze
cast2 = ctx.make_node(op_type="Cast", inputs=mask_matrix.output,
attr={"to": ctx.get_dtype(node.input[0])})
shapes = node.output_shapes
dtypes = node.output_dtypes
ctx.remove_node(node.name)
ctx.make_node(op_type="Mul", inputs=[cast2.output[0], node.input[0]],
name=node.name, outputs=node.output, shapes=shapes,
dtypes=dtypes)
@classmethod
def version_11(cls, ctx, node, **kwargs):
num_lower_const = node.inputs[1].get_tensor_value() if node.inputs[1].is_const() else None
num_upper_const = node.inputs[2].get_tensor_value() if node.inputs[2].is_const() else None
data, num_lower, num_upper = node.input
rank = ctx.get_rank(data)
int_max_val = utils.get_max_value(np.int64)
dtype = ctx.get_dtype(data)
if rank == 2:
shape = ctx.make_node("Shape", [data]).output[0]
else:
whole_shape = ctx.make_node("Shape", [data]).output[0]
shape = GraphBuilder(ctx).make_slice(
{'data': whole_shape, 'starts': [-2], 'ends': [int_max_val], 'axes': [0]})
if num_lower_const == 0 and num_upper_const == 0:
if rank == 2:
identity_node = ctx.make_node("EyeLike", [data]).output[0]
else:
zero_tensor = helper.make_tensor("value", dtype, dims=[1], vals=[0])
const_of_shape = ctx.make_node("ConstantOfShape", [shape], attr={'value': zero_tensor}).output[0]
identity_node = ctx.make_node("EyeLike", [const_of_shape]).output[0]
shapes = node.output_shapes
dtypes = node.output_dtypes
ctx.remove_node(node.name)
ctx.make_node(op_type="Mul", inputs=[identity_node, data],
name=node.name, outputs=node.output, shapes=shapes,
dtypes=dtypes)
return
zero_const = ctx.make_const(utils.make_name("zero"), np.array(0, np.int64)).output[0]
one_const = ctx.make_const(utils.make_name("one"), np.array(1, np.int64)).output[0]
conditions = []
row_cnt = GraphBuilder(ctx).make_slice({'data': shape, 'axes': [0], 'starts': [0], 'ends': [1]})
col_cnt = GraphBuilder(ctx).make_slice({'data': shape, 'axes': [0], 'starts': [1], 'ends': [2]})
limit = ctx.make_node("Mul", [row_cnt, col_cnt]).output[0]
# idx_cnt = ctx.make_node("Range", [zero_const, limit, one_const]).output[0]
ones_of_shape = ctx.make_node("Expand", [one_const, limit]).output[0]
idx_cnt = ctx.make_node("CumSum", [ones_of_shape, zero_const], attr={'exclusive': True}).output[0]
idx_reshape = ctx.make_node("Reshape", [idx_cnt, shape]).output[0]
row_idx = ctx.make_node("Div", [idx_reshape, col_cnt]).output[0]
col_idx = ctx.make_node("Mod", [idx_reshape, col_cnt]).output[0]
idx_diff = ctx.make_node("Sub", [col_idx, row_idx]).output[0]
if num_upper_const is None or num_upper_const >= 0:
if ctx.get_dtype(num_upper) != TensorProto.INT64:
num_upper = ctx.make_node("Cast", [num_upper], attr={'to': TensorProto.INT64}).output[0]
greater = ctx.make_node("Greater", [idx_diff, num_upper]).output[0]
less_or_equal = ctx.make_node("Not", [greater]).output[0]
conditions.append(less_or_equal)
if num_lower_const is None or num_lower_const >= 0:
if ctx.get_dtype(num_lower) != TensorProto.INT64:
num_lower = ctx.make_node("Cast", [num_lower], attr={'to': TensorProto.INT64}).output[0]
num_lower_neg = ctx.make_node("Neg", [num_lower]).output[0]
greater = ctx.make_node("Greater", [num_lower_neg, idx_diff]).output[0]
less_or_equal = ctx.make_node("Not", [greater]).output[0]
conditions.append(less_or_equal)
if len(conditions) == 0:
node.type = "Identity"
ctx.replace_inputs(node, [data])
return
if len(conditions) == 1:
cond = conditions[0]
if len(conditions) == 2:
cond = ctx.make_node("And", conditions).output[0]
mask = ctx.make_node("Cast", [cond], attr={'to': ctx.get_dtype(data)}).output[0]
shapes = node.output_shapes
dtypes = node.output_dtypes
ctx.remove_node(node.name)
ctx.make_node(op_type="Mul", inputs=[mask, data],
name=node.name, outputs=node.output, shapes=shapes,
dtypes=dtypes)
def _make_softmax_cross_entropy_with_logits(ctx, label, logit, tf_ori_node):
    """Build per-example cross entropy from dense labels:
    squeeze(-reduce_sum(label * log_softmax(logit), axis=-1), axis=1)."""
    label_dtype = ctx.get_dtype(label.output[0])
    logit_dtype = ctx.get_dtype(logit.output[0])
    utils.make_sure(label_dtype == logit_dtype, "the following logic only works on same dtype of label and logit")
    log_probs = ctx.make_node(op_type="LogSoftmax", inputs=logit.output)
    # implement tf.multiply(-1, tf.reduce_sum(tf.multiply(label, log_softmax), axis=1))
    weighted = ctx.make_node(op_type="Mul", inputs=[label.output[0], log_probs.output[0]])
    summed = GraphBuilder(ctx).make_reduce_sum(
        {"data": weighted.output[0], "axes": [-1], "keepdims": 1, "noop_with_empty_axes": 1})
    neg_one = ctx.make_const(name=utils.make_name("const_negative_one"),
                             np_val=np.array(-1).astype(utils.ONNX_TO_NUMPY_DTYPE[logit_dtype]))
    loss = ctx.make_node(op_type="Mul", inputs=[neg_one.output[0], summed])
    # Replace the tf node in place, preserving its output name/shape/dtype, and
    # drop the kept reduction dim with a final Squeeze.
    out_shapes = tf_ori_node.output_shapes
    out_dtypes = tf_ori_node.output_dtypes
    ctx.remove_node(tf_ori_node.name)
    GraphBuilder(ctx).make_squeeze({'axes': [1], 'data': loss.output[0], 'outputs': [tf_ori_node.output[0]]},
                                   shapes=[out_shapes[0]], dtypes=[out_dtypes[0]])
def sparse_softmax_cross_entropy_with_logits_op_by_gathernd(ctx, node, **kwargs):
    """Lower SparseSoftmaxCrossEntropyWithLogits via GatherND.

    Instead of materializing a one-hot matrix, pair each row index with its label
    and use GatherND to pick -log_softmax(logit)[i, label[i]] directly. Used when
    the class count is unknown or too large for a constant eye matrix.
    """
    # make subgraph to implement one_hot, idea comes from onehot_op
    indices_name = node.input[1]
    indices_shape = ctx.get_shape(indices_name)
    if len(indices_shape) != 1:
        # TODO: this works for rank=1 but tensorflow supports more than this.
        # Same principle should work but we need to implement our own eye.
        raise ValueError("onehot op: only rank1 is supported")
    logit_name = node.input[0]
    logit_dtype = ctx.get_dtype(logit_name)
    logit_shape = ctx.get_shape(logit_name)
    utils.make_sure(logit_dtype, "Dtype of {} is None".format(logit_name))
    # GatherND indices must be int64.
    indices_dtype = ctx.get_dtype(indices_name)
    if indices_dtype != TensorProto.INT64:
        indices_cast = ctx.make_node("Cast", [indices_name], attr={"to": TensorProto.INT64})
        indices_name = indices_cast.output[0]
    indices_size = ctx.make_node("Size", [indices_name])
    gb = GraphBuilder(ctx)
    indices_unsqueeze = gb.make_unsqueeze({'data': indices_name, "axes": [1]}, return_node=True)
    zero_const = ctx.make_const(utils.make_name("zero"), np.array(0, dtype=np.int64))
    one_const = ctx.make_const(utils.make_name("one"), np.array(1, dtype=np.int64))
    # Build [0, 1, ..., n-1] row ids and zip them with the labels into (row, class)
    # pairs for GatherND.
    id_name = utils.make_name("sparse_softmax_id")
    id_output = utils.port_name(id_name)
    controlflow.make_range(ctx, zero_const.output[0], indices_size.output[0], one_const.output[0],
                           id_output, id_name, shape=[-1], dtype=TensorProto.INT64)
    id_unsqueeze = gb.make_unsqueeze({'data': id_output, "axes": [1]}, return_node=True)
    indices_with_id = ctx.make_node("Concat",
                                    [id_unsqueeze.output[0], indices_unsqueeze.output[0]],
                                    attr={"axis": 1})
    log_softmax = ctx.make_node(op_type="LogSoftmax",
                                inputs=[logit_name], dtypes=[logit_dtype], shapes=[logit_shape])
    gathernd_name = utils.make_name("sparse_softmax_gathernd")
    gathernd_output = utils.port_name(gathernd_name)
    tensor.make_gathernd(ctx, log_softmax.output[0], indices_with_id.output[0], gathernd_output,
                         gathernd_name, logit_dtype, [logit_shape], [logit_dtype])
    const_name = utils.make_name("const_negative_one")
    const_negative_one = ctx.make_const(const_name, np.array(-1).astype(utils.map_onnx_to_numpy_type(logit_dtype)))
    mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], gathernd_output])
    shapes = node.output_shapes
    dtypes = node.output_dtypes
    ctx.remove_node(node.name)
    # Reuse the GraphBuilder created above (the original constructed a redundant
    # second instance here).
    gb.make_squeeze({'data': mul2.output[0], 'outputs': [node.output[0]], "axes": [1]},
                    shapes=[shapes[0]], dtypes=[dtypes[0]])
@tf_op("SoftmaxCrossEntropyWithLogits")
class SoftmaxCrossEntropyWithLogits:
@classmethod
def version_7(cls, ctx, node, **kwargs):
logits = node.inputs[0]
logit_dtype = ctx.get_dtype(logits.output[0])
labels = node.inputs[1]
label_dtype = ctx.get_dtype(labels.output[0])
if label_dtype != logit_dtype:
labels = ctx.make_node("Cast", labels.output, attr={"to": logit_dtype}, dtypes=[logit_dtype])
_make_softmax_cross_entropy_with_logits(ctx, labels, logits, node)
def _make_sparse_softmax_cross_entropy_with_logits(ctx, label, logit, tf_ori_node):
    """Build -log(q_i) for the one-hot label, stabilized by max-subtraction."""
    logit = logit.output[0]
    label = label.output[0]
    label_dtype = ctx.get_dtype(label)
    logit_dtype = ctx.get_dtype(logit)
    utils.make_sure(label_dtype == logit_dtype, "the following logic only works on same dtype of label and logit")
    # when label is onehot, logic "tf.multiply(-1, tf.reduce_sum(tf.multiply(label, log_softmax), axis=1))" is equal to
    # "-log(q_i)" where i is the selected index specified by label, q_i = logic_i/sum, the detail process is as follows:
    # logit_exp=exp(logit) >> sum = tf.reduce_sum(logit_exp, axis = -1), masked_sum = reduce_sum(mul(logit_exp, mul))
    # >> -log(masked_sum/sum)
    builder = GraphBuilder(ctx)
    peak = builder.make_reduce_max({"data": logit, "axes": [-1], "keepdims": 1})
    shifted = ctx.make_node(op_type="Sub", inputs=[logit, peak]).output[0]
    exps = ctx.make_node(op_type="Exp", inputs=[shifted]).output[0]
    exp_total = builder.make_reduce_sum(
        {"data": exps, "axes": [-1], "keepdims": 0, "noop_with_empty_axes": 1})
    picked = ctx.make_node(op_type="Mul", inputs=[label, exps]).output[0]
    picked_total = builder.make_reduce_sum(
        {"data": picked, "axes": [-1], "keepdims": 0, "noop_with_empty_axes": 1})
    prob = ctx.make_node(op_type="Div", inputs=[picked_total, exp_total]).output[0]
    log_prob = ctx.make_node(op_type="Log", inputs=[prob]).output[0]
    neg_one = ctx.make_const(name=utils.make_name("const_negative_one"),
                             np_val=np.array(-1).astype(utils.ONNX_TO_NUMPY_DTYPE[logit_dtype])).output[0]
    # Replace the tf node in place, preserving its output name/shape/dtype.
    out_shapes = tf_ori_node.output_shapes
    out_dtypes = tf_ori_node.output_dtypes
    ctx.remove_node(tf_ori_node.name)
    ctx.make_node(op_type="Mul", inputs=[log_prob, neg_one],
                  outputs=[tf_ori_node.output[0]], shapes=[out_shapes[0]], dtypes=[out_dtypes[0]])
@tf_op("SparseSoftmaxCrossEntropyWithLogits")
class SparseSoftmaxCrossEntropyWithLogits:
@classmethod
def version_7(cls, ctx, node, **kwargs):
    # make subgraph to implement one_hot, idea comes from onehot_op
    indices_name = node.input[1]
    indices_shape = ctx.get_shape(indices_name)
    if len(indices_shape) != 1:
        # TODO: this works for rank=1 but tensorflow supports more than this.
        # Same principle should work but we need to implement our own eye.
        raise ValueError("onehot op: only rank1 is supported")
    logit_name = node.input[0]
    depth = ctx.get_shape(logit_name)[-1]
    # if number of classes is unknown or too large
    # (the constant eye matrix below would be depth x depth), fall back to the
    # GatherND-based lowering which never materializes a one-hot matrix.
    if depth == utils.ONNX_UNKNOWN_DIMENSION or depth > 20000:
        sparse_softmax_cross_entropy_with_logits_op_by_gathernd(ctx, node, **kwargs)
        return
    logit_dtype = ctx.get_dtype(logit_name)
    utils.make_sure(logit_dtype, "Dtype of {} is None".format(logit_name))
    dtype = utils.map_onnx_to_numpy_type(logit_dtype)
    # Gathering rows of the identity matrix yields the one-hot labels.
    eye = np.eye(depth).astype(dtype)
    const_name = utils.make_name("const_eye")
    const_eye = ctx.make_const(name=const_name, np_val=eye)
    onehot = ctx.make_node(op_type="Gather", inputs=[const_eye.output[0], indices_name], attr={"axis": 0})
    log_softmax = ctx.make_node(op_type="LogSoftmax", inputs=[logit_name])
    # implement tf.multiply(np.float32(-1.0), tf.reduce_sum(tf.multiply(one_hot, log_softmax), axis=1))
    mul1 = ctx.make_node(op_type="Mul", inputs=[onehot.output[0], log_softmax.output[0]])
    reduce_sum_output = GraphBuilder(ctx).make_reduce_sum(
        {"data": mul1.output[0], "axes": [1], "keepdims": 1, "noop_with_empty_axes": 1})
    const_name = utils.make_name("const_negative_one")
    const_negative_one = ctx.make_const(name=const_name, np_val=np.array(-1).astype(dtype))
    mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], reduce_sum_output])
    shapes = node.output_shapes
    dtypes = node.output_dtypes
    ctx.remove_node(node.name)
    # Replace the tf node's output with the squeezed loss, preserving shape/dtype.
    ctx.make_node(op_type="Squeeze", inputs=[mul2.output[0]], outputs=[node.output[0]], attr={"axes": [1]},
                  shapes=[shapes[0]], dtypes=[dtypes[0]])
@classmethod
def version_9(cls, ctx, node, **kwargs):
# float32/64 output = SparseSoftmaxCrossEntropyWithLogits(float32/64 features, int32/64 labels)
# the detail math process of this op is: a = onehot(labels), b = logsoftmax(features), reduce_sum(mul(a, b))
logit_node = node.inputs[0]
logit_shape = ctx.get_shape(node.input[0])
logit_dtype = ctx.get_dtype(node.input[0])
label_name = node.input[1]
if logit_shape is not None and logit_shape[-1] != -1:
num_class = logit_shape[-1]
node_nme = utils.make_name("onehot_depth")
depth_node = ctx.make_const(node_nme, np.array([num_class]).astype(np.int64)).output[0]
else:
logit_shape = ctx.make_node("Shape", [node.input[0]]).output[0]
slice_args = {"data": logit_shape,
"starts": [-1], "ends": [int(utils.get_max_value(np.int32))]}
num_class = GraphBuilder(ctx).make_slice(kwargs=slice_args)
depth_node = num_class
values_node = ctx.make_const(utils.make_name("onehot_values"), np.array([0, 1]).astype(np.int64)).output[0]
label_dtype = ctx.get_dtype(label_name)
if label_dtype != TensorProto.INT64:
onehot_indice = ctx.make_node("Cast", [label_name], attr={"to": TensorProto.INT64}).output[0]
else:
onehot_indice = label_name
if ctx.opset < 11:
label_node = ctx.make_node(op_type="OneHot",
inputs=[onehot_indice, depth_node, values_node])
else:
# OneHot is very slow but this workaround requires opset 11
index_unsq = GraphBuilder(ctx).make_unsqueeze({'data': onehot_indice, 'axes': [-1]})
depth_sq = GraphBuilder(ctx).make_squeeze({'data': depth_node, 'axes': [0]})
zero_const = ctx.make_const(utils.make_name("const_zero"), np.array(0, np.int64)).output[0]
one_const = ctx.make_const(utils.make_name("const_one"), np.array(1, np.int64)).output[0]
dp_range = ctx.make_node("Range", [zero_const, depth_sq, one_const]).output[0]
label_node = ctx.make_node("Equal", [index_unsq, dp_range])
# the above logic makes output dtype of label_node now always int64
# make sure label has same dtype as logit
if logit_dtype != ctx.get_dtype(label_node.output[0]):
label_node = ctx.make_node("Cast", label_node.output, attr={"to": logit_dtype}, dtypes=[logit_dtype])
_make_sparse_softmax_cross_entropy_with_logits(ctx, label_node, logit_node, node)
@tf_op("CTCGreedyDecoder")
class CTCGreedyDecoder:
    """Converts TF CTCGreedyDecoder into ONNX primitive ops.

    Emulates greedy CTC decoding: per time step take the argmax class, mask out
    steps beyond each batch entry's sequence length, drop blank labels (and,
    when merge_repeated is set, consecutive duplicates), and emit the result in
    TF's sparse-tensor form (indices, values, dense_shape) plus the negated
    sum of max logits as the log-probability output.
    """

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # shape = [max_time, batch_size, num_classes]
        inp = node.input[0]
        # shape = [batch_size]
        seq_lens = node.input[1]
        seq_lens_int64 = ctx.make_node("Cast", [seq_lens], attr={"to": TensorProto.INT64}).output[0]
        # shape = [1, batch_size, 1]
        seq_lens_unsq = GraphBuilder(ctx).make_unsqueeze({"data": seq_lens_int64, "axes": [0, 2]})
        merge_repeated = node.get_attr_value("merge_repeated", False)
        # split the dynamic input shape into its three dims
        inp_shape = ctx.make_node("Shape", [inp]).output[0]
        max_time_unsq, num_batch_unsq, num_classes_unsq = ctx.make_node("Split", [inp_shape], attr={'num_outputs': 3},
                                                                        output_count=3).output
        max_time = GraphBuilder(ctx).make_squeeze({"data": max_time_unsq, "axes": [0]})
        num_batch = GraphBuilder(ctx).make_squeeze({"data": num_batch_unsq, "axes": [0]})
        num_classes = GraphBuilder(ctx).make_squeeze({"data": num_classes_unsq, "axes": [0]})
        const_one = ctx.make_const(utils.make_name("const_one"), np.array(1, np.int64)).output[0]
        const_one_unsq = ctx.make_const(utils.make_name("const_one"), np.array([1], np.int64)).output[0]
        const_zero = ctx.make_const(utils.make_name("const_zero"), np.array(0, np.int64)).output[0]
        # by CTC convention the blank label is the last class index
        blank_label = ctx.make_node("Sub", [num_classes, const_one]).output[0]
        time = ctx.make_node("Range", [const_zero, max_time, const_one]).output[0]
        batch = ctx.make_node("Range", [const_zero, num_batch, const_one]).output[0]
        # shape = [max_time, 1, 1]
        time_unsq = GraphBuilder(ctx).make_unsqueeze({"data": time, "axes": [1, 2]})
        # mask of steps that fall inside each batch entry's sequence length
        valid_elts = ctx.make_node("Less", [time_unsq, seq_lens_unsq]).output[0]
        # shape = [max_time, batch_size, 1]
        valid_mask = ctx.make_node("Cast", [valid_elts], attr={"to": TensorProto.FLOAT}).output[0]
        # shape = [max_time, batch_size, num_classes]
        valid_inp = ctx.make_node("Mul", [inp, valid_mask]).output[0]
        # greedy step: per time/batch argmax via TopK with k=1
        # shape = [max_time, batch_size, 1]
        max_val, max_idx = ctx.make_node("TopK", [valid_inp, const_one_unsq], attr={"axis": 2},
                                         output_count=2, op_name_scope=node.name).output
        # shape = [batch_size, 1]
        sum_max = GraphBuilder(ctx).make_reduce_sum({"data": max_val, "axes": [0], "keepdims": False})
        # TF reports negative_sum_logits for the greedy decoder
        sum_max_neg = ctx.make_node("Neg", [sum_max]).output[0]
        valid_elts_sq = GraphBuilder(ctx).make_squeeze({"data": valid_elts, "axes": [2]})
        max_idx_sq = GraphBuilder(ctx).make_squeeze({"data": max_idx, "axes": [2]})
        # shape = [batch_size, max_time]
        max_idx_trans = ctx.make_node("Transpose", [max_idx_sq], attr={"perm": [1, 0]}).output[0]
        valid_elts_trans = ctx.make_node("Transpose", [valid_elts_sq], attr={"perm": [1, 0]}).output[0]
        # value = [batch_size, max_time]
        idx_shape = ctx.make_node("Shape", [max_idx_trans]).output[0]
        # keep only non-blank labels inside the valid range
        keep_idx = ctx.make_node("Less", [max_idx_trans, blank_label]).output[0]
        keep_idx = ctx.make_node("And", [keep_idx, valid_elts_trans]).output[0]
        if merge_repeated:
            # shift each row right by one (pad with -1, which matches no label)
            # and drop positions equal to their predecessor
            # val = [batch_size, 1]
            shift_row_shape = ctx.make_node("Concat", [num_batch_unsq, const_one_unsq], attr={'axis': 0}).output[0]
            neg_one_tensor = helper.make_tensor("value", onnx_pb.TensorProto.INT64, dims=[1], vals=[-1])
            # shape = [batch_size, 1]
            neg_ones = ctx.make_node("ConstantOfShape", [shift_row_shape], {'value': neg_one_tensor}).output[0]
            max_idx_cut = GraphBuilder(ctx).make_slice(
                {"data": max_idx_trans, "starts": [0], "ends": [-1], "axes": [1]})
            # shape = [batch_size, max_time]
            max_idx_shift = ctx.make_node("Concat", [neg_ones, max_idx_cut], attr={"axis": 1}).output[0]
            repeat_elts = ctx.make_node("Equal", [max_idx_shift, max_idx_trans]).output[0]
            not_repeat = ctx.make_node("Not", [repeat_elts]).output[0]
            keep_idx = ctx.make_node("And", [keep_idx, not_repeat]).output[0]
        batch_unsq = GraphBuilder(ctx).make_unsqueeze({"data": batch, "axes": [1]})
        batch_expand = ctx.make_node("Expand", [batch_unsq, idx_shape]).output[0]
        keep_idx_int = ctx.make_node("Cast", [keep_idx], attr={"to": TensorProto.INT64}).output[0]
        # exclusive cumsum over kept flags = compacted per-row output position
        filtered_time = ctx.make_node("CumSum", [keep_idx_int, const_one], attr={"exclusive": True}).output[0]
        flat_shape = ctx.make_const(utils.make_name("const_neg_one"), np.array([-1], np.int64)).output[0]
        flat_shape2 = ctx.make_const(utils.make_name("const_shape"), np.array([-1, 1], np.int64)).output[0]
        idx_flat = ctx.make_node("Reshape", [max_idx_trans, flat_shape]).output[0]
        keep_idx_flat = ctx.make_node("Reshape", [keep_idx, flat_shape]).output[0]
        time_flat = ctx.make_node("Reshape", [filtered_time, flat_shape2]).output[0]
        batch_flat = ctx.make_node("Reshape", [batch_expand, flat_shape2]).output[0]
        # sparse indices are (batch, position) pairs; Compress drops filtered entries
        sparse_idx = ctx.make_node("Concat", [batch_flat, time_flat], attr={'axis': 1}).output[0]
        idx_compress = ctx.make_node("Compress", [idx_flat, keep_idx_flat], attr={'axis': 0}, shapes=[[-1]],
                                     op_name_scope=node.name).output[0]
        sparse_idx_compress = ctx.make_node("Compress", [sparse_idx, keep_idx_flat], attr={'axis': 0},
                                            shapes=[[-1, 2]], op_name_scope=node.name).output[0]
        # dense_shape = [batch_size, longest decoded sequence]
        max_sparse_idx = GraphBuilder(ctx).make_reduce_max({"data": sparse_idx_compress, "axes": [0],
                                                            "keepdims": False})
        max_time = GraphBuilder(ctx).make_slice(
            {"data": max_sparse_idx, "starts": [1], "ends": [2], "axes": [0]})
        max_time_inc = ctx.make_node("Add", [max_time, const_one]).output[0]
        sparse_shape = ctx.make_node("Concat", [num_batch_unsq, max_time_inc], attr={'axis': 0}).output[0]
        # rewire the four TF outputs (indices, values, dense_shape, neg_sum_logits)
        ctx.replace_all_inputs(node.output[0], sparse_idx_compress)
        ctx.replace_all_inputs(node.output[1], idx_compress)
        ctx.replace_all_inputs(node.output[2], sparse_shape)
        ctx.replace_all_inputs(node.output[3], sum_max_neg)
        ctx.remove_node(node.name)
| 112,073 | 52.598278 | 120 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/onnx_opset/common.py | # SPDX-License-Identifier: Apache-2.0
"""
common
"""
import logging
from tf2onnx import constants
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring
class BroadcastOp:
    """Handles elementwise ops that relied on TF's implicit broadcasting.

    Both opset handlers rename "AddV2" to "Add" and apply a workaround for the
    caffe2 / winml-rs4 targets; opset 1 additionally sets the (legacy) ONNX
    "broadcast" attribute which opset 6+ no longer needs.
    """

    @staticmethod
    def _rs4_scalar_workaround(ctx, node, shape0, shape1):
        """Swap Mul/Add inputs so the higher-rank operand comes first on RS4.

        This works around shortcomings in the broadcasting code of caffe2 and
        winml/rs4: in rs4, Mul and Add do not support scalars correctly, so
        scalar constants are promoted to rank-1 before the comparison.
        No-op for other targets.
        """
        if ctx.is_target(constants.TARGET_RS4):
            # in rs4 mul and add do not support scalar correctly
            if not shape0:
                if node.inputs[0].is_const():
                    shape0 = node.inputs[0].scalar_to_dim1()
            if not shape1:
                if node.inputs[1].is_const():
                    shape1 = node.inputs[1].scalar_to_dim1()
            if shape0 and shape1 and len(shape0) < len(shape1) and node.type in ["Mul", "Add"]:
                tmp = node.input[0]
                ctx.replace_input(node, node.input[0], node.input[1], 0)
                ctx.replace_input(node, node.input[1], tmp, 1)

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Elementwise Ops with broadcast flag."""
        if node.type == "AddV2":
            node.type = "Add"
        shape0 = ctx.get_shape(node.input[0])
        shape1 = ctx.get_shape(node.input[1])
        if shape0 != shape1:
            # opset 1 requires the explicit broadcast attribute when shapes differ
            node.set_attr("broadcast", 1)
            cls._rs4_scalar_workaround(ctx, node, shape0, shape1)
        else:
            node.set_attr("broadcast", 0)

    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        """Elementwise Ops with broadcast flag."""
        if node.type == "AddV2":
            node.type = "Add"
        shape0 = ctx.get_shape(node.input[0])
        shape1 = ctx.get_shape(node.input[1])
        if shape0 != shape1:
            # opset 6+ broadcasts implicitly; only the RS4 workaround remains
            cls._rs4_scalar_workaround(ctx, node, shape0, shape1)
| 2,487 | 36.69697 | 95 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/rewriter/gru_tf2_rewriter.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.rewriter.gru_tf2_rewriter - Rewrites GRU pattern used by tf2.
"""
from tf2onnx.graph_matcher import GraphMatcher
from tf2onnx.rewriter.rnn_utils import make_grucell_pattern, keras_gru_pattern
from tf2onnx.tf_loader import find_function
from tf2onnx.rewriter.unit_rnn_rewriter_base import UnitRnnContext
from tf2onnx.rewriter.gru_rewriter import GRUUnitRewriter
from tf2onnx.graph_builder import GraphBuilder
# pylint: disable=invalid-name,unused-argument,missing-docstring, unused-variable
def rewrite_gru_tf2(g, ops):
    """Rewrite tf2 GRU while-loops in graph `g` into single ONNX GRU nodes.

    Works in two passes over `ops`:
    1. When `ops` is a While *body* graph, match a GRUCell or keras GRU pattern
       and record the graph-input indices of x, initial state, weights/biases,
       sequence length and the scan output in `g.gru_rewriter_context`.
    2. When `ops` contains the outer While nodes, look up each body's recorded
       context, build the ONNX GRU node and rewire all consumers.
    Any check that fails simply skips the candidate, leaving the loop as-is.
    """
    pattern1 = make_grucell_pattern("Identity")
    pattern2 = keras_gru_pattern
    for pattern in [pattern1, pattern2]:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        for match_result in match_results:
            # only Relu/Tanh/Sigmoid map onto ONNX GRU activations
            activation_op = match_result.get_op("optional_activation")
            activations = ["Sigmoid", activation_op.type]
            if activation_op.type not in ["Relu", "Tanh", "Sigmoid"]:
                continue
            if pattern is pattern1:
                concat = match_result.get_op("cell_inputs")
                if len(concat.inputs) != 3:
                    continue
                get_item = concat.inputs[0]
                init_state = concat.inputs[1]
            else:
                get_item = match_result.get_op("gru_input")
                init_state = match_result.get_op("state")
            # x must come from the loop's TensorList input; state must be a loop input
            if not get_item.type == "TensorListGetItem":
                continue
            x_e = get_item.inputs[0]
            if not x_e.is_graph_input():
                continue
            x_idx = g.input_names.index(x_e.output[0])
            if not init_state.is_graph_input():
                continue
            init_state_idx = g.input_names.index(init_state.output[0])
            cell_output = match_result.get_op("cell_output")
            final_consumers = g.find_output_consumers(cell_output.output[0])
            select_ops = [n for n in final_consumers if n.type == "Select"]
            def has_tensor_list_consumer(n):
                return any(c.type == "TensorListSetItem" for c in g.find_output_consumers(n.output[0]))
            select_ops = [n for n in select_ops if has_tensor_list_consumer(n)]
            # a Select guarded by GreaterEqual indicates per-batch sequence lengths
            if len(select_ops) == 1:
                greater_eq = select_ops[0].inputs[0]
                if greater_eq.type != "GreaterEqual":
                    continue
                seq_len = greater_eq.inputs[1]
                if not seq_len.is_graph_input():
                    continue
                seq_len_idx = g.input_names.index(seq_len.output[0])
                final_consumers = g.find_output_consumers(select_ops[0].output[0])
            else:
                seq_len_idx = None
            # the cell output must feed exactly one TensorListSetItem (the scan output)
            tensor_set_items = [n for n in final_consumers if n.type == "TensorListSetItem"]
            if len(tensor_set_items) != 1:
                continue
            if not tensor_set_items[0].inputs[0].is_graph_input():
                continue
            out_idx = g.input_names.index(tensor_set_items[0].input[0])
            # weights/biases must be loop inputs, possibly behind Identity chains
            hk = match_result.get_op("hidden_kernel")
            while hk.type == "Identity":
                hk = hk.inputs[0]
            if not hk.is_graph_input():
                continue
            hk_idx = g.input_names.index(hk.output[0])
            hb = match_result.get_op("hidden_bias")
            if not hb.is_graph_input():
                continue
            hb_idx = g.input_names.index(hb.output[0])
            gk = match_result.get_op("gate_kernel")
            while gk.type == "Identity":
                gk = gk.inputs[0]
            if not gk.is_graph_input():
                continue
            gk_idx = g.input_names.index(gk.output[0])
            gb = match_result.get_op("gate_bias")
            if not gb.is_graph_input():
                continue
            gb_idx = g.input_names.index(gb.output[0])
            bias_add = match_result.get_op("bias_add")
            if bias_add is not None and bias_add.data_format != "NHWC":
                continue
            # stash everything pass 2 needs on the (body) graph object
            g.gru_rewriter_context = {
                "x_idx": x_idx,
                "out_idx": out_idx,
                "initial_state_idx": init_state_idx,
                "hidden_kernel_idx": hk_idx,
                "hidden_bias_idx": hb_idx,
                "gate_kernel_idx": gk_idx,
                "gate_bias_idx": gb_idx,
                "seq_len_idx": seq_len_idx,
                "activations": activations,
                "from_keras": pattern is pattern2,
                # keras GRU applies the linear transform before the reset gate
                "linear_before_reset": 1 if pattern is pattern2 else 0,
            }
    for op in ops:
        if op.is_while():
            body_graph = find_function(op.get_attr_str("body"))
            if body_graph.gru_rewriter_context is None:
                continue
            body_context = body_graph.gru_rewriter_context
            # translate body-graph input indices into the While node's actual inputs
            hk = op.input[body_context["hidden_kernel_idx"]]
            hb = op.input[body_context["hidden_bias_idx"]]
            gk = op.input[body_context["gate_kernel_idx"]]
            gb = op.input[body_context["gate_bias_idx"]]
            # ONNX GRU needs constant W/R/B tensors
            if not all(g.is_const(w) for w in [hk, hb, gk, gb]):
                continue
            hk_const = g.get_tensor_value(hk, as_list=False)
            hb_const = g.get_tensor_value(hb, as_list=False)
            gk_const = g.get_tensor_value(gk, as_list=False)
            gb_const = g.get_tensor_value(gb, as_list=False)
            # add the num_directions axis expected by ONNX GRU's initial_h
            initial_state_sq = op.input[body_context["initial_state_idx"]]
            initial_state = GraphBuilder(g).make_unsqueeze({"data": initial_state_sq, "axes": [0]})
            context = UnitRnnContext()
            context.from_keras = body_context["from_keras"]
            context.weights.update({
                "hidden_kernel": hk_const,
                "hidden_bias": hb_const,
                "gate_kernel": gk_const,
                "gate_bias": gb_const
            })
            context.attributes["activations"] = body_context["activations"]
            context.attributes["linear_before_reset"] = body_context["linear_before_reset"]
            tensor_array_inp = op.inputs[body_context["x_idx"]]
            if not tensor_array_inp.type == "TensorListFromTensor":
                continue
            final_consumers = g.find_output_consumers(op.output[body_context["out_idx"]])
            output_ys = [n.output[0] for n in final_consumers if n.type == "TensorListStack"]
            context.onnx_input_ids["X"] = tensor_array_inp.input[0]
            if body_context["seq_len_idx"] is None:
                context.onnx_input_ids["sequence_lens"] = ""
            else:
                context.onnx_input_ids["sequence_lens"] = op.input[body_context["seq_len_idx"]]
            context.onnx_input_ids["initial_state"] = initial_state
            gru_rewriter = GRUUnitRewriter(g)
            gru_rewriter.process_weights_and_bias(context)
            gru_node = gru_rewriter.create_rnn_node(context)
            # drop the num_directions axis before handing outputs back to consumers
            squeeze_output = GraphBuilder(g).make_squeeze({"data": gru_node.output[0], "axes": [1]})
            for output in output_ys:
                g.replace_all_inputs(output, squeeze_output)
            f_state_squeeze = GraphBuilder(g).make_squeeze({"data": gru_node.output[1], "axes": [0]})
            g.replace_all_inputs(op.output[body_context["initial_state_idx"]], f_state_squeeze)
    return g.get_nodes()
| 7,461 | 42.132948 | 103 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/rewriter/gru_rewriter.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.rewriter.gru_rewriter
"""
import logging
import numpy as np
from tf2onnx import utils
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.rewriter.rnn_utils import RNNUnitType, get_weights_from_const_node
from tf2onnx.rewriter.unit_rnn_rewriter_base import UnitRnnRewriterBase
# pylint: disable=invalid-name,unused-argument,missing-docstring
logger = logging.getLogger(__name__)
class GRUUnitRewriter(UnitRnnRewriterBase):
    """Rewrites a matched tf GRU while-loop into a single ONNX GRU node.

    Supports GRUCell, GRUBlockCell and CudnnCompatibleGRUCell patterns, plus
    pre-extracted keras weights (context.from_keras). Fix vs. previous
    revision: removed a leftover debug print loop in process_weights_and_bias.
    """

    def __init__(self, g):
        super(GRUUnitRewriter, self).__init__(g)
        self.gru_cell_type = None
        # {var_name: (finder, connector)} per UnitRnnRewriterBase contract
        self.state_variable_handlers = [
            {"state": (self._state_variable_finder, self._connect_gru_state_to_graph)}
        ]

    def run(self):
        logger.debug("enter gru rewriter")
        return super(GRUUnitRewriter, self).run()

    def find_cell(self, context):
        """Try each known GRU cell pattern; remember which one matched."""
        gru_cell_types = [RNNUnitType.GRUCell, RNNUnitType.GRUBlockCell, RNNUnitType.CudnnCompatibleGRUCell]
        for cell_type in gru_cell_types:
            cell_match = self._match_cell(context, cell_type)
            if cell_match:
                self.gru_cell_type = cell_type
                logger.debug("parsing unit is %s", cell_type)
                return cell_match
        logger.debug("cannot parse unit")
        return None

    def get_weight_and_bias(self, context):
        """Extract kernel/bias constants from the matched cell, or None on failure."""
        match = context.cell_match
        gate_kernel = get_weights_from_const_node(self.g, match.get_op("gate_kernel"))
        gate_bias = get_weights_from_const_node(self.g, match.get_op("gate_bias"))
        res = {
            "gate_kernel": gate_kernel,
            "gate_bias": gate_bias
        }
        # differ on memory gate:
        # GRUCell: h'_t = tanh(concat(x_t, r_t .* h_t-1) * W + b)
        # CudnnCompatibleGRUCell: h'_t = tanh(x_t * W_x + b_x + r_t .* (h_t-1 * W_h + b_h))
        if self.gru_cell_type == RNNUnitType.CudnnCompatibleGRUCell:
            hidden_state_kernel = get_weights_from_const_node(
                self.g, match.get_op("hidden_state_kernel")
            )
            hidden_state_bias = get_weights_from_const_node(
                self.g, match.get_op("hidden_state_bias")
            )
            hidden_input_kernel = get_weights_from_const_node(
                self.g, match.get_op("hidden_input_kernel")
            )
            hidden_input_bias = get_weights_from_const_node(
                self.g, match.get_op("hidden_input_bias")
            )
            if not all(val is not None for val in [
                    hidden_state_kernel, hidden_state_bias,
                    hidden_input_kernel, hidden_input_bias
            ]):
                logger.debug("rnn weights check failed, skip")
                return None
            hidden_kernel = np.concatenate([hidden_input_kernel, hidden_state_kernel])
            # apply the linear transformation before multiplying by the output of reset gate
            context.attributes["linear_before_reset"] = 1
            res["hidden_kernel"] = hidden_kernel
            res["hidden_bias"] = hidden_input_bias
            # recurrence bias for hidden gate
            res["Rb_h"] = hidden_state_bias
        elif self.gru_cell_type in [RNNUnitType.GRUCell, RNNUnitType.GRUBlockCell]:
            hidden_kernel = get_weights_from_const_node(self.g, match.get_op("hidden_kernel"))
            hidden_bias = get_weights_from_const_node(self.g, match.get_op("hidden_bias"))
            res["hidden_kernel"] = hidden_kernel
            res["hidden_bias"] = hidden_bias
        if not all(val is not None for val in res.values()):
            logger.debug("rnn weights check failed, skip")
            return None
        logger.debug("find needed weights")
        return res

    def _state_variable_finder(self, context):
        """Locate the hidden-state loop variable for the matched cell type."""
        if self.gru_cell_type in [
                RNNUnitType.GRUCell,
                RNNUnitType.CudnnCompatibleGRUCell
        ]:
            gru_cell = context.cell_match
            return self._find_state_variable_with_select(
                context,
                gru_cell.get_op("cell_output").output[0],
                [gru_cell.get_op("cell_inputs")]
            )
        if self.gru_cell_type == RNNUnitType.GRUBlockCell:
            gru_block_cell = context.cell_match.get_op("gru_block_cell")
            return self._find_state_variable_with_select(
                context,
                gru_block_cell.output[3],
                [gru_block_cell]
            )
        return None

    def parse_attributes(self, context):
        # in tf, only activation of hidden gate is optional, input and update gate always use sigmoid
        match = context.cell_match
        activations = ["Sigmoid", "Tanh"]
        if self.gru_cell_type == RNNUnitType.GRUCell:
            activation_op = match.get_op("optional_activation")
            activations = ["Sigmoid", activation_op.type]
        context.attributes["activations"] = activations
        return True

    def is_valid(self, context):
        # except for ct, ht or ct_ht, there are at most 2 state variables
        other_state_variables_num = len(context.loop_properties.state_variables) - \
                                    len(context.state_variables)
        if other_state_variables_num > 2:
            logger.debug("found %d other state variables", other_state_variables_num)
            return False
        # output should be no more than 1
        outputs = context.loop_properties.scan_outputs_exits
        if len(outputs) > 1:
            logger.debug("found %d outputs for gru: %s", len(outputs), outputs)
            return False
        return True

    def _make_constants(self, context, W_zrh, R_zrh, B_zrh):
        """Create W/R/B const nodes and record their ids plus sizes on the context."""
        input_size = W_zrh.shape[-1]
        hidden_size = R_zrh.shape[-1]
        w_name = utils.make_name("W")
        w_node = self.g.make_const(w_name, W_zrh, skip_conversion=True)
        r_name = utils.make_name("R")
        r_node = self.g.make_const(r_name, R_zrh, skip_conversion=True)
        b_name = utils.make_name("B")
        b_node = self.g.make_const(b_name, B_zrh, skip_conversion=True)
        context.input_size = input_size
        context.hidden_size = hidden_size
        context.onnx_input_ids["W"] = w_node.output[0]
        context.onnx_input_ids["R"] = r_node.output[0]
        context.onnx_input_ids["B"] = b_node.output[0]

    def _process_weights_and_bias_keras(self, context):
        # keras weights are already split/ordered; only transpose and add the
        # num_directions axis, and concatenate the two bias halves into B
        weights = context.weights
        W_zrh = np.expand_dims(weights["gate_kernel"].transpose(), axis=0)
        R_zrh = np.expand_dims(weights["hidden_kernel"].transpose(), axis=0)
        Wb_zrh = weights["gate_bias"]
        Rb_zrh = weights["hidden_bias"]
        B_zrh = np.expand_dims(np.concatenate((Wb_zrh, Rb_zrh), axis=0), axis=0)
        self._make_constants(context, W_zrh, R_zrh, B_zrh)

    def process_weights_and_bias(self, context):
        """
        why split the data in this way should refer to code of tensorflow GRU cell and official document of ONNX GRU
        """
        if context.from_keras:
            self._process_weights_and_bias_keras(context)
            return
        weights = context.weights
        # from code of tensorflow GRU cell, it can be known that shape of hidden_kernel(or candidate_kernel)
        # is (input_size+hidden_unit, hidden_unit)
        hidden_size = weights["hidden_kernel"].shape[1]
        input_size = weights["hidden_kernel"].shape[0] - hidden_size
        weight_dtype = weights["hidden_kernel"].dtype
        bias_dtype = weights["hidden_bias"].dtype
        # below code will use same notation as ONNX document
        # z means update gate, r means reset gate, h means hidden gate;
        # at this time weights of gate include input and state, will split it next
        r_kernel, z_kernel = np.split(weights["gate_kernel"], [hidden_size], axis=1)
        h_kernel = weights["hidden_kernel"]
        r_bias, z_bias = np.split(weights["gate_bias"], [hidden_size], axis=0)
        h_bias = weights["hidden_bias"]
        # ONNX GRU split weights of input and state, so have to split *_kernel
        input_r_kernel, state_r_kernel = np.split(r_kernel, [input_size], axis=0)
        input_z_kernel, state_z_kernel = np.split(z_kernel, [input_size], axis=0)
        input_h_kernel, state_h_kernel = np.split(h_kernel, [input_size], axis=0)
        W_zrh = np.concatenate((input_z_kernel, input_r_kernel, input_h_kernel), axis=1)
        R_zrh = np.concatenate((state_z_kernel, state_r_kernel, state_h_kernel), axis=1)
        # transpose weight matrix
        W_zrh = np.transpose(np.expand_dims(W_zrh, axis=0), axes=(0, 2, 1))
        R_zrh = np.transpose(np.expand_dims(R_zrh, axis=0), axes=(0, 2, 1))
        W_zrh = W_zrh.astype(weight_dtype)
        R_zrh = R_zrh.astype(weight_dtype)
        assert W_zrh.shape == (1, 3*hidden_size, input_size)
        assert R_zrh.shape == (1, 3*hidden_size, hidden_size)
        Wb_zrh = np.concatenate((z_bias, r_bias, h_bias), axis=0)
        # if tf doesn't provide bias for state, use 0
        zero = np.zeros_like(z_bias)
        # Rb_h is set in CudnnCompatibleGRUCell
        Rb_h = weights["Rb_h"] if "Rb_h" in weights else zero
        Rb_zrh = np.concatenate((zero, zero, Rb_h), axis=0)
        B_zrh = np.concatenate((Wb_zrh, Rb_zrh), axis=0)
        B_zrh = np.expand_dims(B_zrh, axis=0)
        B_zrh = B_zrh.astype(bias_dtype)
        assert B_zrh.shape == (1, 6*hidden_size)
        # create const ONNX node
        self._make_constants(context, W_zrh, R_zrh, B_zrh)

    def process_var_init_nodes(self, context):
        """Wire the initial hidden state, adding ONNX's num_directions axis."""
        assert "state" in context.state_variables.keys()
        initializer_input_id = context.state_variables["state"].enter_input_id
        node = self.g.get_node_by_output(initializer_input_id)
        if node.is_const():
            # constant initial state: fold the expand_dims into a new constant
            val = node.get_tensor_value(as_list=False)
            initial_name = utils.make_name("Const")
            new_val = np.expand_dims(val, axis=0)
            const_node = self.g.make_const(initial_name, new_val)
            context.onnx_input_ids["initial_state"] = const_node.output[0]
            return
        unsqueeze_node = GraphBuilder(self.g).make_unsqueeze(
            {'data': initializer_input_id, 'axes': [0]}, return_node=True)
        # rewire every consumer except the new Unsqueeze itself
        to_replace = [n for n in self.g.get_nodes() if n != unsqueeze_node]
        self.g.replace_all_inputs(initializer_input_id, unsqueeze_node.output[0], ops=to_replace)
        context.onnx_input_ids["initial_state"] = unsqueeze_node.output[0]

    def create_rnn_node(self, context):
        # specify if the RNN is forward, reverse, or bidirectional.
        # Must be one of forward (default), reverse, or bidirectional.
        # Here we won't mark bidirectional/reverse, we will have another rewriter running after this one,
        # which will based on patterns to combine a forward GRU and a backward GRU into a bidirectional one.
        num_direction = 1
        # todo: input_forget
        context.attributes["direction"] = "forward"
        context.attributes["hidden_size"] = context.hidden_size
        inputs = context.onnx_input_ids
        # sequence length is optional
        seq_len_input = utils.ONNX_EMPTY_INPUT
        if inputs["sequence_lens"]:
            seq_len_input = inputs["sequence_lens"]
        gru_inputs = [
            inputs["X"], inputs["W"], inputs["R"], inputs["B"],
            seq_len_input, inputs["initial_state"]]
        x_shape = self.g.get_shape(gru_inputs[0])
        x_seq_length = x_shape[0]
        x_batch_size = x_shape[1]
        out_dtype = self.g.get_dtype(gru_inputs[0])
        gru_node = self.g.make_node("GRU", gru_inputs, attr=context.attributes, output_count=2,
                                    shapes=[[x_seq_length, num_direction, x_batch_size, context.hidden_size],
                                            [num_direction, x_batch_size, context.hidden_size]],
                                    dtypes=[out_dtype, out_dtype], op_name_scope=context.rnn_scope)
        return gru_node

    def _connect_gru_state_to_graph(self, context):
        # in tf, state output shape is: [batch, hidden]
        # in onnx, output shape is: [number_directions, batch, hidden]
        exit_output_id = context.state_variables["state"].exit_output.id
        if not exit_output_id:
            logger.debug("no one consume state variable")
            return
        output_id = context.rnn_node.output[1]
        gru_state_shape = self.g.get_shape(output_id)
        output_shape = [gru_state_shape[1], gru_state_shape[2]]
        squeeze_node = GraphBuilder(self.g).make_squeeze(
            {'data': output_id, "axes": [0]}, shapes=[output_shape],
            dtypes=[self.g.get_dtype(output_id)], return_node=True)
        self.g.replace_all_inputs(exit_output_id, squeeze_node.output[0])  # ops=self.g.get_nodes()
| 12,892 | 45.713768 | 116 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/rewriter/unit_rnn_rewriter_base.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.rewriter.unit_rnn_rewriter_base
"""
import logging
from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT, get_pattern, \
get_rnn_scope_name, parse_rnn_loop, seq_len_pattern0, seq_len_pattern1
from tf2onnx.utils import is_tf_select_op, is_tf_tensor_array_write_op
from tf2onnx.graph_matcher import GraphMatcher
from tf2onnx.graph_builder import GraphBuilder
logger = logging.getLogger(__name__)
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access
class UnitRnnContext(Context):
    """Per-loop scratch state shared between the parse and rewrite phases."""

    def __init__(self):
        super(UnitRnnContext, self).__init__()
        self.rnn_scope = None  # tf name scope of the rnn loop
        self.cell_match = None  # matched cell
        self.weights = {}  # kernel/bias numpy arrays extracted from the cell
        self.seq_len_node = None  # node providing per-batch sequence lengths, if any
        self.state_variables = {}  # state vars used in the rnn unit (e.g. c_t, h_t)
        self.input_size = None
        self.hidden_size = None
        self.from_keras = False  # True when weights come pre-split from a keras layer
        self.attributes = {}  # onnx attributes
        # onnx inputs: [X, W, R, B, sequence_lens, initial_h, initial_c, P],
        # sequence_lens is optional, i.e., None
        self.onnx_input_ids = {}
class UnitRnnRewriterBase(LoopRewriterBase):
"""
main procedures:
1 extract info of while_loop based on loop_rewriter_base
2 check whether extracted loop is a unit rnn, fall back in necessity:
1 parse rnn scope name
2 check if it's a dynamic_rnn
3 find needed info from tensorflow graph
3 process found info according to ONNX requirement
"""
    def __init__(self, g):
        """Initialize over graph `g`; subclasses populate state_variable_handlers."""
        super(UnitRnnRewriterBase, self).__init__(g)
        # {var_name: (finder, connector)}
        self.state_variable_handler = None
        self.state_variable_handlers = None
    def create_context(self):
        """Return a fresh per-loop context (overrides LoopRewriterBase)."""
        return UnitRnnContext()
    def run(self):
        """Run the loop-rewriting pipeline provided by LoopRewriterBase."""
        return self.run_internal()
    def need_rewrite(self, context):
        """Return True when the while-loop described by `context` is a rewritable unit RNN.

        Chains three gates: the loop parses as a dynamic_rnn, the cell/weights/
        state can be extracted (parse_unit_rnn), and subclass validation passes.
        """
        context.rnn_scope = get_rnn_scope_name(context.while_context_scope)
        if not parse_rnn_loop(self.g, context.loop_properties, context.rnn_scope,
                              context.while_context_scope):
            logger.debug("parse_rnn_loop failed, SKIP")
            return False
        if not self.parse_unit_rnn(context):
            logger.debug("failed to parse unit rnn, SKIP")
            return False
        if not self.is_valid(context):
            logger.debug("parsed rnn is not valid, SKIP")
            return False
        return True
    def is_valid(self, context):
        """Subclass hook to veto a parsed RNN; base implementation accepts all."""
        return True
    def parse_unit_rnn(self, context):
        """
        parse needed info from tensorflow graph:
            1 weight
            2 state variables used in rnn unit, such as c_t, h_t
            3 sequence node
            4 input_x
            5 attributes, e.g., activation_alpha, activation_beta... optional

        Returns True on success; each failed step logs and returns False so the
        caller can skip this loop.
        """
        logger.debug("parse unit rnn")
        logger.debug("match unit cell against loop body graph")
        cell_match = self.find_cell(context)
        if not cell_match:
            logger.debug('failed to match cell pattern')
            return False
        context.cell_match = cell_match
        logger.debug("get_weight_and_bias starts")
        weights = self.get_weight_and_bias(context)
        if not weights:
            logger.debug("rnn weights check failed, SKIP")
            return False
        context.weights = weights
        if not self.get_state_variables(context):
            logger.debug("no cell variable initializers found, SKIP")
            return False
        # sequence length is optional in ONNX; record None when absent
        seq_len_node = self.find_sequence_length_node(context)
        if seq_len_node:
            logger.debug("find sequence node: %s", seq_len_node.name)
            context.onnx_input_ids["sequence_lens"] = seq_len_node.output[0]
        else:
            context.onnx_input_ids["sequence_lens"] = None
        # require exact one input
        inputs = context.loop_properties.scan_inputs_initial_values
        if len(inputs) != 1:
            logger.debug("found %d inputs for the unit rnn: %s",
                         len(inputs), inputs)
            return False
        context.onnx_input_ids["X"] = inputs[0]
        if not self.parse_attributes(context):
            logger.debug("wrong attributes found")
            return False
        return True
    def find_cell(self, context):
        """Abstract: match and return the cell pattern, or None (subclass must override)."""
        raise NotImplementedError()
    def _match_cell(self, context, unittype):
        """match unit cell

        Tries every registered pattern for `unittype` against the loop body
        subgraph; only an unambiguous (exactly one) match is accepted.
        """
        for cell_pattern in get_pattern(unittype):
            matcher = GraphMatcher(cell_pattern, allow_reorder=True)
            loop_props = context.loop_properties
            inputs = loop_props.state_inputs + loop_props.scan_inputs
            input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]
            outputs = loop_props.state_outputs + loop_props.scan_outputs
            output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
            # restrict matching to the ops between the loop's inputs and outputs
            body_graph_ops, _, _ = LoopRewriterBase.find_subgraph(
                set(input_ids),
                set(output_ids),
                self.g, merge_as_end=True
            )
            match_results = list(matcher.match_ops(body_graph_ops))
            if len(match_results) == 1:
                return match_results[0]
        return None
    def get_weight_and_bias(self, context):
        """Subclass hook: extract the cell's weight/bias values; falsy return skips the rewrite."""
        raise NotImplementedError()
    def parse_attributes(self, context):
        """Subclass hook: parse optional cell attributes; default accepts everything."""
        return True
    def rewrite(self, context):
        """Perform the rewrite using the info parsed into `context` by need_rewrite.

        Order matters: weights/bias first, then initial-state nodes, then the
        ONNX rnn node itself, and finally the output wiring.
        """
        logger.debug("enter unit rnn rewrite function")
        logger.debug("process the weights/bias/ft_bias, to fit onnx weights/bias requirements")
        self.process_weights_and_bias(context)
        self.process_var_init_nodes(context)
        logger.debug("start to build new rnn node")
        rnn_node = self.create_rnn_node(context)
        context.rnn_node = rnn_node
        logger.debug("start to handle outputs")
        # format of ONNX output is different with tf
        self.process_outputs(context)
        logger.debug("rewrite successfully")
        return REWRITER_RESULT.OK
def get_state_variables(self, context):
"""
Get state variables by provided handlers. There maybe several handlers corresponding to
different patterns of state variables.
The commone method is to find state variables from loop property according to its
next_iteration_input and switch_true_identity_output, see lstm_rewriter_v2
"""
for handler in self.state_variable_handlers:
can_handle = True
for var_name, funcs in handler.items():
finder = funcs[0]
state_variable = finder(context)
if state_variable:
logger.debug("found state variable %s", var_name)
context.state_variables[var_name] = state_variable
else:
logger.debug("failed to get state variable %s", var_name)
can_handle = False
break
if can_handle:
self.state_variable_handler = handler
return True
return False
def find_sequence_length_node(self, context):
# get any state variable
state_variable = list(context.state_variables.values())[0]
next_iter_input_node = self.g.get_node_by_output(state_variable.next_iteration_input.id)
if not is_tf_select_op(next_iter_input_node):
logger.debug("no sequence length node is given")
return None
matcher = GraphMatcher(seq_len_pattern0)
match_result = matcher.match_op(next_iter_input_node)
if not match_result:
matcher = GraphMatcher(seq_len_pattern1)
match_result = matcher.match_op(next_iter_input_node)
if not match_result:
raise RuntimeError("failed to find sequence length.")
return match_result.get_op("seq_len_node")
    def process_weights_and_bias(self, context):
        """Subclass hook: convert the TF weights/bias into the ONNX rnn layout."""
        raise NotImplementedError()
    def process_var_init_nodes(self, context):
        """Subclass hook: wire up the initial-state inputs of the new rnn node."""
        raise NotImplementedError()
    def create_rnn_node(self, context):
        """Subclass hook: build and return the ONNX rnn node (e.g. LSTM/GRU)."""
        raise NotImplementedError()
    def process_outputs(self, context):
        """Connect the ONNX rnn node's outputs to the graph in place of the TF loop outputs."""
        # funcs[1] is the output connector paired with the finder used in get_state_variables
        for var_name, funcs in self.state_variable_handler.items():
            output_connector = funcs[1]
            output_connector(context)
            logger.debug("connect output of %s to graph", var_name)
        self.connect_unit_rnn_output_to_graph(context)
    def connect_unit_rnn_output_to_graph(self, context):
        """Route the rnn node's Y output to consumers of the loop's scan-output exit."""
        outputs = context.loop_properties.scan_outputs_exits
        if not outputs:
            logger.debug("no one consume output")
            return
        gather_output_id = outputs[0].id
        logger.debug("found output for rnn: %s", gather_output_id)
        # in tf batch major mode, output shape is : [batch, time, hidden]
        # in time major mode, output shape is: [time, batch, hidden]
        # in onnx, output shape is : [time, num_directions, batch, hidden]
        rnn_node = context.rnn_node
        output_id = rnn_node.output[0]
        rnn_output_shape = self.g.get_shape(output_id)
        # squeeze out the num_directions axis (axis 1) to match the TF layout
        squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]]
        gb = GraphBuilder(self.g)
        squeeze_node = gb.make_squeeze({'data': output_id, "axes": [1]},
                                       shapes=[squeeze_output_shape],
                                       dtypes=[self.g.get_dtype(output_id)],
                                       return_node=True)
        self.g.replace_all_inputs(gather_output_id, squeeze_node.output[0])  # ops=self.g.get_nodes()
    def _find_state_variable_with_select(self, context,
                                         next_iteration_input,
                                         switch_true_identity_consumers):
        """
        Find state variables from switch_true_identity_consumers to next_iteration_input.
        Select maybe added after next_iteration_input.

        Returns the single matching state variable, or None when zero or
        several variables match. NOTE: when exactly one Select wraps the state
        update, `switch_true_identity_consumers` is mutated in place (the
        Select is appended) before matching.
        """
        # find all select not followed by TensorArrayWrite
        select = []
        for c in self.g.find_output_consumers(next_iteration_input):
            if not is_tf_select_op(c):
                continue
            out_ta_writer = [
                o for o in self.g.find_output_consumers(c.output[0]) if is_tf_tensor_array_write_op(o)
            ]
            if out_ta_writer:
                continue
            select.append(c)
        # a single such Select means seq-len masking wraps the state update:
        # follow its output and require it to consume the switch_true_identity too
        if len(select) == 1:
            next_iteration_input = select[0].output[0]
            switch_true_identity_consumers.append(select[0])
        logger.debug(
            "try to find state variable from [%s, %s]",
            next_iteration_input,
            switch_true_identity_consumers
        )
        def checker(state_variable):
            # variable must produce the expected next-iteration input ...
            if state_variable.next_iteration_input.id != next_iteration_input:
                return False
            # ... and its switch_true_identity output must feed every expected consumer
            for consumer in switch_true_identity_consumers:
                if state_variable.switch_true_identity_output.id not in consumer.input:
                    return False
            return True
        state_variables = context.loop_properties.get_variables(checker)
        if len(state_variables) != 1:
            logger.debug("found %d state variables", len(state_variables))
            return None
        return state_variables[0]
| 11,475 | 36.503268 | 114 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/rewriter/rnn_utils.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.rewriter.rnn_utils - rnn support
"""
from collections import defaultdict
from enum import Enum
import logging
import numpy as np
from tf2onnx import utils
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.graph_matcher import OpTypePattern # pylint: disable=unused-import
# pylint: disable=invalid-name,unused-argument,missing-docstring
logger = logging.getLogger(__name__)
class REWRITER_RESULT(Enum):
    """Outcome reported by a rewriter pass."""
    SKIP = 1  # pattern did not apply
    OK = 2    # rewrite succeeded
    FAIL = 3  # rewrite was attempted but failed
# TensorFlow LSTMCell/BasicLSTMCell and Keras LSTM computation graph matching
def insert_activation(activation, name="", inputs=None):
    """Return an OpTypePattern for the given activation wrapping `inputs`.

    "hard_sigmoid" matches TF's max(min(x*0.2 + 0.5, 1), 0) decomposition;
    anything else matches one of the default Tanh/Relu/Sigmoid ops.
    """
    inputs = inputs if inputs else []  # to avoid empty list as default arg
    if activation == "hard_sigmoid":
        return OpTypePattern("Maximum", inputs=[
            OpTypePattern("Minimum", inputs=[
                OpTypePattern("Add|AddV2", inputs=[
                    OpTypePattern("Mul", inputs=[
                        *inputs,
                        OpTypePattern("*")  # mul(x, 0.2)
                    ]), OpTypePattern("*")  # add(x, 0.5)
                ]), OpTypePattern("*")  # minimum(x, 1)
            ]), OpTypePattern("*")  # maximum(x, 0)
        ])
    # Additional activation pattern can be added when needed:
    # https://www.tensorflow.org/api_docs/python/tf/keras/activations
    # otherwise, use default activations
    return OpTypePattern("Tanh|Relu|Sigmoid", name=name, inputs=inputs)
def make_lstm_xc_pattern(enter_or_id="Enter", from_keras=False, use_bias=False):
    """Pattern for the LSTM input/hidden projection (Xt*W + Ht-1*R [+ bias]).

    Keras keeps x and h MatMuls separate; plain TF concatenates [x, h] and
    uses a single MatMul followed by BiasAdd.
    """
    if from_keras:
        lstm_xh_pattern = OpTypePattern("Add|AddV2", allow_reorder=False, inputs=[
            # xt*(W^T)
            OpTypePattern("MatMul", name='x', inputs=[
                OpTypePattern("TensorListGetItem", name="xt"),
                OpTypePattern("*", name="W"),
            ], allow_reorder=False),
            # (ht-1)*(R^T)
            OpTypePattern("MatMul", name='h', inputs=[
                OpTypePattern("*", name="ht-1"),
                OpTypePattern("*", name="R"),
            ], allow_reorder=False),
        ])
        return lstm_xh_pattern if not use_bias else \
            OpTypePattern("BiasAdd", name="bias_add", inputs=[
                lstm_xh_pattern,
                OpTypePattern("*", name="cell_bias")
            ])
    return OpTypePattern("BiasAdd", name="bias_add", inputs=[
        OpTypePattern("MatMul", inputs=[
            OpTypePattern("ConcatV2|Concat", name="xh"),
            OpTypePattern(enter_or_id, inputs=[
                OpTypePattern("*", name="cell_kernel"),
            ]),
        ]),
        OpTypePattern(enter_or_id, inputs=[
            OpTypePattern("*", name="cell_bias"),
        ]),
    ])
def make_lstm_pattern(enter_or_id="Enter", from_keras=False, use_bias=False,
                      activation="", recurrent_activation=""):
    """Build the full LSTM cell pattern rooted at the h_t = o_t * act(c_t) Mul."""
    # split (Xt*(W[ifco]^T) + Ht-1*(R[ifco]^T)) on 'Const' axis
    lstm_xc_pattern = OpTypePattern('Split', inputs=[
        OpTypePattern("Const"),
        make_lstm_xc_pattern(enter_or_id, from_keras, use_bias)
    ])
    # TF forget gate bias
    lstm_fb_pattern = lstm_xc_pattern if from_keras else \
        OpTypePattern("Add|AddV2", inputs=[
            lstm_xc_pattern,
            OpTypePattern("*", name="ft_bias"),
        ])
    # cell state
    lstm_ct_pattern = OpTypePattern("Add|AddV2", name="ct", inputs=[
        OpTypePattern("Mul", name="ct_identity_consumer", inputs=[
            insert_activation(recurrent_activation, name="ft", inputs=[lstm_fb_pattern]),
            OpTypePattern("*", name="c"),
        ]),
        OpTypePattern("Mul", inputs=[
            insert_activation(recurrent_activation, name="it", inputs=[lstm_xc_pattern]),
            insert_activation(activation, name="gt", inputs=[lstm_xc_pattern]),
        ]),
    ])
    return OpTypePattern("Mul", name="ht", inputs=[
        insert_activation(recurrent_activation, name="ot", inputs=[lstm_xc_pattern]),
        insert_activation(activation, name="ct'", inputs=[lstm_ct_pattern]),
    ])
# canonical TF LSTM cell pattern
lstmcell_pattern = make_lstm_pattern()
# "optimized" variant: the MatMul has a Const kernel and is wrapped in
# Identity — presumably the shape after constant folding (TODO confirm)
xc_pattern_optimized = \
    OpTypePattern('Split', inputs=[
        OpTypePattern("Const"),
        OpTypePattern("Identity", inputs=[
            OpTypePattern("MatMul", inputs=[
                OpTypePattern("ConcatV2|Concat", name="xh"),
                OpTypePattern("Const", name="cell_kernel"),
            ]),
        ]),
    ])
lstmcell_pattern_optimized = \
    OpTypePattern('Mul', name='ht', inputs=[
        OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern_optimized]),
        OpTypePattern('Tanh', inputs=[
            OpTypePattern("Add|AddV2", name="ct", inputs=[
                OpTypePattern("Mul", name="ct_identity_consumer", inputs=[
                    OpTypePattern("Sigmoid", name="ft", inputs=[
                        OpTypePattern("Add|AddV2", inputs=[
                            xc_pattern_optimized,
                            OpTypePattern("*", name="ft_bias"),
                        ]),
                    ]),
                    OpTypePattern("*"),
                ]),
                OpTypePattern("Mul", inputs=[
                    OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern_optimized]),
                    OpTypePattern("Tanh", name="gt", inputs=[xc_pattern_optimized]),
                ]),
            ]),
        ]),
    ])
# input sequence: top to down, left to right
# split into update gate and reset gate
def make_gru_split_pattern(enter_or_id="Enter"):
    """Pattern for the GRU update/reset gate computation: sigmoid(BiasAdd(MatMul)) then Split."""
    return OpTypePattern("Split", inputs=[
        OpTypePattern("Const"),  # split dim, a constant
        OpTypePattern("Sigmoid", inputs=[
            OpTypePattern("BiasAdd", name="bias_add", inputs=[
                OpTypePattern(enter_or_id, inputs=[
                    OpTypePattern("*", name="gate_bias")
                ]),
                OpTypePattern("MatMul", name="update_reset_gate", inputs=[
                    OpTypePattern(enter_or_id, inputs=[
                        OpTypePattern("*", name="gate_kernel")
                    ]),
                    OpTypePattern("ConcatV2|Concat", name="cell_inputs")
                ])
            ])
        ])
    ])
# shared gate pattern instance used by the patterns below
gru_split_pattern = make_gru_split_pattern()
def make_grucell_pattern(enter_or_id="Enter"):
    """Pattern for the TF GRUCell output: h_t = u*h + (1-u)*act(candidate)."""
    return OpTypePattern("Add|AddV2", name="cell_output", inputs=[
        OpTypePattern("Mul", inputs=[
            make_gru_split_pattern(enter_or_id),
            OpTypePattern("Identity|Placeholder")
        ]),
        OpTypePattern("Mul", inputs=[
            OpTypePattern("Sub", inputs=[
                OpTypePattern("Const"),  # 1-u
                make_gru_split_pattern(enter_or_id)
            ], allow_reorder=False),
            OpTypePattern("*", name="optional_activation", inputs=[
                OpTypePattern("BiasAdd", inputs=[
                    OpTypePattern(enter_or_id, inputs=[
                        OpTypePattern("*", name="hidden_bias")
                    ]),
                    OpTypePattern("MatMul", inputs=[
                        OpTypePattern(enter_or_id, inputs=[
                            OpTypePattern("*", name="hidden_kernel")
                        ]),
                        OpTypePattern("ConcatV2|Concat")
                    ])
                ])
            ])
        ])
    ])
grucell_pattern = make_grucell_pattern()
def make_keras_gru_split_pattern(bias_name, kernel_name, input_name, input_op_type):
    """Pattern for one keras GRU projection (MatMul + BiasAdd, then Split)."""
    return OpTypePattern("Split", inputs=[
        OpTypePattern("Const"),
        OpTypePattern("BiasAdd", inputs=[
            OpTypePattern("MatMul", inputs=[
                OpTypePattern(input_op_type, name=input_name),
                OpTypePattern("Placeholder|PlaceholderV2|Identity", name=kernel_name),
            ], allow_reorder=False),
            OpTypePattern("Placeholder|PlaceholderV2", name=bias_name)
        ])
    ])
# input-side (x_t) and hidden-side (h_t-1) projections
keras_gru_split0_pattern = make_keras_gru_split_pattern("gate_bias", "gate_kernel", "gru_input", "TensorListGetItem")
keras_gru_split1_pattern = \
    make_keras_gru_split_pattern("hidden_bias", "hidden_kernel", "state", "Placeholder|PlaceholderV2")
keras_gru_sigmoid_pattern = \
    OpTypePattern("Sigmoid", inputs=[
        OpTypePattern("Add|AddV2", inputs=[
            keras_gru_split0_pattern,
            keras_gru_split1_pattern
        ])
    ])
# full keras GRU cell output: u*h + (1-u)*act(split0 + r*split1)
keras_gru_pattern = \
    OpTypePattern("Add|AddV2", name="cell_output", inputs=[
        OpTypePattern("Mul", inputs=[
            keras_gru_sigmoid_pattern,
            OpTypePattern("Placeholder|PlaceholderV2")
        ]),
        OpTypePattern("Mul", inputs=[
            OpTypePattern("Sub", inputs=[
                OpTypePattern("Const"),
                keras_gru_sigmoid_pattern
            ], allow_reorder=False),
            OpTypePattern("*", name="optional_activation", inputs=[
                OpTypePattern("Add|AddV2", inputs=[
                    keras_gru_split0_pattern,
                    OpTypePattern("Mul", inputs=[
                        keras_gru_sigmoid_pattern,
                        keras_gru_split1_pattern
                    ])
                ])
            ])
        ])
    ])
# CudnnCompatibleGRUCell: candidate uses r * (h*Rh + bRh) + (x*Wh + bWh),
# i.e. reset gate applied after the hidden projection
cudnn_compatible_grucell_pattern = \
    OpTypePattern("Add", name="cell_output", inputs=[
        OpTypePattern("Mul", inputs=[
            OpTypePattern("Sub", inputs=[
                OpTypePattern("Const"),  # 1-u
                gru_split_pattern
            ], allow_reorder=False),
            OpTypePattern("*", name="optional_activation", inputs=[
                OpTypePattern("Add", inputs=[
                    OpTypePattern("Mul", inputs=[
                        gru_split_pattern,
                        OpTypePattern("BiasAdd", inputs=[
                            OpTypePattern("Enter", inputs=[
                                OpTypePattern("*", name="hidden_state_bias")
                            ]),
                            OpTypePattern("MatMul", inputs=[
                                OpTypePattern("Enter", inputs=[
                                    OpTypePattern("*", name="hidden_state_kernel"),
                                ]),
                                OpTypePattern("Identity")
                            ])
                        ])
                    ]),
                    OpTypePattern("BiasAdd", inputs=[
                        OpTypePattern("Enter", inputs=[
                            OpTypePattern("*", name="hidden_input_bias")
                        ]),
                        OpTypePattern("MatMul", inputs=[
                            OpTypePattern("Enter", inputs=[
                                OpTypePattern("*", name="hidden_input_kernel"),
                            ]),
                            OpTypePattern("*")
                        ])
                    ])
                ])
            ])
        ]),
        OpTypePattern("Mul", inputs=[
            gru_split_pattern,
            OpTypePattern("Identity")
        ])
    ])
# GRUBlockCell with weights/bias coming in through loop Enter nodes
grublockcell_pattern0 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[
    OpTypePattern("*"),
    OpTypePattern("*"),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="gate_kernel")
    ]),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="hidden_kernel")
    ]),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="gate_bias")
    ]),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="hidden_bias")
    ])
])
# GRUBlockCell with weights/bias already folded to Const
grublockcell_pattern1 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[
    OpTypePattern("*"),
    OpTypePattern("*"),
    OpTypePattern("Const", name="gate_kernel"),
    OpTypePattern("Const", name="hidden_kernel"),
    OpTypePattern("Const", name="gate_bias"),
    OpTypePattern("Const", name="hidden_bias")
])
lstmblockcell_pattern = \
    OpTypePattern("LSTMBlockCell", name="lstm_block_cell", inputs=[
        OpTypePattern("*"),
        OpTypePattern("*"),
        OpTypePattern("*"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="cell_kernel")
        ]),
        OpTypePattern("*", name="Pi"),
        OpTypePattern("*", name="Pf"),
        OpTypePattern("*", name="Po"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="cell_bias")
        ])
    ])
# Select(time >= seq_len, ...) masking; seq_len via an Enter node...
seq_len_pattern0 = OpTypePattern("Select|SelectV2", inputs=[
    OpTypePattern("GreaterEqual", inputs=[
        OpTypePattern("*"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="seq_len_node")
        ])
    ]),
    OpTypePattern("*"),
    OpTypePattern("*")
])
# ... or directly from a Const
seq_len_pattern1 = OpTypePattern("Select|SelectV2", inputs=[
    OpTypePattern("GreaterEqual", inputs=[
        OpTypePattern("*"),
        OpTypePattern("Const", name="seq_len_node")
    ]),
    OpTypePattern("*"),
    OpTypePattern("*")
])
class RNNUnitType(Enum):
    """TF rnn cell flavors this module can pattern-match."""
    LSTMCell = 0  # TF LSTMCell and BasicLSTMCell share the same pattern
    LSTMBlockCell = 1
    GRUCell = 2
    GRUBlockCell = 3
    CudnnCompatibleGRUCell = 4
# candidate patterns per cell type; tried in order until exactly one matches
rnn_cell_patterns = {
    RNNUnitType.LSTMCell: [lstmcell_pattern, lstmcell_pattern_optimized],
    RNNUnitType.LSTMBlockCell: [lstmblockcell_pattern],
    RNNUnitType.GRUCell: [grucell_pattern],
    RNNUnitType.GRUBlockCell: [grublockcell_pattern0, grublockcell_pattern1],
    RNNUnitType.CudnnCompatibleGRUCell: [cudnn_compatible_grucell_pattern]
}
def get_pattern(cell_type_name):
    """Return the list of candidate graph patterns for the given RNNUnitType."""
    return rnn_cell_patterns[cell_type_name]
def get_rnn_scope_name(while_scope_name):
parts = while_scope_name.split('/')
rnn_scope = '/'.join(parts[0:-2]) + "/"
return rnn_scope
def parse_rnn_loop(graph, loop_properties, rnn_scope, while_context_scope):
    """check if the while loop is generated by dynamic_rnn or bidirectional_rnn

    Args:
        loop_properties: LoopProperties
        rnn_scope: rnn scope name
        while_context_scope: while loop scope name

    check a while loop is generated by dynamic_rnn or bidirectional_rnn by

    1. some patterns in _time_step in dynamic_rnn: tensor array read, tensor array write
    2. some patterns in control_flow_ops.while_loop in dynamic_rnn:
        cond: time < loop_bound
        loop_vars: (time, output_ta, state)
        time has name called "time"
        iteration_cnt is added by control flow.

    be noted:
    1. iteration counter does not exist in tf1.4 or earlier versions
    2. if dynamic_rnn's first input is not consumed, output ta does not exist.

    Returns:
        (time_var, iteration_var) when the loop matches, otherwise None.
    """
    from tf2onnx.rewriter.loop_rewriter_base import TensorArrayVariableType  # pylint: disable=import-outside-toplevel
    time_name = rnn_scope + "time"
    ta_array_name_prefix = rnn_scope + "dynamic_rnn/output_"
    iteration_counter_name = while_context_scope + "iteration_counter"
    found_time = False
    is_rnn_out_ta = None
    time_var = None
    iteration_var = None
    for val in loop_properties.all_variables.values():
        enter_input_node = graph.get_node_by_output(val.enter_input_id)
        if val.tensor_array_type == TensorArrayVariableType.GATHER_ALL:
            # output tensor array must come from dynamic_rnn's own scope
            ta_name = enter_input_node.get_attr("tensor_array_name").s.decode("utf-8")
            if not ta_name.startswith(ta_array_name_prefix):
                is_rnn_out_ta = False
        elif enter_input_node.name == time_name:
            found_time = True
            time_var = val
        elif enter_input_node.name == iteration_counter_name:
            iteration_var = val
    if not found_time or is_rnn_out_ta is False:
        logger.debug("this should not be a dynamic_rnn loop, found_time: %s, is_rnn_out_ta: %s",
                     found_time, is_rnn_out_ta)
        return None
    if not loop_properties.tensor_array_inputs:
        logger.debug("this should not be a dynamic_rnn loop, no ta input is found")
        return None
    return time_var, iteration_var
def get_weights_from_const_node(g, node):
    """Follow Identity nodes to a Const and return its value as a numpy array, else None."""
    temp = node
    val = None
    # this would help ignore Identity in non-const_folded graph.
    while temp.type == 'Identity':
        temp = temp.inputs[0]
    if temp and temp.type == 'Const':
        val = temp.get_tensor_value(as_list=False)
        # cast to the dtype recorded for the graph output, in case they differ
        dtype = utils.map_onnx_to_numpy_type(g.get_dtype(temp.output[0]))
        val = val.astype(dtype)
        logger.debug("found weights %s", temp.name)
    else:
        logger.debug("weight node seems not to be Const, skip, node name is %s", temp.name)
        return None
    return val
######################################################
#### Utilities for bidirectional rnn #######
######################################################
class ONNX_RNN_TYPE(Enum):
    """ONNX rnn op families handled by the bidirectional-rnn utilities."""
    GRU = 0
    LSTM = 1
# ONNX op type string for each rnn family.
onnx_rnn_type_mapping = {
    ONNX_RNN_TYPE.GRU: "GRU",
    ONNX_RNN_TYPE.LSTM: "LSTM"
}
# Attributes that must match between a fw and bw rnn for them to be merged.
# NOTE: both entries are lists; the GRU entry previously used a set literal,
# which behaved identically for iteration/membership but was inconsistent.
onnx_rnn_attr_mapping = {
    ONNX_RNN_TYPE.LSTM: [
        "clip",
        "hidden_size",
        "input_forget"
    ],
    ONNX_RNN_TYPE.GRU: [
        "clip",
        "hidden_size",
        "linear_before_reset"
    ]
}
# Index of the sequence_lens input on the ONNX rnn node (same for both types).
onnx_rnn_seq_len_index_mapping = {
    ONNX_RNN_TYPE.LSTM: 4,
    ONNX_RNN_TYPE.GRU: 4
}
def find_bidirectional_rnns(g, ops, rnn_type):
    """
    Find possible bidirectional rnns, return: list of tuple,
    Format of tuple is (fw onnx rnn node, bw onnx rnn node).
    """
    fw_rnns = defaultdict(list)
    bw_rnns = defaultdict(list)
    for n in g.get_nodes():
        if n.type != onnx_rnn_type_mapping[rnn_type]:
            continue
        input_id = n.input[0]
        temp = n.inputs[0]
        is_bw = False
        is_transposed = False
        # peel Transpose / reverse / Identity wrappers to find the underlying input;
        # a reverse op feeding the rnn marks it as the backward direction
        if temp.type == "Transpose":
            input_id = temp.input[0]
            temp = temp.inputs[0]
            is_transposed = True
        if utils.is_tf_reverse_op(temp):
            input_id = temp.input[0]
            temp = temp.inputs[0]
            is_bw = True
        if (not is_transposed) and temp.type == "Transpose":
            input_id = temp.input[0]
            temp = temp.inputs[0]
        # register up to two Identity-aliased ids so fw/bw can meet on any of them
        input_ids = [input_id]
        if temp.type == "Identity":
            input_ids.append(temp.input[0])
            temp = temp.inputs[0]
        if temp.type == "Identity":
            input_ids.append(temp.input[0])
        if is_bw:
            # if output 0 is consumed and there is no reverse after the 1st output.
            # it's not backward rnn.
            if g.find_output_consumers(n.output[0]) and not get_reverse_or_slice_nodes_after_y_output(g, n):
                logger.warning("rnn %s following Reverse op isn't the part of bi-rnn.", n.name)
                continue
            logger.debug("find bw rnn %s", input_ids)
            for input_id in input_ids:
                bw_rnns[input_id].append(n)
        else:
            logger.debug("find fw rnn %s", input_ids)
            for input_id in input_ids:
                fw_rnns[input_id].append(n)
    # fw_rnn and bw_rnn must share the same input
    birnn_input = list(set(fw_rnns.keys()).intersection(bw_rnns.keys()))
    bi_rnns = []
    matched_rnn = []
    for inp in birnn_input:
        fw_rnn = fw_rnns[inp]
        bw_rnn = bw_rnns[inp]
        # it's possible several bi-rnns share the same input
        for fw_n in fw_rnn:
            for bw_n in bw_rnn:
                if belong_to_birnn(g, fw_n, bw_n, rnn_type) and \
                        fw_n not in matched_rnn and bw_n not in matched_rnn:
                    logger.debug("found birnn comprising %s and %s", fw_n.name, bw_n.name)
                    bi_rnns.append((fw_n, bw_n))
                    matched_rnn.extend([fw_n, bw_n])
    return bi_rnns
def belong_to_birnn(g, fw_rnn, bw_rnn, rnn_type):
    """
    Check whether fw_rnn and bw_rnn are part of the same birnn.
    If fw_rnn and bw_rnn have the same attributes except those related to activation
    and share the same seq_len, they are able to be merged into a bi-rnn.
    """
    logger.debug("check whether %s and %s are part of birnn", fw_rnn.name, bw_rnn.name)
    for name in onnx_rnn_attr_mapping[rnn_type]:
        fw_attr_value = fw_rnn.get_attr_value(name)
        bw_attr_value = bw_rnn.get_attr_value(name)
        if fw_attr_value != bw_attr_value:
            logger.debug(
                "fw_rnn and bw_rnn mismatch at attr %s: %s, %s",
                name, fw_attr_value, bw_attr_value
            )
            return False
    # sequence lengths must be provably equal, not just same-named inputs
    seq_len_index = onnx_rnn_seq_len_index_mapping[rnn_type]
    fw_seq_len = fw_rnn.input[seq_len_index]
    bw_seq_len = bw_rnn.input[seq_len_index]
    if not utils.have_same_inference_value(g, fw_seq_len, bw_seq_len):
        logger.debug(
            "fw_rnn and bw_rnn have different seq_len input: %s, %s",
            fw_seq_len, bw_seq_len
        )
        return False
    return True
def is_tail_slice_op(node):
    """True if node is a StridedSlice taking the last element on axis 0 (i.e. output[-1])."""
    return (
        node.type == 'StridedSlice' and
        node.inputs[1].get_tensor_value() == [-1] and
        node.inputs[2].get_tensor_value() == [0] and
        node.inputs[3].get_tensor_value() == [1] and
        node.get_attr('shrink_axis_mask').i == 1
    )
def get_reverse_or_slice_nodes_after_y_output(g, rnn_bw):
    """Return the reverse/tail-slice nodes consuming the bw rnn's Y output, or [] when not found.

    Expects the chain Y -> Squeeze [-> Transpose] [-> Identity [-> Identity]]
    -> reverse-or-tail-slice consumer(s).
    """
    bw_consumers = g.find_output_consumers(rnn_bw.output[0])
    # todo: figure out a better way to remove reverse op
    squeeze_nodes = [c for c in bw_consumers if c.type == "Squeeze"]
    s_cnt = len(squeeze_nodes)
    if s_cnt == 1:
        s = squeeze_nodes[0]
        reverse_or_slice_nodes = g.find_output_consumers(s.output[0])
        if len(reverse_or_slice_nodes) == 1:
            # skip over an optional Transpose and up to two Identity nodes
            if reverse_or_slice_nodes[0].type == "Transpose":
                reverse_or_slice_nodes = g.find_output_consumers(reverse_or_slice_nodes[0].output[0])
            if len(reverse_or_slice_nodes) == 1 and reverse_or_slice_nodes[0].type == "Identity":
                reverse_or_slice_nodes = g.find_output_consumers(reverse_or_slice_nodes[0].output[0])
            if len(reverse_or_slice_nodes) == 1 and reverse_or_slice_nodes[0].type == "Identity":
                reverse_or_slice_nodes = g.find_output_consumers(reverse_or_slice_nodes[0].output[0])
            are_all_reverse_or_slice = all([
                utils.is_tf_reverse_op(r_op) or is_tail_slice_op(r_op)
                for r_op in reverse_or_slice_nodes
            ])
            if are_all_reverse_or_slice:
                return reverse_or_slice_nodes
            logger.debug("bw y output is used followed by reverse node")
            return []
        logger.debug("unexpected number of transpose after RNN 1st output:%s", s_cnt)
        return []
    logger.debug("unexpected number of squeeze following RNN 1st output:%s", s_cnt)
    return []
def get_np_val_for_const(g, node, input_index):
    """Return the numpy value of node's input at input_index (input assumed const)."""
    return node.inputs[input_index].get_tensor_value(as_list=False)
def check_const(g, input_id):
    """Return (True, value) when input_id is produced by a const node, else (None, None)."""
    node = g.get_node_by_output(input_id)
    if node and node.is_const():
        return (True, node.get_tensor_value(as_list=False))
    return (None, None)
def process_single_init_node(g, fw_init_input_id, bw_init_input_id, to_append):
    """Merge fw/bw initial-state inputs into one tensor along axis 0 for the bi-rnn node.

    Concatenates eagerly when both sides are const; otherwise emits a runtime
    Concat node which is also appended to `to_append`. Returns the node.
    """
    fw_init_is_const, init_fw_val = check_const(g, fw_init_input_id)
    bw_init_is_const, init_bw_val = check_const(g, bw_init_input_id)
    if fw_init_is_const and bw_init_is_const:
        initial_val = np.concatenate((init_fw_val, init_bw_val), axis=0)
        init_name = utils.make_name("initial")
        init_node = g.make_const(init_name, initial_val, skip_conversion=True)
    else:
        init_node = g.make_node("Concat", [fw_init_input_id, bw_init_input_id], attr={"axis": 0})
        to_append.append(init_node)
    return init_node
def slice_birnn_for_original_rnn_consumers(g, rnn_fw, rnn_bw, bi_rnn, rnn_output_index, all_nodes, to_remove):
    """Redirect consumers of the original fw/bw rnn outputs to slices of the merged bi-rnn output.

    For the Y output (index 0) the directions axis is 1 and bw-side
    reverse/tail-slice nodes are replaced; for Y_h/Y_c (index 1/2) it is 0.
    fw consumers get direction slice [0:1], bw consumers get [1:2].
    """
    fw_consumers = g.find_output_consumers(rnn_fw.output[rnn_output_index])
    bw_consumers = g.find_output_consumers(rnn_bw.output[rnn_output_index])
    if not fw_consumers and not bw_consumers:
        return
    if rnn_output_index == 0:
        axis = 1
        # remove reverse(return_sequence=True) or tail slice(return_sequence=False) op for rnn_bw
        reverse_or_slice_nodes = get_reverse_or_slice_nodes_after_y_output(g, rnn_bw)
        for r_op in reverse_or_slice_nodes:
            if utils.is_tf_reverse_op(r_op):
                logger.debug("remove reverse op %s", r_op.name)
                g.replace_all_inputs(r_op.output[0], r_op.input[0], ops=all_nodes)
                to_remove.append(r_op.name)
            elif is_tail_slice_op(r_op):
                # in case of return_sequence=False
                # replace output[-1:] to output[0:1]
                attr = {"axes": [0], "starts": [0], "ends": [1]}
                inputs_map = {"data": r_op.input[0], **attr}
                slice_node_bw = GraphBuilder(g).make_slice(inputs_map)
                all_nodes.append(g.get_node_by_output(slice_node_bw))
                inputs_map = {"data": slice_node_bw, "axes": [0]}
                squeeze_node_bw = GraphBuilder(g).make_squeeze(inputs_map)
                all_nodes.append(g.get_node_by_output(squeeze_node_bw))
                g.replace_all_inputs(r_op.output[0], squeeze_node_bw, ops=all_nodes)
                to_remove.append(r_op.name)
    elif rnn_output_index in [1, 2]:
        axis = 0
    else:
        raise ValueError("rnn only should has 3 outputs.")
    if fw_consumers:
        attr = {"axes": [axis], "starts": [0], "ends": [1]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_fw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_fw))
        g.replace_all_inputs(rnn_fw.output[rnn_output_index], slice_node_fw, ops=fw_consumers)
    if bw_consumers:
        attr = {"axes": [axis], "starts": [1], "ends": [2]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_bw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_bw))
        g.replace_all_inputs(rnn_bw.output[rnn_output_index], slice_node_bw, ops=bw_consumers)
def remove_reverse_in_bw_input(g, bw_rnn_input_x, rnn_type):
    """Remove the now-unused reverse op that fed the bw rnn input after the birnn merge.

    Raises ValueError if some rnn node still consumes that input.
    """
    old_x_consumers = g.find_output_consumers(bw_rnn_input_x)
    # the transpose/reverse here must be followed by RNN if it is still useful.
    # this is guaranteed by dynamic_rnn logic.
    old_x_has_rnn_as_consumer = [n for n in old_x_consumers if n.type == onnx_rnn_type_mapping[rnn_type]]
    if not old_x_has_rnn_as_consumer:
        logger.debug("plan to remove useless reverse op in bw")
        reverse_node = g.get_node_by_output(bw_rnn_input_x)
        if reverse_node.type == "Transpose":
            reverse_node = reverse_node.inputs[0]
        g.replace_all_inputs(reverse_node.output[0], reverse_node.input[0])  # ops=g.get_nodes()
        g.remove_node(reverse_node.name)
    else:
        raise ValueError("Reverse is still used by RNN as input, cannot remove")
| 26,847 | 35.929849 | 118 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/rewriter/lstm_tf2_rewriter.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.rewriter.lstm_tf2_rewriter - Rewrites LSTM pattern used by tf2.
"""
import numpy as np
from tf2onnx.graph_matcher import GraphMatcher
from tf2onnx.rewriter.rnn_utils import make_lstm_pattern
from tf2onnx.tf_loader import find_function
from tf2onnx.rewriter.lstm_rewriter_base import LSTMContext
from tf2onnx.rewriter.lstm_rewriter import LSTMRewriter
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx import utils
# pylint: disable=invalid-name,unused-argument,missing-docstring, unused-variable
def _make_lstm_pattern_from_params(params):
    """Build the LSTM graph pattern described by one params variation dict."""
    if params.get("from_keras", False):
        return make_lstm_pattern(
            from_keras=True,
            use_bias=params.get("use_bias", False),
            activation=params.get("activation", ""),
            recurrent_activation=params.get("recurrent_activation", "")
        )
    # plain TF LSTM: Enter nodes are replaced by Identity in tf2 graphs
    return make_lstm_pattern(enter_or_id="Identity")
def rewriter_lstm_tf2(g, ops):
    """Rewrite TF2 While-loop LSTM patterns into a single ONNX LSTM node.

    Works in two phases:
      1. Match each known LSTM cell pattern variation inside the loop-body
         graph ``g``; on success record the extracted input indices, weight
         indices and activations in ``g.lstm_rewriter_context``.
      2. For every While op whose body graph carries such a context, pull the
         constant weights/bias/initial states out of the While's inputs, build
         the ONNX LSTM via :class:`LSTMRewriter`, and reconnect the While's
         consumers to the new node's (squeezed) outputs.

    Returns the node list of ``g`` after rewriting.
    """
    lstm_params_variations = [
        # default activations
        {"enter_or_id": "Identity"},  # TF LSTM
        {"from_keras": True, "use_bias": False},  # keras LSTM
        {"from_keras": True, "use_bias": True},  # keras LSTM with bias
        # hard sigmoid as recurrent activation
        {"from_keras": True, "use_bias": False, "recurrent_activation": "hard_sigmoid"},  # keras LSTM
        {"from_keras": True, "use_bias": True, "recurrent_activation": "hard_sigmoid"}  # keras LSTM with bias
        # Note: add other LSTM variations as needed
    ]
    for params in lstm_params_variations:
        pattern = _make_lstm_pattern_from_params(params)
        matcher = GraphMatcher(pattern, allow_reorder=False)
        match_results = list(matcher.match_ops(ops))
        for match_result in match_results:
            # Activations for the f/g/h gates must be representable by the
            # ONNX LSTM op; hard_sigmoid is mapped to ONNX "HardSigmoid".
            is_ft_hard_sigmoid = params.get("recurrent_activation", "") == "hard_sigmoid"
            recurrent_activation_f = "HardSigmoid" if is_ft_hard_sigmoid else \
                match_result.get_op("ft").type
            activation_g = match_result.get_op("gt").type
            activation_h = match_result.get_op("ct'").type
            default_activations = ["Relu", "Sigmoid", "Tanh"]
            if ((activation_g not in default_activations) or
                    (activation_h not in default_activations) or
                    (not is_ft_hard_sigmoid and recurrent_activation_f not in default_activations)):
                continue
            activations_fgh = [
                recurrent_activation_f,
                activation_g,
                activation_h
            ]
            # extract input x_t
            from_keras = params.get("from_keras", False)
            if from_keras:
                get_item = match_result.get_op("xt")
            else:
                # TF pattern concatenates x_t and h_(t-1) before the matmul.
                concat = match_result.get_op("xh")
                if len(concat.inputs) != 3:
                    continue
                get_item = concat.inputs[0]
            if not get_item.type == "TensorListGetItem":
                continue
            x_e = get_item.inputs[0]
            if not x_e.is_graph_input():
                continue
            x_idx = g.input_names.index(x_e.output[0])
            # extract output h_t
            ht_mul = match_result.get_op("ht")
            final_consumers = g.find_output_consumers(ht_mul.output[0])
            select_ops = [n for n in final_consumers if n.type == "Select"]

            def has_tensor_list_consumer(n):
                return any(c.type == "TensorListSetItem" for c in g.find_output_consumers(n.output[0]))

            select_ops = [n for n in select_ops if has_tensor_list_consumer(n)]
            if len(select_ops) == 1:
                # A Select between h_t and the previous output indicates a
                # sequence_length-driven loop (GreaterEqual over seq_len).
                greater_eq = select_ops[0].inputs[0]
                if greater_eq.type != "GreaterEqual":
                    continue
                seq_len = greater_eq.inputs[1]
                if not seq_len.is_graph_input():
                    continue
                seq_len_idx = g.input_names.index(seq_len.output[0])
                final_consumers = g.find_output_consumers(select_ops[0].output[0])
            else:
                seq_len_idx = None
            tensor_set_items = [n for n in final_consumers if n.type == "TensorListSetItem"]
            if len(tensor_set_items) != 1:
                continue
            if not tensor_set_items[0].inputs[0].is_graph_input():
                continue
            out_idx = g.input_names.index(tensor_set_items[0].input[0])
            # extract input h_(t-1) and c_(t-1)
            init_state = match_result.get_op("ht-1") if from_keras else concat.inputs[1]
            if init_state.is_graph_input():
                # c and h are separate
                h_idx = g.input_names.index(init_state.output[0])
                c_e = match_result.get_op("c")
                if not c_e.is_graph_input():
                    continue
                c_idx = g.input_names.index(c_e.output[0])
                ch_info = {
                    "state_is_tuple": True,
                    "c_idx": c_idx,
                    "h_idx": h_idx,
                }
            else:
                # c and h are concatenated; both must be Slices of the same
                # graph input.
                if not init_state.type == "Slice":
                    continue
                ch_e = init_state.inputs[0]
                if not ch_e.is_graph_input():
                    continue
                ch_idx = g.input_names.index(ch_e.output[0])
                c_e = match_result.get_op("c")
                if not c_e.type == "Slice" or c_e.input[0] != ch_e.output[0]:
                    continue
                ch_info = {
                    "state_is_tuple": False,
                    "ch_idx": ch_idx,
                }
            # extract weights and bias
            w_idx = hk_idx = gk_idx = 0
            ft_bias = None
            if from_keras:
                # hidden kernel (possibly wrapped in Identity ops)
                hk = match_result.get_op("R")
                while hk.type == "Identity":
                    hk = hk.inputs[0]
                if not hk.is_graph_input():
                    continue
                hk_idx = g.input_names.index(hk.output[0])
                # gate kernel
                gk = match_result.get_op("W")
                while gk.type == "Identity":
                    gk = gk.inputs[0]
                if not gk.is_graph_input():
                    continue
                gk_idx = g.input_names.index(gk.output[0])
                # Wb and Rb are concatenated
                b_idx = None
                if from_keras and params.get("use_bias", False):
                    bias_add = match_result.get_op("bias_add")
                    if bias_add is not None and bias_add.data_format != "NHWC":
                        continue
                    b_e = match_result.get_op("cell_bias")
                    while b_e.type == "Identity":
                        b_e = b_e.inputs[0]
                    if not b_e.is_graph_input():
                        continue
                    b_idx = g.input_names.index(b_e.output[0])
            else:
                # W and R are concatenated
                w_e = match_result.get_op("cell_kernel")
                if not w_e.is_graph_input():
                    continue
                w_idx = g.input_names.index(w_e.output[0])
                bias_add = match_result.get_op("bias_add")
                if bias_add is not None and bias_add.data_format != "NHWC":
                    continue
                b_e = match_result.get_op("cell_bias")
                if not b_e.is_graph_input():
                    continue
                b_idx = g.input_names.index(b_e.output[0])
                # forget-gate bias must be a constant with the bias's dtype
                ft_bias_node = match_result.get_op("ft_bias")
                if not ft_bias_node.is_const():
                    continue
                if g.get_dtype(ft_bias_node.output[0]) != g.get_dtype(b_e.output[0]):
                    continue
                ft_bias = ft_bias_node.get_tensor_value(as_list=False)
            # Everything needed for phase 2 is stashed on the body graph.
            g.lstm_rewriter_context = {
                # common
                "x_idx": x_idx,
                "out_idx": out_idx,
                "seq_len_idx": seq_len_idx,
                "bias_idx": b_idx,
                "from_keras": from_keras,
                "activations_fgh": activations_fgh,
                **ch_info,  # {state_is_tuple, h_idx, c_idx} or {state_is_tuple, ch_idx}
                # TF
                "weight_idx": w_idx,
                "ft_bias": ft_bias,
                # Keras
                "w_idx": gk_idx,
                "r_idx": hk_idx,
            }
    for op in ops:
        if op.is_while():
            body_graph = find_function(op.get_attr_str("body"))
            if body_graph.lstm_rewriter_context is None:
                continue
            body_context = body_graph.lstm_rewriter_context
            # parse weights
            consts = []
            if body_context["from_keras"]:
                wx = op.input[body_context["w_idx"]]
                wh = op.input[body_context["r_idx"]]
                wx_const = g.get_tensor_value(wx, as_list=False)
                wh_const = g.get_tensor_value(wh, as_list=False)
                consts.extend([wx, wh])
            else:
                w = op.input[body_context["weight_idx"]]
                w_const = g.get_tensor_value(w, as_list=False)
                consts.append(w)
            # parse bias
            if body_context["bias_idx"] is not None:
                b = op.input[body_context["bias_idx"]]
                b_const = g.get_tensor_value(b, as_list=False)
                consts.append(b)
            else:
                b_const = None
            # all weights/bias must be constants for the rewrite to apply
            if not all(g.is_const(c) for c in consts):
                continue
            # parse states
            if body_context["state_is_tuple"]:
                initial_c_sq = op.input[body_context["c_idx"]]
                initial_h_sq = op.input[body_context["h_idx"]]
                # ONNX initial_c/initial_h carry a leading num_directions dim
                initial_c = GraphBuilder(g).make_unsqueeze({"data": initial_c_sq, "axes": [0]})
                initial_h = GraphBuilder(g).make_unsqueeze({"data": initial_h_sq, "axes": [0]})
            else:
                initial_ch = op.input[body_context["ch_idx"]]
                if not g.is_const(initial_ch):
                    continue
                initial_ch_const = g.get_tensor_value(initial_ch, as_list=False)
                if not len(initial_ch_const.shape) == 2:
                    continue
                initial_ch_const = np.expand_dims(initial_ch_const, axis=0)
                initial_c_const, initial_h_const = np.split(initial_ch_const, 2, axis=2)
                initial_c = g.make_const(utils.make_name("initial_c"), initial_c_const).output[0]
                initial_h = g.make_const(utils.make_name("initial_h"), initial_h_const).output[0]
            # build LSTMContext
            context = LSTMContext()
            context.from_keras = body_context["from_keras"]
            if context.from_keras:
                context.weights.append({"w": wx_const, "r": wh_const, "bias": b_const})
            else:
                context.weights.append({"weight": w_const, "bias": b_const, "ft_bias": body_context["ft_bias"]})
            context.onnx_input_ids.append({})
            context.input_size.append(None)
            context.hidden_size.append(None)
            context.attributes.append({"activations": body_context['activations_fgh']})
            tensor_array_inp = op.inputs[body_context["x_idx"]]
            if not tensor_array_inp.type == "TensorListFromTensor":
                continue
            final_consumers = g.find_output_consumers(op.output[body_context["out_idx"]])
            output_ys = [n.output[0] for n in final_consumers if n.type == "TensorListStack"]
            context.onnx_input_ids[0]["X"] = tensor_array_inp.input[0]
            if body_context["seq_len_idx"] is None:
                context.onnx_input_ids[0]["sequence_lens"] = ""
            else:
                context.onnx_input_ids[0]["sequence_lens"] = op.input[body_context["seq_len_idx"]]
            context.onnx_input_ids[0]["initial_c"] = initial_c
            context.onnx_input_ids[0]["initial_h"] = initial_h
            lstm_rewriter = LSTMRewriter(g)
            lstm_rewriter.num_lstm_layers = 1
            lstm_rewriter.process_weights_and_bias(context)
            lstm_node = lstm_rewriter.create_rnn_node(context)[0]
            # drop the num_directions axis from Y before wiring consumers
            squeeze_output = GraphBuilder(g).make_squeeze({"data": lstm_node.output[0], "axes": [1]})
            for output in output_ys:
                g.replace_all_inputs(output, squeeze_output)
            if body_context["state_is_tuple"]:
                c_squeeze = GraphBuilder(g).make_squeeze({"data": lstm_node.output[2], "axes": [0]})
                h_squeeze = GraphBuilder(g).make_squeeze({"data": lstm_node.output[1], "axes": [0]})
                g.replace_all_inputs(op.output[body_context["c_idx"]], c_squeeze)
                g.replace_all_inputs(op.output[body_context["h_idx"]], h_squeeze)
            else:
                # rebuild the concatenated (c, h) state consumers expect
                concat_ch = g.make_node("Concat", [lstm_node.output[2], lstm_node.output[1]],
                                        attr={"axis": 2}).output[0]
                ch_squeeze = GraphBuilder(g).make_squeeze({"data": concat_ch, "axes": [0]})
                ch_output = op.output[body_context["ch_idx"]]
                g.replace_all_inputs(ch_output, ch_squeeze)
    return g.get_nodes()
| 13,566 | 42.207006 | 112 | py |
tensorflow-onnx | tensorflow-onnx-main/tf2onnx/rewriter/lstm_rewriter.py | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.rewriter.lstm_rewriter
"""
import logging
import numpy as np
from tf2onnx import utils
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.rewriter.rnn_utils import RNNUnitType, get_weights_from_const_node
from tf2onnx.utils import is_tf_concat_op, is_tf_slice_op
from tf2onnx.rewriter.lstm_rewriter_base import LSTMRewriterBase
# pylint: disable=invalid-name,unused-argument,missing-docstring
logger = logging.getLogger(__name__)
class LSTMRewriter(LSTMRewriterBase):
    """Rewrites TF1-style dynamic_rnn LSTM loops into ONNX ``LSTM`` nodes.

    Handles both ``LSTMCell`` and ``LSTMBlockCell`` units, possibly stacked
    into several layers, with either tuple (c, h) state or a single
    concatenated c/h state tensor.
    """

    def __init__(self, g):
        super(LSTMRewriter, self).__init__(g)
        # filled in by find_cell() once the unit pattern is identified
        self.lstm_cell_type = None
        self.num_lstm_layers = 0

    def run(self):
        """Entry point; delegates the rewrite loop to the base class."""
        logger.debug("enter lstm rewriter")
        return super(LSTMRewriter, self).run()

    def find_cell(self, context):
        """Match the loop body against known LSTM cell patterns.

        On a match, records the cell type and layer count, and registers the
        per-layer state-variable finder/connector handler pairs.  Returns the
        match results, or None when no pattern applies.
        """
        lstm_cell_types = [RNNUnitType.LSTMCell, RNNUnitType.LSTMBlockCell]
        for cell_type in lstm_cell_types:
            cell_match = self._match_cell(context, cell_type)
            if cell_match and len(cell_match) >= 1:
                self.num_lstm_layers = len(cell_match)
                logger.debug("number of LSTM layers: %s", self.num_lstm_layers)
                for i in range(self.num_lstm_layers):
                    # separate ct/ht state variables ...
                    self.state_variable_handlers.append({
                        "ct" + str(i): (self._ct_variable_finder, self._connect_lstm_yc_to_graph, i),
                        "ht" + str(i): (self._ht_variable_finder, self._connect_lstm_yh_to_graph, i)
                    })
                    # ... or one shared, concatenated ct_ht variable
                    self.state_variable_handlers.append({
                        "ct_ht" + str(i): (self._ct_ht_shared_variable_finder, self._connect_lstm_ych_to_graph, i)
                    })
                logger.debug("parsing unit is %s, num layers is %d", cell_type, self.num_lstm_layers)
            if cell_match:
                self.lstm_cell_type = cell_type
                logger.debug("parsing unit is %s", cell_type)
                return cell_match
        logger.debug("cannot parse unit")
        return None

    def get_weight_and_bias(self, context):
        """Collect the per-layer weight/bias dicts for all matched layers."""
        weight_and_bias = list()
        for i in range(self.num_lstm_layers):
            if self.lstm_cell_type == RNNUnitType.LSTMCell:
                weight_and_bias.append(self._get_weight_and_bias_for_lstm_cell(context, i))
            if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
                weight_and_bias.append(self._get_weight_and_bias_for_lstmblock_cell(context, i))
        return weight_and_bias

    def _get_weight_and_bias_for_lstmblock_cell(self, context, i):
        """Extract kernel, bias and forget_bias attr from an LSTMBlockCell match.

        Returns a {"weight", "bias", "ft_bias"} dict or None if the constants
        cannot be resolved or their shapes disagree.
        """
        cell_match = context.cell_match[i]
        w_node = cell_match.get_op("cell_kernel")
        w = get_weights_from_const_node(self.g, w_node)
        if w is None:
            logger.warning("Cannot find weight, SKIP")
            return None
        b_node = cell_match.get_op("cell_bias")
        b = get_weights_from_const_node(self.g, b_node)
        if b is None or b.shape[0] != w.shape[1]:
            logger.warning("cell_kernel and cell_bias's dimension doesn't match, SKIP")
            return None
        lstm_block_cell = cell_match.get_op("lstm_block_cell")
        # forget_bias is an op attribute for LSTMBlockCell, not a tensor
        ft_bias_val = np.array(
            lstm_block_cell.get_attr("forget_bias").f,
            dtype=b.dtype
        )
        return {
            "weight": w,
            "bias": b,
            "ft_bias": ft_bias_val
        }

    def _get_weight_and_bias_for_lstm_cell(self, context, i):
        """Extract kernel, bias and forget-gate bias from an LSTMCell match.

        A missing bias is replaced with zeros of the kernel's dtype.  Returns
        a {"weight", "bias", "ft_bias"} dict or None on any mismatch.
        """
        match = context.cell_match[i]
        w_e = match.get_op("cell_kernel")
        w = get_weights_from_const_node(self.g, w_e)
        if w is None or w.size == 0:
            return None
        # check https://www.tensorflow.org/versions/r1.8/api_docs/cc/class/tensorflow/ops/bias-add
        # for bias_add data format
        bias_add = match.get_op("bias_add")
        if bias_add is not None and bias_add.data_format != "NHWC":
            logger.debug("BiasAdd data_format is not NHWC, SKIP")
            return None
        b_e = match.get_op("cell_bias")
        if b_e is None:
            b = np.array([0 for i in range(len(w[0]))]).astype(w.dtype)
        else:
            b = get_weights_from_const_node(self.g, b_e)
            if b is None or b.shape[0] != w.shape[1]:
                logger.warning("cell_kernel and cell_bias's dimensions does not match, skip")
                return None
        ft_bias_node = match.get_op("ft_bias")
        ft_bias = get_weights_from_const_node(self.g, ft_bias_node)
        if ft_bias is None:
            return None
        if not b.dtype == ft_bias.dtype:
            return None
        return {
            "weight": w,
            "bias": b,
            "ft_bias": ft_bias
        }

    def parse_attributes(self, context):
        """Return False when the matched cell uses features (clip, peephole)
        the ONNX LSTM op conversion here does not support."""
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            lstm_block_cell = context.cell_match[0].get_op("lstm_block_cell")
            clip = float(lstm_block_cell.get_attr("cell_clip").f)
            # current LSTM op cannot handle clip
            if clip > 0:
                return False
            use_peephole = lstm_block_cell.get_attr_value("use_peephole")
            if use_peephole:
                return False
        return True

    def _ct_variable_finder(self, context, i):
        """Locate the loop state variable that carries the cell state c_t."""
        if self.lstm_cell_type == RNNUnitType.LSTMCell:
            lstm_cell = context.cell_match[i]
            return self._find_state_variable_with_select(
                context,
                lstm_cell.get_op("ct").output[0],
                [lstm_cell.get_op("ct_identity_consumer")]
            )
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            lstm_block_cell = context.cell_match[i].get_op("lstm_block_cell")
            # LSTMBlockCell exposes c_t on output index 1
            return self._find_state_variable_with_select(
                context,
                lstm_block_cell.output[1],
                [lstm_block_cell]
            )
        return None

    def _ht_variable_finder(self, context, i):
        """Locate the loop state variable that carries the hidden state h_t."""
        if self.lstm_cell_type == RNNUnitType.LSTMCell:
            lstm_cell = context.cell_match[i]
            return self._find_state_variable_with_select(
                context,
                lstm_cell.get_op("ht").output[0],
                [lstm_cell.get_op("xh")]
            )
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            lstm_block_cell = context.cell_match[i].get_op("lstm_block_cell")
            # LSTMBlockCell exposes h_t on output index 6
            return self._find_state_variable_with_select(
                context,
                lstm_block_cell.output[6],
                [lstm_block_cell]
            )
        return None

    def _ct_ht_shared_variable_finder(self, context, i):
        """Locate a single state variable holding concat(c_t, h_t).

        Only applies to LSTMCell with state_is_tuple=False; returns None when
        the concat or the downstream Slice consumers cannot be identified.
        """
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            return None
        lstm_cell = context.cell_match[i]
        ct = lstm_cell.get_op("ct").output[0]
        ht = lstm_cell.get_op("ht").output[0]
        ct_concat = [c for c in self.g.find_output_consumers(ct) if is_tf_concat_op(c)]
        ht_concat = [c for c in self.g.find_output_consumers(ht) if is_tf_concat_op(c)]
        if len(ct_concat) != 1 or len(ht_concat) != 1 or ct_concat[0] != ht_concat[0]:
            logger.debug("failed to find ct-ht concat")
            return None
        ct_ht_shared_output = ct_concat[0].output[0]
        consumers = []
        ct_identity_consumer = lstm_cell.get_op("ct_identity_consumer")
        ht_identity_consumer = lstm_cell.get_op("xh")
        ct_slice = [c for c in ct_identity_consumer.inputs if is_tf_slice_op(c)]
        ht_slice = [c for c in ht_identity_consumer.inputs if is_tf_slice_op(c)]
        if len(ct_slice) != 1 or len(ht_slice) != 1:
            logger.debug("failed to find slice op before identity consumers")
            return None
        consumers.extend([ct_slice[0], ht_slice[0]])
        return self._find_state_variable_with_select(
            context,
            ct_ht_shared_output,
            consumers
        )

    def is_valid(self, context):
        """Sanity-check the match: bounded extra state vars and <= 1 output."""
        # except for ct, ht or ct_ht, there are at most 2 state variables
        if len(context.loop_properties.state_variables) - \
                len(context.state_variables) > 2:
            return False
        # output is no more than 1
        outputs = context.loop_properties.scan_outputs_exits
        if len(outputs) > 1:
            logger.debug("found %d outputs for lstm: %s", len(outputs), outputs)
            return False
        return True

    def _convert_gates_icfo_to_iofc(self, gates, axis=0):
        # from Tensorflow: reorder gate blocks i,c,f,o -> i,o,f,c (ONNX order)
        return (np.concatenate((g[0], g[3], g[2], g[1]), axis=axis) for g in gates)

    def _convert_gates_ifco_to_iofc(self, gates, axis=0):
        # from Keras: reorder gate blocks i,f,c,o -> i,o,f,c (ONNX order)
        return (np.concatenate((g[0], g[3], g[1], g[2]), axis=axis) for g in gates)

    def _process_weights_and_bias_per_layer(self, context, i):
        """Convert layer ``i`` weights to ONNX LSTM layout.

        Returns (W, R, B): input kernel, recurrent kernel and concatenated
        Wb|Rb bias, all in ONNX iofc gate order with a leading
        num_directions axis.  Rb is zero-filled since TF/Keras keep a single
        bias; the TF forget-gate bias scalar is folded into the f-gate bias.
        """
        weights = context.weights[i]
        if context.from_keras:
            wx = weights["w"]
            wh = weights["r"]
            w_dtype = weights["w"].dtype
            hidden_size = int(wx.shape[1] / 4)
            # split bias for each hidden unit
            if weights["bias"] is not None:
                b_r_icfo = weights["bias"]  # (4 * num_units,)
                b_dtype = weights["bias"].dtype
                bias_dim = b_r_icfo.shape[0]
                assert int(bias_dim / 4) == hidden_size
                b_r_icfo = np.reshape(b_r_icfo, (1, bias_dim))
                bias_gates = np.split(b_r_icfo, 4, axis=1)
                wb_bias_iofc, = self._convert_gates_ifco_to_iofc([bias_gates], axis=1)
            else:
                bias_dim = 4 * hidden_size
                b_dtype = w_dtype  # use w_dtype if bias is not given
                wb_bias_iofc = np.zeros((1, bias_dim), dtype=b_dtype)
        else:
            w_r_icfo = weights["weight"]
            w_dtype = weights["weight"].dtype
            b_r_icfo = weights["bias"]
            b_dtype = weights["bias"].dtype
            ft_bias_scalar = weights["ft_bias"]
            # split bias for each hidden unit
            bias_dim = b_r_icfo.shape[0]
            hidden_size = int(bias_dim / 4)
            b_r_icfo = np.reshape(b_r_icfo, (1, bias_dim))  # (4 * num_units,)
            bias_gates = np.split(b_r_icfo, 4, axis=1)
            # fold TF's forget_bias into the f-gate bias (icfo index 2)
            bias_gates[2] = np.add(bias_gates[2], ft_bias_scalar)
            wb_bias_iofc, = self._convert_gates_icfo_to_iofc([bias_gates], axis=1)
            # TF stacks [Wx; Wh]; the last hidden_size rows are the h kernel
            [wx, wh] = np.split(w_r_icfo, [-1 * hidden_size])
            assert int(wx.shape[1] / 4) == hidden_size
        # fill Rb with zeros since TF and Keras have Wb bias.
        rb_bias_iofc = np.zeros((1, bias_dim), dtype=b_dtype)
        B = np.concatenate((wb_bias_iofc, rb_bias_iofc), axis=1)
        assert B.shape == (1, 2 * bias_dim)
        w_gates = np.split(wx, 4, axis=1)
        h_gates = np.split(wh, 4, axis=1)
        new_w, new_r = self._convert_gates_ifco_to_iofc([w_gates, h_gates], axis=1) if context.from_keras else \
            self._convert_gates_icfo_to_iofc([w_gates, h_gates], axis=1)
        W_iofc = np.transpose(new_w)
        R_iofc = np.transpose(new_r)
        W = np.array([W_iofc], w_dtype)
        R = np.array([R_iofc], w_dtype)
        return W, R, B

    def _make_constants(self, context, i, W, R, B):
        """Materialize W/R/B as graph constants and record their ids/sizes."""
        input_size = W.shape[-1]
        hidden_size = R.shape[-1]
        w_name = utils.make_name("W" + str(i))
        w_node = self.g.make_const(w_name, W, skip_conversion=True)
        r_name = utils.make_name("R" + str(i))
        r_node = self.g.make_const(r_name, R, skip_conversion=True)
        b_name = utils.make_name("B" + str(i))
        b_node = self.g.make_const(b_name, B, skip_conversion=True)
        context.input_size[i] = input_size
        context.hidden_size[i] = hidden_size
        context.onnx_input_ids[i]["W"] = w_node.output[0]
        context.onnx_input_ids[i]["R"] = r_node.output[0]
        context.onnx_input_ids[i]["B"] = b_node.output[0]

    def process_weights_and_bias_per_layer(self, context, i):
        """Convert layer ``i`` weights and add them as graph constants."""
        W, R, B = self._process_weights_and_bias_per_layer(context, i)
        self._make_constants(context, i, W, R, B)  # create node

    def process_weights_and_bias(self, context):
        """Convert weights/bias for every matched layer."""
        for i in range(self.num_lstm_layers):
            self.process_weights_and_bias_per_layer(context, i)

    def process_var_init_nodes(self, context):
        """Wire initial_h/initial_c inputs for every matched layer."""
        for i in range(self.num_lstm_layers):
            self.process_var_init_nodes_per_layer(context, i)

    def process_var_init_nodes_per_layer(self, context, i):
        """Resolve the ONNX initial_h/initial_c ids for layer ``i``.

        Raises ValueError when neither a shared ct_ht variable nor separate
        ct/ht variables were recorded for the layer.
        """
        init_h_id = None
        init_c_id = None
        if "ct_ht" + str(i) in context.state_variables:
            init_h_id, init_c_id = self._process_non_tuple_ch_init_nodes(context, i)
        elif "ct" + str(i) in context.state_variables and ("ht" + str(i)) in context.state_variables:
            init_h_id, init_c_id = self._process_tuple_ch_init_nodes(context, i)
        else:
            raise ValueError("no initializers, unexpected")
        assert init_h_id and init_c_id
        context.onnx_input_ids[i]["initial_h"] = init_h_id
        context.onnx_input_ids[i]["initial_c"] = init_c_id

    def _process_non_tuple_ch_init_nodes(self, context, i):
        """Split a concatenated (c|h) initial state into two unsqueezed halves.

        Returns (initial_c_id, initial_h_id) per the slice order below
        — NOTE(review): first slice [0:hidden] then [hidden:2*hidden];
        caller treats them as (h, c) — confirm ordering against the
        state-variable layout.
        """
        gb = GraphBuilder(self.g)
        input_id = context.state_variables["ct_ht" + str(i)].enter_input_id
        hidden_size = context.hidden_size[i]
        attr = {"axes": [1], "starts": [0], "ends": [hidden_size]}
        inputs_map = {"data": input_id, **attr}
        slice_node1 = GraphBuilder(self.g).make_slice(inputs_map)
        unsqueeze_node_1 = gb.make_unsqueeze({'data': slice_node1, "axes": [0]}, return_node=True)
        attr = {"axes": [1], "starts": [hidden_size], "ends": [hidden_size * 2]}
        inputs_map = {"data": input_id, **attr}
        slice_node2 = GraphBuilder(self.g).make_slice(inputs_map)
        unsqueeze_node_2 = gb.make_unsqueeze({'data': slice_node2, "axes": [0]}, return_node=True)
        return unsqueeze_node_1.output[0], unsqueeze_node_2.output[0]

    def _process_tuple_ch_init_nodes(self, context, i):
        """Return (initial_h_id, initial_c_id) for tuple-state layers."""
        h_init_input_id = context.state_variables["ht" + str(i)].enter_input_id
        c_init_input_id = context.state_variables["ct" + str(i)].enter_input_id
        h_node_output = self._process_c_or_h_init_nodes(h_init_input_id, context)
        c_node_output = self._process_c_or_h_init_nodes(c_init_input_id, context)
        return h_node_output, c_node_output

    def _process_c_or_h_init_nodes(self, initializer_input_id, context):
        """Add the leading num_directions axis to one initial-state tensor.

        Constants are expanded in place; non-constants get an Unsqueeze node
        spliced between the initializer and its consumers.
        """
        node = self.g.get_node_by_output(initializer_input_id)
        if node.is_const():
            val = node.get_tensor_value(as_list=False)
            initial_name = utils.make_name("Const")
            new_val = np.expand_dims(val, axis=0)
            const_node = self.g.make_const(initial_name, new_val)
            return const_node.output[0]
        gb = GraphBuilder(self.g)
        # despite the name, this is an Unsqueeze (adds the leading axis)
        squeeze_node = gb.make_unsqueeze({'data': initializer_input_id, "axes": [0]}, return_node=True)
        to_replace = [n for n in self.g.get_nodes() if n != squeeze_node]
        self.g.replace_all_inputs(initializer_input_id, squeeze_node.output[0], ops=to_replace)
        return squeeze_node.output[0]

    def create_single_rnn_node(self, context, i):
        """Create the ONNX LSTM node for layer ``i`` (always forward)."""
        # specify if the RNN is forward, reverse, or bidirectional.
        # Must be one of forward (default), reverse, or bidirectional.
        # Here we won't mark bidirectional/reverse, we will have another rewriter running
        # after this one, which will based on patterns to combine a forward LSTM and a
        # backward LSTM into a bidirectional one.
        num_direction = 1
        context.attributes[i]["direction"] = "forward"
        context.attributes[i]["hidden_size"] = context.hidden_size[i]
        inputs = context.onnx_input_ids[i]
        lstm_inputs = [
            inputs["X"], inputs["W"], inputs["R"], inputs["B"],
            inputs["sequence_lens"], inputs["initial_h"], inputs["initial_c"]]
        x_shape = self.g.get_shape(lstm_inputs[0])
        x_seq_length = x_shape[0]
        x_batch_size = x_shape[1]
        out_dtype = self.g.get_dtype(lstm_inputs[0])
        lstm_node = self.g.make_node("LSTM", lstm_inputs, attr=context.attributes[i], output_count=3,
                                     shapes=[[x_seq_length, num_direction, x_batch_size, context.hidden_size[i]],
                                             [num_direction, x_batch_size, context.hidden_size[i]],
                                             [num_direction, x_batch_size, context.hidden_size[i]]],
                                     dtypes=[out_dtype, out_dtype, out_dtype], op_name_scope=context.rnn_scope)
        return lstm_node

    def create_rnn_node(self, context):
        """Create all layers' LSTM nodes, chaining layer i's squeezed Y output
        into layer i+1's X input.  Returns the list of LSTM nodes."""
        gb = GraphBuilder(self.g)
        rnn_nodes = list()
        outputs = context.loop_properties.scan_outputs_exits
        logger.debug("number of rnn node outputs: %s", len(outputs))
        for i in range(self.num_lstm_layers):
            logger.debug("creating rnn node for layer: %s", i)
            rnn_nodes.append(self.create_single_rnn_node(context, i))
            output_id = rnn_nodes[i].output[0]
            rnn_output_shape = self.g.get_shape(output_id)
            squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]]
            # drop the num_directions axis before feeding the next layer
            squeeze_node = gb.make_squeeze({"data": output_id, "axes": [1]},
                                           shapes=[squeeze_output_shape],
                                           dtypes=[self.g.get_dtype(output_id)],
                                           return_node=True)
            if i + 1 < self.num_lstm_layers:
                logger.debug("setting input for layer: %s", i + 1)
                context.onnx_input_ids[i + 1]["X"] = squeeze_node.output[0]
        return rnn_nodes

    def _connect_lstm_yh_to_graph(self, context, i):
        # in tf, y_h output shape is: [batch, hidden]
        # in onnx, output shape is: [number_directions, batch, hidden]
        gb = GraphBuilder(self.g)
        exit_output = context.state_variables["ht" + str(i)].exit_output
        output_id = context.rnn_node[i].output[1]
        lstm_yh_shape = self.g.get_shape(output_id)
        squeeze_node = gb.make_squeeze({"data": output_id, "axes": [0]},
                                       shapes=[[lstm_yh_shape[1], lstm_yh_shape[2]]],
                                       dtypes=[self.g.get_dtype(output_id)],
                                       return_node=True)
        self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0])  # ops=self.g.get_nodes()

    def _connect_lstm_yc_to_graph(self, context, i):
        # in tf, y_c output shape is: [batch, hidden]
        # in onnx, output shape is: [number_directions, batch, hidden]
        gb = GraphBuilder(self.g)
        exit_output = context.state_variables["ct" + str(i)].exit_output
        output_id = context.rnn_node[i].output[2]
        lstm_yc_shape = self.g.get_shape(output_id)
        squeeze_node = gb.make_squeeze({"data": output_id, "axes": [0]},
                                       shapes=[[lstm_yc_shape[1], lstm_yc_shape[2]]],
                                       dtypes=[self.g.get_dtype(output_id)],
                                       return_node=True)
        self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0])  # ops=self.g.get_nodes()

    def _connect_lstm_ych_to_graph(self, context, i):
        # in tf, concat of y_c and y_h output shape is: [batch, hidden *2]
        # in onnx, y_c/y_h output shape is: [number_directions, batch, hidden]
        gb = GraphBuilder(self.g)
        exit_output = context.state_variables["ct_ht" + str(i)].exit_output
        lstm_node = context.rnn_node[i]
        yc_shape = self.g.get_shape(lstm_node.output[2])
        concat_output_shape = [yc_shape[0], yc_shape[1], yc_shape[2] * 2]
        concat = self.g.make_node("Concat", [lstm_node.output[2], lstm_node.output[1]],
                                  attr={"axis": 2}, shapes=[concat_output_shape],
                                  dtypes=[self.g.get_dtype(lstm_node.output[2])])
        squeeze_output_shape = [concat_output_shape[1], concat_output_shape[2]]
        squeeze_node = gb.make_squeeze({'data': concat.output[0], "axes": [0]},
                                       shapes=[squeeze_output_shape],
                                       dtypes=[self.g.get_dtype(concat.output[0])],
                                       return_node=True)
        self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0])  # ops=self.g.get_nodes()
| 20,582 | 43.359914 | 114 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/test_lstm.py | # SPDX-License-Identifier: Apache-2.0
"""Unit Tests for lstm."""
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from backend_test_base import Tf2OnnxBackendTestBase
from common import check_tf_min_version, unittest_main, check_opset_after_tf_version, \
skip_tf2, skip_tf_versions, check_op_count, skip_tfjs
from tf2onnx.tf_loader import is_tf2
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,cell-var-from-loop
# pylint: disable=invalid-name
if is_tf2():
# There is no LSTMBlockCell in tf-2.x
BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
BasicLSTMCell = tf.contrib.rnn.BasicLSTMCell
LSTMCell = tf.contrib.rnn.LSTMCell
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
# pylint: enable=invalid-name
class LSTMTests(Tf2OnnxBackendTestBase):
def run_test_case(self, *args, require_lstm_count=1, #pylint: disable=arguments-differ
graph_validator=None, **kwargs):
# TF LSTM has an unknown dim
tmp = self.config.allow_missing_shapes
self.config.allow_missing_shapes = True
def new_graph_validator(g):
good = True
if graph_validator is not None:
good = good and graph_validator(g)
if require_lstm_count is None or ":" not in g.outputs[0]:
# Skip checks for tflite graphs (no ":" in outputs)
return good
good = good and check_op_count(g, "LSTM", require_lstm_count, disabled=False)
# If LSTM op rewriter failed to work, Loop op will be shown in general.
good = good and check_op_count(g, "Loop", 0, disabled=False)
return good
try:
super().run_test_case(*args, graph_validator=new_graph_validator, **kwargs)
finally:
self.config.allow_missing_shapes = tmp
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_test_single_dynamic_lstm_state_is_tuple(self):
self.internal_test_single_dynamic_lstm(True)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_test_single_dynamic_lstm_state_is_not_tuple(self):
self.internal_test_single_dynamic_lstm(False)
def internal_test_single_dynamic_lstm(self, state_is_tuple):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_single_dynamic_lstm_time_major(self):
units = 5
seq_len = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * seq_len)
def func(x):
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = LSTMCell(
units,
initializer=initializer)
outputs, cell_state = dynamic_rnn(
cell,
x,
time_major=True,
dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_single_dynamic_lstm_forget_bias(self):
units = 5
seq_len = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * seq_len)
def func(x):
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = LSTMCell(
units,
initializer=initializer,
forget_bias=0.5)
outputs, cell_state = dynamic_rnn(
cell,
x,
time_major=True,
dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Select")
def test_single_dynamic_lstm_seq_length_is_const(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
state_is_tuple = True
def func(x):
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 3, 4, 5, 2, 1])
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Select")
def test_single_dynamic_lstm_seq_length_is_not_const(self):
for np_dtype in [np.int32, np.int64, np.float32]:
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np_dtype)
state_is_tuple = True
def func(x, seq_length):
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=tf.identity(seq_length))
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_single_dynamic_lstm_placeholder_input(self):
units = 5
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * 6)
state_is_tuple = True
def func(x):
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32) # by default zero initializer is used
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_single_dynamic_lstm_ch_zero_state_initializer(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
state_is_tuple = True
def func(x):
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
# defining initial state
initial_state = cell.zero_state(batch_size, dtype=tf.float32)
outputs, cell_state = dynamic_rnn(
cell,
x,
initial_state=initial_state,
dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_single_dynamic_lstm_consume_one_of_ch_tuple(self):
    """Only the `c` part of the (c, h) state tuple is consumed by the graph."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        state_is_tuple = True
        # no scope
        cell = LSTMCell(num_units,
                        initializer=init_ops.constant_initializer(0.5),
                        state_is_tuple=state_is_tuple)
        outputs, cell_state = dynamic_rnn(cell, x, dtype=tf.float32)
        return (tf.identity(outputs, name="output"),
                tf.identity(cell_state.c, name="cell_state_c"))

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0", "cell_state_c:0"], rtol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_single_dynamic_lstm_random_weights(self, state_is_tuple=True):
    """Single dynamic LSTM with seeded random-uniform weights in [-1, 1]."""
    n_hidden = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        # seeded so TF and ONNX see the same weights
        init = tf.random_uniform_initializer(-1.0, 1.0, seed=42)
        # no scope
        cell = LSTMCell(n_hidden, initializer=init, state_is_tuple=state_is_tuple)
        outputs, cell_state = dynamic_rnn(cell, x, dtype=tf.float32)
        return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0", "cell_state:0"], rtol=0.0001)
@check_opset_after_tf_version("1.15", 8, "might need Select")
def test_single_dynamic_lstm_random_weights2(self, state_is_tuple=True):
    """Wider LSTM (128 hidden units) with random input and seeded weights."""
    n_hidden = 128
    batch = 1
    seq = np.random.randn(1, 133).astype('f')
    batched = np.stack([seq] * batch)

    def func(x):
        init = tf.random_uniform_initializer(0.0, 1.0, seed=42)
        # no scope
        cell = LSTMCell(n_hidden, initializer=init, state_is_tuple=state_is_tuple)
        outputs, cell_state = dynamic_rnn(cell, x, dtype=tf.float32)
        return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0", "cell_state:0"], rtol=0.01)
@check_opset_after_tf_version("1.15", 8, "might need Select")
def test_multiple_dynamic_lstm_state_is_tuple(self):
    """Two dynamic LSTMs in one graph, with state_is_tuple=True."""
    self.internal_test_multiple_dynamic_lstm_with_parameters(True)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_multiple_dynamic_lstm_state_is_not_tuple(self):
    """Two dynamic LSTMs in one graph, with state_is_tuple=False."""
    self.internal_test_multiple_dynamic_lstm_with_parameters(False)
def internal_test_multiple_dynamic_lstm_with_parameters(self, state_is_tuple):
    """Build two dynamic LSTMs (one unscoped, one under scope "root1") and
    verify the converter finds and rewrites both (require_lstm_count=2)."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        initializer = init_ops.constant_initializer(0.5)
        all_outputs = []
        all_states = []
        # no scope
        cell = LSTMCell(num_units, initializer=initializer,
                        state_is_tuple=state_is_tuple)
        outputs, cell_state = dynamic_rnn(cell, x, dtype=tf.float32)
        all_outputs.append(outputs)
        all_states.append(cell_state)
        # given scope
        cell = LSTMCell(num_units, initializer=initializer,
                        state_is_tuple=state_is_tuple)
        with variable_scope.variable_scope("root1") as scope:
            outputs, cell_state = dynamic_rnn(cell, x, dtype=tf.float32,
                                              sequence_length=[4] * 6,
                                              scope=scope)
        all_outputs.append(outputs)
        all_states.append(cell_state)
        return (tf.identity(all_outputs, name="output"),
                tf.identity(all_states, name="cell_state"))

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0", "cell_state:0"], rtol=1e-06,
                       require_lstm_count=2)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
@skip_tf2()  # Still failing likely due to inconsistent random number initialization
def test_dynamic_basiclstm(self):
    """Dynamic RNN built from BasicLSTMCell (default weight initialization)."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        basic_cell = BasicLSTMCell(num_units, state_is_tuple=True)
        outputs, cell_state = dynamic_rnn(basic_cell, x, dtype=tf.float32)
        return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0", "cell_state:0"], rtol=0.0001, atol=1e-06)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_dynamic_lstm_output_consumed_only(self):
    """Only the LSTM output sequence is consumed; the state is discarded."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        init = tf.random_uniform_initializer(0.0, 1.0, seed=42)
        cell = LSTMCell(num_units, initializer=init, state_is_tuple=True)
        outputs, _ = dynamic_rnn(cell, x, dtype=tf.float32)
        return tf.identity(outputs, name="output")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0"], rtol=0.0001, atol=1e-07)
@check_opset_after_tf_version("1.15", 8, "might need Scan")
def test_dynamic_lstm_state_consumed_only(self):
    """Only the final LSTM state is consumed; the output sequence is discarded."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        init = tf.random_uniform_initializer(0.0, 1.0, seed=42)
        cell = LSTMCell(num_units, initializer=init, state_is_tuple=True)
        _, cell_state = dynamic_rnn(cell, x, dtype=tf.float32)
        return tf.identity(cell_state, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["cell_state:0"], rtol=0.0001)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
def test_dynamic_bilstm_state_is_tuple(self):
    """Bidirectional dynamic LSTM with state_is_tuple=True."""
    self.internal_test_dynamic_bilstm_with_parameters(True)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
def test_dynamic_bilstm_state_is_not_tuple(self):
    """Bidirectional dynamic LSTM with state_is_tuple=False."""
    self.internal_test_dynamic_bilstm_with_parameters(False)
def internal_test_dynamic_bilstm_with_parameters(self, state_is_tuple):
    """Bidirectional dynamic LSTM; both outputs and states are consumed."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        initializer = init_ops.constant_initializer(0.5)
        # bilstm, no scope
        # state_is_tuple will impact Pack node (for cell_state)'s usage pattern
        fw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        bw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        outputs, cell_state = bidirectional_dynamic_rnn(fw_cell, bw_cell, x,
                                                        dtype=tf.float32)
        return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0", "cell_state:0"], rtol=1e-06)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
def test_dynamic_bilstm_output_consumed_only(self, state_is_tuple=True):
    """Bidirectional dynamic LSTM where only the outputs are consumed."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        initializer = init_ops.constant_initializer(0.5)
        # bilstm, no scope
        # state_is_tuple will impact Pack node (for cell_state)'s usage pattern
        fw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        bw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        outputs, _ = bidirectional_dynamic_rnn(fw_cell, bw_cell, x,
                                               dtype=tf.float32)
        return tf.identity(outputs, name="output")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0"], rtol=1e-06)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
def test_dynamic_bilstm_state_consumed_only(self, state_is_tuple=True):
    """Bidirectional dynamic LSTM where only the final states are consumed."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        initializer = init_ops.constant_initializer(0.5)
        # bilstm, no scope
        # state_is_tuple will impact Pack node (for cell_state)'s usage pattern
        fw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        bw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        _, cell_state = bidirectional_dynamic_rnn(fw_cell, bw_cell, x,
                                                  dtype=tf.float32)
        return tf.identity(cell_state, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["cell_state:0"], rtol=1e-06)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
def test_dynamic_bilstm_outputs_partially_consumed(self, state_is_tuple=True):
    """Consume the forward output and the backward state only."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        initializer = init_ops.constant_initializer(0.5)
        # bilstm, no scope
        # state_is_tuple will impact Pack node (for cell_state)'s usage pattern
        fw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        bw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        (output_fw, _), (_, state_bw) = bidirectional_dynamic_rnn(
            fw_cell, bw_cell, x, dtype=tf.float32)
        return tf.identity(output_fw, name="output"), tf.identity(state_bw, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output:0", "cell_state:0"], rtol=1e-06)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
def test_dynamic_bilstm_unknown_batch_size(self, state_is_tuple=True):
    """Bidirectional dynamic LSTM converted with an unknown batch dimension."""
    num_units = 5
    batch = 6
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        initializer = init_ops.constant_initializer(0.5)
        fw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        bw_cell = LSTMCell(num_units, initializer=initializer,
                           state_is_tuple=state_is_tuple)
        _, cell_state = bidirectional_dynamic_rnn(fw_cell, bw_cell, x,
                                                  dtype=tf.float32)
        return tf.identity(cell_state, name="cell_state")

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["cell_state:0"], rtol=1e-06)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
@skip_tf_versions("2.1", "Bug in TF 2.1")
def test_dynamic_multi_bilstm_with_same_input_hidden_size(self):
    """Two bidirectional LSTMs sharing one input but with different hidden
    sizes (5 and 10); the converter must rewrite both (require_lstm_count=2)."""
    batch = 10
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch)

    def func(x):
        # four distinct seeds so all cells get different weights
        initializer1 = tf.random_uniform_initializer(0.0, 1.0, seed=42)
        initializer2 = tf.random_uniform_initializer(0.0, 1.0, seed=43)
        initializer3 = tf.random_uniform_initializer(0.0, 1.0, seed=44)
        initializer4 = tf.random_uniform_initializer(0.0, 1.0, seed=45)
        units = 5
        cell1 = LSTMCell(units, name="cell1", initializer=initializer1)
        cell2 = LSTMCell(units, name="cell2", initializer=initializer2)
        outputs_1, cell_state_1 = bidirectional_dynamic_rnn(
            cell1, cell2, x, dtype=tf.float32, scope="bilstm_1")
        units = 10
        cell3 = LSTMCell(units, name="cell3", initializer=initializer3)
        cell4 = LSTMCell(units, name="cell4", initializer=initializer4)
        outputs_2, cell_state_2 = bidirectional_dynamic_rnn(
            cell3, cell4, x, dtype=tf.float32, scope="bilstm_2")
        return (tf.identity(outputs_1, name="output_1"),
                tf.identity(cell_state_1, name="cell_state_1"),
                tf.identity(outputs_2, name="output_2"),
                tf.identity(cell_state_2, name="cell_state_2"))

    self.run_test_case(func, {"input_1:0": batched}, ["input_1:0"],
                       ["output_1:0", "cell_state_1:0", "output_2:0", "cell_state_2:0"],
                       rtol=1e-3, atol=1e-06, require_lstm_count=2)
@check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
@skip_tf_versions("2.1", "Bug in TF 2.1")
def test_dynamic_multi_bilstm_with_same_input_seq_len(self):
    """Two bidirectional LSTMs sharing one input, each with its own
    sequence_length tensor tiled from a scalar placeholder."""
    units = 5
    batch_size = 10
    seq = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
    batched = np.stack([seq] * batch_size)
    seq_len_val = np.array([3], dtype=np.int32)

    def func(x, y1, y2):
        initializer1 = tf.random_uniform_initializer(0.0, 1.0, seed=42)
        initializer2 = tf.random_uniform_initializer(0.0, 1.0, seed=43)
        seq_len1 = tf.tile(y1, [batch_size])
        cell1 = LSTMCell(units, initializer=initializer1)
        cell2 = LSTMCell(units, initializer=initializer2)
        outputs_1, cell_state_1 = bidirectional_dynamic_rnn(
            cell1, cell2, x, sequence_length=seq_len1,
            dtype=tf.float32, scope="bilstm_1")
        # second bilstm: fresh seeds and its own sequence lengths
        initializer1 = tf.random_uniform_initializer(0.0, 1.0, seed=44)
        initializer2 = tf.random_uniform_initializer(0.0, 1.0, seed=45)
        seq_len2 = tf.tile(y2, [batch_size])
        cell1 = LSTMCell(units, initializer=initializer1)
        cell2 = LSTMCell(units, initializer=initializer2)
        outputs_2, cell_state_2 = bidirectional_dynamic_rnn(
            cell1, cell2, x, sequence_length=seq_len2,
            dtype=tf.float32, scope="bilstm_2")
        return (tf.identity(outputs_1, name="output_1"),
                tf.identity(cell_state_1, name="cell_state_1"),
                tf.identity(outputs_2, name="output_2"),
                tf.identity(cell_state_2, name="cell_state_2"))

    feed_dict = {"input_1:0": batched, "input_2:0": seq_len_val, "input_3:0": seq_len_val}
    self.run_test_case(func, feed_dict, ["input_1:0", "input_2:0", "input_3:0"],
                       ["output_1:0", "cell_state_1:0", "output_2:0", "cell_state_2:0"],
                       rtol=1e-3, atol=1e-06, require_lstm_count=2)
@check_tf_min_version("2.0")
@skip_tf_versions("2.1", "Bug in TF 2.1")
def test_keras_lstm(self):
    """Keras LSTM layer returning both sequences and state, seeded weights."""
    in_shape = [10, 3]
    x_val = np.random.uniform(size=[2, 10, 3]).astype(np.float32)
    model_in = tf.keras.layers.Input(tuple(in_shape), batch_size=2)
    lstm_out = tf.keras.layers.LSTM(
        units=5,
        return_sequences=True,
        return_state=True,
        kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
        recurrent_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
        bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43)
    )(model_in)
    model = tf.keras.models.Model(inputs=model_in, outputs=lstm_out)

    def func(x):
        y = model(x)
        # names for input and outputs for tests
        return tf.identity(y[0], name="output"), tf.identity(y[1], name="output1")

    self.run_test_case(func, {"input:0": x_val}, [], ["output:0", "output1:0"],
                       rtol=1e-05, atol=1e-06)
@check_tf_min_version("2.0")
@skip_tf_versions("2.1", "Bug in TF 2.1")
def test_keras_lstm_recurrent_activation_is_hard_sigmoid(self):
    """Keras LSTM using hard_sigmoid as the recurrent activation."""
    in_shape = [10, 3]
    x_val = np.random.uniform(size=[2, 10, 3]).astype(np.float32)
    model_in = tf.keras.layers.Input(tuple(in_shape), batch_size=2)
    lstm_out = tf.keras.layers.LSTM(
        units=5,
        return_sequences=True,
        return_state=True,
        kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
        recurrent_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
        bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43),
        recurrent_activation="hard_sigmoid"
    )(model_in)
    model = tf.keras.models.Model(inputs=model_in, outputs=lstm_out)

    def func(x):
        y = model(x)
        return tf.identity(y[0], name="output"), tf.identity(y[1], name="output1")

    self.run_test_case(func, {"input:0": x_val}, [], ["output:0", "output1:0"],
                       rtol=1e-05, atol=1e-06)
@check_tf_min_version("2.0")
@skip_tfjs("TFJS converts model incorrectly")
def test_keras_lstm_sigmoid_dropout(self):
    """Sequential model with an LSTM using sigmoid activation and dropout
    (dropout is inactive at inference, so results stay deterministic)."""
    in_shape = [16, 16]
    batch_size = 2
    x_val = np.random.uniform(size=[batch_size] + in_shape).astype(np.float32)
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Input(shape=tuple(in_shape), name="input"))
    model.add(tf.keras.layers.LSTM(16, activation='sigmoid', dropout=0.1))

    def func(x):
        y = model(x)
        return tf.identity(y[0], name="output")

    self.run_test_case(func, {"input:0": x_val}, [], ["output:0"],
                       rtol=1e-05, atol=1e-06)
if __name__ == '__main__':
unittest_main()
| 32,175 | 39.320802 | 119 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/test_api.py | # SPDX-License-Identifier: Apache-2.0
"""Unit tests using onnx backends."""
# pylint: disable=missing-docstring,unused-import
import os
import zipfile
import numpy as np
import tensorflow as tf
from onnx import helper
from common import check_tf_min_version, unittest_main, requires_custom_ops, check_opset_min_version, skip_tf_versions
from tf2onnx.tf_loader import is_tf2
from backend_test_base import Tf2OnnxBackendTestBase
import tf2onnx
class ApiTests(Tf2OnnxBackendTestBase):
    """Test tf2onnx python API."""

    def setUp(self):
        """Create the per-test data directory before each test."""
        super().setUp()
        os.makedirs(self.test_data_directory, exist_ok=True)

    def create_model(self):
        """Build a small compiled keras model with two inputs (image + scalar).

        The scalar input `n` multiplies the conv output so conversion must
        handle a non-image input alongside the image one.
        """
        strides = 2
        inp = tf.keras.Input(shape=[None, None, 3], name="input")
        n = tf.keras.Input(shape=(), name="n")
        conv = tf.keras.layers.Conv2D(
            filters=64, kernel_size=7, strides=strides, name="conv1")(inp)
        conv = tf.keras.layers.BatchNormalization()(conv)
        conv = conv * n
        model = tf.keras.Model(inputs=[inp, n], outputs=conv,
                               name="test_model", trainable=True)
        optimizer = tf.keras.optimizers.Adam(1e-4, 1e-8)
        model.compile(optimizer=optimizer, loss="mean_squared_error")
        return model

    def _test_keras_api(self, large_model=False):
        """Convert a keras model via tf2onnx.convert.from_keras and compare
        ONNX runtime output against keras prediction.

        large_model=True exercises the external-tensor (zip) model format.
        """
        model = self.create_model()
        shape = [1, 224, 224, 3]
        x = np.arange(np.prod(shape)).reshape(shape).astype(np.float32)
        n = np.array([2.], dtype=np.float32)
        # keras reference result, computed before conversion
        ky = model.predict([x, n])
        spec = (tf.TensorSpec((None, 224, 224, 3), tf.float32, name="input"),
                tf.TensorSpec((), tf.float32, name="n"))
        if large_model:
            output_path = os.path.join(self.test_data_directory, "model.zip")
        else:
            output_path = os.path.join(self.test_data_directory, "model.onnx")
        model_proto, _ = tf2onnx.convert.from_keras(
            model, input_signature=spec, opset=self.config.opset, large_model=large_model, output_path=output_path)
        # input_names = [n.name for n in model_proto.graph.input]
        output_names = [n.name for n in model_proto.graph.output]
        if large_model:
            # need to unpack the zip for run_onnxruntime()
            with zipfile.ZipFile(output_path, 'r') as z:
                z.extractall(os.path.dirname(output_path))
            output_path = os.path.join(os.path.dirname(output_path), "__MODEL_PROTO.onnx")
        oy = self.run_onnxruntime(output_path, {"input": x, "n": n}, output_names)
        self.assertAllClose(ky, oy[0], rtol=0.3, atol=0.1)
        # make sure the original keras model wasn't trashed
        ky1 = model.predict([x, n])
        self.assertAllClose(ky1, oy[0], rtol=0.3, atol=0.1)

    @check_tf_min_version("1.15")
    def test_keras_api(self):
        """from_keras with the default (single-file) model format."""
        self._test_keras_api(large_model=False)

    @check_tf_min_version("1.15")
    @skip_tf_versions(["2.0", "2.1"], "TF 2 requires 2.2 for large model freezing")
    def test_keras_api_large(self):
        """from_keras with the external-tensor (large model / zip) format."""
        self._test_keras_api(large_model=True)

    @requires_custom_ops()
    @check_tf_min_version("1.15")
    @check_opset_min_version(11, "SparseToDense")
    @skip_tf_versions(["2.1"], "TF 2.1 keras model doesn't work; table not initialized")
    def test_keras_hashtable(self):
        """Keras model with feature columns that use a vocabulary hash table;
        requires onnxruntime custom ops (ai.onnx.contrib opset)."""
        feature_cols = [
            tf.feature_column.numeric_column("f_inp", dtype=tf.float32),
            tf.feature_column.indicator_column(
                tf.feature_column.categorical_column_with_vocabulary_list("s_inp", ["a", "b", "z"], num_oov_buckets=1)
            )
        ]
        feature_layer = tf.keras.layers.DenseFeatures(feature_cols)
        input_dict = {}
        input_dict["f_inp"] = tf.keras.Input(name="f_inp", shape=(1,), dtype=tf.float32)
        input_dict["s_inp"] = tf.keras.Input(name="s_inp", shape=(1,), dtype=tf.string)
        inputs = list(input_dict.values())
        standard_features = feature_layer(input_dict)
        hidden1 = tf.keras.layers.Dense(512, activation='relu')(standard_features)
        output = tf.keras.layers.Dense(10, activation='softmax')(hidden1)
        model = tf.keras.Model(inputs=inputs, outputs=output)
        model.compile(optimizer='adam', loss=tf.keras.losses.mean_squared_error)
        inp1 = np.array([[2.], [3.]], dtype=np.float32)
        inp2 = np.array([["a"], ["b"]], dtype=str)
        if not is_tf2():
            # TF1 requires explicit table initialization before predict
            tf.keras.backend.get_session().run(tf.tables_initializer(name='init_all_tables'))
        k_res = model.predict([inp1, inp2])
        spec = (tf.TensorSpec((None, 1), dtype=tf.float32, name="f_inp"),
                tf.TensorSpec((None, 1), tf.string, name="s_inp"))
        output_path = os.path.join(self.test_data_directory, "model.onnx")
        model_proto, _ = tf2onnx.convert.from_keras(
            model, input_signature=spec, opset=self.config.opset, output_path=output_path,
            extra_opset=[helper.make_opsetid("ai.onnx.contrib", 1)])
        output_names = [n.name for n in model_proto.graph.output]
        o_res = self.run_onnxruntime(output_path, {"f_inp": inp1, "s_inp": inp2}, output_names, use_custom_ops=True)
        self.assertAllClose(k_res, o_res[0], rtol=0.3, atol=0.1)
        # make sure the original keras model wasn't trashed
        k_res2 = model.predict([inp1, inp2])
        self.assertAllClose(k_res2, o_res[0], rtol=0.3, atol=0.1)

    @check_tf_min_version("2.0")
    def test_function(self):
        """Convert a tf.function via tf2onnx.convert.from_function."""
        def func(x, y):
            return x * y

        output_path = os.path.join(self.test_data_directory, "model.onnx")
        shape = [1, 10, 10]
        x = np.arange(np.prod(shape)).reshape(shape).astype(np.float32)
        y = np.arange(np.prod(shape)).reshape(shape).astype(np.float32)
        spec = (tf.TensorSpec((None, 10, 10), tf.float32, name="x"),
                tf.TensorSpec((None, 10, 10), tf.float32, name="y"))
        concrete_func = tf.function(func, input_signature=spec)
        ky = func(x, y)
        model_proto, _ = tf2onnx.convert.from_function(concrete_func, input_signature=spec,
                                                       opset=self.config.opset, output_path=output_path)
        output_names = [n.name for n in model_proto.graph.output]
        oy = self.run_onnxruntime(output_path, {"x": x, "y": y}, output_names)
        self.assertAllClose(ky, oy[0], rtol=0.3, atol=0.1)

    @check_tf_min_version("2.0")
    def test_function_non_tensor_inputs(self):
        """from_function where the signature mixes tensors with python values
        (object, bool, int) that must be captured as constants."""
        class Foo:
            a = 42

        @tf.function
        def func(foo, a, x, b, w):
            if a:
                return x + foo.a + b / w
            return x + b

        output_path = os.path.join(self.test_data_directory, "model.onnx")
        x = np.arange(20).reshape([2, 10]).astype(np.float32)
        w = np.arange(10).reshape([10]).astype(np.float32)
        res_tf = func(Foo(), True, x, 123, w)
        spec = (
            Foo(),
            True,
            tf.TensorSpec((2, None), tf.float32, name="x"),
            123,
            tf.TensorSpec((None), tf.float32, name="w")
        )
        model_proto, _ = tf2onnx.convert.from_function(func, input_signature=spec,
                                                       opset=self.config.opset, output_path=output_path)
        output_names = [n.name for n in model_proto.graph.output]
        res_onnx = self.run_onnxruntime(output_path, {"x": x, "w": w}, output_names)
        self.assertAllClose(res_tf, res_onnx[0], rtol=1e-5, atol=1e-5)

    @check_tf_min_version("2.0")
    def test_function_nparray(self):
        """from_function where the input signature is given as a numpy array
        rather than a TensorSpec."""
        @tf.function
        def func(x):
            return tf.math.sqrt(x)

        output_path = os.path.join(self.test_data_directory, "model.onnx")
        x = np.asarray([1.0, 2.0])
        res_tf = func(x)
        spec = np.asarray([[1.0, 2.0]])
        model_proto, _ = tf2onnx.convert.from_function(func, input_signature=spec,
                                                       opset=self.config.opset, output_path=output_path)
        output_names = [n.name for n in model_proto.graph.output]
        res_onnx = self.run_onnxruntime(output_path, {'x': x}, output_names)
        self.assertAllClose(res_tf, res_onnx[0], rtol=1e-5, atol=1e-5)

    @check_tf_min_version("1.15")
    def _test_graphdef(self):
        """Disabled (leading underscore): convert a graph_def built from a
        concrete function via tf2onnx.convert.from_graph."""
        def func(x, y):
            return x * y

        output_path = os.path.join(self.test_data_directory, "model.onnx")
        shape = [1, 10, 10]
        x = np.arange(np.prod(shape)).reshape(shape).astype(np.float32)
        y = np.arange(np.prod(shape)).reshape(shape).astype(np.float32)
        ky = func(x, y)
        # make a graphdef
        spec = (tf.TensorSpec((None, 10, 10), tf.float32, name="x"),
                tf.TensorSpec((None, 10, 10), tf.float32, name="y"))
        function = tf.function(func, input_signature=spec)
        concrete_func = function.get_concrete_function(*spec)
        graph_def = concrete_func.graph.as_graph_def(add_shapes=True)
        model_proto, _ = tf2onnx.convert.from_graph(graph_def, input_names=["x:0", "y:0"], output_names=["Identity:0"],
                                                    opset=self.config.opset, output_path=output_path)
        output_names = [n.name for n in model_proto.graph.output]
        oy = self.run_onnxruntime(output_path, {"x:0": x, "y:0": y}, output_names)
        self.assertAllClose(ky, oy[0], rtol=0.3, atol=0.1)

    @check_tf_min_version("1.15")
    def test_graphdef(self):
        """Convert a frozen graphdef file and verify tensors_to_rename takes
        effect (output renamed 'pred:0' -> 'pred')."""
        output_path = os.path.join(self.test_data_directory, "model.onnx")
        graph_def, _, _ = tf2onnx.tf_loader.from_graphdef(
            "tests/models/regression/graphdef/frozen.pb", ["X:0"], ["pred:0"])
        x = np.array([5.], dtype=np.float32)
        tensors_to_rename = {"pred:0": "pred", "X:0": "X"}
        model_proto, _ = tf2onnx.convert.from_graph_def(graph_def, input_names=["X:0"], output_names=["pred:0"],
                                                        opset=self.config.opset, output_path=output_path,
                                                        tensors_to_rename=tensors_to_rename)
        output_names = [n.name for n in model_proto.graph.output]
        oy = self.run_onnxruntime(output_path, {"X": x}, output_names)
        self.assertTrue(output_names[0] == "pred")
        self.assertAllClose([2.1193342], oy[0], rtol=0.1, atol=0.1)

    @check_tf_min_version("2.0")
    def test_tflite(self):
        """Convert a tflite model file with explicit input/output names."""
        output_path = os.path.join(self.test_data_directory, "model.onnx")
        x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
        model_proto, _ = tf2onnx.convert.from_tflite("tests/models/regression/tflite/test_api_model.tflite",
                                                     input_names=["input"], output_names=["output"],
                                                     output_path=output_path)
        actual_output_names = [n.name for n in model_proto.graph.output]
        oy = self.run_onnxruntime(output_path, {"input": x_val}, actual_output_names)
        self.assertTrue(actual_output_names[0] == "output")
        # the tflite test model computes x + x
        exp_result = tf.add(x_val, x_val)
        self.assertAllClose(exp_result, oy[0], rtol=0.1, atol=0.1)

    @check_tf_min_version("2.0")
    def test_tflite_without_input_output_names(self):
        """Convert a tflite model letting tf2onnx discover input/output names."""
        output_path = os.path.join(self.test_data_directory, "model.onnx")
        x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
        model_proto, _ = tf2onnx.convert.from_tflite("tests/models/regression/tflite/test_api_model.tflite",
                                                     output_path=output_path)
        actual_input_names = [n.name for n in model_proto.graph.input]
        actual_output_names = [n.name for n in model_proto.graph.output]
        oy = self.run_onnxruntime(output_path, {actual_input_names[0]: x_val}, output_names=None)
        self.assertTrue(actual_output_names[0] == "output")
        # the tflite test model computes x + x
        exp_result = tf.add(x_val, x_val)
        self.assertAllClose(exp_result, oy[0], rtol=0.1, atol=0.1)
if __name__ == '__main__':
unittest_main()
| 12,073 | 44.390977 | 119 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/test_example.py | # SPDX-License-Identifier: Apache-2.0
"""Test examples."""
import os
import subprocess
import unittest
from common import check_opset_min_version, check_opset_max_version, check_tf_min_version
class TestExample(unittest.TestCase):
    """Run the scripts under examples/ and check their printed output."""

    def run_example(self, name, expected=None):
        """Execute examples/<name> with the current python and assert that
        every string in `expected` appears on its stdout."""
        examples_dir = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "..", "examples")
        script_path = os.path.join(examples_dir, name)
        if not os.path.exists(script_path):
            raise FileNotFoundError(script_path)
        # check=True raises CalledProcessError on a nonzero exit code,
        # so the returncode assertion below is a belt-and-braces check.
        completed = subprocess.run(['python', script_path],
                                   capture_output=True, check=True)
        self.assertEqual(0, completed.returncode)
        stdout_text = completed.stdout.decode('ascii')
        if 'tensorflow_hub not installed' in stdout_text:
            # example skipped itself; nothing more to verify
            return
        stderr_text = completed.stderr.decode('ascii')
        self.assertTrue(stderr_text is not None)
        if expected is not None:
            for fragment in expected:
                self.assertIn(fragment, stdout_text)

    @check_tf_min_version("2.3", "use tf.keras")
    @check_opset_min_version(12)
    @check_opset_max_version(13)
    def test_end2end_tfkeras(self):
        """End-to-end keras example converts and reports opset 12."""
        self.run_example(
            "end2end_tfkeras.py",
            expected=["ONNX model is saved at simple_rnn.onnx",
                      "Optimizing ONNX model",
                      "Using opset <onnx, 12>"])

    @check_tf_min_version("2.3", "use tf.keras")
    @check_opset_min_version(12)
    @check_opset_max_version(13)
    def test_end2end_tfhub(self):
        """End-to-end tfhub example converts and reports opset 12."""
        self.run_example(
            "end2end_tfhub.py",
            expected=["ONNX model is saved at efficientnetb0clas.onnx",
                      "Optimizing ONNX model",
                      "Using opset <onnx, 12>"])

    @check_tf_min_version("2.3", "use tf.keras")
    @check_opset_min_version(13)
    @check_opset_max_version(13)
    def test_getting_started(self):
        """Getting-started example runs to completion."""
        self.run_example(
            "getting_started.py",
            expected=["Conversion succeeded"])
if __name__ == '__main__':
unittest.main()
| 2,053 | 30.6 | 89 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/huggingface.py | # SPDX-License-Identifier: Apache-2.0
"""
Unit tests for huggingface tensorflow transformers.
tested with tf-2.4.1, transformers-4.5.1
"""
# pylint: disable=missing-docstring,invalid-name,unused-argument
# pylint: disable=bad-classmethod-argument,wrong-import-position
# pylint: disable=import-outside-toplevel
import os
import time
import unittest
import zipfile
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import numpy as np
import onnxruntime as rt
import tensorflow as tf
import tf2onnx
compare_perf = True
time_to_run = 10
time_step = 10
class TestTransformers(unittest.TestCase):
def setUp(self):
    """Reset the default TF graph before each test."""
    tf.compat.v1.reset_default_graph()
@classmethod
def assertAllClose(cls, expected, actual, **kwargs):
    """Assert elementwise closeness via numpy (kwargs e.g. rtol/atol)."""
    np.testing.assert_allclose(expected, actual, **kwargs)
def run_onnxruntime(self, model_path, input_dict, output_names):
    """Run the converted model with onnxruntime; optionally benchmark it.

    When the module-level flag `compare_perf` is set, the model is re-run
    in batches of `time_step` for roughly `time_to_run` seconds and the
    average latency is printed.
    """
    providers = ['CPUExecutionProvider']
    if rt.get_device() == "GPU":
        gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
        # use CUDA only when the test isn't pinned to a single device
        if gpus is None or len(gpus) > 1:
            providers = ['CUDAExecutionProvider']
    opt = rt.SessionOptions()
    # in case of issues with the runtime, one can enable more logging
    # opt.log_severity_level = 0
    # opt.log_verbosity_level = 255
    # opt.enable_profiling = True
    session = rt.InferenceSession(model_path, sess_options=opt, providers=providers)
    results = session.run(output_names, input_dict)
    if compare_perf:
        n = 0
        time_start = time.time()
        deadline = time_start + time_to_run
        while time.time() < deadline:
            for _ in range(time_step):
                _ = session.run(output_names, input_dict)
            n += time_step
        val = (time.time() - time_start) / n
        print(f'= avg ort name={self.name}, time={val}, n={n}')
    return results
def run_keras(self, model, inputs):
    """Run the keras/transformers model; optionally benchmark it.

    Mirrors run_onnxruntime's timing loop so the two averages are comparable.
    """
    pred = model(inputs)
    if compare_perf:
        n = 0
        time_start = time.time()
        deadline = time_start + time_to_run
        while time.time() < deadline:
            for _ in range(time_step):
                _ = model(inputs)
            n += time_step
        val = (time.time() - time_start) / n
        print(f'= avg keras name={self.name}, time={val}, n={n}')
    return pred
def run_test(self, model, input_dict, rtol=1e-2, atol=1e-4, input_signature=None,
             outputs=None, large=True, extra_input=None):
    """Convert `model` to ONNX, run both keras and onnxruntime, and compare.

    Args:
        model: keras/transformers model to convert.
        input_dict: dict of input-name -> tf tensor fed to the model.
        rtol, atol: tolerances for the output comparison.
        input_signature: tf.TensorSpec tuple passed to the converter.
        outputs: output names to compare; defaults to all model outputs.
        large: requested model format (currently forced to True below).
        extra_input: extra numpy inputs merged into the ORT feed only.
    """
    # always use external model format for consistency
    large = True
    self.name = self._testMethodName.replace("test_", "")
    print(f"==== {self.name}")
    dst = os.path.join("/tmp", "test_transformers", self.name)
    os.makedirs(dst, exist_ok=True)

    # run keras model
    print("= running keras")
    tf_results = self.run_keras(model, input_dict)
    if not outputs:
        # no outputs given ... take all
        outputs = list(tf_results.keys())

    # filter outputs
    tf_results = [v.numpy() for k, v in tf_results.items() if k in outputs]

    # input tensors to numpy
    input_dict = {k: v.numpy() for k, v in input_dict.items()}

    model_path = os.path.join(dst, self.name)
    if not large:
        model_path = model_path + ".onnx"
    print("= convert")
    time_start = time.time()
    _, _ = tf2onnx.convert.from_keras(model, input_signature=input_signature,
                                      opset=13, large_model=large, output_path=model_path)
    time_stop = time.time()
    # fixed typo: "convertsion" -> "conversion"
    print(f"= conversion took {time_stop - time_start}")

    if large:
        # need to unpack the zip for run_onnxruntime()
        with zipfile.ZipFile(model_path, 'r') as z:
            z.extractall(os.path.dirname(model_path))
        model_path = os.path.join(os.path.dirname(model_path), "__MODEL_PROTO.onnx")

    print("= running ort")
    if extra_input:
        input_dict.update(extra_input)
    onnx_results = self.run_onnxruntime(model_path, input_dict, outputs)
    self.assertAllClose(tf_results, onnx_results, rtol=rtol, atol=atol)
def spec_and_pad(self, input_dict, max_length=None, batchdim=None):
    """Build TensorSpecs for tokenizer outputs and optionally right-pad
    every rank-2 tensor to `max_length` along its second axis.

    Returns (tuple_of_specs, padded_input_dict).
    """
    specs = []
    padded = {}
    for name, tensor in input_dict.items():
        shape = tensor.shape
        if len(shape) == 2:
            # rank-2 inputs get a dynamic/fixed sequence dimension
            shape = [batchdim, max_length] if max_length else [batchdim, None]
        specs.append(tf.TensorSpec(shape, dtype=tensor.dtype, name=name))
        if max_length:
            cur_len = len(tensor[0])
            tensor = tf.pad(tensor, [[0, 0], [0, max_length - cur_len]])
        padded[name] = tensor
    return tuple(specs), padded
# BERT
def test_TFBertModel(self):
    """Convert TFBertForQuestionAnswering (bert-base-cased) and compare ORT vs Keras."""
    from transformers import BertTokenizer, TFBertForQuestionAnswering
    checkpoint = 'bert-base-cased'
    tokenizer = BertTokenizer.from_pretrained(checkpoint)
    model = TFBertForQuestionAnswering.from_pretrained(checkpoint)
    question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
    input_dict = tokenizer(question, text, return_tensors='tf')
    spec, input_dict = self.spec_and_pad(input_dict)
    self.run_test(model, input_dict, input_signature=spec)
# BERT question-answering variants: tokenize a fixed QA pair and round-trip
# the model through ONNX conversion via run_test().
def test_TFBertFineTunedSquadModel(self):
# large BERT checkpoint fine-tuned on SQuAD
from transformers import BertTokenizer, TFBertForQuestionAnswering
name = "bert-large-uncased-whole-word-masking-finetuned-squad"
tokenizer = BertTokenizer.from_pretrained(name)
model = TFBertForQuestionAnswering.from_pretrained(name)
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_dict = tokenizer(question, text, return_tensors='tf')
spec, input_dict = self.spec_and_pad(input_dict)
self.run_test(model, input_dict, input_signature=spec)
# NOTE(review): method name has a typo ("Disill" -> "Distil"); kept as-is to
# preserve existing test ids.
def test_TFDisillBertModel(self):
from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering
name = 'distilbert-base-uncased-distilled-squad'
tokenizer = DistilBertTokenizer.from_pretrained(name)
model = TFDistilBertForQuestionAnswering.from_pretrained(name)
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_dict = tokenizer(question, text, return_tensors='tf')
spec, input_dict = self.spec_and_pad(input_dict)
# only compare the QA head outputs
outputs = ["start_logits", "end_logits"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, rtol=1e-5)
## FUNNEL
def _test_TFFunnel(self, size, large=False):
    """Run the conversion test for a TFFunnelForQuestionAnswering checkpoint of the given size."""
    from transformers import FunnelTokenizer, TFFunnelForQuestionAnswering
    tokenizer = FunnelTokenizer.from_pretrained(size)
    model = TFFunnelForQuestionAnswering.from_pretrained(size)
    question = "Who was Jim Henson?"
    text = "Jim Henson was a nice puppet"
    input_dict = tokenizer(question, text, return_tensors='tf')
    # pad every input up to a fixed length of 128 tokens
    spec, input_dict = self.spec_and_pad(input_dict, 128)
    self.run_test(model, input_dict, input_signature=spec,
                  outputs=["start_logits", "end_logits"], rtol=1e-5)
# One-line wrappers running _test_TFFunnel() for every published Funnel size.
def test_TFFunnelSmall(self):
self._test_TFFunnel("funnel-transformer/small")
def test_TFFunnelSmallBase(self):
self._test_TFFunnel("funnel-transformer/small-base")
def test_TFFunnelMedium(self):
self._test_TFFunnel("funnel-transformer/medium")
def test_TFFunnelMediumBase(self):
self._test_TFFunnel("funnel-transformer/medium-base")
def test_TFFunnelIntermediate(self):
self._test_TFFunnel("funnel-transformer/intermediate")
def test_TFFunnelIntermediateBase(self):
self._test_TFFunnel("funnel-transformer/intermediate-base")
def test_TFFunnelLarge(self):
self._test_TFFunnel("funnel-transformer/large")
def test_TFFunnelLargeBase(self):
self._test_TFFunnel("funnel-transformer/large-base")
def test_TFFunnelXLarge(self):
self._test_TFFunnel("funnel-transformer/xlarge")
def test_TFFunnelXLargeBase(self):
self._test_TFFunnel("funnel-transformer/xlarge-base")
## T5
def _test_TFT5Model(self, size, large=False):
    """Run the conversion test for a TFT5Model checkpoint of the given size."""
    from transformers import T5Tokenizer, TFT5Model
    tokenizer = T5Tokenizer.from_pretrained(size)
    model = TFT5Model.from_pretrained(size)
    encoder_text = "Studies have been shown that owning a dog is good for you"
    decoder_text = "Studies show that"
    input_dict = {
        "input_ids": tokenizer(encoder_text, return_tensors="tf").input_ids,
        "decoder_input_ids": tokenizer(decoder_text, return_tensors="tf").input_ids,
    }
    spec, input_dict = self.spec_and_pad(input_dict)
    self.run_test(model, input_dict, input_signature=spec,
                  outputs=["last_hidden_state"], large=large)
# One-line wrappers running _test_TFT5Model() for every published T5 size.
def test_TFT5ModelSmall(self):
self._test_TFT5Model("t5-small")
def test_TFT5ModelBase(self):
self._test_TFT5Model("t5-base")
def test_TFT5ModelLarge(self):
self._test_TFT5Model("t5-large", large=True)
def test_TFT5Model3B(self):
self._test_TFT5Model("t5-3b", large=True)
def test_TFT5Model11B(self):
self._test_TFT5Model("t5-11b", large=True)
## Albert
def _test_TFAlbert(self, size, large=False):
"""Run the conversion test for a TFAlbertModel checkpoint of the given size."""
from transformers import AlbertTokenizer, TFAlbertModel
tokenizer = AlbertTokenizer.from_pretrained(size)
model = TFAlbertModel.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
# One-line wrappers running _test_TFAlbert() for the v1 and v2 ALBERT sizes.
def test_TFAlbertBaseV1(self):
self._test_TFAlbert("albert-base-v1", large=True)
def test_TFAlbertLargeV1(self):
self._test_TFAlbert("albert-large-v1", large=True)
def test_TFAlbertXLargeV1(self):
self._test_TFAlbert("albert-xlarge-v1", large=True)
def test_TFAlbertXXLargeV1(self):
self._test_TFAlbert("albert-xxlarge-v1", large=True)
def test_TFAlbertBaseV2(self):
self._test_TFAlbert("albert-base-v2")
def test_TFAlbertLargeV2(self):
self._test_TFAlbert("albert-large-v2", large=True)
def test_TFAlbertXLargeV2(self):
self._test_TFAlbert("albert-xlarge-v2", large=True)
def test_TFAlbertXXLargeV2(self):
self._test_TFAlbert("albert-xxlarge-v2", large=True)
# CTRL
def test_TFCTRL(self):
"""Convert TFCTRLModel ('ctrl') and compare the last hidden state against Keras."""
from transformers import CTRLTokenizer, TFCTRLModel
tokenizer = CTRLTokenizer.from_pretrained('ctrl')
model = TFCTRLModel.from_pretrained('ctrl')
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=True)
def _test_TFGpt2(self, size, large=False):
    """Run the conversion test for a TFGPT2Model checkpoint of the given size."""
    from transformers import GPT2Tokenizer, TFGPT2Model
    tokenizer = GPT2Tokenizer.from_pretrained(size)
    model = TFGPT2Model.from_pretrained(size)
    input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
    spec, input_dict = self.spec_and_pad(input_dict)
    self.run_test(model, input_dict, input_signature=spec,
                  outputs=["last_hidden_state"], large=large)
# GPT2
# One-line wrappers running _test_TFGpt2() for GPT-2 sizes and DialoGPT checkpoints.
def test_TFDistilGpt2(self):
self._test_TFGpt2("distilgpt2")
def test_TFGpt2(self):
self._test_TFGpt2("gpt2")
def test_TFGpt2Large(self):
self._test_TFGpt2("gpt2-large", large=True)
def test_TFGpt2XLarge(self):
self._test_TFGpt2("gpt2-xl", large=True)
def test_TFDialoGPT(self):
self._test_TFGpt2("microsoft/DialoGPT-large", large=True)
def test_TFDialoGPTSmall(self):
self._test_TFGpt2("microsoft/DialoGPT-small", large=True)
# LONGFORMER
def _test_TFLongformer(self, size, large=False):
"""Run the conversion test for a TFLongformerModel checkpoint; inputs padded to 512."""
from transformers import LongformerTokenizer, TFLongformerModel
tokenizer = LongformerTokenizer.from_pretrained(size)
model = TFLongformerModel.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict, max_length=512)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
# Longformer wrappers; the base variant is known to fail on newer transformers.
def test_TFLongformerBase(self):
# fails since transformers-2.4.2?
#
# transformers/models/longformer/modeling_tf_longformer.py", line 1839, in _pad_to_window_size
# if tf.math.greater(padding_len, 0)
# OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed
#
self._test_TFLongformer("allenai/longformer-base-4096", large=True)
def test_TFLongformerLarge(self):
self._test_TFLongformer("allenai/longformer-large-4096", large=True)
# PEGASUS
def _test_TFPegasus(self, size, large=False):
"""Run the conversion test for a TFPegasusModel, supplying the embed-scale constants
that TF reports as graph inputs directly to onnxruntime via extra_input."""
from transformers import PegasusTokenizer, TFPegasusModel
tokenizer = PegasusTokenizer.from_pretrained(size)
model = TFPegasusModel.from_pretrained(size)
input_ids = \
tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="tf").input_ids
decoder_input_ids = \
tokenizer("Studies show that", return_tensors="tf").input_ids
input_dict = {"input_ids": input_ids, "decoder_input_ids": decoder_input_ids}
# this comes from TFPegasusEncoder/Decoder like:
# self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
# while this is meant to come from config, tf tells us that those are model inputs
# this might be new in transformers-2.4.2, we did not notice before that
extra_input = {"tf_pegasus_model/model/decoder/mul/y:0": np.array([32.], dtype=np.float32),
"tf_pegasus_model/model/encoder/mul/y:0": np.array([32.], dtype=np.float32)}
spec, input_dict = self.spec_and_pad(input_dict, max_length=model.config.max_length)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large, extra_input=extra_input)
def test_TFPegasus(self):
self._test_TFPegasus("google/pegasus-xsum", large=True)
# XLM
def _test_TFXLM(self, size, large=False):
"""Run the conversion test for a TFXLMModel checkpoint (looser atol=0.005)."""
from transformers import TFXLMModel, XLMTokenizer
tokenizer = XLMTokenizer.from_pretrained(size)
model = TFXLMModel.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large, atol=0.005)
# Wrappers for the English-only and English-German XLM checkpoints.
def test_TFXLM(self):
self._test_TFXLM("xlm-mlm-en-2048", large=True)
def test_TFXLM_ENDE(self):
self._test_TFXLM("xlm-mlm-ende-1024", large=True)
def test_TFXLM_CLMENDE(self):
self._test_TFXLM("xlm-clm-ende-1024", large=True)
# BART
def _test_TFBart(self, size, large=False):
    """Run the conversion test for a TFBartModel checkpoint; inputs padded to 128."""
    from transformers import BartTokenizer, TFBartModel
    tokenizer = BartTokenizer.from_pretrained(size)
    model = TFBartModel.from_pretrained(size)
    input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
    spec, input_dict = self.spec_and_pad(input_dict, max_length=128)
    self.run_test(model, input_dict, input_signature=spec,
                  outputs=["last_hidden_state"], large=large)
# One-line wrappers running _test_TFBart() for the published BART checkpoints.
def test_TFBartBase(self):
self._test_TFBart("facebook/bart-base", large=True)
def test_TFBartLarge(self):
self._test_TFBart("facebook/bart-large", large=True)
def test_TFBartLargeCnn(self):
self._test_TFBart("facebook/bart-large-cnn", large=True)
# ELECTRA
# ELECTRA family: one helper per task head, each paired with a small-discriminator wrapper.
def _test_Electra(self, size, large=False):
"""Conversion test for the bare TFElectraModel."""
from transformers import ElectraTokenizer, TFElectraModel
tokenizer = ElectraTokenizer.from_pretrained(size)
model = TFElectraModel.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFElectraSmall(self):
self._test_Electra("google/electra-small-discriminator", large=True)
def _test_ElectraForPreTraining(self, size, large=False):
"""Conversion test for TFElectraForPreTraining (replaced-token detection head)."""
from transformers import ElectraTokenizer, TFElectraForPreTraining
tokenizer = ElectraTokenizer.from_pretrained(size)
model = TFElectraForPreTraining.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["logits"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFElectraForPreTrainingSmall(self):
self._test_ElectraForPreTraining("google/electra-small-discriminator", large=True)
def _test_ElectraForMaskedLM(self, size, large=False):
"""Conversion test for TFElectraForMaskedLM; also feeds labels through the signature."""
from transformers import ElectraTokenizer, TFElectraForMaskedLM
tokenizer = ElectraTokenizer.from_pretrained(size)
model = TFElectraForMaskedLM.from_pretrained(size)
input_dict = tokenizer("The capital of France is [MASK].", return_tensors="tf")
input_dict["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["logits"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFElectraForMaskedLMSmall(self):
self._test_ElectraForMaskedLM("google/electra-small-discriminator", large=True)
def _test_ElectraForSequenceClassification(self, size, large=False):
"""Conversion test for TFElectraForSequenceClassification with a single-label batch."""
from transformers import ElectraTokenizer, TFElectraForSequenceClassification
tokenizer = ElectraTokenizer.from_pretrained(size)
model = TFElectraForSequenceClassification.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
input_dict["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["logits"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFElectraForSequenceClassificationSmall(self):
self._test_ElectraForSequenceClassification("google/electra-small-discriminator", large=True)
def _test_ElectraForTokenClassification(self, size, large=False):
"""Conversion test for TFElectraForTokenClassification; inputs padded to 128."""
from transformers import ElectraTokenizer, TFElectraForTokenClassification
tokenizer = ElectraTokenizer.from_pretrained(size)
model = TFElectraForTokenClassification.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
# input_ids = input_dict["input_ids"]
# input_dict["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids)))
spec, input_dict = self.spec_and_pad(input_dict, max_length=128)
outputs = ["logits"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFElectraForTokenClassificationSmall(self):
self._test_ElectraForTokenClassification("google/electra-small-discriminator", large=True)
def _test_ElectraForQuestionAnswering(self, size, large=False):
"""Conversion test for TFElectraForQuestionAnswering on a fixed QA pair."""
from transformers import ElectraTokenizer, TFElectraForQuestionAnswering
tokenizer = ElectraTokenizer.from_pretrained(size)
model = TFElectraForQuestionAnswering.from_pretrained(size)
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_dict = tokenizer(question, text, return_tensors='tf')
spec, input_dict = self.spec_and_pad(input_dict, max_length=128)
outputs = ["start_logits", "end_logits"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFElectraForQuestionAnsweringSmall(self):
self._test_ElectraForQuestionAnswering("google/electra-small-discriminator", large=True)
# XLNET
def _test_TFXLNET(self, size, large=False):
"""Run the conversion test for a TFXLNetModel checkpoint of the given size."""
from transformers import XLNetTokenizer, TFXLNetModel
tokenizer = XLNetTokenizer.from_pretrained(size)
model = TFXLNetModel.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFXLNETBase(self):
self._test_TFXLNET("xlnet-base-cased", large=True)
def test_TFXLNETLarge(self):
self._test_TFXLNET("xlnet-large-cased", large=True)
# Roberta
def _test_TFRoberta(self, size, large=False):
"""Run the conversion test for a TFRobertaModel checkpoint of the given size."""
from transformers import RobertaTokenizer, TFRobertaModel
tokenizer = RobertaTokenizer.from_pretrained(size)
model = TFRobertaModel.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFRobertaBase(self):
self._test_TFRoberta("roberta-base", large=True)
def test_TFDistilRobertaBase(self):
self._test_TFRoberta("distilroberta-base", large=True)
# LayoutLM
def _test_TFLayoutLM(self, size, large=False):
"""Run the conversion test for a TFLayoutLMModel checkpoint.

NOTE(review): token_boxes is built below but never added to input_dict, so the
model is exercised without its bbox input — confirm whether this is intentional."""
from transformers import LayoutLMTokenizer, TFLayoutLMModel
tokenizer = LayoutLMTokenizer.from_pretrained(size)
model = TFLayoutLMModel.from_pretrained(size)
words = ["Hello", "world"]
normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
token_boxes = []
for word, box in zip(words, normalized_word_boxes):
word_tokens = tokenizer.tokenize(word)
token_boxes.extend([box] * len(word_tokens))
# add bounding boxes of cls + sep tokens
token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
input_dict = tokenizer(' '.join(words), return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large)
def test_TFLayoutLM(self):
self._test_TFLayoutLM("microsoft/layoutlm-base-uncased", large=True)
# MBart
def _test_TFMbart(self, size, large=False):
"""Run the conversion test for a TFMBartModel checkpoint; inputs padded to 128.

NOTE(review): rtol=1.2 is an extremely loose relative tolerance — effectively
only checking magnitude; confirm whether a tighter bound is achievable."""
from transformers import MBartTokenizer, TFMBartModel
tokenizer = MBartTokenizer.from_pretrained(size)
model = TFMBartModel.from_pretrained(size)
input_dict = tokenizer("Hello, my dog is cute", return_tensors="tf")
spec, input_dict = self.spec_and_pad(input_dict, max_length=128)
outputs = ["last_hidden_state"]
self.run_test(model, input_dict, input_signature=spec, outputs=outputs, large=large, rtol=1.2)
def test_TFMBartLarge(self):
self._test_TFMbart("facebook/mbart-large-en-ro", large=True)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()
| 24,026 | 41.45053 | 117 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/test_backend.py | # SPDX-License-Identifier: Apache-2.0
"""Unit tests using onnx backends."""
import os
import unittest
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal
from packaging.version import Version
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from backend_test_base import Tf2OnnxBackendTestBase
# pylint reports unused-wildcard-import which is false positive, __all__ is defined in common
from common import * # pylint: disable=wildcard-import,unused-wildcard-import
from tf2onnx import constants, utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.tf_loader import is_tf2, tf_placeholder_with_default, tf_placeholder
from tf2onnx.onnx_opset.signal import make_dft_constant
# pylint: disable=missing-docstring,invalid-name,unused-argument,function-redefined,cell-var-from-loop
NCHW_TO_NHWC = [0, 2, 3, 1]
NHWC_TO_NCHW = [0, 3, 1, 2]
HWCN_TO_NCHW = [3, 2, 0, 1]
_STRIDE1x1 = [1, 1, 1, 1]
_KERNEL3x3 = [3, 3, 1, 1]
_DILATIONS1x1 = [1, 1, 1, 1]
# names for input and outputs for tests
_TFINPUT = "input"
_INPUT = "input:0"
_TFINPUT1 = "input1"
_INPUT1 = "input1:0"
_TFINPUT2 = "input2"
_INPUT2 = "input2:0"
_TFINPUT3 = "input3"
_INPUT3 = "input3:0"
_TFOUTPUT = "output"
_OUTPUT = "output:0"
_TFOUTPUT1 = "output1"
_OUTPUT1 = "output1:0"
_TFOUTPUT2 = "output2"
_OUTPUT2 = "output2:0"
_TFOUTPUT3 = "output3"
_OUTPUT3 = "output3:0"
# Resolve version-dependent TF ops to one set of module-level aliases so the
# tests below can call them uniformly regardless of the installed TF version.
if is_tf2():
conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input
conv3d_transpose = tf.compat.v1.nn.conv3d_transpose
multinomial = tf.compat.v1.random.multinomial
space_to_batch_nd = tf.compat.v1.space_to_batch_nd
batch_to_space_nd = tf.compat.v1.batch_to_space_nd
reverse_v2 = tf.compat.v1.reverse_v2
random_normal = tf.compat.v1.random_normal
random_uniform = tf.compat.v1.random_uniform
fused_batch_norm = tf.compat.v1.nn.fused_batch_norm
dropout = tf.compat.v1.nn.dropout
resize_nearest_neighbor = tf.compat.v1.image.resize_nearest_neighbor
quantize_and_dequantize = tf.quantization.quantize_and_dequantize
resize_bilinear = tf.compat.v1.image.resize_bilinear
resize_bilinear_v2 = tf.compat.v2.image.resize
is_nan = tf.math.is_nan
is_inf = tf.math.is_inf
floormod = tf.math.floormod
matrix_diag_part = tf.compat.v1.matrix_diag_part
fake_quant_with_min_max_args = tf.quantization.fake_quant_with_min_max_args
fake_quant_with_min_max_vars = tf.quantization.fake_quant_with_min_max_vars
elif Version(tf.__version__) >= Version("1.13"):
# TF 1.13+ exposes the compat.v1 namespace
conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input
conv3d_transpose = tf.compat.v1.nn.conv3d_transpose
multinomial = tf.compat.v1.random.multinomial
space_to_batch_nd = tf.compat.v1.space_to_batch_nd
batch_to_space_nd = tf.compat.v1.batch_to_space_nd
reverse_v2 = tf.compat.v1.reverse_v2
random_normal = tf.compat.v1.random_normal
random_uniform = tf.compat.v1.random_uniform
fused_batch_norm = tf.compat.v1.nn.fused_batch_norm
dropout = tf.compat.v1.nn.dropout
quantize_and_dequantize = tf.compat.v1.quantization.quantize_and_dequantize
resize_nearest_neighbor = tf.compat.v1.image.resize_nearest_neighbor
resize_bilinear = tf.compat.v1.image.resize_bilinear
if Version(tf.__version__) >= Version("1.14"):
# resize v2 only exists from TF 1.14 on
resize_bilinear_v2 = tf.compat.v2.image.resize
is_nan = tf.math.is_nan
is_inf = tf.math.is_inf
floormod = tf.floormod
matrix_diag_part = tf.compat.v1.matrix_diag_part
fake_quant_with_min_max_args = tf.compat.v1.quantization.fake_quant_with_min_max_args
fake_quant_with_min_max_vars = tf.compat.v1.quantization.fake_quant_with_min_max_vars
else:
# pre-1.13: the ops still live in the top-level namespace
conv2d_backprop_input = tf.nn.conv2d_backprop_input
conv3d_transpose = tf.nn.conv3d_transpose
multinomial = tf.multinomial
space_to_batch_nd = tf.space_to_batch_nd
batch_to_space_nd = tf.batch_to_space_nd
reverse_v2 = tf.reverse_v2
random_normal = tf.random_normal
random_uniform = tf.random_uniform
fused_batch_norm = tf.nn.fused_batch_norm
dropout = tf.nn.dropout
resize_nearest_neighbor = tf.image.resize_nearest_neighbor
resize_bilinear = tf.image.resize_bilinear
is_nan = tf.is_nan
is_inf = tf.is_inf
floormod = tf.floormod
matrix_diag_part = tf.matrix_diag_part
def make_xval(shape):
    """Return a float32 ndarray of the given shape filled with 0..N-1 in row-major order."""
    total = np.prod(shape)
    return np.arange(total, dtype=np.float32).reshape(shape)
def get_conv_getdata(kind=1):
    """Yield conv test configurations as (idx, padding, input_sizes, filter_sizes, strides).

    kind=0: the full cartesian product of all parameter values (costly).
    kind=1: a hand-picked list that gives decent padding coverage.
    Raises ValueError for any other kind.
    """
    if kind == 0:
        # generate all combinations (costly)
        dims = [
            ("padding", ["SAME", "VALID"]),
            ("input_sizes", [[32, 35, 35, 3], [32, 17, 17, 3], [1, 28, 28, 3], [32, 8, 8, 3]]),
            ("filter_sizes", [[1, 3, 3, 1], [1, 2, 2, 1], [1, 5, 5, 1], [1, 1, 1, 1], [1, 5, 2, 1], [1, 2, 5, 1]]),
            ("strides", [[1, 2, 2, 1], [1, 1, 1, 1]]),
        ]
        values = [key_values[1] for key_values in dims]
        # NOTE: the original guarded this yield with `if True or idx == 30`, a
        # leftover debugging switch that is always true; removed as dead code.
        for idx, v in enumerate(product(*values)):
            yield (idx,) + v
    elif kind == 1:
        # some combination to that give decent padding coverage
        data = [
            ('SAME', [32, 35, 35, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
            ('SAME', [32, 35, 35, 3], [1, 2, 2, 1], [1, 2, 2, 1]),
            ('SAME', [32, 35, 35, 3], [1, 1, 1, 1], [1, 1, 1, 1]),
            ('SAME', [32, 35, 35, 3], [1, 5, 2, 1], [1, 2, 2, 1]),
            ('SAME', [32, 35, 35, 3], [1, 2, 5, 1], [1, 2, 2, 1]),
            ('SAME', [32, 35, 35, 3], [1, 2, 5, 1], [1, 1, 1, 1]),
            ('SAME', [1, 28, 28, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
            ('SAME', [1, 28, 28, 3], [1, 3, 3, 1], [1, 1, 1, 1]),
            ('SAME', [1, 28, 28, 3], [1, 2, 2, 1], [1, 2, 2, 1]),
            ('SAME', [1, 28, 28, 3], [1, 2, 2, 1], [1, 1, 1, 1]),
            ('SAME', [1, 28, 28, 3], [1, 5, 5, 1], [1, 2, 2, 1]),
            ('SAME', [1, 28, 28, 3], [1, 5, 5, 1], [1, 1, 1, 1]),
            ('SAME', [1, 28, 28, 3], [1, 5, 2, 1], [1, 2, 2, 1]),
            ('SAME', [32, 8, 8, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
            ('SAME', [32, 8, 8, 3], [1, 3, 3, 1], [1, 1, 1, 1]),
            ('VALID', [32, 35, 35, 3], [1, 3, 3, 1], [1, 1, 1, 1]),
            ('VALID', [32, 35, 35, 3], [1, 2, 2, 1], [1, 2, 2, 1]),
        ]
        for idx, v in enumerate(data):
            yield (idx,) + v
    else:
        raise ValueError("kind not known")
def get_maxpoolwithargmax_getdata():
    """Yield (idx, padding, input_shape, ksize, strides) configs for MaxPoolWithArgmax tests."""
    cases = (
        ('SAME', [1, 3, 3, 2], [1, 3, 3, 1], [1, 2, 2, 1]),
        ('SAME', [2, 5, 5, 3], [1, 4, 4, 1], [1, 2, 2, 1]),
        ('SAME', [2, 10, 5, 1], [1, 2, 2, 1], [1, 2, 2, 1]),
        ('SAME', [2, 10, 5, 3], [1, 4, 4, 1], [1, 1, 1, 1]),
        ('VALID', [2, 3, 3, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
        ('VALID', [2, 5, 5, 3], [1, 4, 4, 1], [1, 2, 2, 1]),
    )
    for index, case in enumerate(cases):
        yield (index,) + case
class BackendTests(Tf2OnnxBackendTestBase):
def _run_test_case(self, func, output_names_with_port, feed_dict, **kwargs):
"""Thin wrapper around run_test_case that disables variable-to-constant conversion."""
kwargs["convert_var_to_const"] = False
return self.run_test_case(func, feed_dict, [], output_names_with_port, **kwargs)
# tf.expand_dims conversion tests over positive/negative axes, known and
# unknown ranks, and a non-constant axis tensor.
def _test_expand_dims_known_rank(self, idx):
x_val = make_xval([3, 4])
def func(x):
op = tf.expand_dims(x, idx)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_expand_dims_known_rank(self):
for i in [-1, 0, 1, -2]:
self._test_expand_dims_known_rank(i)
def test_expand_dims_one_unknown_rank(self):
x_val = make_xval([3, 4])
def func(x):
op = tf.expand_dims(x, 0)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_expand_dims_with_list(self):
x_val = make_xval([3, 4])
def func(x):
# axis given as a nested list rather than a scalar
op = tf.expand_dims(x, [[0]])
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def _test_expand_dims_more_unknown_rank(self, idx):
x_val = make_xval([3, 4])
def func(x):
op = tf.expand_dims(x, idx)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_expand_dims_more_unknown_rank(self):
for i in [-1, 0, 1, -2]:
self._test_expand_dims_more_unknown_rank(i)
@check_opset_min_version(13, "Unsqueeze")
def test_expand_dims_nonconst_dims(self):
x_val = make_xval([3, 4])
y_val = np.array([-1], dtype=np.int32)
def func(x, y):
op = tf.expand_dims(x, y)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(9, "ConstantOfShape")
def test_layer_normalization(self):
"""Hand-built layer-norm graph; validator checks it is fused to one InstanceNormalization."""
x_val = make_xval([3, 4, 5])
scale_val = make_xval([3, 4, 5]) * 0.2
bias_val = make_xval([3, 4, 5]) * 0.1
def func(x):
# normalize over the last axis, then apply affine scale and bias
mean = tf.reduce_mean(x, axis=[2], keepdims=True)
centered = tf.subtract(x, mean)
variance = tf.add(tf.reduce_mean(tf.square(centered), axis=[2], keepdims=True), 0.001)
inv_std_dev = tf.math.rsqrt(variance)
normalized = tf.multiply(centered, inv_std_dev)
scaled = tf.multiply(normalized, scale_val)
biased = tf.add(scaled, bias_val)
return tf.identity(biased, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05,
graph_validator=lambda g: (check_op_count(g, "InstanceNormalization", 1)))
# tf.eye conversion tests with non-constant row/column counts across dtypes.
@check_opset_min_version(9, "ConstantOfShape")
def test_eye_non_const1(self):
# tf.eye(num_rows), num_rows is not const here
x_val = np.array(5, dtype=np.int32)
def func(x):
y = tf.eye(x, dtype=tf.int32)
y1 = tf.eye(x, dtype=tf.int64)
y2 = tf.eye(x, dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT), tf.identity(y1, name=_TFOUTPUT1), tf.identity(y2, name=_TFOUTPUT2)
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: x_val}, rtol=0)
# tf.eye(num_rows, num_columns), both num_rows and num_columns are not const here
x_val = np.array([5, 10], dtype=np.int32)
def func(x):
y = tf.eye(x[0], x[1], dtype=tf.int32)
y1 = tf.eye(x[0], x[1], dtype=tf.int64)
y2 = tf.eye(x[0], x[1], dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT), tf.identity(y1, name=_TFOUTPUT1), tf.identity(y2, name=_TFOUTPUT2)
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: x_val}, rtol=0)
@check_tf_min_version("1.11", "eye has bug when version is below 1.11")
@check_opset_min_version(9, "ConstantOfShape")
def test_eye_non_const2(self):
# same as above but sweeping the dtype of the size input itself
# tf.eye(num_rows), num_rows is not const here
for np_dtype in [np.int32, np.int64, np.float32, np.float64]:
x_val = np.array(5, dtype=np_dtype)
def func(x):
y = tf.eye(x, dtype=tf.int32)
y1 = tf.eye(x, dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT),\
tf.identity(y1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val}, rtol=0)
# tf.eye(num_rows, num_columns), both num_rows and num_columns are not const here
for np_dtype in [np.int32, np.int64, np.float32, np.float64]:
x_val = np.array([5, 10], dtype=np_dtype)
def func(x):
y = tf.eye(x[0], x[1], dtype=tf.int32)
y1 = tf.eye(x[0], x[1], dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT), \
tf.identity(y1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val}, rtol=0)
# Trigonometric and hyperbolic op conversion tests.
@check_opset_min_version(7, "trig")
def test_trig_ops(self):
for op in [tf.sin, tf.cos, tf.tan, tf.asin, tf.acos, tf.atan]:
x_val = make_xval([3, 4])
def func(x):
op_ = op(x)
return tf.identity(op_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-06)
# NOTE(review): despite the "atrig" name this also covers plain sinh/cosh.
@check_opset_min_version(9, "trigh")
def test_atrig_ops(self):
for op in [tf.sinh, tf.cosh, tf.atanh, tf.asinh, tf.acosh]:
x_val = make_xval([3, 4])
def func(x):
op_ = op(x)
return tf.identity(op_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
# Multinomial sampling tests: output is random, so only dtype/shape are checked.
@skip_caffe2_backend()
@check_opset_min_version(7, "multinomial")
def test_multinomial(self):
x_val = np.array([[10., 10.]], dtype=np.float32)
def func(x):
op = multinomial(tf.math.log(x), 5, output_dtype=tf.int32)
return tf.identity(op, name=_TFOUTPUT)
# since returned indexes are random we can only check type and shape
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False,
check_shape=True, check_dtype=True)
@skip_caffe2_backend()
@check_opset_min_version(7, "multinomial")
def test_multinomial1(self):
shape = [2, 10]
x_val = np.ones(np.prod(shape)).astype("float32").reshape(shape)
def func(x):
op = multinomial(x, 2, output_dtype=tf.int32)
return tf.identity(op, name=_TFOUTPUT)
# since returned indexes are random we can only check type and shape
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False,
check_shape=True, check_dtype=True)
# MaxPool conversion tests over the shared conv config list, int inputs,
# and the GPU-only NCHW data format.
def test_maxpool(self):
for p in get_conv_getdata():
_, padding, x_shape, ksize, strides = p
x_val = make_xval(x_shape)
def func(x):
mp = tf.nn.max_pool(x, ksize, strides, padding=padding)
return tf.identity(mp, name=_TFOUTPUT)
self.logger.debug(str(p))
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.15", "required for max_pool args")
def test_maxpool_int(self):
x_shape = [8, 16, 16, 3]
x_val = make_xval(x_shape).astype("int32")
def func(x):
mp = tf.nn.max_pool(x, ksize=[2], strides=[1, 2, 2, 1], padding="SAME")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tf_cpu("only tf_gpu can run maxpool with NCHW format")
def test_maxpool_gpu(self):
# make sure converter behaves well when data format is NCHW
# and when data format is NCHW, only gpu version of tensorflow can run it.
ksize = [1, 1, 2, 2]
strides = [1, 1, 2, 2]
x_val = make_xval([1, 3, 50, 80])
for padding in ["SAME", "VALID"]:
def func(x):
mp = tf.nn.max_pool(x, ksize, strides, padding=padding, data_format="NCHW")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
# AveragePool conversion tests: exhaustive config sweep plus the GPU-only NCHW case.
@check_onnxruntime_incompatibility("AveragePool")
def test_avgpool(self):
for p in get_conv_getdata(kind=0):
_, padding, x_shape, ksize, strides = p
x_val = make_xval(x_shape)
def func(x):
mp = tf.nn.avg_pool(x, ksize, strides, padding=padding)
return tf.identity(mp, name=_TFOUTPUT)
self.logger.debug(str(p))
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-06)
@check_onnxruntime_incompatibility("AveragePool")
@skip_tf_cpu("only tf_gpu can run avgpool with NCHW format")
def test_avgpool_gpu(self):
ksize = [1, 1, 2, 2]
strides = [1, 1, 2, 2]
x_val = make_xval([1, 3, 50, 80])
for padding in ["SAME", "VALID"]:
def func(x):
mp = tf.nn.avg_pool(x, ksize, strides, padding=padding, data_format="NCHW")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def _conv_test(self, x_val, w, strides=None, padding="VALID", dilations=None, rtol=1e-07):
"""Run a single tf.nn.conv2d conversion test with kernel w baked in as a constant."""
if strides is None:
strides = _STRIDE1x1
if dilations is None:
dilations = _DILATIONS1x1
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv2d(x, kernel, strides=strides, padding=padding, dilations=dilations)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=rtol)
def test_conv2d_1(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.array([[2., 1., 1.],
[1., 3., 1.],
[1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
self._conv_test(x_val, w)
def test_conv2d_2(self):
x_val = np.array([[4, 3, 1, 0],
[2, 1, 0, 1],
[1, 2, 4, 1],
[3, 1, 0, 2]], dtype=np.float32).reshape([1, 4, 4, 1])
w = np.array([[1, 0, 1],
[2, 1, 0],
[0, 0, 1]], dtype=np.float32).reshape(_KERNEL3x3)
self._conv_test(x_val, w)
def test_conv2d_3(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.array([[2., 1., 1.],
[1., 3., 1.],
[1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
self._conv_test(x_val, w)
def test_conv2d_4(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.random.random_sample(_KERNEL3x3).astype(np.float32)
self._conv_test(x_val, w, padding="SAME", rtol=1e-05)
def test_conv2d_5(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
kernel_shape = [3, 3, 1, 2]
w = np.random.random_sample(kernel_shape).astype(np.float32)
self._conv_test(x_val, w, padding="SAME", rtol=1e-05)
def test_conv2d_6(self):
    """Large strided conv (stride 2) with many channels, VALID padding."""
    x_shape = [1, 35, 35, 288]  # out: [1, 17, 17, 384]
    kernel_shape = [3, 3, 288, 384]
    strides = [1, 2, 2, 1]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    self._conv_test(x_val, kernel_val, strides=strides, padding="VALID", rtol=1.1e-05)
@check_tf_min_version("1.14", "tf 1.14 needed for explicit padding")
def test_conv2d_explicit_padding(self):
    """conv2d with explicit per-dimension pad amounts instead of SAME/VALID."""
    x_shape = [1, 35, 35, 288]
    kernel_shape = [3, 3, 288, 384]
    # [[batch], [height], [width], [channel]] pre/post pad pairs.
    pads = [[0, 0], [1, 2], [3, 4], [0, 0]]
    strides = [1, 1, 1, 1]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    self._conv_test(x_val, kernel_val, strides=strides, padding=pads, rtol=1.1e-05)
def test_conv2d_dilation_same(self):
    """conv2d with dilation along height and SAME padding."""
    x_shape = [1, 35, 35, 288]  # NHWC
    kernel_shape = [3, 3, 288, 384]  # [filter_height, filter_width, in_channels, out_channels]
    strides = [1, 1, 1, 1]  # NHWC
    dilations = [1, 3, 1, 1]  # NHWC
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    self._conv_test(x_val, kernel_val, strides=strides, padding="SAME", dilations=dilations, rtol=1.1e-05)
def test_conv2d_dilation_strides_same(self):
    """conv2d combining non-unit dilation with non-unit strides, SAME padding."""
    x_shape = [1, 35, 35, 288]  # NHWC
    kernel_shape = [3, 3, 288, 384]  # [filter_height, filter_width, in_channels, out_channels]
    strides = [1, 2, 4, 1]  # NHWC
    dilations = [1, 3, 1, 1]  # NHWC
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    self._conv_test(x_val, kernel_val, strides=strides, padding="SAME", dilations=dilations, rtol=1e-05)
def test_conv3d_1(self):
    """conv3d, VALID padding, unit strides and dilations, NDHWC layout."""
    x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
    filt = np.random.random_sample([2, 3, 4, 5, 6]).astype(np.float32)

    def func(x):
        kernel = tf.constant(filt, dtype=tf.float32, name='k')
        conv = tf.nn.conv3d(x, kernel, strides=[1, 1, 1, 1, 1], padding="VALID",
                            data_format="NDHWC", dilations=[1, 1, 1, 1, 1])
        return tf.identity(conv, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_conv3d_2(self):
    """conv3d with non-unit spatial strides, VALID padding, NDHWC."""
    strides = [1, 2, 3, 1, 1]
    dilations = [1, 1, 1, 1, 1]
    x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
    w = np.random.random_sample([2, 3, 4, 5, 6]).astype(np.float32)
    padding = "VALID"
    def func(x):
        kernel = tf.constant(w, dtype=tf.float32, name='k')
        conv = tf.nn.conv3d(x, kernel, strides=strides, padding=padding, data_format="NDHWC", dilations=dilations)
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_conv3d_3(self):
    """conv3d with non-unit strides and SAME padding, NDHWC."""
    strides = [1, 2, 3, 1, 1]
    dilations = [1, 1, 1, 1, 1]
    x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
    w = np.random.random_sample([2, 3, 4, 5, 6]).astype(np.float32)
    padding = "SAME"
    def func(x):
        kernel = tf.constant(w, dtype=tf.float32, name='k')
        conv = tf.nn.conv3d(x, kernel, strides=strides, padding=padding, data_format="NDHWC", dilations=dilations)
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_avgpool3d(self):
    """avg_pool3d with asymmetric window, VALID padding, NDHWC."""
    x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)

    def func(x):
        pooled = tf.nn.avg_pool3d(x, [1, 2, 2, 3, 1], [1, 1, 1, 1, 1],
                                  padding="VALID", data_format="NDHWC")
        return tf.identity(pooled, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_maxpool3d(self):
    """max_pool3d with asymmetric window, VALID padding, NDHWC."""
    x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)

    def func(x):
        pooled = tf.nn.max_pool3d(x, [1, 2, 2, 3, 1], [1, 1, 1, 1, 1],
                                  padding="VALID", data_format="NDHWC")
        return tf.identity(pooled, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14", "tf.nn.avg_pool2d doesn't exist before tf 1.14")
def test_avgpool2d(self):
    """avg_pool2d, VALID padding, NHWC layout."""
    strides = [1, 1, 1, 1]
    ksize = [1, 2, 3, 1]
    x_val = make_xval([2, 10, 12, 3])
    padding = "VALID"
    def func(x):
        mp = tf.nn.avg_pool2d(x, ksize, strides, padding=padding, data_format="NHWC")
        return tf.identity(mp, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.7", "tf only support dilation is 1 for now")
def test_conv2d_7(self):
    """conv2d with both strides and dilations > 1, VALID padding."""
    x_shape = [1, 35, 35, 288]  # out: [1, 17, 17, 384]
    kernel_shape = [3, 3, 288, 384]
    strides = [1, 2, 2, 1]
    dilations = [1, 3, 3, 1]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    self._conv_test(x_val, kernel_val, strides=strides, padding="VALID",
                    dilations=dilations, rtol=1e-05)
def test_conv2d_8(self):
    """Strided SAME conv over both even and odd input sizes (pad rounding)."""
    for input_shape in [[10, 10], [5, 5]]:
        x_val = make_xval((1, 1, *input_shape)).transpose(NCHW_TO_NHWC)
        w = np.random.random_sample([3, 3, 1, 2]).astype(np.float32)
        strides = [1, 2, 2, 1]
        def func(x):
            kernel = tf.constant(w, dtype=tf.float32, name='k')
            conv = tf.nn.conv2d(x, kernel, strides=strides, padding="SAME")
            return tf.identity(conv, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-5)
def test_conv2d_with_pad_valid(self):
    """conv2d (VALID) preceded by an explicit tf.pad on the spatial dims."""
    image = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
    filt = np.random.random_sample([3, 3, 1, 2]).astype(np.float32)

    def func(x):
        kernel = tf.constant(filt, dtype=tf.float32, name='k')
        padded = tf.pad(x, paddings=[[0, 0], [2, 2], [2, 2], [0, 0]])
        conv = tf.nn.conv2d(padded, kernel, strides=[1, 1, 1, 1], padding="VALID")
        return tf.identity(conv, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: image}, rtol=1e-5)
def test_conv2d_with_pad_same(self):
    """conv2d (SAME) preceded by an explicit tf.pad on the spatial dims."""
    x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
    w = np.random.random_sample([3, 3, 1, 2]).astype(np.float32)
    strides = [1, 1, 1, 1]
    def func(x):
        kernel = tf.constant(w, dtype=tf.float32, name='k')
        x_pad = tf.pad(x, paddings=[[0, 0], [2, 2], [2, 2], [0, 0]])
        conv = tf.nn.conv2d(x_pad, kernel, strides=strides, padding="SAME")
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-5)
def test_conv2d_transpose(self):
    """conv2d_transpose with a static output_shape, VALID padding."""
    x_shape = [2, 6, 4, 3]
    output_shape = [2, 13, 9, 2]
    kernel_shape = [3, 3, 2, 3]
    strides = [1, 2, 2, 1]
    x_val = make_xval(x_shape)
    kernel_val = make_xval(kernel_shape)
    def func(x):
        f = tf.constant(kernel_val, name="kernel", dtype=tf.float32)
        conv = tf.nn.conv2d_transpose(x, f, output_shape, strides=strides, padding="VALID")
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
@check_onnxruntime_min_version("0.5.0", "conv transpose is added since onnxruntime-0.5.0")
def test_conv2d_transpose2(self):
    """conv2d_transpose where output_shape is a runtime input (dynamic).

    Requires the Microsoft contrib opset for the dynamic-shape ConvTranspose.
    """
    # output_shape is dynamic
    extra_opset = [utils.make_opsetid(constants.MICROSOFT_DOMAIN, 1)]
    process_args = {"extra_opset": extra_opset}
    x_shape = [2, 6, 4, 3]
    output_shape = np.array([2, 13, 9, 2]).astype(np.int32)
    kernel_shape = [3, 3, 2, 3]
    strides = [1, 2, 2, 1]
    x_val = make_xval(x_shape)
    kernel_val = make_xval(kernel_shape)
    def func(x, output_shape_placeholder):
        f = tf.constant(kernel_val, name="kernel", dtype=tf.float32)
        conv = tf.nn.conv2d_transpose(x, f, output_shape_placeholder, strides=strides, padding="VALID")
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: output_shape},
                        rtol=1e-05, process_args=process_args)
@check_opset_min_version(10, "quantize_and_dequantize")
def test_conv2d_quantization_axis(self):
    """Conv with a quantize/dequantize'd kernel: DequantizeLinear must get axis 0."""
    x_shape = [1, 1, 5, 5]
    kernel_shape = _KERNEL3x3
    strides = [1, 1, 1, 1]
    x_val = make_xval(x_shape).transpose(NCHW_TO_NHWC)
    kernel_val = make_xval(_KERNEL3x3)
    def func(x):
        f = tf.constant(kernel_val, name="kernel", dtype=tf.float32)
        kernel_dq = quantize_and_dequantize(f, 0, np.prod(kernel_shape))
        conv = tf.nn.conv2d(x, kernel_dq, strides=strides, padding="VALID")
        return tf.identity(conv, name=_TFOUTPUT)
    def graph_validator(g):
        # Conv weights are laid out OIHW in ONNX, so per-channel axis is 0.
        return check_quantization_axis(g, "DequantizeLinear", 0)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05, graph_validator=graph_validator,
                        check_shape=False)
@check_opset_min_version(10, "quantize_and_dequantize")
def test_conv2d_transpose_quantization_axis(self):
    """ConvTranspose with quantized kernel: DequantizeLinear must get axis 1."""
    x_shape = [2, 6, 4, 3]
    output_shape = [2, 13, 9, 2]
    kernel_shape = [3, 3, 2, 3]
    strides = [1, 2, 2, 1]
    x_val = make_xval(x_shape)
    kernel_val = make_xval(kernel_shape)
    def func(x):
        f = tf.constant(kernel_val, name="kernel", dtype=tf.float32)
        kernel_dq = quantize_and_dequantize(f, 0, np.prod(kernel_shape))
        conv = tf.nn.conv2d_transpose(x, kernel_dq, output_shape, strides=strides, padding="VALID")
        return tf.identity(conv, name=_TFOUTPUT)
    def graph_validator(g):
        # ConvTranspose weights are IOHW in ONNX, so the output-channel axis is 1.
        return check_quantization_axis(g, "DequantizeLinear", 1)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05, graph_validator=graph_validator,
                        check_shape=False)
def test_depthwiseconv_0(self):
    """depthwise_conv2d with a small constant kernel, VALID padding."""
    x_shape, kernel_shape = [1, 3, 4, 3], [3, 3, 3, 3]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)

    def func(x):
        kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
        conv = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
        return tf.identity(conv, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.08)
def test_depthwiseconv_1(self):
    """Large depthwise conv (channel multiplier 1), VALID padding."""
    x_shape = [1, 112, 112, 32]
    kernel_shape = [3, 3, 32, 1]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    def func(x):
        kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
        conv = tf.nn.depthwise_conv2d(x, kernel, strides=_STRIDE1x1, padding='VALID')
        return tf.identity(conv, name=_TFOUTPUT)
    # rtol is a bit high, 2 values have a bit high error. Maybe use different input data.
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.08)
def test_depthwiseconv_3(self):
    """Same setup as test_depthwiseconv_1 but with explicit strides and tighter rtol."""
    x_shape = [1, 112, 112, 32]
    kernel_shape = [3, 3, 32, 1]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    def func(x):
        kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
        conv = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
        return tf.identity(conv, name=_TFOUTPUT)
    # rtol is a bit high, 2 values have a bit high error. Maybe use different input data.
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.01)
def test_depthwiseconv_shared_kernel(self):
    """Two depthwise convs sharing one constant kernel; results are added."""
    x_shape = [1, 3, 4, 3]
    kernel_shape = [3, 3, 3, 3]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    def func(x, y):
        kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
        conv1 = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
        conv2 = tf.nn.depthwise_conv2d(y, kernel, strides=[1, 1, 1, 1], padding='VALID')
        conv = tf.add(conv1, conv2)
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_val}, rtol=0.08)
@check_tf_min_version("1.14", "tf depthwise_conv2d dilations")
@check_opset_min_version(11, "non-const pads")
def test_depthwiseconv_dilations(self):
    """depthwise_conv2d with asymmetric dilations and SAME padding."""
    x_shape = [1, 32, 32, 1]
    kernel_shape = [5, 5, 1, 1]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
    def func(x):
        kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
        conv = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME', dilations=[3, 4])
        return tf.identity(conv, name=_TFOUTPUT)
    # rtol is a bit high, 2 values have a bit high error. Maybe use different input data.
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.01)
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_dropout(self):
    """tf.layers.dropout with training=False must be optimized away (no Random ops)."""
    x_val = np.ones([1, 24, 24, 3], dtype=np.float32)
    # Define a scope for reusing the variables
    def func(x):
        is_training = tf.constant(False, tf.bool)
        x_ = tf.identity(x)
        fc1 = tf.layers.dropout(x_, rate=.1, training=is_training)
        return tf.identity(fc1, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
                        graph_validator=lambda g: (check_op_count(g, "RandomUniform", 0) and
                                                   check_op_count(g, "RandomUniformLike", 0)))
def test_nn_dropout(self):
    """Dropout with keep_prob fed as a placeholder; must leave no Random ops."""
    x_val = np.ones([1, 24, 24, 3], dtype=np.float32)
    # Define a scope for reusing the variables
    def func(x, keep_prob):
        x_ = tf.identity(x)
        fc1 = dropout(x_, keep_prob)
        return tf.identity(fc1, name=_TFOUTPUT)
    # when constant_fold is enabled, PlaceholderWithDefault will be folded into either a const or a placeholder.
    # here we set it False to test PlaceholderWithDefault bug: https://github.com/onnx/tensorflow-onnx/pull/446
    # Dropout with ratio 1.0 will be optimized so that only one Identity is left
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: np.array(1., dtype=np.float32)},
                        graph_validator=lambda g: (check_op_count(g, "RandomUniform", 0) and
                                                   check_op_count(g, "RandomUniformLike", 0)))
@check_tf_min_version("1.13")
def test_nn_dropout_with_rate(self):
    """tf.nn.dropout with rate=0 (a graph constant) must be optimized away."""
    rate = tf.constant(0., name="rate")
    x_val = np.ones([1, 24, 24, 3], dtype=np.float32)
    # Define a scope for reusing the variables
    def func(x):
        x_ = tf.identity(x)
        fc1 = tf.nn.dropout(x_, rate=rate)
        return tf.identity(fc1, name="output")
    # This test names ports explicitly instead of using the _INPUT/_OUTPUT constants.
    feed_dict = {"input_1:0": x_val}
    input_names_with_port = ["input_1:0"]
    output_names_with_port = ["output:0"]
    self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,
                       graph_validator=lambda g: (check_op_count(g, "RandomUniform", 0) and
                                                  check_op_count(g, "RandomUniformLike", 0)))
def test_inputs_as_nchw_arg(self):
    """--inputs-as-nchw: TF graph is NHWC but the ONNX model is fed NCHW data."""
    x_shape = [2, 32, 32, 3]
    kernel_shape = [3, 3, 3, 3]
    x_val = make_xval(x_shape)
    # ONNX side receives the transposed (NCHW) tensor.
    x_val_for_onnx = x_val.transpose(NHWC_TO_NCHW)
    def func(x):
        kernel = tf.constant(make_xval(kernel_shape), dtype=tf.float32, name='k')
        conv = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding="SAME")
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05,
                        process_args={"inputs_as_nchw": [_INPUT]},
                        onnx_feed_dict={_INPUT: x_val_for_onnx})
def test_outputs_as_nchw_arg(self):
    """--outputs-as-nchw: the converted model emits this output in NCHW layout."""
    kernel_shape = [3, 3, 3, 3]
    x_val = make_xval([2, 32, 32, 3])

    def func(x):
        kernel = tf.constant(make_xval(kernel_shape), dtype=tf.float32, name='kernel')
        conv = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding="SAME")
        return tf.identity(conv, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05,
                        process_args={"outputs_as_nchw": [_OUTPUT]})
@skip_tflite("TFlite adds ops that obscure pattern")
@check_tf_min_version("1.15")
def test_conv1d_dilations_rewriter(self):
    """Keras Conv1D with dilation: the dilation rewriter must remove SpaceToBatch
    reshuffling so no Reshape ops remain in the converted graph."""
    x_shape = [2, 32, 3]
    x_val = make_xval(x_shape)
    for p in ['SAME', 'VALID']:
        def func(x):
            t = tf.keras.layers.Conv1D(filters=768, kernel_size=3, dilation_rate=3, padding=p)
            t.build(x_shape)
            y = t.call(x)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tf_cpu("only tf_gpu can run conv2d with NCHW format")
def test_conv2d_biasadd_rewriter(self):
    """NCHW Conv2D with bias: the bias-add rewriter must fuse the Add into Conv
    (validator requires zero Add ops)."""
    x_shape = [2, 3, 32, 16]
    x_val = make_xval(x_shape)
    def func(x):
        middles = tf.keras.layers.ZeroPadding2D(
            padding=(0, 4),
            data_format="channels_first",
            name="padding"
        )(x)
        t = tf.keras.layers.Conv2D(
            filters=768,
            kernel_size=3,
            strides=1,
            use_bias=True,
            data_format="channels_first",
            name="conv2d"
        )(middles)
        return tf.identity(t, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                        graph_validator=lambda g: check_op_count(g, "Add", 0, disabled=False))
@check_tf_min_version("1.15")
def test_conv2d_dilations_rewriter(self):
    """Dilation rewriter for NHWC Conv2D and DepthwiseConv2D: no Reshape may remain."""
    x_shape = [2, 32, 16, 3]
    x_val = make_xval(x_shape)
    for p in ['SAME', 'VALID']:
        # Regular dilated convolution.
        def func(x):
            t = tf.keras.layers.Conv2D(filters=768, kernel_size=3, dilation_rate=3, padding=p)
            t.build(x_shape)
            y = t.call(x)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
        # Depthwise dilated convolution, same padding mode.
        def func(x):
            t = tf.keras.layers.DepthwiseConv2D(kernel_size=3, dilation_rate=3, padding=p)
            t.build(x_shape)
            y = t.call(x)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tf_cpu("only tf_gpu can run conv2d with NCHW format")
def test_nchw_conv2d_dilations_rewriter(self):
    """Dilation rewriter for channels_first Conv2D / DepthwiseConv2D: no Reshape may remain."""
    x_shape = [2, 3, 32, 16]
    x_val = make_xval(x_shape)
    for p in ['SAME', 'VALID']:
        # Regular dilated convolution, channels_first.
        def func(x):
            t = tf.keras.layers.Conv2D(
                filters=768,
                kernel_size=3,
                dilation_rate=3,
                padding=p,
                data_format='channels_first'
            )
            t.build(x_shape)
            y = t.call(x)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
        # Depthwise dilated convolution, channels_first.
        def func(x):
            t = tf.keras.layers.DepthwiseConv2D(
                kernel_size=3,
                dilation_rate=3,
                padding=p,
                data_format='channels_first'
            )
            t.build(x_shape)
            y = t.call(x)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tflite("TFlite adds ops that obscure pattern")
@allow_missing_shapes("Rewriting makes some shapes known")
def test_conv2d_dilations_rewriter_unknown_shape(self):
    """Dilation rewriter must still fire when spatial dims are unknown at graph build time."""
    x_shape = [2, 32, 16, 3]
    x_val = make_xval(x_shape)
    def func():
        # Placeholder with unknown H/W; the keras layer is built with the concrete shape.
        x = tf_placeholder(tf.float32, [2, None, None, 3], name=_TFINPUT)
        t = tf.keras.layers.Conv2D(filters=768, kernel_size=3, dilation_rate=3, padding="VALID")
        t.build(x_shape)
        y = t.call(x)
        return tf.identity(y, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2,
                        as_session=True, premade_placeholders=True,
                        graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tflite("TFlite adds ops that obscure pattern")
@skip_tf_cpu("only tf_gpu can run conv2d with NCHW format")
@allow_missing_shapes("Rewriting makes some shapes known")
def test_nchw_conv2d_dilations_rewriter_unknown_shape(self):
    """channels_first variant of the unknown-shape dilation-rewriter test."""
    x_shape = [2, 3, 32, 16]
    x_val = make_xval(x_shape)
    def func():
        x = tf_placeholder(tf.float32, [2, 3, None, None], name=_TFINPUT)
        t = tf.keras.layers.Conv2D(
            filters=768,
            kernel_size=3,
            dilation_rate=3,
            padding="VALID",
            data_format='channels_first'
        )
        t.build(x_shape)
        y = t.call(x)
        return tf.identity(y, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2,
                        as_session=True, premade_placeholders=True,
                        graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
def test_conv3d_dilations_rewriter(self):
    """Dilation rewriter for Conv3D (NDHWC): no Reshape may remain."""
    x_shape = [2, 32, 16, 8, 3]
    x_val = make_xval(x_shape)
    for p in ['SAME', 'VALID']:
        def func(x):
            t = tf.keras.layers.Conv3D(filters=768, kernel_size=3, dilation_rate=3, padding=p)
            t.build(x_shape)
            y = t.call(x)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tf_cpu("only tf_gpu can run conv3d with NCDHW format")
def test_ncdhw_conv3d_dilations_rewriter(self):
    """Dilation rewriter for channels_first Conv3D (NCDHW): no Reshape may remain."""
    x_shape = [2, 3, 32, 16, 8]
    x_val = make_xval(x_shape)
    for p in ['SAME', 'VALID']:
        def func(x):
            t = tf.keras.layers.Conv3D(
                filters=768,
                kernel_size=3,
                dilation_rate=3,
                padding=p,
                data_format='channels_first'
            )
            t.build(x_shape)
            y = t.call(x)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@skip_tf2("Uses tf.layers")
def test_conv1d_tf1_dilations_rewriter(self):
    """tf.layers.conv1d (TF1 API) dilation rewriter: no Reshape may remain."""
    x_shape = [2, 32, 3]
    x_val = make_xval(x_shape)
    for p in ['SAME', 'VALID']:
        def func(x):
            y = tf.layers.conv1d(x, filters=768, kernel_size=3, dilation_rate=3, padding=p, name="conv1")
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@skip_tf2("Uses tf.layers")
def test_conv1d_tf1_dilations_rewriter_unknown_shape(self):
    """TF1 conv1d dilation rewriter with an unknown sequence length."""
    x_shape = [2, 32, 3]
    x_val = make_xval(x_shape)
    def func():
        x = tf_placeholder(tf.float32, [2, None, 3], name=_TFINPUT)
        y = tf.layers.conv1d(x, filters=768, kernel_size=3, dilation_rate=3, padding="VALID", name="conv1")
        return tf.identity(y, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2,
                        as_session=True, premade_placeholders=True,
                        graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
def test_lrn_default(self):
    """local_response_normalization with all-default parameters."""
    shape = [1, 3, 4, 3]
    x_val = np.arange(1, 1 + np.prod(shape)).astype("float32").reshape(shape)

    def func(x):
        normed = tf.nn.local_response_normalization(x)
        return tf.identity(normed, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_lrn(self):
    """local_response_normalization with explicit radius/bias/alpha/beta."""
    # can't set bias = 0
    x_shape = [1, 2, 2, 8]
    x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
    def func(x):
        op = tf.nn.local_response_normalization(x, depth_radius=4, bias=2, alpha=2, beta=1)
        return tf.identity(op, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
@check_onnxruntime_incompatibility("Abs")
def test_abs(self):
    """tf.abs round-trips through conversion."""
    data = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(tensor_in):
        magnitude = tf.abs(tensor_in)
        return tf.identity(magnitude, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})
@check_onnxruntime_incompatibility("Add")
def test_const(self):
    """A graph constant feeding an Add converts correctly."""
    x_val = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)

    def func(x):
        const_term = tf.constant(x_val, name="y")
        return tf.add(x, const_term, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Add")
def test_add(self):
    """Elementwise add of an input with itself."""
    data = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x):
        doubled = tf.add(x, x)
        return tf.identity(doubled, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})
def test_placeholder(self):
    """A bare placeholder passed through Identity round-trips unchanged."""
    feed = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x):
        return tf.identity(x, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: feed})
def test_placeholder_with_default_use_default(self):
    """PlaceholderWithDefault where no feed is given: default constant is used."""
    x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
    def func():
        x = tf.constant(x_val, name="x")
        y = tf_placeholder_with_default(x, x_val.shape, name=_TFINPUT)
        return tf.identity(y, name=_TFOUTPUT)
    # Empty feed dict: the default value must flow through.
    self._run_test_case(func, [_OUTPUT], {}, as_session=True, premade_placeholders=True)
def test_placeholder_with_default_use_feed(self):
    """PlaceholderWithDefault where a feed IS given: the fed value wins over the default."""
    x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
    def func():
        x = tf.constant(x_val, name="x")
        y = tf_placeholder_with_default(x, x_val.shape, name=_TFINPUT)
        return tf.identity(y, name=_TFOUTPUT)
    x_feed_val = np.array([11.0, 22.0, -33.0, -44.0], dtype=np.float32).reshape((2, 2))
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_feed_val}, as_session=True, premade_placeholders=True)
def test_placeholder_with_default_computed_use_default(self):
    """PlaceholderWithDefault whose default is a computed tensor; 'use_default'
    forces conversion to treat it as that computed value."""
    x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
    y_val = np.array([2.0, -4.0, 6.0, -8.0], dtype=np.float32).reshape((2, 2))
    def func():
        x = tf_placeholder(tf.float32, x_val.shape, name=_TFINPUT)
        y = tf_placeholder(tf.float32, y_val.shape, name=_TFINPUT1)
        total = tf.add(x, y)
        z = tf_placeholder_with_default(total, x_val.shape, name=_TFINPUT2)
        total2 = tf.add(total, z)
        return tf.identity(total2, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, as_session=True,
                        premade_placeholders=True, process_args={'use_default': [_TFINPUT2]})
def test_placeholder_with_default_computed_ignore_default(self):
    """Counterpart to use_default: 'ignore_default' keeps the node a real input,
    so the fed z_val is used instead of the computed default."""
    x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
    y_val = np.array([2.0, -4.0, 6.0, -8.0], dtype=np.float32).reshape((2, 2))
    z_val = np.array([3.0, 6.0, 9.0, 10.0], dtype=np.float32).reshape((2, 2))
    def func():
        x = tf_placeholder(tf.float32, x_val.shape, name=_TFINPUT)
        y = tf_placeholder(tf.float32, y_val.shape, name=_TFINPUT1)
        total = tf.add(x, y)
        z = tf_placeholder_with_default(total, x_val.shape, name=_TFINPUT2)
        total2 = tf.add(total, z)
        return tf.identity(total2, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val}, as_session=True,
                        premade_placeholders=True, process_args={'ignore_default': [_TFINPUT2]})
def test_fold_cond_keras_learning_phase(self):
    """A keras_learning_phase placeholder must be removed and the tf.cond folded
    (validator requires zero If nodes)."""
    # keras_learning_phase can slip into frozen graphs and cause huge inefficiencies with If nodes.
    # Should be removed and Ifs folded.
    x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
    def func():
        x = tf_placeholder(tf.float32, [None, None], name=_TFINPUT)
        learning_phase = tf_placeholder_with_default(False, [], name="keras_learning_phase")
        y = tf.cond(learning_phase, lambda: x * 2, lambda: x * 3)
        return tf.identity(y, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, as_session=True, premade_placeholders=True,
                        graph_validator=lambda g: check_op_count(g, "If", 0, disabled=False))
@check_onnxruntime_incompatibility("Add")
def test_add_bcast(self):
    """Add broadcasting a rank-2 operand against a rank-3 operand."""
    lhs = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)
    rhs = np.arange(1.0, 9.0, dtype=np.float32).reshape((2, 2, 2))

    def func(x1, x2):
        total = tf.add(x1, x2)
        return tf.identity(total, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: lhs, _INPUT1: rhs})
@check_onnxruntime_incompatibility("Add")
def test_add_bcast1(self):
    """Broadcast Add taken from the ONNX broadcasting examples."""
    # example taken from onnx doc
    lhs = np.random.randn(3, 4, 5).astype(np.float32)
    rhs = np.random.randn(5).astype(np.float32)

    def func(x1, x2):
        total = tf.add(x1, x2)
        return tf.identity(total, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: lhs, _INPUT1: rhs})
def test_matmul0(self):
    """Square matmul of an input with itself."""
    mat = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x):
        product = tf.matmul(x, x)
        return tf.identity(product, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: mat})
@skip_tflite("Issue with matmul with 2 copies of same input")
def test_matmul1(self):
    """matmul with transpose_a, same tensor on both sides."""
    x_val = np.array([1.0, 2.0, -3.0, -4.0, 5.0, 6.0], dtype=np.float32).reshape((2, 3))
    def func(x):
        x_ = tf.matmul(x, x, transpose_a=True)
        return tf.identity(x_, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_matmul2(self):
    """matmul with transpose_b over two separate inputs."""
    lhs = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)
    rhs = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x, y):
        product = tf.matmul(x, y, transpose_b=True)
        return tf.identity(product, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: lhs, _INPUT1: rhs})
@unittest.skipIf(get_test_config().is_mac and get_test_config().is_onnxruntime_backend
                 and get_test_config().backend_version == "0.2.1", "onnxruntime 0.2.1 has bug on mac")
def test_matmul3(self):
    """Batched 4-D matmul with transpose_b, same tensor on both sides."""
    x_shape = [1, 12, 256, 64]
    x_val = np.arange(np.prod(x_shape)).astype("float32").reshape((x_shape))
    def func(x, y):
        x_ = tf.matmul(x, y, transpose_b=True)
        return tf.identity(x_, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_val}, rtol=1e-5)
@check_tf_min_version("2.6")
def test_matmulinteger(self):
    """int8 matmul with int32 output (maps to ONNX MatMulInteger)."""
    x_val = np.array([1, 2, -3, -4], dtype=np.int8).reshape((2, 2))
    y_val = np.array([1, 2, -3, -4], dtype=np.int8).reshape((2, 2))
    def func(x, y):
        x_ = tf.matmul(x, y, output_type=tf.int32)
        return tf.identity(x_, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_onnxruntime_incompatibility("Sub")
def test_sub(self):
    """Elementwise subtract of an input from itself."""
    data = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x):
        diff = tf.subtract(x, x)
        return tf.identity(diff, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})
@check_onnxruntime_incompatibility("Mul")
def test_multiply(self):
    """Elementwise multiply of an input with itself."""
    data = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x):
        squared = tf.multiply(x, x)
        return tf.identity(squared, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})
@check_onnxruntime_incompatibility("Div")
def test_div(self):
    """tf.realdiv of an input by itself."""
    data = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x):
        quotient = tf.realdiv(x, x)
        return tf.identity(quotient, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "float equality")
def test_div_no_nan(self):
    """divide_no_nan: 0 where the divisor is 0, including nan/inf numerators."""
    x_val = np.array([1.0, 2.0, -3.0, -4.0, 5.0, 0.0, float("nan"), float("-inf"), float("inf")], dtype=np.float32)
    y_val = np.array([1.0, 0.5, 0.0, -4.0, 0.0, 0.0, 0.0, 2.0, 0.0], dtype=np.float32)
    def func(x, y):
        x_ = tf.math.divide_no_nan(x, y)
        return tf.identity(x_, name=_TFOUTPUT)
    # TFLite expresses infinity as a value > 1e38
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, mtol=1e38)
@check_onnxruntime_incompatibility("Exp")
def test_exp(self):
    """tf.exp round-trips through conversion."""
    data = np.array([[1.0, 2.0], [-3.0, -4.0]], dtype=np.float32)

    def func(x):
        exped = tf.exp(x)
        return tf.identity(exped, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data}, rtol=1e-05)
@check_onnxruntime_incompatibility("Log")
def test_log(self):
    """tf.math.log over positive float32 input."""
    data = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)

    def func(x):
        logged = tf.math.log(x)
        return tf.identity(logged, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})
@check_onnxruntime_incompatibility("Log")
def test_log_double(self):
    """tf.math.log over float64 input (double precision path)."""
    data = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)

    def func(x):
        logged = tf.math.log(x)
        return tf.identity(logged, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data})
def test_gather(self):
    """Gather on a flattened tensor using precomputed flat indices."""
    x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
    idx = np.array([1, 0, 2], dtype=np.int32)
    # Per-row flat offsets into the reshaped (1-D) tensor.
    idx_flattened = np.array([row * x_val.shape[1] + idx for row in range(x_val.shape[0])])

    def func(x):
        flat = tf.reshape(x, [-1])
        gathered = tf.gather(flat, tf.constant(idx_flattened))
        return tf.identity(gathered, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(12, "GatherND with batch_dims")
def test_gather_batch_dims_no_trans(self):
    """tf.gather with batch_dims where axis == batch_dims (no transpose needed)."""
    x_val = np.arange(2 * 2 * 3 * 5 * 4, dtype=np.float32).reshape((2, 2, 3, 5, 4))
    idx_val = np.array([[[1, 0, 2, 0], [1, 1, 1, 0]], [[0, 0, 0, 0], [2, 1, 1, 0]]], dtype=np.int32)
    def func(x, idx):
        x_ = tf.gather(x, idx, batch_dims=2, axis=2)
        return tf.identity(x_, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: idx_val})
@check_tf_min_version("1.14")
@check_opset_min_version(12, "GatherND with batch_dims")
def test_gather_batch_dims(self):
    """tf.gather with batch_dims where axis > batch_dims (transpose required)."""
    x_val = np.arange(2 * 2 * 3 * 5 * 4, dtype=np.float32).reshape((2, 2, 3, 5, 4))
    idx_val = np.array([[[1, 0, 2, 0], [1, 1, 1, 0]], [[0, 0, 0, 0], [2, 1, 1, 0]]], dtype=np.int32)
    def func(x, idx):
        x_ = tf.gather(x, idx, batch_dims=2, axis=3)
        return tf.identity(x_, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: idx_val})
@check_opset_min_version(10, "Slice")
def test_roll_axis_scalar(self):
    """tf.roll with a scalar shift tensor and a scalar axis."""
    data = np.arange(4 * 3 * 5 * 2, dtype=np.float32).reshape((4, 3, 5, 2))
    shift_val = np.array(4, dtype=np.int64)
    axis = np.array(2, dtype=np.int32)

    def func(x, shift):
        rolled = tf.roll(x, shift, axis)
        return tf.identity(rolled, name=_TFOUTPUT)

    self._run_test_case(func, [_OUTPUT], {_INPUT: data, _INPUT1: shift_val})
    @check_opset_min_version(10, "Slice")
    def test_roll_axis_vector(self):
        """tf.roll with vector shifts/axes, including a repeated axis (1 appears twice)."""
        x_val = np.arange(4 * 3 * 5 * 2, dtype=np.float32).reshape((4, 3, 5, 2))
        shift_val = np.array([2, 3, 4], dtype=np.int32)
        axes_val = np.array([1, 2, 1], dtype=np.int32)
        def func(x, shift):
            x_ = tf.roll(x, shift, axes_val)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: shift_val})
    @check_opset_min_version(10, "Slice")
    def test_roll_neg_axis(self):
        """tf.roll with a negative axis (-1), mirroring a shift used in seq2seq decoders."""
        def func(input_ids):
            shifted_input_ids = tf.cast(input_ids, tf.int32)
            shifted_input_ids = tf.roll(shifted_input_ids, 1, axis=-1)
            return tf.identity(shifted_input_ids, name=_TFOUTPUT)
        x_val = np.array([[0, 1, 2, 3, 4, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7, 8]], dtype=np.int64)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(10, "Slice")
    def test_roll_neg_shift(self):
        """tf.roll with negative shifts, a shift larger than the dim (13), and axis -1."""
        x_val = np.arange(4 * 3 * 5 * 2, dtype=np.float32).reshape((4, 3, 5, 2))
        shift_val = np.array([-2, 13, -3], dtype=np.int32)
        axes_val = np.array([1, 2, -1], dtype=np.int32)
        def func(x, shift):
            x_ = tf.roll(x, shift, axes_val)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: shift_val})
    @check_tf_min_version("2.2")
    def test_large_model_format(self):
        """Conversion with large_model=True (external-tensor ONNX format) on a 2000-element const."""
        x_val = np.array([2.0], dtype=np.float32)
        y_const = np.arange(2000, dtype=np.float32)
        def func(x):
            x_ = tf.multiply(x, tf.constant(y_const))
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, large_model=True)
    @check_target('rs6', 'GatherNd')
    def test_gathernd(self):
        """tf.gather_nd with full-rank indices on 2D input, then scalar indices on 1D input."""
        x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
        indices = np.array([[[0, 1], [1, 1]], [[1, 2], [0, 2]]], dtype=np.int32)
        def func(x):
            x_ = tf.gather_nd(x, tf.constant(indices))
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        # Second case: 1D data, indices of shape (3, 2, 1) selecting single elements.
        x_val = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32)
        indices = np.array([[[0], [2]], [[4], [7]], [[6], [1]]], dtype=np.int32)
        def func(x):
            x_ = tf.gather_nd(x, tf.constant(indices))
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_target('rs6', 'GatherNd')
    def test_gathernd_less_index(self):
        """tf.gather_nd where index depth is less than input rank (gathers slices, not scalars)."""
        x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
        indices = np.array([[[0], [1]], [[2], [0]]], dtype=np.int32)
        def func(x):
            x_ = tf.gather_nd(x, tf.constant(indices))
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        # shape: 2*2*2
        x_val = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32)
        indices = np.array([[[0, 0], [0, 1]], [[1, 0], [1, 1]]], dtype=np.int32)
        def func(x):
            x_ = tf.gather_nd(x, tf.constant(indices))
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend()
    @check_opset_min_version(7, "tile")
    def test_tile(self):
        """tf.tile with a constant multiples tensor."""
        x_val = np.array([[0, 1], [2, 3]], dtype=np.float32)
        def func(x):
            multiple = tf.constant([2, 2])
            x_ = tf.tile(x, multiple)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "tile")
    def test_tile_const(self):
        """Tile of a constant input: the graph validator asserts it is constant-folded away."""
        # Should be folded
        x_val = np.array([[0, 1], [2, 3]], dtype=np.float32)
        def func():
            multiple = tf.constant([1000, 2])
            x_ = tf.tile(tf.constant(x_val), multiple)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {}, graph_validator=lambda g: check_op_count(g, "Tile", 0, disabled=False))
    @check_opset_min_version(7, "tile")
    def test_tile_large_const(self):
        """Tile whose folded result would be huge: validator asserts folding is skipped (Tile kept)."""
        # Should not be folded since it is so large
        x_val = np.array([[0, 1], [2, 3]], dtype=np.float32)
        def func():
            multiple = tf.constant([1000000, 2])
            x_ = tf.tile(tf.constant(x_val), multiple)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {}, graph_validator=lambda g: check_op_count(g, "Tile", 1, disabled=False))
    @check_onnxruntime_incompatibility("Neg")
    def test_neg(self):
        """tf.negative on mixed-sign float32 input."""
        x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.negative(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_onnxruntime_incompatibility("Mul")
    def test_square(self):
        """tf.square on mixed-sign float32 input (maps to Mul in ONNX)."""
        x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.square(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_onnxruntime_incompatibility("Min")
    def test_min(self):
        """tf.minimum for float32 and int32 element-wise inputs."""
        x_val1 = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32).reshape((2, 2))
        x_val2 = np.array([4.0, 4.0, 4.0, 4.0], dtype=np.float32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.minimum(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
        # int32 variant; note 1.6 truncates to 1 under dtype=np.int32 — presumably intentional.
        x_val1 = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.int32).reshape((2, 2))
        x_val2 = np.array([4.0, 4.0, 4.0, 4.0], dtype=np.int32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.minimum(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    @skip_caffe2_backend("issue with broadcasting scalar")
    @check_onnxruntime_incompatibility("Sub")
    def test_min_broadcast(self):
        """tf.minimum broadcasting a (2,2) input against a length-1 constant."""
        # tests if the broadcast for min/max is working
        x_val1 = np.array([2.0, 16.0, 5.0, 1.6], dtype=np.float32).reshape((2, 2))
        x_val2 = np.array([4.0], dtype=np.float32)
        def func(x1):
            x2 = tf.constant(x_val2, dtype=tf.float32, name='x2')
            mi = tf.minimum(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
    @check_onnxruntime_incompatibility("Add")
    def test_logicaland(self):
        """tf.logical_and on boolean 2x2 inputs."""
        x_val1 = np.array([1, 0, 1, 1], dtype=bool).reshape((2, 2))
        x_val2 = np.array([0, 1, 1, 1], dtype=bool).reshape((2, 2))
        def func(x1, x2):
            mi = tf.logical_and(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    @check_onnxruntime_incompatibility("Greater")
    def test_greater(self):
        """tf.greater and tf.greater_equal on float32 inputs (includes equal elements)."""
        for op in [tf.greater, tf.greater_equal]:
            x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
            x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
            def func(x1, x2):
                mi = op(x1, x2)
                return tf.identity(mi, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    @check_onnxruntime_incompatibility("Greater")
    def test_greater_unsupport_type(self):
        """tf.greater/greater_equal on int32, a type ONNX Greater does not natively support."""
        for op in [tf.greater, tf.greater_equal]:
            x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
            x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
            def func(x1, x2):
                mi = op(x1, x2)
                return tf.identity(mi, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    @check_onnxruntime_incompatibility("Less")
    def test_less(self):
        """tf.less on float32 inputs."""
        x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
        x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.less(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    @check_onnxruntime_incompatibility("Less")
    def test_less_unsupport_type(self):
        """tf.less on int32, a type ONNX Less does not natively support."""
        x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
        x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.less(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    @check_opset_min_version(11, "Equal")
    def test_equal_float(self):
        """tf.equal on float32 (float support for Equal requires opset 11)."""
        x_val1 = np.array([0., 1., 2., 3., 4., -1., -2], dtype=np.float32)
        x_val2 = np.array([0., 1., 2.1, 3.5, 4.6, -1.1, -2.9], dtype=np.float32)
        def func(x1, x2):
            mi = tf.equal(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
def test_equal_string(self):
x_val1 = np.array(['1'], dtype=np.string_)
x_val2 = np.array(['2'], dtype=np.string_)
def func(x1, x2):
mi = tf.equal(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    def test_equal(self):
        """tf.equal on int32 and on float32 inputs."""
        x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
        x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.equal(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
        # float32 variant of the same comparison.
        x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
        x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.equal(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    def test_not_equal(self):
        """tf.not_equal on int32 and on float32 inputs."""
        x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
        x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.not_equal(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
        # float32 variant of the same comparison.
        x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
        x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
        def func(x1, x2):
            mi = tf.not_equal(x1, x2)
            return tf.identity(mi, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    def test_sequeeze_no_axis_specified(self):
        """tf.squeeze without axis removes all size-1 dims. (Name typo 'sequeeze' is historical.)"""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2, 1, 1))
        def func(x):
            x_ = tf.squeeze(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_sequeeze_no_axis(self):
        """tf.squeeze without axis on an input that has no size-1 dims (no-op squeeze)."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.squeeze(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Pad")
    def test_sequeeze_no_axis_specified_unknown_rank(self):
        """tf.squeeze (no axis) when the input rank is unknown at conversion time.

        The Pad-then-Reshape construction hides the rank from shape inference.
        """
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        y_val = np.array([2, 1, 2, 1, 1], dtype=np.int64)
        z_val = np.zeros((1, 2), dtype=np.int64)
        def func(x, y, z):
            y_ = tf.pad(y, z)
            x_ = tf.reshape(x, y_)
            x_ = tf.squeeze(x_)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    def test_sequeeze_positive_axis(self):
        """tf.squeeze with an explicit positive axis."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2, 1))
        def func(x):
            x_ = tf.squeeze(x, [2])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_sequeeze_negative_axis(self):
        """tf.squeeze with a negative axis (-1)."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2, 1))
        def func(x):
            x_ = tf.squeeze(x, [-1])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_sequeeze_mixed_axis(self):
        """tf.squeeze with a mix of positive (0) and negative (-1) axes."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((1, 2, 2, 1))
        def func(x):
            x_ = tf.squeeze(x, [0, -1])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Squeeze")
    def test_sequeeze_mixed_axis_unknown_rank(self):
        """tf.squeeze with mixed-sign axes on an input whose rank is hidden from inference."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        y_val = np.array([2, 1, 2, 1, 1], dtype=np.int64)
        z_val = np.zeros((1, 2), dtype=np.int64)
        def func(x, y, z):
            # Pad-then-Reshape makes the resulting rank unknown at conversion time.
            y_ = tf.pad(y, z)
            x_ = tf.reshape(x, y_)
            x_ = tf.squeeze(x_, [1, -1])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    def test_transpose(self):
        """tf.transpose without an explicit perm (defaults to reversing dims)."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=np.float32).reshape((2, 3))
        def func(x):
            x_ = tf.transpose(x)  # perm=[1,0])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_reshape(self):
        """tf.reshape with a constant target shape; also validates output shape metadata."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            shape = tf.constant([1, 4])
            x_ = tf.reshape(x, shape)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_shape=True)
    def test_reshape_reshape(self):
        """Back-to-back reshapes: the validator asserts the optimizer merges them into one."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            shape = tf.constant([1, 4])
            shape_2 = tf.constant([4, 1])
            x_ = tf.reshape(x, shape)
            x_ = tf.reshape(x_, shape_2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
                            graph_validator=lambda g: check_op_count(g, "Reshape", 1, disabled=False))
    @check_opset_min_version(6, "cast")
    def test_reshape_int(self):
        """tf.reshape on int32 data with a constant shape."""
        x_val = np.array([1, 2, 3, 4], dtype=np.int32).reshape((2, 2))
        def func(x):
            shape = tf.constant([1, 4])
            x_ = tf.reshape(x, shape)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_shape=True)
    @check_opset_min_version(6, "cast")
    def test_reshape_dynamic(self):
        """tf.reshape where the target shape is a graph input rather than a constant."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
        shape_val = np.array([4, 1], dtype=np.int32)
        def func(x, shape):
            x_ = tf.reshape(x, shape)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: shape_val}, check_shape=True)
    @check_onnxruntime_incompatibility("Relu")
    def test_relu(self):
        """tf.nn.relu on mixed-sign input."""
        x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.nn.relu(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend("fails on caffe2 with dim issue")
    @check_onnxruntime_incompatibility("Mul")
    @check_tf_min_version("1.6")
    def test_leaky_relu_int(self):
        """tf.nn.leaky_relu on integer features for several alphas (positive/negative, |a|<=1)."""
        # starting from tf 1.6, leaky_relu supports `feature` x of int type
        x_types = [np.int32, np.int64]
        for x_type in x_types:
            # random_sample yields floats in [0,1); astype truncates them to ints.
            x_val = 1000 * np.random.random_sample([1000, 100]).astype(x_type)
            for alpha in [0.1, -0.1, 1.0, -1.0]:
                def func(x):
                    x_ = tf.nn.leaky_relu(x, alpha)
                    return tf.identity(x_, name=_TFOUTPUT)
                self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend("fails on caffe2 with dim issue")
    @check_onnxruntime_incompatibility("Mul")
    def test_leaky_relu_with_dependency(self):
        """Hand-built leaky_relu pattern (max(alpha*x, x)) whose intermediate has another consumer.

        Ensures the LeakyRelu rewriter does not fuse away a node that a second
        output still depends on.
        """
        x_val = 1000 * np.random.random_sample([1000, 100]).astype(np.float32)
        def func(x):
            # simulate leaky_relu
            alpha = tf.constant(0.5)
            y = alpha * x
            x_ = tf.maximum(y, x)
            dependency = y - 1
            return tf.identity(x_, name=_TFOUTPUT), tf.identity(dependency, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
    @skip_caffe2_backend("fails on caffe2 with dim issue")
    @check_onnxruntime_incompatibility("Mul")
    def test_leaky_relu_float(self):
        """tf.nn.leaky_relu on float32 features for several alphas."""
        x_val = 1000 * np.random.random_sample([1000, 100]).astype(np.float32)
        for alpha in [0.1, -0.1, 1.0, -1.0]:
            def func(x):
                x_ = tf.nn.leaky_relu(x, alpha)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_onnxruntime_incompatibility("Elu")
    def test_elu(self):
        """tf.nn.elu on mixed-sign input."""
        x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.nn.elu(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_onnxruntime_incompatibility("Tanh")
    def test_tanh(self):
        """tf.tanh with a loosened rtol for float rounding differences."""
        x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.tanh(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
    def test_relu6(self):
        """tf.nn.relu6 with values below 0, inside [0, 6], and above 6 (both clip edges)."""
        x_val = np.array([0.5, 1.0, -0.5, -1.0, 6, 7], dtype=np.float32).reshape((2, 3))
        def func(x):
            x_ = tf.nn.relu6(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_onnxruntime_incompatibility("Sub")
    def test_relu6_dynamic(self):
        """tf.nn.relu6 on a smaller input; exercises the Sub-based fallback lowering."""
        x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.nn.relu6(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_concat(self):
        """tf.concat of three float32 inputs along axis 0."""
        x_val1 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
        x_val2 = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.float32)
        x_val3 = np.array([[13, 14, 15], [16, 17, 18]], dtype=np.float32)
        def func(x1, x2, x3):
            x_ = tf.concat([x1, x2, x3], 0)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, "input3:0": x_val3})
    def test_concat_empty_const_input(self):
        """tf.concat where one constant operand is empty.

        Covers three layouts: empty 1D const on axis 0, empty (1, 0) const on
        axis 1, and an empty const sandwiched between two non-empty inputs.
        """
        x_val1 = np.array([1, 2, 3], dtype=np.float32)
        x_val2 = np.array([], dtype=np.float32)
        def func(x1):
            x2 = tf.constant(x_val2, dtype=tf.float32)
            x_ = tf.concat([x1, x2], 0)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
        # Empty const with shape (1, 0), concatenated on axis 1.
        x_val1 = np.array([[1, 2, 3]], dtype=np.float32)
        x_val2 = np.array([[]], dtype=np.float32)
        def func(x1):
            x2 = tf.constant(x_val2, dtype=tf.float32)
            x_ = tf.concat([x1, x2], 1)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
        # Empty const between two non-empty inputs.
        x_val1 = np.array([1, 2, 3], dtype=np.float32)
        x_val2 = np.array([], dtype=np.float32)
        x_val3 = np.array([13, 14, 15], dtype=np.float32)
        def func(x1, x3):
            x2 = tf.constant(x_val2, dtype=tf.float32)
            x_ = tf.concat([x1, x2, x3], 0)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val3})
    @check_opset_min_version(6, "cast")
    def test_concat_int64(self):
        """tf.concat of three int64 inputs along axis 0."""
        x_val1 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
        x_val2 = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int64)
        x_val3 = np.array([[13, 14, 15], [16, 17, 18]], dtype=np.int64)
        def func(x1, x2, x3):
            x_ = tf.concat([x1, x2, x3], 0)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, "input3:0": x_val3})
    def test_concat_negative_axis(self):
        """tf.concat along a negative axis (-1)."""
        x_val1 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
        x_val2 = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.float32)
        x_val3 = np.array([[13, 14, 15], [16, 17, 18]], dtype=np.float32)
        def func(x1, x2, x3):
            x_ = tf.concat([x1, x2, x3], -1)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, "input3:0": x_val3})
    def test_concat_negative_axis_none_shape(self):
        """tf.concat on axis -1 where one operand's shape is unknown (produced by a dynamic slice).

        Built with explicit placeholders in a session so the sliced tensor
        really has a None shape at conversion time.
        """
        x_val = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=np.float32).reshape((2, 3))
        y_val = np.array([7.0, 8.0, 9.0, 10.0, 11.0, 12.0], dtype=np.float32).reshape((2, 3))
        s1_val = np.array([1, 1], dtype=np.int32)
        s2_val = np.array([1, 1], dtype=np.int32)
        def func():
            x = tf_placeholder(tf.float32, [2, 3], name=_TFINPUT)
            y = tf_placeholder(tf.float32, [2, 3], name=_TFINPUT1)
            s1 = tf_placeholder(tf.int32, [2], name="input3")
            s2 = tf_placeholder(tf.int32, [2], name="input4")
            # Slice size s1+s2 is data-dependent, so the slice output shape is unknown.
            s = tf.add(s1, s2)
            x_with_none_shape = tf.slice(x, [0, 0], s)
            t = tf.concat([x_with_none_shape, y], -1)
            return tf.identity(t, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, "input3:0": s1_val, "input4:0": s2_val},
                            as_session=True, premade_placeholders=True)
    def test_concat_const_string(self):
        """tf.concat of a string input with a string constant, including non-ASCII characters."""
        x_val1 = np.array([["Hello world", "abc"], ["def", "♦♥♠♣"]], dtype=str)
        const_val = np.array([["Hello there", "wxyz"], ["", "π"]], dtype=str)
        def func(x1):
            x_ = tf.concat([x1, const_val], 0)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
    @check_onnxruntime_incompatibility("Pow")
    def test_pow(self):
        """tf.pow with a constant exponent tensor."""
        x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32)
        e = np.array([2.0, 2.0, 2.0, 2.0], dtype=np.float32)
        def func(x):
            x_ = tf.pow(x, tf.constant(e))
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_embedding_lookup(self):
        """tf.nn.embedding_lookup into a constant embedding table."""
        x_val1 = np.array([[1]], dtype=np.int32)
        x_val2 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32)
        def func(x):
            t = tf.constant(x_val2)
            x_ = tf.nn.embedding_lookup(t, x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
    @skip_tflite("Advanced constant shape folding not implemented for tflite")
    @skip_tfjs("Advanced constant folding not implemented for tfjs")
    def test_slice_from_shape_const_fold(self):
        """Strided-slice of a Shape with one known dim: result must fully constant-fold.

        The reshape to [-1, 3] fixes dim 1, so slicing shape[1:2] is a known
        constant; the validator asserts the graph collapses to input + const.
        """
        x_val = np.array([4, 3], dtype=np.int64)
        x_shape = np.array([-1, 3], dtype=np.int64)
        def func(x):
            z = tf.zeros(x)
            x = tf.reshape(z, tf.constant(x_shape))
            s = tf.shape(x)
            t1 = tf.constant([1], dtype=tf.int32)
            t2 = tf.constant([2], dtype=tf.int32)
            y = tf.strided_slice(s, t1, t2, shrink_axis_mask=1)
            return tf.identity(y, name=_TFOUTPUT)
        def graph_validator(g):
            # After constant folding just an input and const output node remain
            return len(g.get_nodes()) == 2
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, graph_validator=graph_validator)
    @timeout(5)
    def test_slice_const_fold_halts(self):
        """Mixed const-foldable and non-foldable strided slices must not loop forever.

        Regression test for an infinite loop during constant folding; the
        @timeout(5) decorator turns a hang into a test failure.
        """
        # Regression test for infinite loop during constant-folding.
        x_val = np.array([4, 3], dtype=np.int32)
        x_shape = np.array([-1, 3], dtype=np.int32)
        def func(x):
            x_reshaped = tf.reshape(tf.zeros(x), tf.constant(x_shape))
            s = tf.shape(x_reshaped)
            const1 = tf.constant([1], dtype=tf.int32)
            const2 = tf.constant([2], dtype=tf.int32)
            # s_indexed folds to a constant; x_indexed depends on graph input x.
            s_indexed = tf.strided_slice(s, const1, const2, strides=const1, shrink_axis_mask=1)
            x_indexed = tf.strided_slice(x, const1, const2, strides=const1, shrink_axis_mask=1)
            mul = tf.multiply(s_indexed, tf.constant(2, dtype=s_indexed.dtype))
            add = const1 + x_indexed + mul
            return tf.identity(add, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_slice(self):
        """tf.slice with constant begin and size tensors."""
        x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
        def func(x):
            t1 = tf.constant([0, 1], dtype=tf.int32)
            t2 = tf.constant([2, 2], dtype=tf.int32)
            x_ = tf.slice(x, t1, t2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_slice_neg_size(self):
        """tf.slice with size -1 in a dim, meaning 'to the end' along that dim."""
        x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
        def func(x):
            t1 = tf.constant([0, 1], dtype=tf.int32)
            t2 = tf.constant([-1, 2], dtype=tf.int32)
            x_ = tf.slice(x, t1, t2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice in opset 10 can accept dymaic 'start' and 'ends'")
def test_slice_with_non_const(self):
x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
t1 = np.array([0, 1], dtype=np.int32)
t2 = np.array([2, 2], dtype=np.int32)
def func(x, t1, t2):
x_ = tf.slice(x, t1, t2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: t1, _INPUT2: t2})
@check_opset_min_version(10, "Slice in opset 10 can accept dymaic 'start' and 'ends'")
def test_slice_with_size_is_negative_one(self):
x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
t1 = np.array([0, 1], dtype=np.int32)
# input "size" contains -1
t2 = np.array([2, -1], dtype=np.int32)
def func(x, t1, t2):
x_ = tf.slice(x, t1, t2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: t1, _INPUT2: t2})
    @skip_caffe2_backend()
    def test_slice1(self):
        """tf.slice on a rank-3 tensor with constant begin/size."""
        # FIXME: only 1 dimension supported by caffe2
        x_val = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]], dtype=np.float32)
        def func(x):
            t1 = tf.constant([1, 0, 0], dtype=tf.int32)
            t2 = tf.constant([1, 1, 3], dtype=tf.int32)
            x_ = tf.slice(x, t1, t2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_split(self):
        """tf.split with explicit unequal sizes, keeping only the first output."""
        x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
        def func(x):
            x_, _, _ = tf.split(x, [4, 15, 11], 1)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(13, "Split")
    def test_split_nonconst(self):
        """tf.split where the split sizes come from a graph input (opset 13 Split input)."""
        x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
        y_val = np.array([4, 15, 11], np.int32)
        def func(x, y):
            x_, _, _ = tf.split(x, y, 1)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @skip_tfjs("TFJS executes model incorrectly")
    def test_split_with_more_outputs(self):
        """tf.split returning all three outputs, verified via named split outputs."""
        x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
        def func(x):
            return tf.split(x, [4, 15, 11], 1, name="split_test")
        self._run_test_case(func, ["split_test:0", "split_test:1", "split_test:2"], {_INPUT: x_val})
    def test_negative_split(self):
        """tf.split with -1 as one size (infer remainder)."""
        x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
        def func(x):
            x_, _, _ = tf.split(x, [4, 15, -1], 1)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_reducesum(self):
        """tf.reduce_sum over all axes (no axis argument)."""
        # not supported by onnx-caffe2
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.reduce_sum(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(13, "ReduceSum")
    def test_reducesum_nonconst_axis(self):
        """tf.reduce_sum with axes supplied as a graph input (opset 13 axes input)."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2))
        y_val = np.array([1, 2], dtype=np.int32)
        def func(x, y):
            x_ = tf.reduce_sum(x, axis=y)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(13, "ReduceSum")
    def test_reducesum_empty_axis(self):
        """tf.reduce_sum with an empty dynamic axes tensor (no reduction)."""
        x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2))
        y_val = np.array([], dtype=np.int32)
        def func(x, y):
            x_ = tf.reduce_sum(x, axis=y)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(11, "ScatterND")
    def test_segment_sum_data_vector(self):
        """tf.math.segment_sum with 1D data."""
        segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
        data_val = np.array([5, 1, 7, 2, 3, 4, 1, 3], dtype=np.float32)
        def func(data, segments):
            x_ = tf.math.segment_sum(data, segments)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: segs_val})
    @check_opset_min_version(11, "ScatterND")
    def test_segment_sum_unknown_rank(self):
        """tf.math.segment_sum when the data rank is hidden from shape inference."""
        segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
        data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
        data_shape_val = np.array([8, 2, 3, 1], dtype=np.int64)
        shape_pad_val = np.zeros((1, 2), dtype=np.int64)
        def func(data, segments, data_shape, shape_pad):
            # Some hackery to make the rank unknown
            data_shape_ = tf.pad(data_shape, shape_pad, constant_values=0)
            data = tf.reshape(data, data_shape_)
            x_ = tf.math.segment_sum(data, segments)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT],
                            {_INPUT: data_val, _INPUT1: segs_val, _INPUT2: data_shape_val, _INPUT3: shape_pad_val})
    @check_opset_min_version(11, "ScatterND")
    def test_segment_ops_data_tensor(self):
        """segment_sum/prod/min/max on a rank-3 data tensor."""
        for tf_op in [tf.math.segment_sum, tf.math.segment_prod, tf.math.segment_min, tf.math.segment_max]:
            segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
            data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
            def func(data, segments):
                x_ = tf_op(data, segments)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: segs_val})
    @check_opset_min_version(11, "ScatterND")
    @skip_tflite("unknown rank")
    def test_segment_mean_unknown_rank(self):
        """tf.math.segment_mean when the data rank is hidden from shape inference."""
        segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
        data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
        data_shape_val = np.array([8, 2, 3, 1], dtype=np.int64)
        shape_pad_val = np.zeros((1, 2), dtype=np.int64)
        def func(data, segments, data_shape, shape_pad):
            # Some hackery to make the rank unknown
            data_shape_ = tf.pad(data_shape, shape_pad, constant_values=0)
            data = tf.reshape(data, data_shape_)
            x_ = tf.math.segment_mean(data, segments)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT],
                            {_INPUT: data_val, _INPUT1: segs_val, _INPUT2: data_shape_val, _INPUT3: shape_pad_val})
    @check_opset_min_version(11, "ScatterND")
    def test_sparse_segment_sum(self):
        """tf.sparse.segment_sum with explicit row indices and segment ids."""
        data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
        indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
        segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3, 3], dtype=np.int32)
        def func(data, indices, segments):
            x_ = tf.sparse.segment_sum(data, indices, segments)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
    @check_opset_min_version(11, "ScatterND")
    def test_sparse_segment_mean(self):
        """tf.sparse.segment_mean with explicit row indices and segment ids."""
        data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
        indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
        segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3, 3], dtype=np.int32)
        def func(data, indices, segments):
            x_ = tf.sparse.segment_mean(data, indices, segments)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
    @check_opset_min_version(11, "ScatterND")
    def test_sparse_segment_sqrtn(self):
        """tf.sparse.segment_sqrt_n with explicit row indices and segment ids."""
        data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
        indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
        segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3, 3], dtype=np.int32)
        def func(data, indices, segments):
            x_ = tf.sparse.segment_sqrt_n(data, indices, segments)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
    @check_opset_min_version(11, "ScatterND")
    def test_sparse_segment_ops_with_num_segments(self):
        """sparse segment sum/mean/sqrt_n with num_segments > max(segment id) (empty segments)."""
        for tf_op in [tf.sparse.segment_sum, tf.sparse.segment_mean, tf.sparse.segment_sqrt_n]:
            data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
            indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
            segs_val = np.array([0, 0, 0, 1, 3, 3, 4, 4, 4], dtype=np.int32)
            def func(data, indices, segments):
                x_ = tf_op(data, indices, segments, num_segments=6)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
    @check_opset_min_version(11, "ScatterND")
    @check_tf_min_version("2.3", "needs tf 2.3")
    def test_unsorted_segment_ops(self):
        """All six tf.math.unsorted_segment_* ops with unsorted segment ids."""
        tf_ops = [
            tf.math.unsorted_segment_max,
            tf.math.unsorted_segment_min,
            tf.math.unsorted_segment_sum,
            tf.math.unsorted_segment_prod,
            tf.math.unsorted_segment_mean,
            tf.math.unsorted_segment_sqrt_n,
        ]
        for tf_op in tf_ops:
            segs_val = np.array([1, 3, 0, 1, 2, 4, 2, 1], dtype=np.int32)
            data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
            def func(data, segments):
                x_ = tf_op(data, segments, num_segments=5)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: segs_val})
    @check_opset_min_version(11, "ScatterND")
    @check_tf_min_version("2.3", "num_segments can be int64 in tf 2.3")
    def test_segment_op_types(self):
        """sparse.segment_sum across all combinations of data/index/segment/num_segments dtypes."""
        data_dtypes = [np.int32, np.float32]
        seg_dtypes = [np.int32, np.int64]
        for dtypes in product(data_dtypes, seg_dtypes, seg_dtypes, seg_dtypes):
            data_val = np.arange(8 * 2 * 3, dtype=dtypes[0]).reshape([8, 2, 3])
            indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=dtypes[1])
            segs_val = np.array([0, 0, 0, 1, 3, 3, 4, 4, 4], dtype=dtypes[2])
            def func(data, indices, segments):
                x_ = tf.sparse.segment_sum(data, indices, segments, num_segments=np.array(6, dtype=dtypes[3]))
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
    @check_opset_min_version(11, "CumSum")
    @check_tf_min_version("1.14")
    def test_set_union(self):
        """tf.sets.union; checks all three sparse outputs (indices, values, dense_shape)."""
        a_val = np.array([[10, 2, 30, 2, 5], [10, 9, 1, 9, 3]], np.int32)
        b_val = np.array([[4, 5, 10, 8, 9], [1, 4, 1, 1, 5]], np.int32)
        def func(a, b):
            s = tf.sets.union(a, b)
            indices, values, shape = s.indices, s.values, s.dense_shape
            indices = tf.identity(indices, name=_TFOUTPUT)
            values = tf.identity(values, name=_TFOUTPUT1)
            shape = tf.identity(shape, name=_TFOUTPUT2)
            return indices, values, shape
        self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: a_val, _INPUT1: b_val})
    @check_opset_min_version(11, "CumSum")
    @check_tf_min_version("1.14")
    def test_set_intersection(self):
        """tf.sets.intersection; checks all three sparse outputs (indices, values, dense_shape)."""
        a_val = np.array([[10, 2, 30, 2, 5], [10, 9, 1, 9, 3]], np.int32)
        b_val = np.array([[4, 5, 10, 8, 9], [1, 4, 1, 1, 5]], np.int32)
        def func(a, b):
            s = tf.sets.intersection(a, b)
            indices, values, shape = s.indices, s.values, s.dense_shape
            indices = tf.identity(indices, name=_TFOUTPUT)
            values = tf.identity(values, name=_TFOUTPUT1)
            shape = tf.identity(shape, name=_TFOUTPUT2)
            return indices, values, shape
        self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: a_val, _INPUT1: b_val})
    @check_opset_min_version(11, "CumSum")
    @check_tf_min_version("1.14")
    def test_set_difference(self):
        """Convert tf.sets.difference in both directions (a - b and b - a)."""
        a_val = np.array([[10, 2, 30, 2, 5], [10, 9, 1, 9, 3]], np.int32)
        b_val = np.array([[4, 5, 10, 8, 9], [1, 4, 1, 1, 5]], np.int32)
        # aminusb toggles the direction of the set difference
        for aminusb in [True, False]:
            def func(a, b):
                s = tf.sets.difference(a, b, aminusb)
                indices, values, shape = s.indices, s.values, s.dense_shape
                indices = tf.identity(indices, name=_TFOUTPUT)
                values = tf.identity(values, name=_TFOUTPUT1)
                shape = tf.identity(shape, name=_TFOUTPUT2)
                return indices, values, shape
            self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: a_val, _INPUT1: b_val})
@check_onnxruntime_incompatibility("Sqrt")
def test_sqrt(self):
x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.sqrt(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def _test_range_const(self, extra_opset=None):
        """Convert tf.range with constant start/limit/delta in several variants:
        default start, empty range, negative step, float range, single-element range.

        extra_opset: optional opset to pass through to the conversion (used by the
        ms-domain wrapper tests)."""
        process_args = {}
        if extra_opset is not None:
            process_args["extra_opset"] = [extra_opset]
        def func():
            x = tf.range(5)
            return tf.identity(x, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        def func():
            # start == limit -> empty range
            x = tf.range(3, 3, 5)
            return tf.identity(x, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        def func():
            x = tf.range(0, -5, -2)
            return tf.identity(x, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        def func():
            x = tf.range(-5.0, 5.0, 1.5)
            return tf.identity(x, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        def func():
            # delta larger than the interval -> single-element range
            x = tf.range(2.5, 5.0, 10.0)
            return tf.identity(x, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
    def _test_range_non_const(self, extra_opset=None):
        """Convert tf.range where some arguments arrive as runtime inputs.

        extra_opset: optional opset to pass through to the conversion; when given,
        asserts the resulting Range node lives in that domain (for the cases where
        TF's constant folding cannot pre-compute the range)."""
        process_args = {}
        if extra_opset is not None:
            process_args["extra_opset"] = [extra_opset]
        def func():
            x = tf.range(5.0)
            return tf.identity(x, name=_TFOUTPUT)
        g = self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        # TODO: tf-2.0 uses the optimizer which will most likely make the range const which is not what we want to test
        # self.assertTrue(extra_opset is None
        #                or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
        def func():
            x = tf.range(0, -5.0, -2)
            return tf.identity(x*x, name=_TFOUTPUT)
        g = self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        # TODO: tf-2.0 uses the optimizer which will most likely make the range const which is not what we want to test
        # self.assertTrue(extra_opset is None
        #                or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
        # disable this case due to onnxruntime loop issue
        # https://github.com/microsoft/onnxruntime/issues/1272
        # x = tf.range(3.0, 3.0, 5)
        # return tf.identity(x, name=_TFOUTPUT)
        # g = self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        # self.assertTrue(extra_opset is None
        #                or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
        # delta fed at runtime keeps the Range from being constant-folded
        delta_val = np.array(1.5, dtype=np.float32)
        def func(delta):
            x = tf.range(-5.0, 5.0, delta)
            return tf.identity(x, name=_TFOUTPUT)
        g = self._run_test_case(func, [_OUTPUT], {_INPUT: delta_val}, process_args=process_args)
        self.assertTrue(extra_opset is None
                        or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
        # start fed at runtime
        start_val = np.array(2.5, dtype=np.float32)
        def func(start):
            x = tf.range(start, 5.0, 10.0)
            return tf.identity(x, name=_TFOUTPUT)
        g = self._run_test_case(func, [_OUTPUT], {_INPUT: start_val}, process_args=process_args)
        self.assertTrue(extra_opset is None
                        or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
    @check_opset_min_version(7, "cast")
    def test_range_const(self):
        """Constant tf.range conversion with the default opset."""
        self._test_range_const()
    def test_range_non_const(self):
        """Non-constant tf.range conversion with the default opset."""
        self._test_range_non_const()
    @test_ms_domain()
    def test_ms_range_const(self, extra_opset):
        """Constant tf.range conversion using the Microsoft extension domain."""
        self._test_range_const(extra_opset)
    @test_ms_domain()
    def test_ms_range_non_const(self, extra_opset):
        """Non-constant tf.range conversion using the Microsoft extension domain."""
        self._test_range_non_const(extra_opset)
@check_onnxruntime_incompatibility("Sqrt")
def test_rsqrt(self):
x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.rsqrt(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
@check_onnxruntime_incompatibility("Reciprocal")
def test_reciprocal(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.reciprocal(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
def test_reducemax(self):
# not supported by onnx-caffe2
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.reduce_max(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
    def test_reducemax_global_max_pool(self):
        """Convert tf.reduce_max over the trailing spatial axes (the pattern the
        converter can map to a global max pool), with and without keepdims."""
        for keepdims in [True, False]:
            x_val = make_xval((2, 3, 4, 5, 6))
            def func(x):
                x_ = tf.reduce_max(x, axis=[2, 3, 4], keepdims=keepdims)
                return tf.add(x_, 0, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
def test_reduceprod(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.reduce_prod(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_reducemean(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.reduce_mean(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_reducemean_global_avg_pool(self):
        """Convert tf.reduce_mean over the trailing spatial axes (the pattern the
        converter can map to a global average pool), with and without keepdims."""
        for keepdims in [True, False]:
            x_val = make_xval((2, 3, 4, 5))
            def func(x):
                x_ = tf.reduce_mean(x, axis=[2, 3], keepdims=keepdims)
                return tf.add(x_, 0, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
@check_onnxruntime_incompatibility("Pow")
def test_pow_scalar(self):
x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32)
e = np.array(2.0, dtype=np.float32)
def func(x):
x_ = tf.pow(x, tf.constant(e))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend()
    def test_pad_const_default_val(self):
        """Convert tf.pad in CONSTANT mode with the default pad value (0),
        for a 2-D and a 4-D input."""
        # each entry: (mode, paddings per axis, input values)
        params = [
            ("CONSTANT", [[1, 1], [2, 2]], [[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]]),
            ("CONSTANT", [[0, 0], [3, 3], [3, 3], [0, 0]], np.random.randn(1, 3, 4, 5).astype(np.float32)),
        ]
        for p in params:
            mode, pad, xv = p
            x_val = np.array(xv, dtype=np.float32)
            def func(x):
                paddings = tf.constant(pad)
                op = tf.pad(x, paddings, mode)
                return tf.identity(op, name=_TFOUTPUT)
            self.logger.debug(str(p))
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
def test_pad_const(self):
x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
def func(x):
paddings = tf.constant([[1, 1], [2, 2]], name="paddings")
op = tf.pad(x, paddings, mode="CONSTANT", name="const_with_val", constant_values=999)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
def test_pad_reflect(self):
x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
def func(x):
paddings = tf.constant([[1, 1], [2, 2]], name="paddings")
op = tf.pad(x, paddings, mode="REFLECT", name="reflect")
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "Compress")
def test_pad_symmetric(self):
x_val = make_xval([4, 1, 5])
def func(x):
paddings = tf.constant([[1, 3], [0, 0], [2, 4]], name="paddings")
op = tf.pad(x, paddings, mode="SYMMETRIC", name="symmetric")
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Pad")
def test_dynamic_pad_symmetric(self):
x_val = make_xval([4, 1, 5])
y_val = np.array([[1, 3], [0, 0], [2, 4]], np.int32)
def func(x, y):
op = tf.pad(x, y, mode="SYMMETRIC", name="symmetric")
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_caffe2_backend()
def test_randomuniform(self):
def func():
shape = tf.constant([2, 3], name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.float32)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
def test_random_std_normal(self):
def func():
shape = tf.constant([20, 10, 50], name="shape")
x_ = tf.random.normal(shape)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})[0]
self.assertTrue(-0.1 < np.mean(results) < 0.1)
self.assertTrue(0.9 < np.std(results) < 1.1)
def test_randomnormal(self):
def func():
shape = tf.constant([20, 10, 50], name="shape")
x_ = tf.random.normal(shape, mean=10, stddev=2)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})[0]
self.assertTrue(9.8 < np.mean(results) < 10.2)
self.assertTrue(1.9 < np.std(results) < 2.1)
    @check_opset_min_version(9, "RandomNormalLike")
    def test_randomnormal_unknown_shape(self):
        """Convert tf.random.normal where the shape is a runtime input, then
        sanity-check the sample statistics of the backend output."""
        shape_val = np.array([20, 10, 50], np.int32)
        def func(shape):
            x_ = tf.random.normal(shape)
            return tf.identity(x_, name=_TFOUTPUT)
        # since results are random, compare the shapes only
        feed_dict = {_INPUT: shape_val}
        g = self._run_test_case(func, [_OUTPUT], feed_dict, check_value=False, check_shape=True)
        if "input" in g.input_names:
            # TFLite inputs don't have port numbers
            feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
        results = self.run_backend(g, g.outputs, feed_dict)[0]
        self.assertTrue(-0.1 < np.mean(results) < 0.1)
        self.assertTrue(0.9 < np.std(results) < 1.1)
    @check_opset_min_version(10, "TopK")
    def test_random_shuffle(self):
        """Convert tf.random.shuffle; the output order is random, so verify the
        result is a permutation by sorting it back along axis 0."""
        x_val = make_xval([5, 4, 3])
        def func(x):
            x_ = tf.random.shuffle(x)
            return tf.identity(x_, name=_TFOUTPUT)
        # since results are random, compare the shapes only
        g = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False, check_shape=True)
        feed_dict = {_INPUT: x_val}
        if "input" in g.input_names:
            # TFLite inputs don't have port numbers
            feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
        results = self.run_backend(g, g.outputs, feed_dict)
        # sorting the shuffled output must reproduce the (already sorted) input
        np.testing.assert_allclose(x_val, np.sort(results[0], axis=0))
def test_randomuniform_int(self):
def func():
shape = tf.constant([100, 3], name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=2, maxval=10)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})
numbers = set(results[0].flatten())
self.assertEqual(sorted(numbers), list(range(2, 10)))
def test_randomuniform_int_scalar(self):
def func():
shape = tf.constant(np.array([], np.int32), name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=2, maxval=10)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})
self.assertTrue(2 <= results[0] < 10)
    def test_randomuniform_int_nonconst_max(self):
        """Convert an integer RandomUniform whose maxval is a runtime input and
        verify every value in [0, 8) is produced."""
        m_val = np.array(8, dtype=np.int32)
        def func(m):
            shape = tf.constant([100, 3], name="shape")
            x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=0, maxval=m)
            x_ = tf.identity(x_, name="output1")
            x_ = tf.identity(x_, name="output2")
            return tf.identity(x_, name=_TFOUTPUT)
        # values are random -> the conversion test compares shapes only
        g = self._run_test_case(func, [_OUTPUT], {_INPUT: m_val}, check_value=False, check_shape=True)
        feed_dict = {_INPUT: m_val}
        if "input" in g.input_names:
            # TFLite inputs don't have port numbers
            feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
        results = self.run_backend(g, g.outputs, feed_dict)
        numbers = set(results[0].flatten())
        self.assertEqual(sorted(numbers), list(range(8)))
    def test_randomuniform_int_nonconst_min_max(self):
        """Convert an integer RandomUniform whose minval and maxval are runtime
        inputs and verify every value in [2, 10) is produced."""
        n_val = np.array(2, dtype=np.int32)
        m_val = np.array(10, dtype=np.int32)
        def func(n, m):
            shape = tf.constant([100, 3], name="shape")
            x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=n, maxval=m)
            x_ = tf.identity(x_, name="output1")
            x_ = tf.identity(x_, name="output2")
            return tf.identity(x_, name=_TFOUTPUT)
        # values are random -> the conversion test compares shapes only
        g = self._run_test_case(func, [_OUTPUT], {_INPUT: n_val, _INPUT1: m_val}, check_value=False, check_shape=True)
        feed_dict = {_INPUT: n_val, _INPUT1: m_val}
        if "input" in g.input_names:
            # TFLite inputs don't have port numbers
            feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
        results = self.run_backend(g, g.outputs, feed_dict)
        numbers = set(results[0].flatten())
        self.assertEqual(sorted(numbers), list(range(2, 10)))
    @check_opset_min_version(9, "RandomUniformLike")
    def test_randomuniform_int_nonconst_min_max_shape(self):
        """Convert an integer RandomUniform where minval, maxval AND shape are
        all runtime inputs; verify every value in [2, 10) is produced."""
        n_val = np.array(2, dtype=np.int32)
        m_val = np.array(10, dtype=np.int32)
        s_val = np.array([100, 3], dtype=np.int64)
        def func(n, m, s):
            x_ = random_uniform(s, name="rand", dtype=tf.int32, minval=n, maxval=m)
            x_ = tf.identity(x_, name="output1")
            x_ = tf.identity(x_, name="output2")
            return tf.identity(x_, name=_TFOUTPUT)
        # values are random -> the conversion test compares shapes only
        g = self._run_test_case(func, [_OUTPUT], {_INPUT: n_val, _INPUT1: m_val, _INPUT2: s_val},
                                check_value=False, check_shape=True)
        feed_dict = {_INPUT: n_val, _INPUT1: m_val, _INPUT2: s_val}
        if "input" in g.input_names:
            # TFLite inputs don't have port numbers
            feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
        results = self.run_backend(g, g.outputs, feed_dict)
        numbers = set(results[0].flatten())
        self.assertEqual(sorted(numbers), list(range(2, 10)))
@skip_caffe2_backend()
@check_opset_after_tf_version("2.2", 9, "RandomUniform")
def test_randomuniform_dyn_shape(self):
# test for dynamic shape coming from a shape op
x_val = np.array([0, 1, 2, 3, 5], dtype=np.int64)
def func(x):
ret = random_uniform(x[3:], dtype=tf.float32)
return tf.identity(ret, name=_TFOUTPUT)
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False, check_shape=True)
@skip_caffe2_backend()
def test_randomuniform_calc_shape(self):
# test for dynamic shape coming from some subgraph
x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
def func(x):
x_ = tf.identity(x)
x_ = tf.shape(x_, name="shape")[1:]
x_ = random_uniform(x_, name="rand", dtype=tf.float32)
x_ = tf.identity(x_)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False, check_shape=True)
@check_opset_min_version(9, "Compress")
@skip_onnx_checker("Checker fails type inference for Compress")
def test_sample_distorted_bounding_box_v2(self):
x_val = np.array([200, 300, 3], dtype=np.int32)
y_val = np.random.uniform(size=[1, 1000, 4]).astype(np.float32)
y_val = np.array([[0, 0, 0.1, 0.1], [0.9, 0.9, 1, 1]], np.float32).reshape([1, 2, 4])
def func(image_size, bounding_boxes):
begin, size, bboxes = tf.image.sample_distorted_bounding_box(
image_size, bounding_boxes, seed=42, min_object_covered=0.8,
aspect_ratio_range=[0.05, 3], area_range=[0.05, 1], max_attempts=100,
use_image_if_no_bounding_boxes=False)
begin_ = tf.identity(begin, name=_TFOUTPUT)
size_ = tf.identity(size, name=_TFOUTPUT1)
bboxes_ = tf.identity(bboxes, name=_TFOUTPUT2)
return begin_, size_, bboxes_
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: x_val, _INPUT1: y_val},
check_value=False, check_shape=True)
    @skip_caffe2_backend()
    def test_argminmax(self):
        """Convert tf.argmin/tf.argmax: explicit axis, default axis, and an
        explicit int32 output_type."""
        x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
        def func(x):
            x_ = tf.argmin(x, axis=0)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.array([1, 2, -2, -1], dtype=np.int32).reshape((2, 2))
        def func(x):
            x_ = tf.argmax(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.array([1, 2, -2, -1], dtype=np.int32).reshape((2, 2))
        def func(x):
            # output_type matches the input dtype (int32) instead of the default int64
            x_ = tf.argmax(x, output_type=x_val.dtype)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(6, "cast")
def test_cast(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.cast(x, tf.int32)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite does not support uint32 if tf version <= 2.3.0")
    @check_opset_min_version(6, "cast")
    def test_cast_unit32(self):
        """Convert tf.cast from uint32 to uint64.

        NOTE(review): method name looks like a typo for 'uint32'; renaming would
        change the test ID, so it is kept as-is."""
        x_val = np.array([1, 2, 3, 4], dtype=np.uint32).reshape((2, 2))
        def func(x):
            x_ = tf.cast(x, tf.uint64)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "sign")
    def test_sign(self):
        """Convert tf.math.sign for float32, int32 and int64 inputs that
        include positive, negative and zero values."""
        x_vals = [np.array([1.0, 2.0, 0.0, -1.0, 0.0, -2.0], dtype=np.float32).reshape((2, 3)),
                  np.array([1, 2, 0, -1, 0, -2], dtype=np.int32).reshape((2, 3)),
                  np.array([1, 2, 0, -1, 0, -2], dtype=np.int64).reshape((2, 3))]
        for x_val in x_vals:
            def func(x):
                x_ = tf.math.sign(x)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tfjs("tfjs produces incorrect results")
    def test_onehot0(self):
        """Convert tf.one_hot over several (dtype, axis) combinations with
        explicit on/off values."""
        x_val = np.array([0, 1, 2], dtype=np.int32)
        depth = 5
        for dtype, axis in [(tf.float32, -1), (tf.int64, 0), (tf.float64, 1)]:
            def func(x):
                val1 = tf.constant(5, dtype)
                val2 = tf.constant(1, dtype)
                x_ = tf.one_hot(x, depth, on_value=val1, axis=axis, off_value=val2, dtype=dtype)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@unittest.skip("only rank 1 is currently implemented")
def test_onehot1(self):
# only rank 1 is currently implemented
x_val = np.array([[0, 2], [1, -1]], dtype=np.int32)
depth = 3
def func(x):
x_ = tf.one_hot(x, depth, on_value=5.0, axis=-1, off_value=0.0, dtype=tf.float32)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_target("rs6", "onehot")
    def test_onehot2(self):
        """Convert tf.one_hot for every supported axis on a rank-1 input."""
        for axis in [-1, 0, 1]:
            x_val = np.array([0, 1, 2, 1, 2, 0, 1, 2, 1, 2], dtype=np.int32)
            depth = 20
            def func(x):
                x_ = tf.one_hot(x, depth, on_value=5.0, axis=axis, off_value=1.0, dtype=tf.float32)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_target("rs6", "onehot")
@check_opset_min_version(9, "onehot")
def test_onehot3(self):
# rank 1
for np_dtype in [np.int32, np.int64]:
x_val = np.array([0, 1, 2, 1, 2, 0, 1, 2, 1, 2], dtype=np_dtype)
depth = np.array(20).astype(np.int64)
def func(x):
on_off = np.array([5.6, 1.2]).astype(np_dtype)
x_ = tf.one_hot(x, depth, on_value=on_off[0], axis=-1, off_value=on_off[1])
return tf.identity(x_, name=_TFOUTPUT)
graph = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
self.assertTrue(len(group_nodes_by_type(graph)["OneHot"]) == 1, "onnx onehot should be used")
# rank 2
for aixs in [-1, 0, 1, 2]:
for np_dtype in [np.int32, np.int64]:
x_val = np.arange(0, 50, dtype=np_dtype).reshape([-1, 10])
depth = np.array(20).astype(np.int64)
def func(x):
on_off = np.array([5.6, 1.2]).astype(np_dtype)
x_ = tf.one_hot(x, depth, on_value=on_off[0], axis=aixs, off_value=on_off[1])
return tf.identity(x_, name=_TFOUTPUT)
graph = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
self.assertTrue(len(group_nodes_by_type(graph)["OneHot"]) == 1, "onnx onehot should be used")
    @check_opset_min_version(9, "onehot")
    @skip_tfjs("tfjs produces incorrect results")
    def test_onehot_rank0(self):
        """Convert tf.one_hot on a scalar (rank-0) index for int32/int64 and
        both valid axes."""
        depth = 5
        for np_dtype in [np.int32, np.int64]:
            x_val = np.array(3, dtype=np_dtype)
            for axis in [-1, 0]:
                def func(x):
                    x_ = tf.one_hot(x, depth, axis=axis)
                    return tf.identity(x_, name=_TFOUTPUT)
                self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("issue undefined dim 1")
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_flatten0(self):
x_val = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]], dtype=np.float32)
def func(x):
x_ = tf.contrib.layers.flatten(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("issue undefined dim 1")
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_flatten1(self):
x_val = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]], dtype=np.float32)
def func(x):
x_ = tf.contrib.layers.flatten(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_flatten2(self):
x_val = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]], dtype=np.float32)
def func(x):
x_ = tf.contrib.layers.flatten(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_cancel_transpose(self):
        """Two inverse transposes (NHWC->NCHW then NCHW->NHWC) should cancel
        during conversion/optimization."""
        x_val = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]], dtype=np.float32)
        def func(x):
            x_ = tf.identity(x, _TFINPUT)
            x_ = tf.transpose(x_, perm=NHWC_TO_NCHW)
            x_ = tf.transpose(x_, perm=NCHW_TO_NHWC)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_min_version("0.5.0", "topk-10's shape inference function has a bug")
@check_opset_min_version(6, "cast")
def test_topk1(self):
x_val = np.arange(3 * 2 * 3).astype("float32")
def func(x):
values, _ = tf.nn.top_k(x, 5, sorted=True)
return tf.identity(values, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "TopK with dynamic K")
def test_topk2(self):
x_val = np.arange(3 * 2 * 3).astype("float32")
k_val = np.array(10).astype(np.int32)
def func(x, k):
values, _ = tf.nn.top_k(x, k, sorted=True)
return tf.identity(values, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: k_val})
@check_onnxruntime_min_version("0.5.0", "topk-10's shape inference function has a bug")
def test_topk3(self):
# test topk index output
x_val = np.arange(3 * 2 * 3).astype("float32")
def func(x):
_, idx = tf.nn.top_k(x, 5, sorted=True)
return tf.identity(idx, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_stack_axis(self):
        """Convert tf.stack of ten constant tensors along axis 0 and axis 1."""
        for axis in [0, 1]:
            x_val = [np.random.randn(3, 4).astype("float32") for _ in range(10)]
            def func():
                x = [tf.constant(x_val[i], dtype=tf.float32) for i in range(10)]
                x_ = tf.stack(x, axis=axis)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {})
    def test_unstack_axis(self):
        """Convert tf.unstack of a constant tensor along axis 0 and axis 1."""
        for axis in [0, 1]:
            x_val = np.random.randn(10, 3, 4).astype("float32")
            def func():
                x = tf.constant(x_val, dtype=tf.float32)
                x_ = tf.unstack(x, axis=axis)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {})
def _test_reorganize_data(self, op, shape):
x_val = make_xval(shape)
def func(x):
x_ = op(x, block_size=2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend("Space2Depth not implemented")
    def test_space_to_depth(self):
        """Convert tf.nn.space_to_depth on NHWC data."""
        self._test_reorganize_data(tf.nn.space_to_depth, [1, 28, 28, 3])
    @skip_caffe2_backend("Depth2Space not implemented")
    def test_depth_to_space(self):
        """Convert tf.nn.depth_to_space on NHWC data."""
        self._test_reorganize_data(tf.nn.depth_to_space, [1, 14, 14, 12])
def _test_reorganize_data_gpu(self, op, shape):
x_val = make_xval(shape)
def func(x):
x_ = op(x, block_size=2, data_format="NCHW")
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tf_cpu("only tf_gpu can run Space2Depth with NCHW format")
    @skip_caffe2_backend("Space2Depth not implemented")
    def test_space_to_depth_gpu(self):
        """Convert tf.nn.space_to_depth on NCHW data (GPU only)."""
        self._test_reorganize_data_gpu(tf.nn.space_to_depth, [1, 3, 50, 80])
    @skip_tf_cpu("only tf_gpu can run Depth2Space with NCHW format")
    @skip_caffe2_backend("Depth2Space not implemented")
    def test_depth_to_space_gpu(self):
        """Convert tf.nn.depth_to_space on NCHW data (GPU only)."""
        self._test_reorganize_data_gpu(tf.nn.depth_to_space, [1, 120, 25, 40])
@check_opset_min_version(6, "addn")
def test_addn(self):
x_val = np.arange(3 * 2 * 3).astype("float32")
def func(x):
x_ = tf.add_n([x, x, x])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice1(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = tf.strided_slice(x, [1, 0, 0], [2, 1, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_strided_slice2(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = tf.strided_slice(x, [1, 0, 0], [2, 2, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_strided_slice3(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = x[1:]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_strided_slice4(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = x[:2]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice5(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = x[:2, 0:1, 1:]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice6(self):
# example from here:
# https://www.tensorflow.org/versions/r1.0/api_docs/cc/class/tensorflow/ops/strided-slice
x_val = np.arange(5 * 6).astype("float32").reshape((5, 6))
def func(x):
x_ = x[2, :]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend("multiple dims not supported")
    def test_strided_slice7(self):
        """Convert tf.strided_slice exercising each of the four mask attributes
        (begin_mask, end_mask, shrink_axis_mask, ellipsis_mask) on dim 1."""
        x_val = np.arange(5 * 6).astype("float32").reshape((5, 6))
        def func(x):
            x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], begin_mask=2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        def func(x):
            x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], end_mask=2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        def func(x):
            x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], shrink_axis_mask=2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        def func(x):
            x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], ellipsis_mask=2)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend("multiple dims not supported")
    def test_strided_slice8(self):
        """Convert slices mixing an ellipsis with integer indices and ranges,
        with the ellipsis in the middle, at the end, and at the start."""
        x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
        def func(x):
            x_ = x[0:1, ..., 1, 2:, :6]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
        def func(x):
            x_ = x[0:1, 1, 2:, :6, ...]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
        def func(x):
            x_ = x[..., 0:1, 1, 2:, :6]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_1(self):
# simple case
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array([0, 1, 2], dtype=np.int32)
def func(x, y):
x_ = tf.strided_slice(x, y, [2, 2, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_2(self):
# int32
x_val = np.arange(3 * 2 * 3).astype("int32").reshape((3, 2, 3))
y_val = np.array([0, 1, 2], dtype=np.int32)
def func(x, y):
x_ = tf.strided_slice(x, y, [2, 2, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_3(self):
# common usage, ellipsis_mask
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[y:2, :, :]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_4(self):
# begin_mask, end_mask
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[y:, :y]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_5(self):
# only slice the first axis
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[y:2]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
    @check_opset_min_version(10, "Slice")
    @skip_caffe2_backend("multiple dims not supported")
    def test_strided_slice_dynamic_6(self):
        """Integer indexing x[y] with a runtime scalar (shrink_axis mask),
        using both a positive and a negative index."""
        x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
        y_val = np.array(1, dtype=np.int32)
        def func(x, y):
            x_ = x[y]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
        # negative runtime index
        x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
        y_val = np.array(-1, dtype=np.int32)
        def func(x, y):
            x_ = x[y]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(10, "Slice")
    @skip_caffe2_backend("multiple dims not supported")
    def test_strided_slice_dynamic_7(self):
        """Slices combining a runtime scalar with an ellipsis in the middle,
        at the end, and at the start."""
        x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
        y_val = np.array(1, dtype=np.int32)
        def func(x, y):
            x_ = x[0:y, ..., y, y:, :y]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
        x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
        y_val = np.array(1, dtype=np.int32)
        def func(x, y):
            x_ = x[0:y, y, y:, :y, ...]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
        x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
        y_val = np.array(1, dtype=np.int32)
        def func(x, y):
            x_ = x[..., 0:y, y, y:, :y]
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(10, "Slice")
    def test_strided_slice_reverse_1(self):
        """Strided slice with negative step (reverse) and a stop bound (`:21:-1`)."""
        x_val = np.arange(16 * 32).astype(np.float32).reshape((1, 16, 32, 1))
        def func(x):
            return tf.concat([x[:, :, :10], x[:, :, :21:-1]], axis=0, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(10, "Slice")
    def test_strided_slice_reverse_2(self):
        """Strided slice with negative step and a start bound (`9::-1`)."""
        x_val = np.arange(16 * 32).astype(np.float32).reshape((1, 16, 32, 1))
        def func(x):
            return tf.concat([x[:, :, :10], x[:, :, 9::-1]], axis=0, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
    @check_opset_min_version(10, "Slice")
    def test_strided_slice_reverse_3(self):
        """Reverse strided slice where both the start and the (negative) step come
        from dynamic graph inputs."""
        x_val = np.zeros((1, 16, 32, 1)).astype(np.float32)
        y_val = np.array(9).astype(np.int32)
        z_val = np.array(-1).astype(np.int32)
        def func(x, y, z):
            return tf.concat([x[:, :, :10], x[:, :, y::z]], axis=0, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_opset_min_version(10, "Slice")
    @skip_tfjs("TFJS executes model incorrectly")
    def test_new_axis_mask(self):
        """Strided slice combining new-axis insertions (tf.newaxis), dynamic bounds,
        a stride, an ellipsis, and a shrink index in one subscript."""
        def func(x, y):
            x_ = x[tf.newaxis, 0:y, y::2, tf.newaxis, :, tf.newaxis, :y, tf.newaxis, ..., 9]
            return tf.identity(x_, name=_TFOUTPUT)
        x_val = np.arange(5*10*10*10*10*20*30).astype("float32").reshape((5, 10, 10, 10, 10, 20, 30))
        y_val = np.array(9, dtype=np.int32)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(10, "Slice")
    @skip_tflite("not supported in tflite")
    def test_strided_slice_ellipse(self):
        """Strided slice combining an ellipsis with new-axis insertions, both with
        the ellipsis trailing and with it in the middle of the subscript."""
        def func1(x):
            x_ = x[..., tf.newaxis]
            return tf.identity(x_, name=_TFOUTPUT)
        shape = [1, 8, 64]
        x_val = np.arange(np.prod(shape)).astype("float32").reshape(shape)
        self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
        def func2(x):
            x_ = x[:, tf.newaxis, ..., :, tf.newaxis]
            return tf.identity(x_, name=_TFOUTPUT)
        shape = [2, 3, 4, 5]
        x_val = np.arange(np.prod(shape)).astype("float32").reshape(shape)
        self._run_test_case(func2, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(10, "Slice")
    @skip_tflite("not supported in tflite")
    def test_strided_slice_only_ellipsis(self):
        """Strided slice whose subscript is nothing but an ellipsis (identity slice)."""
        def func1(x):
            x_ = x[...]
            return tf.identity(x_, name=_TFOUTPUT)
        shape = [1, 8, 64]
        x_val = np.arange(np.prod(shape)).astype("float32").reshape(shape)
        self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "batchnorm")
    def test_fused_batchnorm(self):
        """Inference-mode FusedBatchNorm (is_training=False) with constant
        scale/offset/mean/variance, NHWC layout."""
        x_shape = [1, 28, 28, 2]
        x_dtype = np.float32
        scale_dtype = np.float32
        scale_shape = [2]
        # only NHWC is supported on CPU for tensorflow
        data_format = "NHWC"
        x_val = np.random.random_sample(x_shape).astype(x_dtype)
        scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        def func(x):
            scale = tf.constant(scale_val, name='scale')
            offset = tf.constant(offset_val, name='offset')
            mean = tf.constant(mean_val, name='mean')
            var = tf.constant(var_val, name='variance')
            epsilon = 0.001
            y, _, _ = fused_batch_norm(
                x, scale, offset, mean=mean, variance=var,
                epsilon=epsilon, data_format=data_format, is_training=False)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
    @check_opset_min_version(7, "batchnorm")
    @check_tf_min_version("2.4", "tf version above 2.4 supports NDHWC")
    def test_fused_batchnorm_3d(self):
        """Inference-mode FusedBatchNorm on a 5-D input with NDHWC layout."""
        x_shape = [1, 28, 28, 2, 2]
        x_dtype = np.float32
        scale_dtype = np.float32
        scale_shape = [2]
        data_format = "NDHWC"
        x_val = np.random.random_sample(x_shape).astype(x_dtype)
        scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        def func(x):
            scale = tf.constant(scale_val, name='scale')
            offset = tf.constant(offset_val, name='offset')
            mean = tf.constant(mean_val, name='mean')
            var = tf.constant(var_val, name='variance')
            epsilon = 0.001
            y, _, _ = fused_batch_norm(
                x, scale, offset, mean=mean, variance=var,
                epsilon=epsilon, data_format=data_format, is_training=False)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
    @check_opset_min_version(7, "batchnorm")
    @skip_tfjs("TFJS executes model incorrectly")
    def test_fused_batchnorm_training(self):
        """Training-mode FusedBatchNorm (is_training=True) where mean/variance are
        computed from the batch rather than passed in."""
        x_shape = [1, 28, 28, 2]
        x_dtype = np.float32
        scale_dtype = np.float32
        scale_shape = [2]
        # only NHWC is supported on CPU for tensorflow
        data_format = "NHWC"
        x_val = np.random.random_sample(x_shape).astype(x_dtype)
        scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        def func(x):
            scale = tf.constant(scale_val, name='scale')
            offset = tf.constant(offset_val, name='offset')
            epsilon = 0.001
            y, _, _ = fused_batch_norm(
                x, scale, offset, mean=None, variance=None,
                epsilon=epsilon, data_format=data_format, is_training=True)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
    @skip_tflite("tflite converts aborts")
    @skip_tfjs("TFJS executes model incorrectly")
    @check_opset_min_version(11, "batchnorm")
    @check_tf_min_version("2.4")
    def test_batchnorm_mixed(self):
        """FusedBatchNormV3 with mixed precision: float16 input with float32
        scale/offset/mean/variance."""
        x_shape = [1, 32, 32, 2]
        x_dtype = np.float16
        scale_dtype = np.float32
        scale_shape = [2]
        x_val = np.random.random_sample(x_shape).astype(x_dtype)
        scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        def func(x, mean, offset, var):
            scale = tf.constant(scale_val, name='scale')
            y = tf.raw_ops.FusedBatchNormV3(x=x, scale=scale, offset=offset, mean=mean, variance=var,
                                            is_training=False, name=_TFOUTPUT)
            return y
        self._run_test_case(func, [_OUTPUT],
                            {_INPUT: x_val, _INPUT1: mean_val, _INPUT2: offset_val, _INPUT3: var_val})
    @check_opset_min_version(7, "batchnorm")
    @check_tf_min_version("1.13")
    def test_batchnorm(self):
        """tf.nn.batch_normalization with mean/offset/variance fed as graph inputs
        and a constant scale."""
        x_shape = [1, 128, 128, 2]
        x_dtype = np.float32
        scale_dtype = np.float32
        scale_shape = [2]
        x_val = np.random.random_sample(x_shape).astype(x_dtype)
        scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        def func(x, mean, offset, var):
            scale = tf.constant(scale_val, name='scale')
            epsilon = 0.001
            y = tf.nn.batch_normalization(x, mean, var, offset, scale, epsilon)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: mean_val, _INPUT2: offset_val, _INPUT3: var_val})
    @check_opset_min_version(7, "batchnorm")
    def test_conv2d_batchnorm_fusion(self):
        """Conv2D followed by an inference-mode FusedBatchNorm must be fused into
        the Conv weights; the validator asserts no BatchNormalization node
        survives in the ONNX graph."""
        x_shape = [1, 28, 28, 2]
        x_val = np.random.random_sample(x_shape).astype(np.float32)
        w = np.array([[2., 1., 1.],
                      [1., 3., 1.],
                      [1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
        # 2 channels for input and output
        w = np.concatenate([w, w, w, w]).reshape([3, 3, 2, 2])
        scale_dtype = np.float32
        scale_shape = x_shape[-1:]
        scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        def func_conv2d(x):
            kernel = tf.constant(w, dtype=tf.float32, name='k')
            conv = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
            return conv
        def func_fusedbn(x):
            scale = tf.constant(scale_val, name='scale')
            offset = tf.constant(offset_val, name='offset')
            mean = tf.constant(mean_val, name='mean')
            var = tf.constant(var_val, name='variance')
            epsilon = 0.1234
            y, _, _ = fused_batch_norm(
                func_conv2d(x), scale, offset, mean=mean, variance=var,
                epsilon=epsilon, data_format='NHWC', is_training=False)
            return tf.identity(y, name=_TFOUTPUT)
        def graph_validator(g):
            # fusion succeeded iff no BatchNormalization op remains
            if 'BatchNormalization' in [n.type for n in g.get_nodes()]:
                return False
            return True
        self._run_test_case(func_fusedbn, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05, graph_validator=graph_validator)
    @check_opset_min_version(7, "batchnorm")
    def test_multiple_conv2d_fused_batchnorm(self):
        """Chain of three Conv2D+FusedBatchNorm pairs (with a Relu between the
        first two); all batchnorms must be fused away, verified by the graph
        validator."""
        x_shape = [1, 28, 28, 2]
        x_val = np.random.random_sample(x_shape).astype(np.float32)
        w = np.array([[2., 1., 1.],
                      [1., 3., 1.],
                      [1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
        # 2 channels for input and output
        w = np.concatenate([w, w, w, w]).reshape([3, 3, 2, 2])
        scale_dtype = np.float32
        scale_shape = x_shape[-1:]
        scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
        def func_conv2d(x):
            kernel = tf.constant(w, dtype=tf.float32, name='k')
            conv = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
            return conv
        def func_multiple_fusedbn(x):
            scale = tf.constant(scale_val, name='scale')
            offset = tf.constant(offset_val, name='offset')
            mean = tf.constant(mean_val, name='mean')
            var = tf.constant(var_val, name='variance')
            epsilon = 0.1234
            y, _, _ = fused_batch_norm(
                func_conv2d(x), scale, offset, mean=mean, variance=var,
                epsilon=epsilon, data_format='NHWC', is_training=False)
            y = tf.nn.relu(y)
            y, _, _ = fused_batch_norm(
                func_conv2d(y), scale, offset, mean=mean, variance=var,
                epsilon=epsilon, data_format='NHWC', is_training=False)
            y, _, _ = fused_batch_norm(
                func_conv2d(y), scale, offset, mean=mean, variance=var,
                epsilon=epsilon, data_format='NHWC', is_training=False)
            return tf.identity(y, name=_TFOUTPUT)
        def graph_validator(g):
            # fusion succeeded iff no BatchNormalization op remains
            if 'BatchNormalization' in [n.type for n in g.get_nodes()]:
                return False
            return True
        self._run_test_case(func_multiple_fusedbn, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05,
                            graph_validator=graph_validator)
    @check_tf_min_version("1.15")
    @check_opset_min_version(10, "quantize_and_dequantize")
    def test_qdq_unsigned_input(self):
        """quantize_and_dequantize with an unsigned quantized type and a given
        static range [1, 6]."""
        x_shape = [3, 3, 2]
        x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape)
        def func(x):
            x_ = quantize_and_dequantize(x, 1.0, 6.0, signed_input=False, range_given=True)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.15")
    @check_opset_min_version(10, "quantize_and_dequantize")
    def test_qdq_optimizer(self):
        """QDQ optimizer should push Q/DQ pairs through Transpose/Reshape so only
        one DequantizeLinear node remains (asserted by check_op_count)."""
        x_shape = [3, 3, 2]
        x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape)
        def func(x):
            x_ = quantize_and_dequantize(x, 1.0, 6.0, signed_input=False, range_given=True)
            x_ = tf.transpose(x_, [1, 2, 0])
            x_ = tf.reshape(x_, tf.constant([9, 2]))
            x_ = quantize_and_dequantize(x_, 1.0, 6.0, signed_input=False, range_given=True)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
                                graph_validator=lambda g: check_op_count(g, "DequantizeLinear", 1, disabled=False))
    @check_tf_min_version("1.15")
    @check_opset_min_version(10, "quantize_and_dequantize")
    def test_qdq_optimizer_split_concat(self):
        """QDQ optimizer across unstack/stack/slice: after optimization, the
        MatMul must have DequantizeLinear on all inputs and QuantizeLinear on
        all consumers of its output."""
        x_shape = [7, 3, 5]
        y_shape = [7, 2, 5]
        x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape)
        y_val = np.arange(1, 1+np.prod(y_shape)).astype("float32").reshape(y_shape)
        def func(x, y):
            x_ = quantize_and_dequantize(x, 1.0, 30.0, signed_input=False, range_given=True)
            a, _, c = tf.unstack(x_, axis=1)
            ac = tf.stack([a, c], axis=1)
            y_ = quantize_and_dequantize(y, 1.0, 30.0, signed_input=False, range_given=True)
            m = tf.matmul(ac, tf.transpose(y_, [0, 2, 1]))
            m_ = m[2:, :, :]
            m_ = quantize_and_dequantize(m_, 1.0, 30.0, signed_input=False, range_given=True)
            return tf.identity(m_, name=_TFOUTPUT)
        def validate_graph(g):
            # MatMul should be wrapped in Dq/Q
            for n in g.get_nodes():
                if n.type == "MatMul":
                    if not all(inp.type == "DequantizeLinear" for inp in n.inputs):
                        return False
                    if not all(c.type == "QuantizeLinear" for c in g.find_output_consumers(n.output[0])):
                        return False
            return True
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, graph_validator=validate_graph)
    @check_tf_min_version("1.15")
    @check_opset_min_version(11, "ScatterND")
    @skip_tflite("TFLite uses a pattern for ScatterND so number of DequantizeLinear won't match")
    def test_qdq_optimizer_scatter(self):
        """QDQ optimizer through tensor_scatter_nd_update.

        First case quantizes both data and updates, so the optimizer can collapse
        to a single DequantizeLinear; second case leaves the updates unquantized
        and only checks numerical correctness.
        """
        x_val = np.array([10, 20, 30, 40], dtype=np.float32).reshape((4))
        y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
        z_val = np.array([8, 11], dtype=np.float32).reshape((2))
        def func(x, y, z):
            x_ = quantize_and_dequantize(x, 1.0, 30.0, signed_input=False, range_given=True)
            z_ = quantize_and_dequantize(z, 1.0, 30.0, signed_input=False, range_given=True)
            w = tf.tensor_scatter_nd_update(x_, y, z_)
            w_ = quantize_and_dequantize(w, 1.0, 30.0, signed_input=False, range_given=True)
            return tf.identity(w_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val},
                            graph_validator=lambda g: check_op_count(g, "DequantizeLinear", 1, disabled=False))
        def func(x, y, z):
            x_ = quantize_and_dequantize(x, 1.0, 30.0, signed_input=False, range_given=True)
            w = tf.tensor_scatter_nd_update(x_, y, z)
            w_ = quantize_and_dequantize(w, 1.0, 30.0, signed_input=False, range_given=True)
            return tf.identity(w_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_tf_min_version("1.15")
    @check_opset_min_version(10, "quantize_and_dequantize")
    def test_qdq_dyn_range_unsigned_input(self):
        """quantize_and_dequantize with range_given=False so the range is derived
        from the input values at runtime."""
        x_shape = [3, 3, 2]
        x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape) + 0.1
        def func(x):
            x_ = quantize_and_dequantize(x, 1.0, 6.0, signed_input=False, range_given=False)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite converter mistranslates quantize op")
    @check_tf_min_version("1.15")
    @check_opset_min_version(10, "quantize_and_dequantize")
    def test_qdq_signed_input(self):
        """quantize_and_dequantize with a signed quantized type over the static
        range [-6, 6], narrow_range disabled."""
        x_shape = [3, 3, 2]
        x_val = np.arange(-np.prod(x_shape)/2, np.prod(x_shape)/2).astype("float32").reshape(x_shape)
        def func(x):
            x_ = quantize_and_dequantize(x, -6.0, 6.0, signed_input=True, narrow_range=False, range_given=True)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite converter crashes")
    @check_tf_min_version("2.0")
    @check_opset_min_version(13, "quantize_and_dequantize")
    def test_qdq_per_channel_signed_input(self):
        """Per-channel (axis=-1) signed quantize_and_dequantize with a given
        per-channel range; requires opset 13 for per-axis Q/DQ."""
        x_shape = [3, 3, 2]
        x_val = np.arange(-np.prod(x_shape)/2, np.prod(x_shape)/2).astype("float32").reshape(x_shape)
        def func(x):
            x_ = quantize_and_dequantize(x, np.array([-1.72, -3.89]).astype(np.float32), \
                np.array([5.12, 2.36]).astype(np.float32), \
                signed_input=True, narrow_range=False, \
                range_given=True, axis=-1)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite converter crashes")
    @check_tf_min_version("2.0")
    @check_opset_min_version(13, "quantize_and_dequantize")
    def test_qdq_dyn_range_per_channel_signed_input(self):
        """Per-channel signed quantize_and_dequantize with range_given=False, so
        the per-channel ranges are computed from the data."""
        x_shape = [3, 3, 2]
        x_val = np.arange(-np.prod(x_shape)/2, np.prod(x_shape)/2).astype("float32").reshape(x_shape)
        def func(x):
            x_ = quantize_and_dequantize(x, np.array([-1.72, -3.89]).astype(np.float32), \
                np.array([5.12, 2.36]).astype(np.float32), \
                signed_input=True, narrow_range=False, \
                range_given=False, axis=-1)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend()
    @check_opset_min_version(7, "resize_nearest_neighbor")
    def test_resize_nearest_neighbor(self):
        """Nearest-neighbor upscale (15x20 -> 30x40) with a constant target size."""
        x_shape = [1, 15, 20, 2]
        x_new_size = [30, 40]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1).astype("float32").reshape(x_shape)
        def func(x):
            x_new_size_ = tf.constant(x_new_size)
            x_ = resize_nearest_neighbor(x, x_new_size_)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(9, "resize_nearest_neighbor")
    def test_resize_nearest_neighbor_with_non_const(self):
        """Nearest-neighbor resize where the target size is a graph input
        (non-constant)."""
        x_shape = [3, 10, 8, 5]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1, dtype=np.float32).reshape(x_shape)
        x_new_size = np.array([20, 16]).astype(np.int32)
        def func(x, x_new_size_):
            x_ = resize_nearest_neighbor(x, x_new_size_)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
    @skip_caffe2_backend()
    @check_opset_min_version(7, "resize_bilinear")
    def test_resize_bilinear(self):
        """Bilinear upscale (15x20 -> 30x40) with a constant target size."""
        x_shape = [1, 15, 20, 2]
        x_new_size = [30, 40]
        x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
        def func(x):
            x_new_size_ = tf.constant(x_new_size)
            x_ = resize_bilinear(x, x_new_size_)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend()
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "coordinate_transformation_mode attr of resize_bilinear")
    def test_resize_bilinear_align_coreners(self):
        """Bilinear resize with align_corners=True, which maps to the ONNX Resize
        coordinate_transformation_mode attribute (opset 11+).

        NOTE(review): method name has a typo ("coreners"); kept for test-id stability.
        """
        x_shape = [1, 15, 20, 2]
        x_new_size = [30, 40]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1).astype("float32").reshape(x_shape)
        def func(x):
            x_new_size_ = tf.constant(x_new_size)
            x_ = resize_bilinear(x, x_new_size_, align_corners=True)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_caffe2_backend()
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "coordinate_transformation_mode attr")
    def test_resize_bilinear_half_pixel_centers(self):
        """Bilinear resize with half_pixel_centers=True (opset 11
        coordinate_transformation_mode)."""
        x_shape = [1, 15, 20, 2]
        x_new_size = [30, 40]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1).astype("float32").reshape(x_shape)
        def func(x):
            x_new_size_ = tf.constant(x_new_size)
            x_ = resize_bilinear(x, x_new_size_, half_pixel_centers=True)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(9, "resize_bilinear")
    def test_resize_bilinear_with_non_const(self):
        """Bilinear resize where the target size is a graph input (non-constant)."""
        x_shape = [3, 10, 8, 5]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1, dtype=np.float32).reshape(x_shape)
        x_new_size = np.array([20, 16]).astype(np.int32)
        def func(x, x_new_size_):
            x_ = resize_bilinear(x, x_new_size_)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
    @check_opset_min_version(10, "resize scale can less than 1")
    def test_resize_bilinear_with_non_const2(self):
        """Bilinear resize (non-const size) that downscales one axis and upscales
        the other; opset 10 allows scales < 1."""
        # scales has an element larger than 1 and also has an element less than 1
        x_shape = [3, 100, 8, 5]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1, dtype=np.float32).reshape(x_shape)
        x_new_size = np.array([20, 16]).astype(np.int32)
        def func(x, x_new_size_):
            x_ = resize_bilinear(x, x_new_size_)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "resize_bilinear_v2")
    def test_resize_bilinear_v2_with_non_const(self):
        """ResizeBilinearV2 with a non-constant target size (opset 11)."""
        x_shape = [3, 10, 8, 5]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1, dtype=np.float32).reshape(x_shape)
        x_new_size = np.array([20, 16]).astype(np.int32)
        def func(x, x_new_size_):
            x_ = resize_bilinear_v2(x, x_new_size_)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
    def test_adjust_contrast(self):
        """tf.image.adjust_contrast over rank-3, rank-4, and rank-5 inputs with a
        scalar contrast factor supplied as a graph input."""
        def func(x, y):
            x_ = tf.image.adjust_contrast(x, y)
            return tf.identity(x_, name=_TFOUTPUT)
        for x_shape in [[4, 3, 2], [2, 3, 4, 5], [3, 4, 2, 4, 3]]:
            x_val = np.arange(1, 1 + np.prod(x_shape), dtype=np.float32).reshape(x_shape)
            y_val = np.array(2.1, np.float32)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(11, "GatherElements")
    def test_adjust_saturation(self):
        """tf.image.adjust_saturation with factors above and below 1."""
        x_val = np.array([[1, 2, 3], [4, 4, 4], [3, 2, 3], [3, 2, 2]], dtype=np.float32).reshape([2, 2, 3])
        y_val = np.array(2.1, np.float32)
        def func(x, y):
            x_ = tf.image.adjust_saturation(x, y)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
        y_val = np.array(0.5, np.float32)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(11, "GatherND")
    def test_adjust_hue(self):
        """tf.image.adjust_hue swept over deltas in [-1, 1) in steps of 0.2."""
        x_val = np.array([[1, 2, 3], [4, 4, 4], [10, 2, 1], [10, 1, 2],
                          [4, 6, 5], [5, 6, 4], [1, 3, 2], [3, 5, 3]], dtype=np.float32).reshape([2, 4, 3])
        def func(x, y):
            x_ = tf.image.adjust_hue(x, y)
            return tf.identity(x_, name=_TFOUTPUT)
        for i in range(-10, 10, 2):
            y_val = np.array(i / 10, np.float32)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, rtol=1e-6, atol=2e-5)
    # https://github.com/microsoft/onnxruntime/issues/12302
    @skip_onnxruntime_backend("resize op can't work well under Cubic mode with ORT 1.12")
    @check_tf_min_version("2.0", "Results are slightly different in tf1")
    @check_opset_min_version(11, "resize bicubic")
    def test_resize_bicubic(self):
        """Bicubic resize via tf.image.resize with a non-constant target size."""
        x_shape = [1, 15, 20, 2]
        new_size_val = np.array([30, 40], dtype=np.int32)
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1).astype("float32").reshape(x_shape)
        def func(x, new_size):
            y = tf.image.resize(x, new_size, method=tf.image.ResizeMethod.BICUBIC)
            return tf.identity(y, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: new_size_val}, rtol=1e-6, atol=1e-5)
    @check_opset_min_version(10, "resize scale can less than 1")
    def test_resize_nearest_neighbor2(self):
        """Nearest-neighbor resize that shrinks one axis (300 -> 30) and grows the
        other (20 -> 40); opset 10 allows scales < 1."""
        x_shape = [1, 300, 20, 2]
        x_new_size = [30, 40]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1).astype("float32").reshape(x_shape)
        def func(x):
            x_new_size_ = tf.constant(x_new_size)
            x_ = resize_nearest_neighbor(x, x_new_size_)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "coordinate_transformation_mode attr")
    def test_resize_nearest_neighbor_half_pixel_centers(self):
        """Nearest-neighbor resize with half_pixel_centers=True (opset 11
        coordinate_transformation_mode)."""
        x_shape = [1, 10, 20, 2]
        x_new_size = [20, 40]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1).astype("float32").reshape(x_shape)
        def func(x):
            x_new_size_ = tf.constant(x_new_size)
            x_ = resize_nearest_neighbor(x, x_new_size_, half_pixel_centers=True)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "coordinate_transformation_mode and nearest_mode attr")
    def test_resize_nearest_neighbor_align_corners(self):
        """Nearest-neighbor resize with align_corners=True; needs opset 11 for
        both coordinate_transformation_mode and nearest_mode."""
        x_shape = [1, 10, 20, 2]
        x_new_size = [20, 40]
        x_val = np.arange(1, 1 + np.prod(x_shape)/10, 0.1).astype("float32").reshape(x_shape)
        def func(x):
            x_new_size_ = tf.constant(x_new_size)
            x_ = resize_nearest_neighbor(x, x_new_size_, align_corners=True)
            return tf.identity(x_, name=_TFOUTPUT)
        _ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(9, "fill")
    def test_fill_float32(self):
        """tf.fill with a float32 constant, consumed by an Add (opset 9 path)."""
        x_shape = [1, 15, 20, 2]
        x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
        def func(x0):
            x1 = tf.fill(x_val.shape, 9.0)
            x2 = tf.add(x0, x1)
            return tf.identity(x2, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(9, "fill")
    def test_fill_int32(self):
        """tf.fill with an int32 constant, consumed by an Add (opset 9 path)."""
        x_shape = [1, 15, 20, 2]
        x_val = np.arange(1, 1 + np.prod(x_shape)).astype("int32").reshape(x_shape)
        def func(x0):
            x1 = tf.fill(x_val.shape, 9)
            x2 = tf.add(x0, x1)
            return tf.identity(x2, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "fill")
    def test_fill7_float32(self):
        """Same graph as test_fill_float32 but exercised on the opset 7 Fill
        conversion path."""
        x_shape = [1, 15, 20, 2]
        x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
        def func(x0):
            x1 = tf.fill(x_val.shape, 9.0)
            x2 = tf.add(x0, x1)
            return tf.identity(x2, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "fill")
    def test_fill7_int32(self):
        """Same graph as test_fill_int32 but exercised on the opset 7 Fill
        conversion path."""
        x_shape = [1, 15, 20, 2]
        x_val = np.arange(1, 1 + np.prod(x_shape)).astype("int32").reshape(x_shape)
        def func(x0):
            x1 = tf.fill(x_val.shape, 9)
            x2 = tf.add(x0, x1)
            return tf.identity(x2, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "div")
    def test_tf_div(self):
        """Raw Div op (gen_math_ops.div) on float and int data; inputs are offset
        away from zero to avoid division-by-zero."""
        # pylint: disable=E0001,C0415
        from tensorflow.python.ops.gen_math_ops import div
        shape = 1000
        # test floating data
        x_val = (np.random.sample(shape) + 1e-6).astype(np.float32)
        y_val = (np.random.sample(shape) + 1e-6).astype(np.float32)
        def func(x, y):
            output = div(x, y, name=_TFOUTPUT)
            # assert output.op.type == "Div"
            return output
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
        # test integer data
        x_val = (100 * np.random.sample(shape) + 1).astype(np.int32)
        y_val = (100 * np.random.sample(shape) + 1).astype(np.int32)
        def func(x, y):
            output = div(x, y, name=_TFOUTPUT)
            # assert output.op.type == "Div"
            return output
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(7, "erf")
    def test_erf(self):
        """tf.math.erf on random values and on fixed positive/negative points;
        loose rtol because erf may be emitted as an approximation pre-opset-9."""
        x_shape = [2, 2]
        x_val0 = np.random.random(np.prod(x_shape)).astype(np.float32).reshape(x_shape)
        x_val1 = np.array([[-1, -0.5], [1, 0.5]]).astype(np.float32)
        for x_val in [x_val0, x_val1]:
            def func(x):
                x_ = tf.math.erf(x)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.01)
    @check_opset_min_version(8, "Scan")
    @skip_opset(9, "ReverseSequence")
    def test_reverse_sequence_batch_major(self):
        """tf.reverse_sequence with batch_axis=0 / seq_axis=1 on rank-3, rank-2,
        and rank-5 inputs with varying and uniform sequence lengths."""
        x_val = np.array([[[1, 2, 3], [4, 5, 6], [0, 0, 0]],
                          [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                          [[1, 2, 3], [0, 0, 0], [0, 0, 0]]],
                         dtype=np.float32)
        def func(x):
            x_ = tf.reverse_sequence(x, seq_axis=1, batch_axis=0, seq_lengths=[2, 3, 1])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3],
                          [4, 5, 6], [4, 5, 6], [1, 1, 1],
                          [0, 0, 0], [7, 8, 9], [0, 0, 0]
                          ],
                         dtype=np.float32)
        def func(x):
            x_ = tf.reverse_sequence(x, seq_axis=1, batch_axis=0, seq_lengths=[3] * 9)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val_shape = [5, 5, 7, 8, 9]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = tf.reverse_sequence(x, seq_axis=1, batch_axis=0, seq_lengths=[5, 5, 5, 5, 5])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(8, "Scan")
    @skip_opset(9, "ReverseSequence")
    def test_reverse_sequence_time_major(self):
        """tf.reverse_sequence in time-major form (seq_axis=0, batch_axis=1) on
        rank-3, rank-2, and rank-5 inputs."""
        x_val = np.array([[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
                          [[4, 5, 6], [4, 5, 6], [0, 0, 0]],
                          [[0, 0, 0], [7, 8, 9], [0, 0, 0]]],
                         dtype=np.float32)
        def func(x):
            x_ = tf.reverse_sequence(x, seq_axis=0, batch_axis=1, seq_lengths=[2, 3, 1])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3],
                          [4, 5, 6], [4, 5, 6], [1, 1, 1],
                          [0, 0, 0], [7, 8, 9], [0, 0, 0]],
                         dtype=np.float32)
        def func(x):
            x_ = tf.reverse_sequence(x, seq_axis=0, batch_axis=1, seq_lengths=[9, 9, 9])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val_shape = [5, 5, 7, 8, 9]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = tf.reverse_sequence(x, seq_axis=0, batch_axis=1, seq_lengths=[5, 5, 5, 5, 5])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite interpreter crashes on empty axis")
    @check_opset_min_version(10, "ReverseSequence")
    def test_reversev2_constant_axis(self):
        """ReverseV2 with a constant axis list: a single axis and the empty axis
        vector (which should degenerate to identity)."""
        # Tests for constant axis.
        x_val_shape = [1, 2, 3, 4]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = reverse_v2(x, axis=[3])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        # Empty axis vector.
        x_val_shape = [2, 3, 4]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = reverse_v2(x, axis=[])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite reverse_v2 does not support multiple axes")
    @check_opset_min_version(10, "ReverseSequence")
    def test_reversev2_vector_axis(self):
        """ReverseV2 over multiple axes at once, mixing positive and negative
        axis indices, on rank-4, rank-3, and rank-6 inputs."""
        x_val_shape = [1, 2, 3, 4]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = reverse_v2(x, axis=[0, -3, 2, 3])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val_shape = [2, 3, 4]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = reverse_v2(x, axis=[-3, 1, 2])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val_shape = [5, 5, 9, 7, 8, 9]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = reverse_v2(x, axis=[0, 1, -2, 3, 5])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("tflite interpreter crashes on empty axis")
    @check_opset_min_version(10, "ReverseSequence")
    def test_reversev2_1D_tensor(self):
        """ReverseV2 on a 1-D tensor: reversing axis 0, and the empty-axis case
        which converts to an Identity block."""
        # For tensors with 1 dimension and no axis to reverse.
        # Adds an identity block.
        x_val_shape = [4]
        x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
        def func(x):
            x_ = reverse_v2(x, axis=[0])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        def func(x):
            x_ = reverse_v2(x, axis=[])
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "GreaterEqual")
    def test_where(self):
        """tf.where selecting between two float tensors, for both vector and
        scalar (rank-0) operands."""
        x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.float32)
        true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
                               dtype=np.float32)
        false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
                                dtype=np.float32)
        def func(x):
            picks = tf.where(x > -1, true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        # scalar condition / scalar branches
        x_val = np.array(1, dtype=np.float32)
        true_result = np.array(100, dtype=np.float32)
        false_result = np.array(-111, dtype=np.float32)
        def func(x):
            picks = tf.where(x > -1, true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(9, "IsNaN")
    def test_where_isnan(self):
        """tf.where with an IsNaN condition, replacing NaN elements with values
        from a constant tensor."""
        x_val = np.array([1, 2, -3, float('nan'), -5, -6, float('nan'), 8, 9, 0], dtype=np.float32)
        true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
                               dtype=np.float32)
        def func(x):
            picks = tf.where(is_nan(x), true_result, x)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(9, "IsNaN")
    def test_where_ismulinf(self):
        """tf.where whose condition compares against inf * 0 (NaN from the
        multiply), exercising NaN propagation through the comparison."""
        x_val1 = np.array([np.inf], dtype=np.float32)
        x_val2 = np.array([0], dtype=np.float32)
        true_result = np.array([np.inf], dtype=np.float32)
        def func(x1, x2):
            mul = tf.multiply(x1, x2)
            picks = tf.where(x1 < mul, true_result, x2)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
    @check_opset_min_version(9, "Where for strings needs opset 9")
    @skip_tfjs("Technically tf where doesn't support strings and tfjs doesn't like it")
    def test_where_string(self):
        """tf.where selecting between two string tensors."""
        x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.float32)
        true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
                               dtype=str)
        false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
                                dtype=str)
        def func(x):
            picks = tf.where(x > -1, true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "GreaterEqual")
    def test_where_bool(self):
        """tf.where selecting between two boolean tensors."""
        x_val = np.array([1, 2, -3, 4, -5], dtype=np.float32)
        true_result = np.array([True, False, True, False, True],
                               dtype=bool)
        false_result = np.array([False, True, False, True, True],
                                dtype=bool)
        def func(x):
            picks = tf.where(x > -1, true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "GreaterEqual")
    #@check_target("rs6", "onnxruntime Where type limitation")
    def test_where_int32(self):
        """tf.where on int32 inputs with a GreaterEqual condition."""
        x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.int32)
        true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
                               dtype=np.int32)
        false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
                                dtype=np.int32)
        def func(x):
            picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "GreaterEqual")
    @check_tf_max_version("1.15", "issues in tf-2.0, fix later")
    def test_where_with_two_rank_input(self):
        """tf.where with rank-1 condition broadcast against rank-2 value branches."""
        x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.float32)
        true_result = np.array([[111, 111], [222, 222], [333, 333], [444, 444], [555, 555],
                                [666, 666], [777, 777], [888, 888], [999, 999], [1000, 1000]],
                               dtype=np.float32)
        false_result = np.array([[-111, -111], [-222, -222], [-333, -333], [-444, -444], [-555, -555],
                                 [-666, -666], [-777, -777], [-888, -888], [-999, -999], [-1000, -1000]],
                                dtype=np.float32)
        def func(x):
            cond = tf.greater_equal(x, 0)
            picks = tf.where(cond, true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "GreaterEqual")
    def test_where_with_two_rank_condition(self):
        """tf.where with a rank-2 condition tensor."""
        x_val = np.array([[1, 2, -3, 4, -5, -6, -7, 8, 9, 0]], dtype=np.float32)
        true_result = np.array([[111, 222, 333, 444, 555, 666, 777, 888, 999, 1000]],
                               dtype=np.float32)
        false_result = np.array([[-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000]],
                                dtype=np.float32)
        def func(x):
            picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "GreaterEqual")
    def test_where_with_three_rank_condition(self):
        """tf.where with a rank-3 condition tensor."""
        x_val = np.array([[[1, 2, -3, 4, -5, -6, -7, 8, 9, 0]]], dtype=np.float32)
        true_result = np.array([[[111, 222, 333, 444, 555, 666, 777, 888, 999, 1000]]],
                               dtype=np.float32)
        false_result = np.array([[[-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000]]],
                                dtype=np.float32)
        def func(x):
            picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(7, "GreaterEqual")
    def test_where_scalar(self):
        """tf.where with a scalar condition selecting whole-array branches."""
        x_val = np.array(6, dtype=np.float32)
        true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
                               dtype=np.float32)
        false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
                                dtype=np.float32)
        def func(x):
            picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
            return tf.identity(picks, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(9, "NonZero")
    #@check_target("rs6", "onnxruntime Transpose type limitation")
    def test_where_with_cond_only(self):
        """Single-argument tf.where (index-returning form, maps to ONNX NonZero)."""
        for np_type in [np.int32, np.float32]:
            x_val = np.random.randint(0, 2, size=[10, 20, 30]).astype(np_type)
            def func(x):
                # FIXME: was tf_placeholder(tf_type, shape=[None] * x_val.ndim, name=_TFINPUT)
                res = tf.where(x)
                return tf.identity(res, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14", "tf.strings.lower")
    @check_opset_min_version(10, "StringNormalizer")
    def test_string_lower(self):
        """tf.strings.lower on a 2-D string tensor (includes non-ASCII symbols)."""
        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
        def func(text1):
            x = tf.strings.lower(text1)
            x_ = tf.identity(x, name=_TFOUTPUT)
            return x_
        self._run_test_case(func, [_OUTPUT], {_INPUT: text_val1})
    @check_tf_min_version("1.14", "tf.strings.lower")
    @check_opset_min_version(10, "StringNormalizer")
    def test_string_lower_flat(self):
        """tf.strings.lower on a flat (1-D) string tensor."""
        text_val1 = np.array(["a", "Test 1 2 3", "♠♣", "Hi there", "test test", "♥♦"], dtype=str)
        def func(text1):
            x = tf.strings.lower(text1)
            x_ = tf.identity(x, name=_TFOUTPUT)
            return x_
        self._run_test_case(func, [_OUTPUT], {_INPUT: text_val1})
    @check_tf_min_version("1.14", "tf.strings.lower")
    @check_opset_min_version(10, "StringNormalizer")
    def test_string_upper(self):
        """tf.strings.upper on a 2-D string tensor."""
        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
        def func(text1):
            x = tf.strings.upper(text1)
            x_ = tf.identity(x, name=_TFOUTPUT)
            return x_
        self._run_test_case(func, [_OUTPUT], {_INPUT: text_val1})
    @check_opset_min_version(6, "cast")
    def test_shape_int32(self):
        """tf.shape with out_type=int32; also validates the output dtype."""
        x_val = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]], dtype=np.float32)
        def func(x):
            x_ = tf.multiply(x, x)
            x_ = tf.shape(x_, out_type=tf.int32)
            return tf.identity(x_, name=_TFOUTPUT)
        kwargs = {"check_dtype": True}
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, **kwargs)
    @unittest.skipIf(get_test_config().is_onnxruntime_backend and get_test_config().opset < 7,
                     "mul-1, mul-6 not supported in onnxruntime. conversion is covered since opset6")
    def test_shape_int64(self):
        """tf.shape with out_type=int64; also validates the output dtype."""
        x_val = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]], dtype=np.float32)
        def func(x):
            x_ = tf.multiply(x, x)
            x_ = tf.shape(x_, out_type=tf.int64)
            return tf.identity(x_, name=_TFOUTPUT)
        kwargs = {"check_dtype": True}
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, **kwargs)
    # @check_opset_min_version(7, "broadcasting op")
    @unittest.skip("disable it for now, since fold const has bug")
    def test_softmax_cross_entropy_with_logits(self):
        """softmax_cross_entropy_with_logits_v2 with int32/int64 dense labels (currently skipped)."""
        num_class = 5
        data_shape = [100, num_class]
        for np_dtype in [np.int32, np.int64]:
            label_val = np.random.randint(0, num_class - 1, data_shape).astype(np_dtype)
            logits_val = np.random.random(data_shape).astype(np.float32)
            def func(label, logits):
                res1 = tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits)
                return tf.identity(res1, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: label_val, _INPUT1: logits_val}, atol=1e-5)
    @check_opset_min_version(9, "sparse_softmax_cross_entropy_with_logits")
    def test_sparse_softmax_cross_entropy_with_logits(self):
        """sparse_softmax_cross_entropy_with_logits with a small class count."""
        # FIXME: fails for opset 8 on onnxruntime-1.0, disable for now
        num_class = 5
        label_val = np.array([3, 2, 0, 4]).astype(np.int32)
        logits_val = np.random.random((len(label_val), num_class)).astype(np.float32)
        def func(label, logits):
            res1 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits)
            return tf.identity(res1, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: label_val, _INPUT1: logits_val})
    @check_target('rs6', 'SparseSoftmaxCrossEntropyWithLogits')
    def test_sparse_softmax_cross_entropy_with_logits_large_class(self):
        """sparse_softmax_cross_entropy_with_logits with 30k classes (numeric stress)."""
        num_class = 30000
        label_val = np.array([3374, 2127, 10002, 48]).astype(np.int32)
        logits_val = np.random.random((len(label_val), num_class)).astype(np.float32)
        def func(label, logits):
            res = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits)
            return tf.identity(res, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: label_val, _INPUT1: logits_val}, rtol=1e-6)
    def test_matrix_band_part(self):
        """tf.linalg.band_part extracting lower (-1, 0) and upper (0, -1) triangles."""
        input_val = np.random.randint(0, 666, (10, 15)).astype(np.int32)
        def func(input_x):
            res = tf.linalg.band_part(input_x, -1, 0)
            res1 = tf.linalg.band_part(input_x, 0, -1)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    def test_matrix_band_part_2(self):
        """tf.linalg.band_part degenerate case: a 1x1 matrix."""
        input_val = np.random.randint(0, 666, (1, 1)).astype(np.int32)
        def func(input_x):
            res = tf.linalg.band_part(input_x, -1, 0)
            res1 = tf.linalg.band_part(input_x, 0, -1)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    @check_opset_min_version(11, "CumSum")
    def test_matrix_band_part_3(self):
        """band_part over a grid of (lower, upper) band widths on a 2-D input."""
        # func closes over the loop vars, but is consumed by _run_test_case in
        # the same iteration, so late binding is not an issue here.
        for low, high in [(-1, 3), (2, 3), (4, 3), (0, -1), (0, 0), (-1, -1)]:
            input_val = np.random.randint(0, 666, (10, 15)).astype(np.int32)
            def func(input_x):
                res = tf.linalg.band_part(input_x, low, high)
                return tf.identity(res, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
    @check_opset_min_version(11, "CumSum")
    def test_matrix_band_part_4(self):
        """band_part on a rank-4 batched input over several (lower, upper) pairs."""
        for low, high in [(-1, 3), (2, 3), (4, 3), (0, -1), (0, 0)]:
            input_val = np.random.randint(0, 666, (2, 3, 10, 15)).astype(np.int32)
            def func(input_x):
                res = tf.linalg.band_part(input_x, low, high)
                return tf.identity(res, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
    @check_opset_min_version(11, "CumSum")
    def test_matrix_band_part_5(self):
        """band_part with non-const (graph-input) band widths on a rank-4 input."""
        for low_val, high_val in [(2, 3), (4, 3), (0, 0), (2, 0)]:
            low_val = np.array(low_val, np.int32)
            high_val = np.array(high_val, np.int32)
            input_val = np.random.randint(0, 666, (2, 3, 10, 15)).astype(np.int32)
            def func(input_x, low, high):
                res = tf.linalg.band_part(input_x, low, high)
                return tf.identity(res, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: input_val, _INPUT1: low_val, _INPUT2: high_val})
def test_floordiv(self):
input_val_1 = np.random.random_sample(100).astype(np.int32)
input_val_2 = (np.random.random_sample(100) + 1).astype(np.int32)
def func(input_1, input_2):
res = tf.math.floordiv(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
input_val_1 = np.random.random_sample(100).astype(np.float32)
input_val_2 = (np.random.random_sample(100) + 1).astype(np.float32)
def func(input_1, input_2):
res = tf.math.floordiv(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
# test broadcasting
input_val_1 = np.random.random_sample((10, 50)).astype(np.float32)
input_val_2 = (np.random.random_sample(50) + 1).astype(np.float32)
def func(input_1, input_2):
res = tf.math.floordiv(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
def test_floormod(self):
input_val_1 = 100 * np.random.random_sample(100).astype(np.int32)
input_val_2 = (100 * np.random.random_sample(100) + 1).astype(np.int32)
def func(input_1, input_2):
res = floormod(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
input_val_1 = 100 * np.random.random_sample(100).astype(np.float32)
input_val_2 = (100 * np.random.random_sample(100) + 1).astype(np.float32)
def func(input_1, input_2):
res = floormod(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2}, rtol=1e-5)
# test broadcasting case
input_val_1 = (50 * np.random.random_sample((10, 50)) + 1).astype(np.float32)
input_val_2 = (50 * np.random.random_sample(50) + 1).astype(np.float32)
def func(input_1, input_2):
res = floormod(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2}, rtol=1e-4)
    def test_logical_not(self):
        """tf.logical_not on a random bool matrix."""
        input_val = np.random.randint(0, 2, (10, 20)).astype(bool)
        def func(x):
            res = tf.logical_not(x)
            return tf.identity(res, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
    def test_reduce_all(self):
        """tf.reduce_all: full reduction and axis-0 reduction, keepdims False then True."""
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_all(input_tensor=x, keepdims=False)
            res1 = tf.reduce_all(input_tensor=x, axis=[0], keepdims=False)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(input_x):
            res = tf.reduce_all(input_tensor=input_x, keepdims=True)
            res1 = tf.reduce_all(input_tensor=input_x, axis=[0], keepdims=True)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    def test_reduce_any(self):
        """tf.reduce_any: full reduction and axis-0 reduction, keepdims False then True."""
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_any(input_tensor=x, keepdims=False)
            res1 = tf.reduce_any(input_tensor=x, axis=[0], keepdims=False)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_any(input_tensor=x, keepdims=True)
            res1 = tf.reduce_any(input_tensor=x, axis=[1], keepdims=True) if False else tf.reduce_any(input_tensor=x, axis=[0], keepdims=True)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    @check_opset_min_version(11, "ReduceMin")
    def test_reduce_all_negative_axis(self):
        """tf.reduce_all with a negative axis, keepdims False then True."""
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_all(input_tensor=x, keepdims=False)
            res1 = tf.reduce_all(input_tensor=x, axis=[-1], keepdims=False)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(input_x):
            res = tf.reduce_all(input_tensor=input_x, keepdims=True)
            res1 = tf.reduce_all(input_tensor=input_x, axis=[-1], keepdims=True)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    @check_opset_min_version(11, "ReduceSum")
    def test_reduce_any_negative_axis(self):
        """tf.reduce_any with a negative axis, keepdims False then True."""
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_any(input_tensor=x, keepdims=False)
            res1 = tf.reduce_any(input_tensor=x, axis=[-1], keepdims=False)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_any(input_tensor=x, keepdims=True)
            res1 = tf.reduce_any(input_tensor=x, axis=[-1], keepdims=True)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    @check_opset_min_version(11, "ReduceSum")
    @check_tf_min_version("1.15")
    def test_reduce_any_empty_axis(self):
        """tf.reduce_any with axis=[] (no-op reduction) alongside a full reduction."""
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_any(input_tensor=x, keepdims=False)
            res1 = tf.reduce_any(input_tensor=x, axis=[], keepdims=False)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    def test_reduce_all_scalar_axis(self):
        """tf.reduce_all with a scalar (non-list) axis argument."""
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        def func(x):
            res = tf.reduce_all(input_tensor=x, keepdims=False)
            res1 = tf.reduce_all(input_tensor=x, axis=0, keepdims=False)
            return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
    @check_opset_min_version(13, "ReduceSum")
    @check_tf_min_version("1.15")
    def test_reduce_any_nonconst_axis(self):
        """tf.reduce_any where the axis itself is a graph input (non-const)."""
        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
        y_val = np.array([1], np.int32)
        def func(x, y):
            res = tf.reduce_any(input_tensor=x, axis=y, keepdims=False)
            return tf.identity(res, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_val, _INPUT1: y_val})
    @check_opset_min_version(7, "fill")
    def test_zeros_like(self):
        """tf.zeros_like after a dynamic reshape, for float32 and int32 inputs."""
        input_x = np.random.random_sample([10, 20]).astype(np.float32)
        input_y = np.array([20, 10]).astype(np.int64)
        def func(x, y):
            z = tf.reshape(x, y)
            return tf.zeros_like(z, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_x, _INPUT1: input_y})
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_x.astype(np.int32), _INPUT1: input_y})
    @check_opset_min_version(8, "BroadcastTo")
    def test_zeros_like_bool(self):
        """tf.zeros_like on a bool tensor (input is the float > 0.5 mask)."""
        input_x = np.random.random_sample([10, 20]).astype(np.float32)
        input_y = np.array([20, 10]).astype(np.int64)
        def func(x, y):
            z = tf.reshape(x, y)
            return tf.zeros_like(z, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_x > 0.5, _INPUT1: input_y})
    @check_opset_min_version(9, "ConstantOfShape")
    def test_zeros_like_opset9(self):
        """tf.zeros_like lowered via ConstantOfShape; validates exactly one such node."""
        input_x = np.random.random_sample([3, 16, 16]).astype(np.float32)
        input_y = np.array([16, 16, 3]).astype(np.int64)
        def func(x, y):
            z = tf.reshape(x, y)
            return tf.zeros_like(z, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_x, _INPUT1: input_y})
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_x.astype(np.int32), _INPUT1: input_y}, as_session=True,
                            graph_validator=lambda g: check_op_count(g, "ConstantOfShape", 1, disabled=False))
    @check_opset_min_version(9, "is_nan")
    def test_isnan(self):
        """is_nan on all-finite, all-NaN, and mixed float32 inputs."""
        # only compatible with dtype `float32`
        x_val1 = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
        x_val2 = np.array([np.nan, np.nan, np.nan, np.nan], dtype=np.float32).reshape((2, 2))
        x_val3 = np.array([1.0, np.nan, -3.0, np.nan], dtype=np.float32).reshape((2, 2))
        for x_val in [x_val1, x_val2, x_val3]:
            def func(x):
                x_ = is_nan(x)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_ceil(self):
        """tf.math.ceil on negative and positive fractional values."""
        x_val = np.array([-1.5, 1.2], dtype=np.float32)
        def func(x):
            x_ = tf.math.ceil(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_softplus(self):
        """tf.math.softplus on a small signed input."""
        x_val = np.array([-1, 0, 1], dtype=np.float32)
        def func(x):
            x_ = tf.math.softplus(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_softsign(self):
        """tf.math.softsign on a small signed input."""
        x_val = np.array([-1, 0, 1], dtype=np.float32)
        def func(x):
            x_ = tf.math.softsign(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_batch_to_spacend(self):
        """batch_to_space_nd with const block size and crops on a 4-D input."""
        block_size = [2, 2]
        crop = [[1, 0], [2, 1]]
        input_val = np.random.random_sample([40, 3, 5, 100]).astype(np.float32)
        def func(x):
            return batch_to_space_nd(x, block_size, crop, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
    @check_opset_min_version(11, "BatchToSpaceND")
    @unittest.skip("this was recently removed - but don't we want this to work ?")
    def test_batch_to_spacend_non_const(self):
        """batch_to_space_nd where block_shape and crops are graph inputs (currently skipped)."""
        def func(input_x, block_shape, crops):
            return batch_to_space_nd(input_x, block_shape, crops, name=_TFOUTPUT)
        input_x_val = np.random.random_sample([40, 3, 5, 100]).astype(np.float32)  # NHWC
        block_shape_val = np.array([2, 2]).astype(np.int64)
        crops_val = np.array([[1, 0], [2, 1]]).astype(np.int64)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_x_val, _INPUT1: block_shape_val, _INPUT2: crops_val})
@check_opset_min_version(11, "SpaceToBatchND")
@unittest.skip("this was recently removed - but don't we want this to work ?")
def test_space_to_batchnd_non_const(self):
input_x_val = np.random.random_sample([40, 5, 7, 66]).astype(np.float32) # NHWC
def func(input_x, block_size, pad):
return batch_to_space_nd(input_x, block_size, pad, name=_TFOUTPUT)
block_size_val = np.array([2, 2]).astype(np.int64)
pad_val = np.array([[0, 1], [2, 1]]).astype(np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_x_val, _INPUT1: block_size_val, _INPUT2: pad_val})
    @check_opset_min_version(11, "BatchToSpaceND")
    def test_batch_to_spacend_non_const_7d(self):
        """BatchToSpaceND with dynamic input/crops over 3-D..7-D inputs and 1-D/2-D block shapes."""
        x_type, y_type, z_type = np.float32, np.int64, np.int64
        # test 3D upto 7D input tensors
        for x_shape in [[12, 4, 4], [12, 4, 8, 3], [12, 4, 8, 3, 2], [12, 4, 8, 3, 2, 3], [12, 4, 8, 3, 2, 1, 3]]:
            # test 1D upto 2D block shapes
            for block_shape in [[2, 3], [2, 2], [2]]:
                # crop 1 layer at end of each dim
                # x and z can be dynamic.
                # y = block_shape cannot be dynamic without change to Transpose op spec
                crops = [[0, 1] for dim in block_shape]
                y_val = np.array(block_shape).astype(y_type)
                x_val = np.array([x + 1 for x in range(0, np.prod(x_shape))], dtype=x_type).reshape(x_shape)
                z_val = np.array(crops).astype(z_type)
                def func(x, z):
                    y = tf.constant(dtype=y_type, value=y_val, shape=y_val.shape, name=_TFINPUT1)
                    return batch_to_space_nd(x, y, z, name=_TFOUTPUT)
                self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT2: z_val})
    def test_depthwise_dilations_pattern(self):
        """space_to_batch -> depthwise_conv2d -> batch_to_space, the TF dilated-conv pattern."""
        x_val = np.random.random_sample([1, 33, 34, 960]).astype(np.float32)
        kernel = np.random.random_sample([3, 3, 960, 1]).astype(np.float32)
        block_size = np.array([3, 3], np.int64)
        pad = np.array([[2, 4], [5, 3]])
        crop = np.array([[0, 0], [0, 0]])
        def func(x):
            y = space_to_batch_nd(x, block_size, pad)
            z = tf.nn.depthwise_conv2d(y, kernel, strides=[1, 1, 1, 1], padding='VALID')
            return batch_to_space_nd(z, block_size, crop, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "SpaceToBatchND")
    def test_space_to_batchnd_non_const_7d(self):
        """SpaceToBatchND with dynamic input/paddings over 3-D..7-D inputs and 1-D/2-D block shapes."""
        x_type, y_type, z_type = np.float32, np.int64, np.int64
        # test 3D upto 7D input tensors
        for x_shape in [[2, 4, 4], [1, 4, 8, 3], [1, 4, 8, 3, 2], [1, 4, 8, 3, 2, 3], [1, 4, 8, 3, 2, 1, 3]]:
            # test 1D upto 2D block shapes
            for block_shape in [[2], [2, 2]]:
                # pad 1 layer at begin and end of each dim
                pads = [[1, 1] for dim in block_shape]
                y_val = np.array(block_shape).astype(y_type)
                x_val = np.array([x + 1 for x in range(0, np.prod(x_shape))], dtype=x_type).reshape(x_shape)
                z_val = np.array(pads).astype(z_type)
                # x and z can be dynamic.
                # y = block_shape cannot be dynamic without change to Transpose op spec
                def func(x, z):
                    y = tf.constant(dtype=y_type, value=y_val, shape=y_val.shape, name=_TFINPUT1)
                    return space_to_batch_nd(x, y, z, name=_TFOUTPUT)
                self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT2: z_val})
    @check_opset_min_version(10, "CropAndResize")
    def test_crop_and_resize(self):
        """crop_and_resize with const boxes/crop size, bilinear sampling."""
        boxes_val = [[0.5, 0.7, 0.7, 0.9], [0.2, 0.4, 0.4, 0.6]]
        def func(input_x, box_ind):
            boxes = tf.constant(boxes_val, dtype=tf.float32)
            corp_size = tf.constant(np.array([20, 20]).astype(np.int32))
            return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, method='bilinear')
        input_x_val = np.random.randint(low=0, high=256, size=[2, 36, 36, 3]).astype(np.float32)  # NHWC
        box_ind_val = np.array([1, 0]).astype(np.int32)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_x_val, _INPUT2: box_ind_val},
                            rtol=1e-04, atol=1e-03)
    @check_opset_min_version(11, "CropAndResize")
    def test_crop_and_resize_linear(self):
        """crop_and_resize with all arguments as graph inputs, bilinear sampling."""
        def func(input_x, boxes, box_ind, corp_size):
            return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, method='bilinear')
        input_x_val = np.random.randint(low=0, high=256, size=[2, 36, 36, 3]).astype(np.float32)  # NHWC
        boxes_val = np.array([[0.5, 0.7, 0.7, 0.9], [0.2, 0.4, 0.4, 0.6]]).astype(np.float32)
        box_ind_val = np.array([1, 0]).astype(np.int32)
        corp_size_val = np.array([20, 20]).astype(np.int32)
        self._run_test_case(func, [_OUTPUT],
                            {_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
                            rtol=1e-05, atol=1e-04)
    @check_tf_min_version("1.9")
    @check_opset_min_version(11, "CropAndResize")
    def test_crop_and_resize_nearest(self):
        """crop_and_resize with nearest-neighbor sampling."""
        def func(input_x, boxes, box_ind, corp_size):
            return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, method='nearest')
        input_x_val = np.random.randint(low=0, high=256, size=[1, 36, 36, 3]).astype(np.float32)  # NHWC
        boxes_val = np.array([[0.2, 0.4, 0.6, 0.8]]).astype(np.float32)
        box_ind_val = np.array([0]).astype(np.int32)
        corp_size_val = np.array([30, 30]).astype(np.int32)
        self._run_test_case(func, [_OUTPUT],
                            {_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
                            rtol=1e-05, atol=1e-04)
    @check_opset_min_version(11, "CropAndResize")
    def test_crop_and_resize_extrapolation(self):
        """crop_and_resize with a box extending past the image, using extrapolation_value."""
        def func(input_x, boxes, box_ind, corp_size):
            return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, extrapolation_value=1.0)
        input_x_val = np.random.randint(low=0, high=256, size=[1, 36, 36, 3]).astype(np.float32)  # NHWC
        boxes_val = np.array([[0.2, 0.4, 1.2, 1.4]]).astype(np.float32)
        box_ind_val = np.array([0]).astype(np.int32)
        corp_size_val = np.array([40, 40]).astype(np.int32)
        self._run_test_case(func, [_OUTPUT],
                            {_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
                            rtol=1e-04, atol=1e-03)
    @check_opset_min_version(11, "CropAndResize")
    def test_crop_and_resize_empty_tensor(self):
        """crop_and_resize with zero images and zero boxes (empty-tensor edge case)."""
        def func(input_x, boxes, box_ind, corp_size):
            return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, extrapolation_value=1.0)
        input_x_val = np.random.randint(low=0, high=256, size=[0, 36, 36, 3]).astype(np.float32)  # NHWC
        boxes_val = np.array([]).astype(np.float32).reshape([0, 4])
        box_ind_val = np.array([]).astype(np.int32)
        corp_size_val = np.array([40, 40]).astype(np.int32)
        self._run_test_case(func, [_OUTPUT],
                            {_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
                            rtol=1e-04, atol=1e-03)
    def test_batch_to_space3d(self):
        """batch_to_space_nd on a 3-D input with const block size and crops."""
        block_size = [2, 2]
        crop = [[0, 1], [2, 1]]
        input_val = np.random.random_sample([40, 3, 100]).astype(np.float32)
        def func(x):
            return batch_to_space_nd(x, block_size, crop, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
    def test_space_to_batchnd(self):
        """space_to_batch_nd on 4-D inputs with two different const paddings."""
        block_size = [2, 2]
        pad = [[0, 1], [2, 1]]
        input_val = np.random.random_sample([40, 5, 7, 66]).astype(np.float32)
        def func(x):
            return space_to_batch_nd(x, block_size, pad, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
        pad = [[0, 0], [1, 2]]
        input_val = np.random.random_sample([10, 6, 7, 66]).astype(np.float32)
        def func(x):
            return space_to_batch_nd(x, block_size, pad, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
    @check_opset_min_version(10, "is_inf")
    def test_isinf(self):
        """is_inf for float32/float64 on all-finite, all-inf, and mixed inputs of rank 1..3."""
        x_types = [np.float32, np.float64]
        for x_type in x_types:
            x_val1 = np.array([1.0, -2.0, 3.0, -4.0], dtype=x_type)
            x_val2 = np.array([np.inf, np.inf, np.inf, np.inf], dtype=x_type).reshape((2, 2))
            x_val3 = np.array([1.0, np.inf, -3.0, np.inf, 5.0, np.inf, -7.0, np.inf], dtype=x_type).reshape((2, 2, 2))
            for x_val in [x_val1, x_val2, x_val3]:
                def func(x):
                    x_ = is_inf(x)
                    return tf.identity(x_, name=_TFOUTPUT)
                self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("2.3")
    @check_opset_min_version(10, "NonMaxSuppression")
    def test_non_max_suppression_v2(self):
        """raw NonMaxSuppressionV2 with a normal and a zero max_output_size."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func(boxes, scores):
            res1 = tf.raw_ops.NonMaxSuppressionV2(boxes=boxes, scores=scores,
                                                  max_output_size=int(box_num / 2), iou_threshold=0.5)
            res2 = tf.raw_ops.NonMaxSuppressionV2(boxes=boxes, scores=scores,
                                                  max_output_size=0, iou_threshold=0.5)
            return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_tf_min_version("2.3")
    @check_opset_min_version(10, "NonMaxSuppression")
    def test_non_max_suppression_v3(self):
        """raw NonMaxSuppressionV3 (adds score_threshold) with normal and zero max_output_size."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func(boxes, scores):
            res1 = tf.raw_ops.NonMaxSuppressionV3(boxes=boxes, scores=scores, score_threshold=0.1,
                                                  max_output_size=int(box_num / 2), iou_threshold=0.5)
            res2 = tf.raw_ops.NonMaxSuppressionV3(boxes=boxes, scores=scores, score_threshold=0.1,
                                                  max_output_size=0, iou_threshold=0.5)
            return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_tf_min_version("2.3")
    @check_opset_min_version(10, "NonMaxSuppression")
    @skip_tfjs("TFJS executes model incorrectly")
    def test_non_max_suppression_v4(self):
        """raw NonMaxSuppressionV4, without and with pad_to_max_output_size."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func1(boxes, scores):
            res1, res2 = tf.raw_ops.NonMaxSuppressionV4(boxes=boxes, scores=scores, score_threshold=0.1,
                                                        max_output_size=int(box_num / 2), iou_threshold=0.5)
            return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
        self._run_test_case(func1, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
        def func2(boxes, scores):
            res1, res2 = tf.raw_ops.NonMaxSuppressionV4(boxes=boxes, scores=scores, score_threshold=0.1,
                                                        max_output_size=2 * box_num, iou_threshold=0.5,
                                                        pad_to_max_output_size=True)
            return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
        self._run_test_case(func2, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_tf_min_version("2.3")
    @check_opset_min_version(10, "NonMaxSuppression")
    def test_non_max_suppression_v5(self):
        """raw NonMaxSuppressionV5 (soft-NMS outputs), without and with padding."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func1(boxes, scores):
            res1, res2, res3 = tf.raw_ops.NonMaxSuppressionV5(boxes=boxes, scores=scores, score_threshold=0.1,
                                                              max_output_size=int(box_num / 2), iou_threshold=0.5,
                                                              soft_nms_sigma=0)
            return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1), \
                   tf.identity(res3, name=_TFOUTPUT2)
        self._run_test_case(func1, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: boxes_val, _INPUT1: scores_val})
        def func2(boxes, scores):
            res1, res2, res3 = tf.raw_ops.NonMaxSuppressionV5(boxes=boxes, scores=scores, score_threshold=0.1,
                                                              max_output_size=2 * box_num, iou_threshold=0.5,
                                                              soft_nms_sigma=0, pad_to_max_output_size=True)
            return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1), \
                   tf.identity(res3, name=_TFOUTPUT2)
        self._run_test_case(func2, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_opset_min_version(10, "NonMaxSuppression")
    def test_non_max_suppression(self):
        """tf.image.non_max_suppression with a normal and a zero max_output_size."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func(boxes, scores):
            res1 = tf.image.non_max_suppression(boxes, scores, max_output_size=int(box_num / 2))
            res2 = tf.image.non_max_suppression(boxes, scores, max_output_size=0)
            return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_opset_min_version(10, "NonMaxSuppression")
    @allow_missing_shapes("TF shape inference misses reshape to scalar")
    @skip_tfjs("TFJS executes model incorrectly")
    def test_non_max_suppression_v4_padded(self):
        """non_max_suppression_padded with pad_to_max_output_size=True."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func(boxes, scores):
            ret1, ret2 = tf.image.non_max_suppression_padded(boxes, scores, max_output_size=int(box_num * 2),
                                                             pad_to_max_output_size=True)
            return tf.identity(ret1, name=_TFOUTPUT), tf.identity(ret2, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_opset_min_version(10, "NonMaxSuppression")
    @allow_missing_shapes("TF shape inference misses reshape to scalar")
    @skip_tfjs("TFJS executes model incorrectly")
    def test_non_max_suppression_v4_no_padding(self):
        """non_max_suppression_padded with pad_to_max_output_size=False."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func(boxes, scores):
            ret1, ret2 = tf.image.non_max_suppression_padded(boxes, scores, max_output_size=int(box_num),
                                                             pad_to_max_output_size=False)
            return tf.identity(ret1, name=_TFOUTPUT), tf.identity(ret2, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_tf_min_version("1.15")
    @check_opset_min_version(10, "NonMaxSuppression")
    def test_non_max_suppression_v5(self):
        """NMS v5 (with scores); soft_nms_sigma=0.0 keeps behavior equivalent to hard NMS."""
        box_num = 10
        boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
        scores_val = np.random.random_sample([box_num]).astype(np.float32)
        def func(boxes, scores):
            ret1, ret2 = tf.image.non_max_suppression_with_scores(boxes, scores, max_output_size=int(box_num / 2),
                                                                  soft_nms_sigma=0.0)
            return tf.identity(ret1, name=_TFOUTPUT), tf.identity(ret2, name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_tf_min_version("2.3")
    @check_opset_min_version(12, "GatherND with batch_dims")
    def test_combined_non_max_suppression_pad_and_clip(self):
        """Combined (batched, multi-class) NMS with pad_per_class=True and clip_boxes=True.

        Boxes are scaled to [-0.5, 1.5) so clipping to [0, 1] is actually exercised.
        """
        batch_size = 8
        box_num = 10
        classes_num = 2
        max_total_size = 9
        boxes_val = np.random.random_sample([batch_size, box_num, 1, 4]).astype(np.float32) * 2 - 0.5
        scores_val = np.random.random_sample([batch_size, box_num, classes_num]).astype(np.float32)
        def func(boxes, scores):
            nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = \
                tf.image.combined_non_max_suppression(boxes=boxes, scores=scores, score_threshold=0.1,
                                                      max_output_size_per_class=3, max_total_size=max_total_size,
                                                      iou_threshold=0.5, pad_per_class=True, clip_boxes=True)
            out1 = tf.identity(nmsed_boxes, name=_TFOUTPUT)
            out2 = tf.identity(nmsed_scores, name=_TFOUTPUT1)
            out3 = tf.identity(nmsed_classes, name=_TFOUTPUT2)
            out4 = tf.identity(valid_detections, name=_TFOUTPUT3)
            return out1, out2, out3, out4
        self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: boxes_val, _INPUT1: scores_val})
    @check_tf_min_version("2.3")
    @check_opset_min_version(12, "GatherND with batch_dims")
    def test_combined_non_max_suppression_no_pad_no_clip(self):
        """Combined (batched, multi-class) NMS with pad_per_class=False and clip_boxes=False."""
        batch_size = 8
        box_num = 10
        classes_num = 2
        max_total_size = 9
        boxes_val = np.random.random_sample([batch_size, box_num, 1, 4]).astype(np.float32) * 2 - 0.5
        scores_val = np.random.random_sample([batch_size, box_num, classes_num]).astype(np.float32)
        def func(boxes, scores):
            nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = \
                tf.image.combined_non_max_suppression(boxes=boxes, scores=scores, score_threshold=0.1,
                                                      max_output_size_per_class=3, max_total_size=max_total_size,
                                                      iou_threshold=0.5, pad_per_class=False, clip_boxes=False)
            out1 = tf.identity(nmsed_boxes, name=_TFOUTPUT)
            out2 = tf.identity(nmsed_scores, name=_TFOUTPUT1)
            out3 = tf.identity(nmsed_classes, name=_TFOUTPUT2)
            out4 = tf.identity(valid_detections, name=_TFOUTPUT3)
            return out1, out2, out3, out4
        self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: boxes_val, _INPUT1: scores_val})
def _conv1d_test(self, x_val, w, stride=None, padding="VALID", rtol=1e-07):
if stride is None:
stride = 1
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv1d(x, kernel, stride=stride, padding=padding)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=rtol)
def test_conv1d_1(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w)
def test_conv1d_2(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w, stride=2)
def test_conv1d_3(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w, padding="SAME")
def test_conv1d_4(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w, rtol=1e-05)
def test_conv1d_5(self):
x_val = make_xval((1, 7, 1))
w = np.array([3., 3., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w)
    @check_opset_min_version(10, "ThresholdedRelu")
    def test_thresholded_relu(self):
        """Keras ThresholdedReLU maps to exactly one ONNX ThresholdedRelu node, for several thetas."""
        # tf.keras.layers.ThresholdedReLU only supports `float32` for x
        x_val = np.array([0.0, 1.0, -1.0, 2.0, -2.0, 0.5, -0.5, 1.5, -1.5], dtype=np.float32).reshape((3, 3))
        theta_vals = [0.0, 0.5, 1.0, 2.0]
        for theta_val in theta_vals:
            # func is re-defined per iteration so each closure binds the current theta_val.
            def func(x):
                t = tf.keras.layers.ThresholdedReLU(theta=theta_val)
                x_ = t.call(x)
                return tf.identity(x_, name=_TFOUTPUT)
            self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
                                graph_validator=lambda g: check_op_count(g, "ThresholdedRelu", 1))
    @check_tf_min_version("1.13")
    @check_opset_min_version(11, "MaxPoolWithArgmax")
    def test_maxpoolwithargmax(self):
        """max_pool_with_argmax over a suite of paddings/shapes/ksizes/strides from the shared fixture."""
        for p in get_maxpoolwithargmax_getdata():
            _, padding, x_shape, ksize, strides = p
            x_val = np.random.uniform(0, 10, x_shape)
            def func(x):
                mp = tf.nn.max_pool_with_argmax(x, ksize, strides, padding=padding)
                return tf.identity(mp[0], name=_TFOUTPUT), tf.identity(mp[1], name=_TFOUTPUT1)
            self.logger.debug(str(p))
            self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
    @check_tf_min_version("1.15")
    @check_opset_min_version(11, "MaxPoolWithArgmax")
    def test_maxpoolwithargmax_batch_in_index(self):
        """max_pool_with_argmax with include_batch_in_index=True (argmax encodes the batch dim)."""
        padding = 'SAME'
        x_shape = [2, 10, 5, 3]
        ksize = [1, 4, 4, 1]
        strides = [1, 1, 1, 1]
        x_val = np.random.uniform(0, 10, x_shape)
        def func(x):
            mp = tf.nn.max_pool_with_argmax(x, ksize, strides, padding=padding, include_batch_in_index=True)
            return tf.identity(mp[0], name=_TFOUTPUT), tf.identity(mp[1], name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
    @check_tf_min_version("1.15")
    @check_opset_min_version(11, "MaxPoolWithArgmax")
    def test_maxpoolwithargmax_unknown_c(self):
        """max_pool_with_argmax where the channel dim is only known at run time (via broadcast_to)."""
        padding = 'SAME'
        x_shape = [2, 10, 5, 1]
        ksize = [1, 4, 4, 1]
        strides = [1, 1, 1, 1]
        x_val = np.random.uniform(0, 10, x_shape)
        s_val = np.array([2, 10, 5, 4], np.int64)
        def func(x, s):
            # Broadcasting to a runtime shape hides the static channel count from TF shape inference.
            x = tf.broadcast_to(x, s)
            mp = tf.nn.max_pool_with_argmax(x, ksize, strides, padding=padding, include_batch_in_index=True)
            return tf.identity(mp[0], name=_TFOUTPUT), tf.identity(mp[1], name=_TFOUTPUT1)
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val, _INPUT1: s_val})
@check_opset_min_version(10, "Selu")
def test_selu(self):
x_val = np.random.random_sample([3]).astype(np.float32)
def func(x):
y = tf.nn.selu(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(8, "ClipByValue (needs broadcast)")
    def test_clip_by_value(self):
        """tf.clip_by_value in three variants: dynamic float bounds, const float bounds, const int bounds."""
        # float32, dynamic min/max
        x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 8])
        x_minval = np.array(8.5, dtype=np.float32)
        x_maxval = np.array(16.5, dtype=np.float32)
        def func(x, x_min, x_max):
            y = tf.clip_by_value(x, x_min, x_max)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_minval, _INPUT2: x_maxval})
        # float32, const min/max
        x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 8])
        def func(x):
            y = tf.clip_by_value(x, 8.5, 16.5)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        # int32, converter needs to cast, const min/max
        x_val = np.arange(0, 24, dtype=np.int32).reshape([3, 8])
        def func(x):
            y = tf.clip_by_value(x, 8, 16)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_softmax(self):
x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 1, 8])
def func(x):
y = tf.nn.softmax(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_log_softmax(self):
x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 1, 8])
def func(x):
y = tf.nn.log_softmax(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Range")
    def test_ctc_greedy_decoder(self):
        """ctc_greedy_decoder without merging repeated labels; checks all sparse outputs and log probs."""
        x_val = np.random.uniform(size=(3, 4, 5)).astype(np.float32)
        s_val = np.array([3, 3, 2, 3], np.int32)
        def func(x, s):
            [decoded], logits = tf.nn.ctc_greedy_decoder(x, s, merge_repeated=False)
            r1 = tf.identity(decoded.indices, name=_TFOUTPUT)
            r2 = tf.identity(decoded.values, name=_TFOUTPUT1)
            r3 = tf.identity(decoded.dense_shape, name=_TFOUTPUT2)
            r4 = tf.identity(logits, name=_TFOUTPUT3)
            return r1, r2, r3, r4
        self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: x_val, _INPUT1: s_val})
    @check_opset_min_version(11, "Range")
    def test_ctc_greedy_decoder_merge_repeated(self):
        """ctc_greedy_decoder with merge_repeated=True; checks all sparse outputs and log probs."""
        x_val = np.random.uniform(size=(6, 4, 5)).astype(np.float32)
        s_val = np.array([5, 6, 4, 6], np.int32)
        def func(x, s):
            [decoded], logits = tf.nn.ctc_greedy_decoder(x, s, merge_repeated=True)
            r1 = tf.identity(decoded.indices, name=_TFOUTPUT)
            r2 = tf.identity(decoded.values, name=_TFOUTPUT1)
            r3 = tf.identity(decoded.dense_shape, name=_TFOUTPUT2)
            r4 = tf.identity(logits, name=_TFOUTPUT3)
            return r1, r2, r3, r4
        self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: x_val, _INPUT1: s_val})
# test for gemm pattern0: alpha*A*B + beta*C
def test_gemm_pattern0(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
alpha = tf.constant(1.0, dtype=tf.float32)
beta = tf.constant(2.0, dtype=tf.float32)
mul1 = tf.multiply(alpha, tf.matmul(a, b))
mul2 = tf.multiply(beta, c)
x_ = mul1 + mul2
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
# test for gemm pattern1: alpha*A*B + C
def test_gemm_pattern1(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
alpha = tf.constant(1.0, dtype=tf.float32)
x_ = tf.multiply(alpha, tf.matmul(a, b)) + c
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
# test for gemm pattern2: A*B + beta*C
def test_gemm_pattern2(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
beta = tf.constant(2.0, dtype=tf.float32)
x_ = tf.matmul(a, b) + tf.multiply(beta, c)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
# test for gemm pattern3: A*B + C
def test_gemm_pattern3(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
x_ = tf.matmul(a, b) + c
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
# test for gemm pattern4: A*B + C [addbias] - 1D bias!
def test_gemm_pattern4(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number) # bias add requires 1D tensor
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(k).astype("float32")
def func(a, b, c):
x_ = tf.nn.bias_add(tf.matmul(a, b), c)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
    # test for gemm pattern0: alpha*A*B + beta*C
    @check_opset_min_version(12, "Optimizer bug in ORT 1.2")
    def test_gemm_pattern0_fail_broadcast(self):
        """Negative case: when C is not uni-broadcastable to A*B, no Gemm fusion may happen."""
        # shapes (3, 3) * (3, 1) + (1, 4) => (3, 1) + (1, 4)
        # c not uni-broadcastable to a * b, so should not use GEMM
        m, n, k = 3, 3, 1
        x_val1 = np.random.rand(m, n).astype("float32")
        x_val2 = np.random.rand(n, k).astype("float32")
        x_val3 = np.random.rand(k, 4).astype("float32")
        def func(a, b, c):
            alpha = tf.constant(1.0, dtype=tf.float32)
            beta = tf.constant(2.0, dtype=tf.float32)
            mul1 = tf.multiply(alpha, tf.matmul(a, b))
            mul2 = tf.multiply(beta, c)
            x_ = mul1 + mul2
            return tf.identity(x_, name=_TFOUTPUT)
        def graph_validator(g):
            # The converted graph must contain no Gemm node at all.
            if 'Gemm' in [n.type for n in g.get_nodes()]: return False
            return True
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
                            graph_validator=graph_validator)
    def test_graph_matcher(self):
        """GraphMatcher allow_reorder: the pattern only matches when input reordering is allowed.

        The two branches intentionally reuse the names 'input1'/'input2' with
        swapped op types so the strict (allow_reorder=False) match must fail.
        """
        shape = [2, 6]
        x_val = np.random.random(shape).astype(np.float32)
        y_val = np.random.random(shape).astype(np.float32)
        z_val = np.random.random(shape).astype(np.float32)
        def func(x, y, z):
            tmp1 = x + y
            tmp2 = x - y
            tmp3 = tf.multiply(tmp1, z)
            tmp4 = tf.multiply(tmp2, z)
            return tf.add(tmp4, tmp3, name=_TFOUTPUT)
        onnx_graph = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
        pattern = \
            OpTypePattern('Add', name='output', inputs=[
                OpTypePattern('Mul', inputs=[
                    OpTypePattern('Add', name='input1'),
                    OpTypePattern('*', name='input2')]),
                OpTypePattern('Mul', inputs=[
                    OpTypePattern('Sub', name='input1'),
                    OpTypePattern('*', name='input2')])])
        matcher = GraphMatcher(pattern, allow_reorder=False)
        match_results = list(matcher.match_ops(onnx_graph.get_nodes()))
        self.assertTrue(len(match_results) == 0)
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(onnx_graph.get_nodes()))
        self.assertTrue(len(match_results) == 1)
def test_add2(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.add(x, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "CumSum")
def test_cumsum(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.cumsum(x, axis=1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "CumSum")
def test_cumsum_axis1_reverse_exclusive(self):
x_val = np.array([1., 2., 3., 4.,
5., 6., 7., 8.,
9., 10., 11., 12.,
13., 14., 15., 16.,
17., 18., 19., 20.,
21., 22., 23., 24.], dtype=np.float32).reshape((2, 3, 4))
def func(x):
x_ = tf.cumsum(x, axis=1, reverse=True)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
def test_cumprod(self):
x_val = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.cumprod(x, axis=0)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(10, "Slice")
    def test_cumprod_axis1(self):
        """Cumulative product along a middle axis of a rank-3 tensor."""
        x_val = np.array([1., 2., 3., 4.,
                          5., 6., 7., 8.,
                          9., 10., 11., 12.,
                          13., 14., 15., 16.,
                          17., 18., 19., 20.,
                          21., 22., 23., 24.], dtype=np.float32).reshape((2, 3, 4))
        def func(x):
            x_ = tf.math.cumprod(x, axis=1)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(10, "Slice")
    def test_cumprod_axis1_reverse(self):
        """Reverse cumulative product along axis 1."""
        x_val = np.array([1., 2., 3., 4.,
                          5., 6., 7., 8.,
                          9., 10., 11., 12.,
                          13., 14., 15., 16.,
                          17., 18., 19., 20.,
                          21., 22., 23., 24.], dtype=np.float32).reshape((2, 3, 4))
        def func(x):
            x_ = tf.math.cumprod(x, axis=1, reverse=True)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(10, "Slice")
    def test_cumprod_axis1_reverse_exclusive(self):
        """Reverse and exclusive cumulative product along axis 1."""
        x_val = np.array([1., 2., 3., 4.,
                          5., 6., 7., 8.,
                          9., 10., 11., 12.,
                          13., 14., 15., 16.,
                          17., 18., 19., 20.,
                          21., 22., 23., 24.], dtype=np.float32).reshape((2, 3, 4))
        def func(x):
            x_ = tf.math.cumprod(x, axis=1, reverse=True, exclusive=True)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Round")
    def test_round(self):
        """tf.round with signed zeros, half-way values, and NaN (round-half-to-even semantics)."""
        x_val = np.array([-0.7, -0.5, -0.0, 0.0, +0.0, 0.3, 0.5, 0.7, float('nan')], dtype=np.float32)
        def func(x):
            x_ = tf.round(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    def test_round_approx(self):
        """tf.round for opsets without Round; inputs avoid the .5-tie cases the approximation gets wrong."""
        # In lower opsets there is no Round, but we can approximate it forgoing nearest even
        x_val = np.array([-0.7, -0.5, -0.0, 0.0, +0.0, 0.3, 1.5, 0.7, float('nan')], dtype=np.float32)
        def func(x):
            x_ = tf.round(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Round")
    def test_rint(self):
        """tf.math.rint with tie values (.5) to exercise round-half-to-even, plus NaN."""
        x_val = np.array([-2.7, -1.5, -0.0, +0.0, 0.3, 0.5, 1.5, 2.5, 3.4, 3.5, float('nan')], dtype=np.float32)
        def func(x):
            x_ = tf.math.rint(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Det")
    @unittest.skip("unclear how this is called in tf-2, fix later")
    def test_determinant(self):
        """Batched matrix determinant (currently skipped; uses the TF1 matrix_determinant API)."""
        x_val = np.array([1., 2., 3., 4., 1., 2.,
                          2., 1., 1., 3., 3., 1.,
                          1., 2., 3., 4., 1., 2.,
                          2., 1., 1., 3., 3., 1.],
                         dtype=np.float32).reshape((1, 2, 3, 2, 2))
        def func(x):
            x_ = tf.matrix_determinant(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "BitShift")
    def test_bitshift_left(self):
        """Elementwise left shift with per-element shift amounts."""
        x_val = np.array([16, 4, 1], dtype=np.int32)
        y_val = np.array([1, 2, 3], dtype=np.int32)
        def func(x, y):
            x_ = tf.bitwise.left_shift(x, y)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(11, "BitShift")
    def test_bitshift_right(self):
        """Right shift by a scalar, including negative values and int32 extremes."""
        info = np.iinfo(np.int32)
        x_val = np.array([-1, 0, 1, info.max, info.min], dtype=np.int32)
        def func(x):
            x_ = tf.bitwise.right_shift(x, 1)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(18, "BitwiseAnd")
    def test_bitwise_and(self):
        """Elementwise bitwise AND of two int32 tensors."""
        x_val = np.array([21, 4, 1], dtype=np.int32)
        y_val = np.array([45, 69, 3], dtype=np.int32)
        def func(x, y):
            x_ = tf.bitwise.bitwise_and(x, y)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(18, "BitwiseOr")
    def test_bitwise_or(self):
        """Elementwise bitwise OR of two int32 tensors."""
        x_val = np.array([21, 4, 87], dtype=np.int32)
        y_val = np.array([45, 69, 173], dtype=np.int32)
        def func(x, y):
            x_ = tf.bitwise.bitwise_or(x, y)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(18, "BitwiseXor")
    def test_bitwise_xor(self):
        """Elementwise bitwise XOR of two int32 tensors."""
        x_val = np.array([21, 4, 87], dtype=np.int32)
        y_val = np.array([45, 69, 173], dtype=np.int32)
        def func(x, y):
            x_ = tf.bitwise.bitwise_xor(x, y)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(18, "BitwiseNot")
    def test_bitwise_not(self):
        """Elementwise bitwise inversion of an int32 tensor."""
        x_val = np.array([21, 4, 1], dtype=np.int32)
        def func(x):
            x_ = tf.bitwise.invert(x)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14", "tensor_scatter_nd_update needs tf 1.14")
    @check_opset_min_version(11, "ScatterND")
    def test_tensor_scatter_update(self):
        """tensor_scatter_nd_update with int64 indices on a 1-D int32 tensor."""
        x_val = np.array([10, 20, 30, 40], dtype=np.int32).reshape((4))
        y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
        z_val = np.array([8, 11], dtype=np.int32).reshape((2))
        def func(x, y, z):
            x_ = tf.tensor_scatter_nd_update(x, y, z)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_tf_min_version("1.15", "tensor_scatter_nd_update for strings needs tf 1.15")
    @check_opset_min_version(11, "ScatterND")
    @skip_tflite("Conversion crashes")
    def test_tensor_scatter_update_str(self):
        """tensor_scatter_nd_update on a string tensor, including non-ASCII values."""
        x_val = np.array(['A', '♠♣♥♦', 'B', 'C'], dtype=str).reshape((4))
        y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
        z_val = np.array(['☺', '11'], dtype=str).reshape((2))
        def func(x, y, z):
            x_ = tf.tensor_scatter_nd_update(x, y, z)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_tf_min_version("1.15", "tensor_scatter_nd_update for strings needs tf 1.15")
    @check_opset_min_version(11, "ScatterND")
    @skip_tflite("Conversion crashes")
    def test_tensor_scatter_update_str_const(self):
        """Same as test_tensor_scatter_update_str but the updates come from a graph constant."""
        x_val = np.array(['A', '♠♣♥♦', 'B', 'C'], dtype=str).reshape((4))
        y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
        z_val = np.array(['☺', '11'], dtype=str).reshape((2))
        def func(x, y):
            z = tf.constant(z_val)
            x_ = tf.tensor_scatter_nd_update(x, y, z)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_tf_min_version("1.14", "tensor_scatter_nd_update needs tf 1.14")
    @check_opset_min_version(11, "ScatterND")
    def test_tensor_scatter_update_cast_indices(self):
        """tensor_scatter_nd_update with int32 indices (converter must cast them to int64 for ScatterND)."""
        x_val = np.array([10, 20, 30, 40], dtype=np.int32).reshape((4))
        y_val = np.array([0, 2], dtype=np.int32).reshape((2, 1))
        z_val = np.array([8, 11], dtype=np.int32).reshape((2))
        def func(x, y, z):
            x_ = tf.tensor_scatter_nd_update(x, y, z)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_opset_min_version(16, "ScatterND")
    def test_scatternd_add(self):
        """tensor_scatter_nd_add maps to ScatterND with reduction='add' (opset 16+)."""
        x_val = np.array([10, 20, 30, 40], dtype=np.int32).reshape((4))
        y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
        z_val = np.array([20, 30], dtype=np.int32).reshape((2))
        def func(x, y, z):
            x_ = tf.tensor_scatter_nd_add(x, y, z)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_opset_min_version(11, "ScatterND")
    def test_scatternd_1d(self):
        """tf.scatter_nd producing a 1-D output (x=indices, y=updates, z=output shape)."""
        x_val = np.array([4, 3, 1, 7], dtype=np.int32).reshape((4, 1))
        y_val = np.array([9, 10, 11, 12], dtype=np.int64).reshape((4))
        z_val = np.array([8], dtype=np.int32).reshape(1)
        def func(x, y, z):
            x_ = tf.scatter_nd(x, y, z)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_opset_min_version(11, "ScatterND")
    def test_scatternd_3d(self):
        """tf.scatter_nd producing a 3-D (4x4x4) output from rank-3 update slices."""
        x_val = np.array([0, 2], dtype=np.int32).reshape((2, 1))
        y_val = np.array([[[5, 5, 5, 5], [6, 6, 6, 6],
                           [7, 7, 7, 7], [8, 8, 8, 8]],
                          [[5, 5, 5, 5], [6, 6, 6, 6],
                           [7, 7, 7, 7], [8, 8, 8, 8]]], dtype=np.float32).reshape((2, 4, 4))
        z_val = np.array([4, 4, 4], dtype=np.int32).reshape(3)
        def func(x, y, z):
            x_ = tf.scatter_nd(x, y, z)
            return tf.identity(x_, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
    @check_opset_min_version(11, "Unique")
    def test_unique(self):
        """tf.unique when only the values output is consumed (indices discarded)."""
        x_val = np.array([1, 2, 8, 1, 2, 2, 7, 7, 7, 1], dtype=np.float32)
        def func(x):
            x1_, _ = tf.unique(x)
            y1 = tf.identity(x1_, name=_TFOUTPUT)
            return y1
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Unique")
    def test_unique_indices_int64(self):
        """tf.unique with int64 index output dtype."""
        x_val = np.array([2, 3, 3, 6, 4, 1, 1], dtype=np.float32)
        def func(x):
            x1_, x2_ = tf.unique(x, out_idx=tf.int64)
            y1 = tf.identity(x1_, name=_TFOUTPUT)
            y2 = tf.identity(x2_, name=_TFOUTPUT1)
            return y1, y2
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
    @check_opset_min_version(11, "Unique")
    def test_unique_indices_int32(self):
        """tf.unique with int32 index output dtype (converter must cast from ONNX's int64)."""
        x_val = np.array([2, 3, 3, 6, 4, 1, 1], dtype=np.float32)
        def func(x):
            x1_, x2_ = tf.unique(x, out_idx=tf.int32)
            y1 = tf.identity(x1_, name=_TFOUTPUT)
            y2 = tf.identity(x2_, name=_TFOUTPUT1)
            return y1, y2
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
    @check_opset_min_version(11, "Unique")
    def test_bincount(self):
        """tf.math.bincount with default weights/length on a 1-D int32 tensor."""
        x_val = np.array([5, 2, 3, 1, 3, 2, 7, 5, 9, 10], dtype=np.int32)
        def func(x):
            x_ = tf.math.bincount(x)
            y_ = tf.identity(x_, name=_TFOUTPUT)
            return y_
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @skip_tflite("Bug in tflite output shapes")
    @skip_tfjs("TFJS executes model incorrectly")
    @check_opset_min_version(11, "Unique")
    @check_tf_min_version("2.3", "needs tf.math.bincount with axis attr")
    def test_dense_bincount(self):
        """tf.math.bincount over all combinations of axis in {0, -1} and binary_output in {True, False}.

        y is fed but unused by the graph; presumably kept to exercise the multi-input
        path of the test harness — TODO confirm.
        """
        x_val = np.array([[5, 2, 3, 1, 3], [2, 7, 5, 9, 10]], dtype=np.int32)
        y_val = np.array([[2.0, 1.5, 3.5, 4.5, 5.5], [6.5, 7.5, 8.5, 9.5, 10.5]], dtype=np.float32)
        for a in [0, -1]:
            for b in [True, False]:
                # func is re-defined per iteration so each closure binds the current a and b.
                def func(x, y):
                    x_ = tf.math.bincount(x, axis=a, binary_output=b)
                    y_ = tf.identity(x_, name=_TFOUTPUT)
                    return y_
                self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
    @check_opset_min_version(11, "ScatterND")
    def test_sparse_to_dense(self):
        """tf.sparse.to_dense with a non-zero default value and validated indices."""
        i_val = np.array([[0, 0, 0], [0, 0, 2], [0, 1, 3], [1, 2, 2], [1, 2, 3]], dtype=np.int64)
        v_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.float32)
        ds_val = np.array([2, 3, 4], dtype=np.int64)
        d_val = np.array(2.5, dtype=np.float32)
        def func(indices, values, dense_shape, default):
            st = tf.SparseTensor(indices, values, dense_shape)
            dense = tf.sparse.to_dense(st, default, validate_indices=True)
            x_ = tf.identity(dense, name=_TFOUTPUT)
            return x_
        self._run_test_case(func, [_OUTPUT], {_INPUT: i_val, _INPUT1: v_val, _INPUT2: ds_val, _INPUT3: d_val})
    @check_opset_min_version(11, "Unique")
    def test_sparse_fill_empty_rows(self):
        """tf.sparse.fill_empty_rows: rows 0, 2 and 4 are empty and get the default value."""
        i_val = np.array([[1, 0, 0], [1, 0, 2], [1, 1, 3], [3, 2, 2], [3, 2, 3]], dtype=np.int64)
        v_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.float32)
        ds_val = np.array([5, 3, 4], dtype=np.int64)
        d_val = np.array(2.5, dtype=np.float32)
        def func(indices, values, dense_shape, default):
            st = tf.SparseTensor(indices, values, dense_shape)
            st_, indicator = tf.sparse.fill_empty_rows(st, default)
            dense = tf.sparse.to_dense(st_, 0, validate_indices=False)
            dense_ = tf.identity(dense, name=_TFOUTPUT)
            indicator_ = tf.identity(indicator, name=_TFOUTPUT1)
            return dense_, indicator_
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: i_val, _INPUT1: v_val, _INPUT2: ds_val, _INPUT3: d_val})
    @check_opset_min_version(11, "CumSum")
    def test_sparse_reshape(self):
        """tf.sparse.reshape with a -1 (inferred) dimension; checks new indices and dense shape."""
        indices_val = np.array([[1, 0, 0], [1, 0, 2], [1, 1, 3], [3, 2, 2], [3, 2, 3]], dtype=np.int64)
        values_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.int64)
        dense_shape_val = np.array([5, 3, 4], dtype=np.int64)
        new_shape_val = np.array([2, -1, 1, 3], dtype=np.int64)
        def func(indices, values, dense_shape, new_shape):
            st = tf.SparseTensor(indices, values, dense_shape)
            st_ = tf.sparse.reshape(st, new_shape)
            indices_ = st_.indices
            dense_shape_ = st_.dense_shape
            indices_ = tf.identity(indices_, name=_TFOUTPUT)
            dense_shape_ = tf.identity(dense_shape_, name=_TFOUTPUT1)
            return indices_, dense_shape_
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: indices_val, _INPUT1: values_val,
                                                        _INPUT2: dense_shape_val, _INPUT3: new_shape_val})
    @check_opset_min_version(11, "CumSum")
    def test_sparse_reshape_unknown_rank(self):
        """tf.sparse.reshape when the target shape has unknown rank (hidden behind a dynamic tf.pad)."""
        indices_val = np.array([[1, 0, 0], [1, 0, 2], [1, 1, 3], [3, 2, 2], [3, 2, 3]], dtype=np.int64)
        values_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.int64)
        dense_shape_val = np.array([5, 3, 4], dtype=np.int64)
        new_shape_val = np.array([2, 10, 1, 3], dtype=np.int64)
        shape_pad_val = np.zeros((1, 2), dtype=np.int64)
        def func(indices, dense_shape, new_shape, shape_pad):
            st = tf.SparseTensor(indices, values_val, dense_shape)
            # Some hackery to make the rank unknown
            new_shape_ = tf.pad(new_shape, shape_pad, constant_values=0)
            st_ = tf.sparse.reshape(st, new_shape_)
            indices_ = st_.indices
            dense_shape_ = st_.dense_shape
            indices_ = tf.identity(indices_, name=_TFOUTPUT)
            dense_shape_ = tf.identity(dense_shape_, name=_TFOUTPUT1)
            return indices_, dense_shape_
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: indices_val, _INPUT1: dense_shape_val,
                                                        _INPUT2: new_shape_val, _INPUT3: shape_pad_val})
    @check_tf_min_version("1.14", "ragged needs tf 1.14")
    @check_opset_min_version(11, "CumSum")
    def test_ragged_tensor_to_sparse(self):
        """RaggedTensor (two nested split levels) converted to a SparseTensor; checks all three outputs."""
        splits_val1 = np.array([0, 1, 1, 5], dtype=np.int32)
        splits_val2 = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
        dense_vals_val = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=np.float32)
        def func(splits1, splits2, rt_dense_values):
            x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits1, splits2], validate=True)
            s = x.to_sparse()
            indices, values, shape = s.indices, s.values, s.dense_shape
            indices = tf.identity(indices, name=_TFOUTPUT)
            values = tf.identity(values, name=_TFOUTPUT1)
            shape = tf.identity(shape, name=_TFOUTPUT2)
            return indices, values, shape
        self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2],
                            {_INPUT: splits_val1, _INPUT1: splits_val2, _INPUT2: dense_vals_val})
    @check_tf_min_version("1.14", "ragged needs tf 1.14")
    @check_opset_min_version(11, "CumSum")
    def test_ragged_gather(self):
        """tf.gather on a RaggedTensor with repeated indices; checks resulting splits and values."""
        splits_val = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
        dense_vals_val = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=np.float32)
        indices_val = np.array([1, 3, 2, 0, 1, 1, 4, 3, 3], dtype=np.int32)
        def func(splits, rt_dense_values, indices):
            x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits], validate=True)
            g = tf.gather(x, indices)
            rt_nested_splits = tf.identity(g.row_splits, name=_TFOUTPUT)
            rt_dense_values = tf.identity(g.flat_values, name=_TFOUTPUT1)
            return rt_nested_splits, rt_dense_values
        self._run_test_case(func, [_OUTPUT, _OUTPUT1],
                            {_INPUT: splits_val, _INPUT1: dense_vals_val, _INPUT2: indices_val})
    @check_tf_min_version("1.14", "ragged needs tf 1.14")
    @check_opset_min_version(11, "CumSum")
    @skip_tflite("unknown rank")
    def test_ragged_tensor_to_tensor(self):
        """RaggedTensor.to_tensor with a non-zero default fill value (nested row splits)."""
        splits_val1 = np.array([0, 1, 1, 5], dtype=np.int32)
        splits_val2 = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
        dense_vals_val = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=np.float32)
        def func(splits1, splits2, rt_dense_values):
            x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits1, splits2], validate=True)
            y = x.to_tensor(default_value=7)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: splits_val1, _INPUT1: splits_val2, _INPUT2: dense_vals_val})
    @check_tf_min_version("1.14", "ragged needs tf 1.14")
    @check_opset_min_version(11, "CumSum")
    @skip_tflite("unknown rank")
    def test_ragged_tensor_to_tensor_row_ids(self):
        """RaggedTensor built from nested value_rowids (with explicit nrows) then densified."""
        ids_val1 = np.array([0, 0, 0, 2, 2], dtype=np.int32)
        ids_val2 = np.array([0, 0, 2, 2, 2, 3, 3, 4], dtype=np.int32)
        dense_vals_val = make_xval([8, 2, 3])
        def func(ids1, ids2, rt_dense_values):
            x = tf.RaggedTensor.from_nested_value_rowids(rt_dense_values, [ids1, ids2], [4, 5])
            y = x.to_tensor(default_value=7)
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: ids_val1, _INPUT1: ids_val2, _INPUT2: dense_vals_val})
    @check_tf_min_version("2.2", "ragged to_tensor with constrained shape")
    @check_opset_min_version(11, "CumSum")
    def test_ragged_tensor_to_tensor_constrain_shape(self):
        """RaggedTensor.to_tensor with an explicit, partially-specified output shape."""
        splits_val1 = np.array([0, 1, 1, 5], dtype=np.int32)
        splits_val2 = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
        dense_vals_val = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=np.float32)
        # NOTE(review): the line above is immediately overwritten — looks like leftover
        # from copy-paste of the other ragged tests; only the make_xval value is used.
        dense_vals_val = make_xval([10, 2, 3])
        def func(splits1, splits2, rt_dense_values):
            x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits1, splits2], validate=True)
            y = x.to_tensor(default_value=7, shape=[20, None, 2, None, 3])
            return tf.identity(y, name=_TFOUTPUT)
        self._run_test_case(func, [_OUTPUT], {_INPUT: splits_val1, _INPUT1: splits_val2, _INPUT2: dense_vals_val})
    @check_tf_min_version("1.14", "ragged needs tf 1.14")
    @check_opset_min_version(11, "Range")
    def test_ragged_range_float(self):
        """tf.ragged.range with float bounds, including negative deltas, empty rows, and non-integer steps."""
        starts_val = np.array([0, 0, 1, 10, 0.5, 0.5], dtype=np.float32)
        limits_val = np.array([-5, -2, 7, 100, 1, 1], dtype=np.float32)
        deltas_val = np.array([-1, 1, 2, 20, 1, 1.1], dtype=np.float32)
        def func(starts, limits, deltas):
            x = tf.ragged.range(starts, limits, deltas)
            rt_nested_splits = tf.identity(x.row_splits, name=_TFOUTPUT)
            rt_dense_values = tf.identity(x.flat_values, name=_TFOUTPUT1)
            return rt_nested_splits, rt_dense_values
        self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: starts_val, _INPUT1: limits_val,
                                                        _INPUT2: deltas_val})
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "Range")
def test_ragged_range_int(self):
    """tf.ragged.range with int32 bounds, covering empty, ascending and descending ranges."""
    starts_val = np.array([0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int32)
    limits_val = np.array([-6, -5, -4, -1, 0, 1, 4, 5, 6, 2, -2], dtype=np.int32)
    deltas_val = np.array([-5, -5, -5, -5, 5, 5, 5, 5, 5, 1, -1], dtype=np.int32)
    def func(starts, limits, deltas):
        ragged = tf.ragged.range(starts, limits, deltas)
        splits_out = tf.identity(ragged.row_splits, name=_TFOUTPUT)
        values_out = tf.identity(ragged.flat_values, name=_TFOUTPUT1)
        return splits_out, values_out
    self._run_test_case(func, [_OUTPUT, _OUTPUT1],
                        {_INPUT: starts_val, _INPUT1: limits_val, _INPUT2: deltas_val})
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "Range")
def test_ragged_range_scalar(self):
    """tf.ragged.range where starts and deltas are scalars broadcast against vector limits."""
    starts_val = np.array(0, dtype=np.int32)
    limits_val = np.array([5, -1, -1, 2, 7, 100, 4, 5, 6], dtype=np.int32)
    deltas_val = np.array(1, dtype=np.int32)
    def func(starts, limits, deltas):
        ragged = tf.ragged.range(starts, limits, deltas)
        splits_out = tf.identity(ragged.row_splits, name=_TFOUTPUT)
        values_out = tf.identity(ragged.flat_values, name=_TFOUTPUT1)
        return splits_out, values_out
    self._run_test_case(func, [_OUTPUT, _OUTPUT1],
                        {_INPUT: starts_val, _INPUT1: limits_val, _INPUT2: deltas_val})
@check_tf_min_version("2.2", "ragged variant needs tf 2.2")
@check_opset_min_version(13, "Loop over tensor sequences")
def test_ragged_to_variant(self):
    """map_fn over a ragged tensor, exercising the ragged<->variant round trip in a loop."""
    splits_val = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
    values_val = np.arange(10 * 3 * 2, dtype=np.float32).reshape([10, 3, 2])
    def fn(elem):
        # elementwise polynomial; preserves per-row shapes
        return elem + elem * elem
    def func(splits, rt_dense_values):
        ragged = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits], validate=True)
        mapped = tf.map_fn(fn, ragged)
        return (tf.identity(mapped.row_splits, name=_TFOUTPUT),
                tf.identity(mapped.flat_values, name=_TFOUTPUT1))
    self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: splits_val, _INPUT1: values_val})
@check_tf_min_version("2.2", "ragged variant needs tf 2.2")
@check_opset_min_version(13, "Loop over tensor sequences")
def test_ragged_to_variant_unknown_shape(self):
    """Same as test_ragged_to_variant but with shapes only known at runtime.

    The dense values are built from a runtime shape tensor and the splits go
    through a (no-op) pad so the converter cannot rely on static shapes.
    """
    splits_val = np.array([0, 3, 3, 5, 9, 10], dtype=np.int64)
    dense_vals_shape = np.array([10, 3, 2], dtype=np.int32)
    splits_pads_val = np.array([[0, 0]], dtype=np.int32)
    def fn(elem):
        return elem + elem * elem
    def func(splits, rt_dense_values_shape, splits_pads):
        dense_vals = tf.ones(rt_dense_values_shape, dtype=tf.int32)
        padded_splits = tf.pad(splits, splits_pads)
        ragged = tf.RaggedTensor.from_nested_row_splits(dense_vals, [padded_splits], validate=True)
        mapped = tf.map_fn(fn, ragged)
        return (tf.identity(mapped.row_splits, name=_TFOUTPUT),
                tf.identity(mapped.flat_values, name=_TFOUTPUT1))
    self._run_test_case(func, [_OUTPUT, _OUTPUT1],
                        {_INPUT: splits_val, _INPUT1: dense_vals_shape, _INPUT2: splits_pads_val})
@check_opset_min_version(9, "Compress")
def test_dynamic_partition_both_vector(self):
    """tf.dynamic_partition splitting a 1-D tensor into 3 partitions."""
    data_val = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
    part_val = np.array([0, 0, 1, 1, 0, 2, 1, 0], dtype=np.int32)
    def func(data, partitions):
        parts = tf.dynamic_partition(data, partitions, num_partitions=3)
        out0 = tf.identity(parts[0], name=_TFOUTPUT)
        out1 = tf.identity(parts[1], name=_TFOUTPUT1)
        out2 = tf.identity(parts[2], name=_TFOUTPUT2)
        return out0, out1, out2
    self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: data_val, _INPUT1: part_val})
@check_opset_min_version(9, "Compress")
def test_dynamic_partition_data_tensor(self):
    """tf.dynamic_partition where data is rank-2 (rows are routed to partitions)."""
    data_val = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], dtype=np.float32)
    part_val = np.array([0, 2, 1, 0, 1], dtype=np.int32)
    def func(data, partitions):
        parts = tf.dynamic_partition(data, partitions, num_partitions=3)
        out0 = tf.identity(parts[0], name=_TFOUTPUT)
        out1 = tf.identity(parts[1], name=_TFOUTPUT1)
        out2 = tf.identity(parts[2], name=_TFOUTPUT2)
        return out0, out1, out2
    self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: data_val, _INPUT1: part_val})
@check_opset_min_version(11, "ScatterElements")
@unittest.skip("this test is failing for some opsets, disabled until fixed")
def test_dynamic_stitch_both_vector(self):
    """tf.dynamic_stitch interleaving two index/data vectors (currently skipped)."""
    data_val = np.array([[5, 1, 3], [7, 2, 4]], dtype=np.float32)
    indices_val = np.array([[0, 1, 4], [2, 3, 5]], dtype=np.int32)
    def func(indices, data):
        stitched = tf.dynamic_stitch(tf.unstack(indices), tf.unstack(data))
        return tf.identity(stitched, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: indices_val, _INPUT1: data_val})
@check_opset_min_version(11, "ScatterElements")
def test_dynamic_stitch_data_tensor(self):
    """tf.dynamic_stitch where each data slice is a higher-rank tensor."""
    data_val = np.arange(2 * 3 * 2 * 4, dtype=np.float32).reshape((2, 3, 2, 4))
    indices_val = np.array([[0, 1, 4], [2, 3, 5]], dtype=np.int32)
    def func(indices, data):
        stitched = tf.dynamic_stitch(tf.unstack(indices), tf.unstack(data))
        return tf.identity(stitched, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: indices_val, _INPUT1: data_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const(self):
    """Conv2DBackpropInput with a constant input_sizes, SAME padding, unit strides."""
    input_sizes_val_ = np.array([1, 10, 10, 3], dtype=np.int32)
    def func(filter_val, out_backprop_val):
        sizes = tf.constant(input_sizes_val_, dtype=tf.int32)
        return conv2d_backprop_input(input_sizes=sizes, filter=filter_val,
                                     out_backprop=out_backprop_val, strides=[1, 1, 1, 1],
                                     padding='SAME', name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    grad_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: kernel_val, _INPUT1: grad_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const_strided(self):
    """Conv2DBackpropInput with constant input_sizes and 2x2 strides (upsampling)."""
    input_sizes_val_ = np.array([1, 10, 10, 3], dtype=np.int32)
    def func(filter_val, out_backprop_val):
        sizes = tf.constant(input_sizes_val_, dtype=tf.int32)
        return conv2d_backprop_input(input_sizes=sizes, filter=filter_val,
                                     out_backprop=out_backprop_val, strides=[1, 2, 2, 1],
                                     padding='SAME', name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    grad_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: kernel_val, _INPUT1: grad_val})
@check_tf_min_version("1.15", "tf.repeat needs tf 1.15")
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_shape_implied(self):
    """Conv2DBackpropInput where input_sizes is stacked from a runtime batch dim.

    The batch dimension is recovered at runtime via shape/strided_slice, so the
    converter must infer the output shape; the graph_validator then checks the
    resulting ConvTranspose node carries explicit "pads" or "output_shape".
    """
    batch_dim_val = np.array(1, dtype=np.int32)
    def func(filter_val, out_backprop_val, batch_dim):
        # tf.repeat makes the batch dim depend on a graph input (dynamic)
        out_backprop_val = tf.repeat(out_backprop_val, batch_dim, axis=0)
        s = tf.shape(out_backprop_val)
        t1 = tf.constant([0], dtype=tf.int32)
        t2 = tf.constant([1], dtype=tf.int32)
        # shrink_axis_mask=1 turns the 1-element slice into a scalar batch dim
        batch_dim = tf.strided_slice(s, t1, t2, shrink_axis_mask=1)
        # Sometimes the size given is a stack of constants with unknown batch dim
        input_sizes_val = tf.stack([batch_dim, 10, 10, 3])
        return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
                                     out_backprop=out_backprop_val, strides=[1, 2, 2, 1],
                                     padding='SAME', name=_TFOUTPUT)
    filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    out_backprop_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5]).astype(np.float32)
    def graph_validator(g):
        # pass only if the first ConvTranspose has a statically-resolved shape attr
        for n in g.get_nodes():
            if n.type == 'ConvTranspose':
                return "pads" in n.attr or "output_shape" in n.attr
        return False
    self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val, _INPUT2: batch_dim_val},
                        graph_validator=graph_validator)
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const_valid(self):
    """Conv2DBackpropInput with constant input_sizes and VALID padding."""
    input_sizes_val_ = np.array([1, 12, 12, 3], dtype=np.int32)
    def func(filter_val, out_backprop_val):
        sizes = tf.constant(input_sizes_val_, dtype=tf.int32)
        return conv2d_backprop_input(input_sizes=sizes, filter=filter_val,
                                     out_backprop=out_backprop_val, strides=[1, 1, 1, 1],
                                     padding='VALID', name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    grad_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: kernel_val, _INPUT1: grad_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput(self):
    """Conv2DBackpropInput with input_sizes supplied as a graph input, SAME padding."""
    def func(input_sizes, filters, out_backprop):
        return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 1, 1, 1],
                                     padding='SAME', name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    grad_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
    sizes_val = np.array([1, 10, 10, 3], dtype=np.int32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: sizes_val, _INPUT1: kernel_val, _INPUT2: grad_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_strided(self):
    """Conv2DBackpropInput with runtime input_sizes and 2x2 strides."""
    def func(input_sizes, filters, out_backprop):
        return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 2, 2, 1],
                                     padding='SAME', name=_TFOUTPUT)
    sizes_val = np.array([1, 10, 10, 3], dtype=np.int32)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    grad_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: sizes_val, _INPUT1: kernel_val, _INPUT2: grad_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_valid(self):
    """Conv2DBackpropInput with runtime input_sizes and VALID padding."""
    def func(input_sizes, filters, out_backprop):
        return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 1, 1, 1],
                                     padding='VALID', name=_TFOUTPUT)
    sizes_val = np.array([1, 12, 12, 3], dtype=np.int32)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    grad_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: sizes_val, _INPUT1: kernel_val, _INPUT2: grad_val})
@check_opset_min_version(12, "Conv2DBackpropInput with strided workaround")
def test_Conv2DBackpropInput_strided_same(self):
    """Conv2DBackpropInput with large asymmetric strides, needing the opset-12 workaround."""
    def func(input_sizes, filters, out_backprop):
        return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 5, 10, 1],
                                     padding='SAME', name=_TFOUTPUT)
    sizes_val = np.array([1, 10, 10, 3], dtype=np.int32)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
    grad_val = np.random.randint(low=0, high=256, size=[1, 2, 1, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: sizes_val, _INPUT1: kernel_val, _INPUT2: grad_val})
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const(self):
    """conv3d_transpose with a constant output_shape, SAME padding, unit strides."""
    output_shape_val_ = np.array([1, 10, 10, 10, 3], dtype=np.int32)
    def func(value, filters):
        out_shape = tf.constant(output_shape_val_, dtype=tf.int32)
        return conv3d_transpose(value, filters, out_shape, strides=[1, 1, 1, 1, 1],
                                padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
    value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: kernel_val}, rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const_strided(self):
    """conv3d_transpose with a constant output_shape and 2x2x2 strides."""
    output_shape_val_ = np.array([1, 10, 10, 10, 3], dtype=np.int32)
    def func(value, filters):
        out_shape = tf.constant(output_shape_val_, dtype=tf.int32)
        return conv3d_transpose(value, filters, out_shape, strides=[1, 2, 2, 2, 1],
                                padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
    value_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: kernel_val}, rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const_valid(self):
    """conv3d_transpose with a constant output_shape and VALID padding."""
    output_shape_val_ = np.array([1, 12, 12, 12, 3], dtype=np.int32)
    def func(value, filters):
        out_shape = tf.constant(output_shape_val_, dtype=tf.int32)
        return conv3d_transpose(value, filters, out_shape, strides=[1, 1, 1, 1, 1],
                                padding='VALID', data_format="NDHWC", name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
    value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
    self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: kernel_val}, rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2(self):
    """conv3d_transpose with output_shape as a graph input and non-cubic kernel."""
    def func(value, filters, output_shape):
        return conv3d_transpose(value, filters, output_shape, strides=[1, 1, 1, 1, 1],
                                padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[2, 3, 4, 4, 5]).astype(np.float32)
    value_val = np.random.randint(low=0, high=256, size=[2, 7, 8, 9, 5]).astype(np.float32)
    out_shape_val = np.array([2, 7, 8, 9, 4], dtype=np.int32)
    self._run_test_case(func, [_OUTPUT],
                        {_INPUT: value_val, _INPUT1: kernel_val, _INPUT2: out_shape_val},
                        rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_strided(self):
    """conv3d_transpose with runtime output_shape and 2x2x2 strides."""
    def func(value, filters, output_shape):
        return conv3d_transpose(value, filters, output_shape, strides=[1, 2, 2, 2, 1],
                                padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
    value_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5, 5]).astype(np.float32)
    out_shape_val = np.array([1, 10, 10, 10, 3], dtype=np.int32)
    self._run_test_case(func, [_OUTPUT],
                        {_INPUT: value_val, _INPUT1: kernel_val, _INPUT2: out_shape_val},
                        rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_valid(self):
    """conv3d_transpose with runtime output_shape and VALID padding."""
    def func(value, filters, output_shape):
        return conv3d_transpose(value, filters, output_shape, strides=[1, 1, 1, 1, 1],
                                padding='VALID', data_format="NDHWC", name=_TFOUTPUT)
    kernel_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
    value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
    out_shape_val = np.array([1, 12, 12, 12, 3], dtype=np.int32)
    self._run_test_case(func, [_OUTPUT],
                        {_INPUT: value_val, _INPUT1: kernel_val, _INPUT2: out_shape_val},
                        rtol=1e-6)
@check_opset_min_version(12, "Conv3DBackpropInputV2 with strided workaround")
def test_Conv3DBackpropInputV2_strided_same(self):
    """conv3d_transpose with large asymmetric strides, needing the opset-12 workaround."""
    def func(value, filters, output_shape):
        return conv3d_transpose(value, filters, output_shape, strides=[1, 10, 4, 3, 1],
                                padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
    kernel_val = np.random.randint(low=1, high=256, size=[1, 1, 1, 1, 1]).astype(np.float32)
    value_val = np.random.randint(low=1, high=256, size=[1, 3, 2, 5, 1]).astype(np.float32)
    out_shape_val = np.array([1, 30, 8, 15, 1], dtype=np.int32)
    self._run_test_case(func, [_OUTPUT],
                        {_INPUT: value_val, _INPUT1: kernel_val, _INPUT2: out_shape_val},
                        rtol=1e-6)
@check_opset_min_version(8, "CategoryMapper")
@skip_tfjs("TFJS does not initialize table")
@skip_onnx_checker("ONNX can't do type inference on CategoryMapper")
def test_hashtable_lookup(self):
    """String->index lookup via a file-backed table (maps to ONNX CategoryMapper).

    Fix: remove the vocab file in a finally block so a failing test run does
    not leave vocab.tmp behind for subsequent runs.
    """
    filnm = "vocab.tmp"
    words = ["apple", "pear", "banana", "cherry", "grape"]
    query = np.array(['cherry'], dtype=object)
    with open(filnm, "w") as f:
        for word in words:
            f.write(word + "\n")
    def func(query_holder):
        hash_table = lookup_ops.index_table_from_file(filnm)
        lookup_results = hash_table.lookup(query_holder)
        # add 0 so the lookup result feeds a named graph output
        ret = tf.add(lookup_results, 0, name=_TFOUTPUT)
        return ret
    try:
        self._run_test_case(func, [_OUTPUT], {_INPUT: query}, as_session=True)
    finally:
        os.remove(filnm)
@check_opset_min_version(8, "CategoryMapper")
@skip_tfjs("TFJS does not initialize table")
@skip_onnx_checker("ONNX can't do type inference on CategoryMapper")
def test_hashtable_lookup_invert(self):
    """Index->string lookup via a file-backed table.

    Fix: remove the vocab file in a finally block so a failing test run does
    not leave vocab.tmp behind for subsequent runs.
    """
    filnm = "vocab.tmp"
    words = ["apple", "pear", "banana", "cherry", "grape"]
    query = np.array([3], dtype=np.int64)
    with open(filnm, "w") as f:
        for word in words:
            f.write(word + "\n")
    def func(query_holder):
        hash_table = lookup_ops.index_to_string_table_from_file(filnm)
        lookup_results = hash_table.lookup(query_holder)
        ret = tf.identity(lookup_results, name=_TFOUTPUT)
        return ret
    try:
        self._run_test_case(func, [_OUTPUT], {_INPUT: query}, as_session=True)
    finally:
        os.remove(filnm)
@check_opset_min_version(8, "CategoryMapper")
@skip_tfjs("TFJS does not initialize table")
def test_hashtable_lookup_const(self):
    """String->index lookup of a constant query, including a non-ASCII entry.

    Fix: remove the vocab file in a finally block so a failing test run does
    not leave vocab.tmp behind for subsequent runs.
    """
    filnm = "vocab.tmp"
    words = ["apple", "pear", "banana", "cherry ♥", "grape"]
    query_val = np.array(['cherry ♥', 'banana'], dtype=object).reshape((1, 2, 1))
    # UTF-8 encoding is required for the non-ASCII vocab entry
    with open(filnm, "w", encoding='UTF-8') as f:
        for word in words:
            f.write(word + "\n")
    def func():
        hash_table = lookup_ops.index_table_from_file(filnm)
        query = tf.constant(query_val)
        lookup_results = hash_table.lookup(query)
        ret = tf.add(lookup_results, 0, name=_TFOUTPUT)
        return ret
    try:
        self._run_test_case(func, [_OUTPUT], {}, as_session=True)
    finally:
        os.remove(filnm)
@check_opset_min_version(8, "CategoryMapper")
@skip_tfjs("TFJS does not initialize table")
def test_hashtable_lookup_invert_const(self):
    """Index->string lookup of a constant query tensor.

    Fix: remove the vocab file in a finally block so a failing test run does
    not leave vocab.tmp behind for subsequent runs.
    """
    filnm = "vocab.tmp"
    words = ["apple", "pear", "banana", "cherry", "grape"]
    query_val = np.array([3, 2], dtype=np.int64).reshape((1, 2, 1))
    with open(filnm, "w", encoding='UTF-8') as f:
        for word in words:
            f.write(word + "\n")
    def func():
        hash_table = lookup_ops.index_to_string_table_from_file(filnm)
        query = tf.constant(query_val)
        lookup_results = hash_table.lookup(query)
        ret = tf.identity(lookup_results, name=_TFOUTPUT)
        return ret
    try:
        self._run_test_case(func, [_OUTPUT], {}, as_session=True)
    finally:
        os.remove(filnm)
@skip_tfjs("TFJS does not initialize table")
def test_hashtable_size(self):
    """Table size op; the query input is unused by the graph but keeps the
    test-harness placeholder wiring intact.

    Fix: remove the vocab file in a finally block so a failing test run does
    not leave vocab.tmp behind for subsequent runs.
    """
    filnm = "vocab.tmp"
    words = ["apple", "pear", "banana", "cherry", "grape"]
    query = np.array(['cherry'], dtype=object)
    with open(filnm, "w") as f:
        for word in words:
            f.write(word + "\n")
    def func(query_holder):
        hash_table = lookup_ops.index_table_from_file(filnm)
        lookup_size = hash_table.size()
        ret = tf.add(lookup_size, 0, name=_TFOUTPUT)
        return ret
    try:
        self._run_test_case(func, [_OUTPUT], {_INPUT: query}, as_session=True)
    finally:
        os.remove(filnm)
@check_opset_min_version(11)
@skip_onnx_checker("Fails. Fix later.")
def test_matrix_diag_part(self):
    """matrix_diag_part over square, tall and batched inputs."""
    input_vals = [
        np.array([[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20]]], dtype=np.int64),
        np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]], dtype=np.int64),
        np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]],
                  [[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]]], dtype=np.int64)]
    def func(input_holder):
        return matrix_diag_part(input_holder, name=_TFOUTPUT)
    # the same graph is converted once per fixture shape
    for input_val in input_vals:
        self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
@check_opset_min_version(8)
def test_broadcast(self):
    """tf.broadcast_to expanding a (2, 3) tensor to (3, 2, 3)."""
    src_val = np.random.randint(low=0, high=256, size=[2, 3]).astype(np.float32)
    target_shape_val = np.array([3, 2, 3]).astype(np.int64)
    def func(input_tensor, new_shape):
        return tf.broadcast_to(input_tensor, new_shape, _TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: src_val, _INPUT1: target_shape_val})
def test_bfloat(self):
    """Addition routed through bfloat16 casts, with float32 at the boundaries."""
    x_val = np.array([0, 1, 2], dtype=np.float32)
    y_val = np.array([3, 4, 5], dtype=np.float32)
    def func(x, y):
        lhs = tf.cast(x, tf.bfloat16)
        rhs = tf.cast(y, tf.bfloat16)
        total = tf.add(lhs, rhs)
        # cast back so the graph output is a supported float32 tensor
        return tf.cast(total, tf.float32, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(11)
@check_tf_min_version("2.2")
@skip_onnx_checker("Fails. Fix later.")
def test_matrix_diag_part_v3(self):
    """MatrixDiagPartV2/V3 across shapes and diagonal-band selections."""
    def func(X, K):
        v2 = tf.raw_ops.MatrixDiagPartV2(input=X, k=K, padding_value=0.123, name=_TFOUTPUT)
        v3 = tf.raw_ops.MatrixDiagPartV3(input=X, k=K, padding_value=0.123, align='LEFT_RIGHT', name=_TFOUTPUT1)
        return v2, v3
    shapes = ([4, 5], [2, 3, 4, 5], [5, 4], [7, 5])
    bands = ([0], [1], [3], [-1], [-3], [1, 2], [-2, -1], [-1, 1])
    for x_shape in shapes:
        x_val = np.random.random(x_shape).astype(np.float32)
        for raw_k in bands:
            k_val = np.array(raw_k).astype(np.int32)
            self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val, _INPUT1: k_val})
@test_ms_domain()
def test_inverse(self, extra_opset):
    """Matrix inverse via the ms-domain Inverse op (removed from onnx opset-12)."""
    x_val = np.random.random([5, 5]).astype(np.float32)
    def func(x):
        return tf.linalg.inv(x, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
                        process_args={"extra_opset": [extra_opset]})
@check_opset_min_version(12)
def test_squared_distance(self):
    """tf.math.squared_difference on same-shape float inputs."""
    lhs_val = np.random.random([4, 5]).astype(np.float32)
    rhs_val = np.random.random([4, 5]).astype(np.float32)
    def func(x, y):
        return tf.math.squared_difference(x, y, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: lhs_val, _INPUT1: rhs_val})
@check_opset_min_version(12)
@check_tf_min_version("2.1")
def test_einsum(self):
    """Outer product via einsum 'i,j->ij'."""
    x_val = np.random.random([10]).astype(np.float32)
    y_val = np.random.random([10]).astype(np.float32)
    def func(x, y):
        outer = tf.einsum("i,j->ij", x, y)
        return tf.identity(outer, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(12)
@check_tf_min_version("2.1")
def test_einsum_to_matmul(self):
    """einsum 'bik,kj->bij' — a batched matmul the converter can lower to MatMul."""
    x_val = np.random.random([4, 10, 20]).astype(np.float32)
    y_val = np.random.random([20, 30]).astype(np.float32)
    def func(x, y):
        product = tf.einsum("bik,kj->bij", x, y)
        return tf.identity(product, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(12)
@check_tf_min_version("2.1")
def test_einsum_to_matmul_transpose(self):
    """einsum 'bik,jk->bij' — batched matmul with the second operand transposed."""
    x_val = np.random.random([4, 10, 20]).astype(np.float32)
    y_val = np.random.random([30, 20]).astype(np.float32)
    def func(x, y):
        product = tf.einsum("bik,jk->bij", x, y)
        return tf.identity(product, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(7)
def test_compare(self):
    """less_equal and greater_equal on same-shape float inputs."""
    x_val = np.random.random([10, 20]).astype(np.float32)
    y_val = np.random.random([10, 20]).astype(np.float32)
    def func(x, y):
        le = tf.math.less_equal(x, y, name=_TFOUTPUT)
        ge = tf.math.greater_equal(x, y, name=_TFOUTPUT1)
        return le, ge
    self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val, _INPUT1: y_val})
@check_tf_min_version("1.14", "required for tf.math.is_finite")
@check_opset_min_version(10)
def test_is_finite(self):
    """tf.math.is_finite over finite values, inf and nan."""
    x_val = np.array([5.0, 4.8, 6.8, np.inf, np.nan], dtype=np.float32)
    def func(x):
        finite_mask = tf.math.is_finite(x)
        return tf.identity(finite_mask, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_multi_dim(self):
    """MatrixDiagV2/V3 building matrices from a band of diagonals (k=[-1, 1])."""
    diag_val = np.array([[[1.0, 2.0, 3.0],
                          [4.0, 5.0, 6.0],
                          [7.0, 8.0, 9.0]],
                         [[10.0, 11.0, 12.0],
                          [13.0, 14.0, 15.0],
                          [16.0, 17.0, 18.0]]], dtype=np.float32)
    k_val = np.array([-1, 1]).astype(np.int32)
    # -1 lets TF infer num_rows / num_cols from the diagonals
    row_val = np.array(-1).astype(np.int32)
    col_val = np.array(-1).astype(np.int32)
    def func(diag, k, row, col):
        v3 = tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
                                     padding_value=0.123, align='RIGHT_RIGHT', name=_TFOUTPUT)
        v2 = tf.raw_ops.MatrixDiagV2(diagonal=diag, k=k, num_rows=row, num_cols=col,
                                     padding_value=0.123, name=_TFOUTPUT1)
        return v3, v2
    self._run_test_case(func, [_OUTPUT, _OUTPUT1],
                        {_INPUT: diag_val, _INPUT1: k_val, _INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_multi_dim_min_row(self):
    """MatrixDiagV3 with superdiagonal band k=[2, 3], inferred rows, fixed cols."""
    diag_val = np.array([[[1.0, 2.0, 3.0],
                          [4.0, 5.0, 6.0]],
                         [[7.0, 8.0, 9.0],
                          [10.0, 11.0, 12.0]]], dtype=np.float32)
    k_val = np.array([2, 3]).astype(np.int32)
    row_val = np.array(-1).astype(np.int32)
    col_val = np.array(6).astype(np.int32)
    def func(diag, k, row, col):
        return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
                                       padding_value=0.456, align='LEFT_LEFT', name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT],
                        {_INPUT: diag_val, _INPUT1: k_val, _INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_single_dim_min_col(self):
    """MatrixDiagV3 from a single subdiagonal (k=-1) with fixed rows, inferred cols."""
    diag_val = np.array([1.0, 2.0, 3.0]).astype(np.float32)
    k_val = np.array(-1).astype(np.int32)
    row_val = np.array(5).astype(np.int32)
    col_val = np.array(-1).astype(np.int32)
    def func(diag, k, row, col):
        return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
                                       padding_value=0.789, align='LEFT_RIGHT', name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT],
                        {_INPUT: diag_val, _INPUT1: k_val, _INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_2single_dim_row_col(self):
    """MatrixDiagV3 (explicit rows/cols) and legacy MatrixDiag on batched diagonals."""
    diag_val = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int64)
    k_val = np.array(0).astype(np.int32)
    row_val = np.array(3).astype(np.int32)
    col_val = np.array(4).astype(np.int32)
    def func(diag, k, row, col):
        v3 = tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
                                     padding_value=7, align='LEFT_RIGHT', name=_TFOUTPUT)
        v1 = tf.raw_ops.MatrixDiag(diagonal=diag, name=_TFOUTPUT1)
        return v3, v1
    self._run_test_case(func, [_OUTPUT, _OUTPUT1],
                        {_INPUT: diag_val, _INPUT1: k_val, _INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_1single_dim_row_col(self):
    """MatrixDiagV3 embedding a 1-D diagonal into a wider (5 x 10) matrix."""
    diag_val = np.array([1, 2, 3, 4, 5]).astype(np.int64)
    k_val = np.array(0).astype(np.int32)
    row_val = np.array(5).astype(np.int32)
    col_val = np.array(10).astype(np.int32)
    def func(diag, k, row, col):
        return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
                                       padding_value=7, align='LEFT_RIGHT', name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT],
                        {_INPUT: diag_val, _INPUT1: k_val, _INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
@skip_onnx_checker("Checker fails. Fix later.")
def test_matrix_set_diag_v3(self):
input_val = np.array([[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]]).astype(np.int64)
diag_val = np.array([[1, 2, 3],
[4, 5, 6]]).astype(np.int64)
k_val = np.array([0]).astype(np.int32)
def func(base_matrix, diag, k):
return tf.raw_ops.MatrixSetDiagV3(input=base_matrix, diagonal=diag, k=k, align='RIGHT_LEFT', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val, _INPUT1: diag_val, _INPUT2: k_val})
@check_opset_min_version(10)
@check_tf_min_version("1.14")
@skip_tfjs("TFJS executes model incorrectly")
def test_fakequant_with_min_max(self):
    """fake_quant_with_min_max_args over random data and the exact clamp boundaries."""
    def func(x):
        quantized = fake_quant_with_min_max_args(
            x, min=-1024, max=1023, num_bits=8, narrow_range=False, name=None)
        return tf.identity(quantized, name=_TFOUTPUT)
    rand_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024.
    # all-positive values first, then the mixed-sign sample
    self._run_test_case(func, [_OUTPUT], {_INPUT: np.abs(rand_val)}, rtol=1e-6, atol=1e-4)
    self._run_test_case(func, [_OUTPUT], {_INPUT: rand_val}, rtol=1e-6, atol=1e-4)
    # pin values at and just beyond the [-1024, 1023] quantization range
    edge_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024.
    edge_val[0, 0] = -1024
    edge_val[0, 1] = -1023
    edge_val[0, 2] = 1024
    edge_val[1, 0] = 1023
    edge_val[1, 1] = 1025
    edge_val[1, 2] = -1025
    self._run_test_case(func, [_OUTPUT], {_INPUT: edge_val}, rtol=1e-6, atol=1e-4)
@check_opset_min_version(10)
@check_tf_min_version("1.14")
def test_fakequant_with_min_max_same_sign(self):
    """fake_quant with an all-negative [min, max] range.

    The conversion may legitimately reject a range that does not straddle
    zero, so a ValueError from the converter is tolerated (best-effort test).
    """
    def func_neg(x):
        ret = fake_quant_with_min_max_args(
            x, min=-1024*3, max=-1024, num_bits=8, narrow_range=False, name=None)
        return tf.identity(ret, name=_TFOUTPUT)
    x_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024 * 3.
    try:
        self._run_test_case(func_neg, [_OUTPUT], {_INPUT: x_val}, rtol=1e-6, atol=1e-4)
    except ValueError:
        # deliberate: an unsupported same-sign range is an acceptable outcome
        pass
@check_opset_min_version(10)
@check_tf_min_version("1.14")
@skip_tfjs("Results differ slightly in TFJS")
def test_fakequant_with_min_max_vars(self):
    """fake_quant_with_min_max_vars over random data and the exact clamp boundaries."""
    def func(x):
        quantized = fake_quant_with_min_max_vars(
            x, min=-1024, max=1023, num_bits=8, narrow_range=False, name=None)
        return tf.identity(quantized, name=_TFOUTPUT)
    rand_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024.
    # all-positive values first, then the mixed-sign sample
    self._run_test_case(func, [_OUTPUT], {_INPUT: np.abs(rand_val)}, rtol=1e-6, atol=1e-4)
    self._run_test_case(func, [_OUTPUT], {_INPUT: rand_val}, rtol=1e-6, atol=1e-4)
    # pin values at and just beyond the [-1024, 1023] quantization range
    edge_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024.
    edge_val[0, 0] = -1024
    edge_val[0, 1] = -1023
    edge_val[0, 2] = 1024
    edge_val[1, 0] = 1023
    edge_val[1, 1] = 1025
    edge_val[1, 2] = -1025
    self._run_test_case(func, [_OUTPUT], {_INPUT: edge_val}, rtol=1e-6, atol=1e-4)
@check_opset_min_version(9, "atan2")
def test_atan2(self):
    """tf.math.atan2 over every sign combination of (y, x), including zeros.

    A numpy reference implementation (arctan plus a sign-based quadrant
    correction that also dodges division by zero) is first verified against
    np.arctan2, then the TF op is converted and compared.
    """
    # Test all possible pairs of pos, neg, zero for x and y.
    def atan2(y, x):
        sx = np.sign(x)
        sy = np.sign(y)
        # shifts the arctan result into the correct quadrant for x < 0
        pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-np.pi/2)
        # (1 - sx**2) term avoids dividing by zero when x == 0
        atan_part = np.arctan(y / (x + (1 - sx ** 2))) * sx ** 2
        return atan_part + pi_part
    test_pairs = [[y, x] for x in [3., -4., 0.] for y in [5., -6., 0.]]
    y_val = np.array([y for y, x in test_pairs], dtype=np.float32)
    x_val = np.array([x for y, x in test_pairs], dtype=np.float32)
    # sanity-check the reference before using it via the converted graph
    assert_almost_equal(np.arctan2(y_val, x_val), atan2(y_val, x_val))
    def func(y, x):
        atan2_ = tf.math.atan2(y, x)
        return tf.identity(atan2_, name=_TFOUTPUT)
    self._run_test_case(
        func, [_OUTPUT], {_INPUT: y_val, _INPUT2: x_val}, rtol=1e-06)
def _conv_kernel_as_input_test(self, x_val, w_val, strides=None,
                               padding="VALID", dilations=None, rtol=1e-07):
    """Shared helper: run conv2d where the kernel is a graph input, not a constant."""
    # avoid mutable defaults; fall back to the module-level 1x1 settings
    strides = _STRIDE1x1 if strides is None else strides
    dilations = _DILATIONS1x1 if dilations is None else dilations
    def func(x, kernel):
        conv = tf.nn.conv2d(x, kernel, strides=strides, padding=padding,
                            dilations=dilations)
        return tf.identity(conv, name=_TFOUTPUT)
    self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT2: w_val}, rtol=rtol)
def test_conv2d_1_kernel_as_input(self):
    """conv2d with a 3x3 kernel fed as a graph input on a 5x5 single-channel image."""
    image_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
    kernel_val = np.array([[2., 1., 1.],
                           [1., 3., 1.],
                           [1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
    self._conv_kernel_as_input_test(image_val, kernel_val)
def test_equal_with_different_parameters(self):
    """Equal between a runtime Size and a constant, forced to run at opset 12.

    The test temporarily overrides self.config.opset and restores it in a
    finally block; note it calls run_test_case (the raw harness entry) rather
    than the _run_test_case wrapper used elsewhere.
    """
    input_val = np.array([5], dtype=np.int32)
    def func(input_val):
        tensor = tf.zeros(input_val)
        input_size = tf.size(tensor)
        constant = tf.constant(3, dtype=tf.int32)
        return tf.math.equal(input_size, constant, name="output")
    feed_dict = {"input:0": input_val}
    input_names_with_port = ["input:0"]
    output_names_with_port = ["output:0"]
    current_opset = self.config.opset
    # pin opset 12 regardless of the suite-wide configuration
    self.config.opset = 12
    try:
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
    finally:
        self.config.opset = current_opset
@check_tf_min_version("1.14")
@skip_tf_versions("2.4", "Fails to run on TF 2.4.x")
@skip_tfjs("Fails to run tfjs model")
def test_rfft_ops(self):
    """tf.signal.rfft: sanity-check a matrix-multiply DFT against np.fft.rfft,
    then verify abs(rfft) converts while complex-returning graphs are rejected.

    make_dft_constant is the same helper the converter uses; dft_slow applies
    it as a plain matmul, keeping only the first fft_length//2 + 1 bins.
    """
    def dft_slow(x, M, fft_length):
        xt = x[:, :fft_length].T
        # real FFT keeps only the non-redundant half of the spectrum
        size = fft_length // 2 + 1
        res = np.dot(M[:, :, :fft_length], xt)[:, :size, :]
        return np.transpose(res, (0, 2, 1))
    # full-length DFT: real and imaginary parts must match numpy
    x_val = make_xval([2, 4]).astype(np.float32)
    M_both = make_dft_constant(x_val.shape[1], x_val.dtype, x_val.shape[1])
    fft = dft_slow(x_val, M_both, x_val.shape[1])
    fft_npy = np.fft.rfft(x_val)
    assert_almost_equal(fft[0, :, :], np.real(fft_npy))
    assert_almost_equal(fft[1, :, :], np.imag(fft_npy))
    # truncated DFT (fft_length = n - 1)
    x_val = make_xval([2, 4]).astype(np.float32)
    M_both = make_dft_constant(x_val.shape[1], x_val.dtype, x_val.shape[1]-1)
    fft = dft_slow(x_val, M_both, x_val.shape[1]-1)
    fft_npy = np.fft.rfft(x_val, x_val.shape[1]-1)
    assert_almost_equal(fft[0, :, :], np.real(fft_npy))
    assert_almost_equal(fft[1, :, :], np.imag(fft_npy))
    x_val = make_xval([3, 4]).astype(np.float32)
    # abs(rfft) is real-valued, so this graph is convertible
    def func1(x):
        op_ = tf.signal.rfft(x)
        return tf.abs(op_, name=_TFOUTPUT)
    self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
    # complex outputs are unsupported: both graphs below must raise
    def func2(x):
        op_ = tf.signal.rfft(x)
        return tf.cos(op_, name=_TFOUTPUT)
    with self.assertRaises(ValueError):
        self._run_test_case(func2, [_OUTPUT], {_INPUT: x_val})
    def func3(x):
        op_ = tf.signal.rfft(x)
        return tf.identity(op_, name=_TFOUTPUT)
    with self.assertRaises(ValueError):
        self._run_test_case(func3, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @skip_tf_versions("2.4", "Fails to run on TF 2.4.x")
    @skip_tfjs("TFJS executes rfft with poor accuracy")
    @check_opset_min_version(10, "Slice")
    def test_rfft_ops_fft_length(self):
        """abs(rfft) with an explicit fft_length smaller than the input width."""
        x_val = make_xval([3, 9]).astype(np.float32)
        def func1_length(x):
            op_ = tf.signal.rfft(x, np.array([8], dtype=np.int32))
            return tf.abs(op_, name=_TFOUTPUT)
        self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @skip_tf_versions("2.4", "Fails to run on TF 2.4.x")
    @skip_tfjs("TFJS executes rfft with poor accuracy")
    @check_opset_min_version(10, "Slice")
    def test_rfft_ops_fft_length_many(self):
        """abs(rfft) over a grid of input widths with fft_length <= input width."""
        for i in range(4, 7):
            for j in range(4, 7):
                for m in range(0, 3):
                    with self.subTest(shape=(i, j), fft_length=j-m):
                        x_val = make_xval([i, j]).astype(np.float32)
                        def func1_length(x):
                            op_ = tf.signal.rfft(x, np.array([j-m], dtype=np.int32))
                            return tf.abs(op_, name=_TFOUTPUT)
                        self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @skip_tf_versions("2.4", "Fails to run on TF 2.4.x")
    @skip_tfjs("TFJS executes rfft with poor accuracy")
    @check_opset_min_version(10, "Slice")
    def test_rfft_ops_fft_length_many_bigger(self):
        """abs(rfft) with fft_length >= input width (zero-padding case)."""
        for i in range(4, 7):
            for j in range(4, 7):
                for m in range(0, 3):
                    with self.subTest(shape=(i, j), fft_length=j+m):
                        # Scale down to keep values small for tolerance checks.
                        x_val = make_xval([i, j]).astype(np.float32) / 10
                        def func1_length(x):
                            op_ = tf.signal.rfft(x, np.array([j+m], dtype=np.int32))
                            return tf.abs(op_, name=_TFOUTPUT)
                        self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @skip_tflite("Slight accuracy issues with some shapes")
    @skip_tfjs("TFJS executes rfft with poor accuracy")
    @check_opset_min_version(10, "Slice")
    def test_rfft_ops_fft_length_many_larger(self):
        """abs(rfft) on rank-3 inputs with fft_length both below and above the input width,
        with and without graph optimization."""
        for i in range(4, 7):
            for j in range(4, 7):
                for m in range(-3, 3):
                    with self.subTest(shape=(3, i, j), fft_length=j+m):
                        x_val = make_xval([3, i, j]).astype(np.float32) / 10
                        def func1_length(x):
                            op_ = tf.signal.rfft(x, np.array([j+m], dtype=np.int32))
                            return tf.abs(op_, name=_TFOUTPUT)
                        self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val}, optimize=False)
                        self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "CumSum")
    def test_rfft2d_ops(self):
        """rfft2d: convert abs(rfft2d); other consumers of the complex output must fail."""
        x_val = make_xval([3, 4]).astype(np.float32)
        def func1(x):
            op_ = tf.signal.rfft2d(x)
            return tf.abs(op_, name=_TFOUTPUT)
        self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val}, optimize=False)
        self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
        # NOTE(review): as for rfft, only abs on the complex output seems supported — confirm.
        def func2(x):
            op_ = tf.signal.rfft2d(x)
            return tf.cos(op_, name=_TFOUTPUT)
        with self.assertRaises(ValueError):
            self._run_test_case(func2, [_OUTPUT], {_INPUT: x_val})
        def func3(x):
            op_ = tf.signal.rfft2d(x)
            return tf.identity(op_, name=_TFOUTPUT)
        with self.assertRaises(ValueError):
            self._run_test_case(func3, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "CumSum")
    def test_rfft2d_ops_fft_length(self):
        """abs(rfft2d) with explicit 2-D fft_length, with and without optimization."""
        x_val = make_xval([3, 4]).astype(np.float32)
        def func1_length(x):
            op_ = tf.signal.rfft2d(x, np.array([3, 3], dtype=np.int32))
            return tf.abs(op_, name=_TFOUTPUT)
        with self.subTest(optimize=False):
            self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val}, optimize=False)
        with self.subTest(optimize=True):
            self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "CumSum")
    def test_rfft2d_ops_fft_length_many(self):
        """abs(rfft2d) over a grid of shapes and fft_length reductions, both optimize modes."""
        for i in range(7, 4, -1):
            for j in range(7, 4, -1):
                for m in range(0, 3):
                    for n in range(0, 3):
                        for opt in [False, True]:
                            with self.subTest(shape=(i, j), fft_length=(m, n), optimize=opt):
                                # Scale down to keep values small for tolerance checks.
                                x_val = make_xval([i, j]).astype(np.float32) / 100
                                def func1_length(x):
                                    op_ = tf.signal.rfft2d(x, np.array([i-m, j-n], dtype=np.int32))
                                    return tf.abs(op_, name=_TFOUTPUT)
                                self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val}, optimize=opt)
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "CumSum")
    @unittest.skipIf(True, reason="Not fully implemented for dynamic shape.")
    def test_fft_ops(self):
        """Complex fft via cast-to-complex64 then abs; currently always skipped (see skipIf)."""
        x_val = make_xval([3, 4]).astype(np.float32)
        def func1(x):
            xc = tf.cast(x, tf.complex64)
            op_ = tf.signal.fft(xc)
            return tf.abs(op_, name=_TFOUTPUT)
        self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "topk")
def test_invert_permutation(self):
def func(x):
op_ = tf.math.invert_permutation(x)
return tf.identity(op_, name=_TFOUTPUT)
x_val = np.array([0, 1, 2, 3], dtype=np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([1, 5, 2, 0, 3, 4], dtype=np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_tf_min_version("1.14")
    @check_opset_min_version(11, "CumSum")
    def test_rfft2d_ops_specific_dimension(self):
        """abs(rfft2d) on shapes with degenerate dimensions and varied fft_length pairs."""
        x_val = make_xval([3, 1, 4]).astype(np.float32)
        def func1(x):
            op_ = tf.signal.rfft2d(x, np.array([1, 4], dtype=np.int32))
            return tf.abs(op_, name=_TFOUTPUT)
        with self.subTest(shape=(3, 1, 4), fft_length=(1, 4), optimize=False):
            self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val}, optimize=False)
        with self.subTest(shape=(3, 1, 4), fft_length=(1, 4), optimize=True):
            self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
        for shape in [(3, 1, 4), (5, 7), (3, 5, 7), (7, 5)]:
            for fft_length in [shape[-2:], (1, shape[-1]),
                               (min(2, shape[-2]), shape[-1]),
                               (shape[-2], 2),
                               (min(3, shape[-2]), min(4, shape[-2]))]:
                if fft_length == (1, 1):
                    # The code fails in this case but that's unlikely to happen.
                    continue
                for optimize in [False, True]:
                    with self.subTest(shape=shape, fft_length=fft_length, optimize=optimize):
                        x_val = make_xval(list(shape)).astype(np.float32)
                        # Normalize so tolerance checks are not dominated by magnitude.
                        x_val /= x_val.size
                        def func1(x):
                            op_ = tf.signal.rfft2d(x, np.array(fft_length, dtype=np.int32))
                            return tf.abs(op_, name=_TFOUTPUT)
                        self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val}, optimize=optimize)
    @check_tf_min_version("2.1")
    @skip_tflite("TFlite errors on some attributes")
    @check_opset_min_version(9, "string")
    def test_asstring(self):
        """tf.strings.as_string on int32 and float32 inputs."""
        def func(x):
            op_ = tf.strings.as_string(x)
            return tf.identity(op_, name=_TFOUTPUT)
        x_val = np.array([0, 1, 2, 3], dtype=np.int32)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.array([0, 1, 2, 3], dtype=np.float32)
        # can't check the values because in onnx they are padded with 0, in tf they are not
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False)
    @check_tf_min_version("2.1")
    @skip_tflite("TFlite errors on some attributes")
    @check_opset_min_version(9, "string")
    def test_string_to_number(self):
        """tf.strings.to_number on integer-like and float-like string scalars."""
        def func(x):
            op_ = tf.strings.to_number(x)
            return tf.identity(op_, name=_TFOUTPUT)
        # tf gets this wrong and returns fp32 instead of int
        x_val = np.array("123", dtype=object)
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
        x_val = np.array("123.1", dtype=object)
        # NOTE(review): comment below looks inherited from test_asstring; value check is
        # disabled here, presumably due to representation differences — confirm.
        # can't check the values because in onnx they are padded with 0, in tf they are not
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False)
    @check_tf_min_version("2.5")
    @check_opset_min_version(14, "hardswish")
    @skip_tfjs("not supported in tfjs")
    def test_hardswish(self):
        """Hard-swish expressed as x * relu6(x + 3) / 6 (pattern-matched to ONNX HardSwish)."""
        def func(x):
            # there is no hardswich in tf but toco will optimize to it
            op_ = x * tf.nn.relu6(x + np.float32(3)) * np.float32(1. / 6.)
            return tf.identity(op_, name=_TFOUTPUT)
        x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tfjs("not supported in tfjs")
def test_l2normalization(self):
def func(x):
op_ = tf.math.l2_normalize(x)
return tf.identity(op_, name=_TFOUTPUT)
x_val = make_xval([3, 4])
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
def test_addition_two_newaxis_simultaneously(self):
def func(x):
op = x[..., tf.newaxis, tf.newaxis]
return tf.identity(op, name=_TFOUTPUT)
x_val = make_xval([2, 3])
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
def test_addition_three_newaxis_simultaneously(self):
def func(x):
op = x[..., tf.newaxis, tf.newaxis, tf.newaxis]
return tf.identity(op, name=_TFOUTPUT)
x_val = make_xval([2, 3])
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(11, "Pad")
    def test_conv_unknown_kernel_channels(self):
        """conv1d whose kernel shape is made unknown at conversion time via a no-op Pad."""
        x_shape = [2, 10, 3]
        x_val = make_xval(x_shape)
        kernel_shape = [4, 3, 5]
        kernel_val = make_xval(kernel_shape)
        # Zero padding on every axis: values unchanged, but shape becomes dynamic.
        pad_val = np.array([[0, 0], [0, 0], [0, 0]], np.int64)
        def func(x, kernel, pad):
            # Make kernel dimensions unknown
            kernel = tf.pad(kernel, pad)
            conv = tf.nn.conv1d(x, kernel, stride=[1], padding='VALID')
            return tf.identity(conv, name='output')
        self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: kernel_val, _INPUT2: pad_val})
@check_tf_min_version("2.3.0")
@check_opset_min_version(16, "ScatterND")
@skip_tfjs("not supported in tfjs")
def test_tensor_scatter_max(self):
def func(tensor, indices, updates):
op = tf.tensor_scatter_nd_max(tensor, indices, updates)
return tf.identity(op, name=_TFOUTPUT)
tensor_val = make_xval([3, 4, 5])
indices_val = np.array([[2, 3], [0, 1]], np.int32)
indices64_val = indices_val.astype(np.int64)
updates_val = make_xval([2, 5]) + 3
self._run_test_case(func, [_OUTPUT], {_INPUT: tensor_val, _INPUT1: indices_val, _INPUT2: updates_val})
self._run_test_case(func, [_OUTPUT], {_INPUT: tensor_val, _INPUT1: indices64_val, _INPUT2: updates_val})
@check_tf_min_version("2.3.0")
@check_opset_min_version(16, "ScatterND")
@skip_tfjs("not supported in tfjs")
def test_tensor_scatter_min(self):
def func(tensor, indices, updates):
op = tf.tensor_scatter_nd_min(tensor, indices, updates)
return tf.identity(op, name=_TFOUTPUT)
tensor_val = make_xval([3, 4, 5])
indices_val = np.array([[2, 3], [0, 1]], np.int32)
indices64_val = indices_val.astype(np.int64)
updates_val = make_xval([2, 5]) + 3
self._run_test_case(func, [_OUTPUT], {_INPUT: tensor_val, _INPUT1: indices_val, _INPUT2: updates_val})
self._run_test_case(func, [_OUTPUT], {_INPUT: tensor_val, _INPUT1: indices64_val, _INPUT2: updates_val})
@check_tf_min_version("1.12.1")
@check_opset_min_version(16, "ScatterND")
@skip_tfjs("not supported in tfjs")
def test_tensor_scatter_sub(self):
def func(tensor, indices, updates):
op = tf.tensor_scatter_nd_sub(tensor, indices, updates)
return tf.identity(op, name=_TFOUTPUT)
tensor_val = make_xval([3, 4, 5])
indices_val = np.array([[2, 3], [0, 1]], np.int32)
indices64_val = indices_val.astype(np.int64)
updates_val = make_xval([2, 5]) + 3
self._run_test_case(func, [_OUTPUT], {_INPUT: tensor_val, _INPUT1: indices_val, _INPUT2: updates_val})
self._run_test_case(func, [_OUTPUT], {_INPUT: tensor_val, _INPUT1: indices64_val, _INPUT2: updates_val})
# Allow running this test module directly (common.unittest_main handles CLI args).
if __name__ == '__main__':
    unittest_main()
| 305,157 | 47.530216 | 120 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/test_gru.py | # SPDX-License-Identifier: Apache-2.0
"""Unit Tests for gru."""
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from backend_test_base import Tf2OnnxBackendTestBase
from common import * # pylint: disable=wildcard-import,unused-wildcard-import
from tf2onnx.tf_loader import is_tf2
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,cell-var-from-loop
# names for input and outputs for tests
_TFINPUT = "input"
_INPUT = "input:0"
_TFINPUT1 = "input1"
_INPUT1 = "input1:0"
_TFINPUT2 = "input2"
_INPUT2 = "input2:0"
_TFINPUT3 = "input3"
_INPUT3 = "input3:0"
_TFOUTPUT = "output"
_OUTPUT = "output:0"
_TFOUTPUT1 = "output1"
_OUTPUT1 = "output1:0"
_TFOUTPUT2 = "output2"
_OUTPUT2 = "output2:0"
_TFOUTPUT3 = "output3"
_OUTPUT3 = "output3:0"
if is_tf2():
# There is no LSTMBlockCell in tf-2.x
BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
BasicLSTMCell = tf.contrib.rnn.BasicLSTMCell
LSTMCell = tf.contrib.rnn.LSTMCell
GRUCell = tf.contrib.rnn.GRUCell
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
# TODO: as a workaround, set batch_size to 1 for now to bypass a onnxruntime bug, revert it when the bug is fixed
class GRUTests(Tf2OnnxBackendTestBase):
    def run_test_case(self, *args, graph_validator=None, **kwargs): #pylint: disable=arguments-differ
        """Run the base test case with missing shapes allowed (TF GRU leaves an unknown dim),
        additionally asserting that no Loop/Scan ops remain in the converted graph for tf2/tfjs."""
        tmp = self.config.allow_missing_shapes
        self.config.allow_missing_shapes = True
        def new_graph_validator(g):
            # Chain the caller's validator (if any) with the Loop/Scan check.
            good = True
            if graph_validator is not None:
                good = good and graph_validator(g)
            if is_tf2() and ':' in g.outputs[0]:
                # Only check for tf2 and tfjs, not tflite
                good = good and check_op_count(g, "Loop", 0, disabled=False)
                good = good and check_op_count(g, "Scan", 0, disabled=False)
            return good
        try:
            super().run_test_case(*args, graph_validator=new_graph_validator, **kwargs)
        finally:
            # Restore the shared config flag even when the test fails.
            self.config.allow_missing_shapes = tmp
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_single_dynamic_gru(self):
        """Single unidirectional dynamic_rnn GRU; converted graph must contain one GRU node."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            # no scope
            cell = GRUCell(
                units,
                activation=None,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            outputs, cell_state = dynamic_rnn(
                cell,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        input_names_with_port = ["input_1:0"]
        feed_dict = {"input_1:0": x_val}
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_multiple_dynamic_gru(self):
        """Two dynamic_rnn GRUs in one graph (one unscoped, one inside a variable scope)."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            gru_output_list = []
            gru_cell_state_list = []
            # no scope
            cell = GRUCell(
                units,
                activation=None,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            outputs, cell_state = dynamic_rnn(
                cell,
                x,
                dtype=tf.float32)
            gru_output_list.append(outputs)
            gru_cell_state_list.append(cell_state)
            # given scope
            cell = GRUCell(
                units,
                activation=None,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=45))
            with variable_scope.variable_scope("root1") as scope:
                outputs, cell_state = dynamic_rnn(
                    cell,
                    x,
                    dtype=tf.float32,
                    sequence_length=[4],
                    scope=scope)
            gru_output_list.append(outputs)
            gru_cell_state_list.append(cell_state)
            return tf.identity(gru_output_list, name="output"), tf.identity(gru_cell_state_list, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06)
        # graph_validator=lambda g: check_gru_count(g, 2))
    @check_opset_after_tf_version("1.15", 8, "might need Select")
    def test_single_dynamic_gru_seq_length_is_const(self):
        """GRU with a constant sequence_length covering the full input length."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = init_ops.constant_initializer(0.5)
            # no scope
            cell = GRUCell(
                units,
                kernel_initializer=initializer)
            outputs, cell_state = dynamic_rnn(
                cell,
                x,
                dtype=tf.float32,
                sequence_length=[5])
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Select")
    def test_single_dynamic_gru_seq_length_is_not_const(self):
        """GRU whose sequence_length is a graph input, tried with several dtypes."""
        for np_dtype in [np.int32, np.int64, np.float32]:
            units = 5
            batch_size = 1
            x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
            x_val = np.stack([x_val] * batch_size)
            y_val = np.array([5], dtype=np_dtype)
            def func(x, seq_length):
                initializer = init_ops.constant_initializer(0.5)
                # no scope
                cell = GRUCell(
                    units,
                    kernel_initializer=initializer)
                outputs, cell_state = dynamic_rnn(
                    cell,
                    x,
                    dtype=tf.float32,
                    sequence_length=tf.identity(seq_length))
                return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
            feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
            input_names_with_port = ["input_1:0", "input_2:0"]
            output_names_with_port = ["output:0", "cell_state:0"]
            self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,
                               graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_single_dynamic_gru_placeholder_input(self):
        """GRU fed from a placeholder input with the default zero initial state."""
        units = 5
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
        x_val = np.stack([x_val] * 1)
        def func(x):
            initializer = init_ops.constant_initializer(0.5)
            # no scope
            cell = GRUCell(
                units,
                kernel_initializer=initializer)
            outputs, cell_state = dynamic_rnn(
                cell,
                x,
                dtype=tf.float32)  # by default zero initializer is used
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_single_dynamic_gru_ch_zero_state_initializer(self):
        """GRU with an explicitly constructed zero initial state (cell.zero_state)."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = init_ops.constant_initializer(0.5)
            # no scope
            cell = GRUCell(
                units,
                kernel_initializer=initializer)
            # defining initial state
            initial_state = cell.zero_state(batch_size, dtype=tf.float32)
            outputs, cell_state = dynamic_rnn(
                cell,
                x,
                initial_state=initial_state,
                dtype=tf.float32)
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_single_dynamic_gru_random_weights(self):
        """GRU with seeded random-uniform kernel weights in [-1, 1]."""
        hidden_size = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = tf.random_uniform_initializer(-1.0, 1.0, seed=42)
            # no scope
            cell = GRUCell(
                hidden_size,
                kernel_initializer=initializer)
            outputs, cell_state = dynamic_rnn(
                cell,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        # Positional third argument is the rtol for the comparison.
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.0001,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_single_dynamic_gru_random_weights2(self):
        """Wide GRU (128 units, 133 features) with random input and loose 0.01 rtol."""
        hidden_size = 128
        batch_size = 1
        # NOTE(review): np.random.randn here is unseeded, so the input differs per run;
        # the seeded initializer keeps the weights deterministic.
        x_val = np.random.randn(1, 133).astype('f')
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            #initializer = tf.constant_initializer(5.0)
            initializer = tf.random_uniform_initializer(0.0, 1.0, seed=42)
            # no scope
            cell = GRUCell(
                hidden_size,
                kernel_initializer=initializer)
            outputs, cell_state = dynamic_rnn(
                cell,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.01,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_dynamic_gru_output_consumed_only(self):
        """GRU where only the sequence output is consumed (state discarded)."""
        units = 5
        batch_size = 6
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = tf.random_uniform_initializer(-1.0, 1.0, seed=42)
            cell1 = GRUCell(
                units,
                kernel_initializer=initializer)
            outputs, _ = dynamic_rnn(
                cell1,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.0001,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 8, "might need Scan")
    def test_dynamic_gru_state_consumed_only(self):
        """GRU where only the final cell state is consumed (sequence output discarded)."""
        units = 5
        batch_size = 6
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = tf.random_uniform_initializer(-1.0, 1.0, seed=42)
            cell1 = GRUCell(
                units,
                kernel_initializer=initializer)
            _, cell_state = dynamic_rnn(
                cell1,
                x,
                dtype=tf.float32)
            return tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=0.0001, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bigru(self):
        """Bidirectional GRU; should fuse into a single bidirectional ONNX GRU node."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = init_ops.constant_initializer(0.5)
            # bigru, no scope
            cell1 = GRUCell(
                units,
                kernel_initializer=initializer)
            cell2 = GRUCell(
                units,
                kernel_initializer=initializer)
            outputs, cell_state = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bigru_output_consumed_only(self):
        """Bidirectional GRU where only the sequence outputs are consumed."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = init_ops.constant_initializer(0.5)
            # bigru, no scope
            cell1 = GRUCell(
                units,
                kernel_initializer=initializer)
            cell2 = GRUCell(
                units,
                kernel_initializer=initializer)
            outputs, _ = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bigru_state_consumed_only(self):
        """Bidirectional GRU where only the final states are consumed."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = init_ops.constant_initializer(0.5)
            # bigru, no scope
            cell1 = GRUCell(
                units,
                kernel_initializer=initializer)
            cell2 = GRUCell(
                units,
                kernel_initializer=initializer)
            _, cell_state = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                dtype=tf.float32)
            return tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bidirectional_but_one_gru(self):
        """Bidirectional RNN that reuses the SAME cell object for both directions."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            initializer = init_ops.constant_initializer(0.5)
            # bigru, no scope
            cell = GRUCell(
                units,
                kernel_initializer=initializer)
            outputs, cell_state = bidirectional_dynamic_rnn(
                cell,
                cell,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bidirectional_but_one_gru_and_output_consumed_only(self):
        """Shared-cell bidirectional GRU with only the sequence outputs consumed."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            # bigru, no scope
            cell = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            outputs, _ = bidirectional_dynamic_rnn(
                cell,
                cell,
                x,
                dtype=tf.float32)
            return tf.identity(outputs, name="output")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bidirectional_but_one_gru_and_state_consumed_only(self):
        """Shared-cell bidirectional GRU with only the final states consumed."""
        units = 5
        batch_size = 1
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            # bigru, no scope
            cell = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            _, cell_state = bidirectional_dynamic_rnn(
                cell,
                cell,
                x,
                dtype=tf.float32)
            return tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bigru_unknown_batch_size(self):
        """Bidirectional GRU with distinct fw/bw cells; only states consumed, batch > 1."""
        units = 5
        batch_size = 6
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            cell1 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            cell2 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=45))
            _, cell_state = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                dtype=tf.float32,
                )
            return tf.identity(cell_state, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    def test_dynamic_bigru_outputs_partially_consumed(self):
        """Bidirectional GRU consuming only forward output and backward state."""
        units = 5
        batch_size = 6
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            cell1 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            cell2 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=45))
            (output_fw, _), (_, state_bw) = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                dtype=tf.float32)
            return tf.identity(output_fw, name="output"), tf.identity(state_bw, name="cell_state")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output:0", "cell_state:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06,
                           graph_validator=lambda g: check_gru_count(g, 1))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    @skip_tf_versions(["2.1"], "TF fails to correctly add output_2 node.")
    def test_dynamic_multi_bigru_with_same_input_hidden_size(self):
        """Two bidirectional GRUs over the same input but different hidden sizes (5 and 10)."""
        batch_size = 10
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        def func(x):
            # bigru, no scope
            units = 5
            cell1 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            cell2 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=45))
            outputs_1, cell_state_1 = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                dtype=tf.float32,
                scope="bigru_1"
                )
            units = 10
            cell1 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            cell2 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=45))
            outputs_2, cell_state_2 = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                dtype=tf.float32,
                scope="bigru_2"
                )
            return tf.identity(outputs_1, name="output_1"), \
                   tf.identity(cell_state_1, name="cell_state_1"), \
                   tf.identity(outputs_2, name="output_2"), \
                   tf.identity(cell_state_2, name="cell_state_2")
        feed_dict = {"input_1:0": x_val}
        input_names_with_port = ["input_1:0"]
        output_names_with_port = ["output_1:0", "cell_state_1:0", "output_2:0", "cell_state_2:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06)
        # graph_validator=lambda g: check_gru_count(g, 2))
    @check_opset_after_tf_version("1.15", 10, "might need ReverseV2")
    @skip_tf_versions(["2.1"], "TF fails to correctly add output_2 node.")
    def test_dynamic_multi_bigru_with_same_input_seq_len(self):
        """Two bidirectional GRUs over the same input, each with its own dynamic seq_len input."""
        units = 5
        batch_size = 10
        x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
        x_val = np.stack([x_val] * batch_size)
        seq_len_val = np.array([3], dtype=np.int32)
        def func(x, y1, y2):
            # Broadcast the scalar sequence length to the whole batch.
            seq_len1 = tf.tile(y1, [batch_size])
            cell1 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))
            cell2 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=45))
            outputs_1, cell_state_1 = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                sequence_length=seq_len1,
                dtype=tf.float32,
                scope="bigru_1"
                )
            seq_len2 = tf.tile(y2, [batch_size])
            cell1 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=46),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=47))
            cell2 = GRUCell(
                units,
                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=48),
                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=49))
            outputs_2, cell_state_2 = bidirectional_dynamic_rnn(
                cell1,
                cell2,
                x,
                sequence_length=seq_len2,
                dtype=tf.float32,
                scope="bigru_2"
                )
            return tf.identity(outputs_1, name="output_1"), \
                   tf.identity(cell_state_1, name="cell_state_1"), \
                   tf.identity(outputs_2, name="output_2"), \
                   tf.identity(cell_state_2, name="cell_state_2")
        feed_dict = {"input_1:0": x_val, "input_2:0": seq_len_val, "input_3:0": seq_len_val}
        input_names_with_port = ["input_1:0", "input_2:0", "input_3:0"]
        output_names_with_port = ["output_1:0", "cell_state_1:0", "output_2:0", "cell_state_2:0"]
        self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06)
        # graph_validator=lambda g: check_gru_count(g, 2))
    @check_tf_min_version("2.2")
    def test_keras_gru(self):
        """Convert a Keras GRU layer with return_sequences and return_state."""
        in_shape = [10, 3]
        x_val = np.random.uniform(size=[2, 10, 3]).astype(np.float32)
        model_in = tf.keras.layers.Input(tuple(in_shape), batch_size=2)
        x = tf.keras.layers.GRU(5, return_sequences=True, return_state=True,
                                kernel_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=42),
                                recurrent_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=44),
                                bias_initializer=tf.random_uniform_initializer(0.0, 1.0, seed=43))(model_in)
        model = tf.keras.models.Model(inputs=model_in, outputs=x)
        def func(x):
            # y[0] is the full sequence output, y[1] the final state
            y = model(x)
            return tf.identity(y[0], name=_TFOUTPUT), tf.identity(y[1], name=_TFOUTPUT1)
        self.run_test_case(func, {_INPUT: x_val}, [], [_OUTPUT, _OUTPUT1], rtol=1e-05, atol=1e-06)
# Entry point when this test file is executed directly (not via pytest).
if __name__ == '__main__':
    unittest_main()
| 30,029 | 39.635995 | 118 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/backend_test_base.py | # SPDX-License-Identifier: Apache-2.0
"""Unit Test Base."""
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,import-outside-toplevel
# pylint: disable=wrong-import-position,invalid-unary-operand-type
import logging
import os
import unittest
import re
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops import lookup_ops
import onnx
from common import get_test_config
from tfjs_runner import run_tfjs
from tf2onnx import constants
from tf2onnx import utils
from tf2onnx.tfonnx import process_tf_graph
from tf2onnx import optimizer
from tf2onnx.tf_loader import tf_reset_default_graph, tf_session, tf_placeholder, from_function, freeze_session
from tf2onnx.tf_loader import tf_optimize, is_tf2, get_hash_table_info
from tf2onnx.tf_utils import compress_graph_def
from tf2onnx.graph import ExternalTensorStorage
from tf2onnx.tflite.Model import Model
if is_tf2():
tf_set_random_seed = tf.compat.v1.set_random_seed
tf_tables_initializer = tf.compat.v1.tables_initializer
tf_lite = tf.compat.v1.lite
else:
tf_set_random_seed = tf.set_random_seed
tf_tables_initializer = tf.tables_initializer
tf_lite = None
class Tf2OnnxBackendTestBase(unittest.TestCase):
    def setUp(self):
        """Load the shared test config and reset global state before each test."""
        self.config = get_test_config()
        tf_reset_default_graph()
        # reset name generation on every test
        utils.INTERNAL_NAME = 1
        np.random.seed(1)  # Make it reproducible.
        self.logger = logging.getLogger(self.__class__.__name__)
    def tearDown(self):
        """Delete per-test artifacts unless debug mode asked to keep them."""
        if not self.config.is_debug_mode:
            utils.delete_directory(self.test_data_directory)
    @property
    def test_data_directory(self):
        """Per-test scratch directory under the configured temp dir."""
        return os.path.join(self.config.temp_dir, self._testMethodName)
    @staticmethod
    def assertAllClose(expected, actual, **kwargs):
        """Assert arrays are element-wise close; kwargs pass through (rtol, atol, ...)."""
        np.testing.assert_allclose(expected, actual, **kwargs)
    @staticmethod
    def assertAllEqual(expected, actual, **kwargs):
        """Assert arrays are element-wise exactly equal."""
        np.testing.assert_array_equal(expected, actual, **kwargs)
    def run_onnxcaffe2(self, onnx_graph, inputs):
        """Run test against caffe2 backend."""
        # imported lazily so the caffe2 dependency stays optional
        import caffe2.python.onnx.backend
        prepared_backend = caffe2.python.onnx.backend.prepare(onnx_graph)
        results = prepared_backend.run(inputs)
        return results
    def run_onnxruntime(self, model_path, inputs, output_names, use_custom_ops=False):
        """Run test against onnxruntime backend.

        model_path: path to a saved .onnx file; inputs: feed dict; output_names:
        outputs to fetch; use_custom_ops: register onnxruntime-extensions ops.
        """
        import onnxruntime as rt
        providers = ['CPUExecutionProvider']
        if rt.get_device() == "GPU":
            # prefer CUDA unless CUDA_VISIBLE_DEVICES pins exactly one device
            gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
            if gpus is None or len(gpus) > 1:
                providers = ['CUDAExecutionProvider']
        opt = rt.SessionOptions()
        if use_custom_ops:
            from onnxruntime_extensions import get_library_path
            opt.register_custom_ops_library(get_library_path())
        # in case of issues with the runtime, one can enable more logging
        # opt.log_severity_level = 0
        # opt.log_verbosity_level = 255
        # opt.enable_profiling = True
        m = rt.InferenceSession(model_path, opt, providers=providers)
        results = m.run(output_names, inputs)
        return results
    def run_backend(self, g, outputs, input_dict, large_model=False, postfix="", use_custom_ops=False):
        """Serialize graph *g* to an ONNX model and execute it on the configured backend."""
        # large models store weights outside the protobuf
        tensor_storage = ExternalTensorStorage() if large_model else None
        model_proto = g.make_model("test", external_tensor_storage=tensor_storage)
        model_path = self.save_onnx_model(model_proto, input_dict, external_tensor_storage=tensor_storage,
                                          postfix=postfix)
        if self.config.backend == "onnxruntime":
            y = self.run_onnxruntime(model_path, input_dict, outputs, use_custom_ops)
        elif self.config.backend == "caffe2":
            y = self.run_onnxcaffe2(model_proto, input_dict)
        else:
            raise ValueError("unknown backend")
        return y
    def assert_results_equal(self, expected, actual, rtol, atol, mtol=None,
                             check_value=True, check_shape=True, check_dtype=True):
        """Compare paired result lists value-, dtype- and shape-wise.

        mtol, when given, clamps both sides to [-mtol, mtol] before the
        numeric comparison (useful for saturating ops).
        """
        for expected_val, actual_val in zip(expected, actual):
            if check_value:
                if expected_val.dtype == object:
                    # TFLite pads strings with nul bytes
                    decode = np.vectorize(lambda x: x.replace(b'\x00', b'').decode('UTF-8'))
                    expected_val_str = decode(expected_val)
                    self.assertAllEqual(expected_val_str, actual_val)
                elif expected_val.dtype.kind == 'U':
                    # unicode arrays compare exactly
                    self.assertAllEqual(expected_val, actual_val)
                else:
                    if mtol is not None:
                        expected_val = np.minimum(expected_val, mtol)
                        expected_val = np.maximum(expected_val, -mtol)
                        actual_val = np.minimum(actual_val, mtol)
                        actual_val = np.maximum(actual_val, -mtol)
                    self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=atol)
            if check_dtype:
                self.assertEqual(expected_val.dtype, actual_val.dtype)
            # why we need the shape check: issue when comparing [] with a scalar
            # https://github.com/numpy/numpy/issues/11071
            if check_shape:
                self.assertEqual(expected_val.shape, actual_val.shape)
    def freeze_and_run_tf(self, func, feed_dict, outputs, as_session, premade_placeholders, large_model):
        """Execute *func* in TF (eager or session mode), freeze it, and return
        (results, frozen graph_def, initialized hash tables or None)."""
        np.random.seed(1)  # Make it reproducible.
        clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
        if is_tf2() and not as_session:
            #
            # use eager to execute the tensorflow func
            #
            # numpy doesn't work for all ops, make it tf.Tensor()
            input_tensors = [tf.TensorSpec(shape=v.shape, dtype=tf.as_dtype(v.dtype), name=utils.node_name(k))
                             for k, v in feed_dict.items()]
            input_list = [tf.convert_to_tensor(v, dtype=tf.as_dtype(v.dtype), name=utils.node_name(k))
                          for k, v in feed_dict.items()]
            tf.random.set_seed(1)
            result = func(*input_list)
            if isinstance(result, (list, tuple)):
                # list or tuple
                result = [x.numpy() for x in result]
            else:
                # single result
                result = [result.numpy()]
            # now make the eager functions a graph
            concrete_func = tf.function(func, input_signature=tuple(input_tensors))
            concrete_func = concrete_func.get_concrete_function()
            graph_def = from_function(concrete_func,
                                      input_names=list(feed_dict.keys()),
                                      output_names=outputs,
                                      large_model=large_model)
            initialized_tables = None
        else:
            #
            # use graph to execute the tensorflow func
            #
            with tf_session() as sess:
                tf_set_random_seed(1)
                input_list = []
                if not premade_placeholders:
                    for k, v in clean_feed_dict.items():
                        input_list.append(tf_placeholder(name=k, shape=v.shape, dtype=tf.as_dtype(v.dtype)))
                func(*input_list)
                variables_lib.global_variables_initializer().run()
                tf_tables_initializer().run()
                output_dict = []
                for out_name in outputs:
                    output_dict.append(sess.graph.get_tensor_by_name(out_name))
                result = sess.run(output_dict, feed_dict=feed_dict)
                graph_def = freeze_session(sess,
                                           input_names=list(feed_dict.keys()),
                                           output_names=outputs)
                # export contents of any lookup tables so conversion can embed them
                table_info = get_hash_table_info(graph_def)
                initialized_tables = {}
                for info in table_info:
                    if info.shared_name is None:
                        continue
                    h = lookup_ops.hash_table_v2(info.key_dtype, info.val_dtype, shared_name=info.shared_name)
                    k, v = lookup_ops.lookup_table_export_v2(h, info.key_dtype, info.val_dtype)
                    initialized_tables[info.shared_name] = (sess.run(k), sess.run(v))
            tf_reset_default_graph()
            with tf_session() as sess:
                tf.import_graph_def(graph_def, name='')
                graph_def = tf_optimize(list(feed_dict.keys()), outputs, graph_def)
        return result, graph_def, initialized_tables
    def convert_to_tfjs(self, graph_def_path, output_names):
        """Convert a frozen graph file to tfjs; return the model.json path or None on failure."""
        try:
            from tensorflowjs.converters import converter
        except ImportError:
            self.logger.warning("Tensorflowjs.converters package imports failed.")
            return None
        tfjs_path = os.path.join(self.test_data_directory, self._testMethodName + "_tfjs")
        try:
            converter.convert([graph_def_path, tfjs_path, '--input_format', 'tf_frozen_model',
                               '--output_node_names', ','.join(output_names)])
        except ValueError:
            self.logger.warning("Convert tensorflowjs graph failed.")
            return None
        model_path = os.path.join(tfjs_path, 'model.json')
        if not os.path.exists(model_path):
            self.logger.warning("Tensorflowjs model path %s is empty.", model_path)
            return None
        return model_path
    def convert_to_tflite(self, graph_def, feed_dict, outputs):
        """Convert a GraphDef to a .tflite file; return its path or None on failure."""
        if not feed_dict:
            return None  # Can't make TFlite model with no inputs
        tf_reset_default_graph()
        with tf_session() as sess:
            tf.import_graph_def(graph_def, name='')
            sess_inputs = [sess.graph.get_tensor_by_name(k) for k in feed_dict.keys()]
            sess_outputs = [sess.graph.get_tensor_by_name(n) for n in outputs]
            converter = tf_lite.TFLiteConverter.from_session(sess, sess_inputs, sess_outputs)
            #converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
                tf.lite.OpsSet.SELECT_TF_OPS,  # enable TensorFlow flex ops.
            ]
            from tensorflow.lite.python.convert import ConverterError
            try:
                tflite_model = converter.convert()
                tflite_path = os.path.join(self.test_data_directory, self._testMethodName + ".tflite")
                dir_name = os.path.dirname(tflite_path)
                if dir_name:
                    os.makedirs(dir_name, exist_ok=True)
                with open(tflite_path, 'wb') as f:
                    f.write(tflite_model)
                return tflite_path
            except ConverterError:
                # not every graph is expressible in tflite; callers treat None as "skip"
                return None
    def tflite_has_supported_types(self, tflite_path):
        """Return False if any tensor in the tflite model has an object (string) dtype."""
        try:
            with open(tflite_path, 'rb') as f:
                buf = f.read()
            buf = bytearray(buf)
            model = Model.GetRootAsModel(buf, 0)
            tensor_cnt = model.Subgraphs(0).TensorsLength()
            interpreter = tf.lite.Interpreter(tflite_path)
            for i in range(tensor_cnt):
                dtype = interpreter._get_tensor_details(i)['dtype']  # pylint: disable=protected-access
                if np.dtype(dtype).kind == 'O':
                    return False
            return True
        except (RuntimeError, ValueError):
            # treat unreadable/invalid models as unsupported
            return False
    def run_tflite(self, tflite_path, feed_dict):
        """Execute a tflite model; return (results, output_names) or (None, None) on failure."""
        try:
            interpreter = tf.lite.Interpreter(tflite_path)
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            # tflite drops the ":port" suffix from tensor names
            input_name_to_index = {n['name'].split(':')[0]: n['index'] for n in input_details}
            feed_dict_without_port = {k.split(':')[0]: v for k, v in feed_dict.items()}
            for k, v in feed_dict_without_port.items():
                interpreter.resize_tensor_input(input_name_to_index[k], v.shape)
            interpreter.allocate_tensors()
            # The output names might be different in the tflite but the order is the same
            output_names = [n['name'] for n in output_details]
            for k, v in feed_dict_without_port.items():
                interpreter.set_tensor(input_name_to_index[k], v)
            interpreter.invoke()
            result = [interpreter.get_tensor(output['index']) for output in output_details]
            return result, output_names
        except (RuntimeError, ValueError):
            # tflite sometimes converts from tf but produces an invalid model
            return None, None
    def assert_shapes_correct(self, graph, allow_missing=False, run_checker=True, check_shape=True):
        """Verify tf2onnx-recorded shapes/dtypes agree with onnx shape inference.

        allow_missing: tolerate shapes tf2onnx did not record (or -1 dims).
        run_checker: also run the full onnx model checker when shapes are complete.
        """
        if not check_shape:
            return None
        model_proto = graph.make_model("test")
        if run_checker and not any(graph.get_shape(out) is None for out in graph.outputs + graph.input_names):
            try:
                onnx.checker.check_model(model_proto, full_check=True)
            except onnx.shape_inference.InferenceError as e:
                # onnx checker verifies number of subgraph inputs incorrectly in IR 3
                if re.search(r"Graph has \d* inputs but \d* were provided", str(e)):
                    run_checker = False
                else:
                    raise e
        model_shapes = onnx.shape_inference.infer_shapes(model_proto)
        def get_shape(info):
            # -1 marks an unknown dim to match tf2onnx's convention
            if not info.type.tensor_type.HasField("shape"):
                return None
            return [d.dim_value if d.HasField('dim_value') else -1 for d in info.type.tensor_type.shape.dim]
        def get_dtype(info):
            # unwrap sequence types to compare element dtypes
            tensor_type = info.type.tensor_type
            is_seq = False
            result = None
            if info.type.HasField("sequence_type"):
                tensor_type = info.type.sequence_type.elem_type.tensor_type
                is_seq = True
            if tensor_type.HasField("elem_type"):
                result = tensor_type.elem_type
            return utils.SeqType(result) if is_seq else result
        for info in model_shapes.graph.value_info:
            if info.name == "":
                continue
            onnx_shape = get_shape(info)
            tf2onnx_shape = graph.get_shape(info.name)
            if onnx_shape is None:
                continue
            if allow_missing and tf2onnx_shape is None:
                continue
            self.assertTrue(tf2onnx_shape is not None)
            if -1 in onnx_shape or (allow_missing and -1 in tf2onnx_shape):
                # compare rank, then only the dims both sides know
                self.assertEqual(len(onnx_shape), len(tf2onnx_shape))
                for d1, d2 in zip(onnx_shape, tf2onnx_shape):
                    if d1 != -1 and (d2 != -1 or not allow_missing):
                        self.assertEqual(d1, d2)
            else:
                self.assertEqual(onnx_shape, tf2onnx_shape)
            self.assertEqual(get_dtype(info), graph.get_dtype(info.name))
    def run_test_case(self, func, feed_dict, input_names_with_port, output_names_with_port,
                      rtol=1e-07, atol=1e-5, mtol=None, convert_var_to_const=True, check_value=True,
                      check_shape=True, check_dtype=True, process_args=None, onnx_feed_dict=None,
                      graph_validator=None, as_session=False, large_model=False, premade_placeholders=False,
                      use_custom_ops=False, optimize=True):
        """
        This function tests all scenarios available through the command line.
        The command line always runs the optimizers.
        However, they may modify the final graph into something different than the
        tested converter implements. Set `optimize=False` to keep the original
        set of nodes and helps debugging. However, the same function should
        be called with `optimize=True` to test what the user would actually get.

        Runs up to three conversion paths (tf frozen graph, tflite, tfjs),
        compares backend results against TF, and returns the last converted
        tf2onnx graph (raises SkipTest if every path was skipped).
        """
        test_tf = not self.config.skip_tf_tests
        test_tflite = not self.config.skip_tflite_tests
        test_tfjs = not self.config.skip_tfjs_tests
        run_tfl_consistency_test = test_tf and test_tflite and self.config.run_tfl_consistency_test
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict
        input_names_with_port = list(feed_dict)
        tf_reset_default_graph()
        if tf_lite is None:
            test_tflite = False
        g = None

        # run TF once to obtain reference results and the frozen graph
        expected, graph_def, initialized_tables = \
            self.freeze_and_run_tf(func, feed_dict, output_names_with_port, as_session,
                                   premade_placeholders, large_model)

        graph_def_path = os.path.join(self.test_data_directory, self._testMethodName + "_after_tf_optimize.pb")
        utils.save_protobuf(graph_def_path, graph_def)
        self.logger.debug("created file %s", graph_def_path)

        tfl_process_args = process_args.copy()

        if test_tfjs:
            tfjs_path = self.convert_to_tfjs(graph_def_path, output_names_with_port)
            if tfjs_path is None:
                test_tfjs = False

        if test_tflite:
            tflite_path = self.convert_to_tflite(graph_def, feed_dict, output_names_with_port)
            test_tflite = tflite_path is not None and self.tflite_has_supported_types(tflite_path)

        # --- path 1: convert the frozen TF graph ---
        if test_tf:
            tf_reset_default_graph()
            with tf_session() as sess:
                const_node_values = None
                if large_model:
                    const_node_values = compress_graph_def(graph_def)
                tf.import_graph_def(graph_def, name='')

                g = process_tf_graph(sess.graph, opset=self.config.opset,
                                     input_names=list(feed_dict.keys()),
                                     output_names=output_names_with_port,
                                     target=self.config.target,
                                     const_node_values=const_node_values,
                                     initialized_tables=initialized_tables,
                                     **process_args)
                if optimize:
                    g = optimizer.optimize_graph(g, catch_errors=False)
                actual = self.run_backend(g, output_names_with_port, onnx_feed_dict, large_model,
                                          use_custom_ops=use_custom_ops)

            # undo NCHW transposition so results compare against TF's NHWC output
            if 'outputs_as_nchw' in tfl_process_args:
                for output_name in tfl_process_args['outputs_as_nchw']:
                    i = output_names_with_port.index(output_name)
                    actual[i] = np.transpose(actual[i], constants.NCHW_TO_NHWC)

            self.assert_results_equal(expected, actual, rtol, atol, mtol, check_value, check_shape,
                                      check_dtype)
            self.assert_shapes_correct(g, self.config.allow_missing_shapes, not self.config.skip_onnx_checker,
                                       check_shape)

            if graph_validator:
                self.assertTrue(graph_validator(g))

        if test_tflite:
            tfl_res, tfl_outputs = self.run_tflite(tflite_path, feed_dict)
            test_tflite = tfl_res is not None

        # --- path 2: convert the tflite model ---
        if test_tflite:
            if run_tfl_consistency_test:
                self.assert_results_equal(expected, tfl_res, rtol, atol, mtol, check_value, check_shape, check_dtype)

            # tflite names have no ":port" suffix; strip it from nchw args too
            if 'inputs_as_nchw' in tfl_process_args:
                nchw_inps_with_port = tfl_process_args['inputs_as_nchw']
                tfl_process_args['inputs_as_nchw'] = [i.split(':')[0] for i in nchw_inps_with_port]
            input_names_without_port = [inp.split(':')[0] for inp in feed_dict.keys()]
            if 'outputs_as_nchw' in tfl_process_args:
                nchw_outps_with_port = tfl_process_args['outputs_as_nchw']
                tfl_process_args['outputs_as_nchw'] = [i.split(':')[0] for i in nchw_outps_with_port]
                output_names_with_port = [i.split(':')[0] for i in nchw_outps_with_port]

            g = process_tf_graph(None, opset=self.config.opset,
                                 input_names=input_names_without_port,
                                 output_names=tfl_outputs,
                                 target=self.config.target,
                                 tflite_path=tflite_path,
                                 **tfl_process_args)
            if optimize:
                g = optimizer.optimize_graph(g)
            onnx_feed_dict_without_port = {k.split(':')[0]: v for k, v in onnx_feed_dict.items()}
            onnx_tfl_res = self.run_backend(g, tfl_outputs, onnx_feed_dict_without_port,
                                            postfix="_from_tflite", use_custom_ops=use_custom_ops)
            if 'outputs_as_nchw' in tfl_process_args:
                for output_name in tfl_process_args['outputs_as_nchw']:
                    i = output_names_with_port.index(output_name)
                    onnx_tfl_res[i] = np.transpose(onnx_tfl_res[i], constants.NCHW_TO_NHWC)

            self.assert_results_equal(tfl_res, onnx_tfl_res, rtol, atol, mtol, check_value, check_shape, check_dtype)
            self.assert_shapes_correct(g, self.config.allow_missing_shapes, not self.config.skip_onnx_checker,
                                       check_shape)

            if graph_validator:
                self.assertTrue(graph_validator(g))

        # --- path 3: convert the tfjs model ---
        if test_tfjs:
            try:
                tfjs_res = run_tfjs(tfjs_path, feed_dict)
            except RuntimeError as e:
                # known tfjs runtime limitations skip the path rather than fail
                ignored_errors = ["is not yet supported", "Operands could not be broadcast together",
                                  "unknown dtype null", "must be [NaN", "Cannot read property 'name' of undefined",
                                  "Either strides or dilations must be 1", "does not support"]
                if any(err in str(e) for err in ignored_errors):
                    test_tfjs = False
                else:
                    raise e

        if test_tfjs:
            g = process_tf_graph(None, opset=self.config.opset,
                                 input_names=list(feed_dict.keys()),
                                 output_names=None,
                                 target=self.config.target,
                                 tfjs_path=tfjs_path,
                                 **process_args)
            g = optimizer.optimize_graph(g)
            onnx_tfjs_res = self.run_backend(g, None, onnx_feed_dict, large_model,
                                             postfix="_from_tfjs", use_custom_ops=use_custom_ops)
            if 'outputs_as_nchw' in tfl_process_args:
                for output_name in tfl_process_args['outputs_as_nchw']:
                    i = output_names_with_port.index(output_name)
                    onnx_tfjs_res[i] = np.transpose(onnx_tfjs_res[i], constants.NCHW_TO_NHWC)

            self.assert_results_equal(tfjs_res, onnx_tfjs_res, rtol, atol, mtol, check_value, check_shape,
                                      check_dtype=False)
            self.assert_shapes_correct(g, self.config.allow_missing_shapes, not self.config.skip_onnx_checker,
                                       check_shape)

            if graph_validator:
                self.assertTrue(graph_validator(g))

        if g is None:
            raise unittest.SkipTest("tf, tflite, and tfjs marked to skip")

        return g
    def save_onnx_model(self, model_proto, feed_dict, postfix="", external_tensor_storage=None):
        """Persist the ONNX model (plus test data/text form in debug mode); return its path."""
        target_path = utils.save_onnx_model(self.test_data_directory, self._testMethodName + postfix, feed_dict,
                                            model_proto, include_test_data=self.config.is_debug_mode,
                                            as_text=self.config.is_debug_mode,
                                            external_tensor_storage=external_tensor_storage)
        self.logger.debug("create model file: %s", target_path)
        return target_path
| 24,541 | 47.791252 | 117 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/common.py | # SPDX-License-Identifier: Apache-2.0
""" test common utilities."""
import argparse
import os
import sys
import unittest
from collections import defaultdict
from packaging.version import Version
from parameterized import parameterized
import timeout_decorator
import numpy as np
import tensorflow as tf
from tf2onnx import constants, logging, utils, tf_utils, tf_loader
# pylint: disable=import-outside-toplevel
__all__ = [
"TestConfig",
"get_test_config",
"unittest_main",
"check_onnxruntime_backend",
"check_tf_min_version",
"check_tf_max_version",
"check_tfjs_min_version",
"check_tfjs_max_version",
"skip_tf_versions",
"skip_tf_cpu",
"check_onnxruntime_min_version",
"check_opset_min_version",
"check_opset_max_version",
"skip_tf2",
"skip_tflite",
"skip_tfjs",
"requires_tflite",
"check_opset_after_tf_version",
"check_target",
"skip_caffe2_backend",
"allow_missing_shapes",
"skip_onnx_checker",
"skip_onnxruntime_backend",
"skip_opset",
"check_onnxruntime_incompatibility",
"validate_const_node",
"group_nodes_by_type",
"test_ms_domain",
"check_node_domain",
"check_op_count",
"check_gru_count",
"check_lstm_count",
"check_quantization_axis",
"timeout",
]
# pylint: disable=missing-docstring,unused-argument
class TestConfig(object):
    """Test-run configuration assembled from environment variables, with
    optional command-line overrides when not launched from pytest."""

    def __init__(self):
        self.platform = sys.platform
        self.tf_version = tf_utils.get_tf_version()
        self.opset = int(os.environ.get("TF2ONNX_TEST_OPSET", constants.PREFERRED_OPSET))
        self.target = os.environ.get("TF2ONNX_TEST_TARGET", ",".join(constants.DEFAULT_TARGET)).split(',')
        self.backend = os.environ.get("TF2ONNX_TEST_BACKEND", "onnxruntime")
        self.skip_tflite_tests = os.environ.get("TF2ONNX_SKIP_TFLITE_TESTS", "FALSE").upper() == "TRUE"
        self.skip_tfjs_tests = os.environ.get("TF2ONNX_SKIP_TFJS_TESTS", "FALSE").upper() == "TRUE"
        self.skip_tf_tests = os.environ.get("TF2ONNX_SKIP_TF_TESTS", "FALSE").upper() == "TRUE"
        self.skip_onnx_checker = False
        self.allow_missing_shapes = False
        self.run_tfl_consistency_test = os.environ.get("TF2ONNX_RUN_TFL_CONSISTENCY_TEST", "FALSE").upper() == "TRUE"
        self.backend_version = self._get_backend_version()
        self.log_level = logging.WARNING
        self.temp_dir = utils.get_temp_directory()

    @property
    def is_mac(self):
        """True when running on macOS."""
        return self.platform == "darwin"

    @property
    def is_windows(self):
        """True when running on Windows."""
        return self.platform.startswith("win")

    @property
    def is_onnxruntime_backend(self):
        """True when the configured backend is onnxruntime."""
        return self.backend == "onnxruntime"

    @property
    def is_caffe2_backend(self):
        """True when the configured backend is caffe2."""
        return self.backend == "caffe2"

    @property
    def is_debug_mode(self):
        """Mirror of tf2onnx's global debug flag."""
        return utils.is_debug_mode()

    def _get_backend_version(self):
        """Return the backend's version as a packaging Version, or None if unknown."""
        version = None
        if self.backend == "onnxruntime":
            import onnxruntime as ort
            version = ort.__version__
        elif self.backend == "caffe2":
            # TODO: get caffe2 version
            pass
        if version:
            version = Version(version)
        return version

    def __str__(self):
        return "\n\t".join(["TestConfig:",
                            "platform={}".format(self.platform),
                            "tf_version={}".format(self.tf_version),
                            "opset={}".format(self.opset),
                            "target={}".format(self.target),
                            "skip_tflite_tests={}".format(self.skip_tflite_tests),
                            "skip_tfjs_tests={}".format(self.skip_tfjs_tests),
                            "skip_tf_tests={}".format(self.skip_tf_tests),
                            "run_tfl_consistency_test={}".format(self.run_tfl_consistency_test),
                            "backend={}".format(self.backend),
                            "backend_version={}".format(self.backend_version),
                            "is_debug_mode={}".format(self.is_debug_mode),
                            "temp_dir={}".format(self.temp_dir)])

    @staticmethod
    def load():
        """Build a TestConfig; outside pytest, also honor command-line overrides."""
        config = TestConfig()
        # if not launched by pytest, parse console arguments to override config
        if "pytest" not in sys.argv[0]:
            parser = argparse.ArgumentParser()
            parser.add_argument("--backend", default=config.backend,
                                choices=["caffe2", "onnxruntime"],
                                help="backend to test against")
            parser.add_argument("--opset", type=int, default=config.opset, help="opset to test against")
            parser.add_argument("--target", default=",".join(config.target), choices=constants.POSSIBLE_TARGETS,
                                help="target platform")
            parser.add_argument("--verbose", "-v", help="verbose output, option is additive", action="count")
            parser.add_argument("--debug", help="output debugging information", action="store_true")
            parser.add_argument("--temp_dir", help="temp dir")
            parser.add_argument("unittest_args", nargs='*')

            args = parser.parse_args()
            if args.debug:
                utils.set_debug_mode(True)
            config.backend = args.backend
            config.opset = args.opset
            config.target = args.target.split(',')
            config.log_level = logging.get_verbosity_level(args.verbose, config.log_level)
            if args.temp_dir:
                config.temp_dir = args.temp_dir

            # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)
            sys.argv[1:] = args.unittest_args
        return config
# The config must be loaded BEFORE unittest main executes when launched from a
# script; otherwise it is too late for test filters to take effect.
_config = TestConfig.load()
def get_test_config():
    """Return the module-wide TestConfig singleton."""
    # reading a module-level name needs no `global` declaration
    return _config
def unittest_main():
    """Configure logging from the test config, log the config, and run unittest.main()."""
    config = get_test_config()
    logging.basicConfig(level=config.log_level)
    # temporarily raise verbosity so the config summary is always visible
    with logging.set_scope_level(logging.INFO) as logger:
        logger.info(config)
    unittest.main()
def _append_message(reason, message):
if message:
reason = reason + ": " + message
return reason
def check_opset_after_tf_version(tf_version, required_opset, message=""):
    """ Skip if running tf >= tf_version but the test opset < required_opset """
    config = get_test_config()
    reason = _append_message("conversion requires opset {} after tf {}".format(required_opset, tf_version), message)
    skip = config.tf_version >= Version(tf_version) and config.opset < required_opset
    return unittest.skipIf(skip, reason)
def skip_tf2(message=""):
    """ Skip if running under tf-2.x """
    reason = _append_message("test needs to be fixed for tf-2.x", message)
    return unittest.skipIf(tf_loader.is_tf2(), reason)
def skip_tfjs(message=""):
    """ Skip the tfjs conversion for this test """
    config = get_test_config()
    reason = _append_message("test disabled for tfjs", message)
    if config.skip_tf_tests and config.skip_tflite_tests:
        # If we are skipping tf and tflite also, there is no reason to run this test
        return unittest.skip(reason)
    def decorator(func):
        def test(self):
            # temporarily force-skip tfjs for this test only, then restore
            tmp = config.skip_tfjs_tests
            config.skip_tfjs_tests = True
            try:
                func(self)
            finally:
                config.skip_tfjs_tests = tmp
        return test
    return decorator
def skip_tflite(message=""):
    """ Skip the tflite conversion for this test """
    config = get_test_config()
    reason = _append_message("test disabled for tflite", message)
    if config.skip_tf_tests and config.skip_tfjs_tests:
        # If we are skipping tf and tfjs also, there is no reason to run this test
        return unittest.skip(reason)
    def decorator(func):
        def test(self):
            # temporarily force-skip tflite for this test only, then restore
            tmp = config.skip_tflite_tests
            config.skip_tflite_tests = True
            try:
                func(self)
            finally:
                config.skip_tflite_tests = tmp
        return test
    return decorator
def skip_onnx_checker(message=""):
    """ Skip running the onnx checker for this test """
    config = get_test_config()
    def decorator(func):
        def test(self):
            # temporarily disable the checker for this test only, then restore
            tmp = config.skip_onnx_checker
            config.skip_onnx_checker = True
            try:
                func(self)
            finally:
                config.skip_onnx_checker = tmp
        return test
    return decorator
def allow_missing_shapes(message=""):
    """ Only check for incompatible, not missing shapes/dims """
    config = get_test_config()
    def decorator(func):
        def test(self):
            # temporarily relax shape checking for this test only, then restore
            tmp = config.allow_missing_shapes
            config.allow_missing_shapes = True
            try:
                func(self)
            finally:
                config.allow_missing_shapes = tmp
        return test
    return decorator
def requires_tflite(message=""):
    """ Skip test if tflite tests are disabled """
    config = get_test_config()
    reason = _append_message("test requires tflite", message)
    return unittest.skipIf(config.skip_tflite_tests, reason)
def requires_custom_ops(message=""):
    """ Skip until custom ops framework is on PyPI. """
    reason = _append_message("test needs custom ops framework", message)
    # probe for onnxruntime-extensions; skip when it is not installed
    try:
        import onnxruntime_extensions #pylint: disable=import-outside-toplevel,unused-import
        can_import = True
    except ModuleNotFoundError:
        can_import = False
    return unittest.skipIf(not can_import, reason)
def check_tfjs_max_version(max_accepted_version, message=""):
    """ Skip if tensorflowjs version > max_accepted_version """
    config = get_test_config()
    reason = _append_message("conversion requires tensorflowjs <= {}".format(max_accepted_version), message)
    try:
        import tensorflowjs
        can_import = True
    except ModuleNotFoundError:
        can_import = False
    # short-circuits on can_import, so tensorflowjs is only referenced when bound
    return unittest.skipIf(can_import and not config.skip_tfjs_tests and \
                           Version(tensorflowjs.__version__) > Version(max_accepted_version), reason)
def check_tfjs_min_version(min_required_version, message=""):
    """ Skip if tensorflowjs version < min_required_version """
    config = get_test_config()
    reason = _append_message("conversion requires tensorflowjs >= {}".format(min_required_version), message)
    try:
        import tensorflowjs
        can_import = True
    except ModuleNotFoundError:
        can_import = False
    # short-circuits on can_import, so tensorflowjs is only referenced when bound
    return unittest.skipIf(can_import and not config.skip_tfjs_tests and \
                           Version(tensorflowjs.__version__) < Version(min_required_version), reason)
def check_tf_max_version(max_accepted_version, message=""):
    """ Skip if tf_version > max_accepted_version """
    config = get_test_config()
    reason = _append_message("conversion requires tf <= {}".format(max_accepted_version), message)
    return unittest.skipIf(config.tf_version > Version(max_accepted_version), reason)
def check_tf_min_version(min_required_version, message=""):
    """ Skip if tf_version < min_required_version """
    config = get_test_config()
    reason = _append_message("conversion requires tf >= {}".format(min_required_version), message)
    return unittest.skipIf(config.tf_version < Version(min_required_version), reason)
def skip_tf_versions(excluded_versions, message=""):
    """ Skip if tf_version matches any of excluded_versions. """
    if not isinstance(excluded_versions, list):
        excluded_versions = [excluded_versions]
    config = get_test_config()
    condition = False
    reason = _append_message("conversion excludes tf {}".format(excluded_versions), message)
    for excluded_version in excluded_versions:
        # tf version with same specificity as excluded_version
        # (e.g. excluding "2.1" matches tf 2.1.x but not 2.10.x)
        tf_version = '.'.join(str(config.tf_version).split('.')[:excluded_version.count('.') + 1])
        if excluded_version == tf_version:
            condition = True
    return unittest.skipIf(condition, reason)
def is_tf_gpu():
    """Return True if TF reports an available GPU."""
    # NOTE(review): tf.test.is_gpu_available is deprecated in newer TF;
    # tf.config.list_physical_devices('GPU') is the replacement — confirm
    # the minimum supported TF version before switching.
    return tf.test.is_gpu_available()
def skip_tf_cpu(message=""):
    """ Skip unless TF has a GPU available """
    is_tf_cpu = not is_tf_gpu()
    return unittest.skipIf(is_tf_cpu, message)
def check_opset_min_version(min_required_version, message=""):
    """ Skip if opset < min_required_version """
    config = get_test_config()
    reason = _append_message("conversion requires opset >= {}".format(min_required_version), message)
    return unittest.skipIf(config.opset < min_required_version, reason)
def check_opset_max_version(max_accepted_version, message=""):
    """ Skip if opset > max_accepted_version """
    config = get_test_config()
    reason = _append_message("conversion requires opset <= {}".format(max_accepted_version), message)
    return unittest.skipIf(config.opset > max_accepted_version, reason)
def skip_opset(opset_v, message=""):
    """ Skip if opset == opset_v """
    config = get_test_config()
    reason = _append_message("conversion requires opset != {}".format(opset_v), message)
    return unittest.skipIf(config.opset == opset_v, reason)
def check_target(required_target, message=""):
    """ Skip if required_target is NOT specified """
    config = get_test_config()
    reason = _append_message("conversion requires target {} specified".format(required_target), message)
    return unittest.skipIf(required_target not in config.target, reason)
def skip_onnxruntime_backend(message=""):
    """ Skip if backend is onnxruntime """
    config = get_test_config()
    reason = _append_message("not supported by onnxruntime", message)
    return unittest.skipIf(config.is_onnxruntime_backend, reason)
def check_onnxruntime_backend(message=""):
    """ Skip if backend is NOT onnxruntime """
    config = get_test_config()
    reason = _append_message("only supported by onnxruntime", message)
    return unittest.skipIf(not config.is_onnxruntime_backend, reason)
def check_onnxruntime_min_version(min_required_version, message=""):
    """ Skip if backend is onnxruntime and its version < min_required_version """
    config = get_test_config()
    reason = _append_message("conversion requires onnxruntime >= {}".format(min_required_version), message)
    return unittest.skipIf(config.is_onnxruntime_backend and
                           config.backend_version < Version(min_required_version), reason)
def skip_caffe2_backend(message=""):
    """ Skip if backend is caffe2 """
    config = get_test_config()
    reason = _append_message("not supported by caffe2", message)
    return unittest.skipIf(config.is_caffe2_backend, reason)
def check_onnxruntime_incompatibility(op):
    """Skip the test when the backend is onnxruntime and ``op`` is not
    implemented there for the configured opset."""
    cfg = get_test_config()
    if not cfg.is_onnxruntime_backend:
        return unittest.skipIf(False, None)

    # First opset version at which onnxruntime implements each op.
    support_since = {
        "Abs": 6,  # Abs-1
        "Add": 7,  # Add-1, Add-6
        "AveragePool": 7,  # AveragePool-1
        "Div": 7,  # Div-1, Div-6
        "Elu": 6,  # Elu-1
        "Equal": 7,  # Equal-1
        "Exp": 6,  # Exp-1
        "Greater": 7,  # Greater-1
        "Less": 7,  # Less-1
        "Log": 6,  # Log-1
        "Max": 6,  # Max-1
        "Min": 6,  # Min-1
        "Mul": 7,  # Mul-1, Mul-6
        "Neg": 6,  # Neg-1
        "Pow": 7,  # Pow-1
        "Reciprocal": 6,  # Reciprocal-1
        "Relu": 6,  # Relu-1
        "Sqrt": 6,  # Sqrt-1
        "Sub": 7,  # Sub-1, Sub-6
        "Tanh": 6,  # Tanh-1
    }

    min_opset = support_since.get(op)
    if min_opset is None or cfg.opset >= min_opset:
        return unittest.skipIf(False, None)

    reason = "{} is not supported by onnxruntime before opset {}".format(op, min_opset)
    return unittest.skipIf(True, reason)
def validate_const_node(node, expected_val):
    """Return True iff ``node`` is a constant; asserts its value equals ``expected_val``."""
    if not node.is_const():
        return False
    np.testing.assert_allclose(expected_val, node.get_tensor_value())
    return True
def group_nodes_by_type(graph):
    """Map op type -> list of nodes, recursing into any body subgraphs."""
    grouped = defaultdict(list)
    for node in graph.get_nodes():
        body_graphs = node.get_body_graphs()
        if body_graphs:
            # Fold subgraph nodes (e.g. loop/if bodies) into the same mapping.
            for body_graph in body_graphs.values():
                for op_type, nodes in group_nodes_by_type(body_graph).items():
                    grouped[op_type].extend(nodes)
        grouped[node.type].append(node)
    return grouped
def check_op_count(graph, op_type, expected_count, disabled=True):
    """Check that ``graph`` contains exactly ``expected_count`` ``op_type`` nodes.

    Disabled by default because the grappler optimization may change some of
    the op counts; a disabled check always passes without touching the graph.
    """
    if disabled:
        return disabled
    return len(group_nodes_by_type(graph)[op_type]) == expected_count
def check_lstm_count(graph, expected_count):
    """Return True iff ``graph`` contains exactly ``expected_count`` LSTM nodes.

    Delegates to check_op_count for consistency with check_gru_count, but with
    disabled=False so the count is actually enforced, preserving the original
    strict behavior of this helper.
    """
    return check_op_count(graph, "LSTM", expected_count, disabled=False)
def check_gru_count(graph, expected_count):
    """Check the GRU node count via check_op_count.

    Note: check_op_count defaults to disabled=True, so this currently always
    passes without inspecting the graph.
    """
    return check_op_count(graph, "GRU", expected_count)
def check_quantization_axis(graph, op_type, expected_axis):
    """Return True iff every ``op_type`` node in ``graph`` has axis == ``expected_axis``."""
    axes = [n.get_attr_int("axis") for n in group_nodes_by_type(graph)[op_type]]
    return np.all(np.array(axes) == expected_axis)
# Highest com.microsoft (ms) domain opset version exercised by test_ms_domain.
_MAX_MS_OPSET_VERSION = 1
def test_ms_domain(versions=None):
    """ Parameterize test case to apply ms opset(s) as extra_opset.

    versions: iterable of com.microsoft opset versions to test; defaults to
    1.._MAX_MS_OPSET_VERSION.
    """
    # Fix: the internal name function must not be decorated with
    # check_onnxruntime_backend() — unittest.skipIf wraps it so that calling it
    # raises SkipTest when the backend is not onnxruntime, breaking
    # parameterized.expand's test naming (and it ran get_test_config at
    # decoration time). Backend gating belongs on the test itself.
    def _custom_name_func(testcase_func, param_num, param):
        # Name each generated test after the base test plus the ms opset version.
        del param_num
        arg = param.args[0]
        return "%s_%s" % (testcase_func.__name__, arg.version)

    # Test all opset versions in ms domain if versions is not specified
    if versions is None:
        versions = list(range(1, _MAX_MS_OPSET_VERSION + 1))

    opsets = []
    for version in versions:
        opsets.append([utils.make_opsetid(constants.MICROSOFT_DOMAIN, version)])
    return parameterized.expand(opsets, testcase_func_name=_custom_name_func)
def check_node_domain(node, domain):
    """Return True iff ``node`` belongs to ``domain``.

    A falsy ``domain`` (None or "") denotes the default onnx domain, which a
    node signals by having a falsy domain itself.
    """
    if domain:
        return node.domain == domain
    return not node.domain
def timeout(seconds):
    """Decorator limiting a test's run time to ``seconds``.

    NOTE: only use this to ensure a test does not time out; never write tests
    that intentionally time out. On Windows the whole test is skipped instead.
    """
    config = get_test_config()
    if config.is_windows:
        return unittest.skip("timeout testing is unreliable on Windows.")
    return timeout_decorator.timeout(seconds)
| 18,336 | 34.536822 | 117 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/run_pretrained_models.py | # SPDX-License-Identifier: Apache-2.0
"""Tool to convert and test pre-trained tensorflow models."""
# pylint: disable=broad-except,logging-not-lazy,unused-argument,unnecessary-lambda,import-outside-toplevel
# pylint: disable=wrong-import-position,too-many-nested-blocks
import argparse
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import zipfile
import random
from collections import namedtuple
from packaging.version import Version
import yaml
import numpy as np
import PIL.Image
import six
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
# contrib ops are registered only when the module is imported, the following import statement is needed,
# otherwise tf runtime error will show up when the tf model is restored from pb file because of un-registered ops.
try:
import tensorflow.contrib.rnn # pylint: disable=unused-import
except: # pylint: disable=bare-except
# not needed for tf-2.0
pass
try:
import tensorflow_text # pylint: disable=unused-import
except Exception as err:
pass
from tf2onnx import tf_loader, logging, optimizer, utils, tf_utils, constants
from tf2onnx.tfonnx import process_tf_graph
from tf2onnx.tf_loader import tf_session, tf_reset_default_graph
from tf2onnx.graph import ExternalTensorStorage
from tfjs_runner import run_tfjs
logger = logging.getLogger("run_pretrained")
TEMP_DIR = os.path.join(utils.get_temp_directory(), "run_pretrained")
PERF_STEP = 10
PERF_TIME = 10
def get_img(shape, path, dtype, should_scale=True):
    """Load an image, resize to the model input size, tile to ``shape``.

    ``path`` is resolved relative to this file; ``shape`` is NHWC, with
    shape[1:3] taken as the target (height, width).
    """
    target_size = shape[1:3]
    full_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
    image = PIL.Image.open(full_path)
    image = image.resize(target_size, PIL.Image.LANCZOS)
    arr = np.array(image).astype(dtype)
    arr = np.stack([arr] * shape[0], axis=0).reshape(shape)
    return arr / 255 if should_scale else arr
def get_beach(shape):
    """Beach photo, scaled to [0, 1] float32."""
    return get_img(shape, "beach.jpg", np.float32)
def get_beach_uint8(shape):
    """Get beach image as raw uint8 (no scaling).

    Fix: this helper is named and documented as the beach image but previously
    loaded "ade20k.jpg"; load beach.jpg to match its name and docstring
    (the ade20k variants below cover the segmentation sample).
    """
    return get_img(shape, "beach.jpg", np.uint8, should_scale=False)
def get_car(shape):
    """Car photo, scaled to [0, 1] float32."""
    return get_img(shape, "car.JPEG", np.float32)
def get_ade20k(shape):
    """Truck image from the ade20k segmentation dataset, scaled to [0, 1] float32."""
    return get_img(shape, "ade20k.jpg", np.float32)
def get_ade20k_uint8(shape):
    """Truck image from the ade20k segmentation dataset as raw uint8 (no scaling)."""
    return get_img(shape, "ade20k.jpg", np.uint8, should_scale=False)
def get_random(shape):
    """Deterministic uniform noise in [0, 1) as float32 (fixed seed)."""
    np.random.seed(42)
    values = np.random.sample(shape)
    return values.astype(np.float32)
def get_random256(shape):
    """Deterministic random whole numbers in [0, 256] as float32 (fixed seed)."""
    np.random.seed(42)
    scaled = np.random.sample(shape) * 256
    return np.round(scaled).astype(np.float32)
def get_ramp(shape):
    """Ramp of values 1..size reshaped to ``shape``, float32."""
    size = np.prod(shape)
    ramp = np.linspace(1, size, size)
    return ramp.reshape(shape).astype(np.float32)
def get_ones(shape):
    """All-ones float32 tensor of the given shape."""
    return np.ones(shape, dtype=np.float32)
def get_zeros(shape):
    """All-zeros float32 tensor of the given shape."""
    return np.zeros(shape, dtype=np.float32)
def get_zeros_int32(shape):
    """All-zeros int32 tensor of the given shape."""
    return np.zeros(shape, dtype=np.int32)
def get_zeros_int64(shape):
    """All-zeros int64 tensor of the given shape."""
    return np.zeros(shape, dtype=np.int64)
def get_ones_int32(shape):
    """All-ones int32 tensor of the given shape."""
    return np.ones(shape, dtype=np.int32)
def get_small_rand_int32(shape):
    """Deterministic random int32 values in the range [1, 99] (fixed seed)."""
    np.random.seed(42)
    return np.random.randint(1, 100, size=shape, dtype=np.int32)
def get_zeros_then_ones(shape):
    """First half of the flattened tensor is zeros, the rest ones (int32).

    For an odd element count the extra element goes to the ones.
    """
    total = np.prod(shape)
    n_zeros = total // 2
    flat = np.concatenate((np.zeros(n_zeros, dtype=np.int32),
                           np.ones(total - n_zeros, dtype=np.int32)))
    return flat.reshape(shape)
def get_wav(shape):
    """One period of a sine wave with shape[0] samples, float32."""
    samples = np.linspace(-np.pi, np.pi, shape[0])
    return np.sin(samples, dtype=np.float32)
def get_sentences(shape):
    """Get deterministic random sentences of the given shape.

    Each element is a sentence of 2-7 words drawn from a fixed vocabulary;
    ``random`` is re-seeded so repeated calls return identical data.
    """
    words = "the quick brown fox jumps over a lazy dog".split(' ')
    random.seed(42)

    def get_sentence():
        length = random.randint(2, 7)
        return ' '.join(random.choice(words) for _ in range(length))

    # Fix: np.product was removed in NumPy 2.0 — use np.prod, the supported
    # spelling (and the one already used elsewhere in this file).
    return np.array([get_sentence() for _ in range(np.prod(shape))]).reshape(shape)
# Registry mapping the "input_get" names used in the yaml config files to the
# generator functions above; consumed by Test.make_input and
# load_tests_from_yaml.
_INPUT_FUNC_MAPPING = {
    "get_beach": get_beach,
    "get_beach_uint8": get_beach_uint8,
    "get_car": get_car,
    "get_ade20k": get_ade20k,
    "get_ade20k_uint8": get_ade20k_uint8,
    "get_random": get_random,
    "get_random256": get_random256,
    "get_ramp": get_ramp,
    "get_ones": get_ones,
    "get_zeros": get_zeros,
    "get_wav": get_wav,
    "get_zeros_int32": get_zeros_int32,
    "get_zeros_int64": get_zeros_int64,
    "get_ones_int32": get_ones_int32,
    "get_small_rand_int32": get_small_rand_int32,
    "get_zeros_then_ones": get_zeros_then_ones,
    "get_sentences": get_sentences,
}
# Per-domain opset constraint parsed from the yaml "opset_constraints" section;
# evaluated by Test.check_opset_constraints.
OpsetConstraint = namedtuple("OpsetConstraint", "domain, min_version, max_version, excluded_version")
class Test(object):
    """Main Test class."""

    # Shared configuration, assigned from the command line in main().
    cache_dir = None
    target = []

    def __init__(self, url, local, input_func, input_names, output_names,
                 disabled=False, rtol=0.01, atol=1e-6, ptol=0, dequantize=False,
                 check_only_shape=False, model_type="frozen", force_input_shape=False,
                 skip_tensorflow=False, opset_constraints=None, tf_min_version=None, tag=None,
                 skip_conversion=False, converted_model=None, signature_def=None, concrete_function=None,
                 large_model=False, structured_outputs=None, run_tf_frozen=None, use_custom_ops=False,
                 ort_profile=None, tf_profile=None):
        """Initialize one test case; arguments mirror the yaml config keys
        (see load_tests_from_yaml)."""
        self.url = url
        self.input_func = input_func
        self.local = local
        self.input_names = input_names
        self.output_names = output_names
        self.disabled = disabled
        self.large_model = large_model
        self.ort_profile = ort_profile
        self.tf_profile = tf_profile
        self.use_custom_ops = use_custom_ops
        # By default only non-large models are run through the frozen-graph path.
        if run_tf_frozen is None:
            run_tf_frozen = not self.large_model
        self.run_tf_frozen = run_tf_frozen
        self.structured_outputs = structured_outputs  # Needed to determine output order for tf_function
        self.rtol = rtol
        self.atol = atol
        self.ptol = ptol
        self.dequantize = dequantize
        self.check_only_shape = check_only_shape
        self.perf = None
        # Runtimes are filled in (ms per inference) when perf measurement is enabled.
        self.tf_runtime = 0
        self.onnx_runtime = 0
        self.model_type = model_type
        self.tag = tag
        self.force_input_shape = force_input_shape
        self.skip_tensorflow = skip_tensorflow
        self.skip_conversion = skip_conversion
        self.converted_model = converted_model
        self.opset_constraints = opset_constraints
        self.tf_min_version = tf_min_version
        self.signatures = [signature_def] if signature_def else None
        self.concrete_function = concrete_function

    def make_input(self, v):
        """Allows each input to specify its own function while defaulting to the input_get function"""
        if isinstance(v, dict):
            if "input_get" in v:
                return _INPUT_FUNC_MAPPING[v["input_get"]](v["shape"])
            if "value" in v:
                return np.array(v["value"])
        return self.input_func(v)

    def download_model(self):
        """Download model from url.

        Returns (downloaded_file_path, extraction_dir). Archives are extracted
        into a per-model directory under the shared cache.
        """
        cache_dir = Test.cache_dir
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        url = self.url
        if url.startswith(r'module://'):
            return self.download_from_module()

        # Derive archive type and extraction directory name from the url suffix
        # (or, for tflite/tfjs models, from the model type).
        k = url.rfind('/')
        fname = self.url[k + 1:]
        dir_name = fname + "_dir"
        ftype = None
        if url.endswith(".tar.gz") or url.endswith(".tgz"):
            ftype = 'tgz'
            dir_name = fname.replace(".tar.gz", "").replace(".tgz", "")
        elif url.endswith('.zip'):
            ftype = 'zip'
            dir_name = fname.replace(".zip", "")
        elif url.endswith('.tflite'):
            ftype = 'tflite'
            dir_name = fname.replace(".tflite", "")
        elif self.model_type == 'tflite':
            fname = self.local
            dir_name = fname.replace(".tflite", "") + "_dir"
        elif self.model_type == 'tfjs':
            ftype = 'tgz'
            fname = 'model.tar.gz'
            dir_name = "_".join(url.split("/")[5:-3]) + "_dir"
        dir_name = os.path.join(cache_dir, dir_name)
        os.makedirs(dir_name, exist_ok=True)
        fpath = os.path.join(dir_name, fname)
        if not os.path.exists(fpath):
            utils.get_url(url, fpath)
        model_path = os.path.join(dir_name, self.local)
        # Extract only when the target model file is not already present.
        if not os.path.exists(model_path) or self.local == ".":
            if ftype == 'tgz':
                tar = tarfile.open(fpath)
                tar.extractall(dir_name)
                tar.close()
            elif ftype == 'zip':
                zip_ref = zipfile.ZipFile(fpath, 'r')
                zip_ref.extractall(dir_name)
                zip_ref.close()
        return fpath, dir_name

    def download_from_module(self):
        """Download a model from a python module"""
        cache_dir = Test.cache_dir
        from importlib import import_module
        # url format: module://<module_name>/<model_class_name>
        i = self.url.rfind('//')
        module, model_name = self.url[i + 2:].split('/')
        mod_object = import_module(module)
        model_class = getattr(mod_object, model_name)
        model = model_class()
        fpath = os.path.join(cache_dir, self.local)
        model.save(fpath)
        return fpath, cache_dir

    def run_tensorflow(self, sess, inputs):
        """Run model on tensorflow so we have a reference output."""
        feed_dict = {}
        for k, v in inputs.items():
            k = sess.graph.get_tensor_by_name(k)
            feed_dict[k] = v
        logger.info("Running TF")
        result = sess.run(self.output_names, feed_dict=feed_dict)
        if self.perf:
            logger.info("Running TF perf")
            n = 0
            start = time.time()
            stop = start + PERF_TIME
            # Run in PERF_STEP batches for roughly PERF_TIME seconds.
            while time.time() < stop:
                for _ in range(PERF_STEP):
                    _ = sess.run(self.output_names, feed_dict=feed_dict)
                n += PERF_STEP
            self.tf_runtime = 1000 * (time.time() - start) / n
            logger.info("TF perf {:.2f}ms/inference, n={}".format(self.tf_runtime, n))
        return result

    def to_onnx(self, tf_graph, opset=None, extra_opset=None, shape_override=None, input_names=None,
                const_node_values=None, initialized_tables=None, tflite_path=None, tensors_to_rename=None,
                tfjs_path=None):
        """Convert the graph to ONNX and return the resulting tf2onnx Graph."""
        if extra_opset is None:
            extra_opset = []
        if self.use_custom_ops:
            # onnxruntime-extensions custom ops live in the contrib ops domain.
            extra_opset.append(utils.make_opsetid(constants.CONTRIB_OPS_DOMAIN, 1))
        return process_tf_graph(tf_graph, continue_on_error=False, opset=opset,
                                extra_opset=extra_opset, target=Test.target, shape_override=shape_override,
                                input_names=input_names, output_names=self.output_names,
                                const_node_values=const_node_values, initialized_tables=initialized_tables,
                                tflite_path=tflite_path, dequantize=self.dequantize,
                                tensors_to_rename=tensors_to_rename, tfjs_path=tfjs_path)

    def run_onnxruntime(self, name, model_proto, inputs, outputs, external_tensor_storage=None):
        """Run test against onnxruntime backend."""
        import onnxruntime as rt
        model_path = utils.save_onnx_model(TEMP_DIR, name, inputs, model_proto, include_test_data=True,
                                           as_text=utils.is_debug_mode(),
                                           external_tensor_storage=external_tensor_storage)
        logger.info("Model saved to %s", model_path)
        providers = ['CPUExecutionProvider']
        if rt.get_device() == "GPU":
            gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
            # NOTE(review): this tests len() of the raw env string, not the
            # number of devices — presumably intended as "gpu available"; verify.
            if gpus is None or len(gpus) > 1:
                providers = ['CUDAExecutionProvider']
        opt = rt.SessionOptions()
        if self.use_custom_ops:
            from onnxruntime_extensions import get_library_path
            opt.register_custom_ops_library(get_library_path())
        if self.ort_profile is not None:
            opt.enable_profiling = True
        m = rt.InferenceSession(model_path, sess_options=opt, providers=providers)
        results = m.run(outputs, inputs)
        if self.perf:
            n = 0
            start = time.time()
            stop = start + PERF_TIME
            while time.time() < stop:
                for _ in range(PERF_STEP):
                    _ = m.run(outputs, inputs)
                n += PERF_STEP
            self.onnx_runtime = 1000 * (time.time() - start) / n
            logger.info("ORT perf {:.2f}ms/inference, n={}".format(self.onnx_runtime, n))
        if self.ort_profile is not None:
            tmp_path = m.end_profiling()
            shutil.move(tmp_path, self.ort_profile)
        return results

    @staticmethod
    def create_onnx_file(name, model_proto, inputs, outdir, external_tensor_storage=None):
        """Write the converted model to outdir (.onnx, or .zip for large models)."""
        os.makedirs(outdir, exist_ok=True)
        if external_tensor_storage is None:
            model_path = os.path.join(outdir, name + ".onnx")
            utils.save_protobuf(model_path, model_proto)
        else:
            model_path = os.path.join(outdir, name + ".zip")
            utils.save_onnx_zip(model_path, model_proto, external_tensor_storage)
        logger.info("Created %s", model_path)

    def run_test(self, name, backend="onnxruntime", onnx_file=None, opset=None, extra_opset=None, perf=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(dir_name, self.local) if self.local != "." else dir_name
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        tf_reset_default_graph()
        tf.keras.backend.clear_session()
        input_names = list(self.input_names.keys())
        initialized_tables = {}
        outputs = self.output_names
        tflite_path = None
        tfjs_path = None
        to_rename = {}
        # Load the model in the format-specific way; all branches produce either
        # a graph_def or a tflite/tfjs path to convert from directly.
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            loaded = tf_loader.from_saved_model(model_path, None, None, self.tag, self.signatures,
                                                self.concrete_function, self.large_model,
                                                return_concrete_func=not self.run_tf_frozen,
                                                return_initialized_tables=True, return_tensors_to_rename=True)
            if not self.run_tf_frozen:
                # Must maintain ref to imported since concrete_func uses weak refs
                # pylint: disable=unused-variable
                graph_def, input_names, outputs, concrete_func, imported, initialized_tables, to_rename = loaded
            else:
                graph_def, input_names, outputs, initialized_tables, to_rename = loaded
        elif self.model_type in ["keras"]:
            graph_def, input_names, outputs = tf_loader.from_keras(model_path, input_names, outputs)
        elif self.model_type in ["tflite"]:
            tflite_path = model_path
            graph_def = None
        elif self.model_type in ["tfjs"]:
            tfjs_path = model_path
            graph_def = None
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(model_path, input_names, outputs)

        if utils.is_debug_mode():
            utils.save_protobuf(os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"), graph_def)

        # tflite models: compute the reference output with the TFLite interpreter.
        if tflite_path is not None:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k] = self.make_input(v)

            interpreter = tf.lite.Interpreter(tflite_path)
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            input_name_to_index = {n['name'].split(':')[0]: n['index'] for n in input_details}
            for k, v in inputs.items():
                interpreter.resize_tensor_input(input_name_to_index[k], v.shape)
            interpreter.allocate_tensors()

            def run_tflite():
                for k, v in inputs.items():
                    interpreter.set_tensor(input_name_to_index[k], v)
                interpreter.invoke()
                result = [interpreter.get_tensor(output['index']) for output in output_details]
                return result

            if not self.skip_tensorflow:
                tf_results = run_tflite()
                if self.perf:
                    logger.info("Running TFLite perf")
                    n = 0
                    start = time.time()
                    stop = start + PERF_TIME
                    while time.time() < stop:
                        for _ in range(PERF_STEP):
                            _ = run_tflite()
                        n += PERF_STEP
                    self.tf_runtime = 1000 * (time.time() - start) / n
                    logger.info("TFLite perf {:.2f}ms/inference, n={}".format(self.tf_runtime, n))
                logger.info("TFLite OK")

        # tfjs models: compute the reference output with the tfjs runner.
        if tfjs_path is not None:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k] = self.make_input(v)
            if not self.skip_tensorflow:
                logger.info("Running TFJS")
                tf_results = run_tfjs(tfjs_path, inputs, outputs)
                logger.info("TFJS OK")

        # Non-frozen path: run the concrete function directly for the reference
        # output (concrete_func was set in the saved_model branch above).
        if not self.run_tf_frozen:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[to_rename.get(k, k)] = tf.constant(self.make_input(v))
            tf_func = tf.function(concrete_func)
            logger.info("Running TF")
            tf_results_d = tf_func(**inputs)
            # If there is only a single output a dict might not be returned
            if isinstance(tf_results_d, tf.Tensor):
                tf_results = [tf_results_d]
            else:
                # Sort by key for a deterministic output order.
                tf_results = [tf_results_d[k] for k in sorted(tf_results_d.keys())]
            tf_results = [tf_res.numpy() for tf_res in tf_results]
            if self.perf:
                logger.info("Running TF perf")
                n = 0
                start = time.time()
                stop = start + PERF_TIME
                if self.tf_profile is not None:
                    tf.profiler.experimental.start(self.tf_profile)
                while time.time() < stop:
                    for _ in range(PERF_STEP):
                        _ = concrete_func(**inputs)
                    n += PERF_STEP
                if self.tf_profile is not None:
                    tf.profiler.experimental.stop()
                self.tf_runtime = 1000 * (time.time() - start) / n
                logger.info("TF perf {:.2f}ms/inference, n={}".format(self.tf_runtime, n))
            logger.info("TensorFlow OK")

        const_node_values = None
        tf_graph = None

        # Frozen-graph path: import the graph_def and run it in a session.
        if graph_def is not None:
            inputs = {}
            tf_reset_default_graph()
            with tf.Graph().as_default() as tf_graph:
                from tf2onnx.tf_utils import compress_graph_def
                if self.large_model:
                    const_node_values = compress_graph_def(graph_def)
                tf.import_graph_def(graph_def, name='')
            with tf_session(graph=tf_graph) as sess:
                # create the input data
                for k in input_names:
                    v = self.input_names[k]
                    t = sess.graph.get_tensor_by_name(k)
                    expected_dtype = tf.as_dtype(t.dtype).name
                    if isinstance(v, six.text_type) and v.startswith("np."):
                        # Inputs may be given as literal numpy expressions in the yaml.
                        np_value = eval(v)  # pylint: disable=eval-used
                        if expected_dtype != np_value.dtype:
                            logger.warning("dtype mismatch for input %s: expected=%s, actual=%s", k, expected_dtype,
                                           np_value.dtype)
                        inputs[k] = np_value.astype(expected_dtype)
                    else:
                        if expected_dtype == "string":
                            inputs[k] = self.make_input(v).astype(str).astype(object)
                        else:
                            inputs[k] = self.make_input(v).astype(expected_dtype)

                # run the model with tensorflow
                if self.skip_tensorflow:
                    logger.info("TensorFlow SKIPPED")
                elif self.run_tf_frozen:
                    if self.tf_profile is not None:
                        tf.profiler.experimental.start(self.tf_profile)
                    tf_results = self.run_tensorflow(sess, inputs)
                    if self.tf_profile is not None:
                        tf.profiler.experimental.stop()
                    logger.info("TensorFlow OK")
                tf_graph = sess.graph

        model_proto = None
        if self.skip_conversion:
            # Reuse a previously converted model instead of converting again.
            if self.large_model:
                external_tensor_storage = ExternalTensorStorage()
                model_proto = utils.model_proto_from_zip(self.converted_model, external_tensor_storage)
            else:
                external_tensor_storage = None
                model_proto = utils.model_proto_from_file(self.converted_model)
            logger.info("ONNX loaded from file")
        else:
            try:
                # convert model to onnx
                if self.force_input_shape:
                    shape_override = {k: list(v.shape) for k, v in inputs.items()}
                else:
                    shape_override = None
                onnx_graph = self.to_onnx(tf_graph, opset=opset, extra_opset=extra_opset,
                                          shape_override=shape_override, input_names=inputs.keys(),
                                          const_node_values=const_node_values,
                                          initialized_tables=initialized_tables, tflite_path=tflite_path,
                                          tensors_to_rename=to_rename, tfjs_path=tfjs_path)
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                print("ONNX", onnx_graph.dump_node_statistics())
                external_tensor_storage = ExternalTensorStorage() if self.large_model else None
                model_proto = onnx_graph.make_model("converted from tf2onnx",
                                                    external_tensor_storage=external_tensor_storage)
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file, external_tensor_storage)
                if self.converted_model:
                    if self.large_model:
                        utils.save_onnx_zip(self.converted_model, model_proto, external_tensor_storage)
                    else:
                        utils.save_protobuf(self.converted_model, model_proto)
                    logger.info("Created %s", self.converted_model)
            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "onnxruntime":
                # Map tensor names through the rename table before feeding ORT.
                struc_outputs = [to_rename.get(k, k) for k in self.output_names]
                struc_inputs = {to_rename.get(k, k): v for k, v in inputs.items()}
                onnx_results = self.run_onnxruntime(
                    name, model_proto, struc_inputs, struc_outputs, external_tensor_storage)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            # ptol allows a fraction of elements to be out of tolerance.
                            good_cnt = np.count_nonzero(np.isclose(tf_res, onnx_res, rtol=self.rtol, atol=self.atol))
                            bad_cnt = tf_res.size - good_cnt
                            if bad_cnt > self.ptol * tf_res.size:
                                # Prints a nice error message with stats
                                np.testing.assert_allclose(tf_res, onnx_res, rtol=self.rtol, atol=self.atol)
                logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)
        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)
        return False

    def check_opset_constraints(self, opset, extra_opset=None):
        """ Return (condition, reason) tuple, condition is True if constraints are met. """
        if not self.opset_constraints:
            return True, None

        opsets = {"onnx": opset}
        if extra_opset:
            for e in extra_opset:
                opsets[e.domain] = e.version

        for constraint in self.opset_constraints:
            domain = constraint.domain
            opset_version = opsets.get(domain)
            if not opset_version:
                return False, "conversion requires opset {}".format(domain)
            if constraint.min_version and opset_version < constraint.min_version:
                reason = "conversion requires opset {} >= {}".format(domain, constraint.min_version)
                return False, reason
            if constraint.max_version and opset_version > constraint.max_version:
                reason = "conversion requires opset {} <= {}".format(domain, constraint.max_version)
                return False, reason
            if constraint.excluded_version:
                # excluded_version may be a single version or a list of versions.
                if utils.is_list_or_tuple(constraint.excluded_version):
                    skip = opset_version in constraint.excluded_version
                else:
                    skip = opset_version == constraint.excluded_version
                if skip:
                    reason = "conversion requires opset {} != {}".format(domain, constraint.excluded_version)
                    return False, reason
        return True, None
def get_args():
    """Parse the command line into an argparse namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--cache", default=os.path.join(tempfile.gettempdir(), 'pre-trained'),
                        help="pre-trained models cache dir")
    parser.add_argument("--config", default="tests/run_pretrained_models.yaml", help="yaml config to use")
    parser.add_argument("--tests", help="tests to run")
    parser.add_argument("--target", default="", help="target platform")
    parser.add_argument("--backend", default="onnxruntime",
                        choices=["onnxruntime"], help="backend to use")
    parser.add_argument("--opset", type=int, default=None, help="opset to use")
    parser.add_argument("--extra_opset", default=None,
                        help="extra opset with format like domain:version, e.g. com.microsoft:1")
    parser.add_argument("--skip_tf_tests", help="skip non-tflite tests", default="False")
    parser.add_argument("--skip_tflite_tests", help="skip tflite tests", default="False")
    parser.add_argument("--skip_tfjs_tests", help="skip tfjs tests", default="False")
    parser.add_argument("--verbose", "-v", help="verbose output, option is additive", action="count")
    parser.add_argument("--debug", help="debug mode", action="store_true")
    parser.add_argument("--list", help="list tests", action="store_true")
    parser.add_argument("--onnx-file", help="create onnx file in directory")
    parser.add_argument("--perf", help="capture performance numbers")
    parser.add_argument("--include-disabled", help="include disabled tests", action="store_true")
    args = parser.parse_args()

    args.target = args.target.split(",")

    # The skip flags arrive as strings; normalize them to booleans.
    def is_true(flag):
        return flag.upper() == "TRUE"

    args.skip_tf_tests = is_true(args.skip_tf_tests)
    args.skip_tflite_tests = is_true(args.skip_tflite_tests)
    args.skip_tfjs_tests = is_true(args.skip_tfjs_tests)

    if args.extra_opset:
        tokens = args.extra_opset.split(':')
        if len(tokens) != 2:
            raise ValueError("invalid extra_opset argument")
        args.extra_opset = [utils.make_opsetid(tokens[0], int(tokens[1]))]
    return args
def load_tests_from_yaml(path):
    """Create test class from yaml file.

    Returns a dict mapping test name -> Test instance. Relative model and
    npy-file paths in the yaml are resolved against the yaml's directory.
    """
    path = os.path.abspath(path)
    base_dir = os.path.dirname(path)
    tests = {}
    # Fix: use a context manager so the yaml file handle is closed promptly
    # instead of being left to the garbage collector.
    with open(path, 'r') as f:
        config = yaml.safe_load(f.read())
    for name, settings in config.items():
        if name in tests:
            raise ValueError("Found duplicated test: {}".format(name))

        # parse model and url, non-absolute local path is relative to yaml directory
        model = settings.get("model")
        url = settings.get("url")
        if not url and not os.path.isabs(model):
            model = os.path.join(base_dir, model)

        # parse input_get
        input_func = settings.get("input_get")
        input_func = _INPUT_FUNC_MAPPING[input_func]

        # parse inputs, non-absolute npy file path for np.load is relative to yaml directory
        inputs = settings.get("inputs")
        for k, v in list(inputs.items()):
            if isinstance(v, str):
                # assume at most 1 match
                matches = re.findall(r"np\.load\((r?['\"].*?['\"])", v)
                if matches:
                    npy_path = matches[0].lstrip('r').strip("'").strip('"')
                    if not os.path.isabs(npy_path):
                        abs_npy_path = os.path.join(base_dir, npy_path)
                        inputs[k] = v.replace(matches[0], "r'{}'".format(abs_npy_path))

        # parse opset_constraints
        opset_constraints = []
        section = settings.get("opset_constraints")
        if section:
            for k, v in section.items():
                c = OpsetConstraint(k, min_version=v.get("min"), max_version=v.get("max"),
                                    excluded_version=v.get("excluded"))
                opset_constraints.append(c)

        # Forward only the optional settings that are actually present.
        kwargs = {}
        for kw in ["rtol", "atol", "ptol", "disabled", "check_only_shape", "model_type", "concrete_function",
                   "skip_tensorflow", "force_input_shape", "tf_min_version", "tag", "skip_conversion",
                   "converted_model", "signature_def", "large_model", "structured_outputs", "run_tf_frozen",
                   "use_custom_ops", "dequantize", "ort_profile", "tf_profile"]:
            if settings.get(kw) is not None:
                kwargs[kw] = settings[kw]

        test = Test(url, model, input_func, inputs, settings.get("outputs"),
                    opset_constraints=opset_constraints, **kwargs)
        tests[name] = test
    return tests
def main():
    """Run the configured pre-trained model tests; return the failure count."""
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    Test.cache_dir = args.cache
    Test.target = args.target
    tests = load_tests_from_yaml(args.config)
    if args.list:
        logger.info(sorted(tests.keys()))
        return 0
    if args.tests:
        test_keys = args.tests.split(",")
    else:
        test_keys = list(tests.keys())
    failed = 0
    count = 0
    for test in test_keys:
        logger.info("===================================")

        t = tests[test]
        # Skip filters only apply when running the full suite; explicitly
        # requested tests (--tests) bypass them.
        if args.tests is None:
            if t.disabled and not args.include_disabled:
                logger.info("Skip %s: disabled", test)
                continue

            if args.skip_tfjs_tests and t.model_type == "tfjs":
                logger.info("Skip %s: tfjs test", test)
                continue

            if args.skip_tflite_tests and t.model_type == "tflite":
                logger.info("Skip %s: tflite test", test)
                continue

            if args.skip_tf_tests and t.model_type not in ["tflite", "tfjs"]:
                logger.info("Skip %s: tf test", test)
                continue

        condition, reason = t.check_opset_constraints(args.opset, args.extra_opset)
        if not condition:
            logger.info("Skip %s: %s", test, reason)
            continue

        if t.tf_min_version:
            if tf_utils.get_tf_version() < Version(str(t.tf_min_version)):
                logger.info("Skip %s: %s %s", test, "Min TF version needed:", t.tf_min_version)
                continue

        count += 1
        try:
            logger.info("Running %s", test)
            ret = t.run_test(test, backend=args.backend, onnx_file=args.onnx_file,
                             opset=args.opset, extra_opset=args.extra_opset, perf=args.perf)
        except Exception:
            logger.error("Failed to run %s", test, exc_info=1)
            ret = None
        finally:
            # Clean up temporary conversion artifacts unless debugging.
            if not utils.is_debug_mode():
                utils.delete_directory(TEMP_DIR)
        if not ret:
            failed += 1

    logger.info("===================================")
    logger.info("RESULT: %s failed of %s, backend=%s", failed, count, args.backend)

    if args.perf:
        # Write a csv of per-test runtimes for perf tracking.
        with open(args.perf, "w") as f:
            f.write("test,tensorflow,onnx\n")
            for test in test_keys:
                t = tests[test]
                if t.perf:
                    # Report perf in ms per inference
                    f.write("{},{},{}\n".format(test, t.tf_runtime, t.onnx_runtime))
    return failed
# Exit with the number of failed tests so CI can detect failures.
if __name__ == "__main__":
    sys.exit(main())
| 34,586 | 40.872881 | 117 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/yolov3/yolov3.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import inspect
import colorsys
import onnx
import numpy as np
import tensorflow as tf
import keras
from PIL import Image, ImageFont, ImageDraw
from keras import backend as K
from keras.layers import Input
from keras.models import load_model
from mock_keras2onnx import convert_keras
from mock_keras2onnx import set_converter
from mock_keras2onnx.proto import onnx_proto
from tf2onnx.keras2onnx_api import get_maximum_opset_supported
from onnxconverter_common.onnx_fx import Graph
from onnxconverter_common.onnx_fx import GraphFunctionType as _Ty
from os.path import dirname, abspath
yolo3_dir = os.path.join(os.path.dirname(__file__), '../../../keras-yolo3')
if os.path.exists(yolo3_dir):
sys.path.insert(0, yolo3_dir)
import yolo3
from yolo3.model import yolo_body, tiny_yolo_body, yolo_boxes_and_scores
from yolo3.utils import letterbox_image
class YOLOEvaluationLayer(keras.layers.Layer):
    """Keras layer that decodes raw YOLO head outputs into candidate boxes and
    per-class scores for the original image size."""

    def __init__(self, **kwargs):
        # NOTE(review): kwargs are consumed here and not forwarded to the base
        # Layer __init__ — confirm this is intentional for this example.
        super(YOLOEvaluationLayer, self).__init__()
        self.anchors = np.array(kwargs.get('anchors'))
        self.num_classes = kwargs.get('num_classes')

    def get_config(self):
        # Serialization support: only the custom fields are stored.
        config = {
            "anchors": self.anchors,
            "num_classes": self.num_classes,
        }
        return config

    def call(self, inputs, **kwargs):
        """Evaluate YOLO model on given input and return filtered boxes.

        inputs: list of per-scale head outputs followed by the input image
        shape tensor as the last element.
        """
        yolo_outputs = inputs[0:-1]
        input_image_shape = K.squeeze(inputs[-1], axis=0)
        num_layers = len(yolo_outputs)
        # Anchor indices per detection scale: 3 scales for full YOLOv3,
        # 2 scales for the tiny variant.
        anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5],
                                                                                 [1, 2, 3]]  # default setting
        # Network input resolution is 32x the coarsest feature map.
        input_shape = K.shape(yolo_outputs[0])[1:3] * 32
        boxes = []
        box_scores = []
        for l in range(num_layers):
            _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], self.anchors[anchor_mask[l]], self.num_classes,
                                                        input_shape, input_image_shape)
            boxes.append(_boxes)
            box_scores.append(_box_scores)
        # Flatten all scales into one list of boxes/scores.
        boxes = K.concatenate(boxes, axis=0)
        box_scores = K.concatenate(box_scores, axis=0)
        return [boxes, box_scores]

    def compute_output_shape(self, input_shape):
        assert isinstance(input_shape, list)
        # Boxes are (N, 4); scores are (N, num_classes), both dynamic in N.
        return [(None, 4), (None, None)]
class YOLONMSLayer(keras.layers.Layer):
    """Per-class non-max suppression over the evaluation layer's output.

    Construction kwargs:
        max_boxes: int, maximum detections kept per class (default 20).
        score_threshold: float, minimum class score to consider (default .6).
        iou_threshold: float, NMS IoU threshold (default .5).
        num_classes: int, number of object classes.
    Remaining kwargs are forwarded to the base ``Layer`` so the layer
    round-trips through ``get_config``/``from_config``.
    """

    def __init__(self, **kwargs):
        self.max_boxes = kwargs.pop('max_boxes', 20)
        self.score_threshold = kwargs.pop('score_threshold', .6)
        self.iou_threshold = kwargs.pop('iou_threshold', .5)
        self.num_classes = kwargs.pop('num_classes', None)
        # Callers pass anchors= for symmetry with YOLOEvaluationLayer; it is
        # unused here, so discard it before forwarding to the base Layer.
        kwargs.pop('anchors', None)
        super(YOLONMSLayer, self).__init__(**kwargs)

    def get_config(self):
        config = {
            "max_boxes": self.max_boxes,
            "score_threshold": self.score_threshold,
            "iou_threshold": self.iou_threshold,
            "num_classes": self.num_classes,
        }
        base_config = super(YOLONMSLayer, self).get_config()
        base_config.update(config)
        return base_config

    def call(self, inputs, **kwargs):
        """Run per-class NMS; inputs = [boxes (N, 4), box_scores (N, num_classes)].

        Returns [boxes (1, N, 4), scores (1, num_classes, N),
        index triples (1, M, 3)] where each triple is
        (batch index, class id, original box index).
        """
        boxes = inputs[0]
        box_scores = inputs[1]
        box_scores_transpose = tf.transpose(box_scores, perm=[1, 0])
        boxes_number = tf.shape(boxes)[0]
        box_range = tf.range(boxes_number)
        mask = box_scores >= self.score_threshold
        max_boxes_tensor = K.constant(self.max_boxes, dtype='int32')
        classes_ = []
        batch_indexs_ = []
        class_box_range_ = []
        for c in range(self.num_classes):
            # Keep only boxes whose score for class c clears the threshold.
            class_boxes = tf.boolean_mask(boxes, mask[:, c])
            class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
            class_box_range = tf.boolean_mask(box_range, mask[:, c])
            nms_index = tf.image.non_max_suppression(
                class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=self.iou_threshold)
            class_box_scores = K.gather(class_box_scores, nms_index)
            class_box_range = K.gather(class_box_range, nms_index)
            classes = K.ones_like(class_box_scores, 'int32') * c
            # Single-image batch, so every surviving box has batch index 0.
            batch_index = K.zeros_like(class_box_scores, 'int32')
            batch_indexs_.append(batch_index)
            classes_.append(classes)
            class_box_range_.append(class_box_range)
        classes_ = K.concatenate(classes_, axis=0)
        batch_indexs_ = K.concatenate(batch_indexs_, axis=0)
        class_box_range_ = K.concatenate(class_box_range_, axis=0)
        boxes_1 = tf.expand_dims(boxes, 0)
        classes_1 = tf.expand_dims(classes_, 1)
        batch_indexs_ = tf.expand_dims(batch_indexs_, 1)
        class_box_range_ = tf.expand_dims(class_box_range_, 1)
        box_scores_transpose_1 = tf.expand_dims(box_scores_transpose, 0)
        # Each row: (batch index, class id, original box index).
        nms_final_ = K.concatenate([batch_indexs_, classes_1, class_box_range_], axis=1)
        nms_final_1 = tf.expand_dims(nms_final_, 0)
        return [boxes_1, box_scores_transpose_1, nms_final_1]

    def compute_output_shape(self, input_shape):
        assert isinstance(input_shape, list)
        return [(None, None, 4), (None, self.num_classes, None), (None, None, 3)]
class YOLO(object):
    """End-to-end YOLOv3 detector wrapper.

    Builds the Keras YOLOv3 (or tiny-YOLOv3) body plus the evaluation and
    NMS post-processing sub-models, and runs detection through an ONNX
    runtime session supplied by the caller (see ``detect_with_onnx``).
    """

    def __init__(self, model_path='model_data/yolo.h5', anchors_path='model_data/yolo_anchors.txt', yolo3_dir=None):
        """Set up paths, class names, anchors and the drawing palette."""
        self.yolo3_dir = yolo3_dir
        self.model_path = model_path
        self.anchors_path = anchors_path
        self.classes_path = 'model_data/coco_classes.txt'
        # NOTE(review): self.score / self.iou appear unused here; the actual
        # thresholds live in YOLONMSLayer's defaults -- confirm before removing.
        self.score = 0.3
        self.iou = 0.45
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.sess = K.get_session()
        self.model_image_size = (416, 416) # fixed size or (None, None), hw
        self.session = None
        self.final_model = None

        # Generate colors for drawing bounding boxes: one evenly spaced hue
        # per class, converted to 0-255 RGB.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.
        K.set_learning_phase(0)

    @staticmethod
    def _get_data_path(name, yolo3_dir):
        """Resolve *name* relative to the keras-yolo3 checkout when not absolute."""
        path = os.path.expanduser(name)
        if not os.path.isabs(path):
            if yolo3_dir is None:
                yolo3_dir = os.path.dirname(inspect.getabsfile(yolo3))
            path = os.path.join(yolo3_dir, os.path.pardir, path)
        return path

    def _get_class(self):
        """Read the class-name file, one label per line, stripped."""
        classes_path = self._get_data_path(self.classes_path, self.yolo3_dir)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        """Read the anchor file (comma-separated floats) into an (N, 2) array."""
        anchors_path = self._get_data_path(self.anchors_path, self.yolo3_dir)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)

    def load_model(self, yolo_weights=None):
        """Build the detection pipeline: backbone + evaluation + NMS models.

        Creates self.yolo_model, self.evaluation_model, self.nms_model and the
        stitched self.final_model (also saved to 'final_model.h5').
        :param yolo_weights: optional pre-built Keras model to use as backbone.
        """
        model_path = self._get_data_path(self.model_path, self.yolo3_dir)
        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        if yolo_weights is None:
            # Load model, or construct model and load weights.
            num_anchors = len(self.anchors)
            num_classes = len(self.class_names)
            # 6 anchors means two detection heads, i.e. the tiny variant.
            is_tiny_version = num_anchors == 6 # default setting
            try:
                self.yolo_model = load_model(model_path, compile=False)
            except:
                self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes) \
                    if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
                # NOTE(review): loads from self.model_path (possibly relative)
                # rather than the resolved model_path above -- confirm intended.
                self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
            else:
                # Each head predicts (num_anchors/num_heads) * (5 + classes) channels.
                assert self.yolo_model.layers[-1].output_shape[-1] == \
                       num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
                    'Mismatch between model and given anchor and class sizes'
        else:
            self.yolo_model = yolo_weights

        input_image_shape = keras.Input(shape=(2,), name='image_shape')
        image_input = keras.Input((None, None, 3), dtype='float32', name='input_1')
        y = list(self.yolo_model(image_input))
        y.append(input_image_shape)

        # Stand-alone evaluation sub-model with its own placeholder inputs:
        # len(y) == 3 (2 heads + image shape) is tiny-YOLO, len(y) == 4 is full.
        if len(y) == 3:
            evaluation_input = [keras.Input((None, None, 255), dtype='float32', name='conv2d_10'),
                                keras.Input((None, None, 255), dtype='float32', name='conv2d_13'),
                                keras.Input(shape=(2,), name='image_shape')
                                ]
        elif len(y) == 4:
            evaluation_input = [keras.Input((None, None, 255), dtype='float32', name='conv2d_59'),
                                keras.Input((None, None, 255), dtype='float32', name='conv2d_67'),
                                keras.Input((None, None, 255), dtype='float32', name='conv2d_75'),
                                keras.Input(shape=(2,), name='image_shape')
                                ]
        boxes, box_scores = \
            YOLOEvaluationLayer(anchors=self.anchors, num_classes=len(self.class_names))(inputs=evaluation_input)
        self.evaluation_model = keras.Model(inputs=evaluation_input,
                                            outputs=[boxes, box_scores])

        # Stand-alone NMS sub-model, again with fresh placeholder inputs.
        nms_input = [keras.Input((4,), dtype='float32', name='concat_9'),
                     keras.Input((80,), dtype='float32', name='concat_10'),]
        out_boxes, out_scores, out_indices = \
            YOLONMSLayer(anchors=self.anchors, num_classes=len(self.class_names))(
                inputs=nms_input)
        self.nms_model = keras.Model(inputs=nms_input,
                                     outputs=[out_boxes, out_scores, out_indices])

        # Full pipeline wired end-to-end on the real backbone outputs.
        boxes, box_scores = \
            YOLOEvaluationLayer(anchors=self.anchors, num_classes=len(self.class_names))(inputs=y)
        out_boxes, out_scores, out_indices = \
            YOLONMSLayer(anchors=self.anchors, num_classes=len(self.class_names))(
                inputs = [boxes, box_scores])
        self.final_model = keras.Model(inputs=[image_input, input_image_shape],
                                       outputs = [out_boxes, out_scores, out_indices])
        self.final_model.save('final_model.h5')
        print('{} model, anchors, and classes loaded.'.format(model_path))

    def prepare_keras_data(self, image):
        """Letterbox *image* to the model size and return a (1, H, W, 3) float batch in [0, 1]."""
        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            # Round the image size down to the nearest multiple of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        return image_data

    def detect_with_onnx(self, image):
        """Detect objects in a PIL *image* via self.session (ONNX) and draw the boxes.

        Requires self.session to be an initialized onnxruntime InferenceSession.
        Returns the annotated PIL image.
        """
        self.load_model()
        image_data = self.prepare_keras_data(image)
        # NOTE(review): the Keras predictions below are computed but unused --
        # presumably kept as a sanity reference; confirm before removing.
        all_boxes_k, all_scores_k, indices_k = self.final_model.predict([image_data, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2)])

        # ONNX model expects channel-first input (NCHW).
        image_data_onnx = np.transpose(image_data, [0, 3, 1, 2])

        feed_f = dict(zip(['input_1', 'image_shape'],
                          (image_data_onnx, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2))))
        all_boxes, all_scores, indices = self.session.run(None, input_feed=feed_f)

        # indices rows are (batch, class, box) triples produced by YOLONMSLayer.
        out_boxes, out_scores, out_classes = [], [], []
        for idx_ in indices[0]:
            out_classes.append(idx_[1])
            out_scores.append(all_scores[tuple(idx_)])
            idx_1 = (idx_[0], idx_[2])
            out_boxes.append(all_boxes[idx_1])

        font = ImageFont.truetype(font=self._get_data_path('font/FiraMono-Medium.otf', self.yolo3_dir),
                                  size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            # Clamp box corners to the image bounds.
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

            # Put the label above the box unless it would fall off the top.
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # Draw nested rectangles to simulate line thickness. (The inner
            # loop shadows the outer `i`, harmlessly: `i` is not used after it.)
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw
        return image
def detect_img(yolo, img_url, model_file_name):
    """Run ONNX-based detection on *img_url* and save an annotated copy.

    The annotated image is written next to the input as '<name>_score<ext>'.
    :param yolo: a YOLO instance (its .session is replaced here)
    :param img_url: path to the input image
    :param model_file_name: path to the ONNX model to load
    """
    import onnxruntime
    image = Image.open(img_url)
    yolo.session = onnxruntime.InferenceSession(model_file_name)
    r_image = yolo.detect_with_onnx(image)
    # os.path.splitext handles extension-less paths and dotted directory names,
    # unlike str.rindex('.') which raised ValueError / split at the wrong dot.
    base, ext = os.path.splitext(img_url)
    score_file = base + '_score' + ext
    r_image.save(score_file, "JPEG")
def convert_NMSLayer(scope, operator, container):
    # type: (mock_keras2onnx.common.InterimContext, mock_keras2onnx.common.Operator, mock_keras2onnx.common.OnnxObjectContainer) -> None
    """Intentional no-op converter for YOLONMSLayer.

    Presumably the real NMS subgraph is stitched in later via the onnx_fx
    Graph machinery (see convert_model) -- this placeholder only keeps
    convert_keras from rejecting the custom layer.
    """
# Register the placeholder converter so convert_keras accepts YOLONMSLayer.
set_converter(YOLONMSLayer, convert_NMSLayer)

# Sub-graph handles for the tiny-YOLO variant; populated by convert_model().
yolo_model_graph_tiny = None
evaluation_model_graph_tiny = None
nms_model_graph_tiny = None
@Graph.trace(
    input_types=[_Ty.F(shape=['N', 3, 'M1', 'M2']), _Ty.F(shape=['N', 2])],
    output_types=[_Ty.F(shape=[1, 'M1', 4]), _Ty.F(shape=[1, 80, 'M2']), _Ty.I32(shape=[1, 'M3', 3])],
    outputs=["yolonms_layer_1", "yolonms_layer_1_1", "yolonms_layer_1_2"])
def combine_model_tiny(input_1, image_shape):
    """Traced ONNX graph chaining tiny-YOLO backbone -> evaluation -> NMS.

    The three sub-graphs are module globals populated by convert_model()
    before this function's .oxml is read.
    """
    global yolo_model_graph_tiny
    global evaluation_model_graph_tiny
    global nms_model_graph_tiny
    # Backbone yields the conv-head outputs; append the image shape so the
    # evaluation graph can rescale boxes to the original image.
    output_1 = yolo_model_graph_tiny(input_1)
    input_2 = output_1 + (image_shape,)
    yolo_evaluation_layer_1, yolo_evaluation_layer_2 = evaluation_model_graph_tiny(*input_2)
    nms_layer_1_1, nms_layer_1_2, nms_layer_1_3 = nms_model_graph_tiny(yolo_evaluation_layer_1, yolo_evaluation_layer_2)
    return nms_layer_1_1, nms_layer_1_2, nms_layer_1_3
# Sub-graph handles for the full YOLOv3 variant; populated by convert_model().
yolo_model_graph = None
evaluation_model_graph = None
nms_model_graph = None
@Graph.trace(
    input_types=[_Ty.F(shape=['N', 3, 'M1', 'M2']), _Ty.F(shape=['N', 2])],
    output_types=[_Ty.F(shape=[1, 'M1', 4]), _Ty.F(shape=[1, 80, 'M2']), _Ty.I32(shape=[1, 'M3', 3])],
    outputs=["yolonms_layer_1", "yolonms_layer_1_1", "yolonms_layer_1_2"])
def combine_model(input_1, image_shape):
    """Traced ONNX graph chaining full YOLOv3 backbone -> evaluation -> NMS.

    The three sub-graphs are module globals populated by convert_model()
    before this function's .oxml is read.
    """
    global yolo_model_graph
    global evaluation_model_graph
    global nms_model_graph
    # Backbone yields the conv-head outputs; append the image shape so the
    # evaluation graph can rescale boxes to the original image.
    output_1 = yolo_model_graph(input_1)
    input_2 = output_1 + (image_shape,)
    yolo_evaluation_layer_1, yolo_evaluation_layer_2 = evaluation_model_graph(*input_2)
    nms_layer_1_1, nms_layer_1_2, nms_layer_1_3 = nms_model_graph(yolo_evaluation_layer_1, yolo_evaluation_layer_2)
    return nms_layer_1_1, nms_layer_1_2, nms_layer_1_3
def convert_model(yolo, is_tiny_yolo, target_opset=None):
    """Convert the three Keras sub-models to ONNX and stitch them into one graph.

    :param yolo: a YOLO instance on which load_model() has already been called
    :param is_tiny_yolo: True for the two-head tiny-YOLOv3 variant
    :param target_opset: ONNX opset to emit; defaults to the highest supported
    :return: the combined onnx ModelProto (via the traced combine_* function)
    """
    if target_opset is None:
        target_opset = get_maximum_opset_supported()
    # The NHWC Keras image input is converted to channel-first for ONNX.
    onnxmodel_1 = convert_keras(yolo.yolo_model, target_opset=target_opset, channel_first_inputs=['input_1'])
    onnxmodel_2 = convert_keras(yolo.evaluation_model, target_opset=target_opset)
    onnxmodel_3 = convert_keras(yolo.nms_model, target_opset=target_opset)
    Graph.opset = target_opset
    # Publish the three converted graphs through module globals -- the traced
    # combine_model(_tiny) functions read them when their .oxml is built.
    if is_tiny_yolo:
        global yolo_model_graph_tiny
        global evaluation_model_graph_tiny
        global nms_model_graph_tiny
        yolo_model_graph_tiny = Graph.load(onnxmodel_1,
                                           inputs=[input_.name for input_ in onnxmodel_1.graph.input])  # define the order of arguments
        evaluation_model_graph_tiny = Graph.load(onnxmodel_2,
                                                 inputs=[input_.name for input_ in onnxmodel_2.graph.input],
                                                 outputs=[output_.name for output_ in onnxmodel_2.graph.output])
        nms_model_graph_tiny = Graph.load(onnxmodel_3,
                                          inputs=[input_.name for input_ in onnxmodel_3.graph.input],
                                          outputs=[output_.name for output_ in onnxmodel_3.graph.output])
        return combine_model_tiny.oxml
    else:
        global yolo_model_graph
        global evaluation_model_graph
        global nms_model_graph
        yolo_model_graph = Graph.load(onnxmodel_1,
                                      inputs=[input_.name for input_ in
                                              onnxmodel_1.graph.input])  # define the order of arguments
        evaluation_model_graph = Graph.load(onnxmodel_2,
                                            inputs=[input_.name for input_ in onnxmodel_2.graph.input],
                                            outputs=[output_.name for output_ in onnxmodel_2.graph.output])
        nms_model_graph = Graph.load(onnxmodel_3,
                                     inputs=[input_.name for input_ in onnxmodel_3.graph.input],
                                     outputs=[output_.name for output_ in onnxmodel_3.graph.output])
        return combine_model.oxml
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Need an image file for object detection.")
        sys.exit(-1)

    target_opset = 10
    is_tiny_yolo = False
    model_file_name = 'model_data/yolov3.onnx'
    model_path = 'model_data/yolo.h5'  # model path or trained weights path
    anchors_path = 'model_data/yolo_anchors.txt'
    '''
    # For tiny yolov3 case, use:
    is_tiny_yolo = True
    model_file_name = 'model_data/yolov3-tiny.onnx'
    model_path = 'model_data/yolo-tiny.h5'
    anchors_path = 'model_data/tiny_yolo_anchors.txt'
    '''
    if not os.path.exists(model_file_name):
        yolo = YOLO(model_path, anchors_path)
        yolo.load_model()
        # BUG FIX: convert_model's signature is (yolo, is_tiny_yolo,
        # target_opset=None); the old call convert_model(yolo, target_opset)
        # passed the opset as is_tiny_yolo and never set the opset at all.
        onnxmodel = convert_model(yolo, is_tiny_yolo, target_opset)
        onnx.save_model(onnxmodel, model_file_name)
    detect_img(YOLO(), sys.argv[1], model_file_name)
| 19,425 | 43.657471 | 158 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/lpcnet/convert_lpcnet_to_onnx.py | # SPDX-License-Identifier: Apache-2.0
import lpcnet
import sys

# Fail fast with a clear message instead of an IndexError after the
# (expensive) model construction below.
if len(sys.argv) < 2:
    sys.exit("usage: convert_lpcnet_to_onnx.py <weights.h5>")
model_file = sys.argv[1]

# Build the LPCNet graph on CPU and restore the trained weights.
model, enc, dec = lpcnet.new_lpcnet_model(use_gpu=False)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.load_weights(model_file)

# Convert the encoder and decoder sub-models separately and save each one.
import mock_keras2onnx
oxml_enc = mock_keras2onnx.convert_keras(enc, 'lpcnet_enc')
oxml_dec = mock_keras2onnx.convert_keras(dec, 'lpcnet_dec')

import onnx
onnx.save(oxml_enc, "lpcnet_enc.onnx")
onnx.save(oxml_dec, "lpcnet_dec.onnx")
| 526 | 28.277778 | 112 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/model_source/densenet_2/densenet_2.py | # SPDX-License-Identifier: Apache-2.0
# From https://github.com/tdeboissiere/DeepLearningImplementations/blob/master/DenseNet/densenet.py
# Modifications Copyright (c) Microsoft.
from mock_keras2onnx.proto import keras
from keras.models import Model
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import AveragePooling2D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers import Input, Concatenate
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
import keras.backend as K
def conv_factory(x, concat_axis, nb_filter,
                 dropout_rate=None, weight_decay=1E-4):
    """BatchNorm -> ReLU -> 3x3 same-padded Conv2D, with optional Dropout.

    :param x: input keras tensor
    :param concat_axis: int -- channel/concatenation axis for BatchNorm
    :param nb_filter: int -- number of convolution filters
    :param dropout_rate: float or None -- dropout rate (skipped when falsy)
    :param weight_decay: float -- L2 regularization factor
    :returns: keras tensor after BN-ReLU-Conv(-Dropout)
    """
    out = BatchNormalization(axis=concat_axis,
                             gamma_regularizer=l2(weight_decay),
                             beta_regularizer=l2(weight_decay))(x)
    out = Activation('relu')(out)
    out = Conv2D(nb_filter, (3, 3),
                 kernel_initializer="he_uniform",
                 padding="same",
                 use_bias=False,
                 kernel_regularizer=l2(weight_decay))(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    return out
def transition(x, concat_axis, nb_filter,
               dropout_rate=None, weight_decay=1E-4):
    """Transition block: BN -> ReLU -> 1x1 Conv2D (-> Dropout) -> 2x2 AvgPool.

    Halves the spatial resolution between dense blocks.

    :param x: input keras tensor
    :param concat_axis: int -- channel/concatenation axis for BatchNorm
    :param nb_filter: int -- number of 1x1 convolution filters
    :param dropout_rate: float or None -- dropout rate (skipped when falsy)
    :param weight_decay: float -- L2 regularization factor
    :returns: downsampled keras tensor
    """
    out = BatchNormalization(axis=concat_axis,
                             gamma_regularizer=l2(weight_decay),
                             beta_regularizer=l2(weight_decay))(x)
    out = Activation('relu')(out)
    out = Conv2D(nb_filter, (1, 1),
                 kernel_initializer="he_uniform",
                 padding="same",
                 use_bias=False,
                 kernel_regularizer=l2(weight_decay))(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    out = AveragePooling2D((2, 2), strides=(2, 2))(out)
    return out
def denseblock(x, concat_axis, nb_layers, nb_filter, growth_rate,
               dropout_rate=None, weight_decay=1E-4):
    """Dense block keeping an explicit list of all produced feature maps.

    Each conv_factory output is appended to the list and the concatenation of
    the whole list feeds the next layer.

    :param x: input keras tensor
    :param concat_axis: int -- concatenation axis
    :param nb_layers: int -- number of conv_factory layers in the block
    :param nb_filter: int -- running filter count (grows by growth_rate per layer)
    :param growth_rate: int -- filters added per layer
    :param dropout_rate: float or None -- dropout rate
    :param weight_decay: float -- L2 regularization factor
    :returns: (output tensor, updated nb_filter)
    """
    feature_maps = [x]
    for _ in range(nb_layers):
        x = conv_factory(x, concat_axis, growth_rate,
                         dropout_rate, weight_decay)
        feature_maps.append(x)
        x = Concatenate(axis=concat_axis)(feature_maps)
        nb_filter += growth_rate
    return x, nb_filter
def denseblock_altern(x, concat_axis, nb_layers, nb_filter, growth_rate,
                      dropout_rate=None, weight_decay=1E-4):
    """Alternative dense block: concatenates each new branch directly onto the
    running tensor instead of keeping an explicit feature-map list.

    :param x: input keras tensor
    :param concat_axis: int -- concatenation axis
    :param nb_layers: int -- number of conv_factory layers in the block
    :param nb_filter: int -- running filter count (grows by growth_rate per layer)
    :param growth_rate: int -- filters added per layer
    :param dropout_rate: float or None -- dropout rate
    :param weight_decay: float -- L2 regularization factor
    :returns: (output tensor, updated nb_filter)
    """
    for _ in range(nb_layers):
        branch = conv_factory(x, concat_axis, growth_rate,
                              dropout_rate, weight_decay)
        x = Concatenate(axis=concat_axis)([branch, x])
        nb_filter += growth_rate
    return x, nb_filter
def DenseNet(nb_classes, img_dim, depth, nb_dense_block, growth_rate,
             nb_filter, dropout_rate=None, weight_decay=1E-4):
    """ Build the DenseNet model
    :param nb_classes: int -- number of classes
    :param img_dim: tuple -- (channels, rows, columns)
    :param depth: int -- how many layers
    :param nb_dense_block: int -- number of dense blocks to add to end
    :param growth_rate: int -- number of filters to add
    :param nb_filter: int -- number of filters
    :param dropout_rate: float -- dropout rate
    :param weight_decay: float -- weight decay
    :returns: keras model with nb_layers of conv_factory appended
    :rtype: keras model
    """

    if K.common.image_dim_ordering() == "th":
        concat_axis = 1
    elif K.common.image_dim_ordering() == "tf":
        concat_axis = -1
    else:
        # Fail with a clear error instead of an UnboundLocalError below.
        raise ValueError('Unsupported image_dim_ordering: %s'
                         % K.common.image_dim_ordering())

    model_input = Input(shape=img_dim)

    assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"

    # layers in each dense block
    nb_layers = int((depth - 4) / 3)

    # Initial convolution
    x = Conv2D(nb_filter, (3, 3),
               kernel_initializer="he_uniform",
               padding="same",
               name="initial_conv2D",
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(model_input)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = denseblock(x, concat_axis, nb_layers,
                                  nb_filter, growth_rate,
                                  dropout_rate=dropout_rate,
                                  weight_decay=weight_decay)
        # add transition
        # BUG FIX: concat_axis was missing from this call, so nb_filter was
        # passed as the concatenation axis and the required nb_filter argument
        # was absent, raising a TypeError at model-build time.
        x = transition(x, concat_axis, nb_filter, dropout_rate=dropout_rate,
                       weight_decay=weight_decay)

    # The last denseblock does not have a transition
    x, nb_filter = denseblock(x, concat_axis, nb_layers,
                              nb_filter, growth_rate,
                              dropout_rate=dropout_rate,
                              weight_decay=weight_decay)

    # Final BN-ReLU, global pooling and the softmax classifier head.
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)
    x = Dense(nb_classes,
              activation='softmax',
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay))(x)

    densenet = Model(inputs=[model_input], outputs=[x], name="DenseNet")

    return densenet
| 7,208 | 34.338235 | 99 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/model_source/densenet_1/densenet_1.py | # SPDX-License-Identifier: Apache-2.0
# From https://github.com/titu1994/DenseNet/blob/master/densenet.py
# Modifications Copyright (c) Microsoft.
'''DenseNet models for Keras.
# Reference
- [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf)
- [The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation](https://arxiv.org/pdf/1611.09326.pdf)
'''
import warnings
from mock_keras2onnx.proto import keras
from keras.models import Model
from keras.layers.core import Dense, Dropout, Activation, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.utils.layer_utils import convert_all_kernels_in_model, convert_dense_weights_data_format
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras_applications.imagenet_utils import _obtain_input_shape
from keras.applications.imagenet_utils import decode_predictions
import keras.backend as K
from subpixel import SubPixelUpscaling
DENSENET_121_WEIGHTS_PATH = r'https://github.com/titu1994/DenseNet/releases/download/v3.0/DenseNet-BC-121-32.h5'
DENSENET_161_WEIGHTS_PATH = r'https://github.com/titu1994/DenseNet/releases/download/v3.0/DenseNet-BC-161-48.h5'
DENSENET_169_WEIGHTS_PATH = r'https://github.com/titu1994/DenseNet/releases/download/v3.0/DenseNet-BC-169-32.h5'
DENSENET_121_WEIGHTS_PATH_NO_TOP = r'https://github.com/titu1994/DenseNet/releases/download/v3.0/DenseNet-BC-121-32-no-top.h5'
DENSENET_161_WEIGHTS_PATH_NO_TOP = r'https://github.com/titu1994/DenseNet/releases/download/v3.0/DenseNet-BC-161-48-no-top.h5'
DENSENET_169_WEIGHTS_PATH_NO_TOP = r'https://github.com/titu1994/DenseNet/releases/download/v3.0/DenseNet-BC-169-32-no-top.h5'
def preprocess_input(x, data_format=None):
    """Preprocess a batch of images in place: RGB->BGR, mean-subtract, scale.

    # Arguments
        x: input Numpy tensor, 3D or 4D (mutated in place).
        data_format: 'channels_first' or 'channels_last'; defaults to the
            Keras image data format.

    # Returns
        The preprocessed tensor (a view over the input's buffer).
    """
    if data_format is None:
        data_format = K.image_data_format()
    assert data_format in {'channels_last', 'channels_first'}

    # ImageNet BGR channel means.
    bgr_mean = np.array([103.939, 116.779, 123.68])
    if data_format == 'channels_first':
        # 'RGB'->'BGR' along the channel axis, then zero-center per channel.
        if x.ndim == 3:
            x = x[::-1, ...]
            x -= bgr_mean.reshape(3, 1, 1)
        else:
            x = x[:, ::-1, ...]
            x -= bgr_mean.reshape(1, 3, 1, 1)
    else:
        # 'RGB'->'BGR'; the mean broadcasts over the trailing channel axis.
        x = x[..., ::-1]
        x -= bgr_mean
    x *= 0.017  # scale values
    return x
def DenseNet(input_shape=None, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1, nb_layers_per_block=-1,
             bottleneck=False, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, subsample_initial_block=False,
             include_top=True, weights=None, input_tensor=None,
             classes=10, activation='softmax'):
    '''Instantiate the DenseNet architecture,
        optionally loading weights pre-trained
        on CIFAR-10. Note that when using TensorFlow,
        for best performance you should set
        `image_data_format='channels_last'` in your Keras config
        at ~/.keras/keras.json.
        The model and the weights are compatible with both
        TensorFlow and Theano. The dimension ordering
        convention used by the model is the one
        specified in your Keras config file.
        # Arguments
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(32, 32, 3)` (with `channels_last` dim ordering)
                or `(3, 32, 32)` (with `channels_first` dim ordering).
                It should have exactly 3 inputs channels,
                and width and height should be no smaller than 8.
                E.g. `(200, 200, 3)` would be one valid value.
            depth: number or layers in the DenseNet
            nb_dense_block: number of dense blocks to add to end (generally = 3)
            growth_rate: number of filters to add per dense block
            nb_filter: initial number of filters. -1 indicates initial
                number of filters is 2 * growth_rate
            nb_layers_per_block: number of layers in each dense block.
                Can be a -1, positive integer or a list.
                If -1, calculates nb_layer_per_block from the network depth.
                If positive integer, a set number of layers per dense block.
                If list, nb_layer is used as provided. Note that list size must
                be (nb_dense_block + 1)
            bottleneck: flag to add bottleneck blocks in between dense blocks
            reduction: reduction factor of transition blocks.
                Note : reduction value is inverted to compute compression.
            dropout_rate: dropout rate
            weight_decay: weight decay rate
            subsample_initial_block: Set to True to subsample the initial convolution and
                add a MaxPool2D before the dense blocks are added.
            include_top: whether to include the fully-connected
                layer at the top of the network.
            weights: one of `None` (random initialization) or
                'imagenet' (pre-training on ImageNet)..
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            classes: optional number of classes to classify images
                into, only to be specified if `include_top` is True, and
                if no `weights` argument is specified.
            activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                Note that if sigmoid is used, classes must be 1.
        # Returns
            A Keras model instance.
        '''

    if weights not in {'imagenet', None}:
        # BUG FIX: the message previously claimed the valid value was
        # 'cifar10' although the check accepts only 'imagenet' / None.
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top`'
                         ' as true, `classes` should be 1000')

    if activation not in ['softmax', 'sigmoid']:
        raise ValueError('activation must be one of "softmax" or "sigmoid"')

    if activation == 'sigmoid' and classes != 1:
        raise ValueError('sigmoid activation can only be used when classes = 1')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=8,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = __create_dense_net(classes, img_input, include_top, depth, nb_dense_block,
                           growth_rate, nb_filter, nb_layers_per_block, bottleneck, reduction,
                           dropout_rate, weight_decay, subsample_initial_block, activation)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='densenet')

    # load weights: only the three published DenseNet-BC configurations have
    # downloadable ImageNet weights; anything else stays randomly initialized.
    if weights == 'imagenet':
        weights_loaded = False

        if (depth == 121) and (nb_dense_block == 4) and (growth_rate == 32) and (nb_filter == 64) and \
                (bottleneck is True) and (reduction == 0.5) and (dropout_rate == 0.0) and (subsample_initial_block):
            if include_top:
                weights_path = get_file('DenseNet-BC-121-32.h5',
                                        DENSENET_121_WEIGHTS_PATH,
                                        cache_subdir='models',
                                        md5_hash='a439dd41aa672aef6daba4ee1fd54abd')
            else:
                weights_path = get_file('DenseNet-BC-121-32-no-top.h5',
                                        DENSENET_121_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models',
                                        md5_hash='55e62a6358af8a0af0eedf399b5aea99')
            model.load_weights(weights_path)
            weights_loaded = True

        if (depth == 161) and (nb_dense_block == 4) and (growth_rate == 48) and (nb_filter == 96) and \
                (bottleneck is True) and (reduction == 0.5) and (dropout_rate == 0.0) and (subsample_initial_block):
            if include_top:
                weights_path = get_file('DenseNet-BC-161-48.h5',
                                        DENSENET_161_WEIGHTS_PATH,
                                        cache_subdir='models',
                                        md5_hash='6c326cf4fbdb57d31eff04333a23fcca')
            else:
                weights_path = get_file('DenseNet-BC-161-48-no-top.h5',
                                        DENSENET_161_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models',
                                        md5_hash='1a9476b79f6b7673acaa2769e6427b92')
            model.load_weights(weights_path)
            weights_loaded = True

        if (depth == 169) and (nb_dense_block == 4) and (growth_rate == 32) and (nb_filter == 64) and \
                (bottleneck is True) and (reduction == 0.5) and (dropout_rate == 0.0) and (subsample_initial_block):
            if include_top:
                weights_path = get_file('DenseNet-BC-169-32.h5',
                                        DENSENET_169_WEIGHTS_PATH,
                                        cache_subdir='models',
                                        md5_hash='914869c361303d2e39dec640b4e606a6')
            else:
                weights_path = get_file('DenseNet-BC-169-32-no-top.h5',
                                        DENSENET_169_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models',
                                        md5_hash='89c19e8276cfd10585d5fadc1df6859e')
            model.load_weights(weights_path)
            weights_loaded = True

        if weights_loaded:
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)

            if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')

            print("Weights for the model were loaded successfully")

    return model
def DenseNetFCN(input_shape, nb_dense_block=5, growth_rate=16, nb_layers_per_block=4,
                reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, init_conv_filters=48,
                include_top=True, weights=None, input_tensor=None, classes=1, activation='softmax',
                upsampling_conv=128, upsampling_type='deconv'):
    '''Instantiate the DenseNet FCN (fully convolutional DenseNet) architecture.
    Note that when using TensorFlow,
    for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.
    # Arguments
        input_shape: shape tuple, e.g. `(224, 224, 3)`. Required for fully
            convolutional models. Spatial dimensions must be no smaller than
            2 ** nb_dense_block, since each transition block halves them.
        nb_dense_block: number of dense blocks on the downsampling path
            (mirrored on the upsampling path)
        growth_rate: number of filters to add per dense block
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        reduction: reduction factor of transition blocks.
            Note : reduction value is inverted to compute compression.
        dropout_rate: dropout rate
        weight_decay: l2 weight decay factor
        init_conv_filters: number of filters in the initial convolution layer
        include_top: whether to include the classification layer at the top
            of the network.
        weights: must be `None` (random initialization); no pre-trained
            weights are available for this architecture.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        classes: number of classes to classify each pixel into
        activation: Type of activation at the top layer. Can be one of
            'softmax' or 'sigmoid'. Note that if sigmoid is used, classes
            must be 1.
        upsampling_conv: number of convolutional filters in upsampling via
            subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and
            'subpixel'. Defines type of upsampling algorithm used.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: for invalid `weights`, `upsampling_type`, `input_shape`,
            `nb_layers_per_block`, `activation` or `classes` arguments.
    '''
    if weights not in {None}:
        raise ValueError('The `weights` argument should be '
                         '`None` (random initialization) as no '
                         'model weights are provided.')
    upsampling_type = upsampling_type.lower()
    if upsampling_type not in ['upsampling', 'deconv', 'subpixel']:
        raise ValueError('Parameter "upsampling_type" must be one of "upsampling", '
                         '"deconv" or "subpixel".')
    if input_shape is None:
        raise ValueError('For fully convolutional models, input shape must be supplied.')
    # BUGFIX: the original condition tested `nb_dense_block < 1` while the
    # error message (and its %d argument) refer to `nb_layers_per_block`;
    # validate the value the message actually reports.
    if type(nb_layers_per_block) is not list and nb_layers_per_block < 1:
        raise ValueError('Number of dense layers per block must be greater than 1. Argument '
                         'value was %d.' % (nb_layers_per_block))
    if activation not in ['softmax', 'sigmoid']:
        raise ValueError('activation must be one of "softmax" or "sigmoid"')
    if activation == 'sigmoid' and classes != 1:
        raise ValueError('sigmoid activation can only be used when classes = 1')
    # Determine proper input shape. Each transition block halves the spatial
    # resolution, so each side must be at least 2 ** nb_dense_block.
    min_size = 2 ** nb_dense_block
    if K.image_data_format() == 'channels_first':
        if input_shape is not None:
            if ((input_shape[1] is not None and input_shape[1] < min_size) or
                    (input_shape[2] is not None and input_shape[2] < min_size)):
                raise ValueError('Input size must be at least ' +
                                 str(min_size) + 'x' + str(min_size) + ', got '
                                 '`input_shape=' + str(input_shape) + '`')
        else:
            input_shape = (classes, None, None)
    else:
        if input_shape is not None:
            if ((input_shape[0] is not None and input_shape[0] < min_size) or
                    (input_shape[1] is not None and input_shape[1] < min_size)):
                raise ValueError('Input size must be at least ' +
                                 str(min_size) + 'x' + str(min_size) + ', got '
                                 '`input_shape=' + str(input_shape) + '`')
        else:
            input_shape = (None, None, classes)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    x = __create_fcn_dense_net(classes, img_input, include_top, nb_dense_block,
                               growth_rate, reduction, dropout_rate, weight_decay,
                               nb_layers_per_block, upsampling_conv, upsampling_type,
                               init_conv_filters, input_shape, activation)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='fcn-densenet')
    return model
def DenseNetImageNet121(input_shape=None,
                        bottleneck=True,
                        reduction=0.5,
                        dropout_rate=0.0,
                        weight_decay=1e-4,
                        include_top=True,
                        weights='imagenet',
                        input_tensor=None,
                        classes=1000,
                        activation='softmax'):
    """Build DenseNet-BC-121 with the standard ImageNet configuration.

    Thin convenience wrapper around `DenseNet` that pins the DenseNet-121
    block layout ([6, 12, 24, 16] layers) and its initial stem settings.
    """
    block_layout = [6, 12, 24, 16]
    return DenseNet(input_shape, depth=121, nb_dense_block=4, growth_rate=32,
                    nb_filter=64, nb_layers_per_block=block_layout,
                    bottleneck=bottleneck, reduction=reduction,
                    dropout_rate=dropout_rate, weight_decay=weight_decay,
                    subsample_initial_block=True, include_top=include_top,
                    weights=weights, input_tensor=input_tensor,
                    classes=classes, activation=activation)
def DenseNetImageNet169(input_shape=None,
                        bottleneck=True,
                        reduction=0.5,
                        dropout_rate=0.0,
                        weight_decay=1e-4,
                        include_top=True,
                        weights='imagenet',
                        input_tensor=None,
                        classes=1000,
                        activation='softmax'):
    """Build DenseNet-BC-169 with the standard ImageNet configuration.

    Thin convenience wrapper around `DenseNet` that pins the DenseNet-169
    block layout ([6, 12, 32, 32] layers) and its initial stem settings.
    """
    block_layout = [6, 12, 32, 32]
    return DenseNet(input_shape, depth=169, nb_dense_block=4, growth_rate=32,
                    nb_filter=64, nb_layers_per_block=block_layout,
                    bottleneck=bottleneck, reduction=reduction,
                    dropout_rate=dropout_rate, weight_decay=weight_decay,
                    subsample_initial_block=True, include_top=include_top,
                    weights=weights, input_tensor=input_tensor,
                    classes=classes, activation=activation)
def DenseNetImageNet201(input_shape=None,
                        bottleneck=True,
                        reduction=0.5,
                        dropout_rate=0.0,
                        weight_decay=1e-4,
                        include_top=True,
                        weights=None,
                        input_tensor=None,
                        classes=1000,
                        activation='softmax'):
    """Build DenseNet-BC-201 with the standard ImageNet configuration.

    Thin convenience wrapper around `DenseNet` that pins the DenseNet-201
    block layout ([6, 12, 48, 32] layers). Unlike the 121/161/169 variants,
    `weights` defaults to None here.
    """
    block_layout = [6, 12, 48, 32]
    return DenseNet(input_shape, depth=201, nb_dense_block=4, growth_rate=32,
                    nb_filter=64, nb_layers_per_block=block_layout,
                    bottleneck=bottleneck, reduction=reduction,
                    dropout_rate=dropout_rate, weight_decay=weight_decay,
                    subsample_initial_block=True, include_top=include_top,
                    weights=weights, input_tensor=input_tensor,
                    classes=classes, activation=activation)
def DenseNetImageNet264(input_shape=None,
                        bottleneck=True,
                        reduction=0.5,
                        dropout_rate=0.0,
                        weight_decay=1e-4,
                        include_top=True,
                        weights=None,
                        input_tensor=None,
                        classes=1000,
                        activation='softmax'):
    """Build DenseNet-BC-264 with the standard ImageNet configuration.

    Thin convenience wrapper around `DenseNet` that pins the DenseNet-264
    block layout ([6, 12, 64, 48] layers). No pre-trained weights are
    available for this variant (`weights` defaults to None).
    """
    # BUGFIX: the original passed depth=201 (copy-paste from
    # DenseNetImageNet201). `depth` is ignored by __create_dense_net when an
    # explicit nb_layers_per_block list is supplied, so this is behavior
    # neutral, but 264 matches the model actually built.
    return DenseNet(input_shape, depth=264, nb_dense_block=4, growth_rate=32, nb_filter=64,
                    nb_layers_per_block=[6, 12, 64, 48], bottleneck=bottleneck, reduction=reduction,
                    dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
                    include_top=include_top, weights=weights, input_tensor=input_tensor,
                    classes=classes, activation=activation)
def DenseNetImageNet161(input_shape=None,
                        bottleneck=True,
                        reduction=0.5,
                        dropout_rate=0.0,
                        weight_decay=1e-4,
                        include_top=True,
                        weights='imagenet',
                        input_tensor=None,
                        classes=1000,
                        activation='softmax'):
    """Build DenseNet-BC-161 with the standard ImageNet configuration.

    Thin convenience wrapper around `DenseNet` that pins the DenseNet-161
    block layout ([6, 12, 36, 24] layers) with growth rate 48 and a wider
    96-filter stem.
    """
    block_layout = [6, 12, 36, 24]
    return DenseNet(input_shape, depth=161, nb_dense_block=4, growth_rate=48,
                    nb_filter=96, nb_layers_per_block=block_layout,
                    bottleneck=bottleneck, reduction=reduction,
                    dropout_rate=dropout_rate, weight_decay=weight_decay,
                    subsample_initial_block=True, include_top=include_top,
                    weights=weights, input_tensor=input_tensor,
                    classes=classes, activation=activation)
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    '''Composite function: BatchNorm -> ReLU -> (optional 1x1 bottleneck) -> 3x3 Conv -> (optional Dropout).

    Args:
        ip: input keras tensor
        nb_filter: number of output filters of the 3x3 convolution
        bottleneck: if True, insert a 1x1 convolution with 4 * nb_filter
            channels (plus its own BatchNorm + ReLU) before the 3x3 convolution
        dropout_rate: dropout rate; skipped when falsy
        weight_decay: l2 weight decay factor (applied to the bottleneck conv only)
    Returns: keras tensor after the composite transformation
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    out = BatchNormalization(axis=channel_axis, epsilon=1.1e-5)(ip)
    out = Activation('relu')(out)
    if bottleneck:
        # 4x channel expansion, following the reference Torch implementation:
        # https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua
        expanded_channels = nb_filter * 4
        out = Conv2D(expanded_channels, (1, 1), kernel_initializer='he_normal',
                     padding='same', use_bias=False,
                     kernel_regularizer=l2(weight_decay))(out)
        out = BatchNormalization(axis=channel_axis, epsilon=1.1e-5)(out)
        out = Activation('relu')(out)
    out = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal',
                 padding='same', use_bias=False)(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    return out
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False):
    '''Build a dense block: each conv block's output is concatenated onto the running tensor.

    Args:
        x: input keras tensor
        nb_layers: number of conv blocks to append
        nb_filter: current number of filters (bookkeeping value)
        growth_rate: filters added by each conv block
        bottleneck: use bottleneck conv blocks
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: if True, increase nb_filter by growth_rate per layer
        return_concat_list: also return the list of individual feature maps
    Returns: (output tensor, updated nb_filter) and, when requested, the list
        of feature maps (input first, then each conv block's output).
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    feature_maps = [x]
    for _ in range(nb_layers):
        new_features = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        feature_maps.append(new_features)
        # Dense connectivity: new features ride along with everything so far.
        x = concatenate([x, new_features], axis=channel_axis)
        if grow_nb_filters:
            nb_filter += growth_rate
    if return_concat_list:
        return x, nb_filter, feature_maps
    return x, nb_filter
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    '''Transition block: BatchNorm -> ReLU -> 1x1 Conv (compressed) -> 2x2 AveragePooling.

    Args:
        ip: input keras tensor
        nb_filter: current number of filters
        compression: calculated as 1 - reduction; scales down the number of
            feature maps produced by the 1x1 convolution
        weight_decay: l2 weight decay factor
    Returns: keras tensor with halved spatial resolution and compressed channels
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    out = BatchNormalization(axis=channel_axis, epsilon=1.1e-5)(ip)
    out = Activation('relu')(out)
    compressed_filters = int(nb_filter * compression)
    out = Conv2D(compressed_filters, (1, 1), kernel_initializer='he_normal',
                 padding='same', use_bias=False,
                 kernel_regularizer=l2(weight_decay))(out)
    return AveragePooling2D((2, 2), strides=(2, 2))(out)
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''Upscale feature maps by a factor of 2.

    Args:
        ip: input keras tensor
        nb_filters: number of filters of the upsampling convolution(s)
        type: one of 'upsampling', 'subpixel' or 'deconv' (anything else
            falls through to 'deconv'); note the parameter name shadows the
            builtin but is kept for call-site keyword compatibility
        weight_decay: l2 weight decay factor
    Returns: keras tensor with doubled spatial resolution
    '''
    if type == 'upsampling':
        return UpSampling2D()(ip)
    if type == 'subpixel':
        conv_kwargs = dict(activation='relu', padding='same',
                           kernel_regularizer=l2(weight_decay),
                           use_bias=False, kernel_initializer='he_normal')
        out = Conv2D(nb_filters, (3, 3), **conv_kwargs)(ip)
        out = SubPixelUpscaling(scale_factor=2)(out)
        return Conv2D(nb_filters, (3, 3), **conv_kwargs)(out)
    # Default: strided transposed convolution ('deconv').
    return Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
                           strides=(2, 2), kernel_initializer='he_normal',
                           kernel_regularizer=l2(weight_decay))(ip)
def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1,
                       nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1e-4,
                       subsample_initial_block=False, activation='softmax'):
    ''' Build the DenseNet model graph (classification variant).
    Args:
        nb_classes: number of classes
        img_input: input keras tensor of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense classification layer
        depth: number of layers; only consulted when nb_layers_per_block == -1
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
            Can be a -1, positive integer or a list.
            If -1, calculates nb_layer_per_block from the depth of the network.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be nb_dense_block (the last entry is the final block's layer count)
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Set to True to subsample the initial convolution and
            add a MaxPool2D before the dense blocks are added (ImageNet-style stem).
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
            Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor for the network output (logits tensor if include_top is False)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'
    # Resolve the per-block layer counts from nb_layers_per_block.
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list
        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        # The last list entry is the final block, which has no transition after it.
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            # Derive a uniform layer count from the total depth.
            assert (depth - 4) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)
            if bottleneck:
                # Bottleneck blocks contain two convolutions each.
                count = count // 2
            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block
    # compute initial nb_filter if -1, else accept users initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate
    # compute compression factor
    compression = 1.0 - reduction
    # Initial convolution: a strided 7x7 stem for ImageNet-style models,
    # otherwise a 3x3 stride-1 convolution (CIFAR-style).
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)
    x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal', padding='same',
               strides=initial_strides, use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # Add dense blocks, each followed by a compressing transition block.
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, bottleneck=bottleneck,
                                     dropout_rate=dropout_rate, weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)
    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck,
                                 dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    if include_top:
        x = Dense(nb_classes, activation=activation)(x)
    return x
def __create_fcn_dense_net(nb_classes, img_input, include_top, nb_dense_block=5, growth_rate=12,
                           reduction=0.0, dropout_rate=None, weight_decay=1e-4,
                           nb_layers_per_block=4, nb_upsampling_conv=128, upsampling_type='upsampling',
                           init_conv_filters=48, input_shape=None, activation='deconv'):
    ''' Build the fully convolutional DenseNet (FC-DenseNet / "Tiramisu") graph.
    Args:
        nb_classes: number of classes
        img_input: input keras tensor of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final per-pixel classification layers
        nb_dense_block: number of dense blocks on the downsampling path
            (mirrored on the upsampling path)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1); the extra entry is the bottleneck block.
        nb_upsampling_conv: number of convolutional filters in upsampling via subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
            type of upsampling algorithm used.
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
            Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor for the per-pixel network output
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    if concat_axis == 1:  # channels_first dim ordering
        _, rows, cols = input_shape
    else:
        rows, cols, _ = input_shape
    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'
    # check if upsampling_conv has minimum number of filters
    # minimum is set to 12, as at least 3 color channels are needed for correct upsampling
    assert nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0, 'Parameter `upsampling_conv` number of channels must ' \
                                                                    'be a positive number divisible by 4 and greater ' \
                                                                    'than 12'
    # Resolve per-block layer counts; nb_layers covers the down path, the
    # bottleneck, and (mirrored) the up path.
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list
        assert len(nb_layers) == (nb_dense_block + 1), 'If list, nb_layer is used as provided. ' \
                                                       'Note that list size must be (nb_dense_block + 1)'
        bottleneck_nb_layers = nb_layers[-1]
        # Mirror the down-path layer counts onto the up path.
        rev_layers = nb_layers[::-1]
        nb_layers.extend(rev_layers[1:])
    else:
        bottleneck_nb_layers = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)
    # compute compression factor
    compression = 1.0 - reduction
    # Initial convolution
    x = Conv2D(init_conv_filters, (7, 7), kernel_initializer='he_normal', padding='same', name='initial_conv2D',
               use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    nb_filter = init_conv_filters
    skip_list = []
    # Downsampling path: dense blocks followed by transition-down blocks,
    # recording each block's output for the skip connections.
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        # Skip connection
        skip_list.append(x)
        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)  # this is calculated inside transition_down_block
    # Bottleneck dense block: no transition-down afterwards.
    # return the concatenated feature maps without the concatenation of the input
    _, nb_filter, concat_list = __dense_block(x, bottleneck_nb_layers, nb_filter, growth_rate,
                                              dropout_rate=dropout_rate, weight_decay=weight_decay,
                                              return_concat_list=True)
    skip_list = skip_list[::-1]  # reverse the skip list
    # Upsampling path: transition-up blocks concatenated with the matching
    # skip connection, each followed by a dense block.
    for block_idx in range(nb_dense_block):
        n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]
        # upsampling block must upsample only the feature maps (concat_list[1:]),
        # not the concatenation of the input with the feature maps (concat_list[0]).
        l = concatenate(concat_list[1:], axis=concat_axis)
        t = __transition_up_block(l, nb_filters=n_filters_keep, type=upsampling_type, weight_decay=weight_decay)
        # concatenate the skip connection with the transition block
        x = concatenate([t, skip_list[block_idx]], axis=concat_axis)
        # Dont allow the feature map size to grow in upsampling dense blocks
        x_up, nb_filter, concat_list = __dense_block(x, nb_layers[nb_dense_block + block_idx + 1], nb_filter=growth_rate,
                                                     growth_rate=growth_rate, dropout_rate=dropout_rate,
                                                     weight_decay=weight_decay, return_concat_list=True,
                                                     grow_nb_filters=False)
    if include_top:
        # 1x1 conv produces per-pixel class scores; the Reshape/Activation/
        # Reshape dance applies the activation across the class dimension.
        x = Conv2D(nb_classes, (1, 1), activation='linear', padding='same', use_bias=False)(x_up)
        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape
        x = Reshape((row * col, nb_classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, nb_classes))(x)
    else:
        x = x_up
    return x
if __name__ == '__main__':
    # Smoke test: build a CIFAR-sized DenseNet-BC (depth 100, growth rate 12)
    # and print its summary. The FCN variant below is kept for reference.
    from keras.utils.vis_utils import plot_model
    #model = DenseNetFCN((32, 32, 3), growth_rate=16, nb_layers_per_block=[4, 5, 7, 10, 12, 15], upsampling_type='deconv')
    model = DenseNet((32, 32, 3), depth=100, nb_dense_block=3,
                     growth_rate=12, bottleneck=True, reduction=0.5, weights=None)
    model.summary()
    from keras.callbacks import ModelCheckpoint, TensorBoard
    #plot_model(model, 'test.png', show_shapes=True)
| 38,501 | 46.828571 | 130 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/model_source/densenet_1/tensorflow_backend.py | # SPDX-License-Identifier: Apache-2.0
# From https://github.com/titu1994/DenseNet/blob/master/tensorflow_backend.py
# Modifications Copyright (c) Microsoft.
import tensorflow as tf
from mock_keras2onnx.proto import keras
from keras.backend import tensorflow_backend as KTF
from keras.backend.common import image_data_format
py_all = all
def depth_to_space(input, scale, data_format=None):
    ''' Uses phase shift algorithm to convert channels/depth for spatial resolution.

    Args:
        input: 4D tensor.
        scale: block size by which depth is rearranged into space.
        data_format: None (use the Keras default), 'channels_first' or
            'channels_last'.
    Returns: tensor with channels redistributed into spatial blocks.
    '''
    if data_format is None:
        data_format = image_data_format()
    if data_format == 'channels_first':
        data_format = 'NCHW'
    else:
        data_format = 'NHWC'
    # BUGFIX: tf.depth_to_space expects data_format as upper-case 'NCHW'/'NHWC';
    # the original lower-cased the string here, which TensorFlow rejects.
    out = tf.depth_to_space(input, scale, data_format=data_format)
    return out
| 782 | 28 | 87 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/model_source/densenet_1/subpixel.py | # SPDX-License-Identifier: Apache-2.0
# From https://github.com/titu1994/DenseNet/blob/master/subpixel.py
# Modifications Copyright (c) Microsoft.
from mock_keras2onnx.proto import keras
from keras import backend as K
from keras.engine import Layer
from keras.utils.generic_utils import get_custom_objects
from keras.backend import normalize_data_format
if K.backend() == 'theano':
import theano_backend as K_BACKEND
else:
import tensorflow_backend as K_BACKEND
class SubPixelUpscaling(Layer):
    """ Sub-pixel convolutional upscaling layer based on the paper "Real-Time Single Image
    and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network"
    (https://arxiv.org/abs/1609.05158).

    This layer requires a preceding Convolution2D whose filter count is
    k * (scale_factor ** 2), where k is the desired number of output channels
    and scale_factor is the upscaling factor (generally 2). It performs a
    depth-to-space rearrangement: channels are traded for spatial resolution.

    # Example :
    ```python
    x = Convolution2D(256, 3, 3, padding='same', activation='relu')(...)
    u = SubPixelUpscaling(scale_factor=2)(x)
    # optionally follow with another Convolution2D to speed up learning
    ```

    # Arguments
        scale_factor: Upscaling factor.
        data_format: Can be None, 'channels_first' or 'channels_last'.

    # Input shape
        4D tensor: `(samples, k * scale_factor ** 2, rows, cols)` for
        'channels_first', or `(samples, rows, cols, k * scale_factor ** 2)`
        for 'channels_last'.

    # Output shape
        4D tensor: `(samples, k, rows * scale_factor, cols * scale_factor)` for
        'channels_first', or `(samples, rows * scale_factor,
        cols * scale_factor, k)` for 'channels_last'.
    """

    def __init__(self, scale_factor=2, data_format=None, **kwargs):
        super(SubPixelUpscaling, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.data_format = normalize_data_format(data_format)

    def build(self, input_shape):
        # No trainable weights: depth-to-space is a pure rearrangement.
        pass

    def call(self, x, mask=None):
        return K_BACKEND.depth_to_space(x, self.scale_factor, self.data_format)

    def compute_output_shape(self, input_shape):
        factor = self.scale_factor
        if self.data_format == 'channels_first':
            batch, channels, rows, cols = input_shape
            return (batch, channels // (factor ** 2), rows * factor, cols * factor)
        batch, rows, cols, channels = input_shape
        return (batch, rows * factor, cols * factor, channels // (factor ** 2))

    def get_config(self):
        merged = dict(super(SubPixelUpscaling, self).get_config())
        merged.update({'scale_factor': self.scale_factor,
                       'data_format': self.data_format})
        return merged
# Register the layer so Keras can deserialize saved models that use it by name.
get_custom_objects().update({'SubPixelUpscaling': SubPixelUpscaling})
| 3,693 | 42.458824 | 107 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/mask_rcnn/mask_rcnn.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import numpy as np
import skimage
import onnx
import mock_keras2onnx
from mrcnn.config import Config
from mrcnn.model import BatchNorm, DetectionLayer
from mrcnn import model as modellib
from mrcnn import visualize
from mock_keras2onnx import set_converter
from mock_keras2onnx.ke2onnx.batch_norm import convert_keras_batch_normalization
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import convert_tf_crop_and_resize
ROOT_DIR = os.path.abspath("./")
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset. All other settings keep the mrcnn Config defaults.
    """
    # Give the configuration a recognizable name
    NAME = "coco"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    # Uncomment to train on 8 GPUs (default is 1)
    # GPU_COUNT = 8
    # Number of classes (including background)
    NUM_CLASSES = 1 + 80  # COCO has 80 classes
class InferenceConfig(CocoConfig):
    """CocoConfig specialized for single-image inference."""
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
# Build the inference-mode Mask R-CNN and restore pre-trained COCO weights.
config = InferenceConfig()
config.display()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
def convert_BatchNorm(scope, operator, container):
    # Delegate to the stock keras2onnx BatchNormalization converter;
    # presumably mrcnn's BatchNorm subclass converts identically -- confirm
    # against mrcnn.model if its behaviour changes.
    convert_keras_batch_normalization(scope, operator, container)
def norm_boxes_graph(scope, operator, container, oopb, image_meta):
    """Emit ONNX nodes computing the image window in normalized coordinates.

    Slices the image-meta tensor (columns 4:7 as the image shape and 7:11 as
    the window, matching the variable names used below -- presumably mrcnn's
    compose_image_meta layout; confirm against the caller), then computes
    (window - [0, 0, 1, 1]) / [h-1, w-1, h-1, w-1].

    Args:
        scope/operator/container: keras2onnx conversion context objects.
        oopb: onnx operator builder used to emit the nodes.
        image_meta: name of the [batch, meta_len] input tensor in the graph.
    Returns: the name of the emitted output tensor of shape [batch, 4].
    """
    # meta[:, 4:7] -> (H, W, C) of the molded image.
    image_shapes = oopb.add_node('Slice',
                                 [image_meta,
                                  ('_start', oopb.int64, np.array([4], dtype='int64')),
                                  ('_end', oopb.int64, np.array([7], dtype='int64')),
                                  ('_axes', oopb.int64, np.array([1], dtype='int64'))
                                  ],
                                 operator.inputs[0].full_name + '_image_shapes')
    # Use only the first image's shape (all molded images share a shape).
    image_shape = oopb.add_node('Slice',
                                [image_shapes,
                                 ('_start', oopb.int64, np.array([0], dtype='int64')),
                                 ('_end', oopb.int64, np.array([1], dtype='int64')),
                                 ('_axes', oopb.int64, np.array([0], dtype='int64'))
                                 ],
                                operator.inputs[0].full_name + '_image_shape')
    image_shape_squeeze = oopb.apply_squeeze(image_shape, name=operator.full_name + '_image_shape_squeeze', axes=[0])[0]
    # meta[:, 7:11] -> window (y1, x1, y2, x2) in pixel coordinates.
    window = oopb.add_node('Slice',
                           [image_meta,
                            ('_start', oopb.int64, np.array([7], dtype='int64')),
                            ('_end', oopb.int64, np.array([11], dtype='int64')),
                            ('_axes', oopb.int64, np.array([1], dtype='int64'))
                            ],
                           operator.inputs[0].full_name + '_window')
    # Split the squeezed shape into height and width scalars.
    h_norm = oopb.add_node('Slice',
                           [image_shape_squeeze,
                            ('_start', oopb.int64, np.array([0], dtype='int64')),
                            ('_end', oopb.int64, np.array([1], dtype='int64')),
                            ('_axes', oopb.int64, np.array([0], dtype='int64'))
                            ],
                           operator.inputs[0].full_name + '_h_norm')
    w_norm = oopb.add_node('Slice',
                           [image_shape_squeeze,
                            ('_start', oopb.int64, np.array([1], dtype='int64')),
                            ('_end', oopb.int64, np.array([2], dtype='int64')),
                            ('_axes', oopb.int64, np.array([0], dtype='int64'))
                            ],
                           operator.inputs[0].full_name + '_w_norm')
    # Cast height/width to float32 (Cast to=1) for the division below.
    h_norm_float = scope.get_unique_variable_name('h_norm_float')
    attrs = {'to': 1}
    container.add_node('Cast', h_norm, h_norm_float, op_version=operator.target_opset,
                       **attrs)
    w_norm_float = scope.get_unique_variable_name('w_norm_float')
    attrs = {'to': 1}
    container.add_node('Cast', w_norm, w_norm_float, op_version=operator.target_opset,
                       **attrs)
    # Build the (h, w, h, w) scale vector matching the (y1, x1, y2, x2) layout.
    hw_concat = scope.get_unique_variable_name(operator.inputs[0].full_name + '_hw_concat')
    attrs = {'axis': -1}
    container.add_node("Concat",
                       [h_norm_float, w_norm_float, h_norm_float, w_norm_float],
                       hw_concat,
                       op_version=operator.target_opset,
                       name=operator.inputs[0].full_name + '_hw_concat', **attrs)
    # scale = (h, w, h, w) - 1, i.e. the pixel-to-normalized divisor.
    scale = oopb.add_node('Sub',
                          [hw_concat,
                           ('_sub', oopb.float, np.array([1.0], dtype='float32'))
                           ],
                          operator.inputs[0].full_name + '_scale')
    # Shift the lower-right corner by one pixel before normalizing.
    boxes_shift = oopb.add_node('Sub',
                                [window,
                                 ('_sub', oopb.float, np.array([0.0, 0.0, 1.0, 1.0], dtype='float32'))
                                 ],
                                operator.inputs[0].full_name + '_boxes_shift')
    divide = oopb.add_node('Div',
                           [boxes_shift, scale],
                           operator.inputs[0].full_name + '_divide')
    # output shape: [batch, 4]
    return divide
def convert_DetectionLayer(scope, operator, container):
    # type: (mock_keras2onnx.common.InterimContext, mock_keras2onnx.common.Operator, mock_keras2onnx.common.OnnxObjectContainer) -> None
    # Intentionally a no-op stub: emits no ONNX nodes for DetectionLayer.
    # NOTE(review): this leaves the custom layer unconverted -- presumably the
    # detection post-processing happens outside the ONNX graph (see
    # generate_image / unmold_detections); confirm before relying on it.
    pass
# Route conversion of Mask R-CNN's custom layers through the handlers above.
set_converter(DetectionLayer, convert_DetectionLayer)
set_converter(BatchNorm, convert_BatchNorm)
# COCO class names; index 0 ('BG') is the background class, giving 81 entries
# to match NUM_CLASSES = 1 + 80 above.
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']
def generate_image(images, molded_images, windows, results):
    """Post-process raw network outputs into per-image detection dicts and display them.

    Args:
        images: original (unmolded) input images.
        molded_images: the resized/padded images fed to the network.
        windows: per-image (y1, x1, y2, x2) windows from mold_inputs.
        results: session outputs; index 0 and 3 are fed to unmold_detections
            as detections and mrcnn_mask respectively.
    Returns: list of dicts with keys 'rois', 'class_ids', 'scores', 'masks'.
    """
    processed = []
    for idx, image in enumerate(images):
        rois, class_ids, scores, masks = model.unmold_detections(
            results[0][idx], results[3][idx],  # detections[idx], mrcnn_mask[idx]
            image.shape, molded_images[idx].shape,
            windows[idx])
        detection = {
            "rois": rois,
            "class_ids": class_ids,
            "scores": scores,
            "masks": masks,
        }
        processed.append(detection)
        # Render the instances for this image as a side effect.
        visualize.display_instances(image, detection['rois'], detection['masks'],
                                    detection['class_ids'], class_names,
                                    detection['scores'])
    return processed
if __name__ == '__main__':
    # Usage: python mask_rcnn.py <image-file>
    if len(sys.argv) < 2:
        print("Need an image file for object detection.")
        exit(-1)
    model_file_name = './mrcnn.onnx'
    # Convert the Keras model to ONNX once and cache it on disk.
    if not os.path.exists(model_file_name):
        # use opset 11 or later
        set_converter('CropAndResize', convert_tf_crop_and_resize)
        oml = mock_keras2onnx.convert_keras(model.keras_model, target_opset=11)
        onnx.save_model(oml, model_file_name)
    # run with ONNXRuntime
    import onnxruntime
    filename = sys.argv[1]
    image = skimage.io.imread(filename)
    images = [image]
    sess = onnxruntime.InferenceSession(model_file_name)
    # preprocessing: resize/pad the image and build matching anchors.
    molded_images, image_metas, windows = model.mold_inputs(images)
    anchors = model.get_anchors(molded_images[0].shape)
    # Replicate the anchors across the batch dimension.
    anchors = np.broadcast_to(anchors, (model.config.BATCH_SIZE,) + anchors.shape)
    results = \
        sess.run(None, {"input_image": molded_images.astype(np.float32),
                        "input_anchors": anchors,
                        "input_image_meta": image_metas.astype(np.float32)})
    # postprocessing: unmold detections and display them.
    results_final = generate_image(images, molded_images, windows, results)
| 9,141 | 40.93578 | 136 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_wavenet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AtrousConvolution1D = keras.layers.AtrousConvolution1D
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Convolution1D = keras.layers.Convolution1D
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def wavenetBlock(n_atrous_filters, atrous_filter_size, atrous_rate):
    """Return a builder for one gated WaveNet residual block.

    The returned callable maps an input tensor to ``(residual_out, skip_out)``
    where ``skip_out`` feeds the skip-connection sum of the full network.
    """
    def build(block_input):
        # Gated activation unit: tanh branch modulated by a sigmoid gate.
        gate_tanh = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                        atrous_rate=atrous_rate,
                                        border_mode='same',
                                        activation='tanh')(block_input)
        gate_sigmoid = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                           atrous_rate=atrous_rate,
                                           border_mode='same',
                                           activation='sigmoid')(block_input)
        gated = keras.layers.Multiply()([gate_tanh, gate_sigmoid])
        # 1x1 convolution produces the skip-connection output.
        skip = Convolution1D(1, 1, activation='relu', border_mode='same')(gated)
        # Residual connection around the whole block.
        residual_out = keras.layers.Add()([skip, block_input])
        return residual_out, skip
    return build
def get_basic_generative_model(input_size):
    """Build and compile the stacked-WaveNet generator.

    Stacks 21 gated residual blocks whose dilation rates cycle through
    2**0 .. 2**8, sums the skip connections and maps them to a softmax
    over ``input_size`` classes.
    """
    signal_in = Input(shape=(input_size, 1))
    residual, skip = wavenetBlock(64, 2, 2)(signal_in)
    skip_connections = [skip]
    for i in range(20):
        # Dilation exponent cycles through (i + 2) mod 9.
        residual, skip = wavenetBlock(64, 2, 2 ** ((i + 2) % 9))(residual)
        skip_connections.append(skip)
    net = keras.layers.Add()(skip_connections)
    net = Activation('relu')(net)
    net = Convolution1D(1, 1, activation='relu')(net)
    net = Convolution1D(1, 1)(net)
    net = Flatten()(net)
    net = Dense(input_size, activation='softmax')(net)
    model = Model(input=signal_in, output=net)
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    model.summary()
    return model
# Model from https://github.com/usernaamee/keras-wavenet
class TestWavenet(unittest.TestCase):
    """Converts the WaveNet Keras model to ONNX and checks ONNXRuntime parity."""

    def setUp(self):
        # Model files created during a test; removed again in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_wavenet(self):
        # Build the network, run a Keras prediction, convert to ONNX and
        # compare ONNXRuntime output against the Keras result.
        K.clear_session()
        keras_model = get_basic_generative_model(128)
        data = np.random.rand(2, 128, 1).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 3,709 | 34.673077 | 112 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_super_resolution.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
import tensorflow as tf
Activation = keras.layers.Activation
add = keras.layers.add
Average = keras.layers.Average
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Conv2D = keras.layers.Conv2D
Conv3D = keras.layers.Conv3D
Convolution2D = keras.layers.Convolution2D
Conv2DTranspose = keras.layers.Conv2DTranspose
Dense = keras.layers.Dense
dot = keras.layers.dot
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPool1D = keras.layers.MaxPool1D
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
PReLU = keras.layers.PReLU
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
_image_scale_multiplier = 1
img_size = 128 * _image_scale_multiplier
stride = 64 * _image_scale_multiplier
class BaseSuperResolutionModel(object):
    """Common scaffolding shared by all super-resolution model wrappers.

    Subclasses set hyper-parameters in ``__init__`` and override
    ``create_model`` to build the actual Keras graph on top of the Input
    tensor returned by this base implementation.
    """

    def __init__(self, model_name, scale_factor):
        """
        Base model to provide a standard interface of adding Super Resolution models
        """
        self.model = None # type: Model
        self.model_name = model_name
        self.scale_factor = scale_factor
        self.weight_path = None  # set by each subclass
        self.type_scale_type = "norm" # Default = "norm" = 1. / 255
        self.type_requires_divisible_shape = False  # True => H/W must be divisible by 4
        self.type_true_upscaling = False  # True => model outputs a larger image
        self.evaluation_func = None
        self.uses_learning_phase = False

    # NOTE(review): despite the historical "-> Model" annotation this base
    # implementation returns the Input *tensor*, which subclasses extend into
    # a full Model; the annotation has been dropped accordingly.
    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        if self.type_requires_divisible_shape and height is not None and width is not None:
            assert height * _image_scale_multiplier % 4 == 0, "Height of the image must be divisible by 4"
            assert width * _image_scale_multiplier % 4 == 0, "Width of the image must be divisible by 4"
        if width is not None and height is not None:
            shape = (width * _image_scale_multiplier, height * _image_scale_multiplier, channels)
        else:
            # Fully-convolutional mode: accept any spatial size.
            shape = (None, None, channels)
        init = Input(shape=shape)
        return init
class ImageSuperResolutionModel(BaseSuperResolutionModel):
    """SRCNN-style three-layer super-resolution network."""

    def __init__(self, scale_factor):
        super(ImageSuperResolutionModel, self).__init__("Image SR", scale_factor)
        # Kernel sizes (f*) and filter counts (n*) of the three conv layers.
        self.f1 = 9
        self.f2 = 1
        self.f3 = 5
        self.n1 = 64
        self.n2 = 32
        self.weight_path = "weights/SR Weights %dX.h5" % (self.scale_factor)

    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """Build the three-convolution SRCNN graph and store it on self.model."""
        init = super(ImageSuperResolutionModel, self).create_model(height, width, channels, load_weights, batch_size)
        hidden = Convolution2D(self.n1, (self.f1, self.f1), activation='relu', padding='same', name='level1')(init)
        hidden = Convolution2D(self.n2, (self.f2, self.f2), activation='relu', padding='same', name='level2')(hidden)
        out = Convolution2D(channels, (self.f3, self.f3), padding='same', name='output')(hidden)
        self.model = Model(init, out)
        return self.model
class ExpantionSuperResolution(BaseSuperResolutionModel):
    """SRCNN variant with three parallel mid-layer branches that are averaged."""

    def __init__(self, scale_factor):
        super(ExpantionSuperResolution, self).__init__("Expanded Image SR", scale_factor)
        # First/last kernel sizes plus the kernel size of each parallel branch.
        self.f1 = 9
        self.f2_1 = 1
        self.f2_2 = 3
        self.f2_3 = 5
        self.f3 = 5
        self.n1 = 64
        self.n2 = 32
        self.weight_path = "weights/Expantion SR Weights %dX.h5" % (self.scale_factor)

    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
        Creates a model to be used to scale images of specific height and width.
        """
        init = super(ExpantionSuperResolution, self).create_model(height, width, channels, load_weights, batch_size)
        features = Convolution2D(self.n1, (self.f1, self.f1), activation='relu', padding='same', name='level1')(init)
        # Three parallel branches with different receptive fields.
        branch_a = Convolution2D(self.n2, (self.f2_1, self.f2_1), activation='relu', padding='same', name='lavel1_1')(features)
        branch_b = Convolution2D(self.n2, (self.f2_2, self.f2_2), activation='relu', padding='same', name='lavel1_2')(features)
        branch_c = Convolution2D(self.n2, (self.f2_3, self.f2_3), activation='relu', padding='same', name='lavel1_3')(features)
        merged = Average()([branch_a, branch_b, branch_c])
        out = Convolution2D(channels, (self.f3, self.f3), activation='relu', padding='same', name='output')(merged)
        self.model = Model(init, out)
        return self.model
class DenoisingAutoEncoderSR(BaseSuperResolutionModel):
    """Denoising auto-encoder style SR network with additive skip connections."""

    def __init__(self, scale_factor):
        super(DenoisingAutoEncoderSR, self).__init__("Denoise AutoEncoder SR", scale_factor)
        self.n1 = 64
        self.n2 = 32
        self.weight_path = "weights/Denoising AutoEncoder %dX.h5" % (self.scale_factor)

    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
        Creates a model to remove / reduce noise from upscaled images.
        """
        # NOTE(review): the original imported Deconvolution2D and computed an
        # output_shape here; both were unused and have been removed.
        init = super(DenoisingAutoEncoderSR, self).create_model(height, width, channels, load_weights, batch_size)
        level1_1 = Convolution2D(self.n1, (3, 3), activation='relu', padding='same')(init)
        level2_1 = Convolution2D(self.n1, (3, 3), activation='relu', padding='same')(level1_1)
        level2_2 = Conv2DTranspose(self.n1, (3, 3), activation='relu', padding='same')(level2_1)
        level2 = Add()([level2_1, level2_2])
        level1_2 = Conv2DTranspose(self.n1, (3, 3), activation='relu', padding='same')(level2)
        level1 = Add()([level1_1, level1_2])
        decoded = Convolution2D(channels, (5, 5), activation='linear', padding='same')(level1)
        model = Model(init, decoded)
        self.model = model
        return model
class DeepDenoiseSR(BaseSuperResolutionModel):
    """U-Net style denoising SR: two pooling stages, a bottleneck, and two
    upsampling stages with additive skip connections."""

    def __init__(self, scale_factor):
        super(DeepDenoiseSR, self).__init__("Deep Denoise SR", scale_factor)
        # Treat this model as a denoising auto encoder
        # Force the fit, evaluate and upscale methods to take special care about image shape
        self.type_requires_divisible_shape = True
        self.n1 = 64
        self.n2 = 128
        self.n3 = 256
        self.weight_path = "weights/Deep Denoise Weights %dX.h5" % (self.scale_factor)

    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """Build the encoder/decoder graph; height/width must be divisible by 4."""
        # Perform check that model input shape is divisible by 4
        init = super(DeepDenoiseSR, self).create_model(height, width, channels, load_weights, batch_size)
        c1 = Convolution2D(self.n1, (3, 3), activation='relu', padding='same')(init)
        c1 = Convolution2D(self.n1, (3, 3), activation='relu', padding='same')(c1)
        x = MaxPooling2D((2, 2))(c1)
        c2 = Convolution2D(self.n2, (3, 3), activation='relu', padding='same')(x)
        c2 = Convolution2D(self.n2, (3, 3), activation='relu', padding='same')(c2)
        x = MaxPooling2D((2, 2))(c2)
        c3 = Convolution2D(self.n3, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D()(c3)
        c2_2 = Convolution2D(self.n2, (3, 3), activation='relu', padding='same')(x)
        c2_2 = Convolution2D(self.n2, (3, 3), activation='relu', padding='same')(c2_2)
        m1 = Add()([c2, c2_2])
        m1 = UpSampling2D()(m1)
        c1_2 = Convolution2D(self.n1, (3, 3), activation='relu', padding='same')(m1)
        c1_2 = Convolution2D(self.n1, (3, 3), activation='relu', padding='same')(c1_2)
        m2 = Add()([c1, c1_2])
        # BUG FIX: the final layer used the Keras 1 call signature
        # Convolution2D(channels, 5, 5, border_mode='same'); under the Keras 2
        # API the third positional argument means `strides` and border_mode is
        # invalid (it only worked through the deprecated legacy-interface
        # shim).  Rewritten with the Keras 2 signature used everywhere else
        # in this file.
        decoded = Convolution2D(channels, (5, 5), activation='linear', padding='same')(m2)
        model = Model(init, decoded)
        self.model = model
        return model
class ResNetSR(BaseSuperResolutionModel):
    """Residual SR network: conv stem, six residual blocks, one 2x upscale."""

    def __init__(self, scale_factor):
        super(ResNetSR, self).__init__("ResNetSR", scale_factor)
        # Treat this model as a denoising auto encoder
        # Force the fit, evaluate and upscale methods to take special care about image shape
        self.type_requires_divisible_shape = True
        self.uses_learning_phase = False
        self.n = 64
        self.mode = 2
        self.weight_path = "weights/ResNetSR %dX.h5" % (self.scale_factor)
        self.type_true_upscaling = True

    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """Build the residual SR graph (stem -> 6 residual blocks -> 2x upscale)."""
        init = super(ResNetSR, self).create_model(height, width, channels, load_weights, batch_size)
        x0 = Convolution2D(64, (3, 3), activation='relu', padding='same', name='sr_res_conv1')(init)
        x = self._residual_block(x0, 1)
        nb_residual = 5
        for i in range(nb_residual):
            x = self._residual_block(x, i + 2)
        # Global residual connection around the whole residual stack.
        x = Add()([x, x0])
        x = self._upscale_block(x, 1)
        x = Convolution2D(3, (3, 3), activation="linear", padding='same', name='sr_res_conv_final')(x)
        model = Model(init, x)
        self.model = model
        return model

    def _residual_block(self, ip, id):
        """conv-BN-ReLU-conv-BN with identity skip; BN runs in inference mode
        (training=False) when self.mode == 2."""
        mode = False if self.mode == 2 else None
        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        init = ip
        x = Convolution2D(64, (3, 3), activation='linear', padding='same',
                          name='sr_res_conv_' + str(id) + '_1')(ip)
        x = BatchNormalization(axis=channel_axis, name="sr_res_batchnorm_" + str(id) + "_1")(x, training=mode)
        x = Activation('relu', name="sr_res_activation_" + str(id) + "_1")(x)
        x = Convolution2D(64, (3, 3), activation='linear', padding='same',
                          name='sr_res_conv_' + str(id) + '_2')(x)
        x = BatchNormalization(axis=channel_axis, name="sr_res_batchnorm_" + str(id) + "_2")(x, training=mode)
        m = Add(name="sr_res_merge_" + str(id))([x, init])
        return m

    def _upscale_block(self, ip, id):
        """2x nearest-neighbour upsample followed by a 3x3 convolution.

        NOTE(review): removed an unused `channels = ip._keras_shape[...]`
        lookup; `_keras_shape` is deprecated and absent in tf.keras.
        """
        x = UpSampling2D()(ip)
        x = Convolution2D(self.n, (3, 3), activation="relu", padding='same', name='sr_res_filter1_%d' % id)(x)
        return x
class GANImageSuperResolutionModel(BaseSuperResolutionModel):
    """Generator half of a GAN-based SR model (discriminator not built here)."""

    def __init__(self, scale_factor):
        super(GANImageSuperResolutionModel, self).__init__("GAN Image SR", scale_factor)
        self.f1 = 9
        self.f2 = 1
        self.f3 = 5
        self.n1 = 64
        self.n2 = 32
        self.gen_model = None # type: Model
        self.disc_model = None # type: Model
        self.type_scale_type = 'tanh'
        self.weight_path = "weights/GAN SR Weights %dX.h5" % (self.scale_factor)
        self.gen_weight_path = "weights/GAN SR Pretrain Weights %dX.h5" % (self.scale_factor)
        self.disc_weight_path = "weights/GAN SR Discriminator Weights %dX.h5" % (self.scale_factor)

    def create_model(self, mode='test', height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
        Creates a model to be used to scale images of specific height and width.

        `mode` is kept for interface compatibility; this build path does not
        use it.  (Removed an unused `channel_axis` local.)
        """
        gen_init = super(GANImageSuperResolutionModel, self).create_model(height, width, channels, load_weights, batch_size)
        # NOTE(review): each conv already applies 'relu', so the following
        # LeakyReLU only ever sees non-negative values - preserved as-is.
        x = Convolution2D(self.n1, (self.f1, self.f1), activation='relu', padding='same', name='gen_level1')(gen_init)
        x = LeakyReLU(alpha=0.25)(x)
        x = Convolution2D(self.n2, (self.f2, self.f2), activation='relu', padding='same', name='gen_level2')(x)
        x = LeakyReLU(alpha=0.25)(x)
        out = Convolution2D(channels, (self.f3, self.f3), activation='tanh', padding='same', name='gen_output')(x)
        gen_model = Model(gen_init, out)
        self.model = gen_model
        return self.model
class DistilledResNetSR(BaseSuperResolutionModel):
    """Smaller 'student' variant of ResNetSR (one residual block, n=32)."""

    def __init__(self, scale_factor):
        super(DistilledResNetSR, self).__init__("DistilledResNetSR", scale_factor)
        # Treat this model as a denoising auto encoder
        # Force the fit, evaluate and upscale methods to take special care about image shape
        self.type_requires_divisible_shape = True
        self.uses_learning_phase = False
        self.n = 32
        self.mode = 2
        self.weight_path = "weights/DistilledResNetSR %dX.h5" % (self.scale_factor)
        self.type_true_upscaling = True

    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """Build the student graph: stem -> 1 residual block -> 2x upscale."""
        init = super(DistilledResNetSR, self).create_model(height, width, channels, load_weights, batch_size)
        x0 = Convolution2D(self.n, (3, 3), activation='relu', padding='same', name='student_sr_res_conv1')(init)
        x = self._residual_block(x0, 1)
        x = Add(name='student_residual')([x, x0])
        x = self._upscale_block(x, 1)
        x = Convolution2D(3, (3, 3), activation="linear", padding='same', name='student_sr_res_conv_final')(x)
        model = Model(init, x)
        # dont compile yet
        if load_weights: model.load_weights(self.weight_path, by_name=True)
        self.model = model
        return model

    def _residual_block(self, ip, id):
        """conv-BN-ReLU-conv-BN with identity skip; BN frozen when mode == 2."""
        mode = False if self.mode == 2 else None
        channel_axis = -1
        init = ip
        x = Convolution2D(self.n, (3, 3), activation='linear', padding='same',
                          name='student_sr_res_conv_' + str(id) + '_1')(ip)
        x = BatchNormalization(axis=channel_axis, name="student_sr_res_batchnorm_" + str(id) + "_1")(x, training=mode)
        x = Activation('relu', name="student_sr_res_activation_" + str(id) + "_1")(x)
        x = Convolution2D(self.n, (3, 3), activation='linear', padding='same',
                          name='student_sr_res_conv_' + str(id) + '_2')(x)
        x = BatchNormalization(axis=channel_axis, name="student_sr_res_batchnorm_" + str(id) + "_2")(x, training=mode)
        m = Add(name="student_sr_res_merge_" + str(id))([x, init])
        return m

    def _upscale_block(self, ip, id):
        """2x nearest-neighbour upsample followed by a 3x3 convolution.

        NOTE(review): removed an unused `channels = ip._keras_shape[...]`
        lookup; `_keras_shape` is deprecated and absent in tf.keras.
        """
        x = UpSampling2D(name='student_upsampling_%d' % id)(ip)
        x = Convolution2D(self.n * 2, (3, 3), activation="relu", padding='same', name='student_sr_res_filter1_%d' % id)(x)
        return x
def _convND(ip, rank, channels):
    """Apply a pointwise (1x...x1) convolution matching the tensor rank.

    `rank` counts the full tensor dimensions including batch and channel
    (3 -> Conv1D, 4 -> Conv2D, 5 -> Conv3D).
    """
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"
    if rank == 3:
        return Conv1D(channels, 1, padding='same', use_bias=False)(ip)
    if rank == 4:
        return Conv2D(channels, (1, 1), padding='same', use_bias=False)(ip)
    return Conv3D(channels, (1, 1, 1), padding='same', use_bias=False)(ip)
def non_local_block(ip, computation_compression=2, mode='embedded'):
    """Wrap tensor `ip` in a non-local (self-attention) block with a residual
    connection.

    Supports 3D (temporal), 4D (spatial) and 5D (spatio-temporal) inputs and
    the 'gaussian', 'embedded' (default) and 'dot' instantiations from the
    Non-Local Neural Networks paper; 'concatenate' is not implemented.

    Raises ValueError for an unknown `mode` or input rank, and
    NotImplementedError for mode='concatenate'.
    """
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    ip_shape = K.int_shape(ip)
    if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:
        raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')
    dim1, dim2, dim3 = None, None, None
    # Unpack the static shape according to rank and data format.
    if len(ip_shape) == 3:  # time series data
        rank = 3
        batchsize, dim1, channels = ip_shape
    elif len(ip_shape) == 4:  # image data
        rank = 4
        if channel_dim == 1:
            batchsize, channels, dim1, dim2 = ip_shape
        else:
            batchsize, dim1, dim2, channels = ip_shape
    elif len(ip_shape) == 5:  # Video / Voxel data
        rank = 5
        if channel_dim == 1:
            batchsize, channels, dim1, dim2, dim3 = ip_shape
        else:
            batchsize, dim1, dim2, dim3, channels = ip_shape
    else:
        raise ValueError('Input dimension has to be either 3 (temporal), 4 (spatial) or 5 (spatio-temporal)')
    if mode == 'gaussian':  # Gaussian instantiation
        x1 = Reshape((-1, channels))(ip)  # xi
        x2 = Reshape((-1, channels))(ip)  # xj
        f = dot([x1, x2], axes=2)
        f = Activation('softmax')(f)
    elif mode == 'dot':  # Dot instantiation
        # theta path
        theta = _convND(ip, rank, channels // 2)
        theta = Reshape((-1, channels // 2))(theta)
        # phi path
        phi = _convND(ip, rank, channels // 2)
        phi = Reshape((-1, channels // 2))(phi)
        f = dot([theta, phi], axes=2)
        # scale the values to make it size invariant
        if batchsize is not None:
            f = Lambda(lambda z: 1. / batchsize * z)(f)
        else:
            f = Lambda(lambda z: 1. / 128 * z)(f)
    elif mode == 'concatenate':  # Concatenation instantiation
        # BUG FIX: the original raised `NotImplemented(...)`; NotImplemented
        # is a singleton, not an exception class, so raising it is a TypeError
        # at runtime.  NotImplementedError is the correct exception.
        raise NotImplementedError('Concatenation mode has not been implemented yet')
    else:  # Embedded Gaussian instantiation
        # theta path
        theta = _convND(ip, rank, channels // 2)
        theta = Reshape((-1, channels // 2))(theta)
        # phi path
        phi = _convND(ip, rank, channels // 2)
        phi = Reshape((-1, channels // 2))(phi)
        if computation_compression > 1:
            # shielded computation
            phi = MaxPool1D(computation_compression)(phi)
        f = dot([theta, phi], axes=2)
        f = Activation('softmax')(f)
    # g path
    g = _convND(ip, rank, channels // 2)
    g = Reshape((-1, channels // 2))(g)
    if computation_compression > 1 and mode == 'embedded':
        # shielded computation
        g = MaxPool1D(computation_compression)(g)
    # compute output path
    y = dot([f, g], axes=[2, 1])
    # reshape to input tensor format
    if rank == 3:
        y = Reshape((dim1, channels // 2))(y)
    elif rank == 4:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, channels // 2))(y)
        else:
            y = Reshape((channels // 2, dim1, dim2))(y)
    else:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, dim3, channels // 2))(y)
        else:
            y = Reshape((channels // 2, dim1, dim2, dim3))(y)
    # project filters
    y = _convND(y, rank, channels)
    # residual connection
    residual = add([ip, y])
    return residual
class NonLocalResNetSR(BaseSuperResolutionModel):
    """ResNetSR variant with non-local (self-attention) blocks inserted."""

    def __init__(self, scale_factor):
        super(NonLocalResNetSR, self).__init__("NonLocalResNetSR", scale_factor)
        # Treat this model as a denoising auto encoder
        # Force the fit, evaluate and upscale methods to take special care about image shape
        self.type_requires_divisible_shape = True
        self.uses_learning_phase = False
        self.n = 32
        self.mode = 2
        self.weight_path = "weights/NonLocalResNetSR %dX.h5" % (self.scale_factor)
        self.type_true_upscaling = True

    def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """Build the graph: stem + non-local -> 6 residual blocks + non-local
        -> global skip -> 2x upscale."""
        init = super(NonLocalResNetSR, self).create_model(height, width, channels, load_weights, batch_size)
        x0 = Convolution2D(self.n, (3, 3), activation='relu', padding='same', name='sr_res_conv1')(init)
        x0 = non_local_block(x0)
        x = self._residual_block(x0, 1)
        nb_residual = 5
        for i in range(nb_residual):
            x = self._residual_block(x, i + 2)
        x = non_local_block(x, computation_compression=2)
        x = Add()([x, x0])
        x = self._upscale_block(x, 1)
        x = Convolution2D(3, (3, 3), activation="linear", padding='same', name='sr_res_conv_final')(x)
        model = Model(init, x)
        self.model = model
        return model

    def _residual_block(self, ip, id):
        """conv-BN-ReLU-conv-BN with identity skip; BN frozen when mode == 2."""
        mode = False if self.mode == 2 else None
        channel_axis = -1
        init = ip
        x = Convolution2D(self.n, (3, 3), activation='linear', padding='same',
                          name='sr_res_conv_' + str(id) + '_1')(ip)
        x = BatchNormalization(axis=channel_axis, name="sr_res_batchnorm_" + str(id) + "_1")(x, training=mode)
        x = Activation('relu', name="sr_res_activation_" + str(id) + "_1")(x)
        x = Convolution2D(self.n, (3, 3), activation='linear', padding='same',
                          name='sr_res_conv_' + str(id) + '_2')(x)
        x = BatchNormalization(axis=channel_axis, name="sr_res_batchnorm_" + str(id) + "_2")(x, training=mode)
        m = Add(name="sr_res_merge_" + str(id))([x, init])
        return m

    def _upscale_block(self, ip, id):
        """2x nearest-neighbour upsample followed by a 3x3 convolution.

        NOTE(review): removed an unused `channel_dim` local.
        """
        x = UpSampling2D()(ip)
        x = Convolution2D(self.n, (3, 3), activation="relu", padding='same', name='sr_res_filter1_%d' % id)(x)
        return x
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    """Build an SRResNet-style fully-convolutional restoration network.

    `resunit_num` conv-BN-PReLU-conv-BN residual units sit between a conv
    stem and a trunk-level skip connection; a final conv maps back to
    `input_channel_num` channels.
    """
    def residual_unit(unit_input):
        h = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(unit_input)
        h = BatchNormalization()(h)
        h = PReLU(shared_axes=[1, 2])(h)
        h = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(h)
        h = BatchNormalization()(h)
        return Add()([h, unit_input])

    net_in = Input(shape=(None, None, input_channel_num))
    features = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(net_in)
    features = PReLU(shared_axes=[1, 2])(features)
    trunk_start = features
    for _ in range(resunit_num):
        features = residual_unit(features)
    features = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(features)
    features = BatchNormalization()(features)
    features = Add()([features, trunk_start])
    restored = Conv2D(input_channel_num, (3, 3), padding="same", kernel_initializer="he_normal")(features)
    return Model(inputs=net_in, outputs=restored)
# Per-channel RGB mean of the DIV2K dataset, in 0-255 pixel units.
DIV2K_RGB_MEAN = 255 * np.array([0.4488, 0.4371, 0.4040])


def normalize(x, rgb_mean=DIV2K_RGB_MEAN):
    """Center on the DIV2K per-channel mean and scale into roughly [-1, 1]."""
    return (x - rgb_mean) / 127.5


def denormalize(x, rgb_mean=DIV2K_RGB_MEAN):
    """Inverse of normalize: back to 0-255 pixel units."""
    return x * 127.5 + rgb_mean


def normalize_01(x):
    """Normalizes RGB images to [0, 1]."""
    return x / 255.0


def normalize_m11(x):
    """Normalizes RGB images to [-1, 1]."""
    return x / 127.5 - 1


def denormalize_m11(x):
    """Inverse of normalize_m11."""
    return (x + 1) * 127.5
def pixel_shuffle(scale):
    """Return a function that rearranges depth into spatial blocks
    (tf.nn.depth_to_space) by the given scale factor."""
    def _shuffle(x):
        return tf.nn.depth_to_space(x, scale)
    return _shuffle
LR_SIZE = 24
HR_SIZE = 96
def upsample(x_in, num_filters):
    """Double spatial resolution: conv -> pixel shuffle (x2) -> PReLU."""
    feat = Conv2D(num_filters, kernel_size=3, padding='same')(x_in)
    feat = Lambda(pixel_shuffle(scale=2))(feat)
    return PReLU(shared_axes=[1, 2])(feat)
def res_block(x_in, num_filters, momentum=0.8):
    """SRGAN residual block: conv-BN-PReLU-conv-BN plus identity skip."""
    h = Conv2D(num_filters, kernel_size=3, padding='same')(x_in)
    h = BatchNormalization(momentum=momentum)(h)
    h = PReLU(shared_axes=[1, 2])(h)
    h = Conv2D(num_filters, kernel_size=3, padding='same')(h)
    h = BatchNormalization(momentum=momentum)(h)
    return Add()([x_in, h])
def sr_resnet(num_filters=64, num_res_blocks=16):
    """Build the SRGAN generator (SRResNet) with 4x pixel-shuffle upscaling."""
    lr_in = Input(shape=(None, None, 3))
    h = Lambda(normalize_01)(lr_in)
    h = Conv2D(num_filters, kernel_size=9, padding='same')(h)
    h = trunk_skip = PReLU(shared_axes=[1, 2])(h)
    for _ in range(num_res_blocks):
        h = res_block(h, num_filters)
    h = Conv2D(num_filters, kernel_size=3, padding='same')(h)
    h = BatchNormalization()(h)
    h = Add()([trunk_skip, h])
    # Two x2 pixel-shuffle stages -> 4x upscaling overall.
    h = upsample(h, num_filters * 4)
    h = upsample(h, num_filters * 4)
    h = Conv2D(3, kernel_size=9, padding='same', activation='tanh')(h)
    h = Lambda(denormalize_m11)(h)
    return Model(lr_in, h)
# Model from https://github.com/titu1994/Image-Super-Resolution
class TestSuperResolution(unittest.TestCase):
    """Round-trip each super-resolution Keras model through ONNX and compare
    ONNXRuntime output against the Keras prediction.

    Changes vs. original: the ten near-identical test bodies are factored
    into _run_case, and test_SRResNet now registers the explicitly saved
    'sr_resnet.onnx' for cleanup (previously it leaked past tearDown).
    """

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        # Remove every model file registered during the test.
        for fl in self.model_files:
            os.remove(fl)

    def _run_case(self, keras_model, **tolerances):
        """Predict with Keras, convert to ONNX and assert ONNXRuntime parity.

        Keyword arguments (e.g. rtol/atol) are forwarded to run_keras_and_ort.
        """
        data = np.random.rand(2, 32, 32, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files, **tolerances))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_ImageSuperResolutionModel(self):
        K.clear_session()
        self._run_case(ImageSuperResolutionModel(2.0).create_model())

    def test_ExpantionSuperResolution(self):
        # NOTE(review): the only test without a skipIf decorator - kept as-is
        # so exactly one quick case still runs at test level 0.
        K.clear_session()
        self._run_case(ExpantionSuperResolution(2.0).create_model())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DenoisingAutoEncoderSR(self):
        K.clear_session()
        self._run_case(DenoisingAutoEncoderSR(2.0).create_model())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DeepDenoiseSR(self):
        K.clear_session()
        self._run_case(DeepDenoiseSR(2.0).create_model())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_ResNetSR(self):
        K.clear_session()
        self._run_case(ResNetSR(2.0).create_model())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_GANImageSuperResolutionModel(self):
        K.clear_session()
        self._run_case(GANImageSuperResolutionModel(2.0).create_model())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_DistilledResNetSR(self):
        K.clear_session()
        self._run_case(DistilledResNetSR(2.0).create_model())

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_NonLocalResNetSR(self):
        K.clear_session()
        self._run_case(NonLocalResNetSR(2.0).create_model(), atol=1e-4)

    # From https://github.com/yu4u/noise2noise/blob/master/model.py
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_SRResNet(self):
        K.clear_session()
        keras_model = get_srresnet_model()
        data = np.random.rand(2, 32, 32, 3).astype(np.float32)
        expected = keras_model.predict(data)
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        mock_keras2onnx.save_model(onnx_model, 'sr_resnet.onnx')
        # BUG FIX: register the explicitly saved model so tearDown removes it.
        self.model_files.append('sr_resnet.onnx')
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              data, expected, self.model_files, rtol=1e-2, atol=1e-4))

    # From https://github.com/krasserm/super-resolution/blob/master/model/srgan.py
    @unittest.skipIf(test_level_0,
                     "TODO: perf")
    def test_sr_resnet(self):
        K.clear_session()
        self._run_case(sr_resnet())

    # From https://github.com/krasserm/super-resolution/blob/master/model/edsr.py
    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_edsr(self):
        K.clear_session()

        def edsr(scale, num_filters=64, num_res_blocks=8, res_block_scaling=None):
            # EDSR generator: conv stem, residual trunk, pixel-shuffle upscale.
            x_in = Input(shape=(None, None, 3))
            x = Lambda(normalize)(x_in)
            x = b = Conv2D(num_filters, 3, padding='same')(x)
            for i in range(num_res_blocks):
                b = res_block(b, num_filters, res_block_scaling)
            b = Conv2D(num_filters, 3, padding='same')(b)
            x = Add()([x, b])
            x = upsample(x, scale, num_filters)
            x = Conv2D(3, 3, padding='same')(x)
            x = Lambda(denormalize)(x)
            return Model(x_in, x, name="edsr")

        def res_block(x_in, filters, scaling):
            # Residual block without batch norm, with optional output scaling.
            x = Conv2D(filters, 3, padding='same', activation='relu')(x_in)
            x = Conv2D(filters, 3, padding='same')(x)
            if scaling:
                x = Lambda(lambda t: t * scaling)(x)
            x = Add()([x_in, x])
            return x

        def upsample(x, scale, num_filters):
            def upsample_1(x, factor, **kwargs):
                x = Conv2D(num_filters * (factor ** 2), 3, padding='same', **kwargs)(x)
                return Lambda(pixel_shuffle(scale=factor))(x)
            if scale == 2:
                x = upsample_1(x, 2, name='conv2d_1_scale_2')
            elif scale == 3:
                x = upsample_1(x, 3, name='conv2d_1_scale_3')
            elif scale == 4:
                x = upsample_1(x, 2, name='conv2d_1_scale_2')
                x = upsample_1(x, 2, name='conv2d_2_scale_2')
            return x

        self._run_case(edsr(2.0), rtol=1e-2, atol=1e-3)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 32,874 | 35.126374 | 134 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_prn.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def PRN(height, width, node_count):
    """Pose Residual Network: a residual MLP over flattened 17-channel
    keypoint heatmaps, with a single softmax over every spatial position,
    reshaped back to (height, width, 17)."""
    inp = Input(shape=(height, width, 17))
    flat = Flatten()(inp)
    hidden = Dense(node_count, activation='relu')(flat)
    hidden = Dropout(0.5)(hidden)
    hidden = Dense(width * height * 17, activation='relu')(hidden)
    # Residual connection back to the flattened input.
    summed = keras.layers.Add()([hidden, flat])
    probs = keras.layers.Activation('softmax')(summed)
    out = Reshape((height, width, 17))(probs)
    return Model(inputs=inp, outputs=out)
def PRN_Seperate(height, width, node_count):
    """Pose Residual Network variant with a separate softmax per channel.

    Same residual MLP as `PRN`, but the (width*height*17)-wide vector is
    sliced into 17 per-channel chunks, softmax is applied to each chunk
    independently, and the chunks are concatenated back together.

    Note: the name keeps the original spelling ("Seperate") because callers
    reference it.
    """
    inp = Input(shape=(height, width, 17))
    y = Flatten()(inp)
    x = Dense(node_count, activation='relu')(y)
    x = Dropout(0.5)(x)
    x = Dense(width * height * 17, activation='relu')(x)
    x = keras.layers.Add()([x, y])
    chunk = width * height
    out = []
    for i in range(17):
        start = i * chunk
        end = start + chunk
        # BUGFIX: bind the slice bounds as lambda defaults. The original
        # `lambda x: x[:, start:end]` closed over the loop variables, so if
        # Keras re-invokes the Lambda function after construction (e.g. when
        # the model is rebuilt or reloaded), every slice would use the final
        # iteration's bounds instead of its own.
        o = keras.layers.Lambda(lambda t, s=start, e=end: t[:, s:e])(x)
        o = Activation('softmax')(o)
        out.append(o)
    x = keras.layers.Concatenate()(out)
    x = Reshape((height, width, 17))(x)
    return Model(inputs=inp, outputs=x)
# Model from https://github.com/mkocabas/pose-residual-network
class TestPRN(unittest.TestCase):
    """Convert both pose-residual-network variants to ONNX and compare
    ONNX Runtime output with Keras output."""

    def setUp(self):
        # Converted model files produced during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_PRN(self):
        K.clear_session()
        model = PRN(28, 18, 15)
        batch = np.random.rand(2, 28, 18, 17).astype(np.float32)
        expected = model.predict(batch)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, batch, expected, self.model_files))

    @unittest.skipIf(test_level_0,
                     "Test level 0 only.")
    def test_PRN_Separate(self):
        K.clear_session()
        model = PRN_Seperate(28, 18, 15)
        batch = np.random.rand(2, 28, 18, 17).astype(np.float32)
        expected = model.predict(batch)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, batch, expected, self.model_files))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 3,588 | 30.761062 | 112 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_se_densenet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from mock_keras2onnx.proto.tfcompat import is_tf2
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
from keras_applications.imagenet_utils import _obtain_input_shape
K = keras.backend
from keras.regularizers import l2
is_keras_tensor = K.is_keras_tensor
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def squeeze_excite_block(input_tensor, ratio=16):
    """Squeeze-and-Excitation block: channel-wise attention gate.

    Global-average-pools the input (squeeze), runs it through a
    bottleneck Dense pair with a sigmoid output (excite), and scales the
    input channel-wise by the resulting gate.

    Args:
        input_tensor: input Keras tensor.
        ratio: reduction ratio of the bottleneck Dense layer.
    Returns: the input tensor rescaled channel-wise.
    """
    channels_first = K.image_data_format() == 'channels_first'
    filters = input_tensor.shape[1 if channels_first else -1]
    # Squeeze: global spatial average, reshaped so it broadcasts over H, W.
    gate = GlobalAveragePooling2D()(input_tensor)
    gate = Reshape((1, 1, filters))(gate)
    # Excite: C -> C/ratio -> C with a sigmoid gate.
    gate = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(gate)
    gate = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(gate)
    if channels_first:
        # Move channels back in front of the two singleton spatial dims.
        gate = Permute((3, 1, 2))(gate)
    return multiply([input_tensor, gate])
def SEDenseNet(input_shape=None,
               depth=40,
               nb_dense_block=3,
               growth_rate=12,
               nb_filter=-1,
               nb_layers_per_block=-1,
               bottleneck=False,
               reduction=0.0,
               dropout_rate=0.0,
               weight_decay=1e-4,
               subsample_initial_block=False,
               include_top=True,
               weights=None,
               input_tensor=None,
               classes=10,
               activation='softmax'):
    """Instantiate a Squeeze-and-Excite DenseNet.

    Args:
        input_shape: optional shape tuple; validated / derived by
            `_obtain_input_shape` (default size 32, minimum 8).
        depth: total network depth, used to derive layers per block when
            `nb_layers_per_block` is -1 (must then satisfy depth = 3N + 4).
        nb_dense_block: number of dense blocks.
        growth_rate: filters added by each conv block inside a dense block.
        nb_filter: initial filter count; -1 means 2 * growth_rate.
        nb_layers_per_block: int, -1, or list/tuple with one entry per block.
        bottleneck: insert 1x1 bottleneck convs in each conv block.
        reduction: transition-block compression factor in (0.0, 1.0].
        dropout_rate: dropout applied after each conv block.
        weight_decay: l2 regularization factor.
        subsample_initial_block: use a 7x7/stride-2 stem plus max-pool
            (ImageNet style) instead of the 3x3/stride-1 CIFAR stem.
        include_top: append the final Dense classifier head.
        weights: accepted for API compatibility but unused here — no
            pretrained weights are loaded by this function.
        input_tensor: optional existing tensor to build the model on.
        classes: number of output classes.
        activation: activation of the classifier head.

    Returns:
        A `keras.models.Model` named 'se-densenet'.
    """
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=8,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        # Wrap a plain (non-Keras) tensor so the functional API can use it.
        if not is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    x = __create_dense_net(classes, img_input, include_top, depth, nb_dense_block,
                           growth_rate, nb_filter, nb_layers_per_block, bottleneck, reduction,
                           dropout_rate, weight_decay, subsample_initial_block, activation)
    inputs = img_input
    # Create model.
    model = Model(inputs, x, name='se-densenet')
    return model
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    """BN -> ReLU -> (optional 1x1 bottleneck) -> 3x3 conv -> optional dropout.

    Args:
        ip: input Keras tensor.
        nb_filter: number of output filters of the 3x3 convolution.
        bottleneck: if True, prepend a 4 * nb_filter 1x1 bottleneck conv
            (with its own BN + ReLU).
        dropout_rate: optional dropout applied after the 3x3 conv.
        weight_decay: l2 factor for the bottleneck conv kernel.
    Returns: the transformed Keras tensor.
    """
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    out = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(ip)
    out = Activation('relu')(out)
    if bottleneck:
        # Factor of 4 follows the reference Torch DenseNet implementation
        # (https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua).
        out = Conv2D(nb_filter * 4, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                     kernel_regularizer=l2(weight_decay))(out)
        out = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(out)
        out = Activation('relu')(out)
    out = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    return out
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False):
    """Build a dense block: each conv block's output is concatenated onto the
    running feature map, then a squeeze-excite block recalibrates channels.

    Args:
        x: input Keras tensor.
        nb_layers: number of conv blocks to append.
        nb_filter: current number of filters (tracked, not enforced).
        growth_rate: filters produced by each conv block.
        bottleneck: pass-through to `__conv_block`.
        dropout_rate: pass-through to `__conv_block`.
        weight_decay: pass-through to `__conv_block`.
        grow_nb_filters: if True, grow the tracked filter count per layer.
        return_concat_list: also return the list of per-layer feature maps.
    Returns:
        (x, nb_filter) or (x, nb_filter, x_list) when return_concat_list.
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x_list = [x]
    for i in range(nb_layers):
        # Each conv block sees the concatenation of all previous outputs.
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)
        x = concatenate([x, cb], axis=concat_axis)
        if grow_nb_filters:
            nb_filter += growth_rate
    # squeeze and excite block
    x = squeeze_excite_block(x)
    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    """BN -> ReLU -> 1x1 conv (with compression) -> 2x2 average pool -> SE.

    Args:
        ip: input Keras tensor.
        nb_filter: number of filters entering the block.
        compression: calculated as 1 - reduction; scales the number of
            feature maps produced by the 1x1 convolution.
        weight_decay: l2 factor for the 1x1 conv kernel.
    Returns: the compressed, spatially-halved Keras tensor.
    """
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    out = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(ip)
    out = Activation('relu')(out)
    out = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                 kernel_regularizer=l2(weight_decay))(out)
    out = AveragePooling2D((2, 2), strides=(2, 2))(out)
    # Channel-wise recalibration after downsampling.
    out = squeeze_excite_block(out)
    return out
def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1,
                       nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1e-4,
                       subsample_initial_block=False, activation='softmax'):
    """Build the SE-DenseNet graph on `img_input` and return the output tensor.

    Stem conv -> (nb_dense_block - 1) x (dense block + transition block)
    -> final dense block -> BN/ReLU/global-average-pool -> optional
    classifier head. Parameters are as documented on `SEDenseNet`.
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    if reduction != 0.0:
        assert 1.0 >= reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'
    # layers in each dense block: explicit list, derived from depth, or a
    # single count replicated for every block.
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list
        assert len(nb_layers) == nb_dense_block, 'If list, nb_layer is used as provided. ' \
                                                 'Note that list size must be (nb_dense_block)'
        # The last entry is reserved for the final (transition-less) block.
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            assert (depth - 4) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)
            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block
    # compute initial nb_filter if -1, else accept users initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate
    # compute compression factor
    compression = 1.0 - reduction
    # Initial convolution: ImageNet-style 7x7/2 stem or CIFAR-style 3x3/1.
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)
    x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal', padding='same',
               strides=initial_strides, use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # Add dense blocks, each followed by a compressing transition block.
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, bottleneck=bottleneck,
                                     dropout_rate=dropout_rate, weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)
    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck,
                                 dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    if include_top:
        x = Dense(nb_classes, activation=activation)(x)
    return x
# Model from https://github.com/titu1994/keras-squeeze-excite-network
class TestSEDenseNet(unittest.TestCase):
    """Convert the SE-DenseNet model to ONNX and compare ONNX Runtime
    output with Keras output."""

    def setUp(self):
        # Converted model files produced during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    @unittest.skipIf(test_level_0 or not is_tf2,
                     "Test level 0 only.")
    def test_SE_Densenet(self):
        K.clear_session()
        model = SEDenseNet()
        batch = np.random.rand(2, 32, 32, 3).astype(np.float32)
        expected = model.predict(batch)
        onnx_model = mock_keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, model, batch, expected, self.model_files))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 10,900 | 36.332192 | 119 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_name_entity_recognition.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
import numpy as np
from mock_keras2onnx.proto import keras
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0
K = keras.backend
Activation = keras.layers.Activation
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
Bidirectional = keras.layers.Bidirectional
concatenate = keras.layers.concatenate
Conv1D = keras.layers.Conv1D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GaussianNoise = keras.layers.GaussianNoise
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LSTM = keras.layers.LSTM
MaxPooling1D = keras.layers.MaxPooling1D
Reshape = keras.layers.Reshape
TimeDistributed = keras.layers.TimeDistributed
Sequential = keras.models.Sequential
Model = keras.models.Model
# Model from https://github.com/kamalkraj/Named-Entity-Recognition-with-Bidirectional-LSTM-CNNs
class TestNameEntityRecognition(unittest.TestCase):
    """Convert a BiLSTM-CNN named-entity-recognition model to ONNX and
    compare ONNX Runtime output against Keras output."""
    def setUp(self):
        # Converted model files produced during the test; removed in tearDown.
        self.model_files = []
    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)
    # NOTE(review): the skip message mentions "Deep speech"; it looks copied
    # from another test, but the opset >= 11 requirement is what is enforced.
    @unittest.skipIf(get_maximum_opset_supported() < 11,
                     "Deep speech conversion need opset >= 11.")
    def test_name_entity_recognition(self):
        """Build word + casing + character-CNN branches, fuse them through a
        bidirectional LSTM tagger, and verify the ONNX conversion."""
        K.clear_session()
        # Word branch: frozen embedding over a toy vocabulary of 10 ids.
        words_input = Input(shape=(None,), dtype='int32', name='words_input')
        words = Embedding(input_dim=10, output_dim=20,
                          weights=None, trainable=False)(words_input)
        # Casing branch: frozen embedding over 12 casing categories.
        casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
        casing = Embedding(output_dim=20, input_dim=12,
                           weights=None, trainable=False)(casing_input)
        # Character branch: per-token char embedding -> Conv1D -> max-pool
        # over the fixed 52-char window -> flatten, all TimeDistributed.
        character_input = Input(shape=(None, 52,), name='char_input')
        embed_char_out = TimeDistributed(
            Embedding(26, 20),
            name='char_embedding')(character_input)
        dropout = Dropout(0.5)(embed_char_out)
        conv1d_out = TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same', activation='tanh', strides=1))(
            dropout)
        maxpool_out = TimeDistributed(MaxPooling1D(52))(conv1d_out)
        char = TimeDistributed(Flatten())(maxpool_out)
        char = Dropout(0.5)(char)
        # Fuse all three branches, then tag each timestep (35 classes).
        output = concatenate([words, casing, char])
        output = Bidirectional(LSTM(200, return_sequences=True, dropout=0.50, recurrent_dropout=0.25))(output)
        output = TimeDistributed(Dense(35, activation='softmax'))(output)
        keras_model = Model(inputs=[words_input, casing_input, character_input], outputs=[output])
        batch_size = 100
        # Random ids in [5, 10) keep indices inside both embedding tables.
        data1 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
        data2 = np.random.randint(5, 10, size=(batch_size, 6)).astype(np.int32)
        data3 = np.random.rand(batch_size, 6, 52).astype(np.float32)
        expected = keras_model.predict([data1, data2, data3])
        onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(
            run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model,
                              {keras_model.input_names[0]: data1,
                               keras_model.input_names[1]: data2,
                               keras_model.input_names[2]: data3}, expected, self.model_files))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 3,685 | 41.367816 | 118 | py |
tensorflow-onnx | tensorflow-onnx-main/tests/keras2onnx_applications/nightly_build/test_segnet.py | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras_segmentation
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
class TestSegNet(unittest.TestCase):
    """Run three keras-segmentation SegNet variants on a street image and
    compare Keras vs ONNX Runtime predictions via `run_image`.

    Model builders from
    https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/segnet.py
    """

    def setUp(self):
        # Converted model files produced during a test; removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for path in self.model_files:
            os.remove(path)

    def test_segnet(self):
        model = keras_segmentation.models.segnet.segnet(101)
        outcome = run_image(model, self.model_files, img_path, target_size=(416, 608))
        self.assertTrue(*outcome)

    def test_vgg_segnet(self):
        model = keras_segmentation.models.segnet.vgg_segnet(101)
        # The VGG backbone needs a looser relative tolerance.
        outcome = run_image(model, self.model_files, img_path, rtol=3.e-3, target_size=(416, 608))
        self.assertTrue(*outcome)

    def test_mobilenet_segnet(self):
        model = keras_segmentation.models.segnet.mobilenet_segnet(101)
        outcome = run_image(model, self.model_files, img_path, target_size=(224, 224))
        self.assertTrue(*outcome)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 1,563 | 35.372093 | 117 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.