python_code stringlengths 0–679k | repo_name stringlengths 9–41 | file_path stringlengths 6–149
|---|---|---|
doc(title="Operations",
underline_char="=",
entries=[
"general/general_ops_index.py",
"image_processing/index.py",
"audio_processing/index.py",
"sequence_processing/index.py",
])
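# Note: doc(), doc_entry() and op_reference() used throughout these index
# files are provided by DALI's documentation build tooling. As a minimal,
# illustrative sketch (an assumption for readability, not the actual
# implementation), they can be modeled as plain records:
from collections import namedtuple
DocSketch = namedtuple("DocSketch", ["title", "underline_char", "entries"])
DocEntrySketch = namedtuple("DocEntrySketch", ["path", "op_references"])
OpReferenceSketch = namedtuple("OpReferenceSketch", ["operator", "description"])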
|
DALI-main
|
docs/examples/operations_index.py
|
doc(title="General Purpose",
underline_char="=",
entries=[
"expressions/index.py",
doc_entry(
"reductions.ipynb",
op_reference('fn.reductions', "Tutorial describing how to use reductions")),
doc_entry(
"tensor_join.ipynb",
[
op_reference('fn.cat', "Tutorial describing tensor joining"),
op_reference('fn.stack', "Tutorial describing tensor joining")]),
doc_entry(
"reinterpret.ipynb",
[
op_reference('fn.reshape', "Tutorial describing tensor reshaping"),
op_reference('fn.squeeze', "Tutorial describing tensor squeezing"),
op_reference('fn.expand_dims', "Tutorial describing expanding tensor dimensions"),
op_reference('fn.reinterpret', "Tutorial describing tensor reinterpreting")]),
doc_entry(
"normalize.ipynb",
op_reference('fn.normalize', "Tutorial describing tensor normalization")),
doc_entry(
"../math/geometric_transforms.ipynb",
[
op_reference('fn.transforms', "Tutorial describing tensor geometric transformations to transform points and images"),
op_reference('fn.warp_affine', "Tutorial showing how to use the affine transform"),
op_reference('fn.coord_transform', "Tutorial describing how to transform points accompanying images")]),
doc_entry(
"erase.ipynb",
op_reference('fn.erase', "Tutorial describing tensor erasing"))
])
|
DALI-main
|
docs/examples/general/general_ops_index.py
|
doc(title="Data Loading",
underline_char="=",
entries=[
doc_entry("external_input.ipynb",
op_reference("fn.external_source", "Intro tutorial for external source")),
doc_entry(
"parallel_external_source.ipynb",
op_reference("fn.external_source", "How to use parallel mode for external source")),
doc_entry(
"parallel_external_source_fork.ipynb",
op_reference("fn.external_source",
"How to use parallel mode for external source in fork mode")),
doc_entry(
"dataloading_lmdb.ipynb",
[op_reference("fn.readers.caffe",
"Example of reading data stored in LMDB in the Caffe format"),
op_reference("fn.readers.caffe2",
"Example of reading data stored in LMDB in the Caffe 2 format")]),
doc_entry(
"dataloading_recordio.ipynb",
op_reference("fn.readers.mxnet",
"Example of reading data stored in the MXNet RecordIO format")),
doc_entry(
"dataloading_tfrecord.ipynb",
op_reference("fn.readers.tfrecord",
"Example of reading data stored in the TensorFlow TFRecord format")),
doc_entry(
"dataloading_webdataset.ipynb",
op_reference("fn.readers.webdataset",
"Example of reading data stored in the Webdataset format")),
doc_entry(
"coco_reader.ipynb",
op_reference("fn.readers.coco",
"Example of reading a subset of COCO dataset")),
doc_entry(
"numpy_reader.ipynb",
op_reference("fn.readers.numpy",
"Example of reading NumPy array files, "
"including reading directly to GPU memory utilizing the GPUDirect storage"))
])
|
DALI-main
|
docs/examples/general/data_loading/index.py
|
doc(title="DALI Expressions and Arithmetic Operations",
underline_char="=",
entries=[
"expr_examples.ipynb",
"expr_type_promotions.ipynb",
"expr_blend_image.ipynb",
"expr_conditional_and_masking.ipynb",
])
|
DALI-main
|
docs/examples/general/expressions/index.py
|
doc(title="Custom Operations",
underline_char="=",
entries=[
"custom_operator/create_a_custom_operator.ipynb",
doc_entry("python_operator.ipynb",
[op_reference('fn.python_function', "Running custom Python code with the family of python_function operators"),
op_reference('plugin.pytorch.fn.torch_python_function',
"Running custom Python code with the family of python_function operators"),
op_reference('fn.dl_tensor_python_function', "Running custom Python code with the family of python_function operators"), ]),
doc_entry("gpu_python_operator.ipynb",
[op_reference('fn.python_function', "Processing GPU Data with Python Operators"),
op_reference('plugin.pytorch.fn.torch_python_function',
"Processing GPU Data with Python Operators"),
op_reference('fn.dl_tensor_python_function', "Processing GPU Data with Python Operators"), ]),
doc_entry("numba_function.ipynb",
op_reference('plugin.numba.fn.experimental.numba_function',
"Running custom operations written as Numba JIT-compiled functions"),
),
])
|
DALI-main
|
docs/examples/custom_operations/index.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvidia.dali import pipeline_def
from nvidia.dali.types import DALIImageType
import nvidia.dali.fn as fn
# Load the Custom Operator
import nvidia.dali.plugin_manager as plugin_manager
plugin_manager.load_library('./build/libnaivehistogram.so')
# List test files. This step should be customized.
dali_extra_path = os.environ['DALI_EXTRA_PATH']
test_file_list = [
dali_extra_path + "/db/single/jpeg/100/swan-3584559_640.jpg",
dali_extra_path + "/db/single/jpeg/113/snail-4368154_1280.jpg",
dali_extra_path + "/db/single/jpeg/100/swan-3584559_640.jpg",
dali_extra_path + "/db/single/jpeg/113/snail-4368154_1280.jpg",
dali_extra_path + "/db/single/jpeg/100/swan-3584559_640.jpg",
dali_extra_path + "/db/single/jpeg/113/snail-4368154_1280.jpg",
]
# DALI pipeline definition
@pipeline_def
def naive_hist_pipe():
img, _ = fn.readers.file(files=test_file_list)
# The naive_histogram operator accepts single-channel images, so we convert the image to grayscale.
img = fn.decoders.image(img, device='mixed', output_type=DALIImageType.GRAY)
img = img.gpu()
img = fn.naive_histogram(img, n_bins=24)
return img
pipe = naive_hist_pipe(batch_size=2, num_threads=1, device_id=0)
pipe.build()
out = pipe.run()
print(out[0].as_cpu().as_array())
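# With batch_size=2 and n_bins=24, the print above should show two arrays of
# 24 bin counts each, one histogram per decoded grayscale image (assuming the
# custom naive_histogram operator emits one histogram per sample).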
|
DALI-main
|
docs/examples/custom_operations/custom_operator/naive_histogram/naive_histogram_test.py
|
doc(title="Audio Processing",
underline_char="=",
entries=[
doc_entry(
"audio_decoder.ipynb",
op_reference("fn.decoders.audio", "Audio decoder tutorial")),
doc_entry(
"spectrogram.ipynb",
[
op_reference("fn.spectrogram", "Audio spectrogram tutorial"),
op_reference("fn.mel_filter_bank", "Audio spectrogram tutorial"),
op_reference("fn.to_decibels", "Audio spectrogram tutorial"),
op_reference("fn.mfcc", "Audio spectrogram tutorial"),
])
])
|
DALI-main
|
docs/examples/audio_processing/index.py
|
doc(title="Use Cases",
underline_char="=",
entries=[
"video_superres/README.rst",
"pytorch/resnet50/pytorch-resnet50.rst",
"pytorch/single_stage_detector/pytorch_ssd.rst",
"pytorch/efficientnet/readme.rst",
"tensorflow/resnet-n/README.rst",
"tensorflow/yolov4/readme.rst",
"tensorflow/efficientdet/README.rst",
"paddle/index.py",
"mxnet/mxnet-resnet50.ipynb",
"detection_pipeline.ipynb",
"webdataset-externalsource.ipynb",
])
|
DALI-main
|
docs/examples/use_cases/index.py
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/__init__.py
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
from common import find_mxnet, data, fit
from common.util import download_file
import mxnet as mx
if __name__ == '__main__':
# parse args
parser = argparse.ArgumentParser(description="train imagenet-1k",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
fit.add_fit_args(parser)
data.add_data_args(parser)
data.add_data_aug_args(parser)
# use a large aug level
data.set_data_aug_level(parser, 3)
parser.set_defaults(
# network
network = 'resnet',
num_layers = 50,
# data
num_classes = 1000,
num_examples = 1281167,
image_shape = '3,224,224',
min_random_scale = 1,  # if the input image's shorter side is x, consider
                       # using 256.0/x, e.g. 0.533 for x = 480
# train
num_epochs = 80,
lr_step_epochs = '30,60',
dtype = 'float32'
)
args = parser.parse_args()
# load network
from importlib import import_module
net = import_module('symbols.'+args.network)
sym = net.get_symbol(**vars(args))
# train
fit.fit(args, sym, data.get_rec_iter)
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/train_imagenet.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" example train fit utility """
import logging
import os
import time
import re
import math
import mxnet as mx
def _get_lr_scheduler(args, kv):
if 'lr_factor' not in args or args.lr_factor >= 1:
return (args.lr, None)
epoch_size = args.num_examples / args.batch_size
if 'dist' in args.kv_store:
epoch_size /= kv.num_workers
begin_epoch = args.load_epoch if args.load_epoch else 0
if 'pow' in args.lr_step_epochs:
lr = args.lr
max_up = args.num_epochs * epoch_size
pwr = float(re.sub('pow[- ]*', '', args.lr_step_epochs))
poly_sched = mx.lr_scheduler.PolyScheduler(max_up, lr, pwr)
return (lr, poly_sched)
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
lr = args.lr
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d',
lr, begin_epoch)
steps = [epoch_size * (x - begin_epoch)
for x in step_epochs if x - begin_epoch > 0]
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
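# Worked example (illustrative): with lr=0.1, lr_factor=0.1 and
# lr_step_epochs='30,60', epoch_size is num_examples / batch_size iterations,
# so the MultiFactorScheduler multiplies the rate by 0.1 after 30 epochs'
# worth of iterations (0.1 -> 0.01) and again after 60 (0.01 -> 0.001).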
def _load_model(args, rank=0):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
model_prefix += "-%d" % (rank)
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
return (sym, arg_params, aux_params)
def _save_model(args, rank=0):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if dst_dir and not os.path.isdir(dst_dir):
    os.makedirs(dst_dir, exist_ok=True)
return mx.callback.do_checkpoint(args.model_prefix if rank == 0 else "%s-%d" % (
args.model_prefix, rank))
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
adds the arguments required by fit and returns the 'Training' argument group
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, \
required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--initializer', type=str, default='default',
help='the initializer type')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--loss', type=str, default='',
                   help='show the cross-entropy or nll loss; ce stands for cross-entropy, nll_loss for negative log-likelihood loss')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
train.add_argument('--dtype', type=str, default='float32',
help='precision: float32 or float16')
train.add_argument('--gc-type', type=str, default='none',
help='type of gradient compression to use, \
takes `2bit` or `none` for now')
train.add_argument('--gc-threshold', type=float, default=0.5,
help='threshold for 2bit gradient compression')
# additional parameters for large batch sgd
train.add_argument('--macrobatch-size', type=int, default=0,
help='distributed effective batch size')
train.add_argument('--warmup-epochs', type=int, default=5,
help='the epochs to ramp-up lr to scaled large-batch value')
train.add_argument('--warmup-strategy', type=str, default='linear',
help='the ramping-up strategy for large batch sgd')
return train
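# Illustrative usage (hypothetical flag values, not from the original file):
#   parser = argparse.ArgumentParser()
#   add_fit_args(parser)
#   args = parser.parse_args(['--network', 'resnet', '--num-layers', '50',
#                             '--gpus', '0,1', '--batch-size', '256'])
# The resulting Namespace is what fit() below consumes.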
def fit(args, network, data_loader, **kwargs):
"""
train a model
args : parsed command-line arguments (an argparse.Namespace)
network : the symbol definition of the neural network
data_loader : function that returns the train and val data iterators
"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
if args.gc_type != 'none':
kv.set_gradient_compression({'type': args.gc_type,
'threshold': args.gc_threshold})
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
(train, val) = data_loader(args, kv)
if args.test_io:
tic = time.time()
for i, batch in enumerate(train):
if isinstance(batch, list):
for b in batch:
for j in b.data:
j.wait_to_read()
else:
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec', i,
args.disp_batches * args.batch_size / (time.time() - tic))
tic = time.time()
return
# load model
if 'arg_params' in kwargs and 'aux_params' in kwargs:
arg_params = kwargs['arg_params']
aux_params = kwargs['aux_params']
else:
sym, arg_params, aux_params = _load_model(args, kv.rank)
if sym is not None:
assert sym.tojson() == network.tojson()
# save model
checkpoint = _save_model(args, kv.rank)
# devices for training
devs = mx.cpu() if args.gpus is None or args.gpus == "" else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv)
# create model
model = mx.mod.Module(
context=devs,
symbol=network
)
optimizer_params = {
'learning_rate': lr,
'wd': args.wd,
'lr_scheduler': lr_scheduler,
'multi_precision': True}
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag'}
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
monitor = mx.mon.Monitor(
args.monitor, pattern=".*") if args.monitor > 0 else None
# A limited number of optimizers have a warmup period
has_warmup = {'lbsgd', 'lbnag'}
if args.optimizer in has_warmup:
if 'dist' in args.kv_store:
nworkers = kv.num_workers
else:
nworkers = 1
epoch_size = args.num_examples / args.batch_size / nworkers
if epoch_size < 1:
epoch_size = 1
macrobatch_size = args.macrobatch_size
if macrobatch_size < args.batch_size * nworkers:
macrobatch_size = args.batch_size * nworkers
batch_scale = math.ceil(
float(macrobatch_size) / args.batch_size / nworkers)
optimizer_params['updates_per_epoch'] = epoch_size
optimizer_params['begin_epoch'] = args.load_epoch if args.load_epoch else 0
optimizer_params['batch_scale'] = batch_scale
optimizer_params['warmup_strategy'] = args.warmup_strategy
optimizer_params['warmup_epochs'] = args.warmup_epochs
optimizer_params['num_epochs'] = args.num_epochs
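# Example (illustrative numbers): batch_size=128, 4 distributed workers and
# macrobatch_size=2048 give batch_scale = ceil(2048 / 128 / 4) = 4, i.e.
# gradients are effectively accumulated over 4 local batches per update.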
if args.initializer == 'default':
if args.network == 'alexnet':
# AlexNet will not converge using Xavier
initializer = mx.init.Normal()
# VGG will not trend to converge using Xavier-Gaussian
elif 'vgg' in args.network:
initializer = mx.init.Xavier()
else:
initializer = mx.init.Xavier(
rnd_type='gaussian', factor_type="in", magnitude=2)
# initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
elif args.initializer == 'xavier':
initializer = mx.init.Xavier()
elif args.initializer == 'msra':
initializer = mx.init.MSRAPrelu()
elif args.initializer == 'orthogonal':
initializer = mx.init.Orthogonal()
elif args.initializer == 'normal':
initializer = mx.init.Normal()
elif args.initializer == 'uniform':
initializer = mx.init.Uniform()
elif args.initializer == 'one':
initializer = mx.init.One()
elif args.initializer == 'zero':
initializer = mx.init.Zero()
# evaluation metrics
eval_metrics = ['accuracy']
if args.top_k > 0:
eval_metrics.append(mx.metric.create(
'top_k_accuracy', top_k=args.top_k))
supported_loss = ['ce', 'nll_loss']
if len(args.loss) > 0:
# ce or nll loss is only applicable to softmax output
loss_type_list = args.loss.split(',')
if 'softmax_output' in network.list_outputs():
for loss_type in loss_type_list:
loss_type = loss_type.strip()
if loss_type == 'nll':
loss_type = 'nll_loss'
if loss_type not in supported_loss:
logging.warning(loss_type + ' is not a valid loss type; only cross-entropy ' \
                'and negative log-likelihood loss are supported!')
else:
eval_metrics.append(mx.metric.create(loss_type))
else:
logging.warning("The output is not softmax_output, loss argument will be skipped!")
# callbacks that run after each batch
batch_end_callbacks = [mx.callback.Speedometer(
args.batch_size, args.disp_batches)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
# run
model.fit(train,
begin_epoch=args.load_epoch if args.load_epoch else 0,
num_epoch=args.num_epochs,
eval_data=val,
eval_metric=eval_metrics,
kvstore=kv,
optimizer=args.optimizer,
optimizer_params=optimizer_params,
initializer=initializer,
arg_params=arg_params,
aux_params=aux_params,
batch_end_callback=batch_end_callbacks,
epoch_end_callback=checkpoint,
allow_missing=True,
monitor=monitor)
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/common/fit.py
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/common/__init__.py
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os, sys
try:
import mxnet as mx
except ImportError:
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../../../python"))
import mxnet as mx
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/common/find_mxnet.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import random
from mxnet.io import DataBatch, DataIter
import numpy as np
def add_data_args(parser):
data = parser.add_argument_group('Data', 'the input images')
data.add_argument('--data-train', type=str, help='the training data')
data.add_argument('--data-train-idx', type=str, default='', help='the index of training data')
data.add_argument('--data-val', type=str, help='the validation data')
data.add_argument('--data-val-idx', type=str, default='', help='the index of validation data')
data.add_argument('--rgb-mean', type=str, default='123.68,116.779,103.939',
help='a tuple of size 3 for the mean rgb')
data.add_argument('--pad-size', type=int, default=0,
help='padding the input image')
data.add_argument('--image-shape', type=str,
                  help='the image shape fed into the network, e.g. (3,224,224)')
data.add_argument('--num-classes', type=int, help='the number of classes')
data.add_argument('--num-examples', type=int, help='the number of training examples')
data.add_argument('--data-nthreads', type=int, default=4,
help='number of threads for data decoding')
data.add_argument('--benchmark', type=int, default=0,
help='if 1, then feed the network with synthetic data')
return data
def add_data_aug_args(parser):
aug = parser.add_argument_group(
'Image augmentations', 'implemented in src/io/image_aug_default.cc')
aug.add_argument('--random-crop', type=int, default=1,
                 help='whether to randomly crop the image')
aug.add_argument('--random-mirror', type=int, default=1,
                 help='whether to randomly flip the image horizontally')
aug.add_argument('--max-random-h', type=int, default=0,
help='max change of hue, whose range is [0, 180]')
aug.add_argument('--max-random-s', type=int, default=0,
help='max change of saturation, whose range is [0, 255]')
aug.add_argument('--max-random-l', type=int, default=0,
help='max change of intensity, whose range is [0, 255]')
aug.add_argument('--max-random-aspect-ratio', type=float, default=0,
help='max change of aspect ratio, whose range is [0, 1]')
aug.add_argument('--max-random-rotate-angle', type=int, default=0,
help='max angle to rotate, whose range is [0, 360]')
aug.add_argument('--max-random-shear-ratio', type=float, default=0,
help='max ratio to shear, whose range is [0, 1]')
aug.add_argument('--max-random-scale', type=float, default=1,
help='max ratio to scale')
aug.add_argument('--min-random-scale', type=float, default=1,
                 help='min ratio to scale; should be >= img_size/input_shape, otherwise use --pad-size')
return aug
def set_data_aug_level(aug, level):
if level >= 1:
aug.set_defaults(random_crop=1, random_mirror=1)
if level >= 2:
aug.set_defaults(max_random_h=36, max_random_s=50, max_random_l=50)
if level >= 3:
aug.set_defaults(max_random_rotate_angle=10, max_random_shear_ratio=0.1, max_random_aspect_ratio=0.25)
class SyntheticDataIter(DataIter):
def __init__(self, num_classes, data_shape, max_iter, dtype):
self.batch_size = data_shape[0]
self.cur_iter = 0
self.max_iter = max_iter
self.dtype = dtype
label = np.random.randint(0, num_classes, [self.batch_size,])
data = np.random.uniform(-1, 1, data_shape)
self.data = mx.nd.array(data, dtype=self.dtype, ctx=mx.Context('cpu_pinned', 0))
self.label = mx.nd.array(label, dtype=self.dtype, ctx=mx.Context('cpu_pinned', 0))
def __iter__(self):
return self
@property
def provide_data(self):
return [mx.io.DataDesc('data', self.data.shape, self.dtype)]
@property
def provide_label(self):
return [mx.io.DataDesc('softmax_label', (self.batch_size,), self.dtype)]
def next(self):
self.cur_iter += 1
if self.cur_iter <= self.max_iter:
return DataBatch(data=(self.data,),
label=(self.label,),
pad=0,
index=None,
provide_data=self.provide_data,
provide_label=self.provide_label)
else:
raise StopIteration
def __next__(self):
return self.next()
def reset(self):
self.cur_iter = 0
def get_rec_iter(args, kv=None):
image_shape = tuple([int(l) for l in args.image_shape.split(',')])
if 'benchmark' in args and args.benchmark:
data_shape = (args.batch_size,) + image_shape
train = SyntheticDataIter(args.num_classes, data_shape, 1000, np.float32)
return (train, None)
if kv:
(rank, nworker) = (kv.rank, kv.num_workers)
else:
(rank, nworker) = (0, 1)
rgb_mean = [float(i) for i in args.rgb_mean.split(',')]
train = mx.io.ImageRecordIter(
path_imgrec = args.data_train,
path_imgidx = args.data_train_idx,
label_width = 1,
mean_r = rgb_mean[0],
mean_g = rgb_mean[1],
mean_b = rgb_mean[2],
data_name = 'data',
label_name = 'softmax_label',
data_shape = image_shape,
batch_size = args.batch_size,
rand_crop = args.random_crop,
max_random_scale = args.max_random_scale,
pad = args.pad_size,
fill_value = 127,
min_random_scale = args.min_random_scale,
max_aspect_ratio = args.max_random_aspect_ratio,
random_h = args.max_random_h,
random_s = args.max_random_s,
random_l = args.max_random_l,
max_rotate_angle = args.max_random_rotate_angle,
max_shear_ratio = args.max_random_shear_ratio,
rand_mirror = args.random_mirror,
preprocess_threads = args.data_nthreads,
shuffle = True,
num_parts = nworker,
part_index = rank)
if args.data_val is None:
return (train, None)
val = mx.io.ImageRecordIter(
path_imgrec = args.data_val,
path_imgidx = args.data_val_idx,
label_width = 1,
mean_r = rgb_mean[0],
mean_g = rgb_mean[1],
mean_b = rgb_mean[2],
data_name = 'data',
label_name = 'softmax_label',
batch_size = args.batch_size,
data_shape = image_shape,
preprocess_threads = args.data_nthreads,
rand_crop = False,
rand_mirror = False,
num_parts = nworker,
part_index = rank)
return (train, val)
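if __name__ == '__main__':
    # Smoke test (illustrative, not part of the original training flow):
    # pull one synthetic batch without touching any RecordIO files.
    it = SyntheticDataIter(num_classes=10, data_shape=(4, 3, 224, 224),
                           max_iter=1, dtype=np.float32)
    batch = next(it)
    print(batch.data[0].shape, batch.label[0].shape)  # (4, 3, 224, 224) (4,)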
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/common/data.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""References:
Szegedy, Christian, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir
Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. "Going deeper
with convolutions." arXiv preprint arXiv:1409.4842 (2014).
"""
import mxnet as mx
def ConvFactory(data, num_filter, kernel, stride=(1,1), pad=(0, 0), name=None, suffix=''):
conv = mx.symbol.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, name='conv_%s%s' %(name, suffix))
act = mx.symbol.Activation(data=conv, act_type='relu', name='relu_%s%s' %(name, suffix))
return act
def InceptionFactory(data, num_1x1, num_3x3red, num_3x3, num_d5x5red, num_d5x5, pool, proj, name):
# 1x1
c1x1 = ConvFactory(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_1x1' % name))
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd5x5r = ConvFactory(data=data, num_filter=num_d5x5red, kernel=(1, 1), name=('%s_5x5' % name), suffix='_reduce')
cd5x5 = ConvFactory(data=cd5x5r, num_filter=num_d5x5, kernel=(5, 5), pad=(2, 2), name=('%s_5x5' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = ConvFactory(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_proj' % name))
# concat
concat = mx.symbol.Concat(*[c1x1, c3x3, cd5x5, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def get_symbol(num_classes = 1000, **kwargs):
data = mx.sym.Variable("data")
conv1 = ConvFactory(data, 64, kernel=(7, 7), stride=(2,2), pad=(3, 3), name="conv1")
pool1 = mx.sym.Pooling(conv1, kernel=(3, 3), stride=(2, 2), pool_type="max")
conv2 = ConvFactory(pool1, 64, kernel=(1, 1), stride=(1,1), name="conv2")
conv3 = ConvFactory(conv2, 192, kernel=(3, 3), stride=(1, 1), pad=(1,1), name="conv3")
pool3 = mx.sym.Pooling(conv3, kernel=(3, 3), stride=(2, 2), pool_type="max")
in3a = InceptionFactory(pool3, 64, 96, 128, 16, 32, "max", 32, name="in3a")
in3b = InceptionFactory(in3a, 128, 128, 192, 32, 96, "max", 64, name="in3b")
pool4 = mx.sym.Pooling(in3b, kernel=(3, 3), stride=(2, 2), pool_type="max")
in4a = InceptionFactory(pool4, 192, 96, 208, 16, 48, "max", 64, name="in4a")
in4b = InceptionFactory(in4a, 160, 112, 224, 24, 64, "max", 64, name="in4b")
in4c = InceptionFactory(in4b, 128, 128, 256, 24, 64, "max", 64, name="in4c")
in4d = InceptionFactory(in4c, 112, 144, 288, 32, 64, "max", 64, name="in4d")
in4e = InceptionFactory(in4d, 256, 160, 320, 32, 128, "max", 128, name="in4e")
pool5 = mx.sym.Pooling(in4e, kernel=(3, 3), stride=(2, 2), pool_type="max")
in5a = InceptionFactory(pool5, 256, 160, 320, 32, 128, "max", 128, name="in5a")
in5b = InceptionFactory(in5a, 384, 192, 384, 48, 128, "max", 128, name="in5b")
pool6 = mx.sym.Pooling(in5b, kernel=(7, 7), stride=(1,1), pool_type="avg")
flatten = mx.sym.Flatten(data=pool6)
fc1 = mx.sym.FullyConnected(data=flatten, num_hidden=num_classes)
softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
return softmax
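if __name__ == '__main__':
    # Illustrative check (not in the original file): build the symbol and
    # confirm the softmax output is exposed under the expected name.
    sym = get_symbol(num_classes=1000)
    print(sym.list_outputs())  # expected: ['softmax_output']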
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/googlenet.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""References:
Simonyan, Karen, and Andrew Zisserman. "Very deep convolutional networks for
large-scale image recognition." arXiv preprint arXiv:1409.1556 (2014).
"""
import mxnet as mx
import numpy as np
def get_feature(internel_layer, layers, filters, batch_norm = False, **kwargs):
for i, num in enumerate(layers):
for j in range(num):
internel_layer = mx.sym.Convolution(data = internel_layer, kernel=(3, 3), pad=(1, 1), num_filter=filters[i], name="conv%s_%s" %(i + 1, j + 1))
if batch_norm:
internel_layer = mx.symbol.BatchNorm(data=internel_layer, name="bn%s_%s" %(i + 1, j + 1))
internel_layer = mx.sym.Activation(data=internel_layer, act_type="relu", name="relu%s_%s" %(i + 1, j + 1))
internel_layer = mx.sym.Pooling(data=internel_layer, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool%s" %(i + 1))
return internel_layer
def get_classifier(input_data, num_classes, **kwargs):
flatten = mx.sym.Flatten(data=input_data, name="flatten")
fc6 = mx.sym.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.sym.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.sym.Dropout(data=relu6, p=0.5, name="drop6")
fc7 = mx.sym.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.sym.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.sym.Dropout(data=relu7, p=0.5, name="drop7")
fc8 = mx.sym.FullyConnected(data=drop7, num_hidden=num_classes, name="fc8")
return fc8
def get_symbol(num_classes, num_layers=11, batch_norm=False, dtype='float32', **kwargs):
"""
Parameters
----------
num_classes : int, default 1000
Number of classification classes.
num_layers : int
Number of layers for the VGG variant. Options are 11, 13, 16, 19.
batch_norm : bool, default False
Use batch normalization.
dtype: str, float32 or float16
Data precision.
"""
vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])}
if num_layers not in vgg_spec:
    raise ValueError("Invalid num_layers {}. Possible choices are 11, 13, 16, 19.".format(num_layers))
layers, filters = vgg_spec[num_layers]
data = mx.sym.Variable(name="data")
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
feature = get_feature(data, layers, filters, batch_norm)
classifier = get_classifier(feature, num_classes)
if dtype == 'float16':
classifier = mx.sym.Cast(data=classifier, dtype=np.float32)
symbol = mx.sym.SoftmaxOutput(data=classifier, name='softmax')
return symbol
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/vgg.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding:utf-8 -*-
__author__ = 'zhangshuai'
modified_date = '16/7/5'
__modify__ = 'anchengwu'
modified_date = '17/2/22'
'''
Inception v4, suitable for images of around 299 x 299
Reference:
Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke
arXiv.1602.07261
'''
import mxnet as mx
import numpy as np
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' %(name, suffix))
bn = mx.sym.BatchNorm(data=conv, name='%s%s_batchnorm' %(name, suffix), fix_gamma=True)
act = mx.sym.Activation(data=bn, act_type='relu', name='%s%s_relu' %(name, suffix))
return act
def Inception_stem(data, name= None):
c = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name='%s_conv1_3*3' %name)
c = Conv(c, 32, kernel=(3, 3), name='%s_conv2_3*3' %name)
c = Conv(c, 64, kernel=(3, 3), pad=(1, 1), name='%s_conv3_3*3' %name)
p1 = mx.sym.Pooling(c, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' %name)
c2 = Conv(c, 96, kernel=(3, 3), stride=(2, 2), name='%s_conv4_3*3' %name)
concat = mx.sym.Concat(*[p1, c2], name='%s_concat_1' %name)
c1 = Conv(concat, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv5_1*1' %name)
c1 = Conv(c1, 96, kernel=(3, 3), name='%s_conv6_3*3' %name)
c2 = Conv(concat, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv7_1*1' %name)
c2 = Conv(c2, 64, kernel=(7, 1), pad=(3, 0), name='%s_conv8_7*1' %name)
c2 = Conv(c2, 64, kernel=(1, 7), pad=(0, 3), name='%s_conv9_1*7' %name)
c2 = Conv(c2, 96, kernel=(3, 3), pad=(0, 0), name='%s_conv10_3*3' %name)
concat = mx.sym.Concat(*[c1, c2], name='%s_concat_2' %name)
c1 = Conv(concat, 192, kernel=(3, 3), stride=(2, 2), name='%s_conv11_3*3' %name)
p1 = mx.sym.Pooling(concat, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_2' %name)
concat = mx.sym.Concat(*[c1, p1], name='%s_concat_3' %name)
return concat
def InceptionA(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' %name)
c1 = Conv(p1, 96, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(input, 96, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(input, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3 = Conv(c3, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv4_3*3' %name)
c4 = Conv(input, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv5_1*1' % name)
c4 = Conv(c4, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv6_3*3' % name)
c4 = Conv(c4, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv7_3*3' %name)
concat = mx.sym.Concat(*[c1, c2, c3, c4], name='%s_concat_1' %name)
return concat
def ReductionA(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' %name)
c2 = Conv(input, 384, kernel=(3, 3), stride=(2, 2), name='%s_conv1_3*3' %name)
c3 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(c3, 224, kernel=(3, 3), pad=(1, 1), name='%s_conv3_3*3' %name)
c3 = Conv(c3, 256, kernel=(3, 3), stride=(2, 2), pad=(0, 0), name='%s_conv4_3*3' %name)
concat = mx.sym.Concat(*[p1, c2, c3], name='%s_concat_1' %name)
return concat
def InceptionB(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' %name)
c1 = Conv(p1, 128, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3 = Conv(c3, 224, kernel=(1, 7), pad=(0, 3), name='%s_conv4_1*7' %name)
# note: this deviates from the paper's figure
c3 = Conv(c3, 256, kernel=(7, 1), pad=(3, 0), name='%s_conv5_1*7' %name)
c4 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv6_1*1' %name)
c4 = Conv(c4, 192, kernel=(1, 7), pad=(0, 3), name='%s_conv7_1*7' %name)
c4 = Conv(c4, 224, kernel=(7, 1), pad=(3, 0), name='%s_conv8_7*1' %name)
c4 = Conv(c4, 224, kernel=(1, 7), pad=(0, 3), name='%s_conv9_1*7' %name)
c4 = Conv(c4, 256, kernel=(7, 1), pad=(3, 0), name='%s_conv10_7*1' %name)
concat = mx.sym.Concat(*[c1, c2, c3, c4], name='%s_concat_1' %name)
return concat
def ReductionB(input,name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' %name)
c2 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(c2, 192, kernel=(3, 3), stride=(2, 2), name='%s_conv2_3*3' %name)
c3 = Conv(input, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3 = Conv(c3, 256, kernel=(1, 7), pad=(0, 3), name='%s_conv4_1*7' %name)
c3 = Conv(c3, 320, kernel=(7, 1), pad=(3, 0), name='%s_conv5_7*1' %name)
c3 = Conv(c3, 320, kernel=(3, 3), stride=(2, 2), name='%s_conv6_3*3' %name)
concat = mx.sym.Concat(*[p1, c2, c3], name='%s_concat_1' %name)
return concat
def InceptionC(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' %name)
c1 = Conv(p1, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(input, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3_1 = Conv(c3, 256, kernel=(1, 3), pad=(0, 1), name='%s_conv4_3*1' %name)
c3_2 = Conv(c3, 256, kernel=(3, 1), pad=(1, 0), name='%s_conv5_1*3' %name)
c4 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv6_1*1' %name)
c4 = Conv(c4, 448, kernel=(1, 3), pad=(0, 1), name='%s_conv7_1*3' %name)
c4 = Conv(c4, 512, kernel=(3, 1), pad=(1, 0), name='%s_conv8_3*1' %name)
c4_1 = Conv(c4, 256, kernel=(3, 1), pad=(1, 0), name='%s_conv9_1*3' %name)
c4_2 = Conv(c4, 256, kernel=(1, 3), pad=(0, 1), name='%s_conv10_3*1' %name)
concat = mx.sym.Concat(*[c1, c2, c3_1, c3_2, c4_1, c4_2], name='%s_concat' %name)
return concat
def get_symbol(num_classes=1000, dtype='float32', **kwargs):
data = mx.sym.Variable(name="data")
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
x = Inception_stem(data, name='in_stem')
#4 * InceptionA
for i in range(4):
x = InceptionA(x, name='in%dA' %(i+1))
#Reduction A
x = ReductionA(x, name='re1A')
#7 * InceptionB
for i in range(7):
x = InceptionB(x, name='in%dB' %(i+1))
#ReductionB
x = ReductionB(x, name='re1B')
#3 * InceptionC
for i in range(3):
x = InceptionC(x, name='in%dC' %(i+1))
#Average Pooling
x = mx.sym.Pooling(x, kernel=(8, 8), pad=(1, 1), pool_type='avg', name='global_avgpool')
#Dropout
x = mx.sym.Dropout(x, p=0.2)
flatten = mx.sym.Flatten(x, name='flatten')
fc1 = mx.sym.FullyConnected(flatten, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
softmax = mx.sym.SoftmaxOutput(fc1, name='softmax')
return softmax
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/inception-v4.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
(Original author Wei Wu) by Antti-Pekka Hynninen
Implementing the original resnet ILSVRC 2015 winning network from:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Deep Residual Learning for Image Recognition"
'''
import mxnet as mx
import numpy as np
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
stride : tuple
Stride used in convolution
dim_match : Boolean
True when the input and output have the same number of channels; otherwise a projection shortcut is used
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.25), kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc')
shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return mx.sym.Activation(data=bn3 + shortcut, act_type='relu', name=name + '_relu3')
else:
conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc')
shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return mx.sym.Activation(data=bn2 + shortcut, act_type='relu', name=name + '_relu3')
def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False):
"""Return ResNet symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
Output size of symbol
workspace : int
Workspace used in convolution operator
dtype : str
Precision (float32 or float16)
"""
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
(nchannel, height, width) = image_shape
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
# Is this BatchNorm supposed to be here?
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
# bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
# relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
# Although the kernel is ignored when global_pool=True, one must still be specified
pool1 = mx.sym.Pooling(data=body, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.sym.Flatten(data=pool1)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, dtype='float32', **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
(Original author Wei Wu) by Antti-Pekka Hynninen
Implementing the original resnet ILSVRC 2015 winning network from:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Deep Residual Learning for Image Recognition"
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 28:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnet(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype)
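if __name__ == '__main__':
    # Illustrative check (not in the original file): build a ResNet-50 symbol
    # for 3x224x224 inputs, mirroring how train_imagenet.py resolves symbols
    # via import_module('symbols.' + args.network).
    sym = get_symbol(num_classes=1000, num_layers=50, image_shape='3,224,224')
    print(sym.list_outputs())  # expected: ['softmax_output']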
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/resnet-v1.py
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/__init__.py
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
a simple multilayer perceptron
"""
import mxnet as mx
def get_symbol(num_classes=10, **kwargs):
data = mx.symbol.Variable('data')
data = mx.sym.Flatten(data=data)
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes)
mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
return mlp
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/mlp.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Saining Xie, Ross Girshick, Piotr Dollar, Zhuowen Tu, Kaiming He. "Aggregated Residual Transformations for Deep Neural Networks"
'''
import mxnet as mx
import numpy as np
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, num_group=32, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
stride : tuple
Stride used in convolution
dim_match : Boolean
True when the input and output have the same number of channels; otherwise a projection shortcut is used
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.5), num_group=num_group, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if dim_match:
shortcut = data
else:
shortcut_conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
eltwise = bn3 + shortcut
return mx.sym.Activation(data=eltwise, act_type='relu', name=name + '_relu')
else:
conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
if dim_match:
shortcut = data
else:
shortcut_conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
eltwise = bn2 + shortcut
return mx.sym.Activation(data=eltwise, act_type='relu', name=name + '_relu')
def resnext(units, num_stages, filter_list, num_classes, num_group, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False):
"""Return ResNeXt symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
Ouput size of symbol
num_groupes: int
Number of conv groups
dataset : str
Dataset type, only cifar10 and imagenet supports
workspace : int
Workspace used in convolution operator
dtype : str
Precision (float32 or float16)
"""
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
(nchannel, height, width) = image_shape
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, num_group=num_group,
bn_mom=bn_mom, workspace=workspace, memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger)
pool1 = mx.sym.Pooling(data=body, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.sym.Flatten(data=pool1)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, num_group=32, conv_workspace=256, dtype='float32', **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 32:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnext(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
num_group = num_group,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype)
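if __name__ == '__main__':
    # Minimal usage sketch: build a ResNeXt-50 symbol for ImageNet-sized input
    # and verify that it produces a (batch, num_classes) output.
    sym = get_symbol(num_classes=1000, num_layers=50, image_shape='3,224,224')
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 224, 224))
    print(out_shapes)  # [(1, 1000)]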
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/resnext.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Contains the definition of the Inception Resnet V2 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
import mxnet as mx
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True):
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad)
bn = mx.symbol.BatchNorm(data=conv)
if with_act:
act = mx.symbol.Activation(
data=bn, act_type=act_type, attr=mirror_attr)
return act
else:
return bn
def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}):
tower_conv = ConvFactory(net, 32, (1, 1))
tower_conv1_0 = ConvFactory(net, 32, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1_0, 32, (3, 3), pad=(1, 1))
tower_conv2_0 = ConvFactory(net, 32, (1, 1))
tower_conv2_1 = ConvFactory(tower_conv2_0, 48, (3, 3), pad=(1, 1))
tower_conv2_2 = ConvFactory(tower_conv2_1, 64, (3, 3), pad=(1, 1))
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False)
net += scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}):
tower_conv = ConvFactory(net, 192, (1, 1))
    tower_conv1_0 = ConvFactory(net, 129, (1, 1))  # note: the paper (arXiv:1602.07261) specifies 128 filters for this 1x1 reduction
    tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(1, 2))
    tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(2, 1))  # the asymmetric pads of this 1x7/7x1 pair cancel out, preserving the spatial size
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False)
net += scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block8(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}):
tower_conv = ConvFactory(net, 192, (1, 1))
tower_conv1_0 = ConvFactory(net, 192, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1_0, 224, (1, 3), pad=(0, 1))
tower_conv1_2 = ConvFactory(tower_conv1_1, 256, (3, 1), pad=(1, 0))
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False)
net += scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def repeat(inputs, repetitions, layer, *args, **kwargs):
outputs = inputs
for i in range(repetitions):
outputs = layer(outputs, *args, **kwargs)
return outputs
def get_symbol(num_classes=1000, **kwargs):
data = mx.symbol.Variable(name='data')
conv1a_3_3 = ConvFactory(data=data, num_filter=32,
kernel=(3, 3), stride=(2, 2))
conv2a_3_3 = ConvFactory(conv1a_3_3, 32, (3, 3))
conv2b_3_3 = ConvFactory(conv2a_3_3, 64, (3, 3), pad=(1, 1))
maxpool3a_3_3 = mx.symbol.Pooling(
data=conv2b_3_3, kernel=(3, 3), stride=(2, 2), pool_type='max')
conv3b_1_1 = ConvFactory(maxpool3a_3_3, 80, (1, 1))
conv4a_3_3 = ConvFactory(conv3b_1_1, 192, (3, 3))
maxpool5a_3_3 = mx.symbol.Pooling(
data=conv4a_3_3, kernel=(3, 3), stride=(2, 2), pool_type='max')
tower_conv = ConvFactory(maxpool5a_3_3, 96, (1, 1))
tower_conv1_0 = ConvFactory(maxpool5a_3_3, 48, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1_0, 64, (5, 5), pad=(2, 2))
tower_conv2_0 = ConvFactory(maxpool5a_3_3, 64, (1, 1))
tower_conv2_1 = ConvFactory(tower_conv2_0, 96, (3, 3), pad=(1, 1))
tower_conv2_2 = ConvFactory(tower_conv2_1, 96, (3, 3), pad=(1, 1))
tower_pool3_0 = mx.symbol.Pooling(data=maxpool5a_3_3, kernel=(
3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg')
tower_conv3_1 = ConvFactory(tower_pool3_0, 64, (1, 1))
tower_5b_out = mx.symbol.Concat(
*[tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1])
net = repeat(tower_5b_out, 10, block35, scale=0.17, input_num_channels=320)
tower_conv = ConvFactory(net, 384, (3, 3), stride=(2, 2))
tower_conv1_0 = ConvFactory(net, 256, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1_0, 256, (3, 3), pad=(1, 1))
tower_conv1_2 = ConvFactory(tower_conv1_1, 384, (3, 3), stride=(2, 2))
tower_pool = mx.symbol.Pooling(net, kernel=(
3, 3), stride=(2, 2), pool_type='max')
net = mx.symbol.Concat(*[tower_conv, tower_conv1_2, tower_pool])
net = repeat(net, 20, block17, scale=0.1, input_num_channels=1088)
tower_conv = ConvFactory(net, 256, (1, 1))
tower_conv0_1 = ConvFactory(tower_conv, 384, (3, 3), stride=(2, 2))
tower_conv1 = ConvFactory(net, 256, (1, 1))
tower_conv1_1 = ConvFactory(tower_conv1, 288, (3, 3), stride=(2, 2))
tower_conv2 = ConvFactory(net, 256, (1, 1))
tower_conv2_1 = ConvFactory(tower_conv2, 288, (3, 3), pad=(1, 1))
tower_conv2_2 = ConvFactory(tower_conv2_1, 320, (3, 3), stride=(2, 2))
tower_pool = mx.symbol.Pooling(net, kernel=(
3, 3), stride=(2, 2), pool_type='max')
net = mx.symbol.Concat(
*[tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool])
net = repeat(net, 9, block8, scale=0.2, input_num_channels=2080)
net = block8(net, with_act=False, input_num_channels=2080)
net = ConvFactory(net, 1536, (1, 1))
net = mx.symbol.Pooling(net, kernel=(
1, 1), global_pool=True, stride=(2, 2), pool_type='avg')
net = mx.symbol.Flatten(net)
net = mx.symbol.Dropout(data=net, p=0.2)
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes)
softmax = mx.symbol.SoftmaxOutput(data=net, name='softmax')
return softmax
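if __name__ == '__main__':
    # Minimal usage sketch: Inception-ResNet-v2 expects 299 x 299 inputs, as in the paper.
    sym = get_symbol(num_classes=1000)
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 299, 299))
    print(out_shapes)  # [(1, 1000)]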
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/inception-resnet-v2.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick Haffner.
Gradient-based learning applied to document recognition.
Proceedings of the IEEE (1998)
"""
import mxnet as mx
def get_loc(data, attr={'lr_mult':'0.01'}):
"""
the localisation network in lenet-stn, it will increase acc about more than 1%,
when num-epoch >=15
"""
    loc = mx.symbol.Convolution(data=data, num_filter=30, kernel=(5, 5), stride=(2, 2))
    loc = mx.symbol.Activation(data=loc, act_type='relu')
    loc = mx.symbol.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
    loc = mx.symbol.Convolution(data=loc, num_filter=60, kernel=(3, 3), stride=(1, 1), pad=(1, 1))
    loc = mx.symbol.Activation(data=loc, act_type='relu')
    loc = mx.symbol.Pooling(data=loc, global_pool=True, kernel=(2, 2), pool_type='avg')
    loc = mx.symbol.Flatten(data=loc)
    loc = mx.symbol.FullyConnected(data=loc, num_hidden=6, name="stn_loc", attr=attr)
return loc
def get_symbol(num_classes=10, add_stn=False, **kwargs):
data = mx.symbol.Variable('data')
if add_stn:
data = mx.sym.SpatialTransformer(data=data, loc=get_loc(data), target_shape = (28,28),
transform_type="affine", sampler_type="bilinear")
# first conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh")
pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max",
kernel=(2,2), stride=(2,2))
# second conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=50)
tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max",
kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=num_classes)
# loss
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
return lenet
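if __name__ == '__main__':
    # Minimal usage sketch: LeNet on MNIST-sized (1 x 28 x 28) input.
    sym = get_symbol(num_classes=10)
    _, out_shapes, _ = sym.infer_shape(data=(1, 1, 28, 28))
    print(out_shapes)  # [(1, 10)]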
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/lenet.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding:utf-8 -*-
'''
MobileNet
Suitable for images of size resolution x resolution, where resolution is a multiple of 32.
Reference:
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications
https://arxiv.org/abs/1704.04861
'''
__author__ = 'qingzhouzhen'
__date__ = '17/8/5'
__modify__ = 'dwSun'
__modified_date__ = '17/11/30'
import mxnet as mx
alpha_values = [0.25, 0.50, 0.75, 1.0]
def Conv(data, num_filter=1, kernel=(1, 1), stride=(1, 1), pad=(0, 0), num_group=1, name='', suffix=''):
conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, num_group=num_group, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' % (name, suffix))
bn = mx.sym.BatchNorm(data=conv, name='%s%s_batchnorm' % (name, suffix), fix_gamma=True)
act = mx.sym.Activation(data=bn, act_type='relu', name='%s%s_relu' % (name, suffix))
return act
def Conv_DPW(data, depth=1, stride=(1, 1), name='', idx=0, suffix=''):
conv_dw = Conv(data, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=stride, name="conv_%d_dw" % (idx), suffix=suffix)
conv = Conv(conv_dw, num_filter=depth * stride[0], kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_%d" % (idx), suffix=suffix)
return conv
def get_symbol_compact(num_classes, alpha=1, resolution=224, **kwargs):
assert alpha in alpha_values, 'Invalid alpha={0}, must be one of {1}'.format(alpha, alpha_values)
    assert resolution % 32 == 0, 'resolution must be a multiple of 32'
base = int(32 * alpha)
data = mx.symbol.Variable(name="data") # 224
conv_1 = Conv(data, num_filter=base, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_1") # 32*alpha, 224/112
conv_2_dw = Conv(conv_1, num_group=base, num_filter=base, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_2_dw") # 112/112
conv_2 = Conv(conv_2_dw, num_filter=base * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_2") # 32*alpha, 112/112
conv_3_dpw = Conv_DPW(conv_2, depth=base * 2, stride=(2, 2), idx=3) # 64*alpha, 112/56 => 56/56
conv_4_dpw = Conv_DPW(conv_3_dpw, depth=base * 4, stride=(1, 1), idx=4) # 128*alpha, 56/56 =>56/56
conv_5_dpw = Conv_DPW(conv_4_dpw, depth=base * 4, stride=(2, 2), idx=5) # 128*alpha, 56/28 => 28/28
conv_6_dpw = Conv_DPW(conv_5_dpw, depth=base * 8, stride=(1, 1), idx=6) # 256*alpha, 28/28 => 28/28
conv_7_dpw = Conv_DPW(conv_6_dpw, depth=base * 8, stride=(2, 2), idx=7) # 256*alpha, 28/14 => 14/14
conv_dpw = conv_7_dpw
for idx in range(8, 13):
conv_dpw = Conv_DPW(conv_dpw, depth=base * 16, stride=(1, 1), idx=idx) # 512*alpha, 14/14
conv_12_dpw = conv_dpw
conv_13_dpw = Conv_DPW(conv_12_dpw, depth=base * 16, stride=(2, 2), idx=13) # 512*alpha, 14/7 => 7/7
conv_14_dpw = Conv_DPW(conv_13_dpw, depth=base * 32, stride=(1, 1), idx=14) # 1024*alpha, 7/7 => 7/7
pool_size = int(resolution / 32)
pool = mx.sym.Pooling(data=conv_14_dpw, kernel=(pool_size, pool_size), stride=(1, 1), pool_type="avg", name="global_pool")
flatten = mx.sym.Flatten(data=pool, name="flatten")
fc = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc')
softmax = mx.symbol.SoftmaxOutput(data=fc, name='softmax')
return softmax
def get_symbol(num_classes, alpha=1, resolution=224, **kwargs):
assert alpha in alpha_values, 'Invalid alpha=[{0}], must be one of [{1}]'.format(alpha, alpha_values)
    assert resolution % 32 == 0, 'resolution must be a multiple of 32'
base = int(32 * alpha)
data = mx.symbol.Variable(name="data") # 224
depth = base # 32*alpha
conv_1 = Conv(data, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_1") # 224/112
depth = base # 32*alpha
conv_2_dw = Conv(conv_1, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_2_dw") # 112/112
conv_2 = Conv(conv_2_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_2") # 112/112
depth = base * 2 # 64*alpha
conv_3_dw = Conv(conv_2, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_3_dw") # 112/56
conv_3 = Conv(conv_3_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_3") # 56/56
depth = base * 4 # 128*alpha
conv_4_dw = Conv(conv_3, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_4_dw") # 56/56
conv_4 = Conv(conv_4_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_4") # 56/56
depth = base * 4 # 128*alpha
conv_5_dw = Conv(conv_4, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_5_dw") # 56/28
conv_5 = Conv(conv_5_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_5") # 28/28
depth = base * 8 # 256*alpha
conv_6_dw = Conv(conv_5, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_6_dw") # 28/28
conv_6 = Conv(conv_6_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_6") # 28/28
depth = base * 8 # 256*alpha
conv_7_dw = Conv(conv_6, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_7_dw") # 28/14
conv_7 = Conv(conv_7_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_7") # 14/14
depth = base * 16 # 512*alpha
conv_8_dw = Conv(conv_7, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_8_dw") # 14/14
conv_8 = Conv(conv_8_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_8") # 14/14
conv_9_dw = Conv(conv_8, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_9_dw") # 14/14
conv_9 = Conv(conv_9_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_9") # 14/14
conv_10_dw = Conv(conv_9, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_10_dw") # 14/14
conv_10 = Conv(conv_10_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_10") # 14/14
conv_11_dw = Conv(conv_10, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_11_dw") # 14/14
conv_11 = Conv(conv_11_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_11") # 14/14
conv_12_dw = Conv(conv_11, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_12_dw") # 14/14
conv_12 = Conv(conv_12_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_12") # 14/14
depth = base * 16 # 512*alpha
conv_13_dw = Conv(conv_12, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_13_dw") # 14/7
conv_13 = Conv(conv_13_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_13") # 7/7
depth = base * 32 # 1024*alpha
conv_14_dw = Conv(conv_13, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_14_dw") # 7/7
conv_14 = Conv(conv_14_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_14") # 7/7
pool_size = int(resolution / 32)
pool = mx.sym.Pooling(data=conv_14, kernel=(pool_size, pool_size), stride=(1, 1), pool_type="avg", name="global_pool")
flatten = mx.sym.Flatten(data=pool, name="flatten")
fc = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc')
softmax = mx.symbol.SoftmaxOutput(data=fc, name='softmax')
return softmax
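if __name__ == '__main__':
    # Minimal usage sketch: a width multiplier (alpha) of 0.5 at 224 x 224 input.
    sym = get_symbol(num_classes=1000, alpha=0.50, resolution=224)
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 224, 224))
    print(out_shapes)  # [(1, 1000)]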
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/mobilenet.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Inception + BN, suitable for images of around 224 x 224
Reference:
Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep
network training by reducing internal covariate shift. arXiv preprint
arXiv:1502.03167, 2015.
"""
import mxnet as mx
eps = 1e-10 + 1e-5  # keep epsilon just above cuDNN's minimum allowed value of 1e-5
bn_mom = 0.9
fix_gamma = False
def ConvFactory(data, num_filter, kernel, stride=(1,1), pad=(0, 0), name=None, suffix='', attr={}):
conv = mx.symbol.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, name='conv_%s%s' %(name, suffix))
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=fix_gamma, eps=eps, momentum=bn_mom, name='bn_%s%s' %(name, suffix))
act = mx.symbol.Activation(data=bn, act_type='relu', name='relu_%s%s' %(name, suffix), attr=attr)
return act
def InceptionFactoryA(data, num_1x1, num_3x3red, num_3x3, num_d3x3red, num_d3x3, pool, proj, name):
# 1x1
c1x1 = ConvFactory(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_1x1' % name))
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = ConvFactory(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_proj' % name))
# concat
concat = mx.symbol.Concat(*[c1x1, c3x3, cd3x3, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def InceptionFactoryB(data, num_3x3red, num_3x3, num_d3x3red, num_d3x3, name):
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type="max", name=('max_pool_%s_pool' % name))
# concat
concat = mx.symbol.Concat(*[c3x3, cd3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
# A Simple Downsampling Factory
def DownsampleFactory(data, ch_3x3, name, attr):
# conv 3x3
conv = ConvFactory(data=data, name=name+'_conv',kernel=(3, 3), stride=(2, 2), num_filter=ch_3x3, pad=(1, 1), attr=attr)
# pool
pool = mx.symbol.Pooling(data=data, name=name+'_pool',kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', attr=attr)
# concat
concat = mx.symbol.Concat(*[conv, pool], name=name+'_ch_concat')
return concat
# A Simple module
def SimpleFactory(data, ch_1x1, ch_3x3, name, attr):
# 1x1
conv1x1 = ConvFactory(data=data, name=name+'_1x1', kernel=(1, 1), pad=(0, 0), num_filter=ch_1x1, attr=attr)
# 3x3
conv3x3 = ConvFactory(data=data, name=name+'_3x3', kernel=(3, 3), pad=(1, 1), num_filter=ch_3x3, attr=attr)
#concat
concat = mx.symbol.Concat(*[conv1x1, conv3x3], name=name+'_ch_concat')
return concat
def get_symbol(num_classes, image_shape, **kwargs):
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
# attr = {'force_mirroring': 'true'}
attr = {}
# data
data = mx.symbol.Variable(name="data")
if height <= 28:
        # a simpler version
conv1 = ConvFactory(data=data, kernel=(3,3), pad=(1,1), name="1", num_filter=96, attr=attr)
in3a = SimpleFactory(conv1, 32, 32, 'in3a', attr)
in3b = SimpleFactory(in3a, 32, 48, 'in3b', attr)
in3c = DownsampleFactory(in3b, 80, 'in3c', attr)
in4a = SimpleFactory(in3c, 112, 48, 'in4a', attr)
in4b = SimpleFactory(in4a, 96, 64, 'in4b', attr)
in4c = SimpleFactory(in4b, 80, 80, 'in4c', attr)
in4d = SimpleFactory(in4c, 48, 96, 'in4d', attr)
in4e = DownsampleFactory(in4d, 96, 'in4e', attr)
in5a = SimpleFactory(in4e, 176, 160, 'in5a', attr)
in5b = SimpleFactory(in5a, 176, 160, 'in5b', attr)
pool = mx.symbol.Pooling(data=in5b, pool_type="avg", kernel=(7,7), name="global_pool", attr=attr)
else:
# stage 1
conv1 = ConvFactory(data=data, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3), name='1')
pool1 = mx.symbol.Pooling(data=conv1, kernel=(3, 3), stride=(2, 2), name='pool_1', pool_type='max')
# stage 2
conv2red = ConvFactory(data=pool1, num_filter=64, kernel=(1, 1), stride=(1, 1), name='2_red')
conv2 = ConvFactory(data=conv2red, num_filter=192, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name='2')
pool2 = mx.symbol.Pooling(data=conv2, kernel=(3, 3), stride=(2, 2), name='pool_2', pool_type='max')
        # stage 3
in3a = InceptionFactoryA(pool2, 64, 64, 64, 64, 96, "avg", 32, '3a')
in3b = InceptionFactoryA(in3a, 64, 64, 96, 64, 96, "avg", 64, '3b')
in3c = InceptionFactoryB(in3b, 128, 160, 64, 96, '3c')
        # stage 4
in4a = InceptionFactoryA(in3c, 224, 64, 96, 96, 128, "avg", 128, '4a')
in4b = InceptionFactoryA(in4a, 192, 96, 128, 96, 128, "avg", 128, '4b')
in4c = InceptionFactoryA(in4b, 160, 128, 160, 128, 160, "avg", 128, '4c')
in4d = InceptionFactoryA(in4c, 96, 128, 192, 160, 192, "avg", 128, '4d')
in4e = InceptionFactoryB(in4d, 128, 192, 192, 256, '4e')
        # stage 5
in5a = InceptionFactoryA(in4e, 352, 192, 320, 160, 224, "avg", 128, '5a')
in5b = InceptionFactoryA(in5a, 352, 192, 320, 192, 224, "max", 128, '5b')
# global avg pooling
pool = mx.symbol.Pooling(data=in5b, kernel=(7, 7), stride=(1, 1), name="global_pool", pool_type='avg')
# linear classifier
flatten = mx.symbol.Flatten(data=pool)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes)
softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
return softmax
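if __name__ == '__main__':
    # Minimal usage sketch: image_shape is passed as a comma-separated string.
    sym = get_symbol(num_classes=1000, image_shape='3,224,224')
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 224, 224))
    print(out_shapes)  # [(1, 1000)]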
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/inception-bn.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implements the following paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
import numpy as np
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
Bottle neck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, slightly different from the original paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
else:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv2 + shortcut
def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False):
"""Return ResNet symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
Ouput size of symbol
dataset : str
Dataset type, only cifar10 and imagenet supports
workspace : int
Workspace used in convolution operator
dtype : str
Precision (float32 or float16)
"""
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
(nchannel, height, width) = image_shape
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    # Although the kernel is ignored when global_pool=True, one still has to be provided
pool1 = mx.sym.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.sym.Flatten(data=pool1)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, dtype='float32', **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 28:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnet(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype)
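if __name__ == '__main__':
    # Minimal usage sketch: build a ResNet-50 symbol for ImageNet-sized input.
    sym = get_symbol(num_classes=1000, num_layers=50, image_shape='3,224,224')
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 224, 224))
    print(out_shapes)  # [(1, 1000)]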
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/resnet.py
|
"""References:
Simonyan, Karen, and Andrew Zisserman. "Very deep convolutional networks for
large-scale image recognition." arXiv preprint arXiv:1409.1556 (2014).
This implements Variant D from the paper.
"""
import mxnet as mx
def get_symbol(num_classes, **kwargs):
    ## define VGG-16 (configuration D)
data = mx.symbol.Variable(name="data")
# group 1
conv1_1 = mx.symbol.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
pool5 = mx.symbol.Pooling(
data=relu5_3, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool5")
# group 6
flatten = mx.symbol.Flatten(data=pool5, name="flatten")
fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
# output
fc8 = mx.symbol.FullyConnected(data=drop7, num_hidden=num_classes, name="fc8")
softmax = mx.symbol.SoftmaxOutput(data=fc8, name='softmax')
return softmax
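if __name__ == '__main__':
    # Minimal usage sketch: VGG-16 expects 224 x 224 RGB input.
    sym = get_symbol(num_classes=1000)
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 224, 224))
    print(out_shapes)  # [(1, 1000)]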
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/vgg16.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Inception V3, suitable for images of around 299 x 299
Reference:
Szegedy, Christian, et al. "Rethinking the Inception Architecture for Computer Vision." arXiv preprint arXiv:1512.00567 (2015).
"""
import mxnet as mx
import numpy as np
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' %(name, suffix))
bn = mx.sym.BatchNorm(data=conv, name='%s%s_batchnorm' %(name, suffix), fix_gamma=True)
act = mx.sym.Activation(data=bn, act_type='relu', name='%s%s_relu' %(name, suffix))
return act
def Inception7A(data,
num_1x1,
num_3x3_red, num_3x3_1, num_3x3_2,
num_5x5_red, num_5x5,
pool, proj,
name):
tower_1x1 = Conv(data, num_1x1, name=('%s_conv' % name))
tower_5x5 = Conv(data, num_5x5_red, name=('%s_tower' % name), suffix='_conv')
tower_5x5 = Conv(tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
tower_3x3 = Conv(data, num_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_3x3 = Conv(tower_3x3, num_3x3_1, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
tower_3x3 = Conv(tower_3x3, num_3x3_2, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_2')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(pooling, proj, name=('%s_tower_2' % name), suffix='_conv')
concat = mx.sym.Concat(*[tower_1x1, tower_5x5, tower_3x3, cproj], name='ch_concat_%s_chconcat' % name)
return concat
# First Downsample
def Inception7B(data,
num_3x3,
num_d3x3_red, num_d3x3_1, num_d3x3_2,
pool,
name):
tower_3x3 = Conv(data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_conv' % name))
tower_d3x3 = Conv(data, num_d3x3_red, name=('%s_tower' % name), suffix='_conv')
tower_d3x3 = Conv(tower_d3x3, num_d3x3_1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_tower' % name), suffix='_conv_1')
tower_d3x3 = Conv(tower_d3x3, num_d3x3_2, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_2')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0,0), pool_type="max", name=('max_pool_%s_pool' % name))
concat = mx.sym.Concat(*[tower_3x3, tower_d3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7C(data,
num_1x1,
num_d7_red, num_d7_1, num_d7_2,
num_q7_red, num_q7_1, num_q7_2, num_q7_3, num_q7_4,
pool, proj,
name):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
tower_d7 = Conv(data=data, num_filter=num_d7_red, name=('%s_tower' % name), suffix='_conv')
tower_d7 = Conv(data=tower_d7, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower' % name), suffix='_conv_1')
tower_d7 = Conv(data=tower_d7, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower' % name), suffix='_conv_2')
tower_q7 = Conv(data=data, num_filter=num_q7_red, name=('%s_tower_1' % name), suffix='_conv')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_1, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_1')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_2, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_2')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_3, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_3')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_4, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_4')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
# concat
concat = mx.sym.Concat(*[tower_1x1, tower_d7, tower_q7, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7D(data,
num_3x3_red, num_3x3,
num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3,
pool,
name):
tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=('%s_tower' % name), suffix='_conv')
tower_3x3 = Conv(data=tower_3x3, num_filter=num_3x3, kernel=(3, 3), pad=(0,0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
tower_d7_3x3 = Conv(data=data, num_filter=num_d7_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_1')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_2')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_3x3, kernel=(3, 3), stride=(2, 2), name=('%s_tower_1' % name), suffix='_conv_3')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
# concat
concat = mx.sym.Concat(*[tower_3x3, tower_d7_3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7E(data,
num_1x1,
num_d3_red, num_d3_1, num_d3_2,
num_3x3_d3_red, num_3x3, num_3x3_d3_1, num_3x3_d3_2,
pool, proj,
name):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
tower_d3 = Conv(data=data, num_filter=num_d3_red, name=('%s_tower' % name), suffix='_conv')
tower_d3_a = Conv(data=tower_d3, num_filter=num_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower' % name), suffix='_mixed_conv')
tower_d3_b = Conv(data=tower_d3, num_filter=num_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower' % name), suffix='_mixed_conv_1')
tower_3x3_d3 = Conv(data=data, num_filter=num_3x3_d3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_3x3_d3 = Conv(data=tower_3x3_d3, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
tower_3x3_d3_a = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower_1' % name), suffix='_mixed_conv')
tower_3x3_d3_b = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower_1' % name), suffix='_mixed_conv_1')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
# concat
concat = mx.sym.Concat(*[tower_1x1, tower_d3_a, tower_d3_b, tower_3x3_d3_a, tower_3x3_d3_b, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def get_symbol(num_classes=1000, dtype='float32', **kwargs):
data = mx.sym.Variable(name="data")
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
# stage 1
conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
pool = mx.sym.Pooling(data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool")
# stage 2
conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
pool1 = mx.sym.Pooling(data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool1")
# stage 3
in3a = Inception7A(pool1, 64,
64, 96, 96,
48, 64,
"avg", 32, "mixed")
in3b = Inception7A(in3a, 64,
64, 96, 96,
48, 64,
"avg", 64, "mixed_1")
in3c = Inception7A(in3b, 64,
64, 96, 96,
48, 64,
"avg", 64, "mixed_2")
in3d = Inception7B(in3c, 384,
64, 96, 96,
"max", "mixed_3")
# stage 4
in4a = Inception7C(in3d, 192,
128, 128, 192,
128, 128, 128, 128, 192,
"avg", 192, "mixed_4")
in4b = Inception7C(in4a, 192,
160, 160, 192,
160, 160, 160, 160, 192,
"avg", 192, "mixed_5")
in4c = Inception7C(in4b, 192,
160, 160, 192,
160, 160, 160, 160, 192,
"avg", 192, "mixed_6")
in4d = Inception7C(in4c, 192,
192, 192, 192,
192, 192, 192, 192, 192,
"avg", 192, "mixed_7")
in4e = Inception7D(in4d, 192, 320,
192, 192, 192, 192,
"max", "mixed_8")
# stage 5
in5a = Inception7E(in4e, 320,
384, 384, 384,
448, 384, 384, 384,
"avg", 192, "mixed_9")
in5b = Inception7E(in5a, 320,
384, 384, 384,
448, 384, 384, 384,
"max", 192, "mixed_10")
# pool
pool = mx.sym.Pooling(data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", name="global_pool")
flatten = mx.sym.Flatten(data=pool, name="flatten")
fc1 = mx.sym.FullyConnected(data=flatten, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
softmax = mx.sym.SoftmaxOutput(data=fc1, name='softmax')
return softmax
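if __name__ == '__main__':
    # Minimal usage sketch: Inception V3 expects 299 x 299 input.
    sym = get_symbol(num_classes=1000)
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 299, 299))
    print(out_shapes)  # [(1, 1000)]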
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/inception-v3.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Reference:
Krizhevsky, Alex, Ilya Sutskever, and Geoffrey E. Hinton. "Imagenet classification with deep convolutional neural networks." Advances in neural information processing systems. 2012.
"""
import mxnet as mx
import numpy as np
def get_symbol(num_classes, dtype='float32', **kwargs):
input_data = mx.sym.Variable(name="data")
if dtype == 'float16':
input_data = mx.sym.Cast(data=input_data, dtype=np.float16)
# stage 1
conv1 = mx.sym.Convolution(name='conv1',
data=input_data, kernel=(11, 11), stride=(4, 4), num_filter=96)
relu1 = mx.sym.Activation(data=conv1, act_type="relu")
lrn1 = mx.sym.LRN(data=relu1, alpha=0.0001, beta=0.75, knorm=2, nsize=5)
pool1 = mx.sym.Pooling(
data=lrn1, pool_type="max", kernel=(3, 3), stride=(2,2))
# stage 2
conv2 = mx.sym.Convolution(name='conv2',
data=pool1, kernel=(5, 5), pad=(2, 2), num_filter=256)
relu2 = mx.sym.Activation(data=conv2, act_type="relu")
lrn2 = mx.sym.LRN(data=relu2, alpha=0.0001, beta=0.75, knorm=2, nsize=5)
pool2 = mx.sym.Pooling(data=lrn2, kernel=(3, 3), stride=(2, 2), pool_type="max")
# stage 3
conv3 = mx.sym.Convolution(name='conv3',
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=384)
relu3 = mx.sym.Activation(data=conv3, act_type="relu")
conv4 = mx.sym.Convolution(name='conv4',
data=relu3, kernel=(3, 3), pad=(1, 1), num_filter=384)
relu4 = mx.sym.Activation(data=conv4, act_type="relu")
conv5 = mx.sym.Convolution(name='conv5',
data=relu4, kernel=(3, 3), pad=(1, 1), num_filter=256)
relu5 = mx.sym.Activation(data=conv5, act_type="relu")
pool3 = mx.sym.Pooling(data=relu5, kernel=(3, 3), stride=(2, 2), pool_type="max")
# stage 4
flatten = mx.sym.Flatten(data=pool3)
fc1 = mx.sym.FullyConnected(name='fc1', data=flatten, num_hidden=4096)
relu6 = mx.sym.Activation(data=fc1, act_type="relu")
dropout1 = mx.sym.Dropout(data=relu6, p=0.5)
# stage 5
fc2 = mx.sym.FullyConnected(name='fc2', data=dropout1, num_hidden=4096)
relu7 = mx.sym.Activation(data=fc2, act_type="relu")
dropout2 = mx.sym.Dropout(data=relu7, p=0.5)
# stage 6
fc3 = mx.sym.FullyConnected(name='fc3', data=dropout2, num_hidden=num_classes)
if dtype == 'float16':
fc3 = mx.sym.Cast(data=fc3, dtype=np.float32)
softmax = mx.sym.SoftmaxOutput(data=fc3, name='softmax')
return softmax
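if __name__ == '__main__':
    # Minimal usage sketch: AlexNet on 227 x 227 input (the canonical size).
    sym = get_symbol(num_classes=1000)
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 227, 227))
    print(out_shapes)  # [(1, 1000)]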
|
DALI-main
|
docs/examples/use_cases/mxnet/resnetn/symbols/alexnet.py
|
doc(title="PaddlePaddle Use-Cases",
underline_char="=",
entries=[
"resnet50/paddle-resnet50.rst",
"ssd/paddle-ssd.rst",
"tsm/paddle-tsm.rst",
])
|
DALI-main
|
docs/examples/use_cases/paddle/index.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.framework import Variable
__all__ = ['ResNet']
class ResNet(object):
def __init__(self, depth=50, num_classes=1000):
super(ResNet, self).__init__()
        assert depth in [18, 34, 50, 101, 152], \
            "depth {} not in [18, 34, 50, 101, 152]".format(depth)
self.depth = depth
self.num_classes = num_classes
self.stage_filters = [64, 128, 256, 512]
self.stages, self.block_func = {
18: ([2, 2, 2, 2], self.basicblock),
34: ([3, 4, 6, 3], self.basicblock),
50: ([3, 4, 6, 3], self.bottleneck),
101: ([3, 4, 23, 3], self.bottleneck),
152: ([3, 8, 36, 3], self.bottleneck)
}[depth]
def _conv_norm(self,
input,
num_filters,
filter_size,
stride=1,
act=None,
name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False,
name=name + '.conv2d.output.1')
if 'conv1' in name:
bn_name = "bn_" + name
else:
bn_name = name.replace("res", "bn")
return fluid.layers.batch_norm(
input=conv,
act=act,
name=bn_name + '.output.1',
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(name=bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance', )
def _shortcut(self, input, ch_out, stride, is_first, name):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1 or (self.depth < 50 and is_first):
return self._conv_norm(input, ch_out, 1, stride, name=name)
else:
return input
def bottleneck(self, input, num_filters, stride, is_first, name):
stride1, stride2 = 1, stride
conv_def = [[num_filters, 1, stride1, 'relu', name + "_branch2a"],
[num_filters, 3, stride2, 'relu', name + "_branch2b"],
[num_filters * 4, 1, 1, None, name + "_branch2c"]]
residual = input
for (c, k, s, act, _name) in conv_def:
residual = self._conv_norm(
input=residual,
num_filters=c,
filter_size=k,
stride=s,
act=act,
name=_name)
short = self._shortcut(
input,
num_filters * 4,
stride,
is_first=is_first,
name=name + "_branch1")
return fluid.layers.elementwise_add(
x=short, y=residual, act='relu', name=name + ".add.output.5")
def basicblock(self, input, num_filters, stride, is_first, name):
conv0 = self._conv_norm(
input=input,
num_filters=num_filters,
filter_size=3,
act='relu',
stride=stride,
name=name + "_branch2a")
conv1 = self._conv_norm(
input=conv0,
num_filters=num_filters,
filter_size=3,
act=None,
name=name + "_branch2b")
short = self._shortcut(
input, num_filters, stride, is_first, name=name + "_branch1")
return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
def layer_warp(self, input, stage_num):
assert stage_num in [2, 3, 4, 5]
stages, block_func = self.stages, self.block_func
count = stages[stage_num - 2]
ch_out = self.stage_filters[stage_num - 2]
        is_first = stage_num == 2
conv = input
for i in range(count):
if self.depth in [101, 152] and stage_num == 2:
if i == 0:
conv_name = "res" + str(stage_num) + "a"
else:
conv_name = "res" + str(stage_num) + "b" + str(i)
else:
conv_name = "res" + str(stage_num) + chr(97 + i)
if self.depth < 50:
                is_first = i == 0 and stage_num == 2
conv = block_func(
input=conv,
num_filters=ch_out,
stride=2 if i == 0 and stage_num != 2 else 1,
is_first=is_first,
name=conv_name)
return conv
def c1_stage(self, input):
input = self._conv_norm(
input=input,
num_filters=self.stage_filters[0],
filter_size=7,
stride=2,
act='relu',
name='conv1')
output = fluid.layers.pool2d(
input=input,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
return output
def __call__(self, input):
assert isinstance(input, Variable)
res = self.c1_stage(input)
for i in range(2, 6):
res = self.layer_warp(res, i)
pool = fluid.layers.pool2d(res, pool_size=7, pool_type='avg',
global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
return fluid.layers.fc(pool,
size=self.num_classes,
param_attr=ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv)))
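if __name__ == '__main__':
    # Illustrative smoke test, not part of the original file: build the
    # ResNet-50 graph in static mode and inspect the logits shape. Assumes
    # the legacy fluid 1.x-style API used above.
    import paddle
    paddle.enable_static()
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        image = fluid.layers.data(name='data', shape=[3, 224, 224],
                                  dtype='float32')
        logits = ResNet(depth=50, num_classes=1000)(image)
        print(logits.shape)  # (-1, 1000); -1 is the dynamic batch dimension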
|
DALI-main
|
docs/examples/use_cases/paddle/resnet50/resnet.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import os
import shutil
import time
import numpy as np
from paddle import fluid
import paddle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
from nvidia.dali.plugin.paddle import DALIClassificationIterator, LastBatchPolicy
def create_dali_pipeline(batch_size, num_threads, device_id, data_dir, crop, size,
shard_id, num_shards, dali_cpu=False, is_training=True):
pipeline = Pipeline(batch_size, num_threads, device_id, seed=12 + device_id)
with pipeline:
images, labels = fn.readers.file(file_root=data_dir,
shard_id=shard_id,
num_shards=num_shards,
random_shuffle=is_training,
pad_last_batch=True,
name="Reader")
dali_device = 'cpu' if dali_cpu else 'gpu'
decoder_device = 'cpu' if dali_cpu else 'mixed'
        # ask nvJPEG to preallocate memory for the biggest sample in ImageNet for CPU and GPU to avoid reallocations at runtime
device_memory_padding = 211025920 if decoder_device == 'mixed' else 0
host_memory_padding = 140544512 if decoder_device == 'mixed' else 0
        # ask the HW NVJPEG decoder to preallocate memory for the biggest image in the data set to avoid reallocations at runtime
preallocate_width_hint = 5980 if decoder_device == 'mixed' else 0
preallocate_height_hint = 6430 if decoder_device == 'mixed' else 0
if is_training:
images = fn.decoders.image_random_crop(images,
device=decoder_device, output_type=types.RGB,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,
preallocate_width_hint=preallocate_width_hint,
preallocate_height_hint=preallocate_height_hint,
random_aspect_ratio=[0.8, 1.25],
random_area=[0.1, 1.0],
num_attempts=100)
images = fn.resize(images,
device=dali_device,
resize_x=crop,
resize_y=crop,
interp_type=types.INTERP_TRIANGULAR)
mirror = fn.random.coin_flip(probability=0.5)
else:
images = fn.decoders.image(images,
device=decoder_device,
output_type=types.RGB)
images = fn.resize(images,
device=dali_device,
size=size,
mode="not_smaller",
interp_type=types.INTERP_TRIANGULAR)
mirror = False
images = fn.crop_mirror_normalize(images.gpu(),
dtype=types.FLOAT,
output_layout="CHW",
crop=(crop, crop),
mean=[0.485 * 255,0.456 * 255,0.406 * 255],
std=[0.229 * 255,0.224 * 255,0.225 * 255],
mirror=mirror)
labels = labels.gpu()
labels = fn.cast(labels, dtype=types.INT64)
pipeline.set_outputs(images, labels)
return pipeline
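# --- Illustrative debugging sketch, not part of the original file ---
# The pipeline above can be built and run standalone to inspect a batch
# before wrapping it in an iterator; the data path below is hypothetical:
#
#   pipe = create_dali_pipeline(batch_size=8, num_threads=4, device_id=0,
#                               data_dir='/data/imagenet/train', crop=224,
#                               size=256, shard_id=0, num_shards=1,
#                               is_training=True)
#   pipe.build()
#   images, labels = pipe.run()  # DALI TensorLists resident on the GPU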
class AverageMeter(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def build():
from resnet import ResNet
model = ResNet(FLAGS.depth, num_classes=1000)
image = fluid.layers.data(name='data', shape=[3, 224, 224],
dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
logits = model(image)
loss, pred = fluid.layers.softmax_with_cross_entropy(
logits, label, return_softmax=True)
avg_loss = fluid.layers.mean(x=loss)
avg_loss.persistable = True
acc_top1 = fluid.layers.accuracy(input=pred, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=pred, label=label, k=5)
return avg_loss, acc_top1, acc_top5
def run(exe, prog, fetch_list, loader, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
total_batches = int(loader._size / FLAGS.batch_size)
for i, batch in enumerate(loader):
data_time.update(time.time() - end)
loss, prec1, prec5 = exe.run(
prog, feed=batch, fetch_list=fetch_list)
        loss = np.mean(loss)
        prec1 = np.mean(prec1)
        prec5 = np.mean(prec5)
num_items = batch[0]['label'].shape()[0]
losses.update(loss, num_items)
top1.update(prec1, num_items)
top5.update(prec5, num_items)
batch_time.update(time.time() - end)
end = time.time()
if FLAGS.local_rank == 0 and i % FLAGS.print_freq == 0 and i > 1:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, total_batches,
FLAGS.whole_batch_size / batch_time.val,
FLAGS.whole_batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
return batch_time.avg, top1.avg, top5.avg
def main():
env = os.environ
FLAGS.local_rank = int(env.get('PADDLE_TRAINER_ID', 0))
FLAGS.world_size = int(env.get('PADDLE_TRAINERS_NUM', 1))
FLAGS.device_id = int(env['FLAGS_selected_gpus'])
FLAGS.whole_batch_size = FLAGS.world_size * FLAGS.batch_size
pipe = create_dali_pipeline(batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_threads,
device_id=FLAGS.device_id,
data_dir=os.path.join(FLAGS.data, 'train'),
crop=224,
size=256,
dali_cpu=False,
shard_id=FLAGS.local_rank,
num_shards=FLAGS.world_size,
is_training=True)
pipe.build()
sample_per_shard = pipe.epoch_size("Reader") // FLAGS.world_size
train_loader = DALIClassificationIterator(pipe, reader_name="Reader")
if FLAGS.local_rank == 0:
pipe = create_dali_pipeline(batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_threads,
device_id=FLAGS.device_id,
data_dir=os.path.join(FLAGS.data, 'val'),
crop=224,
size=256,
dali_cpu=False,
shard_id=0,
num_shards=1,
is_training=False)
pipe.build()
val_loader = DALIClassificationIterator(pipe, reader_name="Reader")
place = fluid.CUDAPlace(FLAGS.device_id)
exe = fluid.Executor(place)
startup_prog = fluid.Program()
train_prog = fluid.Program()
eval_prog = fluid.Program()
step_per_epoch = int(math.ceil(sample_per_shard / FLAGS.batch_size))
milestones = [step_per_epoch * e for e in (30, 60, 80)]
values = [FLAGS.lr * (0.1**i) for i in range(len(milestones) + 1)]
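    # Worked example (illustrative, not in the original): with ~1.28M ImageNet
    # training samples, world_size=8 and batch_size=128, each shard holds
    # ~160k samples, so step_per_epoch is about 1252 and the learning rate
    # drops 10x at steps ~37560, ~75120 and ~100160 (epochs 30, 60 and 80);
    # values == [lr, lr * 0.1, lr * 0.01, lr * 0.001].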
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
train_fetch_list = build()
learning_rate = fluid.layers.piecewise_decay(
boundaries=milestones, values=values)
learning_rate = fluid.layers.linear_lr_warmup(
learning_rate=learning_rate,
warmup_steps=5 * step_per_epoch,
start_lr=0.,
end_lr=FLAGS.lr)
decay = FLAGS.weight_decay
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=FLAGS.momentum,
regularization=fluid.regularizer.L2Decay(decay))
avg_loss = train_fetch_list[0]
optimizer.minimize(avg_loss)
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
eval_fetch_list = build()
eval_prog = eval_prog.clone(True)
build_strategy = fluid.BuildStrategy()
build_strategy.trainer_id = FLAGS.local_rank
build_strategy.num_trainers = FLAGS.world_size
config = fluid.DistributeTranspilerConfig()
config.mode = "nccl2"
t = fluid.DistributeTranspiler(config=config)
t.transpile(
FLAGS.local_rank,
trainers=os.environ.get('PADDLE_TRAINER_ENDPOINTS'),
current_endpoint=os.environ.get('PADDLE_CURRENT_ENDPOINT'),
startup_program=startup_prog,
program=train_prog)
exec_strategy = fluid.ExecutionStrategy()
exe.run(startup_prog)
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=avg_loss.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
compiled_eval_prog = fluid.compiler.CompiledProgram(eval_prog)
total_time = AverageMeter()
for epoch in range(FLAGS.epochs):
if FLAGS.local_rank == 0:
print("==== train epoch {:02d} ====".format(epoch + 1))
avg_time, _, _ = run(
exe, compiled_train_prog, train_fetch_list, train_loader, epoch)
total_time.update(avg_time)
# reset DALI iterators
train_loader.reset()
if FLAGS.local_rank == 0:
print("==== validation epoch {:02d} ====".format(epoch + 1))
_, prec1, prec5 = run(
exe, compiled_eval_prog, eval_fetch_list, val_loader, epoch)
val_loader.reset()
ckpt_path = os.path.join('checkpoint', "{:02d}".format(epoch + 1))
if os.path.isdir(ckpt_path):
shutil.rmtree(ckpt_path)
print('Save model to {}.'.format(ckpt_path))
fluid.io.save_persistables(exe, ckpt_path, train_prog)
        samples_per_sec = FLAGS.whole_batch_size / total_time.avg
        if epoch == FLAGS.epochs - 1:
            print('##Top-1 {0}\n'
                  '##Top-5 {1}\n'
                  '##Perf {2}'.format(
                      prec1 * 100, prec5 * 100, samples_per_sec))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Paddle ImageNet Training')
parser.add_argument('data', metavar='DIR', help='path to dataset '
                        '(should have subdirectories named "train" and "val")')
parser.add_argument('-d', '--depth', default=50, type=int,
metavar='N', help='number of layers (default: 50)')
parser.add_argument('-j', '--num_threads', default=4, type=int,
metavar='N', help='number of threads (default: 4)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
                        metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('-e', '--epochs', default=90, type=int,
                        metavar='N', help='number of epochs to run (default: 90)')
FLAGS = parser.parse_args()
assert FLAGS.data, "error: must provide data path"
    # PaddlePaddle 2.x enables dynamic graph mode by default, while
    # fluid.layers.data() is only supported in static graph mode, so we call
    # paddle.enable_static() before building the program.
paddle.enable_static()
main()
|
DALI-main
|
docs/examples/use_cases/paddle/resnet50/main.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
class VGG(object):
"""
VGG, see https://arxiv.org/abs/1409.1556
"""
def __init__(self):
super(VGG, self).__init__()
def __call__(self, input):
layers = []
layers += self._vgg_block(input)
layers += self._add_extras_block(layers[-1])
norm_cfg = [20., -1, -1, -1, -1, -1]
for k, v in enumerate(layers):
if not norm_cfg[k] == -1:
layers[k] = self._l2_norm_scale(v, init_scale=norm_cfg[k])
return layers
def _vgg_block(self, input):
num_layers = [2, 2, 3, 3, 3]
vgg_base = [64, 128, 256, 512, 512]
conv = input
layers = []
for k, v in enumerate(vgg_base):
conv = self._conv_block(
conv, v, num_layers[k], name="conv{}_".format(k + 1))
layers.append(conv)
if k == 4:
conv = self._pooling_block(conv, 3, 1, pool_padding=1)
else:
conv = self._pooling_block(conv, 2, 2)
fc6 = self._conv_layer(conv, 1024, 3, 1, 6, dilation=6, name="fc6")
fc7 = self._conv_layer(fc6, 1024, 1, 1, 0, name="fc7")
return [layers[3], fc7]
def _add_extras_block(self, input):
cfg = [[256, 512, 1, 2, 3], [128, 256, 1, 2, 3],
[128, 256, 0, 1, 3], [128, 256, 0, 1, 3]]
conv = input
layers = []
for k, v in enumerate(cfg):
conv = self._extra_block(
conv, v[0], v[1], v[2], v[3], v[4],
name="conv{}_".format(6 + k))
layers.append(conv)
return layers
def _conv_block(self, input, num_filter, groups, name=None):
conv = input
for i in range(groups):
conv = self._conv_layer(
input=conv,
num_filters=num_filter,
filter_size=3,
stride=1,
padding=1,
act='relu',
name=name + str(i + 1))
return conv
def _extra_block(self,
input,
num_filters1,
num_filters2,
padding_size,
stride_size,
filter_size,
name=None):
# 1x1 conv
conv_1 = self._conv_layer(
input=input,
num_filters=int(num_filters1),
filter_size=1,
stride=1,
act='relu',
padding=0,
name=name + "1")
# 3x3 conv
conv_2 = self._conv_layer(
input=conv_1,
num_filters=int(num_filters2),
filter_size=filter_size,
stride=stride_size,
act='relu',
padding=padding_size,
name=name + "2")
return conv_2
def _conv_layer(self,
input,
num_filters,
filter_size,
stride,
padding,
dilation=1,
act='relu',
use_cudnn=True,
name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
dilation=dilation,
act=act,
use_cudnn=use_cudnn,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=ParamAttr(name=name + "_biases"),
name=name + '.conv2d.output.1')
return conv
def _pooling_block(self,
conv,
pool_size,
pool_stride,
pool_padding=0,
ceil_mode=True):
pool = fluid.layers.pool2d(
input=conv,
pool_size=pool_size,
pool_type='max',
pool_stride=pool_stride,
pool_padding=pool_padding,
ceil_mode=ceil_mode)
return pool
def _l2_norm_scale(self, input, init_scale=1.0, channel_shared=False):
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.initializer import Constant
helper = LayerHelper("Scale")
l2_norm = fluid.layers.l2_normalize(
input, axis=1) # l2 norm along channel
shape = [1] if channel_shared else [input.shape[1]]
scale = helper.create_parameter(
attr=helper.param_attr,
shape=shape,
dtype=input.dtype,
default_initializer=Constant(init_scale))
out = fluid.layers.elementwise_mul(
x=l2_norm, y=scale, axis=-1 if channel_shared else 1,
name="conv4_3_norm_scale")
return out
class SSD(object):
"""
Single Shot MultiBox Detector, see https://arxiv.org/abs/1512.02325
"""
def __init__(self, num_classes=81):
super(SSD, self).__init__()
self.backbone = VGG()
self.num_classes = num_classes
def __call__(self, image, gt_box, gt_label):
body_feats = self.backbone(image)
locs, confs, box, box_var = fluid.layers.multi_box_head(
inputs=body_feats,
image=image,
num_classes=self.num_classes,
min_ratio=15,
max_ratio=90,
base_size=300,
min_sizes=[30.0, 60.0, 111.0, 162.0, 213.0, 264.0],
max_sizes=[60.0, 111.0, 162.0, 213.0, 264.0, 315.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
steps=[8, 16, 32, 64, 100, 300],
offset=0.5,
flip=True,
min_max_aspect_ratios_order=False,
kernel_size=3,
pad=1)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
box_var)
loss = fluid.layers.reduce_sum(loss)
return loss
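# Note (illustrative, not part of the original file): for a 300x300 input the
# six source feature maps have 38, 19, 10, 5, 3 and 1 cells on a side, and the
# aspect-ratio config above yields 4, 6, 6, 6, 4 and 4 priors per cell, so
# multi_box_head emits the canonical 8732 default boxes of SSD300:
#   38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4 == 8732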
|
DALI-main
|
docs/examples/use_cases/paddle/ssd/ssd.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import shutil
import sys
import tarfile
import tempfile
try:
from urllib.request import urlopen
from urllib.parse import urlparse
except ImportError:  # Python 2
from urllib2 import urlopen
from urlparse import urlparse
from paddle import fluid
def _extract_tar(filename, dest):
print("extracting to {}".format(dest))
if not os.path.exists(dest):
try:
os.makedirs(dest)
except OSError as e:
if e.errno != errno.EEXIST:
raise
    with tarfile.open(filename) as f:
        f.extractall(dest)
def _download_weight(url):
weight_dir = os.path.expanduser("~/.cache/paddle/weights")
filename = os.path.basename(urlparse(url).path)
base, ext = os.path.splitext(filename)
assert ext in ['.tar', '.pdparams'], "Unsupported weight format"
if ext == '.tar':
dest = os.path.join(weight_dir, base)
if os.path.exists(dest):
assert os.path.isdir(dest), "weight path is not a directory"
return dest
else:
dest = os.path.join(weight_dir, filename)
if os.path.isfile(dest):
return dest
print("downloading {}".format(url))
req = urlopen(url)
total = float(req.headers['content-length'])
tmp = tempfile.NamedTemporaryFile(delete=False)
downloaded = 0
try:
while True:
buffer = req.read(8192)
if len(buffer) == 0:
break
tmp.write(buffer)
downloaded += len(buffer)
sys.stdout.write("\r{0:.1f}%".format(100 * downloaded / total))
sys.stdout.flush()
sys.stdout.write('\n')
tmp.close()
if ext == '.tar':
_extract_tar(tmp.name, weight_dir)
else:
shutil.move(tmp.name, dest)
finally:
tmp.close()
if os.path.exists(tmp.name):
os.remove(tmp.name)
return dest
def load_weights(exe, prog, url):
weight_path = _download_weight(url)
if os.path.isdir(weight_path):
fluid.io.load_vars(
exe, weight_path, prog,
predicate=lambda v: os.path.exists(
os.path.join(weight_path, v.name)))
else:
fluid.io.load_params(exe, '', prog, filename=weight_path)
|
DALI-main
|
docs/examples/use_cases/paddle/ssd/utils.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import time
import numpy as np
from paddle import fluid
import paddle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
from nvidia.dali.plugin.paddle import DALIGenericIterator, LastBatchPolicy
from ssd import SSD
from utils import load_weights
PRETRAIN_WEIGHTS = 'https://paddle-imagenet-models-name.bj.bcebos.com/VGG16_caffe_pretrained.tar'
def create_coco_pipeline(file_root,
annotations_file,
batch_size=1,
device_id=0,
num_threads=4,
local_rank=0,
world_size=1):
pipeline = Pipeline(batch_size, num_threads,
local_rank, seed=42 + device_id)
with pipeline:
images, bboxes, labels = fn.readers.coco(file_root=file_root,
annotations_file=annotations_file,
skip_empty=True,
shard_id=local_rank,
num_shards=world_size,
ratio=True,
ltrb=True,
random_shuffle=False,
shuffle_after_epoch=True,
name="Reader")
crop_begin, crop_size, bboxes, labels = fn.random_bbox_crop(bboxes, labels,
device="cpu",
aspect_ratio=[0.5, 2.0],
thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
scaling=[0.3, 1.0],
bbox_layout="xyXY",
allow_no_crop=True,
num_attempts=50)
images = fn.decoders.image_slice(images, crop_begin, crop_size, device="mixed", output_type=types.RGB)
flip_coin = fn.random.coin_flip(probability=0.5)
images = fn.resize(images,
resize_x=300,
resize_y=300,
min_filter=types.DALIInterpType.INTERP_TRIANGULAR)
# use float to avoid clipping and quantizing the intermediate result
images = fn.hsv(images, dtype=types.FLOAT, hue=fn.random.uniform(range=[-0.5, 0.5]),
saturation=fn.random.uniform(range=[0.5, 1.5]))
images = fn.brightness_contrast(images,
contrast_center = 128, # input is in float, but in 0..255 range
dtype = types.UINT8,
brightness = fn.random.uniform(range=[0.875, 1.125]),
contrast = fn.random.uniform(range=[0.5, 1.5]))
bboxes = fn.bb_flip(bboxes, ltrb=True, horizontal=flip_coin)
images = fn.crop_mirror_normalize(images,
mean=[104., 117., 123.],
std=[1., 1., 1.],
mirror=flip_coin,
dtype=types.FLOAT,
output_layout="CHW",
pad_output=False)
pipeline.set_outputs(images, bboxes, labels)
return pipeline
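# Illustrative note, not part of the original file: DALIGenericIterator maps
# the three pipeline outputs through the output_map given in main() below;
# 'image' arrives as a dense tensor, while ('gt_box', 1) and ('gt_label', 1)
# arrive as LoD tensors with one level of per-sample variable length,
# matching the lod_level=1 data layers declared in build().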
class AverageMeter(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def build():
model = SSD()
image = fluid.layers.data(
name='image', shape=[3, 300, 300], dtype='float32')
gt_box = fluid.layers.data(
name='gt_box', shape=[4], dtype='float32', lod_level=1)
gt_label = fluid.layers.data(
name='gt_label', shape=[1], dtype='int32', lod_level=1)
return model(image, gt_box, gt_label)
def main():
places = []
for p in fluid.framework.cuda_places():
place = fluid.core.Place()
place.set_place(p)
places.append(place)
file_root = os.path.join(FLAGS.data, 'train2017')
annotations_file = os.path.join(
FLAGS.data, 'annotations/instances_train2017.json')
world_size = len(places)
pipelines = [
create_coco_pipeline(
file_root, annotations_file, FLAGS.batch_size, p.gpu_device_id(),
FLAGS.num_threads, local_rank=idx, world_size=world_size)
for idx, p in enumerate(places)]
train_loader = DALIGenericIterator(
pipelines, ['image', ('gt_box', 1), ('gt_label', 1)],
reader_name="Reader", last_batch_policy=LastBatchPolicy.PARTIAL, auto_reset=True, dynamic_shape=True)
FLAGS.whole_batch_size = FLAGS.batch_size * world_size
total_steps = 400000
if FLAGS.check_loss_steps > 0:
total_steps = FLAGS.check_loss_steps
milestones = [280000, 360000]
values = [FLAGS.lr * (0.1**i) for i in range(len(milestones) + 1)]
exe = fluid.Executor(fluid.CUDAPlace(0))
startup_prog = fluid.Program()
train_prog = fluid.Program()
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
train_fetch_list = build()
learning_rate = fluid.layers.piecewise_decay(
boundaries=milestones, values=values)
learning_rate = fluid.layers.linear_lr_warmup(
learning_rate=learning_rate,
warmup_steps=500,
start_lr=FLAGS.lr / 3,
end_lr=FLAGS.lr)
decay = FLAGS.weight_decay
optimizer = fluid.optimizer.Momentum(
momentum=FLAGS.momentum,
learning_rate=learning_rate,
regularization=fluid.regularizer.L2Decay(decay))
avg_loss = train_fetch_list[0]
optimizer.minimize(avg_loss)
exe.run(startup_prog)
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=avg_loss.name)
load_weights(exe, train_prog, PRETRAIN_WEIGHTS)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
def forever():
while True:
try:
yield next(train_loader)
except StopIteration:
pass
for idx, batch in enumerate(forever()):
if idx > total_steps:
break
data_time.update(time.time() - end)
fetches = exe.run(
compiled_train_prog, feed=batch, fetch_list=train_fetch_list)
loss = np.mean(fetches[0])
losses.update(loss, FLAGS.whole_batch_size)
if FLAGS.check_loss_steps > 0:
if idx == 0:
loss_start = loss
else:
loss_end = loss
if idx % FLAGS.print_freq == 0 and idx > 1:
print('Epoch: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
idx, total_steps,
FLAGS.whole_batch_size / batch_time.val,
FLAGS.whole_batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses))
if idx % FLAGS.ckpt_freq == 0 and idx > 1:
ckpt_path = os.path.join('checkpoint', "{:02d}".format(idx))
if os.path.isdir(ckpt_path):
shutil.rmtree(ckpt_path)
print('Save model to {}.'.format(ckpt_path))
fluid.io.save_persistables(exe, ckpt_path, train_prog)
batch_time.update(time.time() - end)
end = time.time()
if FLAGS.check_loss_steps > 0:
assert loss_start > loss_end, \
'loss should decrease after training for {} steps'.format(
FLAGS.check_loss_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Paddle Single Shot MultiBox Detector Training')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('-j', '--num_threads', default=4, type=int,
metavar='N', help='number of threads (default: 4)')
parser.add_argument('-b', '--batch-size', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                        metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--ckpt-freq', '-c', default=5000, type=int,
metavar='N',
help='checkpoint frequency (default: 5000)')
parser.add_argument('--check-loss-steps', '-t', default=-1, type=int,
metavar='N', help='check N steps for loss convergence')
FLAGS = parser.parse_args()
assert FLAGS.data, "error: must provide data path"
    # PaddlePaddle 2.x enables dynamic graph mode by default, while
    # fluid.layers.data() is only supported in static graph mode, so we call
    # paddle.enable_static() before building the program.
paddle.enable_static()
main()
|
DALI-main
|
docs/examples/use_cases/paddle/ssd/train.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle.fluid as fluid
class TSM():
def __init__(self, training=False):
self.training = training
self.num_segs = 8
self.num_classes = 400
self.depth = 50
self.layers = [3, 4, 6, 3]
self.num_filters = [64, 128, 256, 512]
def shift_module(self, input):
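        # fluid.layers.temporal_shift regroups the folded N*T batch into
        # clips of num_segs frames and shifts channels along time: with
        # shift_ratio=1/8, the first C/8 channels move one segment backward,
        # the next C/8 move one segment forward, and the rest stay in place,
        # so the following 2D convolutions can mix information across frames.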
output = fluid.layers.temporal_shift(input, self.num_segs, 1.0 / 8)
return output
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=fluid.param_attr.ParamAttr(name=name + "_weights"),
bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(
input=conv,
act=act,
is_test=(not self.training),
param_attr=fluid.param_attr.ParamAttr(name=bn_name + "_scale"),
bias_attr=fluid.param_attr.ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + "_mean",
moving_variance_name=bn_name + '_variance')
def shortcut(self, input, ch_out, stride, name):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
else:
return input
def bottleneck_block(self, input, num_filters, stride, name):
shifted = self.shift_module(input)
conv0 = self.conv_bn_layer(
input=shifted,
num_filters=num_filters,
filter_size=1,
act='relu',
name=name + "_branch2a")
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu',
name=name + "_branch2b")
conv2 = self.conv_bn_layer(
input=conv1,
num_filters=num_filters * 4,
filter_size=1,
act=None,
name=name + "_branch2c")
short = self.shortcut(
input, num_filters * 4, stride, name=name + "_branch1")
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
def __call__(self, input):
channels = input.shape[2]
short_size = input.shape[3]
input = fluid.layers.reshape(
x=input, shape=[-1, channels, short_size, short_size])
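        # The input arrives as [N, num_segs, C, H, W]; folding the segment
        # axis into the batch lets the 2D ResNet below process every frame
        # independently, while shift_module() reintroduces temporal mixing.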
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu',
name='conv1')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
for block in range(len(self.layers)):
for i in range(self.layers[block]):
conv_name = "res" + str(block + 2) + chr(97 + i)
conv = self.bottleneck_block(
input=conv,
num_filters=self.num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
name=conv_name)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
dropout = fluid.layers.dropout(
x=pool, dropout_prob=0.5, is_test=(not self.training))
feature = fluid.layers.reshape(
x=dropout, shape=[-1, self.num_segs, pool.shape[1]])
out = fluid.layers.reduce_mean(feature, dim=1)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(input=out,
size=self.num_classes,
act='softmax',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv,
stdv)),
bias_attr=fluid.param_attr.ParamAttr(
learning_rate=2.0,
regularizer=fluid.regularizer.L2Decay(0.)))
return out
|
DALI-main
|
docs/examples/use_cases/paddle/tsm/tsm.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import shutil
import sys
import tarfile
import tempfile
try:
from urllib.request import urlopen
from urllib.parse import urlparse
except ImportError:  # Python 2
from urllib2 import urlopen
from urlparse import urlparse
from paddle import fluid
def _extract_tar(filename, dest):
print("extracting to {}".format(dest))
if not os.path.exists(dest):
try:
os.makedirs(dest)
except OSError as e:
if e.errno != errno.EEXIST:
raise
    with tarfile.open(filename) as f:
        f.extractall(dest)
def _download_weight(url):
weight_dir = os.path.expanduser("~/.cache/paddle/weights")
filename = os.path.basename(urlparse(url).path)
base, ext = os.path.splitext(filename)
assert ext in ['.tar', '.pdparams'], "Unsupported weight format"
if ext == '.tar':
dest = os.path.join(weight_dir, base)
if os.path.exists(dest):
assert os.path.isdir(dest), "weight path is not a directory"
return dest
else:
dest = os.path.join(weight_dir, filename)
if os.path.isfile(dest):
return dest
print("downloading {}".format(url))
req = urlopen(url)
total = float(req.headers['content-length'])
tmp = tempfile.NamedTemporaryFile(delete=False)
downloaded = 0
try:
while True:
buffer = req.read(8192)
if len(buffer) == 0:
break
tmp.write(buffer)
downloaded += len(buffer)
sys.stdout.write("\r{0:.1f}%".format(100 * downloaded / total))
sys.stdout.flush()
sys.stdout.write('\n')
tmp.close()
if ext == '.tar':
_extract_tar(tmp.name, weight_dir)
else:
shutil.move(tmp.name, dest)
finally:
tmp.close()
if os.path.exists(tmp.name):
os.remove(tmp.name)
return dest
def load_weights(exe, prog, url):
weight_path = _download_weight(url)
if os.path.isdir(weight_path):
fluid.io.load_vars(
exe, weight_path, prog,
predicate=lambda v: os.path.exists(
os.path.join(weight_path, v.name)))
else:
fluid.io.load_params(exe, '', prog, filename=weight_path)
|
DALI-main
|
docs/examples/use_cases/paddle/tsm/utils.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from paddle import fluid
import paddle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
from nvidia.dali.plugin.paddle import DALIGenericIterator
from tsm import TSM
from utils import load_weights
PRETRAIN_WEIGHTS = 'https://paddlemodels.bj.bcebos.com/video_classification/TSM_final.pdparams'
def create_video_pipe(video_files, sequence_length=8, target_size=224, stride=30):
pipeline = Pipeline(1, 4, 0, seed=42)
with pipeline:
images = fn.readers.video(device="gpu", filenames=video_files,
sequence_length=sequence_length, stride=stride,
shard_id=0, num_shards=1, random_shuffle=False,
pad_last_batch=True, name="Reader")
images = fn.crop_mirror_normalize(images,
dtype=types.FLOAT,
output_layout="FCHW",
crop=(target_size, target_size),
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
pipeline.set_outputs(images)
return pipeline
def build(seg_num=8, target_size=224):
image_shape = [seg_num, 3, target_size, target_size]
image = fluid.layers.data(
name='image', shape=image_shape, dtype='float32')
model = TSM()
return model(image)
def main():
seg_num = 8
target_size = 224
video_files = [FLAGS.data + '/' + f for f in os.listdir(FLAGS.data)]
pipeline = create_video_pipe(video_files, seg_num, target_size, FLAGS.stride)
video_loader = DALIGenericIterator(
pipeline, ['image'], reader_name="Reader", dynamic_shape=True)
exe = fluid.Executor(fluid.CUDAPlace(0))
startup_prog = fluid.Program()
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
fetch_list = build(seg_num, target_size)
exe.run(startup_prog)
compiled_eval_prog = fluid.CompiledProgram(eval_prog)
load_weights(exe, eval_prog, PRETRAIN_WEIGHTS)
labels = json.load(open("kinetics_labels.json"))
for idx, batch in enumerate(video_loader):
fetches = exe.run(
compiled_eval_prog, feed=batch, fetch_list=fetch_list)
pred = fetches[0][0]
        topk_indices = pred.argsort()[-FLAGS.topk:]
topk_labels = [labels[i] for i in topk_indices]
filename = video_files[idx]
print("prediction for {} is: {}".format(filename, topk_labels))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Paddle Temporal Shift Module Inference')
parser.add_argument('data', metavar='DIR', help='Path to video files')
parser.add_argument('--topk', '-k', default=1, type=int,
metavar='K', help='Top k results (default: 1)')
parser.add_argument('--stride', '-s', default=30, type=int, metavar='S',
help='Distance between frames (default: 30)')
FLAGS = parser.parse_args()
assert FLAGS.data, "error: must provide data path"
    # PaddlePaddle 2.x enables dynamic graph mode by default, while
    # fluid.layers.data() is only supported in static graph mode, so we call
    # paddle.enable_static() before building the program.
paddle.enable_static()
main()
|
DALI-main
|
docs/examples/use_cases/paddle/tsm/infer.py
|
#!/usr/bin/env python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import nvutils
from resnet_model import trivial
nvutils.init()
default_args = {
'image_width' : 224,
'image_height' : 224,
'distort_color' : False,
'momentum' : 0.9,
'loss_scale' : 128.0,
# The following params can be changed by cmdline options.
'image_format' : 'channels_last',
'data_dir' : None,
'data_idx_dir' : None,
'batch_size' : 256,
'num_iter' : 300,
'iter_unit' : 'batch',
'log_dir' : None,
'export_dir' : None,
'tensorboard_dir' : None,
'display_every' : 10,
'precision' : 'fp16',
'dali_mode' : None,
'use_xla': False,
'predict' : False,
'use_dali' : "GPU",
}
args = nvutils.parse_cmdline(default_args)
if args['predict']:
nvutils.predict(args)
else:
nvutils.train(trivial, args)
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/trivial.py
|
import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras import initializers
from tensorflow.keras import models
from tensorflow.keras import regularizers
from nvutils import image_processing
layers = tf.keras.layers
L2_WEIGHT_DECAY = 1e-4
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def _gen_l2_regularizer(use_l2_regularizer=True):
return regularizers.l2(L2_WEIGHT_DECAY) if use_l2_regularizer else None
def identity_block(input_tensor,
kernel_size,
filters,
stage,
block,
use_l2_regularizer=True):
"""The identity block is the block that has no conv layer at shortcut.
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(
filters1, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(
input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters2,
kernel_size,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters3, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2c')(
x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
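# Illustrative shape check, not part of the original file: the identity block
# preserves both the spatial size and the channel count of its input, e.g.
# for a channels_last tensor of shape [2, 56, 56, 256]:
#   t = tf.zeros([2, 56, 56, 256])
#   y = identity_block(t, 3, [64, 64, 256], stage=2, block='z')
#   assert y.shape == t.shape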
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2),
use_l2_regularizer=True):
"""A block that has a conv layer at shortcut.
Note that from stage 3,
the second conv layer at main path is with strides=(2, 2)
And the shortcut should have strides=(2, 2) as well
Args:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the second conv layer in the block.
use_l2_regularizer: whether to use L2 regularizer on Conv layer.
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(
filters1, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2a')(
input_tensor)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters2,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2b')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(
x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(
filters3, (1, 1),
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '2c')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2c')(
x)
shortcut = layers.Conv2D(
filters3, (1, 1),
strides=strides,
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name=conv_name_base + '1')(
input_tensor)
shortcut = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '1')(
shortcut)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
def resnet50(num_classes,
batch_size=None,
use_l2_regularizer=True,
rescale_inputs=False):
"""Instantiates the ResNet50 architecture.
Args:
num_classes: `int` number of classes for image classification.
batch_size: Size of the batches for each step.
use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
rescale_inputs: whether to rescale inputs from 0 to 1.
Returns:
A Keras model instance.
"""
input_shape = (224, 224, 3)
img_input = layers.Input(shape=input_shape, batch_size=batch_size)
if rescale_inputs:
# Hub image modules expect inputs in the range [0, 1]. This rescales these
# inputs to the range expected by the trained model.
x = layers.Lambda(
lambda x: x * 255.0 - backend.constant(
image_processing.CHANNEL_MEANS,
shape=[1, 1, 3],
dtype=x.dtype),
name='rescale')(
img_input)
else:
x = img_input
if backend.image_data_format() == 'channels_first':
x = layers.Lambda(
lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
name='transpose')(x)
bn_axis = 1
else: # channels_last
bn_axis = 3
x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
x = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='conv1')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name='bn_conv1')(
x)
x = layers.Activation('relu')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = conv_block(
x,
3, [64, 64, 256],
stage=2,
block='a',
strides=(1, 1),
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [64, 64, 256],
stage=2,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [64, 64, 256],
stage=2,
block='c',
use_l2_regularizer=use_l2_regularizer)
x = conv_block(
x,
3, [128, 128, 512],
stage=3,
block='a',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [128, 128, 512],
stage=3,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [128, 128, 512],
stage=3,
block='c',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [128, 128, 512],
stage=3,
block='d',
use_l2_regularizer=use_l2_regularizer)
x = conv_block(
x,
3, [256, 256, 1024],
stage=4,
block='a',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='c',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='d',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='e',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [256, 256, 1024],
stage=4,
block='f',
use_l2_regularizer=use_l2_regularizer)
x = conv_block(
x,
3, [512, 512, 2048],
stage=5,
block='a',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [512, 512, 2048],
stage=5,
block='b',
use_l2_regularizer=use_l2_regularizer)
x = identity_block(
x,
3, [512, 512, 2048],
stage=5,
block='c',
use_l2_regularizer=use_l2_regularizer)
rm_axes = [1, 2] if backend.image_data_format() == 'channels_last' else [2, 3]
x = layers.Lambda(lambda x: backend.mean(x, rm_axes), name='reduce_mean')(x)
x = layers.Dense(
num_classes,
kernel_initializer=initializers.RandomNormal(stddev=0.01),
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='fc1000')(
x)
  # A softmax that is followed by the model loss cannot be computed in
  # float16 due to numeric issues, so we force dtype=float32 here.
x = layers.Activation('softmax', dtype='float32')(x)
# Create model.
return models.Model(img_input, x, name='resnet50')
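# Illustrative sketch, not part of the original file: instantiating the model
# and checking its interface (parameter count is approximate):
#   model = resnet50(num_classes=1000)
#   model.summary()            # ~25.6M trainable parameters
#   print(model.output_shape)  # (None, 1000)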
def trivial(num_classes,
batch_size=None,
use_l2_regularizer=True):
input_shape = (224, 224, 3)
img_input = layers.Input(shape=input_shape, batch_size=batch_size)
x = img_input
if backend.image_data_format() == 'channels_first':
x = layers.Lambda(
lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
name='transpose')(x)
bn_axis = 1
else: # channels_last
bn_axis = 3
x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
x = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='conv1')(
x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name='bn_conv1')(
x)
rm_axes = [1, 2] if backend.image_data_format() == 'channels_last' else [2, 3]
x = layers.Lambda(lambda x: backend.mean(x, rm_axes), name='reduce_mean')(x)
x = layers.Dense(
num_classes,
kernel_initializer=initializers.RandomNormal(stddev=0.01),
kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
name='fc1000')(
x)
  # A softmax that is followed by the model loss cannot be computed in
  # float16 due to numeric issues, so we force dtype=float32 here.
x = layers.Activation('softmax', dtype='float32')(x)
# Create model.
return models.Model(img_input, x, name='resnet50')
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/resnet_model.py
|
#!/usr/bin/env python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import nvutils
from resnet_model import resnet50
nvutils.init()
default_args = {
'image_width' : 224,
'image_height' : 224,
'distort_color' : False,
'momentum' : 0.9,
'loss_scale' : 128.0,
# The following params can be changed by cmdline options.
'image_format' : 'channels_last',
'data_dir' : None,
'data_idx_dir' : None,
'batch_size' : 256,
'num_iter' : 300,
'iter_unit' : 'batch',
'log_dir' : None,
'export_dir' : None,
'tensorboard_dir' : None,
'display_every' : 10,
'precision' : 'fp16',
'dali_mode' : None,
'use_xla': False,
'predict' : False,
'use_dali' : "GPU",
}
args = nvutils.parse_cmdline(default_args)
if args['predict']:
nvutils.predict(args)
else:
nvutils.train(resnet50, args)
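# Illustrative launch, not part of the original file: assuming
# nvutils.parse_cmdline exposes the default_args keys as command-line flags,
# a typical multi-GPU Horovod run would look like (paths hypothetical):
#   mpiexec --allow-run-as-root --bind-to socket -np 8 \
#       python resnet.py --data_dir=/data/tfrecords --data_idx_dir=/data/idx \
#       --num_iter=90 --iter_unit=epoch --precision=fp16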
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/resnet.py
|
#!/usr/bin/env python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import nvutils
from resnet_model import resnet50
nvutils.init()
default_args = {
'image_width' : 224,
'image_height' : 224,
'distort_color' : False,
'momentum' : 0.9,
'loss_scale' : 128.0,
# The following params can be changed by cmdline options.
'image_format' : 'channels_last',
'data_dir' : None,
'data_idx_dir' : None,
'batch_size' : 256,
'num_iter' : 300,
'iter_unit' : 'batch',
'log_dir' : None,
'export_dir' : None,
'tensorboard_dir' : None,
'display_every' : 10,
'precision' : 'fp16',
'dali_mode' : None,
'use_xla': False,
'predict' : False,
'use_dali' : "GPU",
}
args = nvutils.parse_cmdline(default_args)
if args['predict']:
nvutils.predict_ctl(args)
else:
nvutils.train_ctl(resnet50, args)
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/resnet_ctl.py
|
#!/usr/bin/env python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from nvutils import image_processing
from nvutils import common
from distutils.version import StrictVersion
import tensorflow as tf
import keras
import os
import time
import re
import horovod.tensorflow.keras as hvd
from keras import backend
print(tf.__version__)
if StrictVersion(tf.__version__) > StrictVersion("2.1.0"):
if StrictVersion(tf.__version__) >= StrictVersion("2.4.0"):
from tensorflow.python.keras.mixed_precision import device_compatibility_check
else:
from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
device_compatibility_check._logged_compatibility_check = True
class _ProfileKerasFitCallback(keras.callbacks.Callback):
def __init__(self, batch_size, display_every=10):
self.batch_size = batch_size * hvd.size()
self.log_steps = display_every
self.global_steps = 0
def on_batch_begin(self, batch, logs=None):
self.global_steps += 1
if self.global_steps == 1:
self.start_time = time.time()
def on_batch_end(self, batch, logs=None):
"""Records elapse time of the batch and calculates examples per second."""
if self.global_steps % self.log_steps == 0:
timestamp = time.time()
elapsed_time = timestamp - self.start_time
examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
if hvd.rank() == 0:
print("global_step: %d images_per_sec: %.1f" % (self.global_steps,
examples_per_second))
self.start_time = timestamp
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_epoch_end(self, epoch, logs=None):
epoch_run_time = time.time() - self.epoch_start
if hvd.rank() == 0:
print("epoch: %d time_taken: %.1f" % (epoch, epoch_run_time))
def train(model_func, params):
image_width = params['image_width']
image_height = params['image_height']
image_format = params['image_format']
distort_color = params['distort_color']
momentum = params['momentum']
loss_scale = params['loss_scale']
data_dir = params['data_dir']
data_idx_dir = params['data_idx_dir']
batch_size = params['batch_size']
num_iter = params['num_iter']
iter_unit = params['iter_unit']
log_dir = params['log_dir']
export_dir = params['export_dir']
tensorboard_dir = params['tensorboard_dir']
display_every = params['display_every']
precision = params['precision']
dali_mode = params['dali_mode']
use_xla = params['use_xla']
if data_dir is not None:
file_format = os.path.join(data_dir, '%s-*')
train_files = sorted(tf.io.gfile.glob(file_format % 'train'))
valid_files = sorted(tf.io.gfile.glob(file_format % 'validation'))
num_train_samples = common.get_num_records(train_files)
num_valid_samples = common.get_num_records(valid_files)
else:
num_train_samples = 1281982
num_valid_samples = 5000
train_idx_files = None
valid_idx_files = None
if data_idx_dir is not None:
file_format = os.path.join(data_idx_dir, '%s-*')
train_idx_files = sorted(tf.io.gfile.glob(file_format % 'train'))
valid_idx_files = sorted(tf.io.gfile.glob(file_format % 'validation'))
if iter_unit.lower() == 'epoch':
num_epochs = num_iter
nstep_per_epoch = num_train_samples // (batch_size * hvd.size())
nstep_per_valid = num_valid_samples // (batch_size * hvd.size())
else:
assert iter_unit.lower() == 'batch'
num_epochs = 1
nstep_per_epoch = min(num_iter,
num_train_samples // (batch_size * hvd.size()))
nstep_per_valid = min(10, num_valid_samples // (batch_size * hvd.size()))
initial_epoch = 0
if log_dir:
# We save check points only when using the real data.
assert data_dir, "--data_dir cannot be empty when using --log_dir"
assert os.path.exists(log_dir)
ckpt_format = log_dir +"/model-{epoch:02d}-{val_top1:.2f}.hdf5"
# Looks for the most recent checkpoint and sets the initial epoch from it.
for filename in os.listdir(log_dir):
if filename.startswith('model-'):
initial_epoch = max(int(re.findall(r'\d+', filename)[0]),
initial_epoch)
if tensorboard_dir:
assert os.path.exists(tensorboard_dir)
if export_dir:
assert os.path.exists(export_dir)
save_format = export_dir +"/saved_model_rn50.h5"
if use_xla:
tf.config.optimizer.set_jit(True)
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
if precision == 'fp16':
if StrictVersion(tf.__version__) >= StrictVersion("2.4.0"):
policy = keras.mixed_precision.Policy('mixed_float16')
keras.mixed_precision.set_global_policy(policy)
else:
policy = keras.mixed_precision.experimental.Policy('mixed_float16', loss_scale)
keras.mixed_precision.experimental.set_policy(policy)
lr_schedule = common.create_piecewise_constant_decay_with_warmup(
batch_size=batch_size * hvd.size(),
epoch_size=num_train_samples,
warmup_epochs=common.LR_SCHEDULE[0][1],
boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]),
multipliers=list(p[0] for p in common.LR_SCHEDULE),
compute_lr_on_cpu=True)
opt = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=momentum)
# Horovod: add Horovod DistributedOptimizer. We use a modified version to
# support the custom learning rate schedule.
opt = hvd.DistributedOptimizer(opt)
if StrictVersion(tf.__version__) >= StrictVersion("2.4.0") and precision == 'fp16':
opt = keras.mixed_precision.LossScaleOptimizer(opt, dynamic=False,
initial_scale=loss_scale)
backend.set_image_data_format(image_format)
dtype = 'float16' if precision == 'fp16' else 'float32'
backend.set_floatx(dtype)
model = model_func(num_classes=image_processing.NUM_CLASSES)
loss_func = 'sparse_categorical_crossentropy'
top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5, name='top5')
top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1, name='top1')
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients. However, this option
# disables the overlapping of data loading and compute, and hurts
# performance if the model is not under a distribution strategy scope.
model.compile(optimizer=opt, loss=loss_func, metrics=[top1, top5],
experimental_run_tf_function=False)
training_hooks = []
training_hooks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
training_hooks.append(_ProfileKerasFitCallback(batch_size, display_every))
if log_dir and hvd.rank() == 0:
ckpt_callback = keras.callbacks.ModelCheckpoint(ckpt_format,
monitor='val_top1', verbose=1, save_best_only=False,
save_weights_only=False, save_freq='epoch')
training_hooks.append(ckpt_callback)
if tensorboard_dir and hvd.rank() == 0:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=tensorboard_dir)
training_hooks.append(tensorboard_callback)
if data_dir is not None:
num_preproc_threads = params['dali_threads'] if dali_mode else 10
train_input = image_processing.image_set(train_files, batch_size,
image_height, image_width, training=True, distort_color=distort_color,
deterministic=False, num_threads=num_preproc_threads,
use_dali=dali_mode, idx_filenames=train_idx_files)
valid_input = image_processing.image_set(valid_files, batch_size,
image_height, image_width, training=False, distort_color=False,
deterministic=False, num_threads=num_preproc_threads,
use_dali=dali_mode, idx_filenames=valid_idx_files)
if dali_mode:
train_input = train_input.get_device_dataset()
valid_input = valid_input.get_device_dataset()
valid_params = {'validation_data': valid_input,
'validation_steps': nstep_per_valid,
'validation_freq': 1}
else:
train_input = image_processing.fake_image_set(batch_size, image_height,
image_width)
valid_params = {}
try:
verbose = 2 if hvd.rank() == 0 else 0
model.fit(train_input, epochs=num_epochs, callbacks=training_hooks,
steps_per_epoch=nstep_per_epoch, verbose=verbose,
initial_epoch=initial_epoch, **valid_params)
except KeyboardInterrupt:
print("Keyboard interrupt")
if export_dir and hvd.rank() == 0:
model.save(save_format)
print(f"The model is saved to {save_format}")
def predict(params):
image_width = params['image_width']
image_height = params['image_height']
batch_size = params['batch_size']
export_dir = params['export_dir']
assert export_dir, "--export_dir must be given."
model_path = export_dir + "/saved_model_rn50.h5"
assert os.path.exists(model_path)
model = keras.models.load_model(model_path, custom_objects={
"PiecewiseConstantDecayWithWarmup":
common.PiecewiseConstantDecayWithWarmup})
predict_input = image_processing.fake_image_set(batch_size, image_height,
image_width, with_label=False)
results = model.predict(predict_input, verbose=1, steps=3)
print(f"The loaded model predicts {results.shape[0]} images.")
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/nvutils/runner.py
|
#!/usr/bin/env python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import horovod.tensorflow as hvd
def parse_cmdline(init_vals):
f = argparse.ArgumentDefaultsHelpFormatter
p = argparse.ArgumentParser(formatter_class=f)
p.add_argument('--image_format', choices=['channels_last', 'channels_first'],
default=init_vals.get('image_format'),
required=False,
help="""Set the input format, available values are
[channels_first|channels_last]. Default is channels_last.""")
p.add_argument('--data_dir',
default=init_vals.get('data_dir'),
required=False,
help="""Path to dataset in TFRecord format (aka Example
protobufs). Files should be named 'train-*' and
'validation-*'.""")
p.add_argument('--data_idx_dir',
default=init_vals.get('data_idx_dir'),
required=False,
help="""Path to index files of the TFRecord dataset. Files should
be named 'train-*.idx' and 'validation-*.idx'.""")
p.add_argument('-b', '--batch_size', type=int,
default=init_vals.get('batch_size'),
required=False,
help="""Size of each minibatch.""")
p.add_argument('-i', '--num_iter', type=int,
default=init_vals.get('num_iter'),
required=False,
help="""Number of batches or epochs to run.""")
p.add_argument('-u', '--iter_unit', choices=['epoch', 'batch'],
default=init_vals.get('iter_unit'),
required=False,
help="""Select whether 'num_iter' is interpreted in terms of
batches or epochs.""")
p.add_argument('--log_dir',
default=init_vals.get('log_dir'),
required=False,
help="""Directory in which to write training summaries and
checkpoints.""")
p.add_argument('--export_dir',
default=init_vals.get('export_dir'),
required=False,
help="""Directory in which to write the saved model.""")
p.add_argument('--tensorboard_dir',
default=init_vals.get('tensorboard_dir'),
required=False,
help="""Directory in which to write tensorboard logs.""")
p.add_argument('--display_every', type=int,
default=init_vals.get('display_every'),
required=False,
help="""How often (in batches) to print out running
information.""")
p.add_argument('--precision', choices=['fp32', 'fp16'],
default=init_vals.get('precision'),
required=False,
help="""Select single or half precision arithmetic.""")
p.add_argument('--dali_mode', choices=['CPU', 'GPU'],
default=init_vals.get('dali_mode'),
required=False,
nargs='?', const='GPU',
help="""Use DALI for the input pipeline; available values are
[CPU|GPU], selecting which variant of the pipeline to run.
Default is GPU.""")
p.add_argument('--dali_threads', type=int,
default=4,
required=False,
help="""Number of threads used by DALI.""")
p.add_argument('--use_xla', action='store_true',
help="""Whether to enable xla execution.""")
p.add_argument('--predict', action='store_true',
help="""Whether to conduct prediction""")
FLAGS, unknown_args = p.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
vals = init_vals
vals['image_format'] = FLAGS.image_format
vals['data_dir'] = FLAGS.data_dir
vals['data_idx_dir'] = FLAGS.data_idx_dir
vals['batch_size'] = FLAGS.batch_size
vals['num_iter'] = FLAGS.num_iter
vals['iter_unit'] = FLAGS.iter_unit
vals['log_dir'] = FLAGS.log_dir
vals['export_dir'] = FLAGS.export_dir
vals['tensorboard_dir'] = FLAGS.tensorboard_dir
vals['display_every'] = FLAGS.display_every
vals['precision'] = FLAGS.precision
vals['dali_mode'] = FLAGS.dali_mode
vals['dali_threads'] = FLAGS.dali_threads
vals['use_xla'] = FLAGS.use_xla or vals['use_xla']
vals['predict'] = FLAGS.predict or vals['predict']
if hvd.rank() == 0:
print("Script arguments:")
for flag, val in vals.items():
print(f" --{flag}={val}")
return vals
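# --- Usage sketch (editor's addition; names and default values here are
# hypothetical, not part of the original script). parse_cmdline() merges
# command-line flags into a caller-supplied dict of defaults; flags that are
# not passed keep their initial values. It assumes hvd.init() has already
# been called, since it queries hvd.rank().
def _example_parse_cmdline():
    defaults = {
        'image_format': 'channels_last', 'data_dir': None, 'data_idx_dir': None,
        'batch_size': 256, 'num_iter': 90, 'iter_unit': 'epoch',
        'log_dir': None, 'export_dir': None, 'tensorboard_dir': None,
        'display_every': 10, 'precision': 'fp16', 'dali_mode': None,
        'use_xla': False, 'predict': False,
    }
    return parse_cmdline(defaults)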
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/nvutils/cmdline.py
|
#!/usr/bin/env python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .runner import train
from .runner_ctl import train_ctl
from .runner import predict
from .runner_ctl import predict_ctl
from .cmdline import parse_cmdline
import os, sys, random
import tensorflow as tf
import horovod.tensorflow.keras as hvd
def init():
gpu_thread_count = 2
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = str(gpu_thread_count)
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
hvd.init()
if hvd.rank() == 0:
print('PY', sys.version)
print('TF', tf.version.VERSION)
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/nvutils/__init__.py
|
import tensorflow as tf
BASE_LEARNING_RATE = 0.1
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def create_piecewise_constant_decay_with_warmup(batch_size, epoch_size,
warmup_epochs, boundaries, multipliers, compute_lr_on_cpu=True, name=None):
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
lr_values = [rescaled_lr * m for m in multipliers]
warmup_steps = warmup_epochs * steps_per_epoch
return PiecewiseConstantDecayWithWarmup(rescaled_lr, step_boundaries,
lr_values, warmup_steps,
compute_lr_on_cpu, name)
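# --- Construction sketch (editor's addition) ---
# A minimal sketch of how the runners derive the factory arguments from
# LR_SCHEDULE above, assuming a single worker; epoch_size defaults to the
# fallback sample count used in runner.py. With batch_size equal to the base
# batch size of 256, rescaled_lr stays at BASE_LEARNING_RATE.
def _example_lr_schedule(epoch_size=1281982, batch_size=256):
    return create_piecewise_constant_decay_with_warmup(
        batch_size=batch_size,
        epoch_size=epoch_size,
        warmup_epochs=LR_SCHEDULE[0][1],             # 5
        boundaries=[p[1] for p in LR_SCHEDULE[1:]],  # [30, 60, 80]
        multipliers=[p[0] for p in LR_SCHEDULE],     # [1.0, 0.1, 0.01, 0.001]
        compute_lr_on_cpu=True)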
@tf.keras.utils.register_keras_serializable(package='Custom')
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, rescaled_lr, step_boundaries, lr_values, warmup_steps,
compute_lr_on_cpu, name):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
self.rescaled_lr = rescaled_lr
self.step_boundaries = step_boundaries
self.lr_values = lr_values
self.warmup_steps = warmup_steps
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
# Inside a tf.function or graph, the current optimizer implementation calls
# this schedule repeatedly and would create new learning-rate ops each time.
# To avoid this, we cache the ops per graph when not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
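# --- Serialization round trip (editor's addition) ---
# get_config() is what makes this schedule serializable; hvd_patch.py relies
# on the matching from_config() to rebuild the schedule from a serialized
# optimizer config. A minimal sketch:
def _example_schedule_roundtrip(sched):
    # `sched` is assumed to be a PiecewiseConstantDecayWithWarmup instance.
    return PiecewiseConstantDecayWithWarmup.from_config(sched.get_config())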
def get_num_records(filenames):
def count_records(tf_record_filename):
count = 0
for _ in tf.compat.v1.python_io.tf_record_iterator(tf_record_filename):
count += 1
return count
nfile = len(filenames)
return (count_records(filenames[0])*(nfile-1) +
count_records(filenames[-1]))
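# Note (editor's addition): get_num_records() estimates the total by assuming
# that every shard except the last holds as many records as the first one, so
# it only has to iterate over two files. The estimate is exact for evenly
# sized shards and approximate otherwise.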
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/nvutils/common.py
|
# This is a patch for Horovod 0.21.3 to work with our custom learning rate
# schedule used in the CNN resnet50 scripts.
from tensorflow import keras
import tensorflow as tf
import horovod.tensorflow as hvd
from horovod.tensorflow import Average, Compression, Sum
from nvutils import common
from distutils.version import LooseVersion
# The symbols below are referenced further down but were not imported here;
# in Horovod 0.21.x they live in the following modules (an assumption; verify
# against the installed Horovod version):
from horovod.tensorflow import rank, rocm_built
from horovod.tensorflow.gradient_aggregation import LocalGradientAggregationHelper
from horovod.tensorflow.gradient_aggregation_eager import LocalGradientAggregationHelperEager
from tensorflow.python.eager import backprop
_PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion('2.4.0')
def create_distributed_optimizer(
keras, optimizer, name, device_dense, device_sparse, compression,
sparse_as_dense, gradient_predivide_factor, op,
backward_passes_per_step=1, average_aggregated_gradients=False,
groups=None):
class _DistributedOptimizer(keras.optimizers.Optimizer):
_HAS_AGGREGATE_GRAD = True
def __init__(self, **kwargs):
self._name = name or "Distributed%s" % self.__class__.__base__.__name__
self._aggregated_gradients = False
self._allreduce_grads = hvd._make_allreduce_grads_fn(
self._name,
device_dense,
device_sparse,
compression,
sparse_as_dense,
op,
gradient_predivide_factor,
groups)
self._agg_helper = None
if backward_passes_per_step > 1:
if hvd._executing_eagerly():
self._agg_helper = LocalGradientAggregationHelperEager(
backward_passes_per_step=backward_passes_per_step,
allreduce_func=self._allreduce_grads,
sparse_as_dense=sparse_as_dense,
average_aggregated_gradients=average_aggregated_gradients,
)
else:
self._agg_helper = LocalGradientAggregationHelper(
backward_passes_per_step=backward_passes_per_step,
allreduce_func=self._allreduce_grads,
sparse_as_dense=sparse_as_dense,
average_aggregated_gradients=average_aggregated_gradients,
rank=rank(),
optimizer_type=(
LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS),
)
super(self.__class__, self).__init__(**kwargs)
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
"""
Compute gradients of all trainable variables.
See Optimizer.get_gradients() for more info.
In DistributedOptimizer, get_gradients() is overridden to also
allreduce the gradients before returning them.
"""
if _PRE_TF_2_4_0:
return super(self.__class__, self)._compute_gradients(
loss, var_list, grad_loss, tape)
tape = backprop.GradientTape() if tape is None else tape
grads_and_vars = super(self.__class__, self)._compute_gradients(
# pylint: disable=protected-access
loss,
var_list,
grad_loss,
tape=tape)
grads, weights = list(zip(*grads_and_vars))
allreduced_grads = self._allreduce(grads, weights)
return list(zip(allreduced_grads, weights))
def get_gradients(self, loss, params):
"""
Compute gradients of all trainable variables.
See Optimizer.get_gradients() for more info.
In DistributedOptimizer, get_gradients() is overridden to also
allreduce the gradients before returning them.
"""
gradients = super(self.__class__, self).get_gradients(loss, params)
return self._allreduce(gradients, params)
def _aggregate_gradients(self, grads_and_vars):
if _PRE_TF_2_4_0:
grads, vars = list(zip(*grads_and_vars))
aggregated_grads = self._allreduce(grads, vars)
return aggregated_grads
else:
return super(self.__class__, self)._aggregate_gradients(
grads_and_vars)
def _allreduce(self, grads, vars):
self._aggregated_gradients = True
if self._agg_helper:
return self._agg_helper.compute_gradients(tuple(grads), tuple(vars))
else:
return self._allreduce_grads(grads, vars)
def apply_gradients(self, *args, **kwargs):
if self._agg_helper:
if isinstance(args[0], zip):
# If grads_and_vars are passed in as a zip object, convert them
# to a list. This is necessary for TF 2.4+ because args[0] is
# used in both conditional branches inside
# _agg_helper.apply_gradients().
args = list(args)
args[0] = list(args[0])
args = tuple(args)
results = self._agg_helper.apply_gradients(
lambda: super(self.__class__, self).apply_gradients(*args, **kwargs),
self,
*args,
**kwargs,
)
else:
results = super(self.__class__, self).apply_gradients(*args, **kwargs)
if _PRE_TF_2_4_0 and not self._aggregated_gradients:
raise Exception('`apply_gradients()` was called without a call to '
'`get_gradients()` or `_aggregate_gradients`. If '
'you\'re using TensorFlow 2.0, please specify '
'`experimental_run_tf_function=False` in `compile()`.')
return results
# We dynamically create a new class that inherits from the optimizer that was
# passed in. The goal is to override get_gradients() method with an allreduce
# implementation. This class will have the same name as the optimizer it's
# wrapping, so that the saved model could be easily restored without Horovod.
cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
dict(_DistributedOptimizer.__dict__))
# This is the patch that allows the Horovod DistributedOptimizer to recognize
# the custom learning rate schedule used in the CNN resnet50 scripts.
config = optimizer.get_config()
config['learning_rate'] = \
common.PiecewiseConstantDecayWithWarmup.from_config(
config['learning_rate']['config'])
return cls.from_config(config)
def DistributedOptimizer(optimizer, name=None,
device_dense='', device_sparse='',
compression=Compression.none,
sparse_as_dense=False,
gradient_predivide_factor=1.0,
op=Average,
backward_passes_per_step=1,
average_aggregated_gradients=False):
"""
An optimizer that wraps another keras.optimizers.Optimizer, using an allreduce
to average gradient values before applying gradients to model weights.
Args:
optimizer: Optimizer to use for computing gradients and applying updates.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Distributed" followed by the provided
optimizer type.
device_dense: Device to be used for dense tensors. Uses GPU by default
if Horovod was built with HOROVOD_GPU_OPERATIONS.
device_sparse: Device to be used for sparse tensors. Uses GPU by default
if Horovod was built with HOROVOD_GPU_OPERATIONS.
compression: Compression algorithm used to reduce the amount of data
sent and received by each worker node. Defaults to not
using compression.
sparse_as_dense: Treat all sparse gradients as dense tensors. This can
help improve performance and memory utilization if
the original sparse gradient has high density.
Defaults to false.
gradient_predivide_factor: gradient_predivide_factor splits the averaging
before and after the sum. Gradients are scaled
by 1.0 / gradient_predivide_factor before the
sum and gradient_predivide_factor / size after
the sum.
op: The reduction operation to use when combining gradients across
different ranks. Defaults to Average.
backward_passes_per_step: Number of backward passes to perform before
calling hvd.allreduce. This allows accumulating
updates over multiple mini-batches before
reducing and applying them.
average_aggregated_gradients: Whether to average the aggregated gradients
that have been accumulated over multiple
mini-batches. If true divides gradient
updates by backward_passes_per_step.
Only applicable for backward_passes_per_step
> 1.
"""
if gradient_predivide_factor != 1.0 and rocm_built():
raise ValueError('gradient_predivide_factor not supported yet with ROCm')
if op != Average and op != Sum:
raise ValueError('op currently only supports Average and Sum')
return create_distributed_optimizer(
keras=keras,
optimizer=optimizer,
name=name,
device_dense=device_dense,
device_sparse=device_sparse,
compression=compression,
sparse_as_dense=sparse_as_dense,
gradient_predivide_factor=gradient_predivide_factor,
op=op,
backward_passes_per_step=backward_passes_per_step,
average_aggregated_gradients=average_aggregated_gradients,
)
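# --- Usage sketch (editor's addition) ---
# A minimal sketch of how runner.py uses this patched wrapper. It assumes the
# learning rate of the wrapped optimizer is a PiecewiseConstantDecayWithWarmup
# schedule, since create_distributed_optimizer() above rebuilds exactly that
# type from the serialized config.
def _example_patched_optimizer(lr_schedule, momentum=0.9):
    base = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=momentum)
    return DistributedOptimizer(base)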
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/nvutils/hvd_patch.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import sys
import os
import numpy as np
from subprocess import call
import horovod.tensorflow.keras as hvd
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali.tfrecord as tfrec
try:
import nvidia.dali.plugin.tf as dali_tf
except ImportError:
# The DALI TF plugin is optional; DALI-based pipelines are simply
# unavailable when it is not installed.
pass
NUM_CLASSES = 1000
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
def _deserialize_image_record(record):
feature_map = {
'image/encoded': tf.io.FixedLenFeature([ ], tf.string, ''),
'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1),
'image/class/text': tf.io.FixedLenFeature([ ], tf.string, ''),
'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)
}
with tf.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(record, feature_map)
imgdata = obj['image/encoded']
label = tf.cast(obj['image/class/label'], tf.int32)
bbox = tf.stack([obj['image/object/bbox/%s'%x].values
for x in ['ymin', 'xmin', 'ymax', 'xmax']])
bbox = tf.transpose(tf.expand_dims(bbox, 0), [0,2,1])
text = obj['image/class/text']
return imgdata, label, bbox, text
def _decode_jpeg(imgdata, channels=3):
return tf.image.decode_jpeg(imgdata, channels=channels,
fancy_upscaling=False,
dct_method='INTEGER_FAST')
def _crop_and_resize_image(image, original_bbox, height, width, deterministic=False, random_crop=False):
with tf.name_scope('random_crop_and_resize'):
eval_crop_ratio = 0.8
if random_crop:
bbox_begin, bbox_size, bbox = \
tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=tf.zeros(shape=[1,0,4]), # No bounding boxes
min_object_covered=0.1,
aspect_ratio_range=[0.8, 1.25],
area_range=[0.1, 1.0],
max_attempts=100,
seed=7 * (1+hvd.rank()) if deterministic else 0,
use_image_if_no_bounding_boxes=True)
image = tf.slice(image, bbox_begin, bbox_size)
else:
# Central crop
image = tf.image.central_crop(image, eval_crop_ratio)
image = tf.compat.v1.image.resize_images(
image,
[height, width],
tf.image.ResizeMethod.BILINEAR,
align_corners=False)
image.set_shape([height, width, 3])
return image
def _distort_image_color(image, order=0):
with tf.name_scope('distort_color'):
image = tf.math.multiply(image, 1. / 255.)
brightness = lambda img: tf.image.random_brightness(img, max_delta=32. / 255.)
saturation = lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5)
hue = lambda img: tf.image.random_hue(img, max_delta=0.2)
contrast = lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5)
if order == 0: ops = [brightness, saturation, hue, contrast]
else: ops = [brightness, contrast, saturation, hue]
for op in ops:
image = op(image)
# The random_* ops do not necessarily clamp the output range
image = tf.clip_by_value(image, 0.0, 1.0)
# Restore the original scaling
image = tf.multiply(image, 255.)
return image
def _parse_and_preprocess_image_record(record, height, width,
deterministic=False, random_crop=False,
distort_color=False):
imgdata, label, bbox, text = _deserialize_image_record(record)
label -= 1 # Change to 0-based (don't use background class)
with tf.name_scope('preprocess_train'):
try: image = _decode_jpeg(imgdata, channels=3)
except: image = tf.image.decode_png(imgdata, channels=3)
image = _crop_and_resize_image(image, bbox, height, width, deterministic, random_crop)
# image comes out of crop as float32, which is what distort_color expects
if distort_color:
image = _distort_image_color(image)
image = tf.cast(image, tf.float32)
if random_crop:
image = tf.image.random_flip_left_right(image,
seed=11 * (1 + hvd.rank()) if deterministic else None)
return image, label
# Synthetic images are generated once, and the same batch is repeated again and
# again. The H2D copy is also repeated.
def fake_image_set(batch_size, height, width, with_label=True):
data_shape = [batch_size, height, width, 3] # 3 channels
images = tf.random.truncated_normal(
data_shape, dtype=tf.float32, mean=112, stddev=70,
name='fake_images')
images = tf.clip_by_value(images, 0.0, 255.0)
images = tf.cast(images, tf.float32)
if with_label:
labels = tf.random.uniform(
[batch_size], minval=0, maxval=1000-1, dtype=tf.int32,
name='fake_labels')
ds = tf.data.Dataset.from_tensor_slices(([images], [labels]))
else:
ds = tf.data.Dataset.from_tensor_slices(([images]))
ds = ds.repeat()
return ds
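# --- Usage sketch (editor's addition) ---
def _example_fake_data(batch_size=32, height=224, width=224):
    # Synthetic input for measuring pure compute throughput: no file I/O,
    # no JPEG decoding, the same batch repeated forever.
    ds = fake_image_set(batch_size, height, width)
    images, labels = next(iter(ds))  # images: (32, 224, 224, 3) float32
    return images, labels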
@pipeline_def
def get_dali_pipeline(
tfrec_filenames,
tfrec_idx_filenames,
height, width,
shard_id,
num_gpus,
dali_cpu=True,
training=True):
inputs = fn.readers.tfrecord(
path=tfrec_filenames,
index_path=tfrec_idx_filenames,
random_shuffle=training,
shard_id=shard_id,
num_shards=num_gpus,
initial_fill=10000,
features={
'image/encoded': tfrec.FixedLenFeature((), tfrec.string, ""),
'image/class/label': tfrec.FixedLenFeature([1], tfrec.int64, -1),
'image/class/text': tfrec.FixedLenFeature([ ], tfrec.string, ''),
'image/object/bbox/xmin': tfrec.VarLenFeature(tfrec.float32, 0.0),
'image/object/bbox/ymin': tfrec.VarLenFeature(tfrec.float32, 0.0),
'image/object/bbox/xmax': tfrec.VarLenFeature(tfrec.float32, 0.0),
'image/object/bbox/ymax': tfrec.VarLenFeature(tfrec.float32, 0.0)})
decode_device = "cpu" if dali_cpu else "mixed"
resize_device = "cpu" if dali_cpu else "gpu"
if training:
images = fn.decoders.image_random_crop(
inputs["image/encoded"],
device=decode_device,
output_type=types.RGB,
random_aspect_ratio=[0.75, 1.25],
random_area=[0.05, 1.0],
num_attempts=100,
# Ask the hardware NVJPEG decoder to preallocate memory for the largest
# image in the dataset, to avoid reallocations at runtime.
preallocate_width_hint=5980 if decode_device == 'mixed' else 0,
preallocate_height_hint=6430 if decode_device == 'mixed' else 0)
images = fn.resize(images, device=resize_device, resize_x=width, resize_y=height)
else:
images = fn.decoders.image(
inputs["image/encoded"],
device=decode_device,
output_type=types.RGB)
# Make sure that every image is larger than 224x224 for CropMirrorNormalize
images = fn.resize(images, device=resize_device, resize_shorter=256)
images = fn.crop_mirror_normalize(
images.gpu(),
dtype=types.FLOAT,
crop=(height, width),
mean=[123.68, 116.78, 103.94],
std=[58.4, 57.12, 57.3],
output_layout="HWC",
mirror=fn.random.coin_flip())
labels = inputs["image/class/label"].gpu()
labels -= 1 # Change to 0-based (don't use background class)
return images, labels
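# --- Construction sketch (editor's addition) ---
# Because get_dali_pipeline() is decorated with @pipeline_def, the generic
# Pipeline arguments (batch_size, num_threads, device_id, seed, ...) are
# passed at call time alongside the function's own arguments. A hypothetical
# single-GPU setup:
def _example_build_pipeline(tfrec_files, idx_files):
    return get_dali_pipeline(
        tfrec_filenames=tfrec_files, tfrec_idx_filenames=idx_files,
        height=224, width=224, shard_id=0, num_gpus=1,
        dali_cpu=False, training=True,
        batch_size=64, num_threads=4, device_id=0, seed=12)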
class DALIPreprocessor(object):
def __init__(self,
filenames,
idx_filenames,
height, width,
batch_size,
num_threads,
dtype=tf.uint8,
dali_cpu=True,
deterministic=False,
training=False):
device_id = hvd.local_rank()
shard_id = hvd.rank()
num_gpus = hvd.size()
self.pipe = get_dali_pipeline(
tfrec_filenames=filenames,
tfrec_idx_filenames=idx_filenames,
height=height,
width=width,
batch_size=batch_size,
num_threads=num_threads,
device_id=device_id,
shard_id=shard_id,
num_gpus=num_gpus,
dali_cpu=dali_cpu,
training=training,
seed=7 * (1 + hvd.rank()) if deterministic else None)
self.daliop = dali_tf.DALIIterator()
self.batch_size = batch_size
self.height = height
self.width = width
self.device_id = device_id
self.dalidataset = dali_tf.DALIDataset(
pipeline=self.pipe,
output_shapes=((batch_size, height, width, 3), (batch_size)),
batch_size=batch_size,
output_dtypes=(tf.float32, tf.int64),
device_id=device_id)
def get_device_minibatches(self):
with tf.device("/gpu:0"):
images, labels = self.daliop(
pipeline=self.pipe,
shapes=[(self.batch_size, self.height, self.width, 3), ()],
dtypes=[tf.float32, tf.int64],
device_id=self.device_id)
return images, labels
def get_device_dataset(self):
return self.dalidataset
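# --- Usage sketch (editor's addition) ---
# Mirrors what image_set() below does when DALI is enabled; assumes hvd.init()
# has been called and the DALI TF plugin is available.
def _example_dali_input(train_files, train_idx_files):
    pre = DALIPreprocessor(train_files, train_idx_files, 224, 224,
                           batch_size=64, num_threads=4,
                           dali_cpu=False, training=True)
    return pre.get_device_dataset()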
def image_set(filenames, batch_size, height, width, training=False,
distort_color=False, num_threads=10, nsummary=10,
deterministic=False, use_dali=None, idx_filenames=None):
if use_dali:
if idx_filenames is None:
raise ValueError("Must provide idx_filenames if Dali is enabled")
preprocessor = DALIPreprocessor(
filenames,
idx_filenames,
height, width,
batch_size,
num_threads,
dali_cpu=True if use_dali == 'CPU' else False,
deterministic=deterministic, training=training)
return preprocessor
else:
shuffle_buffer_size = 10000
num_readers = 10
ds = tf.data.Dataset.from_tensor_slices(filenames)
# AUTOTUNE can give better perf for non-horovod cases
thread_config = num_threads
# shard should be before any randomizing operations
if training:
ds = ds.shard(hvd.size(), hvd.rank())
# read up to num_readers files and interleave their records
ds = ds.interleave(
tf.data.TFRecordDataset, cycle_length=num_readers)
if training:
# Improve training performance when training data is in remote storage and
# can fit into worker memory.
ds = ds.cache()
if training:
# shuffle data before repeating to respect epoch boundaries
ds = ds.shuffle(shuffle_buffer_size)
ds = ds.repeat()
preproc_func = (lambda record:
_parse_and_preprocess_image_record(record, height, width,
deterministic=deterministic, random_crop=training,
distort_color=distort_color))
ds = ds.map(preproc_func,
num_parallel_calls=thread_config)
ds = ds.batch(batch_size, drop_remainder=True)
# prefetching
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
options = tf.data.Options()
options.experimental_slack = True
ds = ds.with_options(options)
return ds
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/nvutils/image_processing.py
|
#!/usr/bin/env python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from builtins import range
from nvutils import image_processing
from nvutils import common
from distutils.version import StrictVersion
import tensorflow as tf
import keras
import os
import time
import re
from keras import backend
print(tf.__version__)
if StrictVersion(tf.__version__) > StrictVersion("2.1.0"):
if StrictVersion(tf.__version__) >= StrictVersion("2.4.0"):
from tensorflow.python.keras.mixed_precision import device_compatibility_check
else:
from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
device_compatibility_check._logged_compatibility_check = True
import horovod.tensorflow as hvd
def train_ctl(model_func, params):
image_width = params['image_width']
image_height = params['image_height']
image_format = params['image_format']
distort_color = params['distort_color']
momentum = params['momentum']
loss_scale = params['loss_scale']
data_dir = params['data_dir']
data_idx_dir = params['data_idx_dir']
batch_size = params['batch_size']
num_iter = params['num_iter']
iter_unit = params['iter_unit']
log_dir = params['log_dir']
export_dir = params['export_dir']
tensorboard_dir = params['tensorboard_dir']
display_every = params['display_every']
precision = params['precision']
dali_mode = params['dali_mode']
use_xla = params['use_xla']
if data_dir is not None:
file_format = os.path.join(data_dir, '%s-*')
train_files = sorted(tf.io.gfile.glob(file_format % 'train'))
valid_files = sorted(tf.io.gfile.glob(file_format % 'validation'))
num_train_samples = common.get_num_records(train_files)
num_valid_samples = common.get_num_records(valid_files)
else:
num_train_samples = 1281982
num_valid_samples = 5000
train_idx_files = None
valid_idx_files = None
if data_idx_dir is not None:
file_format = os.path.join(data_idx_dir, '%s-*')
train_idx_files = sorted(tf.io.gfile.glob(file_format % 'train'))
valid_idx_files = sorted(tf.io.gfile.glob(file_format % 'validation'))
if iter_unit.lower() == 'epoch':
num_epochs = num_iter
nstep_per_epoch = num_train_samples // (batch_size * hvd.size())
nstep_per_valid = num_valid_samples // (batch_size * hvd.size())
else:
assert iter_unit.lower() == 'batch'
num_epochs = 1
nstep_per_epoch = min(num_iter,
num_train_samples // (batch_size * hvd.size()))
nstep_per_valid = min(10, num_valid_samples // (batch_size * hvd.size()))
if export_dir:
assert os.path.exists(export_dir)
save_format = export_dir + "/saved_model_rn50.h5"
if use_xla:
tf.config.optimizer.set_jit(True)
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
if tensorboard_dir and hvd.rank() == 0:
assert os.path.exists(tensorboard_dir)
summary_writer = tf.summary.create_file_writer(tensorboard_dir)
else:
summary_writer = None
if precision == 'fp16':
if StrictVersion(tf.__version__) >= StrictVersion("2.4.0"):
policy = keras.mixed_precision.Policy('mixed_float16')
keras.mixed_precision.set_global_policy(policy)
else:
policy = keras.mixed_precision.experimental.Policy('mixed_float16', loss_scale)
keras.mixed_precision.experimental.set_policy(policy)
lr_schedule = common.create_piecewise_constant_decay_with_warmup(
batch_size=batch_size * hvd.size(),
epoch_size=num_train_samples,
warmup_epochs=common.LR_SCHEDULE[0][1],
boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]),
multipliers=list(p[0] for p in common.LR_SCHEDULE),
compute_lr_on_cpu=True)
opt = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=momentum)
backend.set_image_data_format(image_format)
dtype = 'float16' if precision == 'fp16' else 'float32'
backend.set_floatx(dtype)
model = model_func(num_classes=image_processing.NUM_CLASSES,
batch_size=batch_size)
loss_func = keras.losses.SparseCategoricalCrossentropy()
train_top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1,
name='train_top1')
train_top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5,
name='train_top5')
val_loss = tf.keras.metrics.Mean(name='val_loss', dtype=tf.float32)
val_top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1,
name='val_top1')
val_top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5,
name='val_top5')
if log_dir:
# We save checkpoints only when using the real data.
assert data_dir, "--data_dir cannot be empty when using --log_dir"
assert os.path.exists(log_dir)
ckpt = tf.train.Checkpoint(epoch=tf.Variable(0), optimizer=opt, net=model)
manager = tf.train.CheckpointManager(ckpt, log_dir, max_to_keep=3,
checkpoint_name="model-ckpt")
@tf.function
def train_step(inputs, first_batch):
images, labels = inputs
with tf.GradientTape() as tape:
predictions = model(images, training=True)
loss = loss_func(labels, predictions)
loss += tf.reduce_sum(model.losses)
loss_copy = loss
# Scale the losses
if precision == 'fp16':
loss = loss * tf.cast(loss_scale, loss.dtype)
tape = hvd.DistributedGradientTape(tape)
old_grads = tape.gradient(loss, model.trainable_variables)
# Unscale the grads
if precision == 'fp16':
loss_scale_reciprocal = 1. / loss_scale
grads = [g * tf.cast(loss_scale_reciprocal, g.dtype) if g is not
None else None for g in old_grads]
else:
grads = old_grads
opt.apply_gradients(zip(grads, model.trainable_variables))
train_top1.update_state(labels, predictions)
train_top5.update_state(labels, predictions)
if hvd.size() > 1 and first_batch:
hvd.broadcast_variables(model.variables, root_rank=0)
hvd.broadcast_variables(opt.variables(), root_rank=0)
return loss_copy
@tf.function
def valid_step(inputs):
images, labels = inputs
predictions = model(images, training=False)
loss = loss_func(labels, predictions)
val_loss.update_state(loss)
val_top1.update_state(labels, predictions)
val_top5.update_state(labels, predictions)
if data_dir is not None:
num_preproc_threads = 4 if dali_mode else 10
train_input = image_processing.image_set(train_files, batch_size,
image_height, image_width, training=True, distort_color=distort_color,
deterministic=False, num_threads=num_preproc_threads,
use_dali=dali_mode, idx_filenames=train_idx_files)
valid_input = image_processing.image_set(valid_files, batch_size,
image_height, image_width, training=False, distort_color=False,
deterministic=False, num_threads=num_preproc_threads,
use_dali=dali_mode, idx_filenames=valid_idx_files)
else:
if dali_mode:
raise ValueError("Must provide --data_dir if Dali is enabled")
else:
train_input = image_processing.fake_image_set(batch_size, image_height,
image_width)
global_steps = 0
log_steps = display_every
try:
initial_epoch = 0
if log_dir:
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
if hvd.rank() == 0:
print("Restored from {}".format(manager.latest_checkpoint))
initial_epoch = max(
int(re.findall(r'\d+', manager.latest_checkpoint)[0]),
initial_epoch)
else:
if hvd.rank() == 0:
print("Initializing from scratch.")
# Training Loop
for epoch in range(num_epochs):
if epoch < initial_epoch:
continue
# on_epoch_begin
epoch_start = time.time()
total_loss = 0.0
num_batches = 0
train_top1.reset_states()
train_top5.reset_states()
if not dali_mode:
train_iter = iter(train_input)
for _ in range(nstep_per_epoch):
# on_batch_begin
global_steps += 1
if global_steps == 1:
start_time = time.time()
if global_steps == 1 and hvd.rank() == 0 and summary_writer:
tf.summary.trace_on(graph=True, profiler=True)
if not dali_mode:
x = next(train_iter)
else:
x = train_input.get_device_minibatches()
total_loss += train_step(x, global_steps == 1)
if global_steps == 1 and hvd.rank() == 0 and summary_writer:
with summary_writer.as_default():
tf.summary.trace_export(name="train_step", step=0,
profiler_outdir=tensorboard_dir)
# on_batch_end
if global_steps % log_steps == 0:
timestamp = time.time()
elapsed_time = timestamp - start_time
examples_per_second = \
(batch_size * hvd.size() * log_steps) / elapsed_time
if hvd.rank() == 0:
print("global_step: %d images_per_sec: %.1f" % (global_steps,
examples_per_second))
start_time = timestamp
num_batches += 1
train_loss = total_loss / num_batches
# on_epoch_end
epoch_run_time = time.time() - epoch_start
if hvd.rank() == 0:
print("epoch: %d time_taken: %.1f" % (epoch, epoch_run_time))
if data_dir is not None:
val_loss.reset_states()
val_top1.reset_states()
val_top5.reset_states()
if not dali_mode:
test_iter = iter(valid_input)
for _ in range(nstep_per_valid):
if not dali_mode:
x = next(test_iter)
else:
x = valid_input.get_device_minibatches()
valid_step(x)
if log_dir:
ckpt.epoch.assign_add(1)
if hvd.rank() == 0:
save_path = manager.save()
print("Saved checkpoint for epoch {}: {}".format(int(ckpt.epoch),
save_path))
if hvd.rank() == 0:
output_str = ("loss: {} - top1: {} - top5: {} - val_loss: {} - "
"val_top1: {} - val_top5: {}")
print(output_str.format(train_loss, train_top1.result(),
train_top5.result(), val_loss.result(),
val_top1.result(), val_top5.result()))
if hvd.rank() == 0 and summary_writer:
with summary_writer.as_default():
tf.summary.scalar('train_loss', train_loss, global_steps)
tf.summary.scalar('train_top1', train_top1.result(), global_steps)
tf.summary.scalar('train_top5', train_top5.result(), global_steps)
tf.summary.scalar('val_loss', val_loss.result(), global_steps)
tf.summary.scalar('val_top1', val_top1.result(), global_steps)
tf.summary.scalar('val_top5', val_top5.result(), global_steps)
if hvd.rank() == 0 and summary_writer:
summary_writer.close()
except KeyboardInterrupt:
print("Keyboard interrupt")
if export_dir and hvd.rank() == 0:
model.save(save_format)
print(f"The model is saved to {save_format}")
def predict_ctl(params):
image_width = params['image_width']
image_height = params['image_height']
batch_size = params['batch_size']
export_dir = params['export_dir']
assert export_dir, "--export_dir must be given."
model_path = export_dir + "/saved_model_rn50.h5"
assert os.path.exists(model_path)
model = keras.models.load_model(model_path, custom_objects={
"PiecewiseConstantDecayWithWarmup":
common.PiecewiseConstantDecayWithWarmup})
predict_input = image_processing.fake_image_set(batch_size, image_height,
image_width, with_label=False)
results = model.predict(predict_input, verbose=1, steps=3)
print(f"The loaded model predicts {results.shape[0]} images.")
|
DALI-main
|
docs/examples/use_cases/tensorflow/resnet-n/nvutils/runner_ctl.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils."""
from typing import Text, Tuple, Union
import tensorflow as tf
import argparse
import tensorflow.compat.v1 as tf1
from enum import Enum
from collections import namedtuple
from tensorflow.python.eager import (
tape as tape_lib,
) # pylint:disable=g-direct-tensorflow-import
from tensorflow.python.tpu import (
tpu_function,
) # pylint:disable=g-direct-tensorflow-import
# pylint: disable=logging-format-interpolation
class PipelineType(Enum):
synthetic = 0
tensorflow = 1
dali_cpu = 2
dali_gpu = 3
class InputType(Enum):
tfrecord = 0
coco = 1
# argparse multiline argument help according to: https://stackoverflow.com/a/22157136
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith("R|"):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def dict_to_namedtuple(dict_instance):
NamedTuple = namedtuple("NamedTuple", dict_instance.keys())
return NamedTuple._make(dict_instance.values())
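# --- Usage sketch (editor's addition) ---
def _example_dict_to_namedtuple():
    # Freezes a plain dict into attribute-style access, which is how
    # run_training() in train.py consumes its `args` dict.
    args = dict_to_namedtuple({"batch_size": 64, "seed": None})
    assert args.batch_size == 64 and args.seed is None
    return args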
def setup_gpus():
for gpu_instance in tf.config.list_physical_devices("GPU"):
tf.config.experimental.set_memory_growth(gpu_instance, True)
tf.config.set_soft_device_placement(True)
def get_dataset(args, total_batch_size, is_training, params, strategy=None):
pipeline = args.pipeline_type
if strategy and not is_training and pipeline == PipelineType.dali_gpu:
strategy = None
pipeline = PipelineType.dali_cpu
if pipeline in [PipelineType.tensorflow, PipelineType.synthetic]:
from pipeline.tf.dataloader import InputReader
if args.input_type != InputType.tfrecord:
raise ValueError(
"tensorflow and syntax pipelines are only compatible with tfrecord input type :<"
)
if is_training:
file_pattern = args.train_file_pattern
else:
file_pattern = args.eval_file_pattern or args.train_file_pattern
dataset = InputReader(
params,
file_pattern,
is_training=is_training,
use_fake_data=(pipeline == PipelineType.synthetic),
).get_dataset(total_batch_size)
elif strategy:
from pipeline.dali.efficientdet_pipeline import EfficientDetPipeline
if pipeline == PipelineType.dali_cpu:
raise ValueError(
"dali_cpu pipeline is not compatible with multi_gpu mode :<"
)
def dali_dataset_fn(input_context):
with tf.device(f"/gpu:{input_context.input_pipeline_id}"):
device_id = input_context.input_pipeline_id
num_shards = input_context.num_input_pipelines
return EfficientDetPipeline(
params,
int(total_batch_size / num_shards),
args,
is_training=is_training,
num_shards=num_shards,
device_id=device_id,
).get_dataset()
input_options = tf.distribute.InputOptions(
experimental_place_dataset_on_device=True,
experimental_fetch_to_device=False,
experimental_replication_mode=tf.distribute.InputReplicationMode.PER_REPLICA,
)
dataset = strategy.distribute_datasets_from_function(
dali_dataset_fn, input_options
)
else:
from pipeline.dali.efficientdet_pipeline import EfficientDetPipeline
cpu_only = pipeline == PipelineType.dali_cpu
device = "/cpu:0" if cpu_only else "/gpu:0"
with tf.device(device):
dataset = EfficientDetPipeline(
params,
total_batch_size,
args,
is_training=is_training,
cpu_only=cpu_only,
).get_dataset()
return dataset
def srelu_fn(x):
"""Smooth relu: a smooth version of relu."""
with tf.name_scope("srelu"):
beta = tf.Variable(20.0, name="srelu_beta", dtype=tf.float32) ** 2
beta = tf.cast(beta ** 2, x.dtype)
safe_log = tf.math.log(tf.where(x > 0.0, beta * x + 1.0, tf.ones_like(x)))
return tf.where((x > 0.0), x - (1.0 / beta) * safe_log, tf.zeros_like(x))
def activation_fn(features: tf1.Tensor, act_type: Text):
"""Customized non-linear activation type."""
if act_type in ("silu", "swish"):
return tf.keras.activations.swish(features)
elif act_type == "swish_native":
return features * tf.keras.activations.sigmoid(features)
elif act_type == "hswish":
return features * tf.nn.relu6(features + 3) / 6
elif act_type == "relu":
return tf.nn.relu(features)
elif act_type == "relu6":
return tf.nn.relu6(features)
elif act_type == "mish":
return features * tf.math.tanh(tf.math.softplus(features))
elif act_type == "srelu":
return srelu_fn(features)
else:
raise ValueError("Unsupported act_type {}".format(act_type))
def cross_replica_mean(t, num_shards_per_group=None):
"""Calculates the average value of input tensor across TPU replicas."""
num_shards = tpu_function.get_tpu_context().number_of_shards
if not num_shards_per_group:
return tf1.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype)
group_assignment = None
if num_shards_per_group > 1:
if num_shards % num_shards_per_group != 0:
raise ValueError(
"num_shards: %d mod shards_per_group: %d, should be 0"
% (num_shards, num_shards_per_group)
)
num_groups = num_shards // num_shards_per_group
group_assignment = [
[x for x in range(num_shards) if x // num_shards_per_group == y]
for y in range(num_groups)
]
return tf1.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
num_shards_per_group, t.dtype
)
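# Worked example (editor's addition): for num_shards == 8 and
# num_shards_per_group == 4, the group assignment computed above is
# [[0, 1, 2, 3], [4, 5, 6, 7]], so each group of four replicas averages
# among themselves instead of across all eight.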
class BatchNormalization(tf.keras.layers.BatchNormalization):
"""Fixed default name of BatchNormalization to match TpuBatchNormalization."""
def __init__(self, **kwargs):
if not kwargs.get("name", None):
kwargs["name"] = "tpu_batch_normalization"
super().__init__(**kwargs)
def call(self, inputs, training=None):
outputs = super().call(inputs, training)
# A temporary hack for tf1 compatibility with keras batch norm.
for u in self.updates:
tf1.add_to_collection(tf1.GraphKeys.UPDATE_OPS, u)
return outputs
def build_batch_norm(
beta_initializer: Text = "zeros",
gamma_initializer: Text = "ones",
data_format: Text = "channels_last",
momentum: float = 0.99,
epsilon: float = 1e-3,
name: Text = "tpu_batch_normalization",
):
"""Build a batch normalization layer.
Args:
beta_initializer: `str`, beta initializer.
gamma_initializer: `str`, gamma initializer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
momentum: `float`, momentum of batch norm.
epsilon: `float`, small value for numerical stability.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
axis = 1 if data_format == "channels_first" else -1
bn_layer = BatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
name=name,
)
return bn_layer
def drop_connect(inputs, is_training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not is_training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob
random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
# Unlike conventional way that multiply survival_prob at test time, here we
# divide survival_prob at training time, such that no addition compute is
# needed at test time.
output = inputs / survival_prob * binary_tensor
return output
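# Worked example (editor's addition): with survival_prob == 0.8,
# floor(0.8 + U[0, 1)) is 1 with probability 0.8 and 0 otherwise, so each
# sample in the batch keeps the branch with probability 0.8; surviving
# outputs are divided by 0.8 so the expected value matches inference, where
# the branch is always kept.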
def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):
"""Parse the image size and return (height, width).
Args:
image_size: An integer, a tuple (H, W), or a string in WxH format.
Returns:
A tuple of integer (height, width).
"""
if isinstance(image_size, int):
# image_size is integer, with the same width and height.
return (image_size, image_size)
if isinstance(image_size, str):
# image_size is a string with format WxH
width, height = image_size.lower().split("x")
return (int(height), int(width))
if isinstance(image_size, tuple):
return image_size
raise ValueError(
"image_size must be an int, WxH string, or (height, width) "
"tuple. Was %r" % image_size
)
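# --- Examples (editor's addition) ---
def _example_parse_image_size():
    assert parse_image_size(512) == (512, 512)
    assert parse_image_size("640x512") == (512, 640)  # "WxH" string -> (H, W)
    assert parse_image_size((512, 640)) == (512, 640)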
def get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]], max_level: int):
"""Get feat widths and heights for all levels.
Args:
image_size: An integer, a tuple (H, W), or a string in WxH format.
max_level: maximum feature level.
Returns:
feat_sizes: a list of tuples (height, width) for each level.
"""
image_size = parse_image_size(image_size)
feat_sizes = [{"height": image_size[0], "width": image_size[1]}]
feat_size = image_size
for _ in range(1, max_level + 1):
feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)
feat_sizes.append({"height": feat_size[0], "width": feat_size[1]})
return feat_sizes
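# --- Worked example (editor's addition) ---
def _example_get_feat_sizes():
    # Each level halves the previous size, rounding up.
    sizes = get_feat_sizes(64, max_level=3)
    assert sizes == [
        {"height": 64, "width": 64},
        {"height": 32, "width": 32},
        {"height": 16, "width": 16},
        {"height": 8, "width": 8},
    ]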
def _recompute_grad(f):
"""An eager-compatible version of recompute_grad.
For f(*args, **kwargs), this supports gradients with respect to args or
kwargs, but kwargs are currently only supported in eager-mode.
Note that for keras layer and model objects, this is handled automatically.
Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not
be able to access the member variables of that object, because `g` returns
through the wrapper function `inner`. When recomputing gradients through
objects that inherit from keras, we suggest keeping a reference to the
underlying object around for the purpose of accessing these variables.
Args:
f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.
Returns:
A function `g` that wraps `f`, but which recomputes `f` on the backwards
pass of a gradient call.
"""
@tf1.custom_gradient
def inner(*args, **kwargs):
"""Inner function closure for calculating gradients."""
current_var_scope = tf1.get_variable_scope()
with tape_lib.stop_recording():
result = f(*args, **kwargs)
def grad_wrapper(*wrapper_args, **grad_kwargs):
"""Wrapper function to accomodate lack of kwargs in graph mode decorator."""
@tf1.custom_gradient
def inner_recompute_grad(*dresult):
"""Nested custom gradient function for computing grads in reverse and forward mode autodiff."""
# Gradient calculation for reverse mode autodiff.
variables = grad_kwargs.get("variables")
with tf.GradientTape() as t:
id_args = tf.nest.map_structure(tf.identity, args)
t.watch(id_args)
if variables is not None:
t.watch(variables)
with tf.control_dependencies(dresult):
with tf1.variable_scope(current_var_scope):
result = f(*id_args, **kwargs)
kw_vars = []
if variables is not None:
kw_vars = list(variables)
grads = t.gradient(
result,
list(id_args) + kw_vars,
output_gradients=dresult,
unconnected_gradients=tf.UnconnectedGradients.ZERO,
)
def transpose(*t_args, **t_kwargs):
"""Gradient function calculation for forward mode autodiff."""
# Just throw an error since gradients / activations are not stored on
# tape for recompute.
raise NotImplementedError(
"recompute_grad tried to transpose grad of {}. "
"Consider not using recompute_grad in forward mode "
"autodiff".format(f.__name__)
)
return (grads[: len(id_args)], grads[len(id_args) :]), transpose
return inner_recompute_grad(*wrapper_args)
return result, grad_wrapper
return inner
def recompute_grad(recompute=False):
"""Decorator determine whether use gradient checkpoint."""
def _wrapper(f):
if recompute:
return _recompute_grad(f)
return f
return _wrapper
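# --- Usage sketch (editor's addition) ---
# recompute_grad is a conditional gradient-checkpointing decorator: with
# recompute=True, the wrapped function's activations are discarded in the
# forward pass and recomputed during backprop, trading compute for memory.
def _example_block():
    @recompute_grad(recompute=True)
    def block(x):
        return tf.nn.relu(tf.matmul(x, x))
    return block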
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/utils.py
|
import multiprocessing
from absl import logging
import numpy as np
import tensorflow as tf
import random
import re
import os
import hparams_config
import utils
from model import efficientdet_net
from model.utils import optimizers
def run_training(args):
logging.set_verbosity(logging.WARNING)
args = utils.dict_to_namedtuple(args)
config = hparams_config.get_efficientdet_config(args.model_name)
config.override(args.hparams, allow_new_keys=True)
config.image_size = utils.parse_image_size(config.image_size)
params = dict(
config.as_dict(),
seed=args.seed,
batch_size=args.batch_size,
)
logging.info(params)
if args.ckpt_dir:
ckpt_dir = args.ckpt_dir
if not tf.io.gfile.exists(ckpt_dir):
tf.io.gfile.makedirs(ckpt_dir)
config_file = os.path.join(ckpt_dir, "config.yaml")
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, "w").write(str(config))
if params["seed"]:
seed = params["seed"]
os.environ["PYTHONHASHSEED"] = str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
os.environ["TF_DETERMINISTIC_OPS"] = "1"
os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
utils.setup_gpus()
num_devices = 1
physical_devices = tf.config.list_physical_devices("GPU")
multi_gpu = args.multi_gpu
if multi_gpu is not None and len(multi_gpu) != 1 and len(physical_devices) > 1:
devices = [f"GPU:{gpu}" for gpu in multi_gpu] if len(multi_gpu) != 0 else None
strategy = tf.distribute.MirroredStrategy(devices)
num_devices = len(devices) if devices else len(physical_devices)
else:
strategy = tf.distribute.get_strategy()
train_dataset = utils.get_dataset(
args,
args.batch_size * num_devices,
True,
params,
strategy if num_devices > 1 else None,
)
if args.eval_after_training or args.eval_during_training:
eval_dataset = utils.get_dataset(
args,
num_devices,
False,
params,
strategy if num_devices > 1 else None,
)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.DATA
)
eval_dataset = eval_dataset.with_options(options)
with strategy.scope():
model = efficientdet_net.EfficientDetNet(params=params)
global_batch_size = args.batch_size * strategy.num_replicas_in_sync
model.compile(
optimizer=optimizers.get_optimizer(
params, args.epochs, global_batch_size, args.train_steps
)
)
initial_epoch = args.initial_epoch
if args.start_weights:
image_size = params["image_size"]
model.predict(np.zeros((1, image_size[0], image_size[1], 3)))
model.load_weights(args.start_weights)
fname = args.start_weights.split("/")[-1]
ckpt_pattern = rf"{args.model_name}\.(\d\d+)\.h5"  # raw f-string avoids invalid escape sequences
match = re.match(ckpt_pattern, fname)
if match:
initial_epoch = int(match.group(1).lstrip("0"))
callbacks = []
if args.ckpt_dir:
ckpt_dir = args.ckpt_dir
if not tf.io.gfile.exists(ckpt_dir):
tf.io.gfile.makedirs(ckpt_dir)
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(
ckpt_dir, "".join([args.model_name, ".{epoch:02d}.h5"])
),
save_weights_only=True,
)
)
if args.log_dir:
log_dir = args.log_dir
if not tf.io.gfile.exists(log_dir):
tf.io.gfile.makedirs(log_dir)
callbacks.append(
tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq="epoch")
)
model.fit(
train_dataset,
epochs=args.epochs,
steps_per_epoch=args.train_steps,
initial_epoch=initial_epoch,
callbacks=callbacks,
validation_data=eval_dataset if args.eval_during_training else None,
validation_steps=args.eval_steps,
validation_freq=args.eval_freq,
)
if args.eval_after_training:
print("Evaluation after training:")
model.evaluate(eval_dataset, steps=args.eval_steps)
model.save_weights(args.output_filename)
if __name__ == "__main__":
import argparse
from argparse_utils import enum_action
parser = argparse.ArgumentParser(formatter_class=utils.SmartFormatter)
parser.add_argument(
"--initial_epoch",
type=int,
default=0,
help="Epoch from which to start training.",
)
parser.add_argument(
"--epochs", type=int, default=300, help="Epoch on which training should finish."
)
parser.add_argument(
"--input_type",
action=enum_action(utils.InputType),
required=True,
help="Input type.",
)
parser.add_argument(
"--images_path",
help="Path to COCO images.",
)
parser.add_argument(
"--annotations_path",
help="Path to COCO annotations.",
)
parser.add_argument(
"--train_file_pattern",
help="TFrecord files glob pattern for files with training data.",
)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of steps (iterations) in each epoch.",
)
parser.add_argument(
"--eval_file_pattern",
help="TFrecord files glob pattern for files with evaluation data, "
"defaults to `train_file_pattern` if not given.",
)
parser.add_argument(
"--eval_steps",
type=int,
default=5000,
help="Number of examples to process during each evaluation.",
)
parser.add_argument(
"--eval_freq", type=int, default=1, help="Run the evaluation every `eval_freq` epochs."
)
parser.add_argument(
"--eval_during_training",
action="store_true",
help="Whether to run evaluation every `eval_freq` epochs.",
)
parser.add_argument(
"--eval_after_training",
action="store_true",
help="Whether to run evaluation after finished training.",
)
parser.add_argument(
"--pipeline_type",
action=enum_action(utils.PipelineType),
required=True,
help="R|Pipeline type used while loading and preprocessing data. One of:\n"
"tensorflow – pipeline used in original EfficientDet implementation on https://github.com/google/automl/tree/master/efficientdet;\n"
"synthetic – like `tensorflow` pipeline type but repeats one batch endlessly;\n"
"dali_gpu – pipeline which uses NVIDIA DALI to run part of data preprocessing on GPUs to improve efficiency;\n"
"dali_cpu – like `dali_gpu` pipeline type but restricted to run only on CPU.",
)
parser.add_argument(
"--multi_gpu",
nargs="*",
type=int,
help="List of GPUs to use, if empty defaults to all visible GPUs.",
)
parser.add_argument("--seed", type=int)
parser.add_argument(
"--hparams", default="", help="String or filename with parameters."
)
parser.add_argument("--model_name", default="efficientdet-d1")
parser.add_argument(
"--output_filename",
default="output.h5",
help="Filename for final weights to save.",
)
parser.add_argument("--start_weights")
parser.add_argument("--log_dir", help="Directory for tensorboard logs.")
parser.add_argument("--ckpt_dir", help="Directory for saving weights each step.")
args = parser.parse_args()
run_training(vars(args))
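# Example invocation (a sketch; the --input_type value depends on the members
# of utils.InputType, and all paths below are placeholders):
#
#   python train.py \
#       --input_type <member-of-utils.InputType> \
#       --pipeline_type dali_gpu \
#       --images_path /data/coco/train2017 \
#       --annotations_path /data/coco/annotations/instances_train2017.json \
#       --batch_size 8 --epochs 5 --train_steps 500 \
#       --ckpt_dir ./checkpoints --log_dir ./logs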
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/train.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hparams for model architecture and trainer."""
import ast
import collections
import copy
from typing import Any, Dict, Text
import six
import tensorflow as tf
import yaml
def eval_str_fn(val):
if val in {"true", "false"}:
return val == "true"
try:
return ast.literal_eval(val)
except (ValueError, SyntaxError):
return val
# pylint: disable=protected-access
class Config(object):
"""A config utility class."""
def __init__(self, config_dict=None):
self.update(config_dict)
def __setattr__(self, k, v):
self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v)
def __getattr__(self, k):
return self.__dict__[k]
def __getitem__(self, k):
return self.__dict__[k]
def __repr__(self):
return repr(self.as_dict())
def __deepcopy__(self, memodict):
return type(self)(self.as_dict())
def __str__(self):
try:
return yaml.dump(self.as_dict(), indent=4)
except TypeError:
return str(self.as_dict())
def _update(self, config_dict, allow_new_keys=True):
"""Recursively update internal members."""
if not config_dict:
return
for k, v in six.iteritems(config_dict):
if k not in self.__dict__:
if allow_new_keys:
self.__setattr__(k, v)
else:
raise KeyError("Key `{}` does not exist for overriding. ".format(k))
else:
if isinstance(self.__dict__[k], Config) and isinstance(v, dict):
self.__dict__[k]._update(v, allow_new_keys)
elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):
self.__dict__[k]._update(v.as_dict(), allow_new_keys)
else:
self.__setattr__(k, v)
def get(self, k, default_value=None):
return self.__dict__.get(k, default_value)
def update(self, config_dict):
"""Update members while allowing new keys."""
self._update(config_dict, allow_new_keys=True)
def keys(self):
return self.__dict__.keys()
def override(self, config_dict_or_str, allow_new_keys=False):
"""Update members while disallowing new keys."""
if isinstance(config_dict_or_str, str):
if not config_dict_or_str:
return
elif "=" in config_dict_or_str:
config_dict = self.parse_from_str(config_dict_or_str)
elif config_dict_or_str.endswith(".yaml"):
config_dict = self.parse_from_yaml(config_dict_or_str)
else:
raise ValueError(
'Invalid string {}, must end with .yaml or contain "=".'.format(
config_dict_or_str
)
)
elif isinstance(config_dict_or_str, dict):
config_dict = config_dict_or_str
else:
raise ValueError("Unknown value type: {}".format(config_dict_or_str))
self._update(config_dict, allow_new_keys)
def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:
"""Parses a yaml file and returns a dictionary."""
with tf.io.gfile.GFile(yaml_file_path, "r") as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
return config_dict
def save_to_yaml(self, yaml_file_path):
"""Write a dictionary into a yaml file."""
with tf.io.gfile.GFile(yaml_file_path, "w") as f:
yaml.dump(self.as_dict(), f, default_flow_style=False)
def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:
"""Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}."""
if not config_str:
return {}
config_dict = {}
try:
for kv_pair in config_str.split(","):
if not kv_pair: # skip empty string
continue
key_str, value_str = kv_pair.split("=")
key_str = key_str.strip()
def add_kv_recursive(k, v):
"""Recursively parse x.y.z=tt to {x: {y: {z: tt}}}."""
if "." not in k:
if "*" in v:
# we reserve * to split arrays.
return {k: [eval_str_fn(vv) for vv in v.split("*")]}
return {k: eval_str_fn(v)}
pos = k.index(".")
return {k[:pos]: add_kv_recursive(k[pos + 1 :], v)}
def merge_dict_recursive(target, src):
"""Recursively merge two nested dictionary."""
for k in src.keys():
if (
k in target
and isinstance(target[k], dict)
and isinstance(src[k], collections.abc.Mapping)
):
merge_dict_recursive(target[k], src[k])
else:
target[k] = src[k]
merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str))
return config_dict
except ValueError:
raise ValueError("Invalid config_str: {}".format(config_str))
def as_dict(self):
"""Returns a dict representation."""
config_dict = {}
for k, v in six.iteritems(self.__dict__):
if isinstance(v, Config):
config_dict[k] = v.as_dict()
else:
config_dict[k] = copy.deepcopy(v)
return config_dict
# pylint: enable=protected-access
def default_detection_configs():
"""Returns a default detection configs."""
h = Config()
# model name.
h.name = "efficientdet-d1"
# activation type: see activation_fn in utils.py.
h.act_type = "swish"
# input preprocessing parameters
h.image_size = 640 # An integer or a string WxH such as 640x320.
h.target_size = None
h.input_rand_hflip = True
h.jitter_min = 0.1
h.jitter_max = 2.0
h.grid_mask = False
# dataset specific parameters
h.num_classes = 91 # 1+ actual classes, 0 is reserved for background.
h.skip_crowd_during_training = True
h.label_map = "coco" # a dict or a string of 'coco', 'voc', 'waymo'.
h.max_instances_per_image = 100 # Default to 100 for COCO.
# model architecture
h.min_level = 3
h.max_level = 7
h.num_scales = 3
# ratio w/h: 2.0 means w=1.4, h=0.7. Can be computed with k-means per dataset.
h.aspect_ratios = [1.0, 2.0, 0.5] # [[0.7, 1.4], [1.0, 1.0], [1.4, 0.7]]
h.anchor_scale = 4.0
# optimization
h.momentum = 0.9
h.optimizer = "sgd" # can be 'adam' or 'sgd'.
h.learning_rate = 0.08 # 0.008 for adam.
h.lr_warmup_init = 0.008 # 0.0008 for adam.
h.lr_warmup_epoch = 1.0
h.first_lr_drop_epoch = 200.0
h.second_lr_drop_epoch = 250.0
h.poly_lr_power = 0.9
h.clip_gradients_norm = 10.0
h.data_format = "channels_last"
# classification loss
h.label_smoothing = 0.0 # 0.1 is a good default
# focal loss parameters
h.alpha = 0.25
h.gamma = 1.5
# localization loss
h.delta = 0.1 # regularization parameter of huber loss.
# total loss = box_loss * box_loss_weight + iou_loss * iou_loss_weight
h.box_loss_weight = 50.0
h.iou_loss_type = None
h.iou_loss_weight = 1.0
# regularization l2 loss.
h.weight_decay = 4e-5
# For detection.
h.box_class_repeats = 3
h.fpn_cell_repeats = 3
h.fpn_num_filters = 88
h.separable_conv = True
h.apply_bn_for_resampling = True
h.conv_after_downsample = False
h.conv_bn_act_pattern = False
h.drop_remainder = True # drop remainder for the final batch eval.
# For post-processing nms, must be a dict.
h.nms_configs = {
"method": "gaussian",
"iou_thresh": None, # use the default value based on method.
"score_thresh": 0.0,
"sigma": None,
"max_nms_inputs": 0,
"max_output_size": 100,
}
# version.
h.fpn_name = None
h.fpn_weight_method = None
h.fpn_config = None
# No stochastic depth in default.
h.survival_prob = None
h.lr_decay_method = "cosine"
h.backbone_name = "efficientnet-b1"
h.backbone_config = None
h.var_freeze_expr = None
h.positives_momentum = None
h.grad_checkpoint = False
return h
efficientdet_model_param_dict = {
"efficientdet-d0": dict(
model_name="efficientdet-d0",
backbone_name="efficientnet-b0",
image_size=512,
fpn_num_filters=64,
fpn_cell_repeats=3,
box_class_repeats=3,
),
"efficientdet-d1": dict(
model_name="efficientdet-d1",
backbone_name="efficientnet-b1",
image_size=640,
fpn_num_filters=88,
fpn_cell_repeats=4,
box_class_repeats=3,
),
"efficientdet-d2": dict(
model_name="efficientdet-d2",
backbone_name="efficientnet-b2",
image_size=768,
fpn_num_filters=112,
fpn_cell_repeats=5,
box_class_repeats=3,
),
"efficientdet-d3": dict(
name="efficientdet-d3",
backbone_name="efficientnet-b3",
image_size=896,
fpn_num_filters=160,
fpn_cell_repeats=6,
box_class_repeats=4,
),
"efficientdet-d4": dict(
model_name="efficientdet-d4",
backbone_name="efficientnet-b4",
image_size=1024,
fpn_num_filters=224,
fpn_cell_repeats=7,
box_class_repeats=4,
),
"efficientdet-d5": dict(
model_name="efficientdet-d5",
backbone_name="efficientnet-b5",
image_size=1280,
fpn_num_filters=288,
fpn_cell_repeats=7,
box_class_repeats=4,
),
"efficientdet-d6": dict(
model_name="efficientdet-d6",
backbone_name="efficientnet-b6",
image_size=1280,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
fpn_weight_method="sum", # Use unweighted sum for stability.
),
"efficientdet-d7": dict(
model_name="efficientdet-d7",
backbone_name="efficientnet-b6",
image_size=1536,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=5.0,
fpn_weight_method="sum", # Use unweighted sum for stability.
),
"efficientdet-d7x": dict(
model_name="efficientdet-d7x",
backbone_name="efficientnet-b7",
image_size=1536,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=4.0,
max_level=8,
fpn_weight_method="sum", # Use unweighted sum for stability.
),
}
def get_efficientdet_config(model_name="efficientdet-d1"):
"""Get the default config for EfficientDet based on model name."""
h = default_detection_configs()
if model_name in efficientdet_model_param_dict:
h.override(efficientdet_model_param_dict[model_name], allow_new_keys=True)
else:
raise ValueError("Unknown model name: {}".format(model_name))
return h
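# A minimal usage sketch of the Config machinery above: build the defaults for
# a model, then override values from a "key=value" string (nested keys use
# dots, arrays use '*') and from a plain dict. All keys below exist in
# default_detection_configs().
if __name__ == "__main__":
    cfg = get_efficientdet_config("efficientdet-d0")
    cfg.override("learning_rate=0.01,nms_configs.sigma=0.5")
    cfg.override({"image_size": "512x256"})
    assert cfg.learning_rate == 0.01
    assert cfg.nms_configs.sigma == 0.5
    print(cfg.image_size)  # -> 512x256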
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/hparams_config.py
|
import multiprocessing
from absl import logging
import numpy as np
import tensorflow as tf
import re
import os
import hparams_config
import utils
from model import efficientdet_net
def run_eval(args):
logging.set_verbosity(logging.WARNING)
args = utils.dict_to_namedtuple(args)
config = hparams_config.get_efficientdet_config(args.model_name)
config.override(args.hparams, allow_new_keys=True)
config.image_size = utils.parse_image_size(config.image_size)
params = dict(config.as_dict(), seed=None)
logging.info(params)
utils.setup_gpus()
dataset = utils.get_dataset(args, 1, False, params, None)
model = efficientdet_net.EfficientDetNet(params=params)
model.compile()
if args.weights:
image_size = params["image_size"]
model.predict(np.zeros((1, image_size[0], image_size[1], 3)))
model.load_weights(args.weights)
model.evaluate(dataset, steps=args.eval_steps)
if __name__ == "__main__":
import argparse
from argparse_utils import enum_action
parser = argparse.ArgumentParser(formatter_class=utils.SmartFormatter)
parser.add_argument(
"--input_type",
action=enum_action(utils.InputType),
required=True,
help="Input type.",
)
parser.add_argument(
"--images_path",
help="Path to COCO images.",
)
parser.add_argument(
"--annotations_path",
help="Path to COCO annotations.",
)
parser.add_argument(
"--eval_file_pattern",
help="TFrecord files glob pattern for files with evaluation data.",
)
parser.add_argument(
"--eval_steps", type=int, default=5000, help="Number of examples to evaluate."
)
parser.add_argument(
"--pipeline_type",
action=enum_action(utils.PipelineType),
required=True,
help="R|Pipeline type used while loading and preprocessing data. One of:\n"
"tensorflow – pipeline used in original EfficientDet implementation on https://github.com/google/automl/tree/master/efficientdet;\n"
"synthetic – like `tensorflow` pipeline type but repeats one batch endlessly;\n"
"dali_gpu – pipeline which uses Nvidia Data Loading Library (DALI) to run part of data preprocessing on GPUs to improve efficiency;\n"
"dali_cpu – like `dali_gpu` pipeline type but restricted to run only on CPU.",
)
parser.add_argument(
"--weights", default="output.h5", help="Name of the file with model weights."
)
parser.add_argument("--model_name", default="efficientdet-d1")
parser.add_argument(
"--hparams", default="", help="String or filename with parameters."
)
args = parser.parse_args()
run_eval(vars(args))
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/eval.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
import collections
import numpy as np
import tensorflow as tf
import utils
from .anchors_utils import argmax_matcher
from .anchors_utils import box_list
from .anchors_utils import faster_rcnn_box_coder
from .anchors_utils import region_similarity_calculator
from .anchors_utils import target_assigner
MAX_DETECTION_POINTS = 5000
def decode_box_outputs(pred_boxes, anchor_boxes):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
pred_boxes: predicted box regression targets.
anchor_boxes: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
anchor_boxes = tf.cast(anchor_boxes, pred_boxes.dtype)
ycenter_a = (anchor_boxes[..., 0] + anchor_boxes[..., 2]) / 2
xcenter_a = (anchor_boxes[..., 1] + anchor_boxes[..., 3]) / 2
ha = anchor_boxes[..., 2] - anchor_boxes[..., 0]
wa = anchor_boxes[..., 3] - anchor_boxes[..., 1]
ty, tx, th, tw = tf.unstack(pred_boxes, num=4, axis=-1)
w = tf.math.exp(tw) * wa
h = tf.math.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.0
xmin = xcenter - w / 2.0
ymax = ycenter + h / 2.0
xmax = xcenter + w / 2.0
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
class Anchors:
"""Multi-scale anchors class."""
def __init__(
self, min_level, max_level, num_scales, aspect_ratios, anchor_scale, image_size
):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of floats representing the aspect ratios of anchors
added on each level. For instance, aspect_ratios = [1.0, 2.0, 0.5]
adds three anchors on each level.
anchor_scale: float number representing the ratio of the base anchor size
to the feature stride 2^level. Or a list, one value per layer.
image_size: an integer or a tuple of integers representing the input image size.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
if isinstance(anchor_scale, (list, tuple)):
assert len(anchor_scale) == max_level - min_level + 1
self.anchor_scales = anchor_scale
else:
self.anchor_scales = [anchor_scale] * (max_level - min_level + 1)
self.image_size = utils.parse_image_size(image_size)
self.feat_sizes = utils.get_feat_sizes(image_size, max_level)
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
anchor_configs = {}
feat_sizes = self.feat_sizes
for level in range(self.min_level, self.max_level + 1):
anchor_configs[level] = []
for scale_octave in range(self.num_scales):
for aspect in self.aspect_ratios:
anchor_configs[level].append(
(
(
feat_sizes[0]["height"]
/ float(feat_sizes[level]["height"]),
feat_sizes[0]["width"]
/ float(feat_sizes[level]["width"]),
),
scale_octave / float(self.num_scales),
aspect,
self.anchor_scales[level - self.min_level],
)
)
return anchor_configs
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes_all = []
for _, configs in self.config.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect, anchor_scale = config
base_anchor_size_x = anchor_scale * stride[1] * 2 ** octave_scale
base_anchor_size_y = anchor_scale * stride[0] * 2 ** octave_scale
if isinstance(aspect, list):
aspect_x, aspect_y = aspect
else:
aspect_x = np.sqrt(aspect)
aspect_y = 1.0 / aspect_x
anchor_size_x_2 = base_anchor_size_x * aspect_x / 2.0
anchor_size_y_2 = base_anchor_size_y * aspect_y / 2.0
x = np.arange(stride[1] / 2, self.image_size[1], stride[1])
y = np.arange(stride[0] / 2, self.image_size[0], stride[0])
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack(
(
yv - anchor_size_y_2,
xv - anchor_size_x_2,
yv + anchor_size_y_2,
xv + anchor_size_x_2,
)
)
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# concat anchors on the same level to shape NxAx4, then flatten to (N*A, 4)
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
anchor_boxes = tf.convert_to_tensor(anchor_boxes, dtype=tf.float32)
return anchor_boxes
def get_anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True,
)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder
)
self._anchors = anchors
self._match_threshold = match_threshold
self._num_classes = num_classes
def _unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = collections.OrderedDict()
anchors = self._anchors
count = 0
for level in range(anchors.min_level, anchors.max_level + 1):
feat_size = anchors.feat_sizes[level]
steps = (
feat_size["height"]
* feat_size["width"]
* anchors.get_anchors_per_location()
)
indices = tf.range(count, count + steps)
count += steps
labels_unpacked[level] = tf.reshape(
tf.gather(labels, indices),
[feat_size["height"], feat_size["width"], -1],
)
return labels_unpacked
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: An integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels
)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = tf.cast(cls_targets, tf.int32)
# Unpack labels.
cls_targets_dict = self._unpack_labels(cls_targets)
box_targets_dict = self._unpack_labels(box_targets)
num_positives = tf.reduce_sum(
tf.cast(tf.not_equal(matches.match_results, -1), tf.float32)
)
return cls_targets_dict, box_targets_dict, num_positives
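# A minimal sketch illustrating decode_box_outputs: zero regression targets
# (ty = tx = th = tw = 0) must decode back to the anchor box itself, since the
# centers are unshifted and exp(0) == 1 keeps the sizes. Because of the
# relative imports above, run it in package context, e.g.
# `python -m pipeline.anchors`.
if __name__ == "__main__":
    anchor = tf.constant([[10.0, 20.0, 50.0, 60.0]])  # [ymin, xmin, ymax, xmax]
    decoded = decode_box_outputs(tf.zeros_like(anchor), anchor)
    tf.debugging.assert_near(decoded, anchor)  # identity round trip
    print(decoded.numpy())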
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarityCalculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow.compat.v1 as tf
from . import box_list
from . import shape_utils
KEYPOINTS_FIELD_NAME = "keypoints"
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(
self,
similarity_calc,
matcher,
box_coder,
negative_class_weight=1.0,
unmatched_cls_target=None,
):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with
respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
return self._box_coder
def assign(
self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
groundtruth_weights=None,
**params
):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
    raise ValueError("anchors must be a BoxList")
if not isinstance(groundtruth_boxes, box_list.BoxList):
    raise ValueError("groundtruth_boxes must be a BoxList")
if groundtruth_labels is None:
groundtruth_labels = tf.ones(
tf.expand_dims(groundtruth_boxes.num_boxes(), 0)
)
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(self._unmatched_cls_target),
)
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(groundtruth_boxes.get())[:1],
)
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]
):
match_quality_matrix = self._similarity_calc.compare(
groundtruth_boxes, anchors
)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(
anchors, groundtruth_boxes, match
)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(
match, groundtruth_weights
)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4),
)
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
)
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results
)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1]
)
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(
matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets
)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size * [0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
Assigns to each anchor the classification target of the matching
groundtruth label provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=self._unmatched_cls_target,
ignored_value=self._unmatched_cls_target,
)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0.0, unmatched_value=0.0
)
def _create_classification_weights(self, match, groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.0,
unmatched_value=self._negative_class_weight,
)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
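# A minimal sketch of the full assignment pipeline, wiring TargetAssigner to
# the sibling anchors_utils modules the same way pipeline/anchors.py does
# (the ArgMaxMatcher arguments mirror that call site). Assumes package
# context for the relative imports, e.g.
# `python -m pipeline.anchors_utils.target_assigner`.
if __name__ == "__main__":
    from . import argmax_matcher
    from . import faster_rcnn_box_coder
    from . import region_similarity_calculator

    assigner = TargetAssigner(
        region_similarity_calculator.IouSimilarity(),
        argmax_matcher.ArgMaxMatcher(0.5, unmatched_threshold=0.5),
        faster_rcnn_box_coder.FasterRcnnBoxCoder(),
    )
    # One anchor overlaps the ground truth box (IoU 0.64), the other does not.
    anchors = box_list.BoxList(
        tf.constant([[0.0, 0.0, 1.0, 1.0], [5.0, 5.0, 6.0, 6.0]])
    )
    gt_boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.9, 0.9]]))
    cls_t, cls_w, reg_t, reg_w, match = assigner.assign(anchors, gt_boxes)
    print(reg_w.numpy())  # -> [1. 0.]: only the overlapping anchor regresses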
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/target_assigner.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one neither wants to
consider this box a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple APIs
to query the results.
"""
import abc
import tensorflow.compat.v1 as tf
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
ValueError: if match_results does not have rank 1 or is not an
int32 tensor
"""
if match_results.shape.ndims != 1:
raise ValueError("match_results should have rank 1")
if match_results.dtype != tf.int32:
raise ValueError("match_results should be an int32 tensor")
self._match_results = match_results
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.matched_column_indices())[0]
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.shape(self.unmatched_column_indices())[0]
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the column is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.ignored_column_indices())[0]
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
tf.gather(self._match_results, self.matched_column_indices())
)
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat(
[tf.stack([ignored_value, unmatched_value]), input_tensor], axis=0
)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
class Matcher(object, metaclass=abc.ABCMeta):
    """Abstract base class for matcher."""
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
with tf.name_scope(scope, "Match", [similarity_matrix, params]) as scope:
return Match(self._match(similarity_matrix, **params))
@abc.abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
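# A minimal sketch of the Match semantics above: entries >= 0 index the
# matched row, -1 means unmatched, -2 means ignored; gather_based_on_match
# substitutes the given fallback values for the latter two. Needs only
# TensorFlow, so it can be run directly.
if __name__ == "__main__":
    match = Match(tf.constant([-2, -1, 0, 1], dtype=tf.int32))
    row_values = tf.constant([10.0, 20.0])
    gathered = match.gather_based_on_match(
        row_values, unmatched_value=0.0, ignored_value=-1.0
    )
    print(gathered.numpy())  # -> [-1.  0. 10. 20.]
    print(match.num_matched_columns().numpy())  # -> 2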
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/matcher.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
import tensorflow.compat.v1 as tf
from . import box_coder
from . import box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
"""Faster RCNN box coder."""
def __init__(self, scale_factors=None):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha = tf.maximum(EPSILON, ha)
wa = tf.maximum(EPSILON, wa)
h = tf.maximum(EPSILON, h)
w = tf.maximum(EPSILON, w)
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return tf.transpose(tf.stack([ty, tx, th, tw]))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.0
xmin = xcenter - w / 2.0
ymax = ycenter + h / 2.0
xmax = xcenter + w / 2.0
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
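# A minimal sketch of the coding scheme above: encoding a box against an
# anchor and decoding it back is the identity (up to the EPSILON clamping).
# Assumes package context for the relative imports, e.g.
# `python -m pipeline.anchors_utils.faster_rcnn_box_coder`.
if __name__ == "__main__":
    coder = FasterRcnnBoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])
    boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.8, 0.6]]))
    anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
    rel_codes = coder.encode(boxes, anchors)
    decoded = coder.decode(rel_codes, anchors)
    tf.debugging.assert_near(decoded.get(), boxes.get())  # round trip
    print(rel_codes.numpy())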
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/faster_rcnn_box_coder.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow.compat.v1 as tf
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if all(isinstance(dim, int) for dim in shape_a) and all(
isinstance(dim, int) for dim in shape_b
):
if shape_a != shape_b:
raise ValueError("Unequal shapes {}, {}".format(shape_a, shape_b))
else:
return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
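# A minimal sketch of the helpers above. With eager tensors every dimension
# is statically known, so combined_static_and_dynamic_shape returns plain
# Python ints; inside a graph, unknown dimensions would come back as scalar
# tensors taken from tf.shape(tensor).
if __name__ == "__main__":
    t = tf.zeros([2, 3])
    assert combined_static_and_dynamic_shape(t) == [2, 3]
    assert_shape_equal([2, 3], [2, 3])  # both static and equal -> no error
    print("shape_utils sketch passed")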
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/shape_utils.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import tensorflow.compat.v1 as tf
# Box coder types.
FASTER_RCNN = "faster_rcnn"
KEYPOINT = "keypoint"
MEAN_STDDEV = "mean_stddev"
SQUARE = "square"
class BoxCoder(object, metaclass=ABCMeta):
    """Abstract base class for box coder."""
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope("Encode"):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope("Decode"):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overridden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overridden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
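# A minimal sketch of the BoxCoder contract: the smallest possible concrete
# coder, encoding each box as its corner offsets from the anchor, so that
# decode(encode(boxes, anchors), anchors) is the identity. Real coders such
# as FasterRcnnBoxCoder live in the sibling modules; run this one in package
# context, e.g. `python -m pipeline.anchors_utils.box_coder`.
if __name__ == "__main__":
    from . import box_list

    class OffsetBoxCoder(BoxCoder):
        """Toy coder: relative code = box corners minus anchor corners."""

        @property
        def code_size(self):
            return 4

        def _encode(self, boxes, anchors):
            return boxes.get() - anchors.get()

        def _decode(self, rel_codes, anchors):
            return box_list.BoxList(rel_codes + anchors.get())

    coder = OffsetBoxCoder()
    b = box_list.BoxList(tf.constant([[0.1, 0.2, 0.7, 0.8]]))
    a = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
    tf.debugging.assert_near(coder.decode(coder.encode(b, a), a).get(), b.get())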
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/box_coder.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow.compat.v1 as tf
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError("Invalid dimensions for box data.")
if boxes.dtype != tf.float32:
raise ValueError("Invalid tensor type: should be tf.float32")
self.data = {"boxes": boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(self.data["boxes"])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferable at graph construction time.
"""
return self.data["boxes"].get_shape().as_list()[0]
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != "boxes"]
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field("boxes")
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError("Invalid dimensions for box data.")
self.data["boxes"] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
This function returns the tensor stored under the specified field of the
box collection.
Args:
field: a string specifying the related field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError("field " + str(field) + " does not exist")
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError("field %s does not exist" % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with tf.name_scope(scope, "get_center_coordinates_and_sizes"):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.0
xcenter = xmin + width / 2.0
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with tf.name_scope(scope, "transpose_coordinates"):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1
)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError("boxlist must contain all specified fields")
tensor_dict[field] = self.get_field(field)
return tensor_dict
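

# --- Editor's note: a minimal usage sketch of BoxList, added for illustration;
# it is not part of the original file and assumes eager execution. ---
if __name__ == "__main__":
    boxes = tf.constant(
        [[0.0, 0.0, 0.5, 0.5], [0.25, 0.25, 1.0, 1.0]], dtype=tf.float32
    )
    boxlist = BoxList(boxes)
    # Attach a related per-box field, e.g. detection scores.
    boxlist.add_field("scores", tf.constant([0.9, 0.3]))
    print(boxlist.num_boxes_static())  # -> 2
    print(boxlist.get_center_coordinates_and_sizes())  # [yc, xc, h, w] tensors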
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/box_list.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow.compat.v1 as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, "Area"):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1
)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, "Intersection"):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1
)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1
)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, "IOU"):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections),
tf.truediv(intersections, unions),
)
class RegionSimilarityCalculator(metaclass=ABCMeta):
    """Abstract base class for region similarity calculators."""
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with tf.name_scope(scope, "Compare", [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
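

# --- Editor's note: hedged usage sketch (not part of the original file).
# It assumes the sibling box_list module shown earlier in this package and
# eager execution. ---
if __name__ == "__main__":
    from pipeline.anchors_utils import box_list  # assumed package path

    b1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]], tf.float32))
    b2 = box_list.BoxList(
        tf.constant([[0.0, 0.0, 0.5, 1.0], [0.5, 0.5, 1.0, 1.0]], tf.float32)
    )
    print(iou(b1, b2))  # -> [[0.5, 0.25]]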
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/region_similarity_calculator.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify a matched_threshold to prevent
low-similarity columns from matching to rows (generally resulting in a
negative training example) and an unmatched_threshold to ignore the match
(generally resulting in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from . import matcher
from . import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(
self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False,
):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
raise ValueError(
"Need to also define matched_threshold when"
"unmatched_threshold is defined"
)
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
raise ValueError(
"unmatched_threshold needs to be smaller or equal"
"to matched_threshold"
)
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
                raise ValueError(
                    "When negatives are in between matched and "
                    "unmatched thresholds, these cannot be of equal "
                    "value. matched: %s, unmatched: %s"
                    % (self._matched_threshold, self._unmatched_threshold)
                )
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix
)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(similarity_matrix, 0)
below_unmatched_threshold = tf.greater(
self._unmatched_threshold, matched_vals
)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals),
)
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(
matches, below_unmatched_threshold, -1
)
matches = self._set_values_using_indicator(
matches, between_thresholds, -2
)
else:
matches = self._set_values_using_indicator(
matches, below_unmatched_threshold, -2
)
matches = self._set_values_using_indicator(
matches, between_thresholds, -1
)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix
)
force_match_column_ids = tf.argmax(
similarity_matrix, 1, output_type=tf.int32
)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1]
)
force_match_row_ids = tf.argmax(
force_match_column_indicators, 0, output_type=tf.int32
)
force_match_column_mask = tf.cast(
tf.reduce_max(force_match_column_indicators, 0), tf.bool
)
final_matches = tf.where(
force_match_column_mask, force_match_row_ids, matches
)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if similarity_matrix.shape[0] == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
tf.greater(tf.shape(similarity_matrix)[0], 0),
_match_when_rows_are_non_empty,
_match_when_rows_are_empty,
)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return x * (1 - indicator) + val * indicator
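

# --- Editor's note: hedged illustration of the three matching categories,
# added by the editor; it calls the private _match directly, assumes eager
# execution, and relies on the sibling matcher/shape_utils modules being
# importable. ---
if __name__ == "__main__":
    m = ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.3)
    # 2 rows (groundtruth), 3 columns (anchors).
    sim = tf.constant([[0.6, 0.2, 0.4], [0.1, 0.1, 0.35]], tf.float32)
    # column 0: 0.6 >= 0.5        -> matched to row 0
    # column 1: 0.2 < 0.3         -> unmatched (-1)
    # column 2: 0.4 in [0.3, 0.5) -> ignored (-2)
    print(m._match(sim))  # -> [0, -1, -2]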
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/anchors_utils/argmax_matcher.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import tensorflow.compat.v1 as tf
from pipeline.anchors_utils import box_list
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
return flipped_boxes
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation, scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, "FlipHorizontal"):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def random_horizontal_flip(
image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
seed=None,
):
"""Randomly flips the image and detections horizontally.
The probability of flipping the image is 50%.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
seed: random seed
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
"keypoints are provided but keypoints_flip_permutation is not provided"
)
with tf.name_scope("RandomHorizontalFlip", values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(
do_a_flip_random, lambda: _flip_boxes_left_right(boxes), lambda: boxes
)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(
do_a_flip_random, lambda: _flip_masks_left_right(masks), lambda: masks
)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),
lambda: keypoints,
)
result.append(keypoints)
return tuple(result)
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = int(round(orig_height * small_scale_factor))
small_width = int(round(orig_width * small_scale_factor))
small_size = [small_height, small_width]
        if max(large_size) > max_dimension:
            new_size = small_size
        else:
            new_size = large_size
return tf.constant(new_size + [num_channels])
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
"""Compute new dynamic shape for resize_to_range method."""
image_shape = tf.shape(image)
orig_height = tf.to_float(image_shape[0])
orig_width = tf.to_float(image_shape[1])
num_channels = image_shape[2]
orig_min_dim = tf.minimum(orig_height, orig_width)
# Calculates the larger of the possible sizes
min_dimension = tf.constant(min_dimension, dtype=tf.float32)
large_scale_factor = min_dimension / orig_min_dim
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))
large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))
large_size = tf.stack([large_height, large_width])
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = tf.maximum(orig_height, orig_width)
max_dimension = tf.constant(max_dimension, dtype=tf.float32)
small_scale_factor = max_dimension / orig_max_dim
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))
small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
tf.to_float(tf.reduce_max(large_size)) > max_dimension,
lambda: small_size,
lambda: large_size,
)
else:
new_size = large_size
return tf.stack(tf.unstack(new_size) + [num_channels])
def resize_to_range(
image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False,
):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros
so the resulting image is of the spatial size
[max_dimension, max_dimension]. If masks are included they are padded
similarly.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError("Image should be 3D tensor")
with tf.name_scope("ResizeToRange", values=[image, min_dimension]):
if image.get_shape().is_fully_defined():
new_size = _compute_new_static_size(image, min_dimension, max_dimension)
else:
new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)
new_image = tf.image.resize_images(
image, new_size[:-1], method=method, align_corners=align_corners
)
if pad_to_max_dimension:
new_image = tf.image.pad_to_bounding_box(
new_image, 0, 0, max_dimension, max_dimension
)
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_images(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners,
)
new_masks = tf.squeeze(new_masks, 3)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, max_dimension, max_dimension
)
result.append(new_masks)
result.append(new_size)
return result
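

# --- Editor's note: a quick numeric check of resize_to_range, added for
# illustration (not in the original file). A 100x200 image with
# min_dimension=50 would scale to 50x100, but since 100 exceeds
# max_dimension=80, the smaller scale 80/200 is used instead -> 40x80.
def _resize_to_range_example():
    image = tf.zeros([100, 200, 3], tf.float32)
    resized_image, _ = resize_to_range(image, min_dimension=50, max_dimension=80)
    return resized_image  # static shape (40, 80, 3)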
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def box_list_scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, "Scale"):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1
)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def keypoint_scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, "Scale"):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
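

# --- Editor's note: hedged usage sketch appended for illustration; not part
# of the original file and assumes eager execution. ---
if __name__ == "__main__":
    image = tf.random_uniform([4, 4, 3])
    boxes = tf.constant([[0.0, 0.1, 0.5, 0.4]], tf.float32)
    flipped_image, flipped_boxes = random_horizontal_flip(image, boxes, seed=0)
    # When the flip fires, [ymin, xmin, ymax, xmax] = [0.0, 0.1, 0.5, 0.4]
    # becomes [0.0, 0.6, 0.5, 0.9] (x coordinates mirrored around 0.5).
    print(flipped_boxes)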
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/tf/preprocessor.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and processing."""
from absl import logging
import tensorflow as tf
import utils
from pipeline import anchors
from . import preprocessor
from . import tf_example_decoder
class InputProcessor:
"""Base class of Input processor."""
def __init__(self, image, output_size):
"""Initializes a new `InputProcessor`.
Args:
image: The input image before processing.
output_size: The output image size after calling resize_and_crop_image
function.
"""
self._image = image
if isinstance(output_size, int):
self._output_size = (output_size, output_size)
else:
self._output_size = output_size
# Parameters to control rescaling and shifting during preprocessing.
# Image scale defines scale from original image to scaled image.
self._image_scale = tf.constant(1.0)
# The integer height and width of scaled image.
self._scaled_height = tf.shape(image)[0]
self._scaled_width = tf.shape(image)[1]
# The x and y translation offset to crop scaled image to the output size.
self._crop_offset_y = tf.constant(0)
self._crop_offset_x = tf.constant(0)
def normalize_image(self):
"""Normalize the image to zero mean and unit variance."""
# The image normalization is identical to Cloud TPU ResNet.
self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)
offset = tf.constant([0.485, 0.456, 0.406])
offset = tf.expand_dims(offset, axis=0)
offset = tf.expand_dims(offset, axis=0)
self._image -= offset
scale = tf.constant([0.229, 0.224, 0.225])
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
self._image /= scale
def set_training_random_scale_factors(self, scale_min, scale_max, target_size=None):
"""Set the parameters for multiscale training.
        Notably, if train and eval use different sizes, then target_size should be
        set as the eval size to avoid a discrepancy between train and eval.
Args:
scale_min: minimal scale factor.
scale_max: maximum scale factor.
target_size: targeted size, usually same as eval. If None, use train size.
"""
if not target_size:
target_size = self._output_size
target_size = utils.parse_image_size(target_size)
logging.info(
"target_size = %s, output_size = %s", target_size, self._output_size
)
# Select a random scale factor.
random_scale_factor = tf.random.uniform([], scale_min, scale_max)
scaled_y = tf.cast(random_scale_factor * target_size[0], tf.int32)
scaled_x = tf.cast(random_scale_factor * target_size[1], tf.int32)
# Recompute the accurate scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(scaled_y, tf.float32) / height
image_scale_x = tf.cast(scaled_x, tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
# Select non-zero random offset (x, y) if scaled image is larger than
# self._output_size.
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
offset_y = tf.cast(scaled_height - self._output_size[0], tf.float32)
offset_x = tf.cast(scaled_width - self._output_size[1], tf.float32)
offset_y = tf.maximum(0.0, offset_y) * tf.random.uniform([], 0, 1)
offset_x = tf.maximum(0.0, offset_x) * tf.random.uniform([], 0, 1)
offset_y = tf.cast(offset_y, tf.int32)
offset_x = tf.cast(offset_x, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
self._crop_offset_x = offset_x
self._crop_offset_y = offset_y
def set_scale_factors_to_output_size(self):
"""Set the parameters to resize input image to self._output_size."""
# Compute the scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(self._output_size[0], tf.float32) / height
image_scale_x = tf.cast(self._output_size[1], tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR):
"""Resize input image and crop it to the self._output dimension."""
scaled_image = tf.image.resize(
self._image, [self._scaled_height, self._scaled_width], method=method
)
scaled_image = scaled_image[
self._crop_offset_y : self._crop_offset_y + self._output_size[0],
self._crop_offset_x : self._crop_offset_x + self._output_size[1],
:,
]
output_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, self._output_size[0], self._output_size[1]
)
return output_image
class DetectionInputProcessor(InputProcessor):
"""Input processor for object detection."""
def __init__(self, image, output_size, boxes=None, classes=None):
InputProcessor.__init__(self, image, output_size)
self._boxes = boxes
self._classes = classes
def random_horizontal_flip(self):
"""Randomly flip input image and bounding boxes."""
self._image, self._boxes = preprocessor.random_horizontal_flip(
self._image, boxes=self._boxes
)
def clip_boxes(self, boxes):
"""Clip boxes to fit in an image."""
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
ymin = tf.clip_by_value(ymin, 0, self._output_size[0] - 1)
xmin = tf.clip_by_value(xmin, 0, self._output_size[1] - 1)
ymax = tf.clip_by_value(ymax, 0, self._output_size[0] - 1)
xmax = tf.clip_by_value(xmax, 0, self._output_size[1] - 1)
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1)
return boxes
def resize_and_crop_boxes(self):
"""Resize boxes and crop it to the self._output dimension."""
boxlist = preprocessor.box_list.BoxList(self._boxes)
        # The boxlist is in the normalized [0, 1] range, so we pass the scaled
        # height/width instead of just the scale.
boxes = preprocessor.box_list_scale(
boxlist, self._scaled_height, self._scaled_width
).get()
# Adjust box coordinates based on the offset.
box_offset = tf.stack(
[
self._crop_offset_y,
self._crop_offset_x,
self._crop_offset_y,
self._crop_offset_x,
]
)
boxes -= tf.cast(tf.reshape(box_offset, [1, 4]), tf.float32)
# Clip the boxes.
boxes = self.clip_boxes(boxes)
# Filter out ground truth boxes that are illegal.
indices = tf.where(
tf.not_equal((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]), 0)
)
boxes = tf.gather_nd(boxes, indices)
classes = tf.gather_nd(self._classes, indices)
return boxes, classes
@property
def image_scale(self):
# Return image scale from original image to scaled image.
return self._image_scale
@property
def image_scale_to_original(self):
# Return image scale from scaled image to original image.
return 1.0 / self._image_scale
@property
def offset_x(self):
return self._crop_offset_x
@property
def offset_y(self):
return self._crop_offset_y
def pad_to_fixed_size(data, pad_value, output_shape):
"""Pad data to a fixed length at the first dimension.
Args:
data: Tensor to be padded to output_shape.
pad_value: A constant value assigned to the paddings.
output_shape: The output shape of a 2D tensor.
Returns:
      The padded tensor with output_shape [max_instances_per_image, dimension].
"""
max_instances_per_image = output_shape[0]
dimension = output_shape[1]
data = tf.reshape(data, [-1, dimension])
num_instances = tf.shape(data)[0]
msg = "ERROR: please increase config.max_instances_per_image"
with tf.control_dependencies(
[tf.assert_less(num_instances, max_instances_per_image, message=msg)]
):
pad_length = max_instances_per_image - num_instances
paddings = pad_value * tf.ones([pad_length, dimension])
padded_data = tf.concat([data, paddings], axis=0)
padded_data = tf.reshape(padded_data, output_shape)
return padded_data
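

# --- Editor's note: a small hedged example of pad_to_fixed_size, added for
# illustration (not in the original file).
def _pad_to_fixed_size_example():
    boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]])  # a single instance
    padded = pad_to_fixed_size(boxes, -1, [100, 4])
    return padded  # shape [100, 4]; rows 1..99 are filled with -1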
class InputReader:
"""Input reader for dataset."""
def __init__(self, params, file_pattern, is_training=False, use_fake_data=False):
self._params = params
self._file_pattern = file_pattern
self._is_training = is_training
self._use_fake_data = use_fake_data
        # COCO has a limit of 100 instances per image, but users may set a
        # different value for a custom dataset.
self._max_instances_per_image = params["max_instances_per_image"] or 100
self._debug = params["seed"] is not None
@tf.autograph.experimental.do_not_convert
def dataset_parser(self, value, example_decoder, anchor_labeler):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: a single serialized tf.Example string.
example_decoder: TF example decoder.
anchor_labeler: anchor box labeler.
Returns:
image: Image tensor that is preprocessed to have normalized value and
fixed dimension [image_height, image_width, 3]
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: Number of positive anchors in the image.
boxes: Groundtruth bounding box annotations. The box is represented in
[y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_instances_per_image, 4].
classes: Groundtruth classes annotations. The tensor is padded with -1
to the fixed dimension [self._max_instances_per_image].
"""
params = self._params
with tf.name_scope("parser"):
data = example_decoder.decode(value)
image = data["image"]
boxes = data["groundtruth_boxes"]
classes = data["groundtruth_classes"]
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
if self._is_training:
if params.get("grid_mask", None):
from . import gridmask # pylint: disable=g-import-not-at-top
image, boxes = gridmask.gridmask(image, boxes)
input_processor = DetectionInputProcessor(
image, params["image_size"], boxes, classes
)
input_processor.normalize_image()
if self._is_training:
if params["input_rand_hflip"]:
input_processor.random_horizontal_flip()
input_processor.set_training_random_scale_factors(
params["jitter_min"],
params["jitter_max"],
params.get("target_size", None),
)
else:
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
boxes, classes = input_processor.resize_and_crop_boxes()
# Assign anchors.
(cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
boxes, classes
)
# Pad groundtruth data for evaluation.
boxes = pad_to_fixed_size(boxes, -1, [self._max_instances_per_image, 4])
classes = pad_to_fixed_size(classes, -1, [self._max_instances_per_image, 1])
return (
image,
cls_targets,
box_targets,
num_positives,
boxes,
classes,
)
@tf.autograph.experimental.do_not_convert
def process_example(
self,
batch_size,
images,
cls_targets,
box_targets,
num_positives,
boxes,
classes,
):
        """Processes one batch of data."""
        params = self._params
if params["data_format"] == "channels_first":
images = tf.transpose(images, [0, 3, 1, 2])
data = [images, num_positives, boxes, tf.cast(classes, tf.int32)]
for level in range(params["min_level"], params["max_level"] + 1):
cls = cls_targets[level]
box = box_targets[level]
if params["data_format"] == "channels_first":
cls = tf.transpose(cls, [0, 3, 1, 2])
box = tf.transpose(box, [0, 3, 1, 2])
data.append(cls)
data.append(box)
return data
@property
def dataset_options(self):
options = tf.data.Options()
options.experimental_deterministic = self._debug or not self._is_training
options.experimental_optimization.map_parallelization = True
options.experimental_optimization.parallel_batch = True
return options
def get_dataset(self, batch_size=64):
params = self._params
input_anchors = anchors.Anchors(
params["min_level"],
params["max_level"],
params["num_scales"],
params["aspect_ratios"],
params["anchor_scale"],
params["image_size"],
)
anchor_labeler = anchors.AnchorLabeler(input_anchors, params["num_classes"])
example_decoder = tf_example_decoder.TfExampleDecoder()
seed = params["seed"]
dataset = tf.data.Dataset.list_files(
self._file_pattern, shuffle=self._is_training, seed=seed
)
dataset = dataset.repeat()
# Prefetch data from files.
def _prefetch_dataset(filename):
return tf.data.TFRecordDataset(filename).prefetch(1)
dataset = dataset.interleave(
_prefetch_dataset, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
dataset = dataset.with_options(self.dataset_options)
if self._is_training:
dataset = dataset.shuffle(64, seed=seed)
# Parse the fetched records to input tensors for model function.
# pylint: disable=g-long-lambda
map_fn = lambda value: self.dataset_parser(
value, example_decoder, anchor_labeler
)
# pylint: enable=g-long-lambda
dataset = dataset.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=params["drop_remainder"])
dataset = dataset.map(lambda *args: self.process_example(batch_size, *args))
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
if self._use_fake_data:
            # Turn this dataset into a semi-fake dataset which always loops over
            # the first batch. This reduces variance in performance and is
            # useful for testing.
dataset = dataset.take(1).cache().repeat()
return dataset
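

# --- Editor's note: hedged end-to-end sketch (not in the original file). The
# params keys below are the ones this module reads; the values and the file
# pattern are illustrative placeholders. ---
if __name__ == "__main__":
    params = {
        "image_size": (512, 512),
        "input_rand_hflip": True,
        "jitter_min": 0.1,
        "jitter_max": 2.0,
        "min_level": 3,
        "max_level": 7,
        "num_scales": 3,
        "aspect_ratios": [1.0, 2.0, 0.5],
        "anchor_scale": 4.0,
        "num_classes": 90,
        "max_instances_per_image": 100,
        "data_format": "channels_last",
        "drop_remainder": True,
        "seed": None,
        "grid_mask": False,
    }
    reader = InputReader(params, "/path/to/train-*.tfrecord", is_training=True)
    dataset = reader.get_dataset(batch_size=8)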
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/tf/dataloader.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow.compat.v1 as tf
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self):
self._keys_to_features = {
"image/encoded": tf.FixedLenFeature((), tf.string),
"image/height": tf.FixedLenFeature((), tf.int64, -1),
"image/width": tf.FixedLenFeature((), tf.int64, -1),
"image/object/bbox/xmin": tf.VarLenFeature(tf.float32),
"image/object/bbox/xmax": tf.VarLenFeature(tf.float32),
"image/object/bbox/ymin": tf.VarLenFeature(tf.float32),
"image/object/bbox/ymax": tf.VarLenFeature(tf.float32),
"image/object/class/label": tf.VarLenFeature(tf.int64),
"image/object/area": tf.VarLenFeature(tf.float32),
}
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors["image/encoded"], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors["image/object/bbox/xmin"]
xmax = parsed_tensors["image/object/bbox/xmax"]
ymin = parsed_tensors["image/object/bbox/ymin"]
ymax = parsed_tensors["image/object/bbox/ymax"]
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- height: an integer scalar tensor.
- width: an integer scalar tensor.
            - groundtruth_classes: an int64 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
"""
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._keys_to_features
)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value=""
)
else:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value=0
)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
decode_image_shape = tf.logical_or(
tf.equal(parsed_tensors["image/height"], -1),
tf.equal(parsed_tensors["image/width"], -1),
)
image_shape = tf.cast(tf.shape(image), dtype=tf.int64)
parsed_tensors["image/height"] = tf.where(
decode_image_shape, image_shape[0], parsed_tensors["image/height"]
)
parsed_tensors["image/width"] = tf.where(
decode_image_shape, image_shape[1], parsed_tensors["image/width"]
)
decoded_tensors = {
"image": image,
"height": parsed_tensors["image/height"],
"width": parsed_tensors["image/width"],
"groundtruth_classes": parsed_tensors["image/object/class/label"],
"groundtruth_boxes": boxes,
}
return decoded_tensors
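

# --- Editor's note: hedged round-trip sketch (not part of the original file);
# it builds a tiny serialized tf.Example and assumes eager execution. ---
if __name__ == "__main__":
    png = tf.io.encode_png(tf.zeros([2, 2, 3], tf.uint8)).numpy()

    def _floats(values):
        return tf.train.Feature(float_list=tf.train.FloatList(value=values))

    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                "image/encoded": tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[png])
                ),
                "image/object/bbox/xmin": _floats([0.1]),
                "image/object/bbox/xmax": _floats([0.9]),
                "image/object/bbox/ymin": _floats([0.2]),
                "image/object/bbox/ymax": _floats([0.8]),
                "image/object/class/label": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[1])
                ),
            }
        )
    )
    decoded = TfExampleDecoder().decode(example.SerializeToString())
    print(decoded["groundtruth_boxes"])  # -> [[0.2, 0.1, 0.8, 0.9]]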
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/tf/tf_example_decoder.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid Masking Augmentation Reference: https://arxiv.org/abs/2001.04086."""
import math
import tensorflow as tf
from tensorflow_addons import image as image_ops
class GridMask(object):
"""GridMask class for grid masking augmentation."""
def __init__(
self,
prob=0.6,
ratio=0.6,
rotate=10,
gridmask_size_ratio=0.5,
fill=1,
interpolation="BILINEAR",
):
"""initialization.
Args:
prob: probablity of occurance.
ratio: grid mask ratio i.e if 0.5 grid and spacing will be equal.
rotate: Rotation of grid mesh.
gridmask_size_ratio: Grid mask size, grid to image size ratio.
fill: Fill value for grids.
interpolation: Interpolation method for rotation.
"""
self.prob = prob
self.ratio = ratio
self.rotate = rotate
self.gridmask_size_ratio = gridmask_size_ratio
self.fill = fill
self.interpolation = interpolation
@tf.function
def random_rotate(self, mask):
"""Randomly rotates mask on given range."""
angle = self.rotate * tf.random.normal([], -1, 1)
angle = math.pi * angle / 180
return image_ops.rotate(mask, angle, interpolation=self.interpolation)
@staticmethod
def crop(mask, h, w):
"""crops in middle of mask and image corners."""
ww = hh = tf.shape(mask)[0]
mask = mask[
(hh - h) // 2 : (hh - h) // 2 + h,
(ww - w) // 2 : (ww - w) // 2 + w,
]
return mask
@tf.function
def mask(self, h, w):
"""mask helper function for initializing grid mask of required size."""
h = tf.cast(h, tf.float32)
w = tf.cast(w, tf.float32)
mask_w = mask_h = tf.cast(
tf.cast((self.gridmask_size_ratio + 1), tf.float32) * tf.math.maximum(h, w),
tf.int32,
)
self.mask_w = mask_w
mask = tf.zeros(shape=[mask_h, mask_w], dtype=tf.int32)
gridblock = tf.random.uniform(
shape=[],
minval=int(tf.math.minimum(h * 0.5, w * 0.3)),
maxval=int(tf.math.maximum(h * 0.5, w * 0.3)) + 1,
dtype=tf.int32,
)
if self.ratio == 1:
length = tf.random.uniform(
shape=[], minval=1, maxval=gridblock + 1, dtype=tf.int32
)
else:
length = tf.cast(
tf.math.minimum(
tf.math.maximum(
int(tf.cast(gridblock, tf.float32) * self.ratio + 0.5), 1
),
gridblock - 1,
),
tf.int32,
)
for _ in range(2):
start_w = tf.random.uniform(
shape=[], minval=0, maxval=gridblock + 1, dtype=tf.int32
)
for i in range(mask_w // gridblock):
start = gridblock * i + start_w
end = tf.math.minimum(start + length, mask_w)
indices = tf.reshape(tf.range(start, end), [end - start, 1])
updates = (
tf.ones(shape=[end - start, mask_w], dtype=tf.int32) * self.fill
)
mask = tf.tensor_scatter_nd_update(mask, indices, updates)
mask = tf.transpose(mask)
return mask
def __call__(self, image, label):
"""Masks input image tensor with random grid mask."""
h = tf.shape(image)[0]
w = tf.shape(image)[1]
grid = self.mask(h, w)
grid = self.random_rotate(grid)
mask = self.crop(grid, h, w)
mask = tf.cast(mask, image.dtype)
mask = tf.reshape(mask, (h, w))
        # Avoid the private Tensor._rank(); compare static ranks via shape.ndims.
        mask = tf.expand_dims(mask, -1) if image.shape.ndims != mask.shape.ndims else mask
        # A uniform draw makes `prob` the actual probability of applying the
        # mask (a standard-normal draw would skew it).
        occur = tf.random.uniform([], 0, 1) < self.prob
image = tf.cond(occur, lambda: image * mask, lambda: image)
return image, label
def gridmask(
image, boxes, prob=0.5, ratio=0.6, rotate=10, gridmask_size_ratio=0.5, fill=1
):
"""Callable instance of GridMask and transforms input image."""
gridmask_obj = GridMask(
prob=prob,
ratio=ratio,
rotate=rotate,
gridmask_size_ratio=gridmask_size_ratio,
fill=fill,
)
image, boxes = gridmask_obj(image, boxes)
return image, boxes
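

# --- Editor's note: hedged illustration of the masking principle (not part of
# the original file). Rather than exercising the full class, it shows that a
# grid mask is simply a {0, fill}-valued tensor multiplied into the image. ---
if __name__ == "__main__":
    image = tf.random.uniform([8, 8, 3])
    # An alternating-row toy mask standing in for the generated grid.
    mask = tf.tile(tf.constant([[1.0], [0.0]]), [4, 8])  # shape [8, 8]
    masked = image * tf.expand_dims(mask, -1)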
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/tf/gridmask.py
|
# Copyright 2021 Kacper Kluk, Paweł Anikiel, Jagoda Kamińska. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import nvidia.dali as dali
from nvidia.dali import pipeline_def
import nvidia.dali.plugin.tf as dali_tf
import tensorflow as tf
import math
from absl import logging
from glob import glob
from pipeline import anchors
from utils import InputType
from . import ops_util
class EfficientDetPipeline:
def __init__(
self,
params,
batch_size,
args,
is_training=True,
num_shards=1,
device_id=0,
cpu_only=False,
):
self._batch_size = batch_size
self._image_size = params["image_size"]
self._gridmask = params["grid_mask"]
self._input_type = args.input_type
if self._input_type == InputType.tfrecord:
if is_training:
file_pattern = args.train_file_pattern
else:
file_pattern = args.eval_file_pattern or args.train_file_pattern
self._tfrecord_files = glob(file_pattern)
self._tfrecord_idxs = [
filename + "_idx" for filename in self._tfrecord_files
]
else:
self._images_path = args.images_path
self._annotations_path = args.annotations_path
self._is_training = is_training
self._num_shards = num_shards
self._shard_id = None if cpu_only else device_id
self._device = "cpu" if cpu_only else "gpu"
self._anchors = anchors.Anchors(
3, 7, 3, [1.0, 2.0, 0.5], 4.0, params["image_size"]
)
self._boxes = self._get_boxes()
self._max_instances_per_image = params["max_instances_per_image"] or 100
seed = params["seed"] or -1
self._pipe = self._define_pipeline(
batch_size=self._batch_size,
num_threads=self._num_shards,
device_id=device_id,
seed=seed,
)
def _get_boxes(self):
boxes_t = self._anchors.boxes[:, 0] / self._image_size[0]
boxes_l = self._anchors.boxes[:, 1] / self._image_size[1]
boxes_b = self._anchors.boxes[:, 2] / self._image_size[0]
boxes_r = self._anchors.boxes[:, 3] / self._image_size[1]
boxes = tf.transpose(tf.stack([boxes_l, boxes_t, boxes_r, boxes_b]))
return tf.reshape(boxes, boxes.shape[0] * 4).numpy().tolist()
@pipeline_def
def _define_pipeline(self):
if self._input_type == InputType.tfrecord:
images, bboxes, classes, widths, heights = ops_util.input_tfrecord(
self._tfrecord_files,
self._tfrecord_idxs,
device=self._device,
shard_id=self._shard_id,
num_shards=self._num_shards,
random_shuffle=self._is_training,
)
elif self._input_type == InputType.coco:
images, bboxes, classes, widths, heights = ops_util.input_coco(
self._images_path,
self._annotations_path,
device=self._device,
shard_id=self._shard_id,
num_shards=self._num_shards,
random_shuffle=self._is_training,
)
if self._is_training and self._gridmask:
images = ops_util.gridmask(images, widths, heights)
images, bboxes = ops_util.normalize_flip(
images, bboxes, 0.5 if self._is_training else 0.0
)
images, bboxes, classes = ops_util.random_crop_resize(
images,
bboxes,
classes,
widths,
heights,
self._image_size,
[0.1, 2.0] if self._is_training else None,
)
if self._device == "gpu":
bboxes = bboxes.gpu()
classes = classes.gpu()
enc_bboxes, enc_classes = dali.fn.box_encoder(
bboxes, classes, anchors=self._boxes, offset=True
)
num_positives = dali.fn.reductions.sum(
dali.fn.cast(enc_classes != 0, dtype=dali.types.FLOAT),
)
enc_classes -= 1
# convert to tlbr
enc_bboxes = dali.fn.coord_transform(
enc_bboxes,
M=[0, 1, 0, 0,
1, 0, 0, 0,
0, 0, 0, 1,
0, 0, 1, 0]
)
# split into layers by size
enc_bboxes_layers, enc_classes_layers = self._unpack_labels(
enc_bboxes, enc_classes
)
# interleave enc_bboxes_layers and enc_classes_layers
enc_layers = [
item
for pair in zip(enc_classes_layers, enc_bboxes_layers)
for item in pair
]
bboxes = ops_util.bbox_to_effdet_format(
bboxes, self._image_size
)
bboxes = dali.fn.pad(
bboxes,
fill_value=-1,
shape=(self._max_instances_per_image, 4),
)
classes = dali.fn.pad(
classes,
fill_value=-1,
shape=(self._max_instances_per_image,),
)
return images, num_positives, bboxes, classes, *enc_layers
def _unpack_labels(self, enc_bboxes, enc_classes):
# from keras/anchors.py
enc_bboxes_layers = []
enc_classes_layers = []
count = 0
for level in range(self._anchors.min_level, self._anchors.max_level + 1):
feat_size = self._anchors.feat_sizes[level]
steps = (
feat_size["height"]
* feat_size["width"]
* self._anchors.get_anchors_per_location()
)
enc_bboxes_layers.append(
dali.fn.reshape(
dali.fn.slice(
enc_bboxes,
(count, 0),
(steps, 4),
axes=[0, 1],
device=self._device,
),
shape=[feat_size["height"], feat_size["width"], -1],
device=self._device,
)
)
enc_classes_layers.append(
dali.fn.reshape(
dali.fn.slice(
enc_classes, count, steps, axes=[0], device=self._device
),
shape=[feat_size["height"], feat_size["width"], -1],
device=self._device,
)
)
count += steps
return enc_bboxes_layers, enc_classes_layers
def get_dataset(self):
output_shapes = [
(self._batch_size, self._image_size[0], self._image_size[1], 3),
(self._batch_size,),
(self._batch_size, None, 4),
(self._batch_size, None),
]
output_dtypes = [tf.float32, tf.float32, tf.float32, tf.int32]
for level in range(self._anchors.min_level, self._anchors.max_level + 1):
feat_size = self._anchors.feat_sizes[level]
output_shapes.append(
(
self._batch_size,
feat_size["height"],
feat_size["width"],
self._anchors.get_anchors_per_location(),
)
)
output_shapes.append(
(
self._batch_size,
feat_size["height"],
feat_size["width"],
self._anchors.get_anchors_per_location() * 4,
)
)
output_dtypes.append(tf.int32)
output_dtypes.append(tf.float32)
dataset = dali_tf.DALIDataset(
pipeline=self._pipe,
batch_size=self._batch_size,
output_shapes=tuple(output_shapes),
output_dtypes=tuple(output_dtypes),
)
return dataset
def build(self):
self._pipe.build()
def run(self):
return self._pipe.run()
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/dali/efficientdet_pipeline.py
|
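A minimal, self-contained sketch of the dali_tf.DALIDataset wiring used by get_dataset() above, assuming nvidia-dali and its TensorFlow plugin are installed. The toy pipeline and all names below are illustrative stand-ins (random noise instead of the decoded, augmented images), not part of this repository:

import nvidia.dali as dali
import nvidia.dali.plugin.tf as dali_tf
import tensorflow as tf
from nvidia.dali import pipeline_def

@pipeline_def(batch_size=8, num_threads=2, device_id=0)
def toy_pipeline():
    # Random noise standing in for the decoded, augmented images.
    return dali.fn.random.uniform(range=[0.0, 1.0], shape=(64, 64, 3))

with tf.device("/cpu:0"):
    dataset = dali_tf.DALIDataset(
        pipeline=toy_pipeline(),
        batch_size=8,
        output_shapes=((8, 64, 64, 3),),
        output_dtypes=(tf.float32,),
    )

for (batch,) in dataset.take(1):
    print(batch.shape)  # (8, 64, 64, 3)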
# Copyright 2021 Kacper Kluk, Paweł Anikiel, Jagoda Kamińska. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import nvidia.dali as dali
def input_tfrecord(
tfrecord_files, tfrecord_idxs, device, shard_id, num_shards, random_shuffle=True
):
inputs = dali.fn.readers.tfrecord(
path=tfrecord_files,
index_path=tfrecord_idxs,
features={
"image/encoded": dali.tfrecord.FixedLenFeature(
(), dali.tfrecord.string, ""
),
"image/height": dali.tfrecord.FixedLenFeature((), dali.tfrecord.int64, -1),
"image/width": dali.tfrecord.FixedLenFeature((), dali.tfrecord.int64, -1),
"image/object/bbox/xmin": dali.tfrecord.VarLenFeature(
dali.tfrecord.float32, 0.0
),
"image/object/bbox/xmax": dali.tfrecord.VarLenFeature(
dali.tfrecord.float32, 0.0
),
"image/object/bbox/ymin": dali.tfrecord.VarLenFeature(
dali.tfrecord.float32, 0.0
),
"image/object/bbox/ymax": dali.tfrecord.VarLenFeature(
dali.tfrecord.float32, 0.0
),
"image/object/class/label": dali.tfrecord.VarLenFeature(
dali.tfrecord.int64, 0
),
},
shard_id=shard_id,
num_shards=num_shards,
random_shuffle=random_shuffle,
)
images = dali.fn.decoders.image(
inputs["image/encoded"],
device="mixed" if device == "gpu" else "cpu",
output_type=dali.types.RGB,
)
xmin = inputs["image/object/bbox/xmin"]
xmax = inputs["image/object/bbox/xmax"]
ymin = inputs["image/object/bbox/ymin"]
ymax = inputs["image/object/bbox/ymax"]
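    # stack the per-coordinate vectors into a [4, N] tensor, then transpose to
    # N boxes in [x_min, y_min, x_max, y_max] (xyXY) order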
bboxes = dali.fn.transpose(dali.fn.stack(xmin, ymin, xmax, ymax), perm=[1, 0])
classes = dali.fn.cast(inputs["image/object/class/label"], dtype=dali.types.INT32)
return (
images,
bboxes,
classes,
dali.fn.cast(inputs["image/width"], dtype=dali.types.FLOAT),
dali.fn.cast(inputs["image/height"], dtype=dali.types.FLOAT),
)
def input_coco(
images_path, annotations_path, device, shard_id, num_shards, random_shuffle=True
):
encoded, bboxes, classes = dali.fn.readers.coco(
file_root=images_path,
annotations_file=annotations_path,
ratio=True,
ltrb=True,
shard_id=shard_id,
num_shards=num_shards,
random_shuffle=random_shuffle,
)
images = dali.fn.decoders.image(
encoded,
device="mixed" if device == "gpu" else "cpu",
output_type=dali.types.RGB,
)
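    # peek_image_shape reads only the header of the encoded stream, yielding
    # the (H, W, C) shape without decoding the image a second time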
shape = dali.fn.peek_image_shape(encoded, dtype=dali.types.FLOAT)
heights = shape[0]
widths = shape[1]
return (
images,
bboxes,
classes,
widths,
heights,
)
def normalize_flip(images, bboxes, p=0.5):
flip = dali.fn.random.coin_flip(probability=p)
images = dali.fn.crop_mirror_normalize(
images,
mirror=flip,
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
output_layout=dali.types.NHWC
)
bboxes = dali.fn.bb_flip(bboxes, horizontal=flip, ltrb=True)
return images, bboxes
def gridmask(images, widths, heights):
p = dali.fn.random.coin_flip()
ratio = 0.4 * p
angle = dali.fn.random.normal(mean=-1, stddev=1) * 10.0 * (math.pi / 180.0)
l = dali.math.min(0.5 * heights, 0.3 * widths)
r = dali.math.max(0.5 * heights, 0.3 * widths)
tile = dali.fn.cast(
(dali.fn.random.uniform(range=[0.0, 1.0]) * (r - l) + l),
dtype=dali.types.INT32,
)
    images = dali.fn.grid_mask(
        images, ratio=ratio, angle=angle, tile=tile
    )
    return images
def random_crop_resize(
images, bboxes, classes, widths, heights, output_size, scaling=[0.1, 2.0]
):
if scaling is None:
scale_factor = 1.0
else:
scale_factor = dali.fn.random.uniform(range=scaling)
sizes = dali.fn.stack(heights, widths)
image_scale = dali.math.min(
scale_factor * output_size[0] / widths,
scale_factor * output_size[1] / heights,
)
scaled_sizes = dali.math.floor(sizes * image_scale + 0.5)
images = dali.fn.resize(
images,
size=scaled_sizes
)
anchors, shapes, bboxes, classes = dali.fn.random_bbox_crop(
bboxes,
classes,
crop_shape=output_size,
input_shape=dali.fn.cast(scaled_sizes, dtype=dali.types.INT32),
bbox_layout="xyXY",
allow_no_crop=False,
total_num_attempts=64,
)
images = dali.fn.slice(
images,
anchors,
shapes,
normalized_anchor=False,
normalized_shape=False,
out_of_bounds_policy="pad"
)
return (
images,
bboxes,
classes,
)
def bbox_to_effdet_format(bboxes, image_size):
w = image_size[0]
h = image_size[1]
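    # M maps each normalized [x_min, y_min, x_max, y_max] box to the y-first
    # absolute layout [y_min * h, x_min * w, y_max * h, x_max * w]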
M = [0.0, h, 0.0, 0.0,
w, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, h,
0.0, 0.0, w, 0.0]
return dali.fn.coord_transform(bboxes, M=M)
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/pipeline/dali/ops_util.py
|
# Copyright 2021 Jagoda Kamińska. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate TFRecord index files necessary when using DALI preprocessing.
Example usage:
python create_tfrecord_indexes.py --tfrecord2idx_script=~/DALI/tools/tfrecord2idx \
--tfrecord_file_pattern=tfrecord/pascal*.tfrecord
"""
from absl import app
from absl import flags
from absl import logging
from glob import glob
from subprocess import call
import os.path
flags.DEFINE_string("tfrecord_file_pattern", None, "Glob for tfrecord files.")
flags.DEFINE_string(
"tfrecord2idx_script", None, "Absolute path to tfrecord2idx script."
)
FLAGS = flags.FLAGS
def main(_):
if FLAGS.tfrecord_file_pattern is None:
raise RuntimeError("Must specify --tfrecord_file_pattern.")
if FLAGS.tfrecord2idx_script is None:
raise RuntimeError("Must specify --tfrecord2idx_script")
tfrecord_files = glob(FLAGS.tfrecord_file_pattern)
tfrecord_idxs = [filename + "_idx" for filename in tfrecord_files]
if not os.path.isfile(FLAGS.tfrecord2idx_script):
raise ValueError(
f"{FLAGS.tfrecord2idx_script} does not lead to valid tfrecord2idx script."
)
for tfrecord, tfrecord_idx in zip(tfrecord_files, tfrecord_idxs):
logging.info(f"Generating index file for {tfrecord}")
call([FLAGS.tfrecord2idx_script, tfrecord, tfrecord_idx])
if __name__ == "__main__":
app.run(main)
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/dataset/create_tfrecord_indexes.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert PASCAL dataset to TFRecord.
Example usage:
python create_pascal_tfrecord.py --data_dir=/tmp/VOCdevkit \
--year=VOC2012 --output_path=/tmp/pascal
"""
import hashlib
import io
import json
import os
from absl import app
from absl import flags
from absl import logging
from lxml import etree
import PIL.Image
import tensorflow as tf
from dataset import tfrecord_util
flags.DEFINE_string("data_dir", "", "Root directory to raw PASCAL VOC dataset.")
flags.DEFINE_string(
"set", "train", "Convert training set, validation set or " "merged set."
)
flags.DEFINE_string(
"annotations_dir", "Annotations", "(Relative) path to annotations directory."
)
flags.DEFINE_string("year", "VOC2007", "Desired challenge year.")
flags.DEFINE_string("output_path", "", "Path to output TFRecord and json.")
flags.DEFINE_string(
"label_map_json_path", None, "Path to label map json file with a dictionary."
)
flags.DEFINE_boolean(
"ignore_difficult_instances", False, "Whether to ignore " "difficult instances"
)
flags.DEFINE_integer("num_shards", 100, "Number of shards for output file.")
flags.DEFINE_integer("num_images", None, "Max number of imags to process.")
FLAGS = flags.FLAGS
SETS = ["train", "val", "trainval", "test"]
YEARS = ["VOC2007", "VOC2012", "merged"]
pascal_label_map_dict = {
"background": 0,
"aeroplane": 1,
"bicycle": 2,
"bird": 3,
"boat": 4,
"bottle": 5,
"bus": 6,
"car": 7,
"cat": 8,
"chair": 9,
"cow": 10,
"diningtable": 11,
"dog": 12,
"horse": 13,
"motorbike": 14,
"person": 15,
"pottedplant": 16,
"sheep": 17,
"sofa": 18,
"train": 19,
"tvmonitor": 20,
}
GLOBAL_IMG_ID = 0 # global image id.
GLOBAL_ANN_ID = 0 # global annotation id.
def get_image_id(filename):
"""Convert a string to a integer."""
# Warning: this function is highly specific to pascal filename!!
# Given filename like '2008_000002', we cannot use id 2008000002 because our
# code internally will convert the int value to float32 and back to int, which
# would cause value mismatch int(float32(2008000002)) != int(2008000002).
# COCO needs int values, here we just use a incremental global_id, but
# users should customize their own ways to generate filename.
del filename
global GLOBAL_IMG_ID
GLOBAL_IMG_ID += 1
return GLOBAL_IMG_ID
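# Illustration of the float32 round trip described above (not part of the
# original script); numpy reproduces the mismatch:
#   >>> import numpy as np
#   >>> int(np.float32(2008000002))
#   2008000000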
def get_ann_id():
"""Return unique annotation id across images."""
global GLOBAL_ANN_ID
GLOBAL_ANN_ID += 1
return GLOBAL_ANN_ID
def dict_to_tf_example(
data,
dataset_directory,
label_map_dict,
ignore_difficult_instances=False,
image_subdirectory="JPEGImages",
ann_json_dict=None,
):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by running
tfrecord_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding PASCAL dataset
label_map_dict: A map from string label names to integers ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the PASCAL dataset
directory holding the actual image data.
ann_json_dict: annotation json dictionary.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(data["folder"], image_subdirectory, data["filename"])
full_path = os.path.join(dataset_directory, img_path)
with tf.io.gfile.GFile(full_path, "rb") as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != "JPEG":
raise ValueError("Image format not JPEG")
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data["size"]["width"])
height = int(data["size"]["height"])
image_id = get_image_id(data["filename"])
if ann_json_dict:
image = {
"file_name": data["filename"],
"height": height,
"width": width,
"id": image_id,
}
ann_json_dict["images"].append(image)
xmin = []
ymin = []
xmax = []
ymax = []
area = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if "object" in data:
for obj in data["object"]:
difficult = bool(int(obj["difficult"]))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj["bndbox"]["xmin"]) / width)
ymin.append(float(obj["bndbox"]["ymin"]) / height)
xmax.append(float(obj["bndbox"]["xmax"]) / width)
ymax.append(float(obj["bndbox"]["ymax"]) / height)
area.append((xmax[-1] - xmin[-1]) * (ymax[-1] - ymin[-1]))
classes_text.append(obj["name"].encode("utf8"))
classes.append(label_map_dict[obj["name"]])
truncated.append(int(obj["truncated"]))
poses.append(obj["pose"].encode("utf8"))
if ann_json_dict:
abs_xmin = int(obj["bndbox"]["xmin"])
abs_ymin = int(obj["bndbox"]["ymin"])
abs_xmax = int(obj["bndbox"]["xmax"])
abs_ymax = int(obj["bndbox"]["ymax"])
abs_width = abs_xmax - abs_xmin
abs_height = abs_ymax - abs_ymin
ann = {
"area": abs_width * abs_height,
"iscrowd": 0,
"image_id": image_id,
"bbox": [abs_xmin, abs_ymin, abs_width, abs_height],
"category_id": label_map_dict[obj["name"]],
"id": get_ann_id(),
"ignore": 0,
"segmentation": [],
}
ann_json_dict["annotations"].append(ann)
example = tf.train.Example(
features=tf.train.Features(
feature={
"image/height": tfrecord_util.int64_feature(height),
"image/width": tfrecord_util.int64_feature(width),
"image/filename": tfrecord_util.bytes_feature(
data["filename"].encode("utf8")
),
"image/source_id": tfrecord_util.bytes_feature(
str(image_id).encode("utf8")
),
"image/key/sha256": tfrecord_util.bytes_feature(key.encode("utf8")),
"image/encoded": tfrecord_util.bytes_feature(encoded_jpg),
"image/format": tfrecord_util.bytes_feature("jpeg".encode("utf8")),
"image/object/bbox/xmin": tfrecord_util.float_list_feature(xmin),
"image/object/bbox/xmax": tfrecord_util.float_list_feature(xmax),
"image/object/bbox/ymin": tfrecord_util.float_list_feature(ymin),
"image/object/bbox/ymax": tfrecord_util.float_list_feature(ymax),
"image/object/area": tfrecord_util.float_list_feature(area),
"image/object/class/text": tfrecord_util.bytes_list_feature(
classes_text
),
"image/object/class/label": tfrecord_util.int64_list_feature(classes),
"image/object/difficult": tfrecord_util.int64_list_feature(
difficult_obj
),
"image/object/truncated": tfrecord_util.int64_list_feature(truncated),
"image/object/view": tfrecord_util.bytes_list_feature(poses),
}
)
)
return example
def main(_):
if FLAGS.set not in SETS:
raise ValueError("set must be in : {}".format(SETS))
if FLAGS.year not in YEARS:
raise ValueError("year must be in : {}".format(YEARS))
if not FLAGS.output_path:
raise ValueError("output_path cannot be empty.")
data_dir = FLAGS.data_dir
years = ["VOC2007", "VOC2012"]
if FLAGS.year != "merged":
years = [FLAGS.year]
output_dir = os.path.dirname(FLAGS.output_path)
if not tf.io.gfile.exists(output_dir):
tf.io.gfile.makedirs(output_dir)
logging.info("Writing to output directory: %s", output_dir)
writers = [
tf.io.TFRecordWriter(
FLAGS.output_path + "-%05d-of-%05d.tfrecord" % (i, FLAGS.num_shards)
)
for i in range(FLAGS.num_shards)
]
if FLAGS.label_map_json_path:
with tf.io.gfile.GFile(FLAGS.label_map_json_path, "rb") as f:
label_map_dict = json.load(f)
else:
label_map_dict = pascal_label_map_dict
ann_json_dict = {
"images": [],
"type": "instances",
"annotations": [],
"categories": [],
}
for year in years:
example_class = list(label_map_dict.keys())[1]
examples_path = os.path.join(
data_dir,
year,
"ImageSets",
"Main",
example_class + "_" + FLAGS.set + ".txt",
)
examples_list = tfrecord_util.read_examples_list(examples_path)
annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
for class_name, class_id in label_map_dict.items():
cls = {"supercategory": "none", "id": class_id, "name": class_name}
ann_json_dict["categories"].append(cls)
logging.info("Reading from PASCAL %s dataset.", year)
for idx, example in enumerate(examples_list):
if FLAGS.num_images and idx >= FLAGS.num_images:
break
if idx % 100 == 0:
logging.info("On image %d of %d", idx, len(examples_list))
path = os.path.join(annotations_dir, example + ".xml")
with tf.io.gfile.GFile(path, "r") as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = tfrecord_util.recursive_parse_xml_to_dict(xml)["annotation"]
tf_example = dict_to_tf_example(
data,
FLAGS.data_dir,
label_map_dict,
FLAGS.ignore_difficult_instances,
ann_json_dict=ann_json_dict,
)
writers[idx % FLAGS.num_shards].write(tf_example.SerializeToString())
for writer in writers:
writer.close()
json_file_path = os.path.join(
os.path.dirname(FLAGS.output_path),
"json_" + os.path.basename(FLAGS.output_path) + ".json",
)
with tf.io.gfile.GFile(json_file_path, "w") as f:
json.dump(ann_json_dict, f)
if __name__ == "__main__":
app.run(main)
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/dataset/create_pascal_tfrecord.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
from absl import logging
from six.moves import range
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError("Label map ids should be >= 0.")
if (
item.id == 0
and item.name != "background"
and item.display_name != "background"
):
raise ValueError("Label map id 0 is reserved for the background label")
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat["id"]] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map, max_num_classes, use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
'keypoints': (optional) a dictionary of keypoint string 'label' to integer
'id'.
    We only allow a class into the list if its id - label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append(
{
"id": class_id + label_id_offset,
"name": "category_{}".format(class_id + label_id_offset),
}
)
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
"Ignore item %d since it falls outside of requested " "label range.",
item.id,
)
continue
if use_display_name and item.HasField("display_name"):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
category = {"id": item.id, "name": name}
if item.keypoints:
keypoints = {}
list_of_keypoint_ids = []
for kv in item.keypoints:
if kv.id in list_of_keypoint_ids:
raise ValueError(
"Duplicate keypoint ids are not allowed. "
"Found {} more than once".format(kv.id)
)
keypoints[kv.label] = kv.id
list_of_keypoint_ids.append(kv.id)
category["keypoints"] = keypoints
categories.append(category)
return categories
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {"id": 1, "name": "object"}}
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/dataset/label_map_util.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This library is mostly based on tensorflow object detection API
# https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_coco_tf_record.py
r"""Convert raw COCO 2017 dataset to TFRecord.
Example usage:
python create_coco_tf_record.py --logtostderr \
--image_dir="${TRAIN_IMAGE_DIR}" \
--image_info_file="${TRAIN_IMAGE_INFO_FILE}" \
--object_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--caption_annotations_file="${CAPTION_ANNOTATIONS_FILE}" \
--output_file_prefix="${OUTPUT_DIR/FILE_PREFIX}" \
--num_shards=100
"""
import collections
import hashlib
import io
import json
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import PIL.Image
import tensorflow as tf
import tfrecord_util
import label_map_util
flags.DEFINE_string("image_dir", "", "Directory containing images.")
flags.DEFINE_string(
"image_info_file",
"",
"File containing image information. "
"Tf Examples in the output files correspond to the image "
"info entries in this file. If this file is not provided "
"object_annotations_file is used if present. Otherwise, "
"caption_annotations_file is used to get image info.",
)
flags.DEFINE_string(
"object_annotations_file",
"",
"File containing object " "annotations - boxes.",
)
flags.DEFINE_string(
"caption_annotations_file", "", "File containing image " "captions."
)
flags.DEFINE_string("output_file_prefix", "/tmp/train", "Path to output file")
flags.DEFINE_integer("num_shards", 32, "Number of shards for output file.")
flags.DEFINE_integer("num_threads", None, "Number of threads to run.")
FLAGS = flags.FLAGS
def create_tf_example(
image,
image_dir,
bbox_annotations=None,
category_index=None,
caption_annotations=None,
):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
image_dir: directory containing the image files.
bbox_annotations:
list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
coordinates in the official COCO dataset are given as [x, y, width,
height] tuples using absolute coordinates where x, y represent the
top-left (0-indexed) corner. This function converts to the format
        expected by the Tensorflow Object Detection API (which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to image
size).
category_index: a dict containing COCO category information keyed by the
'id' field of each category. See the label_map_util.create_category_index
function.
caption_annotations:
list of dict with keys: [u'id', u'image_id', u'str'].
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image["height"]
image_width = image["width"]
filename = image["file_name"]
image_id = image["id"]
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, "rb") as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
feature_dict = {
"image/height": tfrecord_util.int64_feature(image_height),
"image/width": tfrecord_util.int64_feature(image_width),
"image/filename": tfrecord_util.bytes_feature(filename.encode("utf8")),
"image/source_id": tfrecord_util.bytes_feature(str(image_id).encode("utf8")),
"image/key/sha256": tfrecord_util.bytes_feature(key.encode("utf8")),
"image/encoded": tfrecord_util.bytes_feature(encoded_jpg),
"image/format": tfrecord_util.bytes_feature("jpeg".encode("utf8")),
}
num_annotations_skipped = 0
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
if bbox_annotations:
for object_annotations in bbox_annotations:
(x, y, width, height) = tuple(object_annotations["bbox"])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
is_crowd.append(object_annotations["iscrowd"])
category_id = int(object_annotations["category_id"])
category_ids.append(category_id)
category_names.append(category_index[category_id]["name"].encode("utf8"))
area.append(object_annotations["area"])
feature_dict.update(
{
"image/object/bbox/xmin": tfrecord_util.float_list_feature(xmin),
"image/object/bbox/xmax": tfrecord_util.float_list_feature(xmax),
"image/object/bbox/ymin": tfrecord_util.float_list_feature(ymin),
"image/object/bbox/ymax": tfrecord_util.float_list_feature(ymax),
"image/object/class/text": tfrecord_util.bytes_list_feature(category_names),
"image/object/class/label": tfrecord_util.int64_list_feature(category_ids),
"image/object/is_crowd": tfrecord_util.int64_list_feature(is_crowd),
"image/object/area": tfrecord_util.float_list_feature(area),
}
)
if caption_annotations:
captions = []
for caption_annotation in caption_annotations:
captions.append(caption_annotation["caption"].encode("utf8"))
feature_dict.update(
{"image/caption": tfrecord_util.bytes_list_feature(captions)}
)
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return key, example, num_annotations_skipped
def _pool_create_tf_example(args):
return create_tf_example(*args)
def _load_object_annotations(object_annotations_file):
"""Loads object annotation JSON file."""
with tf.io.gfile.GFile(object_annotations_file, "r") as fid:
obj_annotations = json.load(fid)
images = obj_annotations["images"]
category_index = label_map_util.create_category_index(obj_annotations["categories"])
img_to_obj_annotation = collections.defaultdict(list)
logging.info("Building bounding box index.")
for annotation in obj_annotations["annotations"]:
image_id = annotation["image_id"]
img_to_obj_annotation[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image["id"]
if image_id not in img_to_obj_annotation:
missing_annotation_count += 1
logging.info("%d images are missing bboxes.", missing_annotation_count)
return img_to_obj_annotation, category_index
def _load_caption_annotations(caption_annotations_file):
"""Loads caption annotation JSON file."""
with tf.io.gfile.GFile(caption_annotations_file, "r") as fid:
caption_annotations = json.load(fid)
img_to_caption_annotation = collections.defaultdict(list)
logging.info("Building caption index.")
for annotation in caption_annotations["annotations"]:
image_id = annotation["image_id"]
img_to_caption_annotation[image_id].append(annotation)
missing_annotation_count = 0
images = caption_annotations["images"]
for image in images:
image_id = image["id"]
if image_id not in img_to_caption_annotation:
missing_annotation_count += 1
logging.info("%d images are missing captions.", missing_annotation_count)
return img_to_caption_annotation
def _load_images_info(image_info_file):
with tf.io.gfile.GFile(image_info_file, "r") as fid:
info_dict = json.load(fid)
return info_dict["images"]
def _create_tf_record_from_coco_annotations(
image_info_file,
image_dir,
output_path,
num_shards,
object_annotations_file=None,
caption_annotations_file=None,
):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
image_info_file: JSON file containing image info. The number of tf.Examples
in the output tf Record files is exactly equal to the number of image info
entries in this file. This can be any of train/val/test annotation json
files Eg. 'image_info_test-dev2017.json',
'instance_annotations_train2017.json',
'caption_annotations_train2017.json', etc.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
num_shards: Number of output files to create.
object_annotations_file: JSON file containing bounding box annotations.
caption_annotations_file: JSON file containing caption annotations.
"""
logging.info("writing to output path: %s", output_path)
writers = [
tf.io.TFRecordWriter(output_path + "-%05d-of-%05d.tfrecord" % (i, num_shards))
for i in range(num_shards)
]
images = _load_images_info(image_info_file)
img_to_obj_annotation = None
img_to_caption_annotation = None
category_index = None
if object_annotations_file:
img_to_obj_annotation, category_index = _load_object_annotations(
object_annotations_file
)
if caption_annotations_file:
img_to_caption_annotation = _load_caption_annotations(caption_annotations_file)
def _get_object_annotation(image_id):
if img_to_obj_annotation:
return img_to_obj_annotation[image_id]
else:
return None
def _get_caption_annotation(image_id):
if img_to_caption_annotation:
return img_to_caption_annotation[image_id]
else:
return None
pool = multiprocessing.Pool(FLAGS.num_threads)
total_num_annotations_skipped = 0
for idx, (_, tf_example, num_annotations_skipped) in enumerate(
pool.imap(
_pool_create_tf_example,
[
(
image,
image_dir,
_get_object_annotation(image["id"]),
category_index,
_get_caption_annotation(image["id"]),
)
for image in images
],
)
):
if idx % 100 == 0:
logging.info("On image %d of %d", idx, len(images))
total_num_annotations_skipped += num_annotations_skipped
writers[idx % num_shards].write(tf_example.SerializeToString())
pool.close()
pool.join()
for writer in writers:
writer.close()
logging.info(
"Finished writing, skipped %d annotations.", total_num_annotations_skipped
)
def main(_):
    assert FLAGS.image_dir, "`image_dir` missing."
    assert (
        FLAGS.image_info_file
        or FLAGS.object_annotations_file
        or FLAGS.caption_annotations_file
    ), "All annotation files are missing."
if FLAGS.image_info_file:
image_info_file = FLAGS.image_info_file
elif FLAGS.object_annotations_file:
image_info_file = FLAGS.object_annotations_file
else:
image_info_file = FLAGS.caption_annotations_file
directory = os.path.dirname(FLAGS.output_file_prefix)
if not tf.io.gfile.isdir(directory):
tf.io.gfile.mkdir(directory)
_create_tf_record_from_coco_annotations(
image_info_file,
FLAGS.image_dir,
FLAGS.output_file_prefix,
FLAGS.num_shards,
FLAGS.object_annotations_file,
FLAGS.caption_annotations_file,
)
if __name__ == "__main__":
app.run(main)
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/dataset/create_coco_tfrecord.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TFRecord related utilities."""
from six.moves import range
import tensorflow as tf
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
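# Illustrative use of the helpers above (all values are made up):
#   example = tf.train.Example(features=tf.train.Features(feature={
#       "image/height": int64_feature(480),
#       "image/encoded": bytes_feature(b"\xff\xd8..."),
#       "image/object/bbox/xmin": float_list_feature([0.1, 0.4]),
#   }))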
def read_examples_list(path):
"""Read list of training or validation examples.
The file is assumed to contain a single example per line where the first
token in the line is an identifier that allows us to find the image and
annotation xml for that example.
For example, the line:
xyz 3
would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).
Args:
path: absolute path to examples list file.
Returns:
list of example identifiers (strings).
"""
with tf.io.gfile.GFile(path) as fid:
lines = fid.readlines()
return [line.strip().split(" ")[0] for line in lines]
def recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
    if len(xml) == 0:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != "object":
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
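# Illustrative example of the recursion above, given an lxml tree:
#   xml = etree.fromstring(
#       "<annotation><filename>a.jpg</filename>"
#       "<object><name>cat</name></object>"
#       "<object><name>dog</name></object></annotation>")
#   recursive_parse_xml_to_dict(xml)
#   # {'annotation': {'filename': 'a.jpg',
#   #                 'object': [{'name': 'cat'}, {'name': 'dog'}]}}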
def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
"""Opens all TFRecord shards for writing and adds them to an exit stack.
Args:
        exit_stack: A contextlib2.ExitStack used to automatically close the
            TFRecords opened in this function.
base_path: The base path for all shards
num_shards: The number of shards
Returns:
The list of opened TFRecords. Position k in the list corresponds to shard k.
"""
tf_record_output_filenames = [
"{}-{:05d}-of-{:05d}".format(base_path, idx, num_shards)
for idx in range(num_shards)
]
tfrecords = [
exit_stack.enter_context(tf.io.TFRecordWriter(file_name))
for file_name in tf_record_output_filenames
]
return tfrecords
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/dataset/tfrecord_util.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras implementation of efficientdet."""
import functools
from absl import logging
import tensorflow as tf
import hparams_config
import utils
from .backbone import efficientnet_builder
from .utils import postprocess
from .utils import layers
from .utils import losses
import re
# pylint: disable=arguments-differ  # for keras layers.
class EfficientDetNet(tf.keras.Model):
"""EfficientDet keras network with train and test step."""
def __init__(self, model_name=None, params=None, name=""):
"""Initialize model."""
super().__init__(name=name)
self.train_metrics = {
"mean_loss_tracker": tf.keras.metrics.Mean(name="mean_loss"),
"loss_tracker": tf.keras.metrics.Mean(name="loss"),
"lr_tracker": tf.keras.metrics.Mean(name="lr"),
}
self.train_metrics = utils.dict_to_namedtuple(self.train_metrics)
self.mAP_tracker = tf.keras.metrics.Mean(name="mAP")
if params:
self.config = hparams_config.Config(params)
else:
self.config = hparams_config.get_efficientdet_config(model_name)
config = self.config
# Backbone.
backbone_name = config.backbone_name
if "efficientnet" in backbone_name:
override_params = {
"relu_fn": functools.partial(
utils.activation_fn, act_type=config.act_type
),
"grad_checkpoint": self.config.grad_checkpoint,
}
if "b0" in backbone_name:
override_params["survival_prob"] = 0.0
if config.backbone_config is not None:
override_params[
"blocks_args"
] = efficientnet_builder.BlockDecoder().encode(
config.backbone_config.blocks
)
override_params["data_format"] = config.data_format
self.backbone = efficientnet_builder.get_model(
backbone_name, override_params=override_params
)
# Feature network.
self.resample_layers = [] # additional resampling layers.
for level in range(6, config.max_level + 1):
# Adds a coarser level by downsampling the last feature map.
self.resample_layers.append(
layers.ResampleFeatureMap(
feat_level=(level - config.min_level),
target_num_channels=config.fpn_num_filters,
apply_bn=config.apply_bn_for_resampling,
conv_after_downsample=config.conv_after_downsample,
data_format=config.data_format,
name="resample_p%d" % level,
)
)
self.fpn_cells = layers.FPNCells(config)
# class/box output prediction network.
num_anchors = len(config.aspect_ratios) * config.num_scales
num_filters = config.fpn_num_filters
self.class_net = layers.ClassNet(
num_classes=config.num_classes,
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
grad_checkpoint=config.grad_checkpoint,
data_format=config.data_format,
)
self.box_net = layers.BoxNet(
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
grad_checkpoint=config.grad_checkpoint,
data_format=config.data_format,
)
def _freeze_vars(self):
if self.config.var_freeze_expr:
return [
v
for v in self.trainable_variables
if not re.match(self.config.var_freeze_expr, v.name)
]
return self.trainable_variables
def _reg_l2_loss(self, weight_decay, regex=r".*(kernel|weight):0$"):
"""Return regularization l2 loss loss."""
var_match = re.compile(regex)
return weight_decay * tf.add_n(
[
tf.nn.l2_loss(v)
for v in self.trainable_variables
if var_match.match(v.name)
]
)
def _unpack_inputs(self, inputs):
config = self.config
features, num_pos, _, _, *targets = inputs
labels = {}
for level in range(config.min_level, config.max_level + 1):
i = 2 * (level - config.min_level)
labels["cls_targets_%d" % level] = targets[i]
labels["box_targets_%d" % level] = targets[i + 1]
labels["mean_num_positives"] = tf.reshape(
tf.tile(tf.expand_dims(tf.reduce_mean(num_pos), 0), [config.batch_size]),
[config.batch_size, 1],
)
return features, labels
def _unpack_outputs(self, cls_out_list, box_out_list):
config = self.config
min_level = config.min_level
max_level = config.max_level
cls_outputs, box_outputs = {}, {}
for i in range(min_level, max_level + 1):
cls_outputs[i] = cls_out_list[i - min_level]
box_outputs[i] = box_out_list[i - min_level]
return cls_outputs, box_outputs
def _calc_mAP(self, pred_boxes, pred_scores, pred_classes, gt_boxes, gt_classes):
def iou(box1, box2):
l = max(box1[0], box2[0])
t = max(box1[1], box2[1])
r = min(box1[2], box2[2])
b = min(box1[3], box2[3])
i = max(0, r - l) * max(0, b - t)
u = (
(box1[2] - box1[0]) * (box1[3] - box1[1])
+ (box2[2] - box2[0]) * (box2[3] - box2[1])
- i
)
return i / u
batch_size = pred_boxes.shape[0]
num_pred_boxes = 0
num_gt_boxes = 0
num_true_positives = 0
stats = []
for batch_idx in range(batch_size):
pred_num_positives = tf.math.count_nonzero(pred_scores[batch_idx, :] > 0.25)
gt_num = tf.math.count_nonzero(gt_classes > -1)
gt_used_idx = []
num_pred_boxes += pred_num_positives
num_gt_boxes += gt_num
# for pred_idx, (pred_box, pred_class) in enumerate(zip(pred_boxes, pred_classes)):
for pred_idx in range(pred_num_positives):
pred_box = pred_boxes[batch_idx, pred_idx]
pred_class = pred_classes[batch_idx, pred_idx]
found = False
# for gt_idx, (gt_box, gt_class) in enumerate(zip(boxes, classes)):
for gt_idx in range(gt_num):
if gt_idx in gt_used_idx:
continue
gt_box = gt_boxes[batch_idx, gt_idx]
gt_class = gt_classes[batch_idx, gt_idx]
if pred_class != gt_class:
continue
if iou(pred_box, gt_box) < 0.5:
continue
found = True
num_true_positives += 1
break
stats.append((pred_scores[batch_idx, pred_idx], found))
if num_pred_boxes == 0:
return 0.0
ap = 0.0
max_prec = num_true_positives / num_pred_boxes
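        # Sweep predictions from the lowest score upward, shrinking the
        # prediction set; whenever a true positive drops out, credit the
        # running maximum precision per ground-truth box, an interpolated-AP
        # style accumulation.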
for _, found in sorted(stats):
if found:
ap += max_prec / tf.cast(num_gt_boxes, dtype=tf.float64)
num_true_positives -= 1
num_pred_boxes -= 1
if num_pred_boxes == 0:
break
max_prec = tf.math.maximum(max_prec, num_true_positives / num_pred_boxes)
return ap
def call(self, inputs, training):
config = self.config
# call backbone network.
all_feats = self.backbone(inputs, training=training, features_only=True)
feats = all_feats[config.min_level : config.max_level + 1]
# Build additional input features that are not from backbone.
for resample_layer in self.resample_layers:
feats.append(resample_layer(feats[-1], training, None))
# call feature network.
fpn_feats = self.fpn_cells(feats, training)
# call class/box output network.
class_outputs = self.class_net(fpn_feats, training)
box_outputs = self.box_net(fpn_feats, training)
return (class_outputs, box_outputs)
def train_step(self, inputs):
config = self.config
features, labels = self._unpack_inputs(inputs)
with tf.GradientTape() as tape:
cls_out_list, box_out_list = self.call(features, training=True)
cls_outputs, box_outputs = self._unpack_outputs(cls_out_list, box_out_list)
# cls_loss and box_loss are for logging. only total_loss is optimized.
det_loss, cls_loss, box_loss = losses.detection_loss(
cls_outputs, box_outputs, labels, config
)
reg_l2loss = self._reg_l2_loss(config.weight_decay)
total_loss = det_loss + reg_l2loss
trainable_vars = self._freeze_vars()
gradients = tape.gradient(total_loss, trainable_vars)
if config.clip_gradients_norm:
clip_norm = abs(config.clip_gradients_norm)
gradients = [
tf.clip_by_norm(g, clip_norm) if g is not None else None
for g in gradients
]
gradients, _ = tf.clip_by_global_norm(gradients, clip_norm)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.train_metrics.mean_loss_tracker.update_state(total_loss)
self.train_metrics.loss_tracker.reset_states()
self.train_metrics.loss_tracker.update_state(total_loss)
self.train_metrics.lr_tracker.reset_states()
self.train_metrics.lr_tracker.update_state(
self.optimizer.lr(self.optimizer.iterations)
)
return {m.name: m.result() for m in self.train_metrics}
def test_step(self, inputs):
features, _, gt_boxes, gt_classes, *_ = inputs
# tf.print(gt_boxes, gt_classes)
# gt_boxes = tf.stack(
# [
# gt_boxes[..., 0] * self.config.image_size[0],
# gt_boxes[..., 1] * self.config.image_size[1],
# gt_boxes[..., 2] * self.config.image_size[0],
# gt_boxes[..., 3] * self.config.image_size[1],
# ],
# axis=-1,
# )
cls_out_list, box_out_list = self.call(features, training=False)
ltrb, scores, classes, _ = postprocess.postprocess_per_class(
self.config, cls_out_list, box_out_list
)
classes = tf.cast(classes, dtype=tf.int32)
ap = tf.py_function(
func=self._calc_mAP,
inp=[ltrb, scores, classes, gt_boxes, gt_classes],
Tout=tf.float64,
)
self.mAP_tracker.update_state(ap)
return {m.name: m.result() for m in [self.mAP_tracker]}
@property
def metrics(self):
return list(self.train_metrics) + [self.mAP_tracker]
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/efficientdet_net.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BiFPN/QuFPN and other FPN configs.
BiFPN is presented in the EfficientDet paper.
QuFPN is proposed in https://github.com/google/automl/pull/580
"""
import itertools
import hparams_config
def bifpn_config(min_level, max_level, weight_method):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = hparams_config.Config()
p.weight_method = weight_method or "fastattn"
# Node id starts from the input features and monotonically increase whenever
# a new node is added. Here is an example for level P3 - P7:
# P7 (4) P7" (12)
# P6 (3) P6' (5) P6" (11)
# P5 (2) P5' (6) P5" (10)
# P4 (1) P4' (7) P4" (9)
# P3 (0) P3" (8)
# So output would be like:
# [
# {'feat_level': 6, 'inputs_offsets': [3, 4]}, # for P6'
# {'feat_level': 5, 'inputs_offsets': [2, 5]}, # for P5'
# {'feat_level': 4, 'inputs_offsets': [1, 6]}, # for P4'
# {'feat_level': 3, 'inputs_offsets': [0, 7]}, # for P3"
# {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}, # for P4"
# {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}, # for P5"
# {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}, # for P6"
# {'feat_level': 7, 'inputs_offsets': [4, 11]}, # for P7"
# ]
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": [level_last_id(i), level_last_id(i + 1)],
}
)
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": level_all_ids(i) + [level_last_id(i - 1)],
}
)
node_ids[i].append(next(id_cnt))
return p
def qufpn_config(min_level, max_level, weight_method=None):
"""A dynamic quad fpn config that can adapt to different min/max levels."""
# It extends the idea of BiFPN, and has four paths:
# (up_down -> bottom_up) + (bottom_up -> up_down).
# See test for an example for level 2 and 7.
p = hparams_config.Config()
p.weight_method = weight_method or "fastattn"
p.quad_method = "fastattn"
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
level_first_id = lambda level: node_ids[level][0]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path 1.
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": [level_last_id(i), level_last_id(i + 1)],
"weight_method": p.weight_method,
}
)
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(min_level + 1, max_level):
# bottom-up path 2.
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": level_all_ids(i) + [level_last_id(i - 1)],
"weight_method": p.weight_method,
}
)
node_ids[i].append(next(id_cnt))
i = max_level
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": [level_first_id(i)] + [level_last_id(i - 1)],
"weight_method": p.weight_method,
}
)
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(min_level + 1, max_level + 1, 1):
# bottom-up path 3.
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": [
level_first_id(i),
level_last_id(i - 1)
if i != min_level + 1
else level_first_id(i - 1),
],
"weight_method": p.weight_method,
}
)
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(max_level - 1, min_level, -1):
# top-down path 4.
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": [node_ids[i][0]]
+ [node_ids[i][-1]]
+ [level_last_id(i + 1)],
"weight_method": p.weight_method,
}
)
node_ids[i].append(next(id_cnt))
i = min_level
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": [node_ids[i][0]] + [level_last_id(i + 1)],
"weight_method": p.weight_method,
}
)
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(max_level, min_level - 1, -1):
# quad-add path.
p.nodes.append(
{
"feat_level": i,
"inputs_offsets": [node_ids[i][2], node_ids[i][4]],
"weight_method": p.quad_method,
}
)
node_ids[i].append(next(id_cnt))
return p
def get_fpn_config(fpn_name, min_level, max_level, weight_method):
"""Get fpn related configuration."""
if not fpn_name:
fpn_name = "bifpn"
name_to_config = {
"bifpn": bifpn_config(min_level, max_level, weight_method),
"qufpn": qufpn_config(min_level, max_level, weight_method),
# legacy only: to be deprecated.
"bifpn_dyn": bifpn_config(min_level, max_level, weight_method),
}
return name_to_config[fpn_name]
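# Example (illustrative): get_fpn_config("bifpn", 3, 7, None).nodes contains
# the eight fusion nodes sketched in bifpn_config's comment above, starting
# with {'feat_level': 6, 'inputs_offsets': [3, 4]} for P6'.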
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/utils/fpn_configs.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Postprocessing for anchor-based detection."""
from typing import List, Tuple
from absl import logging
import tensorflow as tf
from pipeline import anchors
T = tf.Tensor # a shortcut for typing check.
CLASS_OFFSET = 1
def to_list(inputs):
if isinstance(inputs, dict):
return [inputs[k] for k in sorted(inputs.keys())]
if isinstance(inputs, list):
return inputs
raise ValueError("Unrecognized inputs : {}".format(inputs))
def batch_map_fn(map_fn, inputs, *args):
"""Apply map_fn at batch dimension."""
if isinstance(inputs[0], (list, tuple)):
batch_size = len(inputs[0])
else:
batch_size = inputs[0].shape.as_list()[0]
if not batch_size:
# handle dynamic batch size: tf.vectorized_map is faster than tf.map_fn.
return tf.vectorized_map(map_fn, inputs, *args)
outputs = []
for i in range(batch_size):
outputs.append(map_fn([x[i] for x in inputs]))
return [tf.stack(y) for y in zip(*outputs)]
def merge_class_box_level_outputs(
params, cls_outputs: List[T], box_outputs: List[T]
) -> Tuple[T, T]:
"""Concatenates class and box of all levels into one tensor."""
cls_outputs_all, box_outputs_all = [], []
batch_size = tf.shape(cls_outputs[0])[0]
for level in range(0, params["max_level"] - params["min_level"] + 1):
if params["data_format"] == "channels_first":
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, params["num_classes"]])
)
box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
return tf.concat(cls_outputs_all, 1), tf.concat(box_outputs_all, 1)
def topk_class_boxes(params, cls_outputs: T, box_outputs: T) -> Tuple[T, T, T, T]:
"""Pick the topk class and box outputs."""
batch_size = tf.shape(cls_outputs)[0]
num_classes = params["num_classes"]
max_nms_inputs = params["nms_configs"].get("max_nms_inputs", 0)
if max_nms_inputs > 0:
# Prune anchors and detections to only keep max_nms_inputs.
        # Due to some issues, top_k is currently slow in graph mode.
logging.info("use max_nms_inputs for pre-nms topk.")
cls_outputs_reshape = tf.reshape(cls_outputs, [batch_size, -1])
_, cls_topk_indices = tf.math.top_k(
cls_outputs_reshape, k=max_nms_inputs, sorted=False
)
indices = cls_topk_indices // num_classes
classes = cls_topk_indices % num_classes
cls_indices = tf.stack([indices, classes], axis=2)
cls_outputs_topk = tf.gather_nd(cls_outputs, cls_indices, batch_dims=1)
box_outputs_topk = tf.gather_nd(
box_outputs, tf.expand_dims(indices, 2), batch_dims=1
)
else:
logging.info("use max_reduce for pre-nms topk.")
        # Keep all anchors, but for each anchor, just keep the max probability
        # across classes.
cls_outputs_idx = tf.math.argmax(cls_outputs, axis=-1, output_type=tf.int32)
num_anchors = tf.shape(cls_outputs)[1]
classes = cls_outputs_idx
indices = tf.tile(
tf.expand_dims(tf.range(num_anchors), axis=0), [batch_size, 1]
)
cls_outputs_topk = tf.reduce_max(cls_outputs, -1)
box_outputs_topk = box_outputs
return cls_outputs_topk, box_outputs_topk, classes, indices
def pre_nms(params, cls_outputs, box_outputs, topk=True):
"""Detection post processing before nms.
It takes the multi-level class and box predictions from network, merge them
into unified tensors, and compute boxes, scores, and classes.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
        box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors].
topk: if True, select topk before nms (mainly to speed up nms).
Returns:
A tuple of (boxes, scores, classes).
"""
# get boxes by apply bounding box regression to anchors.
eval_anchors = anchors.Anchors(
params["min_level"],
params["max_level"],
params["num_scales"],
params["aspect_ratios"],
params["anchor_scale"],
params["image_size"],
)
cls_outputs, box_outputs = merge_class_box_level_outputs(
params, cls_outputs, box_outputs
)
if topk:
# select topK purely based on scores before NMS, in order to speed up nms.
cls_outputs, box_outputs, classes, indices = topk_class_boxes(
params, cls_outputs, box_outputs
)
anchor_boxes = tf.gather(eval_anchors.boxes, indices)
else:
anchor_boxes = eval_anchors.boxes
classes = None
boxes = anchors.decode_box_outputs(box_outputs, anchor_boxes)
# convert logits to scores.
scores = tf.math.sigmoid(cls_outputs)
return boxes, scores, classes
def nms(params, boxes: T, scores: T, classes: T, padded: bool) -> Tuple[T, T, T, T]:
"""Non-maximum suppression.
Args:
params: a dict of parameters.
boxes: a tensor with shape [N, 4], where N is the number of boxes. Box
format is [y_min, x_min, y_max, x_max].
scores: a tensor with shape [N].
classes: a tensor with shape [N].
        padded: a bool value indicating whether the results are padded.
Returns:
A tuple (boxes, scores, classes, valid_lens), where valid_lens is a scalar
denoting the valid length of boxes/scores/classes outputs.
"""
nms_configs = params["nms_configs"]
method = nms_configs["method"]
max_output_size = nms_configs["max_output_size"]
if method == "hard" or not method:
# hard nms.
sigma = 0.0
iou_thresh = nms_configs["iou_thresh"] or 0.5
score_thresh = nms_configs["score_thresh"] or float("-inf")
elif method == "gaussian":
sigma = nms_configs["sigma"] or 0.5
iou_thresh = 1.0
score_thresh = nms_configs["score_thresh"] or 0.001
else:
raise ValueError("Inference has invalid nms method {}".format(method))
    # TF API's sigma is twice the paper's value, so here we divide it by 2:
# https://github.com/tensorflow/tensorflow/issues/40253.
nms_top_idx, nms_scores, nms_valid_lens = tf.raw_ops.NonMaxSuppressionV5(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
soft_nms_sigma=(sigma / 2),
pad_to_max_output_size=padded,
)
nms_boxes = tf.gather(boxes, nms_top_idx)
nms_classes = tf.cast(tf.gather(classes, nms_top_idx) + CLASS_OFFSET, tf.float32)
return nms_boxes, nms_scores, nms_classes, nms_valid_lens
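# Illustrative sketch, not part of the original file: calling the same raw op
# directly on two identical toy boxes to show the soft-NMS score decay. With
# soft_nms_sigma > 0 the overlapping box is down-weighted instead of dropped.
def _demo_soft_nms():
    import tensorflow as tf

    boxes = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
    scores = tf.constant([0.9, 0.8])
    idx, soft_scores, valid_len = tf.raw_ops.NonMaxSuppressionV5(
        boxes=boxes,
        scores=scores,
        max_output_size=2,
        iou_threshold=1.0,    # gaussian mode disables the hard IoU cut
        score_threshold=0.0,
        soft_nms_sigma=0.25,  # i.e. a paper sigma of 0.5, divided by 2
        pad_to_max_output_size=False,
    )
    # Second box survives with score ~ 0.8 * exp(-1.0 / 0.25) ~ 0.015.
    return idx, soft_scores, valid_len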
def per_class_nms(params, boxes, scores, classes, image_scales=None):
"""Per-class nms, a utility for postprocess_per_class.
Args:
params: a dict of parameters.
boxes: A tensor with shape [N, K, 4], where N is batch_size, K is num_boxes.
Box format is [y_min, x_min, y_max, x_max].
scores: A tensor with shape [N, K].
classes: A tensor with shape [N, K].
        image_scales: scaling factors for the final image and bounding boxes.
Returns:
        A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
def single_batch_fn(element):
"""A mapping function for a single batch."""
boxes_i, scores_i, classes_i = element[0], element[1], element[2]
nms_boxes_cls, nms_scores_cls, nms_classes_cls = [], [], []
nms_valid_len_cls = []
for cid in range(params["num_classes"]):
indices = tf.where(tf.equal(classes_i, cid))
if indices.shape[0] == 0:
continue
classes_cls = tf.gather_nd(classes_i, indices)
boxes_cls = tf.gather_nd(boxes_i, indices)
scores_cls = tf.gather_nd(scores_i, indices)
nms_boxes, nms_scores, nms_classes, nms_valid_len = nms(
params, boxes_cls, scores_cls, classes_cls, False
)
nms_boxes_cls.append(nms_boxes)
nms_scores_cls.append(nms_scores)
nms_classes_cls.append(nms_classes)
nms_valid_len_cls.append(nms_valid_len)
# Pad zeros and select topk.
max_output_size = params["nms_configs"].get("max_output_size", 100)
nms_boxes_cls = tf.pad(
tf.concat(nms_boxes_cls, 0), [[0, max_output_size], [0, 0]]
)
nms_scores_cls = tf.pad(tf.concat(nms_scores_cls, 0), [[0, max_output_size]])
nms_classes_cls = tf.pad(tf.concat(nms_classes_cls, 0), [[0, max_output_size]])
nms_valid_len_cls = tf.stack(nms_valid_len_cls)
_, indices = tf.math.top_k(nms_scores_cls, k=max_output_size, sorted=True)
return tuple(
(
tf.gather(nms_boxes_cls, indices),
tf.gather(nms_scores_cls, indices),
tf.gather(nms_classes_cls, indices),
tf.minimum(max_output_size, tf.reduce_sum(nms_valid_len_cls)),
)
)
# end of single_batch_fn
nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
single_batch_fn, [boxes, scores, classes]
)
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
def postprocess_per_class(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with per class NMS.
An accurate but relatively slow version of NMS. The idea is to perform NMS for
each class, and then combine them.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
        box_outputs: a list of tensors for boxes, each tensor denotes a level of
            boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
            x_min, y_max, x_max].
        image_scales: scaling factors for the final image and bounding boxes.
Returns:
        A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
return per_class_nms(params, boxes, scores, classes, image_scales)
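# Illustrative sketch, not part of the original file: a minimal params dict for
# the post-processing path above. The keys are the ones read by pre_nms,
# topk_class_boxes, and nms; the values are hypothetical D0-like settings.
_EXAMPLE_POSTPROCESS_PARAMS = {
    "num_classes": 90,
    "min_level": 3,
    "max_level": 7,
    "num_scales": 3,
    "aspect_ratios": [1.0, 2.0, 0.5],
    "anchor_scale": 4.0,
    "image_size": (512, 512),
    "nms_configs": {
        "method": "gaussian",  # or "hard"
        "iou_thresh": None,    # defaults to 1.0 for gaussian
        "score_thresh": None,  # defaults to 0.001 for gaussian
        "sigma": None,         # defaults to 0.5
        "max_nms_inputs": 0,   # 0 keeps all anchors before NMS
        "max_output_size": 100,
    },
}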
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/utils/postprocess.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras efficientdet optimizers."""
from absl import logging
import numpy as np
import tensorflow as tf
_DEFAULT_BATCH_SIZE = 64
def get_optimizer(params, *args):
"""Get optimizer."""
learning_rate = learning_rate_schedule(params, *args)
if params["optimizer"].lower() == "sgd":
logging.info("Use SGD optimizer")
optimizer = tf.keras.optimizers.legacy.SGD(learning_rate, momentum=params["momentum"])
elif params["optimizer"].lower() == "adam":
logging.info("Use Adam optimizer")
optimizer = tf.keras.optimizers.legacy.Adam(learning_rate)
else:
raise ValueError("optimizers should be adam or sgd")
return optimizer
def update_learning_rate_schedule_parameters(
params, epochs, global_batch_size, steps_per_epoch
):
"""Updates params that are related to the learning rate schedule."""
# Learning rate is proportional to the batch size
params["adjusted_learning_rate"] = (
params["learning_rate"] * global_batch_size / _DEFAULT_BATCH_SIZE
)
if "lr_warmup_init" in params:
params["adjusted_lr_warmup_init"] = (
params["lr_warmup_init"] * global_batch_size / _DEFAULT_BATCH_SIZE
)
params["lr_warmup_step"] = int(params["lr_warmup_epoch"] * steps_per_epoch)
params["first_lr_drop_step"] = int(params["first_lr_drop_epoch"] * steps_per_epoch)
params["second_lr_drop_step"] = int(
params["second_lr_drop_epoch"] * steps_per_epoch
)
params["total_steps"] = epochs * steps_per_epoch
def learning_rate_schedule(params, *args):
"""Learning rate schedule based on global step."""
update_learning_rate_schedule_parameters(params, *args)
lr_decay_method = params["lr_decay_method"]
if lr_decay_method == "stepwise":
return StepwiseLrSchedule(
params["adjusted_learning_rate"],
params["adjusted_lr_warmup_init"],
params["lr_warmup_step"],
params["first_lr_drop_step"],
params["second_lr_drop_step"],
)
if lr_decay_method == "cosine":
return CosineLrSchedule(
params["adjusted_learning_rate"],
params["adjusted_lr_warmup_init"],
params["lr_warmup_step"],
params["total_steps"],
)
if lr_decay_method == "polynomial":
return PolynomialLrSchedule(
params["adjusted_learning_rate"],
params["adjusted_lr_warmup_init"],
params["lr_warmup_step"],
params["poly_lr_power"],
params["total_steps"],
)
if lr_decay_method == "constant":
return params["adjusted_learning_rate"]
raise ValueError("unknown lr_decay_method: {}".format(lr_decay_method))
class StepwiseLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
"""Stepwise learning rate schedule."""
def __init__(
self,
adjusted_lr: float,
lr_warmup_init: float,
lr_warmup_step: int,
first_lr_drop_step: int,
second_lr_drop_step: int,
):
"""Build a StepwiseLrSchedule.
Args:
adjusted_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
first_lr_drop_step: `int`, First lr decay step.
second_lr_drop_step: `int`, Second lr decay step.
"""
super().__init__()
logging.info("LR schedule method: stepwise")
self.adjusted_lr = adjusted_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
self.first_lr_drop_step = first_lr_drop_step
self.second_lr_drop_step = second_lr_drop_step
def __call__(self, step):
linear_warmup = self.lr_warmup_init + (
tf.cast(step, dtype=tf.float32)
/ self.lr_warmup_step
* (self.adjusted_lr - self.lr_warmup_init)
)
learning_rate = tf.where(
step < self.lr_warmup_step, linear_warmup, self.adjusted_lr
)
lr_schedule = [
[1.0, self.lr_warmup_step],
[0.1, self.first_lr_drop_step],
[0.01, self.second_lr_drop_step],
]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(
step < start_global_step, learning_rate, self.adjusted_lr * mult
)
return learning_rate
class CosineLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
"""Cosine learning rate schedule."""
def __init__(
self,
adjusted_lr: float,
lr_warmup_init: float,
lr_warmup_step: int,
total_steps: int,
):
"""Build a CosineLrSchedule.
Args:
adjusted_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
total_steps: `int`, Total train steps.
"""
super().__init__()
logging.info("LR schedule method: cosine")
self.adjusted_lr = adjusted_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
self.decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
def __call__(self, step):
linear_warmup = self.lr_warmup_init + (
tf.cast(step, dtype=tf.float32)
/ self.lr_warmup_step
* (self.adjusted_lr - self.lr_warmup_init)
)
cosine_lr = (
0.5
* self.adjusted_lr
* (1 + tf.cos(np.pi * tf.cast(step, tf.float32) / self.decay_steps))
)
return tf.where(step < self.lr_warmup_step, linear_warmup, cosine_lr)
class PolynomialLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
"""Polynomial learning rate schedule."""
def __init__(
self,
adjusted_lr: float,
lr_warmup_init: float,
lr_warmup_step: int,
power: float,
total_steps: int,
):
"""Build a PolynomialLrSchedule.
Args:
adjusted_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
power: `float`, power.
total_steps: `int`, Total train steps.
"""
super().__init__()
logging.info("LR schedule method: polynomial")
self.adjusted_lr = adjusted_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
self.power = power
self.total_steps = total_steps
def __call__(self, step):
linear_warmup = self.lr_warmup_init + (
tf.cast(step, dtype=tf.float32)
/ self.lr_warmup_step
* (self.adjusted_lr - self.lr_warmup_init)
)
polynomial_lr = self.adjusted_lr * tf.pow(
1 - (tf.cast(step, dtype=tf.float32) / self.total_steps), self.power
)
return tf.where(step < self.lr_warmup_step, linear_warmup, polynomial_lr)
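# Illustrative sketch, not part of the original file: evaluating one of the
# schedules above at a few steps. All hyperparameter values are hypothetical.
def _demo_cosine_schedule():
    import tensorflow as tf

    sched = CosineLrSchedule(
        adjusted_lr=0.1, lr_warmup_init=0.001, lr_warmup_step=100, total_steps=1000
    )
    mid_warmup = sched(tf.constant(50))    # linear ramp: ~0.0505
    post_warmup = sched(tf.constant(100))  # cosine branch, near adjusted_lr
    near_end = sched(tf.constant(990))     # decayed close to zero
    return mid_warmup, post_warmup, near_end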
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/utils/optimizers.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss functions for efficientdet."""
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import utils
# pylint: disable=arguments-differ  # for keras layers.
def focal_loss(y_pred, y_true, alpha, gamma, normalizer, label_smoothing=0.0):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
y_pred: A float tensor of size [batch, height_in, width_in,
num_predictions].
y_true: A float tensor of size [batch, height_in, width_in,
num_predictions].
alpha: A float scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float scalar modulating loss from hard and easy examples.
normalizer: Divide loss by this value.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
loss: A float32 scalar representing normalized total loss.
"""
with tf1.name_scope("focal_loss"):
normalizer = tf.cast(normalizer, dtype=y_pred.dtype)
# compute focal loss multipliers before label smoothing, such that it will
# not blow up the loss.
pred_prob = tf.math.sigmoid(y_pred)
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
modulating_factor = (1.0 - p_t) ** gamma
# apply label smoothing for cross_entropy for each entry.
if label_smoothing:
y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
# compute the final loss and return
return (1 / normalizer) * alpha_factor * modulating_factor * ce
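# Illustrative sketch, not part of the original file: a scalar sanity check of
# the focal loss above. With y_true=1 the loss reduces to
# alpha * (1 - p)**gamma * -log(p), where p = sigmoid(y_pred).
def _demo_focal_loss_scalar():
    y_pred = tf.constant([[2.0]])  # logit -> p = sigmoid(2.0) ~ 0.881
    y_true = tf.constant([[1.0]])
    loss = focal_loss(y_pred, y_true, alpha=0.25, gamma=1.5, normalizer=1.0)
    # Expected: 0.25 * (1 - 0.881)**1.5 * softplus(-2.0) ~ 1.3e-3
    return loss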
def _box_loss(box_outputs, box_targets, num_positives, delta=0.1):
"""Computes box regression loss."""
    # delta is typically around the mean value of the regression targets.
    # For instance, the regression targets of a 512x512 input with 6 anchors on
    # the P3-P7 pyramid are about [0.1, 0.1, 0.2, 0.2].
normalizer = num_positives * 4.0
mask = tf.not_equal(box_targets, 0.0)
box_loss = tf1.losses.huber_loss(
box_targets,
box_outputs,
weights=mask,
delta=delta,
reduction=tf1.losses.Reduction.SUM,
)
box_loss /= normalizer
return box_loss
def detection_loss(cls_outputs, box_outputs, labels, config):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
labels: the dictionary that returned from dataloader that includes
groundtruth targets.
config: the dictionary including training parameters specified in
            default_hparams function in this file.
Returns:
        total_loss: a float tensor representing the total loss, reduced from
            class and box losses across all levels.
        cls_loss: a float tensor representing the total class loss.
        box_loss: a float tensor representing the total box regression loss.
"""
# Sum all positives in a batch for normalization and avoid zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.math.reduce_sum(labels["mean_num_positives"]) + 1.0
positives_momentum = config.get("positives_momentum", None) or 0
if positives_momentum > 0:
# normalize the num_positive_examples for training stability.
moving_normalizer_var = tf.Variable(
0.0,
name="moving_normalizer",
dtype=tf.float32,
synchronization=tf.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf.VariableAggregation.MEAN,
)
num_positives_sum = tf.keras.backend.moving_average_update(
moving_normalizer_var, num_positives_sum, momentum=config.positives_momentum
)
elif positives_momentum < 0:
num_positives_sum = utils.cross_replica_mean(num_positives_sum)
levels = cls_outputs.keys()
cls_losses = []
box_losses = []
for level in levels:
# Onehot encoding for classification labels.
cls_targets_at_level = tf.one_hot(
labels["cls_targets_%d" % level],
config.num_classes,
dtype=cls_outputs[level].dtype,
)
if config.data_format == "channels_first":
bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(
cls_targets_at_level, [bs, -1, width, height]
)
else:
bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(
cls_targets_at_level, [bs, width, height, -1]
)
box_targets_at_level = labels["box_targets_%d" % level]
cls_loss = focal_loss(
cls_outputs[level],
cls_targets_at_level,
config.alpha,
config.gamma,
normalizer=num_positives_sum,
label_smoothing=config.label_smoothing,
)
if config.data_format == "channels_first":
cls_loss = tf.reshape(cls_loss, [bs, -1, width, height, config.num_classes])
else:
cls_loss = tf.reshape(cls_loss, [bs, width, height, -1, config.num_classes])
cls_loss *= tf.cast(
tf.expand_dims(tf.not_equal(labels["cls_targets_%d" % level], -2), -1),
cls_loss.dtype,
)
cls_loss_sum = tf.reduce_sum(cls_loss)
cls_losses.append(tf.cast(cls_loss_sum, tf.float32))
if config.box_loss_weight:
box_losses.append(
_box_loss(
box_outputs[level],
box_targets_at_level,
num_positives_sum,
delta=config.delta,
)
)
# Sum per level losses to total loss.
cls_loss = tf.math.add_n(cls_losses)
box_loss = tf.math.add_n(box_losses) if box_losses else tf.constant(0.0)
total_loss = cls_loss + config.box_loss_weight * box_loss
return total_loss, cls_loss, box_loss
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/utils/losses.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers of efficientdet."""
import functools
from absl import logging
import numpy as np
import tensorflow as tf
import utils
from . import fpn_configs
class FNode(tf.keras.layers.Layer):
"""A Keras Layer implementing BiFPN Node."""
def __init__(
self,
feat_level,
inputs_offsets,
fpn_num_filters,
apply_bn_for_resampling,
conv_after_downsample,
conv_bn_act_pattern,
separable_conv,
act_type,
weight_method,
data_format,
name="fnode",
):
super().__init__(name=name)
self.feat_level = feat_level
self.inputs_offsets = inputs_offsets
self.fpn_num_filters = fpn_num_filters
self.apply_bn_for_resampling = apply_bn_for_resampling
self.separable_conv = separable_conv
self.act_type = act_type
self.conv_after_downsample = conv_after_downsample
self.data_format = data_format
self.weight_method = weight_method
self.conv_bn_act_pattern = conv_bn_act_pattern
self.resample_layers = []
self.vars = []
def fuse_features(self, nodes):
"""Fuse features from different resolutions and return a weighted sum.
Args:
nodes: a list of tensorflow features at different levels
Returns:
A tensor denoting the fused feature.
"""
dtype = nodes[0].dtype
if self.weight_method == "attn":
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
normalized_weights = tf.nn.softmax(tf.stack(edge_weights))
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == "fastattn":
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
weights_sum = tf.add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = tf.add_n(nodes)
elif self.weight_method == "channel_attn":
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == "channel_fastattn":
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
weights_sum = tf.add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = tf.add_n(nodes)
elif self.weight_method == "sum":
new_node = tf.reduce_sum(nodes, axis=0)
else:
raise ValueError("unknown weight_method %s" % self.weight_method)
return new_node
def _add_wsm(self, initializer):
for i, _ in enumerate(self.inputs_offsets):
name = "WSM" + ("" if i == 0 else "_" + str(i))
self.vars.append(self.add_weight(initializer=initializer, name=name))
def build(self, feats_shape):
for i, input_offset in enumerate(self.inputs_offsets):
name = "resample_{}_{}_{}".format(i, input_offset, len(feats_shape))
self.resample_layers.append(
ResampleFeatureMap(
self.feat_level,
self.fpn_num_filters,
self.apply_bn_for_resampling,
self.conv_after_downsample,
data_format=self.data_format,
name=name,
)
)
if self.weight_method == "attn":
self._add_wsm("ones")
elif self.weight_method == "fastattn":
self._add_wsm("ones")
elif self.weight_method == "channel_attn":
num_filters = int(self.fpn_num_filters)
self._add_wsm(lambda: tf.ones([num_filters]))
elif self.weight_method == "channel_fastattn":
num_filters = int(self.fpn_num_filters)
self._add_wsm(lambda: tf.ones([num_filters]))
self.op_after_combine = OpAfterCombine(
self.conv_bn_act_pattern,
self.separable_conv,
self.fpn_num_filters,
self.act_type,
self.data_format,
name="op_after_combine{}".format(len(feats_shape)),
)
self.built = True
super().build(feats_shape)
def call(self, feats, training):
nodes = []
for i, input_offset in enumerate(self.inputs_offsets):
input_node = feats[input_offset]
input_node = self.resample_layers[i](input_node, training, feats)
nodes.append(input_node)
new_node = self.fuse_features(nodes)
new_node = self.op_after_combine(new_node)
return feats + [new_node]
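# Illustrative sketch, not part of the original file: the "fastattn" fusion
# rule from FNode.fuse_features in isolation. Two constant feature maps are
# blended with normalized non-negative edge weights.
def _demo_fastattn_fusion():
    nodes = [tf.ones([1, 2, 2, 1]), 3.0 * tf.ones([1, 2, 2, 1])]
    edge_weights = [tf.constant(1.0), tf.constant(2.0)]
    weights_sum = tf.add_n(edge_weights)
    fused = tf.add_n(
        [nodes[i] * edge_weights[i] / (weights_sum + 0.0001) for i in range(2)]
    )
    return fused  # every element ~ (1*1 + 3*2) / 3 = 2.33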
class OpAfterCombine(tf.keras.layers.Layer):
"""Operation after combining input features during feature fusiong."""
def __init__(
self,
conv_bn_act_pattern,
separable_conv,
fpn_num_filters,
act_type,
data_format,
name="op_after_combine",
):
super().__init__(name=name)
self.conv_bn_act_pattern = conv_bn_act_pattern
self.separable_conv = separable_conv
self.fpn_num_filters = fpn_num_filters
self.act_type = act_type
self.data_format = data_format
if self.separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D, depth_multiplier=1
)
else:
conv2d_layer = tf.keras.layers.Conv2D
self.conv_op = conv2d_layer(
filters=fpn_num_filters,
kernel_size=(3, 3),
padding="same",
use_bias=not self.conv_bn_act_pattern,
data_format=self.data_format,
name="conv",
)
self.bn = utils.build_batch_norm(data_format=self.data_format, name="bn")
def call(self, new_node, training):
if not self.conv_bn_act_pattern:
new_node = utils.activation_fn(new_node, self.act_type)
new_node = self.conv_op(new_node)
new_node = self.bn(new_node, training=training)
if self.conv_bn_act_pattern:
new_node = utils.activation_fn(new_node, self.act_type)
return new_node
class ResampleFeatureMap(tf.keras.layers.Layer):
"""Resample feature map for downsampling or upsampling."""
def __init__(
self,
feat_level,
target_num_channels,
apply_bn=False,
conv_after_downsample=False,
data_format=None,
pooling_type=None,
upsampling_type=None,
name="resample_p0",
):
super().__init__(name=name)
self.apply_bn = apply_bn
self.data_format = data_format
self.target_num_channels = target_num_channels
self.feat_level = feat_level
self.conv_after_downsample = conv_after_downsample
self.pooling_type = pooling_type or "max"
self.upsampling_type = upsampling_type or "nearest"
self.conv2d = tf.keras.layers.Conv2D(
self.target_num_channels,
(1, 1),
padding="same",
data_format=self.data_format,
name="conv2d",
)
self.bn = utils.build_batch_norm(data_format=self.data_format, name="bn")
def _pool2d(self, inputs, height, width, target_height, target_width):
"""Pool the inputs to target height and width."""
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
if self.pooling_type == "max":
return tf.keras.layers.MaxPooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding="SAME",
data_format=self.data_format,
)(inputs)
if self.pooling_type == "avg":
return tf.keras.layers.AveragePooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding="SAME",
data_format=self.data_format,
)(inputs)
raise ValueError("Unsupported pooling type {}.".format(self.pooling_type))
def _upsample2d(self, inputs, target_height, target_width):
return tf.cast(
tf.image.resize(
tf.cast(inputs, tf.float32),
[target_height, target_width],
method=self.upsampling_type,
),
inputs.dtype,
)
def _maybe_apply_1x1(self, feat, training, num_channels):
"""Apply 1x1 conv to change layer width if necessary."""
if num_channels != self.target_num_channels:
feat = self.conv2d(feat)
if self.apply_bn:
feat = self.bn(feat, training=training)
return feat
def call(self, feat, training, all_feats):
hwc_idx = (2, 3, 1) if self.data_format == "channels_first" else (1, 2, 3)
height, width, num_channels = [feat.shape.as_list()[i] for i in hwc_idx]
if all_feats:
target_feat_shape = all_feats[self.feat_level].shape.as_list()
target_height, target_width, _ = [target_feat_shape[i] for i in hwc_idx]
else:
# Default to downsampling if all_feats is empty.
target_height, target_width = (height + 1) // 2, (width + 1) // 2
# If conv_after_downsample is True, when downsampling, apply 1x1 after
# downsampling for efficiency.
if height > target_height and width > target_width:
if not self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
feat = self._pool2d(feat, height, width, target_height, target_width)
if self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
elif height <= target_height and width <= target_width:
feat = self._maybe_apply_1x1(feat, training, num_channels)
if height < target_height or width < target_width:
feat = self._upsample2d(feat, target_height, target_width)
else:
raise ValueError(
"Incompatible Resampling : feat shape {}x{} target_shape: {}x{}".format(
height, width, target_height, target_width
)
)
return feat
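# Illustrative sketch, not part of the original file: the stride/kernel
# arithmetic _pool2d uses, i.e. stride = ceil(height / target_height) with an
# overlapping pooling window of stride + 1.
def _demo_pool_stride():
    height, target_height = 65, 32
    stride = (height - 1) // target_height + 1  # ceil(65 / 32) = 3
    pool_size = stride + 1                      # window of 4
    return stride, pool_size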
class ClassNet(tf.keras.layers.Layer):
"""Object class prediction network."""
def __init__(
self,
num_classes=90,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
act_type="swish",
repeats=4,
separable_conv=True,
survival_prob=None,
data_format="channels_last",
grad_checkpoint=False,
name="class_net",
**kwargs
):
"""Initialize the ClassNet.
Args:
num_classes: number of classes.
num_anchors: number of anchors.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
act_type: String of the activation used.
repeats: number of intermediate layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
            data_format: string of 'channels_first' or 'channels_last'.
grad_checkpoint: bool, If true, apply grad checkpoint for saving memory.
            name: the name of this layer.
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_classes = num_classes
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
self.grad_checkpoint = grad_checkpoint
if separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D,
depth_multiplier=1,
data_format=data_format,
pointwise_initializer=tf.initializers.variance_scaling(),
depthwise_initializer=tf.initializers.variance_scaling(),
)
else:
conv2d_layer = functools.partial(
tf.keras.layers.Conv2D,
data_format=data_format,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
)
for i in range(self.repeats):
            # Intermediate conv ops (separable or regular conv, chosen above).
self.conv_ops.append(
conv2d_layer(
self.num_filters,
kernel_size=3,
bias_initializer=tf.zeros_initializer(),
activation=None,
padding="same",
name="class-%d" % i,
)
)
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
utils.build_batch_norm(
data_format=self.data_format,
name="class-%d-bn-%d" % (i, level),
)
)
self.bns.append(bn_per_level)
self.classes = conv2d_layer(
num_classes * num_anchors,
kernel_size=3,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding="same",
name="class-predict",
)
@tf.autograph.experimental.do_not_convert
def _conv_bn_act(self, image, i, level_id, training):
conv_op = self.conv_ops[i]
bn = self.bns[i][level_id]
act_type = self.act_type
@utils.recompute_grad(self.grad_checkpoint)
def _call(image):
original_image = image
image = conv_op(image)
image = bn(image, training=training)
if self.act_type:
image = utils.activation_fn(image, act_type)
if i > 0 and self.survival_prob:
image = utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
return image
return _call(image)
def call(self, inputs, training, **kwargs):
"""Call ClassNet."""
class_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
image = self._conv_bn_act(image, i, level_id, training)
class_outputs.append(self.classes(image))
return class_outputs
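# Illustrative sketch, not part of the original file: the bias initializer of
# the `class-predict` layer above encodes a foreground prior of ~1%, the
# standard focal-loss initialization.
def _demo_class_prior_bias():
    prior = 0.01
    bias = -np.log((1 - prior) / prior)      # ~ -4.595
    recovered = 1.0 / (1.0 + np.exp(-bias))  # sigmoid(bias) == prior
    return bias, recovered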
class BoxNet(tf.keras.layers.Layer):
"""Box regression network."""
def __init__(
self,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
act_type="swish",
repeats=4,
separable_conv=True,
survival_prob=None,
data_format="channels_last",
grad_checkpoint=False,
name="box_net",
**kwargs
):
"""Initialize BoxNet.
Args:
num_anchors: number of anchors used.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
act_type: String of the activation used.
repeats: number of "intermediate" layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
            data_format: string of 'channels_first' or 'channels_last'.
grad_checkpoint: bool, If true, apply grad checkpoint for saving memory.
name: Name of the layer.
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.grad_checkpoint = grad_checkpoint
self.conv_ops = []
self.bns = []
for i in range(self.repeats):
# If using SeparableConv2D
if self.separable_conv:
self.conv_ops.append(
tf.keras.layers.SeparableConv2D(
filters=self.num_filters,
depth_multiplier=1,
pointwise_initializer=tf.initializers.variance_scaling(),
depthwise_initializer=tf.initializers.variance_scaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding="same",
name="box-%d" % i,
)
)
# If using Conv2d
else:
self.conv_ops.append(
tf.keras.layers.Conv2D(
filters=self.num_filters,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding="same",
name="box-%d" % i,
)
)
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
utils.build_batch_norm(
data_format=self.data_format,
name="box-%d-bn-%d" % (i, level),
)
)
self.bns.append(bn_per_level)
if self.separable_conv:
self.boxes = tf.keras.layers.SeparableConv2D(
filters=4 * self.num_anchors,
depth_multiplier=1,
pointwise_initializer=tf.initializers.variance_scaling(),
depthwise_initializer=tf.initializers.variance_scaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding="same",
name="box-predict",
)
else:
self.boxes = tf.keras.layers.Conv2D(
filters=4 * self.num_anchors,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding="same",
name="box-predict",
)
@tf.autograph.experimental.do_not_convert
def _conv_bn_act(self, image, i, level_id, training):
conv_op = self.conv_ops[i]
bn = self.bns[i][level_id]
act_type = self.act_type
@utils.recompute_grad(self.grad_checkpoint)
def _call(image):
original_image = image
image = conv_op(image)
image = bn(image, training=training)
if self.act_type:
image = utils.activation_fn(image, act_type)
if i > 0 and self.survival_prob:
image = utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
return image
return _call(image)
def call(self, inputs, training):
"""Call boxnet."""
box_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
image = self._conv_bn_act(image, i, level_id, training)
box_outputs.append(self.boxes(image))
return box_outputs
class FPNCells(tf.keras.layers.Layer):
"""FPN cells."""
def __init__(self, config, name="fpn_cells"):
super().__init__(name=name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(
config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method,
)
self.cells = [
FPNCell(self.config, name="cell_%d" % rep)
for rep in range(self.config.fpn_cell_repeats)
]
def call(self, feats, training):
for cell in self.cells:
cell_feats = cell(feats, training)
min_level = self.config.min_level
max_level = self.config.max_level
feats = []
for level in range(min_level, max_level + 1):
for i, fnode in enumerate(reversed(self.fpn_config.nodes)):
if fnode["feat_level"] == level:
feats.append(cell_feats[-1 - i])
break
return feats
class FPNCell(tf.keras.layers.Layer):
"""A single FPN cell."""
def __init__(self, config, name="fpn_cell"):
super().__init__(name=name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(
config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method,
)
self.fnodes = []
for i, fnode_cfg in enumerate(self.fpn_config.nodes):
logging.info("fnode %d : %s", i, fnode_cfg)
fnode = FNode(
fnode_cfg["feat_level"] - self.config.min_level,
fnode_cfg["inputs_offsets"],
config.fpn_num_filters,
config.apply_bn_for_resampling,
config.conv_after_downsample,
config.conv_bn_act_pattern,
config.separable_conv,
config.act_type,
weight_method=self.fpn_config.weight_method,
data_format=config.data_format,
name="fnode%d" % i,
)
self.fnodes.append(fnode)
def call(self, feats, training):
@utils.recompute_grad(self.config.grad_checkpoint)
def _call(feats):
for fnode in self.fnodes:
feats = fnode(feats, training)
return feats
return _call(feats)
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/utils/layers.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Builder for EfficientNet.
efficientnet-bx (x=0,1,2,3,4,5,6,7) checkpoints are located in:
https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-bx.tar.gz
"""
import functools
import os
import re
from absl import logging
import numpy as np
import tensorflow as tf
import utils
from . import efficientnet_model
def efficientnet_params(model_name):
"""Get efficientnet params based on model name."""
params_dict = {
# (width_coefficient, depth_coefficient, resolution, dropout_rate)
"efficientnet-b0": (1.0, 1.0, 224, 0.2),
"efficientnet-b1": (1.0, 1.1, 240, 0.2),
"efficientnet-b2": (1.1, 1.2, 260, 0.3),
"efficientnet-b3": (1.2, 1.4, 300, 0.3),
"efficientnet-b4": (1.4, 1.8, 380, 0.4),
"efficientnet-b5": (1.6, 2.2, 456, 0.4),
"efficientnet-b6": (1.8, 2.6, 528, 0.5),
"efficientnet-b7": (2.0, 3.1, 600, 0.5),
"efficientnet-b8": (2.2, 3.6, 672, 0.5),
"efficientnet-l2": (4.3, 5.3, 800, 0.5),
}
return params_dict[model_name]
class BlockDecoder(object):
"""Block Decoder for readability."""
def _decode_block_string(self, block_string):
"""Gets a block through a string notation of arguments."""
assert isinstance(block_string, str)
ops = block_string.split("_")
options = {}
for op in ops:
splits = re.split(r"(\d.*)", op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
if "s" not in options or len(options["s"]) != 2:
raise ValueError("Strides options should be a pair of integers.")
return efficientnet_model.BlockArgs(
kernel_size=int(options["k"]),
num_repeat=int(options["r"]),
input_filters=int(options["i"]),
output_filters=int(options["o"]),
expand_ratio=int(options["e"]),
id_skip=("noskip" not in block_string),
se_ratio=float(options["se"]) if "se" in options else None,
strides=[int(options["s"][0]), int(options["s"][1])],
conv_type=int(options["c"]) if "c" in options else 0,
fused_conv=int(options["f"]) if "f" in options else 0,
super_pixel=int(options["p"]) if "p" in options else 0,
condconv=("cc" in block_string),
)
def _encode_block_string(self, block):
"""Encodes a block to a string."""
args = [
"r%d" % block.num_repeat,
"k%d" % block.kernel_size,
"s%d%d" % (block.strides[0], block.strides[1]),
"e%s" % block.expand_ratio,
"i%d" % block.input_filters,
"o%d" % block.output_filters,
"c%d" % block.conv_type,
"f%d" % block.fused_conv,
"p%d" % block.super_pixel,
]
if block.se_ratio > 0 and block.se_ratio <= 1:
args.append("se%s" % block.se_ratio)
if block.id_skip is False: # pylint: disable=g-bool-id-comparison
args.append("noskip")
if block.condconv:
args.append("cc")
return "_".join(args)
def decode(self, string_list):
"""Decodes a list of string notations to specify blocks inside the network.
Args:
string_list: a list of strings, each string is a notation of block.
Returns:
A list of namedtuples to represent blocks arguments.
"""
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(self._decode_block_string(block_string))
return blocks_args
def encode(self, blocks_args):
"""Encodes a list of Blocks to a list of strings.
Args:
blocks_args: A list of namedtuples to represent blocks arguments.
Returns:
a list of strings, each string is a notation of block.
"""
block_strings = []
for block in blocks_args:
block_strings.append(self._encode_block_string(block))
return block_strings
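# Illustrative sketch, not part of the original file: decoding one of the
# default block strings below with the BlockDecoder above.
def _demo_block_string_decode():
    args = BlockDecoder()._decode_block_string("r2_k3_s22_e6_i16_o24_se0.25")
    # -> num_repeat=2, kernel_size=3, strides=[2, 2], expand_ratio=6,
    #    input_filters=16, output_filters=24, se_ratio=0.25
    return args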
def swish(features, use_native=True, use_hard=False):
"""Computes the Swish activation function.
We provide three alternatives:
- Native tf.nn.swish, use less memory during training than composable swish.
- Quantization friendly hard swish.
- A composable swish, equivalent to tf.nn.swish, but more general for
finetuning and TF-Hub.
Args:
features: A `Tensor` representing preactivation values.
use_native: Whether to use the native swish from tf.nn that uses a custom
gradient to reduce memory usage, or to use customized swish that uses
default TensorFlow gradient computation.
use_hard: Whether to use quantization-friendly hard swish.
Returns:
The activation value.
"""
if use_native and use_hard:
raise ValueError("Cannot specify both use_native and use_hard.")
if use_native:
return tf.nn.swish(features)
if use_hard:
return features * tf.nn.relu6(features + np.float32(3)) * (1.0 / 6.0)
features = tf.convert_to_tensor(features, name="features")
return features * tf.nn.sigmoid(features)
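# Illustrative sketch, not part of the original file: the three swish variants
# above agree closely; hard swish is a piecewise-linear approximation.
def _demo_swish_variants():
    x = tf.constant([-1.0, 0.0, 1.0])
    native = swish(x)                                 # x * sigmoid(x)
    composable = swish(x, use_native=False)           # same values
    hard = swish(x, use_native=False, use_hard=True)  # x * relu6(x + 3) / 6
    return native, composable, hard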
_DEFAULT_BLOCKS_ARGS = [
"r1_k3_s11_e1_i32_o16_se0.25",
"r2_k3_s22_e6_i16_o24_se0.25",
"r2_k5_s22_e6_i24_o40_se0.25",
"r3_k3_s22_e6_i40_o80_se0.25",
"r3_k5_s11_e6_i80_o112_se0.25",
"r4_k5_s22_e6_i112_o192_se0.25",
"r1_k3_s11_e6_i192_o320_se0.25",
]
def efficientnet(
width_coefficient=None, depth_coefficient=None, dropout_rate=0.2, survival_prob=0.8
):
"""Creates a efficientnet model."""
global_params = efficientnet_model.GlobalParams(
blocks_args=_DEFAULT_BLOCKS_ARGS,
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=dropout_rate,
survival_prob=survival_prob,
data_format="channels_last",
num_classes=1000,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None,
relu_fn=tf.nn.swish,
# The default is TPU-specific batch norm.
# The alternative is tf.layers.BatchNormalization.
batch_norm=utils.BatchNormalization, # TPU-specific requirement.
use_se=True,
clip_projection_output=False,
)
return global_params
def get_model_params(model_name, override_params):
"""Get the block args and global params for a given model."""
if model_name.startswith("efficientnet"):
width_coefficient, depth_coefficient, _, dropout_rate = efficientnet_params(
model_name
)
global_params = efficientnet(width_coefficient, depth_coefficient, dropout_rate)
else:
raise NotImplementedError("model name is not pre-defined: %s" % model_name)
if override_params:
# ValueError will be raised here if override_params has fields not included
# in global_params.
global_params = global_params._replace(**override_params)
decoder = BlockDecoder()
blocks_args = decoder.decode(global_params.blocks_args)
logging.info("global_params= %s", global_params)
return blocks_args, global_params
def get_model(model_name, override_params={}):
"""A helper function to create and return model.
Args:
model_name: string, the predefined model name.
override_params: A dictionary of params for overriding. Fields must exist in
efficientnet_model.GlobalParams.
Returns:
created model
Raises:
        NotImplementedError: when model_name specifies an undefined model.
        ValueError: when override_params has invalid fields.
"""
if model_name.startswith("efficientnet-"):
blocks_args, global_params = get_model_params(model_name, override_params)
return efficientnet_model.Model(blocks_args, global_params, model_name)
else:
raise ValueError("Unknown model name {}".format(model_name))
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/backbone/efficientnet_builder.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet model.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
import collections
import itertools
import math
from absl import logging
import numpy as np
import six
from six.moves import xrange
import tensorflow as tf
import utils
GlobalParams = collections.namedtuple(
"GlobalParams",
[
"batch_norm_momentum",
"batch_norm_epsilon",
"dropout_rate",
"data_format",
"num_classes",
"width_coefficient",
"depth_coefficient",
"depth_divisor",
"min_depth",
"survival_prob",
"relu_fn",
"batch_norm",
"use_se",
"local_pooling",
"condconv_num_experts",
"clip_projection_output",
"blocks_args",
"fix_head_stem",
"grad_checkpoint",
],
)
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs = collections.namedtuple(
"BlockArgs",
[
"kernel_size",
"num_repeat",
"input_filters",
"output_filters",
"expand_ratio",
"id_skip",
"strides",
"se_ratio",
"conv_type",
"fused_conv",
"super_pixel",
"condconv",
],
)
# defaults will be a public argument for namedtuple in Python 3.7
# https://docs.python.org/3/library/collections.html#collections.namedtuple
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
"""Initialization for convolutional kernels.
The main difference with tf.variance_scaling_initializer is that
tf.variance_scaling_initializer uses a truncated normal with an uncorrected
standard deviation, whereas here we use a normal distribution. Similarly,
tf.initializers.variance_scaling uses a truncated normal with
a corrected standard deviation.
Args:
shape: shape of variable
dtype: dtype of variable
partition_info: unused
Returns:
an initialization for the variable
"""
del partition_info
kernel_height, kernel_width, _, out_filters = shape
fan_out = int(kernel_height * kernel_width * out_filters)
return tf.random.normal(shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)
def dense_kernel_initializer(shape, dtype=None, partition_info=None):
"""Initialization for dense kernels.
This initialization is equal to
tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
distribution='uniform').
It is written out explicitly here for clarity.
Args:
shape: shape of variable
dtype: dtype of variable
partition_info: unused
Returns:
an initialization for the variable
"""
del partition_info
init_range = 1.0 / np.sqrt(shape[1])
return tf.random.uniform(shape, -init_range, init_range, dtype=dtype)
def round_filters(filters, global_params, skip=False):
"""Round number of filters based on depth multiplier."""
multiplier = global_params.width_coefficient
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
if skip or not multiplier:
return filters
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
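# Illustrative sketch, not part of the original file: the rounding rule above
# with a hypothetical width multiplier of 1.4 and divisor of 8.
def _demo_round_filters_math():
    filters, multiplier, divisor = 32, 1.4, 8
    scaled = filters * multiplier  # 44.8
    rounded = max(divisor, int(scaled + divisor / 2) // divisor * divisor)  # 48
    if rounded < 0.9 * scaled:  # never round down by more than 10%
        rounded += divisor
    return rounded  # 48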
def round_repeats(repeats, global_params, skip=False):
"""Round number of filters based on depth multiplier."""
multiplier = global_params.depth_coefficient
if skip or not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
class SE(tf.keras.layers.Layer):
"""Squeeze-and-excitation layer."""
def __init__(self, global_params, se_filters, output_filters, name=None):
super().__init__(name=name)
self._local_pooling = global_params.local_pooling
self._data_format = global_params.data_format
self._relu_fn = global_params.relu_fn or tf.nn.swish
# Squeeze and Excitation layer.
self._se_reduce = tf.keras.layers.Conv2D(
se_filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=self._data_format,
use_bias=True,
name="conv2d",
)
self._se_expand = tf.keras.layers.Conv2D(
output_filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=self._data_format,
use_bias=True,
name="conv2d_1",
)
def call(self, inputs):
h_axis, w_axis = [2, 3] if self._data_format == "channels_first" else [1, 2]
if self._local_pooling:
se_tensor = tf.nn.avg_pool(
inputs,
ksize=[1, inputs.shape[h_axis], inputs.shape[w_axis], 1],
strides=[1, 1, 1, 1],
padding="VALID",
)
else:
se_tensor = tf.reduce_mean(inputs, [h_axis, w_axis], keepdims=True)
se_tensor = self._se_expand(self._relu_fn(self._se_reduce(se_tensor)))
logging.info("Built SE %s : %s", self.name, se_tensor.shape)
return tf.sigmoid(se_tensor) * inputs
class SuperPixel(tf.keras.layers.Layer):
"""Super pixel layer."""
def __init__(self, block_args, global_params, name=None):
super().__init__(name=name)
self._superpixel = tf.keras.layers.Conv2D(
block_args.input_filters,
kernel_size=[2, 2],
strides=[2, 2],
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=global_params.data_format,
use_bias=False,
name="conv2d",
)
self._bnsp = global_params.batch_norm(
axis=1 if global_params.data_format == "channels_first" else -1,
momentum=global_params.batch_norm_momentum,
epsilon=global_params.batch_norm_epsilon,
name="tpu_batch_normalization",
)
self._relu_fn = global_params.relu_fn or tf.nn.swish
def call(self, inputs, training):
return self._relu_fn(self._bnsp(self._superpixel(inputs), training))
class MBConvBlock(tf.keras.layers.Layer):
"""A class of MBConv: Mobile Inverted Residual Bottleneck.
Attributes:
endpoints: dict. A list of internal tensors.
"""
def __init__(self, block_args, global_params, name=None):
"""Initializes a MBConv block.
Args:
block_args: BlockArgs, arguments to create a Block.
global_params: GlobalParams, a set of global parameters.
name: layer name.
"""
super().__init__(name=name)
self._block_args = block_args
self._global_params = global_params
self._local_pooling = global_params.local_pooling
self._batch_norm_momentum = global_params.batch_norm_momentum
self._batch_norm_epsilon = global_params.batch_norm_epsilon
self._batch_norm = global_params.batch_norm
self._condconv_num_experts = global_params.condconv_num_experts
self._data_format = global_params.data_format
self._channel_axis = 1 if self._data_format == "channels_first" else -1
self._relu_fn = global_params.relu_fn or tf.nn.swish
self._has_se = (
global_params.use_se
and self._block_args.se_ratio is not None
and 0 < self._block_args.se_ratio <= 1
)
self._clip_projection_output = global_params.clip_projection_output
self.endpoints = None
if self._block_args.condconv:
raise ValueError("Condconv is not supported.")
        # Builds the block according to the arguments.
self._build()
@property
def block_args(self):
return self._block_args
def _build(self):
"""Builds block according to the arguments."""
# pylint: disable=g-long-lambda
bid = itertools.count(0)
get_bn_name = lambda: "tpu_batch_normalization" + (
"" if not next(bid) else "_" + str(next(bid) // 2)
)
cid = itertools.count(0)
get_conv_name = lambda: "conv2d" + (
"" if not next(cid) else "_" + str(next(cid) // 2)
)
# pylint: enable=g-long-lambda
if self._block_args.super_pixel == 1:
self.super_pixel = SuperPixel(
self._block_args, self._global_params, name="super_pixel"
)
else:
self.super_pixel = None
filters = self._block_args.input_filters * self._block_args.expand_ratio
kernel_size = self._block_args.kernel_size
if self._block_args.fused_conv:
# Fused expansion phase. Called if using fused convolutions.
self._fused_conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=[kernel_size, kernel_size],
strides=self._block_args.strides,
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=self._data_format,
use_bias=False,
name=get_conv_name(),
)
else:
# Expansion phase. Called if not using fused convolutions and expansion
# phase is necessary.
if self._block_args.expand_ratio != 1:
self._expand_conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=self._data_format,
use_bias=False,
name=get_conv_name(),
)
self._bn0 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon,
name=get_bn_name(),
)
# Depth-wise convolution phase. Called if not using fused convolutions.
self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=[kernel_size, kernel_size],
strides=self._block_args.strides,
depthwise_initializer=conv_kernel_initializer,
padding="same",
data_format=self._data_format,
use_bias=False,
name="depthwise_conv2d",
)
self._bn1 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon,
name=get_bn_name(),
)
if self._has_se:
num_reduced_filters = max(
1, int(self._block_args.input_filters * self._block_args.se_ratio)
)
self._se = SE(self._global_params, num_reduced_filters, filters, name="se")
else:
self._se = None
# Output phase.
filters = self._block_args.output_filters
self._project_conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=self._data_format,
use_bias=False,
name=get_conv_name(),
)
self._bn2 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon,
name=get_bn_name(),
)
def call(self, inputs, training, survival_prob=None):
"""Implementation of call().
Args:
inputs: the inputs tensor.
training: boolean, whether the model is constructed for training.
            survival_prob: float, between 0 and 1, drop connect rate.
Returns:
            An output tensor.
"""
@utils.recompute_grad(self._global_params.grad_checkpoint)
def _call(inputs):
logging.info("Block %s input shape: %s", self.name, inputs.shape)
x = inputs
            # Apply the 2x2 super-pixel conv if configured.
if self.super_pixel:
x = self.super_pixel(x, training)
logging.info("SuperPixel %s: %s", self.name, x.shape)
if self._block_args.fused_conv:
                # If using fused MBConv, skip expansion and use a regular conv.
x = self._relu_fn(self._bn1(self._fused_conv(x), training=training))
logging.info("Conv2D shape: %s", x.shape)
else:
# Otherwise, first apply expansion and then apply depthwise conv.
if self._block_args.expand_ratio != 1:
x = self._relu_fn(
self._bn0(self._expand_conv(x), training=training)
)
logging.info("Expand shape: %s", x.shape)
x = self._relu_fn(self._bn1(self._depthwise_conv(x), training=training))
logging.info("DWConv shape: %s", x.shape)
if self._se:
x = self._se(x)
self.endpoints = {"expansion_output": x}
x = self._bn2(self._project_conv(x), training=training)
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
x = tf.identity(x)
if self._clip_projection_output:
x = tf.clip_by_value(x, -6, 6)
if self._block_args.id_skip:
if (
all(s == 1 for s in self._block_args.strides)
and self._block_args.input_filters
== self._block_args.output_filters
):
                    # Apply only if a skip connection is present.
if survival_prob:
x = utils.drop_connect(x, training, survival_prob)
x = tf.add(x, inputs)
logging.info("Project shape: %s", x.shape)
return x
return _call(inputs)
class MBConvBlockWithoutDepthwise(MBConvBlock):
"""MBConv-like block without depthwise convolution and squeeze-and-excite."""
def _build(self):
"""Builds block according to the arguments."""
filters = self._block_args.input_filters * self._block_args.expand_ratio
# pylint: disable=g-long-lambda
cid = itertools.count(0)
get_conv_name = lambda: "conv2d" + (
"" if not next(cid) else "_" + str(next(cid) // 2)
)
# pylint: enable=g-long-lambda
kernel_size = self._block_args.kernel_size
if self._block_args.expand_ratio != 1:
# Expansion phase:
self._expand_conv = tf.keras.layers.Conv2D(
filters,
kernel_size=[kernel_size, kernel_size],
strides=[1, 1],
kernel_initializer=conv_kernel_initializer,
padding="same",
use_bias=False,
name=get_conv_name(),
)
self._bn0 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon,
)
# Output phase:
filters = self._block_args.output_filters
self._project_conv = tf.keras.layers.Conv2D(
filters,
kernel_size=[1, 1],
strides=self._block_args.strides,
kernel_initializer=conv_kernel_initializer,
padding="same",
use_bias=False,
name=get_conv_name(),
)
self._bn1 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon,
)
def call(self, inputs, training, survival_prob=None):
"""Implementation of call().
Args:
inputs: the inputs tensor.
training: boolean, whether the model is constructed for training.
            survival_prob: float, between 0 and 1, drop connect rate.
Returns:
            An output tensor.
"""
@utils.recompute_grad(self._global_params.grad_checkpoint)
def _call(inputs):
logging.info("Block %s input shape: %s", self.name, inputs.shape)
if self._block_args.expand_ratio != 1:
x = self._relu_fn(
self._bn0(self._expand_conv(inputs), training=training)
)
else:
x = inputs
logging.info("Expand shape: %s", x.shape)
self.endpoints = {"expansion_output": x}
x = self._bn1(self._project_conv(x), training=training)
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
x = tf.identity(x)
if self._clip_projection_output:
x = tf.clip_by_value(x, -6, 6)
if self._block_args.id_skip:
if (
all(s == 1 for s in self._block_args.strides)
and self._block_args.input_filters
== self._block_args.output_filters
):
                    # Apply only if a skip connection is present.
if survival_prob:
x = utils.drop_connect(x, training, survival_prob)
x = tf.add(x, inputs)
logging.info("Project shape: %s", x.shape)
return x
return _call(inputs)
class Stem(tf.keras.layers.Layer):
"""Stem layer at the begining of the network."""
def __init__(self, global_params, stem_filters, name=None):
super().__init__(name=name)
self._conv_stem = tf.keras.layers.Conv2D(
filters=round_filters(
stem_filters, global_params, global_params.fix_head_stem
),
kernel_size=[3, 3],
strides=[2, 2],
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=global_params.data_format,
use_bias=False,
)
self._bn = global_params.batch_norm(
axis=(1 if global_params.data_format == "channels_first" else -1),
momentum=global_params.batch_norm_momentum,
epsilon=global_params.batch_norm_epsilon,
)
self._relu_fn = global_params.relu_fn or tf.nn.swish
def call(self, inputs, training):
return self._relu_fn(self._bn(self._conv_stem(inputs), training=training))
class Head(tf.keras.layers.Layer):
"""Head layer for network outputs."""
def __init__(self, global_params, name=None):
super().__init__(name=name)
self.endpoints = {}
self._global_params = global_params
self._conv_head = tf.keras.layers.Conv2D(
filters=round_filters(1280, global_params, global_params.fix_head_stem),
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=conv_kernel_initializer,
padding="same",
data_format=global_params.data_format,
use_bias=False,
name="conv2d",
)
self._bn = global_params.batch_norm(
axis=(1 if global_params.data_format == "channels_first" else -1),
momentum=global_params.batch_norm_momentum,
epsilon=global_params.batch_norm_epsilon,
)
self._relu_fn = global_params.relu_fn or tf.nn.swish
self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
data_format=global_params.data_format
)
if global_params.num_classes:
self._fc = tf.keras.layers.Dense(
global_params.num_classes, kernel_initializer=dense_kernel_initializer
)
else:
self._fc = None
if global_params.dropout_rate > 0:
self._dropout = tf.keras.layers.Dropout(global_params.dropout_rate)
else:
self._dropout = None
self.h_axis, self.w_axis = (
[2, 3] if global_params.data_format == "channels_first" else [1, 2]
)
def call(self, inputs, training, pooled_features_only):
"""Call the layer."""
outputs = self._relu_fn(self._bn(self._conv_head(inputs), training=training))
self.endpoints["head_1x1"] = outputs
if self._global_params.local_pooling:
shape = outputs.get_shape().as_list()
kernel_size = [1, shape[self.h_axis], shape[self.w_axis], 1]
outputs = tf.nn.avg_pool(
outputs, ksize=kernel_size, strides=[1, 1, 1, 1], padding="VALID"
)
self.endpoints["pooled_features"] = outputs
if not pooled_features_only:
if self._dropout:
outputs = self._dropout(outputs, training=training)
self.endpoints["global_pool"] = outputs
if self._fc:
outputs = tf.squeeze(outputs, [self.h_axis, self.w_axis])
outputs = self._fc(outputs)
self.endpoints["head"] = outputs
else:
outputs = self._avg_pooling(outputs)
self.endpoints["pooled_features"] = outputs
if not pooled_features_only:
if self._dropout:
outputs = self._dropout(outputs, training=training)
self.endpoints["global_pool"] = outputs
if self._fc:
outputs = self._fc(outputs)
self.endpoints["head"] = outputs
return outputs
class Model(tf.keras.Model):
"""A class implements tf.keras.Model.
Reference: https://arxiv.org/abs/1807.11626
"""
def __init__(self, blocks_args=None, global_params=None, name=None):
"""Initializes an `Model` instance.
Args:
blocks_args: A list of BlockArgs to construct block modules.
global_params: GlobalParams, a set of global parameters.
name: A string of layer name.
Raises:
ValueError: when blocks_args is not specified as a list.
"""
super().__init__(name=name)
if not isinstance(blocks_args, list):
raise ValueError("blocks_args should be a list.")
self._global_params = global_params
self._blocks_args = blocks_args
self._relu_fn = global_params.relu_fn or tf.nn.swish
self._batch_norm = global_params.batch_norm
self._fix_head_stem = global_params.fix_head_stem
self.endpoints = None
self._build()
def _get_conv_block(self, conv_type):
conv_block_map = {0: MBConvBlock, 1: MBConvBlockWithoutDepthwise}
return conv_block_map[conv_type]
def _build(self):
"""Builds a model."""
self._blocks = []
# Stem part.
self._stem = Stem(self._global_params, self._blocks_args[0].input_filters)
# Builds blocks.
block_id = itertools.count(0)
block_name = lambda: "blocks_%d" % next(block_id)
for i, block_args in enumerate(self._blocks_args):
assert block_args.num_repeat > 0
assert block_args.super_pixel in [0, 1, 2]
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(block_args.input_filters, self._global_params)
output_filters = round_filters(
block_args.output_filters, self._global_params
)
kernel_size = block_args.kernel_size
if self._fix_head_stem and (i == 0 or i == len(self._blocks_args) - 1):
repeats = block_args.num_repeat
else:
repeats = round_repeats(block_args.num_repeat, self._global_params)
block_args = block_args._replace(
input_filters=input_filters,
output_filters=output_filters,
num_repeat=repeats,
)
# The first block needs to take care of stride and filter size increase.
conv_block = self._get_conv_block(block_args.conv_type)
if not block_args.super_pixel: # no super_pixel at all
self._blocks.append(
conv_block(block_args, self._global_params, name=block_name())
)
else:
# if superpixel, adjust filters, kernels, and strides.
depth_factor = int(4 / block_args.strides[0] / block_args.strides[1])
block_args = block_args._replace(
input_filters=block_args.input_filters * depth_factor,
output_filters=block_args.output_filters * depth_factor,
kernel_size=(
(block_args.kernel_size + 1) // 2
if depth_factor > 1
else block_args.kernel_size
),
)
# if the first block has stride-2 and a super_pixel transformation
if block_args.strides[0] == 2 and block_args.strides[1] == 2:
block_args = block_args._replace(strides=[1, 1])
self._blocks.append(
conv_block(block_args, self._global_params, name=block_name())
)
block_args = block_args._replace( # sp stops at stride-2
super_pixel=0,
input_filters=input_filters,
output_filters=output_filters,
kernel_size=kernel_size,
)
elif block_args.super_pixel == 1:
self._blocks.append(
conv_block(block_args, self._global_params, name=block_name())
)
block_args = block_args._replace(super_pixel=2)
else:
self._blocks.append(
conv_block(block_args, self._global_params, name=block_name())
)
if block_args.num_repeat > 1: # rest of blocks with the same block_arg
# pylint: disable=protected-access
block_args = block_args._replace(
input_filters=block_args.output_filters, strides=[1, 1]
)
# pylint: enable=protected-access
for _ in range(block_args.num_repeat - 1):
self._blocks.append(
conv_block(block_args, self._global_params, name=block_name())
)
# Head part.
self._head = Head(self._global_params)
def call(self, inputs, training, features_only=None, pooled_features_only=False):
"""Implementation of call().
Args:
inputs: input tensors.
training: boolean, whether the model is constructed for training.
features_only: build the base feature network only.
pooled_features_only: build the base network for features extraction
(after 1x1 conv layer and global pooling, but before dropout and fc
head).
Returns:
output tensors.
"""
outputs = None
self.endpoints = {}
reduction_idx = 0
# Calls Stem layers
outputs = self._stem(inputs, training)
logging.info("Built stem %s : %s", self._stem.name, outputs.shape)
self.endpoints["stem"] = outputs
# Calls blocks.
for idx, block in enumerate(self._blocks):
is_reduction = False # reduction flag for blocks after the stem layer
# If the first block has super-pixel (space-to-depth) layer, then stem is
# the first reduction point.
if block.block_args.super_pixel == 1 and idx == 0:
reduction_idx += 1
self.endpoints["reduction_%s" % reduction_idx] = outputs
elif (idx == len(self._blocks) - 1) or self._blocks[
idx + 1
].block_args.strides[0] > 1:
is_reduction = True
reduction_idx += 1
survival_prob = self._global_params.survival_prob
if survival_prob:
drop_rate = 1.0 - survival_prob
survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
logging.info("block_%s survival_prob: %s", idx, survival_prob)
outputs = block(outputs, training=training, survival_prob=survival_prob)
self.endpoints["block_%s" % idx] = outputs
if is_reduction:
self.endpoints["reduction_%s" % reduction_idx] = outputs
if block.endpoints:
for k, v in six.iteritems(block.endpoints):
self.endpoints["block_%s/%s" % (idx, k)] = v
if is_reduction:
self.endpoints["reduction_%s/%s" % (reduction_idx, k)] = v
self.endpoints["features"] = outputs
if not features_only:
# Calls final layers and returns logits.
outputs = self._head(outputs, training, pooled_features_only)
self.endpoints.update(self._head.endpoints)
return [outputs] + list(
filter(
lambda endpoint: endpoint is not None,
[
self.endpoints.get("reduction_1"),
self.endpoints.get("reduction_2"),
self.endpoints.get("reduction_3"),
self.endpoints.get("reduction_4"),
self.endpoints.get("reduction_5"),
],
)
)
|
DALI-main
|
docs/examples/use_cases/tensorflow/efficientdet/model/backbone/efficientnet_model.py
|
# Copyright 2021 Paweł Anikiel, Kacper Kluk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import cv2
def read_img(path, size):
with open(path, "rb") as f:
    img = tf.image.decode_image(f.read(), channels=3)
pixels = cv2.cvtColor(img.numpy(), cv2.COLOR_RGB2BGR)
img = tf.image.resize(img, (size, size)) / 255
img = tf.reshape(img, (1, size, size, 3))
return (pixels, img)
def add_bboxes(pixels, boxes, scores, classes):
(h, w, _) = pixels.shape
for i in range(len(boxes)):
x1, y1, x2, y2 = boxes[i]
p1 = (int(x1 * w), int(y1 * h))
p2 = (int(x2 * w), int(y2 * h))
pixels = cv2.rectangle(pixels, p1, p2, (255, 0, 0), 2)
label = classes[i] + ": " + str(round(scores[i], 2))
t_size = cv2.getTextSize(label, 0, 0.5, thickness=int(0.6 * (h + w) / 600) // 2)[0]
cv2.rectangle(pixels, p1, (p1[0] + t_size[0], p1[1] - t_size[1] - 3), (255, 0, 0), -1)
cv2.putText(
pixels,
label,
(p1[0], p1[1] - 2),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
int(0.6 * (h + w) / 600) // 2,
lineType=cv2.LINE_AA,
)
return pixels
def draw_img(pixels):
cv2.imshow("Image", pixels)
cv2.waitKey(0)
cv2.destroyAllWindows()
def save_img(filename, pixels):
cv2.imwrite(filename, pixels)
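# Minimal usage sketch (hedged; "dog.jpg" and "dog_out.jpg" are hypothetical paths):
if __name__ == "__main__":
    pixels, batch = read_img("dog.jpg", 608)  # original-resolution pixels + 1x608x608x3 batch
    pixels = add_bboxes(pixels, [[0.1, 0.1, 0.5, 0.5]], [0.9], ["dog"])
    save_img("dog_out.jpg", pixels)  # or draw_img(pixels) for an interactive window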
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/img.py
|
# Copyright 2021 Kacper Kluk, Jagoda Kamińska. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
from layers import Mish, ScaledRandomUniform
import utils
anchor_sizes = [
[(12, 16), (19, 36), (40, 28)],
[(36, 75), (76, 55), (72, 146)],
[(142, 110), (192, 243), (459, 401)],
]
scales = [1.2, 1.1, 1.05]
def calc_loss(layer_id, gt, preds, debug=False):
gt_boxes = gt[..., : 4]
gt_labels = tf.cast(gt[..., 4], tf.int32)
gt_count = tf.shape(gt_labels)[-1]
gt_mask = tf.where(gt_labels == -1, 0.0, 1.0)
layer_xywh, layer_obj, layer_cls = utils.decode_layer(preds, layer_id)
cls_count = layer_cls.shape[-1]
s = tf.shape(preds)
batch_size = s[0]
gw = s[1]
gh = s[2]
truth_mask = tf.zeros((batch_size, gw, gh, 3))
box_loss = 0.0
cls_loss = 0.0
ix = tf.cast(tf.math.floor(tf.cast(gw, tf.float32) * gt_boxes[..., 0]), tf.int32)
iy = tf.cast(tf.math.floor(tf.cast(gh, tf.float32) * gt_boxes[..., 1]), tf.int32)
ix = tf.clip_by_value(ix, 0, gw - 1)
iy = tf.clip_by_value(iy, 0, gh - 1)
box_shape = tf.shape(gt_labels)
zeros = tf.zeros_like(gt_labels, dtype=tf.float32)
gt_shift = tf.stack([zeros, zeros, gt_boxes[..., 2], gt_boxes[..., 3]], axis=-1)
gt_shift = tf.stack([gt_shift, gt_shift, gt_shift], axis=1)
anchors_ws = [tf.cast(tf.fill(box_shape, anchor_sizes[layer_id][ir][0]), dtype=tf.float32) / 608.0 for ir in range(3)]
anchors_hs = [tf.cast(tf.fill(box_shape, anchor_sizes[layer_id][ir][1]), dtype=tf.float32) / 608.0 for ir in range(3)]
anchors = tf.stack([tf.stack([zeros, zeros, anchors_ws[ir], anchors_hs[ir]], axis=-1) for ir in range(3)], axis=1)
ious = utils.calc_ious(gt_shift, anchors)
ious_argmax = tf.cast(tf.argmax(ious, axis=1), dtype=tf.int32)
batch_idx = tf.tile(tf.range(batch_size)[ : , tf.newaxis], [1, box_shape[-1]])
indices = tf.stack([batch_idx, iy, ix, ious_argmax], axis=-1)
pred_boxes = tf.gather_nd(layer_xywh, indices)
box_loss = tf.math.reduce_sum(gt_mask * (1.0 - utils.calc_gious(pred_boxes, gt_boxes)))
cls_one_hot = tf.one_hot(gt_labels, cls_count)
pred_cls = tf.gather_nd(layer_cls, indices)
cls_diffs = tf.math.reduce_sum(tf.math.square(pred_cls - cls_one_hot), axis=-1)
cls_loss = tf.math.reduce_sum(gt_mask * cls_diffs)
indices_not_null = tf.gather_nd(indices, tf.where(gt_labels != -1))
truth_mask = tf.tensor_scatter_nd_update(truth_mask, indices_not_null, tf.ones_like(indices_not_null, dtype=tf.float32)[:,0])
inv_truth_mask = 1.0 - truth_mask
obj_loss = tf.math.reduce_sum(tf.math.square(1 - layer_obj) * truth_mask)
gt_boxes_exp = tf.tile(tf.reshape(gt_boxes, (batch_size, 1, 1, 1, gt_count, 4)), [1, gw, gh, 3, 1, 1])
pred_boxes_exp = tf.tile(tf.reshape(layer_xywh, (batch_size, gw, gh, 3, 1, 4)), [1, 1, 1, 1, gt_count, 1])
iou_mask = tf.cast(tf.math.reduce_max(utils.calc_ious(gt_boxes_exp, pred_boxes_exp), axis=-1) < 0.7, tf.float32)
obj_loss += tf.math.reduce_sum(tf.math.square(layer_obj) * inv_truth_mask * iou_mask)
return (0.05 * box_loss + 1.0 * obj_loss + 0.5 * cls_loss) / tf.cast(batch_size, dtype=tf.float32)
class YOLOv4Model(tf.keras.Model):
def __init__(self, classes_num=80, image_size=(608, 608)):
self.classes_num = classes_num
self.image_size = (image_size[0], image_size[1], 3)
input = tf.keras.Input(shape=self.image_size)
output = self.CSPDarknet53WithSPP()(input)
output = self.YOLOHead()(output)
super().__init__(input, output)
self.loss_tracker = tf.keras.metrics.Mean(name="loss")
self.lr_tracker = tf.keras.metrics.Mean(name="lr")
self.mAP_tracker = tf.keras.metrics.Mean(name="mAP")
def fit(self, dataset, **kwargs):
start_step = 1 + kwargs['steps_per_epoch'] * kwargs['initial_epoch']
self.current_step = tf.Variable(start_step, trainable=False, dtype=tf.int32)
self.total_steps = kwargs['epochs'] * kwargs['steps_per_epoch']
super().fit(dataset, **kwargs)
def train_step(self, data):
input, gt_boxes = data
with tf.GradientTape() as tape:
output = self(input, training=True)
loss0 = calc_loss(0, gt_boxes, output[0])
loss1 = calc_loss(1, gt_boxes, output[1])
loss2 = calc_loss(2, gt_boxes, output[2])
total_loss = loss0 + loss1 + loss2
gradients = tape.gradient(total_loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
self.loss_tracker.update_state(total_loss)
self.lr_tracker.update_state(self.optimizer.lr(self.current_step))
self.current_step.assign_add(1)
return {"loss" : self.loss_tracker.result(), "lr": self.lr_tracker.result()}
def test_step(self, data):
input, gt_boxes = data
prediction = self(input, training=False)
ap = tf.py_function(
func=lambda *args: utils.calc_mAP(args[:-2], args[-2], args[-1]),
inp=[*prediction, gt_boxes, self.classes_num],
Tout=tf.float64,
)
self.mAP_tracker.update_state(ap)
return {"mAP" : self.mAP_tracker.result()}
@property
def metrics(self):
return [self.loss_tracker, self.mAP_tracker, self.lr_tracker]
def load_weights(self, weights_file):
if weights_file.endswith(".h5"):
super().load_weights(weights_file)
else:
self._load_weights_yolo(weights_file)
# load weights from darknet weight file
def _load_weights_yolo(self, weights_file):
with open(weights_file, "rb") as f:
major, minor, revision = np.fromfile(f, dtype=np.int32, count=3)
if (major * 10 + minor) >= 2:
seen = np.fromfile(f, dtype=np.int64, count=1)
else:
seen = np.fromfile(f, dtype=np.int32, count=1)
j = 0
for i in range(110):
conv_layer_name = "conv2d_%d" % i if i > 0 else "conv2d"
bn_layer_name = "batch_normalization_%d" % j if j > 0 else "batch_normalization"
conv_layer = self.get_layer(conv_layer_name)
in_dim = conv_layer.input_shape[-1]
filters = conv_layer.filters
size = conv_layer.kernel_size[0]
if i not in [93, 101, 109]:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(f, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = self.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(f, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, size, size)
conv_weights = np.fromfile(f, dtype=np.float32, count=np.prod(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in [93, 101, 109]:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(f.read()) == 0, "failed to read all data"
def darknetConv(
self, filters, size, strides=1, batch_norm=True, activate=True, activation="leaky"
):
def feed(x):
if strides == 1:
padding = "same"
else:
x = tf.keras.layers.ZeroPadding2D(((1, 0), (1, 0)))(x)
padding = "valid"
x = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=size,
strides=strides,
padding=padding,
use_bias=not batch_norm,
kernel_initializer=ScaledRandomUniform(
scale=tf.sqrt(2 / (size * size * self.image_size[2])), minval=-0.01, maxval=0.01
),
kernel_regularizer=tf.keras.regularizers.l2(0.0005),
)(x)
if batch_norm:
x = tf.keras.layers.BatchNormalization(moving_variance_initializer="zeros", momentum = 0.9)(x)
if activate:
if activation == "mish":
x = Mish()(x)
elif activation == "leaky":
x = tf.keras.layers.LeakyReLU(alpha=0.1)(x)
return x
return feed
def darknetResidualBlock(self, filters, repeats=1, initial=False):
def feed(x):
filters2 = 2 * filters if initial else filters
x = self.darknetConv(2 * filters, 3, strides=2, activation="mish")(x)
route = self.darknetConv(filters2, 1, activation="mish")(x)
x = self.darknetConv(filters2, 1, activation="mish")(x)
for i in range(repeats):
skip = x
x = self.darknetConv(filters, 1, activation="mish")(x)
x = self.darknetConv(filters2, 3, activation="mish")(x)
x = tf.keras.layers.Add()([skip, x])
x = self.darknetConv(filters2, 1, activation="mish")(x)
x = tf.keras.layers.Concatenate()([x, route])
x = self.darknetConv(2 * filters, 1, activation="mish")(x)
return x
return feed
def CSPDarknet53WithSPP(self):
def feed(x):
x = self.darknetConv(32, 3, activation="mish")(x)
x = self.darknetResidualBlock(32, initial=True)(x)
x = self.darknetResidualBlock(64, repeats=2)(x)
x = route_1 = self.darknetResidualBlock(128, repeats=8)(x)
x = route_2 = self.darknetResidualBlock(256, repeats=8)(x)
x = self.darknetResidualBlock(512, repeats=4)(x)
x = self.darknetConv(512, 1)(x)
x = self.darknetConv(1024, 3)(x)
x = self.darknetConv(512, 1)(x)
# SPP
spp1 = tf.keras.layers.MaxPooling2D(pool_size=13, strides=1, padding="same")(x)
spp2 = tf.keras.layers.MaxPooling2D(pool_size=9, strides=1, padding="same")(x)
spp3 = tf.keras.layers.MaxPooling2D(pool_size=5, strides=1, padding="same")(x)
x = tf.keras.layers.Concatenate()([spp1, spp2, spp3, x])
x = self.darknetConv(512, 1)(x)
x = self.darknetConv(1024, 3)(x)
x = self.darknetConv(512, 1)(x)
return route_1, route_2, x
return feed
def yoloUpsampleConvBlock(self, filters):
def feed(x, y):
x = self.darknetConv(filters, 1)(x)
x = tf.keras.layers.UpSampling2D()(x)
y = self.darknetConv(filters, 1)(y)
x = tf.keras.layers.Concatenate()([y, x])
x = self.darknetConv(filters, 1)(x)
x = self.darknetConv(2 * filters, 3)(x)
x = self.darknetConv(filters, 1)(x)
x = self.darknetConv(2 * filters, 3)(x)
x = self.darknetConv(filters, 1)(x)
return x
return feed
def yoloDownsampleConvBlock(self, filters):
def feed(x, y):
x = self.darknetConv(filters, 3, strides=2)(x)
x = tf.keras.layers.Concatenate()([x, y])
x = self.darknetConv(filters, 1)(x)
x = self.darknetConv(2 * filters, 3)(x)
x = self.darknetConv(filters, 1)(x)
x = self.darknetConv(2 * filters, 3)(x)
x = self.darknetConv(filters, 1)(x)
return x
return feed
def yoloBboxConvBlock(self, filters):
def feed(x):
x = self.darknetConv(filters, 3)(x)
x = self.darknetConv(3 * (self.classes_num + 5), 1, activate=False, batch_norm=False)(x)
return x
return feed
def YOLOHead(self):
def feed(x):
route_1, route_2, route = x
x = route_2 = self.yoloUpsampleConvBlock(256)(route, route_2)
x = route_1 = self.yoloUpsampleConvBlock(128)(x, route_1)
small_bbox = self.yoloBboxConvBlock(256)(x)
x = self.yoloDownsampleConvBlock(256)(route_1, route_2)
medium_bbox = self.yoloBboxConvBlock(512)(x)
x = self.yoloDownsampleConvBlock(512)(x, route)
large_bbox = self.yoloBboxConvBlock(1024)(x)
return small_bbox, medium_bbox, large_bbox
return feed
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/model.py
|
# Copyright 2021 Kacper Kluk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
from inference import decode_prediction
ANCHORS = [
[(12, 16), (19, 36), (40, 28)],
[(36, 75), (76, 55), (72, 146)],
[(142, 110), (192, 243), (459, 401)],
]
SCALES = [1.2, 1.1, 1.05]
SIZE = 608
def sigmoid(x):
return 1 / (1 + tf.math.exp(-x))
def xywh_to_ltrb(boxes):
boxes = tf.convert_to_tensor(boxes)
x = boxes[..., 0]
y = boxes[..., 1]
w = boxes[..., 2]
h = boxes[..., 3]
return tf.stack([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
def ltrb_to_xywh(boxes):
boxes = tf.convert_to_tensor(boxes)
l = boxes[..., 0]
t = boxes[..., 1]
r = boxes[..., 2]
b = boxes[..., 3]
return tf.stack([(l + r) / 2, (t + b) / 2, r - l, b - t], axis=-1)
def calc_ious(boxes1, boxes2):
ltrb1 = xywh_to_ltrb(boxes1)
ltrb2 = xywh_to_ltrb(boxes2)
il = tf.math.maximum(ltrb1[..., 0], ltrb2[..., 0])
it = tf.math.maximum(ltrb1[..., 1], ltrb2[..., 1])
ir = tf.math.minimum(ltrb1[..., 2], ltrb2[..., 2])
ib = tf.math.minimum(ltrb1[..., 3], ltrb2[..., 3])
I = tf.math.maximum(0.0, ir - il) * tf.math.maximum(0.0, ib - it)
A1 = (ltrb1[..., 2] - ltrb1[..., 0]) * (ltrb1[..., 3] - ltrb1[..., 1])
A2 = (ltrb2[..., 2] - ltrb2[..., 0]) * (ltrb2[..., 3] - ltrb2[..., 1])
U = A1 + A2 - I
return I / U
def calc_gious(boxes1, boxes2):
ltrb1 = xywh_to_ltrb(boxes1)
ltrb2 = xywh_to_ltrb(boxes2)
il = tf.math.maximum(ltrb1[..., 0], ltrb2[..., 0])
it = tf.math.maximum(ltrb1[..., 1], ltrb2[..., 1])
ir = tf.math.minimum(ltrb1[..., 2], ltrb2[..., 2])
ib = tf.math.minimum(ltrb1[..., 3], ltrb2[..., 3])
I = tf.math.maximum(0.0, ir - il) * tf.math.maximum(0.0, ib - it)
A1 = (ltrb1[..., 2] - ltrb1[..., 0]) * (ltrb1[..., 3] - ltrb1[..., 1])
A2 = (ltrb2[..., 2] - ltrb2[..., 0]) * (ltrb2[..., 3] - ltrb2[..., 1])
U = A1 + A2 - I
cl = tf.math.minimum(ltrb1[..., 0], ltrb2[..., 0])
ct = tf.math.minimum(ltrb1[..., 1], ltrb2[..., 1])
cr = tf.math.maximum(ltrb1[..., 2], ltrb2[..., 2])
cb = tf.math.maximum(ltrb1[..., 3], ltrb2[..., 3])
C = (cr - cl) * (cb - ct)
return I / U - (C - U) / C
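# Quick sanity check for the IoU helpers (a hedged sketch; defined but not called on import):
def _demo_ious():
    a = tf.constant([0.5, 0.5, 0.2, 0.2])  # xywh box fully contained in b
    b = tf.constant([0.5, 0.5, 0.4, 0.4])
    print(calc_ious(a, b).numpy())   # ~0.25 (= 0.04 / 0.16)
    print(calc_gious(a, b).numpy())  # equals the IoU here: enclosing box == union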
# split model output into xywh, obj and cls tensors
# output tensor shape: [batch, width, height, 3, ...]
def decode_layer(layer, layer_id):
shape = layer.shape # [batch, width, height, 3 * (5 + classes)]
d = shape[3]
gw, gh = shape[1 : 3]
stride_x = 1 / gw
stride_y = 1 / gh
tile_x = tf.cast(tf.tile(tf.expand_dims(tf.range(gw), axis=0), [gw, 1]), tf.float32)
tile_y = tf.cast(tf.tile(tf.expand_dims(tf.range(gw), axis=1), [1, gh]), tf.float32)
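# Note: both index grids are built from tf.range(gw), so a square grid
# (gw == gh, as with the 608x608 inputs used here) is implicitly assumed.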
output_xywh = []
output_obj = []
output_cls = []
for ir in range(3):
data = layer[..., (d // 3) * ir : (d // 3) * (ir + 1)]
dx = data[..., 0]
dy = data[..., 1]
dw = data[..., 2]
dh = data[..., 3]
x = (sigmoid(dx) * SCALES[layer_id] - 0.5 * (SCALES[layer_id] - 1) + tile_x) * stride_x
y = (sigmoid(dy) * SCALES[layer_id] - 0.5 * (SCALES[layer_id] - 1) + tile_y) * stride_y
w = tf.math.exp(dw) * ANCHORS[layer_id][ir][0] / SIZE
h = tf.math.exp(dh) * ANCHORS[layer_id][ir][1] / SIZE
output_xywh.append(tf.stack([x, y, w, h], axis=-1))
output_obj.append(sigmoid(data[..., 4]))
output_cls.append(sigmoid(data[..., 5 : ]))
return (tf.stack(output_xywh, axis=-2),
tf.stack(output_obj, axis=-1),
tf.stack(output_cls, axis=-2))
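# Shape sketch for decode_layer (hedged; random logits stand in for model output):
def _demo_decode_layer():
    layer = tf.random.normal([2, 19, 19, 3 * (5 + 80)])  # batch=2, 19x19 grid, 80 classes
    xywh, obj, cls = decode_layer(layer, layer_id=2)
    print(xywh.shape)  # (2, 19, 19, 3, 4)
    print(obj.shape)   # (2, 19, 19, 3)
    print(cls.shape)   # (2, 19, 19, 3, 80)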
# Note: pure-Python implementation; slow, and works only in eager execution.
def calc_mAP(predictions, gt_boxes, num_classes):
def iou(box1, box2):
l = max(box1[0], box2[0])
t = max(box1[1], box2[1])
r = min(box1[2], box2[2])
b = min(box1[3], box2[3])
i = max(0, r - l) * max(0, b - t)
u = (box1[2] - box1[0]) * (box1[3] - box1[1]) + (box2[2] - box2[0]) * (box2[3] - box2[1]) - i
return i / u
batch_size = predictions[0].shape[0]
num_pred_boxes = 0
num_gt_boxes = 0
num_true_positives = 0
stats = []
for batch_idx in range(batch_size):
prediction = tuple(p[batch_idx : batch_idx + 1, ...] for p in predictions)
pred_boxes, scores, pred_classes = decode_prediction(prediction, num_classes)
boxes = gt_boxes[batch_idx, :, : 4]
classes = gt_boxes[batch_idx, :, 4]
gt_used_idx = []
num_pred_boxes += len(pred_boxes)
num_gt_boxes += len(classes)
for pred_idx, (pred_box, pred_class) in enumerate(zip(pred_boxes, pred_classes)):
found = False
for gt_idx, (gt_box, gt_class) in enumerate(zip(boxes, classes)):
if gt_idx in gt_used_idx:
continue
if pred_class != gt_class:
continue
gt_ltrb = (gt_box[0] - gt_box[2] / 2, gt_box[1] - gt_box[3] / 2,
gt_box[0] + gt_box[2] / 2, gt_box[1] + gt_box[3] / 2)
if iou(pred_box, gt_ltrb) < 0.5:
continue
found = True
num_true_positives += 1
break
stats.append((scores[pred_idx], found))
if num_pred_boxes == 0:
return 0.0
ap = 0.0
max_prec = num_true_positives / num_pred_boxes
for _, found in sorted(stats):
if found:
ap += max_prec / num_gt_boxes
num_true_positives -= 1
num_pred_boxes -= 1
if num_pred_boxes == 0:
break
max_prec = max(max_prec, num_true_positives / num_pred_boxes)
return ap
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/utils.py
|
# Copyright 2021 Kacper Kluk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from model import YOLOv4Model
import numpy as np
import tensorflow as tf
from dali.pipeline import YOLOv4Pipeline
from np.pipeline import YOLOv4PipelineNumpy
import os
import random
import atexit
SET_MEMORY_GROWTH = True
class SaveWeightsCallback(tf.keras.callbacks.Callback):
def __init__(self, ckpt_dir):
self.ckpt_dir = ckpt_dir
def on_epoch_begin(self, epoch, logs=None):
self.model.save_weights(self.ckpt_dir + '/epoch_' + str(epoch) + '.h5')
class YOLOLearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, init_lr):
self.init_lr = init_lr
def __call__(self, step):
warmup = tf.math.minimum(1.0, tf.cast(step, tf.float32) / 1000)
return warmup * self.init_lr
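# Warmup sketch: the schedule ramps linearly over the first 1000 steps, e.g.
#   YOLOLearningRateSchedule(1e-3)(500)   -> 5e-4
#   YOLOLearningRateSchedule(1e-3)(2000)  -> 1e-3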
def train(file_root, annotations, batch_size, epochs, steps_per_epoch, **kwargs):
seed = kwargs.get("seed")
if not seed:
seed = int.from_bytes(os.urandom(4), "little")
else:
os.environ['PYTHONHASHSEED']=str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
if SET_MEMORY_GROWTH:
pds = tf.config.list_physical_devices('GPU')
for pd in pds:
tf.config.experimental.set_memory_growth(pd, True)
pipeline = kwargs.get("pipeline")
use_mosaic = kwargs.get("use_mosaic")
log_dir = kwargs.get("log_dir")
ckpt_dir = kwargs.get("ckpt_dir")
start_weights = kwargs.get("start_weights")
def get_dataset_fn(file_root, annotations,
batch_size, pipeline, is_training):
def dataset_fn(input_context):
image_size = (608, 608)
device_id = input_context.input_pipeline_id
num_threads = input_context.num_input_pipelines
if pipeline == 'dali-gpu' or pipeline == 'dali-cpu':
with tf.device("/gpu:{}".format(input_context.input_pipeline_id)):
yolo = YOLOv4Pipeline(
file_root, annotations,
batch_size, image_size, num_threads, device_id, seed,
use_gpu=pipeline == 'dali-gpu',
is_training=is_training,
use_mosaic=use_mosaic
)
return yolo.dataset()
if pipeline == 'numpy':
yolo = YOLOv4PipelineNumpy(
file_root, annotations,
batch_size, image_size, num_threads, device_id, seed,
is_training=is_training,
use_mosaic=use_mosaic
)
return yolo.dataset()
return dataset_fn
total_steps = epochs * steps_per_epoch
initial_lr = kwargs.get("lr")
lr_fn = YOLOLearningRateSchedule(initial_lr)
initial_epoch = 0
multigpu = kwargs.get("multigpu")
strategy = tf.distribute.MirroredStrategy() if multigpu else tf.distribute.get_strategy()
if hasattr(strategy._extended._collective_ops, "_pool"):
atexit.register(strategy._extended._collective_ops._pool.close) # type: ignore
with strategy.scope():
model = YOLOv4Model()
model.compile(
optimizer=tf.keras.optimizers.legacy.SGD(learning_rate=lr_fn)
)
if start_weights:
model.load_weights(start_weights)
fn = start_weights.split('/')[-1]
if fn.endswith('.h5') and fn.startswith('epoch_'):
initial_epoch = int(fn[6 : -3])
input_options = tf.distribute.InputOptions(
experimental_place_dataset_on_device = True,
experimental_fetch_to_device = False,
experimental_replication_mode = tf.distribute.InputReplicationMode.PER_REPLICA)
dataset = strategy.distribute_datasets_from_function(
get_dataset_fn(file_root, annotations, batch_size, pipeline, True),
input_options)
eval_file_root = kwargs.get('eval_file_root')
eval_annotations = kwargs.get('eval_annotations')
eval_dataset = None
if eval_file_root is not None and eval_annotations is not None:
eval_dataset = strategy.distribute_datasets_from_function(
get_dataset_fn(eval_file_root, eval_annotations, 1, 'dali-cpu', False),
tf.distribute.InputOptions()
)
callbacks = []
if log_dir:
callbacks.append(tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
update_freq='epoch'
))
if ckpt_dir:
callbacks.append(SaveWeightsCallback(ckpt_dir))
model.fit(
dataset,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
initial_epoch=initial_epoch,
callbacks=callbacks,
validation_data=eval_dataset,
validation_steps=kwargs.get('eval_steps'),
validation_freq=kwargs.get('eval_frequency'),
)
return model
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/train.py
|
# Copyright 2021 Kacper Kluk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import utils
def decode_prediction(prediction, num_classes):
pred_boxes = [[] for i in range(num_classes)]
for i, layer in enumerate(prediction):
xywh, obj, conf = utils.decode_layer(layer, i)
ltrb = utils.xywh_to_ltrb(xywh)
objectness = tf.math.reduce_max(conf, axis=-1) * obj
clss = tf.argmax(conf, axis=-1)
detected = tf.where(objectness > 0.25)
for idx in detected:
batch, ix, iy, ir = idx
score = objectness[batch, ix, iy, ir].numpy()
cls = clss[batch, ix, iy, ir]
box = list(ltrb[batch, ix, iy, ir].numpy())
pred_boxes[cls].append((score, box))
# nms
def iou(box1, box2):
l = max(box1[0], box2[0])
t = max(box1[1], box2[1])
r = min(box1[2], box2[2])
b = min(box1[3], box2[3])
i = max(0, r - l) * max(0, b - t)
u = (box1[2] - box1[0]) * (box1[3] - box1[1]) + (box2[2] - box2[0]) * (box2[3] - box2[1]) - i
return i / u
boxes = []
scores = []
labels = []
for cls in range(num_classes):
cls_preds = sorted(pred_boxes[cls])
while len(cls_preds) > 0:
score, box = cls_preds[-1]
boxes.append(box)
scores.append(score)
labels.append(cls)
rem = []
for score2, box2 in cls_preds:
if iou(box, box2) < 0.213:
rem.append((score2, box2))
cls_preds = rem
return boxes, scores, labels
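# End-to-end sketch (hedged; random logits stand in for real model output, and the
# small grid sizes are illustrative only):
if __name__ == "__main__":
    num_classes = 2
    prediction = [tf.random.normal([1, s, s, 3 * (5 + num_classes)]) for s in (8, 4, 2)]
    boxes, scores, labels = decode_prediction(prediction, num_classes)
    print(len(boxes), "boxes kept after NMS")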
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/inference.py
|
# Copyright 2021 Kacper Kluk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from model import YOLOv4Model
from dali.pipeline import YOLOv4Pipeline
from img import read_img, draw_img, save_img, add_bboxes
import inference
import train
import os
def run_infer(weights_file, labels_file, image_path, out_filename):
cls_names = open(labels_file, "r").read().split("\n")
model = YOLOv4Model()
model.load_weights(weights_file)
img, input = read_img(image_path, 608)
prediction = model.predict(input)
boxes, scores, labels = inference.decode_prediction(prediction, len(cls_names))
labels = [cls_names[cls] for cls in labels]
pixels = add_bboxes(img, boxes, scores, labels)
if out_filename:
save_img(out_filename, pixels)
else:
draw_img(pixels)
def run_training(file_root, annotations, batch_size, epochs, steps_per_epoch, **kwargs):
model = train.train(file_root, annotations, batch_size, epochs, steps_per_epoch, **kwargs)
output = kwargs.get("output")
if output:
model.save_weights(output)
def run_eval(file_root, annotations_file, weights_file, batch_size, steps):
model = YOLOv4Model()
model.load_weights(weights_file)
seed = int.from_bytes(os.urandom(4), "little")
pipeline = YOLOv4Pipeline(
file_root, annotations_file, batch_size, (608, 608), 1, 0, seed,
dali_use_gpu=True,
is_training=False
)
dataset = pipeline.dataset()
model.compile(run_eagerly=True)
model.evaluate(dataset, steps=steps)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="action")
subparsers.required = True
parser_infer = subparsers.add_parser("infer")
parser_infer.add_argument("--weights", "-w", default="yolov4.weights")
parser_infer.add_argument("--classes", "-c", default="coco-labels.txt")
parser_infer.add_argument("--output", "-o")
parser_infer.add_argument("image")
parser_train = subparsers.add_parser("train")
parser_train.add_argument("file_root")
parser_train.add_argument("annotations")
parser_train.add_argument("--batch_size", "-b", default=8, type=int)
parser_train.add_argument("--epochs", "-e", default=5, type=int)
parser_train.add_argument("--steps", "-s", default=1000, type=int)
parser_train.add_argument("--eval_file_root", default=None)
parser_train.add_argument("--eval_annotations", default=None)
parser_train.add_argument("--eval_steps", default=5000, type=int)
parser_train.add_argument("--eval_frequency", default=5, type=int)
parser_train.add_argument("--output", "-o", default="output.h5")
parser_train.add_argument("--start_weights", "-w", default=None)
parser_train.add_argument("--learning_rate", default=1e-3, type=float)
parser_train.add_argument("--pipeline", default="dali-gpu")
parser_train.add_argument("--multigpu", action="store_true")
parser_train.add_argument("--use_mosaic", action="store_true")
parser_train.add_argument("--log_dir", default=None)
parser_train.add_argument("--ckpt_dir", default=None)
parser_train.add_argument("--seed", default=None, type=int)
parser_eval = subparsers.add_parser("eval")
parser_eval.add_argument("file_root")
parser_eval.add_argument("annotations")
parser_eval.add_argument("--weights", "-w", default="yolov4.weights")
parser_eval.add_argument("--batch_size", "-b", default=1, type=int)
parser_eval.add_argument("--steps", "-s", default=1000, type=int)
args = parser.parse_args()
if args.action == "infer":
run_infer(args.weights, args.classes, args.image, args.output)
elif args.action == "train":
run_training(
args.file_root, args.annotations, args.batch_size, args.epochs, args.steps,
eval_file_root=args.eval_file_root,
eval_annotations=args.eval_annotations,
eval_steps=args.eval_steps,
eval_frequency=args.eval_frequency,
output=args.output,
lr=args.learning_rate,
pipeline=args.pipeline,
log_dir=args.log_dir,
ckpt_dir=args.ckpt_dir,
start_weights=args.start_weights,
multigpu=args.multigpu,
use_mosaic=args.use_mosaic,
seed=args.seed
)
elif args.action == "eval":
run_eval(args.file_root, args.annotations, args.weights, args.batch_size, args.steps)
else:
print("The " + args.action + " action is not yet implemented :<")
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/main.py
|
# Copyright 2021 Jagoda Kamińska. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import tensorflow_addons as tfa
class Mish(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, inputs):
return tfa.activations.mish(inputs)
class ScaledRandomUniform(tf.keras.initializers.RandomUniform):
def __init__(self, scale=1, **kwags):
super().__init__(**kwags)
self.scale = scale
def __call__(self, *args, **kwargs):
return tf.math.scalar_mul(self.scale, super().__call__(*args, **kwargs))
def get_config(self): # To support serialization
return {"scale": self.scale} | super().get_config()
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/layers.py
|
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from pycocotools import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
def _isArrayLike(obj):
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
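# Note: plain strings also pass this check (they define __iter__ and __len__),
# so callers are expected to pass ints or lists of ints, not strings.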
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if annotation_file is not None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if iscrowd is not None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if _isArrayLike(ids):
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str: #or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert an annotation, which can be polygons or uncompressed RLE, to compressed RLE.
:return: RLE (run-length encoding dict)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m
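# Typical usage sketch (hedged; "instances_val2017.json" is a hypothetical path):
if __name__ == "__main__":
    coco = COCO("instances_val2017.json")
    img_ids = coco.getImgIds()
    ann_ids = coco.getAnnIds(imgIds=img_ids[:1], iscrowd=None)
    anns = coco.loadAnns(ann_ids)
    print(len(img_ids), "images;", len(anns), "annotations on the first image")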
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/np/coco.py
|
# Copyright 2021 Kacper Kluk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .coco import COCO
import numpy as np
import tensorflow as tf
import os
import cv2
import random
class YOLOv4PipelineNumpy:
def __init__(
self, file_root, annotations_file,
batch_size, image_size, num_threads, device_id, seed,
**kwargs
):
self._file_root = file_root
self._annotations_file = annotations_file
self._batch_size = batch_size
self._image_size = image_size
self._num_threads = num_threads
self._device_id = device_id
self._is_training = kwargs.get('is_training', False)
self._use_mosaic = kwargs.get('use_mosaic', False)
self._coco = COCO(annotations_file)
self._batch_id = 0
self._image_ids = self._coco.getImgIds()
np.random.shuffle(self._image_ids)
self._num_batches = len(self._image_ids) // (self._batch_size * self._num_threads)
if self._num_batches == 0:
raise Exception("Batch size exceeds training set size")
def __iter__(self):
return self
def __next__(self):
with tf.device('/cpu:0'):
if self._batch_id == self._num_batches:
self._batch_id = 0
np.random.shuffle(self._image_ids)
start = self._batch_size * (self._batch_id * self._num_threads + self._device_id)
image_ids = self._image_ids[start : start + self._batch_size]
images, bboxes, classes = self._input(image_ids)
if self._is_training:
self._color_twist(images)
self._flip(images, bboxes)
if self._use_mosaic:
images, bboxes, classes = self._mosaic(images, bboxes, classes)
self._batch_id += 1
lengths = [len(b) for b in bboxes]
bboxes = tf.RaggedTensor.from_row_lengths(tf.concat(bboxes, axis=0), lengths)
bboxes = bboxes.to_tensor(-1)
bboxes = tf.cast(bboxes, dtype=tf.float32)
classes = tf.ragged.stack(classes)
if self._batch_size > 1:
classes = classes.to_tensor(-1)
classes = tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32)
return images, tf.concat([bboxes, classes], axis=-1)
def __len__(self):
return self._num_batches
def _input(self, image_ids):
image_data = self._coco.loadImgs(image_ids)
images = np.zeros((self._batch_size, self._image_size[0], self._image_size[1], 3), dtype=np.float32)
bboxes = []
classes = []
for i in range(self._batch_size):
image_path = os.path.join(self._file_root, image_data[i]['file_name'])
image_width = image_data[i]['width']
image_height = image_data[i]['height']
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = tf.image.resize(image, self._image_size) / 255
image = tf.reshape(image, (1, self._image_size[0], self._image_size[1], 3))
images[i, ...] = image
ann_ids = self._coco.getAnnIds(image_ids[i])
anns = self._coco.loadAnns(ann_ids)
sample_bboxes = np.array([ann['bbox'] for ann in anns])
if len(sample_bboxes.shape) == 1:
sample_bboxes = np.zeros((0, 4))
sample_bboxes[ : , 0] /= image_width
sample_bboxes[ : , 1] /= image_height
sample_bboxes[ : , 2] /= image_width
sample_bboxes[ : , 3] /= image_height
sample_bboxes[ : , 0] += sample_bboxes[ : , 2] / 2
sample_bboxes[ : , 1] += sample_bboxes[ : , 3] / 2
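# Worked example (illustrative values): a COCO box [x, y, w, h] = [100, 50, 40, 20]
# in a 200x100 image normalizes to [0.5, 0.5, 0.2, 0.2] and, after shifting by half
# the extent, becomes the center-format box [0.6, 0.6, 0.2, 0.2].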
bboxes.append(sample_bboxes)
classes.append(np.array([ann['category_id'] for ann in anns], dtype=int))
return images, bboxes, classes
def _color_twist(self, images):
def random_value():
value = random.uniform(1.0, 1.5)
coin = random.randrange(2)
return coin * value + (1 - coin) * (1.0 / value)
for i in range(self._batch_size):
image = images[i, ...]
hue = random.uniform(-18.0, 18.0)
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
image[..., 0] += hue
image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
brightness = random_value()
contrast = random_value()
image *= brightness * contrast
image += (0.5 - 0.5 * contrast) * brightness
images[i, ...] = image
def _flip(self, images, bboxes):
for i in range(self._batch_size):
if random.randrange(2) == 0:
images[i, ...] = images[i, : , ::-1 , : ]
bboxes[i][: , 0] = 1.0 - bboxes[i][: , 0]
def _mosaic(self, images, bboxes, classes):
def trim_bboxes(bboxes, classes, x0, y0, x1, y1):
bboxes_ltrb = np.copy(bboxes)
bboxes_ltrb[ : , 0] -= bboxes[ : , 2] / 2
bboxes_ltrb[ : , 1] -= bboxes[ : , 3] / 2
bboxes_ltrb[ : , 2] += bboxes_ltrb[ : , 0]
bboxes_ltrb[ : , 3] += bboxes_ltrb[ : , 1]
bboxes_ltrb[ : , 0] = np.maximum(x0, bboxes_ltrb[ : , 0])
bboxes_ltrb[ : , 1] = np.maximum(y0, bboxes_ltrb[ : , 1])
bboxes_ltrb[ : , 2] = np.minimum(x1, bboxes_ltrb[ : , 2])
bboxes_ltrb[ : , 3] = np.minimum(y1, bboxes_ltrb[ : , 3])
bboxes_xywh = np.copy(bboxes_ltrb)
bboxes_xywh[ : , 2] -= bboxes_xywh[ : , 0]
bboxes_xywh[ : , 3] -= bboxes_xywh[ : , 1]
bboxes_xywh[ : , 0] += bboxes_xywh[ : , 2] / 2
bboxes_xywh[ : , 1] += bboxes_xywh[ : , 3] / 2
not_null = np.logical_and(bboxes_xywh[ : , 2] > 0, bboxes_xywh[ : , 3] > 0)
return bboxes_xywh[not_null, : ], classes[not_null]
images_out = np.zeros(images.shape)
bboxes_out = []
classes_out = []
for i in range(self._batch_size):
if random.randrange(2) == 0:
images_out[i, ...] = images[i, ...]
bboxes_out.append(bboxes[i])
classes_out.append(classes[i])
else:
ids = np.random.choice(self._batch_size, 4)
prop_x = np.random.uniform(0.2, 0.8)
prop_y = np.random.uniform(0.2, 0.8)
size_x = int(prop_x * self._image_size[0])
size_y = int(prop_y * self._image_size[1])
images_out[i, : size_y, : size_x, : ] = images[ids[0], : size_y, : size_x, : ]
images_out[i, : size_y, size_x :, : ] = images[ids[1], : size_y, size_x :, : ]
images_out[i, size_y :, : size_x, : ] = images[ids[2], size_y :, : size_x, : ]
images_out[i, size_y :, size_x :, : ] = images[ids[3], size_y :, size_x :, : ]
bboxes00, classes00 = trim_bboxes(bboxes[ids[0]], classes[ids[0]], 0.0, 0.0, prop_x, prop_y)
bboxes10, classes10 = trim_bboxes(bboxes[ids[1]], classes[ids[1]], prop_x, 0.0, 1.0, prop_y)
bboxes01, classes01 = trim_bboxes(bboxes[ids[2]], classes[ids[2]], 0.0, prop_y, prop_x, 1.0)
bboxes11, classes11 = trim_bboxes(bboxes[ids[3]], classes[ids[3]], prop_x, prop_y, 1.0, 1.0)
bboxes_out.append(np.concatenate((bboxes00, bboxes10, bboxes01, bboxes11)))
classes_out.append(np.concatenate((classes00, classes10, classes01, classes11)))
return images_out, bboxes_out, classes_out
def dataset(self):
return tf.data.Dataset.from_generator(lambda: self,
output_signature = (
tf.TensorSpec(shape=(None, 608, 608, 3), dtype=tf.float32),
tf.TensorSpec(shape=(None, None, 5), dtype=tf.float32)
)).prefetch(tf.data.AUTOTUNE)
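# Minimal usage sketch (hypothetical paths, not from the original file):
#   pipe = YOLOv4PipelineNumpy("coco/train2017", "coco/annotations/instances_train2017.json",
#                              batch_size=8, image_size=(608, 608), num_threads=1,
#                              device_id=0, seed=0, is_training=True)
#   for images, labels in pipe.dataset().take(1):
#       pass  # images: (8, 608, 608, 3), labels: (8, max_boxes, 5)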
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/np/pipeline.py
|
# Copyright 2021 Kacper Kluk, Piotr Kowalewski. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import nvidia.dali as dali
def input(file_root, annotations_file, shard_id, num_shards, device):
inputs, bboxes, classes = dali.fn.readers.coco(
file_root=file_root,
annotations_file=annotations_file,
ltrb=True,
shard_id=shard_id,
num_shards=num_shards,
ratio=True,
random_shuffle=True
)
images = dali.fn.decoders.image(inputs, device=device, output_type=dali.types.RGB)
return images, bboxes, classes
# Converts ltrb bbox coordinates to xywh, where xy denotes coordinates of a bbox center.
def ltrb_to_xywh(bboxes):
return dali.fn.coord_transform(
bboxes,
M=[0.5, 0.0, 0.5, 0.0,
0.0, 0.5, 0.0, 0.5,
-1.0, 0.0, 1.0, 0.0,
0.0, -1.0, 0.0, 1.0]
)
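# The 4x4 matrix above acts on [l, t, r, b] vectors; written out:
#   cx = 0.5*l + 0.5*r,  cy = 0.5*t + 0.5*b,  w = r - l,  h = b - t
# e.g. (illustrative values) [0.2, 0.4, 0.6, 0.8] -> [0.4, 0.6, 0.4, 0.4].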
# Transforms bbox ltrb coordinates, so that they fit within a window
# anchored at (pos_x, pos_y) and with a shape (shape_x, shape_y).
def bbox_adjust_ltrb(bboxes, shape_x, shape_y, pos_x, pos_y):
sx, sy, ex, ey = pos_x, pos_y, shape_x + pos_x, shape_y + pos_y
MT = dali.fn.transforms.crop(
to_start=dali.fn.stack(sx, sy, sx, sy),
to_end=dali.fn.stack(ex, ey, ex, ey)
)
return dali.fn.coord_transform(bboxes, MT=MT)
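# transforms.crop maps the default [0, 1] range onto [to_start, to_end], so a box
# coordinate x in [0, 1] becomes pos_x + x * shape_x (and likewise for y); e.g.
# (illustrative values) with pos=(0.5, 0.0) and shape=(0.5, 0.5), x=0 -> 0.5, x=1 -> 1.0.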
# Selects if_true or if_false tensor based on the predicate value.
# predicate should take integer value equal to either 0 or 1.
# if_true and if_false should be 2D tensors with equal size along axis 1.
# Note: this function is a workaround and should be replaced
# with the dedicated operator once available
def select(predicate, if_true, if_false):
true_shape = dali.fn.shapes(if_true, dtype=dali.types.DALIDataType.INT32)
false_shape = dali.fn.shapes(if_false, dtype=dali.types.DALIDataType.INT32)
joined = dali.fn.cat(if_true, if_false)
sh = predicate * true_shape + (1 - predicate) * false_shape
st = dali.fn.cat(
dali.fn.slice(true_shape * (1 - predicate), start=[0], shape=[1], axes=[0]),
dali.fn.constant(idata=0, shape=[1])
)
return dali.fn.slice(joined, start=st, shape=sh, axes=[0,1], out_of_bounds_policy="trim_to_shape")
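# How the slice implements select: `joined` stacks if_true rows on top of if_false
# rows along axis 0. When predicate == 1 the start is [0, 0] and the shape is
# if_true's shape, so the slice returns if_true; when predicate == 0 the start is
# [num_true_rows, 0] and the shape is if_false's shape, returning if_false.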
# Based on https://github.com/AlexeyAB/darknet/blob/005513a9db14878579adfbb61083962c99bb0a89/src/image.c#L1297
def color_twist(images):
def random_value():
value = dali.fn.random.uniform(range=(1, 1.5))
coin = dali.fn.random.coin_flip()
return coin * value + (1.0 - coin) * (1.0 / value)
return dali.fn.color_twist(images,
hue=dali.fn.random.uniform(range=(-18.0, 18.0)),
brightness=random_value(),
contrast=random_value()
)
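# The random_value() helper above mirrors darknet's rand_scale(): it draws v in
# [1, 1.5] and returns either v or 1/v with equal probability, so brightening and
# darkening (and contrast up/down) are sampled symmetrically.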
def flip(images, bboxes):
coin = dali.fn.random.coin_flip()
images = dali.fn.flip(images, horizontal=coin)
bboxes = dali.fn.bb_flip(bboxes, horizontal=coin, ltrb=True)
return images, bboxes
# Performs mosaic using MultiPaste operator.
def mosaic(images, bboxes, labels, image_size):
def generate_tiles(bboxes, labels, shape_x, shape_y):
idx = dali.fn.batch_permutation()
permuted_boxes = dali.fn.permute_batch(bboxes, indices=idx)
permuted_labels = dali.fn.permute_batch(labels, indices=idx)
shape = dali.fn.stack(shape_y, shape_x)
in_anchor, in_shape, bbx, lbl = dali.fn.random_bbox_crop(
permuted_boxes,
permuted_labels,
input_shape=image_size,
crop_shape=shape,
shape_layout="HW",
allow_no_crop=False,
total_num_attempts=64
)
# swap coordinates (x, y) -> (y, x)
in_anchor = dali.fn.reductions.sum(in_anchor) - in_anchor
in_anchor_c = dali.fn.cast(in_anchor, dtype=dali.types.DALIDataType.INT32)
return idx, bbx, lbl, in_anchor_c, shape
prop0_x = dali.fn.random.uniform(range=(0.2, 0.8))
prop0_y = dali.fn.random.uniform(range=(0.2, 0.8))
prop1_x = 1.0 - prop0_x
prop1_y = 1.0 - prop0_y
pix0_x = dali.fn.cast(prop0_x * image_size[0], dtype=dali.types.DALIDataType.INT32)
pix0_y = dali.fn.cast(prop0_y * image_size[1], dtype=dali.types.DALIDataType.INT32)
pix1_x = image_size[0] - pix0_x
pix1_y = image_size[1] - pix0_y
perm_UL, bboxes_UL, labels_UL, in_anchor_UL, size_UL = \
generate_tiles(bboxes, labels, pix0_x, pix0_y)
perm_UR, bboxes_UR, labels_UR, in_anchor_UR, size_UR = \
generate_tiles(bboxes, labels, pix1_x, pix0_y)
perm_LL, bboxes_LL, labels_LL, in_anchor_LL, size_LL = \
generate_tiles(bboxes, labels, pix0_x, pix1_y)
perm_LR, bboxes_LR, labels_LR, in_anchor_LR, size_LR = \
generate_tiles(bboxes, labels, pix1_x, pix1_y)
zeros_i = dali.types.Constant(0)
zeros_f = dali.types.Constant(0.0)
idx = dali.fn.stack(perm_UL, perm_UR, perm_LL, perm_LR)
out_anchors = dali.fn.stack(
dali.fn.stack(zeros_i, zeros_i),
dali.fn.stack(zeros_i, pix0_x),
dali.fn.stack(pix0_y, zeros_i),
dali.fn.stack(pix0_y, pix0_x)
)
in_anchors = dali.fn.stack(
in_anchor_UL, in_anchor_UR, in_anchor_LL, in_anchor_LR
)
shapes = dali.fn.stack(
size_UL, size_UR, size_LL, size_LR
)
bboxes_UL = bbox_adjust_ltrb(bboxes_UL, prop0_x, prop0_y, zeros_f, zeros_f)
bboxes_UR = bbox_adjust_ltrb(bboxes_UR, prop1_x, prop0_y, prop0_x, zeros_f)
bboxes_LL = bbox_adjust_ltrb(bboxes_LL, prop0_x, prop1_y, zeros_f, prop0_y)
bboxes_LR = bbox_adjust_ltrb(bboxes_LR, prop1_x, prop1_y, prop0_x, prop0_y)
stacked_bboxes = dali.fn.cat(bboxes_UL, bboxes_UR, bboxes_LL, bboxes_LR)
stacked_labels = dali.fn.cat(labels_UL, labels_UR, labels_LL, labels_LR)
mosaic = dali.fn.multi_paste(images, in_ids=idx, output_size=image_size, in_anchors=in_anchors,
shapes=shapes, out_anchors=out_anchors, dtype=dali.types.DALIDataType.UINT8)
return mosaic, stacked_bboxes, stacked_labels
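# Tile layout produced above (split point at (pix0_x, pix0_y)):
#   +--------+------+
#   |   UL   |  UR  |
#   +--------+------+   each tile is cropped from a randomly permuted batch
#   |   LL   |  LR  |   member via random_bbox_crop and pasted by multi_paste
#   +--------+------+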
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/dali/ops.py
|
# Copyright 2021 Kacper Kluk, Piotr Kowalewski. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import nvidia.dali as dali
import nvidia.dali.plugin.tf as dali_tf
import tensorflow as tf
from . import ops
class YOLOv4Pipeline:
def __init__(
self, file_root, annotations_file,
batch_size, image_size, num_threads, device_id, seed,
**kwargs
):
self._file_root = file_root
self._annotations_file = annotations_file
self._batch_size = batch_size
self._image_size = image_size
self._num_threads = num_threads
self._device_id = device_id
self._use_gpu = kwargs.get('use_gpu', False)
self._is_training = kwargs.get('is_training', False)
self._use_mosaic = kwargs.get('use_mosaic', False)
self._pipe = dali.pipeline.Pipeline(
batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=seed
)
self._define_pipeline()
def _define_pipeline(self):
with self._pipe:
images, bboxes, classes = ops.input(
self._file_root,
self._annotations_file,
self._device_id,
self._num_threads,
"mixed" if self._use_gpu else "cpu"
)
images = dali.fn.resize(
images,
resize_x=self._image_size[0],
resize_y=self._image_size[1],
interp_type=dali.types.DALIInterpType.INTERP_LINEAR
)
if self._is_training:
images = ops.color_twist(images)
images, bboxes = ops.flip(images, bboxes)
if self._use_mosaic:
do_mosaic = dali.fn.random.coin_flip()
images_m, bboxes_m, classes_m = ops.mosaic(images, bboxes, classes, self._image_size)
images = images * (1.0 - do_mosaic) + images_m * do_mosaic
bboxes = ops.select(do_mosaic, bboxes_m, bboxes)
classes = dali.fn.squeeze(ops.select(
do_mosaic,
dali.fn.expand_dims(classes_m, axes=1),
dali.fn.expand_dims(classes, axes=1)
), axes=1)
bboxes = ops.ltrb_to_xywh(bboxes)
images = images * (1.0 / 255.0)
# subtract one to be consistent with darknet's pretrained model weights
classes = dali.fn.expand_dims(classes, axes=1) - 1.0
labels = dali.fn.cat(bboxes, classes, axis=1)
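# pad every sample's labels to a common shape within the batch; the second pad
# enforces a minimum shape of (1, 5), so images with no boxes still yield a row of -1s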
labels = dali.fn.pad(labels, fill_value=-1)
labels = dali.fn.pad(labels, fill_value=-1, shape=(1, 5))
self._pipe.set_outputs(images.gpu(), labels.gpu())
def dataset(self):
output_shapes = ((self._batch_size, self._image_size[0], self._image_size[1], 3), (self._batch_size, None, 5))
output_dtypes = (tf.float32, tf.float32)
return dali_tf.DALIDataset(
pipeline=self._pipe,
batch_size=self._batch_size,
output_shapes=output_shapes,
output_dtypes=output_dtypes,
device_id=self._device_id
)
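# Minimal usage sketch (hypothetical paths, not from the original file):
#   pipe = YOLOv4Pipeline("coco/train2017", "coco/annotations/instances_train2017.json",
#                         batch_size=8, image_size=(608, 608), num_threads=4,
#                         device_id=0, seed=0, use_gpu=True, is_training=True)
#   ds = pipe.dataset()  # a tf.data.Dataset yielding (images, labels) on the GPU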
|
DALI-main
|
docs/examples/use_cases/tensorflow/yolov4/src/dali/pipeline.py
|
DALI-main
|
docs/examples/use_cases/video_superres/__init__.py
|
|
import argparse
import logging as log
import os
import time
import datetime
from functools import reduce
import numpy as np
from math import ceil, floor
from tensorboardX import SummaryWriter
import torch
import torch.distributed as dist
import torch.optim as optim
import torch.utils.data.distributed
from torch.multiprocessing import Process
from torch.autograd import Variable
from dataloading.dataloaders import get_loader
from model.model import VSRNet
from model.clr import cyclic_learning_rate
from common.fp16 import FP16_Optimizer
from common.fp16util import network_to_half
from common.distributed import DistributedDataParallel
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed')
parser.add_argument('--root', type=str, default='.',
help='input data root folder')
parser.add_argument('--frames', type=int, default = 3,
help='num frames in input sequence')
parser.add_argument('--is_cropped', action='store_true',
help='crop input frames?')
parser.add_argument('--crop_size', type=int, nargs='+', default=[256, 256],
help='[height, width] for input crop')
parser.add_argument('--batchsize', type=int, default=1,
help='per rank batch size')
parser.add_argument('--loader', type=str, default='DALI',
help='dataloader: PyTorch or DALI')
parser.add_argument('--rank', type=int, default=0,
help='PyTorch distributed rank')
parser.add_argument('--world_size', default=1, type=int, metavar='N',
help='num processes for PyTorch distributed')
parser.add_argument('--ip', default='localhost', type=str,
help='IP address for distributed init.')
parser.add_argument('--max_iter', type=int, default=1000,
help='num training iters')
parser.add_argument('--fp16', action='store_true',
help='train in fp16?')
parser.add_argument('--checkpoint_dir', type=str, default='.',
help='where to save checkpoints')
parser.add_argument('--min_lr', type=float, default=0.000001,
help='min learning rate for cyclic learning rate')
parser.add_argument('--max_lr', type=float, default=0.00001,
help='max learning rate for cyclic learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0004,
help='ADAM weight decay')
parser.add_argument('--flownet_path', type=str,
default='flownet2-pytorch/networks/FlowNet2-SD_checkpoint.pth.tar',
help='FlowNetSD weights path')
parser.add_argument('--image_freq', type=int, default=100,
help='num iterations between image dumps to Tensorboard ')
parser.add_argument('--timing', action='store_true',
help="Time data loading and model training (default: False)")
def main(args):
if args.rank == 0:
log.basicConfig(level=log.INFO)
writer = SummaryWriter()
writer.add_text('config', str(args))
else:
log.basicConfig(level=log.WARNING)
writer = None
torch.cuda.set_device(args.rank % args.world_size)
torch.manual_seed(args.seed + args.rank)
torch.cuda.manual_seed(args.seed + args.rank)
torch.backends.cudnn.benchmark = True
if args.world_size > 1:
log.info('Initializing process group')
dist.init_process_group(
backend='nccl',
init_method='tcp://' + args.ip + ':3567',
world_size=args.world_size,
rank=args.rank)
log.info('Process group initialized')
log.info('Initializing ' + args.loader + ' training dataloader...')
train_loader, train_batches, sampler = get_loader(args, 'train')
samples_per_epoch = train_batches * args.batchsize
log.info('Dataloader initialized')
model = VSRNet(args.frames, args.flownet_path, args.fp16)
if args.fp16:
network_to_half(model)
model.cuda()
model.train()
for param in model.FlowNetSD_network.parameters():
param.requires_grad = False
model_params = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.Adam(model_params, lr=1, weight_decay=args.weight_decay)
stepsize = 2 * train_batches
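# stepsize is the half-cycle length of the cyclic LR schedule, so one full
# min -> max -> min sweep spans four epochs here.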
clr_lambda = cyclic_learning_rate(args.min_lr, args.max_lr, stepsize)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[clr_lambda])
if args.fp16:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
if args.world_size > 1:
model = DistributedDataParallel(model)
# TRAINING
total_iter = 0
while total_iter * args.world_size < args.max_iter:
epoch = floor(total_iter / train_batches)
# only if we are using DistributedSampler
if args.world_size > 1 and args.loader == 'PyTorch':
sampler.set_epoch(epoch)
model.train()
total_epoch_loss = 0.0
sample_timer = 0.0
data_timer = 0.0
compute_timer = 0.0
iter_start = time.perf_counter()
training_data_times = []
training_start = datetime.datetime.now()
# TRAINING EPOCH LOOP
for i, inputs in enumerate(train_loader):
training_stop = datetime.datetime.now()
dataloading_time = training_stop - training_start
training_data_times.append(dataloading_time.total_seconds() * 1000.0)
if args.loader == 'DALI':
inputs = inputs[0]["data"]
# DALI output is already on the GPU; this .cuda() is effectively a no-op
# kept for symmetry with the PyTorch loader branch below.
inputs = inputs.cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
if args.fp16:
inputs = inputs.half()
if args.timing:
torch.cuda.synchronize()
data_end = time.perf_counter()
optimizer.zero_grad()
im_out = total_iter % args.image_freq == 0
# writer.add_graph(model, inputs)
loss = model(Variable(inputs), i, writer, im_out)
total_epoch_loss += loss.item()
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
optimizer.step()
scheduler.step()
if args.rank == 0:
if args.timing:
torch.cuda.synchronize()
iter_end = time.perf_counter()
sample_timer += (iter_end - iter_start)
data_duration = data_end - iter_start
data_timer += data_duration
compute_timer += (iter_end - data_end)
torch.cuda.synchronize()
iter_start = time.perf_counter()
writer.add_scalar('learning_rate', scheduler.get_lr()[0], total_iter)
writer.add_scalar('train_loss', loss.item(), total_iter)
log.info('Rank %d, Epoch %d, Iteration %d of %d, loss %.5f' %
(args.rank, epoch, i+1, train_batches, loss.item()))
if total_iter % 100 == 0:
print("Avg dataloading time: " + str(reduce(lambda x, y: x + y, training_data_times) / len(training_data_times)) + "ms")
total_iter += 1
if total_iter > args.max_iter:
break
training_start = datetime.datetime.now()
if args.rank == 0:
if args.timing:
sample_timer_avg = sample_timer / samples_per_epoch
writer.add_scalar('sample_time', sample_timer_avg, total_iter)
data_timer_avg = data_timer / samples_per_epoch
writer.add_scalar('sample_data_time', data_timer_avg, total_iter)
compute_timer_avg = compute_timer / samples_per_epoch
writer.add_scalar('sample_compute_time', compute_timer_avg, total_iter)
epoch_loss_avg = total_epoch_loss / train_batches
log.info('Rank %d, epoch %d: %.5f' % (args.rank, epoch, epoch_loss_avg))
### VALIDATION
log.info('Initializing ' + args.loader + ' validation dataloader...')
val_loader, val_batches, sampler = get_loader(args, 'val')
model.eval()
total_loss = 0
total_psnr = 0
for i, inputs in enumerate(val_loader):
if args.loader == 'DALI':
inputs = inputs[0]["data"]
# DALI output is already on the GPU; this .cuda() is effectively a no-op
# kept for symmetry with the PyTorch loader branch below.
inputs = inputs.cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
if args.fp16:
inputs = inputs.half()
log.info('Validation it %d of %d' % (i + 1, val_batches))
loss, psnr = model(Variable(inputs), i, None)
total_loss += loss.item()
total_psnr += psnr.item()
num_val_batches = i + 1
loss = total_loss / num_val_batches
psnr = total_psnr / num_val_batches
if args.rank == 0:
writer.add_scalar('val_loss', loss, total_iter)
writer.add_scalar('val_psnr', psnr, total_iter)
log.info('Rank %d validation loss %.5f' % (args.rank, loss))
log.info('Rank %d validation psnr %.5f' % (args.rank, psnr))
if __name__=='__main__':
main(parser.parse_args())
|
DALI-main
|
docs/examples/use_cases/video_superres/main.py
|
import argparse
import os
import subprocess
def split_scenes(raw_data_path, out_data_path):
out_data_path = os.path.join(out_data_path,'orig','scenes')
if not os.path.isdir(os.path.join(out_data_path,'train')):
os.makedirs(os.path.join(out_data_path,'train'))
if not os.path.isdir(os.path.join(out_data_path,'val')):
os.makedirs(os.path.join(out_data_path,'val'))
start = "00:00:00.0"
with open("./data/timestamps") as f:
for i, line in enumerate(f.readlines()):
m, s = divmod(float(line), 60)
h, m = divmod(m, 60)
end = "%02d:%02d:%02d" %(h, m, s)
if i < 53:
subset = 'train'
else:
subset = 'val'
filepath = os.path.join(out_data_path, subset)
filename = os.path.join(filepath, 'scene_' + str(i) + '.mp4')
cmd = ["ffmpeg", "-i", raw_data_path, "-ss", start, "-to", end,
"-c:v", "copy", "-an", filename]
print("Running: ", ' '.join(cmd))
subprocess.run(cmd)
start = end
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--raw_data', type=str, default=None)
parser.add_argument('--out_data', type=str, default=None)
args = parser.parse_args()
assert args.raw_data is not None, 'Provide --raw_data path to Myanmar 4K mp4'
assert args.out_data is not None, 'Provide --out_data path for the split scene output'
split_scenes(args.raw_data, args.out_data)
|
DALI-main
|
docs/examples/use_cases/video_superres/tools/split_scenes.py
|
import argparse
import os
import subprocess
default_format = "png"
default_qscale_jpg = "4"
def extract_frames(main_data, resolution, format, q, quiet,
transcoded, codec, crf, keyint):
if transcoded:
desc = [resolution, 'scenes']
desc += [codec] if codec else []
desc += ["crf"+crf] if crf else []
desc += ["keyint"+keyint] if keyint else []
in_path = os.path.join(main_data,*desc)
else:
if codec:
raise ValueError("--codec specified, but not --transcoded")
if crf:
raise ValueError("--crf specified, but not --transcoded")
if keyint:
raise ValueError("--keyint specified, but not --transcoded")
in_path = os.path.join(main_data,'orig','scenes')
desc = [resolution,'frames']
desc += [codec] if codec else []
desc += ["crf"+crf] if crf else []
desc += ["keyint"+keyint] if keyint else []
if not format:
format = default_format
else:
desc += [format]
if not q:
if format == "jpg":
q = default_qscale_jpg
else:
desc += ["q" + q]
out_path = os.path.join(main_data,*desc)
res_args = []
if resolution == '4K':
pass
else:
if resolution == '1080p':
res_str = '1920:1080'
elif resolution == '720p':
res_str = '1280:720'
elif resolution == '540p':
res_str = '960:540'
else:
raise ValueError("Unknown resolution")
res_args += ["-vf", "scale=%s" % res_str, "-sws_flags", "bilinear"]
codec_args = []
if format == "png":
if q:
codec_args += ["-compression_level", q]
elif format == "jpg":
codec_args += ["-q:v", q]
else:
raise ValueError("Unknown format")
if quiet:
cmdout = subprocess.DEVNULL
else:
cmdout = None
for subset_name, subset_dir in [('training', 'train'), ('validation', 'val')]:
if not os.path.exists(os.path.join(in_path,subset_dir)):
raise ValueError("No "+subset_name+" data found in "+in_path+", " +
"did you run split_scenes.py?")
for in_file in os.listdir(os.path.join(in_path,subset_dir)):
if in_file.endswith('.mp4'):
scene = in_file.split('_')[1].split('.')[0]
cur_out_path = os.path.join(out_path,subset_dir,scene)
if not os.path.isdir(cur_out_path):
os.makedirs(cur_out_path)
cur_in_path = os.path.join(in_path,subset_dir,in_file)
cmd = ["ffmpeg", "-n", "-i", cur_in_path]
cmd += res_args
cmd += codec_args
cmd += [os.path.join(cur_out_path, "%05d."+format)]
print("Running:", " ".join(cmd))
subprocess.run(cmd, stdout=cmdout, stderr=cmdout)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--main_data', type=str, required=True,
help="Path to root data directory")
parser.add_argument('--resolution', type=str, required=True,
choices=['4K', '1080p', '720p', '540p'])
parser.add_argument('--format', type=str,
choices=['png', 'jpg'])
parser.add_argument('-q', type=str,
help="quality to use for compression [2-31] for jpg and [0-9] for png")
parser.add_argument('--transcoded', action='store_true',
help="Use transcoded videos instead of original split video")
parser.add_argument('--quiet', action='store_true',
help="Suppress ffmpeg output")
parser.add_argument('--codec', type=str, default=None,
choices=['h264', 'hevc'],
help="codec of transcoded video to use")
parser.add_argument('--crf', type=str, default=None,
help="crf value of transcoded video to use")
parser.add_argument('--keyint', type=str, default=None,
help="keyframe interval of transcoded video to use")
args = parser.parse_args()
extract_frames(**vars(args))
|
DALI-main
|
docs/examples/use_cases/video_superres/tools/extract_frames.py
|
import argparse
import os
import subprocess
default_codec = "h264"
default_crf = "18"
default_keyint = "4"
def downsample_scenes(main_data, resolution, codec, crf, keyint, quiet):
desc = [resolution, 'scenes']
if not codec:
codec = default_codec
else:
desc += [codec]
assert codec in ['h264', 'hevc'], '--codec must be one of h264 or hevc'
if not crf:
crf = default_crf
else:
desc += ["crf" + crf]
if not keyint:
keyint = default_keyint
else:
desc += ["keyint" + keyint]
main_out_path = os.path.join(main_data,*desc)
print("Writing output files to:", main_out_path)
for subset in ['train', 'val']:
if not os.path.isdir(os.path.join(main_out_path,subset)):
os.makedirs(os.path.join(main_out_path,subset))
res_args = []
if resolution == '4K':
pass
else:
if resolution == '1080p':
res_str = '1920:1080'
elif resolution == '720p':
res_str = '1280:720'
elif resolution == '540p':
res_str = '960:540'
else:
raise ValueError("Unknown resolution")
res_args = ["-vf", "scale=%s" % res_str, "-sws_flags", "bilinear"]
codec_args = ["-preset", "slow"]
if codec == 'h264':
codec_args = ["-c:v", "libx264", "-g", keyint,
"-profile:v", "high"]
elif codec == 'hevc' or codec == 'h265':
codec_args = ["-c:v", "libx265", "-x265-params",
"keyint=%s:no-open-gop=1" % (keyint)]
else:
raise ValueError("Unknown codec")
if quiet:
cmdout = subprocess.DEVNULL
else:
cmdout = None
def transcode(in_path, out_path):
cmd = ["ffmpeg", "-y", "-i", in_path]
cmd += res_args
cmd += codec_args
cmd += ["-crf", crf, "-an", out_path]
print("Running:", " ".join(cmd))
subprocess.run(cmd, stdout=cmdout, stderr=cmdout)
for subset in ['train', 'val']:
for in_file in os.listdir(os.path.join(main_data,'orig','scenes',subset)):
if in_file.endswith('.mp4'):
in_path = os.path.join(main_data,'orig','scenes',subset,in_file)
out_path = os.path.join(main_out_path,subset,in_file)
transcode(in_path, out_path)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--main_data', type=str, default=None,
help="Path to root data directory")
parser.add_argument('--resolution', type=str, default=None,
help="one of '4K', '1080p', '720p', or '540p'")
parser.add_argument('--codec', type=str, default=None,
help="one of 'h264' or 'hevc'")
parser.add_argument('--crf', type=str, default=None,
help="crf value passed to ffmpeg")
parser.add_argument('--keyint', type=str, default=None,
help="keyframe interval")
parser.add_argument('--quiet', action='store_true',
help="Suppress ffmpeg output")
args = parser.parse_args()
assert args.main_data is not None, 'Provide --main_data path to root data directory containing split scenes'
assert args.resolution in ['4K', '1080p', '720p', '540p'], '--resolution must be one of 4K, 1080p, 720p, 540p'
downsample_scenes(**vars(args))
|
DALI-main
|
docs/examples/use_cases/video_superres/tools/transcode_scenes.py
|
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from common.loss_scaler import DynamicLossScaler, LossScaler
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
def fp32_to_fp16(val):
"""Convert fp32 `val` to fp16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, FLOAT_TYPES):
val = val.half()
return val
return conversion_helper(val, half_conversion)
def fp16_to_fp32(val):
"""Convert fp16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, HALF_TYPES):
val = val.float()
return val
return conversion_helper(val, float_conversion)
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module('module', module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))
class FP16_Optimizer(object):
"""
FP16_Optimizer is designed to wrap an existing PyTorch optimizer,
and enable an fp16 model to be trained using a main copy of fp32 weights.
Args:
optimizer (torch.optim.optimizer): Existing optimizer containing initialized fp16 parameters. Internally, FP16_Optimizer replaces the passed optimizer's fp16 parameters with new fp32 parameters copied from the original ones. FP16_Optimizer also stores references to the original fp16 parameters, and updates these fp16 parameters from the main fp32 copy after each step.
static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale fp16 gradients computed by the model. Scaled gradients will be copied to fp32, then downscaled before being applied to the fp32 main params, so static_loss_scale should not affect learning rate.
dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any static_loss_scale option.
"""
def __init__(self, optimizer, static_loss_scale=1.0, dynamic_loss_scale=False):
if not torch.cuda.is_available():
raise SystemError('Cannot use fp16 without CUDA')
self.fp16_param_groups = []
self.fp32_param_groups = []
self.fp32_flattened_groups = []
for i, param_group in enumerate(optimizer.param_groups):
print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
for param in param_group['params']:
if param.requires_grad:
if param.type() == 'torch.cuda.HalfTensor':
print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
.format(param.size()))
fp16_params_this_group.append(param)
elif param.type() == 'torch.cuda.FloatTensor':
print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
.format(param.size()))
fp32_params_this_group.append(param)
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
fp32_flattened_this_group = None
if len(fp16_params_this_group) > 0:
fp32_flattened_this_group = _flatten_dense_tensors(
[param.detach().data.clone().float() for param in fp16_params_this_group])
fp32_flattened_this_group = Variable(fp32_flattened_this_group, requires_grad = True)
fp32_flattened_this_group.grad = fp32_flattened_this_group.new(
*fp32_flattened_this_group.size())
# python's lovely list concatenation via +
if fp32_flattened_this_group is not None:
param_group['params'] = [fp32_flattened_this_group] + fp32_params_this_group
else:
param_group['params'] = fp32_params_this_group
self.fp16_param_groups.append(fp16_params_this_group)
self.fp32_param_groups.append(fp32_params_this_group)
self.fp32_flattened_groups.append(fp32_flattened_this_group)
# print("self.fp32_flattened_groups = ", self.fp32_flattened_groups)
# print("self.fp16_param_groups = ", self.fp16_param_groups)
self.optimizer = optimizer.__class__(optimizer.param_groups)
# self.optimizer.load_state_dict(optimizer.state_dict())
self.param_groups = self.optimizer.param_groups
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.loss_scaler = DynamicLossScaler()
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
def zero_grad(self):
"""
Zero fp32 and fp16 parameter grads.
"""
self.optimizer.zero_grad()
for fp16_group in self.fp16_param_groups:
for param in fp16_group:
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def _check_overflow(self):
params = []
for group in self.fp16_param_groups:
for param in group:
params.append(param)
for group in self.fp32_param_groups:
for param in group:
params.append(param)
self.overflow = self.loss_scaler.has_overflow(params)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
def _copy_grads_fp16_to_fp32(self):
for fp32_group, fp16_group in zip(self.fp32_flattened_groups, self.fp16_param_groups):
if len(fp16_group) > 0:
# This might incur one more deep copy than is necessary.
fp32_group.grad.data.copy_(
_flatten_dense_tensors([fp16_param.grad.data for fp16_param in fp16_group]))
def _downscale_fp32(self):
if self.loss_scale != 1.0:
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
param.grad.data.mul_(1./self.loss_scale)
def clip_fp32_grads(self, max_norm, norm_type=2):
"""
Clips fp32 main gradients via torch.nn.utils.clip_grad_norm.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if self.overflow is True).
"""
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return torch.nn.utils.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return -1
def _copy_params_fp32_to_fp16(self):
for fp16_group, fp32_group in zip(self.fp16_param_groups, self.fp32_flattened_groups):
if len(fp16_group) > 0:
for fp16_param, fp32_data in zip(fp16_group,
_unflatten_dense_tensors(fp32_group.data, fp16_group)):
fp16_param.data.copy_(fp32_data)
def state_dict(self):
"""
Returns a dict containing the current state of this FP16_Optimizer instance.
This dict contains attributes of FP16_Optimizer, as well as the state_dict
of the contained PyTorch optimizer.
Untested.
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict.
Untested.
"""
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, step should be called after fp16_optimizer_obj.backward(loss).
step updates the fp32 main copy of parameters using the optimizer supplied to
FP16_Optimizer's constructor, then copies the updated fp32 params into the fp16 params
originally referenced by Fp16_Optimizer's constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, step may be called without a prior call to self.backward(loss).
However, the user should take care that any loss.backward() call within the closure
has been replaced by fp16_optimizer_obj.backward(loss).
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to FP16_Optimizer's constructor. closure should call zero_grad on the FP16_Optimizer object, compute the loss, call .backward(loss), and return the loss.
Closure example::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. note::
The only changes that need to be made compared to
`ordinary optimizer closures`_ are that "optimizer" itself should be an instance of
FP16_Optimizer, and that the call to loss.backward should be replaced by
optimizer.backward(loss).
.. warning::
Currently, calling step with a closure is not compatible with dynamic loss scaling.
.. _`ordinary optimizer closures`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
if closure is not None and isinstance(self.loss_scaler, DynamicLossScaler):
raise TypeError("Using step with a closure is currently not "
"compatible with dynamic loss scaling.")
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
print("OVERFLOW! Skipping step. Attempted loss scale: {}".format(scale))
return
if closure is not None:
self._step_with_closure(closure)
else:
self.optimizer.step()
self._copy_params_fp32_to_fp16()
return
def _step_with_closure(self, closure):
def wrapped_closure():
if self.first_closure_call_this_step:
"""
We expect that the fp16 params are initially fresh on entering self.step(),
so _copy_params_fp32_to_fp16() is unnecessary the first time wrapped_closure()
is called within self.optimizer.step().
"""
self.first_closure_call_this_step = False
else:
"""
If self.optimizer.step() internally calls wrapped_closure more than once,
it may update the fp32 params after each call. However, self.optimizer
doesn't know about the fp16 params at all. If the fp32 params get updated,
we can't rely on self.optimizer to refresh the fp16 params. We need
to handle that manually:
"""
self._copy_params_fp32_to_fp16()
"""
Our API expects the user to give us ownership of the backward() call by
replacing all calls to loss.backward() with optimizer.backward(loss).
This requirement holds whether or not the call to backward() is made within
a closure.
If the user is properly calling optimizer.backward(loss) within "closure,"
calling closure() here will give the fp32 main params fresh gradients
for the optimizer to play with,
so all wrapped_closure needs to do is call closure() and return the loss.
"""
temp_loss = closure()
return temp_loss
self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
def backward(self, loss, update_fp32_grads=True):
"""
fp16_optimizer_obj.backward performs the following conceptual operations:
fp32_loss = loss.float() (see first Note below)
scaled_loss = fp32_loss*loss_scale
scaled_loss.backward(), which accumulates scaled gradients into the .grad attributes of the
fp16 model's leaves.
fp16 grads are then copied to the stored fp32 params' .grad attributes (see second Note).
Finally, fp32 grads are divided by loss_scale.
In this way, after fp16_optimizer_obj.backward, the fp32 parameters have fresh gradients,
and fp16_optimizer_obj.step may be called.
.. note::
Converting the loss to fp32 before applying the loss scale provides some
additional safety against overflow if the user has supplied an fp16 value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
fp16_optimizer_obj.backward.
.. note::
The gradients found in an fp16 model's leaves after a call to
fp16_optimizer_obj.backward should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to fp16_optimizer_obj.backward,
only the main gradients should be regarded as valid, and can be retrieved via
:attr:`inspect_fp32_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_fp32_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay this copy, which is useful to eliminate redundant fp16->fp32 grad copies if fp16_optimizer_obj.backward is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling fp16_optimizer_obj.update_fp32_grads before calling fp16_optimizer_obj.step.
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_fp32_grads=False)
optimizer.backward(loss2, update_fp32_grads=False)
optimizer.update_fp32_grads()
"""
self.loss_scaler.backward(loss.float())
if update_fp32_grads:
self.update_fp32_grads()
def update_fp32_grads(self):
"""
Copy the .grad attribute from stored references to fp16 parameters to
the .grad attribute of the main fp32 parameters that are directly
updated by the optimizer. :attr:`update_fp32_grads` only needs to be called if
fp16_optimizer_obj.backward was called with update_fp32_grads=False.
"""
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow: return
self._copy_grads_fp16_to_fp32()
self._downscale_fp32()
def inspect_fp32_grad_data(self):
"""
When running with FP16_Optimizer, .grad attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 main params' .grad
attributes will contain valid gradients properly divided by the loss scale. However,
because :attr:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_fp32_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the .grad.data attributes of the fp32 main params belonging to that group.
"""
raise NotImplementedError("Currently not implemented, working on it...")
# The code below was unreachable; kept commented out as a sketch of the intended behavior.
# fp32_grads_each_group = []
# if self.overflow:
#     print("Warning: calling FP16_Optimizer.inspect_fp32_grad_data while in an overflow state. "
#           "Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
#     return None
@property
def loss_scale(self):
return self.loss_scaler.loss_scale
|
DALI-main
|
docs/examples/use_cases/video_superres/common/fp16.py
|
DALI-main
|
docs/examples/use_cases/video_superres/common/__init__.py
|
|
import torch
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Model wrapper that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def copy_in_params(net, params):
net_params = list(net.parameters())
for i in range(len(params)):
net_params[i].data.copy_(params[i].data)
def set_grad(params, params_with_grad):
for param, param_w_grad in zip(params, params_with_grad):
if param.grad is None:
param.grad = torch.nn.Parameter(param.data.new().resize_(*param.data.size()))
param.grad.data.copy_(param_w_grad.grad.data)
def BN_convert_float(module):
'''
Designed to work with network_to_half.
BatchNorm layers need parameters in single precision.
Find all layers and convert them back to float. This can't
be done with built in .apply as that function will apply
fn to all modules, parameters, and buffers. Thus we wouldn't
be able to guard the float conversion based on the module type.
'''
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
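# Minimal usage sketch (hypothetical model, not from the original file):
#   model = network_to_half(MyModel()).cuda()
#   out = model(torch.randn(8, 3, 224, 224).cuda())  # tofp16() casts the input to half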
def backwards_debug_hook(grad):
print("Uh oh, main_params is receiving a gradient in the backward pass!")
def create_main_params(model):
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
main_params = _flatten_dense_tensors([param.data for param in model.parameters()]).float()
main_params = torch.nn.Parameter(main_params)
main_params.requires_grad = True
# main_params.register_hook(backwards_debug_hook)
if main_params.grad is None:
main_params.grad = main_params.new(*main_params.size())
return main_params
def model_grads_to_main_grads(model, main_params):
main_params.grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model.parameters() if p.requires_grad]))
def main_params_to_model_params(model, main_params):
params = [param.data for param in model.parameters()]
for param, main in zip(params, _unflatten_dense_tensors(main_params.data, params)):
param.copy_(main)
def params_to_type(params, totype):
new_params = []
for param in params:
new_params.append(param.type(totype))
return new_params
def params_to_fp16(params):
return params_to_type(params, torch.cuda.HalfTensor)
def params_to_fp32(params):
return params_to_type(params, torch.cuda.FloatTensor)
def clone_params(net):
new_params = []
for param in list(net.parameters()):
new_params.append(param.data.clone())
return new_params
def clone_grads(net):
new_params = []
for param in list(net.parameters()):
new_params.append(param.grad.data.clone())
return new_params
def copy_into_params(net, input_tens):
net_params = list(net.parameters())
for i in range(len(input_tens)):
net_params[i].data.copy_(input_tens[i])
def copy_in_grads(params, params_with_grad):
for param, param_w_grad in zip(params, params_with_grad):
if param.grad is None:
param.grad = torch.nn.Parameter(param.data.new().resize_(*param.data.size()))
param.grad.data.copy_(param_w_grad.grad.data)
# NB: only implements overflow-based loss scaling for now.
class DynamicLossScaler:
def __init__(self,
init_scale=2.**15,
scale_factor=2.,
scale_window=100):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, tensors):
try:
for tens in tensors:
if tens is None:
continue
if DynamicLossScaler._has_inf_or_nan(tens):
return True
except TypeError:
return DynamicLossScaler._has_inf_or_nan(tensors)
return False
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x):
if torch.is_tensor(x):
max_val = x.abs().max()
else:
max_val = x
if max_val == float('inf'):
return True
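# NaN is the only value that compares unequal to itself, so (x != x) flags
# exactly the NaN entries without needing torch.isnan (older PyTorch).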
nan_count = torch.sum(x != x)
return nan_count > 0
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
if overflow:
self.cur_scale /= self.scale_factor
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
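# Example scale trajectory (illustrative): starting at 2**15, an overflow halves
# the scale to 2**14 and records the iteration; after 100 consecutive clean
# iterations update_scale doubles it again, probing for the largest safe scale.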
|
DALI-main
|
docs/examples/use_cases/video_superres/common/fp16util.py
|