code stringlengths 17 6.64M |
|---|
def get_cuda_arch(cuda_ver):
    """Return the CUDA compute architecture for nvcc's -arch flag.

    The CUDA_ARCH environment variable, when set, overrides the
    version-based lookup entirely.

    Args:
        cuda_ver: toolkit version encoded as major * 10 + minor
            (e.g. 9.2 -> 92, 11.1 -> 111).

    Returns:
        Architecture as a string, e.g. '52'.

    Raises:
        ValueError: for versions outside the known ranges (the original
            code raised UnboundLocalError here, e.g. for 112).
    """
    if 'CUDA_ARCH' in os.environ:
        return os.environ['CUDA_ARCH']
    if 70 <= cuda_ver < 92:
        return '30'
    if 92 <= cuda_ver < 110:
        return '50'
    if cuda_ver == 110:
        return '52'
    if cuda_ver == 111:
        return '80'
    raise ValueError(f'unsupported cuda version: {cuda_ver}')
|
def locate_cuda():
    """Locate the CUDA environment on the system.

    Checks the CUDA_PATH / CUDAHOME / CUDA_HOME environment variables
    first; otherwise searches $PATH for nvcc.  On success returns a
    dict with keys 'home', 'nvcc', 'include', 'lib64' and 'post_args'
    (the nvcc compile flags); returns None when nvcc or any expected
    directory cannot be found.
    """
    nvcc_bin = 'nvcc'
    if sys.platform.startswith('win'):
        nvcc_bin = 'nvcc.exe'
    # prefer an explicitly configured CUDA home over a PATH lookup
    found = False
    for env_name in ['CUDA_PATH', 'CUDAHOME', 'CUDA_HOME']:
        if (env_name not in os.environ):
            continue
        found = True
        home = os.environ[env_name]
        nvcc = os.path.join(home, 'bin', nvcc_bin)
        break
    if (not found):
        nvcc = find_in_path(nvcc_bin, os.environ['PATH'])
        if (nvcc is None):
            logging.warning('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDA_HOME to enable CUDA extensions')
            return None
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home': home, 'nvcc': nvcc, 'include': os.path.join(home, 'include'), 'lib64': os.path.join(home, 'lib64')}
    # parse the toolkit version out of the resolved install directory
    # name -- assumes a 'cuda-<major>.<minor>' naming scheme; TODO confirm
    cuda_ver = os.path.basename(os.path.realpath(home)).split('-')[1].split('.')
    (major, minor) = (int(cuda_ver[0]), int(cuda_ver[1]))
    # encode as major*10 + minor, e.g. 11.1 -> 111
    cuda_ver = ((10 * major) + minor)
    assert (cuda_ver >= 70), f'too low cuda ver {major}.{minor}'
    print(f'cuda_ver: {major}.{minor}')
    arch = get_cuda_arch(cuda_ver)
    sm_list = get_cuda_sm_list(cuda_ver)
    compute = get_cuda_compute(cuda_ver)
    # real-arch codegen for every sm, plus PTX for forward compatibility
    post_args = (([f'-arch=sm_{arch}'] + [f'-gencode=arch=compute_{sm},code=sm_{sm}' for sm in sm_list]) + [f'-gencode=arch=compute_{compute},code=compute_{compute}', '--ptxas-options=-v', '-O2'])
    print(f'nvcc post args: {post_args}')
    if HALF_PRECISION:
        # NOTE(review): drops every flag containing the substring '52' --
        # presumably because sm_52 lacks fp16 support; verify
        post_args = [flag for flag in post_args if ('52' not in flag)]
    if (sys.platform == 'win32'):
        # MSVC layout keeps 64-bit libs under lib/x64
        cudaconfig['lib64'] = os.path.join(home, 'lib', 'x64')
        post_args += ['-Xcompiler', '/MD', '-std=c++14', '-Xcompiler', '/openmp']
        if HALF_PRECISION:
            post_args += ['-Xcompiler', '/D HALF_PRECISION']
    else:
        post_args += ['-c', '--compiler-options', "'-fPIC'", '--compiler-options', "'-std=c++14'"]
        if HALF_PRECISION:
            post_args += ['--compiler-options', "'-D HALF_PRECISION'"]
    # every configured path must exist, otherwise bail out entirely
    for (k, val) in cudaconfig.items():
        if (not os.path.exists(val)):
            logging.warning('The CUDA %s path could not be located in %s', k, val)
            return None
    cudaconfig['post_args'] = post_args
    return cudaconfig
|
class _UnixCCompiler(unixccompiler.UnixCCompiler):
    """UnixCCompiler that routes .cu sources through nvcc."""
    # accept CUDA sources alongside the regular C/C++ extensions
    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.append('.cu')
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        # non-CUDA sources follow the normal compile path untouched
        if (os.path.splitext(src)[1] != '.cu'):
            return unixccompiler.UnixCCompiler._compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts)
        # temporarily swap the compiler executable for nvcc, replacing
        # the caller's postargs with the CUDA-specific ones
        _compiler_so = self.compiler_so
        try:
            nvcc_path = CUDA['nvcc']
            post_args = CUDA['post_args']
            self.set_executable('compiler_so', nvcc_path)
            return unixccompiler.UnixCCompiler._compile(self, obj, src, ext, cc_args, post_args, pp_opts)
        finally:
            # always restore the original compiler for subsequent files
            self.compiler_so = _compiler_so
|
class _MSVCCompiler(msvccompiler.MSVCCompiler):
    """MSVCCompiler that compiles .cu sources by invoking nvcc directly."""
    _cu_extensions = ['.cu']
    # NOTE(review): extends the *Unix* compiler's extension list rather
    # than MSVCCompiler's own -- confirm this is intentional
    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.extend(_cu_extensions)
    def _compile_cu(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
        """Compile CUDA sources with nvcc, one spawn per object file."""
        (macros, objects, extra_postargs, pp_opts, _build) = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
        compiler_so = CUDA['nvcc']
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
        post_args = CUDA['post_args']
        for obj in objects:
            try:
                (src, _) = _build[obj]
            except KeyError:
                # object not scheduled for rebuild -- skip it
                continue
            try:
                self.spawn(((([compiler_so] + cc_args) + [src, '-o', obj]) + post_args))
            except errors.DistutilsExecError as e:
                raise errors.CompileError(str(e))
        return objects
    def compile(self, sources, **kwargs):
        """Split sources: .cu files go to nvcc, everything else to MSVC."""
        cu_sources = []
        other_sources = []
        for source in sources:
            if (os.path.splitext(source)[1] == '.cu'):
                cu_sources.append(source)
            else:
                other_sources.append(source)
        other_objects = msvccompiler.MSVCCompiler.compile(self, other_sources, **kwargs)
        cu_objects = self._compile_cu(cu_sources, **kwargs)
        return (other_objects + cu_objects)
|
class CudaBuildExt(setuptools_build_ext):
    """Custom `build_ext` command to include CUDA C source files.

    Sets an invalid compiler name ('nvidia') so distutils raises
    DistutilsPlatformError, which the wrapped `new_compiler` factory
    catches in order to substitute the CUDA-aware compiler class for
    the current platform.
    """
    def run(self):
        if (CUDA is not None):
            def wrap_new_compiler(func):
                def _wrap_new_compiler(*args, **kwargs):
                    try:
                        return func(*args, **kwargs)
                    except errors.DistutilsPlatformError:
                        # fall back to the CUDA-aware compiler classes
                        if (sys.platform != 'win32'):
                            CCompiler = _UnixCCompiler
                        else:
                            CCompiler = _MSVCCompiler
                        return CCompiler(None, kwargs['dry_run'], kwargs['force'])
                return _wrap_new_compiler
            # monkey-patch distutils' compiler factory process-wide
            ccompiler.new_compiler = wrap_new_compiler(ccompiler.new_compiler)
            # 'nvidia' is not a real compiler type: it deliberately
            # triggers the DistutilsPlatformError handled above
            self.compiler = 'nvidia'
        setuptools_build_ext.run(self)
|
def get_logger(name=__file__, level=2):
    """Create (or fetch) a stream logger.

    level: 1 -> WARNING, 2 -> INFO, 3 -> DEBUG; any other value is
    passed through to logging as-is.  A logger that already has
    handlers is returned untouched, so repeated calls do not stack
    duplicate handlers.
    """
    level = {1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}.get(level, level)
    logger = logging.getLogger(name)
    if logger.handlers:
        return logger
    logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(
        '[%(levelname)-8s] %(asctime)s [%(filename)s] [%(funcName)s:%(lineno)d]%(message)s',
        '%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)
    return logger
|
def load_json_string(cont):
    """Parse a JSON string, tolerating comments and trailing commas."""
    minified = jsmin.jsmin(cont)
    # strip trailing commas before closing braces / brackets
    minified = re.sub(',[ \t\r\n]*}', '}', minified)
    minified = re.sub(',[ \t\r\n]*\\]', ']', minified)
    return json.loads(minified)
|
def load_json_file(fname):
    """Read `fname` and parse it with load_json_string.

    Opens with an explicit utf-8 encoding so parsing does not depend on
    the platform's default locale encoding.
    """
    with open(fname, 'r', encoding='utf-8') as fin:
        return load_json_string(fin.read())
|
def get_opt_as_proto(raw, proto_type=ConfigProto):
    """Build a `proto_type` message from a raw option (dict or json path)."""
    message = proto_type()
    Parse(json.dumps(Option(raw)), message)
    missing = []
    assert message.IsInitialized(missing), \
        f'''some required fields are missing in proto {missing}
{message}'''
    return message
|
def proto_to_dict(proto):
    """Convert a protobuf message to a plain dict, keeping default-valued
    fields and the original (snake_case) field names."""
    return MessageToDict(proto, including_default_value_fields=True, preserving_proto_field_name=True)
|
def copy_proto(proto):
    """Deep-copy a protobuf message via a json round trip."""
    clone = type(proto)()
    Parse(json.dumps(proto_to_dict(proto)), clone)
    return clone
|
class Option(dict):
    """A dict whose items are also reachable as attributes.

    Positional arguments may be dicts or paths to json files (loaded
    with load_json_file); nested dicts are wrapped as Option
    recursively.  Missing attributes read as None (dict.get semantics).
    Items set in any way are mirrored into __dict__ so attribute and
    item access stay in sync.
    """

    def __init__(self, *args, **kwargs):
        args = [arg if isinstance(arg, dict) else load_json_file(arg)
                for arg in args]
        super().__init__(*args, **kwargs)
        # re-assign through __setitem__ so __dict__ is kept in sync and
        # nested dicts get wrapped
        for arg in args:
            if isinstance(arg, dict):
                for key, value in arg.items():
                    self[key] = Option(value) if isinstance(value, dict) else value
        for key, value in kwargs.items():
            self[key] = Option(value) if isinstance(value, dict) else value

    def __getattr__(self, attr):
        # only called for attributes not found in __dict__
        return self.get(attr)

    def __setattr__(self, key, value):
        self.__setitem__(key, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self.__dict__.update({key: value})

    def __delattr__(self, item):
        self.__delitem__(item)

    def __delitem__(self, key):
        super().__delitem__(key)
        del self.__dict__[key]

    def __getstate__(self):
        return vars(self)

    def __setstate__(self, state):
        vars(self).update(state)
|
def get_extend_compile_flags():
    """Extra compiler flags enabling host-native instruction selection."""
    return ['-march=native']
|
class CMakeExtension(Extension):
    """A setuptools Extension that is built by CMake instead of the
    default compiler; it carries no sources of its own."""
    # marker checked by the build command to trigger the cmake build
    extension_type = 'cmake'
    def __init__(self, name):
        super().__init__(name, sources=[])
|
def git_version():
    """Return the current git HEAD revision, or 'Unknown' on failure.

    Runs git with a minimal environment (only SYSTEMROOT and PATH are
    forwarded) so the output is not affected by user configuration.
    A missing git binary, a non-zero exit status (e.g. not inside a
    repository -- the original silently returned an empty string in
    that case), or non-ascii output all yield 'Unknown'.
    """
    def _minimal_ext_cmd(cmd):
        # forward only what the subprocess needs to find executables
        env = {k: os.environ[k] for k in ('SYSTEMROOT', 'PATH')
               if k in os.environ}
        # check=True turns a failed git call into an exception instead
        # of silently producing an empty revision string
        return subprocess.run(cmd, stdout=subprocess.PIPE, env=env,
                              check=True).stdout

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        git_revision = out.strip().decode('ascii')
    except (OSError, subprocess.SubprocessError, UnicodeDecodeError):
        git_revision = 'Unknown'
    return git_revision
|
def write_version_py(filename='cuhnsw/version.py'):
    """Write the package version module containing the module-level
    VERSION and the current git revision.

    Uses an explicit utf-8 encoding so the output file does not depend
    on the platform's default locale encoding.
    """
    cnt = "\nshort_version = '%(version)s'\ngit_revision = '%(git_revision)s'\n"
    git_revision = git_version()
    with open(filename, 'w', encoding='utf-8') as fout:
        fout.write(cnt % {'version': VERSION, 'git_revision': git_revision})
|
class BuildExtension(BUILDEXT):
    """build_ext command that runs a CMake build for CMakeExtension
    entries before the regular extension build."""
    def run(self):
        for ext in self.extensions:
            print(ext.name)
            if (hasattr(ext, 'extension_type') and (ext.extension_type == 'cmake')):
                self.cmake()
        super().run()
    def cmake(self):
        """Configure and build the CMake project inside build_temp."""
        cwd = pathlib.Path().absolute()
        build_temp = pathlib.Path(self.build_temp)
        build_temp.mkdir(parents=True, exist_ok=True)
        build_type = ('Debug' if self.debug else 'Release')
        # emit shared libraries straight into the package lib directory
        cmake_args = [('-DCMAKE_BUILD_TYPE=' + build_type), ('-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + CLIB_DIR)]
        build_args = []
        # cmake runs from the build dir; the cwd is restored at the end
        os.chdir(str(build_temp))
        self.spawn((['cmake', str(cwd)] + cmake_args))
        if (not self.dry_run):
            self.spawn((['cmake', '--build', '.'] + build_args))
        os.chdir(str(cwd))
|
def setup_package():
    """Entry point for setup.py: writes version.py, then runs setup()
    with the cuhnsw package metadata (VERSION, DOCLINES, CLASSIFIERS,
    extensions and INSTALL_REQUIRES come from module globals)."""
    write_version_py()
    cmdclass = {'build_ext': BuildExtension}
    metadata = dict(name='cuhnsw', maintainer='Jisang Yoon', maintainer_email='vjs10101v@gmail.com', author='Jisang Yoon', author_email='vjs10101v@gmail.com', description=DOCLINES[0], long_description='\n'.join(DOCLINES[2:]), url='https://github.com/js1010/cuhnsw', download_url='https://github.com/js1010/cuhnsw/releases', include_package_data=False, license='Apache2', packages=['cuhnsw/'], cmdclass=cmdclass, classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=['Linux', 'Mac OSX', 'Unix'], ext_modules=extensions, install_requires=INSTALL_REQUIRES, entry_points={'console_scripts': []}, python_requires='>=3.6')
    metadata['version'] = VERSION
    setup(**metadata)
|
class VOCSegGroupLoader(mx.io.DataIter):
    """Batch loader for VOC segmentation that groups images sharing at
    least one object class.

    Each batch holds batch_size // group_size groups; within a group
    every image shares a class with every other member (see is_ok),
    and groups are interleaved so each device block receives whole
    groups (see next()).
    """
    def __init__(self, image_root, label_root, annotation_root, data_list, batch_size, group_size, num_block, target_size, pad=False, shuffle=False, rand_scale=False, rand_mirror=False, rand_crop=False, downsample=None):
        assert (group_size >= 2), "'group_size': # common-class images, typical value is 2 for pairs"
        assert (num_block >= 1), "'num_block': should equal # GPU"
        assert ((batch_size % (group_size * num_block)) == 0)
        with open(data_list, 'r') as f:
            data_names = [x.strip() for x in f.readlines()]
        # optionally repeat leading samples so the list divides evenly
        if (pad and ((len(data_names) % batch_size) > 0)):
            pad_num = (batch_size - (len(data_names) % batch_size))
            data_names = (data_names + data_names[:pad_num])
        self.image_src_list = [os.path.join(image_root, (x + '.jpg')) for x in data_names]
        # label_root may be None (inference) -> labels become None
        self.label_src_list = ([os.path.join(label_root, (x + '.png')) for x in data_names] if (label_root is not None) else ([None] * len(data_names)))
        self.ann_list = [VOC.get_annotation(os.path.join(annotation_root, (x + '.xml'))) for x in data_names]
        self.batch_size = batch_size
        self.group_size = group_size
        self.num_block = num_block
        # number of groups assigned to each device block
        self.meta_length = (self.batch_size // (self.num_block * self.group_size))
        self.target_size = target_size
        self.shuffle = shuffle
        self.rand_scale = rand_scale
        self.rand_mirror = rand_mirror
        self.rand_crop = rand_crop
        self.downsample = downsample
        scale_pool = [0.5, 0.75, 1, 1.25, 1.5]
        self.scale_sampler = (lambda : np.random.choice(scale_pool))
        self.index = list(range(len(data_names)))
        self.num_batch = (len(data_names) // self.batch_size)
        self.reset()
    def reset(self):
        # restart the epoch: clear the skipped-sample cache, reshuffle
        self.index_pointer = 0
        self.cache = []
        if self.shuffle:
            np.random.shuffle(self.index)
    def pop(self):
        """Return the next candidate index, preferring skipped samples."""
        if (len(self.cache) > 0):
            index = self.cache.pop()
        elif (self.index_pointer < len(self.index)):
            index = self.index[self.index_pointer]
            self.index_pointer += 1
        else:
            raise StopIteration
        return index
    def is_ok(self, a, b):
        """True when samples a and b share at least one annotated class."""
        lbl_a = self.ann_list[a]
        lbl_b = self.ann_list[b]
        return (len((set(lbl_a) - set(lbl_b))) < len(lbl_a))
    def next(self):
        """Assemble one batch of mutually compatible groups."""
        indices = []
        while (len(indices) < (self.batch_size // self.group_size)):
            cache = []
            partners = [self.pop()]
            while (len(partners) < self.group_size):
                this = self.pop()
                # skip candidates incompatible with any current member
                while (not all([self.is_ok(prev, this) for prev in partners])):
                    cache.append(this)
                    this = self.pop()
                partners.append(this)
            indices.append(partners)
            # return the skipped samples to the front of the cache
            self.cache = (cache[::(- 1)] + self.cache)
        # interleave groups so each device block gets whole groups
        indices = sum([sum(zip(*indices[i:(i + self.meta_length)]), tuple()) for i in range(0, len(indices), self.meta_length)], tuple())
        image_src_list = [self.image_src_list[i] for i in indices]
        label_src_list = [self.label_src_list[i] for i in indices]
        self.cache_image_src_list = image_src_list
        batch = load_batch_semantic(image_src_list, label_src_list, self.target_size, self.scale_sampler, self.rand_scale, self.rand_mirror, self.rand_crop, self.downsample)
        return batch
|
def resnet101_largefov_SA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None, **kwargs):
    """ResNet-101 large-FOV head with a self-affinity residual branch."""
    backbone = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                       use_global_stats=use_global_stats_backbone,
                       strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                       lr_mult=1, reuse=reuse)
    residual = build_self_affinity(backbone, 1024, 2048, is_downsample,
                                   in_embed_type, out_embed_type, sim_type,
                                   use_global_stats_affinity, lr_mult, reuse)
    fused = backbone + residual
    return Conv(fused, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12),
                no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
|
def resnet101_largefov_CA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', group_size=2, merge_type='max', merge_self=True, use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None):
    """ResNet-101 large-FOV head with cross-affinity.

    Returns a (self-branch, cross-branch) pair of class-score maps.
    The cross branch's fc1 passes reuse=x_self, so its classifier
    weights are pulled from the self branch's graph (see get_variable).
    """
    x_raw = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True, use_global_stats=use_global_stats_backbone, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), lr_mult=1, reuse=reuse)
    (x_res_self, x_res_cross) = build_cross_affinity(x_raw, 1024, 2048, is_downsample, in_embed_type, out_embed_type, sim_type, group_size, merge_type, merge_self, use_global_stats_affinity, lr_mult, reuse)
    x_self = (x_raw + x_res_self)
    x_self = Conv(x_self, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
    x_cross = (x_raw + x_res_cross)
    # shares fc1 weights with the self branch via reuse
    x_cross = Conv(x_cross, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=x_self)
    return (x_self, x_cross)
|
def resnet50_largefov_SA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None, **kwargs):
    """ResNet-50 large-FOV head with a self-affinity residual branch."""
    backbone = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True,
                       use_global_stats=use_global_stats_backbone,
                       strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                       lr_mult=1, reuse=reuse)
    residual = build_self_affinity(backbone, 1024, 2048, is_downsample,
                                   in_embed_type, out_embed_type, sim_type,
                                   use_global_stats_affinity, lr_mult, reuse)
    fused = backbone + residual
    return Conv(fused, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12),
                no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
|
def resnet50_largefov_CA(x, num_cls, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', group_size=2, merge_type='max', merge_self=True, use_global_stats_backbone=False, use_global_stats_affinity=False, lr_mult=10, reuse=None):
    """ResNet-50 large-FOV head with cross-affinity.

    Returns a (self-branch, cross-branch) pair of class-score maps.
    The cross branch's fc1 passes reuse=x_self, so its classifier
    weights are pulled from the self branch's graph (see get_variable).
    """
    x_raw = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True, use_global_stats=use_global_stats_backbone, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), lr_mult=1, reuse=reuse)
    (x_res_self, x_res_cross) = build_cross_affinity(x_raw, 1024, 2048, is_downsample, in_embed_type, out_embed_type, sim_type, group_size, merge_type, merge_self, use_global_stats_affinity, lr_mult, reuse)
    x_self = (x_raw + x_res_self)
    x_self = Conv(x_self, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=reuse)
    x_cross = (x_raw + x_res_cross)
    # shares fc1 weights with the self branch via reuse
    x_cross = Conv(x_cross, num_cls, (3, 3), (1, 1), dilate=(12, 12), pad=(12, 12), no_bias=True, name='fc1', lr_mult=lr_mult, reuse=x_self)
    return (x_self, x_cross)
|
def in_embedding_conv(x_feat, num_filter_hidden, is_downsample=True, lr_mult=1, reuse=None):
    """1x1-conv query/key/value embeddings; key and value are optionally
    stride-2 pooled to shrink the similarity matrix."""
    x_query, x_key, x_value = (
        Conv(x_feat, num_filter_hidden, (1, 1), no_bias=True, name=conv_name,
             lr_mult=lr_mult, reuse=reuse)
        for conv_name in ('conv_embed_q', 'conv_embed_k', 'conv_embed_v'))
    if is_downsample:
        x_key = Pool(x_key, (3, 3), (2, 2), (1, 1))
        x_value = Pool(x_value, (3, 3), (2, 2), (1, 1))
    return (x_query, x_key, x_value)
|
def out_embedding_convbn(x_res, num_filter_out, use_global_stats=False, lr_mult=1, reuse=None):
    """1x1 conv followed by batch norm for the affinity output embedding."""
    out = Conv(x_res, num_filter_out, (1, 1), no_bias=True, name='conv_out', lr_mult=lr_mult, reuse=reuse)
    out = BN(out, fix_gamma=False, use_global_stats=use_global_stats, name='bn_out', lr_mult=lr_mult, reuse=reuse)
    return out
|
def compute_sim_mat(x_key, x_query, sim_type):
    """Similarity between key and query positions ('dot' or 'cosine')."""
    if sim_type == 'dot':
        return mx.sym.batch_dot(x_key, x_query, transpose_a=True)
    if sim_type == 'cosine':
        key_norm = mx.sym.L2Normalization(x_key, mode='channel')
        query_norm = mx.sym.L2Normalization(x_query, mode='channel')
        return mx.sym.batch_dot(key_norm, query_norm, transpose_a=True)
    raise ValueError(sim_type)
|
def build_self_affinity(x_feat, num_filter_hidden, num_filter_out, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', use_global_stats=False, lr_mult=1, reuse=None, return_internals=False):
    """Self-attention style affinity residual over a feature map.

    Embeds x_feat into query/key/value, softmax-normalizes the
    key-query similarity over key positions, aggregates values and
    embeds the result back to num_filter_out channels.

    Returns:
        x_out, or (x_out, x_query, x_key, x_value, sim_mat, x_res)
        when return_internals is True.
    """
    # name-based dispatch via globals() instead of eval(); same lookup,
    # but no arbitrary-code evaluation (unknown types raise KeyError
    # where eval raised NameError)
    get_embedding_in = globals()['in_embedding_' + in_embed_type]
    get_embedding_out = globals()['out_embedding_' + out_embed_type]
    (x_query, x_key, x_value) = get_embedding_in(x_feat, num_filter_hidden, is_downsample, lr_mult, reuse)
    # collapse the spatial dims into one axis (-3 merges dims 2 and 3)
    x_query = mx.sym.reshape(x_query, (0, 0, -3))
    x_key = mx.sym.reshape(x_key, (0, 0, -3))
    x_value = mx.sym.reshape(x_value, (0, 0, -3))
    sim_mat = compute_sim_mat(x_key, x_query, sim_type)
    # normalize over key positions
    sim_mat = mx.sym.softmax(sim_mat, axis=1)
    x_res = mx.sym.batch_dot(x_value, sim_mat)
    # restore the spatial layout from x_feat
    x_res = mx.sym.reshape_like(x_res, x_feat, lhs_begin=2, lhs_end=3, rhs_begin=2, rhs_end=4)
    x_out = get_embedding_out(x_res, num_filter_out, use_global_stats, lr_mult, reuse)
    if return_internals:
        return (x_out, x_query, x_key, x_value, sim_mat, x_res)
    return x_out
|
def build_cross_affinity(x_feat, num_filter_hidden, num_filter_out, is_downsample=True, in_embed_type='conv', out_embed_type='convbn', sim_type='dot', group_size=2, merge_type='max', merge_self=True, use_global_stats=False, lr_mult=1, reuse=None):
    """Cross-image affinity residual block.

    Builds the self-affinity branch, then computes affinity residuals
    against each other group member by rolling key/value along the
    batch axis group_size - 1 times, merges the residuals ('max' or
    'avg', optionally including the self residual), and embeds both
    branches with shared output weights.

    Returns:
        (x_out_self, x_out_cross) symbol pair.

    Raises:
        KeyError: unknown out_embed_type.
        ValueError: unknown merge_type (previously a silent NameError).
    """
    # globals() lookup replaces eval(); the original also resolved an
    # in_embedding_* function here but never used it
    get_embedding_out = globals()['out_embedding_' + out_embed_type]
    (x_out_self, x_query, x_key, x_value, sim_mat_self, x_res_self) = build_self_affinity(x_feat, num_filter_hidden, num_filter_out, is_downsample, in_embed_type, out_embed_type, sim_type, use_global_stats, lr_mult, reuse, True)
    x_key_sp = list(mx.sym.split(x_key, num_outputs=group_size, axis=0))
    x_value_sp = list(mx.sym.split(x_value, num_outputs=group_size, axis=0))
    x_res_list = []
    for i in range(group_size - 1):
        # roll the per-sample splits so every sample attends to a partner
        x_key_sp = x_key_sp[1:] + x_key_sp[0:1]
        x_value_sp = x_value_sp[1:] + x_value_sp[0:1]
        x_key_roll = mx.sym.concat(*x_key_sp, dim=0)
        x_value_roll = mx.sym.concat(*x_value_sp, dim=0)
        sim_mat = compute_sim_mat(x_key_roll, x_query, sim_type)
        sim_mat = mx.sym.softmax(sim_mat, axis=1)
        x_res = mx.sym.batch_dot(x_value_roll, sim_mat)
        x_res = mx.sym.reshape_like(x_res, x_feat, lhs_begin=2, lhs_end=3, rhs_begin=2, rhs_end=4)
        x_res_list.append(x_res)
    if merge_self:
        x_res_list.append(x_res_self)
    if merge_type == 'max':
        x_res_cross = x_res_list[0]
        for x_res in x_res_list[1:]:
            x_res_cross = mx.sym.maximum(x_res_cross, x_res)
    elif merge_type == 'avg':
        x_res_cross = sum(x_res_list) / len(x_res_list)
    else:
        raise ValueError(f'unknown merge_type: {merge_type}')
    # reuse=x_out_self shares the output embedding weights with the
    # self branch
    x_out_cross = get_embedding_out(x_res_cross, num_filter_out, use_global_stats, lr_mult, x_out_self)
    return (x_out_self, x_out_cross)
|
def Convolution(data, num_filter, kernel, stride=None, dilate=None, pad=None, num_group=1, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None, **kwargs):
    """Convolution wrapper with auto naming, weight reuse and string
    ('same'-style) padding.

    When `pad` is a string it is resolved by padding_helper, which
    needs the spatial `input_size` passed through kwargs; the input is
    then explicitly zero-padded and the convolution itself runs with
    pad 0.  With `reuse` set, weight/bias come from the reused symbol's
    graph instead of fresh variables (see get_variable).
    """
    if (reuse is not None):
        # reuse pulls parameters by name, so an explicit name is required
        assert (name is not None)
    name = (GetLayerName.get('conv') if (name is None) else name)
    stride = (((1,) * len(kernel)) if (stride is None) else stride)
    dilate = (((1,) * len(kernel)) if (dilate is None) else dilate)
    if isinstance(pad, str):
        input_size = kwargs.get('input_size', None)
        if (input_size is None):
            raise ValueError('`input_size` is needed for padding')
        del kwargs['input_size']
        if isinstance(input_size, int):
            in_size_h = in_size_w = input_size
        else:
            (in_size_h, in_size_w) = input_size
        (ph0, ph1) = padding_helper(in_size_h, kernel[0], stride[0], pad)
        (pw0, pw1) = padding_helper(in_size_w, kernel[1], stride[1], pad)
        # pad_width covers (N, C, H, W); only spatial dims are padded
        data = mx.sym.pad(data, mode='constant', pad_width=(0, 0, 0, 0, ph0, ph1, pw0, pw1))
        pad = ((0,) * len(kernel))
    else:
        pad = (((0,) * len(kernel)) if (pad is None) else pad)
    # any kwargs left over at this point are unsupported
    assert (len(kwargs) == 0), sorted(kwargs)
    W = (get_variable((name + '_weight'), lr_mult, reuse) if (weight is None) else weight)
    if no_bias:
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W)
    else:
        B = (get_variable((name + '_bias'), lr_mult, reuse) if (bias is None) else bias)
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W, bias=B)
    return x
|
def Deconvolution(data, num_filter, kernel, stride=None, dilate=None, pad=None, adj=None, target_shape=None, num_group=1, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None):
    """Deconvolution wrapper with auto naming and weight reuse."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('deconv')
    ones = (1,) * len(kernel)
    zeros = (0,) * len(kernel)
    stride = ones if stride is None else stride
    dilate = ones if dilate is None else dilate
    pad = zeros if pad is None else pad
    adj = zeros if adj is None else adj
    if target_shape is None:
        target_shape = tuple([])
    if weight is None:
        weight = get_variable(name + '_weight', lr_mult, reuse)
    sym_name = name if reuse is None else None
    if no_bias:
        return mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, adj=adj, target_shape=target_shape, num_group=num_group, no_bias=no_bias, name=sym_name, weight=weight)
    if bias is None:
        bias = get_variable(name + '_bias', lr_mult, reuse)
    return mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, adj=adj, target_shape=target_shape, num_group=num_group, no_bias=no_bias, name=sym_name, weight=weight, bias=bias)
|
def FullyConnected(data, num_hidden, flatten=True, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None):
    """Fully-connected wrapper with auto naming and weight reuse."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('fc')
    if weight is None:
        weight = get_variable(name + '_weight', lr_mult, reuse)
    sym_name = name if reuse is None else None
    if no_bias:
        return mx.sym.FullyConnected(data, num_hidden=num_hidden, flatten=flatten, no_bias=no_bias, weight=weight, name=sym_name)
    if bias is None:
        bias = get_variable(name + '_bias', lr_mult, reuse)
    return mx.sym.FullyConnected(data, num_hidden=num_hidden, flatten=flatten, no_bias=no_bias, weight=weight, bias=bias, name=sym_name)
|
def Relu(data, name=None):
    """ReLU activation symbol."""
    layer_name = GetLayerName.get('relu') if name is None else name
    return mx.sym.Activation(data, act_type='relu', name=layer_name)
|
def LeakyRelu(data, slope=0.25, name=None):
    """Leaky ReLU activation symbol with the given negative slope."""
    layer_name = GetLayerName.get('leakyRelu') if name is None else name
    return mx.sym.LeakyReLU(data, slope=slope, act_type='leaky', name=layer_name)
|
def Tanh(data, name=None):
    """Tanh activation symbol."""
    layer_name = GetLayerName.get('tanh') if name is None else name
    return mx.sym.tanh(data, name=layer_name)
|
def Swish(data, name=None):
    """Swish activation: x * sigmoid(x).

    NOTE(review): `name` is resolved (consuming a GetLayerName counter
    slot when None) but never applied to the produced symbol, unlike
    the other activation wrappers -- confirm whether that is intended.
    """
    name = (GetLayerName.get('swish') if (name is None) else name)
    x = (data * mx.sym.sigmoid(data))
    return x
|
def Pooling(data, kernel, stride=None, pad=None, pool_type='max', global_pool=False, name=None):
    """Pooling wrapper; stride defaults to the kernel, pad to zero."""
    layer_name = GetLayerName.get('pool') if name is None else name
    if stride is None:
        stride = kernel
    if pad is None:
        pad = (0,) * len(kernel)
    return mx.sym.Pooling(data, kernel=kernel, stride=stride, pad=pad, pool_type=pool_type, global_pool=global_pool, name=layer_name)
|
def Dropout(data, p, name=None):
    """Dropout symbol with drop probability p."""
    layer_name = GetLayerName.get('drop') if name is None else name
    return mx.sym.Dropout(data, p=p, name=layer_name)
|
def BatchNorm(data, fix_gamma=False, momentum=0.9, eps=1e-05, use_global_stats=False, gamma=None, beta=None, moving_mean=None, moving_var=None, name=None, lr_mult=1, reuse=None):
    """BatchNorm wrapper creating/reusing gamma, beta and moving stats."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('bn')
    if gamma is None:
        gamma = get_variable(name + '_gamma', lr_mult, reuse)
    if beta is None:
        beta = get_variable(name + '_beta', lr_mult, reuse)
    # moving statistics are not learned, so their lr_mult stays 1
    if moving_mean is None:
        moving_mean = get_variable(name + '_moving_mean', 1, reuse)
    if moving_var is None:
        moving_var = get_variable(name + '_moving_var', 1, reuse)
    return mx.sym.BatchNorm(data, fix_gamma=fix_gamma, momentum=momentum, eps=eps, use_global_stats=use_global_stats, gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var, name=name if reuse is None else None)
|
def InstanceNorm(data, eps=1e-05, gamma=None, beta=None, name=None, lr_mult=1, reuse=None):
    """InstanceNorm wrapper creating/reusing gamma and beta."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('in')
    if gamma is None:
        gamma = get_variable(name + '_gamma', lr_mult, reuse)
    if beta is None:
        beta = get_variable(name + '_beta', lr_mult, reuse)
    return mx.sym.InstanceNorm(data, eps=eps, gamma=gamma, beta=beta, name=name if reuse is None else None)
|
def Flatten(data, name=None):
    """Flatten symbol collapsing all non-batch dimensions."""
    layer_name = GetLayerName.get('flatten') if name is None else name
    return mx.sym.flatten(data, name=layer_name)
|
def ConvRelu(*args, **kwargs):
    """Convolution followed by ReLU (relu named '<conv>_relu')."""
    conv = Conv(*args, **kwargs)
    return Relu(conv, conv.name + '_relu')
|
def BNRelu(*args, **kwargs):
    """BatchNorm followed by ReLU (relu named '<bn>_relu')."""
    bn = BN(*args, **kwargs)
    return Relu(bn, bn.name + '_relu')
|
def FCRelu(*args, **kwargs):
    """FullyConnected followed by ReLU (relu named '<fc>_relu')."""
    fc = FC(*args, **kwargs)
    return Relu(fc, fc.name + '_relu')
|
def ConvBNRelu(*args, **kwargs):
    """Conv -> BatchNorm -> ReLU, names derived from the conv symbol."""
    conv = Conv(*args, **kwargs)
    bn = BN(conv, name=conv.name + '_bn', lr_mult=kwargs.get('lr_mult', 1), reuse=kwargs.get('reuse', None))
    return Relu(bn, bn.name + '_relu')
|
def get_variable(name, lr_mult=1, reuse=None):
    """New symbol variable, or the same-named node from `reuse`'s graph."""
    if reuse is not None:
        return reuse.get_internals()[name]
    return mx.sym.Variable(name, lr_mult=lr_mult)
|
class GetLayerName(object):
    """Per-prefix counter producing unique default layer names
    ('conv0', 'conv1', ...)."""
    _name_count = {}

    @classmethod
    def get(cls, name_prefix):
        index = cls._name_count.get(name_prefix, 0)
        cls._name_count[name_prefix] = index + 1
        return name_prefix + str(index)
|
def padding_helper(in_size, kernel_size, stride, pad_type='same'):
    """Return (pad_before, pad_after) for 'same' padding along one axis.

    Output size is ceil(in_size / stride); the extra pixel of any odd
    total padding goes to the trailing side.  Raises ValueError for any
    pad_type other than 'same' (case-insensitive).
    """
    pad_type = pad_type.lower()
    if pad_type != 'same':
        raise ValueError(pad_type)
    out_size = -(-in_size // stride)  # ceil division
    total = max((out_size - 1) * stride + kernel_size - in_size, 0)
    before = total // 2
    return (before, total - before)
|
class OpConstant(mx.operator.CustomOp):
    """Custom op that outputs the fixed tensor `val`."""
    def __init__(self, val):
        self.val = val
    def forward(self, is_train, req, in_data, out_data, aux):
        self.assign(out_data[0], req[0], self.val)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # a constant has no inputs, so there is nothing to propagate
        pass
|
@mx.operator.register('Constant')
class OpConstantProp(mx.operator.CustomOpProp):
    """Prop for the 'Constant' op: decodes value/shape/dtype from the
    csv strings produced by CustomConstantEncoder."""

    def __init__(self, val_str, shape_str, type_str='float32'):
        super(OpConstantProp, self).__init__(need_top_grad=False)
        values = [float(v) for v in val_str.split(',')]
        dims = [int(d) for d in shape_str.split(',')]
        self.val = mx.nd.array(values, dtype=type_str).reshape(dims)

    def list_arguments(self):
        return []

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return (in_shape, [self.val.shape], [])

    def infer_type(self, in_type):
        return (in_type, [self.val.dtype], [])

    def create_operator(self, ctx, shapes, dtypes):
        return OpConstant(self.val.as_in_context(ctx))
|
def CustomConstantEncoder(value, dtype='float32'):
    """Encode a scalar/list/array as (csv_values, csv_shape) strings
    for the 'Constant' custom op.  `dtype` only applies when `value`
    is not already an ndarray."""
    if not isinstance(value, np.ndarray):
        if not isinstance(value, (list, tuple)):
            value = [value]
        value = np.array(value, dtype=dtype)
    flat = ','.join(str(v) for v in value.ravel())
    dims = ','.join(str(d) for d in value.shape)
    return (flat, dims)
|
def Constant(value, dtype='float32'):
    """Symbol producing a constant tensor built from `value` via the
    registered 'Constant' custom op."""
    assert isinstance(dtype, str), dtype
    (val, shape) = CustomConstantEncoder(value, dtype)
    return mx.sym.Custom(val_str=val, shape_str=shape, type_str=dtype, op_type='Constant')
|
class BilinearScale(mx.operator.CustomOp):
    """Differentiable bilinear up/down-scaling by a fixed factor.

    The backward pass replays autograd through BilinearResize2D, so
    forward records the op and stashes input/output on self.
    """
    def __init__(self, scale):
        self.scale = scale
    def forward(self, is_train, req, in_data, out_data, aux):
        x = in_data[0]
        (h, w) = x.shape[2:]
        # output size: int((d - 1) * scale) + 1, matching the prop's
        # infer_shape
        new_h = (int(((h - 1) * self.scale)) + 1)
        new_w = (int(((w - 1) * self.scale)) + 1)
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        # kept for the autograd replay in backward()
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        self.new_x.backward(out_grad[0])
        self.assign(in_grad[0], req[0], self.x.grad)
|
@mx.operator.register('BilinearScale')
class BilinearScaleProp(mx.operator.CustomOpProp):
    """Prop for BilinearScale: each spatial dim d becomes
    int((d - 1) * scale) + 1."""

    def __init__(self, scale):
        super(BilinearScaleProp, self).__init__(need_top_grad=True)
        self.scale = float(scale)

    def _scaled(self, size):
        # one-line helper mirroring the operator's output-size rule
        return int((size - 1) * self.scale) + 1

    def infer_shape(self, in_shape):
        n, c, h, w = in_shape[0]
        return (in_shape, [(n, c, self._scaled(h), self._scaled(w))], [])

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScale(self.scale)
|
class BilinearScaleLike(mx.operator.CustomOp):
    """Bilinearly resize the first input to the second input's spatial
    size; gradient flows only to the first input (autograd replay)."""
    def forward(self, is_train, req, in_data, out_data, aux):
        (x, x_ref) = in_data
        (new_h, new_w) = x_ref.shape[2:]
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        # kept for the autograd replay in backward()
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        self.new_x.backward(out_grad[0])
        # the reference input only supplies a shape: zero gradient
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], self.x.grad)
|
@mx.operator.register('BilinearScaleLike')
class BilinearScaleLikeProp(mx.operator.CustomOpProp):
    """Prop for BilinearScaleLike: output takes d2's spatial shape and
    d1's channel count."""

    def __init__(self):
        super(BilinearScaleLikeProp, self).__init__(need_top_grad=True)

    def list_arguments(self):
        return ['d1', 'd2']

    def infer_shape(self, in_shape):
        out_shape = list(in_shape[1])
        # channels come from the resized input, not the reference
        out_shape[1] = in_shape[0][1]
        return (in_shape, [out_shape], [])

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScaleLike()
|
class SegmentLoss(mx.operator.CustomOp):
    """Softmax segmentation loss: forward emits class probabilities,
    backward writes the cross-entropy gradient for masked pixels."""
    def __init__(self, has_grad_scale):
        self.has_grad_scale = has_grad_scale
    def forward(self, is_train, req, in_data, out_data, aux):
        # the op's output is the class-probability map itself
        prediction = mx.nd.softmax(in_data[0], axis=1)
        self.assign(out_data[0], req[0], prediction)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        label = mx.nd.one_hot(in_data[1], depth=prediction.shape[1]).transpose((0, 3, 1, 2))
        if (prediction.shape[2] != label.shape[2]):
            # resize the label to the prediction's resolution, then
            # re-binarize; pixels whose interpolated max confidence is
            # <= 0.5 are zeroed out (treated as ignore)
            label = mx.nd.contrib.BilinearResize2D(label, height=prediction.shape[2], width=prediction.shape[3])
            label = (mx.nd.one_hot(mx.nd.argmax(label, axis=1), depth=prediction.shape[1]).transpose((0, 3, 1, 2)) * (mx.nd.max(label, axis=1, keepdims=True) > 0.5))
        mask = label.sum(axis=1, keepdims=True)
        # normalize by valid pixels per sample; epsilon guards an
        # all-ignored batch
        num_pixel = mx.nd.maximum((mask.sum() / mask.shape[0]), 1e-05)
        grad = (((prediction - label) * mask) / num_pixel)
        if self.has_grad_scale:
            # optional per-sample loss weight supplied as a third input
            grad_scale = in_data[2].reshape((- 1), 1, 1, 1)
            grad = (grad * grad_scale)
        # the label input takes no gradient
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('SegmentLoss')
class SegmentLossProp(mx.operator.CustomOpProp):
    """Prop for SegmentLoss; adds a 'scale' input when enabled."""

    def __init__(self, has_grad_scale=0):
        super(SegmentLossProp, self).__init__(need_top_grad=False)
        self.has_grad_scale = int(has_grad_scale) > 0

    def list_arguments(self):
        args = ['data', 'label']
        if self.has_grad_scale:
            args.append('scale')
        return args

    def infer_shape(self, in_shape):
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return SegmentLoss(self.has_grad_scale)
|
class CompletionLoss(mx.operator.CustomOp):
    """Softmax loss against the argmax of a soft target, gated by label presence.

    forward emits softmax probabilities. backward hardens `target` via per-pixel
    argmax into a one-hot map, keeps only classes present in `label` (per
    image), and returns the masked, pixel-normalized softmax gradient.
    """
    def __init__(self, has_grad_scale):
        # When true, in_data[3] carries a per-sample gradient scale.
        self.has_grad_scale = has_grad_scale

    def forward(self, is_train, req, in_data, out_data, aux):
        prediction = mx.nd.softmax(in_data[0], axis=1)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        (logit, target, label) = in_data[:3]
        prediction = out_data[0]
        # Hard pseudo-target: one-hot of the per-pixel argmax of the soft target.
        onehot = target.argmax(axis=1)
        onehot = mx.nd.one_hot(onehot, depth=logit.shape[1]).transpose((0, 3, 1, 2))
        label = mx.nd.one_hot(label, depth=logit.shape[1]).transpose((0, 3, 1, 2))
        # Per-image class-presence gate derived from the label map.
        mask = label.max(axis=(2, 3), keepdims=True)
        onehot = (onehot * mask)
        # Pixels whose argmax class is absent from the label get zero weight.
        mask = onehot.sum(axis=1, keepdims=True)
        # FIX: clamp like SegmentLoss so a fully-masked batch cannot divide by 0.
        num_pixel = mx.nd.maximum((mask.sum() / mask.shape[0]), 1e-05)
        grad = (((prediction - onehot) * mask) / num_pixel)
        if self.has_grad_scale:
            grad_scale = in_data[3].reshape((- 1), 1, 1, 1)
            grad = (grad * grad_scale)
        # target and label inputs receive no gradient.
        in_grad[1][:] = 0
        in_grad[2][:] = 0
        self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('CompletionLoss')
class CompletionLossProp(mx.operator.CustomOpProp):
    """Prop for CompletionLoss; optional fourth 'scale' input."""

    def __init__(self, has_grad_scale=0):
        super(CompletionLossProp, self).__init__(need_top_grad=False)
        self.has_grad_scale = int(has_grad_scale) > 0

    def list_arguments(self):
        args = ['data', 'target', 'label']
        if self.has_grad_scale:
            args.append('scale')
        return args

    def infer_shape(self, in_shape):
        # Output mirrors the logit shape.
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return CompletionLoss(self.has_grad_scale)
|
class MultiSigmoidLoss(mx.operator.CustomOp):
    """Multi-label sigmoid output whose backward is (sigmoid(logit) - label)."""

    def forward(self, is_train, req, in_data, out_data, aux):
        (logit, label) = in_data
        # FIX: mx.nd.sigmoid is element-wise and takes no `axis` argument;
        # the previous call passed axis=1, which MXNet rejects as an
        # unknown operator parameter.
        prediction = mx.nd.sigmoid(logit)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        label = in_data[1]
        # Gradient of sigmoid cross-entropy w.r.t. the logits.
        grad = (prediction - label)
        # The label input gets no gradient.
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('MultiSigmoidLoss')
class MultiSigmoidLossProp(mx.operator.CustomOpProp):
    """Prop for MultiSigmoidLoss (data + dense multi-hot label)."""

    def __init__(self):
        super(MultiSigmoidLossProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # Output mirrors the logit shape.
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return MultiSigmoidLoss()
|
def config_efficientnet(model_name):
    """Return (block_args_list, global_params) namedtuples for an EfficientNet.

    `model_name` must be 'efficientnet-b0' .. 'efficientnet-b7'. Block strings
    (e.g. 'r1_k3_s11_e1_i32_o16_se0.25') are decoded into per-block argument
    namedtuples; width/depth scaling itself happens later in meta_efficientnet.
    """
    assert re.match('^efficientnet-b[0-7]$', model_name), model_name
    efficientnet_params = DEFAULT_EFFICIENT_PARAMS[model_name]
    block_args = DEFAULT_EFFICIENT_BLOCK_ARGS
    # (width multiplier, depth multiplier, input resolution, dropout rate).
    (width_coefficient, depth_coefficient, resolution, dropout_rate) = efficientnet_params
    global_params = {'block_args': block_args, 'batch_norm_momentum': 0.99, 'batch_norm_epsilon': 0.001, 'dropout_rate': dropout_rate, 'survival_prob': 0.8, 'num_classes': 1000, 'width_coefficient': width_coefficient, 'depth_coefficient': depth_coefficient, 'depth_divisor': 8, 'min_depth': None, 'use_se': True, 'clip_projection_output': False}
    global_params = namedtuple('global_parmas', sorted(global_params))(**global_params)
    # Split each block string on '_' and break tokens like 'k3' / 'se0.25'
    # into {letter-prefix: number-string} dicts.
    kv_list = [dict([re.split('([\\d\\.]+)', op)[:2] for op in _block_args.split('_')]) for _block_args in block_args]
    block_args_list = [{'kernel_size': int(kv['k']), 'num_repeat': int(kv['r']), 'input_filters': int(kv['i']), 'output_filters': int(kv['o']), 'expand_ratio': int(kv['e']), 'id_skip': ('noskip' not in block_string), 'se_ratio': (float(kv['se']) if ('se' in kv) else None), 'strides': (int(kv['s'][0]), int(kv['s'][1])), 'conv_type': int(kv.get('c', '0')), 'fused_conv': int(kv.get('f', '0')), 'super_pixel': int(kv.get('p', '0')), 'dilate': int(kv.get('d', '1')), 'condconv': ('cc' in block_string), 'survival_prob': 1.0} for (kv, block_string) in zip(kv_list, block_args)]
    block_args_list = [namedtuple('block_args', sorted(x))(**x) for x in block_args_list]
    return (block_args_list, global_params)
|
def MBConvBlock(data, block_args, global_params, use_global_stats, block_id, name, lr_mult, reuse, input_size=None):
    """EfficientNet MBConv block: [1x1 expand] -> depthwise -> SE -> 1x1 project.

    A residual connection (optionally through DropConnect / stochastic depth)
    is added when stride is 1 and input/output channel counts match.
    `input_size` is accepted for call-site compatibility but unused here.
    """
    if block_args.super_pixel:
        raise NotImplementedError
    if block_args.condconv:
        raise NotImplementedError
    kernel = ((block_args.kernel_size,) * 2)
    # 1x1 kernels never dilate; pad keeps the spatial size for stride 1.
    dilate = (((1 if (kernel[0] == 1) else block_args.dilate),) * 2)
    pad = ((((((kernel[0] - 1) * dilate[0]) + 1) // 2),) * 2)
    momentum = global_params.batch_norm_momentum
    eps = global_params.batch_norm_epsilon
    # Expanded channel count for the depthwise stage.
    num_filters = (block_args.input_filters * block_args.expand_ratio)
    (conv_id, bn_id) = (0, 0)
    if block_args.fused_conv:
        # Fused MBConv: one full convolution replaces expand + depthwise.
        x = Conv(data, num_filters, kernel, block_args.strides, pad=pad, dilate=dilate, no_bias=True, name=(name + ('block%d_conv' % block_id)), lr_mult=lr_mult, reuse=reuse)
    else:
        if (block_args.expand_ratio != 1):
            # Pointwise channel expansion.
            x = Conv(data, num_filters, (1, 1), no_bias=True, name=(name + ('block%d_conv%d' % (block_id, conv_id))), lr_mult=lr_mult, reuse=reuse)
            x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + ('block%d_bn%d' % (block_id, bn_id))), lr_mult=lr_mult, reuse=reuse)
            x = Swish(x)
            (conv_id, bn_id) = ((conv_id + 1), (bn_id + 1))
        else:
            x = data
        # Depthwise convolution (num_group == channel count).
        x = Conv(x, num_filters, kernel, block_args.strides, pad=pad, dilate=dilate, num_group=num_filters, no_bias=True, name=(name + ('block%d_depthwise_conv0' % block_id)), lr_mult=lr_mult, reuse=reuse)
    x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + ('block%d_bn%d' % (block_id, bn_id))), lr_mult=lr_mult, reuse=reuse)
    x = Swish(x)
    bn_id += 1
    has_se = (global_params.use_se and (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1))
    if has_se:
        # Squeeze-and-excite: global pool -> reduce -> swish -> expand -> sigmoid gate.
        num_filters_rd = max(1, int((block_args.input_filters * block_args.se_ratio)))
        x_se = mx.sym.mean(x, axis=(2, 3), keepdims=True)
        x_se = Conv(x_se, num_filters_rd, (1, 1), name=(name + ('block%d_se_conv0' % block_id)), lr_mult=lr_mult, reuse=reuse)
        x_se = Swish(x_se)
        x_se = Conv(x_se, num_filters, (1, 1), name=(name + ('block%d_se_conv1' % block_id)), lr_mult=lr_mult, reuse=reuse)
        x = mx.sym.broadcast_mul(mx.sym.sigmoid(x_se), x)
    # Pointwise projection to the block's output channels (no activation).
    x = Conv(x, block_args.output_filters, (1, 1), no_bias=True, name=(name + ('block%d_conv%d' % (block_id, conv_id))), lr_mult=lr_mult, reuse=reuse)
    x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + ('block%d_bn%d' % (block_id, bn_id))), lr_mult=lr_mult, reuse=reuse)
    (conv_id, bn_id) = ((conv_id + 1), (bn_id + 1))
    if global_params.clip_projection_output:
        x = mx.sym.clip(x, a_min=(- 6), a_max=6)
    if (block_args.id_skip and all([(s == 1) for s in block_args.strides]) and (block_args.input_filters == block_args.output_filters)):
        if (block_args.survival_prob > 0):
            # Stochastic depth: drop the residual branch with prob 1 - survival_prob.
            x = mx.sym.Custom(x, p=(1 - block_args.survival_prob), op_type='DropConnect')
        x = (x + data)
    return x
|
def MBConvBlockWithoutDepthwise(data, block_args, global_params, use_global_stats, begin_id, name, lr_mult, reuse):
    """Placeholder for conv_type=1 EfficientNet blocks; not implemented yet."""
    raise NotImplementedError
|
def meta_efficientnet(model_name, get_internals=False, input_size=None):
    """Return a symbol-building closure for the given EfficientNet variant.

    The returned `efficient_model(data, ...)` builds stem -> MBConv stages ->
    head -> global pool -> fc -> softmax and returns the probability symbol
    (plus an endpoint dict when `get_internals` is true).
    """
    (block_args_list, global_params) = config_efficientnet(model_name)

    def round_filters(num_filters):
        # Width scaling: multiply by width_coefficient and round to a multiple
        # of depth_divisor, never dropping below 90% of the scaled value.
        multiplier = global_params.width_coefficient
        if (not multiplier):
            return num_filters
        divisor = global_params.depth_divisor
        min_depth = global_params.min_depth
        num_filters = (num_filters * multiplier)
        new_num = max((min_depth or divisor), ((int((num_filters + (divisor / 2))) // divisor) * divisor))
        if (new_num < (0.9 * num_filters)):
            new_num += divisor
        return int(new_num)

    def round_repeats(num_repeat):
        # Depth scaling: multiply repeat count by depth_coefficient, round up.
        multiplier = global_params.depth_coefficient
        if (not multiplier):
            return num_repeat
        return int(np.ceil((multiplier * num_repeat)))

    def efficient_model(data, use_global_stats=False, bn_data=False, name=None, lr_mult=1, reuse=None):
        name = ('' if (name is None) else name)
        endpoints = {}
        momentum = global_params.batch_norm_momentum
        eps = global_params.batch_norm_epsilon
        endpoints['input'] = data
        if bn_data:
            data = BN(data, fix_gamma=True, momentum=momentum, eps=eps, name='bn_data', lr_mult=lr_mult, reuse=reuse)
        x = Conv(data, round_filters(32), (3, 3), (2, 2), pad=(1, 1), no_bias=True, name=(name + 'stem_conv0'), lr_mult=lr_mult, reuse=reuse)
        # NOTE(review): stem/head BN names omit the `name` prefix, unlike the
        # conv layers — confirm this is intentional (matters for reuse/prefixes).
        x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name='stem_bn0', lr_mult=lr_mult, reuse=reuse)
        x = Swish(x)
        endpoints['stem'] = x
        block_id = 0
        total_blocks = sum([block_args.num_repeat for block_args in block_args_list])
        survival_prob = global_params.survival_prob  # (unused local)
        for (i, block_args) in enumerate(block_args_list):
            assert (block_args.num_repeat > 0)
            assert (block_args.super_pixel in [0, 1, 2])
            # Apply width/depth scaling; survival prob decays linearly with depth.
            block_args = block_args._replace(input_filters=round_filters(block_args.input_filters), output_filters=round_filters(block_args.output_filters), num_repeat=round_repeats(block_args.num_repeat), survival_prob=(1.0 - (((1.0 - global_params.survival_prob) * float(block_id)) / total_blocks)))
            ConvBlock = {0: MBConvBlock, 1: MBConvBlockWithoutDepthwise}[block_args.conv_type]
            x = ConvBlock(x, block_args, global_params, use_global_stats=use_global_stats, block_id=block_id, name=name, lr_mult=lr_mult, reuse=reuse)
            endpoints[('block%d' % block_id)] = x
            block_id += 1
            # Repeats after the first use stride 1 and matching channels.
            for j in range((block_args.num_repeat - 1)):
                block_args = block_args._replace(input_filters=block_args.output_filters, strides=(1, 1), survival_prob=(1.0 - (((1.0 - global_params.survival_prob) * float(block_id)) / total_blocks)))
                x = ConvBlock(x, block_args, global_params, use_global_stats=use_global_stats, block_id=block_id, name=name, lr_mult=lr_mult, reuse=reuse, input_size=input_size)
                endpoints[('block%d' % block_id)] = x
                block_id += 1
        x = Conv(x, round_filters(1280), (1, 1), no_bias=True, name=(name + 'head_conv0'), lr_mult=lr_mult, reuse=reuse)
        x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name='head_bn0', lr_mult=lr_mult, reuse=reuse)
        x = Swish(x)
        if (global_params.dropout_rate > 0):
            x = Drop(x, p=global_params.dropout_rate)
        endpoints['head'] = x
        x = Pool(x, kernel=(1, 1), pool_type='avg', global_pool=True)
        x = mx.sym.flatten(x)
        x = FC(x, global_params.num_classes, name=(name + 'head_fc0'), lr_mult=lr_mult, reuse=reuse)
        endpoints['logit'] = x
        x = mx.sym.softmax(x, axis=1)
        endpoints['prob'] = x
        if get_internals:
            return (x, endpoints)
        return x

    return efficient_model
|
def tf2mx_params(ckpt_file, dst_file=None, name='', use_ema=True):
    """Convert a TensorFlow EfficientNet checkpoint into MXNet param dicts.

    Args:
        ckpt_file: path to the TF checkpoint.
        dst_file: when given, the merged params are also saved there via mx.nd.save.
        name: prefix prepended to every generated MXNet parameter name.
        use_ema: prefer ExponentialMovingAverage shadow variables when present.

    Returns:
        (arg_params, aux_params): dicts of mx.nd.NDArray keyed by MXNet names.
    """
    # TF conv kernels are HWIO (depthwise HWOI); MXNet wants OIHW.
    convert_w = (lambda x: mx.nd.array((x.transpose(3, 2, 0, 1) if (x.ndim == 4) else x.T)))
    convert_b = (lambda x: mx.nd.array(x))
    convert_dp_w = (lambda x: mx.nd.array(x.transpose(2, 3, 0, 1)))
    # TF param-type -> (arg/aux prefix, MXNet suffix, layout converter).
    lookup_ptype = {'kernel': ('arg', 'weight', convert_w), 'bias': ('arg', 'bias', convert_b), 'depthwise_kernel': ('arg', 'weight', convert_dp_w), 'gamma': ('arg', 'gamma', convert_b), 'beta': ('arg', 'beta', convert_b), 'moving_mean': ('aux', 'moving_mean', convert_b), 'moving_variance': ('aux', 'moving_var', convert_b)}
    lookup_op = {'conv2d': 'conv', 'depthwise_conv2d': 'depthwise_conv', 'tpu_batch_normalization': 'bn', 'dense': 'fc'}

    def mapKey(tf_key):
        # Map a TF variable name to ('arg:...'/'aux:...' MXNet name, converter),
        # or (None, None) when the variable is not part of the network.
        names = tf_key.split('/')
        if (not re.match('^efficientnet-b[0-7]$', names[0])):
            return (None, None)
        (block, op, ptype) = names[1:4]
        if block.startswith('blocks'):
            block = ('block' + block.split('_')[(- 1)])
        block_name = ((name + block) + '_')
        if (op == 'se'):
            # SE sub-scope: the real op/ptype sit one level deeper.
            (op, ptype) = names[3:5]
            block_name = (block_name + 'se_')
        r = re.match('^\\w*_(\\d+)$', op)
        op_id = (r.group(1) if r else '0')
        _op = (re.match('^(\\w+)_\\d+$', op).group(1) if r else op)
        # FIX: only a missing ptype is expected here; the previous bare
        # `except:` swallowed every exception (even KeyboardInterrupt).
        try:
            (prefix, suffix, converter) = lookup_ptype[ptype]
        except KeyError:
            raise KeyError('[{}], ({}, {}, {}), {}'.format(ptype, block, op, ptype, tf_key))
        op_name = lookup_op[_op]
        return (((((((prefix + ':') + block_name) + op_name) + op_id) + '_') + suffix), converter)

    try:
        import tensorflow.compat.v1 as tf
    except ImportError:
        import tensorflow as tf
    reader = tf.train.load_checkpoint(ckpt_file)
    shape_map = reader.get_variable_to_shape_map()
    keys = sorted(shape_map.keys())
    # Map each base variable to its EMA shadow when requested and available.
    ema_keys = [k for k in keys if k.endswith('ExponentialMovingAverage')]
    keys = list(set((list((set(keys) - set(ema_keys))) + [k.rsplit('/', 1)[0] for k in ema_keys])))
    keys_ = [(k + '/ExponentialMovingAverage') for k in keys]
    kk = {k: (k_ if (use_ema and (k_ in ema_keys)) else k) for (k, k_) in zip(keys, keys_)}
    mx_params = {}
    for (k, tf_key) in kk.items():
        (mx_key, converter) = mapKey(k)
        if (mx_key is None):
            if (tf_key != 'global_step'):
                print(('Cannot parse tf_key: %s' % tf_key))
            continue
        if (mx_key in mx_params):
            raise KeyError(('Duplicate key: %s, %s, %s' % (k, tf_key, mx_key)))
        mx_params[mx_key] = converter(reader.get_tensor(tf_key))
    if (dst_file is not None):
        mx.nd.save(dst_file, mx_params)
    arg_params = {k[4:]: v for (k, v) in mx_params.items() if k.startswith('arg:')}
    aux_params = {k[4:]: v for (k, v) in mx_params.items() if k.startswith('aux:')}
    return (arg_params, aux_params)
|
def incepConv(data, num_filter, kernel, stride=None, dilate=None, pad=None, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """Conv -> BN -> ReLU building block used throughout Inception-BN."""
    assert name is not None
    out = Conv(data, num_filter, kernel, stride, dilate, pad,
               name=('conv_%s' % name), lr_mult=lr_mult, reuse=reuse)
    out = BN(out, fix_gamma=False, momentum=momentum, eps=eps,
             use_global_stats=use_global_stats,
             name=('bn_%s' % name), lr_mult=lr_mult, reuse=reuse)
    return Relu(out)
|
def incepBlockA(data, num_filter_1, num_filter_3r, num_filter_3, num_filter_d3r, num_filter_d3, num_filter_p, pool_type, dilate=1, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """Inception-A block: 1x1 / 3x3 / double-3x3 / pooled-projection branches."""
    assert name is not None
    common = dict(momentum=momentum, eps=eps, use_global_stats=use_global_stats,
                  lr_mult=lr_mult, reuse=reuse)
    d2 = (dilate, dilate)
    branch1 = incepConv(data, num_filter_1, (1, 1), name=('%s_1x1' % name), **common)
    branch3 = incepConv(data, num_filter_3r, (1, 1), name=('%s_3x3_reduce' % name), **common)
    branch3 = incepConv(branch3, num_filter_3, (3, 3), pad=d2, dilate=d2, name=('%s_3x3' % name), **common)
    branch_d3 = incepConv(data, num_filter_d3r, (1, 1), name=('%s_double_3x3_reduce' % name), **common)
    branch_d3 = incepConv(branch_d3, num_filter_d3, (3, 3), pad=d2, dilate=d2, name=('%s_double_3x3_0' % name), **common)
    branch_d3 = incepConv(branch_d3, num_filter_d3, (3, 3), pad=d2, dilate=d2, name=('%s_double_3x3_1' % name), **common)
    branch_pool = Pool(data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool_type)
    branch_pool = incepConv(branch_pool, num_filter_p, (1, 1), name=('%s_proj' % name), **common)
    return mx.sym.Concat(branch1, branch3, branch_d3, branch_pool, dim=1,
                         name=('ch_concat_%s_chconcat' % name))
|
def incepBlockB(data, num_filter_3r, num_filter_3, num_filter_d3r, num_filter_d3, stride=2, dilate=1, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """Inception-B (reduction) block: strided 3x3 / double-3x3 / max-pool branches."""
    assert name is not None
    common = dict(momentum=momentum, eps=eps, use_global_stats=use_global_stats,
                  lr_mult=lr_mult, reuse=reuse)
    s2 = (stride, stride)
    d2 = (dilate, dilate)
    branch3 = incepConv(data, num_filter_3r, (1, 1), name=('%s_3x3_reduce' % name), **common)
    branch3 = incepConv(branch3, num_filter_3, (3, 3), stride=s2, pad=d2, dilate=d2, name=('%s_3x3' % name), **common)
    branch_d3 = incepConv(data, num_filter_d3r, (1, 1), name=('%s_double_3x3_reduce' % name), **common)
    branch_d3 = incepConv(branch_d3, num_filter_d3, (3, 3), stride=(1, 1), pad=d2, dilate=d2, name=('%s_double_3x3_0' % name), **common)
    branch_d3 = incepConv(branch_d3, num_filter_d3, (3, 3), stride=s2, pad=d2, dilate=d2, name=('%s_double_3x3_1' % name), **common)
    branch_pool = Pool(data, kernel=(3, 3), stride=s2, pad=(1, 1), pool_type='max')
    return mx.sym.Concat(branch3, branch_d3, branch_pool, dim=1,
                         name=('ch_concat_%s_chconcat' % name))
|
def inceptionBN(x, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
    """Inception-BN (BN-Inception) backbone returning stage-5b features.

    Stages 4x/5x use dilated convolutions (dilate 2 then 4) instead of further
    striding, producing a denser feature map.
    """
    name = ('' if (name is None) else name)
    if bn_data:
        x = BN(x, fix_gamma=True, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
    # Stem: 7x7/2 conv, pool, 1x1 reduce, 3x3 conv, pool.
    # NOTE(review): these incepConv calls do not forward momentum/eps/
    # use_global_stats (defaults apply) — confirm intentional.
    x = incepConv(x, 64, (7, 7), stride=(2, 2), pad=(3, 3), name=(name + '1'), lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool_1'), pool_type='max')
    x = incepConv(x, 64, (1, 1), stride=(1, 1), pad=(0, 0), name=(name + '2_red'), lr_mult=lr_mult, reuse=reuse)
    x = incepConv(x, 192, (3, 3), stride=(1, 1), pad=(1, 1), name=(name + '2'), lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool_2'), pool_type='max')
    # Stage 3 (stride kept, reduction via incepBlockB with stride 1 + dilation).
    x = incepBlockA(x, 64, 64, 64, 64, 96, 32, 'avg', 1, momentum, eps, use_global_stats, '3a', lr_mult, reuse)
    x = incepBlockA(x, 64, 64, 96, 64, 96, 64, 'avg', 1, momentum, eps, use_global_stats, '3b', lr_mult, reuse)
    x = incepBlockB(x, 128, 160, 64, 96, 1, 2, momentum, eps, use_global_stats, '3c', lr_mult, reuse)
    # Stage 4, dilate 2.
    x = incepBlockA(x, 224, 64, 96, 96, 128, 128, 'avg', 2, momentum, eps, use_global_stats, '4a', lr_mult, reuse)
    x = incepBlockA(x, 192, 96, 128, 96, 128, 128, 'avg', 2, momentum, eps, use_global_stats, '4b', lr_mult, reuse)
    x = incepBlockA(x, 160, 128, 160, 128, 160, 128, 'avg', 2, momentum, eps, use_global_stats, '4c', lr_mult, reuse)
    x = incepBlockA(x, 96, 128, 192, 160, 192, 128, 'avg', 2, momentum, eps, use_global_stats, '4d', lr_mult, reuse)
    x = incepBlockB(x, 128, 192, 192, 256, 1, 4, momentum, eps, use_global_stats, '4e', lr_mult, reuse)
    # Stage 5, dilate 4.
    x = incepBlockA(x, 352, 192, 320, 160, 224, 128, 'avg', 4, momentum, eps, use_global_stats, '5a', lr_mult, reuse)
    x = incepBlockA(x, 352, 192, 320, 192, 224, 128, 'max', 4, momentum, eps, use_global_stats, '5b', lr_mult, reuse)
    return x
|
class OpConstant(mx.operator.CustomOp):
    """Custom op that always emits a fixed tensor."""

    def __init__(self, val):
        self.val = val

    def forward(self, is_train, req, in_data, out_data, aux):
        self.assign(out_data[0], req[0], self.val)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # A constant has no inputs, so there is nothing to propagate.
        pass
|
@mx.operator.register('Constant')
class OpConstantProp(mx.operator.CustomOpProp):
    """Prop for the Constant op; decodes value/shape/dtype from strings."""

    def __init__(self, val_str, shape_str, type_str='float32'):
        super(OpConstantProp, self).__init__(need_top_grad=False)
        values = [float(v) for v in val_str.split(',')]
        dims = [int(d) for d in shape_str.split(',')]
        self.val = mx.nd.array(values, dtype=type_str).reshape(dims)

    def list_arguments(self):
        return []

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return (in_shape, [self.val.shape], [])

    def infer_type(self, in_type):
        return (in_type, [self.val.dtype], [])

    def create_operator(self, ctx, shapes, dtypes):
        return OpConstant(self.val.as_in_context(ctx))
|
def CustomConstantEncoder(value, dtype='float32'):
    """Encode a constant as (flat-values string, shape string) for OpConstantProp.

    Scalars and lists/tuples are converted to a numpy array of `dtype`;
    an ndarray input is used as-is (its own dtype is kept).
    """
    if isinstance(value, np.ndarray):
        arr = value
    else:
        seq = value if isinstance(value, (list, tuple)) else [value]
        arr = np.array(seq, dtype=dtype)
    flat = ','.join(str(v) for v in arr.ravel())
    dims = ','.join(str(d) for d in arr.shape)
    return (flat, dims)
|
def Constant(value, dtype='float32'):
    """Wrap a python/numpy constant as a symbolic Custom('Constant') node."""
    assert isinstance(dtype, str), dtype
    encoded_val, encoded_shape = CustomConstantEncoder(value, dtype)
    return mx.sym.Custom(val_str=encoded_val, shape_str=encoded_shape,
                         type_str=dtype, op_type='Constant')
|
class DropConnect(mx.operator.CustomOp):
    """Stochastic depth / drop-connect: zero entire samples with probability p.

    During training each sample (first axis) is kept with probability 1 - p
    and rescaled by 1 / (1 - p) so the expectation matches; at inference the
    input passes through unchanged.
    """
    def __init__(self, p):
        self.drop_rate = p
        # Kept mask from the last forward pass (None => identity pass-through).
        self.mask = None

    def forward(self, is_train, req, in_data, out_data, aux):
        data = in_data[0]
        # FIX: the original condition was `is_train or drop_rate == 0`, which
        # sampled a (no-op) mask at inference when p == 0 and during training
        # when p == 0, wasting RNG draws. Dropping only matters when training
        # with p > 0; every other case is the identity.
        if (is_train and (self.drop_rate > 0)):
            # One Bernoulli draw per sample, broadcast over remaining axes.
            mask_shape = ([data.shape[0]] + ([1] * (len(data.shape) - 1)))
            mask = mx.nd.random.uniform(0, 1, mask_shape, ctx=data.context)
            # Keep-mask scaled so E[out] == data.
            mask = ((mask > self.drop_rate) / (1 - self.drop_rate))
            out = (data * mask)
            self.mask = mask
        else:
            out = data
            self.mask = None
        self.assign(out_data[0], req[0], out)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        if (self.mask is None):
            grad = out_grad[0].copy()
        else:
            # Gradient flows only through kept samples, with the same rescale.
            grad = (out_grad[0] * self.mask)
        self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('DropConnect')
class DropConnectProp(mx.operator.CustomOpProp):
    """Prop for DropConnect; `p` is the drop probability in [0, 1)."""

    def __init__(self, p):
        super(DropConnectProp, self).__init__(need_top_grad=True)
        self.drop_rate = float(p)
        assert 0 <= self.drop_rate < 1

    def list_arguments(self):
        return ['data']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # Shape-preserving op.
        return (in_shape, in_shape, [])

    def create_operator(self, ctx, shapes, dtypes):
        return DropConnect(self.drop_rate)
|
class BilinearScale(mx.operator.CustomOp):
    """Bilinearly rescale an NCHW tensor by a fixed factor.

    forward records mx.nd.contrib.BilinearResize2D on an autograd tape so that
    backward can replay the exact resize gradient.
    """
    def __init__(self, scale):
        self.scale = scale

    def forward(self, is_train, req, in_data, out_data, aux):
        x = in_data[0]
        (h, w) = x.shape[2:]
        # Align-corners style target size: (d - 1) * scale + 1.
        new_h = (int(((h - 1) * self.scale)) + 1)
        new_w = (int(((w - 1) * self.scale)) + 1)
        # Record the resize so its gradient can be computed in backward.
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Replay the recorded graph; x.grad then holds the resize gradient.
        self.new_x.backward(out_grad[0])
        self.assign(in_grad[0], req[0], self.x.grad)
|
@mx.operator.register('BilinearScale')
class BilinearScaleProp(mx.operator.CustomOpProp):
    """Prop for BilinearScale; computes the scaled output shape."""

    def __init__(self, scale):
        super(BilinearScaleProp, self).__init__(need_top_grad=True)
        self.scale = float(scale)

    def infer_shape(self, in_shape):
        n, c, h, w = in_shape[0]
        # Mirror the operator's size rule: (d - 1) * scale + 1.
        out_h = int((h - 1) * self.scale) + 1
        out_w = int((w - 1) * self.scale) + 1
        return (in_shape, [(n, c, out_h, out_w)], [])

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScale(self.scale)
|
class BilinearScaleLike(mx.operator.CustomOp):
    """Bilinearly resize the first input to the spatial size of the second.

    NOTE(review): duplicate of the BilinearScaleLike class defined earlier in
    this file; this later definition shadows it.
    """
    def forward(self, is_train, req, in_data, out_data, aux):
        (x, x_ref) = in_data
        # Target size comes from the reference input's H, W.
        (new_h, new_w) = x_ref.shape[2:]
        # Record the resize on an autograd tape for replay in backward.
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Replay the recorded resize; x.grad then holds the exact gradient.
        self.new_x.backward(out_grad[0])
        # The reference input only supplies the target shape; zero grad.
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], self.x.grad)
|
@mx.operator.register('BilinearScaleLike')
class BilinearScaleLikeProp(mx.operator.CustomOpProp):
    """Prop for BilinearScaleLike: d2 gives the spatial size, d1 the channels.

    NOTE(review): 'BilinearScaleLike' is registered twice in this file; the
    later registration wins.
    """

    def __init__(self):
        super(BilinearScaleLikeProp, self).__init__(need_top_grad=True)

    def list_arguments(self):
        return ['d1', 'd2']

    def infer_shape(self, in_shape):
        shape = list(in_shape[1])
        shape[1] = in_shape[0][1]
        return (in_shape, [shape], [])

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScaleLike()
|
class SegmentLoss(mx.operator.CustomOp):
    """Softmax segmentation loss as a custom op.

    NOTE(review): duplicate of the SegmentLoss class defined earlier in this
    file; this later definition shadows it.
    """
    def __init__(self, has_grad_scale):
        # When true, in_data[2] carries a per-sample gradient scale.
        self.has_grad_scale = has_grad_scale

    def forward(self, is_train, req, in_data, out_data, aux):
        # Channel-wise softmax over the logits.
        prediction = mx.nd.softmax(in_data[0], axis=1)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        # Integer label map -> one-hot NCHW.
        label = mx.nd.one_hot(in_data[1], depth=prediction.shape[1]).transpose((0, 3, 1, 2))
        if (prediction.shape[2] != label.shape[2]):
            # Resize label to prediction resolution, then re-binarize; pixels
            # whose max interpolated value <= 0.5 become all-zero (ignored).
            label = mx.nd.contrib.BilinearResize2D(label, height=prediction.shape[2], width=prediction.shape[3])
            label = (mx.nd.one_hot(mx.nd.argmax(label, axis=1), depth=prediction.shape[1]).transpose((0, 3, 1, 2)) * (mx.nd.max(label, axis=1, keepdims=True) > 0.5))
        # mask = 1 where the pixel has a valid (in-range) label.
        mask = label.sum(axis=1, keepdims=True)
        # Average labelled pixels per image; clamped to avoid division by zero.
        num_pixel = mx.nd.maximum((mask.sum() / mask.shape[0]), 1e-05)
        grad = (((prediction - label) * mask) / num_pixel)
        if self.has_grad_scale:
            grad_scale = in_data[2].reshape((- 1), 1, 1, 1)
            grad = (grad * grad_scale)
        # The label input gets no gradient.
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('SegmentLoss')
class SegmentLossProp(mx.operator.CustomOpProp):
    """Prop for SegmentLoss; optional third 'scale' input.

    NOTE(review): 'SegmentLoss' is registered twice in this file; the later
    registration wins.
    """

    def __init__(self, has_grad_scale=0):
        super(SegmentLossProp, self).__init__(need_top_grad=False)
        self.has_grad_scale = int(has_grad_scale) > 0

    def list_arguments(self):
        args = ['data', 'label']
        if self.has_grad_scale:
            args.append('scale')
        return args

    def infer_shape(self, in_shape):
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return SegmentLoss(self.has_grad_scale)
|
class MultiSigmoidLoss(mx.operator.CustomOp):
    """Multi-label sigmoid output whose backward is (sigmoid(logit) - label).

    NOTE(review): duplicate of the MultiSigmoidLoss class defined earlier in
    this file; this later definition shadows it.
    """
    def forward(self, is_train, req, in_data, out_data, aux):
        (logit, label) = in_data
        # FIX: mx.nd.sigmoid is element-wise and takes no `axis` argument;
        # the previous call passed axis=1, which MXNet rejects as an
        # unknown operator parameter.
        prediction = mx.nd.sigmoid(logit)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        label = in_data[1]
        # Gradient of sigmoid cross-entropy w.r.t. the logits.
        grad = (prediction - label)
        # The label input gets no gradient.
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('MultiSigmoidLoss')
class MultiSigmoidLossProp(mx.operator.CustomOpProp):
    """Prop for MultiSigmoidLoss.

    NOTE(review): 'MultiSigmoidLoss' is registered twice in this file; the
    later registration wins.
    """

    def __init__(self):
        super(MultiSigmoidLossProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return MultiSigmoidLoss()
|
class MultiSoftmaxLoss(mx.operator.CustomOp):
    """Softmax output whose backward is the dense-label gradient (p - label)."""

    def forward(self, is_train, req, in_data, out_data, aux):
        logit, label = in_data
        self.assign(out_data[0], req[0], mx.nd.softmax(logit, axis=1))

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Standard softmax cross-entropy gradient against a dense label.
        grad = out_data[0] - in_data[1]
        # The label input gets no gradient.
        in_grad[1][:] = 0
        self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('MultiSoftmaxLoss')
class MultiSoftmaxLossProp(mx.operator.CustomOpProp):
    """Prop for MultiSoftmaxLoss (logits + dense soft label of same shape)."""

    def __init__(self):
        super(MultiSoftmaxLossProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return MultiSoftmaxLoss()
|
def ResStem(data, num_filter, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
    """ResNet stem: optional input BN, 7x7/2 conv, BN, ReLU, 3x3/2 max-pool."""
    if name is None:
        name = ''
    out = data
    if bn_data:
        # Fixed-gamma BN acting as input normalization.
        out = BN(data, fix_gamma=True, momentum=momentum, eps=eps,
                 use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
    out = Conv(out, num_filter=num_filter, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
               no_bias=True, name=(name + 'conv0'), lr_mult=lr_mult, reuse=reuse)
    out = BN(out, fix_gamma=False, momentum=momentum, eps=eps,
             use_global_stats=use_global_stats, name=(name + 'bn0'),
             lr_mult=lr_mult, reuse=reuse)
    out = Relu(out, name=(name + 'relu0'))
    return Pool(out, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max',
                name=(name + 'pool0'))
|
def ResUnit(data, num_filter, stride, dilate, projection, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """Pre-activation residual unit (ResNet v2 style).

    projection: use a 1x1 conv on the pre-activated input as the shortcut
    (needed when stride != 1 or the channel count changes); otherwise identity.
    bottle_neck: 1x1 -> 3x3 -> 1x1 with num_filter/4 internal channels,
    else plain 3x3 -> 3x3.
    """
    assert (name is not None)
    # Shared pre-activation; the projection shortcut also branches from here.
    x = BNRelu(data, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn1'), lr_mult=lr_mult, reuse=reuse)
    if projection:
        shortcut = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(name + '_sc'), lr_mult=lr_mult, reuse=reuse)
    else:
        # Identity shortcut taken from the raw (un-normalized) input.
        shortcut = data
    if bottle_neck:
        x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
        # Stride and dilation are applied by the middle 3x3 conv.
        x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn3'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv3'), lr_mult=lr_mult, reuse=reuse)
    else:
        x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
    x = (x + shortcut)
    return x
|
def ResBlock(data, num_unit, num_filter, stride, dilate, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """Stack `num_unit` ResUnits; only the first downsamples and projects."""
    assert name is not None
    out = ResUnit(data, num_filter, stride, dilate, True, bottle_neck,
                  momentum, eps, use_global_stats, (name + '_unit1'), lr_mult, reuse)
    # Remaining units keep stride 1 with identity shortcuts.
    for unit_idx in range(2, num_unit + 1):
        out = ResUnit(out, num_filter, 1, dilate, False, bottle_neck,
                      momentum, eps, use_global_stats,
                      (name + ('_unit%d' % unit_idx)), lr_mult, reuse)
    return out
|
def _Resnet(x, num_units, num_filters, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, strides=(1, 2, 2, 2), dilates=(1, 1, 1, 1), name=None, lr_mult=1, reuse=None):
    """Generic 4-stage ResNet trunk ending in a final BN + ReLU."""
    if name is None:
        name = ''
    out = ResStem(x, num_filters[0], momentum, eps, use_global_stats, bn_data,
                  name, lr_mult, reuse)
    for stage in range(1, 5):
        out = ResBlock(out, num_units[stage - 1], num_filters[stage],
                       strides[stage - 1], dilates[stage - 1], bottle_neck,
                       momentum, eps, use_global_stats,
                       (name + ('stage%d' % stage)), lr_mult, reuse)
    return BNRelu(out, fix_gamma=False, momentum=momentum, eps=eps,
                  use_global_stats=use_global_stats, name=(name + 'bn1'),
                  lr_mult=lr_mult, reuse=reuse)
|
def resnet18(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-18 trunk -> global average pool -> flattened feature vector."""
    if name is None:
        name = ''
    feat = _Resnet(x, (2, 2, 2, 2), (64, 64, 128, 256, 512), False,
                   momentum, eps, use_global_stats, name=name,
                   lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
|
def resnet34(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-34 trunk -> global average pool -> flattened feature vector."""
    if name is None:
        name = ''
    feat = _Resnet(x, (3, 4, 6, 3), (64, 64, 128, 256, 512), False,
                   momentum, eps, use_global_stats, name=name,
                   lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
|
def resnet50(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-50 (bottleneck) trunk -> global average pool -> flattened features."""
    if name is None:
        name = ''
    feat = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True,
                   momentum, eps, use_global_stats, name=name,
                   lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
|
def resnet101(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-101 (bottleneck) trunk -> global average pool -> flattened features."""
    if name is None:
        name = ''
    feat = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                   momentum, eps, use_global_stats, name=name,
                   lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
|
def resnet101_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
    """DeepLab-LargeFOV head (single dilated 3x3 conv) on a dilated ResNet-101."""
    if name is None:
        name = ''
    # Backbone keeps lr_mult=1; only the classifier head gets the boost.
    feat = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                   momentum, eps, use_global_stats,
                   strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                   name=name, lr_mult=1, reuse=reuse)
    return Conv(feat, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12),
                name=(name + 'fc1'), lr_mult=lr_mult, reuse=reuse)
|
def resnet101_aspp(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
    """Dilated ResNet-101 with an ASPP head: four parallel 3x3 convolutions
    at dilation rates 6/12/18/24, summed into a single `num_cls`-channel map.
    The head branches get `lr_mult`; the backbone stays at 1."""
    prefix = name if name is not None else ''
    features = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                       momentum, eps, use_global_stats,
                       strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                       name=prefix, lr_mult=1, reuse=reuse)
    branches = [Conv(features, num_cls, kernel=(3, 3), dilate=(rate, rate), pad=(rate, rate),
                     name=prefix + ('fc1_aspp%d' % rate), lr_mult=lr_mult, reuse=reuse)
                for rate in (6, 12, 18, 24)]
    return sum(branches)
|
def ResStemV1(data, num_filter, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
    """ResNet-v1 stem: optional fixed-gamma BN on the raw input, a 7x7
    stride-2 convolution, BN + ReLU, and a 3x3 stride-2 max pool (overall
    4x spatial downsampling)."""
    prefix = name if name is not None else ''
    if bn_data:
        # NOTE: the input BN intentionally does not receive lr_mult
        # (gamma is fixed; matches the original parameterization).
        out = BN(data, fix_gamma=True, momentum=momentum, eps=eps,
                 use_global_stats=use_global_stats,
                 name=prefix + 'bn_data', reuse=reuse)
    else:
        out = data
    out = Conv(out, num_filter=num_filter, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
               no_bias=True, name=prefix + 'conv0', lr_mult=lr_mult, reuse=reuse)
    out = BN(out, fix_gamma=False, momentum=momentum, eps=eps,
             use_global_stats=use_global_stats,
             name=prefix + 'bn0', lr_mult=lr_mult, reuse=reuse)
    out = Relu(out, name=prefix + 'relu0')
    return Pool(out, kernel=(3, 3), stride=(2, 2), pad=(1, 1),
                pool_type='max', name=prefix + 'pool0')
|
def ResUnitV1(data, num_filter, stride, dilate, projection, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """One ResNet-v1 residual unit.

    Args:
        data: input symbol/tensor.
        num_filter: output channel count of the unit.
        stride: spatial stride applied in the first convolution (and the
            projection shortcut, when used).
        dilate: dilation rate of the 3x3 convolution(s); pad matches dilate
            so spatial size is preserved at stride 1.
        projection: if True, the shortcut is a 1x1 conv + BN (used for the
            first unit of a stage, where shape changes); otherwise identity.
        bottle_neck: if True, 1x1 -> 3x3 -> 1x1 bottleneck (ResNet-50/101);
            if False, the two-layer 3x3 basic unit (ResNet-18/34).
        momentum, eps, use_global_stats: BatchNorm settings.
        name: mandatory layer-name prefix for parameter sharing/reuse.
        lr_mult, reuse: forwarded to every layer.

    Returns:
        Relu(residual_branch + shortcut).
    """
    assert (name is not None)
    if projection:
        shortcut = Conv(data, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(name + '_conv0'), lr_mult=lr_mult, reuse=reuse)
        shortcut = BN(shortcut, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn0'), lr_mult=lr_mult, reuse=reuse)
    else:
        shortcut = data
    if bottle_neck:
        # 1x1 reduce -> 3x3 (dilated) -> 1x1 expand; inner width is num_filter/4.
        x = Conv(data, num_filter=(num_filter // 4), kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn1'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=(num_filter // 4), kernel=(3, 3), stride=(1, 1), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv3'), lr_mult=lr_mult, reuse=reuse)
        x = BN(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn3'), lr_mult=lr_mult, reuse=reuse)
    else:
        # Basic two-layer 3x3 unit (needed by resnet18/resnet34, which call
        # _Resnet with bottle_neck=False); previously raised NotImplementedError.
        x = Conv(data, num_filter=num_filter, kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn1'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
        x = BN(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
    x = (x + shortcut)
    x = Relu(x)
    return x
|
def ResBlockV1(data, num_unit, num_filter, stride, dilate, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """One ResNet stage: `num_unit` residual units sharing `num_filter`
    output channels. The first unit applies the stride and a projection
    shortcut; the remaining units are stride-1 with identity shortcuts."""
    assert (name is not None)
    out = data
    for idx in range(num_unit):
        unit_name = name + ('_unit%d' % (idx + 1))
        if idx == 0:
            # First unit handles the stage's stride/channel change.
            out = ResUnitV1(out, num_filter, stride, dilate, True, bottle_neck,
                            momentum, eps, use_global_stats, unit_name, lr_mult, reuse)
        else:
            out = ResUnitV1(out, num_filter, 1, dilate, False, bottle_neck,
                            momentum, eps, use_global_stats, unit_name, lr_mult, reuse)
    return out
|
def _Resnet(x, num_units, num_filters, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, strides=(1, 2, 2, 2), dilates=(1, 1, 1, 1), name=None, lr_mult=1, reuse=None):
    """Assemble a ResNet-v1 trunk: the stem (num_filters[0] channels)
    followed by four stages. Stage i uses num_units[i] units,
    num_filters[i + 1] channels, strides[i] and dilates[i]."""
    prefix = x if False else ('' if name is None else name)  # keep prefix defaulting to ''
    prefix = '' if name is None else name
    out = ResStemV1(x, num_filters[0], momentum, eps, use_global_stats,
                    bn_data, prefix, lr_mult, reuse)
    stage_specs = zip(num_units, num_filters[1:], strides, dilates)
    for stage_idx, (units, filters, stride, dilate) in enumerate(stage_specs, start=1):
        out = ResBlockV1(out, units, filters, stride, dilate, bottle_neck,
                         momentum, eps, use_global_stats,
                         prefix + ('stage%d' % stage_idx), lr_mult, reuse)
    return out
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.