id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
23,440 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameters.

  A class-level counter tracks how many ops of each type have been
  built; the count is used to generate unique parameter tensor names
  such as 'dense_0:weight'.
  """

  # Shared across all builder instances: op type -> number built so far.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Creates the op via op_cls(*args, **kwargs) and immediately applies
    the given `config` and `params` dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Optional enums declared on the op class; None when not declared.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    self._OP_COUNT[op_type] = self._OP_COUNT.get(op_type, 0) + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Sets a single config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Sets the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously a mismatched name fell through the loop and
      # silently set the *last* enum member with the wrong value.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Wraps `value` into a named tensor and sets it as an op param.

    The tensor is named '<op_type>_<index>:<name>', e.g. 'dense_0:weight'.

    Raises:
      ValueError: If the op declares ParamName but `name` is unknown.
    """
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Returns the configured op."""
    return self._op
def create_activation_node(kernel_node, activation, output_names):
  """Builds a standalone node for a layer's fused activation function.

  The new node consumes the kernel node's single output tensor and
  takes over the kernel's original output names.
  """
  actv_fn = activations.get(activation)
  #op = ops.Operation(_activation_cvt_map[actv_fn])
  op = _activation_cvt_map[actv_fn]()
  node_name = '/'.join([kernel_node.name, activations.serialize(actv_fn)])
  actv_node = ops.Node(node_name, op)
  # Route the kernel's output through tensor '<kernel_name>:0' into the
  # activation node.
  link_tensor = kernel_node.name + ':0'
  kernel_node.output_names = [link_tensor]
  actv_node.input_names = [link_tensor]
  actv_node.output_names = copy.deepcopy(output_names)
  # Detach the activation node from its kernel node: its layer name must
  # not be set to the kernel's layer name.
  actv_node.layer_name = _NO_LAYER_NAME
  return actv_node
def convert_layer_separableConv2D(node):
  """Converts a parsed Keras SeparableConv2D layer node to nndct node(s).

  Returns the conv node alone, or [conv_node, activation_node] when the
  layer has a fused activation.
  """
  config = node.get_config()
  params = node.get_params()
  # depthwise_kernel: (5,5,32,1) HWIO & pointwise_kernel: (1,1,32,11) HWIO
  # [*kernel_size, input_channels / groups, filters]
  # The fused activation is stripped from the op config ('activation' set
  # to None), recorded as an attr, and emitted as a separate node below.
  op = (
      OpBuilder(op_def.TFSeparableConv2D, config, params).config(
          'activation', None).attr('activation', config['activation']).attr(
              'in_dim', params['depthwise_kernel'].shape[-2]).build())
  conv_node = ops.Node(node.name, op)
  conv_node.input_names = copy.deepcopy(node.input_names)
  if config['activation']:
    actv_node = create_activation_node(conv_node, config['activation'],
                                       node.output_names)
    return [conv_node, actv_node]
  else:
    conv_node.output_names = copy.deepcopy(node.output_names)
    return conv_node | null |
23,441 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameters.

  A class-level counter tracks how many ops of each type have been
  built; the count is used to generate unique parameter tensor names
  such as 'dense_0:weight'.
  """

  # Shared across all builder instances: op type -> number built so far.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Creates the op via op_cls(*args, **kwargs) and immediately applies
    the given `config` and `params` dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Optional enums declared on the op class; None when not declared.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    self._OP_COUNT[op_type] = self._OP_COUNT.get(op_type, 0) + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Sets a single config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Sets the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously a mismatched name fell through the loop and
      # silently set the *last* enum member with the wrong value.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Wraps `value` into a named tensor and sets it as an op param.

    The tensor is named '<op_type>_<index>:<name>', e.g. 'dense_0:weight'.

    Raises:
      ValueError: If the op declares ParamName but `name` is unknown.
    """
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Returns the configured op."""
    return self._op
def create_activation_node(kernel_node, activation, output_names):
  """Builds a standalone node for a layer's fused activation function.

  The new node consumes the kernel node's single output tensor and
  takes over the kernel's original output names.
  """
  actv_fn = activations.get(activation)
  #op = ops.Operation(_activation_cvt_map[actv_fn])
  op = _activation_cvt_map[actv_fn]()
  node_name = '/'.join([kernel_node.name, activations.serialize(actv_fn)])
  actv_node = ops.Node(node_name, op)
  # Route the kernel's output through tensor '<kernel_name>:0' into the
  # activation node.
  link_tensor = kernel_node.name + ':0'
  kernel_node.output_names = [link_tensor]
  actv_node.input_names = [link_tensor]
  actv_node.output_names = copy.deepcopy(output_names)
  # Detach the activation node from its kernel node: its layer name must
  # not be set to the kernel's layer name.
  actv_node.layer_name = _NO_LAYER_NAME
  return actv_node
def convert_layer_conv2dtranspose(node):
  """Converts a parsed Keras Conv2DTranspose layer node to nndct node(s).

  Returns the conv node alone, or [conv_node, activation_node] when the
  layer has a fused activation.
  """
  config = node.get_config()
  params = node.get_params()
  # Note: tf Conv2DTranspose stores its weight as HWOI rather than HWIO
  # (Conv2D). To get the right dim expand/prune in the optimizer stage the
  # weight is transposed here and transposed back in
  # tf_nndct.pruning/runner.py [_get_sparse_model, _get_slim_model].
  params['kernel'] = params['kernel'].transpose((0, 1, 3, 2))
  # [*kernel_size, input_channels / groups, filters] output_pad
  op = (
      OpBuilder(op_def.TFConv2DTranspose, config, params).config(
          'activation', None).attr('activation', config['activation']).attr(
              'in_dim', params['kernel'].shape[-2]).build())
  conv_node = ops.Node(node.name, op)
  conv_node.input_names = copy.deepcopy(node.input_names)
  # A fused activation becomes its own downstream node.
  if config['activation']:
    actv_node = create_activation_node(conv_node, config['activation'],
                                       node.output_names)
    return [conv_node, actv_node]
  else:
    conv_node.output_names = copy.deepcopy(node.output_names)
    return conv_node | null |
23,442 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameters.

  A class-level counter tracks how many ops of each type have been
  built; the count is used to generate unique parameter tensor names
  such as 'dense_0:weight'.
  """

  # Shared across all builder instances: op type -> number built so far.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Creates the op via op_cls(*args, **kwargs) and immediately applies
    the given `config` and `params` dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Optional enums declared on the op class; None when not declared.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    self._OP_COUNT[op_type] = self._OP_COUNT.get(op_type, 0) + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Sets a single config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Sets the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously a mismatched name fell through the loop and
      # silently set the *last* enum member with the wrong value.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Wraps `value` into a named tensor and sets it as an op param.

    The tensor is named '<op_type>_<index>:<name>', e.g. 'dense_0:weight'.

    Raises:
      ValueError: If the op declares ParamName but `name` is unknown.
    """
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Returns the configured op."""
    return self._op
def create_activation_node(kernel_node, activation, output_names):
  """Builds a standalone node for a layer's fused activation function.

  The new node consumes the kernel node's single output tensor and
  takes over the kernel's original output names.
  """
  actv_fn = activations.get(activation)
  #op = ops.Operation(_activation_cvt_map[actv_fn])
  op = _activation_cvt_map[actv_fn]()
  node_name = '/'.join([kernel_node.name, activations.serialize(actv_fn)])
  actv_node = ops.Node(node_name, op)
  # Route the kernel's output through tensor '<kernel_name>:0' into the
  # activation node.
  link_tensor = kernel_node.name + ':0'
  kernel_node.output_names = [link_tensor]
  actv_node.input_names = [link_tensor]
  actv_node.output_names = copy.deepcopy(output_names)
  # Detach the activation node from its kernel node: its layer name must
  # not be set to the kernel's layer name.
  actv_node.layer_name = _NO_LAYER_NAME
  return actv_node
def convert_layer_conv3d(node):
  """Converts a parsed Keras Conv3D layer node to nndct node(s).

  Returns the conv node alone, or [conv_node, activation_node] when the
  layer has a fused activation.
  """
  config = node.get_config()
  params = node.get_params()
  # [*kernel_size, input_channels / groups, filters]
  # The fused activation is stripped from the op config ('activation' set
  # to None), recorded as an attr, and emitted as a separate node below.
  op = (
      OpBuilder(op_def.TFConv3D,
                config, params).config('activation', None).attr(
                    'activation', config['activation']).attr(
                        'in_dim', params['kernel'].shape[-2]).build())
  conv_node = ops.Node(node.name, op)
  conv_node.input_names = copy.deepcopy(node.input_names)
  if config['activation']:
    actv_node = create_activation_node(conv_node, config['activation'],
                                       node.output_names)
    return [conv_node, actv_node]
  else:
    conv_node.output_names = copy.deepcopy(node.output_names)
    return conv_node | null |
23,443 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameters.

  A class-level counter tracks how many ops of each type have been
  built; the count is used to generate unique parameter tensor names
  such as 'dense_0:weight'.
  """

  # Shared across all builder instances: op type -> number built so far.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Creates the op via op_cls(*args, **kwargs) and immediately applies
    the given `config` and `params` dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Optional enums declared on the op class; None when not declared.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    self._OP_COUNT[op_type] = self._OP_COUNT.get(op_type, 0) + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Sets a single config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Sets the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously a mismatched name fell through the loop and
      # silently set the *last* enum member with the wrong value.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Wraps `value` into a named tensor and sets it as an op param.

    The tensor is named '<op_type>_<index>:<name>', e.g. 'dense_0:weight'.

    Raises:
      ValueError: If the op declares ParamName but `name` is unknown.
    """
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Returns the configured op."""
    return self._op
def create_activation_node(kernel_node, activation, output_names):
  """Builds a standalone node for a layer's fused activation function.

  The new node consumes the kernel node's single output tensor and
  takes over the kernel's original output names.
  """
  actv_fn = activations.get(activation)
  #op = ops.Operation(_activation_cvt_map[actv_fn])
  op = _activation_cvt_map[actv_fn]()
  node_name = '/'.join([kernel_node.name, activations.serialize(actv_fn)])
  actv_node = ops.Node(node_name, op)
  # Route the kernel's output through tensor '<kernel_name>:0' into the
  # activation node.
  link_tensor = kernel_node.name + ':0'
  kernel_node.output_names = [link_tensor]
  actv_node.input_names = [link_tensor]
  actv_node.output_names = copy.deepcopy(output_names)
  # Detach the activation node from its kernel node: its layer name must
  # not be set to the kernel's layer name.
  actv_node.layer_name = _NO_LAYER_NAME
  return actv_node
def convert_layer_conv3dtranspose(node):
  """Converts a parsed Keras Conv3DTranspose layer node to nndct node(s).

  Returns the conv node alone, or [conv_node, activation_node] when the
  layer has a fused activation.
  """
  config = node.get_config()
  params = node.get_params()
  # Note: tf Conv3DTranspose stores its weight as DHWOI rather than DHWIO
  # (Conv3D). To get the right dim expand/prune in the optimizer stage the
  # weight is transposed here and transposed back in
  # tf_nndct.pruning/runner.py [_get_sparse_model, _get_slim_model].
  params['kernel'] = params['kernel'].transpose((0, 1, 2, 4, 3))
  # [*kernel_size, input_channels / groups, filters]
  op = (
      OpBuilder(op_def.TFConv3DTranspose, config, params).config(
          'activation', None).attr('activation', config['activation']).attr(
              'in_dim', params['kernel'].shape[-2]).build())
  conv_node = ops.Node(node.name, op)
  conv_node.input_names = copy.deepcopy(node.input_names)
  # A fused activation becomes its own downstream node.
  if config['activation']:
    actv_node = create_activation_node(conv_node, config['activation'],
                                       node.output_names)
    return [conv_node, actv_node]
  else:
    conv_node.output_names = copy.deepcopy(node.output_names)
    return conv_node | null |
23,444 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameters.

  A class-level counter tracks how many ops of each type have been
  built; the count is used to generate unique parameter tensor names
  such as 'dense_0:weight'.
  """

  # Shared across all builder instances: op type -> number built so far.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Creates the op via op_cls(*args, **kwargs) and immediately applies
    the given `config` and `params` dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Optional enums declared on the op class; None when not declared.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    self._OP_COUNT[op_type] = self._OP_COUNT.get(op_type, 0) + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Sets a single config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Sets the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously a mismatched name fell through the loop and
      # silently set the *last* enum member with the wrong value.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Wraps `value` into a named tensor and sets it as an op param.

    The tensor is named '<op_type>_<index>:<name>', e.g. 'dense_0:weight'.

    Raises:
      ValueError: If the op declares ParamName but `name` is unknown.
    """
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Returns the configured op."""
    return self._op
def create_activation_node(kernel_node, activation, output_names):
  """Builds a standalone node for a layer's fused activation function.

  The new node consumes the kernel node's single output tensor and
  takes over the kernel's original output names.
  """
  actv_fn = activations.get(activation)
  #op = ops.Operation(_activation_cvt_map[actv_fn])
  op = _activation_cvt_map[actv_fn]()
  node_name = '/'.join([kernel_node.name, activations.serialize(actv_fn)])
  actv_node = ops.Node(node_name, op)
  # Route the kernel's output through tensor '<kernel_name>:0' into the
  # activation node.
  link_tensor = kernel_node.name + ':0'
  kernel_node.output_names = [link_tensor]
  actv_node.input_names = [link_tensor]
  actv_node.output_names = copy.deepcopy(output_names)
  # Detach the activation node from its kernel node: its layer name must
  # not be set to the kernel's layer name.
  actv_node.layer_name = _NO_LAYER_NAME
  return actv_node
def convert_layer_depthwise_conv2d(node):
  """Converts a parsed Keras DepthwiseConv2D layer node to nndct node(s).

  Returns the conv node alone, or [conv_node, activation_node] when the
  layer has a fused activation.
  """
  config = node.get_config()
  params = node.get_params()
  depthwise_kernel = params['depthwise_kernel']
  # [*kernel_size, input_dim, depth_multiplier]
  # group == in_dim == input channel count (shape[-2]);
  # out_dim = input channels * depth_multiplier (shape[-2] * shape[-1]).
  op = (
      OpBuilder(op_def.TFDepthwiseConv2D, config, params).config(
          'activation', None).attr('activation', config['activation']).attr(
              'group', depthwise_kernel.shape[-2]).attr(
                  'in_dim', depthwise_kernel.shape[-2]).attr(
                      'out_dim', depthwise_kernel.shape[-2] *
                      depthwise_kernel.shape[-1]).build())
  conv_node = ops.Node(node.name, op)
  conv_node.input_names = copy.deepcopy(node.input_names)
  if config['activation']:
    actv_node = create_activation_node(conv_node, config['activation'],
                                       node.output_names)
    return [conv_node, actv_node]
  else:
    conv_node.output_names = copy.deepcopy(node.output_names)
    return conv_node | null |
23,445 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameters.

  A class-level counter tracks how many ops of each type have been
  built; the count is used to generate unique parameter tensor names
  such as 'dense_0:weight'.
  """

  # Shared across all builder instances: op type -> number built so far.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Creates the op via op_cls(*args, **kwargs) and immediately applies
    the given `config` and `params` dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Optional enums declared on the op class; None when not declared.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    self._OP_COUNT[op_type] = self._OP_COUNT.get(op_type, 0) + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Sets a single config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Sets the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously a mismatched name fell through the loop and
      # silently set the *last* enum member with the wrong value.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Wraps `value` into a named tensor and sets it as an op param.

    The tensor is named '<op_type>_<index>:<name>', e.g. 'dense_0:weight'.

    Raises:
      ValueError: If the op declares ParamName but `name` is unknown.
    """
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Returns the configured op."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Creates an ops.Node and wires its input/output tensor names.

  Slice assignment keeps the node's own list objects while replacing
  their contents.
  """
  new_node = ops.Node(name, op)
  new_node.input_names[:] = list(input_names)
  new_node.output_names[:] = list(output_names)
  return new_node
def convert_layer_concatenate(node):
  # Direct 1:1 mapping: Keras Concatenate -> TFConcat with the same
  # config and unchanged input/output wiring.
  op = OpBuilder(op_def.TFConcat, node.get_config()).build()
  return create_node(node.name, op, node.input_names, node.output_names) | null |
23,446 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameters.

  A class-level counter tracks how many ops of each type have been
  built; the count is used to generate unique parameter tensor names
  such as 'dense_0:weight'.
  """

  # Shared across all builder instances: op type -> number built so far.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Creates the op via op_cls(*args, **kwargs) and immediately applies
    the given `config` and `params` dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Optional enums declared on the op class; None when not declared.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    self._OP_COUNT[op_type] = self._OP_COUNT.get(op_type, 0) + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Sets a single config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Sets the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously a mismatched name fell through the loop and
      # silently set the *last* enum member with the wrong value.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Wraps `value` into a named tensor and sets it as an op param.

    The tensor is named '<op_type>_<index>:<name>', e.g. 'dense_0:weight'.

    Raises:
      ValueError: If the op declares ParamName but `name` is unknown.
    """
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Returns the configured op."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Creates an ops.Node and wires its input/output tensor names.

  Slice assignment keeps the node's own list objects while replacing
  their contents.
  """
  new_node = ops.Node(name, op)
  new_node.input_names[:] = list(input_names)
  new_node.output_names[:] = list(output_names)
  return new_node
def convert_layer_reshape(node):
  # Direct 1:1 mapping: Keras Reshape -> TFReshape with the same config
  # and unchanged input/output wiring.
  op = OpBuilder(op_def.TFReshape, node.get_config()).build()
  return create_node(node.name, op, node.input_names, node.output_names) | null |
23,447 | import copy
import os
import shutil
import tensorflow as tf
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from typing import List, Tuple
from nndct_shared.expanding.expander import ChannelExpander
from nndct_shared.expanding.spec import ExpandableGroup, ExpandingSpec
from nndct_shared.expanding.spec import ExpandingSpec
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning.pruning_lib import group_nodes
from nndct_shared.pruning import errors
from tf_nndct.graph import parser
from tf_nndct.utils import keras_utils as ku
from tf_nndct.utils import tensor_utils
keras = tf.keras
class ExpandingRunner(object):
def __init__(self, model: keras.Model,
input_signature: tf.TensorSpec) -> None:
def expand_from_spec(self, expanding_spec: ExpandingSpec) -> keras.Model:
def create_from_config(layer):
def expand(
self,
channel_divisible: int = 2,
nodes_to_exclude: List[str] = []) -> Tuple[keras.Model, ExpandingSpec]:
def export_model_as_pb(model: keras.Model, output_dir: str, output_name: str):
def convert_pb_to_fp16_pb(model_path: str,
save_path: str,
input_nodes: List[str],
output_nodes: List[str],
as_text=False):
def expand_and_export(model_name: str,
                      model: keras.Model,
                      input_signature: tf.TensorSpec,
                      output_dir: str,
                      channel_divisibles: List[int],
                      input_nodes: List[str],
                      output_nodes: List[str],
                      export_fp16_model=True) -> None:
  """For each channel divisor, expands the model and exports artifacts.

  For every value in `channel_divisibles` this writes, under
  '<output_dir>/<model_name>_padded_<divisor>/':
    - 'expanding_spec': the serialized expanding spec,
    - '<model_name>_fp32.pb': the frozen expanded model,
    - '<model_name>_fp16.pb': an fp16 conversion (when export_fp16_model).
  """
  expanding_runner = ExpandingRunner(model, input_signature)
  for channel_divisible in channel_divisibles:
    dir_path = os.path.join(output_dir,
                            model_name + "_padded_{}".format(channel_divisible))
    if not os.path.exists(dir_path):
      os.makedirs(dir_path)
    expanded_model, expanding_spec = expanding_runner.expand(channel_divisible)
    # Persist the spec so the expansion can be reproduced or inspected later.
    with open(os.path.join(dir_path, "expanding_spec"), 'w') as f:
      f.write(expanding_spec.serialize())
    export_model_as_pb(expanded_model, dir_path, model_name + "_fp32.pb")
    if export_fp16_model:
      convert_pb_to_fp16_pb(
          os.path.join(dir_path, model_name + "_fp32.pb"),
          os.path.join(dir_path, model_name + "_fp16.pb"), input_nodes,
          output_nodes) | null |
23,448 | import os
from typing import Optional
import tensorflow as tf
from nndct_shared.base import NNDCT_KEYS, NNDCT_OP, GLOBAL_MAP
from nndct_shared.utils import option_util, NndctOption, NndctScreenLogger
from tf_nndct.graph import OpTypes
from tf_nndct.graph import builder
from tf_nndct.graph import ops
from tf_nndct.graph import parser
from tf_nndct.graph import utils as graph_utils
from tf_nndct.layers import recurrent
from tf_nndct.quantization import TFQuantizer
from tf_nndct.utils import keras_utils
from tf_nndct.utils import tf_utils
from tf_nndct.quantization import RNNTFQConfig
from tensorflow.keras import activations
def _init_quant_mode(quant_mode):
  """Normalizes `quant_mode` ('calib'/'test', or a legacy int) to the
  internal integer mode (1 = calibration, 2 = test) and logs start-up."""
  qmode = 1
  if isinstance(quant_mode, int):
    NndctScreenLogger().warning(
        f"quant_mode will not support integer value in future version. It supports string values 'calib' and 'test'."
    )
    qmode = quant_mode
  elif isinstance(quant_mode, str):
    str_to_mode = {'calib': 1, 'test': 2}
    if quant_mode in str_to_mode:
      qmode = str_to_mode[quant_mode]
    else:
      NndctScreenLogger().error(
          f"quant_mode supported values are 'calib' and 'test'. Change it to 'calib' as calibration mode"
      )
  else:
    NndctScreenLogger().error(
        f"quant_mode supported values are string 'calib' and 'test'. Change it to 'calib' as calibration mode"
    )
  # An explicitly set nndct option overrides the argument.
  if NndctOption.nndct_quant_mode.value > 0:
    qmode = NndctOption.nndct_quant_mode.value
  if qmode == 1:
    NndctScreenLogger().info(f"Quantization calibration process start up...")
  elif qmode == 2:
    NndctScreenLogger().info(f"Quantization test process start up...")
  return qmode
def _merge_cell_graphs(cell_graphs):
  """Merges multiple RNN cell graphs into a single ops.Graph.

  Each cell graph's name is used as a scope prefix on its nodes and
  output tensors so names stay unique after the merge. Input nodes are
  renamed first so their names follow the compiler's expected scheme.

  NOTE: node/tensor objects are mutated in place and moved into the
  merged graph; the input cell graphs should not be reused afterwards.
  """

  def prepend_scope(obj, scope):
    # Mutates the node/tensor name in place.
    obj.name = '{}/{}'.format(scope, obj.name)

  def rename_input(node):
    # XXX(yuwang): Modify input nodes names to input_0, input_1... to make it
    # easier for the compiler to inditify the inputs. We need to design
    # this more rationally in the future.
    # The node name follow the correspondence rules:
    # input_0 -> inputs
    # input_1 -> H(t-1)
    # input_2 -> C(t-1)
    if node.op.type != OpTypes.INPUT:
      return
    arg_to_input = {
        'args_0': 'input_0',
        'args_1': 'input_1',
        'args_1_1': 'input_2'
    }
    name_parts = node.name.split('/')
    # Only the last path component (the traced arg name) is remapped.
    name_parts[-1] = arg_to_input[name_parts[-1]]
    node.name = '/'.join(name_parts)

  graph = ops.Graph()
  for cell_graph in cell_graphs:
    scope = cell_graph.name
    for node in cell_graph.nodes:
      # Rename before scoping so the arg-name lookup sees the original
      # last path component.
      rename_input(node)
      prepend_scope(node, scope)
      for tensor in node.out_tensors:
        prepend_scope(tensor, scope)
      graph.add_node(node)
  return graph
def _maybe_rebuild_rnn(model):
  """Rebuilds every LSTM cell in `model` as a quantized cell.

  Returns a list of (cell_graph, layer_nodes) tuples, one per rebuilt
  LSTM layer; empty when the model contains no tf_nndct LSTM layers.
  Each layer's `cell` attribute is replaced in place by the rebuilt cell.

  NOTE(review): `_parse_rnn_cell` and `_copy_attr` are not defined in the
  visible source — confirm they exist at module level in the original file.
  """
  rebuilding_results = []
  layers = keras_utils.gather_layers(model)
  for layer in layers:
    # TODO(yuwang): Support StackedRNNCells, RNN
    if not isinstance(layer, recurrent.LSTM):
      continue
    cell = layer.cell
    assert cell.recurrent_activation == activations.get(
        'sigmoid'), 'recurrent_activation must be "sigmoid"'
    graph_name = 'rnn_cell_%d' % len(rebuilding_results)
    cell_graph = _parse_rnn_cell(cell)
    cell_graph.name = graph_name
    # The rebuilt cell's generated code is written next to the graph name.
    rebuilt_cell, layer_nodes = builder.KerasBuilder(cell_graph).build(
        os.path.join('quantize_result', graph_name + '.py'), quantized=True)
    rebuilding_results.append((cell_graph, layer_nodes))
    # Carry the structural attributes of the original cell over.
    _copy_attr('units', cell, rebuilt_cell)
    _copy_attr('state_size', cell, rebuilt_cell)
    _copy_attr('output_size', cell, rebuilt_cell)
    layer.cell = rebuilt_cell
  return rebuilding_results
# NOTE(review): `GlobalMap` is not imported in the visible source, and
# GLOBAL_MAP is already imported from nndct_shared.base above — this
# rebinding looks erroneous; confirm against the original module.
GLOBAL_MAP = GlobalMap()
def tf_quantizer(model,
                 input_signature,
                 quant_mode: str = "calib",
                 output_dir: str = "quantize_result",
                 bitwidth: int = 8,
                 quant_config_file: Optional[str] = None):
  """Creates and registers a TFQuantizer for `model`.

  Parses the Keras model into an nndct graph, builds a quantized Python
  model under `output_dir`, optionally rebuilds LSTM cells, then sets up
  the quantizer over the (possibly merged) graph and returns it.
  """
  #initialize quant mode
  qmode = _init_quant_mode(quant_mode)
  # turn off weights equalization and bias correction
  option_util.set_option_value("nndct_param_corr", False)
  option_util.set_option_value("nndct_equalization", False)
  # parse the quant config file
  QConfiger = RNNTFQConfig()
  #if quant_config_file:
  QConfiger.parse_config_file(
      quant_config_file, bit_width_w=bitwidth, bit_width_a=bitwidth)
  qconfig = QConfiger.qconfig
  # lstm IP only support 16 bit activation
  quantizer = TFQuantizer(qmode, output_dir, qconfig)
  # Register quantizer/mode/config globally so other components can find them.
  GLOBAL_MAP.set_map(NNDCT_KEYS.QUANTIZER, quantizer)
  GLOBAL_MAP.set_map(NNDCT_KEYS.QUANT_MODE, qmode)
  GLOBAL_MAP.set_map(NNDCT_KEYS.QUANT_CONFIG, qconfig)
  graph = parser.from_keras_model(model, input_signature)
  quant_model, layer_nodes = builder.KerasBuilder(graph).build(
      os.path.join(output_dir, model.name + '_quant.py'), quantized=True)
  rebuilding_results = _maybe_rebuild_rnn(quant_model)
  if rebuilding_results:
    # LSTM present: the per-cell graphs replace the whole-model graph.
    cell_graphs = []
    cell_layer_nodes = []
    for graph, layer_nodes in rebuilding_results:
      cell_graphs.append(graph)
      cell_layer_nodes.extend(layer_nodes)
      quantizer.add_rnn_cell_graph('forward', graph)
    graph = _merge_cell_graphs(cell_graphs)
    layer_nodes = cell_layer_nodes
    # TODO(yuwang): Support backward direction.
    export_file = os.path.join(output_dir, 'merged_graph.pb')
    graph_utils.maybe_export_graph(export_file, graph)
  lstm = True if len(rebuilding_results) > 0 else False
  quantizer.setup(graph, lstm=lstm)
  quantizer.load_node_to_layer(layer_nodes, quant_model)
  return quantizer | null |
23,449 | import os
from typing import Optional
import tensorflow as tf
from nndct_shared.base import NNDCT_KEYS, NNDCT_OP, GLOBAL_MAP
from nndct_shared.utils import option_util, NndctOption, NndctScreenLogger
from tf_nndct.graph import OpTypes
from tf_nndct.graph import builder
from tf_nndct.graph import ops
from tf_nndct.graph import parser
from tf_nndct.graph import utils as graph_utils
from tf_nndct.layers import recurrent
from tf_nndct.quantization import TFQuantizer
from tf_nndct.utils import keras_utils
from tf_nndct.utils import tf_utils
from tf_nndct.quantization import RNNTFQConfig
from tensorflow.keras import activations
# NOTE(review): `GlobalMap` is not imported in the visible source, and
# GLOBAL_MAP is already imported from nndct_shared.base above — this
# rebinding looks erroneous; confirm against the original module.
GLOBAL_MAP = GlobalMap()
def export_quant_config():
  """Exports the registered quantizer's quant config, but only while in
  calibration mode (quant_mode == 1); a no-op otherwise."""
  quantizer = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
  if quantizer and quantizer.quant_mode == 1:
    quantizer.export_quant_config() | null |
23,450 | from tensorflow.python.util import tf_inspect
from tf_nndct.utils import registry
from tf_nndct.layers import quantization
_quant_module_registry = registry.Registry('quant_op')
def get_quant_module(op_type, default=None):
  """Returns the quantized module registered for `op_type`, or `default`
  when nothing is registered under that type."""
  if op_type not in _quant_module_registry:
    return default
  return _quant_module_registry.lookup(op_type) | null |
23,451 |
def fix_neuron(input, fp_tensor, bit_width, method=4):
  # Thin wrapper over the native nndct fix-neuron kernel.
  # NOTE(review): `kernels` is not imported in the visible source — confirm
  # the module-level import in the original file.
  #return kernels.nndct_fix_neuron_v2(input, valmax, valamp, method)
  return kernels.nndct_fix_neuron(input, fp_tensor, bit_width, method) | null |
23,452 |
def diffs_fix_pos(input, bit_width=8, range=5, method=4):
  """Thin wrapper around kernels.nndct_diff_s.

  NOTE: `input` and `range` shadow builtins; names kept for interface
  compatibility with existing callers.
  """
  result = kernels.nndct_diff_s(input, bit_width, range, method)
  return result
23,453 |
def stat_act_pos(fp_tensor, fp_stat_tensor):
  """Thin wrapper around kernels.nndct_stat_act_pos."""
  stats = kernels.nndct_stat_act_pos(fp_tensor, fp_stat_tensor)
  return stats
23,454 |
def scaleop(input, scale):
  """Thin wrapper around kernels.nndct_scale_op; coerces `scale` to float."""
  factor = float(scale)
  return kernels.nndct_scale_op(input, factor)
23,455 |
def table_lookup(input, table, fragpos, type):
  """Thin wrapper around kernels.nndct_table_lookup.

  NOTE: `input` and `type` shadow builtins; names kept for compatibility.
  """
  looked_up = kernels.nndct_table_lookup(input, table, fragpos, type)
  return looked_up
23,456 |
def simulation(input, fragpos, type):
  """Thin wrapper around kernels.nndct_simulation."""
  simulated = kernels.nndct_simulation(input, fragpos, type)
  return simulated
23,457 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def node_name_parts_from_input(input_name):
  """Split a TF input reference into (control prefix, node name, port suffix).

  Examples: '^node:1' -> ('^', 'node', ':1'); 'node' -> ('', 'node', '').
  Only the first port component is kept ('a:b:c' -> ('', 'a', ':b')).
  """
  prefix = ''
  if input_name.startswith('^'):
    prefix = '^'
    input_name = input_name[1:]
  node_name, sep, rest = input_name.partition(':')
  suffix = ':' + rest.split(':')[0] if sep else ''
  return prefix, node_name, suffix
The provided code snippet includes necessary dependencies for implementing the `node_name_from_input` function. Write a Python function `def node_name_from_input(input_name)` to solve the following problem:
Strips off ports and other decorations to get the underlying node name.
Here is the function:
def node_name_from_input(input_name):
  """Strips off ports and other decorations to get the underlying node name."""
  _, node_name, _ = node_name_parts_from_input(input_name)
  return node_name
23,458 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def node_name_parts_from_input(input_name):
  """Split a TF input reference into (control prefix, node name, port suffix).

  Examples: '^node:1' -> ('^', 'node', ':1'); 'node' -> ('', 'node', '').
  Only the first port component is kept ('a:b:c' -> ('', 'a', ':b')).
  """
  prefix = ''
  if input_name.startswith('^'):
    prefix = '^'
    input_name = input_name[1:]
  node_name, sep, rest = input_name.partition(':')
  suffix = ':' + rest.split(':')[0] if sep else ''
  return prefix, node_name, suffix

def canonical_output_name(input_name):
  """Normalize a tensor reference so it always carries an explicit port.

  References without a port get ':0' appended; existing ports are kept.
  """
  prefix, node_name, suffix = node_name_parts_from_input(input_name)
  return ''.join([prefix, node_name, suffix or ':0'])
23,459 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def dtype_to_tf_string(dtype):
  """Return the 'tf.<name>' source-string form of a dtype.

  Args:
    dtype: Either an nndct_dtypes.DType or a tf DType.

  Returns:
    str such as 'tf.float32'.

  Raises:
    TypeError: if `dtype` is neither kind. (The original fell through to an
      opaque UnboundLocalError in that case; it also used `type(x) ==`
      comparisons, which break for subclasses — isinstance is used instead.)
  """
  if isinstance(dtype, nndct_dtypes.DType):
    tf_dtype = nndct_dtypes.to_tf(dtype)
  elif isinstance(dtype, tf_dtypes.DType):
    tf_dtype = dtype
  else:
    raise TypeError('Unsupported dtype: {}'.format(dtype))
  return ".".join(["tf", tf_dtypes._TYPE_TO_STRING[tf_dtype]])
23,460 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
The provided code snippet includes necessary dependencies for implementing the `parse_tf_tensor` function. Write a Python function `def parse_tf_tensor(tensor)` to solve the following problem:
Parse data from given `tensor`.
Here is the function:
def parse_tf_tensor(tensor):
  """Parse data from given `tensor`."""
  if isinstance(tensor, tensor_pb2.TensorProto):
    return tensor_util.MakeNdarray(tensor)
  raise TypeError("TensorProto required, but given {}".format(type(tensor)))
23,461 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def get_attr_proto_value(attr_value):
  """Returns the Python value held by an AttrValue protobuf.

  Args:
    attr_value: AttrValue protobuf.

  Returns:
    The decoded value: a list when the proto carries a `list` payload (or is
    empty), otherwise the scalar payload. `type` entries are converted to
    tf DType objects.
  """
  # Oneof field names an AttrValue may carry (see attr_value.proto).
  fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
  x = attr_value
  ret = []
  # Treat an empty oneof value as an empty list.
  if not x.WhichOneof("value"):
    return ret
  if x.HasField("list"):
    # List-valued attr: concatenate every populated repeated field.
    for f in fields:
      if getattr(x.list, f):
        if f == "type":
          # The comprehension's `x` shadows the proto only inside the
          # comprehension scope (Python 3), so the outer `x` is untouched.
          ret += [tf_dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
        else:
          ret += list(getattr(x.list, f))
  else:
    # Scalar attr: exactly one oneof member is set in practice, so the loop
    # assigns `ret` once (the last populated field would win otherwise).
    for f in fields:
      if x.HasField(f):
        if f == "type":
          ret = tf_dtypes.as_dtype(getattr(x, f))
        else:
          ret = getattr(x, f)
  return ret
The provided code snippet includes necessary dependencies for implementing the `parse_attr_proto` function. Write a Python function `def parse_attr_proto(attr_proto)` to solve the following problem:
Convert a list of AttributeProto to a dict, with names as keys.
Here is the function:
def parse_attr_proto(attr_proto):
  """Convert a list of AttributeProto to a dict, with names as keys."""
  return {name: get_attr_proto_value(proto)
          for name, proto in attr_proto.items()}
23,462 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def tf_tensor_shape(tensor):
  """Return `tensor`'s static shape as a Python list, or None if unavailable."""
  try:
    return tensor.get_shape().as_list()
  except Exception:  # pylint: disable=broad-except
    return None
23,463 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def write_proto(path, message, as_text=False):
  """Serialize a protobuf `message` to `path`.

  Args:
    path: Destination file path; parent directories are created on demand.
    message: A protobuf message.
    as_text: When True, write text_format output; otherwise binary.
  """
  dir_name = os.path.dirname(path)
  # Fix: only create the parent when `path` actually has one. The original
  # called generic_utils.mkdir_if_not_exist unconditionally — even with an
  # empty dirname for bare filenames — while guarding only the (redundant)
  # os.makedirs call. Both calls are kept in case the helper has side effects.
  if dir_name:
    generic_utils.mkdir_if_not_exist(dir_name)
    os.makedirs(dir_name, exist_ok=True)
  if as_text:
    with open(path, "w") as f:
      f.write(text_format.MessageToString(message))
  else:
    with open(path, "wb") as f:
      f.write(message.SerializeToString())

def write_text_proto(path, message):
  """Write `message` to `path` in human-readable text proto format."""
  write_proto(path, message, as_text=True)
23,464 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def write_proto(path, message, as_text=False):
  """Serialize a protobuf `message` to `path`.

  Args:
    path: Destination file path; parent directories are created on demand.
    message: A protobuf message.
    as_text: When True, write text_format output; otherwise binary.
  """
  dir_name = os.path.dirname(path)
  # Fix: only create the parent when `path` actually has one. The original
  # called generic_utils.mkdir_if_not_exist unconditionally — even with an
  # empty dirname for bare filenames — while guarding only the (redundant)
  # os.makedirs call. Both calls are kept in case the helper has side effects.
  if dir_name:
    generic_utils.mkdir_if_not_exist(dir_name)
    os.makedirs(dir_name, exist_ok=True)
  if as_text:
    with open(path, "w") as f:
      f.write(text_format.MessageToString(message))
  else:
    with open(path, "wb") as f:
      f.write(message.SerializeToString())

def write_binary_proto(path, message):
  """Write `message` to `path` in binary serialized proto format."""
  write_proto(path, message, as_text=False)
23,465 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def tf_version():
  """Return the running TensorFlow version string."""
  return tf.__version__

def is_tf_version_equal(version: str):
  """True iff the installed TF version equals `version`."""
  return LooseVersion(version) == tf_version()
23,466 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def tf_version():
  """Return the running TensorFlow version string."""
  # Fix: in the original, tf_version had lost its body and
  # is_tf_version_greater_than was accidentally nested inside it, so
  # tf_version() returned None and the comparator never existed at module
  # scope. Restored to match the sibling helpers in this module.
  return tf.__version__

def is_tf_version_greater_than(version: str):
  """True iff the installed TF version is strictly newer than `version`."""
  return tf_version() > LooseVersion(version)
23,467 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def tf_version():
  """Return the running TensorFlow version string."""
  return tf.__version__

def is_tf_version_greater_equal(version: str):
  """True iff the installed TF version is `version` or newer."""
  return LooseVersion(version) <= tf_version()
23,468 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def tf_version():
  """Return the running TensorFlow version string."""
  return tf.__version__

def is_tf_version_less_than(version: str):
  """True iff the installed TF version is strictly older than `version`."""
  return LooseVersion(version) > tf_version()
23,469 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def tf_version():
  """Return the running TensorFlow version string."""
  return tf.__version__

def is_tf_version_less_equal(version: str):
  """True iff the installed TF version is `version` or older."""
  return LooseVersion(version) >= tf_version()
23,470 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def is_tf_concat(op):
  """True iff `op` is any TF concat variant."""
  return op.type in {"Concat", "ConcatV2", "ConcatV3"}
23,471 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def is_tf_const(op):
  """True iff `op` is a TF constant op."""
  return op.type in {"Const", "ConstV2"}
23,472 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def is_tf_identity(op):
  """True iff `op` is an Identity or IdentityN op."""
  return op.type in ("Identity", "IdentityN")
23,473 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def is_tf_placeholder(op):
  """True iff `op` is a graph Placeholder."""
  return "Placeholder" == op.type
23,474 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def is_tf_biasadd(op):
  """True iff `op` is a BiasAdd op."""
  return "BiasAdd" == op.type
23,475 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.util import object_identity
from tensorflow.python.training.saver import export_meta_graph
from tf_nndct.graph import utils
_CONDITIONAL_OPS = set(["If", "StatelessIf"])
_LOOP_OPS = set(["While", "StatelessWhile"])
def _run_inline_graph_optimization(func, lower_control_flow):
  """Apply function inline optimization to the graph.

  Returns the GraphDef after Grappler's function inlining optimization is
  applied. This optimization does not work on models with control flow.

  Args:
    func: ConcreteFunction.
    lower_control_flow: Boolean indicating whether or not to lower control flow
      ops such as If and While. (default True)

  Returns:
    GraphDef
  """
  graph_def = func.graph.as_graph_def()
  if not lower_control_flow:
    # NOTE(review): disable_lower_using_switch_merge is not imported anywhere
    # in this module's visible imports — confirm it resolves at runtime.
    graph_def = disable_lower_using_switch_merge(graph_def)
  # In some cases, a secondary implementation of the function (e.g. for GPU) is
  # written to the "api_implements" attribute. (e.g. `tf.keras.layers.LSTM` in
  # TF2 produces a CuDNN-based RNN for GPU).
  # This function is supposed to inline all function calls, but
  # "api_implements" prevents this from happening. Removing the attribute
  # solves the problem. To learn more about "api_implements", see:
  #   tensorflow/core/grappler/optimizers/implementation_selector.h
  for function in graph_def.library.function:
    if "api_implements" in function.attr:
      del function.attr["api_implements"]
  meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)
  # Clear the initializer_name for the variables collections, since they are
  # not needed after being saved to saved_model.
  for name in [
      "variables", "model_variables", "trainable_variables", "local_variables"
  ]:
    raw_list = []
    # NOTE(review): this reads the "variables" collection on every iteration,
    # so all four collections are rewritten with the contents of "variables".
    # Presumably collection_def[name] was intended — confirm against upstream
    # TF's convert_to_constants before changing.
    for raw in meta_graph.collection_def["variables"].bytes_list.value:
      variable = variable_pb2.VariableDef()
      variable.ParseFromString(raw)
      variable.ClearField("initializer_name")
      raw_list.append(variable.SerializeToString())
    meta_graph.collection_def[name].bytes_list.value[:] = raw_list
  # Add a collection 'train_op' so that Grappler knows the outputs.
  fetch_collection = meta_graph_pb2.CollectionDef()
  for array in func.inputs + func.outputs:
    fetch_collection.node_list.value.append(array.name)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
  # Initialize RewriterConfig with everything disabled except function inlining.
  config = config_pb2.ConfigProto()
  rewrite_options = config.graph_options.rewrite_options
  rewrite_options.min_graph_nodes = -1  # do not skip small graphs
  rewrite_options.optimizers.append("function")
  return tf_optimizer.OptimizeGraph(config, meta_graph)
def _get_tensor_name(name):
"""Returns the name of the input tensor.
Args:
name: str
Returns:
str
"""
return name.split(":")[0]
def _get_new_function_name(name):
"""Returns the function name with '_frozen' appended.
Args:
name: str
Returns:
str
"""
return name + "_frozen"
def _get_node_defs_list(graph_def):
"""Returns a list of NodeDefs in the GraphDef.
This list consists of all NodeDefs in the main graph as well as all control
flow NodeDefs in the functions.
The remaining NodeDefs in the functions are not included because the op names
are not unique and the variables are handled differently than the main graph.
The control flow ops need to be extracted because they are need their
attributes to be updated similar to the control flow ops in the main graph.
Args:
graph_def: GraphDef proto.
Returns:
[NodeDef]
"""
node_defs = list(graph_def.node)
if graph_def.library:
for func in graph_def.library.function:
node_defs.extend(
[node for node in func.node_def if node.op in _CONTROL_FLOW_OPS])
return node_defs
def _get_tensor_data(func):
  """Gets the tensor data for all Placeholders in the model.

  Returns a dictionary that maps the tensor name to a dictionary containing:
    data: numpy data
    index: int index in func.graph.captures
    is_variable: bool indicating whether the tensor is a variable or not

  Args:
    func: ConcreteFunction.

  Returns:
    Dict
  """
  tensor_data = {}
  # Map each captured-input index to the tf.Variable it belongs to, matched
  # by object identity of the variable's resource handle.
  map_index_to_variable = {}
  for var in func.graph.variables:
    for idx, captured_input in enumerate(func.captured_inputs):
      if var.handle is captured_input:  # pylint: disable=protected-access
        map_index_to_variable[idx] = var
        break
  # Iterates through all captures which are represented as Placeholders.
  for idx, (val_tensor, name_tensor) in enumerate(func.graph.captures):
    tensor_name = _get_tensor_name(name_tensor.name)
    is_variable = idx in map_index_to_variable
    if is_variable:
      # Variables are read through the Variable object rather than the
      # captured resource-handle tensor.
      data = map_index_to_variable[idx].numpy()
    else:
      data = val_tensor.numpy()
    tensor_data[tensor_name] = {
        "data": data,
        "index": idx,
        "is_variable": is_variable,
    }
  return tensor_data
def _get_control_flow_function_data(node_defs, tensor_data, name_to_node):
  """Gets the types and shapes for the parameters to the function.

  Creates a map from function name to a list of types and a list of shapes
  that correspond with the function arguments. The data is primarily
  determined from the corresponding "If" or "While" op. If the argument is a
  resource variable, then the type is determined from the type of the data
  contained within the Tensor. The shape data is only determined in the case
  of the "While" op.

  `is_also_output_type` is used to identify the "While" bodies that require
  the output types to be updated at the same time the input types are updated.

  Args:
    node_defs: List of NodeDefs.
    tensor_data: {str name : Tensor}.
    name_to_node: Dictionary mapping node name to node object.

  Returns:
    {str function name : {"types" : [int representing DataType],
                          "shapes" : [[int] representing TensorShape]],
                          "is_also_output_type" : bool}
  """
  func_data = {}

  def get_source_node_name_through_identities(node_name):
    # Trace the source node along a chain of Identity nodes.
    # For example, given Placeholder -> Identity -> Identity -> node_name
    # the function will return the name of the Placeholder.
    while name_to_node[node_name].op == "Identity":
      node_name = _get_tensor_name(name_to_node[node_name].input[0])
    return node_name

  def get_resource_type(node_name):
    # Resolve a resource arg's dtype from its captured numpy data.
    node_name = get_source_node_name_through_identities(node_name)
    numpy_type = tensor_data[node_name]["data"].dtype
    return dtypes.as_dtype(numpy_type).as_datatype_enum

  def get_resource_shape(node_name):
    # Resolve a resource arg's shape from its captured numpy data.
    node_name = get_source_node_name_through_identities(node_name)
    return tensor_shape_pb2.TensorShapeProto(dim=[
        tensor_shape_pb2.TensorShapeProto.Dim(size=dim)
        for dim in tensor_data[node_name]["data"].shape
    ])

  def add_value(func_name, arg_types, output_shapes, is_also_output_type):
    # Record one function's signature data in the result map.
    func_data[func_name] = {
        "types": arg_types,
        "shapes": output_shapes,
        "is_also_output_type": is_also_output_type
    }

  for node in node_defs:
    if node.op in _CONDITIONAL_OPS:
      # Both branches of an If share the same "Tin" signature; resource args
      # are replaced with the dtype of their captured data.
      arg_types = [dtype for dtype in node.attr["Tin"].list.type]
      for idx in range(len(arg_types)):
        if arg_types[idx] == dtypes.resource:
          # Skip first index which represents the condition.
          arg_types[idx] = get_resource_type(node.input[idx + 1])
      add_value(node.attr["then_branch"].func.name, arg_types, None, False)
      add_value(node.attr["else_branch"].func.name, arg_types, None, False)
    elif node.op in _LOOP_OPS:
      # While ops carry both types ("T") and per-argument output shapes.
      arg_types = [dtype for dtype in node.attr["T"].list.type]
      output_shapes = [shape for shape in node.attr["output_shapes"].list.shape]
      for idx in range(len(arg_types)):
        if arg_types[idx] == dtypes.resource:
          input_name = node.input[idx]
          arg_types[idx] = get_resource_type(input_name)
          output_shapes[idx] = get_resource_shape(input_name)
      # Only the loop body needs its output types rewritten too (True flag).
      add_value(node.attr["body"].func.name, arg_types, output_shapes, True)
      add_value(node.attr["cond"].func.name, arg_types, output_shapes, False)
  return func_data
def _populate_const_op(output_node, node_name, dtype, data, data_shape):
  """Fill `output_node` in place so it becomes a Const op holding `data`.

  Args:
    output_node: TensorFlow NodeDef.
    node_name: str node name.
    dtype: AttrValue with a populated .type field.
    data: numpy data value.
    data_shape: Tuple of integers containing data shape.
  """
  output_node.name = node_name
  output_node.op = "Const"
  output_node.attr["dtype"].CopyFrom(dtype)
  output_node.attr["value"].tensor.CopyFrom(
      tensor_util.make_tensor_proto(data, dtype=dtype.type, shape=data_shape))
def _populate_identity_op(output_node, input_node):
"""Creates an Identity op from a ReadVariable op.
Args:
output_node: TensorFlow NodeDef.
input_node: TensorFlow NodeDef.
"""
output_node.op = "Identity"
output_node.name = input_node.name
output_node.input.append(input_node.input[0])
output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
if "_class" in input_node.attr:
output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
def _populate_if_op(output_node, input_node, function_data):
  """Updates the type attributes and function names of If or StatelessIf.

  Args:
    output_node: TensorFlow NodeDef.
    input_node: TensorFlow NodeDef.
    function_data: Map of function names to the list of types and shapes that
      correspond with the function arguments.
  """
  output_node.CopyFrom(input_node)
  then_func = input_node.attr["then_branch"].func.name
  # Redirect both branches to their frozen ('_frozen') counterparts.
  output_node.attr["then_branch"].func.name = _get_new_function_name(then_func)
  output_node.attr["else_branch"].func.name = _get_new_function_name(
      input_node.attr["else_branch"].func.name)
  # Both branches share one signature, so the then-branch types suffice.
  output_node.attr["Tin"].list.CopyFrom(
      attr_value_pb2.AttrValue.ListValue(
          type=function_data[then_func]["types"]))
def _populate_while_op(output_node, input_node, function_data):
  """Updates the type attributes and function names of While or StatelessWhile.

  Args:
    output_node: TensorFlow NodeDef.
    input_node: TensorFlow NodeDef.
    function_data: Map of function names to the list of types and shapes that
      correspond with the function arguments.
  """
  output_node.CopyFrom(input_node)
  cond_func = input_node.attr["cond"].func.name
  # Redirect cond and body to their frozen ('_frozen') counterparts.
  output_node.attr["cond"].func.name = _get_new_function_name(cond_func)
  output_node.attr["body"].func.name = _get_new_function_name(
      input_node.attr["body"].func.name)
  # cond and body share one signature, so the cond entry's data suffices for
  # both the argument types and the per-argument output shapes.
  output_node.attr["T"].list.CopyFrom(
      attr_value_pb2.AttrValue.ListValue(
          type=function_data[cond_func]["types"]))
  output_node.attr["output_shapes"].list.CopyFrom(
      attr_value_pb2.AttrValue.ListValue(
          shape=function_data[cond_func]["shapes"]))
def _construct_concrete_function(func, output_graph_def,
                                 converted_input_indices):
  """Constructs a concrete function from the `output_graph_def`.

  Args:
    func: ConcreteFunction
    output_graph_def: GraphDef proto.
    converted_input_indices: Set of integers of input indices that were
      converted to constants.

  Returns:
    ConcreteFunction.
  """
  # Create a ConcreteFunction from the new GraphDef.
  input_tensors = func.graph.internal_captures
  converted_inputs = object_identity.ObjectIdentitySet(
      [input_tensors[index] for index in converted_input_indices])
  # Inputs frozen into constants must not appear in the new signature; the
  # remaining inputs are kept and looked up later by tensor name.
  not_converted_inputs = [
      tensor for tensor in func.inputs if tensor not in converted_inputs
  ]
  not_converted_inputs_map = {
      tensor.name: tensor for tensor in not_converted_inputs
  }
  new_input_names = [tensor.name for tensor in not_converted_inputs]
  new_output_names = [tensor.name for tensor in func.outputs]
  new_func = wrap_function.function_from_graph_def(output_graph_def,
                                                   new_input_names,
                                                   new_output_names)
  # Manually propagate shape for input tensors where the shape is not correctly
  # propagated. Scalars shapes are lost when wrapping the function.
  for input_tensor in new_func.inputs:
    input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)
  return new_func
The provided code snippet includes necessary dependencies for implementing the `convert_variables_to_constants_v2` function. Write a Python function `def convert_variables_to_constants_v2(func, lower_control_flow=True)` to solve the following problem:
Replaces all the variables in a graph with constants of the same values. TensorFlow 2.0 function for converting all Variable ops into Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. This function runs Grappler's function inlining optimization in order to return a single subgraph. The current implementation only works for graphs that do not contain any control flow or embedding related ops. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) Returns: ConcreteFunction containing a simplified version of the original.
Here is the function:
def convert_variables_to_constants_v2(func, lower_control_flow=True):
  """Replaces all the variables in a graph with constants of the same values.

  TensorFlow 2.0 function for converting all Variable ops into Const ops holding
  the same values. This makes it possible to describe the network fully with a
  single GraphDef file, and allows the removal of a lot of ops related to
  loading and saving the variables. This function runs Grappler's function
  inlining optimization in order to return a single subgraph.

  The current implementation only works for graphs that do not contain any
  control flow or embedding related ops.

  Args:
    func: ConcreteFunction.
    lower_control_flow: Boolean indicating whether or not to lower control flow
      ops such as If and While. (default True)

  Returns:
    ConcreteFunction containing a simplified version of the original.
  """
  # Inline the graph in order to remove functions when possible.
  graph_def = _run_inline_graph_optimization(func, lower_control_flow)
  utils.maybe_export_graph("/tmp/0_inlined_graph.pb", graph_def)
  # Gets list of all node defs include those in the library.
  node_defs = _get_node_defs_list(graph_def)
  # Get mapping from node name to node.
  name_to_node = {_get_tensor_name(node.name): node for node in node_defs}
  # Get mapping from node name to variable value.
  tensor_data = _get_tensor_data(func)
  # Get mapping from function name to argument types.
  function_data = _get_control_flow_function_data(node_defs, tensor_data,
                                                  name_to_node)
  # Get variable data for all nodes in `node_defs`.
  reference_variables = {}
  resource_identities = {}
  placeholders = {}
  converted_input_indices = set()
  def _save_placeholder(node_name, dtype):
    # Record a resource Placeholder's dtype and captured value so it can be
    # rewritten as a Const below; remember which function input supplied it.
    placeholders[node_name] = {
        "dtype": dtype,
        "data": tensor_data[node_name]["data"],
    }
    converted_input_indices.add(tensor_data[node_name]["index"])
  # First pass: walk every node (including library functions) collecting the
  # dtype/value information needed for the graph rewrite further down.
  for node in node_defs:
    if node.op in _CONDITIONAL_OPS:
      # Get dtype and data for resource Placeholders.
      then_func = node.attr["then_branch"].func.name
      arg_types = function_data[then_func]["types"]
      # node.input[0] is the If condition, so branch args start at index 1.
      for idx, input_tensor in enumerate(node.input[1:]):
        input_name = _get_tensor_name(input_tensor)
        if input_name in tensor_data:
          dtype = attr_value_pb2.AttrValue(type=arg_types[idx])
          _save_placeholder(_get_tensor_name(input_tensor), dtype)
    elif node.op in _LOOP_OPS:
      # Get dtype and data for resource Placeholders.
      cond_func = node.attr["cond"].func.name
      arg_types = function_data[cond_func]["types"]
      for idx, input_tensor in enumerate(node.input):
        input_name = _get_tensor_name(input_tensor)
        if input_name in tensor_data:
          dtype = attr_value_pb2.AttrValue(type=arg_types[idx])
          _save_placeholder(_get_tensor_name(input_tensor), dtype)
    elif (node.op == "Identity" and node.attr["T"].type == dtypes.resource and
          name_to_node[_get_tensor_name(node.input[0])].op in _LOOP_OPS):
      # Store the dtype for Identity resource ops that are outputs of While ops.
      # NOTE(review): this compares the NodeDef's proto enum to
      # `dtypes.resource` from this package -- confirm it equals DT_RESOURCE.
      while_node = name_to_node[_get_tensor_name(node.input[0])]
      body_func = while_node.attr["body"].func.name
      input_data = node.input[0].split(":")
      # "name" (no colon) means output index 0; "name:k" means index k.
      idx = 0 if len(input_data) == 1 else int(input_data[1])
      dtype = attr_value_pb2.AttrValue(
          type=function_data[body_func]["types"][idx])
      resource_identities[node.name] = dtype
    elif node.op == "VariableV2":
      # Get data for VariableV2 ops (reference variables) that cannot be lifted.
      with func.graph.as_default():
        identity_node = array_ops.identity(
            func.graph.as_graph_element(node.name + ":0"))
      # Evaluate the pruned subgraph to materialize the variable's value.
      reference_variables[node.name] = (
          func.prune([], [identity_node.name])()[0])
    elif node.name in tensor_data and not tensor_data[node.name]["is_variable"]:
      # Get dtype and data for non-variable Placeholders (ex. values for 1.X
      # Const ops that are loaded as Placeholders in 2.0)
      _save_placeholder(node.name, node.attr["dtype"])
    elif node.op in ["ReadVariableOp", "ResourceGather", "AssignSubVariableOp"]:
      # Get dtype and data for Placeholder ops associated with ReadVariableOp
      # and ResourceGather ops. There can be an Identity in between the
      # resource op and Placeholder. Store the dtype for the Identity ops.
      input_name = _get_tensor_name(node.input[0])
      while name_to_node[input_name].op == "Identity":
        resource_identities[input_name] = node.attr["dtype"]
        input_name = _get_tensor_name(name_to_node[input_name].input[0])
      if name_to_node[input_name].op != "Placeholder":
        raise ValueError("Cannot find the Placeholder op that is an input "
                         "to the ReadVariableOp.")
      _save_placeholder(input_name, node.attr["dtype"])
  # Reconstruct the graph with constants in place of variables.
  output_graph_def = graph_pb2.GraphDef()
  for input_node in graph_def.node:
    output_node = output_graph_def.node.add()
    # Convert VariableV2 ops to Const ops.
    if input_node.name in reference_variables:
      data = reference_variables[input_node.name]
      dtype = attr_value_pb2.AttrValue(type=data.dtype.as_datatype_enum)
      _populate_const_op(output_node, input_node.name, dtype, data.numpy(),
                         data.shape)
    # Convert Placeholder ops to Const ops.
    elif input_node.name in placeholders:
      data = placeholders[input_node.name]["data"]
      dtype = placeholders[input_node.name]["dtype"]
      _populate_const_op(output_node, input_node.name, dtype, data, data.shape)
    # Update the dtype for Identity ops that are inputs to ReadVariableOps.
    elif input_node.name in resource_identities:
      output_node.CopyFrom(input_node)
      output_node.attr["T"].CopyFrom(resource_identities[input_node.name])
    # Convert ReadVariableOps to Identity ops.
    elif input_node.op == "ReadVariableOp":
      _populate_identity_op(output_node, input_node)
    # Convert AssignSubVariableOp to a plain Sub on the now-constant value.
    elif input_node.op == "AssignSubVariableOp":
      output_node.op = "Sub"
      output_node.name = input_node.name
      output_node.input.extend(input_node.input)
      output_node.attr["T"].CopyFrom(input_node.attr["dtype"])
      if "_class" in input_node.attr:
        output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
    # Convert ResourceGather to Gather ops with a Const axis feeding into it.
    elif input_node.op == "ResourceGather":
      if input_node.attr["batch_dims"].i != 0:
        raise ValueError("batch_dims != 0 is not supported by freeze_graph.")
      output_axis_node = output_graph_def.node.add()
      axis_node_name = input_node.name + "/axis"
      axis_dtype = input_node.attr["Tindices"]
      axis_data = np.array(input_node.attr["batch_dims"].i)
      _populate_const_op(output_axis_node, axis_node_name, axis_dtype,
                         axis_data, axis_data.shape)
      output_node.op = "GatherV2"
      output_node.name = input_node.name
      output_node.input.extend(
          [input_node.input[0], input_node.input[1], axis_node_name])
      output_node.attr["Tparams"].CopyFrom(input_node.attr["dtype"])
      output_node.attr["Tindices"].CopyFrom(input_node.attr["Tindices"])
      output_node.attr["Taxis"].CopyFrom(axis_dtype)
      if "_class" in input_node.attr:
        output_node.attr["_class"].CopyFrom(input_node.attr["_class"])
    # Update the function names and argument types for the conditional ops.
    elif input_node.op in _CONDITIONAL_OPS:
      _populate_if_op(output_node, input_node, function_data)
    elif input_node.op in _LOOP_OPS:
      _populate_while_op(output_node, input_node, function_data)
    else:
      output_node.CopyFrom(input_node)
  # Add functions to reconstructed graph.
  if graph_def.library:
    library = output_graph_def.library
    for input_library_func in graph_def.library.function:
      orig_func_name = input_library_func.signature.name
      new_func_name = _get_new_function_name(orig_func_name)
      # Do not copy any functions that aren't being used in the graph. Any
      # functions that are not used by control flow should have been inlined.
      if orig_func_name not in function_data:
        continue
      output_library_func = library.function.add()
      for key, value in input_library_func.ret.items():
        output_library_func.ret[key] = value
      for key, value in input_library_func.control_ret.items():
        output_library_func.control_ret[key] = value
      # Update the input types in the function signature. Update the output
      # types for functions that are while loop bodies.
      output_library_func.signature.CopyFrom(input_library_func.signature)
      output_library_func.signature.name = new_func_name
      for dtype, arg in zip(function_data[orig_func_name]["types"],
                            output_library_func.signature.input_arg):
        arg.type = dtype
      if function_data[orig_func_name]["is_also_output_type"]:
        for dtype, arg in zip(function_data[orig_func_name]["types"],
                              output_library_func.signature.output_arg):
          arg.type = dtype
      # Update the NodeDefs.
      func_variables = {
          node.name: node.input[0]
          for node in input_library_func.node_def
          if node.op in ["ReadVariableOp", "AssignSubVariableOp"]
      }
      for input_node in input_library_func.node_def:
        output_node = output_library_func.node_def.add()
        # Convert ReadVariableOps to Identity ops.
        if input_node.op == "ReadVariableOp":
          _populate_identity_op(output_node, input_node)
        # Update the function names and argument types for the conditional ops.
        elif input_node.op in _CONDITIONAL_OPS:
          _populate_if_op(output_node, input_node, function_data)
        elif input_node.op in _LOOP_OPS:
          _populate_while_op(output_node, input_node, function_data)
        else:
          output_node.CopyFrom(input_node)
        # Convert :value to :output for ops that use the ReadVariableOp.
        for idx, full_name in enumerate(input_node.input):
          input_name = _get_tensor_name(full_name)
          if input_name in func_variables:
            full_name_parts = full_name.split(":")
            full_name_parts[1] = "output"
            input_name = ":".join(full_name_parts)
            output_node.input[idx] = input_name
  output_graph_def.versions.CopyFrom(graph_def.versions)
  return _construct_concrete_function(func, output_graph_def,
                                      converted_input_indices)
23,476 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix."""
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned

  # Timestamp of the record; fall back to "now" when not supplied.
  when = timestamp or _time.time()
  local = _time.localtime(when)
  now_microsecond = int(1e6 * (when % 1.0))

  filename, line = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)

  # One-letter severity code; default to 'I' for unrecognized levels.
  severity = _level_names[level][0] if level in _level_names else 'I'

  # TODO(yuwang): Format by environment variable.
  # Layout: <sev><month><day> <HH>:<MM>:<SS> <file>:<line>]
  return '%c%02d%02d %02d:%02d:%02d %s:%d]' % (
      severity, local[1], local[2], local[3], local[4], local[5],
      basename, line)
def get_logger():
  """Return logger instance."""
  global _logger

  # Fast path of double-checked locking: no lock once initialized.
  if _logger:
    return _logger

  _logger_lock.acquire()
  try:
    # Re-check under the lock: another thread may have won the race.
    if _logger:
      return _logger

    # Scope the logger under 'nndct' so it does not clash with user loggers.
    logger = _logging.getLogger('nndct')
    logger.setLevel(1)
    # Skip internal helper frames when the logger reports the call site.
    logger.findCaller = _logger_find_caller

    # Leave handler configuration alone when the root logger already has
    # handlers; configuring again would emit every record twice.
    if not _logging.getLogger().handlers:
      # Interactive shells (e.g. Jupyter) get stdout; scripts get stderr.
      try:
        # sys.ps1 exists only in interactive shells.
        interactive = bool(_sys.ps1)
      except AttributeError:
        # `python -i` sets this flag without defining sys.ps1.
        interactive = _sys.flags.interactive

      if interactive:
        #logger.setLevel(INFO)
        target = _sys.stdout
      else:
        target = _sys.stderr

      handler = _logging.StreamHandler(target)
      handler.setFormatter(_logging.Formatter(_logging_fmt, None))
      logger.addHandler(handler)

    _logger = logger
    return _logger
  finally:
    _logger_lock.release()
def debug(msg, *args, **kwargs):
  """Log `msg` at DEBUG level via the shared nndct logger.

  Positional and keyword arguments are forwarded to Logger.debug().
  """
  extra = {'nndct_prefix': _log_prefix(DEBUG)}
  # Consistency fix: place *args before the `extra` keyword, matching the
  # sibling warn()/error() helpers (the old `extra=extra, *args` ordering
  # was legal but confusing).
  get_logger().debug(msg, *args, extra=extra, **kwargs)
23,477 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def _log_prefix(level, timestamp=None, file_and_line=None):
def get_logger():
def warn(msg, *args, **kwargs):
  """Log `msg` at WARN level via the shared nndct logger."""
  prefix = {'nndct_prefix': _log_prefix(WARN)}
  get_logger().warning(msg, *args, extra=prefix, **kwargs)
23,478 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix.

  Args:
    level: Logging level (e.g. INFO) used to pick the severity letter.
    timestamp: Optional epoch seconds; defaults to the current time.
    file_and_line: Optional (filename, line) tuple; defaults to the caller's
      location via _get_file_and_line().

  Returns:
    A prefix string like 'I0131 12:34:56 file.py:42]'.
  """
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned
  # Record current time
  now = timestamp or _time.time()
  now_tuple = _time.localtime(now)
  # NOTE(review): computed but never used -- presumably intended for a
  # microsecond field in the prefix format; confirm before removing.
  now_microsecond = int(1e6 * (now % 1.0))
  (filename, line) = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)
  # Severity string; 'I' is the fallback for unrecognized levels.
  severity = 'I'
  if level in _level_names:
    severity = _level_names[level][0]
  # TODO(yuwang): Format by environment variable.
  s = '%c%02d%02d %02d:%02d:%02d %s:%d]' % (
      severity,
      now_tuple[1],  # month
      now_tuple[2],  # day
      now_tuple[3],  # hour
      now_tuple[4],  # min
      now_tuple[5],  # sec
      basename,
      line)
  return s
def get_logger():
  """Return logger instance.

  Lazily creates and caches a module-level 'nndct' logger; all subsequent
  calls return the cached instance.
  """
  global _logger
  # Use double-checked locking to avoid taking lock unnecessarily.
  if _logger:
    return _logger
  _logger_lock.acquire()
  try:
    # Re-check under the lock: another thread may have initialized it.
    if _logger:
      return _logger
    # Scope the TensorFlow logger to not conflict with users' loggers.
    logger = _logging.getLogger('nndct')
    # Level 1 passes effectively every record; filtering is left to handlers.
    logger.setLevel(1)
    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller
    # Don't further configure the TensorFlow logger if the root logger is
    # already configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive
      # If we are in an interactive environment (like Jupyter), set loglevel
      # to INFO and pipe the output to stdout.
      if _interactive:
        #logger.setLevel(INFO)
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr
      # Add the output handler.
      _handler = _logging.StreamHandler(_logging_target)
      _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
      logger.addHandler(_handler)
    _logger = logger
    return _logger
  finally:
    _logger_lock.release()
def error(msg, *args, **kwargs):
  """Log `msg` at ERROR level via the shared nndct logger."""
  prefix = {'nndct_prefix': _log_prefix(ERROR)}
  get_logger().error(msg, *args, extra=prefix, **kwargs)
23,482 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from google.protobuf import text_format
The provided code snippet includes necessary dependencies for implementing the `to_list` function. Write a Python function `def to_list(x)` to solve the following problem:
Normalizes a list/tuple to a list. If a tensor is passed, we return a list of size 1 containing the tensor. Arguments: x: target object to be normalized. Returns: A list.
Here is the function:
def to_list(x):
  """Normalizes a list/tuple to a list.

  Tuples and lists are converted to a plain list; any other object
  (e.g. a single tensor) is wrapped in a one-element list.

  Arguments:
    x: target object to be normalized.

  Returns:
    A list.
  """
  return list(x) if isinstance(x, (list, tuple)) else [x]
23,483 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from google.protobuf import text_format
def get_temp_directory():
  """Return the directory to use for temporary files.

  Honors the VAI_TEMP_DIRECTORY environment variable when set; otherwise
  creates (and returns) a fresh temporary directory.

  Returns:
    Path string of the temporary directory.
  """
  path = os.environ.get("VAI_TEMP_DIRECTORY")
  # Bug fix: the old `os.environ.get(key, tempfile.mkdtemp())` evaluated
  # mkdtemp() eagerly, leaking a new empty directory on every call even
  # when the env var was set. Only create one when actually needed.
  return path if path is not None else tempfile.mkdtemp()
23,484 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from google.protobuf import text_format
def delete_directory(path):
  """Recursively remove `path` if it exists; silently no-op otherwise."""
  if not os.path.exists(path):
    return
  shutil.rmtree(path)
23,485 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from google.protobuf import text_format
def write_proto(path, message, as_text=False):
  """Serialize `message` to `path`, creating parent directories as needed.

  Args:
    path: Output file path.
    message: A protobuf message (anything providing SerializeToString() and
      accepted by text_format.MessageToString for the text path).
    as_text: If True, write the human-readable text format; otherwise write
      the binary serialization.
  """
  dir_name = os.path.dirname(path)
  # os.makedirs(..., exist_ok=True) already handles the "already exists"
  # case, so the previous extra mkdir_if_not_exist() call was redundant
  # (and ran even when dir_name was empty).
  if dir_name:
    os.makedirs(dir_name, exist_ok=True)
  if as_text:
    with open(path, "w") as f:
      f.write(text_format.MessageToString(message))
  else:
    with open(path, "wb") as f:
      f.write(message.SerializeToString())

def write_text_proto(path, message):
  """Serialize `message` to `path` in protobuf text format."""
  write_proto(path, message, as_text=True)
23,486 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from google.protobuf import text_format
def is_list_or_tuple(obj):
  """Return True when `obj` is a list or a tuple (including subclasses)."""
  return isinstance(obj, list) or isinstance(obj, tuple)
23,487 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
keras = tf.keras
def is_sequential_or_functional(model):
  """True for keras Sequential models and Functional (graph-network) models."""
  if not isinstance(model, keras.Model):
    return False
  return isinstance(model, keras.Sequential) or model._is_graph_network
def is_subclassing(model):
  """True for keras.Model subclasses that are neither Sequential nor Functional."""
  is_model = isinstance(model, keras.Model)
  return is_model and not is_sequential_or_functional(model)
23,488 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
def flatten_layers(layer, recursive=True, include_self=True):
  """Yield `layer` (optionally) and the keras layers it tracks, depth-first.

  Args:
    layer: Root keras layer/model whose tracked sublayers are walked.
    recursive: If True, also descend into sublayers of sublayers.
    include_self: If True, yield `layer` itself first.

  Yields:
    keras.layers.Layer instances in pre-order.
  """
  if include_self:
    yield layer
  # Only instantiate set and deque if needed.
  layers_or_containers = getattr(layer, '_layers', None)
  if layers_or_containers:
    seen_object_ids = set()
    deque = collections.deque(layers_or_containers)
    while deque:
      layer_or_container = deque.popleft()
      # Visit each object at most once (guards against shared sublayers).
      layer_or_container_id = id(layer_or_container)
      if layer_or_container_id in seen_object_ids:
        continue
      seen_object_ids.add(layer_or_container_id)
      if isinstance(layer_or_container, layers.Layer):
        yield layer_or_container
        # Introspect recursively through sublayers.
        if recursive:
          sublayers = getattr(layer_or_container, '_layers', None)
          if sublayers:
            # extendleft(reversed(...)) keeps the original sublayer order at
            # the front of the deque, giving depth-first traversal.
            deque.extendleft(reversed(sublayers))
      elif isinstance(layer_or_container,
                      data_structures.TrackableDataStructure):
        # Data structures are introspected even with `recursive=False`.
        tracked_values = layer_or_container._values
        if tracked_values:
          deque.extendleft(reversed(tracked_values))
23,489 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
def unnest_if_single_tensor(input_tensors):
  """Unwrap a nest that holds exactly one tensor; dicts pass through as-is."""
  # Preserve compatibility with older configs
  flat = nest.flatten(input_tensors)
  # A dict is assumed to be what the first layer expects (as is the case
  # with a DenseFeatures layer), so it is kept even with a single entry.
  if len(flat) == 1 and not isinstance(input_tensors, dict):
    return flat[0]
  return input_tensors
23,490 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
keras = tf.keras
The provided code snippet includes necessary dependencies for implementing the `get_actual_inbound_layers` function. Write a Python function `def get_actual_inbound_layers( model: Union[tf.Module, tf.keras.Model]) -> Dict[str, List[str]]` to solve the following problem:
Get inbound layers for nested Functional models. The inbound_layer of a submodel is another Functional model; this function finds the output layers of that model and treats them as the actual inbound layers.
Here is the function:
def get_actual_inbound_layers(
    model: Union[tf.Module, tf.keras.Model]) -> Dict[str, List[str]]:
  '''Get inbound layers for nested Functional models.

  The inbound_layer of a submodel is another Functional model; this function
  finds the output layers of that model and treats them as the actual
  inbound layers.

  Returns:
    Mapping from the name of each nested submodel layer to the names of its
    actual inbound layers.
  '''
  # See https://github.com/keras-team/keras/blob/v2.8.0/keras/utils/layer_utils.py#L294
  relevant_nodes = []
  for v in model._nodes_by_depth.values():
    relevant_nodes += v
  inbound_layers = {}
  for layer in model.layers:
    # Only nested models (layers that themselves contain layers) are mapped.
    if hasattr(layer, 'layers') and layer.layers:
      inbound_layers[layer.name] = []
      for node in layer._inbound_nodes:
        if relevant_nodes and node not in relevant_nodes:
          # node is not part of the current network
          continue
        for inbound_layer, _, tensor_index, __ in node.iterate_inbound():
          if isinstance(inbound_layer, keras.layers.InputLayer):
            inbound_layers[layer.name].append(inbound_layer.name)
          else:
            # inbound_layer is a functional model, we need to find out
            # the actual output layer.
            output_layer = inbound_layer._output_layers[tensor_index]
            inbound_layers[layer.name].append(output_layer.name)
      # Recurse so that deeply nested submodels are covered too.
      inbound_layers.update(get_actual_inbound_layers(layer))
  return inbound_layers
23,491 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
keras = tf.keras
def try_count_params(model: Union[tf.Module, tf.keras.Model],
                     trainable_only: bool = False):
  """Count the number of parameters if model is possible.

  Args:
    model: Try to count the number of params in this model.
    trainable_only: Whether to calculate trainable params only. This flag is
      not used when the model has `count_params` attribute.

  Returns:
    The number of parameters or None.
  """
  if hasattr(model, 'count_params'):
    try:
      return model.count_params()
    except ValueError:
      logging.info('Number of trainable params unknown, because the build() '
                   'methods in keras layers were not called. This is probably '
                   'because the model was not feed any input, e.g., the max '
                   'train step already reached before this run.')
      return None

  # Fall back to summing element counts over the (trainable) variables.
  variables = model.trainable_variables if trainable_only else model.variables
  count = 0
  for var in variables:
    count += tf.math.reduce_prod(tf.shape(var)).numpy()
  return count
def try_count_flops(model: Union[tf.Module, tf.keras.Model],
                    inputs_kwargs: Optional[Dict[str, Any]] = None):
  """Counts and returns model FLOPs.

  Args:
    model: A model instance.
    inputs_kwargs: An optional dictionary of argument pairs specifying inputs'
      shape specifications to getting corresponding concrete function. Only
      consulted for subclassed models (when `model.inputs` is empty).

  Returns:
    The model's FLOPs, or None when they could not be determined.
  """
  if hasattr(model, 'inputs'):
    try:
      # Get input shape and set batch size to 1.
      if model.inputs:
        inputs = [
            tf.TensorSpec([1] + input.shape[1:], input.dtype)
            for input in model.inputs
        ]
        concrete_func = tf.function(model).get_concrete_function(inputs)
      # If model.inputs is invalid, try to use the input to get concrete
      # function for model.call (subclass model).
      else:
        concrete_func = tf.function(
            model.call).get_concrete_function(**inputs_kwargs)
      # NOTE(review): `_is_tf_later_than_220` and
      # `convert_variables_to_constants_v2_as_graph` are defined elsewhere in
      # this module -- confirm both are in scope for the installed TF version.
      if _is_tf_later_than_220:
        frozen_func, _ = convert_variables_to_constants_v2_as_graph(
            concrete_func)
      else:
        frozen_func = tf_utils.convert_to_constants(
            concrete_func, lower_control_flow=False)
      # Calculate FLOPs.
      run_meta = tf.compat.v1.RunMetadata()
      opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
      flops = tf.compat.v1.profiler.profile(
          graph=frozen_func.graph, run_meta=run_meta, cmd='op', options=opts)
      return flops.total_float_ops
    except Exception as e:  # pylint: disable=broad-except
      logging.info(
          'Failed to count model FLOPs with error %s, because the build() '
          'methods in keras layers were not called. This is probably because '
          'the model was not feed any input, e.g., the max train step already '
          'reached before this run.', e)
      return None
  return None
The provided code snippet includes necessary dependencies for implementing the `model_complexity` function. Write a Python function `def model_complexity(model: Union[tf.Module, tf.keras.Model], inputs_kwargs: Optional[Dict[str, Any]] = None, return_flops=True, readable=False)` to solve the following problem:
Stat the complexity of the given model. Currently includes macs and params. MACs: multiply–accumulate operations that perform a += b x c Params: total number of parameters of a model. Args: model: A model instance. inputs_kwargs: An optional dictionary of argument pairs specifying inputs' shape specifications for getting the corresponding concrete function. readable: Whether to return readable numbers. Returns: Statistically obtained macs and params by given inputs.
Here is the function:
def model_complexity(model: Union[tf.Module, tf.keras.Model],
                     inputs_kwargs: Optional[Dict[str, Any]] = None,
                     return_flops=True,
                     readable=False):
  """Stat the complexity of the given model. Currently includes macs and params.

  MACs: multiply-accumulate operations that perform a += b x c
  Params: total number of parameters of a model.

  Args:
    model: A model instance.
    inputs_kwargs: An optional dictionary of argument pairs specifying inputs'
      shape specifications for getting the corresponding concrete function.
    return_flops: If True, return FLOPs; otherwise return MACs (FLOPs // 2).
    readable: Whether to return readable numbers.

  Returns:
    Statistically obtained flops (or macs) and params by given inputs; either
    may be None when counting failed.
  """
  # Bug fix: `inputs_kwargs` was accepted but never forwarded, so subclassed
  # models (which rely on it to build a concrete function) always failed.
  total_flops = try_count_flops(model, inputs_kwargs)
  total_params = try_count_params(model)
  if not return_flops and total_flops is not None:
    # One MAC is two FLOPs (a multiply plus an add). Guarding against None
    # avoids the old `None // 2` TypeError when FLOP counting failed.
    total_flops = total_flops // 2
  if readable:
    total_flops = common.readable_num(total_flops)
    total_params = common.readable_num(total_params)
  return total_flops, total_params
23,492 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
def export_to_graphviz(graph):
  """Export `graph` in Graphviz (dot) format.

  NOTE(review): currently an unimplemented stub -- it performs no work.
  """
  pass
23,493 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.utils.tensor_util import convert_parameter_tensor_format
from nndct_shared.utils.tensor_util import DataFormatMap
from nndct_shared.pruning import pruning_lib
from nndct_shared.base import FrameworkType
from tf_nndct.graph.ops import Tensor
from tf_nndct.utils import keras_utils
def tf_param_to_nndct(tensor):
  """Convert a parameter tensor from TensorFlow layout to NNDCT layout."""
  src, dst = FrameworkType.TENSORFLOW, FrameworkType.NNDCT
  return convert_parameter_tensor_format(tensor, src, dst)
class Tensor(base_tensor.Tensor):
  """Tensor specialized for the TF graph: tracks the node that produces it."""

  def __init__(self,
               name=None,
               shape=None,
               dtype=None,
               data=None,
               producer=None):
    super(Tensor, self).__init__(name, shape, dtype, data=data, node=producer)
    self._producer = producer

  @classmethod
  def from_numpy(cls, name, data):
    """Alternate constructor: build a named tensor from a numpy array.

    Restored the missing @classmethod: the body calls cls(name) and callers
    invoke Tensor.from_numpy(name, array) on the class, not an instance.
    """
    tensor = cls(name)
    tensor.from_ndarray(data)
    return tensor

  def is_produced_by(self, node):
    """Return True if this tensor is produced by `node` (matched by name)."""
    if not self._producer:
      return False
    return self._producer.name == node.name

  def transpose(self, axes):
    """Permute dimensions in place by `axes`; returns self for chaining."""
    if self._data is None:
      # No materialized data: only the recorded shape needs permuting.
      shape = [self._shape[i] for i in axes]
    else:
      data = self._data.transpose(axes)
      # ndarray.transpose() returns a view; store a contiguous copy.
      data = np.ascontiguousarray(data)
      shape = data.shape
      self._data = data
    self._shape = shape
    return self

  def clone(self):
    """Return a new tensor of the same class copied from this one."""
    tensor = self.__class__(self.name)
    tensor.clone_from(self)
    return tensor

  # NOTE(review): reads like it was a @property before extraction; kept as a
  # plain method to avoid changing the call-site contract.
  def producer(self):
    return self._producer
def param_from_tf_numpy(name, ndarray):
  """Wrap a TF-layout numpy parameter and convert it to NNDCT layout."""
  return tf_param_to_nndct(Tensor.from_numpy(name, ndarray))
23,494 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.utils.tensor_util import convert_parameter_tensor_format
from nndct_shared.utils.tensor_util import DataFormatMap
from nndct_shared.pruning import pruning_lib
from nndct_shared.base import FrameworkType
from tf_nndct.graph.ops import Tensor
from tf_nndct.utils import keras_utils
def tf_blob_format(ndim):
  """Return the TF blob layout string for `ndim` under the active data format."""
  layouts = {
      'channels_first': {2: 'NH', 3: 'NCL', 4: 'NCHW', 5: 'NCDHW'},
      'channels_last': {2: 'NH', 3: 'NLC', 4: 'NHWC', 5: 'NDHWC'},
  }
  return layouts[keras_utils.data_format()][ndim]
def transpose(tensor, src_layout, dst_layout):
  """Transpose `tensor` from `src_layout` to `dst_layout` (layout strings).

  Raises:
    TypeError: If `tensor` is not a Tensor.
  """
  if isinstance(tensor, Tensor):
    return _transpose(tensor, src_layout, dst_layout)
  raise TypeError("'tensor' must be Tensor, but given {}".format(
      type(tensor)))
class DataFormatMap(object):
  """A dict mapping of framework and op type to its data format.

  Restored the missing @classmethod decorators on the accessors: both take
  `cls` and are invoked on the class itself, e.g.
  DataFormatMap.blob_format(FrameworkType.NNDCT, ndim).
  """

  # Activation/blob layout per framework, keyed by tensor rank.
  _blob_format_map = {
      FrameworkType.NNDCT: {
          2: "NH",
          3: "NLC",
          4: "NHWC",
          5: "NHWDC"
      },
      FrameworkType.TORCH: {
          2: "NH",
          3: "NCL",
          4: "NCHW",
          5: "NCDHW"
      },
      # TF format generated in runtime.
  }

  # Parameter (weight) layout per framework, keyed by tensor rank.
  _parameter_format_map = {
      FrameworkType.NNDCT: {
          2: "OI",
          3: "OLI",
          4: "OHWI",
          5: "OHWDI"
      },
      FrameworkType.TORCH: {
          2: "OI",
          3: "OIL",
          4: "OIHW",
          5: "OIDHW"
      },
      FrameworkType.TENSORFLOW: {
          2: "IO",
          3: "LIO",
          4: "HWIO",
          5: "DHWIO",
      }
  }

  @classmethod
  def blob_format(cls, framework_type, ndim):
    """Return the blob layout string for `framework_type` at rank `ndim`.

    Raises:
      KeyError: If the framework has no registered blob formats.
    """
    if framework_type not in cls._blob_format_map:
      raise KeyError(
          "Framework type '{}' not supported now.".format(framework_type))
    return cls._blob_format_map[framework_type][ndim]

  @classmethod
  def param_format(cls, framework_type, ndim):
    """Return the parameter layout string for `framework_type` at rank `ndim`.

    Raises:
      KeyError: If the framework has no registered parameter formats.
    """
    if framework_type not in cls._parameter_format_map:
      raise KeyError(
          "Framework type '{}' not supported now.".format(framework_type))
    return cls._parameter_format_map[framework_type][ndim]
def tf_blob_to_nndct(tensor):
  """Convert a blob tensor from the TF layout to the NNDCT layout."""
  src = tf_blob_format(tensor.ndim)
  dst = DataFormatMap.blob_format(FrameworkType.NNDCT, tensor.ndim)
  return transpose(tensor, src, dst)
23,495 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.utils.tensor_util import convert_parameter_tensor_format
from nndct_shared.utils.tensor_util import DataFormatMap
from nndct_shared.pruning import pruning_lib
from nndct_shared.base import FrameworkType
from tf_nndct.graph.ops import Tensor
from tf_nndct.utils import keras_utils
def transposeconv_weight_dim_trans(weight: np.ndarray):
  """Swap the last two axes of a transposed-conv kernel array.

  The default Keras conv layer stores kernels as HWIO, while transposed
  conv layers store them as HWOI; to set weights on a tf transposed-conv
  layer the output/input channel axes must be exchanged. Arrays with
  fewer than two dimensions are returned unchanged.
  """
  if weight.ndim < 2:
    return weight
  return weight.swapaxes(-1, -2)
def param_to_tf_numpy(tensor):
  """Copy a parameter tensor's data and return it converted to TF layout."""
  copied = Tensor.from_numpy(tensor.name, np.copy(tensor.data))
  nndct_param_to_tf(copied)
  return copied.data
# `layer_weights_from_node(node)` converts each of a node's NNDCT-format
# parameter tensors to the TF format (channels last); see
# nndct_shared/utils/tensor_util.py for the conversion details.
def layer_weights_from_node(node):
  """Return the node's parameters as TF-format (channels-last) numpy arrays.

  Converts each NNDCT parameter tensor to the TF layout; see
  nndct_shared/utils/tensor_util.py for the format conversion details.

  Args:
    node: A graph node whose `op.params` holds the parameter tensors.

  Returns:
    A list of numpy arrays, one per parameter, in TF layout.
  """
  # In tf, the weight layout of transposed conv2D/3D differs from conv2D/3D:
  #   transposed (2D/3D): kernel_shape = kernel_size + (filters, input_dim)
  #   Conv2D:             kernel_shape = kernel_size + (input_dim, filters)
  # so transposed-conv kernels additionally need their O <-> I axes swapped.
  # The check depends only on the node, so hoist it out of the loop.
  is_transpose = pruning_lib.is_transpose_conv(node.op)
  weights = []
  for tensor in node.op.params.values():
    data = param_to_tf_numpy(tensor)
    if is_transpose:
      # 1-D params (e.g. bias) pass through unchanged.
      data = transposeconv_weight_dim_trans(data)
    weights.append(data)
  return weights
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tf_nndct import IterativePruningRunner
# MNIST classification setup: 10 digit classes, 28x28 grayscale images.
num_classes = 10
input_shape = (28, 28, 1)
def build_model(pretrained=None):
    """Build the MNIST convnet, optionally loading pretrained weights.

    Implementation from
    https://github.com/keras-team/keras-io/blob/master/examples/vision/mnist_convnet.py

    Args:
      pretrained: Optional path to a weights checkpoint to load.

    Returns:
      A compiled-free `keras.Sequential` model.
    """
    conv_stack = [
        keras.Input(shape=input_shape),
        layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation="softmax"),
    ]
    model = keras.Sequential(conv_stack)
    if pretrained:
        model.load_weights(pretrained)
    return model
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tf_nndct import IterativePruningRunner
# Load MNIST, scale pixel values to [0, 1], add a trailing channel axis
# and one-hot encode the labels.
# NOTE(review): `num_classes` is not defined in this file's visible
# scope — presumably 10; confirm against the importing context.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data('mnist.npz')
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def evaluate(model):
    """Compile `model` and report its accuracy on the MNIST test split."""
    model.compile(
        loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    loss_and_acc = model.evaluate(x_test, y_test, verbose=0)
    accuracy = loss_and_acc[1]
    print("Test accuracy:", accuracy)
    return accuracy
def train(model, save_path, epochs=10):
    """Train `model` on MNIST and save its weights to `save_path`."""
    model.compile(
        loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    model.fit(
        x_train,
        y_train,
        batch_size=128,
        epochs=epochs,
        validation_split=0.1)
    # Report final test metrics before persisting the weights.
    model.evaluate(x_test, y_test, verbose=1)
    model.save_weights(save_path, save_format='tf')
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tf_nndct import IterativePruningRunner
# 28x28 single-channel (grayscale) MNIST input.
input_shape = (28, 28, 1)
def evaluate(model):
    """Compile `model` and return its accuracy on the test set."""
    model.compile(
        loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    _, accuracy = model.evaluate(x_test, y_test, verbose=0)
    print("Test accuracy:", accuracy)
    return accuracy
def prune(model, ratio):
    """Run sensitivity analysis on `model` and prune it to `ratio` sparsity."""
    spec = tf.TensorSpec((1, *input_shape), tf.float32)
    pruning_runner = IterativePruningRunner(model, spec)
    pruning_runner.ana(evaluate)
    return pruning_runner.prune(ratio)
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tf_nndct import IterativePruningRunner
# 28x28 single-channel (grayscale) MNIST input.
input_shape = (28, 28, 1)
def transform(model):
    """Convert a pruned sparse model into its equivalent slim (dense) model."""
    spec = tf.TensorSpec((1, *input_shape), tf.float32)
    runner = IterativePruningRunner(model, spec)
    return runner.get_slim_model()
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tf_nndct import IterativePruningRunner
def parse_args():
    """Define and parse the command-line flags for this example script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-t", "--train",
        action="store_true",
        help="If true, train a baseline model.")
    parser.add_argument(
        "-p", "--prune",
        type=float,
        default=None,
        help="Pruning ratio")
    parser.add_argument(
        "-tf", "--transform",
        action="store_true",
        help="Transforms a sparse model to a slim model.")
    parser.add_argument(
        "-sp", "--save_path",
        help="Path to save trained weights")
    parser.add_argument(
        "-pr", "--pretrained",
        help="Pretrained weights loaded to model.")
    return parser.parse_args()
from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
import numpy as np
from nets.resnet_v2 import resnet_v2_50
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
    """Placeholder evaluation callback that reports a fixed score of 0.5."""
    with tf.compat.v1.Session().as_default():
        return 0.5
from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
import numpy as np
from nets.alexnet import alexnet_v2
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
    """Placeholder evaluation callback that reports a fixed score of 0.5."""
    with tf.compat.v1.Session().as_default():
        return 0.5
from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
def mnist_convnet():
    """Build a small MNIST convnet.

    Returns:
      A `(model, input_shape)` tuple where `model` is an uncompiled
      Sequential network emitting raw logits and `input_shape` is
      (28, 28, 1).
    """
    num_classes = 10
    input_shape = (28, 28, 1)
    # Fix: `keras` itself is never imported in this file (only `tf` and
    # `layers`), so reach Sequential through the imported `tf` module.
    model = tf.keras.Sequential([
        layers.InputLayer(input_shape=input_shape),
        layers.Conv2D(16, kernel_size=(3, 3), activation="relu"),
        layers.BatchNormalization(),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(num_classes),
    ])
    return model, input_shape
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
    """Run the frozen graph on one random image and return a dummy score."""
    with tf.compat.v1.Session().as_default() as session:
        tf.import_graph_def(frozen_graph_def, name="")
        dummy_image = np.random.rand(1, 28, 28, 1)
        logits = session.run(
            session.graph.get_tensor_by_name('dense/BiasAdd:0'),
            feed_dict={'input_1:0': dummy_image})
        print(logits)
        print("in eval_fn done")
        return 0.5
class IterativePruningRunner(object):
    """Iterative channel-pruning driver for a TF1 graph.

    Captures the session's weights and a sub-graph cut at
    `output_node_names`, runs per-group sensitivity analysis (optionally
    across several GPUs in subprocesses), and produces pruned weights,
    channel masks and updated shape tensors from which a slim graph can
    be built.
    """

    # NOTE(review): `excludes=[]` is a mutable default argument shared
    # across calls — consider a `None` sentinel.
    def __init__(
        self, model_name: str, sess: SessionInterface,
        input_specs: Mapping[str, tf.TensorSpec],
        output_node_names: List[str], excludes: List[str]=[]) -> None:
      self._model_name = model_name
      self._sess = sess
      # Snapshot of every global variable's value, keyed by name without
      # the ":0" output suffix.
      self._weights = {
          var.name.split(":")[0] : sess.run(var) for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)}
      self._output_node_names = output_node_names
      # Keep only the sub-graph reachable from the requested outputs.
      self._graph_def = tf.compat.v1.graph_util.extract_sub_graph(
          sess.graph_def, output_node_names)
      self._input_specs = input_specs
      self._excludes = excludes
      self._sens_analyzer = None

    def _fill_in_weights(self, weights: Mapping[str, np.ndarray]) -> None:
      """Assign the given numpy values back into the session's variables."""
      for var in tf.get_collection('variables'):
        var_name = var.name.split(":")[0]
        if var_name in weights:
          self._sess.run(tf.assign(var, weights[var_name]))

    def ana(
        self, eval_fn: Callable[[tf.compat.v1.GraphDef], float],
        gpu_ids: List[str]=['/GPU:0'], checkpoint_interval: int = 10) -> None:
      """Run (or resume) sensitivity analysis, farming eval jobs to subprocesses.

      Results are checkpointed to `<model_name>.sens` every
      `checkpoint_interval` finished tasks and loaded from there when the
      file already exists.
      """
      sens_path = os.path.join(BASE_DIR, self._model_name + '.sens')
      self._sens_analyzer = SensAnalyzer()
      # NOTE(review): these two queues are re-created inside the `else`
      # branch below — the first pair is redundant when the sens file is
      # missing.
      input_queue = ctx.Queue(maxsize=len(gpu_ids))
      output_queue = ctx.Queue(maxsize=len(gpu_ids))
      if os.path.exists(sens_path):
        self._sens_analyzer.load(sens_path)
      else:
        groups = group_conv_nodes(self._graph_def, self._excludes)
        input_queue = ctx.Queue(maxsize=len(gpu_ids))
        output_queue = ctx.Queue(maxsize=len(gpu_ids))
        # Evaluate the unpruned model once to get the baseline score.
        p = _submit_to_subprocess(
            0, self._sess, self._output_node_names, eval_fn, input_queue, output_queue, gpu_id=gpu_ids[0])
        ret = output_queue.get()
        assert not isinstance(ret, Exception), "Error occurred during call eval_fn"
        base_score = ret[1]
        p.join()
        # Seed every group with ten sparsity candidates (0.0 .. 0.9);
        # sparsity 0.0 reuses the baseline score.
        for g in groups:
          group_sens = GroupSensitivity(g, [Sensitivity(i * 0.1) for i in range(10)])
          group_sens.sens[0].val = base_score
          self._sens_analyzer.add_group_sens(group_sens)
      unfinished_specs = self._sens_analyzer.unfinished_specs()
      processes: List[mp.Process] = [None for _ in unfinished_specs]
      # Prime the output queue so the dispatch loop below can always
      # block on a `get` (one dummy token per worker slot).
      for _ in gpu_ids:
        output_queue.put(None)
      num_unfinished_tasks = 0
      num_finished_tasks = 0
      for idx, (sens, spec) in enumerate(unfinished_specs):
        # Wait for a free slot; a non-None ret is a finished task's result.
        ret = output_queue.get()
        assert not isinstance(ret, Exception), "Error occurred during call eval_fn"
        if ret is not None:
          num_unfinished_tasks -= 1
          num_finished_tasks += 1
          if num_finished_tasks % checkpoint_interval == 0:
            self._sens_analyzer.save(sens_path)
          i, score = ret
          unfinished_specs[i][0].val = score
          if processes[i] is not None:
            processes[i].join()
        # Prune according to this spec and evaluate it in a subprocess.
        _, weights, _ = self._prune(spec)
        processes[idx] = _submit_to_subprocess(
            idx, self._sess, self._output_node_names, eval_fn,
            input_queue, output_queue, weights, gpu_id=gpu_ids[idx % len(gpu_ids)])
        num_unfinished_tasks += 1
      # Drain the remaining in-flight tasks.
      for _ in range(num_unfinished_tasks):
        ret = output_queue.get()
        assert not isinstance(ret, Exception), "Error occurred during call eval_fn"
        if ret is not None:
          idx, score = ret
          unfinished_specs[idx][0].val = score
          if processes[idx] is not None:
            processes[idx].join()
      self._sens_analyzer.save(sens_path)

    def _get_spec_by_sparsity(self, sparsity: float, max_attemp: int) -> PruningSpec:
      """Binary-search a sensitivity threshold whose spec hits the FLOPs target."""
      idx = 0
      flops_tolerance = 1e-2
      min_th = 1e-5
      max_th = 1 - min_th
      base_flops = calculate_flops(self.get_slim_graph_def(), self._input_specs)
      expected_flops = (1 - sparsity) * base_flops
      prev_spec = None
      cur_spec = None
      while idx < max_attemp:
        idx += 1
        threshold = (min_th + max_th) / 2
        cur_spec = self._sens_analyzer.generate_spec_by_threshold(threshold)
        if prev_spec and prev_spec == cur_spec:
          continue
        shape_tensors, _, masks = self._prune(cur_spec)
        current_flops = calculate_flops(self.get_slim_graph_def(shape_tensors, masks), self._input_specs)
        # NOTE(review): this compares base_flops, not current_flops,
        # against the target — `abs(base_flops - expected_flops)` is
        # constant across iterations; looks like it should be
        # `abs(current_flops - expected_flops)`. Confirm intent.
        error = abs(base_flops - expected_flops) / base_flops
        if error < flops_tolerance:
          break
        if current_flops < expected_flops:
          max_th = threshold
        else:
          min_th = threshold
        prev_spec = cur_spec
      return cur_spec

    def prune(self, sparsity: float=None, threshold: float=None, max_attemp: int=10) -> Tuple[Mapping[str, TensorProto], Mapping[str, np.ndarray]]:
      """Prune by target sparsity or explicit threshold; returns (shape_tensors, masks).

      Also writes the pruned weights back into the live session and
      records the masks in the module-level MASKS registry.
      """
      assert sparsity is not None or threshold is not None
      if sparsity is not None:
        shape_tensors, weights, masks = self._prune(self._get_spec_by_sparsity(sparsity, max_attemp))
      else:
        shape_tensors, weights, masks = self._prune(self._sens_analyzer.generate_spec_by_threshold(threshold))
      self._fill_in_weights(weights)
      MASKS.update(masks)
      return shape_tensors, masks

    def _prune(self, spec: PruningSpec) -> Tuple[Mapping[str, TensorProto], Mapping[str, np.ndarray], Mapping[str, np.ndarray]]:
      """Apply a pruning spec to copies of the graph/weights (originals untouched)."""
      graph_def = deepcopy(self._graph_def)
      weights = deepcopy(self._weights)
      node_def_map = {n.name: n for n in graph_def.node}
      node_pruning_descs = {n.name: PruningDesc() for n in graph_def.node}
      for group_spec in spec.group_specs:
        # Decide how many output channels to keep for the group leader,
        # rounded up to the channel divisibility constraint and clamped
        # to [2, out_depth].
        weight_node = find_weight_nodes(node_def_map[group_spec.nodes[0]], node_def_map)[0]
        out_depth = weight_node.attr['shape'].shape.dim[3].size
        remain_out_depth = int(out_depth * (1 - group_spec.sparsity))
        remain_out_depth = math.ceil(remain_out_depth / spec.channel_divisible) * spec.channel_divisible
        remain_out_depth = min(max(remain_out_depth, 2), out_depth)
        node_pruning_descs[group_spec.nodes[0]].out_depth = remain_out_depth
        node_pruning_descs[group_spec.nodes[0]].removed_outputs = self._get_channel_indices_to_remove(
            weights[weight_node.name], -1, remain_out_depth)
        # All other nodes in the group mirror the leader's pruning.
        for i in range(1, len(group_spec.nodes)):
          node_pruning_descs[group_spec.nodes[i]].out_depth = node_pruning_descs[group_spec.nodes[0]].out_depth
          node_pruning_descs[group_spec.nodes[i]].removed_outputs.extend(
              node_pruning_descs[group_spec.nodes[0]].removed_outputs)
      self._shape_inference(graph_def, node_def_map, node_pruning_descs)
      shape_tensors = self._update_shape_tensor(graph_def, node_def_map, node_pruning_descs)
      masks = self._update_weights(graph_def, node_def_map, node_pruning_descs, weights)
      return shape_tensors, weights, masks

    def _shape_inference(self, graph_def: tf.compat.v1.GraphDef, node_def_map: Mapping[str, tf.compat.v1.GraphDef], node_pruning_descs: Mapping[str, PruningDesc]) -> None:
      """Propagate removed channels through the graph in topological order."""
      for node_def in topo_sort(graph_def):
        if len(node_def.input) == 0:
          continue
        pruning_desc = node_pruning_descs[node_def.name]
        if is_matmul(node_def):
          input_pruning_desc = node_pruning_descs[get_input_node_name(node_def.input[0])]
          if len(input_pruning_desc.removed_outputs) == 0:
            continue
          # A flattened spatial input repeats each channel once per
          # spatial position, so replicate the removed indices with an
          # offset of the producer's original depth.
          weight_node = find_weight_nodes(node_def, node_def_map)[0]
          input_node_out_depth = (input_pruning_desc.out_depth + len(input_pruning_desc.removed_outputs))
          pruning_desc.removed_inputs = []
          offset = 0
          while offset < weight_node.attr['shape'].shape.dim[0].size:
            pruning_desc.removed_inputs.extend([c + offset for c in input_pruning_desc.removed_outputs])
            offset += input_node_out_depth
        elif is_conv(node_def):
          input_pruning_desc = node_pruning_descs[get_input_node_name(node_def.input[0])]
          if len(input_pruning_desc.removed_outputs) == 0:
            continue
          # Removed producer outputs become removed input channels here.
          pruning_desc.removed_inputs.extend(input_pruning_desc.removed_outputs)
        elif is_depthwise_conv(node_def):
          input_pruning_desc = node_pruning_descs[get_input_node_name(node_def.input[0])]
          if len(input_pruning_desc.removed_outputs) == 0:
            continue
          # Each removed input channel removes its whole multiplier group
          # of outputs.
          weight_node = find_weight_nodes(node_def, node_def_map)[0]
          group_size = weight_node.attr['shape'].shape.dim[-1].size
          pruning_desc.removed_inputs = [i for i in input_pruning_desc.removed_outputs]
          pruning_desc.removed_outputs = []
          for i in input_pruning_desc.removed_outputs:
            pruning_desc.removed_outputs.extend(list(range(i * group_size, (i + 1) * group_size)))
          pruning_desc.out_depth = input_pruning_desc.out_depth * group_size
        elif is_concat(node_def):
          # Concatenation: shift each producer's removed indices by the
          # running (unpruned) channel offset.
          pruning_desc.removed_outputs = []
          pruning_desc.out_depth = 0
          offset = 0
          for inpt in node_def.input:
            input_pruning_desc = node_pruning_descs[get_input_node_name(inpt)]
            pruning_desc.out_depth += input_pruning_desc.out_depth
            pruning_desc.removed_outputs.extend([c + offset for c in input_pruning_desc.removed_outputs])
            offset += (input_pruning_desc.out_depth + len(input_pruning_desc.removed_outputs))
        else:
          # Default: pass the first input's pruning straight through
          # (element-wise ops, activations, etc.).
          input_pruning_desc = node_pruning_descs[get_input_node_name(node_def.input[0])]
          if len(input_pruning_desc.removed_outputs) == 0:
            continue
          pruning_desc.removed_inputs = [i for i in input_pruning_desc.removed_outputs]
          pruning_desc.removed_outputs = [i for i in pruning_desc.removed_inputs]
          pruning_desc.out_depth = input_pruning_desc.out_depth

    def _update_shape_tensor(
        self, graph_def: tf.compat.v1.GraphDef, node_def_map: Mapping[str, tf.compat.v1.GraphDef],
        node_pruning_descs: Mapping[str, PruningDesc]) -> Mapping[str, TensorProto]:
      """Shrink dropout Shape constants to the pruned channel count."""
      ret = {}
      for node_def in graph_def.node:
        if node_def.name not in node_pruning_descs:
          continue
        pruning_desc = node_pruning_descs[node_def.name]
        # Dropout's random-mask shape is a baked-in constant; its last
        # dimension must track the pruned depth.
        if node_def.op == OpType.Mul and node_def.name.endswith("dropout/mul_1"):
          for const_node in find_ancestor_target_nodes(node_def_map, node_def.input[1], [OpType.Const], [], True):
            if const_node.name.endswith("dropout/Shape"):
              shape_tensor = tf.make_ndarray(const_node.attr['value'].tensor)
              shape_tensor[-1] -= len(pruning_desc.removed_outputs)
              ret[const_node.name] = tf.make_tensor_proto(shape_tensor)
      return ret

    def _update_weights(
        self, graph_def: tf.compat.v1.GraphDef, node_def_map: Mapping[str, tf.compat.v1.GraphDef],
        node_pruning_descs: Mapping[str, PruningDesc], weights: Mapping[str, np.ndarray]) -> Mapping[str, np.ndarray]:
      """Zero out pruned channels in `weights` (in place) and return the masks."""
      masks = {}
      for node_def in graph_def.node:
        if not is_weighted_node(node_def) or node_def.name not in node_pruning_descs:
          continue
        pruning_desc = node_pruning_descs[node_def.name]
        if len(pruning_desc.removed_inputs) == 0 and len(pruning_desc.removed_outputs) == 0:
          continue
        processed_weights = set()
        for weight_node in find_weight_nodes(node_def, node_def_map):
          if weight_node.name in processed_weights:
            continue
          if weight_node.name in weights:
            weight_array = weights[weight_node.name]
            mask_array = np.ones_like(weight_array)
          elif weight_node.op == OpType.Const:
            weight_array = tf.make_ndarray(weight_node.attr['value'].tensor)
            if weight_array.size == 0:
              continue
            mask_array = np.ones_like(weight_array)
          else:
            continue
          masks[weight_node.name] = mask_array
          dim_size = len(weight_array.shape)
          if dim_size == 1:
            # 1-D params (bias, BN stats): only output pruning applies.
            for idx in pruning_desc.removed_outputs:
              weight_array[idx] = 0
              mask_array[idx] = 0
          else:
            # Zero removed input channels (second-to-last axis) ...
            for idx in pruning_desc.removed_inputs:
              weight_array[..., idx, :] = 0
              mask_array[..., idx, :] = 0
            # ... and removed output channels (last axis); depthwise conv
            # outputs were already handled through its inputs.
            if not is_depthwise_conv(node_def):
              for idx in pruning_desc.removed_outputs:
                weight_array[..., idx] = 0
                mask_array[..., idx] = 0
          processed_weights.add(weight_node.name)
      return masks

    def _get_channel_indices_to_remove(self, weight: np.ndarray, axis: int, remain_depth: int) -> List[int]:
      """Pick the channels along `axis` with the smallest L1 norm to remove."""
      weight = abs(weight)
      dim_size = len(weight.shape)
      sums = []
      axis = axis % dim_size
      if axis == 0:
        for i in range(weight.shape[0]):
          sums.append(weight[i, ...].sum())
      elif axis == dim_size - 1:
        for i in range(weight.shape[dim_size - 1]):
          sums.append(weight[..., i].sum())
      else:
        # NOTE(review): this branch appends an array slice, not a scalar
        # `.sum()`, and the slice expression looks malformed — confirm;
        # it appears unused since callers pass axis=-1.
        for i in range(weight.shape[axis]):
          sums.append(weight[slice(0, axis), i, slice(axis + 1, dim_size)])
      sums = np.array(sums)
      sorted_sum_indices = np.argsort(sums)
      return list(sorted_sum_indices[0: weight.shape[axis] - remain_depth])

    def _get_slim_ndarray(self, array: np.ndarray, mask: np.ndarray) -> np.ndarray:
      """Physically delete the all-zero channels indicated by `mask`."""
      dim_size = len(array.shape)
      assert dim_size in [1, 2, 4]
      # An output channel whose entire mask slice is zero was pruned.
      output_to_remove = []
      for i in range(mask.shape[-1]):
        if mask[..., i].sum() == 0:
          output_to_remove.append(i)
      array = np.delete(array, output_to_remove, axis=-1)
      if dim_size == 4 or dim_size == 2:
        # Same for pruned input channels on the second-to-last axis.
        input_to_remove = []
        for i in range(mask.shape[-2]):
          if mask[..., i, :].sum() == 0:
            input_to_remove.append(i)
        array = np.delete(array, input_to_remove, axis=-2)
      return array

    def get_slim_graph_def(self, shape_tensors: Mapping[str, TensorProto]=None, masks: Mapping[str, np.ndarray]=None) -> tf.compat.v1.GraphDef:
      """Freeze the graph and, if given, splice in slim shapes/weights."""
      frozen_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
          self._sess,
          self._graph_def,
          self._output_node_names
      )
      frozen_graph_def_map = {node.name: node for node in frozen_graph_def.node}
      if shape_tensors is not None:
        for k, tensor_proto in shape_tensors.items():
          if k in frozen_graph_def_map:
            node_def = frozen_graph_def_map[k]
            node_def.attr['value'].tensor.CopyFrom(tensor_proto)
      if masks is not None:
        for k, mask in masks.items():
          if k in frozen_graph_def_map:
            node_def = frozen_graph_def_map[k]
            array = tf.make_ndarray(node_def.attr['value'].tensor)
            node_def.attr['value'].tensor.CopyFrom(tf.make_tensor_proto(self._get_slim_ndarray(array, mask)))
      return frozen_graph_def
def prune():
    """End-to-end demo: build, analyze, prune and slim the MNIST model."""
    with tf.Session() as sess:
        model, input_shape = mnist_convnet()
        sess.run(tf.global_variables_initializer())
        input_specs = {
            'input_1:0': tf.TensorSpec(shape=(1, 28, 28, 1), dtype=tf.dtypes.float32)
        }
        pruner = IterativePruningRunner("mnist", sess, input_specs, ["dense/BiasAdd"])
        pruner.ana(eval_fn, gpu_ids=['/GPU:0', '/GPU:1'])
        shape_tensors, masks = pruner.prune(sparsity=0.5)

        def loss_fn():
            images = np.ones((1, 28, 28, 1), dtype=np.float32)
            predictions = model(images, training=True)
            return tf.reduce_sum(predictions)

        # One training step on the pruned (masked) weights.
        opt = tf.compat.v1.train.GradientDescentOptimizer(3.0)
        sess.run(opt.minimize(loss_fn, var_list=tf.trainable_variables()))
        slim_graph_def = pruner.get_slim_graph_def(shape_tensors, masks)
from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
import numpy as np
from nets.vgg import vgg_16
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
    """Placeholder evaluation callback that reports a fixed score of 0.5."""
    with tf.compat.v1.Session().as_default():
        return 0.5
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Alias for the TF-Slim high-level API used throughout this file.
slim = tf.contrib.slim
def alexnet_v2_arg_scope(weight_decay=0.0005):
  """Return the TF-Slim arg scope used by AlexNet v2.

  Conv and fully-connected layers get ReLU activations, 0.1 bias
  initialization and L2 weight regularization; convolutions pad SAME
  while max-pools pad VALID.

  Args:
    weight_decay: L2 regularization strength for conv/fc weights.

  Returns:
    An arg_scope to use for the model.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      biases_initializer=tf.constant_initializer(0.1),
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      # The innermost scope carries all outer settings; return it.
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Alias for the TF-Slim high-level API used throughout this file.
slim = tf.contrib.slim
# Truncated-normal initializer factory with mean 0.0 and the given stddev.
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# `alexnet_v2` below implements AlexNet version 2 as described in
# http://arxiv.org/pdf/1404.5997v2.pdf; see the function's docstring for
# the full description of its arguments and return values.
def alexnet_v2(inputs,
               num_classes=1000,
               is_training=True,
               dropout_keep_prob=0.5,
               spatial_squeeze=True,
               scope='alexnet_v2',
               global_pool=False):
  """AlexNet version 2.
  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg
  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224 or set
  global_pool=True. To use in fully convolutional mode, set
  spatial_squeeze to false.
  The LRN layers have been removed and change the initializers from
  random_normal_initializer to xavier_initializer.
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: the number of predicted classes. If 0 or None, the logits layer
    is omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
    layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
    logits. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.
    global_pool: Optional boolean flag. If True, the input to the classification
    layer is avgpooled to size 1x1, for any input size. (This is not part
    of the original AlexNet.)
  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
    or the non-dropped-out input to the logits layer (if num_classes is 0
    or None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=[end_points_collection]):
      net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
                        scope='conv1')
      net = slim.max_pool2d(net, [3, 3], 2, scope='pool1')
      net = slim.conv2d(net, 192, [5, 5], scope='conv2')
      net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
      net = slim.conv2d(net, 384, [3, 3], scope='conv3')
      net = slim.conv2d(net, 384, [3, 3], scope='conv4')
      net = slim.conv2d(net, 256, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')
      # Use conv2d instead of fully_connected layers.
      with slim.arg_scope([slim.conv2d],
                          weights_initializer=trunc_normal(0.005),
                          biases_initializer=tf.constant_initializer(0.1)):
        net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
                          scope='fc6')
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout6')
        net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
        # Convert end_points_collection into a end_point dict.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        if global_pool:
          # Average over the spatial dims so any input size yields a 1x1
          # feature map before the classifier.
          # NOTE(review): keep_dims is the deprecated spelling of
          # keepdims in newer TF — confirm the targeted TF version.
          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
          end_points['global_pool'] = net
        if num_classes:
          net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                             scope='dropout7')
          net = slim.conv2d(net, num_classes, [1, 1],
                            activation_fn=None,
                            normalizer_fn=None,
                            biases_initializer=tf.zeros_initializer(),
                            scope='fc8')
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
          # fc8 is created after the collection snapshot above, so it is
          # registered in end_points explicitly.
          end_points[sc.name + '/fc8'] = net
      return net, end_points
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
# Alias for the TF-Slim high-level API used throughout this file.
slim = tf.contrib.slim
# Truncated-normal initializer factory with mean 0.0 and the given stddev.
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception model from http://arxiv.org/abs/1512.00567.

  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.

  Note that the names of the layers in the paper do not correspond to the names
  of the endpoints registered by this function although they build the same
  network.

  Here is a mapping from the old_names to the new names:
  Old name          | New name
  =======================================
  conv0             | Conv2d_1a_3x3
  conv1             | Conv2d_2a_3x3
  conv2             | Conv2d_2b_3x3
  pool1             | MaxPool_3a_3x3
  conv3             | Conv2d_3b_1x1
  conv4             | Conv2d_4a_3x3
  pool2             | MaxPool_5a_3x3
  mixed_35x35x256a  | Mixed_5b
  mixed_35x35x288a  | Mixed_5c
  mixed_35x35x288b  | Mixed_5d
  mixed_17x17x768a  | Mixed_6a
  mixed_17x17x768b  | Mixed_6b
  mixed_17x17x768c  | Mixed_6c
  mixed_17x17x768d  | Mixed_6d
  mixed_17x17x768e  | Mixed_6e
  mixed_8x8x1280a   | Mixed_7a
  mixed_8x8x2048a   | Mixed_7b
  mixed_8x8x2048b   | Mixed_7c

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
      'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries
      or losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # Scale every layer's channel count by depth_multiplier, but clamp from
  # below at min_depth so thinned networks keep a usable width.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with tf.variable_scope(scope, 'InceptionV3', [inputs]):
    # Stem: plain convs/pools run with VALID padding unless overridden.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='VALID'):
      # 299 x 299 x 3
      end_point = 'Conv2d_1a_3x3'
      net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 149 x 149 x 32
      end_point = 'Conv2d_2a_3x3'
      net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 32
      end_point = 'Conv2d_2b_3x3'
      net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 64
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 64
      end_point = 'Conv2d_3b_1x1'
      net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 80.
      end_point = 'Conv2d_4a_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 71 x 71 x 192.
      end_point = 'MaxPool_5a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 35 x 35 x 192.

    # Inception blocks: all ops default to SAME padding so branches keep
    # matching spatial dims and can be concatenated on the channel axis.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # mixed: 35 x 35 x 256.
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_1: 35 x 35 x 288.
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          # NOTE(review): the scope names 'Conv2d_0b_1x1' and 'Conv_1_0c_5x5'
          # break the naming pattern used by the other mixed blocks;
          # presumably kept for compatibility with released checkpoints —
          # do not "fix" them.
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv_1_0c_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_2: 35 x 35 x 288.
      end_point = 'Mixed_5d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_3: 17 x 17 x 768.  Grid-size reduction: stride-2 branches plus a
      # stride-2 max-pool, concatenated.
      end_point = 'Mixed_6a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          # NOTE(review): scope 'Conv2d_1a_1x1' names a 3x3 conv; presumably a
          # historical naming quirk preserved for checkpoint compatibility.
          branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed4: 17 x 17 x 768.  Factorized 7x7 convolutions (1x7 then 7x1).
      end_point = 'Mixed_6b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_5: 17 x 17 x 768.  Same topology as Mixed_6b, wider 7x7 path.
      end_point = 'Mixed_6c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_6: 17 x 17 x 768.  Identical topology to Mixed_6c.
      end_point = 'Mixed_6d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_7: 17 x 17 x 768.  This endpoint also feeds the auxiliary head
      # in inception_v3().
      end_point = 'Mixed_6e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_8: 8 x 8 x 1280.  Second grid-size reduction.
      end_point = 'Mixed_7a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_9: 8 x 8 x 2048.  Branches 1 and 2 split into parallel 1x3/3x1
      # convs whose outputs are concatenated (expanded filter bank).
      end_point = 'Mixed_7b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          # NOTE(review): both siblings here are named 'Conv2d_0b_*' while
          # Mixed_7c uses 'Conv2d_0b_*'/'Conv2d_0c_*'; presumably preserved
          # for checkpoint compatibility — do not rename.
          branch_1 = tf.concat(axis=3, values=[
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(axis=3, values=[
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_10: 8 x 8 x 2048.
      end_point = 'Mixed_7c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = tf.concat(axis=3, values=[
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(axis=3, values=[
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
    # Reached only when final_endpoint matched none of the endpoints above.
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are is large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
The provided code snippet includes necessary dependencies for implementing the `inception_v3` function. Write a Python function `def inception_v3(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, create_aux_logits=True, scope='InceptionV3', global_pool=False)` to solve the following problem:
Inception model from http://arxiv.org/abs/1512.00567. "Rethinking the Inception Architecture for Computer Vision" Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna. With the default arguments this method constructs the exact model defined in the paper. However, one can experiment with variations of the inception_v3 network by changing arguments dropout_keep_prob, min_depth and depth_multiplier. The default image size used to train this network is 299x299. Args: inputs: a tensor of size [batch_size, height, width, channels]. num_classes: number of predicted classes. If 0 or None, the logits layer is omitted and the input features to the logits layer (before dropout) are returned instead. is_training: whether is training or not. dropout_keep_prob: the percentage of activation values that are retained. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. prediction_fn: a function to get predictions out of logits. spatial_squeeze: if True, logits is of shape [B, C], if false logits is of shape [B, 1, 1, C], where B is batch_size and C is number of classes. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. create_aux_logits: Whether to create the auxiliary logits. scope: Optional variable_scope. global_pool: Optional boolean flag to control the avgpooling before the logits layer. If false or unset, pooling is done with a fixed window that reduces default-sized inputs to 1x1, while larger inputs lead to larger outputs. If true, any input size is pooled down to 1x1. 
Returns: net: a Tensor with the logits (pre-softmax activations) if num_classes is a non-zero integer, or the non-dropped-out input to the logits layer if num_classes is 0 or None. end_points: a dictionary from components of the network to the corresponding activation. Raises: ValueError: if 'depth_multiplier' is less than or equal to zero.
Here is the function:
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 create_aux_logits=True,
                 scope='InceptionV3',
                 global_pool=False):
  """Inception model from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"
  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.

  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing arguments dropout_keep_prob, min_depth and
  depth_multiplier.

  The default image size used to train this network is 299x299.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
      shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse 'scope' must be given.
    create_aux_logits: Whether to create the auxiliary logits.
    scope: Optional variable_scope.
    global_pool: Optional boolean flag to control the avgpooling before the
      logits layer. If false or unset, pooling is done with a fixed window
      that reduces default-sized inputs to 1x1, while larger inputs lead to
      larger outputs. If true, any input size is pooled down to 1x1.

  Returns:
    net: a Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped-out input to the logits layer
      if num_classes is 0 or None.
    end_points: a dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # Scale layer widths by depth_multiplier, clamped from below at min_depth.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
    # batch_norm and dropout behave differently at train vs. eval time, so
    # thread is_training through to them here.
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v3_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)

      # Auxiliary Head logits: a small classifier attached to the Mixed_6e
      # activations, used as a training-time regularizer.
      if create_aux_logits and num_classes:
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
          aux_logits = end_points['Mixed_6e']
          with tf.variable_scope('AuxLogits'):
            aux_logits = slim.avg_pool2d(
                aux_logits, [5, 5], stride=3, padding='VALID',
                scope='AvgPool_1a_5x5')
            aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
                                     scope='Conv2d_1b_1x1')
            # Shape of feature map before the final layer.
            kernel_size = _reduced_kernel_size_for_small_input(
                aux_logits, [5, 5])
            aux_logits = slim.conv2d(
                aux_logits, depth(768), kernel_size,
                weights_initializer=trunc_normal(0.01),
                padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
            aux_logits = slim.conv2d(
                aux_logits, num_classes, [1, 1], activation_fn=None,
                normalizer_fn=None, weights_initializer=trunc_normal(0.001),
                scope='Conv2d_2b_1x1')
            if spatial_squeeze:
              aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
            end_points['AuxLogits'] = aux_logits

      # Final pooling and prediction
      with tf.variable_scope('Logits'):
        if global_pool:
          # Global average pooling.
          # NOTE(review): `keep_dims` is the TF1-era spelling (later renamed
          # `keepdims`); consistent with the TF1 APIs used in this file.
          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size, shrunk if the input is smaller
          # than the default 299x299 (which yields an 8x8 map here).
          kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a_{}x{}'.format(*kernel_size))
          end_points['AvgPool_1a'] = net
        if not num_classes:
          # Classification head disabled: return pre-dropout features.
          return net, end_points
        # 1 x 1 x 2048
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        end_points['PreLogits'] = net
        # 2048
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
        # 1000
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
Returns: net: a Tensor with the logits (pre-softmax activations) if num_classes is a non-zero integer, or the non-dropped-out input to the logits layer if num_classes is 0 or None. end_points: a dictionary from components of the network to the corresponding activation. Raises: ValueError: if 'depth_multiplier' is less than or equal to zero. |
23,508 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow as tf
slim = tf.contrib.slim
def mobilenet_v1_base(inputs,
                      final_endpoint='Conv2d_13_pointwise',
                      min_depth=8,
                      depth_multiplier=1.0,
                      conv_defs=None,
                      output_stride=None,
                      use_explicit_padding=False,
                      scope=None):
  """Mobilenet v1.

  Constructs a Mobilenet v1 network from inputs to the given final endpoint.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
      'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise,
      'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
      'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
      'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    output_stride: An integer that specifies the requested ratio of input to
      output spatial resolution. If not None, then we invoke atrous
      convolution if necessary to prevent the network from reducing the
      spatial resolution of the activation maps. Allowed values are 8
      (accurate fully convolutional mode), 16 (fast fully convolutional
      mode), 32 (classification mode).
    use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
      inputs so that the output dimensions are the same as if 'SAME' padding
      were used.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries
      or losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0, or the target output_stride is not
      allowed.
  """
  # NOTE(review): `_CONV_DEFS`, `Conv`, `DepthSepConv` and `_fixed_padding`
  # are defined elsewhere in this file (outside this chunk).
  # Used to find thinned depths for each layer: scale by depth_multiplier but
  # never go below min_depth.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  end_points = {}

  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  if conv_defs is None:
    conv_defs = _CONV_DEFS

  if output_stride is not None and output_stride not in [8, 16, 32]:
    raise ValueError('Only allowed output_stride values are 8, 16, 32.')

  padding = 'SAME'
  if use_explicit_padding:
    # Padding is applied manually via _fixed_padding before each conv.
    padding = 'VALID'
  with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
      # The current_stride variable keeps track of the output stride of the
      # activations, i.e., the running product of convolution strides up to
      # the current network layer. This allows us to invoke atrous convolution
      # whenever applying the next convolution would result in the activations
      # having output stride larger than the target output_stride.
      current_stride = 1

      # The atrous convolution rate parameter.
      rate = 1

      net = inputs
      for i, conv_def in enumerate(conv_defs):
        end_point_base = 'Conv2d_%d' % i

        if output_stride is not None and current_stride == output_stride:
          # If we have reached the target output_stride, then we need to
          # employ atrous convolution with stride=1 and multiply the atrous
          # rate by the current unit's stride for use in subsequent layers.
          layer_stride = 1
          layer_rate = rate
          rate *= conv_def.stride
        else:
          layer_stride = conv_def.stride
          layer_rate = 1
          current_stride *= conv_def.stride

        if isinstance(conv_def, Conv):
          # Plain (full) convolution layer.
          end_point = end_point_base
          if use_explicit_padding:
            net = _fixed_padding(net, conv_def.kernel)
          net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
                            stride=conv_def.stride,
                            normalizer_fn=slim.batch_norm,
                            scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points

        elif isinstance(conv_def, DepthSepConv):
          # Depthwise separable conv: a depthwise conv followed by a 1x1
          # pointwise conv, each registered as its own endpoint.
          end_point = end_point_base + '_depthwise'

          # By passing filters=None
          # separable_conv2d produces only a depthwise convolution layer
          if use_explicit_padding:
            net = _fixed_padding(net, conv_def.kernel, layer_rate)
          net = slim.separable_conv2d(net, None, conv_def.kernel,
                                      depth_multiplier=1,
                                      stride=layer_stride,
                                      rate=layer_rate,
                                      normalizer_fn=slim.batch_norm,
                                      scope=end_point)

          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points

          end_point = end_point_base + '_pointwise'

          net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
                            stride=1,
                            normalizer_fn=slim.batch_norm,
                            scope=end_point)

          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
        else:
          raise ValueError('Unknown convolution type %s for layer %d'
                           % (conv_def.ltype, i))
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
The provided code snippet includes necessary dependencies for implementing the `mobilenet_v1` function. Write a Python function `def mobilenet_v1(inputs, num_classes=1000, dropout_keep_prob=0.999, is_training=True, min_depth=8, depth_multiplier=1.0, conv_defs=None, prediction_fn=tf.contrib.layers.softmax, spatial_squeeze=True, reuse=None, scope='MobilenetV1', global_pool=False)` to solve the following problem:
Mobilenet v1 model for classification. Args: inputs: a tensor of shape [batch_size, height, width, channels]. num_classes: number of predicted classes. If 0 or None, the logits layer is omitted and the input features to the logits layer (before dropout) are returned instead. dropout_keep_prob: the percentage of activation values that are retained. is_training: whether is training or not. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. conv_defs: A list of ConvDef namedtuples specifying the net architecture. prediction_fn: a function to get predictions out of logits. spatial_squeeze: if True, logits is of shape is [B, C], if false logits is of shape [B, 1, 1, C], where B is batch_size and C is number of classes. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. scope: Optional variable_scope. global_pool: Optional boolean flag to control the avgpooling before the logits layer. If false or unset, pooling is done with a fixed window that reduces default-sized inputs to 1x1, while larger inputs lead to larger outputs. If true, any input size is pooled down to 1x1. Returns: net: a 2D Tensor with the logits (pre-softmax activations) if num_classes is a non-zero integer, or the non-dropped-out input to the logits layer if num_classes is 0 or None. end_points: a dictionary from components of the network to the corresponding activation. Raises: ValueError: Input rank is invalid.
Here is the function:
def mobilenet_v1(inputs,
                 num_classes=1000,
                 dropout_keep_prob=0.999,
                 is_training=True,
                 min_depth=8,
                 depth_multiplier=1.0,
                 conv_defs=None,
                 prediction_fn=tf.contrib.layers.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='MobilenetV1',
                 global_pool=False):
  """Mobilenet v1 model for classification.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    dropout_keep_prob: the percentage of activation values that are retained.
    is_training: whether the model is being built for training.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
    global_pool: Optional boolean flag to control the avgpooling before the
      logits layer. If false or unset, pooling is done with a fixed window
      that reduces default-sized inputs to 1x1, while larger inputs lead to
      larger outputs. If true, any input size is pooled down to 1x1.
  Returns:
    net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped-out input to the logits layer
      if num_classes is 0 or None.
    end_points: a dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: Input rank is invalid.
  """
  # The base network requires a statically known rank-4 (NHWC) input.
  input_shape = inputs.get_shape().as_list()
  if len(input_shape) != 4:
    raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
                     len(input_shape))
  with tf.variable_scope(scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
    # Propagate the training flag to every batch-norm and dropout layer.
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = mobilenet_v1_base(inputs, scope=scope,
                                          min_depth=min_depth,
                                          depth_multiplier=depth_multiplier,
                                          conv_defs=conv_defs)
      with tf.variable_scope('Logits'):
        if global_pool:
          # Global average pooling: any input size is reduced to 1x1.
          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size (shrunk for small inputs so the
          # window never exceeds the feature map).
          kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a')
          end_points['AvgPool_1a'] = net
        if not num_classes:
          # Headless mode: return the pre-logits features (before dropout).
          return net, end_points
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        # A 1x1 convolution acts as the fully connected classification layer.
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      if prediction_fn:
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
23,509 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow as tf
def wrapped_partial(func, *args, **kwargs):
  """Like functools.partial, but with func's metadata copied onto the result.

  Args:
    func: the callable to bind arguments to.
    *args: positional arguments to pre-bind.
    **kwargs: keyword arguments to pre-bind.

  Returns:
    A partial object that carries func's __name__, __doc__, etc.
  """
  bound = functools.partial(func, *args, **kwargs)
  functools.update_wrapper(bound, func)
  return bound
23,510 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow as tf
slim = tf.contrib.slim
The provided code snippet includes necessary dependencies for implementing the `mobilenet_v1_arg_scope` function. Write a Python function `def mobilenet_v1_arg_scope( is_training=True, weight_decay=0.00004, stddev=0.09, regularize_depthwise=False, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS)` to solve the following problem:
Defines the default MobilenetV1 arg scope. Args: is_training: Whether or not we're training the model. If this is set to None, the parameter is not added to the batch_norm arg_scope. weight_decay: The weight decay to use for regularizing the model. stddev: The standard deviation of the truncated normal weight initializer. regularize_depthwise: Whether or not to apply regularization on depthwise. batch_norm_decay: Decay for batch norm moving average. batch_norm_epsilon: Small float added to variance to avoid dividing by zero in batch norm. batch_norm_updates_collections: Collection for the update ops for batch norm. Returns: An `arg_scope` to use for the mobilenet v1 model.
Here is the function:
def mobilenet_v1_arg_scope(
    is_training=True,
    weight_decay=0.00004,
    stddev=0.09,
    regularize_depthwise=False,
    batch_norm_decay=0.9997,
    batch_norm_epsilon=0.001,
    batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
  """Defines the default MobilenetV1 arg scope.
  Args:
    is_training: Whether or not we're training the model. If this is set to
      None, the parameter is not added to the batch_norm arg_scope.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
    regularize_depthwise: Whether or not to apply regularization on depthwise.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.
  Returns:
    An `arg_scope` to use for the mobilenet v1 model.
  """
  bn_params = {
      'center': True,
      'scale': True,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'updates_collections': batch_norm_updates_collections,
  }
  # Only pin is_training when the caller gave an explicit value; otherwise
  # let an outer arg_scope (or the layer default) decide.
  if is_training is not None:
    bn_params['is_training'] = is_training

  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
  # Depthwise filters have few parameters; regularizing them is optional.
  depthwise_regularizer = regularizer if regularize_depthwise else None

  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6,
                      normalizer_fn=slim.batch_norm):
    with slim.arg_scope([slim.batch_norm], **bn_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc
23,511 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
The provided code snippet includes necessary dependencies for implementing the `resnet_arg_scope` function. Write a Python function `def resnet_arg_scope(weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-5, batch_norm_scale=True, activation_fn=tf.nn.relu, use_batch_norm=True, batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS)` to solve the following problem:
Defines the default ResNet arg scope. TODO(gpapan): The batch-normalization related default values above are appropriate for use in conjunction with the reference ResNet models released at https://github.com/KaimingHe/deep-residual-networks. When training ResNets from scratch, they might need to be tuned. Args: weight_decay: The weight decay to use for regularizing the model. batch_norm_decay: The moving average decay when estimating layer activation statistics in batch normalization. batch_norm_epsilon: Small constant to prevent division by zero when normalizing activations by their variance in batch normalization. batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the activations in the batch normalization layer. activation_fn: The activation function which is used in ResNet. use_batch_norm: Whether or not to use batch normalization. batch_norm_updates_collections: Collection for the update ops for batch norm. Returns: An `arg_scope` to use for the resnet models.
Here is the function:
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True,
                     activation_fn=tf.nn.relu,
                     use_batch_norm=True,
                     batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
  """Defines the default ResNet arg scope.
  TODO(gpapan): The batch-normalization related default values above are
  appropriate for use in conjunction with the reference ResNet models
  released at https://github.com/KaimingHe/deep-residual-networks. When
  training ResNets from scratch, they might need to be tuned.
  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.
    activation_fn: The activation function which is used in ResNet.
    use_batch_norm: Whether or not to use batch normalization.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.
  Returns:
    An `arg_scope` to use for the resnet models.
  """
  bn_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': batch_norm_updates_collections,
      # None lets slim pick the fused implementation when available.
      'fused': None,
  }
  normalizer = slim.batch_norm if use_batch_norm else None

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=normalizer,
      normalizer_params=bn_params):
    with slim.arg_scope([slim.batch_norm], **bn_params):
      # SAME padding for pool1 eases feature alignment for dense prediction
      # tasks (as in fb.resnet.torch). The accompanying code of 'Deep
      # Residual Learning for Image Recognition' uses padding='VALID' for
      # pool1; switch via slim.arg_scope([slim.max_pool2d], padding='VALID').
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
23,512 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
The provided code snippet includes necessary dependencies for implementing the `inception_arg_scope` function. Write a Python function `def inception_arg_scope(weight_decay=0.00004, use_batch_norm=True, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, activation_fn=tf.nn.relu, batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS)` to solve the following problem:
Defines the default arg scope for inception models. Args: weight_decay: The weight decay to use for regularizing the model. use_batch_norm: "If `True`, batch_norm is applied after each convolution. batch_norm_decay: Decay for batch norm moving average. batch_norm_epsilon: Small float added to variance to avoid dividing by zero in batch norm. activation_fn: Activation function for conv2d. batch_norm_updates_collections: Collection for the update ops for batch norm. Returns: An `arg_scope` to use for the inception models.
Here is the function:
def inception_arg_scope(weight_decay=0.00004,
                        use_batch_norm=True,
                        batch_norm_decay=0.9997,
                        batch_norm_epsilon=0.001,
                        activation_fn=tf.nn.relu,
                        batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
  """Defines the default arg scope for inception models.
  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    activation_fn: Activation function for conv2d.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.
  Returns:
    An `arg_scope` to use for the inception models.
  """
  bn_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': batch_norm_updates_collections,
      # use fused batch norm if possible.
      'fused': None,
  }
  normalizer_fn = slim.batch_norm if use_batch_norm else None
  normalizer_params = bn_params if use_batch_norm else {}

  # Apply weight_decay to the weights in both Conv and FC layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
23,513 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.
  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.
  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.
  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is 0 or None,
      then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is a non-zero integer, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    # Intermediate activations are gathered under this uniquely named
    # collection and converted to a dict at the end.
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          # The root block downsamples by 4 (conv stride 2 + pool stride 2),
          # so the remaining blocks only need output_stride / 4.
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with slim.arg_scope([slim.conv2d],
                              activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
          end_points['global_pool'] = net
        if num_classes:
          # Classification head: a 1x1 conv acts as the fully connected layer.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          end_points[sc.name + '/logits'] = net
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            end_points[sc.name + '/spatial_squeeze'] = net
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v2 bottleneck block.
  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.
  Returns:
    A resnet_v2 bottleneck block.
  """
  # All units share the same depths; only the final unit carries the stride.
  unit = {
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': 1,
  }
  unit_args = [unit] * (num_units - 1) + [dict(unit, stride=stride)]
  return resnet_utils.Block(scope, bottleneck, unit_args)
resnet_v2.default_image_size = 224
The provided code snippet includes necessary dependencies for implementing the `resnet_v2_50` function. Write a Python function `def resnet_v2_50(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v2_50')` to solve the following problem:
ResNet-50 model of [1]. See resnet_v2() for arg and return description.
Here is the function:
def resnet_v2_50(inputs,
                 num_classes=None,
                 is_training=True,
                 global_pool=True,
                 output_stride=None,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='resnet_v2_50'):
  """ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
  # (name, bottleneck depth, number of units, stride of the block).
  block_specs = [
      ('block1', 64, 3, 2),
      ('block2', 128, 4, 2),
      ('block3', 256, 6, 2),
      ('block4', 512, 3, 1),
  ]
  blocks = [resnet_v2_block(name, base_depth=depth, num_units=units,
                            stride=block_stride)
            for name, depth, units, block_stride in block_specs]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
23,514 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.
  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.
  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.
  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is 0 or None,
      then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is a non-zero integer, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    # Intermediate activations are gathered under this uniquely named
    # collection and converted to a dict at the end.
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          # The root block downsamples by 4 (conv stride 2 + pool stride 2),
          # so the remaining blocks only need output_stride / 4.
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with slim.arg_scope([slim.conv2d],
                              activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
          end_points['global_pool'] = net
        if num_classes:
          # Classification head: a 1x1 conv acts as the fully connected layer.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          end_points[sc.name + '/logits'] = net
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            end_points[sc.name + '/spatial_squeeze'] = net
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v2 bottleneck block.

  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.

  Returns:
    A resnet_v2 bottleneck block.
  """
  # All units but the last run at stride 1; the block's overall stride is
  # applied by its final unit.
  unit_strides = [1] * (num_units - 1) + [stride]
  units = [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': unit_stride,
  } for unit_stride in unit_strides]
  return resnet_utils.Block(scope, bottleneck, units)
resnet_v2.default_image_size = 224  # nominal square input resolution (see resnet_v2 docstring)
The provided code snippet includes necessary dependencies for implementing the `resnet_v2_101` function. Write a Python function `def resnet_v2_101(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v2_101')` to solve the following problem:
ResNet-101 model of [1]. See resnet_v2() for arg and return description.
Here is the function:
def resnet_v2_101(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_101'):
  """ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
  # (base_depth, num_units, stride) for each of the four stages.  The
  # 3 + 4 + 23 + 3 bottleneck units yield the 101-layer variant.
  stage_specs = [(64, 3, 2), (128, 4, 2), (256, 23, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (i + 1), base_depth=d, num_units=u, stride=s)
      for i, (d, u, s) in enumerate(stage_specs)
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
23,515 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.

  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is 0 or None,
      then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is a non-zero integer, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  # NOTE(review): `slim` (tf.contrib.slim) and `bottleneck` are not imported in
  # this snippet; they are assumed to be in scope at module level -- confirm.
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Record every conv/bottleneck/block output into the collection so it can
    # be exposed through the returned end_points dict.
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root conv (stride 2) plus max-pool (stride 2) already consume
            # a factor of 4, so the residual blocks only need the remainder.
            # (True division yields a float under Python 3; integer-stride
            # comparisons such as 4 == 4.0 still hold.)
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with slim.arg_scope([slim.conv2d],
                              activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        if global_pool:
          # Global average pooling.  keep_dims preserves the 1x1 spatial dims
          # so the 1x1 'logits' conv below still applies.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
          end_points['global_pool'] = net
        if num_classes:
          # 1x1 conv acts as the final fully-connected (logits) layer; no
          # activation/normalization so raw pre-softmax logits are emitted.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          end_points[sc.name + '/logits'] = net
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            end_points[sc.name + '/spatial_squeeze'] = net
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
resnet_v2.default_image_size = 224  # nominal square input resolution (see resnet_v2 docstring)
def resnet_v2_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v2 bottleneck block.

  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.

  Returns:
    A resnet_v2 bottleneck block.
  """
  # All units but the last run at stride 1; the block's overall stride is
  # applied by its final unit.
  unit_strides = [1] * (num_units - 1) + [stride]
  units = [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': unit_stride,
  } for unit_stride in unit_strides]
  return resnet_utils.Block(scope, bottleneck, units)
resnet_v2.default_image_size = 224  # nominal square input resolution (see resnet_v2 docstring)
The provided code snippet includes necessary dependencies for implementing the `resnet_v2_152` function. Write a Python function `def resnet_v2_152(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v2_152')` to solve the following problem:
ResNet-152 model of [1]. See resnet_v2() for arg and return description.
Here is the function:
def resnet_v2_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_152'):
  """ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
  # (base_depth, num_units, stride) for each of the four stages.  The
  # 3 + 8 + 36 + 3 bottleneck units yield the 152-layer variant.
  stage_specs = [(64, 3, 2), (128, 8, 2), (256, 36, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (i + 1), base_depth=d, num_units=u, stride=s)
      for i, (d, u, s) in enumerate(stage_specs)
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
23,516 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.

  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is 0 or None,
      then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is a non-zero integer, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  # NOTE(review): `slim` (tf.contrib.slim) and `bottleneck` are not imported in
  # this snippet; they are assumed to be in scope at module level -- confirm.
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Record every conv/bottleneck/block output into the collection so it can
    # be exposed through the returned end_points dict.
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root conv (stride 2) plus max-pool (stride 2) already consume
            # a factor of 4, so the residual blocks only need the remainder.
            # (True division yields a float under Python 3; integer-stride
            # comparisons such as 4 == 4.0 still hold.)
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with slim.arg_scope([slim.conv2d],
                              activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)
        if global_pool:
          # Global average pooling.  keep_dims preserves the 1x1 spatial dims
          # so the 1x1 'logits' conv below still applies.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
          end_points['global_pool'] = net
        if num_classes:
          # 1x1 conv acts as the final fully-connected (logits) layer; no
          # activation/normalization so raw pre-softmax logits are emitted.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          end_points[sc.name + '/logits'] = net
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            end_points[sc.name + '/spatial_squeeze'] = net
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
resnet_v2.default_image_size = 224  # nominal square input resolution (see resnet_v2 docstring)
def resnet_v2_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v2 bottleneck block.

  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.

  Returns:
    A resnet_v2 bottleneck block.
  """
  # All units but the last run at stride 1; the block's overall stride is
  # applied by its final unit.
  unit_strides = [1] * (num_units - 1) + [stride]
  units = [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': unit_stride,
  } for unit_stride in unit_strides]
  return resnet_utils.Block(scope, bottleneck, units)
resnet_v2.default_image_size = 224  # nominal square input resolution (see resnet_v2 docstring)
The provided code snippet includes necessary dependencies for implementing the `resnet_v2_200` function. Write a Python function `def resnet_v2_200(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v2_200')` to solve the following problem:
ResNet-200 model of [2]. See resnet_v2() for arg and return description.
Here is the function:
def resnet_v2_200(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_200'):
  """ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
  # (base_depth, num_units, stride) for each of the four stages.  The
  # 3 + 24 + 36 + 3 bottleneck units yield the 200-layer variant.
  stage_specs = [(64, 3, 2), (128, 24, 2), (256, 36, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (i + 1), base_depth=d, num_units=u, stride=s)
      for i, (d, u, s) in enumerate(stage_specs)
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
23,517 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim  # TF-Slim alias (TensorFlow 1.x contrib API)
The provided code snippet includes necessary dependencies for implementing the `vgg_arg_scope` function. Write a Python function `def vgg_arg_scope(weight_decay=0.0005)` to solve the following problem:
Defines the VGG arg scope. Args: weight_decay: The l2 regularization coefficient. Returns: An arg_scope.
Here is the function:
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  # Shared defaults for all conv/fc layers: ReLU activation, L2 weight decay,
  # zero-initialized biases; conv layers additionally default to SAME padding.
  weights_regularizer = slim.l2_regularizer(weight_decay)
  biases_initializer = tf.zeros_initializer()
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=weights_regularizer,
                      biases_initializer=biases_initializer):
    with slim.arg_scope([slim.conv2d], padding='SAME') as scope:
      return scope
23,518 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim  # TF-Slim alias (TensorFlow 1.x contrib API)
The provided code snippet includes necessary dependencies for implementing the `vgg_a` function. Write a Python function `def vgg_a(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_a', fc_conv_padding='VALID', global_pool=False)` to solve the following problem:
Oxford Net VGG 11-Layers version A Example. Note: All the fully_connected layers have been transformed to conv2d layers. To use in classification mode, resize input to 224x224. Args: inputs: a tensor of size [batch_size, height, width, channels]. num_classes: number of predicted classes. If 0 or None, the logits layer is omitted and the input features to the logits layer are returned instead. is_training: whether or not the model is being trained. dropout_keep_prob: the probability that activations are kept in the dropout layers during training. spatial_squeeze: whether or not should squeeze the spatial dimensions of the outputs. Useful to remove unnecessary dimensions for classification. scope: Optional scope for the variables. fc_conv_padding: the type of padding to use for the fully connected layer that is implemented as a convolutional layer. Use 'SAME' padding if you are applying the network in a fully convolutional manner and want to get a prediction map downsampled by a factor of 32 as an output. Otherwise, the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. global_pool: Optional boolean flag. If True, the input to the classification layer is avgpooled to size 1x1, for any input size. (This is not part of the original VGG architecture.) Returns: net: the output of the logits layer (if num_classes is a non-zero integer), or the input to the logits layer (if num_classes is 0 or None). end_points: a dict of tensors with intermediate activations.
Here is the function:
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a',
          fc_conv_padding='VALID',
          global_pool=False):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer is
      omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.
    fc_conv_padding: the type of padding to use for the fully connected layer
      that is implemented as a convolutional layer. Use 'SAME' padding if you
      are applying the network in a fully convolutional manner and want to
      get a prediction map downsampled by a factor of 32 as an output.
      Otherwise, the output prediction map will be (input / 32) - 6 in case of
      'VALID' padding.
    global_pool: Optional boolean flag. If True, the input to the classification
      layer is avgpooled to size 1x1, for any input size. (This is not part
      of the original VGG architecture.)

  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the input to the logits layer (if num_classes is 0 or None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d and max_pool2d.
    # (Only those two ops are listed in the arg_scope below, so
    # fully_connected outputs are not recorded here.)
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # Five conv stages with 1-1-2-2-2 conv layers (VGG configuration A),
      # each stage followed by 2x2 max-pooling.
      net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')

      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        # keep_dims preserves the 1x1 spatial dims so fc8's 1x1 conv applies.
        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        # Final classifier; activation/normalization disabled to emit raw
        # logits.
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
23,519 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim  # TF-Slim alias (TensorFlow 1.x contrib API)
The provided code snippet includes necessary dependencies for implementing the `vgg_16` function. Write a Python function `def vgg_16(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_16', fc_conv_padding='VALID', global_pool=False)` to solve the following problem:
Oxford Net VGG 16-Layers version D Example. Note: All the fully_connected layers have been transformed to conv2d layers. To use in classification mode, resize input to 224x224. Args: inputs: a tensor of size [batch_size, height, width, channels]. num_classes: number of predicted classes. If 0 or None, the logits layer is omitted and the input features to the logits layer are returned instead. is_training: whether or not the model is being trained. dropout_keep_prob: the probability that activations are kept in the dropout layers during training. spatial_squeeze: whether or not should squeeze the spatial dimensions of the outputs. Useful to remove unnecessary dimensions for classification. scope: Optional scope for the variables. fc_conv_padding: the type of padding to use for the fully connected layer that is implemented as a convolutional layer. Use 'SAME' padding if you are applying the network in a fully convolutional manner and want to get a prediction map downsampled by a factor of 32 as an output. Otherwise, the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. global_pool: Optional boolean flag. If True, the input to the classification layer is avgpooled to size 1x1, for any input size. (This is not part of the original VGG architecture.) Returns: net: the output of the logits layer (if num_classes is a non-zero integer), or the input to the logits layer (if num_classes is 0 or None). end_points: a dict of tensors with intermediate activations.
Here is the function:
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16',
           fc_conv_padding='VALID',
           global_pool=False):
  """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer is
      omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.
    fc_conv_padding: the type of padding to use for the fully connected layer
      that is implemented as a convolutional layer. Use 'SAME' padding if you
      are applying the network in a fully convolutional manner and want to
      get a prediction map downsampled by a factor of 32 as an output.
      Otherwise, the output prediction map will be (input / 32) - 6 in case of
      'VALID' padding.
    global_pool: Optional boolean flag. If True, the input to the classification
      layer is avgpooled to size 1x1, for any input size. (This is not part
      of the original VGG architecture.)

  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the input to the logits layer (if num_classes is 0 or None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # Five conv stages with 2-2-3-3-3 conv layers (VGG configuration D),
      # each stage followed by 2x2 max-pooling.
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')

      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        # keep_dims preserves the 1x1 spatial dims so fc8's 1x1 conv applies.
        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        # Final classifier; activation/normalization disabled to emit raw
        # logits.
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
23,520 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim  # TF-Slim alias (TensorFlow 1.x contrib API)
The provided code snippet includes necessary dependencies for implementing the `vgg_19` function. Write a Python function `def vgg_19(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_19', fc_conv_padding='VALID', global_pool=False)` to solve the following problem:
Oxford Net VGG 19-Layers version E Example. Note: All the fully_connected layers have been transformed to conv2d layers. To use in classification mode, resize input to 224x224. Args: inputs: a tensor of size [batch_size, height, width, channels]. num_classes: number of predicted classes. If 0 or None, the logits layer is omitted and the input features to the logits layer are returned instead. is_training: whether or not the model is being trained. dropout_keep_prob: the probability that activations are kept in the dropout layers during training. spatial_squeeze: whether or not should squeeze the spatial dimensions of the outputs. Useful to remove unnecessary dimensions for classification. scope: Optional scope for the variables. fc_conv_padding: the type of padding to use for the fully connected layer that is implemented as a convolutional layer. Use 'SAME' padding if you are applying the network in a fully convolutional manner and want to get a prediction map downsampled by a factor of 32 as an output. Otherwise, the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. global_pool: Optional boolean flag. If True, the input to the classification layer is avgpooled to size 1x1, for any input size. (This is not part of the original VGG architecture.) Returns: net: the output of the logits layer (if num_classes is a non-zero integer), or the non-dropped-out input to the logits layer (if num_classes is 0 or None). end_points: a dict of tensors with intermediate activations.
Here is the function:
def vgg_19(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_19',
           fc_conv_padding='VALID',
           global_pool=False):
  """Oxford Net VGG 19-layers version E.

  All fully connected layers are expressed as conv2d layers. Resize the
  input to 224x224 for classification mode.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits
      layer is omitted and the features feeding it are returned instead.
    is_training: whether the model is being trained (controls dropout).
    dropout_keep_prob: keep probability of the dropout layers in training.
    spatial_squeeze: whether to squeeze the spatial dims of the logits.
    scope: optional scope for the variables.
    fc_conv_padding: padding for the conv layer standing in for fc6. Use
      'SAME' for fully convolutional application; with 'VALID' the output
      prediction map is (input / 32) - 6.
    global_pool: if True, average-pool the classifier input to 1x1 for any
      input size (not part of the original VGG architecture).

  Returns:
    net: logits (if num_classes is a non-zero integer) or the
      non-dropped-out input to the logits layer (if num_classes is 0/None).
    end_points: a dict of tensors with intermediate activations.
  """
  with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # VGG-19 feature stages: (number of 3x3 convs, output depth) per stage,
      # each followed by 2x2 max pooling. Scope names match the unrolled
      # original ('conv1'/'pool1' .. 'conv5'/'pool5').
      net = inputs
      for stage, (repeats, depth) in enumerate(
          ((2, 64), (2, 128), (4, 256), (4, 512), (4, 512)), start=1):
        net = slim.repeat(net, repeats, slim.conv2d, depth, [3, 3],
                          scope='conv%d' % stage)
        net = slim.max_pool2d(net, [2, 2], scope='pool%d' % stage)
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into an end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
23,521 | from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
import numpy as np
from nets.inception_v3 import inception_v3
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
    """Placeholder evaluation callback for the pruning runner.

    Opens a default TF1 session (as a real evaluation would) and returns a
    fixed dummy accuracy of 0.5.
    """
    session = tf.compat.v1.Session()
    with session.as_default():
        return 0.5
23,522 | from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
import numpy as np
from nets.mobilenet_v1 import mobilenet_v1
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
    """Stub metric callback: enters a default TF1 session and reports 0.5."""
    with tf.compat.v1.Session().as_default():
        return 0.5
23,523 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.optim.lr_scheduler import CosineAnnealingLR
from torchvision.models.mobilenet import mobilenet_v2
from pytorch_nndct import OFAPruner
IMAGENET_TRAINSET_SIZE = 1281167
best_acc1 = 0
def sgd_optimizer(model, lr, momentum, weight_decay):
  """Build an SGD optimizer with one param group per trainable parameter.

  Biases get a doubled learning rate and no weight decay; parameters whose
  name contains 'bn' also skip weight decay. Frozen parameters
  (requires_grad == False) are excluded.

  Args:
    model: module providing named_parameters().
    lr: base learning rate.
    momentum: SGD momentum applied to every group.
    weight_decay: L2 penalty for regular weights.

  Returns:
    A configured torch.optim.SGD instance.
  """
  param_groups = []
  for name, param in model.named_parameters():
    if not param.requires_grad:
      continue
    is_bias = 'bias' in name
    group_lr = 2 * lr if is_bias else lr
    group_decay = 0 if (is_bias or 'bn' in name) else weight_decay
    param_groups.append({
        'params': [param],
        'lr': group_lr,
        'weight_decay': group_decay
    })
  return torch.optim.SGD(param_groups, lr, momentum=momentum)
def train(train_loader, model, ofa_pruner, criterion, soft_criterion, optimizer,
          epoch, args, lr_scheduler):
  """Train the OFA super-network for one epoch using the sandwich rule.

  Per batch, four subnets are sampled (max, min, then two random); each
  subnet's loss is back-propagated so gradients accumulate, then a single
  optimizer step updates the super-network. When soft_criterion is given,
  a distillation term against args.teacher_model logits is added.

  Args:
    train_loader: yields (images, target) batches.
    model: dynamic OFA model (reassigned by sample_subnet each iteration).
    ofa_pruner: pruner exposing sample_subnet().
    criterion: hard-label classification loss.
    soft_criterion: optional distillation loss; falsy disables distillation.
    optimizer: optimizer over the super-network parameters.
    epoch: current epoch index (logging only).
    args: namespace providing gpu, print_freq and teacher_model.
    lr_scheduler: per-batch LR scheduler, stepped once per batch.
  """
  batch_time = AverageMeter('Time', ':6.3f')
  data_time = AverageMeter('Data', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  progress = ProgressMeter(
      len(train_loader), [
          batch_time,
          data_time,
          losses,
          top1,
          top5,
      ],
      prefix="Epoch: [{}]".format(epoch))
  # switch to train mode
  model.train()
  end = time.time()
  for i, (images, target) in enumerate(train_loader):
    # measure data loading time
    data_time.update(time.time() - end)
    if args.gpu is not None:
      images = images.cuda(args.gpu, non_blocking=True)
    if torch.cuda.is_available():
      target = target.cuda(args.gpu, non_blocking=True)
    # Gradients from all sampled subnets accumulate before one step.
    optimizer.zero_grad()
    args.teacher_model.train()
    with torch.no_grad():
      # Teacher logits for distillation; detached so no teacher grads flow.
      soft_logits = args.teacher_model(images).detach()
    # Sandwich rule: largest subnet, smallest subnet, then two random ones.
    for arch_id in range(4):
      if arch_id == 0:
        model, _ = ofa_pruner.sample_subnet(model, 'max')
      elif arch_id == 1:
        model, _ = ofa_pruner.sample_subnet(model, 'min')
      else:
        model, _ = ofa_pruner.sample_subnet(model, 'random')
      # calculating the loss for this subnet (distillation + CE if enabled)
      output = model(images)
      if soft_criterion:
        loss = soft_criterion(output, soft_logits) + criterion(output, target)
      else:
        loss = criterion(output, target)
      loss.backward()
    optimizer.step()
    lr_scheduler.step()
    # measure accuracy and record loss
    # NOTE: metrics below reflect the LAST sampled subnet of this batch.
    acc1, acc5 = accuracy(output, target, topk=(1, 5))
    losses.update(loss.item(), images.size(0))
    top1.update(acc1[0], images.size(0))
    top5.update(acc5[0], images.size(0))
    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()
    if i % args.print_freq == 0:
      progress.display(i)
    if i % 1000 == 0:
      print('cur lr: ', lr_scheduler.get_lr()[0])
def validate_subnet(train_loader, val_loader, model, ofa_pruner, criterion,
                    args, bn_calibration):
  """Evaluate the extreme (min/max) OFA subnets on the validation set.

  For each subnet id, samples the corresponding dynamic subnet, extracts a
  static copy, optionally recalibrates its BatchNorm statistics on a few
  training batches, then runs the external `validate` and prints a summary
  dict (accuracy, MACs, params).

  Args:
    train_loader: source of batches for BN calibration (16 batches max).
    val_loader: validation data for `validate`.
    model: dynamic OFA super-network.
    ofa_pruner: pruner exposing sample_subnet / get_static_subnet /
      reset_bn_running_stats_for_calibration.
    criterion: loss passed through to `validate`.
    args: forwarded to `validate`.
    bn_calibration: when True, re-estimate BN running stats before eval.
  """
  evaluated_subnet = {
      'ofa_min_subnet': {},
      'ofa_max_subnet': {},
  }
  for net_id in evaluated_subnet:
    if net_id == 'ofa_min_subnet':
      dynamic_subnet, dynamic_subnet_setting = ofa_pruner.sample_subnet(
          model, 'min')
    elif net_id == 'ofa_max_subnet':
      dynamic_subnet, dynamic_subnet_setting = ofa_pruner.sample_subnet(
          model, 'max')
    else:
      # Fallback path for a pre-recorded subnet setting (unused with the
      # dict above, which only holds the min/max ids).
      dynamic_subnet_setting = evaluated_subnet[net_id]
      static_subnet, _, macs, params = ofa_pruner.get_static_subnet(
          model, dynamic_subnet_setting)
    if len(evaluated_subnet[net_id]) == 0:
      static_subnet, _, macs, params = ofa_pruner.get_static_subnet(
          dynamic_subnet, dynamic_subnet_setting)
    static_subnet = static_subnet.cuda()
    if bn_calibration:
      with torch.no_grad():
        static_subnet.eval()
        ofa_pruner.reset_bn_running_stats_for_calibration(static_subnet)
        # Re-estimate BN running stats on at most 16 training batches.
        for batch_idx, (images, _) in enumerate(train_loader):
          if batch_idx >= 16:
            break
          images = images.cuda(non_blocking=True)
          static_subnet(images) #forward only
    acc1, acc5 = validate(val_loader, static_subnet, criterion, args)
    summary = {
        'net_id': net_id,
        'mode': 'evaluate',
        'acc1': acc1,
        'acc5': acc5,
        'macs': macs,
        'params': params
    }
    print(summary)
class cross_entropy_loss_with_soft_target(torch.nn.modules.loss._Loss):
  """Knowledge-distillation loss: cross entropy against soft teacher targets.

  `target` holds raw teacher logits; they are softened with a softmax and
  the batch-mean soft cross entropy against the student `output` is
  returned.
  """

  def forward(self, output, target):
    soft_target = torch.nn.functional.softmax(target, dim=1)
    # Implicit-dim LogSoftmax (dim=1 for 2-D inputs) matches the original.
    log_probs = nn.LogSoftmax()(output)
    per_sample_ce = torch.sum(-soft_target * log_probs, 1)
    return torch.mean(per_sample_ce)
def save_checkpoint(state, filename='checkpoint.pth.tar', is_best=True):
  """Serialize a training checkpoint and optionally mark it as the best.

  Args:
    state: picklable object (typically a dict) holding the training state.
    filename: path the checkpoint is written to.
    is_best: when True (the default, preserving the original behavior of
      always copying), the checkpoint is also copied to
      'model_best.pth.tar' in the current working directory. Pass False
      for checkpoints that did not improve the tracked metric.
  """
  torch.save(state, filename)
  if is_best:
    shutil.copyfile(filename, 'model_best.pth.tar')
def main_worker(gpu, ngpus_per_node, args):
  """Per-process entry point for (optionally distributed) OFA training.

  Builds the pretrained MobileNetV2 teacher, derives the OFA super-network
  from it, wraps it in DistributedDataParallel, optionally resumes from a
  checkpoint, constructs ImageNet loaders, then alternates train /
  validate_subnet / save_checkpoint per epoch.

  Args:
    gpu: GPU index for this process (None for CPU).
    ngpus_per_node: number of GPUs per node (used to derive the global rank
      and to split batch size / workers).
    args: mutable namespace with all CLI options; also receives
      args.teacher_model, and has batch_size/workers rescaled in place.
  """
  global best_acc1
  args.gpu = gpu
  if args.gpu is not None:
    print("Use GPU: {} for training".format(args.gpu))
  if args.distributed:
    if args.dist_url == "env://" and args.rank == -1:
      args.rank = int(os.environ["RANK"])
    if args.multiprocessing_distributed:
      # For multiprocessing distributed training, rank needs to be the
      # global rank among all the processes
      args.rank = args.rank * ngpus_per_node + gpu
    dist.init_process_group(
        backend=args.dist_backend,
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank)
  if not torch.cuda.is_available():
    print('using CPU, this will be slow')
  elif args.distributed:
    # For multiprocessing distributed, DistributedDataParallel constructor
    # should always set the single device scope, otherwise,
    # DistributedDataParallel will use all available devices.
    if args.gpu is not None:
      torch.cuda.set_device(args.gpu)
  # Teacher network for distillation; stored on args so train() can use it.
  args.teacher_model = mobilenet_v2(pretrained=True)
  args.teacher_model.cuda(args.gpu)
  # Dummy input used by the pruner to trace the teacher graph.
  inputs = torch.randn([1, 3, 224, 224], dtype=torch.float32).cuda()
  ofa_pruner = OFAPruner(args.teacher_model, inputs)
  ofa_model = ofa_pruner.ofa_model(args.expand_ratio,
                                   args.channel_divisible, args.excludes)
  model = ofa_model.cuda(args.gpu)
  # When using a single GPU per process and per
  # DistributedDataParallel, we need to divide the batch size
  # ourselves based on the total number of GPUs we have
  args.batch_size = int(args.batch_size / ngpus_per_node)
  args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
  model = torch.nn.parallel.DistributedDataParallel(
      model, device_ids=[args.gpu])
  # define loss function (criterion and kd loss) and optimizer
  criterion = nn.CrossEntropyLoss().cuda(args.gpu)
  soft_criterion = cross_entropy_loss_with_soft_target().cuda(args.gpu)
  optimizer = sgd_optimizer(model, args.lr, args.momentum, args.weight_decay)
  # Cosine schedule stepped per batch, hence T_max counts total steps.
  lr_scheduler = CosineAnnealingLR(
      optimizer=optimizer,
      T_max=args.epochs * IMAGENET_TRAINSET_SIZE // args.batch_size //
      ngpus_per_node)
  # optionally resume from a checkpoint
  if args.resume:
    if os.path.isfile(args.resume):
      print("=> loading checkpoint '{}'".format(args.resume))
      if args.gpu is None:
        checkpoint = torch.load(args.resume)
      else:
        # Map model to be loaded to specified single gpu.
        loc = 'cuda:{}'.format(args.gpu)
        checkpoint = torch.load(args.resume, map_location=loc)
      args.start_epoch = checkpoint['epoch']
      best_acc1 = checkpoint['best_acc1']
      if args.gpu is not None:
        # best_acc1 may be from a checkpoint from a different GPU
        best_acc1 = best_acc1.to(args.gpu)
      model.load_state_dict(checkpoint['state_dict'])
      optimizer.load_state_dict(checkpoint['optimizer'])
      lr_scheduler.load_state_dict(checkpoint['scheduler'])
      print("=> loaded checkpoint '{}' (epoch {})".format(
          args.resume, checkpoint['epoch']))
    else:
      print("=> no checkpoint found at '{}'".format(args.resume))
  cudnn.benchmark = True
  # Data loading code
  traindir = os.path.join(args.data, 'train')
  valdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(
      mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_dataset = datasets.ImageFolder(
      traindir,
      transforms.Compose([
          transforms.RandomResizedCrop(224),
          transforms.RandomHorizontalFlip(),
          transforms.ToTensor(),
          normalize,
      ]))
  if args.distributed:
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset)
  else:
    train_sampler = None
  train_loader = torch.utils.data.DataLoader(
      train_dataset,
      batch_size=args.batch_size,
      shuffle=(train_sampler is None),
      num_workers=args.workers,
      pin_memory=True,
      sampler=train_sampler)
  val_dataset = datasets.ImageFolder(
      valdir,
      transforms.Compose([
          transforms.Resize(256),
          transforms.CenterCrop(224),
          transforms.ToTensor(),
          normalize,
      ]))
  val_loader = torch.utils.data.DataLoader(
      val_dataset,
      batch_size=args.batch_size,
      shuffle=False,
      num_workers=args.workers,
      pin_memory=True)
  for epoch in range(args.start_epoch, args.epochs):
    if args.distributed:
      train_sampler.set_epoch(epoch)
    # Baseline evaluation (no BN calibration) before the first epoch only.
    if epoch == args.start_epoch:
      validate_subnet(train_loader, val_loader, model, ofa_pruner, criterion,
                      args, False)
    # train for one epoch
    train(train_loader, model, ofa_pruner, criterion, soft_criterion, optimizer,
          epoch, args, lr_scheduler)
    # evaluate on validation set
    validate_subnet(train_loader, val_loader, model, ofa_pruner, criterion,
                    args, True)
    # Only rank 0 of each node persists the checkpoint.
    if not args.multiprocessing_distributed or (
        args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
      save_checkpoint({
          'epoch': epoch + 1,
          'state_dict': model.state_dict(),
          'best_acc1': best_acc1,
          'optimizer': optimizer.state_dict(),
          'scheduler': lr_scheduler.state_dict(),
      })
23,524 | import argparse
import os
import torch
import torchvision.datasets as datasets
from torchvision.models.mobilenet import mobilenet_v2
import torchvision.transforms as transforms
from pytorch_nndct import OFAPruner
def get_gpus(device):
  """Parse a comma-separated GPU id string (e.g. '0,1') into a list of ints."""
  return list(map(int, device.split(',')))
23,525 | import argparse
import os
import torch
import torchvision.datasets as datasets
from torchvision.models.mobilenet import mobilenet_v2
import torchvision.transforms as transforms
from pytorch_nndct import OFAPruner
class AverageMeter(object):
  """Tracks the most recent value and a weighted running average of a metric."""

  def __init__(self, name, fmt=':f'):
    # name/fmt drive the __str__ display template.
    self.name = name
    self.fmt = fmt
    self.reset()

  def reset(self):
    """Clear all accumulated statistics."""
    self.val = 0
    self.avg = 0
    self.sum = 0
    self.count = 0

  def update(self, val, n=1):
    """Record observation `val` with weight `n` (e.g. the batch size)."""
    self.val = val
    self.sum += val * n
    self.count += n
    self.avg = self.sum / self.count

  def __str__(self):
    spec = self.fmt
    template = '{name} {val' + spec + '} ({avg' + spec + '})'
    return template.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
  """Compute top-k accuracy (percent) for each k in `topk`.

  Args:
    output: scores/logits of shape (batch, num_classes).
    target: ground-truth class indices of shape (batch,).
    topk: iterable of k values to evaluate.

  Returns:
    List of 1-element tensors, one per k, holding accuracy in percent.
  """
  with torch.no_grad():
    k_max = max(topk)
    batch = target.size(0)
    # Indices of the k_max best classes per sample, transposed to (k, batch).
    top_idx = output.topk(k_max, 1, True, True)[1].t()
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
    results = []
    for k in topk:
      hit_count = hits[:k].flatten().float().sum(0, keepdim=True)
      results.append(hit_count.mul_(100.0 / batch))
    return results
def eval_fn(model, dataloader_test):
  """Evaluate `model` on GPU over `dataloader_test`.

  Returns the average top-1 accuracy (percent) as a plain float.
  """
  acc_meter = AverageMeter('Acc@1', ':6.2f')
  model.eval()
  with torch.no_grad():
    for images, targets in dataloader_test:
      images = images.cuda()
      targets = targets.cuda()
      logits = model(images)
      top1_acc, _ = accuracy(logits, targets, topk=(1, 5))
      acc_meter.update(top1_acc[0], images.size(0))
  return float(acc_meter.avg)
23,526 | import argparse
import os
import torch
import torchvision.datasets as datasets
from torchvision.models.mobilenet import mobilenet_v2
import torchvision.transforms as transforms
from pytorch_nndct import OFAPruner
def calibration_fn(model, train_loader, number_forward=16):
  """Recalibrate BatchNorm running statistics with a few forward passes.

  Puts every BatchNorm2d layer into training mode with cumulative-average
  momentum (momentum=None) and cleared running stats, then forwards roughly
  `number_forward` batches (no gradients) so the stats are re-estimated.
  """
  model.eval()
  for _, module in model.named_modules():
    if isinstance(module, torch.nn.BatchNorm2d):
      # Cumulative moving average over the calibration batches.
      module.training = True
      module.momentum = None
      module.reset_running_stats()
  print("Calibration BN start...")
  with torch.no_grad():
    for batch_idx, (images, _) in enumerate(train_loader):
      model(images.cuda())
      if batch_idx > number_forward:
        break
  print("Calibration BN end...")
23,527 | import argparse
import os
import time
import torch
import torchvision.datasets as datasets
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
def get_gpus(device):
  """Turn a comma-separated device string like '0,2' into [0, 2]."""
  gpu_ids = []
  for token in device.split(','):
    gpu_ids.append(int(token))
  return gpu_ids
23,528 | import argparse
import os
import time
import torch
import torchvision.datasets as datasets
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
The provided code snippet includes necessary dependencies for implementing the `adjust_learning_rate` function. Write a Python function `def adjust_learning_rate(optimizer, epoch, lr)` to solve the following problem:
Sets the learning rate to the initial LR decayed by a factor of 10 every 2 epochs
Here is the function:
def adjust_learning_rate(optimizer, epoch, lr):
  """Decay the initial LR by a factor of 10 every 2 epochs and apply it."""
  decayed = lr * (0.1**(epoch // 2))
  for group in optimizer.param_groups:
    group['lr'] = decayed
23,529 | import argparse
import os
import time
import torch
import torchvision.datasets as datasets
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
class AverageMeter(object):
  """Computes and stores the latest value plus a running weighted mean."""

  def __init__(self, name, fmt=':f'):
    self.name = name
    self.fmt = fmt
    self.reset()

  def reset(self):
    # Start every statistic from zero.
    self.val = 0
    self.avg = 0
    self.sum = 0
    self.count = 0

  def update(self, val, n=1):
    # `n` is the weight (typically the batch size) of this observation.
    self.val = val
    self.count += n
    self.sum += val * n
    self.avg = self.sum / self.count

  def __str__(self):
    template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
    return template.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
  """Return top-k accuracies (percent) for every k in `topk`.

  `output` is (batch, num_classes) scores; `target` is (batch,) labels.
  """
  with torch.no_grad():
    largest_k = max(topk)
    n_samples = target.size(0)
    # (k, batch) boolean table: row j marks whether the (j+1)-th guess hit.
    pred = output.topk(largest_k, 1, True, True)[1].t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [
        correct[:k].flatten().float().sum(0, keepdim=True).mul_(
            100.0 / n_samples) for k in topk
    ]
def eval_fn(model, dataloader_test):
  """Evaluate `model` on GPU and return the running top-1 accuracy average."""
  meter = AverageMeter('Acc@1', ':6.2f')
  model.eval()
  with torch.no_grad():
    for images, targets in dataloader_test:
      images = images.cuda()
      targets = targets.cuda()
      preds = model(images)
      top1_acc, _ = accuracy(preds, targets, topk=(1, 5))
      meter.update(top1_acc[0], images.size(0))
  return meter.avg
23,530 | import argparse
import os
import time
import torch
import torchvision.datasets as datasets
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
def train(train_loader, model, criterion, optimizer, epoch):
  """Standard one-epoch supervised training loop on GPU.

  Forwards each batch, records loss/top-1/top-5 meters, runs one SGD step
  per batch, and prints progress every 10 batches.

  Args:
    train_loader: yields (images, target) batches.
    model: network to train (moved to CUDA inside the loop).
    criterion: classification loss.
    optimizer: optimizer stepped once per batch.
    epoch: current epoch index (logging only).
  """
  batch_time = AverageMeter('Time', ':6.3f')
  data_time = AverageMeter('Data', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  progress = ProgressMeter(
      len(train_loader), [batch_time, data_time, losses, top1, top5],
      prefix="Epoch: [{}]".format(epoch))
  # switch to train mode
  model.train()
  end = time.time()
  for i, (images, target) in enumerate(train_loader):
    # measure data loading time
    data_time.update(time.time() - end)
    model = model.cuda()
    images = images.cuda()
    target = target.cuda()
    # compute output
    output = model(images)
    loss = criterion(output, target)
    # measure accuracy and record loss
    acc1, acc5 = accuracy(output, target, topk=(1, 5))
    losses.update(loss.item(), images.size(0))
    top1.update(acc1[0], images.size(0))
    top5.update(acc5[0], images.size(0))
    # compute gradient and do SGD step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()
    if i % 10 == 0:
      progress.display(i)
def calibration_fn(model, train_loader, number_forward=100):
  """Run adaptive-BN calibration forward passes.

  Puts the model in train mode so BatchNorm layers update their running
  statistics, then forwards roughly `number_forward` batches with gradients
  disabled.

  Args:
    model: network whose BN statistics should adapt (assumed already on
      GPU; inputs are moved to CUDA here).
    train_loader: yields (images, target) batches.
    number_forward: approximate number of batches to forward.
  """
  model.train()
  # Fixed typo in the progress message ("atart" -> "start").
  print("Adaptive BN start...")
  with torch.no_grad():
    for index, (images, target) in enumerate(train_loader):
      images = images.cuda()
      model(images)
      if index > number_forward:
        break
  print("Adaptive BN end...")
23,531 | import argparse
import os
import time
import torch
import torchvision.datasets as datasets
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
# NOTE(review): the following lines are signature-only stubs with no bodies —
# almost certainly a truncation artifact of the code-extraction pipeline, not
# intentional source. They are not valid Python as written. Full
# implementations of AverageMeter / ProgressMeter / accuracy appear elsewhere
# in this file; kept byte-identical pending confirmation of the intended text.
class AverageMeter(object):
  def __init__(self, name, fmt=':f'):
  def reset(self):
  def update(self, val, n=1):
  def __str__(self):
class ProgressMeter(object):
  def __init__(self, num_batches, meters, prefix=""):
  def display(self, batch):
  def _get_batch_fmtstr(self, num_batches):
def accuracy(output, target, topk=(1,)):
def evaluate(val_loader, model, criterion):
  """Evaluate `model` on `val_loader` and return (top-1, top-5) accuracy.

  Runs on GPU with gradients disabled, records loss/accuracy meters,
  prints progress every 50 batches and a final summary line.

  Args:
    val_loader: yields (images, target) batches.
    model: network to evaluate (moved to CUDA inside the loop).
    criterion: loss used only for reporting.

  Returns:
    Tuple (top1.avg, top5.avg) of average accuracies in percent.
  """
  batch_time = AverageMeter('Time', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  progress = ProgressMeter(
      len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')
  # switch to evaluate mode
  model.eval()
  with torch.no_grad():
    end = time.time()
    for i, (images, target) in enumerate(val_loader):
      model = model.cuda()
      images = images.cuda(non_blocking=True)
      target = target.cuda(non_blocking=True)
      # compute output
      output = model(images)
      loss = criterion(output, target)
      # measure accuracy and record loss
      acc1, acc5 = accuracy(output, target, topk=(1, 5))
      losses.update(loss.item(), images.size(0))
      top1.update(acc1[0], images.size(0))
      top5.update(acc5[0], images.size(0))
      # measure elapsed time
      batch_time.update(time.time() - end)
      end = time.time()
      if i % 50 == 0:
        progress.display(i)
    # TODO: this should also be done with the ProgressMeter
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(
        top1=top1, top5=top5))
  return top1.avg, top5.avg
23,532 | import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
The provided code snippet includes necessary dependencies for implementing the `adjust_learning_rate` function. Write a Python function `def adjust_learning_rate(optimizer, epoch, lr)` to solve the following problem:
Sets the learning rate to the initial LR decayed by a factor of 10 every 2 epochs
Here is the function:
def adjust_learning_rate(optimizer, epoch, lr):
  """Step-decay schedule: multiply the initial LR by 0.1 every 2 epochs."""
  new_lr = lr * 0.1**(epoch // 2)
  for param_group in optimizer.param_groups:
    param_group['lr'] = new_lr
23,533 | import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
class AverageMeter(object):
  """Keeps the current value and the weighted running average of a metric."""

  def __init__(self, name, fmt=':f'):
    self.name = name
    self.fmt = fmt
    self.reset()

  def reset(self):
    # Zero out everything in one shot.
    self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

  def update(self, val, n=1):
    # Weighted update: `n` samples each contributing `val`.
    self.val = val
    self.sum += n * val
    self.count += n
    self.avg = self.sum / self.count

  def __str__(self):
    return ('{name} {val' + self.fmt + '} ({avg' + self.fmt + '})').format(
        **self.__dict__)
def accuracy(output, target, topk=(1,)):
  """Top-k accuracy in percent for each requested k.

  `output` is (batch, num_classes) scores; `target` holds class indices.
  """
  with torch.no_grad():
    maxk = max(topk)
    batch_size = target.size(0)
    top_indices = output.topk(maxk, 1, True, True)[1].t()
    matches = top_indices.eq(target.view(1, -1).expand_as(top_indices))
    scores = []
    for k in topk:
      num_correct = matches[:k].flatten().float().sum(0, keepdim=True)
      scores.append(num_correct.mul_(100.0 / batch_size))
    return scores
def eval_fn(model, dataloader):
  """Compute the average top-1 accuracy of `model` over `dataloader` (GPU)."""
  top1_meter = AverageMeter('Acc@1', ':6.2f')
  model.eval()
  with torch.no_grad():
    for images, targets in dataloader:
      images = images.cuda()
      targets = targets.cuda()
      outputs = model(images)
      acc_top1, _ = accuracy(outputs, targets, topk=(1, 5))
      top1_meter.update(acc_top1[0], images.size(0))
  return top1_meter.avg
23,534 | import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
def train(train_loader, model, criterion, optimizer, epoch):
  """Train `model` for one epoch on GPU with per-batch SGD updates.

  Records data/batch timing and loss/top-1/top-5 meters; prints progress
  every 10 batches via ProgressMeter.

  Args:
    train_loader: yields (images, target) batches.
    model: network to train (moved to CUDA inside the loop).
    criterion: classification loss.
    optimizer: stepped once per batch.
    epoch: epoch index used for the progress prefix.
  """
  batch_time = AverageMeter('Time', ':6.3f')
  data_time = AverageMeter('Data', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  progress = ProgressMeter(
      len(train_loader), [batch_time, data_time, losses, top1, top5],
      prefix="Epoch: [{}]".format(epoch))
  # switch to train mode
  model.train()
  end = time.time()
  for i, (images, target) in enumerate(train_loader):
    # measure data loading time
    data_time.update(time.time() - end)
    model = model.cuda()
    images = images.cuda()
    target = target.cuda()
    # compute output
    output = model(images)
    loss = criterion(output, target)
    # measure accuracy and record loss
    acc1, acc5 = accuracy(output, target, topk=(1, 5))
    losses.update(loss.item(), images.size(0))
    top1.update(acc1[0], images.size(0))
    top5.update(acc5[0], images.size(0))
    # compute gradient and do SGD step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()
    if i % 10 == 0:
      progress.display(i)
def calibration_fn(model, dataloader, number_forward=100):
  """Adaptive-BN calibration: forward passes in train mode, no gradients.

  Train mode lets BatchNorm layers refresh their running statistics while
  `torch.no_grad()` prevents any parameter gradients.

  Args:
    model: network whose BN statistics should adapt (on GPU).
    dataloader: yields (images, target) batches.
    number_forward: approximate number of batches to forward.
  """
  model.train()
  # Fixed typo in the progress message ("atart" -> "start").
  print("Adaptive BN start...")
  with torch.no_grad():
    for index, (images, target) in enumerate(dataloader):
      images = images.cuda()
      model(images)
      if index > number_forward:
        break
  print("Adaptive BN end...")
23,535 | import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
# NOTE(review): signature-only stubs with no bodies — presumably a truncation
# artifact of the extraction pipeline, not valid Python as written. The full
# AverageMeter / ProgressMeter / accuracy definitions exist elsewhere in this
# file; kept byte-identical pending confirmation of the intended source.
class AverageMeter(object):
  def __init__(self, name, fmt=':f'):
  def reset(self):
  def update(self, val, n=1):
  def __str__(self):
class ProgressMeter(object):
  def __init__(self, num_batches, meters, prefix=""):
  def display(self, batch):
  def _get_batch_fmtstr(self, num_batches):
def accuracy(output, target, topk=(1,)):
def evaluate(dataloader, model, criterion):
  """Evaluate `model` on `dataloader`, returning (top-1, top-5) accuracy.

  Runs on GPU with gradients disabled, records loss/accuracy meters,
  prints progress every 50 batches and a final summary line.

  Args:
    dataloader: yields (images, target) batches.
    model: network to evaluate (moved to CUDA inside the loop).
    criterion: loss used only for reporting.

  Returns:
    Tuple (top1.avg, top5.avg) of average accuracies in percent.
  """
  batch_time = AverageMeter('Time', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  # BUG FIX: previously referenced the undefined name `val_loader` here
  # (the parameter is called `dataloader`), raising NameError on every call.
  progress = ProgressMeter(
      len(dataloader), [batch_time, losses, top1, top5], prefix='Test: ')
  # switch to evaluate mode
  model.eval()
  with torch.no_grad():
    end = time.time()
    for i, (images, target) in enumerate(dataloader):
      model = model.cuda()
      images = images.cuda(non_blocking=True)
      target = target.cuda(non_blocking=True)
      # compute output
      output = model(images)
      loss = criterion(output, target)
      # measure accuracy and record loss
      acc1, acc5 = accuracy(output, target, topk=(1, 5))
      losses.update(loss.item(), images.size(0))
      top1.update(acc1[0], images.size(0))
      top5.update(acc5[0], images.size(0))
      # measure elapsed time
      batch_time.update(time.time() - end)
      end = time.time()
      if i % 50 == 0:
        progress.display(i)
    # TODO: this should also be done with the ProgressMeter
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(
        top1=top1, top5=top5))
  return top1.avg, top5.avg
23,536 | import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
def load_weights(model, model_path):
  """Load a state dict from `model_path` into `model` and return the model."""
  state_dict = torch.load(model_path)
  model.load_state_dict(state_dict)
  return model
23,537 | import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
args, _ = parser.parse_known_args()
def train(train_loader, model, criterion, optimizer, epoch):
  """One supervised training epoch on GPU (CIFAR-10 pruning example).

  Forwards each batch, tracks loss/top-1/top-5 and timing meters, takes one
  optimizer step per batch, and prints progress every 10 batches.

  Args:
    train_loader: yields (images, target) batches.
    model: network to train (moved to CUDA inside the loop).
    criterion: classification loss.
    optimizer: stepped once per batch.
    epoch: epoch index used for the progress prefix.
  """
  batch_time = AverageMeter('Time', ':6.3f')
  data_time = AverageMeter('Data', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  progress = ProgressMeter(
      len(train_loader), [batch_time, data_time, losses, top1, top5],
      prefix="Epoch: [{}]".format(epoch))
  # switch to train mode
  model.train()
  end = time.time()
  for i, (images, target) in enumerate(train_loader):
    # measure data loading time
    data_time.update(time.time() - end)
    model = model.cuda()
    images = images.cuda()
    target = target.cuda()
    # compute output
    output = model(images)
    loss = criterion(output, target)
    # measure accuracy and record loss
    acc1, acc5 = accuracy(output, target, topk=(1, 5))
    losses.update(loss.item(), images.size(0))
    top1.update(acc1[0], images.size(0))
    top5.update(acc5[0], images.size(0))
    # compute gradient and do SGD step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()
    if i % 10 == 0:
      progress.display(i)
def get_dataloader():
  """Build CIFAR-10 train/val loaders plus a small analysis-subset loader.

  Reads data_dir, batch_size, num_workers and ana_subset_len from the
  module-level `args` namespace.

  Returns:
    Tuple (train_loader, val_loader, ana_loader).
  """
  to_normalized_tensor = transforms.Compose([
      transforms.ToTensor(),
      transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
  ])
  train_set = datasets.CIFAR10(
      root=args.data_dir, train=True, download=True,
      transform=to_normalized_tensor)
  train_loader = torch.utils.data.DataLoader(
      train_set,
      batch_size=args.batch_size,
      shuffle=True,
      num_workers=args.num_workers)
  val_set = datasets.CIFAR10(
      root=args.data_dir, train=False, download=True,
      transform=to_normalized_tensor)
  val_loader = torch.utils.data.DataLoader(
      val_set,
      batch_size=args.batch_size,
      shuffle=False,
      num_workers=args.num_workers)
  # Small deterministic slice of the validation set for model analysis.
  ana_subset = torch.utils.data.Subset(val_set,
                                       list(range(args.ana_subset_len)))
  ana_loader = torch.utils.data.DataLoader(
      ana_subset,
      batch_size=args.batch_size,
      shuffle=False,
      num_workers=args.num_workers,
      pin_memory=True)
  return train_loader, val_loader, ana_loader
23,538 | import os
import shutil
import subprocess
import sys
import setuptools.command.develop
import setuptools.command.install
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
from distutils import core
from distutils.core import Distribution
from distutils.errors import DistutilsArgError
INSTALL = False
DEVELOP = False
BDIST = False
EMIT_WARNING = False
CUDA_AVAILABLE = False
HIP_AVAILABLE = False
def check_env_args():
  """Inspect the environment and sys.argv to configure the build.

  Sets module-level flags: CUDA_AVAILABLE / HIP_AVAILABLE from CUDA_HOME /
  ROCM_HOME, and INSTALL / DEVELOP / BDIST / EMIT_WARNING from the
  setup.py command. When no recognized command is given, rewrites
  sys.argv to run 'sdist bdist_wheel' and flags a warning.
  """
  #global INSTALL, DEVELOP, BDIST, EMIT_WARNING, CUDA_AVAILABLE
  global INSTALL, DEVELOP, BDIST, EMIT_WARNING, CUDA_AVAILABLE, HIP_AVAILABLE
  #if torch.cuda.is_available() and "CUDA_HOME" in os.environ:
  # GPU backend detection: CUDA takes precedence over ROCm/HIP.
  if "CUDA_HOME" in os.environ:
    CUDA_AVAILABLE = True
    #MACROS += [("WITH_CUDA", None)]
  elif "ROCM_HOME" in os.environ:
    HIP_AVAILABLE = True
  else:
    CUDA_AVAILABLE = False
    # print("CUDA is not available, or CUDA_HOME not found in the environment "
    #       "so building without GPU support.")
    print("CUDA and (HIP) is not available, or CUDA_HOME (ROCM_HOME) not found in the environment "
          "so building without GPU support."
          "This should be a path which contains include/cuda.h (include/hip/hip_runtime.h)")
    # NOTE(review): the triple-quoted block below is a bare string expression
    # acting as commented-out code; it has no runtime effect.
    '''
    print("CUDA_HOME not found in the environment so building "
          "without GPU support. To build with GPU support "
          "please define the CUDA_HOME environment variable. "
          "This should be a path which contains include/cuda.h")
    '''
  # Map the setup.py command onto the build-mode flags.
  if "install" in sys.argv:
    INSTALL = True
  elif "develop" in sys.argv:
    DEVELOP = True
  elif "bdist_wheel" in sys.argv:
    BDIST = True
  else:
    # No recognized command: default to building sdist + wheel and warn.
    args = []
    args.append(sys.argv[0])
    args += ["sdist", "bdist_wheel"]
    sys.argv = args
    BDIST = True
    EMIT_WARNING = True
23,539 | import os
import shutil
import subprocess
import sys
import setuptools.command.develop
import setuptools.command.install
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
from distutils import core
from distutils.core import Distribution
from distutils.errors import DistutilsArgError
INSTALL = False
def clean_install_info():
  """Best-effort removal of build artifacts left behind by setup.py.

  "nndct_shared" is a symlink when installing, so it is unlinked instead of
  removed as a tree. Any failure is reported but never raised.
  """
  artifacts = (
      "build",
      "dist",
      "pytorch_nndct.egg-info",
      "nndct_shared",
      "pytorch_nndct/nn/kernel",
  )
  try:
    for path in artifacts:
      if not os.path.exists(path):
        continue
      if path == "nndct_shared" and INSTALL:
        os.unlink(path)  # symlink created during install
      else:
        shutil.rmtree(path)
  except Exception:
    print("failed to do the cleaning, please clean up manully")
23,540 | import os
import shutil
import subprocess
import sys
import setuptools.command.develop
import setuptools.command.install
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
from distutils import core
from distutils.core import Distribution
from distutils.errors import DistutilsArgError
INSTALL = False
DEVELOP = False
BDIST = False
CUDA_AVAILABLE = False
HIP_AVAILABLE = False
class install(setuptools.command.install.install):
  """Install command kept as an explicit hook point for customization."""

  def run(self):
    # Delegate to the stock setuptools install behavior.
    super().run()
class develop(setuptools.command.develop.develop):
  """Develop command kept as an explicit hook point for customization."""

  def run(self):
    # Delegate to the stock setuptools develop behavior.
    super().run()
def build_config_setup():
  """Prepare symlinks, requirements, cmdclass, and C++/GPU extensions.

  Reads the module-level INSTALL / DEVELOP / BDIST / CUDA_AVAILABLE /
  HIP_AVAILABLE flags (set by check_env_args) and returns
  (extensions, cmdclass, install_requires) for the setup() call.
  Extensions are only compiled for bdist runs.
  """
  # global INSTALL, DEVELOP, BDIST, CUDA_AVAILABLE
  global INSTALL, DEVELOP, BDIST, CUDA_AVAILABLE, HIP_AVAILABLE
  # Symlink shared packages from the parent directory into this one so they
  # are included in the package; replace the link if it already exists.
  install_packages = ["nndct_shared"]
  for package in install_packages:
    if os.path.exists(package):
      try:
        os.unlink(package)
      except Exception:
        print("failed to do the cleaning, please clean up manully")
      else:
        os.symlink(f"../{package}", package)
    else:
      os.symlink(f"../{package}", package)
  if INSTALL:
    # Record the absolute nn/ path in a generated __init__.py so the
    # installed package can locate its kernel sources.
    if not os.path.exists("pytorch_nndct/nn/kernel"):
      os.mkdir("pytorch_nndct/nn/kernel")
    with open("pytorch_nndct/nn/kernel/__init__.py", 'w') as f:
      cwd = os.path.dirname(os.path.realpath(__file__))
      nn_path = os.path.join(cwd, "pytorch_nndct/nn")
      f.write(f"NN_PATH='{nn_path}'")
  install_requires = []
  if not DEVELOP:
    install_requires += ["scipy<=1.10.1",
                         "numpy<=1.24.2",
                         "tqdm",
                         "ninja"]
  extensions = []
  if not BDIST:
    # install/develop runs: hook the custom commands, build no extensions.
    cmdclass = {"install": install,
                "develop": develop
                }
  else:
    cmdclass = {"build_ext": BuildExtension}
    extra_compile_args = {'cxx': ['-std=c++14', '-fPIC']}
    cwd = os.path.dirname(os.path.realpath(__file__))
    # Common CPU kernel sources and headers, always compiled in.
    cpu_src_path = os.path.join(cwd, "../csrc/cpu")
    source_files = []
    for name in os.listdir(cpu_src_path):
      if name.split(".")[-1] in ["cpp", "cc", "c"]:
        source_files.append(os.path.join(cpu_src_path, name))
    include_dir = [
        os.path.join(cwd, "../include/cpu"),
        os.path.join(cwd, "pytorch_nndct/nn/include")
    ]
    Extension = CppExtension
    if CUDA_AVAILABLE:
      extra_compile_args['nvcc'] = ['-O2','-arch=sm_35']
      cuda_src_path = os.path.join(cwd, "../csrc/cuda")
      for name in os.listdir(cuda_src_path):
        if name.split(".")[-1] in ["cu", "cpp", "cc", "c"]:
          source_files.append(os.path.join(cuda_src_path, name))
      cpp_src_path = os.path.join(cwd, "pytorch_nndct/nn/src/cuda")
      for name in os.listdir(cpp_src_path):
        if name.split(".")[-1] in ["cpp", "cc", "c"]:
          source_files.append(os.path.join(cpp_src_path, name))
      include_dir.append(os.path.join(cwd, "../include/cuda"))
      from torch.utils.cpp_extension import CUDAExtension
      Extension = CUDAExtension
    elif HIP_AVAILABLE:
      # NOTE(review): the 'hipcc' compile-args key and '-arch=sm_35' are
      # CUDA-style settings; confirm they are actually honored for HIP.
      extra_compile_args['hipcc'] = ['-O2','-arch=sm_35']
      # HIP builds pull their sources from the same ../csrc/cuda tree.
      hip_src_path = os.path.join(cwd, "../csrc/cuda")
      for name in os.listdir(hip_src_path):
        if name.split(".")[-1] in ["cu", "cpp", "cc", "c"]:
          source_files.append(os.path.join(hip_src_path, name))
      cpp_src_path = os.path.join(cwd, "pytorch_nndct/nn/src/cuda")
      for name in os.listdir(cpp_src_path):
        if name.split(".")[-1] in ["cpp", "cc", "c"]:
          source_files.append(os.path.join(cpp_src_path, name))
      include_dir.append(os.path.join(cwd, "../include/cuda"))
      # CUDAExtension class has checks for HIP backend
      from torch.utils.cpp_extension import CUDAExtension
      Extension = CUDAExtension
    else:
      # CPU-only build: add the CPU-specific binding sources.
      cpp_src_path = os.path.join(cwd, "pytorch_nndct/nn/src/cpu")
      for name in os.listdir(cpp_src_path):
        if name.split(".")[-1] in ["cpp", "cc", "c"]:
          source_files.append(os.path.join(cpp_src_path, name))
    kernel_ext = Extension(name='pytorch_nndct.nn._kernels',
                           language='c++',
                           sources=source_files,
                           include_dirs=include_dir,
                           extra_compile_args=extra_compile_args)
    extensions.append(kernel_ext)
  return extensions, cmdclass, install_requires
23,541 | import os
import shutil
import subprocess
import sys
import setuptools.command.develop
import setuptools.command.install
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
from distutils import core
from distutils.core import Distribution
from distutils.errors import DistutilsArgError
# Read the base package version; a context manager closes the file handle
# deterministically (the original `open(...).read()` leaked it).
with open("version.txt", "r") as _version_file:
  version = _version_file.read().strip()
def get_version():
  """Append the git commit and torch version to the global `version`.

  Mutates the module-level `version` in place, producing
  "<base>+<sha7>+torch<torch.__version__>" (or "+unknown" when the commit
  cannot be determined), and returns the full commit sha string
  ("unknown" on failure).
  """
  global version
  sha = "unknown"
  # Run git from this file's directory. The original passed a module-level
  # `cwd` that is not defined anywhere visible, so the NameError was
  # swallowed by the broad except and the sha silently stayed "unknown".
  cwd = os.path.dirname(os.path.realpath(__file__))
  try:
    sha = subprocess.check_output(["git", "rev-parse", "HEAD"],
                                  cwd=cwd).decode("ascii").strip()
  except Exception:
    print('Failed to get git commit ID.')
  if sha != "unknown":
    version += "+" + sha[:7]
  else:
    version += "+" + "unknown"
  version += "+" + f"torch{torch.__version__}"
  return sha
23,542 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
def get_mask(matrix, m, n):
  """Keep the n largest-magnitude entries in each length-m row of `matrix`.

  The input is reshaped into rows of m elements via reshape_tensor; within
  each row, entries whose |value| reaches the row's n-th largest magnitude
  (and is nonzero) are kept, all others are zeroed.
  """
  blocks = reshape_tensor(matrix, m)
  magnitude = blocks.abs()
  # The per-row threshold is the minimum of that row's top-n magnitudes,
  # i.e. the n-th largest magnitude in the row.
  top_vals, _ = magnitude.topk(k=n, dim=1, sorted=False)
  threshold = top_vals.min(dim=-1).values.unsqueeze(-1).repeat(1, m)
  zeros = torch.zeros_like(blocks)
  keep = torch.ge(magnitude, threshold) & torch.ne(magnitude, zeros)
  return torch.where(keep, blocks, zeros)
def compute_sparse_tensor(tensor, sparsity, block_size):
  """Zero the smallest-magnitude weights so each block of `block_size`
  entries keeps a (1 - sparsity) fraction of its values.

  Args:
    tensor: 2-D (fully-connected) or 4-D (conv; assumes OIHW layout —
      confirm with callers) weight tensor.
    sparsity: Fraction of entries to zero per block; 0 returns `tensor`
      unchanged.
    block_size: Number of elements per pruning block along the input dim.

  Returns:
    A tensor with the pruned entries set to zero, same shape and dtype as
    `tensor` (or `tensor` itself when sparsity == 0).

  Raises:
    ValueError: if `tensor` is neither 2-D nor 4-D and sparsity != 0.
      (The original code left `mask` unbound on this path and crashed
      with UnboundLocalError at the return.)
  """
  if sparsity == 0:
    return tensor
  shape = tensor.shape
  t = tensor.float().contiguous()
  if len(shape) == 2:
    # Fully-connected weight: rows already run along the input dimension.
    t = t.view(shape[0], shape[1])
    mask = get_mask(t, block_size, int(block_size * (1 - sparsity)))
  elif len(shape) == 4:
    # Conv weight: fold spatial dims and output channels into rows so
    # blocking runs along the input-channel dimension, then restore layout.
    t = t.permute(2, 3, 0,
                  1).contiguous().view(shape[2] * shape[3] * shape[0],
                                       shape[1])
    mask = get_mask(t, block_size, int(block_size * (1 - sparsity)))
    mask = mask.view(shape[2], shape[3], shape[0],
                     shape[1]).permute(2, 3, 0, 1).contiguous()
  else:
    raise ValueError(
        'compute_sparse_tensor expects a 2-D or 4-D tensor, got '
        f'{len(shape)}-D')
  return mask.view(shape).type(tensor.type())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.