code stringlengths 17 6.64M |
|---|
@layer_register(log_shape=False)
def PReLU(x, init=tf.constant_initializer(0.001), name=None):
    """
    Parameterized relu as in `Delving Deep into Rectifiers: Surpassing
    Human-Level Performance on ImageNet Classification
    <http://arxiv.org/abs/1502.01852>`_.

    :param x: any tensor.
    :param init: initializer for the learnable slope. default to 0.001.
    :param name: name of the output tensor. default to 'output'.
    """
    # a single scalar slope shared across all channels
    alpha = tf.get_variable('alpha', [], initializer=init)
    # 0.5 * ((1 + a) * x + (1 - a) * |x|) == x if x > 0, else a * x
    x = (((1 + alpha) * x) + ((1 - alpha) * tf.abs(x)))
    if (name is None):
        name = 'output'
    return tf.mul(x, 0.5, name=name)
|
@layer_register(use_scope=False, log_shape=False)
def LeakyReLU(x, alpha, name=None):
    """
    Leaky relu as in `Rectifier Nonlinearities Improve Neural Network Acoustic
    Models
    <http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`_.

    :param x: any tensor.
    :param alpha: the negative slope.
    """
    out_name = 'output' if name is None else name
    # max(x, alpha*x) equals x for x >= 0 and alpha*x otherwise (0 < alpha < 1)
    return tf.maximum(x, alpha * x, name=out_name)
|
@layer_register(log_shape=False, use_scope=False)
def BNReLU(x, name=None):
    """
    A shorthand of BatchNorm + ReLU.

    :param x: any tensor.
    :param name: name of the output tensor.
    """
    x = BatchNorm('bn', x)
    x = tf.nn.relu(x, name=name)
    return x
|
@memoized
def _log_regularizer(name):
    # memoized so each variable is only logged once per process
    logger.info('Apply regularizer for {}'.format(name))
|
def regularize_cost(regex, func, name=None):
    """
    Apply a regularizer on every trainable variable matching the regex.

    :param regex: a regex to match variable names.
    :param func: a function that takes a tensor and return a scalar.
    :param name: name of the returned summed cost.
    :returns: the summed regularization cost, or 0 if nothing matched.
    """
    graph = tf.get_default_graph()
    costs = []
    for param in graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
        if not re.search(regex, param.name):
            continue
        costs.append(func(param))
        _log_regularizer(param.name)
    if costs:
        return tf.add_n(costs, name=name)
    # no variable matched: return a plain 0 instead of building an op
    return 0
|
@layer_register(log_shape=False, use_scope=False)
def Dropout(x, keep_prob=0.5, is_training=None):
    """
    Dropout layer.

    :param keep_prob: probability that each element is kept.
    :param is_training: if None, will use the current tower context by default.
    """
    if (is_training is None):
        is_training = get_current_tower_context().is_training
    # dropout is a no-op (keep_prob=1.0) at inference time
    keep_prob = tf.constant((keep_prob if is_training else 1.0))
    return tf.nn.dropout(x, keep_prob)
|
@layer_register(use_scope=False, log_shape=False)
def ConcatWith(x, dim, tensor):
    """
    A wrapper around `tf.concat` to support `LinearWrap`.

    :param x: the input tensor
    :param dim: the dimension along which to concatenate
    :param tensor: a tensor, or a list/tuple of tensors, to concatenate with
        x. x will be at the beginning.
    :return: tf.concat(dim, [x] + tensor)
    """
    # isinstance instead of an exact type() comparison: accept list
    # subclasses, and additionally accept tuples of tensors.
    if not isinstance(tensor, (list, tuple)):
        tensor = [tensor]
    return tf.concat(dim, [x] + list(tensor))
|
@layer_register()
def SoftMax(x, use_temperature=False, temperature_init=1.0):
    """
    A SoftMax layer (no linear projection) with optional temperature.

    :param x: a 2D tensor
    :param use_temperature: whether to learn a temperature scale on the logits.
    :param temperature_init: initial value of the temperature.
    """
    if use_temperature:
        # the *inverse* temperature is learned, so initialize with 1/T
        t = tf.get_variable('invtemp', [], initializer=tf.constant_initializer((1.0 / float(temperature_init))))
        x = (x * t)
    return tf.nn.softmax(x, name='output')
|
def global_import(name):
    """
    Import everything (or `__all__` if defined) from the relative submodule
    `name` into this module's globals, and register the names in `__all__`.
    """
    p = __import__(name, globals(), locals(), level=1)
    lst = (p.__all__ if ('__all__' in dir(p)) else dir(p))
    # drop the submodule itself from this namespace; only its members are re-exported
    del globals()[name]
    for k in lst:
        globals()[k] = p.__dict__[k]
        __all__.append(k)
|
@six.add_metaclass(ABCMeta)
class PredictorBase(object):
    """
    Base class for all predictors.

    Available attributes:
        session
        return_input
    """
    def __call__(self, *args):
        """
        if len(args) == 1, assume args[0] is a datapoint (a list)
        else, assume args is a datapoint
        """
        if (len(args) != 1):
            dp = args
        else:
            dp = args[0]
        output = self._do_call(dp)
        if self.return_input:
            # return the (input, output) pair when configured to do so
            return (dp, output)
        else:
            return output
    @abstractmethod
    def _do_call(self, dp):
        """
        :param dp: input datapoint. must have the same length as input_names
        :return: output as defined by the config
        """
|
class AsyncPredictorBase(PredictorBase):
    """ Base class for all async predictors. """
    @abstractmethod
    def put_task(self, dp, callback=None):
        """
        :param dp: A data point (list of component) as inputs.
            (It should be either batched or not batched depending on the predictor implementation)
        :param callback: a thread-safe callback to get called with
            either outputs or (inputs, outputs)
        :return: a Future of results
        """
    @abstractmethod
    def start(self):
        """ Start workers """
    def _do_call(self, dp):
        # synchronous call implemented on top of put_task; requires
        # concurrent.futures semantics, hence Python 3 only
        assert six.PY3, 'With Python2, sync methods not available for async predictor'
        fut = self.put_task(dp)
        # block until the worker fulfills the future
        return fut.result()
|
class OnlinePredictor(PredictorBase):
    """ A predictor which runs (feed, fetch) within a given session. """
    def __init__(self, sess, input_tensors, output_tensors, return_input=False):
        """
        :param sess: a `tf.Session` to run in.
        :param input_tensors: tensors to feed the datapoint components to.
        :param output_tensors: tensors to fetch.
        :param return_input: whether __call__ returns (input, output) pairs.
        """
        self.session = sess
        self.return_input = return_input
        self.input_tensors = input_tensors
        self.output_tensors = output_tensors
    def _do_call(self, dp):
        # one datapoint component per input tensor
        assert (len(dp) == len(self.input_tensors)), '{} != {}'.format(len(dp), len(self.input_tensors))
        feed = dict(zip(self.input_tensors, dp))
        output = self.session.run(self.output_tensors, feed_dict=feed)
        return output
|
class OfflinePredictor(OnlinePredictor):
    """ Build a predictor from a given config, in an independent graph. """
    def __init__(self, config):
        """
        :param config: a `PredictConfig` instance.
        """
        # build in a fresh graph so the predictor is isolated from others
        self.graph = tf.Graph()
        with self.graph.as_default():
            input_placehdrs = config.model.get_input_vars()
            # is_training=False: build the inference version of the graph
            with TowerContext('', False):
                config.model.build_graph(input_placehdrs)
            input_vars = get_tensors_by_names(config.input_names)
            output_vars = get_tensors_by_names(config.output_names)
            sess = tf.Session(config=config.session_config)
            config.session_init.init(sess)
            super(OfflinePredictor, self).__init__(sess, input_vars, output_vars, config.return_input)
|
def build_multi_tower_prediction_graph(build_tower_fn, towers):
    """
    Build one prediction tower per requested device, sharing variables.

    :param build_tower_fn: the function to be called inside each tower, taking tower as the argument
    :param towers: a list of gpu relative id.
    """
    for k in towers:
        logger.info('Building graph for predictor tower {}...'.format(k))
        # negative id means CPU; tower scope is PREDICT_TOWER + id
        with tf.device(('/gpu:{}'.format(k) if (k >= 0) else '/cpu:0')), TowerContext('{}{}'.format(PREDICT_TOWER, k)):
            build_tower_fn(k)
            # all towers after the first reuse the same variables
            tf.get_variable_scope().reuse_variables()
|
class MultiTowerOfflinePredictor(OnlinePredictor):
    """ A multi-tower offline predictor: one shared session, one
        `OnlinePredictor` per tower. """
    def __init__(self, config, towers):
        """
        :param config: a `PredictConfig` instance.
        :param towers: a list of gpu relative id.
        """
        self.graph = tf.Graph()
        self.predictors = []
        with self.graph.as_default():
            # the tower id passed to fn is unused: each tower builds the same graph
            fn = (lambda _: config.model.build_graph(config.model.get_input_vars()))
            build_multi_tower_prediction_graph(fn, towers)
            self.sess = tf.Session(config=config.session_config)
            config.session_init.init(self.sess)
            input_vars = get_tensors_by_names(config.input_names)
            for k in towers:
                # outputs live under each tower's name scope
                output_vars = get_tensors_by_names([('{}{}/'.format(PREDICT_TOWER, k) + n) for n in config.output_names])
                self.predictors.append(OnlinePredictor(self.sess, input_vars, output_vars, config.return_input))
    def _do_call(self, dp):
        # default to the first tower for synchronous calls
        return self.predictors[0]._do_call(dp)
    def get_predictors(self, n):
        """ :returns: n predictors, cycling over the towers round-robin. """
        return [self.predictors[(k % len(self.predictors))] for k in range(n)]
|
class DataParallelOfflinePredictor(OnlinePredictor):
    """ A data-parallel predictor: each tower gets its own placeholders and
        its outputs are concatenated, so one call feeds all towers at once. """
    def __init__(self, config, towers):
        """
        :param config: a `PredictConfig` instance.
        :param towers: a list of gpu relative id.
        """
        self.graph = tf.Graph()
        with self.graph.as_default():
            sess = tf.Session(config=config.session_config)
            input_var_names = []
            output_vars = []
            for k in towers:
                towername = (PREDICT_TOWER + str(k))
                # per-tower placeholders, disambiguated by a name prefix
                input_vars = config.model.build_placeholders(prefix=(towername + '-'))
                logger.info('Building graph for predictor tower {}...'.format(k))
                with tf.device(('/gpu:{}'.format(k) if (k >= 0) else '/cpu:0')), TowerContext(towername, is_training=False):
                    config.model.build_graph(input_vars)
                    # subsequent towers reuse the first tower's variables
                    tf.get_variable_scope().reuse_variables()
                input_var_names.extend([k.name for k in input_vars])
                output_vars.extend(get_tensors_by_names([((towername + '/') + n) for n in config.output_names]))
            input_vars = get_tensors_by_names(input_var_names)
            config.session_init.init(sess)
            super(DataParallelOfflinePredictor, self).__init__(sess, input_vars, output_vars, config.return_input)
|
class PredictConfig(object):
    """ Config used to build a predictor. """
    def __init__(self, **kwargs):
        """
        The config used by `get_predict_func`.

        :param session_init: a `utils.sessinit.SessionInit` instance to
            initialize variables of a session.
        :param model: a `ModelDesc` instance
        :param input_names: a list of input variable names.
        :param output_names: a list of names of the output tensors to predict, the
            variables can be any computable tensor in the graph.
            Predict specific output might not require all input variables.
        :param return_input: whether to return (input, output) pair or just output. default to False.
        """
        def assert_type(v, tp):
            assert isinstance(v, tp), v.__class__
        self.session_config = kwargs.pop('session_config', get_default_sess_config(0.4))
        self.session_init = kwargs.pop('session_init', JustCurrentSession())
        assert_type(self.session_init, SessionInit)
        self.model = kwargs.pop('model')
        assert_type(self.model, ModelDesc)
        self.input_names = kwargs.pop('input_names', None)
        if (self.input_names is None):
            # deprecated key 'input_var_names' is still accepted
            self.input_names = kwargs.pop('input_var_names', None)
        if (self.input_names is None):
            # default to all inputs declared by the model
            raw_vars = self.model.get_input_vars_desc()
            self.input_names = [k.name for k in raw_vars]
        self.output_names = kwargs.pop('output_names', None)
        if (self.output_names is None):
            # deprecated key 'output_var_names' is still accepted; here it is
            # mandatory (no default) since outputs cannot be inferred
            self.output_names = kwargs.pop('output_var_names')
        assert len(self.input_names), self.input_names
        for v in self.input_names:
            assert_type(v, six.string_types)
        assert len(self.output_names), self.output_names
        self.return_input = kwargs.pop('return_input', False)
        # everything must have been consumed; reject typos in kwargs
        assert (len(kwargs) == 0), 'Unknown arguments: {}'.format(str(kwargs.keys()))
|
def get_predict_func(config):
    """
    Produce a offline predictor run inside a new session.

    :param config: a `PredictConfig` instance.
    :returns: A callable predictor that takes a list of input values, and return
        a list of output values defined in ``config.output_var_names``.
    """
    return OfflinePredictor(config)
|
class MultiProcessPredictWorker(multiprocessing.Process):
    """ Base class for predict worker that runs offline in multiprocess. """
    def __init__(self, idx, config):
        """
        :param idx: index of the worker. the 0th worker will print log.
        :param config: a `PredictConfig`
        """
        super(MultiProcessPredictWorker, self).__init__()
        self.idx = idx
        self.config = config
    def _init_runtime(self):
        """ Call _init_runtime under different CUDA_VISIBLE_DEVICES, you'll
            have workers that run on multiGPUs
        """
        if (self.idx != 0):
            # only the first worker keeps layer logging enabled,
            # so the model description is printed once
            from tensorpack.models._common import disable_layer_logging
            disable_layer_logging()
        self.predictor = OfflinePredictor(self.config)
        # (removed a stray unused `import sys` here)
        if (self.idx == 0):
            with self.predictor.graph.as_default():
                describe_model()
|
class MultiProcessQueuePredictWorker(MultiProcessPredictWorker):
    """ An offline predictor worker that takes input and produces output by queue. """
    def __init__(self, idx, inqueue, outqueue, config):
        """
        :param inqueue: input queue to get data point. elements are (task_id, dp)
        :param outqueue: output queue put result. elements are (task_id, output)
        """
        super(MultiProcessQueuePredictWorker, self).__init__(idx, config)
        self.inqueue = inqueue
        self.outqueue = outqueue
        assert isinstance(self.inqueue, multiprocessing.queues.Queue)
        assert isinstance(self.outqueue, multiprocessing.queues.Queue)
    def run(self):
        # build the predictor inside the child process, after fork
        self._init_runtime()
        while True:
            (tid, dp) = self.inqueue.get()
            if (tid == DIE):
                # sentinel: acknowledge and exit
                self.outqueue.put((DIE, None))
                return
            else:
                self.outqueue.put((tid, self.predictor(dp)))
|
class PredictorWorkerThread(threading.Thread):
    """ A daemon thread that pulls (input, Future) pairs from a queue,
        batches them, runs the predict function and fulfills the futures. """
    def __init__(self, queue, pred_func, id, batch_size=5):
        """
        :param queue: queue of (dp, Future) pairs to consume.
        :param pred_func: a batched predict function.
        :param id: worker id, for bookkeeping.
        :param batch_size: maximum batch size to assemble.
        """
        super(PredictorWorkerThread, self).__init__()
        self.queue = queue
        self.func = pred_func
        self.daemon = True
        self.batch_size = batch_size
        self.id = id
    def run(self):
        while True:
            (batched, futures) = self.fetch_batch()
            outputs = self.func(batched)
            # outputs is a list of batched arrays; slice out each instance's
            # row and hand it to the corresponding future
            for (idx, f) in enumerate(futures):
                f.set_result([k[idx] for k in outputs])
    def fetch_batch(self):
        """ Fetch a batch of data without waiting """
        # block for the first item, then greedily take whatever else is
        # already queued, up to batch_size
        (inp, f) = self.queue.get()
        nr_input_var = len(inp)
        (batched, futures) = ([[] for _ in range(nr_input_var)], [])
        for k in range(nr_input_var):
            batched[k].append(inp[k])
        futures.append(f)
        cnt = 1
        while (cnt < self.batch_size):
            try:
                (inp, f) = self.queue.get_nowait()
                for k in range(nr_input_var):
                    batched[k].append(inp[k])
                futures.append(f)
            except queue.Empty:
                # nothing else ready: run with what we have
                break
            cnt += 1
        return (batched, futures)
|
class MultiThreadAsyncPredictor(AsyncPredictorBase):
    """
    An multithread online async predictor which run a list of PredictorBase.
    It would do an extra batching internally.
    """
    def __init__(self, predictors, batch_size=5):
        """ :param predictors: a list of OnlinePredictor """
        assert len(predictors)
        for k in predictors:
            # (dp, output) pairs can't be batched, so return_input must be off
            assert (k.return_input == False)
        self.input_queue = queue.Queue(maxsize=(len(predictors) * 100))
        self.threads = [PredictorWorkerThread(self.input_queue, f, id, batch_size=batch_size) for (id, f) in enumerate(predictors)]
        if six.PY2:
            # tornado futures are used under PY2; ensure its logging is set up
            import tornado.options as options
            options.parse_command_line(['--logging=debug'])
    def start(self):
        for t in self.threads:
            t.start()
    def run(self):
        # backward-compatible alias of start()
        self.start()
    def put_task(self, dp, callback=None):
        """
        dp must be non-batched, i.e. single instance
        """
        f = Future()
        if (callback is not None):
            f.add_done_callback(callback)
        self.input_queue.put((dp, f))
        return f
|
@six.add_metaclass(ABCMeta)
class DatasetPredictorBase(object):
    """ Base class for running a predictor over a whole DataFlow. """
    def __init__(self, config, dataset):
        """
        :param config: a `PredictConfig` instance.
        :param dataset: a `DataFlow` instance.
        """
        assert isinstance(dataset, DataFlow)
        assert isinstance(config, PredictConfig)
        self.config = config
        self.dataset = dataset
    @abstractmethod
    def get_result(self):
        """ A generator function, produce output for each input in dataset """
        pass
    def get_all_result(self):
        """
        Run over the dataset and return a list of all predictions.
        """
        return list(self.get_result())
|
class SimpleDatasetPredictor(DatasetPredictorBase):
    """
    Run the predict_config on a given `DataFlow`, sequentially in one process.
    """
    def __init__(self, config, dataset):
        super(SimpleDatasetPredictor, self).__init__(config, dataset)
        self.predictor = OfflinePredictor(config)
    def get_result(self):
        """ A generator to produce prediction for each data """
        self.dataset.reset_state()
        try:
            sz = self.dataset.size()
        except NotImplementedError:
            # unknown size: disable the progress bar total
            sz = 0
        with get_tqdm(total=sz, disable=(sz == 0)) as pbar:
            for dp in self.dataset.get_data():
                res = self.predictor(dp)
                (yield res)
                pbar.update()
|
class MultiProcessDatasetPredictor(DatasetPredictorBase):
    """ Run a predictor over a DataFlow with multiple worker processes. """
    def __init__(self, config, dataset, nr_proc, use_gpu=True, ordered=True):
        """
        Run prediction in multiprocesses, on either CPU or GPU. Mix mode not supported.

        :param nr_proc: number of processes to use
        :param use_gpu: use GPU or CPU.
            If GPU, then nr_proc cannot be more than what's in CUDA_VISIBLE_DEVICES
        :param ordered: produce results with the original order of the
            dataflow. a bit slower.
        """
        if config.return_input:
            logger.warn('Using the option `return_input` in MultiProcessDatasetPredictor might be slow')
        assert (nr_proc > 1), nr_proc
        super(MultiProcessDatasetPredictor, self).__init__(config, dataset)
        self.nr_proc = nr_proc
        self.ordered = ordered
        # a process feeding datapoints into a shared queue, consumed by workers
        (self.inqueue, self.inqueue_proc) = dataflow_to_process_queue(self.dataset, (nr_proc * 2), self.nr_proc)
        if use_gpu:
            try:
                gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
                assert (len(gpus) >= self.nr_proc), 'nr_proc={} while only {} gpus available'.format(self.nr_proc, len(gpus))
            except KeyError:
                # CUDA_VISIBLE_DEVICES unset: assume gpu ids 0..nr_proc-1
                gpus = list(range(self.nr_proc))
        else:
            # '-1' makes CUDA see no devices, i.e. CPU mode
            gpus = (['-1'] * self.nr_proc)
        self.outqueue = multiprocessing.Queue()
        self.workers = [MultiProcessQueuePredictWorker(i, self.inqueue, self.outqueue, self.config) for i in range(self.nr_proc)]
        self.inqueue_proc.start()
        for (p, gpuid) in zip(self.workers, gpus):
            if (gpuid == '-1'):
                logger.info('Worker {} uses CPU'.format(p.idx))
            else:
                logger.info('Worker {} uses GPU {}'.format(p.idx, gpuid))
            # the child inherits the restricted CUDA_VISIBLE_DEVICES at fork
            with change_gpu(gpuid):
                p.start()
        if ordered:
            # extra process that reorders results back to dataflow order
            self.result_queue = OrderedResultGatherProc(self.outqueue, nr_producer=self.nr_proc)
            self.result_queue.start()
            ensure_proc_terminate(self.result_queue)
        else:
            self.result_queue = self.outqueue
        ensure_proc_terminate((self.workers + [self.inqueue_proc]))
    def get_result(self):
        try:
            sz = self.dataset.size()
        except NotImplementedError:
            sz = 0
        with get_tqdm(total=sz, disable=(sz == 0)) as pbar:
            die_cnt = 0
            while True:
                res = self.result_queue.get()
                pbar.update()
                if (res[0] != DIE):
                    (yield res[1])
                else:
                    # count DIE sentinels: done when every worker finished
                    die_cnt += 1
                    if (die_cnt == self.nr_proc):
                        break
        # tear everything down once all results are consumed
        self.inqueue_proc.join()
        self.inqueue_proc.terminate()
        if self.ordered:
            self.result_queue.join()
            self.result_queue.terminate()
        for p in self.workers:
            p.join()
            p.terminate()
|
def _global_import(name):
    """
    Import everything (or `__all__` if defined) from the relative submodule
    `name` into this module's globals, and register the names in `__all__`.
    """
    p = __import__(name, globals(), None, level=1)
    lst = (p.__all__ if ('__all__' in dir(p)) else dir(p))
    for k in lst:
        globals()[k] = p.__dict__[k]
        __all__.append(k)
|
@contextmanager
def argscope(layers, **param):
    """
    Context to overwrite default arguments of registered layers.

    :param layers: a registered layer function, or a list of them.
    :param param: keyword arguments to use as new defaults. Each must be an
        actual argument of every layer in `layers`.
    """
    if (not isinstance(layers, list)):
        layers = [layers]
    def _check_args_exist(l):
        args = inspect.getargspec(l).args
        for (k, v) in six.iteritems(param):
            assert (k in args), 'No argument {} in {}'.format(k, l.__name__)
    for l in layers:
        # registered layers carry the original function in attribute 'f'
        assert hasattr(l, 'f'), '{} is not a registered layer'.format(l.__name__)
        _check_args_exist(l.f)
    new_scope = copy.copy(get_arg_scope())
    for l in layers:
        new_scope[l.__name__].update(param)
    _ArgScopeStack.append(new_scope)
    # bugfix: pop in a finally block, so an exception raised inside the
    # `with argscope(...)` body cannot leave a stale scope on the stack
    try:
        yield
    finally:
        del _ArgScopeStack[(- 1)]
|
def get_arg_scope():
    """
    :returns: the current argscope.

    An argscope is a dict of dict: dict[layername] = {arg: val}
    """
    # fall back to an empty scope when no argscope context is active
    return _ArgScopeStack[-1] if _ArgScopeStack else defaultdict(dict)
|
def get_default_sess_config(mem_fraction=0.99):
    """
    Return a better session config to use as default.
    Tensorflow default session config consume too much resources.

    :param mem_fraction: fraction of memory to use. default to 0.99
    :returns: a `tf.ConfigProto` object.
    """
    conf = tf.ConfigProto()
    conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction
    conf.gpu_options.allocator_type = 'BFC'
    # start small and grow, instead of grabbing mem_fraction upfront
    conf.gpu_options.allow_growth = True
    # fall back to CPU for ops without a GPU kernel
    conf.allow_soft_placement = True
    return conf
|
def get_global_step_var():
    """ :returns: the global_step variable in the current graph. create if not existed """
    try:
        return tf.get_default_graph().get_tensor_by_name(GLOBAL_STEP_VAR_NAME)
    except KeyError:
        # not created yet: create it at the root scope
        scope = tf.get_variable_scope()
        assert (scope.name == ''), 'Creating global_step_var under a variable scope would cause problems!'
        with tf.variable_scope(scope, reuse=False):
            var = tf.get_variable(GLOBAL_STEP_OP_NAME, shape=[], initializer=tf.constant_initializer(dtype=tf.int32), trainable=False, dtype=tf.int32)
        return var
|
def get_global_step():
    """ :returns: global_step value in current graph and session """
    return tf.train.global_step(tf.get_default_session(), get_global_step_var())
|
def get_op_tensor_name(name):
    """
    Tensor name is assumed to be ``op_name + ':0'``.

    :param name: an op or a tensor name
    :returns: (op_name, tensor_name)
    """
    # a tensor name carries the ':0' output-slot suffix; an op name does not
    if name.endswith(':0'):
        return name[:-2], name
    return name, name + ':0'
|
def get_tensors_by_names(names):
    """
    Get a list of tensors in the default graph by a list of names.
    """
    graph = tf.get_default_graph()
    tensors = []
    for name in names:
        # normalize op names to tensor names before the lookup
        (_, tensor_name) = get_op_var_name(name)
        tensors.append(graph.get_tensor_by_name(tensor_name))
    return tensors
|
def backup_collection(keys):
    """
    :param keys: a list of collection keys.
    :returns: a dict mapping each key to a copy of its current collection.
    """
    # copy the lists so later mutation of the graph collections
    # does not affect the backup
    return {key: copy(tf.get_collection(key)) for key in keys}
|
def restore_collection(backup):
    """ Restore collections from a backup produced by `backup_collection`. """
    for (k, v) in six.iteritems(backup):
        # clear in place, then refill, to keep the same list object
        del tf.get_collection_ref(k)[:]
        tf.get_collection_ref(k).extend(v)
|
def clear_collection(keys):
    """ Empty the graph collections under the given keys, in place. """
    for k in keys:
        del tf.get_collection_ref(k)[:]
|
@contextmanager
def freeze_collection(keys):
    """ Context under which changes to the given collections are discarded. """
    backup = backup_collection(keys)
    (yield)
    restore_collection(backup)
|
def get_tf_version():
    """ :returns: the minor version of TensorFlow as an int (e.g. 12 for '0.12.1').
        NOTE: only meaningful for 0.x/1.x-style version strings. """
    return int(tf.__version__.split('.')[1])
|
def apply_grad_processors(grads, gradprocs):
    """
    :param grads: list of (grad, var).
    :param gradprocs: list of `GradientProcessor` instances.
    :returns: list of (grad, var) went through the processors
    """
    # drop entries without a gradient, warning for each dropped variable
    filtered = []
    for (grad, var) in grads:
        if grad is None:
            logger.warn('No Gradient w.r.t {}'.format(var.op.name))
            continue
        filtered.append((grad, var))
    # processors run in order, each consuming the previous one's output
    for proc in gradprocs:
        filtered = proc.process(filtered)
    return filtered
|
@six.add_metaclass(ABCMeta)
class GradientProcessor(object):
    """ Base class for symbolic gradient processors. """
    def process(self, grads):
        """
        Process the symbolic gradients.

        :param grads: list of (grad, var)
        :returns: symbolic gradients with the same type as input
        """
        # group the processor's ops under a scope named after the subclass
        with tf.name_scope(type(self).__name__):
            return self._process(grads)
    @abstractmethod
    def _process(self, grads):
        pass
|
class GlobalNormClip(GradientProcessor):
    """ Clip gradients by their global norm. """
    def __init__(self, global_norm):
        """ Clip by global norm
            Note that the global norm is the sum of norm for **all** gradients
        """
        self._norm = global_norm
    def _process(self, grads):
        # separate gradients from their variables, clip, then re-pair
        gradients = [pair[0] for pair in grads]
        variables = [pair[1] for pair in grads]
        (clipped, _) = tf.clip_by_global_norm(gradients, self._norm, name='clip_by_global_norm')
        return list(zip(clipped, variables))
|
class MapGradient(GradientProcessor):
    """
    Apply a function on all gradient if the name matches regex.
    Keep the other gradients unchanged.
    """
    def __init__(self, func, regex='.*'):
        """
        :param func: takes a grad or (grad, var) pair and returns a grad. If return None, the
            gradient is discarded.
        :param regex: used to match variables. default to match all variables.
        """
        args = inspect.getargspec(func).args
        # bound methods implicitly carry `self`, which doesn't count
        arg_num = (len(args) - inspect.ismethod(func))
        assert (arg_num in [1, 2]), 'The function must take 1 or 2 arguments! ({})'.format(args)
        if (arg_num == 1):
            # normalize 1-arg funcs to the (grad, var) signature
            self.func = (lambda grad, var: func(grad))
        else:
            self.func = func
        # anchor the regex so it must match the full variable name
        if (not regex.endswith('$')):
            regex = (regex + '$')
        self.regex = regex
    def _process(self, grads):
        ret = []
        for (grad, var) in grads:
            if re.match(self.regex, var.op.name):
                grad = self.func(grad, var)
                # a None result discards this gradient entirely
                if (grad is not None):
                    ret.append((grad, var))
            else:
                ret.append((grad, var))
        return ret
|
class SummaryGradient(MapGradient):
    """
    Summary histogram and RMS for each gradient variable.
    """
    def __init__(self):
        super(SummaryGradient, self).__init__(self._mapper)
    def _mapper(self, grad, var):
        name = var.op.name
        # module-level set guards against duplicate summaries (multi-tower)
        if (name not in _summaried_gradient):
            _summaried_gradient.add(name)
            tf.summary.histogram((name + '-grad'), grad)
            add_moving_summary(rms(grad, name=(name + '/rms')))
        # gradient itself passes through unchanged
        return grad
|
class CheckGradient(MapGradient):
    """
    Check for numeric issue (NaN/Inf) in every gradient.
    """
    def __init__(self):
        super(CheckGradient, self).__init__(self._mapper)
    def _mapper(self, grad, var):
        # raises at runtime if grad contains NaN or Inf
        grad = tf.check_numerics(grad, ('CheckGradient-' + var.op.name))
        return grad
|
class ScaleGradient(MapGradient):
    """
    Scale certain gradient by a multiplier.
    """
    def __init__(self, multipliers, log=True):
        """
        :param multipliers: a (regex, float) pair, or a list of such pairs
        :param log: whether to do logging or not
        """
        if (not isinstance(multipliers, list)):
            multipliers = [multipliers]
        self.multipliers = multipliers
        self._log = log
        super(ScaleGradient, self).__init__(self._mapper)
    def _mapper(self, grad, var):
        varname = var.op.name
        for (regex, val) in self.multipliers:
            # anchor the regex so it must match the full variable name
            if (not regex.endswith('$')):
                regex = (regex + '$')
            if re.match(regex, varname):
                if self._log:
                    logger.info('Apply lr multiplier {} for {}'.format(val, varname))
                if (val != 0):
                    return (grad * val)
                else:
                    # multiplier 0 discards the gradient (MapGradient drops None)
                    return None
        # first matching rule wins; unmatched gradients pass through
        return grad
|
def describe_model():
    """ print a description of the current model parameters """
    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    msg = ['']
    total = 0
    for v in train_vars:
        shape = v.get_shape()
        ele = shape.num_elements()
        total += ele
        msg.append('{}: shape={}, dim={}'.format(v.name, shape.as_list(), ele))
    # 4 bytes per element, assuming float32
    size_mb = ((total * 4) / (1024.0 ** 2))
    msg.append(colored('Total param={} ({:01f} MB assuming all float32)'.format(total, size_mb), 'cyan'))
    logger.info((colored('Model Parameters: ', 'cyan') + '\n'.join(msg)))
|
def get_shape_str(tensors):
    """
    :param tensors: a tensor or a list of tensors
    :returns: a string to describe the shape
    """
    if isinstance(tensors, (list, tuple)):
        for t in tensors:
            assert isinstance(t, (tf.Tensor, tf.Variable)), 'Not a tensor: {}'.format(type(t))
        # e.g. "[None, 32],[None, 10]"
        return ','.join(str(t.get_shape().as_list()) for t in tensors)
    assert isinstance(tensors, (tf.Tensor, tf.Variable)), 'Not a tensor: {}'.format(type(tensors))
    return str(tensors.get_shape().as_list())
|
@six.add_metaclass(ABCMeta)
class SessionInit(object):
    """ Base class for utilities to initialize a session """
    def init(self, sess):
        """ Initialize a session

        :param sess: a `tf.Session`
        """
        self._init(sess)
    @abstractmethod
    def _init(self, sess):
        pass
|
class JustCurrentSession(SessionInit):
    """ Just use the current default session. This is a no-op placeholder """
    def _init(self, sess):
        pass
|
class NewSession(SessionInit):
    """
    Create a new session. All variables will be initialized by their
    initializer.
    """
    def _init(self, sess):
        sess.run(tf.initialize_all_variables())
|
class SaverRestore(SessionInit):
    """
    Restore an old model saved by `ModelSaver`.
    """
    def __init__(self, model_path, prefix=None):
        """
        :param model_path: a model name (model-xxxx) or a ``checkpoint`` file.
        :param prefix: add a `prefix/` for every variable in this checkpoint
        """
        if (os.path.basename(model_path) == model_path):
            # a bare filename: anchor it to the current directory
            model_path = os.path.join('.', model_path)
        if (os.path.basename(model_path) == 'checkpoint'):
            # a "checkpoint" meta file: resolve it to the latest checkpoint
            model_path = tf.train.latest_checkpoint(os.path.dirname(model_path))
        # V2 checkpoints only have a "<path>.index" file on disk
        assert (os.path.isfile(model_path) or os.path.isfile((model_path + '.index'))), model_path
        self.set_path(model_path)
        self.prefix = prefix
    def _init(self, sess):
        logger.info('Restoring checkpoint from {} ...'.format(self.path))
        chkpt_vars = SaverRestore._read_checkpoint_vars(self.path)
        vars_map = self._get_vars_to_restore_multimap(chkpt_vars)
        for dic in SaverRestore._produce_restore_dict(vars_map):
            try:
                saver = tf.train.Saver(var_list=dic, name=str(id(dic)), write_version=2)
            except TypeError:
                # older TF versions don't accept the write_version kwarg
                # (was a bare `except:`, narrowed to the actual failure mode)
                saver = tf.train.Saver(var_list=dic, name=str(id(dic)))
            saver.restore(sess, self.path)
    def set_path(self, model_path):
        self.path = model_path
    @staticmethod
    def _produce_restore_dict(vars_multimap):
        """
        Produce {var_name: var} dict that can be used by `tf.train.Saver`, from a {var_name: [vars]} dict.
        """
        # a Saver can hold each name only once, so when a checkpoint name maps
        # to several variables, yield one dict per "layer" of the multimap
        while len(vars_multimap):
            ret = {}
            for k in list(vars_multimap.keys()):
                v = vars_multimap[k]
                ret[k] = v[(- 1)]
                del v[(- 1)]
                if (not len(v)):
                    del vars_multimap[k]
            (yield ret)
    @staticmethod
    def _read_checkpoint_vars(model_path):
        """ :returns: a set of variable names stored in the checkpoint """
        reader = tf.train.NewCheckpointReader(model_path)
        ckpt_vars = reader.get_variable_to_shape_map().keys()
        for v in ckpt_vars:
            if v.startswith(PREDICT_TOWER):
                # bugfix: v is a string here, not a Variable -- log v, not v.name
                logger.error("Found {} in checkpoint. But anything from prediction tower shouldn't be saved.".format(v))
        return set(ckpt_vars)
    def _get_vars_to_restore_multimap(self, vars_available):
        """
        :param vars_available: variable names available in the checkpoint, for existence checking
        :returns: a dict of {var_name: [var, var]} to restore
        """
        try:
            vars_to_restore = tf.global_variables()
        except AttributeError:
            # fall back for older TF versions
            vars_to_restore = tf.all_variables()
        var_dict = defaultdict(list)
        chkpt_vars_used = set()
        for v in vars_to_restore:
            name = get_savename_from_varname(v.name, varname_prefix=self.prefix)
            if (name in vars_available):
                var_dict[name].append(v)
                chkpt_vars_used.add(name)
            elif name.endswith(':0'):
                # retry with the ':0' tensor suffix stripped
                name = name[:(- 2)]
                if (name in vars_available):
                    var_dict[name].append(v)
                    chkpt_vars_used.add(name)
            elif (not is_training_name(v.op.name)):
                logger.warn('Variable {} in the graph not found in checkpoint!'.format(v.op.name))
        if (len(chkpt_vars_used) < len(vars_available)):
            unused = (vars_available - chkpt_vars_used)
            for name in unused:
                if (not is_training_name(name)):
                    logger.warn('Variable {} in checkpoint not found in the graph!'.format(name))
        return var_dict
|
class ParamRestore(SessionInit):
    """
    Restore variables from a dictionary.
    """
    def __init__(self, param_dict):
        """
        :param param_dict: a dict of {name: value}
        """
        # normalize all keys to tensor names (with the ':0' suffix)
        self.prms = {get_op_var_name(n)[1]: v for (n, v) in six.iteritems(param_dict)}
    def _init(self, sess):
        variables = tf.get_collection(tf.GraphKeys().VARIABLES)
        variable_names = set([get_savename_from_varname(k.name) for k in variables])
        param_names = set(six.iterkeys(self.prms))
        # only names present both in the graph and in the dict are restored
        intersect = (variable_names & param_names)
        logger.info('Params to restore: {}'.format(', '.join(map(str, intersect))))
        for k in (variable_names - param_names):
            if (not is_training_name(k)):
                logger.warn('Variable {} in the graph not found in the dict!'.format(k))
        for k in (param_names - variable_names):
            logger.warn('Variable {} in the dict not found in the graph!'.format(k))
        upd = SessionUpdate(sess, [v for v in variables if (get_savename_from_varname(v.name) in intersect)])
        logger.info('Restoring from dict ...')
        upd.update({name: value for (name, value) in six.iteritems(self.prms) if (name in intersect)})
|
class ChainInit(SessionInit):
    """ Init a session by a list of SessionInit instance. """
    def __init__(self, sess_inits, new_session=True):
        """
        :param sess_inits: list of `SessionInit` instances.
        :param new_session: add a `NewSession()` at the beginning, if not there
        """
        if (new_session and (not isinstance(sess_inits[0], NewSession))):
            sess_inits.insert(0, NewSession())
        self.inits = sess_inits
    def _init(self, sess):
        # run the initializers in order; later ones may overwrite earlier ones
        for i in self.inits:
            i.init(sess)
|
def get_model_loader(filename):
    """
    Get a corresponding model loader by looking at the file name.

    :return: either a ParamRestore or SaverRestore
    """
    if filename.endswith('.npy'):
        assert os.path.isfile(filename), filename
        # a .npy file holds a pickled {name: value} dict
        return ParamRestore(np.load(filename, encoding='latin1').item())
    else:
        return SaverRestore(filename)
|
def create_summary(name, v):
    """
    Return a tf.Summary object with name and simple scalar value v.
    """
    assert isinstance(name, six.string_types), type(name)
    v = float(v)
    s = tf.Summary()
    s.value.add(tag=name, simple_value=v)
    return s
|
def add_activation_summary(x, name=None):
    """
    Add summary to graph for an activation tensor x.
    If name is None, use x.name.
    """
    ctx = get_current_tower_context()
    # only the main training tower produces summaries
    if ((ctx is not None) and (not ctx.is_main_training_tower)):
        return
    ndim = x.get_shape().ndims
    assert (ndim >= 2), 'Summary a scalar with histogram? Maybe use scalar instead. FIXME!'
    if (name is None):
        name = x.name
    with tf.name_scope('activation-summary'):
        tf.summary.histogram(name, x)
        tf.summary.scalar((name + '-sparsity'), tf.nn.zero_fraction(x))
        tf.summary.scalar((name + '-rms'), rms(x))
|
def add_param_summary(summary_lists):
    """
    Add summary for all trainable variables matching the regex

    :param summary_lists: list of (regex, [list of summary type to perform]).
        Type can be 'mean', 'scalar', 'histogram', 'sparsity', 'rms'
    """
    ctx = get_current_tower_context()
    # only the main training tower produces summaries
    if ((ctx is not None) and (not ctx.is_main_training_tower)):
        return
    def perform(var, action):
        # dispatch one summary `action` on `var`
        ndim = var.get_shape().ndims
        name = var.name.replace(':0', '')
        if (action == 'scalar'):
            assert (ndim == 0), "Scalar summary on high-dimension data. Maybe you want 'mean'?"
            tf.summary.scalar(name, var)
            return
        assert (ndim > 0), 'Cannot perform {} summary on scalar data'.format(action)
        if (action == 'histogram'):
            tf.summary.histogram(name, var)
            return
        if (action == 'sparsity'):
            tf.summary.scalar((name + '-sparsity'), tf.nn.zero_fraction(var))
            return
        if (action == 'mean'):
            tf.summary.scalar((name + '-mean'), tf.reduce_mean(var))
            return
        if (action == 'rms'):
            tf.summary.scalar((name + '-rms'), rms(var))
            return
        raise RuntimeError('Unknown summary type: {}'.format(action))
    params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    with tf.name_scope('param-summary'):
        for p in params:
            name = p.name
            for (rgx, actions) in summary_lists:
                # anchor the regex; allow an optional ':0' tensor suffix
                if (not rgx.endswith('$')):
                    rgx = (rgx + '(:0)?$')
                if re.match(rgx, name):
                    for act in actions:
                        perform(p, act)
|
def add_moving_summary(v, *args):
    """
    :param v: scalar tensor or list of scalar tensors to summary
    :param args: additional scalar tensors to summary
    """
    ctx = get_current_tower_context()
    # only the main training tower produces summaries
    if ((ctx is not None) and (not ctx.is_main_training_tower)):
        return
    if (not isinstance(v, list)):
        v = [v]
    v.extend(args)
    for x in v:
        # moving-average summaries only make sense for scalars
        assert (x.get_shape().ndims == 0), x.get_shape()
        tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, x)
|
@memoized
def summary_moving_average(tensors=None):
    """
    Create a MovingAverage op and add summary for tensors

    :param tensors: list of tf.Tensor to summary. default to the collection MOVING_SUMMARY_VARS_KEY
    :returns: a op to maintain these average.
    """
    if (tensors is None):
        tensors = tf.get_collection(MOVING_SUMMARY_VARS_KEY)
    # name_scope(None): create the EMA ops at the graph root
    with tf.name_scope(None):
        averager = tf.train.ExponentialMovingAverage(0.95, num_updates=get_global_step_var(), name='EMA')
        avg_maintain_op = averager.apply(tensors)
        for (idx, c) in enumerate(tensors):
            # strip the tower prefix so summaries from all towers share names
            name = re.sub('tower[p0-9]+/', '', c.op.name)
            tf.summary.scalar((name + '-summary'), averager.average(c))
    return avg_maintain_op
|
def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
    """Indicator vector for wrong top-k predictions.

    :param logits: NxC tensor of per-class scores.
    :param label: length-N tensor of ground-truth class ids.
    :returns: a float32 vector of length N; 1 marks an incorrect prediction.
    """
    correct = tf.nn.in_top_k(logits, label, topk)
    return tf.cast(tf.logical_not(correct), tf.float32, name=name)
|
def flatten(x):
    """Reshape the tensor to rank 1."""
    return tf.reshape(x, [-1])
|
def batch_flatten(x):
    """Flatten all dimensions of the tensor except the first (batch) one."""
    feature_shape = x.get_shape().as_list()[1:]
    if None not in feature_shape:
        # All non-batch dims are statically known: compute the size directly.
        return tf.reshape(x, [-1, int(np.prod(feature_shape))])
    # Otherwise fall back to a dynamic reshape.
    return tf.reshape(x, tf.pack([tf.shape(x)[0], -1]))
|
def class_balanced_cross_entropy(pred, label, name='cross_entropy_loss'):
    """The class-balanced cross entropy loss, as in `Holistically-Nested
    Edge Detection <http://arxiv.org/abs/1504.06375>`_.

    :param pred: size: b x ANYTHING. the predictions in [0,1].
    :param label: size: b x ANYTHING. the ground truth in {0,1}.
    :returns: class-balanced cross entropy loss.
    """
    prob = batch_flatten(pred)
    gt = tf.cast(batch_flatten(label), tf.float32)
    num_neg = tf.reduce_sum(1.0 - gt)
    num_pos = tf.reduce_sum(gt)
    # beta = fraction of negative labels; used to re-weight the two terms.
    beta = num_neg / (num_neg + num_pos)
    eps = 1e-12  # avoid log(0)
    loss_pos = -beta * tf.reduce_mean(gt * tf.log(prob + eps))
    loss_neg = (1.0 - beta) * tf.reduce_mean((1.0 - gt) * tf.log((1.0 - prob) + eps))
    return tf.sub(loss_pos, loss_neg, name=name)
|
def class_balanced_sigmoid_cross_entropy(logits, label, name='cross_entropy_loss'):
    """The class-balanced cross entropy loss on logits, as in
    `Holistically-Nested Edge Detection <http://arxiv.org/abs/1504.06375>`_.
    More numerically stable than :func:`class_balanced_cross_entropy`.

    :param logits: the logits.
    :param label: ground truth in {0,1}, of the same shape as logits.
    :returns: a scalar. class-balanced cross entropy loss.
    """
    gt = tf.cast(label, tf.float32)
    num_neg = tf.reduce_sum(1.0 - gt)
    num_pos = tf.reduce_sum(gt)
    beta = num_neg / (num_neg + num_pos)
    # weighted_cross_entropy scales the positive term by pos_weight.
    pos_weight = beta / (1 - beta)
    cost = tf.nn.weighted_cross_entropy_with_logits(logits, gt, pos_weight)
    return tf.reduce_mean(cost * (1 - beta), name=name)
|
def print_stat(x, message=None):
    """A simple print op. Use it like: ``x = print_stat(x)``."""
    if message is None:
        message = x.op.name
    return tf.Print(x, [tf.shape(x), tf.reduce_mean(x), x],
                    summarize=20, message=message,
                    name='print_' + x.op.name)
|
def rms(x, name=None):
    """Root-mean-square of a tensor.

    :param x: a tensor.
    :param name: name of the output tensor. When None, an auto name
        ``<x.op.name>/rms`` is used and the op is created in the root name
        scope (the auto name already encodes the full path).
    """
    if name is None:
        name = x.op.name + '/rms'
        # Root name scope only for the auto-generated name; the final
        # return below was previously unreachable dead code.
        with tf.name_scope(None):
            return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
    return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
|
def huber_loss(x, delta=1, name='huber_loss'):
    """Huber loss summed over all elements: quadratic for |x| < delta,
    linear beyond.
    """
    absx = tf.abs(x)
    quadratic = 0.5 * tf.square(x)
    linear = (absx * delta) - (0.5 * (delta ** 2))
    return tf.reduce_sum(tf.select(absx < delta, quadratic, linear), name=name)
|
def get_scalar_var(name, init_value, summary=False, trainable=False):
    """Get a scalar variable with a given initial value.

    :param summary: whether to add a scalar summary of this variable.
    """
    var = tf.get_variable(name, shape=[],
                          initializer=tf.constant_initializer(init_value),
                          trainable=trainable)
    if summary:
        tf.summary.scalar(name + '-summary', var)
    return var
|
class TowerContext(object):
    """Records which training/prediction tower the graph is currently built in.

    Entering the context sets the module-global ``_CurrentTowerContext`` and,
    when the tower name is non-empty, opens a matching name scope.
    """
    def __init__(self, tower_name, is_training=None):
        """
        :param tower_name: 'tower0', 'towerp0', or '' (the default tower).
        :param is_training: if None, inferred from the name: any tower whose
            name does not start with ``PREDICT_TOWER`` is a training tower.
        """
        self._name = tower_name
        if (is_training is None):
            is_training = (not self._name.startswith(PREDICT_TOWER))
        self._is_training = is_training
    @property
    def is_main_training_tower(self):
        # The main training tower is the unnamed or first ('tower0') tower.
        return (self.is_training and ((self._name == '') or (self._name == 'tower0')))
    @property
    def is_main_tower(self):
        return ((self._name == '') or (self._name == 'tower0'))
    @property
    def is_training(self):
        return self._is_training
    @property
    def name(self):
        return self._name
    def get_variable_on_tower(self, *args, **kwargs):
        """Get a variable for this tower specifically, without reusing.

        Tensorflow doesn't allow a reuse=False scope under a reuse=True
        scope. This method provides a work-around. See
        https://www.tensorflow.org/versions/master/how_tos/variable_scope/index.html#basics-of-tfvariable-scope

        :param args, kwargs: same as tf.get_variable()
        """
        with tf.variable_scope(self._name) as scope:
            # Re-entering the captured scope object resets reuse to False.
            with tf.variable_scope(scope, reuse=False):
                scope = tf.get_variable_scope()
                assert (scope.reuse == False)
                return tf.get_variable(*args, **kwargs)
    def find_tensor_in_main_tower(self, graph, name):
        # Map a tensor name inside a prediction tower back to the
        # corresponding tensor in the main (training) graph.
        if self.is_main_tower:
            return graph.get_tensor_by_name(name)
        if name.startswith(PREDICT_TOWER):
            predict_tower_prefix = '{}[0-9]+/'.format(PREDICT_TOWER)
            newname = re.sub(predict_tower_prefix, '', name)
            try:
                return graph.get_tensor_by_name(newname)
            except KeyError:
                # Not in the root scope; fall back to the first training tower.
                newname = re.sub(predict_tower_prefix, 'tower0/', name)
                return graph.get_tensor_by_name(newname)
    def __enter__(self):
        global _CurrentTowerContext
        # Tower contexts must not nest: only one tower is built at a time.
        assert (_CurrentTowerContext is None), 'Nesting TowerContext!'
        _CurrentTowerContext = self
        if len(self._name):
            self._scope = tf.name_scope(self._name)
            return self._scope.__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        global _CurrentTowerContext
        _CurrentTowerContext = None
        if len(self._name):
            self._scope.__exit__(exc_type, exc_val, exc_tb)
        # Never suppress exceptions raised inside the context.
        return False
|
def get_current_tower_context():
    """Return the :class:`TowerContext` currently entered, or None.

    Reading a module global needs no ``global`` statement.
    """
    return _CurrentTowerContext
|
def get_savename_from_varname(varname, varname_prefix=None, savename_prefix=None):
    """Map a graph variable name to the name used when saving it.

    :param varname: a variable name in the graph.
    :param varname_prefix: an optional prefix that is stripped from varname.
    :param savename_prefix: an optional prefix prepended to the save name.
    :returns: the name used to save the variable, or None if it should
        not be saved at all.
    """
    name = varname
    if PREDICT_TOWER in name:
        # Prediction towers only reuse training variables; never save them.
        logger.error("No variable under '{}' name scope should be saved!".format(PREDICT_TOWER))
        return None
    if 'tower' in name:
        # Variables are shared across towers; strip the tower scope.
        name = re.sub('tower[p0-9]+/', '', name)
    if (varname_prefix is not None) and name.startswith(varname_prefix):
        name = name[len(varname_prefix) + 1:]  # also drop the trailing '/'
    if savename_prefix is not None:
        name = savename_prefix + '/' + name
    return name
|
class SessionUpdate(object):
    """Update variables in an existing session by feeding numpy values."""
    def __init__(self, sess, vars_to_update):
        """
        :param sess: the tf.Session to run assignments in.
        :param vars_to_update: a collection of variables to update.
        """
        self.sess = sess
        # Maps save-name -> list of (placeholder, variable, assign_op).
        self.assign_ops = defaultdict(list)
        for var in vars_to_update:
            with tf.device('/cpu:0'):
                # Keep the placeholder and assign op on CPU.
                placehdr = tf.placeholder(var.dtype)
                savename = get_savename_from_varname(var.name)
                self.assign_ops[savename].append(
                    (placehdr, var, var.assign(placehdr)))
    def update(self, prms):
        """
        :param prms: dict of {save name: value}. Every name must exist in
            the graph and have been passed in vars_to_update.
        """
        for name, value in six.iteritems(prms):
            assert name in self.assign_ops
            for placehdr, var, op in self.assign_ops[name]:
                expected = tuple(var.get_shape().as_list())
                if expected != value.shape:
                    # Allow silently reshaping compatible values, but warn.
                    assert np.prod(expected) == np.prod(value.shape), \
                        '{}: {}!={}'.format(name, expected, value.shape)
                    logger.warn('Param {} is reshaped during assigning'.format(name))
                    value = value.reshape(expected)
                self.sess.run(op, feed_dict={placehdr: value})
|
def dump_session_params(path):
    """Dump values of all trainable + to-save variables to a dict and save
    it to `path` as npy format, loadable by ParamRestore.
    """
    varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    varlist.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
    assert len(set(varlist)) == len(varlist), \
        'TRAINABLE and MODEL variables have duplication!'
    result = {}
    for var in varlist:
        name = get_savename_from_varname(var.name)
        if name in result:
            logger.info('Variable {} would be stored instead of another with the same name'.format(var.name))
        result[name] = var.eval()
    logger.info('Variables to save to {}:'.format(path))
    logger.info(str(result.keys()))
    np.save(path, result)
|
def dump_chkpt_vars(model_path):
    """Dump all variables from a checkpoint into a dict."""
    if os.path.basename(model_path) == model_path:
        # NewCheckpointReader needs a path with a directory component.
        model_path = os.path.join('.', model_path)
    reader = tf.train.NewCheckpointReader(model_path)
    return {name: reader.get_tensor(name)
            for name in reader.get_variable_to_shape_map().keys()}
|
def is_training_name(name):
    """Guess whether a tensor is only used in training, by its name.
    This is only used to improve logging.

    :returns: True if the name looks training-only.
    """
    # Optimizer slot variables, e.g. 'w/Adam', 'w/Momentum'. Collapsed from
    # a repetitive if-chain into one tuple-endswith check.
    training_suffixes = (
        '/Adam', '/Adam_1',
        '/Momentum',
        '/Adadelta', '/Adadelta_1',
        '/RMSProp', '/RMSProp_1',
        '/Adagrad',
    )
    name = get_op_tensor_name(name)[0]
    if name.endswith(training_suffixes):
        return True
    return 'EMA_summary/' in name
|
def global_import(name):
    """Relative-import module ``name`` and re-export its public names
    (``__all__``) into this module's namespace, extending ``__all__``.
    """
    mod = __import__(name, globals(), locals(), level=1)
    public = mod.__all__ if '__all__' in dir(mod) else []
    # Drop the module object itself; only its members are re-exported.
    del globals()[name]
    for sym in public:
        globals()[sym] = mod.__dict__[sym]
        __all__.append(sym)
|
class StopTraining(BaseException):
    """An exception thrown to stop training.

    Derives from BaseException (not Exception) so that broad
    ``except Exception`` handlers do not accidentally swallow it.
    """
    pass
|
@six.add_metaclass(ABCMeta)
class Trainer(object):
    """Base class for a trainer: owns the session and coordinator, runs the
    training loop, and dispatches callbacks and summaries.
    """
    'a `StatHolder` instance'
    stat_holder = None
    '`tf.SummaryWriter`'
    summary_writer = None
    'a tf.Tensor which returns summary string'
    summary_op = None
    ' TrainConfig '
    config = None
    ' a ModelDesc'
    model = None
    ' the current session'
    sess = None
    ' the `tf.train.Coordinator` '
    coord = None
    def __init__(self, config):
        """
        :param config: a `TrainConfig` instance
        """
        assert isinstance(config, TrainConfig), type(config)
        self.config = config
        self.model = config.model
        self.sess = tf.Session(config=self.config.session_config)
        self.coord = tf.train.Coordinator()
    def train(self):
        """Start training: set everything up, then run the main loop."""
        self.setup()
        self.main_loop()
    @abstractmethod
    def run_step(self):
        """Run one iteration."""
        pass
    def get_predict_func(self, input_names, output_names):
        """Return an online predictor."""
        raise NotImplementedError()
    def get_predict_funcs(self, input_names, output_names, n):
        """Return n predictor functions. Can be overwritten by subclasses to
        exploit more parallelism among funcs.
        """
        return [self.get_predict_func(input_names, output_names) for k in range(n)]
    def trigger_epoch(self):
        # Subclass hook first, then callbacks, then flush summaries to disk.
        self._trigger_epoch()
        self.config.callbacks.trigger_epoch()
        self.summary_writer.flush()
    @abstractmethod
    def _trigger_epoch(self):
        """Called right after all steps in an epoch are finished."""
        pass
    def _process_summary(self, summary_str):
        # Parse a serialized Summary, record scalar values into the stat
        # holder, and forward everything to the summary writer.
        summary = tf.Summary.FromString(summary_str)
        for val in summary.value:
            if (val.WhichOneof('value') == 'simple_value'):
                # Strip tower prefix and '-summary' suffix so values from
                # different towers aggregate under one stat name.
                val.tag = re.sub('tower[p0-9]+/', '', val.tag)
                suffix = '-summary'
                if val.tag.endswith(suffix):
                    val.tag = val.tag[:(- len(suffix))]
                self.stat_holder.add_stat(val.tag, val.simple_value)
        self.summary_writer.add_summary(summary, get_global_step())
    def write_scalar_summary(self, name, val):
        # Record a scalar both as a TF summary and as a tracked stat.
        self.summary_writer.add_summary(create_summary(name, val), get_global_step())
        self.stat_holder.add_stat(name, val)
    def setup(self):
        # Build the graph, wire up callbacks/summaries, initialize
        # variables, then freeze the graph before training starts.
        self._setup()
        describe_model()
        get_global_step_var()
        logger.info('Setup callbacks ...')
        self.config.callbacks.setup_graph(weakref.proxy(self))
        if (not hasattr(logger, 'LOG_DIR')):
            raise RuntimeError("logger directory wasn't set!")
        self.summary_writer = tf.summary.FileWriter(logger.LOG_DIR, graph=self.sess.graph)
        self.summary_op = tf.summary.merge_all()
        self.stat_holder = StatHolder(logger.LOG_DIR)
        logger.info('Initializing graph variables ...')
        try:
            # TF >= 0.12
            initop = tf.global_variables_initializer()
        except:
            # fallback for older TF releases
            initop = tf.initialize_all_variables()
        self.sess.run(initop)
        self.config.session_init.init(self.sess)
        # Creating any op after this point is a bug.
        tf.get_default_graph().finalize()
        tf.train.start_queue_runners(sess=self.sess, coord=self.coord, daemon=True, start=True)
    @abstractmethod
    def _setup(self):
        """Setup Trainer-specific stuff for training."""
    def main_loop(self):
        # The epoch/step loop. Cleanup in `finally` runs on normal exit,
        # StopTraining, and any other exception alike.
        callbacks = self.config.callbacks
        with self.sess.as_default():
            try:
                callbacks.before_train()
                logger.info('Start training with global_step={}'.format(get_global_step()))
                for epoch_num in range(self.config.starting_epoch, (self.config.max_epoch + 1)):
                    with timed_operation('Epoch {} (global_step {})'.format(epoch_num, (get_global_step() + self.config.step_per_epoch))):
                        for step in tqdm.trange(self.config.step_per_epoch, **get_tqdm_kwargs(leave=True)):
                            if self.coord.should_stop():
                                return
                            self.run_step()
                            callbacks.trigger_step()
                        self.trigger_epoch()
            except StopTraining:
                logger.info('Training was stopped.')
            except:
                raise
            finally:
                callbacks.after_train()
                self.coord.request_stop()
                self.summary_writer.close()
                self.sess.close()
|
class TrainConfig(object):
    """Config for training a model with a single loss."""
    def __init__(self, **kwargs):
        """
        :param dataset: the dataset to train. a `DataFlow` instance.
        :param data: an `InputData` instance (mutually exclusive with dataset).
        :param optimizer: a `tf.train.Optimizer` instance defining the optimizer for trainig.
        :param callbacks: a `callback.Callbacks` instance. Define
            the callbacks to perform during training.
        :param session_config: a `tf.ConfigProto` instance to instantiate the session.
        :param session_init: a `sessinit.SessionInit` instance to
            initialize variables of a session. default to a new session.
        :param model: a `ModelDesc` instance.
        :param starting_epoch: int. default to be 1.
        :param step_per_epoch: the number of steps (SGD updates) to perform in each epoch.
        :param max_epoch: maximum number of epoch to run training. default to inf
        :param nr_tower: int. number of training towers. default to 1.
        :param tower: list of training towers in relative id. default to `range(nr_tower)` if nr_tower is given.
        :param predict_tower: list of prediction tower in their relative gpu id. Defaults to [0]
        """
        def assert_type(v, tp):
            # Fail early, showing the offending class, on type mismatch.
            assert isinstance(v, tp), v.__class__
        # Exactly one of 'dataset' (a DataFlow) or 'data' (an InputData).
        if ('dataset' in kwargs):
            assert ('data' not in kwargs), 'dataset and data cannot be both presented in TrainConfig!'
            self.dataset = kwargs.pop('dataset')
            assert_type(self.dataset, DataFlow)
        else:
            self.data = kwargs.pop('data')
            assert_type(self.data, InputData)
        self.optimizer = kwargs.pop('optimizer')
        assert_type(self.optimizer, tf.train.Optimizer)
        self.callbacks = kwargs.pop('callbacks')
        assert_type(self.callbacks, Callbacks)
        self.model = kwargs.pop('model')
        assert_type(self.model, ModelDesc)
        self.session_config = kwargs.pop('session_config', get_default_sess_config())
        assert_type(self.session_config, tf.ConfigProto)
        self.session_init = kwargs.pop('session_init', JustCurrentSession())
        assert_type(self.session_init, SessionInit)
        self.step_per_epoch = kwargs.pop('step_per_epoch', None)
        if (self.step_per_epoch is None):
            # Default to one full pass over the input per epoch.
            try:
                if hasattr(self, 'dataset'):
                    self.step_per_epoch = self.dataset.size()
                else:
                    self.step_per_epoch = self.data.size()
            except NotImplementedError:
                logger.exception('You must set `step_per_epoch` if dataset.size() is not implemented.')
        else:
            self.step_per_epoch = int(self.step_per_epoch)
        self.starting_epoch = int(kwargs.pop('starting_epoch', 1))
        self.max_epoch = int(kwargs.pop('max_epoch', 99999))
        assert ((self.step_per_epoch >= 0) and (self.max_epoch > 0))
        # 'nr_tower' and 'tower' are alternative ways to specify towers.
        if ('nr_tower' in kwargs):
            assert ('tower' not in kwargs), 'Cannot set both nr_tower and tower in TrainConfig!'
            self.nr_tower = kwargs.pop('nr_tower')
        elif ('tower' in kwargs):
            self.tower = kwargs.pop('tower')
        else:
            self.tower = [0]
        self.predict_tower = kwargs.pop('predict_tower', [0])
        if isinstance(self.predict_tower, int):
            self.predict_tower = [self.predict_tower]
        self.extra_threads_procs = kwargs.pop('extra_threads_procs', [])
        if self.extra_threads_procs:
            logger.warn('[DEPRECATED] use the Callback StartProcOrThread instead of _extra_threads_procs')
            from ..callbacks.concurrency import StartProcOrThread
            self.callbacks.append(StartProcOrThread(self.extra_threads_procs))
        # Anything left over is a typo or an unsupported option.
        assert (len(kwargs) == 0), 'Unknown arguments: {}'.format(str(kwargs.keys()))
    def set_tower(self, nr_tower=None, tower=None):
        # Deprecated setter kept for backward compatibility.
        logger.warn('config.set_tower is deprecated. set config.tower or config.nr_tower directly')
        assert ((nr_tower is None) or (tower is None)), 'Cannot set both nr_tower and tower!'
        if nr_tower:
            tower = list(range(nr_tower))
        elif isinstance(tower, int):
            tower = list(range(tower))
        self.tower = tower
        assert isinstance(self.tower, list)
    @property
    def nr_tower(self):
        # Number of training towers, derived from the tower list.
        return len(self.tower)
    @nr_tower.setter
    def nr_tower(self, value):
        self.tower = list(range(value))
|
class FeedfreeTrainer(Trainer):
    """A trainer which runs iterations without feed_dict (therefore faster)."""
    def _trigger_epoch(self):
        # Evaluate the merged summary op once per epoch, if any exists.
        if self.summary_op is not None:
            self._process_summary(self.summary_op.eval())
    def _get_input_tensors(self):
        return self._input_method.get_input_tensors()
    def _setup(self):
        assert isinstance(self._input_method, FeedfreeInput), type(self._input_method)
        self._input_method._setup(self)
|
class SingleCostFeedfreeTrainer(FeedfreeTrainer):
    """Feedfree trainer optimizing a single cost tensor."""
    def _get_cost_and_grad(self):
        """Build the model on a new tower; return (cost, grads)."""
        inputs = self._get_input_tensors()
        self.model.build_graph(inputs)
        cost = self.model.get_cost()
        grads = self.config.optimizer.compute_gradients(
            cost,
            gate_gradients=tf.train.Optimizer.GATE_NONE,
            colocate_gradients_with_ops=False)
        add_moving_summary(cost)
        return (cost, grads)
    def run_step(self):
        """Simply run self.train_op."""
        self.sess.run(self.train_op)
|
class SimpleFeedfreeTrainer(MultiPredictorTowerTrainer, SingleCostFeedfreeTrainer):
    """A trainer with single cost, single training tower and feed-free input.
    ``config.data`` must exist.
    """
    def __init__(self, config):
        self._input_method = config.data
        assert isinstance(self._input_method, FeedfreeInput), self._input_method
        super(SimpleFeedfreeTrainer, self).__init__(config)
        self._setup_predictor_factory(config.predict_tower)
        assert len(self.config.tower) == 1, \
            "SimpleFeedfreeTrainer doesn't support multigpu!"
    def _setup(self):
        super(SimpleFeedfreeTrainer, self)._setup()
        with TowerContext('', is_training=True):
            cost, grads = self._get_cost_and_grad()
        grads = apply_grad_processors(grads, self.model.get_gradient_processor())
        # One op that applies gradients and maintains moving averages.
        self.train_op = tf.group(
            self.config.optimizer.apply_gradients(grads, get_global_step_var()),
            summary_moving_average(), name='train_op')
|
class QueueInputTrainer(SimpleFeedfreeTrainer):
    """Single-tower trainer which takes its input from a queue."""
    def __init__(self, config, input_queue=None, predict_tower=None):
        """
        :param config: a `TrainConfig` instance. config.dataset must exist.
        :param input_queue: a `tf.QueueBase` instance.
        :param predict_tower: deprecated; use TrainConfig.predict_tower instead.
        """
        config.data = QueueInput(config.dataset, input_queue)
        if predict_tower is not None:
            logger.warn('[Deprecated] Argument `predict_tower` is deprecated for trainer. Use TrainConfig.predict_tower instead!')
            config.predict_tower = predict_tower
        assert len(config.tower) == 1, \
            "QueueInputTrainer doesn't support multigpu! Use Sync/AsyncMultiGPUTrainer instead."
        super(QueueInputTrainer, self).__init__(config)
|
@six.add_metaclass(ABCMeta)
class InputData(object):
    """Abstract base class of all trainer input sources."""
    pass
|
class FeedInput(InputData):
    """Input fed into the graph through feed_dict, drawn from a DataFlow."""
    def __init__(self, ds):
        assert isinstance(ds, DataFlow), ds
        self.ds = ds
    def size(self):
        return self.ds.size()
    def _setup(self, trainer):
        self.input_vars = trainer.model.get_input_vars()
        repeated = RepeatedData(self.ds, -1)  # repeat the dataflow forever
        repeated.reset_state()
        self.data_producer = repeated.get_data()
    def next_feed(self):
        """Return a feed_dict mapping input placeholders to the next datapoint."""
        dp = next(self.data_producer)
        return dict(zip(self.input_vars, dp))
|
class FeedfreeInput(InputData):
    """Input source that produces tensors directly (no feed_dict)."""
    def get_input_tensors(self):
        # Public wrapper around the subclass hook.
        return self._get_input_tensors()
    @abstractmethod
    def _get_input_tensors(self):
        """Always create and return a list of new input tensors."""
|
class EnqueueThread(threading.Thread):
    """Daemon thread feeding datapoints from a DataFlow into a TF queue.

    Runs until the trainer's coordinator requests a stop, then closes the
    queue so pending dequeues get cancelled.
    """
    def __init__(self, trainer, queue, ds, input_placehdrs):
        super(EnqueueThread, self).__init__()
        self.name = 'EnqueueThread'
        self.daemon = True
        self.dataflow = ds
        self.queue = queue
        self.sess = trainer.sess
        self.coord = trainer.coord
        self.placehdrs = input_placehdrs
        self.op = self.queue.enqueue(self.placehdrs)
        # cancel_pending_enqueues unblocks an enqueue stuck on a full queue.
        self.close_op = self.queue.close(cancel_pending_enqueues=True)
        self.size_op = self.queue.size()
        add_moving_summary(tf.cast(self.size_op, tf.float32, name='input_queue_size'))
    def run(self):
        self.dataflow.reset_state()
        with self.sess.as_default():
            try:
                while True:
                    for dp in self.dataflow.get_data():
                        # Bail out promptly when training is shutting down.
                        if self.coord.should_stop():
                            return
                        feed = dict(zip(self.placehdrs, dp))
                        self.op.run(feed_dict=feed)
            except tf.errors.CancelledError as e:
                # Queue was closed during shutdown: exit quietly.
                pass
            except Exception:
                logger.exception('Exception in EnqueueThread:')
            finally:
                # Stop the whole training, then close the queue. The session
                # may already be closed, hence the RuntimeError guard.
                self.coord.request_stop()
                try:
                    self.sess.run(self.close_op)
                except RuntimeError:
                    pass
                logger.info('Enqueue Thread Exited.')
|
class QueueInput(FeedfreeInput):
    """Input that buffers datapoints from a DataFlow in a TF queue."""
    def __init__(self, ds, queue=None):
        """
        :param ds: a `DataFlow` instance.
        :param queue: a `tf.QueueBase` instance used to buffer datapoints.
            Defaults to a FIFO queue of size 50.
        """
        assert isinstance(ds, DataFlow), ds
        self.queue = queue
        self.ds = ds
    def size(self):
        return self.ds.size()
    def _setup(self, trainer):
        self.input_placehdrs = trainer.model.get_input_vars()
        assert len(self.input_placehdrs) > 0, \
            'QueueInput can only be used with input placeholders!'
        if self.queue is None:
            self.queue = tf.FIFOQueue(
                50, [x.dtype for x in self.input_placehdrs], name='input_queue')
        self.thread = EnqueueThread(trainer, self.queue, self.ds, self.input_placehdrs)
        trainer.config.callbacks.append(StartProcOrThread(self.thread))
    def _get_input_tensors(self):
        ret = self.queue.dequeue(name='input_deque')
        if isinstance(ret, tf.Tensor):
            ret = [ret]  # a single-component queue dequeues a bare tensor
        assert len(ret) == len(self.input_placehdrs)
        # Dequeued tensors lose their static shape; restore it.
        for tensor, placehdr in zip(ret, self.input_placehdrs):
            tensor.set_shape(placehdr.get_shape())
        return ret
|
class DummyConstantInput(QueueInput):
    """Feed constant-initialized variables as input.
    Only for debugging performance issues.
    """
    def __init__(self, ds, shapes):
        super(DummyConstantInput, self).__init__(ds)
        self.shapes = shapes
        logger.warn('Using dummy input for debug!')
    def _get_input_tensors(self):
        placehdrs = self.input_placehdrs
        assert len(self.shapes) == len(placehdrs)
        tensors = []
        for shape, placehdr in zip(self.shapes, placehdrs):
            with tf.device('/gpu:0'):
                tensors.append(tf.get_variable(
                    'dummy-' + placehdr.op.name, shape=shape,
                    dtype=placehdr.dtype, trainable=False,
                    initializer=tf.constant_initializer()))
        return tensors
|
class TensorInput(FeedfreeInput):
    """Input coming from a user-provided function that returns tensors."""
    def __init__(self, get_tensor_fn, size=None):
        """
        :param get_tensor_fn: a function returning a list of input tensors.
        :param size: optional number of steps this input provides per epoch.
        """
        self.get_tensor_fn = get_tensor_fn
        self._size = size
    def size(self):
        if self._size is None:
            raise ValueError('size of TensorInput is undefined!')
        return self._size
    def _setup(self, trainer):
        pass  # nothing to prepare; tensors are created on demand
    def _get_input_tensors(self):
        return self.get_tensor_fn()
|
class MultiGPUTrainer(Trainer):
    """Base class for multi-gpu training."""
    @staticmethod
    def _multi_tower_grads(towers, get_tower_grad_func):
        """Build one training tower per GPU and collect per-tower gradients.

        ret[i] is a list of (grad, var) tuples for tower i.

        :param towers: list of gpu relative ids.
        :param get_tower_grad_func: builds the graph for the current tower
            and returns its (grad, var) list.
        """
        logger.info('Training a model of {} tower'.format(len(towers)))
        grad_list = []
        global_scope = tf.get_variable_scope()
        for (idx, t) in enumerate(towers):
            # Variables are created by the first tower, reused afterwards.
            with tf.device('/gpu:{}'.format(t)), tf.variable_scope(global_scope, reuse=(idx > 0)), TowerContext('tower{}'.format(idx)) as scope:
                logger.info('Building graph for training tower {}...'.format(idx))
                grad_list.append(get_tower_grad_func())
                if (idx == 0):
                    # Keep only the summaries created by the first tower;
                    # they are restored after all towers are built.
                    backup = backup_collection(SUMMARY_BACKUP_KEYS)
        restore_collection(backup)
        return grad_list
|
class SyncMultiGPUTrainer(MultiGPUTrainer, SingleCostFeedfreeTrainer, MultiPredictorTowerTrainer):
    """Multi-GPU trainer which synchronously averages gradients over towers."""
    def __init__(self, config, input_queue=None, predict_tower=None):
        """
        :param config: a `TrainConfig` instance.
        :param input_queue: a `tf.QueueBase` instance.
        :param predict_tower: deprecated; use TrainConfig.predict_tower instead.
        """
        if hasattr(config, 'dataset'):
            self._input_method = QueueInput(config.dataset, input_queue)
        else:
            self._input_method = config.data
            assert isinstance(self._input_method, QueueInput)
        if predict_tower is not None:
            logger.warn('[Deprecated] Argument `predict_tower` is deprecated for trainer. Use TrainConfig.predict_tower instead!')
            config.predict_tower = predict_tower
        super(SyncMultiGPUTrainer, self).__init__(config)
        self._setup_predictor_factory(config.predict_tower)
        assert len(config.tower) >= 1, 'MultiGPUTrainer must be used with at least one GPU.'
        assert tf.test.is_gpu_available()
    @staticmethod
    def _average_grads(tower_grads):
        """Average per-tower (grad, var) lists into a single list.

        :param tower_grads: list (one entry per tower) of (grad, var) lists.
        :returns: one list of (grad, var) with grads averaged over towers.
        """
        if len(tower_grads) == 1:
            return tower_grads[0]
        ret = []
        with tf.name_scope('AvgGrad'):
            for grad_and_vars in zip(*tower_grads):
                v = grad_and_vars[0][1]
                all_grad = [k[0] for k in grad_and_vars]
                nones = list(set(all_grad))
                if (None in nones) and (len(nones) != 1):
                    raise RuntimeError('Gradient w.r.t {} is None in some but not all towers!'.format(v.name))
                elif nones[0] is None:
                    # BUGFIX: this previously referenced an undefined name
                    # `var`, raising NameError whenever a variable had no
                    # gradient in any tower.
                    logger.warn('No Gradient w.r.t {}'.format(v.op.name))
                    continue
                try:
                    grad = tf.add_n(all_grad) / float(len(tower_grads))
                except:
                    # Log which variable failed, then re-raise unchanged.
                    logger.error('Error while processing gradients of {}'.format(v.name))
                    raise
                ret.append((grad, v))
        return ret
    def _setup(self):
        super(SyncMultiGPUTrainer, self)._setup()
        grad_list = MultiGPUTrainer._multi_tower_grads(
            self.config.tower, lambda: self._get_cost_and_grad()[1])
        grads = SyncMultiGPUTrainer._average_grads(grad_list)
        grads = apply_grad_processors(grads, self.model.get_gradient_processor())
        self.train_op = tf.group(
            self.config.optimizer.apply_gradients(grads, get_global_step_var()),
            summary_moving_average(), name='train_op')
    def run_step(self):
        self.sess.run(self.train_op)
|
class AsyncMultiGPUTrainer(MultiGPUTrainer, SingleCostFeedfreeTrainer, MultiPredictorTowerTrainer):
    """Multi-GPU trainer where each extra tower applies its gradients
    asynchronously from a background thread."""
    def __init__(self, config, input_queue=None, average_gradient=True, predict_tower=None):
        """
        :param config: a `TrainConfig` instance.
        :param input_queue: a `tf.QueueBase` instance.
        :param average_gradient: scale each tower's gradient by 1/nr_tower.
        :param predict_tower: deprecated; use TrainConfig.predict_tower instead.
        """
        if hasattr(config, 'dataset'):
            self._input_method = QueueInput(config.dataset, input_queue)
        else:
            self._input_method = config.data
            assert isinstance(self._input_method, QueueInput)
        super(AsyncMultiGPUTrainer, self).__init__(config)
        if predict_tower is not None:
            logger.warn('[Deprecated] Argument `predict_tower` is deprecated for trainer. Use TrainConfig.predict_tower instead!')
            config.predict_tower = predict_tower
        self._setup_predictor_factory(config.predict_tower)
        self._average_gradient = average_gradient
        assert tf.test.is_gpu_available()
    def _setup(self):
        super(AsyncMultiGPUTrainer, self)._setup()
        grad_list = MultiGPUTrainer._multi_tower_grads(
            self.config.tower, lambda: self._get_cost_and_grad()[1])
        gradprocs = self.model.get_gradient_processor()
        if self._average_gradient and (self.config.nr_tower > 1):
            # Pretend to average the grads by scaling each tower's gradient.
            gradprocs.insert(0, ScaleGradient(('.*', (1.0 / self.config.nr_tower)), log=False))
        grad_list = [apply_grad_processors(g, gradprocs) for g in grad_list]
        # Only the first tower's update maintains global_step and summaries.
        self.train_op = tf.group(
            self.config.optimizer.apply_gradients(grad_list[0], get_global_step_var()),
            summary_moving_average(), name='train_op')
        self._start_async_threads(grad_list)
    def _start_async_threads(self, grad_list):
        self.async_step_counter = itertools.count()
        self.training_threads = []
        for k in range(1, len(self.config.tower)):
            train_op = self.config.optimizer.apply_gradients(grad_list[k])
            def f(op=train_op):  # bind as default to avoid late binding
                self.sess.run([op])
                next(self.async_step_counter)
            th = LoopThread(f)
            th.pause()
            th.start()
            self.training_threads.append(th)
        self.async_running = False
    def run_step(self):
        if not self.async_running:
            self.async_running = True
            # Resume the extra towers the first step after a pause.
            for th in self.training_threads:
                th.resume()
        next(self.async_step_counter)
        self.sess.run(self.train_op)
    def _trigger_epoch(self):
        # Pause async updates while epoch callbacks (e.g. validation) run.
        self.async_running = False
        for th in self.training_threads:
            th.pause()
        try:
            # BUGFIX: previously compared the tower *list* against an int
            # (`self.config.tower > 1`), which is a TypeError on Python 3
            # that the bare except silently logged away.
            if self.config.nr_tower > 1:
                # itertools.count has no public counter; parse its repr.
                async_step_total_cnt = int(re.findall(
                    '[0-9]+', self.async_step_counter.__str__())[0])
                self.write_scalar_summary('async_global_step', async_step_total_cnt)
        except:
            logger.exception('Cannot log async_global_step')
        super(AsyncMultiGPUTrainer, self)._trigger_epoch()
|
class PredictorFactory(object):
    """Makes predictors for a trainer."""
    def __init__(self, sess, model, towers):
        """
        :param towers: list of gpu relative ids to run prediction on.
        """
        self.sess = sess
        self.model = model
        self.towers = towers
        self.tower_built = False
    def get_predictor(self, input_names, output_names, tower):
        """
        :param tower: need the k-th tower (not the gpu id).
        :returns: an online predictor.
        """
        if not self.tower_built:
            self._build_predict_tower()
        # Wrap around when more predictors than towers are requested.
        tower = self.towers[tower % len(self.towers)]
        input_vars = get_tensors_by_names(input_names)
        prefixed_names = ['{}{}/'.format(PREDICT_TOWER, tower) + n for n in output_names]
        output_vars = get_tensors_by_names(prefixed_names)
        return OnlinePredictor(self.sess, input_vars, output_vars)
    def _build_predict_tower(self):
        # Prediction towers reuse the training variables.
        tf.get_variable_scope().reuse_variables()
        with tf.name_scope(None), freeze_collection(SUMMARY_BACKUP_KEYS):
            fn = lambda _: self.model.build_graph(self.model.get_input_vars())
            build_multi_tower_prediction_graph(fn, self.towers)
        self.tower_built = True
|
class SimpleTrainer(Trainer):
    """A naive demo trainer: single tower, driven by feed_dict."""
    def __init__(self, config):
        super(SimpleTrainer, self).__init__(config)
        self._predictor_factory = PredictorFactory(self.sess, self.model, [0])
        if hasattr(config, 'dataset'):
            self._input_method = FeedInput(config.dataset)
        else:
            self._input_method = config.data
            assert isinstance(self._input_method, FeedInput)
    def run_step(self):
        feed = self._input_method.next_feed()
        self.sess.run([self.train_op], feed_dict=feed)
    def _setup(self):
        self._input_method._setup(self)
        model = self.model
        self.input_vars = model.get_input_vars()
        with TowerContext('', is_training=True):
            model.build_graph(self.input_vars)
            cost_var = model.get_cost()
        add_moving_summary(cost_var)
        grads = self.config.optimizer.compute_gradients(cost_var)
        grads = apply_grad_processors(grads, self.model.get_gradient_processor())
        self.train_op = tf.group(
            self.config.optimizer.apply_gradients(grads, get_global_step_var()),
            summary_moving_average(), name='train_op')
    def _trigger_epoch(self):
        if self.summary_op is not None:
            # The summary op may depend on inputs; feed one datapoint.
            feed = self._input_method.next_feed()
            self._process_summary(self.summary_op.eval(feed_dict=feed))
    def get_predict_func(self, input_names, output_names):
        return self._predictor_factory.get_predictor(input_names, output_names, 0)
|
class MultiPredictorTowerTrainer(Trainer):
    """A trainer with possibly multiple prediction towers."""
    def _setup_predictor_factory(self, predict_tower):
        predict_tower = predict_tower or [0]
        self._predictor_factory = PredictorFactory(self.sess, self.model, predict_tower)
    def get_predict_func(self, input_names, output_names, tower=0):
        """
        :param tower: return the k-th predict_func.
        :returns: an `OnlinePredictor`.
        """
        return self._predictor_factory.get_predictor(input_names, output_names, tower)
    def get_predict_funcs(self, input_names, output_names, n):
        """Return n predictor functions, round-robined over the towers."""
        return [self.get_predict_func(input_names, output_names, k) for k in range(n)]
|
def _global_import(name):
    """Relative-import module ``name`` and re-export its public names
    (``__all__`` if defined, otherwise everything from ``dir``) into this
    module's namespace, extending ``__all__``.
    """
    mod = __import__(name, globals(), None, level=1)
    names = mod.__all__ if '__all__' in dir(mod) else dir(mod)
    for sym in names:
        globals()[sym] = mod.__dict__[sym]
        __all__.append(sym)
|
def map_arg(**maps):
    """Apply a mapping on certain arguments before calling the original function.

    :param maps: {argument name: map function}
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Resolve every argument to its name, then remap selected ones.
            argmap = inspect.getcallargs(func, *args, **kwargs)
            # Plain .items() works on both py2 and py3; no need for six here.
            for name, map_func in maps.items():
                if name in argmap:
                    argmap[name] = map_func(argmap[name])
            return func(**argmap)
        return wrapper
    return deco
|
class memoized(object):
    """Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated). Unhashable arguments bypass the cache with a warning.
    """
    def __init__(self, func):
        self.func = func
        self.cache = {}
    def __call__(self, *args, **kwargs):
        # BUGFIX: the key previously used only the kwarg *names*
        # (list(kwargs)), so calls differing only in keyword values wrongly
        # shared one cache entry. Key on sorted (name, value) pairs instead.
        key = (args, tuple(sorted(kwargs.items(), key=operator.itemgetter(0))))
        try:
            # EAFP hashability probe. Also avoids collections.Hashable,
            # which was removed in Python 3.10.
            hash(key)
        except TypeError:
            logger.warn('Arguments to memoized call is unhashable!')
            return self.func(*args, **kwargs)
        if key in self.cache:
            return self.cache[key]
        value = self.func(*args, **kwargs)
        self.cache[key] = value
        return value
    def __repr__(self):
        """Return the function's docstring."""
        return self.func.__doc__
    def __get__(self, obj, objtype):
        """Support instance methods by binding obj as the first argument."""
        return functools.partial(self.__call__, obj)
|
def memoized_ignoreargs(func):
    """Memoize `func` on its first call only, ignoring all arguments.

    Subsequent calls return the first call's result regardless of the
    arguments passed. Results live in the module-level _MEMOIZED_NOARGS dict.
    """
    # (removed an unused local: `h = hash(func)` was computed and discarded)
    def wrapper(*args, **kwargs):
        if func not in _MEMOIZED_NOARGS:
            res = func(*args, **kwargs)
            _MEMOIZED_NOARGS[func] = res
            return res
        return _MEMOIZED_NOARGS[func]
    return wrapper
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.