repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
tensorpack/tensorpack
tensorpack/tfutils/common.py
get_default_sess_config
def get_default_sess_config(mem_fraction=0.99): """ Return a tf.ConfigProto to use as default session config. You can modify the returned config to fit your needs. Args: mem_fraction(float): see the `per_process_gpu_memory_fraction` option in TensorFlow's GPUOptions protobuf: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto Returns: tf.ConfigProto: the config to use. """ conf = tfv1.ConfigProto() conf.allow_soft_placement = True # conf.log_device_placement = True conf.intra_op_parallelism_threads = 1 conf.inter_op_parallelism_threads = 0 # TF benchmark use cpu_count() - gpu_thread_count(), e.g. 80 - 8 * 2 # Didn't see much difference. conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction # This hurt performance of large data pipeline: # https://github.com/tensorflow/benchmarks/commit/1528c46499cdcff669b5d7c006b7b971884ad0e6 # conf.gpu_options.force_gpu_compatible = True conf.gpu_options.allow_growth = True # from tensorflow.core.protobuf import rewriter_config_pb2 as rwc # conf.graph_options.rewrite_options.memory_optimization = \ # rwc.RewriterConfig.HEURISTICS # May hurt performance? # conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 # conf.graph_options.place_pruned_graph = True return conf
python
def get_default_sess_config(mem_fraction=0.99): """ Return a tf.ConfigProto to use as default session config. You can modify the returned config to fit your needs. Args: mem_fraction(float): see the `per_process_gpu_memory_fraction` option in TensorFlow's GPUOptions protobuf: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto Returns: tf.ConfigProto: the config to use. """ conf = tfv1.ConfigProto() conf.allow_soft_placement = True # conf.log_device_placement = True conf.intra_op_parallelism_threads = 1 conf.inter_op_parallelism_threads = 0 # TF benchmark use cpu_count() - gpu_thread_count(), e.g. 80 - 8 * 2 # Didn't see much difference. conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction # This hurt performance of large data pipeline: # https://github.com/tensorflow/benchmarks/commit/1528c46499cdcff669b5d7c006b7b971884ad0e6 # conf.gpu_options.force_gpu_compatible = True conf.gpu_options.allow_growth = True # from tensorflow.core.protobuf import rewriter_config_pb2 as rwc # conf.graph_options.rewrite_options.memory_optimization = \ # rwc.RewriterConfig.HEURISTICS # May hurt performance? # conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 # conf.graph_options.place_pruned_graph = True return conf
[ "def", "get_default_sess_config", "(", "mem_fraction", "=", "0.99", ")", ":", "conf", "=", "tfv1", ".", "ConfigProto", "(", ")", "conf", ".", "allow_soft_placement", "=", "True", "# conf.log_device_placement = True", "conf", ".", "intra_op_parallelism_threads", "=", "1", "conf", ".", "inter_op_parallelism_threads", "=", "0", "# TF benchmark use cpu_count() - gpu_thread_count(), e.g. 80 - 8 * 2", "# Didn't see much difference.", "conf", ".", "gpu_options", ".", "per_process_gpu_memory_fraction", "=", "mem_fraction", "# This hurt performance of large data pipeline:", "# https://github.com/tensorflow/benchmarks/commit/1528c46499cdcff669b5d7c006b7b971884ad0e6", "# conf.gpu_options.force_gpu_compatible = True", "conf", ".", "gpu_options", ".", "allow_growth", "=", "True", "# from tensorflow.core.protobuf import rewriter_config_pb2 as rwc", "# conf.graph_options.rewrite_options.memory_optimization = \\", "# rwc.RewriterConfig.HEURISTICS", "# May hurt performance?", "# conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1", "# conf.graph_options.place_pruned_graph = True", "return", "conf" ]
Return a tf.ConfigProto to use as default session config. You can modify the returned config to fit your needs. Args: mem_fraction(float): see the `per_process_gpu_memory_fraction` option in TensorFlow's GPUOptions protobuf: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto Returns: tf.ConfigProto: the config to use.
[ "Return", "a", "tf", ".", "ConfigProto", "to", "use", "as", "default", "session", "config", ".", "You", "can", "modify", "the", "returned", "config", "to", "fit", "your", "needs", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/common.py#L30-L68
train
tensorpack/tensorpack
tensorpack/tfutils/common.py
get_global_step_var
def get_global_step_var(): """ Returns: tf.Tensor: the global_step variable in the current graph. Create if doesn't exist. """ scope = tfv1.VariableScope(reuse=False, name='') # the root vs with tfv1.variable_scope(scope): var = tfv1.train.get_or_create_global_step() return var
python
def get_global_step_var(): """ Returns: tf.Tensor: the global_step variable in the current graph. Create if doesn't exist. """ scope = tfv1.VariableScope(reuse=False, name='') # the root vs with tfv1.variable_scope(scope): var = tfv1.train.get_or_create_global_step() return var
[ "def", "get_global_step_var", "(", ")", ":", "scope", "=", "tfv1", ".", "VariableScope", "(", "reuse", "=", "False", ",", "name", "=", "''", ")", "# the root vs", "with", "tfv1", ".", "variable_scope", "(", "scope", ")", ":", "var", "=", "tfv1", ".", "train", ".", "get_or_create_global_step", "(", ")", "return", "var" ]
Returns: tf.Tensor: the global_step variable in the current graph. Create if doesn't exist.
[ "Returns", ":", "tf", ".", "Tensor", ":", "the", "global_step", "variable", "in", "the", "current", "graph", ".", "Create", "if", "doesn", "t", "exist", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/common.py#L72-L80
train
tensorpack/tensorpack
tensorpack/tfutils/common.py
get_tensors_by_names
def get_tensors_by_names(names): """ Get a list of tensors in the default graph by a list of names. Args: names (list): """ ret = [] G = tfv1.get_default_graph() for n in names: opn, varn = get_op_tensor_name(n) ret.append(G.get_tensor_by_name(varn)) return ret
python
def get_tensors_by_names(names): """ Get a list of tensors in the default graph by a list of names. Args: names (list): """ ret = [] G = tfv1.get_default_graph() for n in names: opn, varn = get_op_tensor_name(n) ret.append(G.get_tensor_by_name(varn)) return ret
[ "def", "get_tensors_by_names", "(", "names", ")", ":", "ret", "=", "[", "]", "G", "=", "tfv1", ".", "get_default_graph", "(", ")", "for", "n", "in", "names", ":", "opn", ",", "varn", "=", "get_op_tensor_name", "(", "n", ")", "ret", ".", "append", "(", "G", ".", "get_tensor_by_name", "(", "varn", ")", ")", "return", "ret" ]
Get a list of tensors in the default graph by a list of names. Args: names (list):
[ "Get", "a", "list", "of", "tensors", "in", "the", "default", "graph", "by", "a", "list", "of", "names", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/common.py#L113-L125
train
tensorpack/tensorpack
tensorpack/tfutils/common.py
get_op_or_tensor_by_name
def get_op_or_tensor_by_name(name): """ Get either tf.Operation of tf.Tensor from names. Args: name (list[str] or str): names of operations or tensors. Raises: KeyError, if the name doesn't exist """ G = tfv1.get_default_graph() def f(n): if len(n) >= 3 and n[-2] == ':': return G.get_tensor_by_name(n) else: return G.get_operation_by_name(n) if not isinstance(name, list): return f(name) else: return list(map(f, name))
python
def get_op_or_tensor_by_name(name): """ Get either tf.Operation of tf.Tensor from names. Args: name (list[str] or str): names of operations or tensors. Raises: KeyError, if the name doesn't exist """ G = tfv1.get_default_graph() def f(n): if len(n) >= 3 and n[-2] == ':': return G.get_tensor_by_name(n) else: return G.get_operation_by_name(n) if not isinstance(name, list): return f(name) else: return list(map(f, name))
[ "def", "get_op_or_tensor_by_name", "(", "name", ")", ":", "G", "=", "tfv1", ".", "get_default_graph", "(", ")", "def", "f", "(", "n", ")", ":", "if", "len", "(", "n", ")", ">=", "3", "and", "n", "[", "-", "2", "]", "==", "':'", ":", "return", "G", ".", "get_tensor_by_name", "(", "n", ")", "else", ":", "return", "G", ".", "get_operation_by_name", "(", "n", ")", "if", "not", "isinstance", "(", "name", ",", "list", ")", ":", "return", "f", "(", "name", ")", "else", ":", "return", "list", "(", "map", "(", "f", ",", "name", ")", ")" ]
Get either tf.Operation of tf.Tensor from names. Args: name (list[str] or str): names of operations or tensors. Raises: KeyError, if the name doesn't exist
[ "Get", "either", "tf", ".", "Operation", "of", "tf", ".", "Tensor", "from", "names", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/common.py#L128-L149
train
tensorpack/tensorpack
tensorpack/tfutils/common.py
collect_env_info
def collect_env_info(): """ Returns: str - a table contains important information about the environment """ data = [] data.append(("sys.platform", sys.platform)) data.append(("Python", sys.version.replace("\n", ""))) data.append(("Tensorpack", __git_version__)) data.append(("Numpy", np.__version__)) data.append(("TensorFlow", tfv1.VERSION + "/" + tfv1.GIT_VERSION)) data.append(("TF Compiler Version", tfv1.COMPILER_VERSION)) has_cuda = tf.test.is_built_with_cuda() data.append(("TF CUDA support", has_cuda)) try: from tensorflow.python.framework import test_util data.append(("TF MKL support", test_util.IsMklEnabled())) except Exception: pass try: from tensorflow.python.framework import test_util data.append(("TF XLA support", test_util.is_xla_enabled())) except Exception: pass if has_cuda: data.append(("Nvidia Driver", find_library("nvidia-ml"))) data.append(("CUDA", find_library("cudart"))) data.append(("CUDNN", find_library("cudnn"))) data.append(("NCCL", find_library("nccl"))) # List devices with NVML data.append( ("CUDA_VISIBLE_DEVICES", os.environ.get("CUDA_VISIBLE_DEVICES", str(None)))) try: devs = defaultdict(list) with NVMLContext() as ctx: for idx, dev in enumerate(ctx.devices()): devs[dev.name()].append(str(idx)) for devname, devids in devs.items(): data.append( ("GPU " + ",".join(devids), devname)) except Exception: data.append(("GPU", "Not found with NVML")) vram = psutil.virtual_memory() data.append(("Free RAM", "{:.2f}/{:.2f} GB".format(vram.available / 1024**3, vram.total / 1024**3))) data.append(("CPU Count", psutil.cpu_count())) # Other important dependencies: try: import horovod data.append(("horovod", horovod.__version__)) except ImportError: pass try: import cv2 data.append(("cv2", cv2.__version__)) except ImportError: pass import msgpack data.append(("msgpack", ".".join([str(x) for x in msgpack.version]))) has_prctl = True try: import prctl _ = prctl.set_pdeathsig # noqa except Exception: has_prctl = False data.append(("python-prctl", has_prctl)) 
return tabulate(data)
python
def collect_env_info(): """ Returns: str - a table contains important information about the environment """ data = [] data.append(("sys.platform", sys.platform)) data.append(("Python", sys.version.replace("\n", ""))) data.append(("Tensorpack", __git_version__)) data.append(("Numpy", np.__version__)) data.append(("TensorFlow", tfv1.VERSION + "/" + tfv1.GIT_VERSION)) data.append(("TF Compiler Version", tfv1.COMPILER_VERSION)) has_cuda = tf.test.is_built_with_cuda() data.append(("TF CUDA support", has_cuda)) try: from tensorflow.python.framework import test_util data.append(("TF MKL support", test_util.IsMklEnabled())) except Exception: pass try: from tensorflow.python.framework import test_util data.append(("TF XLA support", test_util.is_xla_enabled())) except Exception: pass if has_cuda: data.append(("Nvidia Driver", find_library("nvidia-ml"))) data.append(("CUDA", find_library("cudart"))) data.append(("CUDNN", find_library("cudnn"))) data.append(("NCCL", find_library("nccl"))) # List devices with NVML data.append( ("CUDA_VISIBLE_DEVICES", os.environ.get("CUDA_VISIBLE_DEVICES", str(None)))) try: devs = defaultdict(list) with NVMLContext() as ctx: for idx, dev in enumerate(ctx.devices()): devs[dev.name()].append(str(idx)) for devname, devids in devs.items(): data.append( ("GPU " + ",".join(devids), devname)) except Exception: data.append(("GPU", "Not found with NVML")) vram = psutil.virtual_memory() data.append(("Free RAM", "{:.2f}/{:.2f} GB".format(vram.available / 1024**3, vram.total / 1024**3))) data.append(("CPU Count", psutil.cpu_count())) # Other important dependencies: try: import horovod data.append(("horovod", horovod.__version__)) except ImportError: pass try: import cv2 data.append(("cv2", cv2.__version__)) except ImportError: pass import msgpack data.append(("msgpack", ".".join([str(x) for x in msgpack.version]))) has_prctl = True try: import prctl _ = prctl.set_pdeathsig # noqa except Exception: has_prctl = False data.append(("python-prctl", has_prctl)) 
return tabulate(data)
[ "def", "collect_env_info", "(", ")", ":", "data", "=", "[", "]", "data", ".", "append", "(", "(", "\"sys.platform\"", ",", "sys", ".", "platform", ")", ")", "data", ".", "append", "(", "(", "\"Python\"", ",", "sys", ".", "version", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"Tensorpack\"", ",", "__git_version__", ")", ")", "data", ".", "append", "(", "(", "\"Numpy\"", ",", "np", ".", "__version__", ")", ")", "data", ".", "append", "(", "(", "\"TensorFlow\"", ",", "tfv1", ".", "VERSION", "+", "\"/\"", "+", "tfv1", ".", "GIT_VERSION", ")", ")", "data", ".", "append", "(", "(", "\"TF Compiler Version\"", ",", "tfv1", ".", "COMPILER_VERSION", ")", ")", "has_cuda", "=", "tf", ".", "test", ".", "is_built_with_cuda", "(", ")", "data", ".", "append", "(", "(", "\"TF CUDA support\"", ",", "has_cuda", ")", ")", "try", ":", "from", "tensorflow", ".", "python", ".", "framework", "import", "test_util", "data", ".", "append", "(", "(", "\"TF MKL support\"", ",", "test_util", ".", "IsMklEnabled", "(", ")", ")", ")", "except", "Exception", ":", "pass", "try", ":", "from", "tensorflow", ".", "python", ".", "framework", "import", "test_util", "data", ".", "append", "(", "(", "\"TF XLA support\"", ",", "test_util", ".", "is_xla_enabled", "(", ")", ")", ")", "except", "Exception", ":", "pass", "if", "has_cuda", ":", "data", ".", "append", "(", "(", "\"Nvidia Driver\"", ",", "find_library", "(", "\"nvidia-ml\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"CUDA\"", ",", "find_library", "(", "\"cudart\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"CUDNN\"", ",", "find_library", "(", "\"cudnn\"", ")", ")", ")", "data", ".", "append", "(", "(", "\"NCCL\"", ",", "find_library", "(", "\"nccl\"", ")", ")", ")", "# List devices with NVML", "data", ".", "append", "(", "(", "\"CUDA_VISIBLE_DEVICES\"", ",", "os", ".", "environ", ".", "get", "(", "\"CUDA_VISIBLE_DEVICES\"", ",", "str", "(", "None", ")", ")", ")", ")", "try", ":", 
"devs", "=", "defaultdict", "(", "list", ")", "with", "NVMLContext", "(", ")", "as", "ctx", ":", "for", "idx", ",", "dev", "in", "enumerate", "(", "ctx", ".", "devices", "(", ")", ")", ":", "devs", "[", "dev", ".", "name", "(", ")", "]", ".", "append", "(", "str", "(", "idx", ")", ")", "for", "devname", ",", "devids", "in", "devs", ".", "items", "(", ")", ":", "data", ".", "append", "(", "(", "\"GPU \"", "+", "\",\"", ".", "join", "(", "devids", ")", ",", "devname", ")", ")", "except", "Exception", ":", "data", ".", "append", "(", "(", "\"GPU\"", ",", "\"Not found with NVML\"", ")", ")", "vram", "=", "psutil", ".", "virtual_memory", "(", ")", "data", ".", "append", "(", "(", "\"Free RAM\"", ",", "\"{:.2f}/{:.2f} GB\"", ".", "format", "(", "vram", ".", "available", "/", "1024", "**", "3", ",", "vram", ".", "total", "/", "1024", "**", "3", ")", ")", ")", "data", ".", "append", "(", "(", "\"CPU Count\"", ",", "psutil", ".", "cpu_count", "(", ")", ")", ")", "# Other important dependencies:", "try", ":", "import", "horovod", "data", ".", "append", "(", "(", "\"horovod\"", ",", "horovod", ".", "__version__", ")", ")", "except", "ImportError", ":", "pass", "try", ":", "import", "cv2", "data", ".", "append", "(", "(", "\"cv2\"", ",", "cv2", ".", "__version__", ")", ")", "except", "ImportError", ":", "pass", "import", "msgpack", "data", ".", "append", "(", "(", "\"msgpack\"", ",", "\".\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "msgpack", ".", "version", "]", ")", ")", ")", "has_prctl", "=", "True", "try", ":", "import", "prctl", "_", "=", "prctl", ".", "set_pdeathsig", "# noqa", "except", "Exception", ":", "has_prctl", "=", "False", "data", ".", "append", "(", "(", "\"python-prctl\"", ",", "has_prctl", ")", ")", "return", "tabulate", "(", "data", ")" ]
Returns: str - a table contains important information about the environment
[ "Returns", ":", "str", "-", "a", "table", "contains", "important", "information", "about", "the", "environment" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/common.py#L167-L245
train
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
DistributedBuilderBase._add_sync_queues_and_barrier
def _add_sync_queues_and_barrier(self, name, dependencies): """Adds ops to enqueue on all worker queues. Args: name: prefixed for the shared_name of ops. dependencies: control dependency from ops. Returns: an op that should be used as control dependency before starting next step. """ self._sync_queue_counter += 1 with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]): sync_queues = [ tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]], shared_name='%s%s' % (name, i)) for i in range(self.num_worker)] queue_ops = [] # For each other worker, add an entry in a queue, signaling that it can finish this step. token = tf.constant(False) with tf.control_dependencies(dependencies): for i, q in enumerate(sync_queues): if i != self.task_index: queue_ops.append(q.enqueue(token)) # Drain tokens off queue for this worker, one for each other worker. queue_ops.append( sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1)) return tf.group(*queue_ops, name=name)
python
def _add_sync_queues_and_barrier(self, name, dependencies): """Adds ops to enqueue on all worker queues. Args: name: prefixed for the shared_name of ops. dependencies: control dependency from ops. Returns: an op that should be used as control dependency before starting next step. """ self._sync_queue_counter += 1 with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]): sync_queues = [ tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]], shared_name='%s%s' % (name, i)) for i in range(self.num_worker)] queue_ops = [] # For each other worker, add an entry in a queue, signaling that it can finish this step. token = tf.constant(False) with tf.control_dependencies(dependencies): for i, q in enumerate(sync_queues): if i != self.task_index: queue_ops.append(q.enqueue(token)) # Drain tokens off queue for this worker, one for each other worker. queue_ops.append( sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1)) return tf.group(*queue_ops, name=name)
[ "def", "_add_sync_queues_and_barrier", "(", "self", ",", "name", ",", "dependencies", ")", ":", "self", ".", "_sync_queue_counter", "+=", "1", "with", "tf", ".", "device", "(", "self", ".", "sync_queue_devices", "[", "self", ".", "_sync_queue_counter", "%", "len", "(", "self", ".", "sync_queue_devices", ")", "]", ")", ":", "sync_queues", "=", "[", "tf", ".", "FIFOQueue", "(", "self", ".", "num_worker", ",", "[", "tf", ".", "bool", "]", ",", "shapes", "=", "[", "[", "]", "]", ",", "shared_name", "=", "'%s%s'", "%", "(", "name", ",", "i", ")", ")", "for", "i", "in", "range", "(", "self", ".", "num_worker", ")", "]", "queue_ops", "=", "[", "]", "# For each other worker, add an entry in a queue, signaling that it can finish this step.", "token", "=", "tf", ".", "constant", "(", "False", ")", "with", "tf", ".", "control_dependencies", "(", "dependencies", ")", ":", "for", "i", ",", "q", "in", "enumerate", "(", "sync_queues", ")", ":", "if", "i", "!=", "self", ".", "task_index", ":", "queue_ops", ".", "append", "(", "q", ".", "enqueue", "(", "token", ")", ")", "# Drain tokens off queue for this worker, one for each other worker.", "queue_ops", ".", "append", "(", "sync_queues", "[", "self", ".", "task_index", "]", ".", "dequeue_many", "(", "len", "(", "sync_queues", ")", "-", "1", ")", ")", "return", "tf", ".", "group", "(", "*", "queue_ops", ",", "name", "=", "name", ")" ]
Adds ops to enqueue on all worker queues. Args: name: prefixed for the shared_name of ops. dependencies: control dependency from ops. Returns: an op that should be used as control dependency before starting next step.
[ "Adds", "ops", "to", "enqueue", "on", "all", "worker", "queues", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L30-L58
train
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
DistributedReplicatedBuilder._apply_shadow_vars
def _apply_shadow_vars(avg_grads): """ Create shadow variables on PS, and replace variables in avg_grads by these shadow variables. Args: avg_grads: list of (grad, var) tuples """ ps_var_grads = [] for grad, var in avg_grads: assert var.name.startswith('tower'), var.name my_name = '/'.join(var.name.split('/')[1:]) my_name = get_op_tensor_name(my_name)[0] new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype, initializer=var.initial_value, trainable=True) # (g, v) to be applied, where v is global (ps vars) ps_var_grads.append((grad, new_v)) return ps_var_grads
python
def _apply_shadow_vars(avg_grads): """ Create shadow variables on PS, and replace variables in avg_grads by these shadow variables. Args: avg_grads: list of (grad, var) tuples """ ps_var_grads = [] for grad, var in avg_grads: assert var.name.startswith('tower'), var.name my_name = '/'.join(var.name.split('/')[1:]) my_name = get_op_tensor_name(my_name)[0] new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype, initializer=var.initial_value, trainable=True) # (g, v) to be applied, where v is global (ps vars) ps_var_grads.append((grad, new_v)) return ps_var_grads
[ "def", "_apply_shadow_vars", "(", "avg_grads", ")", ":", "ps_var_grads", "=", "[", "]", "for", "grad", ",", "var", "in", "avg_grads", ":", "assert", "var", ".", "name", ".", "startswith", "(", "'tower'", ")", ",", "var", ".", "name", "my_name", "=", "'/'", ".", "join", "(", "var", ".", "name", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", ")", "my_name", "=", "get_op_tensor_name", "(", "my_name", ")", "[", "0", "]", "new_v", "=", "tf", ".", "get_variable", "(", "my_name", ",", "dtype", "=", "var", ".", "dtype", ".", "base_dtype", ",", "initializer", "=", "var", ".", "initial_value", ",", "trainable", "=", "True", ")", "# (g, v) to be applied, where v is global (ps vars)", "ps_var_grads", ".", "append", "(", "(", "grad", ",", "new_v", ")", ")", "return", "ps_var_grads" ]
Create shadow variables on PS, and replace variables in avg_grads by these shadow variables. Args: avg_grads: list of (grad, var) tuples
[ "Create", "shadow", "variables", "on", "PS", "and", "replace", "variables", "in", "avg_grads", "by", "these", "shadow", "variables", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L205-L223
train
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
DistributedReplicatedBuilder._shadow_model_variables
def _shadow_model_variables(shadow_vars): """ Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``. Returns: list of (shadow_model_var, local_model_var) used for syncing. """ G = tf.get_default_graph() curr_shadow_vars = set([v.name for v in shadow_vars]) model_vars = tf.model_variables() shadow_model_vars = [] for v in model_vars: assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!" stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name)) if stripped_op_name in curr_shadow_vars: continue try: G.get_tensor_by_name(stripped_var_name) logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name)) continue except KeyError: pass new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype, initializer=v.initial_value, trainable=False) curr_shadow_vars.add(stripped_op_name) # avoid duplicated shadow_model_vars shadow_vars.append(new_v) shadow_model_vars.append((new_v, v)) # only need to sync model_var from one tower return shadow_model_vars
python
def _shadow_model_variables(shadow_vars): """ Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``. Returns: list of (shadow_model_var, local_model_var) used for syncing. """ G = tf.get_default_graph() curr_shadow_vars = set([v.name for v in shadow_vars]) model_vars = tf.model_variables() shadow_model_vars = [] for v in model_vars: assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!" stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name)) if stripped_op_name in curr_shadow_vars: continue try: G.get_tensor_by_name(stripped_var_name) logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name)) continue except KeyError: pass new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype, initializer=v.initial_value, trainable=False) curr_shadow_vars.add(stripped_op_name) # avoid duplicated shadow_model_vars shadow_vars.append(new_v) shadow_model_vars.append((new_v, v)) # only need to sync model_var from one tower return shadow_model_vars
[ "def", "_shadow_model_variables", "(", "shadow_vars", ")", ":", "G", "=", "tf", ".", "get_default_graph", "(", ")", "curr_shadow_vars", "=", "set", "(", "[", "v", ".", "name", "for", "v", "in", "shadow_vars", "]", ")", "model_vars", "=", "tf", ".", "model_variables", "(", ")", "shadow_model_vars", "=", "[", "]", "for", "v", "in", "model_vars", ":", "assert", "v", ".", "name", ".", "startswith", "(", "'tower'", ")", ",", "\"Found some MODEL_VARIABLES created outside of the tower function!\"", "stripped_op_name", ",", "stripped_var_name", "=", "get_op_tensor_name", "(", "re", ".", "sub", "(", "'^tower[0-9]+/'", ",", "''", ",", "v", ".", "name", ")", ")", "if", "stripped_op_name", "in", "curr_shadow_vars", ":", "continue", "try", ":", "G", ".", "get_tensor_by_name", "(", "stripped_var_name", ")", "logger", ".", "warn", "(", "\"Model Variable {} also appears in other collections.\"", ".", "format", "(", "stripped_var_name", ")", ")", "continue", "except", "KeyError", ":", "pass", "new_v", "=", "tf", ".", "get_variable", "(", "stripped_op_name", ",", "dtype", "=", "v", ".", "dtype", ".", "base_dtype", ",", "initializer", "=", "v", ".", "initial_value", ",", "trainable", "=", "False", ")", "curr_shadow_vars", ".", "add", "(", "stripped_op_name", ")", "# avoid duplicated shadow_model_vars", "shadow_vars", ".", "append", "(", "new_v", ")", "shadow_model_vars", ".", "append", "(", "(", "new_v", ",", "v", ")", ")", "# only need to sync model_var from one tower", "return", "shadow_model_vars" ]
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``. Returns: list of (shadow_model_var, local_model_var) used for syncing.
[ "Create", "shadow", "vars", "for", "model_variables", "as", "well", "and", "add", "to", "the", "list", "of", "shadow_vars", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L226-L255
train
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
DistributedReplicatedBuilder.build
def build(self, get_grad_fn, get_opt_fn): """ Args: get_grad_fn (-> [(grad, var)]): get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer Returns: (tf.Operation, tf.Operation, tf.Operation): 1. the training op. 2. the op which sync all the local variables from PS. This op should be run before training. 3. the op which sync all the local `MODEL_VARIABLES` from PS. You can choose how often to run it by yourself. """ with override_to_local_variable(): get_global_step_var() get_opt_fn = memoized(get_opt_fn) # Build the optimizer first, before entering any tower. # This makes sure that learning_rate is a global variable (what we expect) get_opt_fn() # TODO get_opt_fn called before main graph was built # Ngpu * Nvar * 2 grad_list = DataParallelBuilder.build_on_towers( self.towers, get_grad_fn, devices=self.raw_devices, use_vs=[True] * len(self.towers)) # open vs at each tower DataParallelBuilder._check_grad_list(grad_list) avg_grads = aggregate_grads( grad_list, colocation=False, devices=self.raw_devices) with tf.device(self.param_server_device): ps_var_grads = DistributedReplicatedBuilder._apply_shadow_vars(avg_grads) var_update_ops = self._apply_gradients_and_copy( get_opt_fn(), grad_list, ps_var_grads) self._shadow_vars = [v for (__, v) in ps_var_grads] self._shadow_model_vars = DistributedReplicatedBuilder._shadow_model_variables(self._shadow_vars) # TODO add options to synchronize less main_fetch = tf.group(*var_update_ops, name='main_fetches') train_op = self._add_sync_queues_and_barrier( 'post_copy_barrier', [main_fetch]) # initial local_vars syncing with tf.name_scope('initial_sync_variables'): initial_sync_op = self._get_initial_sync_op() if len(self._shadow_model_vars) and self.is_chief: with tf.name_scope('sync_model_variables'): model_sync_op = self._get_sync_model_vars_op() else: model_sync_op = None return train_op, initial_sync_op, model_sync_op
python
def build(self, get_grad_fn, get_opt_fn):
    """
    Build the distributed-replicated training graph.

    Args:
        get_grad_fn (-> [(grad, var)]):
        get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer

    Returns:
        (tf.Operation, tf.Operation, tf.Operation):

        1. the training op.
        2. the op which sync all the local variables from PS.
           This op should be run before training.
        3. the op which sync all the local `MODEL_VARIABLES` from PS.
           You can choose how often to run it by yourself.
    """
    # The global step must live on the PS, but be created as a local
    # variable on each worker so it is not duplicated across replicas.
    with override_to_local_variable():
        get_global_step_var()

    get_opt_fn = memoized(get_opt_fn)
    # Build the optimizer first, before entering any tower.
    # This makes sure that learning_rate is a global variable (what we expect)
    get_opt_fn()  # TODO get_opt_fn called before main graph was built

    # Ngpu * Nvar * 2
    grad_list = DataParallelBuilder.build_on_towers(
        self.towers, get_grad_fn,
        devices=self.raw_devices,
        use_vs=[True] * len(self.towers))  # open vs at each tower
    DataParallelBuilder._check_grad_list(grad_list)

    # Average gradients locally (across this worker's GPUs) before
    # sending them to the parameter server.
    avg_grads = aggregate_grads(
        grad_list, colocation=False, devices=self.raw_devices)
    with tf.device(self.param_server_device):
        # Create PS-side shadow copies of the variables, then apply the
        # averaged gradients there and copy the results back to towers.
        ps_var_grads = DistributedReplicatedBuilder._apply_shadow_vars(avg_grads)
        var_update_ops = self._apply_gradients_and_copy(
            get_opt_fn(), grad_list, ps_var_grads)
        self._shadow_vars = [v for (__, v) in ps_var_grads]
        self._shadow_model_vars = DistributedReplicatedBuilder._shadow_model_variables(self._shadow_vars)

    # TODO add options to synchronize less
    main_fetch = tf.group(*var_update_ops, name='main_fetches')
    # All workers rendezvous here so no one races ahead after the copy.
    train_op = self._add_sync_queues_and_barrier(
        'post_copy_barrier', [main_fetch])

    # initial local_vars syncing
    with tf.name_scope('initial_sync_variables'):
        initial_sync_op = self._get_initial_sync_op()
    # Only the chief pushes MODEL_VARIABLES (e.g. BN statistics) to PS.
    if len(self._shadow_model_vars) and self.is_chief:
        with tf.name_scope('sync_model_variables'):
            model_sync_op = self._get_sync_model_vars_op()
    else:
        model_sync_op = None
    return train_op, initial_sync_op, model_sync_op
[ "def", "build", "(", "self", ",", "get_grad_fn", ",", "get_opt_fn", ")", ":", "with", "override_to_local_variable", "(", ")", ":", "get_global_step_var", "(", ")", "get_opt_fn", "=", "memoized", "(", "get_opt_fn", ")", "# Build the optimizer first, before entering any tower.", "# This makes sure that learning_rate is a global variable (what we expect)", "get_opt_fn", "(", ")", "# TODO get_opt_fn called before main graph was built", "# Ngpu * Nvar * 2", "grad_list", "=", "DataParallelBuilder", ".", "build_on_towers", "(", "self", ".", "towers", ",", "get_grad_fn", ",", "devices", "=", "self", ".", "raw_devices", ",", "use_vs", "=", "[", "True", "]", "*", "len", "(", "self", ".", "towers", ")", ")", "# open vs at each tower", "DataParallelBuilder", ".", "_check_grad_list", "(", "grad_list", ")", "avg_grads", "=", "aggregate_grads", "(", "grad_list", ",", "colocation", "=", "False", ",", "devices", "=", "self", ".", "raw_devices", ")", "with", "tf", ".", "device", "(", "self", ".", "param_server_device", ")", ":", "ps_var_grads", "=", "DistributedReplicatedBuilder", ".", "_apply_shadow_vars", "(", "avg_grads", ")", "var_update_ops", "=", "self", ".", "_apply_gradients_and_copy", "(", "get_opt_fn", "(", ")", ",", "grad_list", ",", "ps_var_grads", ")", "self", ".", "_shadow_vars", "=", "[", "v", "for", "(", "__", ",", "v", ")", "in", "ps_var_grads", "]", "self", ".", "_shadow_model_vars", "=", "DistributedReplicatedBuilder", ".", "_shadow_model_variables", "(", "self", ".", "_shadow_vars", ")", "# TODO add options to synchronize less", "main_fetch", "=", "tf", ".", "group", "(", "*", "var_update_ops", ",", "name", "=", "'main_fetches'", ")", "train_op", "=", "self", ".", "_add_sync_queues_and_barrier", "(", "'post_copy_barrier'", ",", "[", "main_fetch", "]", ")", "# initial local_vars syncing", "with", "tf", ".", "name_scope", "(", "'initial_sync_variables'", ")", ":", "initial_sync_op", "=", "self", ".", "_get_initial_sync_op", "(", ")", "if", "len", "(", "self", 
".", "_shadow_model_vars", ")", "and", "self", ".", "is_chief", ":", "with", "tf", ".", "name_scope", "(", "'sync_model_variables'", ")", ":", "model_sync_op", "=", "self", ".", "_get_sync_model_vars_op", "(", ")", "else", ":", "model_sync_op", "=", "None", "return", "train_op", ",", "initial_sync_op", ",", "model_sync_op" ]
Args: get_grad_fn (-> [(grad, var)]): get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer Returns: (tf.Operation, tf.Operation, tf.Operation): 1. the training op. 2. the op which sync all the local variables from PS. This op should be run before training. 3. the op which sync all the local `MODEL_VARIABLES` from PS. You can choose how often to run it by yourself.
[ "Args", ":", "get_grad_fn", "(", "-", ">", "[", "(", "grad", "var", ")", "]", ")", ":", "get_opt_fn", "(", "-", ">", "tf", ".", "train", ".", "Optimizer", ")", ":", "callable", "which", "returns", "an", "optimizer" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L257-L311
train
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
DistributedReplicatedBuilder._apply_gradients_and_copy
def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads): """ Apply averaged gradients to ps vars, and then copy the updated variables back to each tower. Args: raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers ps_var_grads: Nvar x 2 (grad, ps_var) Returns: list of copy ops """ # TODO do this for variables together? with tf.name_scope('apply_gradients'): var_update_ops = [] for vid, (g, v) in enumerate(ps_var_grads): # TODO do we put momentum variables into local or global? apply_gradient_op = opt.apply_gradients([(g, v)]) barrier = self._add_sync_queues_and_barrier( 'param_update_barrier_{}'.format(vid), [apply_gradient_op]) with tf.control_dependencies([barrier]), \ tf.device(self.cpu_device): updated_value = v.read_value() for towerid in range(self.nr_gpu): var_update_ops.append( raw_grad_list[towerid][vid][1].assign(updated_value)) return var_update_ops
python
def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads): """ Apply averaged gradients to ps vars, and then copy the updated variables back to each tower. Args: raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers ps_var_grads: Nvar x 2 (grad, ps_var) Returns: list of copy ops """ # TODO do this for variables together? with tf.name_scope('apply_gradients'): var_update_ops = [] for vid, (g, v) in enumerate(ps_var_grads): # TODO do we put momentum variables into local or global? apply_gradient_op = opt.apply_gradients([(g, v)]) barrier = self._add_sync_queues_and_barrier( 'param_update_barrier_{}'.format(vid), [apply_gradient_op]) with tf.control_dependencies([barrier]), \ tf.device(self.cpu_device): updated_value = v.read_value() for towerid in range(self.nr_gpu): var_update_ops.append( raw_grad_list[towerid][vid][1].assign(updated_value)) return var_update_ops
[ "def", "_apply_gradients_and_copy", "(", "self", ",", "opt", ",", "raw_grad_list", ",", "ps_var_grads", ")", ":", "# TODO do this for variables together?", "with", "tf", ".", "name_scope", "(", "'apply_gradients'", ")", ":", "var_update_ops", "=", "[", "]", "for", "vid", ",", "(", "g", ",", "v", ")", "in", "enumerate", "(", "ps_var_grads", ")", ":", "# TODO do we put momentum variables into local or global?", "apply_gradient_op", "=", "opt", ".", "apply_gradients", "(", "[", "(", "g", ",", "v", ")", "]", ")", "barrier", "=", "self", ".", "_add_sync_queues_and_barrier", "(", "'param_update_barrier_{}'", ".", "format", "(", "vid", ")", ",", "[", "apply_gradient_op", "]", ")", "with", "tf", ".", "control_dependencies", "(", "[", "barrier", "]", ")", ",", "tf", ".", "device", "(", "self", ".", "cpu_device", ")", ":", "updated_value", "=", "v", ".", "read_value", "(", ")", "for", "towerid", "in", "range", "(", "self", ".", "nr_gpu", ")", ":", "var_update_ops", ".", "append", "(", "raw_grad_list", "[", "towerid", "]", "[", "vid", "]", "[", "1", "]", ".", "assign", "(", "updated_value", ")", ")", "return", "var_update_ops" ]
Apply averaged gradients to ps vars, and then copy the updated variables back to each tower. Args: raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers ps_var_grads: Nvar x 2 (grad, ps_var) Returns: list of copy ops
[ "Apply", "averaged", "gradients", "to", "ps", "vars", "and", "then", "copy", "the", "updated", "variables", "back", "to", "each", "tower", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L313-L339
train
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
DistributedReplicatedBuilder._get_initial_sync_op
def _get_initial_sync_op(self): """ Get the op to copy-initialized all local variables from PS. """ def strip_port(s): if s.endswith(':0'): return s[:-2] return s local_vars = tf.local_variables() local_var_by_name = dict([(strip_port(v.name), v) for v in local_vars]) ops = [] nr_shadow_vars = len(self._shadow_vars) for v in self._shadow_vars: vname = strip_port(v.name) for i in range(self.nr_gpu): name = 'tower%s/%s' % (i, vname) assert name in local_var_by_name, \ "Shadow variable {} doesn't match a corresponding local variable!".format(v.name) copy_to = local_var_by_name[name] # logger.info("{} -> {}".format(v.name, copy_to.name)) ops.append(copy_to.assign(v.read_value())) return tf.group(*ops, name='sync_{}_variables_from_ps'.format(nr_shadow_vars))
python
def _get_initial_sync_op(self):
    """
    Get the op to copy-initialized all local variables from PS.
    """
    def strip_port(s):
        # TF tensor names carry an output-port suffix ("name:0"); drop it
        # so PS variable names can be matched against local ones.
        if s.endswith(':0'):
            return s[:-2]
        return s

    local_vars = tf.local_variables()
    local_var_by_name = dict([(strip_port(v.name), v) for v in local_vars])
    ops = []
    nr_shadow_vars = len(self._shadow_vars)
    for v in self._shadow_vars:
        vname = strip_port(v.name)
        for i in range(self.nr_gpu):
            # Each tower keeps its own local copy under a "towerN/" prefix.
            name = 'tower%s/%s' % (i, vname)
            assert name in local_var_by_name, \
                "Shadow variable {} doesn't match a corresponding local variable!".format(v.name)
            copy_to = local_var_by_name[name]
            # logger.info("{} -> {}".format(v.name, copy_to.name))
            ops.append(copy_to.assign(v.read_value()))
    return tf.group(*ops, name='sync_{}_variables_from_ps'.format(nr_shadow_vars))
[ "def", "_get_initial_sync_op", "(", "self", ")", ":", "def", "strip_port", "(", "s", ")", ":", "if", "s", ".", "endswith", "(", "':0'", ")", ":", "return", "s", "[", ":", "-", "2", "]", "return", "s", "local_vars", "=", "tf", ".", "local_variables", "(", ")", "local_var_by_name", "=", "dict", "(", "[", "(", "strip_port", "(", "v", ".", "name", ")", ",", "v", ")", "for", "v", "in", "local_vars", "]", ")", "ops", "=", "[", "]", "nr_shadow_vars", "=", "len", "(", "self", ".", "_shadow_vars", ")", "for", "v", "in", "self", ".", "_shadow_vars", ":", "vname", "=", "strip_port", "(", "v", ".", "name", ")", "for", "i", "in", "range", "(", "self", ".", "nr_gpu", ")", ":", "name", "=", "'tower%s/%s'", "%", "(", "i", ",", "vname", ")", "assert", "name", "in", "local_var_by_name", ",", "\"Shadow variable {} doesn't match a corresponding local variable!\"", ".", "format", "(", "v", ".", "name", ")", "copy_to", "=", "local_var_by_name", "[", "name", "]", "# logger.info(\"{} -> {}\".format(v.name, copy_to.name))", "ops", ".", "append", "(", "copy_to", ".", "assign", "(", "v", ".", "read_value", "(", ")", ")", ")", "return", "tf", ".", "group", "(", "*", "ops", ",", "name", "=", "'sync_{}_variables_from_ps'", ".", "format", "(", "nr_shadow_vars", ")", ")" ]
Get the op to copy-initialized all local variables from PS.
[ "Get", "the", "op", "to", "copy", "-", "initialized", "all", "local", "variables", "from", "PS", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L341-L362
train
tensorpack/tensorpack
tensorpack/graph_builder/distributed.py
DistributedReplicatedBuilder._get_sync_model_vars_op
def _get_sync_model_vars_op(self): """ Get the op to sync local model_variables to PS. """ ops = [] for (shadow_v, local_v) in self._shadow_model_vars: ops.append(shadow_v.assign(local_v.read_value())) assert len(ops) return tf.group(*ops, name='sync_{}_model_variables_to_ps'.format(len(ops)))
python
def _get_sync_model_vars_op(self):
    """
    Build the op that pushes the worker's local model_variables
    to their shadow copies on the PS.
    """
    assign_ops = [
        ps_copy.assign(worker_copy.read_value())
        for ps_copy, worker_copy in self._shadow_model_vars
    ]
    # This method is only called when shadow model variables exist.
    assert len(assign_ops)
    return tf.group(
        *assign_ops,
        name='sync_{}_model_variables_to_ps'.format(len(assign_ops)))
[ "def", "_get_sync_model_vars_op", "(", "self", ")", ":", "ops", "=", "[", "]", "for", "(", "shadow_v", ",", "local_v", ")", "in", "self", ".", "_shadow_model_vars", ":", "ops", ".", "append", "(", "shadow_v", ".", "assign", "(", "local_v", ".", "read_value", "(", ")", ")", ")", "assert", "len", "(", "ops", ")", "return", "tf", ".", "group", "(", "*", "ops", ",", "name", "=", "'sync_{}_model_variables_to_ps'", ".", "format", "(", "len", "(", "ops", ")", ")", ")" ]
Get the op to sync local model_variables to PS.
[ "Get", "the", "op", "to", "sync", "local", "model_variables", "to", "PS", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/distributed.py#L364-L372
train
tensorpack/tensorpack
tensorpack/input_source/input_source_base.py
get_tensors_inputs
def get_tensors_inputs(placeholders, tensors, names): """ Args: placeholders (list[Tensor]): tensors (list[Tensor]): list of tf.Tensor names (list[str]): names matching the given tensors Returns: list[Tensor]: inputs to used for the tower function, with the corresponding placeholders replaced by tensors. """ assert len(tensors) == len(names), \ "Input tensors {} and input names {} have different length!".format( tensors, names) ret = copy.copy(placeholders) placeholder_names = [p.name for p in placeholders] for name, tensor in zip(names, tensors): tensorname = get_op_tensor_name(name)[1] try: idx = placeholder_names.index(tensorname) except ValueError: logger.error("Name {} is not a model input!".format(tensorname)) raise ret[idx] = tensor return ret
python
def get_tensors_inputs(placeholders, tensors, names):
    """
    Substitute some placeholders with given tensors.

    Args:
        placeholders (list[Tensor]):
        tensors (list[Tensor]): list of tf.Tensor
        names (list[str]): names matching the given tensors

    Returns:
        list[Tensor]: inputs to be used for the tower function,
            with the corresponding placeholders replaced by tensors.
    """
    assert len(tensors) == len(names), \
        "Input tensors {} and input names {} have different length!".format(
            tensors, names)
    result = copy.copy(placeholders)
    known_names = [p.name for p in placeholders]
    for replacement_name, replacement in zip(names, tensors):
        tensorname = get_op_tensor_name(replacement_name)[1]
        try:
            pos = known_names.index(tensorname)
        except ValueError:
            logger.error("Name {} is not a model input!".format(tensorname))
            raise
        result[pos] = replacement
    return result
[ "def", "get_tensors_inputs", "(", "placeholders", ",", "tensors", ",", "names", ")", ":", "assert", "len", "(", "tensors", ")", "==", "len", "(", "names", ")", ",", "\"Input tensors {} and input names {} have different length!\"", ".", "format", "(", "tensors", ",", "names", ")", "ret", "=", "copy", ".", "copy", "(", "placeholders", ")", "placeholder_names", "=", "[", "p", ".", "name", "for", "p", "in", "placeholders", "]", "for", "name", ",", "tensor", "in", "zip", "(", "names", ",", "tensors", ")", ":", "tensorname", "=", "get_op_tensor_name", "(", "name", ")", "[", "1", "]", "try", ":", "idx", "=", "placeholder_names", ".", "index", "(", "tensorname", ")", "except", "ValueError", ":", "logger", ".", "error", "(", "\"Name {} is not a model input!\"", ".", "format", "(", "tensorname", ")", ")", "raise", "ret", "[", "idx", "]", "=", "tensor", "return", "ret" ]
Args: placeholders (list[Tensor]): tensors (list[Tensor]): list of tf.Tensor names (list[str]): names matching the given tensors Returns: list[Tensor]: inputs to used for the tower function, with the corresponding placeholders replaced by tensors.
[ "Args", ":", "placeholders", "(", "list", "[", "Tensor", "]", ")", ":", "tensors", "(", "list", "[", "Tensor", "]", ")", ":", "list", "of", "tf", ".", "Tensor", "names", "(", "list", "[", "str", "]", ")", ":", "names", "matching", "the", "given", "tensors" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/input_source/input_source_base.py#L20-L44
train
tensorpack/tensorpack
tensorpack/input_source/input_source_base.py
get_sublist_by_names
def get_sublist_by_names(lst, names): """ Args: lst (list): list of objects with "name" property. Returns: list: a sublist of objects, matching names """ orig_names = [p.name for p in lst] ret = [] for name in names: try: idx = orig_names.index(name) except ValueError: logger.error("Name {} doesn't appear in lst {}!".format( name, str(orig_names))) raise ret.append(lst[idx]) return ret
python
def get_sublist_by_names(lst, names):
    """
    Select elements of ``lst`` by their ``name`` attribute, in the
    order given by ``names``.

    Args:
        lst (list): list of objects with "name" property.

    Returns:
        list: a sublist of objects, matching names
    """
    available = [item.name for item in lst]
    selected = []
    for wanted in names:
        try:
            pos = available.index(wanted)
        except ValueError:
            logger.error("Name {} doesn't appear in lst {}!".format(
                wanted, str(available)))
            raise
        selected.append(lst[pos])
    return selected
[ "def", "get_sublist_by_names", "(", "lst", ",", "names", ")", ":", "orig_names", "=", "[", "p", ".", "name", "for", "p", "in", "lst", "]", "ret", "=", "[", "]", "for", "name", "in", "names", ":", "try", ":", "idx", "=", "orig_names", ".", "index", "(", "name", ")", "except", "ValueError", ":", "logger", ".", "error", "(", "\"Name {} doesn't appear in lst {}!\"", ".", "format", "(", "name", ",", "str", "(", "orig_names", ")", ")", ")", "raise", "ret", ".", "append", "(", "lst", "[", "idx", "]", ")", "return", "ret" ]
Args: lst (list): list of objects with "name" property. Returns: list: a sublist of objects, matching names
[ "Args", ":", "lst", "(", "list", ")", ":", "list", "of", "objects", "with", "name", "property", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/input_source/input_source_base.py#L47-L65
train
tensorpack/tensorpack
tensorpack/input_source/input_source_base.py
remap_input_source
def remap_input_source(input, names): """ When you have some :class:`InputSource` which doesn't match the inputs of your tower function, use `RemapInputSource`. It produces placeholders for all the inputs in your model, except that the corresponding ones are replaced with the tensor produced by the given :class:`InputSource`. Args: input(InputSource): a :class:`InputSource`, whose tensors will get mapped. names(list[str]): list of input names corresponding to the tensors produced by ``input``. Returns: InputSource: Example: .. code-block:: python input1 = QueueInput(ds) # assume ds produces 'image' and 'label', but the graph takes more # inputs for some reasons, or takes inputs of a different order: input_signature = [tf.TensorSpec((None,10), tf.float32, 'score'), tf.TensorSpec((None,20,20,3), tf.float32, 'label'), tf.TensorSpec((None,), tf.int32, 'image') ] input2 = remap_input_source(input1, ['image', 'label']) input2.setup(input_signature) # now, input2.get_input_tensors() will return a placeholder for 'score', # plus the tensors returned by input1.get_input_tensors() """ def __init__(self, input, names): ProxyInputSource.__init__(self, input) assert isinstance(names, (list, tuple)), names self._names = tuple(names) def _setup(self, inputs): self._all_placehdrs = [build_or_reuse_placeholder(v) for v in inputs] inputs_subset = get_sublist_by_names(inputs, self._names) self._input.setup(inputs_subset) def _get_input_tensors(self): ret = self._input.get_input_tensors() assert len(ret) == len(self._names) return get_tensors_inputs( self._all_placehdrs, ret, self._names) oldcls = type(input) # inherit oldcls so that type check in various places would work cls = type('Remapped' + oldcls.__name__, (ProxyInputSource, oldcls), { '__init__': __init__, '_setup': _setup, '_get_input_tensors': _get_input_tensors}) return cls(input, names)
python
def remap_input_source(input, names):
    """
    When you have some :class:`InputSource` which doesn't match the inputs of
    your tower function, use `RemapInputSource`.
    It produces placeholders for all the inputs in your model,
    except that the corresponding ones are replaced with the tensor produced
    by the given :class:`InputSource`.

    Args:
        input(InputSource): a :class:`InputSource`, whose tensors will get mapped.
        names(list[str]): list of input names corresponding to the tensors
            produced by ``input``.

    Returns:
        InputSource:

    Example:

    .. code-block:: python

        input1 = QueueInput(ds)
        # assume ds produces 'image' and 'label', but the graph takes more
        # inputs for some reasons, or takes inputs of a different order:
        input_signature = [tf.TensorSpec((None,10), tf.float32, 'score'),
                           tf.TensorSpec((None,20,20,3), tf.float32, 'label'),
                           tf.TensorSpec((None,), tf.int32, 'image') ]
        input2 = remap_input_source(input1, ['image', 'label'])
        input2.setup(input_signature)
        # now, input2.get_input_tensors() will return a placeholder for 'score',
        # plus the tensors returned by input1.get_input_tensors()
    """
    # The three functions below become methods of a dynamically-created
    # subclass (see the `type(...)` call at the bottom).
    def __init__(self, input, names):
        ProxyInputSource.__init__(self, input)
        assert isinstance(names, (list, tuple)), names
        self._names = tuple(names)

    def _setup(self, inputs):
        # Build placeholders for *every* model input, then set up the
        # wrapped InputSource only with the subset it actually produces.
        self._all_placehdrs = [build_or_reuse_placeholder(v) for v in inputs]
        inputs_subset = get_sublist_by_names(inputs, self._names)
        self._input.setup(inputs_subset)

    def _get_input_tensors(self):
        ret = self._input.get_input_tensors()
        assert len(ret) == len(self._names)
        # Splice the produced tensors into the full placeholder list.
        return get_tensors_inputs(
            self._all_placehdrs, ret, self._names)

    oldcls = type(input)
    # inherit oldcls so that type check in various places would work
    cls = type('Remapped' + oldcls.__name__, (ProxyInputSource, oldcls), {
        '__init__': __init__,
        '_setup': _setup,
        '_get_input_tensors': _get_input_tensors})
    return cls(input, names)
[ "def", "remap_input_source", "(", "input", ",", "names", ")", ":", "def", "__init__", "(", "self", ",", "input", ",", "names", ")", ":", "ProxyInputSource", ".", "__init__", "(", "self", ",", "input", ")", "assert", "isinstance", "(", "names", ",", "(", "list", ",", "tuple", ")", ")", ",", "names", "self", ".", "_names", "=", "tuple", "(", "names", ")", "def", "_setup", "(", "self", ",", "inputs", ")", ":", "self", ".", "_all_placehdrs", "=", "[", "build_or_reuse_placeholder", "(", "v", ")", "for", "v", "in", "inputs", "]", "inputs_subset", "=", "get_sublist_by_names", "(", "inputs", ",", "self", ".", "_names", ")", "self", ".", "_input", ".", "setup", "(", "inputs_subset", ")", "def", "_get_input_tensors", "(", "self", ")", ":", "ret", "=", "self", ".", "_input", ".", "get_input_tensors", "(", ")", "assert", "len", "(", "ret", ")", "==", "len", "(", "self", ".", "_names", ")", "return", "get_tensors_inputs", "(", "self", ".", "_all_placehdrs", ",", "ret", ",", "self", ".", "_names", ")", "oldcls", "=", "type", "(", "input", ")", "# inherit oldcls so that type check in various places would work", "cls", "=", "type", "(", "'Remapped'", "+", "oldcls", ".", "__name__", ",", "(", "ProxyInputSource", ",", "oldcls", ")", ",", "{", "'__init__'", ":", "__init__", ",", "'_setup'", ":", "_setup", ",", "'_get_input_tensors'", ":", "_get_input_tensors", "}", ")", "return", "cls", "(", "input", ",", "names", ")" ]
When you have some :class:`InputSource` which doesn't match the inputs of your tower function, use `RemapInputSource`. It produces placeholders for all the inputs in your model, except that the corresponding ones are replaced with the tensor produced by the given :class:`InputSource`. Args: input(InputSource): a :class:`InputSource`, whose tensors will get mapped. names(list[str]): list of input names corresponding to the tensors produced by ``input``. Returns: InputSource: Example: .. code-block:: python input1 = QueueInput(ds) # assume ds produces 'image' and 'label', but the graph takes more # inputs for some reasons, or takes inputs of a different order: input_signature = [tf.TensorSpec((None,10), tf.float32, 'score'), tf.TensorSpec((None,20,20,3), tf.float32, 'label'), tf.TensorSpec((None,), tf.int32, 'image') ] input2 = remap_input_source(input1, ['image', 'label']) input2.setup(input_signature) # now, input2.get_input_tensors() will return a placeholder for 'score', # plus the tensors returned by input1.get_input_tensors()
[ "When", "you", "have", "some", ":", "class", ":", "InputSource", "which", "doesn", "t", "match", "the", "inputs", "of", "your", "tower", "function", "use", "RemapInputSource", ".", "It", "produces", "placeholders", "for", "all", "the", "inputs", "in", "your", "model", "except", "that", "the", "corresponding", "ones", "are", "replaced", "with", "the", "tensor", "produced", "by", "the", "given", ":", "class", ":", "InputSource", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/input_source/input_source_base.py#L207-L260
train
tensorpack/tensorpack
examples/FasterRCNN/model_rpn.py
rpn_head
def rpn_head(featuremap, channel, num_anchors): """ Returns: label_logits: fHxfWxNA box_logits: fHxfWxNAx4 """ with argscope(Conv2D, data_format='channels_first', kernel_initializer=tf.random_normal_initializer(stddev=0.01)): hidden = Conv2D('conv0', featuremap, channel, 3, activation=tf.nn.relu) label_logits = Conv2D('class', hidden, num_anchors, 1) box_logits = Conv2D('box', hidden, 4 * num_anchors, 1) # 1, NA(*4), im/16, im/16 (NCHW) label_logits = tf.transpose(label_logits, [0, 2, 3, 1]) # 1xfHxfWxNA label_logits = tf.squeeze(label_logits, 0) # fHxfWxNA shp = tf.shape(box_logits) # 1x(NAx4)xfHxfW box_logits = tf.transpose(box_logits, [0, 2, 3, 1]) # 1xfHxfWx(NAx4) box_logits = tf.reshape(box_logits, tf.stack([shp[2], shp[3], num_anchors, 4])) # fHxfWxNAx4 return label_logits, box_logits
python
def rpn_head(featuremap, channel, num_anchors):
    """
    The RPN head: a 3x3 conv followed by 1x1 objectness and box branches.

    Returns:
        label_logits: fHxfWxNA
        box_logits: fHxfWxNAx4
    """
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.random_normal_initializer(stddev=0.01)):
        hidden = Conv2D('conv0', featuremap, channel, 3, activation=tf.nn.relu)

        label_logits = Conv2D('class', hidden, num_anchors, 1)
        box_logits = Conv2D('box', hidden, 4 * num_anchors, 1)
        # 1, NA(*4), im/16, im/16 (NCHW)

        # Convert NCHW conv outputs to the per-position layouts the
        # losses/proposal code expect, dropping the batch dim of 1.
        label_logits = tf.transpose(label_logits, [0, 2, 3, 1])  # 1xfHxfWxNA
        label_logits = tf.squeeze(label_logits, 0)  # fHxfWxNA

        shp = tf.shape(box_logits)  # 1x(NAx4)xfHxfW
        box_logits = tf.transpose(box_logits, [0, 2, 3, 1])  # 1xfHxfWx(NAx4)
        box_logits = tf.reshape(box_logits, tf.stack([shp[2], shp[3], num_anchors, 4]))  # fHxfWxNAx4
    return label_logits, box_logits
[ "def", "rpn_head", "(", "featuremap", ",", "channel", ",", "num_anchors", ")", ":", "with", "argscope", "(", "Conv2D", ",", "data_format", "=", "'channels_first'", ",", "kernel_initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "0.01", ")", ")", ":", "hidden", "=", "Conv2D", "(", "'conv0'", ",", "featuremap", ",", "channel", ",", "3", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", "label_logits", "=", "Conv2D", "(", "'class'", ",", "hidden", ",", "num_anchors", ",", "1", ")", "box_logits", "=", "Conv2D", "(", "'box'", ",", "hidden", ",", "4", "*", "num_anchors", ",", "1", ")", "# 1, NA(*4), im/16, im/16 (NCHW)", "label_logits", "=", "tf", ".", "transpose", "(", "label_logits", ",", "[", "0", ",", "2", ",", "3", ",", "1", "]", ")", "# 1xfHxfWxNA", "label_logits", "=", "tf", ".", "squeeze", "(", "label_logits", ",", "0", ")", "# fHxfWxNA", "shp", "=", "tf", ".", "shape", "(", "box_logits", ")", "# 1x(NAx4)xfHxfW", "box_logits", "=", "tf", ".", "transpose", "(", "box_logits", ",", "[", "0", ",", "2", ",", "3", ",", "1", "]", ")", "# 1xfHxfWx(NAx4)", "box_logits", "=", "tf", ".", "reshape", "(", "box_logits", ",", "tf", ".", "stack", "(", "[", "shp", "[", "2", "]", ",", "shp", "[", "3", "]", ",", "num_anchors", ",", "4", "]", ")", ")", "# fHxfWxNAx4", "return", "label_logits", ",", "box_logits" ]
Returns: label_logits: fHxfWxNA box_logits: fHxfWxNAx4
[ "Returns", ":", "label_logits", ":", "fHxfWxNA", "box_logits", ":", "fHxfWxNAx4" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_rpn.py#L16-L36
train
tensorpack/tensorpack
examples/FasterRCNN/model_rpn.py
rpn_losses
def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits): """ Args: anchor_labels: fHxfWxNA anchor_boxes: fHxfWxNAx4, encoded label_logits: fHxfWxNA box_logits: fHxfWxNAx4 Returns: label_loss, box_loss """ with tf.device('/cpu:0'): valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1)) pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1)) nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor') nr_pos = tf.identity(tf.count_nonzero(pos_mask, dtype=tf.int32), name='num_pos_anchor') # nr_pos is guaranteed >0 in C4. But in FPN. even nr_valid could be 0. valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask) valid_label_logits = tf.boolean_mask(label_logits, valid_mask) with tf.name_scope('label_metrics'): valid_label_prob = tf.nn.sigmoid(valid_label_logits) summaries = [] with tf.device('/cpu:0'): for th in [0.5, 0.2, 0.1]: valid_prediction = tf.cast(valid_label_prob > th, tf.int32) nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction') pos_prediction_corr = tf.count_nonzero( tf.logical_and( valid_label_prob > th, tf.equal(valid_prediction, valid_anchor_labels)), dtype=tf.int32) placeholder = 0.5 # A small value will make summaries appear lower. recall = tf.cast(tf.truediv(pos_prediction_corr, nr_pos), tf.float32) recall = tf.where(tf.equal(nr_pos, 0), placeholder, recall, name='recall_th{}'.format(th)) precision = tf.cast(tf.truediv(pos_prediction_corr, nr_pos_prediction), tf.float32) precision = tf.where(tf.equal(nr_pos_prediction, 0), placeholder, precision, name='precision_th{}'.format(th)) summaries.extend([precision, recall]) add_moving_summary(*summaries) # Per-level loss summaries in FPN may appear lower due to the use of a small placeholder. # But the total RPN loss will be fine. TODO make the summary op smarter placeholder = 0. 
label_loss = tf.nn.sigmoid_cross_entropy_with_logits( labels=tf.cast(valid_anchor_labels, tf.float32), logits=valid_label_logits) label_loss = tf.reduce_sum(label_loss) * (1. / cfg.RPN.BATCH_PER_IM) label_loss = tf.where(tf.equal(nr_valid, 0), placeholder, label_loss, name='label_loss') pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask) pos_box_logits = tf.boolean_mask(box_logits, pos_mask) delta = 1.0 / 9 box_loss = tf.losses.huber_loss( pos_anchor_boxes, pos_box_logits, delta=delta, reduction=tf.losses.Reduction.SUM) / delta box_loss = box_loss * (1. / cfg.RPN.BATCH_PER_IM) box_loss = tf.where(tf.equal(nr_pos, 0), placeholder, box_loss, name='box_loss') add_moving_summary(label_loss, box_loss, nr_valid, nr_pos) return [label_loss, box_loss]
python
def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits): """ Args: anchor_labels: fHxfWxNA anchor_boxes: fHxfWxNAx4, encoded label_logits: fHxfWxNA box_logits: fHxfWxNAx4 Returns: label_loss, box_loss """ with tf.device('/cpu:0'): valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1)) pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1)) nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor') nr_pos = tf.identity(tf.count_nonzero(pos_mask, dtype=tf.int32), name='num_pos_anchor') # nr_pos is guaranteed >0 in C4. But in FPN. even nr_valid could be 0. valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask) valid_label_logits = tf.boolean_mask(label_logits, valid_mask) with tf.name_scope('label_metrics'): valid_label_prob = tf.nn.sigmoid(valid_label_logits) summaries = [] with tf.device('/cpu:0'): for th in [0.5, 0.2, 0.1]: valid_prediction = tf.cast(valid_label_prob > th, tf.int32) nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction') pos_prediction_corr = tf.count_nonzero( tf.logical_and( valid_label_prob > th, tf.equal(valid_prediction, valid_anchor_labels)), dtype=tf.int32) placeholder = 0.5 # A small value will make summaries appear lower. recall = tf.cast(tf.truediv(pos_prediction_corr, nr_pos), tf.float32) recall = tf.where(tf.equal(nr_pos, 0), placeholder, recall, name='recall_th{}'.format(th)) precision = tf.cast(tf.truediv(pos_prediction_corr, nr_pos_prediction), tf.float32) precision = tf.where(tf.equal(nr_pos_prediction, 0), placeholder, precision, name='precision_th{}'.format(th)) summaries.extend([precision, recall]) add_moving_summary(*summaries) # Per-level loss summaries in FPN may appear lower due to the use of a small placeholder. # But the total RPN loss will be fine. TODO make the summary op smarter placeholder = 0. 
label_loss = tf.nn.sigmoid_cross_entropy_with_logits( labels=tf.cast(valid_anchor_labels, tf.float32), logits=valid_label_logits) label_loss = tf.reduce_sum(label_loss) * (1. / cfg.RPN.BATCH_PER_IM) label_loss = tf.where(tf.equal(nr_valid, 0), placeholder, label_loss, name='label_loss') pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask) pos_box_logits = tf.boolean_mask(box_logits, pos_mask) delta = 1.0 / 9 box_loss = tf.losses.huber_loss( pos_anchor_boxes, pos_box_logits, delta=delta, reduction=tf.losses.Reduction.SUM) / delta box_loss = box_loss * (1. / cfg.RPN.BATCH_PER_IM) box_loss = tf.where(tf.equal(nr_pos, 0), placeholder, box_loss, name='box_loss') add_moving_summary(label_loss, box_loss, nr_valid, nr_pos) return [label_loss, box_loss]
[ "def", "rpn_losses", "(", "anchor_labels", ",", "anchor_boxes", ",", "label_logits", ",", "box_logits", ")", ":", "with", "tf", ".", "device", "(", "'/cpu:0'", ")", ":", "valid_mask", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "not_equal", "(", "anchor_labels", ",", "-", "1", ")", ")", "pos_mask", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "equal", "(", "anchor_labels", ",", "1", ")", ")", "nr_valid", "=", "tf", ".", "stop_gradient", "(", "tf", ".", "count_nonzero", "(", "valid_mask", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "name", "=", "'num_valid_anchor'", ")", "nr_pos", "=", "tf", ".", "identity", "(", "tf", ".", "count_nonzero", "(", "pos_mask", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "name", "=", "'num_pos_anchor'", ")", "# nr_pos is guaranteed >0 in C4. But in FPN. even nr_valid could be 0.", "valid_anchor_labels", "=", "tf", ".", "boolean_mask", "(", "anchor_labels", ",", "valid_mask", ")", "valid_label_logits", "=", "tf", ".", "boolean_mask", "(", "label_logits", ",", "valid_mask", ")", "with", "tf", ".", "name_scope", "(", "'label_metrics'", ")", ":", "valid_label_prob", "=", "tf", ".", "nn", ".", "sigmoid", "(", "valid_label_logits", ")", "summaries", "=", "[", "]", "with", "tf", ".", "device", "(", "'/cpu:0'", ")", ":", "for", "th", "in", "[", "0.5", ",", "0.2", ",", "0.1", "]", ":", "valid_prediction", "=", "tf", ".", "cast", "(", "valid_label_prob", ">", "th", ",", "tf", ".", "int32", ")", "nr_pos_prediction", "=", "tf", ".", "reduce_sum", "(", "valid_prediction", ",", "name", "=", "'num_pos_prediction'", ")", "pos_prediction_corr", "=", "tf", ".", "count_nonzero", "(", "tf", ".", "logical_and", "(", "valid_label_prob", ">", "th", ",", "tf", ".", "equal", "(", "valid_prediction", ",", "valid_anchor_labels", ")", ")", ",", "dtype", "=", "tf", ".", "int32", ")", "placeholder", "=", "0.5", "# A small value will make summaries appear lower.", "recall", "=", "tf", ".", "cast", "(", "tf", ".", "truediv", "(", 
"pos_prediction_corr", ",", "nr_pos", ")", ",", "tf", ".", "float32", ")", "recall", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "nr_pos", ",", "0", ")", ",", "placeholder", ",", "recall", ",", "name", "=", "'recall_th{}'", ".", "format", "(", "th", ")", ")", "precision", "=", "tf", ".", "cast", "(", "tf", ".", "truediv", "(", "pos_prediction_corr", ",", "nr_pos_prediction", ")", ",", "tf", ".", "float32", ")", "precision", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "nr_pos_prediction", ",", "0", ")", ",", "placeholder", ",", "precision", ",", "name", "=", "'precision_th{}'", ".", "format", "(", "th", ")", ")", "summaries", ".", "extend", "(", "[", "precision", ",", "recall", "]", ")", "add_moving_summary", "(", "*", "summaries", ")", "# Per-level loss summaries in FPN may appear lower due to the use of a small placeholder.", "# But the total RPN loss will be fine. TODO make the summary op smarter", "placeholder", "=", "0.", "label_loss", "=", "tf", ".", "nn", ".", "sigmoid_cross_entropy_with_logits", "(", "labels", "=", "tf", ".", "cast", "(", "valid_anchor_labels", ",", "tf", ".", "float32", ")", ",", "logits", "=", "valid_label_logits", ")", "label_loss", "=", "tf", ".", "reduce_sum", "(", "label_loss", ")", "*", "(", "1.", "/", "cfg", ".", "RPN", ".", "BATCH_PER_IM", ")", "label_loss", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "nr_valid", ",", "0", ")", ",", "placeholder", ",", "label_loss", ",", "name", "=", "'label_loss'", ")", "pos_anchor_boxes", "=", "tf", ".", "boolean_mask", "(", "anchor_boxes", ",", "pos_mask", ")", "pos_box_logits", "=", "tf", ".", "boolean_mask", "(", "box_logits", ",", "pos_mask", ")", "delta", "=", "1.0", "/", "9", "box_loss", "=", "tf", ".", "losses", ".", "huber_loss", "(", "pos_anchor_boxes", ",", "pos_box_logits", ",", "delta", "=", "delta", ",", "reduction", "=", "tf", ".", "losses", ".", "Reduction", ".", "SUM", ")", "/", "delta", "box_loss", "=", "box_loss", "*", "(", "1.", "/", "cfg", ".", 
"RPN", ".", "BATCH_PER_IM", ")", "box_loss", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "nr_pos", ",", "0", ")", ",", "placeholder", ",", "box_loss", ",", "name", "=", "'box_loss'", ")", "add_moving_summary", "(", "label_loss", ",", "box_loss", ",", "nr_valid", ",", "nr_pos", ")", "return", "[", "label_loss", ",", "box_loss", "]" ]
Args: anchor_labels: fHxfWxNA anchor_boxes: fHxfWxNAx4, encoded label_logits: fHxfWxNA box_logits: fHxfWxNAx4 Returns: label_loss, box_loss
[ "Args", ":", "anchor_labels", ":", "fHxfWxNA", "anchor_boxes", ":", "fHxfWxNAx4", "encoded", "label_logits", ":", "fHxfWxNA", "box_logits", ":", "fHxfWxNAx4" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_rpn.py#L40-L100
train
tensorpack/tensorpack
examples/FasterRCNN/model_rpn.py
generate_rpn_proposals
def generate_rpn_proposals(boxes, scores, img_shape, pre_nms_topk, post_nms_topk=None): """ Sample RPN proposals by the following steps: 1. Pick top k1 by scores 2. NMS them 3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output. Args: boxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already scores: n float, the logits img_shape: [h, w] pre_nms_topk, post_nms_topk (int): See above. Returns: boxes: kx4 float scores: k logits """ assert boxes.shape.ndims == 2, boxes.shape if post_nms_topk is None: post_nms_topk = pre_nms_topk topk = tf.minimum(pre_nms_topk, tf.size(scores)) topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False) topk_boxes = tf.gather(boxes, topk_indices) topk_boxes = clip_boxes(topk_boxes, img_shape) topk_boxes_x1y1x2y2 = tf.reshape(topk_boxes, (-1, 2, 2)) topk_boxes_x1y1, topk_boxes_x2y2 = tf.split(topk_boxes_x1y1x2y2, 2, axis=1) # nx1x2 each wbhb = tf.squeeze(topk_boxes_x2y2 - topk_boxes_x1y1, axis=1) valid = tf.reduce_all(wbhb > cfg.RPN.MIN_SIZE, axis=1) # n, topk_valid_boxes_x1y1x2y2 = tf.boolean_mask(topk_boxes_x1y1x2y2, valid) topk_valid_scores = tf.boolean_mask(topk_scores, valid) # TODO not needed topk_valid_boxes_y1x1y2x2 = tf.reshape( tf.reverse(topk_valid_boxes_x1y1x2y2, axis=[2]), (-1, 4), name='nms_input_boxes') nms_indices = tf.image.non_max_suppression( topk_valid_boxes_y1x1y2x2, topk_valid_scores, max_output_size=post_nms_topk, iou_threshold=cfg.RPN.PROPOSAL_NMS_THRESH) topk_valid_boxes = tf.reshape(topk_valid_boxes_x1y1x2y2, (-1, 4)) proposal_boxes = tf.gather(topk_valid_boxes, nms_indices) proposal_scores = tf.gather(topk_valid_scores, nms_indices) tf.sigmoid(proposal_scores, name='probs') # for visualization return tf.stop_gradient(proposal_boxes, name='boxes'), tf.stop_gradient(proposal_scores, name='scores')
python
def generate_rpn_proposals(boxes, scores, img_shape, pre_nms_topk, post_nms_topk=None): """ Sample RPN proposals by the following steps: 1. Pick top k1 by scores 2. NMS them 3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output. Args: boxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already scores: n float, the logits img_shape: [h, w] pre_nms_topk, post_nms_topk (int): See above. Returns: boxes: kx4 float scores: k logits """ assert boxes.shape.ndims == 2, boxes.shape if post_nms_topk is None: post_nms_topk = pre_nms_topk topk = tf.minimum(pre_nms_topk, tf.size(scores)) topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False) topk_boxes = tf.gather(boxes, topk_indices) topk_boxes = clip_boxes(topk_boxes, img_shape) topk_boxes_x1y1x2y2 = tf.reshape(topk_boxes, (-1, 2, 2)) topk_boxes_x1y1, topk_boxes_x2y2 = tf.split(topk_boxes_x1y1x2y2, 2, axis=1) # nx1x2 each wbhb = tf.squeeze(topk_boxes_x2y2 - topk_boxes_x1y1, axis=1) valid = tf.reduce_all(wbhb > cfg.RPN.MIN_SIZE, axis=1) # n, topk_valid_boxes_x1y1x2y2 = tf.boolean_mask(topk_boxes_x1y1x2y2, valid) topk_valid_scores = tf.boolean_mask(topk_scores, valid) # TODO not needed topk_valid_boxes_y1x1y2x2 = tf.reshape( tf.reverse(topk_valid_boxes_x1y1x2y2, axis=[2]), (-1, 4), name='nms_input_boxes') nms_indices = tf.image.non_max_suppression( topk_valid_boxes_y1x1y2x2, topk_valid_scores, max_output_size=post_nms_topk, iou_threshold=cfg.RPN.PROPOSAL_NMS_THRESH) topk_valid_boxes = tf.reshape(topk_valid_boxes_x1y1x2y2, (-1, 4)) proposal_boxes = tf.gather(topk_valid_boxes, nms_indices) proposal_scores = tf.gather(topk_valid_scores, nms_indices) tf.sigmoid(proposal_scores, name='probs') # for visualization return tf.stop_gradient(proposal_boxes, name='boxes'), tf.stop_gradient(proposal_scores, name='scores')
[ "def", "generate_rpn_proposals", "(", "boxes", ",", "scores", ",", "img_shape", ",", "pre_nms_topk", ",", "post_nms_topk", "=", "None", ")", ":", "assert", "boxes", ".", "shape", ".", "ndims", "==", "2", ",", "boxes", ".", "shape", "if", "post_nms_topk", "is", "None", ":", "post_nms_topk", "=", "pre_nms_topk", "topk", "=", "tf", ".", "minimum", "(", "pre_nms_topk", ",", "tf", ".", "size", "(", "scores", ")", ")", "topk_scores", ",", "topk_indices", "=", "tf", ".", "nn", ".", "top_k", "(", "scores", ",", "k", "=", "topk", ",", "sorted", "=", "False", ")", "topk_boxes", "=", "tf", ".", "gather", "(", "boxes", ",", "topk_indices", ")", "topk_boxes", "=", "clip_boxes", "(", "topk_boxes", ",", "img_shape", ")", "topk_boxes_x1y1x2y2", "=", "tf", ".", "reshape", "(", "topk_boxes", ",", "(", "-", "1", ",", "2", ",", "2", ")", ")", "topk_boxes_x1y1", ",", "topk_boxes_x2y2", "=", "tf", ".", "split", "(", "topk_boxes_x1y1x2y2", ",", "2", ",", "axis", "=", "1", ")", "# nx1x2 each", "wbhb", "=", "tf", ".", "squeeze", "(", "topk_boxes_x2y2", "-", "topk_boxes_x1y1", ",", "axis", "=", "1", ")", "valid", "=", "tf", ".", "reduce_all", "(", "wbhb", ">", "cfg", ".", "RPN", ".", "MIN_SIZE", ",", "axis", "=", "1", ")", "# n,", "topk_valid_boxes_x1y1x2y2", "=", "tf", ".", "boolean_mask", "(", "topk_boxes_x1y1x2y2", ",", "valid", ")", "topk_valid_scores", "=", "tf", ".", "boolean_mask", "(", "topk_scores", ",", "valid", ")", "# TODO not needed", "topk_valid_boxes_y1x1y2x2", "=", "tf", ".", "reshape", "(", "tf", ".", "reverse", "(", "topk_valid_boxes_x1y1x2y2", ",", "axis", "=", "[", "2", "]", ")", ",", "(", "-", "1", ",", "4", ")", ",", "name", "=", "'nms_input_boxes'", ")", "nms_indices", "=", "tf", ".", "image", ".", "non_max_suppression", "(", "topk_valid_boxes_y1x1y2x2", ",", "topk_valid_scores", ",", "max_output_size", "=", "post_nms_topk", ",", "iou_threshold", "=", "cfg", ".", "RPN", ".", "PROPOSAL_NMS_THRESH", ")", "topk_valid_boxes", "=", "tf", ".", "reshape", "(", 
"topk_valid_boxes_x1y1x2y2", ",", "(", "-", "1", ",", "4", ")", ")", "proposal_boxes", "=", "tf", ".", "gather", "(", "topk_valid_boxes", ",", "nms_indices", ")", "proposal_scores", "=", "tf", ".", "gather", "(", "topk_valid_scores", ",", "nms_indices", ")", "tf", ".", "sigmoid", "(", "proposal_scores", ",", "name", "=", "'probs'", ")", "# for visualization", "return", "tf", ".", "stop_gradient", "(", "proposal_boxes", ",", "name", "=", "'boxes'", ")", ",", "tf", ".", "stop_gradient", "(", "proposal_scores", ",", "name", "=", "'scores'", ")" ]
Sample RPN proposals by the following steps: 1. Pick top k1 by scores 2. NMS them 3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output. Args: boxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already scores: n float, the logits img_shape: [h, w] pre_nms_topk, post_nms_topk (int): See above. Returns: boxes: kx4 float scores: k logits
[ "Sample", "RPN", "proposals", "by", "the", "following", "steps", ":", "1", ".", "Pick", "top", "k1", "by", "scores", "2", ".", "NMS", "them", "3", ".", "Pick", "top", "k2", "by", "scores", ".", "Default", "k2", "==", "k1", "i", ".", "e", ".", "does", "not", "filter", "the", "NMS", "output", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_rpn.py#L104-L153
train
tensorpack/tensorpack
tensorpack/callbacks/summary.py
MergeAllSummaries
def MergeAllSummaries(period=0, run_alone=False, key=None): """ This callback is enabled by default. Evaluate all summaries by ``tf.summary.merge_all``, and write them to logs. Args: period (int): by default the callback summarizes once every epoch. This option (if not set to 0) makes it additionally summarize every ``period`` steps. run_alone (bool): whether to evaluate the summaries alone. If True, summaries will be evaluated after each epoch alone. If False, summaries will be evaluated together with the `sess.run` calls, in the last step of each epoch. For :class:`SimpleTrainer`, it needs to be False because summary may depend on inputs. key (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``. Default is ``tf.GraphKeys.SUMMARIES``. """ if key is None: key = tf.GraphKeys.SUMMARIES period = int(period) if run_alone: return MergeAllSummaries_RunAlone(period, key) else: return MergeAllSummaries_RunWithOp(period, key)
python
def MergeAllSummaries(period=0, run_alone=False, key=None): """ This callback is enabled by default. Evaluate all summaries by ``tf.summary.merge_all``, and write them to logs. Args: period (int): by default the callback summarizes once every epoch. This option (if not set to 0) makes it additionally summarize every ``period`` steps. run_alone (bool): whether to evaluate the summaries alone. If True, summaries will be evaluated after each epoch alone. If False, summaries will be evaluated together with the `sess.run` calls, in the last step of each epoch. For :class:`SimpleTrainer`, it needs to be False because summary may depend on inputs. key (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``. Default is ``tf.GraphKeys.SUMMARIES``. """ if key is None: key = tf.GraphKeys.SUMMARIES period = int(period) if run_alone: return MergeAllSummaries_RunAlone(period, key) else: return MergeAllSummaries_RunWithOp(period, key)
[ "def", "MergeAllSummaries", "(", "period", "=", "0", ",", "run_alone", "=", "False", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "key", "=", "tf", ".", "GraphKeys", ".", "SUMMARIES", "period", "=", "int", "(", "period", ")", "if", "run_alone", ":", "return", "MergeAllSummaries_RunAlone", "(", "period", ",", "key", ")", "else", ":", "return", "MergeAllSummaries_RunWithOp", "(", "period", ",", "key", ")" ]
This callback is enabled by default. Evaluate all summaries by ``tf.summary.merge_all``, and write them to logs. Args: period (int): by default the callback summarizes once every epoch. This option (if not set to 0) makes it additionally summarize every ``period`` steps. run_alone (bool): whether to evaluate the summaries alone. If True, summaries will be evaluated after each epoch alone. If False, summaries will be evaluated together with the `sess.run` calls, in the last step of each epoch. For :class:`SimpleTrainer`, it needs to be False because summary may depend on inputs. key (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``. Default is ``tf.GraphKeys.SUMMARIES``.
[ "This", "callback", "is", "enabled", "by", "default", ".", "Evaluate", "all", "summaries", "by", "tf", ".", "summary", ".", "merge_all", "and", "write", "them", "to", "logs", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/callbacks/summary.py#L119-L142
train
tensorpack/tensorpack
examples/DeepQNetwork/expreplay.py
ReplayMemory.append
def append(self, exp): """ Args: exp (Experience): """ if self._curr_size < self.max_size: self._assign(self._curr_pos, exp) self._curr_pos = (self._curr_pos + 1) % self.max_size self._curr_size += 1 else: self._assign(self._curr_pos, exp) self._curr_pos = (self._curr_pos + 1) % self.max_size
python
def append(self, exp): """ Args: exp (Experience): """ if self._curr_size < self.max_size: self._assign(self._curr_pos, exp) self._curr_pos = (self._curr_pos + 1) % self.max_size self._curr_size += 1 else: self._assign(self._curr_pos, exp) self._curr_pos = (self._curr_pos + 1) % self.max_size
[ "def", "append", "(", "self", ",", "exp", ")", ":", "if", "self", ".", "_curr_size", "<", "self", ".", "max_size", ":", "self", ".", "_assign", "(", "self", ".", "_curr_pos", ",", "exp", ")", "self", ".", "_curr_pos", "=", "(", "self", ".", "_curr_pos", "+", "1", ")", "%", "self", ".", "max_size", "self", ".", "_curr_size", "+=", "1", "else", ":", "self", ".", "_assign", "(", "self", ".", "_curr_pos", ",", "exp", ")", "self", ".", "_curr_pos", "=", "(", "self", ".", "_curr_pos", "+", "1", ")", "%", "self", ".", "max_size" ]
Args: exp (Experience):
[ "Args", ":", "exp", "(", "Experience", ")", ":" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DeepQNetwork/expreplay.py#L53-L64
train
tensorpack/tensorpack
examples/DeepQNetwork/expreplay.py
ReplayMemory.sample
def sample(self, idx): """ return a tuple of (s,r,a,o), where s is of shape self._output_shape, which is [H, W, (hist_len+1) * channel] if input is (H, W, channel)""" idx = (self._curr_pos + idx) % self._curr_size k = self.history_len + 1 if idx + k <= self._curr_size: state = self.state[idx: idx + k] reward = self.reward[idx: idx + k] action = self.action[idx: idx + k] isOver = self.isOver[idx: idx + k] else: end = idx + k - self._curr_size state = self._slice(self.state, idx, end) reward = self._slice(self.reward, idx, end) action = self._slice(self.action, idx, end) isOver = self._slice(self.isOver, idx, end) ret = self._pad_sample(state, reward, action, isOver) return ret
python
def sample(self, idx): """ return a tuple of (s,r,a,o), where s is of shape self._output_shape, which is [H, W, (hist_len+1) * channel] if input is (H, W, channel)""" idx = (self._curr_pos + idx) % self._curr_size k = self.history_len + 1 if idx + k <= self._curr_size: state = self.state[idx: idx + k] reward = self.reward[idx: idx + k] action = self.action[idx: idx + k] isOver = self.isOver[idx: idx + k] else: end = idx + k - self._curr_size state = self._slice(self.state, idx, end) reward = self._slice(self.reward, idx, end) action = self._slice(self.action, idx, end) isOver = self._slice(self.isOver, idx, end) ret = self._pad_sample(state, reward, action, isOver) return ret
[ "def", "sample", "(", "self", ",", "idx", ")", ":", "idx", "=", "(", "self", ".", "_curr_pos", "+", "idx", ")", "%", "self", ".", "_curr_size", "k", "=", "self", ".", "history_len", "+", "1", "if", "idx", "+", "k", "<=", "self", ".", "_curr_size", ":", "state", "=", "self", ".", "state", "[", "idx", ":", "idx", "+", "k", "]", "reward", "=", "self", ".", "reward", "[", "idx", ":", "idx", "+", "k", "]", "action", "=", "self", ".", "action", "[", "idx", ":", "idx", "+", "k", "]", "isOver", "=", "self", ".", "isOver", "[", "idx", ":", "idx", "+", "k", "]", "else", ":", "end", "=", "idx", "+", "k", "-", "self", ".", "_curr_size", "state", "=", "self", ".", "_slice", "(", "self", ".", "state", ",", "idx", ",", "end", ")", "reward", "=", "self", ".", "_slice", "(", "self", ".", "reward", ",", "idx", ",", "end", ")", "action", "=", "self", ".", "_slice", "(", "self", ".", "action", ",", "idx", ",", "end", ")", "isOver", "=", "self", ".", "_slice", "(", "self", ".", "isOver", ",", "idx", ",", "end", ")", "ret", "=", "self", ".", "_pad_sample", "(", "state", ",", "reward", ",", "action", ",", "isOver", ")", "return", "ret" ]
return a tuple of (s,r,a,o), where s is of shape self._output_shape, which is [H, W, (hist_len+1) * channel] if input is (H, W, channel)
[ "return", "a", "tuple", "of", "(", "s", "r", "a", "o", ")", "where", "s", "is", "of", "shape", "self", ".", "_output_shape", "which", "is", "[", "H", "W", "(", "hist_len", "+", "1", ")", "*", "channel", "]", "if", "input", "is", "(", "H", "W", "channel", ")" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DeepQNetwork/expreplay.py#L66-L84
train
tensorpack/tensorpack
examples/DeepQNetwork/expreplay.py
EnvRunner.step
def step(self, exploration): """ Run the environment for one step. If the episode ends, store the entire episode to the replay memory. """ old_s = self._current_ob if self.rng.rand() <= exploration: act = self.rng.choice(range(self.num_actions)) else: history = self.recent_state() history.append(old_s) history = np.stack(history, axis=-1) # state_shape + (Hist,) # assume batched network history = np.expand_dims(history, axis=0) q_values = self.predictor(history)[0][0] # this is the bottleneck act = np.argmax(q_values) self._current_ob, reward, isOver, info = self.player.step(act) self._current_game_score.feed(reward) self._current_episode.append(Experience(old_s, act, reward, isOver)) if isOver: flush_experience = True if 'ale.lives' in info: # if running Atari, do something special if info['ale.lives'] != 0: # only record score and flush experience # when a whole game is over (not when an episode is over) flush_experience = False self.player.reset() if flush_experience: self.total_scores.append(self._current_game_score.sum) self._current_game_score.reset() # Ensure that the whole episode of experience is continuous in the replay buffer with self.memory.writer_lock: for exp in self._current_episode: self.memory.append(exp) self._current_episode.clear()
python
def step(self, exploration): """ Run the environment for one step. If the episode ends, store the entire episode to the replay memory. """ old_s = self._current_ob if self.rng.rand() <= exploration: act = self.rng.choice(range(self.num_actions)) else: history = self.recent_state() history.append(old_s) history = np.stack(history, axis=-1) # state_shape + (Hist,) # assume batched network history = np.expand_dims(history, axis=0) q_values = self.predictor(history)[0][0] # this is the bottleneck act = np.argmax(q_values) self._current_ob, reward, isOver, info = self.player.step(act) self._current_game_score.feed(reward) self._current_episode.append(Experience(old_s, act, reward, isOver)) if isOver: flush_experience = True if 'ale.lives' in info: # if running Atari, do something special if info['ale.lives'] != 0: # only record score and flush experience # when a whole game is over (not when an episode is over) flush_experience = False self.player.reset() if flush_experience: self.total_scores.append(self._current_game_score.sum) self._current_game_score.reset() # Ensure that the whole episode of experience is continuous in the replay buffer with self.memory.writer_lock: for exp in self._current_episode: self.memory.append(exp) self._current_episode.clear()
[ "def", "step", "(", "self", ",", "exploration", ")", ":", "old_s", "=", "self", ".", "_current_ob", "if", "self", ".", "rng", ".", "rand", "(", ")", "<=", "exploration", ":", "act", "=", "self", ".", "rng", ".", "choice", "(", "range", "(", "self", ".", "num_actions", ")", ")", "else", ":", "history", "=", "self", ".", "recent_state", "(", ")", "history", ".", "append", "(", "old_s", ")", "history", "=", "np", ".", "stack", "(", "history", ",", "axis", "=", "-", "1", ")", "# state_shape + (Hist,)", "# assume batched network", "history", "=", "np", ".", "expand_dims", "(", "history", ",", "axis", "=", "0", ")", "q_values", "=", "self", ".", "predictor", "(", "history", ")", "[", "0", "]", "[", "0", "]", "# this is the bottleneck", "act", "=", "np", ".", "argmax", "(", "q_values", ")", "self", ".", "_current_ob", ",", "reward", ",", "isOver", ",", "info", "=", "self", ".", "player", ".", "step", "(", "act", ")", "self", ".", "_current_game_score", ".", "feed", "(", "reward", ")", "self", ".", "_current_episode", ".", "append", "(", "Experience", "(", "old_s", ",", "act", ",", "reward", ",", "isOver", ")", ")", "if", "isOver", ":", "flush_experience", "=", "True", "if", "'ale.lives'", "in", "info", ":", "# if running Atari, do something special", "if", "info", "[", "'ale.lives'", "]", "!=", "0", ":", "# only record score and flush experience", "# when a whole game is over (not when an episode is over)", "flush_experience", "=", "False", "self", ".", "player", ".", "reset", "(", ")", "if", "flush_experience", ":", "self", ".", "total_scores", ".", "append", "(", "self", ".", "_current_game_score", ".", "sum", ")", "self", ".", "_current_game_score", ".", "reset", "(", ")", "# Ensure that the whole episode of experience is continuous in the replay buffer", "with", "self", ".", "memory", ".", "writer_lock", ":", "for", "exp", "in", "self", ".", "_current_episode", ":", "self", ".", "memory", ".", "append", "(", "exp", ")", "self", ".", "_current_episode", ".", "clear", 
"(", ")" ]
Run the environment for one step. If the episode ends, store the entire episode to the replay memory.
[ "Run", "the", "environment", "for", "one", "step", ".", "If", "the", "episode", "ends", "store", "the", "entire", "episode", "to", "the", "replay", "memory", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DeepQNetwork/expreplay.py#L143-L182
train
tensorpack/tensorpack
examples/DeepQNetwork/expreplay.py
EnvRunner.recent_state
def recent_state(self): """ Get the recent state (with stacked history) of the environment. Returns: a list of ``hist_len-1`` elements, each of shape ``self.state_shape`` """ expected_len = self.history_len - 1 if len(self._current_episode) >= expected_len: return [k.state for k in self._current_episode[-expected_len:]] else: states = [np.zeros(self.state_shape, dtype=self.dtype)] * (expected_len - len(self._current_episode)) states.extend([k.state for k in self._current_episode]) return states
python
def recent_state(self): """ Get the recent state (with stacked history) of the environment. Returns: a list of ``hist_len-1`` elements, each of shape ``self.state_shape`` """ expected_len = self.history_len - 1 if len(self._current_episode) >= expected_len: return [k.state for k in self._current_episode[-expected_len:]] else: states = [np.zeros(self.state_shape, dtype=self.dtype)] * (expected_len - len(self._current_episode)) states.extend([k.state for k in self._current_episode]) return states
[ "def", "recent_state", "(", "self", ")", ":", "expected_len", "=", "self", ".", "history_len", "-", "1", "if", "len", "(", "self", ".", "_current_episode", ")", ">=", "expected_len", ":", "return", "[", "k", ".", "state", "for", "k", "in", "self", ".", "_current_episode", "[", "-", "expected_len", ":", "]", "]", "else", ":", "states", "=", "[", "np", ".", "zeros", "(", "self", ".", "state_shape", ",", "dtype", "=", "self", ".", "dtype", ")", "]", "*", "(", "expected_len", "-", "len", "(", "self", ".", "_current_episode", ")", ")", "states", ".", "extend", "(", "[", "k", ".", "state", "for", "k", "in", "self", ".", "_current_episode", "]", ")", "return", "states" ]
Get the recent state (with stacked history) of the environment. Returns: a list of ``hist_len-1`` elements, each of shape ``self.state_shape``
[ "Get", "the", "recent", "state", "(", "with", "stacked", "history", ")", "of", "the", "environment", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DeepQNetwork/expreplay.py#L184-L197
train
tensorpack/tensorpack
examples/DeepQNetwork/expreplay.py
EnvRunnerManager.step
def step(self, exploration): """ Execute one step in any of the runners. """ if len(self._runners) > 1: self._populate_job_queue.put(exploration) else: self._runners[0].step(exploration)
python
def step(self, exploration): """ Execute one step in any of the runners. """ if len(self._runners) > 1: self._populate_job_queue.put(exploration) else: self._runners[0].step(exploration)
[ "def", "step", "(", "self", ",", "exploration", ")", ":", "if", "len", "(", "self", ".", "_runners", ")", ">", "1", ":", "self", ".", "_populate_job_queue", ".", "put", "(", "exploration", ")", "else", ":", "self", ".", "_runners", "[", "0", "]", ".", "step", "(", "exploration", ")" ]
Execute one step in any of the runners.
[ "Execute", "one", "step", "in", "any", "of", "the", "runners", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DeepQNetwork/expreplay.py#L233-L240
train
tensorpack/tensorpack
examples/DeepQNetwork/expreplay.py
EnvRunnerManager.reset_stats
def reset_stats(self): """ Returns: mean, max: two stats of the runners, to be added to backend """ scores = list(itertools.chain.from_iterable([v.total_scores for v in self._runners])) for v in self._runners: v.total_scores.clear() try: return np.mean(scores), np.max(scores) except Exception: logger.exception("Cannot compute total scores in EnvRunner.") return None, None
python
def reset_stats(self): """ Returns: mean, max: two stats of the runners, to be added to backend """ scores = list(itertools.chain.from_iterable([v.total_scores for v in self._runners])) for v in self._runners: v.total_scores.clear() try: return np.mean(scores), np.max(scores) except Exception: logger.exception("Cannot compute total scores in EnvRunner.") return None, None
[ "def", "reset_stats", "(", "self", ")", ":", "scores", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "[", "v", ".", "total_scores", "for", "v", "in", "self", ".", "_runners", "]", ")", ")", "for", "v", "in", "self", ".", "_runners", ":", "v", ".", "total_scores", ".", "clear", "(", ")", "try", ":", "return", "np", ".", "mean", "(", "scores", ")", ",", "np", ".", "max", "(", "scores", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Cannot compute total scores in EnvRunner.\"", ")", "return", "None", ",", "None" ]
Returns: mean, max: two stats of the runners, to be added to backend
[ "Returns", ":", "mean", "max", ":", "two", "stats", "of", "the", "runners", "to", "be", "added", "to", "backend" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DeepQNetwork/expreplay.py#L242-L255
train
tensorpack/tensorpack
tensorpack/callbacks/group.py
CallbackTimeLogger.log
def log(self): """ log the time of some heavy callbacks """ if self.tot < 3: return msgs = [] for name, t in self.times: if t / self.tot > 0.3 and t > 1: msgs.append(name + ": " + humanize_time_delta(t)) logger.info( "Callbacks took {:.3f} sec in total. {}".format( self.tot, '; '.join(msgs)))
python
def log(self): """ log the time of some heavy callbacks """ if self.tot < 3: return msgs = [] for name, t in self.times: if t / self.tot > 0.3 and t > 1: msgs.append(name + ": " + humanize_time_delta(t)) logger.info( "Callbacks took {:.3f} sec in total. {}".format( self.tot, '; '.join(msgs)))
[ "def", "log", "(", "self", ")", ":", "if", "self", ".", "tot", "<", "3", ":", "return", "msgs", "=", "[", "]", "for", "name", ",", "t", "in", "self", ".", "times", ":", "if", "t", "/", "self", ".", "tot", ">", "0.3", "and", "t", ">", "1", ":", "msgs", ".", "append", "(", "name", "+", "\": \"", "+", "humanize_time_delta", "(", "t", ")", ")", "logger", ".", "info", "(", "\"Callbacks took {:.3f} sec in total. {}\"", ".", "format", "(", "self", ".", "tot", ",", "'; '", ".", "join", "(", "msgs", ")", ")", ")" ]
log the time of some heavy callbacks
[ "log", "the", "time", "of", "some", "heavy", "callbacks" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/callbacks/group.py#L37-L48
train
tensorpack/tensorpack
tensorpack/tfutils/tower.py
TowerContext
def TowerContext(tower_name, is_training, vs_name=''): """ The context for a tower function, containing metadata about the current tower. Tensorpack trainers use :class:`TowerContext` to manage tower function. Many tensorpack layers have to be called under a :class:`TowerContext`. Example: .. code-block:: python with TowerContext('', is_training=True): # call a tensorpack layer or a tower function """ if is_training: return TrainTowerContext(tower_name, vs_name=vs_name) else: return PredictTowerContext(tower_name, vs_name=vs_name)
python
def TowerContext(tower_name, is_training, vs_name=''): """ The context for a tower function, containing metadata about the current tower. Tensorpack trainers use :class:`TowerContext` to manage tower function. Many tensorpack layers have to be called under a :class:`TowerContext`. Example: .. code-block:: python with TowerContext('', is_training=True): # call a tensorpack layer or a tower function """ if is_training: return TrainTowerContext(tower_name, vs_name=vs_name) else: return PredictTowerContext(tower_name, vs_name=vs_name)
[ "def", "TowerContext", "(", "tower_name", ",", "is_training", ",", "vs_name", "=", "''", ")", ":", "if", "is_training", ":", "return", "TrainTowerContext", "(", "tower_name", ",", "vs_name", "=", "vs_name", ")", "else", ":", "return", "PredictTowerContext", "(", "tower_name", ",", "vs_name", "=", "vs_name", ")" ]
The context for a tower function, containing metadata about the current tower. Tensorpack trainers use :class:`TowerContext` to manage tower function. Many tensorpack layers have to be called under a :class:`TowerContext`. Example: .. code-block:: python with TowerContext('', is_training=True): # call a tensorpack layer or a tower function
[ "The", "context", "for", "a", "tower", "function", "containing", "metadata", "about", "the", "current", "tower", ".", "Tensorpack", "trainers", "use", ":", "class", ":", "TowerContext", "to", "manage", "tower", "function", ".", "Many", "tensorpack", "layers", "have", "to", "be", "called", "under", "a", ":", "class", ":", "TowerContext", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/tower.py#L229-L245
train
tensorpack/tensorpack
tensorpack/tfutils/tower.py
TowerTensorHandles.training
def training(self): """ Returns: A :class:`TowerTensorHandles`, containing only the training towers. """ handles = [h for h in self._handles if h.is_training] return TowerTensorHandles(handles)
python
def training(self): """ Returns: A :class:`TowerTensorHandles`, containing only the training towers. """ handles = [h for h in self._handles if h.is_training] return TowerTensorHandles(handles)
[ "def", "training", "(", "self", ")", ":", "handles", "=", "[", "h", "for", "h", "in", "self", ".", "_handles", "if", "h", ".", "is_training", "]", "return", "TowerTensorHandles", "(", "handles", ")" ]
Returns: A :class:`TowerTensorHandles`, containing only the training towers.
[ "Returns", ":", "A", ":", "class", ":", "TowerTensorHandles", "containing", "only", "the", "training", "towers", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/tower.py#L338-L344
train
tensorpack/tensorpack
tensorpack/tfutils/tower.py
TowerTensorHandles.inference
def inference(self): """ Returns: A :class:`TowerTensorHandles`, containing only the inference towers. """ handles = [h for h in self._handles if not h.is_training] return TowerTensorHandles(handles)
python
def inference(self): """ Returns: A :class:`TowerTensorHandles`, containing only the inference towers. """ handles = [h for h in self._handles if not h.is_training] return TowerTensorHandles(handles)
[ "def", "inference", "(", "self", ")", ":", "handles", "=", "[", "h", "for", "h", "in", "self", ".", "_handles", "if", "not", "h", ".", "is_training", "]", "return", "TowerTensorHandles", "(", "handles", ")" ]
Returns: A :class:`TowerTensorHandles`, containing only the inference towers.
[ "Returns", ":", "A", ":", "class", ":", "TowerTensorHandles", "containing", "only", "the", "inference", "towers", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/tower.py#L346-L352
train
tensorpack/tensorpack
tensorpack/tfutils/tower.py
TowerTensorHandle.get_tensor
def get_tensor(self, name): """ Get a tensor in this tower. The name can be: 1. The name of the tensor without any tower prefix. 2. A name in the input signature, if it is used when building the tower. In the second case, this method will return the tensor that's used as the corresponding input to the tower. Note that this tensor may have a different name (e.g. may be an output of a queue). """ name = get_op_tensor_name(name)[1] if len(self.ns_name): name_with_ns = self.ns_name + "/" + name else: name_with_ns = name try: ret = get_op_or_tensor_by_name(name_with_ns) except KeyError: if name in self._extra_tensor_names: return self._extra_tensor_names[name] raise else: if name in self._extra_tensor_names: mapped_tensor = self._extra_tensor_names[name] logger.info( "'{}' may refer to both the Tensor/Placeholder '{}' or the input to the tower '{}'.".format( name, ret.name, mapped_tensor.name) + " Assuming it is the input '{}'.".format(mapped_tensor.name)) return mapped_tensor return ret
python
def get_tensor(self, name): """ Get a tensor in this tower. The name can be: 1. The name of the tensor without any tower prefix. 2. A name in the input signature, if it is used when building the tower. In the second case, this method will return the tensor that's used as the corresponding input to the tower. Note that this tensor may have a different name (e.g. may be an output of a queue). """ name = get_op_tensor_name(name)[1] if len(self.ns_name): name_with_ns = self.ns_name + "/" + name else: name_with_ns = name try: ret = get_op_or_tensor_by_name(name_with_ns) except KeyError: if name in self._extra_tensor_names: return self._extra_tensor_names[name] raise else: if name in self._extra_tensor_names: mapped_tensor = self._extra_tensor_names[name] logger.info( "'{}' may refer to both the Tensor/Placeholder '{}' or the input to the tower '{}'.".format( name, ret.name, mapped_tensor.name) + " Assuming it is the input '{}'.".format(mapped_tensor.name)) return mapped_tensor return ret
[ "def", "get_tensor", "(", "self", ",", "name", ")", ":", "name", "=", "get_op_tensor_name", "(", "name", ")", "[", "1", "]", "if", "len", "(", "self", ".", "ns_name", ")", ":", "name_with_ns", "=", "self", ".", "ns_name", "+", "\"/\"", "+", "name", "else", ":", "name_with_ns", "=", "name", "try", ":", "ret", "=", "get_op_or_tensor_by_name", "(", "name_with_ns", ")", "except", "KeyError", ":", "if", "name", "in", "self", ".", "_extra_tensor_names", ":", "return", "self", ".", "_extra_tensor_names", "[", "name", "]", "raise", "else", ":", "if", "name", "in", "self", ".", "_extra_tensor_names", ":", "mapped_tensor", "=", "self", ".", "_extra_tensor_names", "[", "name", "]", "logger", ".", "info", "(", "\"'{}' may refer to both the Tensor/Placeholder '{}' or the input to the tower '{}'.\"", ".", "format", "(", "name", ",", "ret", ".", "name", ",", "mapped_tensor", ".", "name", ")", "+", "\" Assuming it is the input '{}'.\"", ".", "format", "(", "mapped_tensor", ".", "name", ")", ")", "return", "mapped_tensor", "return", "ret" ]
Get a tensor in this tower. The name can be: 1. The name of the tensor without any tower prefix. 2. A name in the input signature, if it is used when building the tower. In the second case, this method will return the tensor that's used as the corresponding input to the tower. Note that this tensor may have a different name (e.g. may be an output of a queue).
[ "Get", "a", "tensor", "in", "this", "tower", ".", "The", "name", "can", "be", ":" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/tower.py#L384-L415
train
tensorpack/tensorpack
tensorpack/tfutils/tower.py
TowerTensorHandle.get_variable
def get_variable(self, name): """ Get a variable used in this tower. The name should not contain the variable scope prefix of the tower. When the tower has the same variable scope and name scope, this is equivalent to :meth:`get_tensor`. """ name = get_op_tensor_name(name)[1] if len(self.vs_name): name_with_vs = self.vs_name + "/" + name else: name_with_vs = name return get_op_or_tensor_by_name(name_with_vs)
python
def get_variable(self, name): """ Get a variable used in this tower. The name should not contain the variable scope prefix of the tower. When the tower has the same variable scope and name scope, this is equivalent to :meth:`get_tensor`. """ name = get_op_tensor_name(name)[1] if len(self.vs_name): name_with_vs = self.vs_name + "/" + name else: name_with_vs = name return get_op_or_tensor_by_name(name_with_vs)
[ "def", "get_variable", "(", "self", ",", "name", ")", ":", "name", "=", "get_op_tensor_name", "(", "name", ")", "[", "1", "]", "if", "len", "(", "self", ".", "vs_name", ")", ":", "name_with_vs", "=", "self", ".", "vs_name", "+", "\"/\"", "+", "name", "else", ":", "name_with_vs", "=", "name", "return", "get_op_or_tensor_by_name", "(", "name_with_vs", ")" ]
Get a variable used in this tower. The name should not contain the variable scope prefix of the tower. When the tower has the same variable scope and name scope, this is equivalent to :meth:`get_tensor`.
[ "Get", "a", "variable", "used", "in", "this", "tower", ".", "The", "name", "should", "not", "contain", "the", "variable", "scope", "prefix", "of", "the", "tower", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/tower.py#L429-L442
train
tensorpack/tensorpack
tensorpack/tfutils/tower.py
TowerTensorHandle.get_collection
def get_collection(self, key=None, name=None): """ See :meth:`BaseTowerContext.get_collection_in_tower`. Args: key (str): the key of the collection name: deprecated """ if name is not None: logger.warn("TowerTensorHandle.get_collection(name=..) was renamed to (key=..) !") key = name return self._ctx.get_collection_in_tower(key)
python
def get_collection(self, key=None, name=None): """ See :meth:`BaseTowerContext.get_collection_in_tower`. Args: key (str): the key of the collection name: deprecated """ if name is not None: logger.warn("TowerTensorHandle.get_collection(name=..) was renamed to (key=..) !") key = name return self._ctx.get_collection_in_tower(key)
[ "def", "get_collection", "(", "self", ",", "key", "=", "None", ",", "name", "=", "None", ")", ":", "if", "name", "is", "not", "None", ":", "logger", ".", "warn", "(", "\"TowerTensorHandle.get_collection(name=..) was renamed to (key=..) !\"", ")", "key", "=", "name", "return", "self", ".", "_ctx", ".", "get_collection_in_tower", "(", "key", ")" ]
See :meth:`BaseTowerContext.get_collection_in_tower`. Args: key (str): the key of the collection name: deprecated
[ "See", ":", "meth", ":", "BaseTowerContext", ".", "get_collection_in_tower", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/tower.py#L450-L461
train
tensorpack/tensorpack
tensorpack/utils/fs.py
mkdir_p
def mkdir_p(dirname): """ Like "mkdir -p", make a dir recursively, but do nothing if the dir exists Args: dirname(str): """ assert dirname is not None if dirname == '' or os.path.isdir(dirname): return try: os.makedirs(dirname) except OSError as e: if e.errno != errno.EEXIST: raise e
python
def mkdir_p(dirname): """ Like "mkdir -p", make a dir recursively, but do nothing if the dir exists Args: dirname(str): """ assert dirname is not None if dirname == '' or os.path.isdir(dirname): return try: os.makedirs(dirname) except OSError as e: if e.errno != errno.EEXIST: raise e
[ "def", "mkdir_p", "(", "dirname", ")", ":", "assert", "dirname", "is", "not", "None", "if", "dirname", "==", "''", "or", "os", ".", "path", ".", "isdir", "(", "dirname", ")", ":", "return", "try", ":", "os", ".", "makedirs", "(", "dirname", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "e" ]
Like "mkdir -p", make a dir recursively, but do nothing if the dir exists Args: dirname(str):
[ "Like", "mkdir", "-", "p", "make", "a", "dir", "recursively", "but", "do", "nothing", "if", "the", "dir", "exists" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/fs.py#L16-L29
train
tensorpack/tensorpack
tensorpack/utils/fs.py
download
def download(url, dir, filename=None, expect_size=None): """ Download URL to a directory. Will figure out the filename automatically from URL, if not given. """ mkdir_p(dir) if filename is None: filename = url.split('/')[-1] fpath = os.path.join(dir, filename) if os.path.isfile(fpath): if expect_size is not None and os.stat(fpath).st_size == expect_size: logger.info("File {} exists! Skip download.".format(filename)) return fpath else: logger.warn("File {} exists. Will overwrite with a new download!".format(filename)) def hook(t): last_b = [0] def inner(b, bsize, tsize=None): if tsize is not None: t.total = tsize t.update((b - last_b[0]) * bsize) last_b[0] = b return inner try: with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t: fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t)) statinfo = os.stat(fpath) size = statinfo.st_size except IOError: logger.error("Failed to download {}".format(url)) raise assert size > 0, "Downloaded an empty file from {}!".format(url) if expect_size is not None and size != expect_size: logger.error("File downloaded from {} does not match the expected size!".format(url)) logger.error("You may have downloaded a broken file, or the upstream may have modified the file.") # TODO human-readable size logger.info('Succesfully downloaded ' + filename + ". " + str(size) + ' bytes.') return fpath
python
def download(url, dir, filename=None, expect_size=None): """ Download URL to a directory. Will figure out the filename automatically from URL, if not given. """ mkdir_p(dir) if filename is None: filename = url.split('/')[-1] fpath = os.path.join(dir, filename) if os.path.isfile(fpath): if expect_size is not None and os.stat(fpath).st_size == expect_size: logger.info("File {} exists! Skip download.".format(filename)) return fpath else: logger.warn("File {} exists. Will overwrite with a new download!".format(filename)) def hook(t): last_b = [0] def inner(b, bsize, tsize=None): if tsize is not None: t.total = tsize t.update((b - last_b[0]) * bsize) last_b[0] = b return inner try: with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t: fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t)) statinfo = os.stat(fpath) size = statinfo.st_size except IOError: logger.error("Failed to download {}".format(url)) raise assert size > 0, "Downloaded an empty file from {}!".format(url) if expect_size is not None and size != expect_size: logger.error("File downloaded from {} does not match the expected size!".format(url)) logger.error("You may have downloaded a broken file, or the upstream may have modified the file.") # TODO human-readable size logger.info('Succesfully downloaded ' + filename + ". " + str(size) + ' bytes.') return fpath
[ "def", "download", "(", "url", ",", "dir", ",", "filename", "=", "None", ",", "expect_size", "=", "None", ")", ":", "mkdir_p", "(", "dir", ")", "if", "filename", "is", "None", ":", "filename", "=", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "fpath", "=", "os", ".", "path", ".", "join", "(", "dir", ",", "filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "fpath", ")", ":", "if", "expect_size", "is", "not", "None", "and", "os", ".", "stat", "(", "fpath", ")", ".", "st_size", "==", "expect_size", ":", "logger", ".", "info", "(", "\"File {} exists! Skip download.\"", ".", "format", "(", "filename", ")", ")", "return", "fpath", "else", ":", "logger", ".", "warn", "(", "\"File {} exists. Will overwrite with a new download!\"", ".", "format", "(", "filename", ")", ")", "def", "hook", "(", "t", ")", ":", "last_b", "=", "[", "0", "]", "def", "inner", "(", "b", ",", "bsize", ",", "tsize", "=", "None", ")", ":", "if", "tsize", "is", "not", "None", ":", "t", ".", "total", "=", "tsize", "t", ".", "update", "(", "(", "b", "-", "last_b", "[", "0", "]", ")", "*", "bsize", ")", "last_b", "[", "0", "]", "=", "b", "return", "inner", "try", ":", "with", "tqdm", ".", "tqdm", "(", "unit", "=", "'B'", ",", "unit_scale", "=", "True", ",", "miniters", "=", "1", ",", "desc", "=", "filename", ")", "as", "t", ":", "fpath", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "url", ",", "fpath", ",", "reporthook", "=", "hook", "(", "t", ")", ")", "statinfo", "=", "os", ".", "stat", "(", "fpath", ")", "size", "=", "statinfo", ".", "st_size", "except", "IOError", ":", "logger", ".", "error", "(", "\"Failed to download {}\"", ".", "format", "(", "url", ")", ")", "raise", "assert", "size", ">", "0", ",", "\"Downloaded an empty file from {}!\"", ".", "format", "(", "url", ")", "if", "expect_size", "is", "not", "None", "and", "size", "!=", "expect_size", ":", "logger", ".", "error", "(", "\"File downloaded from {} does not match the expected size!\"", ".", 
"format", "(", "url", ")", ")", "logger", ".", "error", "(", "\"You may have downloaded a broken file, or the upstream may have modified the file.\"", ")", "# TODO human-readable size", "logger", ".", "info", "(", "'Succesfully downloaded '", "+", "filename", "+", "\". \"", "+", "str", "(", "size", ")", "+", "' bytes.'", ")", "return", "fpath" ]
Download URL to a directory. Will figure out the filename automatically from URL, if not given.
[ "Download", "URL", "to", "a", "directory", ".", "Will", "figure", "out", "the", "filename", "automatically", "from", "URL", "if", "not", "given", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/fs.py#L32-L74
train
tensorpack/tensorpack
tensorpack/utils/fs.py
recursive_walk
def recursive_walk(rootdir): """ Yields: str: All files in rootdir, recursively. """ for r, dirs, files in os.walk(rootdir): for f in files: yield os.path.join(r, f)
python
def recursive_walk(rootdir): """ Yields: str: All files in rootdir, recursively. """ for r, dirs, files in os.walk(rootdir): for f in files: yield os.path.join(r, f)
[ "def", "recursive_walk", "(", "rootdir", ")", ":", "for", "r", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "rootdir", ")", ":", "for", "f", "in", "files", ":", "yield", "os", ".", "path", ".", "join", "(", "r", ",", "f", ")" ]
Yields: str: All files in rootdir, recursively.
[ "Yields", ":", "str", ":", "All", "files", "in", "rootdir", "recursively", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/fs.py#L77-L84
train
tensorpack/tensorpack
tensorpack/utils/fs.py
get_dataset_path
def get_dataset_path(*args): """ Get the path to some dataset under ``$TENSORPACK_DATASET``. Args: args: strings to be joined to form path. Returns: str: path to the dataset. """ d = os.environ.get('TENSORPACK_DATASET', None) if d is None: d = os.path.join(os.path.expanduser('~'), 'tensorpack_data') if execute_only_once(): logger.warn("Env var $TENSORPACK_DATASET not set, using {} for datasets.".format(d)) if not os.path.isdir(d): mkdir_p(d) logger.info("Created the directory {}.".format(d)) assert os.path.isdir(d), d return os.path.join(d, *args)
python
def get_dataset_path(*args): """ Get the path to some dataset under ``$TENSORPACK_DATASET``. Args: args: strings to be joined to form path. Returns: str: path to the dataset. """ d = os.environ.get('TENSORPACK_DATASET', None) if d is None: d = os.path.join(os.path.expanduser('~'), 'tensorpack_data') if execute_only_once(): logger.warn("Env var $TENSORPACK_DATASET not set, using {} for datasets.".format(d)) if not os.path.isdir(d): mkdir_p(d) logger.info("Created the directory {}.".format(d)) assert os.path.isdir(d), d return os.path.join(d, *args)
[ "def", "get_dataset_path", "(", "*", "args", ")", ":", "d", "=", "os", ".", "environ", ".", "get", "(", "'TENSORPACK_DATASET'", ",", "None", ")", "if", "d", "is", "None", ":", "d", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'tensorpack_data'", ")", "if", "execute_only_once", "(", ")", ":", "logger", ".", "warn", "(", "\"Env var $TENSORPACK_DATASET not set, using {} for datasets.\"", ".", "format", "(", "d", ")", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "d", ")", ":", "mkdir_p", "(", "d", ")", "logger", ".", "info", "(", "\"Created the directory {}.\"", ".", "format", "(", "d", ")", ")", "assert", "os", ".", "path", ".", "isdir", "(", "d", ")", ",", "d", "return", "os", ".", "path", ".", "join", "(", "d", ",", "*", "args", ")" ]
Get the path to some dataset under ``$TENSORPACK_DATASET``. Args: args: strings to be joined to form path. Returns: str: path to the dataset.
[ "Get", "the", "path", "to", "some", "dataset", "under", "$TENSORPACK_DATASET", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/fs.py#L87-L106
train
tensorpack/tensorpack
tensorpack/tfutils/collection.py
backup_collection
def backup_collection(keys=None): """ Args: keys (list): list of collection keys to backup. Defaults to all keys in the graph. Returns: dict: the backup """ if keys is None: keys = tf.get_default_graph().get_all_collection_keys() ret = {} assert isinstance(keys, (list, tuple, set)) for k in keys: ret[k] = copy(tf.get_collection(k)) return ret
python
def backup_collection(keys=None): """ Args: keys (list): list of collection keys to backup. Defaults to all keys in the graph. Returns: dict: the backup """ if keys is None: keys = tf.get_default_graph().get_all_collection_keys() ret = {} assert isinstance(keys, (list, tuple, set)) for k in keys: ret[k] = copy(tf.get_collection(k)) return ret
[ "def", "backup_collection", "(", "keys", "=", "None", ")", ":", "if", "keys", "is", "None", ":", "keys", "=", "tf", ".", "get_default_graph", "(", ")", ".", "get_all_collection_keys", "(", ")", "ret", "=", "{", "}", "assert", "isinstance", "(", "keys", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", "for", "k", "in", "keys", ":", "ret", "[", "k", "]", "=", "copy", "(", "tf", ".", "get_collection", "(", "k", ")", ")", "return", "ret" ]
Args: keys (list): list of collection keys to backup. Defaults to all keys in the graph. Returns: dict: the backup
[ "Args", ":", "keys", "(", "list", ")", ":", "list", "of", "collection", "keys", "to", "backup", ".", "Defaults", "to", "all", "keys", "in", "the", "graph", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/collection.py#L19-L34
train
tensorpack/tensorpack
tensorpack/tfutils/collection.py
restore_collection
def restore_collection(backup): """ Restore from a collection backup. Args: backup (dict): """ for k, v in six.iteritems(backup): del tf.get_collection_ref(k)[:] tf.get_collection_ref(k).extend(v)
python
def restore_collection(backup): """ Restore from a collection backup. Args: backup (dict): """ for k, v in six.iteritems(backup): del tf.get_collection_ref(k)[:] tf.get_collection_ref(k).extend(v)
[ "def", "restore_collection", "(", "backup", ")", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "backup", ")", ":", "del", "tf", ".", "get_collection_ref", "(", "k", ")", "[", ":", "]", "tf", ".", "get_collection_ref", "(", "k", ")", ".", "extend", "(", "v", ")" ]
Restore from a collection backup. Args: backup (dict):
[ "Restore", "from", "a", "collection", "backup", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/collection.py#L37-L46
train
tensorpack/tensorpack
tensorpack/tfutils/collection.py
CollectionGuard.get_collection_in_tower
def get_collection_in_tower(self, key): """ Get items from this collection that are added in the current tower. """ new = tf.get_collection(key) old = set(self.original.get(key, [])) # persist the order in new return [x for x in new if x not in old]
python
def get_collection_in_tower(self, key): """ Get items from this collection that are added in the current tower. """ new = tf.get_collection(key) old = set(self.original.get(key, [])) # persist the order in new return [x for x in new if x not in old]
[ "def", "get_collection_in_tower", "(", "self", ",", "key", ")", ":", "new", "=", "tf", ".", "get_collection", "(", "key", ")", "old", "=", "set", "(", "self", ".", "original", ".", "get", "(", "key", ",", "[", "]", ")", ")", "# persist the order in new", "return", "[", "x", "for", "x", "in", "new", "if", "x", "not", "in", "old", "]" ]
Get items from this collection that are added in the current tower.
[ "Get", "items", "from", "this", "collection", "that", "are", "added", "in", "the", "current", "tower", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/collection.py#L168-L175
train
tensorpack/tensorpack
examples/PennTreebank/reader.py
ptb_producer
def ptb_producer(raw_data, batch_size, num_steps, name=None): """Iterate on the raw PTB data. This chunks up raw_data into batches of examples and returns Tensors that are drawn from these batches. Args: raw_data: one of the raw data outputs from ptb_raw_data. batch_size: int, the batch size. num_steps: int, the number of unrolls. name: the name of this operation (optional). Returns: A pair of Tensors, each shaped [batch_size, num_steps]. The second element of the tuple is the same data time-shifted to the right by one. Raises: tf.errors.InvalidArgumentError: if batch_size or num_steps are too high. """ with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]): raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32) data_len = tf.size(raw_data) batch_len = data_len // batch_size data = tf.reshape(raw_data[0 : batch_size * batch_len], [batch_size, batch_len]) epoch_size = (batch_len - 1) // num_steps assertion = tf.assert_positive( epoch_size, message="epoch_size == 0, decrease batch_size or num_steps") with tf.control_dependencies([assertion]): epoch_size = tf.identity(epoch_size, name="epoch_size") i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue() x = tf.strided_slice(data, [0, i * num_steps], [batch_size, (i + 1) * num_steps]) x.set_shape([batch_size, num_steps]) y = tf.strided_slice(data, [0, i * num_steps + 1], [batch_size, (i + 1) * num_steps + 1]) y.set_shape([batch_size, num_steps]) return x, y
python
def ptb_producer(raw_data, batch_size, num_steps, name=None): """Iterate on the raw PTB data. This chunks up raw_data into batches of examples and returns Tensors that are drawn from these batches. Args: raw_data: one of the raw data outputs from ptb_raw_data. batch_size: int, the batch size. num_steps: int, the number of unrolls. name: the name of this operation (optional). Returns: A pair of Tensors, each shaped [batch_size, num_steps]. The second element of the tuple is the same data time-shifted to the right by one. Raises: tf.errors.InvalidArgumentError: if batch_size or num_steps are too high. """ with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]): raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32) data_len = tf.size(raw_data) batch_len = data_len // batch_size data = tf.reshape(raw_data[0 : batch_size * batch_len], [batch_size, batch_len]) epoch_size = (batch_len - 1) // num_steps assertion = tf.assert_positive( epoch_size, message="epoch_size == 0, decrease batch_size or num_steps") with tf.control_dependencies([assertion]): epoch_size = tf.identity(epoch_size, name="epoch_size") i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue() x = tf.strided_slice(data, [0, i * num_steps], [batch_size, (i + 1) * num_steps]) x.set_shape([batch_size, num_steps]) y = tf.strided_slice(data, [0, i * num_steps + 1], [batch_size, (i + 1) * num_steps + 1]) y.set_shape([batch_size, num_steps]) return x, y
[ "def", "ptb_producer", "(", "raw_data", ",", "batch_size", ",", "num_steps", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "\"PTBProducer\"", ",", "[", "raw_data", ",", "batch_size", ",", "num_steps", "]", ")", ":", "raw_data", "=", "tf", ".", "convert_to_tensor", "(", "raw_data", ",", "name", "=", "\"raw_data\"", ",", "dtype", "=", "tf", ".", "int32", ")", "data_len", "=", "tf", ".", "size", "(", "raw_data", ")", "batch_len", "=", "data_len", "//", "batch_size", "data", "=", "tf", ".", "reshape", "(", "raw_data", "[", "0", ":", "batch_size", "*", "batch_len", "]", ",", "[", "batch_size", ",", "batch_len", "]", ")", "epoch_size", "=", "(", "batch_len", "-", "1", ")", "//", "num_steps", "assertion", "=", "tf", ".", "assert_positive", "(", "epoch_size", ",", "message", "=", "\"epoch_size == 0, decrease batch_size or num_steps\"", ")", "with", "tf", ".", "control_dependencies", "(", "[", "assertion", "]", ")", ":", "epoch_size", "=", "tf", ".", "identity", "(", "epoch_size", ",", "name", "=", "\"epoch_size\"", ")", "i", "=", "tf", ".", "train", ".", "range_input_producer", "(", "epoch_size", ",", "shuffle", "=", "False", ")", ".", "dequeue", "(", ")", "x", "=", "tf", ".", "strided_slice", "(", "data", ",", "[", "0", ",", "i", "*", "num_steps", "]", ",", "[", "batch_size", ",", "(", "i", "+", "1", ")", "*", "num_steps", "]", ")", "x", ".", "set_shape", "(", "[", "batch_size", ",", "num_steps", "]", ")", "y", "=", "tf", ".", "strided_slice", "(", "data", ",", "[", "0", ",", "i", "*", "num_steps", "+", "1", "]", ",", "[", "batch_size", ",", "(", "i", "+", "1", ")", "*", "num_steps", "+", "1", "]", ")", "y", ".", "set_shape", "(", "[", "batch_size", ",", "num_steps", "]", ")", "return", "x", ",", "y" ]
Iterate on the raw PTB data. This chunks up raw_data into batches of examples and returns Tensors that are drawn from these batches. Args: raw_data: one of the raw data outputs from ptb_raw_data. batch_size: int, the batch size. num_steps: int, the number of unrolls. name: the name of this operation (optional). Returns: A pair of Tensors, each shaped [batch_size, num_steps]. The second element of the tuple is the same data time-shifted to the right by one. Raises: tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
[ "Iterate", "on", "the", "raw", "PTB", "data", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/PennTreebank/reader.py#L78-L119
train
tensorpack/tensorpack
tensorpack/utils/logger.py
set_logger_dir
def set_logger_dir(dirname, action=None): """ Set the directory for global logging. Args: dirname(str): log directory action(str): an action of ["k","d","q"] to be performed when the directory exists. Will ask user by default. "d": delete the directory. Note that the deletion may fail when the directory is used by tensorboard. "k": keep the directory. This is useful when you resume from a previous training and want the directory to look as if the training was not interrupted. Note that this option does not load old models or any other old states for you. It simply does nothing. """ global LOG_DIR, _FILE_HANDLER if _FILE_HANDLER: # unload and close the old file handler, so that we may safely delete the logger directory _logger.removeHandler(_FILE_HANDLER) del _FILE_HANDLER def dir_nonempty(dirname): # If directory exists and nonempty (ignore hidden files), prompt for action return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.']) if dir_nonempty(dirname): if not action: _logger.warn("""\ Log directory {} exists! Use 'd' to delete it. """.format(dirname)) _logger.warn("""\ If you're resuming from a previous run, you can choose to keep it. Press any other key to exit. """) while not action: action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip() act = action if act == 'b': backup_name = dirname + _get_time_str() shutil.move(dirname, backup_name) info("Directory '{}' backuped to '{}'".format(dirname, backup_name)) # noqa: F821 elif act == 'd': shutil.rmtree(dirname, ignore_errors=True) if dir_nonempty(dirname): shutil.rmtree(dirname, ignore_errors=False) elif act == 'n': dirname = dirname + _get_time_str() info("Use a new log directory {}".format(dirname)) # noqa: F821 elif act == 'k': pass else: raise OSError("Directory {} exits!".format(dirname)) LOG_DIR = dirname from .fs import mkdir_p mkdir_p(dirname) _set_file(os.path.join(dirname, 'log.log'))
python
def set_logger_dir(dirname, action=None): """ Set the directory for global logging. Args: dirname(str): log directory action(str): an action of ["k","d","q"] to be performed when the directory exists. Will ask user by default. "d": delete the directory. Note that the deletion may fail when the directory is used by tensorboard. "k": keep the directory. This is useful when you resume from a previous training and want the directory to look as if the training was not interrupted. Note that this option does not load old models or any other old states for you. It simply does nothing. """ global LOG_DIR, _FILE_HANDLER if _FILE_HANDLER: # unload and close the old file handler, so that we may safely delete the logger directory _logger.removeHandler(_FILE_HANDLER) del _FILE_HANDLER def dir_nonempty(dirname): # If directory exists and nonempty (ignore hidden files), prompt for action return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.']) if dir_nonempty(dirname): if not action: _logger.warn("""\ Log directory {} exists! Use 'd' to delete it. """.format(dirname)) _logger.warn("""\ If you're resuming from a previous run, you can choose to keep it. Press any other key to exit. """) while not action: action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip() act = action if act == 'b': backup_name = dirname + _get_time_str() shutil.move(dirname, backup_name) info("Directory '{}' backuped to '{}'".format(dirname, backup_name)) # noqa: F821 elif act == 'd': shutil.rmtree(dirname, ignore_errors=True) if dir_nonempty(dirname): shutil.rmtree(dirname, ignore_errors=False) elif act == 'n': dirname = dirname + _get_time_str() info("Use a new log directory {}".format(dirname)) # noqa: F821 elif act == 'k': pass else: raise OSError("Directory {} exits!".format(dirname)) LOG_DIR = dirname from .fs import mkdir_p mkdir_p(dirname) _set_file(os.path.join(dirname, 'log.log'))
[ "def", "set_logger_dir", "(", "dirname", ",", "action", "=", "None", ")", ":", "global", "LOG_DIR", ",", "_FILE_HANDLER", "if", "_FILE_HANDLER", ":", "# unload and close the old file handler, so that we may safely delete the logger directory", "_logger", ".", "removeHandler", "(", "_FILE_HANDLER", ")", "del", "_FILE_HANDLER", "def", "dir_nonempty", "(", "dirname", ")", ":", "# If directory exists and nonempty (ignore hidden files), prompt for action", "return", "os", ".", "path", ".", "isdir", "(", "dirname", ")", "and", "len", "(", "[", "x", "for", "x", "in", "os", ".", "listdir", "(", "dirname", ")", "if", "x", "[", "0", "]", "!=", "'.'", "]", ")", "if", "dir_nonempty", "(", "dirname", ")", ":", "if", "not", "action", ":", "_logger", ".", "warn", "(", "\"\"\"\\\nLog directory {} exists! Use 'd' to delete it. \"\"\"", ".", "format", "(", "dirname", ")", ")", "_logger", ".", "warn", "(", "\"\"\"\\\nIf you're resuming from a previous run, you can choose to keep it.\nPress any other key to exit. 
\"\"\"", ")", "while", "not", "action", ":", "action", "=", "input", "(", "\"Select Action: k (keep) / d (delete) / q (quit):\"", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", "act", "=", "action", "if", "act", "==", "'b'", ":", "backup_name", "=", "dirname", "+", "_get_time_str", "(", ")", "shutil", ".", "move", "(", "dirname", ",", "backup_name", ")", "info", "(", "\"Directory '{}' backuped to '{}'\"", ".", "format", "(", "dirname", ",", "backup_name", ")", ")", "# noqa: F821", "elif", "act", "==", "'d'", ":", "shutil", ".", "rmtree", "(", "dirname", ",", "ignore_errors", "=", "True", ")", "if", "dir_nonempty", "(", "dirname", ")", ":", "shutil", ".", "rmtree", "(", "dirname", ",", "ignore_errors", "=", "False", ")", "elif", "act", "==", "'n'", ":", "dirname", "=", "dirname", "+", "_get_time_str", "(", ")", "info", "(", "\"Use a new log directory {}\"", ".", "format", "(", "dirname", ")", ")", "# noqa: F821", "elif", "act", "==", "'k'", ":", "pass", "else", ":", "raise", "OSError", "(", "\"Directory {} exits!\"", ".", "format", "(", "dirname", ")", ")", "LOG_DIR", "=", "dirname", "from", ".", "fs", "import", "mkdir_p", "mkdir_p", "(", "dirname", ")", "_set_file", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "'log.log'", ")", ")" ]
Set the directory for global logging. Args: dirname(str): log directory action(str): an action of ["k","d","q"] to be performed when the directory exists. Will ask user by default. "d": delete the directory. Note that the deletion may fail when the directory is used by tensorboard. "k": keep the directory. This is useful when you resume from a previous training and want the directory to look as if the training was not interrupted. Note that this option does not load old models or any other old states for you. It simply does nothing.
[ "Set", "the", "directory", "for", "global", "logging", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/logger.py#L93-L150
train
tensorpack/tensorpack
tensorpack/utils/logger.py
auto_set_dir
def auto_set_dir(action=None, name=None): """ Use :func:`logger.set_logger_dir` to set log directory to "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running""" mod = sys.modules['__main__'] basename = os.path.basename(mod.__file__) auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')]) if name: auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name set_logger_dir(auto_dirname, action=action)
python
def auto_set_dir(action=None, name=None): """ Use :func:`logger.set_logger_dir` to set log directory to "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running""" mod = sys.modules['__main__'] basename = os.path.basename(mod.__file__) auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')]) if name: auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name set_logger_dir(auto_dirname, action=action)
[ "def", "auto_set_dir", "(", "action", "=", "None", ",", "name", "=", "None", ")", ":", "mod", "=", "sys", ".", "modules", "[", "'__main__'", "]", "basename", "=", "os", ".", "path", ".", "basename", "(", "mod", ".", "__file__", ")", "auto_dirname", "=", "os", ".", "path", ".", "join", "(", "'train_log'", ",", "basename", "[", ":", "basename", ".", "rfind", "(", "'.'", ")", "]", ")", "if", "name", ":", "auto_dirname", "+=", "'_%s'", "%", "name", "if", "os", ".", "name", "==", "'nt'", "else", "':%s'", "%", "name", "set_logger_dir", "(", "auto_dirname", ",", "action", "=", "action", ")" ]
Use :func:`logger.set_logger_dir` to set log directory to "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running
[ "Use", ":", "func", ":", "logger", ".", "set_logger_dir", "to", "set", "log", "directory", "to", ".", "/", "train_log", "/", "{", "scriptname", "}", ":", "{", "name", "}", ".", "scriptname", "is", "the", "name", "of", "the", "main", "python", "file", "currently", "running" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/logger.py#L153-L162
train
tensorpack/tensorpack
examples/HED/hed.py
class_balanced_sigmoid_cross_entropy
def class_balanced_sigmoid_cross_entropy(logits, label, name='cross_entropy_loss'): """ The class-balanced cross entropy loss, as in `Holistically-Nested Edge Detection <http://arxiv.org/abs/1504.06375>`_. Args: logits: of shape (b, ...). label: of the same shape. the ground truth in {0,1}. Returns: class-balanced cross entropy loss. """ with tf.name_scope('class_balanced_sigmoid_cross_entropy'): y = tf.cast(label, tf.float32) count_neg = tf.reduce_sum(1. - y) count_pos = tf.reduce_sum(y) beta = count_neg / (count_neg + count_pos) pos_weight = beta / (1 - beta) cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight) cost = tf.reduce_mean(cost * (1 - beta)) zero = tf.equal(count_pos, 0.0) return tf.where(zero, 0.0, cost, name=name)
python
def class_balanced_sigmoid_cross_entropy(logits, label, name='cross_entropy_loss'): """ The class-balanced cross entropy loss, as in `Holistically-Nested Edge Detection <http://arxiv.org/abs/1504.06375>`_. Args: logits: of shape (b, ...). label: of the same shape. the ground truth in {0,1}. Returns: class-balanced cross entropy loss. """ with tf.name_scope('class_balanced_sigmoid_cross_entropy'): y = tf.cast(label, tf.float32) count_neg = tf.reduce_sum(1. - y) count_pos = tf.reduce_sum(y) beta = count_neg / (count_neg + count_pos) pos_weight = beta / (1 - beta) cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight) cost = tf.reduce_mean(cost * (1 - beta)) zero = tf.equal(count_pos, 0.0) return tf.where(zero, 0.0, cost, name=name)
[ "def", "class_balanced_sigmoid_cross_entropy", "(", "logits", ",", "label", ",", "name", "=", "'cross_entropy_loss'", ")", ":", "with", "tf", ".", "name_scope", "(", "'class_balanced_sigmoid_cross_entropy'", ")", ":", "y", "=", "tf", ".", "cast", "(", "label", ",", "tf", ".", "float32", ")", "count_neg", "=", "tf", ".", "reduce_sum", "(", "1.", "-", "y", ")", "count_pos", "=", "tf", ".", "reduce_sum", "(", "y", ")", "beta", "=", "count_neg", "/", "(", "count_neg", "+", "count_pos", ")", "pos_weight", "=", "beta", "/", "(", "1", "-", "beta", ")", "cost", "=", "tf", ".", "nn", ".", "weighted_cross_entropy_with_logits", "(", "logits", "=", "logits", ",", "targets", "=", "y", ",", "pos_weight", "=", "pos_weight", ")", "cost", "=", "tf", ".", "reduce_mean", "(", "cost", "*", "(", "1", "-", "beta", ")", ")", "zero", "=", "tf", ".", "equal", "(", "count_pos", ",", "0.0", ")", "return", "tf", ".", "where", "(", "zero", ",", "0.0", ",", "cost", ",", "name", "=", "name", ")" ]
The class-balanced cross entropy loss, as in `Holistically-Nested Edge Detection <http://arxiv.org/abs/1504.06375>`_. Args: logits: of shape (b, ...). label: of the same shape. the ground truth in {0,1}. Returns: class-balanced cross entropy loss.
[ "The", "class", "-", "balanced", "cross", "entropy", "loss", "as", "in", "Holistically", "-", "Nested", "Edge", "Detection", "<http", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1504", ".", "06375", ">", "_", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/HED/hed.py#L21-L44
train
tensorpack/tensorpack
examples/HED/hed.py
CaffeBilinearUpSample
def CaffeBilinearUpSample(x, shape): """ Deterministic bilinearly-upsample the input images. It is implemented by deconvolution with "BilinearFiller" in Caffe. It is aimed to mimic caffe behavior. Args: x (tf.Tensor): a NCHW tensor shape (int): the upsample factor Returns: tf.Tensor: a NCHW tensor. """ inp_shape = x.shape.as_list() ch = inp_shape[1] assert ch == 1, "This layer only works for channel=1" # for a version that supports >1 channels, see: # https://github.com/tensorpack/tensorpack/issues/1040#issuecomment-452798180 shape = int(shape) filter_shape = 2 * shape def bilinear_conv_filler(s): """ s: width, height of the conv filter https://github.com/BVLC/caffe/blob/99bd99795dcdf0b1d3086a8d67ab1782a8a08383/include/caffe/filler.hpp#L219-L268 """ f = np.ceil(float(s) / 2) c = float(2 * f - 1 - f % 2) / (2 * f) ret = np.zeros((s, s), dtype='float32') for x in range(s): for y in range(s): ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c)) return ret w = bilinear_conv_filler(filter_shape) w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch)) weight_var = tf.constant(w, tf.float32, shape=(filter_shape, filter_shape, ch, ch), name='bilinear_upsample_filter') x = tf.pad(x, [[0, 0], [0, 0], [shape - 1, shape - 1], [shape - 1, shape - 1]], mode='SYMMETRIC') out_shape = tf.shape(x) * tf.constant([1, 1, shape, shape], tf.int32) deconv = tf.nn.conv2d_transpose(x, weight_var, out_shape, [1, 1, shape, shape], 'SAME', data_format='NCHW') edge = shape * (shape - 1) deconv = deconv[:, :, edge:-edge, edge:-edge] if inp_shape[2]: inp_shape[2] *= shape if inp_shape[3]: inp_shape[3] *= shape deconv.set_shape(inp_shape) return deconv
python
def CaffeBilinearUpSample(x, shape): """ Deterministic bilinearly-upsample the input images. It is implemented by deconvolution with "BilinearFiller" in Caffe. It is aimed to mimic caffe behavior. Args: x (tf.Tensor): a NCHW tensor shape (int): the upsample factor Returns: tf.Tensor: a NCHW tensor. """ inp_shape = x.shape.as_list() ch = inp_shape[1] assert ch == 1, "This layer only works for channel=1" # for a version that supports >1 channels, see: # https://github.com/tensorpack/tensorpack/issues/1040#issuecomment-452798180 shape = int(shape) filter_shape = 2 * shape def bilinear_conv_filler(s): """ s: width, height of the conv filter https://github.com/BVLC/caffe/blob/99bd99795dcdf0b1d3086a8d67ab1782a8a08383/include/caffe/filler.hpp#L219-L268 """ f = np.ceil(float(s) / 2) c = float(2 * f - 1 - f % 2) / (2 * f) ret = np.zeros((s, s), dtype='float32') for x in range(s): for y in range(s): ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c)) return ret w = bilinear_conv_filler(filter_shape) w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch)) weight_var = tf.constant(w, tf.float32, shape=(filter_shape, filter_shape, ch, ch), name='bilinear_upsample_filter') x = tf.pad(x, [[0, 0], [0, 0], [shape - 1, shape - 1], [shape - 1, shape - 1]], mode='SYMMETRIC') out_shape = tf.shape(x) * tf.constant([1, 1, shape, shape], tf.int32) deconv = tf.nn.conv2d_transpose(x, weight_var, out_shape, [1, 1, shape, shape], 'SAME', data_format='NCHW') edge = shape * (shape - 1) deconv = deconv[:, :, edge:-edge, edge:-edge] if inp_shape[2]: inp_shape[2] *= shape if inp_shape[3]: inp_shape[3] *= shape deconv.set_shape(inp_shape) return deconv
[ "def", "CaffeBilinearUpSample", "(", "x", ",", "shape", ")", ":", "inp_shape", "=", "x", ".", "shape", ".", "as_list", "(", ")", "ch", "=", "inp_shape", "[", "1", "]", "assert", "ch", "==", "1", ",", "\"This layer only works for channel=1\"", "# for a version that supports >1 channels, see:", "# https://github.com/tensorpack/tensorpack/issues/1040#issuecomment-452798180", "shape", "=", "int", "(", "shape", ")", "filter_shape", "=", "2", "*", "shape", "def", "bilinear_conv_filler", "(", "s", ")", ":", "\"\"\"\n s: width, height of the conv filter\n https://github.com/BVLC/caffe/blob/99bd99795dcdf0b1d3086a8d67ab1782a8a08383/include/caffe/filler.hpp#L219-L268\n \"\"\"", "f", "=", "np", ".", "ceil", "(", "float", "(", "s", ")", "/", "2", ")", "c", "=", "float", "(", "2", "*", "f", "-", "1", "-", "f", "%", "2", ")", "/", "(", "2", "*", "f", ")", "ret", "=", "np", ".", "zeros", "(", "(", "s", ",", "s", ")", ",", "dtype", "=", "'float32'", ")", "for", "x", "in", "range", "(", "s", ")", ":", "for", "y", "in", "range", "(", "s", ")", ":", "ret", "[", "x", ",", "y", "]", "=", "(", "1", "-", "abs", "(", "x", "/", "f", "-", "c", ")", ")", "*", "(", "1", "-", "abs", "(", "y", "/", "f", "-", "c", ")", ")", "return", "ret", "w", "=", "bilinear_conv_filler", "(", "filter_shape", ")", "w", "=", "np", ".", "repeat", "(", "w", ",", "ch", "*", "ch", ")", ".", "reshape", "(", "(", "filter_shape", ",", "filter_shape", ",", "ch", ",", "ch", ")", ")", "weight_var", "=", "tf", ".", "constant", "(", "w", ",", "tf", ".", "float32", ",", "shape", "=", "(", "filter_shape", ",", "filter_shape", ",", "ch", ",", "ch", ")", ",", "name", "=", "'bilinear_upsample_filter'", ")", "x", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "shape", "-", "1", ",", "shape", "-", "1", "]", ",", "[", "shape", "-", "1", ",", "shape", "-", "1", "]", "]", ",", "mode", "=", "'SYMMETRIC'", ")", "out_shape", "=", "tf", ".", "shape", "(", "x", ")", "*", "tf", 
".", "constant", "(", "[", "1", ",", "1", ",", "shape", ",", "shape", "]", ",", "tf", ".", "int32", ")", "deconv", "=", "tf", ".", "nn", ".", "conv2d_transpose", "(", "x", ",", "weight_var", ",", "out_shape", ",", "[", "1", ",", "1", ",", "shape", ",", "shape", "]", ",", "'SAME'", ",", "data_format", "=", "'NCHW'", ")", "edge", "=", "shape", "*", "(", "shape", "-", "1", ")", "deconv", "=", "deconv", "[", ":", ",", ":", ",", "edge", ":", "-", "edge", ",", "edge", ":", "-", "edge", "]", "if", "inp_shape", "[", "2", "]", ":", "inp_shape", "[", "2", "]", "*=", "shape", "if", "inp_shape", "[", "3", "]", ":", "inp_shape", "[", "3", "]", "*=", "shape", "deconv", ".", "set_shape", "(", "inp_shape", ")", "return", "deconv" ]
Deterministic bilinearly-upsample the input images. It is implemented by deconvolution with "BilinearFiller" in Caffe. It is aimed to mimic caffe behavior. Args: x (tf.Tensor): a NCHW tensor shape (int): the upsample factor Returns: tf.Tensor: a NCHW tensor.
[ "Deterministic", "bilinearly", "-", "upsample", "the", "input", "images", ".", "It", "is", "implemented", "by", "deconvolution", "with", "BilinearFiller", "in", "Caffe", ".", "It", "is", "aimed", "to", "mimic", "caffe", "behavior", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/HED/hed.py#L48-L101
train
tensorpack/tensorpack
tensorpack/dataflow/parallel.py
_MultiProcessZMQDataFlow.reset_state
def reset_state(self): """ All forked dataflows should only be reset **once and only once** in spawned processes. Subclasses should call this method with super. """ assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!" self._reset_done = True # __del__ not guaranteed to get called at exit atexit.register(del_weakref, weakref.ref(self))
python
def reset_state(self): """ All forked dataflows should only be reset **once and only once** in spawned processes. Subclasses should call this method with super. """ assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!" self._reset_done = True # __del__ not guaranteed to get called at exit atexit.register(del_weakref, weakref.ref(self))
[ "def", "reset_state", "(", "self", ")", ":", "assert", "not", "self", ".", "_reset_done", ",", "\"reset_state() was called twice! This violates the API of DataFlow!\"", "self", ".", "_reset_done", "=", "True", "# __del__ not guaranteed to get called at exit", "atexit", ".", "register", "(", "del_weakref", ",", "weakref", ".", "ref", "(", "self", ")", ")" ]
All forked dataflows should only be reset **once and only once** in spawned processes. Subclasses should call this method with super.
[ "All", "forked", "dataflows", "should", "only", "be", "reset", "**", "once", "and", "only", "once", "**", "in", "spawned", "processes", ".", "Subclasses", "should", "call", "this", "method", "with", "super", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/dataflow/parallel.py#L92-L101
train
tensorpack/tensorpack
tensorpack/compat/tensor_spec.py
TensorSpec.is_compatible_with
def is_compatible_with(self, spec_or_tensor): """Returns True if spec_or_tensor is compatible with this TensorSpec. Two tensors are considered compatible if they have the same dtype and their shapes are compatible (see `tf.TensorShape.is_compatible_with`). Args: spec_or_tensor: A tf.TensorSpec or a tf.Tensor Returns: True if spec_or_tensor is compatible with self. """ return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and self._shape.is_compatible_with(spec_or_tensor.shape))
python
def is_compatible_with(self, spec_or_tensor): """Returns True if spec_or_tensor is compatible with this TensorSpec. Two tensors are considered compatible if they have the same dtype and their shapes are compatible (see `tf.TensorShape.is_compatible_with`). Args: spec_or_tensor: A tf.TensorSpec or a tf.Tensor Returns: True if spec_or_tensor is compatible with self. """ return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and self._shape.is_compatible_with(spec_or_tensor.shape))
[ "def", "is_compatible_with", "(", "self", ",", "spec_or_tensor", ")", ":", "return", "(", "self", ".", "_dtype", ".", "is_compatible_with", "(", "spec_or_tensor", ".", "dtype", ")", "and", "self", ".", "_shape", ".", "is_compatible_with", "(", "spec_or_tensor", ".", "shape", ")", ")" ]
Returns True if spec_or_tensor is compatible with this TensorSpec. Two tensors are considered compatible if they have the same dtype and their shapes are compatible (see `tf.TensorShape.is_compatible_with`). Args: spec_or_tensor: A tf.TensorSpec or a tf.Tensor Returns: True if spec_or_tensor is compatible with self.
[ "Returns", "True", "if", "spec_or_tensor", "is", "compatible", "with", "this", "TensorSpec", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/compat/tensor_spec.py#L75-L88
train
tensorpack/tensorpack
tensorpack/tfutils/model_utils.py
describe_trainable_vars
def describe_trainable_vars(): """ Print a description of the current model parameters. Skip variables starting with "tower", as they are just duplicates built by data-parallel logic. """ train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if len(train_vars) == 0: logger.warn("No trainable variables in the graph!") return total = 0 total_bytes = 0 data = [] for v in train_vars: if v.name.startswith('tower'): continue shape = v.get_shape() ele = shape.num_elements() if ele is None: logger.warn("Shape of variable {} is not fully defined but {}.".format(v.name, shape)) ele = 0 try: shape = shape.as_list() except ValueError: shape = '<unknown>' total += ele total_bytes += ele * v.dtype.size data.append([v.name, shape, ele, v.device, v.dtype.base_dtype.name]) headers = ['name', 'shape', '#elements', 'device', 'dtype'] dtypes = list(set([x[4] for x in data])) if len(dtypes) == 1 and dtypes[0] == "float32": # don't log the dtype if all vars are float32 (default dtype) for x in data: del x[4] del headers[4] devices = set([x[3] for x in data]) if len(devices) == 1: # don't log the device if all vars on the same device for x in data: del x[3] del headers[3] table = tabulate(data, headers=headers) size_mb = total_bytes / 1024.0**2 summary_msg = colored( "\nNumber of trainable variables: {}".format(len(data)) + "\nNumber of parameters (elements): {}".format(total) + "\nStorage space needed for all trainable variables: {:.02f}MB".format(size_mb), 'cyan') logger.info(colored("List of Trainable Variables: \n", 'cyan') + table + summary_msg)
python
def describe_trainable_vars(): """ Print a description of the current model parameters. Skip variables starting with "tower", as they are just duplicates built by data-parallel logic. """ train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if len(train_vars) == 0: logger.warn("No trainable variables in the graph!") return total = 0 total_bytes = 0 data = [] for v in train_vars: if v.name.startswith('tower'): continue shape = v.get_shape() ele = shape.num_elements() if ele is None: logger.warn("Shape of variable {} is not fully defined but {}.".format(v.name, shape)) ele = 0 try: shape = shape.as_list() except ValueError: shape = '<unknown>' total += ele total_bytes += ele * v.dtype.size data.append([v.name, shape, ele, v.device, v.dtype.base_dtype.name]) headers = ['name', 'shape', '#elements', 'device', 'dtype'] dtypes = list(set([x[4] for x in data])) if len(dtypes) == 1 and dtypes[0] == "float32": # don't log the dtype if all vars are float32 (default dtype) for x in data: del x[4] del headers[4] devices = set([x[3] for x in data]) if len(devices) == 1: # don't log the device if all vars on the same device for x in data: del x[3] del headers[3] table = tabulate(data, headers=headers) size_mb = total_bytes / 1024.0**2 summary_msg = colored( "\nNumber of trainable variables: {}".format(len(data)) + "\nNumber of parameters (elements): {}".format(total) + "\nStorage space needed for all trainable variables: {:.02f}MB".format(size_mb), 'cyan') logger.info(colored("List of Trainable Variables: \n", 'cyan') + table + summary_msg)
[ "def", "describe_trainable_vars", "(", ")", ":", "train_vars", "=", "tf", ".", "get_collection", "(", "tf", ".", "GraphKeys", ".", "TRAINABLE_VARIABLES", ")", "if", "len", "(", "train_vars", ")", "==", "0", ":", "logger", ".", "warn", "(", "\"No trainable variables in the graph!\"", ")", "return", "total", "=", "0", "total_bytes", "=", "0", "data", "=", "[", "]", "for", "v", "in", "train_vars", ":", "if", "v", ".", "name", ".", "startswith", "(", "'tower'", ")", ":", "continue", "shape", "=", "v", ".", "get_shape", "(", ")", "ele", "=", "shape", ".", "num_elements", "(", ")", "if", "ele", "is", "None", ":", "logger", ".", "warn", "(", "\"Shape of variable {} is not fully defined but {}.\"", ".", "format", "(", "v", ".", "name", ",", "shape", ")", ")", "ele", "=", "0", "try", ":", "shape", "=", "shape", ".", "as_list", "(", ")", "except", "ValueError", ":", "shape", "=", "'<unknown>'", "total", "+=", "ele", "total_bytes", "+=", "ele", "*", "v", ".", "dtype", ".", "size", "data", ".", "append", "(", "[", "v", ".", "name", ",", "shape", ",", "ele", ",", "v", ".", "device", ",", "v", ".", "dtype", ".", "base_dtype", ".", "name", "]", ")", "headers", "=", "[", "'name'", ",", "'shape'", ",", "'#elements'", ",", "'device'", ",", "'dtype'", "]", "dtypes", "=", "list", "(", "set", "(", "[", "x", "[", "4", "]", "for", "x", "in", "data", "]", ")", ")", "if", "len", "(", "dtypes", ")", "==", "1", "and", "dtypes", "[", "0", "]", "==", "\"float32\"", ":", "# don't log the dtype if all vars are float32 (default dtype)", "for", "x", "in", "data", ":", "del", "x", "[", "4", "]", "del", "headers", "[", "4", "]", "devices", "=", "set", "(", "[", "x", "[", "3", "]", "for", "x", "in", "data", "]", ")", "if", "len", "(", "devices", ")", "==", "1", ":", "# don't log the device if all vars on the same device", "for", "x", "in", "data", ":", "del", "x", "[", "3", "]", "del", "headers", "[", "3", "]", "table", "=", "tabulate", "(", "data", ",", "headers", "=", "headers", ")", "size_mb", 
"=", "total_bytes", "/", "1024.0", "**", "2", "summary_msg", "=", "colored", "(", "\"\\nNumber of trainable variables: {}\"", ".", "format", "(", "len", "(", "data", ")", ")", "+", "\"\\nNumber of parameters (elements): {}\"", ".", "format", "(", "total", ")", "+", "\"\\nStorage space needed for all trainable variables: {:.02f}MB\"", ".", "format", "(", "size_mb", ")", ",", "'cyan'", ")", "logger", ".", "info", "(", "colored", "(", "\"List of Trainable Variables: \\n\"", ",", "'cyan'", ")", "+", "table", "+", "summary_msg", ")" ]
Print a description of the current model parameters. Skip variables starting with "tower", as they are just duplicates built by data-parallel logic.
[ "Print", "a", "description", "of", "the", "current", "model", "parameters", ".", "Skip", "variables", "starting", "with", "tower", "as", "they", "are", "just", "duplicates", "built", "by", "data", "-", "parallel", "logic", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/model_utils.py#L15-L67
train
tensorpack/tensorpack
tensorpack/tfutils/model_utils.py
get_shape_str
def get_shape_str(tensors): """ Internally used by layer registry, to print shapes of inputs/outputs of layers. Args: tensors (list or tf.Tensor): a tensor or a list of tensors Returns: str: a string to describe the shape """ if isinstance(tensors, (list, tuple)): for v in tensors: assert isinstance(v, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(v)) shape_str = ",".join( map(lambda x: str(x.get_shape().as_list()), tensors)) else: assert isinstance(tensors, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(tensors)) shape_str = str(tensors.get_shape().as_list()) return shape_str
python
def get_shape_str(tensors): """ Internally used by layer registry, to print shapes of inputs/outputs of layers. Args: tensors (list or tf.Tensor): a tensor or a list of tensors Returns: str: a string to describe the shape """ if isinstance(tensors, (list, tuple)): for v in tensors: assert isinstance(v, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(v)) shape_str = ",".join( map(lambda x: str(x.get_shape().as_list()), tensors)) else: assert isinstance(tensors, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(tensors)) shape_str = str(tensors.get_shape().as_list()) return shape_str
[ "def", "get_shape_str", "(", "tensors", ")", ":", "if", "isinstance", "(", "tensors", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "v", "in", "tensors", ":", "assert", "isinstance", "(", "v", ",", "(", "tf", ".", "Tensor", ",", "tf", ".", "Variable", ")", ")", ",", "\"Not a tensor: {}\"", ".", "format", "(", "type", "(", "v", ")", ")", "shape_str", "=", "\",\"", ".", "join", "(", "map", "(", "lambda", "x", ":", "str", "(", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", ")", ",", "tensors", ")", ")", "else", ":", "assert", "isinstance", "(", "tensors", ",", "(", "tf", ".", "Tensor", ",", "tf", ".", "Variable", ")", ")", ",", "\"Not a tensor: {}\"", ".", "format", "(", "type", "(", "tensors", ")", ")", "shape_str", "=", "str", "(", "tensors", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", ")", "return", "shape_str" ]
Internally used by layer registry, to print shapes of inputs/outputs of layers. Args: tensors (list or tf.Tensor): a tensor or a list of tensors Returns: str: a string to describe the shape
[ "Internally", "used", "by", "layer", "registry", "to", "print", "shapes", "of", "inputs", "/", "outputs", "of", "layers", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/model_utils.py#L70-L87
train
tensorpack/tensorpack
examples/SimilarityLearning/mnist-embeddings.py
contrastive_loss
def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"): r"""Loss for Siamese networks as described in the paper: `Learning a Similarity Metric Discriminatively, with Application to Face Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al. .. math:: \frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2 Args: left (tf.Tensor): left feature vectors of shape [Batch, N]. right (tf.Tensor): right feature vectors of shape [Batch, N]. y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar. margin (float): horizon for negative examples (y==0). extra (bool): also return distances for pos and neg. Returns: tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist) """ with tf.name_scope(scope): y = tf.cast(y, tf.float32) delta = tf.reduce_sum(tf.square(left - right), 1) delta_sqrt = tf.sqrt(delta + 1e-10) match_loss = delta missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt)) loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss)) if extra: num_pos = tf.count_nonzero(y) num_neg = tf.count_nonzero(1 - y) pos_dist = tf.where(tf.equal(num_pos, 0), 0., tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32), name="pos-dist") neg_dist = tf.where(tf.equal(num_neg, 0), 0., tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32), name="neg-dist") return loss, pos_dist, neg_dist else: return loss
python
def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"): r"""Loss for Siamese networks as described in the paper: `Learning a Similarity Metric Discriminatively, with Application to Face Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al. .. math:: \frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2 Args: left (tf.Tensor): left feature vectors of shape [Batch, N]. right (tf.Tensor): right feature vectors of shape [Batch, N]. y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar. margin (float): horizon for negative examples (y==0). extra (bool): also return distances for pos and neg. Returns: tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist) """ with tf.name_scope(scope): y = tf.cast(y, tf.float32) delta = tf.reduce_sum(tf.square(left - right), 1) delta_sqrt = tf.sqrt(delta + 1e-10) match_loss = delta missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt)) loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss)) if extra: num_pos = tf.count_nonzero(y) num_neg = tf.count_nonzero(1 - y) pos_dist = tf.where(tf.equal(num_pos, 0), 0., tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32), name="pos-dist") neg_dist = tf.where(tf.equal(num_neg, 0), 0., tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32), name="neg-dist") return loss, pos_dist, neg_dist else: return loss
[ "def", "contrastive_loss", "(", "left", ",", "right", ",", "y", ",", "margin", ",", "extra", "=", "False", ",", "scope", "=", "\"constrastive_loss\"", ")", ":", "with", "tf", ".", "name_scope", "(", "scope", ")", ":", "y", "=", "tf", ".", "cast", "(", "y", ",", "tf", ".", "float32", ")", "delta", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "left", "-", "right", ")", ",", "1", ")", "delta_sqrt", "=", "tf", ".", "sqrt", "(", "delta", "+", "1e-10", ")", "match_loss", "=", "delta", "missmatch_loss", "=", "tf", ".", "square", "(", "tf", ".", "nn", ".", "relu", "(", "margin", "-", "delta_sqrt", ")", ")", "loss", "=", "tf", ".", "reduce_mean", "(", "0.5", "*", "(", "y", "*", "match_loss", "+", "(", "1", "-", "y", ")", "*", "missmatch_loss", ")", ")", "if", "extra", ":", "num_pos", "=", "tf", ".", "count_nonzero", "(", "y", ")", "num_neg", "=", "tf", ".", "count_nonzero", "(", "1", "-", "y", ")", "pos_dist", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "num_pos", ",", "0", ")", ",", "0.", ",", "tf", ".", "reduce_sum", "(", "y", "*", "delta_sqrt", ")", "/", "tf", ".", "cast", "(", "num_pos", ",", "tf", ".", "float32", ")", ",", "name", "=", "\"pos-dist\"", ")", "neg_dist", "=", "tf", ".", "where", "(", "tf", ".", "equal", "(", "num_neg", ",", "0", ")", ",", "0.", ",", "tf", ".", "reduce_sum", "(", "(", "1", "-", "y", ")", "*", "delta_sqrt", ")", "/", "tf", ".", "cast", "(", "num_neg", ",", "tf", ".", "float32", ")", ",", "name", "=", "\"neg-dist\"", ")", "return", "loss", ",", "pos_dist", ",", "neg_dist", "else", ":", "return", "loss" ]
r"""Loss for Siamese networks as described in the paper: `Learning a Similarity Metric Discriminatively, with Application to Face Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al. .. math:: \frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2 Args: left (tf.Tensor): left feature vectors of shape [Batch, N]. right (tf.Tensor): right feature vectors of shape [Batch, N]. y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar. margin (float): horizon for negative examples (y==0). extra (bool): also return distances for pos and neg. Returns: tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
[ "r", "Loss", "for", "Siamese", "networks", "as", "described", "in", "the", "paper", ":", "Learning", "a", "Similarity", "Metric", "Discriminatively", "with", "Application", "to", "Face", "Verification", "<http", ":", "//", "yann", ".", "lecun", ".", "com", "/", "exdb", "/", "publis", "/", "pdf", "/", "chopra", "-", "05", ".", "pdf", ">", "_", "by", "Chopra", "et", "al", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SimilarityLearning/mnist-embeddings.py#L25-L65
train
tensorpack/tensorpack
examples/SimilarityLearning/mnist-embeddings.py
siamese_cosine_loss
def siamese_cosine_loss(left, right, y, scope="cosine_loss"): r"""Loss for Siamese networks (cosine version). Same as :func:`contrastive_loss` but with different similarity measurement. .. math:: [\frac{l \cdot r}{\lVert l\rVert \lVert r\rVert} - (2y-1)]^2 Args: left (tf.Tensor): left feature vectors of shape [Batch, N]. right (tf.Tensor): right feature vectors of shape [Batch, N]. y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar. Returns: tf.Tensor: cosine-loss as a scalar tensor. """ def l2_norm(t, eps=1e-12): """ Returns: tf.Tensor: norm of 2D input tensor on axis 1 """ with tf.name_scope("l2_norm"): return tf.sqrt(tf.reduce_sum(tf.square(t), 1) + eps) with tf.name_scope(scope): y = 2 * tf.cast(y, tf.float32) - 1 pred = tf.reduce_sum(left * right, 1) / (l2_norm(left) * l2_norm(right) + 1e-10) return tf.nn.l2_loss(y - pred) / tf.cast(tf.shape(left)[0], tf.float32)
python
def siamese_cosine_loss(left, right, y, scope="cosine_loss"): r"""Loss for Siamese networks (cosine version). Same as :func:`contrastive_loss` but with different similarity measurement. .. math:: [\frac{l \cdot r}{\lVert l\rVert \lVert r\rVert} - (2y-1)]^2 Args: left (tf.Tensor): left feature vectors of shape [Batch, N]. right (tf.Tensor): right feature vectors of shape [Batch, N]. y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar. Returns: tf.Tensor: cosine-loss as a scalar tensor. """ def l2_norm(t, eps=1e-12): """ Returns: tf.Tensor: norm of 2D input tensor on axis 1 """ with tf.name_scope("l2_norm"): return tf.sqrt(tf.reduce_sum(tf.square(t), 1) + eps) with tf.name_scope(scope): y = 2 * tf.cast(y, tf.float32) - 1 pred = tf.reduce_sum(left * right, 1) / (l2_norm(left) * l2_norm(right) + 1e-10) return tf.nn.l2_loss(y - pred) / tf.cast(tf.shape(left)[0], tf.float32)
[ "def", "siamese_cosine_loss", "(", "left", ",", "right", ",", "y", ",", "scope", "=", "\"cosine_loss\"", ")", ":", "def", "l2_norm", "(", "t", ",", "eps", "=", "1e-12", ")", ":", "\"\"\"\n Returns:\n tf.Tensor: norm of 2D input tensor on axis 1\n \"\"\"", "with", "tf", ".", "name_scope", "(", "\"l2_norm\"", ")", ":", "return", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "t", ")", ",", "1", ")", "+", "eps", ")", "with", "tf", ".", "name_scope", "(", "scope", ")", ":", "y", "=", "2", "*", "tf", ".", "cast", "(", "y", ",", "tf", ".", "float32", ")", "-", "1", "pred", "=", "tf", ".", "reduce_sum", "(", "left", "*", "right", ",", "1", ")", "/", "(", "l2_norm", "(", "left", ")", "*", "l2_norm", "(", "right", ")", "+", "1e-10", ")", "return", "tf", ".", "nn", ".", "l2_loss", "(", "y", "-", "pred", ")", "/", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "left", ")", "[", "0", "]", ",", "tf", ".", "float32", ")" ]
r"""Loss for Siamese networks (cosine version). Same as :func:`contrastive_loss` but with different similarity measurement. .. math:: [\frac{l \cdot r}{\lVert l\rVert \lVert r\rVert} - (2y-1)]^2 Args: left (tf.Tensor): left feature vectors of shape [Batch, N]. right (tf.Tensor): right feature vectors of shape [Batch, N]. y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar. Returns: tf.Tensor: cosine-loss as a scalar tensor.
[ "r", "Loss", "for", "Siamese", "networks", "(", "cosine", "version", ")", ".", "Same", "as", ":", "func", ":", "contrastive_loss", "but", "with", "different", "similarity", "measurement", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SimilarityLearning/mnist-embeddings.py#L68-L96
train
tensorpack/tensorpack
examples/SimilarityLearning/mnist-embeddings.py
triplet_loss
def triplet_loss(anchor, positive, negative, margin, extra=False, scope="triplet_loss"): r"""Loss for Triplet networks as described in the paper: `FaceNet: A Unified Embedding for Face Recognition and Clustering <https://arxiv.org/abs/1503.03832>`_ by Schroff et al. Learn embeddings from an anchor point and a similar input (positive) as well as a not-similar input (negative). Intuitively, a matching pair (anchor, positive) should have a smaller relative distance than a non-matching pair (anchor, negative). .. math:: \max(0, m + \Vert a-p\Vert^2 - \Vert a-n\Vert^2) Args: anchor (tf.Tensor): anchor feature vectors of shape [Batch, N]. positive (tf.Tensor): features of positive match of the same shape. negative (tf.Tensor): features of negative match of the same shape. margin (float): horizon for negative examples extra (bool): also return distances for pos and neg. Returns: tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist) """ with tf.name_scope(scope): d_pos = tf.reduce_sum(tf.square(anchor - positive), 1) d_neg = tf.reduce_sum(tf.square(anchor - negative), 1) loss = tf.reduce_mean(tf.maximum(0., margin + d_pos - d_neg)) if extra: pos_dist = tf.reduce_mean(tf.sqrt(d_pos + 1e-10), name='pos-dist') neg_dist = tf.reduce_mean(tf.sqrt(d_neg + 1e-10), name='neg-dist') return loss, pos_dist, neg_dist else: return loss
python
def triplet_loss(anchor, positive, negative, margin, extra=False, scope="triplet_loss"): r"""Loss for Triplet networks as described in the paper: `FaceNet: A Unified Embedding for Face Recognition and Clustering <https://arxiv.org/abs/1503.03832>`_ by Schroff et al. Learn embeddings from an anchor point and a similar input (positive) as well as a not-similar input (negative). Intuitively, a matching pair (anchor, positive) should have a smaller relative distance than a non-matching pair (anchor, negative). .. math:: \max(0, m + \Vert a-p\Vert^2 - \Vert a-n\Vert^2) Args: anchor (tf.Tensor): anchor feature vectors of shape [Batch, N]. positive (tf.Tensor): features of positive match of the same shape. negative (tf.Tensor): features of negative match of the same shape. margin (float): horizon for negative examples extra (bool): also return distances for pos and neg. Returns: tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist) """ with tf.name_scope(scope): d_pos = tf.reduce_sum(tf.square(anchor - positive), 1) d_neg = tf.reduce_sum(tf.square(anchor - negative), 1) loss = tf.reduce_mean(tf.maximum(0., margin + d_pos - d_neg)) if extra: pos_dist = tf.reduce_mean(tf.sqrt(d_pos + 1e-10), name='pos-dist') neg_dist = tf.reduce_mean(tf.sqrt(d_neg + 1e-10), name='neg-dist') return loss, pos_dist, neg_dist else: return loss
[ "def", "triplet_loss", "(", "anchor", ",", "positive", ",", "negative", ",", "margin", ",", "extra", "=", "False", ",", "scope", "=", "\"triplet_loss\"", ")", ":", "with", "tf", ".", "name_scope", "(", "scope", ")", ":", "d_pos", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "anchor", "-", "positive", ")", ",", "1", ")", "d_neg", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "anchor", "-", "negative", ")", ",", "1", ")", "loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "maximum", "(", "0.", ",", "margin", "+", "d_pos", "-", "d_neg", ")", ")", "if", "extra", ":", "pos_dist", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "sqrt", "(", "d_pos", "+", "1e-10", ")", ",", "name", "=", "'pos-dist'", ")", "neg_dist", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "sqrt", "(", "d_neg", "+", "1e-10", ")", ",", "name", "=", "'neg-dist'", ")", "return", "loss", ",", "pos_dist", ",", "neg_dist", "else", ":", "return", "loss" ]
r"""Loss for Triplet networks as described in the paper: `FaceNet: A Unified Embedding for Face Recognition and Clustering <https://arxiv.org/abs/1503.03832>`_ by Schroff et al. Learn embeddings from an anchor point and a similar input (positive) as well as a not-similar input (negative). Intuitively, a matching pair (anchor, positive) should have a smaller relative distance than a non-matching pair (anchor, negative). .. math:: \max(0, m + \Vert a-p\Vert^2 - \Vert a-n\Vert^2) Args: anchor (tf.Tensor): anchor feature vectors of shape [Batch, N]. positive (tf.Tensor): features of positive match of the same shape. negative (tf.Tensor): features of negative match of the same shape. margin (float): horizon for negative examples extra (bool): also return distances for pos and neg. Returns: tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)
[ "r", "Loss", "for", "Triplet", "networks", "as", "described", "in", "the", "paper", ":", "FaceNet", ":", "A", "Unified", "Embedding", "for", "Face", "Recognition", "and", "Clustering", "<https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1503", ".", "03832", ">", "_", "by", "Schroff", "et", "al", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SimilarityLearning/mnist-embeddings.py#L99-L135
train
tensorpack/tensorpack
examples/SimilarityLearning/mnist-embeddings.py
soft_triplet_loss
def soft_triplet_loss(anchor, positive, negative, extra=True, scope="soft_triplet_loss"): r"""Loss for triplet networks as described in the paper: `Deep Metric Learning using Triplet Network <https://arxiv.org/abs/1412.6622>`_ by Hoffer et al. It is a softmax loss using :math:`(anchor-positive)^2` and :math:`(anchor-negative)^2` as logits. Args: anchor (tf.Tensor): anchor feature vectors of shape [Batch, N]. positive (tf.Tensor): features of positive match of the same shape. negative (tf.Tensor): features of negative match of the same shape. extra (bool): also return distances for pos and neg. Returns: tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist) """ eps = 1e-10 with tf.name_scope(scope): d_pos = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1) + eps) d_neg = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1) + eps) logits = tf.stack([d_pos, d_neg], axis=1) ones = tf.ones_like(tf.squeeze(d_pos), dtype="int32") loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=ones)) if extra: pos_dist = tf.reduce_mean(d_pos, name='pos-dist') neg_dist = tf.reduce_mean(d_neg, name='neg-dist') return loss, pos_dist, neg_dist else: return loss
python
def soft_triplet_loss(anchor, positive, negative, extra=True, scope="soft_triplet_loss"): r"""Loss for triplet networks as described in the paper: `Deep Metric Learning using Triplet Network <https://arxiv.org/abs/1412.6622>`_ by Hoffer et al. It is a softmax loss using :math:`(anchor-positive)^2` and :math:`(anchor-negative)^2` as logits. Args: anchor (tf.Tensor): anchor feature vectors of shape [Batch, N]. positive (tf.Tensor): features of positive match of the same shape. negative (tf.Tensor): features of negative match of the same shape. extra (bool): also return distances for pos and neg. Returns: tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist) """ eps = 1e-10 with tf.name_scope(scope): d_pos = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1) + eps) d_neg = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1) + eps) logits = tf.stack([d_pos, d_neg], axis=1) ones = tf.ones_like(tf.squeeze(d_pos), dtype="int32") loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=ones)) if extra: pos_dist = tf.reduce_mean(d_pos, name='pos-dist') neg_dist = tf.reduce_mean(d_neg, name='neg-dist') return loss, pos_dist, neg_dist else: return loss
[ "def", "soft_triplet_loss", "(", "anchor", ",", "positive", ",", "negative", ",", "extra", "=", "True", ",", "scope", "=", "\"soft_triplet_loss\"", ")", ":", "eps", "=", "1e-10", "with", "tf", ".", "name_scope", "(", "scope", ")", ":", "d_pos", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "anchor", "-", "positive", ")", ",", "1", ")", "+", "eps", ")", "d_neg", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "anchor", "-", "negative", ")", ",", "1", ")", "+", "eps", ")", "logits", "=", "tf", ".", "stack", "(", "[", "d_pos", ",", "d_neg", "]", ",", "axis", "=", "1", ")", "ones", "=", "tf", ".", "ones_like", "(", "tf", ".", "squeeze", "(", "d_pos", ")", ",", "dtype", "=", "\"int32\"", ")", "loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "nn", ".", "sparse_softmax_cross_entropy_with_logits", "(", "logits", "=", "logits", ",", "labels", "=", "ones", ")", ")", "if", "extra", ":", "pos_dist", "=", "tf", ".", "reduce_mean", "(", "d_pos", ",", "name", "=", "'pos-dist'", ")", "neg_dist", "=", "tf", ".", "reduce_mean", "(", "d_neg", ",", "name", "=", "'neg-dist'", ")", "return", "loss", ",", "pos_dist", ",", "neg_dist", "else", ":", "return", "loss" ]
r"""Loss for triplet networks as described in the paper: `Deep Metric Learning using Triplet Network <https://arxiv.org/abs/1412.6622>`_ by Hoffer et al. It is a softmax loss using :math:`(anchor-positive)^2` and :math:`(anchor-negative)^2` as logits. Args: anchor (tf.Tensor): anchor feature vectors of shape [Batch, N]. positive (tf.Tensor): features of positive match of the same shape. negative (tf.Tensor): features of negative match of the same shape. extra (bool): also return distances for pos and neg. Returns: tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)
[ "r", "Loss", "for", "triplet", "networks", "as", "described", "in", "the", "paper", ":", "Deep", "Metric", "Learning", "using", "Triplet", "Network", "<https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1412", ".", "6622", ">", "_", "by", "Hoffer", "et", "al", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SimilarityLearning/mnist-embeddings.py#L138-L171
train
tensorpack/tensorpack
examples/SimilarityLearning/mnist-embeddings.py
center_loss
def center_loss(embedding, label, num_classes, alpha=0.1, scope="center_loss"): r"""Center-Loss as described in the paper `A Discriminative Feature Learning Approach for Deep Face Recognition` <http://ydwen.github.io/papers/WenECCV16.pdf> by Wen et al. Args: embedding (tf.Tensor): features produced by the network label (tf.Tensor): ground-truth label for each feature num_classes (int): number of different classes alpha (float): learning rate for updating the centers Returns: tf.Tensor: center loss """ nrof_features = embedding.get_shape()[1] centers = tf.get_variable('centers', [num_classes, nrof_features], dtype=tf.float32, initializer=tf.constant_initializer(0), trainable=False) label = tf.reshape(label, [-1]) centers_batch = tf.gather(centers, label) diff = (1 - alpha) * (centers_batch - embedding) centers = tf.scatter_sub(centers, label, diff) loss = tf.reduce_mean(tf.square(embedding - centers_batch), name=scope) return loss
python
def center_loss(embedding, label, num_classes, alpha=0.1, scope="center_loss"): r"""Center-Loss as described in the paper `A Discriminative Feature Learning Approach for Deep Face Recognition` <http://ydwen.github.io/papers/WenECCV16.pdf> by Wen et al. Args: embedding (tf.Tensor): features produced by the network label (tf.Tensor): ground-truth label for each feature num_classes (int): number of different classes alpha (float): learning rate for updating the centers Returns: tf.Tensor: center loss """ nrof_features = embedding.get_shape()[1] centers = tf.get_variable('centers', [num_classes, nrof_features], dtype=tf.float32, initializer=tf.constant_initializer(0), trainable=False) label = tf.reshape(label, [-1]) centers_batch = tf.gather(centers, label) diff = (1 - alpha) * (centers_batch - embedding) centers = tf.scatter_sub(centers, label, diff) loss = tf.reduce_mean(tf.square(embedding - centers_batch), name=scope) return loss
[ "def", "center_loss", "(", "embedding", ",", "label", ",", "num_classes", ",", "alpha", "=", "0.1", ",", "scope", "=", "\"center_loss\"", ")", ":", "nrof_features", "=", "embedding", ".", "get_shape", "(", ")", "[", "1", "]", "centers", "=", "tf", ".", "get_variable", "(", "'centers'", ",", "[", "num_classes", ",", "nrof_features", "]", ",", "dtype", "=", "tf", ".", "float32", ",", "initializer", "=", "tf", ".", "constant_initializer", "(", "0", ")", ",", "trainable", "=", "False", ")", "label", "=", "tf", ".", "reshape", "(", "label", ",", "[", "-", "1", "]", ")", "centers_batch", "=", "tf", ".", "gather", "(", "centers", ",", "label", ")", "diff", "=", "(", "1", "-", "alpha", ")", "*", "(", "centers_batch", "-", "embedding", ")", "centers", "=", "tf", ".", "scatter_sub", "(", "centers", ",", "label", ",", "diff", ")", "loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "square", "(", "embedding", "-", "centers_batch", ")", ",", "name", "=", "scope", ")", "return", "loss" ]
r"""Center-Loss as described in the paper `A Discriminative Feature Learning Approach for Deep Face Recognition` <http://ydwen.github.io/papers/WenECCV16.pdf> by Wen et al. Args: embedding (tf.Tensor): features produced by the network label (tf.Tensor): ground-truth label for each feature num_classes (int): number of different classes alpha (float): learning rate for updating the centers Returns: tf.Tensor: center loss
[ "r", "Center", "-", "Loss", "as", "described", "in", "the", "paper", "A", "Discriminative", "Feature", "Learning", "Approach", "for", "Deep", "Face", "Recognition", "<http", ":", "//", "ydwen", ".", "github", ".", "io", "/", "papers", "/", "WenECCV16", ".", "pdf", ">", "by", "Wen", "et", "al", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SimilarityLearning/mnist-embeddings.py#L174-L196
train
tensorpack/tensorpack
examples/SimilarityLearning/mnist-embeddings.py
EmbeddingModel.embed
def embed(self, x, nfeatures=2): """Embed all given tensors into an nfeatures-dim space. """ list_split = 0 if isinstance(x, list): list_split = len(x) x = tf.concat(x, 0) # pre-process MNIST dataflow data x = tf.expand_dims(x, 3) x = x * 2 - 1 # the embedding network net = slim.layers.conv2d(x, 20, 5, scope='conv1') net = slim.layers.max_pool2d(net, 2, scope='pool1') net = slim.layers.conv2d(net, 50, 5, scope='conv2') net = slim.layers.max_pool2d(net, 2, scope='pool2') net = slim.layers.flatten(net, scope='flatten3') net = slim.layers.fully_connected(net, 500, scope='fully_connected4') embeddings = slim.layers.fully_connected(net, nfeatures, activation_fn=None, scope='fully_connected5') # if "x" was a list of tensors, then split the embeddings if list_split > 0: embeddings = tf.split(embeddings, list_split, 0) return embeddings
python
def embed(self, x, nfeatures=2): """Embed all given tensors into an nfeatures-dim space. """ list_split = 0 if isinstance(x, list): list_split = len(x) x = tf.concat(x, 0) # pre-process MNIST dataflow data x = tf.expand_dims(x, 3) x = x * 2 - 1 # the embedding network net = slim.layers.conv2d(x, 20, 5, scope='conv1') net = slim.layers.max_pool2d(net, 2, scope='pool1') net = slim.layers.conv2d(net, 50, 5, scope='conv2') net = slim.layers.max_pool2d(net, 2, scope='pool2') net = slim.layers.flatten(net, scope='flatten3') net = slim.layers.fully_connected(net, 500, scope='fully_connected4') embeddings = slim.layers.fully_connected(net, nfeatures, activation_fn=None, scope='fully_connected5') # if "x" was a list of tensors, then split the embeddings if list_split > 0: embeddings = tf.split(embeddings, list_split, 0) return embeddings
[ "def", "embed", "(", "self", ",", "x", ",", "nfeatures", "=", "2", ")", ":", "list_split", "=", "0", "if", "isinstance", "(", "x", ",", "list", ")", ":", "list_split", "=", "len", "(", "x", ")", "x", "=", "tf", ".", "concat", "(", "x", ",", "0", ")", "# pre-process MNIST dataflow data", "x", "=", "tf", ".", "expand_dims", "(", "x", ",", "3", ")", "x", "=", "x", "*", "2", "-", "1", "# the embedding network", "net", "=", "slim", ".", "layers", ".", "conv2d", "(", "x", ",", "20", ",", "5", ",", "scope", "=", "'conv1'", ")", "net", "=", "slim", ".", "layers", ".", "max_pool2d", "(", "net", ",", "2", ",", "scope", "=", "'pool1'", ")", "net", "=", "slim", ".", "layers", ".", "conv2d", "(", "net", ",", "50", ",", "5", ",", "scope", "=", "'conv2'", ")", "net", "=", "slim", ".", "layers", ".", "max_pool2d", "(", "net", ",", "2", ",", "scope", "=", "'pool2'", ")", "net", "=", "slim", ".", "layers", ".", "flatten", "(", "net", ",", "scope", "=", "'flatten3'", ")", "net", "=", "slim", ".", "layers", ".", "fully_connected", "(", "net", ",", "500", ",", "scope", "=", "'fully_connected4'", ")", "embeddings", "=", "slim", ".", "layers", ".", "fully_connected", "(", "net", ",", "nfeatures", ",", "activation_fn", "=", "None", ",", "scope", "=", "'fully_connected5'", ")", "# if \"x\" was a list of tensors, then split the embeddings", "if", "list_split", ">", "0", ":", "embeddings", "=", "tf", ".", "split", "(", "embeddings", ",", "list_split", ",", "0", ")", "return", "embeddings" ]
Embed all given tensors into an nfeatures-dim space.
[ "Embed", "all", "given", "tensors", "into", "an", "nfeatures", "-", "dim", "space", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SimilarityLearning/mnist-embeddings.py#L200-L224
train
tensorpack/tensorpack
examples/FasterRCNN/utils/generate_anchors.py
generate_anchors
def generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2**np.arange(3, 6)): """ Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, 15, 15) window. """ base_anchor = np.array([1, 1, base_size, base_size], dtype='float32') - 1 ratio_anchors = _ratio_enum(base_anchor, ratios) anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales) for i in range(ratio_anchors.shape[0])]) return anchors
python
def generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2**np.arange(3, 6)): """ Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, 15, 15) window. """ base_anchor = np.array([1, 1, base_size, base_size], dtype='float32') - 1 ratio_anchors = _ratio_enum(base_anchor, ratios) anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales) for i in range(ratio_anchors.shape[0])]) return anchors
[ "def", "generate_anchors", "(", "base_size", "=", "16", ",", "ratios", "=", "[", "0.5", ",", "1", ",", "2", "]", ",", "scales", "=", "2", "**", "np", ".", "arange", "(", "3", ",", "6", ")", ")", ":", "base_anchor", "=", "np", ".", "array", "(", "[", "1", ",", "1", ",", "base_size", ",", "base_size", "]", ",", "dtype", "=", "'float32'", ")", "-", "1", "ratio_anchors", "=", "_ratio_enum", "(", "base_anchor", ",", "ratios", ")", "anchors", "=", "np", ".", "vstack", "(", "[", "_scale_enum", "(", "ratio_anchors", "[", "i", ",", ":", "]", ",", "scales", ")", "for", "i", "in", "range", "(", "ratio_anchors", ".", "shape", "[", "0", "]", ")", "]", ")", "return", "anchors" ]
Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, 15, 15) window.
[ "Generate", "anchor", "(", "reference", ")", "windows", "by", "enumerating", "aspect", "ratios", "X", "scales", "wrt", "a", "reference", "(", "0", "0", "15", "15", ")", "window", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/utils/generate_anchors.py#L41-L52
train
tensorpack/tensorpack
examples/basics/mnist-tflayers.py
Model.build_graph
def build_graph(self, image, label): """This function should build the model which takes the input variables and return cost at the end""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 with argscope([tf.layers.conv2d], padding='same', activation=tf.nn.relu): l = tf.layers.conv2d(image, 32, 3, name='conv0') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv1') l = tf.layers.conv2d(l, 32, 3, name='conv2') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv3') l = tf.layers.flatten(l) l = tf.layers.dense(l, 512, activation=tf.nn.relu, name='fc0') l = tf.layers.dropout(l, rate=0.5, training=get_current_tower_context().is_training) logits = tf.layers.dense(l, 10, activation=tf.identity, name='fc1') # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. 
wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/kernel', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/kernel', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost
python
def build_graph(self, image, label): """This function should build the model which takes the input variables and return cost at the end""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 with argscope([tf.layers.conv2d], padding='same', activation=tf.nn.relu): l = tf.layers.conv2d(image, 32, 3, name='conv0') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv1') l = tf.layers.conv2d(l, 32, 3, name='conv2') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv3') l = tf.layers.flatten(l) l = tf.layers.dense(l, 512, activation=tf.nn.relu, name='fc0') l = tf.layers.dropout(l, rate=0.5, training=get_current_tower_context().is_training) logits = tf.layers.dense(l, 10, activation=tf.identity, name='fc1') # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. 
wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/kernel', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/kernel', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost
[ "def", "build_graph", "(", "self", ",", "image", ",", "label", ")", ":", "# In tensorflow, inputs to convolution function are assumed to be", "# NHWC. Add a single channel here.", "image", "=", "tf", ".", "expand_dims", "(", "image", ",", "3", ")", "image", "=", "image", "*", "2", "-", "1", "# center the pixels values at zero", "# The context manager `argscope` sets the default option for all the layers under", "# this context. Here we use 32 channel convolution with shape 3x3", "with", "argscope", "(", "[", "tf", ".", "layers", ".", "conv2d", "]", ",", "padding", "=", "'same'", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", ":", "l", "=", "tf", ".", "layers", ".", "conv2d", "(", "image", ",", "32", ",", "3", ",", "name", "=", "'conv0'", ")", "l", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "l", ",", "2", ",", "2", ",", "padding", "=", "'valid'", ")", "l", "=", "tf", ".", "layers", ".", "conv2d", "(", "l", ",", "32", ",", "3", ",", "name", "=", "'conv1'", ")", "l", "=", "tf", ".", "layers", ".", "conv2d", "(", "l", ",", "32", ",", "3", ",", "name", "=", "'conv2'", ")", "l", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "l", ",", "2", ",", "2", ",", "padding", "=", "'valid'", ")", "l", "=", "tf", ".", "layers", ".", "conv2d", "(", "l", ",", "32", ",", "3", ",", "name", "=", "'conv3'", ")", "l", "=", "tf", ".", "layers", ".", "flatten", "(", "l", ")", "l", "=", "tf", ".", "layers", ".", "dense", "(", "l", ",", "512", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ",", "name", "=", "'fc0'", ")", "l", "=", "tf", ".", "layers", ".", "dropout", "(", "l", ",", "rate", "=", "0.5", ",", "training", "=", "get_current_tower_context", "(", ")", ".", "is_training", ")", "logits", "=", "tf", ".", "layers", ".", "dense", "(", "l", ",", "10", ",", "activation", "=", "tf", ".", "identity", ",", "name", "=", "'fc1'", ")", "# a vector of length B with loss of each sample", "cost", "=", "tf", ".", "nn", ".", 
"sparse_softmax_cross_entropy_with_logits", "(", "logits", "=", "logits", ",", "labels", "=", "label", ")", "cost", "=", "tf", ".", "reduce_mean", "(", "cost", ",", "name", "=", "'cross_entropy_loss'", ")", "# the average cross-entropy loss", "correct", "=", "tf", ".", "cast", "(", "tf", ".", "nn", ".", "in_top_k", "(", "logits", ",", "label", ",", "1", ")", ",", "tf", ".", "float32", ",", "name", "=", "'correct'", ")", "accuracy", "=", "tf", ".", "reduce_mean", "(", "correct", ",", "name", "=", "'accuracy'", ")", "# This will monitor training error & accuracy (in a moving average fashion). The value will be automatically", "# 1. written to tensosrboard", "# 2. written to stat.json", "# 3. printed after each epoch", "train_error", "=", "tf", ".", "reduce_mean", "(", "1", "-", "correct", ",", "name", "=", "'train_error'", ")", "summary", ".", "add_moving_summary", "(", "train_error", ",", "accuracy", ")", "# Use a regex to find parameters to apply weight decay.", "# Here we apply a weight decay on all W (weight matrix) of all fc layers", "# If you don't like regex, you can certainly define the cost in any other methods.", "wd_cost", "=", "tf", ".", "multiply", "(", "1e-5", ",", "regularize_cost", "(", "'fc.*/kernel'", ",", "tf", ".", "nn", ".", "l2_loss", ")", ",", "name", "=", "'regularize_loss'", ")", "total_cost", "=", "tf", ".", "add_n", "(", "[", "wd_cost", ",", "cost", "]", ",", "name", "=", "'total_cost'", ")", "summary", ".", "add_moving_summary", "(", "cost", ",", "wd_cost", ",", "total_cost", ")", "# monitor histogram of all weight (of conv and fc layers) in tensorboard", "summary", ".", "add_param_summary", "(", "(", "'.*/kernel'", ",", "[", "'histogram'", ",", "'rms'", "]", ")", ")", "# the function should return the total cost to be optimized", "return", "total_cost" ]
This function should build the model which takes the input variables and return cost at the end
[ "This", "function", "should", "build", "the", "model", "which", "takes", "the", "input", "variables", "and", "return", "cost", "at", "the", "end" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/basics/mnist-tflayers.py#L32-L83
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
print_class_histogram
def print_class_histogram(roidbs): """ Args: roidbs (list[dict]): the same format as the output of `load_training_roidbs`. """ dataset = DetectionDataset() hist_bins = np.arange(dataset.num_classes + 1) # Histogram of ground-truth objects gt_hist = np.zeros((dataset.num_classes,), dtype=np.int) for entry in roidbs: # filter crowd? gt_inds = np.where( (entry['class'] > 0) & (entry['is_crowd'] == 0))[0] gt_classes = entry['class'][gt_inds] gt_hist += np.histogram(gt_classes, bins=hist_bins)[0] data = [[dataset.class_names[i], v] for i, v in enumerate(gt_hist)] data.append(['total', sum(x[1] for x in data)]) # the first line is BG table = tabulate(data[1:], headers=['class', '#box'], tablefmt='pipe') logger.info("Ground-Truth Boxes:\n" + colored(table, 'cyan'))
python
def print_class_histogram(roidbs): """ Args: roidbs (list[dict]): the same format as the output of `load_training_roidbs`. """ dataset = DetectionDataset() hist_bins = np.arange(dataset.num_classes + 1) # Histogram of ground-truth objects gt_hist = np.zeros((dataset.num_classes,), dtype=np.int) for entry in roidbs: # filter crowd? gt_inds = np.where( (entry['class'] > 0) & (entry['is_crowd'] == 0))[0] gt_classes = entry['class'][gt_inds] gt_hist += np.histogram(gt_classes, bins=hist_bins)[0] data = [[dataset.class_names[i], v] for i, v in enumerate(gt_hist)] data.append(['total', sum(x[1] for x in data)]) # the first line is BG table = tabulate(data[1:], headers=['class', '#box'], tablefmt='pipe') logger.info("Ground-Truth Boxes:\n" + colored(table, 'cyan'))
[ "def", "print_class_histogram", "(", "roidbs", ")", ":", "dataset", "=", "DetectionDataset", "(", ")", "hist_bins", "=", "np", ".", "arange", "(", "dataset", ".", "num_classes", "+", "1", ")", "# Histogram of ground-truth objects", "gt_hist", "=", "np", ".", "zeros", "(", "(", "dataset", ".", "num_classes", ",", ")", ",", "dtype", "=", "np", ".", "int", ")", "for", "entry", "in", "roidbs", ":", "# filter crowd?", "gt_inds", "=", "np", ".", "where", "(", "(", "entry", "[", "'class'", "]", ">", "0", ")", "&", "(", "entry", "[", "'is_crowd'", "]", "==", "0", ")", ")", "[", "0", "]", "gt_classes", "=", "entry", "[", "'class'", "]", "[", "gt_inds", "]", "gt_hist", "+=", "np", ".", "histogram", "(", "gt_classes", ",", "bins", "=", "hist_bins", ")", "[", "0", "]", "data", "=", "[", "[", "dataset", ".", "class_names", "[", "i", "]", ",", "v", "]", "for", "i", ",", "v", "in", "enumerate", "(", "gt_hist", ")", "]", "data", ".", "append", "(", "[", "'total'", ",", "sum", "(", "x", "[", "1", "]", "for", "x", "in", "data", ")", "]", ")", "# the first line is BG", "table", "=", "tabulate", "(", "data", "[", "1", ":", "]", ",", "headers", "=", "[", "'class'", ",", "'#box'", "]", ",", "tablefmt", "=", "'pipe'", ")", "logger", ".", "info", "(", "\"Ground-Truth Boxes:\\n\"", "+", "colored", "(", "table", ",", "'cyan'", ")", ")" ]
Args: roidbs (list[dict]): the same format as the output of `load_training_roidbs`.
[ "Args", ":", "roidbs", "(", "list", "[", "dict", "]", ")", ":", "the", "same", "format", "as", "the", "output", "of", "load_training_roidbs", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L30-L50
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
get_all_anchors
def get_all_anchors(stride=None, sizes=None): """ Get all anchors in the largest possible image, shifted, floatbox Args: stride (int): the stride of anchors. sizes (tuple[int]): the sizes (sqrt area) of anchors Returns: anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE. """ if stride is None: stride = cfg.RPN.ANCHOR_STRIDE if sizes is None: sizes = cfg.RPN.ANCHOR_SIZES # Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors # are centered on stride / 2, have (approximate) sqrt areas of the specified # sizes, and aspect ratios as given. cell_anchors = generate_anchors( stride, scales=np.array(sizes, dtype=np.float) / stride, ratios=np.array(cfg.RPN.ANCHOR_RATIOS, dtype=np.float)) # anchors are intbox here. # anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride) max_size = cfg.PREPROC.MAX_SIZE field_size = int(np.ceil(max_size / stride)) shifts = np.arange(0, field_size) * stride shift_x, shift_y = np.meshgrid(shifts, shifts) shift_x = shift_x.flatten() shift_y = shift_y.flatten() shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose() # Kx4, K = field_size * field_size K = shifts.shape[0] A = cell_anchors.shape[0] field_of_anchors = ( cell_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))) field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4)) # FSxFSxAx4 # Many rounding happens inside the anchor code anyway # assert np.all(field_of_anchors == field_of_anchors.astype('int32')) field_of_anchors = field_of_anchors.astype('float32') field_of_anchors[:, :, :, [2, 3]] += 1 return field_of_anchors
python
def get_all_anchors(stride=None, sizes=None): """ Get all anchors in the largest possible image, shifted, floatbox Args: stride (int): the stride of anchors. sizes (tuple[int]): the sizes (sqrt area) of anchors Returns: anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE. """ if stride is None: stride = cfg.RPN.ANCHOR_STRIDE if sizes is None: sizes = cfg.RPN.ANCHOR_SIZES # Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors # are centered on stride / 2, have (approximate) sqrt areas of the specified # sizes, and aspect ratios as given. cell_anchors = generate_anchors( stride, scales=np.array(sizes, dtype=np.float) / stride, ratios=np.array(cfg.RPN.ANCHOR_RATIOS, dtype=np.float)) # anchors are intbox here. # anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride) max_size = cfg.PREPROC.MAX_SIZE field_size = int(np.ceil(max_size / stride)) shifts = np.arange(0, field_size) * stride shift_x, shift_y = np.meshgrid(shifts, shifts) shift_x = shift_x.flatten() shift_y = shift_y.flatten() shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose() # Kx4, K = field_size * field_size K = shifts.shape[0] A = cell_anchors.shape[0] field_of_anchors = ( cell_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))) field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4)) # FSxFSxAx4 # Many rounding happens inside the anchor code anyway # assert np.all(field_of_anchors == field_of_anchors.astype('int32')) field_of_anchors = field_of_anchors.astype('float32') field_of_anchors[:, :, :, [2, 3]] += 1 return field_of_anchors
[ "def", "get_all_anchors", "(", "stride", "=", "None", ",", "sizes", "=", "None", ")", ":", "if", "stride", "is", "None", ":", "stride", "=", "cfg", ".", "RPN", ".", "ANCHOR_STRIDE", "if", "sizes", "is", "None", ":", "sizes", "=", "cfg", ".", "RPN", ".", "ANCHOR_SIZES", "# Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors", "# are centered on stride / 2, have (approximate) sqrt areas of the specified", "# sizes, and aspect ratios as given.", "cell_anchors", "=", "generate_anchors", "(", "stride", ",", "scales", "=", "np", ".", "array", "(", "sizes", ",", "dtype", "=", "np", ".", "float", ")", "/", "stride", ",", "ratios", "=", "np", ".", "array", "(", "cfg", ".", "RPN", ".", "ANCHOR_RATIOS", ",", "dtype", "=", "np", ".", "float", ")", ")", "# anchors are intbox here.", "# anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride)", "max_size", "=", "cfg", ".", "PREPROC", ".", "MAX_SIZE", "field_size", "=", "int", "(", "np", ".", "ceil", "(", "max_size", "/", "stride", ")", ")", "shifts", "=", "np", ".", "arange", "(", "0", ",", "field_size", ")", "*", "stride", "shift_x", ",", "shift_y", "=", "np", ".", "meshgrid", "(", "shifts", ",", "shifts", ")", "shift_x", "=", "shift_x", ".", "flatten", "(", ")", "shift_y", "=", "shift_y", ".", "flatten", "(", ")", "shifts", "=", "np", ".", "vstack", "(", "(", "shift_x", ",", "shift_y", ",", "shift_x", ",", "shift_y", ")", ")", ".", "transpose", "(", ")", "# Kx4, K = field_size * field_size", "K", "=", "shifts", ".", "shape", "[", "0", "]", "A", "=", "cell_anchors", ".", "shape", "[", "0", "]", "field_of_anchors", "=", "(", "cell_anchors", ".", "reshape", "(", "(", "1", ",", "A", ",", "4", ")", ")", "+", "shifts", ".", "reshape", "(", "(", "1", ",", "K", ",", "4", ")", ")", ".", "transpose", "(", "(", "1", ",", "0", ",", "2", ")", ")", ")", "field_of_anchors", "=", "field_of_anchors", ".", "reshape", "(", "(", "field_size", ",", "field_size", ",", "A", ",", "4", ")", ")", "# 
FSxFSxAx4", "# Many rounding happens inside the anchor code anyway", "# assert np.all(field_of_anchors == field_of_anchors.astype('int32'))", "field_of_anchors", "=", "field_of_anchors", ".", "astype", "(", "'float32'", ")", "field_of_anchors", "[", ":", ",", ":", ",", ":", ",", "[", "2", ",", "3", "]", "]", "+=", "1", "return", "field_of_anchors" ]
Get all anchors in the largest possible image, shifted, floatbox Args: stride (int): the stride of anchors. sizes (tuple[int]): the sizes (sqrt area) of anchors Returns: anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE.
[ "Get", "all", "anchors", "in", "the", "largest", "possible", "image", "shifted", "floatbox", "Args", ":", "stride", "(", "int", ")", ":", "the", "stride", "of", "anchors", ".", "sizes", "(", "tuple", "[", "int", "]", ")", ":", "the", "sizes", "(", "sqrt", "area", ")", "of", "anchors" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L54-L100
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
get_all_anchors_fpn
def get_all_anchors_fpn(strides=None, sizes=None): """ Returns: [anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array. """ if strides is None: strides = cfg.FPN.ANCHOR_STRIDES if sizes is None: sizes = cfg.RPN.ANCHOR_SIZES assert len(strides) == len(sizes) foas = [] for stride, size in zip(strides, sizes): foa = get_all_anchors(stride=stride, sizes=(size,)) foas.append(foa) return foas
python
def get_all_anchors_fpn(strides=None, sizes=None): """ Returns: [anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array. """ if strides is None: strides = cfg.FPN.ANCHOR_STRIDES if sizes is None: sizes = cfg.RPN.ANCHOR_SIZES assert len(strides) == len(sizes) foas = [] for stride, size in zip(strides, sizes): foa = get_all_anchors(stride=stride, sizes=(size,)) foas.append(foa) return foas
[ "def", "get_all_anchors_fpn", "(", "strides", "=", "None", ",", "sizes", "=", "None", ")", ":", "if", "strides", "is", "None", ":", "strides", "=", "cfg", ".", "FPN", ".", "ANCHOR_STRIDES", "if", "sizes", "is", "None", ":", "sizes", "=", "cfg", ".", "RPN", ".", "ANCHOR_SIZES", "assert", "len", "(", "strides", ")", "==", "len", "(", "sizes", ")", "foas", "=", "[", "]", "for", "stride", ",", "size", "in", "zip", "(", "strides", ",", "sizes", ")", ":", "foa", "=", "get_all_anchors", "(", "stride", "=", "stride", ",", "sizes", "=", "(", "size", ",", ")", ")", "foas", ".", "append", "(", "foa", ")", "return", "foas" ]
Returns: [anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array.
[ "Returns", ":", "[", "anchors", "]", ":", "each", "anchors", "is", "a", "SxSx", "NUM_ANCHOR_RATIOS", "x4", "array", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L104-L118
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
get_anchor_labels
def get_anchor_labels(anchors, gt_boxes, crowd_boxes): """ Label each anchor as fg/bg/ignore. Args: anchors: Ax4 float gt_boxes: Bx4 float, non-crowd crowd_boxes: Cx4 float Returns: anchor_labels: (A,) int. Each element is {-1, 0, 1} anchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg. """ # This function will modify labels and return the filtered inds def filter_box_label(labels, value, max_num): curr_inds = np.where(labels == value)[0] if len(curr_inds) > max_num: disable_inds = np.random.choice( curr_inds, size=(len(curr_inds) - max_num), replace=False) labels[disable_inds] = -1 # ignore them curr_inds = np.where(labels == value)[0] return curr_inds NA, NB = len(anchors), len(gt_boxes) assert NB > 0 # empty images should have been filtered already box_ious = np_iou(anchors, gt_boxes) # NA x NB ious_argmax_per_anchor = box_ious.argmax(axis=1) # NA, ious_max_per_anchor = box_ious.max(axis=1) ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True) # 1xNB # for each gt, find all those anchors (including ties) that has the max ious with it anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0] # Setting NA labels: 1--fg 0--bg -1--ignore anchor_labels = -np.ones((NA,), dtype='int32') # NA, # the order of setting neg/pos labels matter anchor_labels[anchors_with_max_iou_per_gt] = 1 anchor_labels[ious_max_per_anchor >= cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1 anchor_labels[ious_max_per_anchor < cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0 # label all non-ignore candidate boxes which overlap crowd as ignore if crowd_boxes.size > 0: cand_inds = np.where(anchor_labels >= 0)[0] cand_anchors = anchors[cand_inds] ioas = np_ioa(crowd_boxes, cand_anchors) overlap_with_crowd = cand_inds[ioas.max(axis=0) > cfg.RPN.CROWD_OVERLAP_THRESH] anchor_labels[overlap_with_crowd] = -1 # Subsample fg labels: ignore some fg if fg is too many target_num_fg = int(cfg.RPN.BATCH_PER_IM * cfg.RPN.FG_RATIO) fg_inds = filter_box_label(anchor_labels, 1, 
target_num_fg) # Keep an image even if there is no foreground anchors # if len(fg_inds) == 0: # raise MalformedData("No valid foreground for RPN!") # Subsample bg labels. num_bg is not allowed to be too many old_num_bg = np.sum(anchor_labels == 0) if old_num_bg == 0: # No valid bg in this image, skip. raise MalformedData("No valid background for RPN!") target_num_bg = cfg.RPN.BATCH_PER_IM - len(fg_inds) filter_box_label(anchor_labels, 0, target_num_bg) # ignore return values # Set anchor boxes: the best gt_box for each fg anchor anchor_boxes = np.zeros((NA, 4), dtype='float32') fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :] anchor_boxes[fg_inds, :] = fg_boxes # assert len(fg_inds) + np.sum(anchor_labels == 0) == cfg.RPN.BATCH_PER_IM return anchor_labels, anchor_boxes
python
def get_anchor_labels(anchors, gt_boxes, crowd_boxes): """ Label each anchor as fg/bg/ignore. Args: anchors: Ax4 float gt_boxes: Bx4 float, non-crowd crowd_boxes: Cx4 float Returns: anchor_labels: (A,) int. Each element is {-1, 0, 1} anchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg. """ # This function will modify labels and return the filtered inds def filter_box_label(labels, value, max_num): curr_inds = np.where(labels == value)[0] if len(curr_inds) > max_num: disable_inds = np.random.choice( curr_inds, size=(len(curr_inds) - max_num), replace=False) labels[disable_inds] = -1 # ignore them curr_inds = np.where(labels == value)[0] return curr_inds NA, NB = len(anchors), len(gt_boxes) assert NB > 0 # empty images should have been filtered already box_ious = np_iou(anchors, gt_boxes) # NA x NB ious_argmax_per_anchor = box_ious.argmax(axis=1) # NA, ious_max_per_anchor = box_ious.max(axis=1) ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True) # 1xNB # for each gt, find all those anchors (including ties) that has the max ious with it anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0] # Setting NA labels: 1--fg 0--bg -1--ignore anchor_labels = -np.ones((NA,), dtype='int32') # NA, # the order of setting neg/pos labels matter anchor_labels[anchors_with_max_iou_per_gt] = 1 anchor_labels[ious_max_per_anchor >= cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1 anchor_labels[ious_max_per_anchor < cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0 # label all non-ignore candidate boxes which overlap crowd as ignore if crowd_boxes.size > 0: cand_inds = np.where(anchor_labels >= 0)[0] cand_anchors = anchors[cand_inds] ioas = np_ioa(crowd_boxes, cand_anchors) overlap_with_crowd = cand_inds[ioas.max(axis=0) > cfg.RPN.CROWD_OVERLAP_THRESH] anchor_labels[overlap_with_crowd] = -1 # Subsample fg labels: ignore some fg if fg is too many target_num_fg = int(cfg.RPN.BATCH_PER_IM * cfg.RPN.FG_RATIO) fg_inds = filter_box_label(anchor_labels, 1, 
target_num_fg) # Keep an image even if there is no foreground anchors # if len(fg_inds) == 0: # raise MalformedData("No valid foreground for RPN!") # Subsample bg labels. num_bg is not allowed to be too many old_num_bg = np.sum(anchor_labels == 0) if old_num_bg == 0: # No valid bg in this image, skip. raise MalformedData("No valid background for RPN!") target_num_bg = cfg.RPN.BATCH_PER_IM - len(fg_inds) filter_box_label(anchor_labels, 0, target_num_bg) # ignore return values # Set anchor boxes: the best gt_box for each fg anchor anchor_boxes = np.zeros((NA, 4), dtype='float32') fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :] anchor_boxes[fg_inds, :] = fg_boxes # assert len(fg_inds) + np.sum(anchor_labels == 0) == cfg.RPN.BATCH_PER_IM return anchor_labels, anchor_boxes
[ "def", "get_anchor_labels", "(", "anchors", ",", "gt_boxes", ",", "crowd_boxes", ")", ":", "# This function will modify labels and return the filtered inds", "def", "filter_box_label", "(", "labels", ",", "value", ",", "max_num", ")", ":", "curr_inds", "=", "np", ".", "where", "(", "labels", "==", "value", ")", "[", "0", "]", "if", "len", "(", "curr_inds", ")", ">", "max_num", ":", "disable_inds", "=", "np", ".", "random", ".", "choice", "(", "curr_inds", ",", "size", "=", "(", "len", "(", "curr_inds", ")", "-", "max_num", ")", ",", "replace", "=", "False", ")", "labels", "[", "disable_inds", "]", "=", "-", "1", "# ignore them", "curr_inds", "=", "np", ".", "where", "(", "labels", "==", "value", ")", "[", "0", "]", "return", "curr_inds", "NA", ",", "NB", "=", "len", "(", "anchors", ")", ",", "len", "(", "gt_boxes", ")", "assert", "NB", ">", "0", "# empty images should have been filtered already", "box_ious", "=", "np_iou", "(", "anchors", ",", "gt_boxes", ")", "# NA x NB", "ious_argmax_per_anchor", "=", "box_ious", ".", "argmax", "(", "axis", "=", "1", ")", "# NA,", "ious_max_per_anchor", "=", "box_ious", ".", "max", "(", "axis", "=", "1", ")", "ious_max_per_gt", "=", "np", ".", "amax", "(", "box_ious", ",", "axis", "=", "0", ",", "keepdims", "=", "True", ")", "# 1xNB", "# for each gt, find all those anchors (including ties) that has the max ious with it", "anchors_with_max_iou_per_gt", "=", "np", ".", "where", "(", "box_ious", "==", "ious_max_per_gt", ")", "[", "0", "]", "# Setting NA labels: 1--fg 0--bg -1--ignore", "anchor_labels", "=", "-", "np", ".", "ones", "(", "(", "NA", ",", ")", ",", "dtype", "=", "'int32'", ")", "# NA,", "# the order of setting neg/pos labels matter", "anchor_labels", "[", "anchors_with_max_iou_per_gt", "]", "=", "1", "anchor_labels", "[", "ious_max_per_anchor", ">=", "cfg", ".", "RPN", ".", "POSITIVE_ANCHOR_THRESH", "]", "=", "1", "anchor_labels", "[", "ious_max_per_anchor", "<", "cfg", ".", "RPN", ".", "NEGATIVE_ANCHOR_THRESH", "]", 
"=", "0", "# label all non-ignore candidate boxes which overlap crowd as ignore", "if", "crowd_boxes", ".", "size", ">", "0", ":", "cand_inds", "=", "np", ".", "where", "(", "anchor_labels", ">=", "0", ")", "[", "0", "]", "cand_anchors", "=", "anchors", "[", "cand_inds", "]", "ioas", "=", "np_ioa", "(", "crowd_boxes", ",", "cand_anchors", ")", "overlap_with_crowd", "=", "cand_inds", "[", "ioas", ".", "max", "(", "axis", "=", "0", ")", ">", "cfg", ".", "RPN", ".", "CROWD_OVERLAP_THRESH", "]", "anchor_labels", "[", "overlap_with_crowd", "]", "=", "-", "1", "# Subsample fg labels: ignore some fg if fg is too many", "target_num_fg", "=", "int", "(", "cfg", ".", "RPN", ".", "BATCH_PER_IM", "*", "cfg", ".", "RPN", ".", "FG_RATIO", ")", "fg_inds", "=", "filter_box_label", "(", "anchor_labels", ",", "1", ",", "target_num_fg", ")", "# Keep an image even if there is no foreground anchors", "# if len(fg_inds) == 0:", "# raise MalformedData(\"No valid foreground for RPN!\")", "# Subsample bg labels. num_bg is not allowed to be too many", "old_num_bg", "=", "np", ".", "sum", "(", "anchor_labels", "==", "0", ")", "if", "old_num_bg", "==", "0", ":", "# No valid bg in this image, skip.", "raise", "MalformedData", "(", "\"No valid background for RPN!\"", ")", "target_num_bg", "=", "cfg", ".", "RPN", ".", "BATCH_PER_IM", "-", "len", "(", "fg_inds", ")", "filter_box_label", "(", "anchor_labels", ",", "0", ",", "target_num_bg", ")", "# ignore return values", "# Set anchor boxes: the best gt_box for each fg anchor", "anchor_boxes", "=", "np", ".", "zeros", "(", "(", "NA", ",", "4", ")", ",", "dtype", "=", "'float32'", ")", "fg_boxes", "=", "gt_boxes", "[", "ious_argmax_per_anchor", "[", "fg_inds", "]", ",", ":", "]", "anchor_boxes", "[", "fg_inds", ",", ":", "]", "=", "fg_boxes", "# assert len(fg_inds) + np.sum(anchor_labels == 0) == cfg.RPN.BATCH_PER_IM", "return", "anchor_labels", ",", "anchor_boxes" ]
Label each anchor as fg/bg/ignore. Args: anchors: Ax4 float gt_boxes: Bx4 float, non-crowd crowd_boxes: Cx4 float Returns: anchor_labels: (A,) int. Each element is {-1, 0, 1} anchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg.
[ "Label", "each", "anchor", "as", "fg", "/", "bg", "/", "ignore", ".", "Args", ":", "anchors", ":", "Ax4", "float", "gt_boxes", ":", "Bx4", "float", "non", "-", "crowd", "crowd_boxes", ":", "Cx4", "float" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L121-L189
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
get_rpn_anchor_input
def get_rpn_anchor_input(im, boxes, is_crowd): """ Args: im: an image boxes: nx4, floatbox, gt. shoudn't be changed is_crowd: n, Returns: The anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWxNA fm_boxes: fHxfWxNAx4 NA will be NUM_ANCHOR_SIZES x NUM_ANCHOR_RATIOS """ boxes = boxes.copy() all_anchors = np.copy(get_all_anchors()) # fHxfWxAx4 -> (-1, 4) featuremap_anchors_flatten = all_anchors.reshape((-1, 4)) # only use anchors inside the image inside_ind, inside_anchors = filter_boxes_inside_shape(featuremap_anchors_flatten, im.shape[:2]) # obtain anchor labels and their corresponding gt boxes anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1]) # Fill them back to original size: fHxfWx1, fHxfWx4 anchorH, anchorW = all_anchors.shape[:2] featuremap_labels = -np.ones((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, ), dtype='int32') featuremap_labels[inside_ind] = anchor_labels featuremap_labels = featuremap_labels.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR)) featuremap_boxes = np.zeros((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, 4), dtype='float32') featuremap_boxes[inside_ind, :] = anchor_gt_boxes featuremap_boxes = featuremap_boxes.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR, 4)) return featuremap_labels, featuremap_boxes
python
def get_rpn_anchor_input(im, boxes, is_crowd): """ Args: im: an image boxes: nx4, floatbox, gt. shoudn't be changed is_crowd: n, Returns: The anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWxNA fm_boxes: fHxfWxNAx4 NA will be NUM_ANCHOR_SIZES x NUM_ANCHOR_RATIOS """ boxes = boxes.copy() all_anchors = np.copy(get_all_anchors()) # fHxfWxAx4 -> (-1, 4) featuremap_anchors_flatten = all_anchors.reshape((-1, 4)) # only use anchors inside the image inside_ind, inside_anchors = filter_boxes_inside_shape(featuremap_anchors_flatten, im.shape[:2]) # obtain anchor labels and their corresponding gt boxes anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1]) # Fill them back to original size: fHxfWx1, fHxfWx4 anchorH, anchorW = all_anchors.shape[:2] featuremap_labels = -np.ones((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, ), dtype='int32') featuremap_labels[inside_ind] = anchor_labels featuremap_labels = featuremap_labels.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR)) featuremap_boxes = np.zeros((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, 4), dtype='float32') featuremap_boxes[inside_ind, :] = anchor_gt_boxes featuremap_boxes = featuremap_boxes.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR, 4)) return featuremap_labels, featuremap_boxes
[ "def", "get_rpn_anchor_input", "(", "im", ",", "boxes", ",", "is_crowd", ")", ":", "boxes", "=", "boxes", ".", "copy", "(", ")", "all_anchors", "=", "np", ".", "copy", "(", "get_all_anchors", "(", ")", ")", "# fHxfWxAx4 -> (-1, 4)", "featuremap_anchors_flatten", "=", "all_anchors", ".", "reshape", "(", "(", "-", "1", ",", "4", ")", ")", "# only use anchors inside the image", "inside_ind", ",", "inside_anchors", "=", "filter_boxes_inside_shape", "(", "featuremap_anchors_flatten", ",", "im", ".", "shape", "[", ":", "2", "]", ")", "# obtain anchor labels and their corresponding gt boxes", "anchor_labels", ",", "anchor_gt_boxes", "=", "get_anchor_labels", "(", "inside_anchors", ",", "boxes", "[", "is_crowd", "==", "0", "]", ",", "boxes", "[", "is_crowd", "==", "1", "]", ")", "# Fill them back to original size: fHxfWx1, fHxfWx4", "anchorH", ",", "anchorW", "=", "all_anchors", ".", "shape", "[", ":", "2", "]", "featuremap_labels", "=", "-", "np", ".", "ones", "(", "(", "anchorH", "*", "anchorW", "*", "cfg", ".", "RPN", ".", "NUM_ANCHOR", ",", ")", ",", "dtype", "=", "'int32'", ")", "featuremap_labels", "[", "inside_ind", "]", "=", "anchor_labels", "featuremap_labels", "=", "featuremap_labels", ".", "reshape", "(", "(", "anchorH", ",", "anchorW", ",", "cfg", ".", "RPN", ".", "NUM_ANCHOR", ")", ")", "featuremap_boxes", "=", "np", ".", "zeros", "(", "(", "anchorH", "*", "anchorW", "*", "cfg", ".", "RPN", ".", "NUM_ANCHOR", ",", "4", ")", ",", "dtype", "=", "'float32'", ")", "featuremap_boxes", "[", "inside_ind", ",", ":", "]", "=", "anchor_gt_boxes", "featuremap_boxes", "=", "featuremap_boxes", ".", "reshape", "(", "(", "anchorH", ",", "anchorW", ",", "cfg", ".", "RPN", ".", "NUM_ANCHOR", ",", "4", ")", ")", "return", "featuremap_labels", ",", "featuremap_boxes" ]
Args: im: an image boxes: nx4, floatbox, gt. shoudn't be changed is_crowd: n, Returns: The anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWxNA fm_boxes: fHxfWxNAx4 NA will be NUM_ANCHOR_SIZES x NUM_ANCHOR_RATIOS
[ "Args", ":", "im", ":", "an", "image", "boxes", ":", "nx4", "floatbox", "gt", ".", "shoudn", "t", "be", "changed", "is_crowd", ":", "n" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L192-L223
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
get_multilevel_rpn_anchor_input
def get_multilevel_rpn_anchor_input(im, boxes, is_crowd): """ Args: im: an image boxes: nx4, floatbox, gt. shoudn't be changed is_crowd: n, Returns: [(fm_labels, fm_boxes)]: Returns a tuple for each FPN level. Each tuple contains the anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWx NUM_ANCHOR_RATIOS fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4 """ boxes = boxes.copy() anchors_per_level = get_all_anchors_fpn() flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level] all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0) inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2]) anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1]) # map back to all_anchors, then split to each level num_all_anchors = all_anchors_flatten.shape[0] all_labels = -np.ones((num_all_anchors, ), dtype='int32') all_labels[inside_ind] = anchor_labels all_boxes = np.zeros((num_all_anchors, 4), dtype='float32') all_boxes[inside_ind] = anchor_gt_boxes start = 0 multilevel_inputs = [] for level_anchor in anchors_per_level: assert level_anchor.shape[2] == len(cfg.RPN.ANCHOR_RATIOS) anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS num_anchor_this_level = np.prod(anchor_shape) end = start + num_anchor_this_level multilevel_inputs.append( (all_labels[start: end].reshape(anchor_shape), all_boxes[start: end, :].reshape(anchor_shape + (4,)) )) start = end assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors) return multilevel_inputs
python
def get_multilevel_rpn_anchor_input(im, boxes, is_crowd): """ Args: im: an image boxes: nx4, floatbox, gt. shoudn't be changed is_crowd: n, Returns: [(fm_labels, fm_boxes)]: Returns a tuple for each FPN level. Each tuple contains the anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWx NUM_ANCHOR_RATIOS fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4 """ boxes = boxes.copy() anchors_per_level = get_all_anchors_fpn() flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level] all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0) inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2]) anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1]) # map back to all_anchors, then split to each level num_all_anchors = all_anchors_flatten.shape[0] all_labels = -np.ones((num_all_anchors, ), dtype='int32') all_labels[inside_ind] = anchor_labels all_boxes = np.zeros((num_all_anchors, 4), dtype='float32') all_boxes[inside_ind] = anchor_gt_boxes start = 0 multilevel_inputs = [] for level_anchor in anchors_per_level: assert level_anchor.shape[2] == len(cfg.RPN.ANCHOR_RATIOS) anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS num_anchor_this_level = np.prod(anchor_shape) end = start + num_anchor_this_level multilevel_inputs.append( (all_labels[start: end].reshape(anchor_shape), all_boxes[start: end, :].reshape(anchor_shape + (4,)) )) start = end assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors) return multilevel_inputs
[ "def", "get_multilevel_rpn_anchor_input", "(", "im", ",", "boxes", ",", "is_crowd", ")", ":", "boxes", "=", "boxes", ".", "copy", "(", ")", "anchors_per_level", "=", "get_all_anchors_fpn", "(", ")", "flatten_anchors_per_level", "=", "[", "k", ".", "reshape", "(", "(", "-", "1", ",", "4", ")", ")", "for", "k", "in", "anchors_per_level", "]", "all_anchors_flatten", "=", "np", ".", "concatenate", "(", "flatten_anchors_per_level", ",", "axis", "=", "0", ")", "inside_ind", ",", "inside_anchors", "=", "filter_boxes_inside_shape", "(", "all_anchors_flatten", ",", "im", ".", "shape", "[", ":", "2", "]", ")", "anchor_labels", ",", "anchor_gt_boxes", "=", "get_anchor_labels", "(", "inside_anchors", ",", "boxes", "[", "is_crowd", "==", "0", "]", ",", "boxes", "[", "is_crowd", "==", "1", "]", ")", "# map back to all_anchors, then split to each level", "num_all_anchors", "=", "all_anchors_flatten", ".", "shape", "[", "0", "]", "all_labels", "=", "-", "np", ".", "ones", "(", "(", "num_all_anchors", ",", ")", ",", "dtype", "=", "'int32'", ")", "all_labels", "[", "inside_ind", "]", "=", "anchor_labels", "all_boxes", "=", "np", ".", "zeros", "(", "(", "num_all_anchors", ",", "4", ")", ",", "dtype", "=", "'float32'", ")", "all_boxes", "[", "inside_ind", "]", "=", "anchor_gt_boxes", "start", "=", "0", "multilevel_inputs", "=", "[", "]", "for", "level_anchor", "in", "anchors_per_level", ":", "assert", "level_anchor", ".", "shape", "[", "2", "]", "==", "len", "(", "cfg", ".", "RPN", ".", "ANCHOR_RATIOS", ")", "anchor_shape", "=", "level_anchor", ".", "shape", "[", ":", "3", "]", "# fHxfWxNUM_ANCHOR_RATIOS", "num_anchor_this_level", "=", "np", ".", "prod", "(", "anchor_shape", ")", "end", "=", "start", "+", "num_anchor_this_level", "multilevel_inputs", ".", "append", "(", "(", "all_labels", "[", "start", ":", "end", "]", ".", "reshape", "(", "anchor_shape", ")", ",", "all_boxes", "[", "start", ":", "end", ",", ":", "]", ".", "reshape", "(", "anchor_shape", "+", "(", "4", ",", ")", 
")", ")", ")", "start", "=", "end", "assert", "end", "==", "num_all_anchors", ",", "\"{} != {}\"", ".", "format", "(", "end", ",", "num_all_anchors", ")", "return", "multilevel_inputs" ]
Args: im: an image boxes: nx4, floatbox, gt. shoudn't be changed is_crowd: n, Returns: [(fm_labels, fm_boxes)]: Returns a tuple for each FPN level. Each tuple contains the anchor labels and target boxes for each pixel in the featuremap. fm_labels: fHxfWx NUM_ANCHOR_RATIOS fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
[ "Args", ":", "im", ":", "an", "image", "boxes", ":", "nx4", "floatbox", "gt", ".", "shoudn", "t", "be", "changed", "is_crowd", ":", "n" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L226-L268
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
get_train_dataflow
def get_train_dataflow(): """ Return a training dataflow. Each datapoint consists of the following: An image: (h, w, 3), 1 or more pairs of (anchor_labels, anchor_boxes): anchor_labels: (h', w', NA) anchor_boxes: (h', w', NA, 4) gt_boxes: (N, 4) gt_labels: (N,) If MODE_MASK, gt_masks: (N, h, w) """ roidbs = DetectionDataset().load_training_roidbs(cfg.DATA.TRAIN) print_class_histogram(roidbs) # Valid training images should have at least one fg box. # But this filter shall not be applied for testing. num = len(roidbs) roidbs = list(filter(lambda img: len(img['boxes'][img['is_crowd'] == 0]) > 0, roidbs)) logger.info("Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}".format( num - len(roidbs), len(roidbs))) ds = DataFromList(roidbs, shuffle=True) aug = imgaug.AugmentorList( [CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE), imgaug.Flip(horiz=True)]) def preprocess(roidb): fname, boxes, klass, is_crowd = roidb['file_name'], roidb['boxes'], roidb['class'], roidb['is_crowd'] boxes = np.copy(boxes) im = cv2.imread(fname, cv2.IMREAD_COLOR) assert im is not None, fname im = im.astype('float32') height, width = im.shape[:2] # assume floatbox as input assert boxes.dtype == np.float32, "Loader has to return floating point boxes!" if not cfg.DATA.ABSOLUTE_COORD: boxes[:, 0::2] *= width boxes[:, 1::2] *= height # augmentation: im, params = aug.augment_return_params(im) points = box_to_point8(boxes) points = aug.augment_coords(points, params) boxes = point8_to_box(points) assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!" 
ret = {'image': im} # rpn anchor: try: if cfg.MODE_FPN: multilevel_anchor_inputs = get_multilevel_rpn_anchor_input(im, boxes, is_crowd) for i, (anchor_labels, anchor_boxes) in enumerate(multilevel_anchor_inputs): ret['anchor_labels_lvl{}'.format(i + 2)] = anchor_labels ret['anchor_boxes_lvl{}'.format(i + 2)] = anchor_boxes else: # anchor_labels, anchor_boxes ret['anchor_labels'], ret['anchor_boxes'] = get_rpn_anchor_input(im, boxes, is_crowd) boxes = boxes[is_crowd == 0] # skip crowd boxes in training target klass = klass[is_crowd == 0] ret['gt_boxes'] = boxes ret['gt_labels'] = klass if not len(boxes): raise MalformedData("No valid gt_boxes!") except MalformedData as e: log_once("Input {} is filtered for training: {}".format(fname, str(e)), 'warn') return None if cfg.MODE_MASK: # augmentation will modify the polys in-place segmentation = copy.deepcopy(roidb['segmentation']) segmentation = [segmentation[k] for k in range(len(segmentation)) if not is_crowd[k]] assert len(segmentation) == len(boxes) # Apply augmentation on polygon coordinates. # And produce one image-sized binary mask per box. 
masks = [] width_height = np.asarray([width, height], dtype=np.float32) for polys in segmentation: if not cfg.DATA.ABSOLUTE_COORD: polys = [p * width_height for p in polys] polys = [aug.augment_coords(p, params) for p in polys] masks.append(segmentation_to_mask(polys, im.shape[0], im.shape[1])) masks = np.asarray(masks, dtype='uint8') # values in {0, 1} ret['gt_masks'] = masks # from viz import draw_annotation, draw_mask # viz = draw_annotation(im, boxes, klass) # for mask in masks: # viz = draw_mask(viz, mask) # tpviz.interactive_imshow(viz) return ret if cfg.DATA.NUM_WORKERS > 0: buffer_size = cfg.DATA.NUM_WORKERS * 20 if cfg.TRAINER == 'horovod': ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) # MPI does not like fork() else: ds = MultiProcessMapDataZMQ(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) else: ds = MapData(ds, preprocess) return ds
python
def get_train_dataflow(): """ Return a training dataflow. Each datapoint consists of the following: An image: (h, w, 3), 1 or more pairs of (anchor_labels, anchor_boxes): anchor_labels: (h', w', NA) anchor_boxes: (h', w', NA, 4) gt_boxes: (N, 4) gt_labels: (N,) If MODE_MASK, gt_masks: (N, h, w) """ roidbs = DetectionDataset().load_training_roidbs(cfg.DATA.TRAIN) print_class_histogram(roidbs) # Valid training images should have at least one fg box. # But this filter shall not be applied for testing. num = len(roidbs) roidbs = list(filter(lambda img: len(img['boxes'][img['is_crowd'] == 0]) > 0, roidbs)) logger.info("Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}".format( num - len(roidbs), len(roidbs))) ds = DataFromList(roidbs, shuffle=True) aug = imgaug.AugmentorList( [CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE), imgaug.Flip(horiz=True)]) def preprocess(roidb): fname, boxes, klass, is_crowd = roidb['file_name'], roidb['boxes'], roidb['class'], roidb['is_crowd'] boxes = np.copy(boxes) im = cv2.imread(fname, cv2.IMREAD_COLOR) assert im is not None, fname im = im.astype('float32') height, width = im.shape[:2] # assume floatbox as input assert boxes.dtype == np.float32, "Loader has to return floating point boxes!" if not cfg.DATA.ABSOLUTE_COORD: boxes[:, 0::2] *= width boxes[:, 1::2] *= height # augmentation: im, params = aug.augment_return_params(im) points = box_to_point8(boxes) points = aug.augment_coords(points, params) boxes = point8_to_box(points) assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!" 
ret = {'image': im} # rpn anchor: try: if cfg.MODE_FPN: multilevel_anchor_inputs = get_multilevel_rpn_anchor_input(im, boxes, is_crowd) for i, (anchor_labels, anchor_boxes) in enumerate(multilevel_anchor_inputs): ret['anchor_labels_lvl{}'.format(i + 2)] = anchor_labels ret['anchor_boxes_lvl{}'.format(i + 2)] = anchor_boxes else: # anchor_labels, anchor_boxes ret['anchor_labels'], ret['anchor_boxes'] = get_rpn_anchor_input(im, boxes, is_crowd) boxes = boxes[is_crowd == 0] # skip crowd boxes in training target klass = klass[is_crowd == 0] ret['gt_boxes'] = boxes ret['gt_labels'] = klass if not len(boxes): raise MalformedData("No valid gt_boxes!") except MalformedData as e: log_once("Input {} is filtered for training: {}".format(fname, str(e)), 'warn') return None if cfg.MODE_MASK: # augmentation will modify the polys in-place segmentation = copy.deepcopy(roidb['segmentation']) segmentation = [segmentation[k] for k in range(len(segmentation)) if not is_crowd[k]] assert len(segmentation) == len(boxes) # Apply augmentation on polygon coordinates. # And produce one image-sized binary mask per box. 
masks = [] width_height = np.asarray([width, height], dtype=np.float32) for polys in segmentation: if not cfg.DATA.ABSOLUTE_COORD: polys = [p * width_height for p in polys] polys = [aug.augment_coords(p, params) for p in polys] masks.append(segmentation_to_mask(polys, im.shape[0], im.shape[1])) masks = np.asarray(masks, dtype='uint8') # values in {0, 1} ret['gt_masks'] = masks # from viz import draw_annotation, draw_mask # viz = draw_annotation(im, boxes, klass) # for mask in masks: # viz = draw_mask(viz, mask) # tpviz.interactive_imshow(viz) return ret if cfg.DATA.NUM_WORKERS > 0: buffer_size = cfg.DATA.NUM_WORKERS * 20 if cfg.TRAINER == 'horovod': ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) # MPI does not like fork() else: ds = MultiProcessMapDataZMQ(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size) else: ds = MapData(ds, preprocess) return ds
[ "def", "get_train_dataflow", "(", ")", ":", "roidbs", "=", "DetectionDataset", "(", ")", ".", "load_training_roidbs", "(", "cfg", ".", "DATA", ".", "TRAIN", ")", "print_class_histogram", "(", "roidbs", ")", "# Valid training images should have at least one fg box.", "# But this filter shall not be applied for testing.", "num", "=", "len", "(", "roidbs", ")", "roidbs", "=", "list", "(", "filter", "(", "lambda", "img", ":", "len", "(", "img", "[", "'boxes'", "]", "[", "img", "[", "'is_crowd'", "]", "==", "0", "]", ")", ">", "0", ",", "roidbs", ")", ")", "logger", ".", "info", "(", "\"Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}\"", ".", "format", "(", "num", "-", "len", "(", "roidbs", ")", ",", "len", "(", "roidbs", ")", ")", ")", "ds", "=", "DataFromList", "(", "roidbs", ",", "shuffle", "=", "True", ")", "aug", "=", "imgaug", ".", "AugmentorList", "(", "[", "CustomResize", "(", "cfg", ".", "PREPROC", ".", "TRAIN_SHORT_EDGE_SIZE", ",", "cfg", ".", "PREPROC", ".", "MAX_SIZE", ")", ",", "imgaug", ".", "Flip", "(", "horiz", "=", "True", ")", "]", ")", "def", "preprocess", "(", "roidb", ")", ":", "fname", ",", "boxes", ",", "klass", ",", "is_crowd", "=", "roidb", "[", "'file_name'", "]", ",", "roidb", "[", "'boxes'", "]", ",", "roidb", "[", "'class'", "]", ",", "roidb", "[", "'is_crowd'", "]", "boxes", "=", "np", ".", "copy", "(", "boxes", ")", "im", "=", "cv2", ".", "imread", "(", "fname", ",", "cv2", ".", "IMREAD_COLOR", ")", "assert", "im", "is", "not", "None", ",", "fname", "im", "=", "im", ".", "astype", "(", "'float32'", ")", "height", ",", "width", "=", "im", ".", "shape", "[", ":", "2", "]", "# assume floatbox as input", "assert", "boxes", ".", "dtype", "==", "np", ".", "float32", ",", "\"Loader has to return floating point boxes!\"", "if", "not", "cfg", ".", "DATA", ".", "ABSOLUTE_COORD", ":", "boxes", "[", ":", ",", "0", ":", ":", "2", "]", "*=", "width", "boxes", "[", ":", ",", "1", ":", ":", "2", "]", "*=", 
"height", "# augmentation:", "im", ",", "params", "=", "aug", ".", "augment_return_params", "(", "im", ")", "points", "=", "box_to_point8", "(", "boxes", ")", "points", "=", "aug", ".", "augment_coords", "(", "points", ",", "params", ")", "boxes", "=", "point8_to_box", "(", "points", ")", "assert", "np", ".", "min", "(", "np_area", "(", "boxes", ")", ")", ">", "0", ",", "\"Some boxes have zero area!\"", "ret", "=", "{", "'image'", ":", "im", "}", "# rpn anchor:", "try", ":", "if", "cfg", ".", "MODE_FPN", ":", "multilevel_anchor_inputs", "=", "get_multilevel_rpn_anchor_input", "(", "im", ",", "boxes", ",", "is_crowd", ")", "for", "i", ",", "(", "anchor_labels", ",", "anchor_boxes", ")", "in", "enumerate", "(", "multilevel_anchor_inputs", ")", ":", "ret", "[", "'anchor_labels_lvl{}'", ".", "format", "(", "i", "+", "2", ")", "]", "=", "anchor_labels", "ret", "[", "'anchor_boxes_lvl{}'", ".", "format", "(", "i", "+", "2", ")", "]", "=", "anchor_boxes", "else", ":", "# anchor_labels, anchor_boxes", "ret", "[", "'anchor_labels'", "]", ",", "ret", "[", "'anchor_boxes'", "]", "=", "get_rpn_anchor_input", "(", "im", ",", "boxes", ",", "is_crowd", ")", "boxes", "=", "boxes", "[", "is_crowd", "==", "0", "]", "# skip crowd boxes in training target", "klass", "=", "klass", "[", "is_crowd", "==", "0", "]", "ret", "[", "'gt_boxes'", "]", "=", "boxes", "ret", "[", "'gt_labels'", "]", "=", "klass", "if", "not", "len", "(", "boxes", ")", ":", "raise", "MalformedData", "(", "\"No valid gt_boxes!\"", ")", "except", "MalformedData", "as", "e", ":", "log_once", "(", "\"Input {} is filtered for training: {}\"", ".", "format", "(", "fname", ",", "str", "(", "e", ")", ")", ",", "'warn'", ")", "return", "None", "if", "cfg", ".", "MODE_MASK", ":", "# augmentation will modify the polys in-place", "segmentation", "=", "copy", ".", "deepcopy", "(", "roidb", "[", "'segmentation'", "]", ")", "segmentation", "=", "[", "segmentation", "[", "k", "]", "for", "k", "in", "range", "(", "len", "(", 
"segmentation", ")", ")", "if", "not", "is_crowd", "[", "k", "]", "]", "assert", "len", "(", "segmentation", ")", "==", "len", "(", "boxes", ")", "# Apply augmentation on polygon coordinates.", "# And produce one image-sized binary mask per box.", "masks", "=", "[", "]", "width_height", "=", "np", ".", "asarray", "(", "[", "width", ",", "height", "]", ",", "dtype", "=", "np", ".", "float32", ")", "for", "polys", "in", "segmentation", ":", "if", "not", "cfg", ".", "DATA", ".", "ABSOLUTE_COORD", ":", "polys", "=", "[", "p", "*", "width_height", "for", "p", "in", "polys", "]", "polys", "=", "[", "aug", ".", "augment_coords", "(", "p", ",", "params", ")", "for", "p", "in", "polys", "]", "masks", ".", "append", "(", "segmentation_to_mask", "(", "polys", ",", "im", ".", "shape", "[", "0", "]", ",", "im", ".", "shape", "[", "1", "]", ")", ")", "masks", "=", "np", ".", "asarray", "(", "masks", ",", "dtype", "=", "'uint8'", ")", "# values in {0, 1}", "ret", "[", "'gt_masks'", "]", "=", "masks", "# from viz import draw_annotation, draw_mask", "# viz = draw_annotation(im, boxes, klass)", "# for mask in masks:", "# viz = draw_mask(viz, mask)", "# tpviz.interactive_imshow(viz)", "return", "ret", "if", "cfg", ".", "DATA", ".", "NUM_WORKERS", ">", "0", ":", "buffer_size", "=", "cfg", ".", "DATA", ".", "NUM_WORKERS", "*", "20", "if", "cfg", ".", "TRAINER", "==", "'horovod'", ":", "ds", "=", "MultiThreadMapData", "(", "ds", ",", "cfg", ".", "DATA", ".", "NUM_WORKERS", ",", "preprocess", ",", "buffer_size", "=", "buffer_size", ")", "# MPI does not like fork()", "else", ":", "ds", "=", "MultiProcessMapDataZMQ", "(", "ds", ",", "cfg", ".", "DATA", ".", "NUM_WORKERS", ",", "preprocess", ",", "buffer_size", "=", "buffer_size", ")", "else", ":", "ds", "=", "MapData", "(", "ds", ",", "preprocess", ")", "return", "ds" ]
Return a training dataflow. Each datapoint consists of the following: An image: (h, w, 3), 1 or more pairs of (anchor_labels, anchor_boxes): anchor_labels: (h', w', NA) anchor_boxes: (h', w', NA, 4) gt_boxes: (N, 4) gt_labels: (N,) If MODE_MASK, gt_masks: (N, h, w)
[ "Return", "a", "training", "dataflow", ".", "Each", "datapoint", "consists", "of", "the", "following", ":" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L271-L380
train
tensorpack/tensorpack
examples/FasterRCNN/data.py
get_eval_dataflow
def get_eval_dataflow(name, shard=0, num_shards=1): """ Args: name (str): name of the dataset to evaluate shard, num_shards: to get subset of evaluation data """ roidbs = DetectionDataset().load_inference_roidbs(name) num_imgs = len(roidbs) img_per_shard = num_imgs // num_shards img_range = (shard * img_per_shard, (shard + 1) * img_per_shard if shard + 1 < num_shards else num_imgs) # no filter for training ds = DataFromListOfDict(roidbs[img_range[0]: img_range[1]], ['file_name', 'image_id']) def f(fname): im = cv2.imread(fname, cv2.IMREAD_COLOR) assert im is not None, fname return im ds = MapDataComponent(ds, f, 0) # Evaluation itself may be multi-threaded, therefore don't add prefetch here. return ds
python
def get_eval_dataflow(name, shard=0, num_shards=1): """ Args: name (str): name of the dataset to evaluate shard, num_shards: to get subset of evaluation data """ roidbs = DetectionDataset().load_inference_roidbs(name) num_imgs = len(roidbs) img_per_shard = num_imgs // num_shards img_range = (shard * img_per_shard, (shard + 1) * img_per_shard if shard + 1 < num_shards else num_imgs) # no filter for training ds = DataFromListOfDict(roidbs[img_range[0]: img_range[1]], ['file_name', 'image_id']) def f(fname): im = cv2.imread(fname, cv2.IMREAD_COLOR) assert im is not None, fname return im ds = MapDataComponent(ds, f, 0) # Evaluation itself may be multi-threaded, therefore don't add prefetch here. return ds
[ "def", "get_eval_dataflow", "(", "name", ",", "shard", "=", "0", ",", "num_shards", "=", "1", ")", ":", "roidbs", "=", "DetectionDataset", "(", ")", ".", "load_inference_roidbs", "(", "name", ")", "num_imgs", "=", "len", "(", "roidbs", ")", "img_per_shard", "=", "num_imgs", "//", "num_shards", "img_range", "=", "(", "shard", "*", "img_per_shard", ",", "(", "shard", "+", "1", ")", "*", "img_per_shard", "if", "shard", "+", "1", "<", "num_shards", "else", "num_imgs", ")", "# no filter for training", "ds", "=", "DataFromListOfDict", "(", "roidbs", "[", "img_range", "[", "0", "]", ":", "img_range", "[", "1", "]", "]", ",", "[", "'file_name'", ",", "'image_id'", "]", ")", "def", "f", "(", "fname", ")", ":", "im", "=", "cv2", ".", "imread", "(", "fname", ",", "cv2", ".", "IMREAD_COLOR", ")", "assert", "im", "is", "not", "None", ",", "fname", "return", "im", "ds", "=", "MapDataComponent", "(", "ds", ",", "f", ",", "0", ")", "# Evaluation itself may be multi-threaded, therefore don't add prefetch here.", "return", "ds" ]
Args: name (str): name of the dataset to evaluate shard, num_shards: to get subset of evaluation data
[ "Args", ":", "name", "(", "str", ")", ":", "name", "of", "the", "dataset", "to", "evaluate", "shard", "num_shards", ":", "to", "get", "subset", "of", "evaluation", "data" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/data.py#L383-L404
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
override_to_local_variable
def override_to_local_variable(enable=True): """ Returns: a context where all variables will be created as local. """ if enable: def custom_getter(getter, name, *args, **kwargs): _replace_global_by_local(kwargs) return getter(name, *args, **kwargs) with custom_getter_scope(custom_getter): yield else: yield
python
def override_to_local_variable(enable=True): """ Returns: a context where all variables will be created as local. """ if enable: def custom_getter(getter, name, *args, **kwargs): _replace_global_by_local(kwargs) return getter(name, *args, **kwargs) with custom_getter_scope(custom_getter): yield else: yield
[ "def", "override_to_local_variable", "(", "enable", "=", "True", ")", ":", "if", "enable", ":", "def", "custom_getter", "(", "getter", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_replace_global_by_local", "(", "kwargs", ")", "return", "getter", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "with", "custom_getter_scope", "(", "custom_getter", ")", ":", "yield", "else", ":", "yield" ]
Returns: a context where all variables will be created as local.
[ "Returns", ":", "a", "context", "where", "all", "variables", "will", "be", "created", "as", "local", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L43-L57
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
split_grad_list
def split_grad_list(grad_list): """ Args: grad_list: K x N x 2 Returns: K x N: gradients K x N: variables """ g = [] v = [] for tower in grad_list: g.append([x[0] for x in tower]) v.append([x[1] for x in tower]) return g, v
python
def split_grad_list(grad_list): """ Args: grad_list: K x N x 2 Returns: K x N: gradients K x N: variables """ g = [] v = [] for tower in grad_list: g.append([x[0] for x in tower]) v.append([x[1] for x in tower]) return g, v
[ "def", "split_grad_list", "(", "grad_list", ")", ":", "g", "=", "[", "]", "v", "=", "[", "]", "for", "tower", "in", "grad_list", ":", "g", ".", "append", "(", "[", "x", "[", "0", "]", "for", "x", "in", "tower", "]", ")", "v", ".", "append", "(", "[", "x", "[", "1", "]", "for", "x", "in", "tower", "]", ")", "return", "g", ",", "v" ]
Args: grad_list: K x N x 2 Returns: K x N: gradients K x N: variables
[ "Args", ":", "grad_list", ":", "K", "x", "N", "x", "2" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L109-L123
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
merge_grad_list
def merge_grad_list(all_grads, all_vars): """ Args: all_grads (K x N): gradients all_vars(K x N): variables Return: K x N x 2: list of list of (grad, var) pairs """ return [list(zip(gs, vs)) for gs, vs in zip(all_grads, all_vars)]
python
def merge_grad_list(all_grads, all_vars): """ Args: all_grads (K x N): gradients all_vars(K x N): variables Return: K x N x 2: list of list of (grad, var) pairs """ return [list(zip(gs, vs)) for gs, vs in zip(all_grads, all_vars)]
[ "def", "merge_grad_list", "(", "all_grads", ",", "all_vars", ")", ":", "return", "[", "list", "(", "zip", "(", "gs", ",", "vs", ")", ")", "for", "gs", ",", "vs", "in", "zip", "(", "all_grads", ",", "all_vars", ")", "]" ]
Args: all_grads (K x N): gradients all_vars(K x N): variables Return: K x N x 2: list of list of (grad, var) pairs
[ "Args", ":", "all_grads", "(", "K", "x", "N", ")", ":", "gradients", "all_vars", "(", "K", "x", "N", ")", ":", "variables" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L126-L135
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
allreduce_grads
def allreduce_grads(all_grads, average): """ All-reduce average the gradients among K devices. Results are broadcasted to all devices. Args: all_grads (K x N): List of list of gradients. N is the number of variables. average (bool): average gradients or not. Returns: K x N: same as input, but each grad is replaced by the average over K devices. """ if get_tf_version_tuple() <= (1, 12): from tensorflow.contrib import nccl else: from tensorflow.python.ops import nccl_ops as nccl nr_tower = len(all_grads) if nr_tower == 1: return all_grads new_all_grads = [] # N x K for grads in zip(*all_grads): summed = nccl.all_sum(grads) grads_for_devices = [] # K for g in summed: with tf.device(g.device): # tensorflow/benchmarks didn't average gradients if average: g = tf.multiply(g, 1.0 / nr_tower) grads_for_devices.append(g) new_all_grads.append(grads_for_devices) # transpose to K x N ret = list(zip(*new_all_grads)) return ret
python
def allreduce_grads(all_grads, average): """ All-reduce average the gradients among K devices. Results are broadcasted to all devices. Args: all_grads (K x N): List of list of gradients. N is the number of variables. average (bool): average gradients or not. Returns: K x N: same as input, but each grad is replaced by the average over K devices. """ if get_tf_version_tuple() <= (1, 12): from tensorflow.contrib import nccl else: from tensorflow.python.ops import nccl_ops as nccl nr_tower = len(all_grads) if nr_tower == 1: return all_grads new_all_grads = [] # N x K for grads in zip(*all_grads): summed = nccl.all_sum(grads) grads_for_devices = [] # K for g in summed: with tf.device(g.device): # tensorflow/benchmarks didn't average gradients if average: g = tf.multiply(g, 1.0 / nr_tower) grads_for_devices.append(g) new_all_grads.append(grads_for_devices) # transpose to K x N ret = list(zip(*new_all_grads)) return ret
[ "def", "allreduce_grads", "(", "all_grads", ",", "average", ")", ":", "if", "get_tf_version_tuple", "(", ")", "<=", "(", "1", ",", "12", ")", ":", "from", "tensorflow", ".", "contrib", "import", "nccl", "else", ":", "from", "tensorflow", ".", "python", ".", "ops", "import", "nccl_ops", "as", "nccl", "nr_tower", "=", "len", "(", "all_grads", ")", "if", "nr_tower", "==", "1", ":", "return", "all_grads", "new_all_grads", "=", "[", "]", "# N x K", "for", "grads", "in", "zip", "(", "*", "all_grads", ")", ":", "summed", "=", "nccl", ".", "all_sum", "(", "grads", ")", "grads_for_devices", "=", "[", "]", "# K", "for", "g", "in", "summed", ":", "with", "tf", ".", "device", "(", "g", ".", "device", ")", ":", "# tensorflow/benchmarks didn't average gradients", "if", "average", ":", "g", "=", "tf", ".", "multiply", "(", "g", ",", "1.0", "/", "nr_tower", ")", "grads_for_devices", ".", "append", "(", "g", ")", "new_all_grads", ".", "append", "(", "grads_for_devices", ")", "# transpose to K x N", "ret", "=", "list", "(", "zip", "(", "*", "new_all_grads", ")", ")", "return", "ret" ]
All-reduce average the gradients among K devices. Results are broadcasted to all devices. Args: all_grads (K x N): List of list of gradients. N is the number of variables. average (bool): average gradients or not. Returns: K x N: same as input, but each grad is replaced by the average over K devices.
[ "All", "-", "reduce", "average", "the", "gradients", "among", "K", "devices", ".", "Results", "are", "broadcasted", "to", "all", "devices", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L139-L173
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
allreduce_grads_hierarchical
def allreduce_grads_hierarchical(all_grads, devices, average=False): """ Hierarchical allreduce for DGX-1 system. Args: all_grads (K x N): List of list of gradients. N is the number of variables. devices ([str]): K str for the K devices. average (bool): average gradients or not. Returns: (K x N): same as input, but each grad is replaced by the average over K lists. """ num_gpu = len(devices) assert num_gpu == 8, num_gpu assert len(all_grads) == num_gpu, len(all_grads) group_size = num_gpu // 2 agg_all_grads = [] # N x K for varid, grads in enumerate(zip(*all_grads)): # grads: K gradients g0_main_gpu = varid % num_gpu g1_main_gpu = (g0_main_gpu + group_size) % num_gpu g0_start = 0 if g0_main_gpu < group_size else group_size g1_start = 0 if g1_main_gpu < group_size else group_size assert g0_start != g1_start g0_grads = grads[g0_start: g0_start + group_size] g1_grads = grads[g1_start: g1_start + group_size] with tf.device(devices[g0_main_gpu]): g0_agg = tf.add_n(g0_grads, name='group0_agg') with tf.device(devices[g1_main_gpu]): g1_agg = tf.add_n(g1_grads, name='group1_agg') g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg') with tf.device(devices[g0_main_gpu]): g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg') agg_grads = [] # K aggregated grads for k in range(num_gpu): if (k < group_size) == (g0_main_gpu < group_size): main_gpu = g0_total_agg else: main_gpu = g1_total_agg with tf.device(devices[k]): if not average: device_total_agg = tf.identity( main_gpu, name='device{}_total_agg'.format(k)) else: # TODO where to put average? device_total_agg = tf.multiply( main_gpu, 1.0 / num_gpu, name='device{}_total_agg'.format(k)) agg_grads.append(device_total_agg) agg_all_grads.append(agg_grads) # transpose agg_all_grads = list(zip(*agg_all_grads)) # K x Nvar return agg_all_grads
python
def allreduce_grads_hierarchical(all_grads, devices, average=False): """ Hierarchical allreduce for DGX-1 system. Args: all_grads (K x N): List of list of gradients. N is the number of variables. devices ([str]): K str for the K devices. average (bool): average gradients or not. Returns: (K x N): same as input, but each grad is replaced by the average over K lists. """ num_gpu = len(devices) assert num_gpu == 8, num_gpu assert len(all_grads) == num_gpu, len(all_grads) group_size = num_gpu // 2 agg_all_grads = [] # N x K for varid, grads in enumerate(zip(*all_grads)): # grads: K gradients g0_main_gpu = varid % num_gpu g1_main_gpu = (g0_main_gpu + group_size) % num_gpu g0_start = 0 if g0_main_gpu < group_size else group_size g1_start = 0 if g1_main_gpu < group_size else group_size assert g0_start != g1_start g0_grads = grads[g0_start: g0_start + group_size] g1_grads = grads[g1_start: g1_start + group_size] with tf.device(devices[g0_main_gpu]): g0_agg = tf.add_n(g0_grads, name='group0_agg') with tf.device(devices[g1_main_gpu]): g1_agg = tf.add_n(g1_grads, name='group1_agg') g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg') with tf.device(devices[g0_main_gpu]): g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg') agg_grads = [] # K aggregated grads for k in range(num_gpu): if (k < group_size) == (g0_main_gpu < group_size): main_gpu = g0_total_agg else: main_gpu = g1_total_agg with tf.device(devices[k]): if not average: device_total_agg = tf.identity( main_gpu, name='device{}_total_agg'.format(k)) else: # TODO where to put average? device_total_agg = tf.multiply( main_gpu, 1.0 / num_gpu, name='device{}_total_agg'.format(k)) agg_grads.append(device_total_agg) agg_all_grads.append(agg_grads) # transpose agg_all_grads = list(zip(*agg_all_grads)) # K x Nvar return agg_all_grads
[ "def", "allreduce_grads_hierarchical", "(", "all_grads", ",", "devices", ",", "average", "=", "False", ")", ":", "num_gpu", "=", "len", "(", "devices", ")", "assert", "num_gpu", "==", "8", ",", "num_gpu", "assert", "len", "(", "all_grads", ")", "==", "num_gpu", ",", "len", "(", "all_grads", ")", "group_size", "=", "num_gpu", "//", "2", "agg_all_grads", "=", "[", "]", "# N x K", "for", "varid", ",", "grads", "in", "enumerate", "(", "zip", "(", "*", "all_grads", ")", ")", ":", "# grads: K gradients", "g0_main_gpu", "=", "varid", "%", "num_gpu", "g1_main_gpu", "=", "(", "g0_main_gpu", "+", "group_size", ")", "%", "num_gpu", "g0_start", "=", "0", "if", "g0_main_gpu", "<", "group_size", "else", "group_size", "g1_start", "=", "0", "if", "g1_main_gpu", "<", "group_size", "else", "group_size", "assert", "g0_start", "!=", "g1_start", "g0_grads", "=", "grads", "[", "g0_start", ":", "g0_start", "+", "group_size", "]", "g1_grads", "=", "grads", "[", "g1_start", ":", "g1_start", "+", "group_size", "]", "with", "tf", ".", "device", "(", "devices", "[", "g0_main_gpu", "]", ")", ":", "g0_agg", "=", "tf", ".", "add_n", "(", "g0_grads", ",", "name", "=", "'group0_agg'", ")", "with", "tf", ".", "device", "(", "devices", "[", "g1_main_gpu", "]", ")", ":", "g1_agg", "=", "tf", ".", "add_n", "(", "g1_grads", ",", "name", "=", "'group1_agg'", ")", "g1_total_agg", "=", "tf", ".", "add", "(", "g0_agg", ",", "g1_agg", ",", "name", "=", "'group1_total_agg'", ")", "with", "tf", ".", "device", "(", "devices", "[", "g0_main_gpu", "]", ")", ":", "g0_total_agg", "=", "tf", ".", "identity", "(", "g1_total_agg", ",", "name", "=", "'group0_total_agg'", ")", "agg_grads", "=", "[", "]", "# K aggregated grads", "for", "k", "in", "range", "(", "num_gpu", ")", ":", "if", "(", "k", "<", "group_size", ")", "==", "(", "g0_main_gpu", "<", "group_size", ")", ":", "main_gpu", "=", "g0_total_agg", "else", ":", "main_gpu", "=", "g1_total_agg", "with", "tf", ".", "device", "(", "devices", "[", "k", "]", ")", 
":", "if", "not", "average", ":", "device_total_agg", "=", "tf", ".", "identity", "(", "main_gpu", ",", "name", "=", "'device{}_total_agg'", ".", "format", "(", "k", ")", ")", "else", ":", "# TODO where to put average?", "device_total_agg", "=", "tf", ".", "multiply", "(", "main_gpu", ",", "1.0", "/", "num_gpu", ",", "name", "=", "'device{}_total_agg'", ".", "format", "(", "k", ")", ")", "agg_grads", ".", "append", "(", "device_total_agg", ")", "agg_all_grads", ".", "append", "(", "agg_grads", ")", "# transpose", "agg_all_grads", "=", "list", "(", "zip", "(", "*", "agg_all_grads", ")", ")", "# K x Nvar", "return", "agg_all_grads" ]
Hierarchical allreduce for DGX-1 system. Args: all_grads (K x N): List of list of gradients. N is the number of variables. devices ([str]): K str for the K devices. average (bool): average gradients or not. Returns: (K x N): same as input, but each grad is replaced by the average over K lists.
[ "Hierarchical", "allreduce", "for", "DGX", "-", "1", "system", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L177-L235
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
aggregate_grads
def aggregate_grads(all_grads, colocation=False, devices=None, average=True): """ Average the gradients. Args: all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples. The variables have to be the same across the K lists. colocation (bool): colocate gradient averaging on the device of the variable. devices (list[str]): assign the averaging to these device in round-robin. Cannot be used together with ``colocation``. average (bool): do average or sum Returns: (N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K. """ assert not (devices is not None and colocation) if devices is not None: assert isinstance(devices, list), devices nr_tower = len(all_grads) if nr_tower == 1: return all_grads[0] def aggregate(grads): if average: return tf.multiply(tf.add_n(grads), 1.0 / nr_tower) else: return tf.add_n(grads) ret = [] for idx, grad_and_vars in enumerate(zip(*all_grads)): # Ngpu * 2 v = grad_and_vars[0][1] grads = [g for (g, _) in grad_and_vars] if colocation: with tf.device(v.device): # colocate summed grad with var grad = aggregate(grads) elif devices is None: grad = aggregate(grads) else: dev = devices[idx % len(devices)] with tf.device(dev): grad = aggregate(grads) ret.append((grad, v)) return ret
python
def aggregate_grads(all_grads, colocation=False, devices=None, average=True): """ Average the gradients. Args: all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples. The variables have to be the same across the K lists. colocation (bool): colocate gradient averaging on the device of the variable. devices (list[str]): assign the averaging to these device in round-robin. Cannot be used together with ``colocation``. average (bool): do average or sum Returns: (N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K. """ assert not (devices is not None and colocation) if devices is not None: assert isinstance(devices, list), devices nr_tower = len(all_grads) if nr_tower == 1: return all_grads[0] def aggregate(grads): if average: return tf.multiply(tf.add_n(grads), 1.0 / nr_tower) else: return tf.add_n(grads) ret = [] for idx, grad_and_vars in enumerate(zip(*all_grads)): # Ngpu * 2 v = grad_and_vars[0][1] grads = [g for (g, _) in grad_and_vars] if colocation: with tf.device(v.device): # colocate summed grad with var grad = aggregate(grads) elif devices is None: grad = aggregate(grads) else: dev = devices[idx % len(devices)] with tf.device(dev): grad = aggregate(grads) ret.append((grad, v)) return ret
[ "def", "aggregate_grads", "(", "all_grads", ",", "colocation", "=", "False", ",", "devices", "=", "None", ",", "average", "=", "True", ")", ":", "assert", "not", "(", "devices", "is", "not", "None", "and", "colocation", ")", "if", "devices", "is", "not", "None", ":", "assert", "isinstance", "(", "devices", ",", "list", ")", ",", "devices", "nr_tower", "=", "len", "(", "all_grads", ")", "if", "nr_tower", "==", "1", ":", "return", "all_grads", "[", "0", "]", "def", "aggregate", "(", "grads", ")", ":", "if", "average", ":", "return", "tf", ".", "multiply", "(", "tf", ".", "add_n", "(", "grads", ")", ",", "1.0", "/", "nr_tower", ")", "else", ":", "return", "tf", ".", "add_n", "(", "grads", ")", "ret", "=", "[", "]", "for", "idx", ",", "grad_and_vars", "in", "enumerate", "(", "zip", "(", "*", "all_grads", ")", ")", ":", "# Ngpu * 2", "v", "=", "grad_and_vars", "[", "0", "]", "[", "1", "]", "grads", "=", "[", "g", "for", "(", "g", ",", "_", ")", "in", "grad_and_vars", "]", "if", "colocation", ":", "with", "tf", ".", "device", "(", "v", ".", "device", ")", ":", "# colocate summed grad with var", "grad", "=", "aggregate", "(", "grads", ")", "elif", "devices", "is", "None", ":", "grad", "=", "aggregate", "(", "grads", ")", "else", ":", "dev", "=", "devices", "[", "idx", "%", "len", "(", "devices", ")", "]", "with", "tf", ".", "device", "(", "dev", ")", ":", "grad", "=", "aggregate", "(", "grads", ")", "ret", ".", "append", "(", "(", "grad", ",", "v", ")", ")", "return", "ret" ]
Average the gradients. Args: all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples. The variables have to be the same across the K lists. colocation (bool): colocate gradient averaging on the device of the variable. devices (list[str]): assign the averaging to these device in round-robin. Cannot be used together with ``colocation``. average (bool): do average or sum Returns: (N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.
[ "Average", "the", "gradients", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L239-L287
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
GradientPacker.compute_strategy
def compute_strategy(self, grads): """ Returns: bool - False if grads cannot be packed due to various reasons. """ for g in grads: assert g.shape.is_fully_defined(), "Shape of {} is {}!".format(g.name, g.shape) self._shapes = [g.shape for g in grads] self._sizes = [g.shape.num_elements() for g in grads] self._total_size = sum(self._sizes) if self._total_size / self._num_split < 1024: logger.info("Skip GradientPacker due to too few gradients.") return False # should have the same dtype dtypes = set([g.dtype for g in grads]) if len(dtypes) != 1: logger.info("Skip GradientPacker due to inconsistent gradient types.") return False self._grad_dtype = grads[0].dtype split_size = self._total_size // self._num_split split_size_last = self._total_size - split_size * (self._num_split - 1) self._split_sizes = [split_size] * (self._num_split - 1) + [split_size_last] logger.info( "Will pack {} gradients of total dimension={} into {} splits.".format( len(self._sizes), self._total_size, self._num_split)) return True
python
def compute_strategy(self, grads): """ Returns: bool - False if grads cannot be packed due to various reasons. """ for g in grads: assert g.shape.is_fully_defined(), "Shape of {} is {}!".format(g.name, g.shape) self._shapes = [g.shape for g in grads] self._sizes = [g.shape.num_elements() for g in grads] self._total_size = sum(self._sizes) if self._total_size / self._num_split < 1024: logger.info("Skip GradientPacker due to too few gradients.") return False # should have the same dtype dtypes = set([g.dtype for g in grads]) if len(dtypes) != 1: logger.info("Skip GradientPacker due to inconsistent gradient types.") return False self._grad_dtype = grads[0].dtype split_size = self._total_size // self._num_split split_size_last = self._total_size - split_size * (self._num_split - 1) self._split_sizes = [split_size] * (self._num_split - 1) + [split_size_last] logger.info( "Will pack {} gradients of total dimension={} into {} splits.".format( len(self._sizes), self._total_size, self._num_split)) return True
[ "def", "compute_strategy", "(", "self", ",", "grads", ")", ":", "for", "g", "in", "grads", ":", "assert", "g", ".", "shape", ".", "is_fully_defined", "(", ")", ",", "\"Shape of {} is {}!\"", ".", "format", "(", "g", ".", "name", ",", "g", ".", "shape", ")", "self", ".", "_shapes", "=", "[", "g", ".", "shape", "for", "g", "in", "grads", "]", "self", ".", "_sizes", "=", "[", "g", ".", "shape", ".", "num_elements", "(", ")", "for", "g", "in", "grads", "]", "self", ".", "_total_size", "=", "sum", "(", "self", ".", "_sizes", ")", "if", "self", ".", "_total_size", "/", "self", ".", "_num_split", "<", "1024", ":", "logger", ".", "info", "(", "\"Skip GradientPacker due to too few gradients.\"", ")", "return", "False", "# should have the same dtype", "dtypes", "=", "set", "(", "[", "g", ".", "dtype", "for", "g", "in", "grads", "]", ")", "if", "len", "(", "dtypes", ")", "!=", "1", ":", "logger", ".", "info", "(", "\"Skip GradientPacker due to inconsistent gradient types.\"", ")", "return", "False", "self", ".", "_grad_dtype", "=", "grads", "[", "0", "]", ".", "dtype", "split_size", "=", "self", ".", "_total_size", "//", "self", ".", "_num_split", "split_size_last", "=", "self", ".", "_total_size", "-", "split_size", "*", "(", "self", ".", "_num_split", "-", "1", ")", "self", ".", "_split_sizes", "=", "[", "split_size", "]", "*", "(", "self", ".", "_num_split", "-", "1", ")", "+", "[", "split_size_last", "]", "logger", ".", "info", "(", "\"Will pack {} gradients of total dimension={} into {} splits.\"", ".", "format", "(", "len", "(", "self", ".", "_sizes", ")", ",", "self", ".", "_total_size", ",", "self", ".", "_num_split", ")", ")", "return", "True" ]
Returns: bool - False if grads cannot be packed due to various reasons.
[ "Returns", ":", "bool", "-", "False", "if", "grads", "cannot", "be", "packed", "due", "to", "various", "reasons", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L337-L364
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
GradientPacker.pack
def pack(self, grads): """ Args: grads (list): list of gradient tensors Returns: packed list of gradient tensors to be aggregated. """ for i, g in enumerate(grads): assert g.shape == self._shapes[i] with cached_name_scope("GradientPacker", top_level=False): concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads') # concat_grads = tf.cast(concat_grads, tf.float16) grad_packs = tf.split(concat_grads, self._split_sizes) return grad_packs
python
def pack(self, grads): """ Args: grads (list): list of gradient tensors Returns: packed list of gradient tensors to be aggregated. """ for i, g in enumerate(grads): assert g.shape == self._shapes[i] with cached_name_scope("GradientPacker", top_level=False): concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads') # concat_grads = tf.cast(concat_grads, tf.float16) grad_packs = tf.split(concat_grads, self._split_sizes) return grad_packs
[ "def", "pack", "(", "self", ",", "grads", ")", ":", "for", "i", ",", "g", "in", "enumerate", "(", "grads", ")", ":", "assert", "g", ".", "shape", "==", "self", ".", "_shapes", "[", "i", "]", "with", "cached_name_scope", "(", "\"GradientPacker\"", ",", "top_level", "=", "False", ")", ":", "concat_grads", "=", "tf", ".", "concat", "(", "[", "tf", ".", "reshape", "(", "g", ",", "[", "-", "1", "]", ")", "for", "g", "in", "grads", "]", ",", "0", ",", "name", "=", "'concatenated_grads'", ")", "# concat_grads = tf.cast(concat_grads, tf.float16)", "grad_packs", "=", "tf", ".", "split", "(", "concat_grads", ",", "self", ".", "_split_sizes", ")", "return", "grad_packs" ]
Args: grads (list): list of gradient tensors Returns: packed list of gradient tensors to be aggregated.
[ "Args", ":", "grads", "(", "list", ")", ":", "list", "of", "gradient", "tensors" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L366-L381
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
GradientPacker.pack_all
def pack_all(self, all_grads, devices): """ Args: all_grads: K x N, K lists of gradients to be packed """ ret = [] # #GPU x #split for dev, grads in zip(devices, all_grads): with tf.device(dev): ret.append(self.pack(grads)) return ret
python
def pack_all(self, all_grads, devices): """ Args: all_grads: K x N, K lists of gradients to be packed """ ret = [] # #GPU x #split for dev, grads in zip(devices, all_grads): with tf.device(dev): ret.append(self.pack(grads)) return ret
[ "def", "pack_all", "(", "self", ",", "all_grads", ",", "devices", ")", ":", "ret", "=", "[", "]", "# #GPU x #split", "for", "dev", ",", "grads", "in", "zip", "(", "devices", ",", "all_grads", ")", ":", "with", "tf", ".", "device", "(", "dev", ")", ":", "ret", ".", "append", "(", "self", ".", "pack", "(", "grads", ")", ")", "return", "ret" ]
Args: all_grads: K x N, K lists of gradients to be packed
[ "Args", ":", "all_grads", ":", "K", "x", "N", "K", "lists", "of", "gradients", "to", "be", "packed" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L391-L400
train
tensorpack/tensorpack
tensorpack/graph_builder/utils.py
GradientPacker.unpack_all
def unpack_all(self, all_packed, devices): """ Args: all_packed: K lists of packed gradients. """ all_grads = [] # #GPU x #Var for dev, packed_grads_single_device in zip(devices, all_packed): with tf.device(dev): all_grads.append(self.unpack(packed_grads_single_device)) return all_grads
python
def unpack_all(self, all_packed, devices): """ Args: all_packed: K lists of packed gradients. """ all_grads = [] # #GPU x #Var for dev, packed_grads_single_device in zip(devices, all_packed): with tf.device(dev): all_grads.append(self.unpack(packed_grads_single_device)) return all_grads
[ "def", "unpack_all", "(", "self", ",", "all_packed", ",", "devices", ")", ":", "all_grads", "=", "[", "]", "# #GPU x #Var", "for", "dev", ",", "packed_grads_single_device", "in", "zip", "(", "devices", ",", "all_packed", ")", ":", "with", "tf", ".", "device", "(", "dev", ")", ":", "all_grads", ".", "append", "(", "self", ".", "unpack", "(", "packed_grads_single_device", ")", ")", "return", "all_grads" ]
Args: all_packed: K lists of packed gradients.
[ "Args", ":", "all_packed", ":", "K", "lists", "of", "packed", "gradients", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L402-L411
train
tensorpack/tensorpack
examples/FasterRCNN/model_fpn.py
fpn_model
def fpn_model(features): """ Args: features ([tf.Tensor]): ResNet features c2-c5 Returns: [tf.Tensor]: FPN features p2-p6 """ assert len(features) == 4, features num_channel = cfg.FPN.NUM_CHANNEL use_gn = cfg.FPN.NORM == 'GN' def upsample2x(name, x): return FixedUnPooling( name, x, 2, unpool_mat=np.ones((2, 2), dtype='float32'), data_format='channels_first') # tf.image.resize is, again, not aligned. # with tf.name_scope(name): # shape2d = tf.shape(x)[2:] # x = tf.transpose(x, [0, 2, 3, 1]) # x = tf.image.resize_nearest_neighbor(x, shape2d * 2, align_corners=True) # x = tf.transpose(x, [0, 3, 1, 2]) # return x with argscope(Conv2D, data_format='channels_first', activation=tf.identity, use_bias=True, kernel_initializer=tf.variance_scaling_initializer(scale=1.)): lat_2345 = [Conv2D('lateral_1x1_c{}'.format(i + 2), c, num_channel, 1) for i, c in enumerate(features)] if use_gn: lat_2345 = [GroupNorm('gn_c{}'.format(i + 2), c) for i, c in enumerate(lat_2345)] lat_sum_5432 = [] for idx, lat in enumerate(lat_2345[::-1]): if idx == 0: lat_sum_5432.append(lat) else: lat = lat + upsample2x('upsample_lat{}'.format(6 - idx), lat_sum_5432[-1]) lat_sum_5432.append(lat) p2345 = [Conv2D('posthoc_3x3_p{}'.format(i + 2), c, num_channel, 3) for i, c in enumerate(lat_sum_5432[::-1])] if use_gn: p2345 = [GroupNorm('gn_p{}'.format(i + 2), c) for i, c in enumerate(p2345)] p6 = MaxPooling('maxpool_p6', p2345[-1], pool_size=1, strides=2, data_format='channels_first', padding='VALID') return p2345 + [p6]
python
def fpn_model(features): """ Args: features ([tf.Tensor]): ResNet features c2-c5 Returns: [tf.Tensor]: FPN features p2-p6 """ assert len(features) == 4, features num_channel = cfg.FPN.NUM_CHANNEL use_gn = cfg.FPN.NORM == 'GN' def upsample2x(name, x): return FixedUnPooling( name, x, 2, unpool_mat=np.ones((2, 2), dtype='float32'), data_format='channels_first') # tf.image.resize is, again, not aligned. # with tf.name_scope(name): # shape2d = tf.shape(x)[2:] # x = tf.transpose(x, [0, 2, 3, 1]) # x = tf.image.resize_nearest_neighbor(x, shape2d * 2, align_corners=True) # x = tf.transpose(x, [0, 3, 1, 2]) # return x with argscope(Conv2D, data_format='channels_first', activation=tf.identity, use_bias=True, kernel_initializer=tf.variance_scaling_initializer(scale=1.)): lat_2345 = [Conv2D('lateral_1x1_c{}'.format(i + 2), c, num_channel, 1) for i, c in enumerate(features)] if use_gn: lat_2345 = [GroupNorm('gn_c{}'.format(i + 2), c) for i, c in enumerate(lat_2345)] lat_sum_5432 = [] for idx, lat in enumerate(lat_2345[::-1]): if idx == 0: lat_sum_5432.append(lat) else: lat = lat + upsample2x('upsample_lat{}'.format(6 - idx), lat_sum_5432[-1]) lat_sum_5432.append(lat) p2345 = [Conv2D('posthoc_3x3_p{}'.format(i + 2), c, num_channel, 3) for i, c in enumerate(lat_sum_5432[::-1])] if use_gn: p2345 = [GroupNorm('gn_p{}'.format(i + 2), c) for i, c in enumerate(p2345)] p6 = MaxPooling('maxpool_p6', p2345[-1], pool_size=1, strides=2, data_format='channels_first', padding='VALID') return p2345 + [p6]
[ "def", "fpn_model", "(", "features", ")", ":", "assert", "len", "(", "features", ")", "==", "4", ",", "features", "num_channel", "=", "cfg", ".", "FPN", ".", "NUM_CHANNEL", "use_gn", "=", "cfg", ".", "FPN", ".", "NORM", "==", "'GN'", "def", "upsample2x", "(", "name", ",", "x", ")", ":", "return", "FixedUnPooling", "(", "name", ",", "x", ",", "2", ",", "unpool_mat", "=", "np", ".", "ones", "(", "(", "2", ",", "2", ")", ",", "dtype", "=", "'float32'", ")", ",", "data_format", "=", "'channels_first'", ")", "# tf.image.resize is, again, not aligned.", "# with tf.name_scope(name):", "# shape2d = tf.shape(x)[2:]", "# x = tf.transpose(x, [0, 2, 3, 1])", "# x = tf.image.resize_nearest_neighbor(x, shape2d * 2, align_corners=True)", "# x = tf.transpose(x, [0, 3, 1, 2])", "# return x", "with", "argscope", "(", "Conv2D", ",", "data_format", "=", "'channels_first'", ",", "activation", "=", "tf", ".", "identity", ",", "use_bias", "=", "True", ",", "kernel_initializer", "=", "tf", ".", "variance_scaling_initializer", "(", "scale", "=", "1.", ")", ")", ":", "lat_2345", "=", "[", "Conv2D", "(", "'lateral_1x1_c{}'", ".", "format", "(", "i", "+", "2", ")", ",", "c", ",", "num_channel", ",", "1", ")", "for", "i", ",", "c", "in", "enumerate", "(", "features", ")", "]", "if", "use_gn", ":", "lat_2345", "=", "[", "GroupNorm", "(", "'gn_c{}'", ".", "format", "(", "i", "+", "2", ")", ",", "c", ")", "for", "i", ",", "c", "in", "enumerate", "(", "lat_2345", ")", "]", "lat_sum_5432", "=", "[", "]", "for", "idx", ",", "lat", "in", "enumerate", "(", "lat_2345", "[", ":", ":", "-", "1", "]", ")", ":", "if", "idx", "==", "0", ":", "lat_sum_5432", ".", "append", "(", "lat", ")", "else", ":", "lat", "=", "lat", "+", "upsample2x", "(", "'upsample_lat{}'", ".", "format", "(", "6", "-", "idx", ")", ",", "lat_sum_5432", "[", "-", "1", "]", ")", "lat_sum_5432", ".", "append", "(", "lat", ")", "p2345", "=", "[", "Conv2D", "(", "'posthoc_3x3_p{}'", ".", "format", "(", "i", "+", "2", ")", ",", "c", ",", 
"num_channel", ",", "3", ")", "for", "i", ",", "c", "in", "enumerate", "(", "lat_sum_5432", "[", ":", ":", "-", "1", "]", ")", "]", "if", "use_gn", ":", "p2345", "=", "[", "GroupNorm", "(", "'gn_p{}'", ".", "format", "(", "i", "+", "2", ")", ",", "c", ")", "for", "i", ",", "c", "in", "enumerate", "(", "p2345", ")", "]", "p6", "=", "MaxPooling", "(", "'maxpool_p6'", ",", "p2345", "[", "-", "1", "]", ",", "pool_size", "=", "1", ",", "strides", "=", "2", ",", "data_format", "=", "'channels_first'", ",", "padding", "=", "'VALID'", ")", "return", "p2345", "+", "[", "p6", "]" ]
Args: features ([tf.Tensor]): ResNet features c2-c5 Returns: [tf.Tensor]: FPN features p2-p6
[ "Args", ":", "features", "(", "[", "tf", ".", "Tensor", "]", ")", ":", "ResNet", "features", "c2", "-", "c5" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_fpn.py#L21-L66
train
tensorpack/tensorpack
examples/FasterRCNN/model_fpn.py
fpn_map_rois_to_levels
def fpn_map_rois_to_levels(boxes): """ Assign boxes to level 2~5. Args: boxes (nx4): Returns: [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level. [tf.Tensor]: 4 tensors, the gathered boxes in each level. Be careful that the returned tensor could be empty. """ sqrtarea = tf.sqrt(tf_area(boxes)) level = tf.cast(tf.floor( 4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32) # RoI levels range from 2~5 (not 6) level_ids = [ tf.where(level <= 2), tf.where(tf.equal(level, 3)), # == is not supported tf.where(tf.equal(level, 4)), tf.where(level >= 5)] level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2)) for i, x in enumerate(level_ids)] num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2)) for i, x in enumerate(level_ids)] add_moving_summary(*num_in_levels) level_boxes = [tf.gather(boxes, ids) for ids in level_ids] return level_ids, level_boxes
python
def fpn_map_rois_to_levels(boxes): """ Assign boxes to level 2~5. Args: boxes (nx4): Returns: [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level. [tf.Tensor]: 4 tensors, the gathered boxes in each level. Be careful that the returned tensor could be empty. """ sqrtarea = tf.sqrt(tf_area(boxes)) level = tf.cast(tf.floor( 4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32) # RoI levels range from 2~5 (not 6) level_ids = [ tf.where(level <= 2), tf.where(tf.equal(level, 3)), # == is not supported tf.where(tf.equal(level, 4)), tf.where(level >= 5)] level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2)) for i, x in enumerate(level_ids)] num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2)) for i, x in enumerate(level_ids)] add_moving_summary(*num_in_levels) level_boxes = [tf.gather(boxes, ids) for ids in level_ids] return level_ids, level_boxes
[ "def", "fpn_map_rois_to_levels", "(", "boxes", ")", ":", "sqrtarea", "=", "tf", ".", "sqrt", "(", "tf_area", "(", "boxes", ")", ")", "level", "=", "tf", ".", "cast", "(", "tf", ".", "floor", "(", "4", "+", "tf", ".", "log", "(", "sqrtarea", "*", "(", "1.", "/", "224", ")", "+", "1e-6", ")", "*", "(", "1.0", "/", "np", ".", "log", "(", "2", ")", ")", ")", ",", "tf", ".", "int32", ")", "# RoI levels range from 2~5 (not 6)", "level_ids", "=", "[", "tf", ".", "where", "(", "level", "<=", "2", ")", ",", "tf", ".", "where", "(", "tf", ".", "equal", "(", "level", ",", "3", ")", ")", ",", "# == is not supported", "tf", ".", "where", "(", "tf", ".", "equal", "(", "level", ",", "4", ")", ")", ",", "tf", ".", "where", "(", "level", ">=", "5", ")", "]", "level_ids", "=", "[", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", "]", ",", "name", "=", "'roi_level{}_id'", ".", "format", "(", "i", "+", "2", ")", ")", "for", "i", ",", "x", "in", "enumerate", "(", "level_ids", ")", "]", "num_in_levels", "=", "[", "tf", ".", "size", "(", "x", ",", "name", "=", "'num_roi_level{}'", ".", "format", "(", "i", "+", "2", ")", ")", "for", "i", ",", "x", "in", "enumerate", "(", "level_ids", ")", "]", "add_moving_summary", "(", "*", "num_in_levels", ")", "level_boxes", "=", "[", "tf", ".", "gather", "(", "boxes", ",", "ids", ")", "for", "ids", "in", "level_ids", "]", "return", "level_ids", ",", "level_boxes" ]
Assign boxes to level 2~5. Args: boxes (nx4): Returns: [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level. [tf.Tensor]: 4 tensors, the gathered boxes in each level. Be careful that the returned tensor could be empty.
[ "Assign", "boxes", "to", "level", "2~5", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_fpn.py#L70-L100
train
tensorpack/tensorpack
examples/FasterRCNN/model_fpn.py
multilevel_roi_align
def multilevel_roi_align(features, rcnn_boxes, resolution): """ Args: features ([tf.Tensor]): 4 FPN feature level 2-5 rcnn_boxes (tf.Tensor): nx4 boxes resolution (int): output spatial resolution Returns: NxC x res x res """ assert len(features) == 4, features # Reassign rcnn_boxes to levels level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes) all_rois = [] # Crop patches from corresponding levels for i, boxes, featuremap in zip(itertools.count(), level_boxes, features): with tf.name_scope('roi_level{}'.format(i + 2)): boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[i]) all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution)) # this can fail if using TF<=1.8 with MKL build all_rois = tf.concat(all_rois, axis=0) # NCHW # Unshuffle to the original order, to match the original samples level_id_perm = tf.concat(level_ids, axis=0) # A permutation of 1~N level_id_invert_perm = tf.invert_permutation(level_id_perm) all_rois = tf.gather(all_rois, level_id_invert_perm) return all_rois
python
def multilevel_roi_align(features, rcnn_boxes, resolution): """ Args: features ([tf.Tensor]): 4 FPN feature level 2-5 rcnn_boxes (tf.Tensor): nx4 boxes resolution (int): output spatial resolution Returns: NxC x res x res """ assert len(features) == 4, features # Reassign rcnn_boxes to levels level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes) all_rois = [] # Crop patches from corresponding levels for i, boxes, featuremap in zip(itertools.count(), level_boxes, features): with tf.name_scope('roi_level{}'.format(i + 2)): boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[i]) all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution)) # this can fail if using TF<=1.8 with MKL build all_rois = tf.concat(all_rois, axis=0) # NCHW # Unshuffle to the original order, to match the original samples level_id_perm = tf.concat(level_ids, axis=0) # A permutation of 1~N level_id_invert_perm = tf.invert_permutation(level_id_perm) all_rois = tf.gather(all_rois, level_id_invert_perm) return all_rois
[ "def", "multilevel_roi_align", "(", "features", ",", "rcnn_boxes", ",", "resolution", ")", ":", "assert", "len", "(", "features", ")", "==", "4", ",", "features", "# Reassign rcnn_boxes to levels", "level_ids", ",", "level_boxes", "=", "fpn_map_rois_to_levels", "(", "rcnn_boxes", ")", "all_rois", "=", "[", "]", "# Crop patches from corresponding levels", "for", "i", ",", "boxes", ",", "featuremap", "in", "zip", "(", "itertools", ".", "count", "(", ")", ",", "level_boxes", ",", "features", ")", ":", "with", "tf", ".", "name_scope", "(", "'roi_level{}'", ".", "format", "(", "i", "+", "2", ")", ")", ":", "boxes_on_featuremap", "=", "boxes", "*", "(", "1.0", "/", "cfg", ".", "FPN", ".", "ANCHOR_STRIDES", "[", "i", "]", ")", "all_rois", ".", "append", "(", "roi_align", "(", "featuremap", ",", "boxes_on_featuremap", ",", "resolution", ")", ")", "# this can fail if using TF<=1.8 with MKL build", "all_rois", "=", "tf", ".", "concat", "(", "all_rois", ",", "axis", "=", "0", ")", "# NCHW", "# Unshuffle to the original order, to match the original samples", "level_id_perm", "=", "tf", ".", "concat", "(", "level_ids", ",", "axis", "=", "0", ")", "# A permutation of 1~N", "level_id_invert_perm", "=", "tf", ".", "invert_permutation", "(", "level_id_perm", ")", "all_rois", "=", "tf", ".", "gather", "(", "all_rois", ",", "level_id_invert_perm", ")", "return", "all_rois" ]
Args: features ([tf.Tensor]): 4 FPN feature level 2-5 rcnn_boxes (tf.Tensor): nx4 boxes resolution (int): output spatial resolution Returns: NxC x res x res
[ "Args", ":", "features", "(", "[", "tf", ".", "Tensor", "]", ")", ":", "4", "FPN", "feature", "level", "2", "-", "5", "rcnn_boxes", "(", "tf", ".", "Tensor", ")", ":", "nx4", "boxes", "resolution", "(", "int", ")", ":", "output", "spatial", "resolution", "Returns", ":", "NxC", "x", "res", "x", "res" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_fpn.py#L104-L130
train
tensorpack/tensorpack
examples/FasterRCNN/model_fpn.py
multilevel_rpn_losses
def multilevel_rpn_losses( multilevel_anchors, multilevel_label_logits, multilevel_box_logits): """ Args: multilevel_anchors: #lvl RPNAnchors multilevel_label_logits: #lvl tensors of shape HxWxA multilevel_box_logits: #lvl tensors of shape HxWxAx4 Returns: label_loss, box_loss """ num_lvl = len(cfg.FPN.ANCHOR_STRIDES) assert len(multilevel_anchors) == num_lvl assert len(multilevel_label_logits) == num_lvl assert len(multilevel_box_logits) == num_lvl losses = [] with tf.name_scope('rpn_losses'): for lvl in range(num_lvl): anchors = multilevel_anchors[lvl] label_loss, box_loss = rpn_losses( anchors.gt_labels, anchors.encoded_gt_boxes(), multilevel_label_logits[lvl], multilevel_box_logits[lvl], name_scope='level{}'.format(lvl + 2)) losses.extend([label_loss, box_loss]) total_label_loss = tf.add_n(losses[::2], name='label_loss') total_box_loss = tf.add_n(losses[1::2], name='box_loss') add_moving_summary(total_label_loss, total_box_loss) return [total_label_loss, total_box_loss]
python
def multilevel_rpn_losses( multilevel_anchors, multilevel_label_logits, multilevel_box_logits): """ Args: multilevel_anchors: #lvl RPNAnchors multilevel_label_logits: #lvl tensors of shape HxWxA multilevel_box_logits: #lvl tensors of shape HxWxAx4 Returns: label_loss, box_loss """ num_lvl = len(cfg.FPN.ANCHOR_STRIDES) assert len(multilevel_anchors) == num_lvl assert len(multilevel_label_logits) == num_lvl assert len(multilevel_box_logits) == num_lvl losses = [] with tf.name_scope('rpn_losses'): for lvl in range(num_lvl): anchors = multilevel_anchors[lvl] label_loss, box_loss = rpn_losses( anchors.gt_labels, anchors.encoded_gt_boxes(), multilevel_label_logits[lvl], multilevel_box_logits[lvl], name_scope='level{}'.format(lvl + 2)) losses.extend([label_loss, box_loss]) total_label_loss = tf.add_n(losses[::2], name='label_loss') total_box_loss = tf.add_n(losses[1::2], name='box_loss') add_moving_summary(total_label_loss, total_box_loss) return [total_label_loss, total_box_loss]
[ "def", "multilevel_rpn_losses", "(", "multilevel_anchors", ",", "multilevel_label_logits", ",", "multilevel_box_logits", ")", ":", "num_lvl", "=", "len", "(", "cfg", ".", "FPN", ".", "ANCHOR_STRIDES", ")", "assert", "len", "(", "multilevel_anchors", ")", "==", "num_lvl", "assert", "len", "(", "multilevel_label_logits", ")", "==", "num_lvl", "assert", "len", "(", "multilevel_box_logits", ")", "==", "num_lvl", "losses", "=", "[", "]", "with", "tf", ".", "name_scope", "(", "'rpn_losses'", ")", ":", "for", "lvl", "in", "range", "(", "num_lvl", ")", ":", "anchors", "=", "multilevel_anchors", "[", "lvl", "]", "label_loss", ",", "box_loss", "=", "rpn_losses", "(", "anchors", ".", "gt_labels", ",", "anchors", ".", "encoded_gt_boxes", "(", ")", ",", "multilevel_label_logits", "[", "lvl", "]", ",", "multilevel_box_logits", "[", "lvl", "]", ",", "name_scope", "=", "'level{}'", ".", "format", "(", "lvl", "+", "2", ")", ")", "losses", ".", "extend", "(", "[", "label_loss", ",", "box_loss", "]", ")", "total_label_loss", "=", "tf", ".", "add_n", "(", "losses", "[", ":", ":", "2", "]", ",", "name", "=", "'label_loss'", ")", "total_box_loss", "=", "tf", ".", "add_n", "(", "losses", "[", "1", ":", ":", "2", "]", ",", "name", "=", "'box_loss'", ")", "add_moving_summary", "(", "total_label_loss", ",", "total_box_loss", ")", "return", "[", "total_label_loss", ",", "total_box_loss", "]" ]
Args: multilevel_anchors: #lvl RPNAnchors multilevel_label_logits: #lvl tensors of shape HxWxA multilevel_box_logits: #lvl tensors of shape HxWxAx4 Returns: label_loss, box_loss
[ "Args", ":", "multilevel_anchors", ":", "#lvl", "RPNAnchors", "multilevel_label_logits", ":", "#lvl", "tensors", "of", "shape", "HxWxA", "multilevel_box_logits", ":", "#lvl", "tensors", "of", "shape", "HxWxAx4" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_fpn.py#L133-L162
train
tensorpack/tensorpack
examples/FasterRCNN/model_fpn.py
generate_fpn_proposals
def generate_fpn_proposals( multilevel_pred_boxes, multilevel_label_logits, image_shape2d): """ Args: multilevel_pred_boxes: #lvl HxWxAx4 boxes multilevel_label_logits: #lvl tensors of shape HxWxA Returns: boxes: kx4 float scores: k logits """ num_lvl = len(cfg.FPN.ANCHOR_STRIDES) assert len(multilevel_pred_boxes) == num_lvl assert len(multilevel_label_logits) == num_lvl training = get_current_tower_context().is_training all_boxes = [] all_scores = [] if cfg.FPN.PROPOSAL_MODE == 'Level': fpn_nms_topk = cfg.RPN.TRAIN_PER_LEVEL_NMS_TOPK if training else cfg.RPN.TEST_PER_LEVEL_NMS_TOPK for lvl in range(num_lvl): with tf.name_scope('Lvl{}'.format(lvl + 2)): pred_boxes_decoded = multilevel_pred_boxes[lvl] proposal_boxes, proposal_scores = generate_rpn_proposals( tf.reshape(pred_boxes_decoded, [-1, 4]), tf.reshape(multilevel_label_logits[lvl], [-1]), image_shape2d, fpn_nms_topk) all_boxes.append(proposal_boxes) all_scores.append(proposal_scores) proposal_boxes = tf.concat(all_boxes, axis=0) # nx4 proposal_scores = tf.concat(all_scores, axis=0) # n # Here we are different from Detectron. # Detectron picks top-k within the batch, rather than within an image. However we do not have a batch. 
proposal_topk = tf.minimum(tf.size(proposal_scores), fpn_nms_topk) proposal_scores, topk_indices = tf.nn.top_k(proposal_scores, k=proposal_topk, sorted=False) proposal_boxes = tf.gather(proposal_boxes, topk_indices) else: for lvl in range(num_lvl): with tf.name_scope('Lvl{}'.format(lvl + 2)): pred_boxes_decoded = multilevel_pred_boxes[lvl] all_boxes.append(tf.reshape(pred_boxes_decoded, [-1, 4])) all_scores.append(tf.reshape(multilevel_label_logits[lvl], [-1])) all_boxes = tf.concat(all_boxes, axis=0) all_scores = tf.concat(all_scores, axis=0) proposal_boxes, proposal_scores = generate_rpn_proposals( all_boxes, all_scores, image_shape2d, cfg.RPN.TRAIN_PRE_NMS_TOPK if training else cfg.RPN.TEST_PRE_NMS_TOPK, cfg.RPN.TRAIN_POST_NMS_TOPK if training else cfg.RPN.TEST_POST_NMS_TOPK) tf.sigmoid(proposal_scores, name='probs') # for visualization return tf.stop_gradient(proposal_boxes, name='boxes'), \ tf.stop_gradient(proposal_scores, name='scores')
python
def generate_fpn_proposals( multilevel_pred_boxes, multilevel_label_logits, image_shape2d): """ Args: multilevel_pred_boxes: #lvl HxWxAx4 boxes multilevel_label_logits: #lvl tensors of shape HxWxA Returns: boxes: kx4 float scores: k logits """ num_lvl = len(cfg.FPN.ANCHOR_STRIDES) assert len(multilevel_pred_boxes) == num_lvl assert len(multilevel_label_logits) == num_lvl training = get_current_tower_context().is_training all_boxes = [] all_scores = [] if cfg.FPN.PROPOSAL_MODE == 'Level': fpn_nms_topk = cfg.RPN.TRAIN_PER_LEVEL_NMS_TOPK if training else cfg.RPN.TEST_PER_LEVEL_NMS_TOPK for lvl in range(num_lvl): with tf.name_scope('Lvl{}'.format(lvl + 2)): pred_boxes_decoded = multilevel_pred_boxes[lvl] proposal_boxes, proposal_scores = generate_rpn_proposals( tf.reshape(pred_boxes_decoded, [-1, 4]), tf.reshape(multilevel_label_logits[lvl], [-1]), image_shape2d, fpn_nms_topk) all_boxes.append(proposal_boxes) all_scores.append(proposal_scores) proposal_boxes = tf.concat(all_boxes, axis=0) # nx4 proposal_scores = tf.concat(all_scores, axis=0) # n # Here we are different from Detectron. # Detectron picks top-k within the batch, rather than within an image. However we do not have a batch. 
proposal_topk = tf.minimum(tf.size(proposal_scores), fpn_nms_topk) proposal_scores, topk_indices = tf.nn.top_k(proposal_scores, k=proposal_topk, sorted=False) proposal_boxes = tf.gather(proposal_boxes, topk_indices) else: for lvl in range(num_lvl): with tf.name_scope('Lvl{}'.format(lvl + 2)): pred_boxes_decoded = multilevel_pred_boxes[lvl] all_boxes.append(tf.reshape(pred_boxes_decoded, [-1, 4])) all_scores.append(tf.reshape(multilevel_label_logits[lvl], [-1])) all_boxes = tf.concat(all_boxes, axis=0) all_scores = tf.concat(all_scores, axis=0) proposal_boxes, proposal_scores = generate_rpn_proposals( all_boxes, all_scores, image_shape2d, cfg.RPN.TRAIN_PRE_NMS_TOPK if training else cfg.RPN.TEST_PRE_NMS_TOPK, cfg.RPN.TRAIN_POST_NMS_TOPK if training else cfg.RPN.TEST_POST_NMS_TOPK) tf.sigmoid(proposal_scores, name='probs') # for visualization return tf.stop_gradient(proposal_boxes, name='boxes'), \ tf.stop_gradient(proposal_scores, name='scores')
[ "def", "generate_fpn_proposals", "(", "multilevel_pred_boxes", ",", "multilevel_label_logits", ",", "image_shape2d", ")", ":", "num_lvl", "=", "len", "(", "cfg", ".", "FPN", ".", "ANCHOR_STRIDES", ")", "assert", "len", "(", "multilevel_pred_boxes", ")", "==", "num_lvl", "assert", "len", "(", "multilevel_label_logits", ")", "==", "num_lvl", "training", "=", "get_current_tower_context", "(", ")", ".", "is_training", "all_boxes", "=", "[", "]", "all_scores", "=", "[", "]", "if", "cfg", ".", "FPN", ".", "PROPOSAL_MODE", "==", "'Level'", ":", "fpn_nms_topk", "=", "cfg", ".", "RPN", ".", "TRAIN_PER_LEVEL_NMS_TOPK", "if", "training", "else", "cfg", ".", "RPN", ".", "TEST_PER_LEVEL_NMS_TOPK", "for", "lvl", "in", "range", "(", "num_lvl", ")", ":", "with", "tf", ".", "name_scope", "(", "'Lvl{}'", ".", "format", "(", "lvl", "+", "2", ")", ")", ":", "pred_boxes_decoded", "=", "multilevel_pred_boxes", "[", "lvl", "]", "proposal_boxes", ",", "proposal_scores", "=", "generate_rpn_proposals", "(", "tf", ".", "reshape", "(", "pred_boxes_decoded", ",", "[", "-", "1", ",", "4", "]", ")", ",", "tf", ".", "reshape", "(", "multilevel_label_logits", "[", "lvl", "]", ",", "[", "-", "1", "]", ")", ",", "image_shape2d", ",", "fpn_nms_topk", ")", "all_boxes", ".", "append", "(", "proposal_boxes", ")", "all_scores", ".", "append", "(", "proposal_scores", ")", "proposal_boxes", "=", "tf", ".", "concat", "(", "all_boxes", ",", "axis", "=", "0", ")", "# nx4", "proposal_scores", "=", "tf", ".", "concat", "(", "all_scores", ",", "axis", "=", "0", ")", "# n", "# Here we are different from Detectron.", "# Detectron picks top-k within the batch, rather than within an image. 
However we do not have a batch.", "proposal_topk", "=", "tf", ".", "minimum", "(", "tf", ".", "size", "(", "proposal_scores", ")", ",", "fpn_nms_topk", ")", "proposal_scores", ",", "topk_indices", "=", "tf", ".", "nn", ".", "top_k", "(", "proposal_scores", ",", "k", "=", "proposal_topk", ",", "sorted", "=", "False", ")", "proposal_boxes", "=", "tf", ".", "gather", "(", "proposal_boxes", ",", "topk_indices", ")", "else", ":", "for", "lvl", "in", "range", "(", "num_lvl", ")", ":", "with", "tf", ".", "name_scope", "(", "'Lvl{}'", ".", "format", "(", "lvl", "+", "2", ")", ")", ":", "pred_boxes_decoded", "=", "multilevel_pred_boxes", "[", "lvl", "]", "all_boxes", ".", "append", "(", "tf", ".", "reshape", "(", "pred_boxes_decoded", ",", "[", "-", "1", ",", "4", "]", ")", ")", "all_scores", ".", "append", "(", "tf", ".", "reshape", "(", "multilevel_label_logits", "[", "lvl", "]", ",", "[", "-", "1", "]", ")", ")", "all_boxes", "=", "tf", ".", "concat", "(", "all_boxes", ",", "axis", "=", "0", ")", "all_scores", "=", "tf", ".", "concat", "(", "all_scores", ",", "axis", "=", "0", ")", "proposal_boxes", ",", "proposal_scores", "=", "generate_rpn_proposals", "(", "all_boxes", ",", "all_scores", ",", "image_shape2d", ",", "cfg", ".", "RPN", ".", "TRAIN_PRE_NMS_TOPK", "if", "training", "else", "cfg", ".", "RPN", ".", "TEST_PRE_NMS_TOPK", ",", "cfg", ".", "RPN", ".", "TRAIN_POST_NMS_TOPK", "if", "training", "else", "cfg", ".", "RPN", ".", "TEST_POST_NMS_TOPK", ")", "tf", ".", "sigmoid", "(", "proposal_scores", ",", "name", "=", "'probs'", ")", "# for visualization", "return", "tf", ".", "stop_gradient", "(", "proposal_boxes", ",", "name", "=", "'boxes'", ")", ",", "tf", ".", "stop_gradient", "(", "proposal_scores", ",", "name", "=", "'scores'", ")" ]
Args: multilevel_pred_boxes: #lvl HxWxAx4 boxes multilevel_label_logits: #lvl tensors of shape HxWxA Returns: boxes: kx4 float scores: k logits
[ "Args", ":", "multilevel_pred_boxes", ":", "#lvl", "HxWxAx4", "boxes", "multilevel_label_logits", ":", "#lvl", "tensors", "of", "shape", "HxWxA" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_fpn.py#L166-L219
train
tensorpack/tensorpack
tensorpack/models/layer_norm.py
LayerNorm
def LayerNorm( x, epsilon=1e-5, use_bias=True, use_scale=True, gamma_init=None, data_format='channels_last'): """ Layer Normalization layer, as described in the paper: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. Args: x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format. epsilon (float): epsilon to avoid divide-by-zero. use_scale, use_bias (bool): whether to use the extra affine transformation or not. """ data_format = get_data_format(data_format, keras_mode=False) shape = x.get_shape().as_list() ndims = len(shape) assert ndims in [2, 4] mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True) if data_format == 'NCHW': chan = shape[1] new_shape = [1, chan, 1, 1] else: chan = shape[-1] new_shape = [1, 1, 1, chan] if ndims == 2: new_shape = [1, chan] if use_bias: beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer()) beta = tf.reshape(beta, new_shape) else: beta = tf.zeros([1] * ndims, name='beta') if use_scale: if gamma_init is None: gamma_init = tf.constant_initializer(1.0) gamma = tf.get_variable('gamma', [chan], initializer=gamma_init) gamma = tf.reshape(gamma, new_shape) else: gamma = tf.ones([1] * ndims, name='gamma') ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output') vh = ret.variables = VariableHolder() if use_scale: vh.gamma = gamma if use_bias: vh.beta = beta return ret
python
def LayerNorm( x, epsilon=1e-5, use_bias=True, use_scale=True, gamma_init=None, data_format='channels_last'): """ Layer Normalization layer, as described in the paper: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. Args: x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format. epsilon (float): epsilon to avoid divide-by-zero. use_scale, use_bias (bool): whether to use the extra affine transformation or not. """ data_format = get_data_format(data_format, keras_mode=False) shape = x.get_shape().as_list() ndims = len(shape) assert ndims in [2, 4] mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True) if data_format == 'NCHW': chan = shape[1] new_shape = [1, chan, 1, 1] else: chan = shape[-1] new_shape = [1, 1, 1, chan] if ndims == 2: new_shape = [1, chan] if use_bias: beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer()) beta = tf.reshape(beta, new_shape) else: beta = tf.zeros([1] * ndims, name='beta') if use_scale: if gamma_init is None: gamma_init = tf.constant_initializer(1.0) gamma = tf.get_variable('gamma', [chan], initializer=gamma_init) gamma = tf.reshape(gamma, new_shape) else: gamma = tf.ones([1] * ndims, name='gamma') ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output') vh = ret.variables = VariableHolder() if use_scale: vh.gamma = gamma if use_bias: vh.beta = beta return ret
[ "def", "LayerNorm", "(", "x", ",", "epsilon", "=", "1e-5", ",", "use_bias", "=", "True", ",", "use_scale", "=", "True", ",", "gamma_init", "=", "None", ",", "data_format", "=", "'channels_last'", ")", ":", "data_format", "=", "get_data_format", "(", "data_format", ",", "keras_mode", "=", "False", ")", "shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "ndims", "=", "len", "(", "shape", ")", "assert", "ndims", "in", "[", "2", ",", "4", "]", "mean", ",", "var", "=", "tf", ".", "nn", ".", "moments", "(", "x", ",", "list", "(", "range", "(", "1", ",", "len", "(", "shape", ")", ")", ")", ",", "keep_dims", "=", "True", ")", "if", "data_format", "==", "'NCHW'", ":", "chan", "=", "shape", "[", "1", "]", "new_shape", "=", "[", "1", ",", "chan", ",", "1", ",", "1", "]", "else", ":", "chan", "=", "shape", "[", "-", "1", "]", "new_shape", "=", "[", "1", ",", "1", ",", "1", ",", "chan", "]", "if", "ndims", "==", "2", ":", "new_shape", "=", "[", "1", ",", "chan", "]", "if", "use_bias", ":", "beta", "=", "tf", ".", "get_variable", "(", "'beta'", ",", "[", "chan", "]", ",", "initializer", "=", "tf", ".", "constant_initializer", "(", ")", ")", "beta", "=", "tf", ".", "reshape", "(", "beta", ",", "new_shape", ")", "else", ":", "beta", "=", "tf", ".", "zeros", "(", "[", "1", "]", "*", "ndims", ",", "name", "=", "'beta'", ")", "if", "use_scale", ":", "if", "gamma_init", "is", "None", ":", "gamma_init", "=", "tf", ".", "constant_initializer", "(", "1.0", ")", "gamma", "=", "tf", ".", "get_variable", "(", "'gamma'", ",", "[", "chan", "]", ",", "initializer", "=", "gamma_init", ")", "gamma", "=", "tf", ".", "reshape", "(", "gamma", ",", "new_shape", ")", "else", ":", "gamma", "=", "tf", ".", "ones", "(", "[", "1", "]", "*", "ndims", ",", "name", "=", "'gamma'", ")", "ret", "=", "tf", ".", "nn", ".", "batch_normalization", "(", "x", ",", "mean", ",", "var", ",", "beta", ",", "gamma", ",", "epsilon", ",", "name", "=", "'output'", ")", "vh", "=", "ret", ".", 
"variables", "=", "VariableHolder", "(", ")", "if", "use_scale", ":", "vh", ".", "gamma", "=", "gamma", "if", "use_bias", ":", "vh", ".", "beta", "=", "beta", "return", "ret" ]
Layer Normalization layer, as described in the paper: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. Args: x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format. epsilon (float): epsilon to avoid divide-by-zero. use_scale, use_bias (bool): whether to use the extra affine transformation or not.
[ "Layer", "Normalization", "layer", "as", "described", "in", "the", "paper", ":", "Layer", "Normalization", "<https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1607", ".", "06450", ">", "_", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/models/layer_norm.py#L14-L63
train
tensorpack/tensorpack
tensorpack/models/layer_norm.py
InstanceNorm
def InstanceNorm(x, epsilon=1e-5, use_affine=True, gamma_init=None, data_format='channels_last'): """ Instance Normalization, as in the paper: `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/abs/1607.08022>`_. Args: x (tf.Tensor): a 4D tensor. epsilon (float): avoid divide-by-zero use_affine (bool): whether to apply learnable affine transformation """ data_format = get_data_format(data_format, keras_mode=False) shape = x.get_shape().as_list() assert len(shape) == 4, "Input of InstanceNorm has to be 4D!" if data_format == 'NHWC': axis = [1, 2] ch = shape[3] new_shape = [1, 1, 1, ch] else: axis = [2, 3] ch = shape[1] new_shape = [1, ch, 1, 1] assert ch is not None, "Input of InstanceNorm require known channel!" mean, var = tf.nn.moments(x, axis, keep_dims=True) if not use_affine: return tf.divide(x - mean, tf.sqrt(var + epsilon), name='output') beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer()) beta = tf.reshape(beta, new_shape) if gamma_init is None: gamma_init = tf.constant_initializer(1.0) gamma = tf.get_variable('gamma', [ch], initializer=gamma_init) gamma = tf.reshape(gamma, new_shape) ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output') vh = ret.variables = VariableHolder() if use_affine: vh.gamma = gamma vh.beta = beta return ret
python
def InstanceNorm(x, epsilon=1e-5, use_affine=True, gamma_init=None, data_format='channels_last'): """ Instance Normalization, as in the paper: `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/abs/1607.08022>`_. Args: x (tf.Tensor): a 4D tensor. epsilon (float): avoid divide-by-zero use_affine (bool): whether to apply learnable affine transformation """ data_format = get_data_format(data_format, keras_mode=False) shape = x.get_shape().as_list() assert len(shape) == 4, "Input of InstanceNorm has to be 4D!" if data_format == 'NHWC': axis = [1, 2] ch = shape[3] new_shape = [1, 1, 1, ch] else: axis = [2, 3] ch = shape[1] new_shape = [1, ch, 1, 1] assert ch is not None, "Input of InstanceNorm require known channel!" mean, var = tf.nn.moments(x, axis, keep_dims=True) if not use_affine: return tf.divide(x - mean, tf.sqrt(var + epsilon), name='output') beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer()) beta = tf.reshape(beta, new_shape) if gamma_init is None: gamma_init = tf.constant_initializer(1.0) gamma = tf.get_variable('gamma', [ch], initializer=gamma_init) gamma = tf.reshape(gamma, new_shape) ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output') vh = ret.variables = VariableHolder() if use_affine: vh.gamma = gamma vh.beta = beta return ret
[ "def", "InstanceNorm", "(", "x", ",", "epsilon", "=", "1e-5", ",", "use_affine", "=", "True", ",", "gamma_init", "=", "None", ",", "data_format", "=", "'channels_last'", ")", ":", "data_format", "=", "get_data_format", "(", "data_format", ",", "keras_mode", "=", "False", ")", "shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "assert", "len", "(", "shape", ")", "==", "4", ",", "\"Input of InstanceNorm has to be 4D!\"", "if", "data_format", "==", "'NHWC'", ":", "axis", "=", "[", "1", ",", "2", "]", "ch", "=", "shape", "[", "3", "]", "new_shape", "=", "[", "1", ",", "1", ",", "1", ",", "ch", "]", "else", ":", "axis", "=", "[", "2", ",", "3", "]", "ch", "=", "shape", "[", "1", "]", "new_shape", "=", "[", "1", ",", "ch", ",", "1", ",", "1", "]", "assert", "ch", "is", "not", "None", ",", "\"Input of InstanceNorm require known channel!\"", "mean", ",", "var", "=", "tf", ".", "nn", ".", "moments", "(", "x", ",", "axis", ",", "keep_dims", "=", "True", ")", "if", "not", "use_affine", ":", "return", "tf", ".", "divide", "(", "x", "-", "mean", ",", "tf", ".", "sqrt", "(", "var", "+", "epsilon", ")", ",", "name", "=", "'output'", ")", "beta", "=", "tf", ".", "get_variable", "(", "'beta'", ",", "[", "ch", "]", ",", "initializer", "=", "tf", ".", "constant_initializer", "(", ")", ")", "beta", "=", "tf", ".", "reshape", "(", "beta", ",", "new_shape", ")", "if", "gamma_init", "is", "None", ":", "gamma_init", "=", "tf", ".", "constant_initializer", "(", "1.0", ")", "gamma", "=", "tf", ".", "get_variable", "(", "'gamma'", ",", "[", "ch", "]", ",", "initializer", "=", "gamma_init", ")", "gamma", "=", "tf", ".", "reshape", "(", "gamma", ",", "new_shape", ")", "ret", "=", "tf", ".", "nn", ".", "batch_normalization", "(", "x", ",", "mean", ",", "var", ",", "beta", ",", "gamma", ",", "epsilon", ",", "name", "=", "'output'", ")", "vh", "=", "ret", ".", "variables", "=", "VariableHolder", "(", ")", "if", "use_affine", ":", "vh", ".", "gamma", "=", "gamma", "vh", 
".", "beta", "=", "beta", "return", "ret" ]
Instance Normalization, as in the paper: `Instance Normalization: The Missing Ingredient for Fast Stylization <https://arxiv.org/abs/1607.08022>`_. Args: x (tf.Tensor): a 4D tensor. epsilon (float): avoid divide-by-zero use_affine (bool): whether to apply learnable affine transformation
[ "Instance", "Normalization", "as", "in", "the", "paper", ":", "Instance", "Normalization", ":", "The", "Missing", "Ingredient", "for", "Fast", "Stylization", "<https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1607", ".", "08022", ">", "_", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/models/layer_norm.py#L67-L109
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
proposal_metrics
def proposal_metrics(iou): """ Add summaries for RPN proposals. Args: iou: nxm, #proposal x #gt """ # find best roi for each gt, for summary only best_iou = tf.reduce_max(iou, axis=0) mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt') summaries = [mean_best_iou] with tf.device('/cpu:0'): for th in [0.3, 0.5]: recall = tf.truediv( tf.count_nonzero(best_iou >= th), tf.size(best_iou, out_type=tf.int64), name='recall_iou{}'.format(th)) summaries.append(recall) add_moving_summary(*summaries)
python
def proposal_metrics(iou): """ Add summaries for RPN proposals. Args: iou: nxm, #proposal x #gt """ # find best roi for each gt, for summary only best_iou = tf.reduce_max(iou, axis=0) mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt') summaries = [mean_best_iou] with tf.device('/cpu:0'): for th in [0.3, 0.5]: recall = tf.truediv( tf.count_nonzero(best_iou >= th), tf.size(best_iou, out_type=tf.int64), name='recall_iou{}'.format(th)) summaries.append(recall) add_moving_summary(*summaries)
[ "def", "proposal_metrics", "(", "iou", ")", ":", "# find best roi for each gt, for summary only", "best_iou", "=", "tf", ".", "reduce_max", "(", "iou", ",", "axis", "=", "0", ")", "mean_best_iou", "=", "tf", ".", "reduce_mean", "(", "best_iou", ",", "name", "=", "'best_iou_per_gt'", ")", "summaries", "=", "[", "mean_best_iou", "]", "with", "tf", ".", "device", "(", "'/cpu:0'", ")", ":", "for", "th", "in", "[", "0.3", ",", "0.5", "]", ":", "recall", "=", "tf", ".", "truediv", "(", "tf", ".", "count_nonzero", "(", "best_iou", ">=", "th", ")", ",", "tf", ".", "size", "(", "best_iou", ",", "out_type", "=", "tf", ".", "int64", ")", ",", "name", "=", "'recall_iou{}'", ".", "format", "(", "th", ")", ")", "summaries", ".", "append", "(", "recall", ")", "add_moving_summary", "(", "*", "summaries", ")" ]
Add summaries for RPN proposals. Args: iou: nxm, #proposal x #gt
[ "Add", "summaries", "for", "RPN", "proposals", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L20-L38
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
sample_fast_rcnn_targets
def sample_fast_rcnn_targets(boxes, gt_boxes, gt_labels): """ Sample some boxes from all proposals for training. #fg is guaranteed to be > 0, because ground truth boxes will be added as proposals. Args: boxes: nx4 region proposals, floatbox gt_boxes: mx4, floatbox gt_labels: m, int32 Returns: A BoxProposals instance. sampled_boxes: tx4 floatbox, the rois sampled_labels: t int64 labels, in [0, #class). Positive means foreground. fg_inds_wrt_gt: #fg indices, each in range [0, m-1]. It contains the matching GT of each foreground roi. """ iou = pairwise_iou(boxes, gt_boxes) # nxm proposal_metrics(iou) # add ground truth as proposals as well boxes = tf.concat([boxes, gt_boxes], axis=0) # (n+m) x 4 iou = tf.concat([iou, tf.eye(tf.shape(gt_boxes)[0])], axis=0) # (n+m) x m # #proposal=n+m from now on def sample_fg_bg(iou): fg_mask = tf.reduce_max(iou, axis=1) >= cfg.FRCNN.FG_THRESH fg_inds = tf.reshape(tf.where(fg_mask), [-1]) num_fg = tf.minimum(int( cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO), tf.size(fg_inds), name='num_fg') fg_inds = tf.random_shuffle(fg_inds)[:num_fg] bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1]) num_bg = tf.minimum( cfg.FRCNN.BATCH_PER_IM - num_fg, tf.size(bg_inds), name='num_bg') bg_inds = tf.random_shuffle(bg_inds)[:num_bg] add_moving_summary(num_fg, num_bg) return fg_inds, bg_inds fg_inds, bg_inds = sample_fg_bg(iou) # fg,bg indices w.r.t proposals best_iou_ind = tf.argmax(iou, axis=1) # #proposal, each in 0~m-1 fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds) # num_fg all_indices = tf.concat([fg_inds, bg_inds], axis=0) # indices w.r.t all n+m proposal boxes ret_boxes = tf.gather(boxes, all_indices) ret_labels = tf.concat( [tf.gather(gt_labels, fg_inds_wrt_gt), tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0) # stop the gradient -- they are meant to be training targets return BoxProposals( tf.stop_gradient(ret_boxes, name='sampled_proposal_boxes'), tf.stop_gradient(ret_labels, name='sampled_labels'), 
tf.stop_gradient(fg_inds_wrt_gt))
python
def sample_fast_rcnn_targets(boxes, gt_boxes, gt_labels): """ Sample some boxes from all proposals for training. #fg is guaranteed to be > 0, because ground truth boxes will be added as proposals. Args: boxes: nx4 region proposals, floatbox gt_boxes: mx4, floatbox gt_labels: m, int32 Returns: A BoxProposals instance. sampled_boxes: tx4 floatbox, the rois sampled_labels: t int64 labels, in [0, #class). Positive means foreground. fg_inds_wrt_gt: #fg indices, each in range [0, m-1]. It contains the matching GT of each foreground roi. """ iou = pairwise_iou(boxes, gt_boxes) # nxm proposal_metrics(iou) # add ground truth as proposals as well boxes = tf.concat([boxes, gt_boxes], axis=0) # (n+m) x 4 iou = tf.concat([iou, tf.eye(tf.shape(gt_boxes)[0])], axis=0) # (n+m) x m # #proposal=n+m from now on def sample_fg_bg(iou): fg_mask = tf.reduce_max(iou, axis=1) >= cfg.FRCNN.FG_THRESH fg_inds = tf.reshape(tf.where(fg_mask), [-1]) num_fg = tf.minimum(int( cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO), tf.size(fg_inds), name='num_fg') fg_inds = tf.random_shuffle(fg_inds)[:num_fg] bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1]) num_bg = tf.minimum( cfg.FRCNN.BATCH_PER_IM - num_fg, tf.size(bg_inds), name='num_bg') bg_inds = tf.random_shuffle(bg_inds)[:num_bg] add_moving_summary(num_fg, num_bg) return fg_inds, bg_inds fg_inds, bg_inds = sample_fg_bg(iou) # fg,bg indices w.r.t proposals best_iou_ind = tf.argmax(iou, axis=1) # #proposal, each in 0~m-1 fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds) # num_fg all_indices = tf.concat([fg_inds, bg_inds], axis=0) # indices w.r.t all n+m proposal boxes ret_boxes = tf.gather(boxes, all_indices) ret_labels = tf.concat( [tf.gather(gt_labels, fg_inds_wrt_gt), tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0) # stop the gradient -- they are meant to be training targets return BoxProposals( tf.stop_gradient(ret_boxes, name='sampled_proposal_boxes'), tf.stop_gradient(ret_labels, name='sampled_labels'), 
tf.stop_gradient(fg_inds_wrt_gt))
[ "def", "sample_fast_rcnn_targets", "(", "boxes", ",", "gt_boxes", ",", "gt_labels", ")", ":", "iou", "=", "pairwise_iou", "(", "boxes", ",", "gt_boxes", ")", "# nxm", "proposal_metrics", "(", "iou", ")", "# add ground truth as proposals as well", "boxes", "=", "tf", ".", "concat", "(", "[", "boxes", ",", "gt_boxes", "]", ",", "axis", "=", "0", ")", "# (n+m) x 4", "iou", "=", "tf", ".", "concat", "(", "[", "iou", ",", "tf", ".", "eye", "(", "tf", ".", "shape", "(", "gt_boxes", ")", "[", "0", "]", ")", "]", ",", "axis", "=", "0", ")", "# (n+m) x m", "# #proposal=n+m from now on", "def", "sample_fg_bg", "(", "iou", ")", ":", "fg_mask", "=", "tf", ".", "reduce_max", "(", "iou", ",", "axis", "=", "1", ")", ">=", "cfg", ".", "FRCNN", ".", "FG_THRESH", "fg_inds", "=", "tf", ".", "reshape", "(", "tf", ".", "where", "(", "fg_mask", ")", ",", "[", "-", "1", "]", ")", "num_fg", "=", "tf", ".", "minimum", "(", "int", "(", "cfg", ".", "FRCNN", ".", "BATCH_PER_IM", "*", "cfg", ".", "FRCNN", ".", "FG_RATIO", ")", ",", "tf", ".", "size", "(", "fg_inds", ")", ",", "name", "=", "'num_fg'", ")", "fg_inds", "=", "tf", ".", "random_shuffle", "(", "fg_inds", ")", "[", ":", "num_fg", "]", "bg_inds", "=", "tf", ".", "reshape", "(", "tf", ".", "where", "(", "tf", ".", "logical_not", "(", "fg_mask", ")", ")", ",", "[", "-", "1", "]", ")", "num_bg", "=", "tf", ".", "minimum", "(", "cfg", ".", "FRCNN", ".", "BATCH_PER_IM", "-", "num_fg", ",", "tf", ".", "size", "(", "bg_inds", ")", ",", "name", "=", "'num_bg'", ")", "bg_inds", "=", "tf", ".", "random_shuffle", "(", "bg_inds", ")", "[", ":", "num_bg", "]", "add_moving_summary", "(", "num_fg", ",", "num_bg", ")", "return", "fg_inds", ",", "bg_inds", "fg_inds", ",", "bg_inds", "=", "sample_fg_bg", "(", "iou", ")", "# fg,bg indices w.r.t proposals", "best_iou_ind", "=", "tf", ".", "argmax", "(", "iou", ",", "axis", "=", "1", ")", "# #proposal, each in 0~m-1", "fg_inds_wrt_gt", "=", "tf", ".", "gather", "(", "best_iou_ind", ",", "fg_inds", ")", 
"# num_fg", "all_indices", "=", "tf", ".", "concat", "(", "[", "fg_inds", ",", "bg_inds", "]", ",", "axis", "=", "0", ")", "# indices w.r.t all n+m proposal boxes", "ret_boxes", "=", "tf", ".", "gather", "(", "boxes", ",", "all_indices", ")", "ret_labels", "=", "tf", ".", "concat", "(", "[", "tf", ".", "gather", "(", "gt_labels", ",", "fg_inds_wrt_gt", ")", ",", "tf", ".", "zeros_like", "(", "bg_inds", ",", "dtype", "=", "tf", ".", "int64", ")", "]", ",", "axis", "=", "0", ")", "# stop the gradient -- they are meant to be training targets", "return", "BoxProposals", "(", "tf", ".", "stop_gradient", "(", "ret_boxes", ",", "name", "=", "'sampled_proposal_boxes'", ")", ",", "tf", ".", "stop_gradient", "(", "ret_labels", ",", "name", "=", "'sampled_labels'", ")", ",", "tf", ".", "stop_gradient", "(", "fg_inds_wrt_gt", ")", ")" ]
Sample some boxes from all proposals for training. #fg is guaranteed to be > 0, because ground truth boxes will be added as proposals. Args: boxes: nx4 region proposals, floatbox gt_boxes: mx4, floatbox gt_labels: m, int32 Returns: A BoxProposals instance. sampled_boxes: tx4 floatbox, the rois sampled_labels: t int64 labels, in [0, #class). Positive means foreground. fg_inds_wrt_gt: #fg indices, each in range [0, m-1]. It contains the matching GT of each foreground roi.
[ "Sample", "some", "boxes", "from", "all", "proposals", "for", "training", ".", "#fg", "is", "guaranteed", "to", "be", ">", "0", "because", "ground", "truth", "boxes", "will", "be", "added", "as", "proposals", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L42-L101
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
fastrcnn_outputs
def fastrcnn_outputs(feature, num_classes, class_agnostic_regression=False): """ Args: feature (any shape): num_classes(int): num_category + 1 class_agnostic_regression (bool): if True, regression to N x 1 x 4 Returns: cls_logits: N x num_class classification logits reg_logits: N x num_classx4 or Nx2x4 if class agnostic """ classification = FullyConnected( 'class', feature, num_classes, kernel_initializer=tf.random_normal_initializer(stddev=0.01)) num_classes_for_box = 1 if class_agnostic_regression else num_classes box_regression = FullyConnected( 'box', feature, num_classes_for_box * 4, kernel_initializer=tf.random_normal_initializer(stddev=0.001)) box_regression = tf.reshape(box_regression, (-1, num_classes_for_box, 4), name='output_box') return classification, box_regression
python
def fastrcnn_outputs(feature, num_classes, class_agnostic_regression=False): """ Args: feature (any shape): num_classes(int): num_category + 1 class_agnostic_regression (bool): if True, regression to N x 1 x 4 Returns: cls_logits: N x num_class classification logits reg_logits: N x num_classx4 or Nx2x4 if class agnostic """ classification = FullyConnected( 'class', feature, num_classes, kernel_initializer=tf.random_normal_initializer(stddev=0.01)) num_classes_for_box = 1 if class_agnostic_regression else num_classes box_regression = FullyConnected( 'box', feature, num_classes_for_box * 4, kernel_initializer=tf.random_normal_initializer(stddev=0.001)) box_regression = tf.reshape(box_regression, (-1, num_classes_for_box, 4), name='output_box') return classification, box_regression
[ "def", "fastrcnn_outputs", "(", "feature", ",", "num_classes", ",", "class_agnostic_regression", "=", "False", ")", ":", "classification", "=", "FullyConnected", "(", "'class'", ",", "feature", ",", "num_classes", ",", "kernel_initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "0.01", ")", ")", "num_classes_for_box", "=", "1", "if", "class_agnostic_regression", "else", "num_classes", "box_regression", "=", "FullyConnected", "(", "'box'", ",", "feature", ",", "num_classes_for_box", "*", "4", ",", "kernel_initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "0.001", ")", ")", "box_regression", "=", "tf", ".", "reshape", "(", "box_regression", ",", "(", "-", "1", ",", "num_classes_for_box", ",", "4", ")", ",", "name", "=", "'output_box'", ")", "return", "classification", ",", "box_regression" ]
Args: feature (any shape): num_classes(int): num_category + 1 class_agnostic_regression (bool): if True, regression to N x 1 x 4 Returns: cls_logits: N x num_class classification logits reg_logits: N x num_classx4 or Nx2x4 if class agnostic
[ "Args", ":", "feature", "(", "any", "shape", ")", ":", "num_classes", "(", "int", ")", ":", "num_category", "+", "1", "class_agnostic_regression", "(", "bool", ")", ":", "if", "True", "regression", "to", "N", "x", "1", "x", "4" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L105-L124
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
fastrcnn_losses
def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits): """ Args: labels: n, label_logits: nxC fg_boxes: nfgx4, encoded fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic Returns: label_loss, box_loss """ label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=label_logits) label_loss = tf.reduce_mean(label_loss, name='label_loss') fg_inds = tf.where(labels > 0)[:, 0] fg_labels = tf.gather(labels, fg_inds) num_fg = tf.size(fg_inds, out_type=tf.int64) empty_fg = tf.equal(num_fg, 0) if int(fg_box_logits.shape[1]) > 1: indices = tf.stack( [tf.range(num_fg), fg_labels], axis=1) # #fgx2 fg_box_logits = tf.gather_nd(fg_box_logits, indices) else: fg_box_logits = tf.reshape(fg_box_logits, [-1, 4]) with tf.name_scope('label_metrics'), tf.device('/cpu:0'): prediction = tf.argmax(label_logits, axis=1, name='label_prediction') correct = tf.cast(tf.equal(prediction, labels), tf.float32) # boolean/integer gather is unavailable on GPU accuracy = tf.reduce_mean(correct, name='accuracy') fg_label_pred = tf.argmax(tf.gather(label_logits, fg_inds), axis=1) num_zero = tf.reduce_sum(tf.cast(tf.equal(fg_label_pred, 0), tf.int64), name='num_zero') false_negative = tf.where( empty_fg, 0., tf.cast(tf.truediv(num_zero, num_fg), tf.float32), name='false_negative') fg_accuracy = tf.where( empty_fg, 0., tf.reduce_mean(tf.gather(correct, fg_inds)), name='fg_accuracy') box_loss = tf.losses.huber_loss( fg_boxes, fg_box_logits, reduction=tf.losses.Reduction.SUM) box_loss = tf.truediv( box_loss, tf.cast(tf.shape(labels)[0], tf.float32), name='box_loss') add_moving_summary(label_loss, box_loss, accuracy, fg_accuracy, false_negative, tf.cast(num_fg, tf.float32, name='num_fg_label')) return [label_loss, box_loss]
python
def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits): """ Args: labels: n, label_logits: nxC fg_boxes: nfgx4, encoded fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic Returns: label_loss, box_loss """ label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=label_logits) label_loss = tf.reduce_mean(label_loss, name='label_loss') fg_inds = tf.where(labels > 0)[:, 0] fg_labels = tf.gather(labels, fg_inds) num_fg = tf.size(fg_inds, out_type=tf.int64) empty_fg = tf.equal(num_fg, 0) if int(fg_box_logits.shape[1]) > 1: indices = tf.stack( [tf.range(num_fg), fg_labels], axis=1) # #fgx2 fg_box_logits = tf.gather_nd(fg_box_logits, indices) else: fg_box_logits = tf.reshape(fg_box_logits, [-1, 4]) with tf.name_scope('label_metrics'), tf.device('/cpu:0'): prediction = tf.argmax(label_logits, axis=1, name='label_prediction') correct = tf.cast(tf.equal(prediction, labels), tf.float32) # boolean/integer gather is unavailable on GPU accuracy = tf.reduce_mean(correct, name='accuracy') fg_label_pred = tf.argmax(tf.gather(label_logits, fg_inds), axis=1) num_zero = tf.reduce_sum(tf.cast(tf.equal(fg_label_pred, 0), tf.int64), name='num_zero') false_negative = tf.where( empty_fg, 0., tf.cast(tf.truediv(num_zero, num_fg), tf.float32), name='false_negative') fg_accuracy = tf.where( empty_fg, 0., tf.reduce_mean(tf.gather(correct, fg_inds)), name='fg_accuracy') box_loss = tf.losses.huber_loss( fg_boxes, fg_box_logits, reduction=tf.losses.Reduction.SUM) box_loss = tf.truediv( box_loss, tf.cast(tf.shape(labels)[0], tf.float32), name='box_loss') add_moving_summary(label_loss, box_loss, accuracy, fg_accuracy, false_negative, tf.cast(num_fg, tf.float32, name='num_fg_label')) return [label_loss, box_loss]
[ "def", "fastrcnn_losses", "(", "labels", ",", "label_logits", ",", "fg_boxes", ",", "fg_box_logits", ")", ":", "label_loss", "=", "tf", ".", "nn", ".", "sparse_softmax_cross_entropy_with_logits", "(", "labels", "=", "labels", ",", "logits", "=", "label_logits", ")", "label_loss", "=", "tf", ".", "reduce_mean", "(", "label_loss", ",", "name", "=", "'label_loss'", ")", "fg_inds", "=", "tf", ".", "where", "(", "labels", ">", "0", ")", "[", ":", ",", "0", "]", "fg_labels", "=", "tf", ".", "gather", "(", "labels", ",", "fg_inds", ")", "num_fg", "=", "tf", ".", "size", "(", "fg_inds", ",", "out_type", "=", "tf", ".", "int64", ")", "empty_fg", "=", "tf", ".", "equal", "(", "num_fg", ",", "0", ")", "if", "int", "(", "fg_box_logits", ".", "shape", "[", "1", "]", ")", ">", "1", ":", "indices", "=", "tf", ".", "stack", "(", "[", "tf", ".", "range", "(", "num_fg", ")", ",", "fg_labels", "]", ",", "axis", "=", "1", ")", "# #fgx2", "fg_box_logits", "=", "tf", ".", "gather_nd", "(", "fg_box_logits", ",", "indices", ")", "else", ":", "fg_box_logits", "=", "tf", ".", "reshape", "(", "fg_box_logits", ",", "[", "-", "1", ",", "4", "]", ")", "with", "tf", ".", "name_scope", "(", "'label_metrics'", ")", ",", "tf", ".", "device", "(", "'/cpu:0'", ")", ":", "prediction", "=", "tf", ".", "argmax", "(", "label_logits", ",", "axis", "=", "1", ",", "name", "=", "'label_prediction'", ")", "correct", "=", "tf", ".", "cast", "(", "tf", ".", "equal", "(", "prediction", ",", "labels", ")", ",", "tf", ".", "float32", ")", "# boolean/integer gather is unavailable on GPU", "accuracy", "=", "tf", ".", "reduce_mean", "(", "correct", ",", "name", "=", "'accuracy'", ")", "fg_label_pred", "=", "tf", ".", "argmax", "(", "tf", ".", "gather", "(", "label_logits", ",", "fg_inds", ")", ",", "axis", "=", "1", ")", "num_zero", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "cast", "(", "tf", ".", "equal", "(", "fg_label_pred", ",", "0", ")", ",", "tf", ".", "int64", ")", ",", "name", "=", "'num_zero'", ")", 
"false_negative", "=", "tf", ".", "where", "(", "empty_fg", ",", "0.", ",", "tf", ".", "cast", "(", "tf", ".", "truediv", "(", "num_zero", ",", "num_fg", ")", ",", "tf", ".", "float32", ")", ",", "name", "=", "'false_negative'", ")", "fg_accuracy", "=", "tf", ".", "where", "(", "empty_fg", ",", "0.", ",", "tf", ".", "reduce_mean", "(", "tf", ".", "gather", "(", "correct", ",", "fg_inds", ")", ")", ",", "name", "=", "'fg_accuracy'", ")", "box_loss", "=", "tf", ".", "losses", ".", "huber_loss", "(", "fg_boxes", ",", "fg_box_logits", ",", "reduction", "=", "tf", ".", "losses", ".", "Reduction", ".", "SUM", ")", "box_loss", "=", "tf", ".", "truediv", "(", "box_loss", ",", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "labels", ")", "[", "0", "]", ",", "tf", ".", "float32", ")", ",", "name", "=", "'box_loss'", ")", "add_moving_summary", "(", "label_loss", ",", "box_loss", ",", "accuracy", ",", "fg_accuracy", ",", "false_negative", ",", "tf", ".", "cast", "(", "num_fg", ",", "tf", ".", "float32", ",", "name", "=", "'num_fg_label'", ")", ")", "return", "[", "label_loss", ",", "box_loss", "]" ]
Args: labels: n, label_logits: nxC fg_boxes: nfgx4, encoded fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic Returns: label_loss, box_loss
[ "Args", ":", "labels", ":", "n", "label_logits", ":", "nxC", "fg_boxes", ":", "nfgx4", "encoded", "fg_box_logits", ":", "nfgxCx4", "or", "nfgx1x4", "if", "class", "agnostic" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L128-L172
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
fastrcnn_predictions
def fastrcnn_predictions(boxes, scores): """ Generate final results from predictions of all proposals. Args: boxes: n#classx4 floatbox in float32 scores: nx#class Returns: boxes: Kx4 scores: K labels: K """ assert boxes.shape[1] == cfg.DATA.NUM_CLASS assert scores.shape[1] == cfg.DATA.NUM_CLASS boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :] # #catxnx4 scores = tf.transpose(scores[:, 1:], [1, 0]) # #catxn def f(X): """ prob: n probabilities box: nx4 boxes Returns: n boolean, the selection """ prob, box = X output_shape = tf.shape(prob, out_type=tf.int64) # filter by score threshold ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1]) prob = tf.gather(prob, ids) box = tf.gather(box, ids) # NMS within each class selection = tf.image.non_max_suppression( box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH) selection = tf.gather(ids, selection) if get_tf_version_tuple() >= (1, 13): sorted_selection = tf.sort(selection, direction='ASCENDING') mask = tf.sparse.SparseTensor(indices=tf.expand_dims(sorted_selection, 1), values=tf.ones_like(sorted_selection, dtype=tf.bool), dense_shape=output_shape) mask = tf.sparse.to_dense(mask, default_value=False) else: # this function is deprecated by TF sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0] mask = tf.sparse_to_dense( sparse_indices=sorted_selection, output_shape=output_shape, sparse_values=True, default_value=False) return mask # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750 buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)] masks = tf.map_fn(f, (scores, boxes), dtype=tf.bool, parallel_iterations=1 if buggy_tf else 10) # #cat x N selected_indices = tf.where(masks) # #selection x 2, each is (cat_id, box_id) scores = tf.boolean_mask(scores, masks) # filter again by sorting scores topk_scores, topk_indices = tf.nn.top_k( scores, tf.minimum(cfg.TEST.RESULTS_PER_IM, tf.size(scores)), sorted=False) filtered_selection = 
tf.gather(selected_indices, topk_indices) cat_ids, box_ids = tf.unstack(filtered_selection, axis=1) final_scores = tf.identity(topk_scores, name='scores') final_labels = tf.add(cat_ids, 1, name='labels') final_ids = tf.stack([cat_ids, box_ids], axis=1, name='all_ids') final_boxes = tf.gather_nd(boxes, final_ids, name='boxes') return final_boxes, final_scores, final_labels
python
def fastrcnn_predictions(boxes, scores): """ Generate final results from predictions of all proposals. Args: boxes: n#classx4 floatbox in float32 scores: nx#class Returns: boxes: Kx4 scores: K labels: K """ assert boxes.shape[1] == cfg.DATA.NUM_CLASS assert scores.shape[1] == cfg.DATA.NUM_CLASS boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :] # #catxnx4 scores = tf.transpose(scores[:, 1:], [1, 0]) # #catxn def f(X): """ prob: n probabilities box: nx4 boxes Returns: n boolean, the selection """ prob, box = X output_shape = tf.shape(prob, out_type=tf.int64) # filter by score threshold ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1]) prob = tf.gather(prob, ids) box = tf.gather(box, ids) # NMS within each class selection = tf.image.non_max_suppression( box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH) selection = tf.gather(ids, selection) if get_tf_version_tuple() >= (1, 13): sorted_selection = tf.sort(selection, direction='ASCENDING') mask = tf.sparse.SparseTensor(indices=tf.expand_dims(sorted_selection, 1), values=tf.ones_like(sorted_selection, dtype=tf.bool), dense_shape=output_shape) mask = tf.sparse.to_dense(mask, default_value=False) else: # this function is deprecated by TF sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0] mask = tf.sparse_to_dense( sparse_indices=sorted_selection, output_shape=output_shape, sparse_values=True, default_value=False) return mask # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750 buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)] masks = tf.map_fn(f, (scores, boxes), dtype=tf.bool, parallel_iterations=1 if buggy_tf else 10) # #cat x N selected_indices = tf.where(masks) # #selection x 2, each is (cat_id, box_id) scores = tf.boolean_mask(scores, masks) # filter again by sorting scores topk_scores, topk_indices = tf.nn.top_k( scores, tf.minimum(cfg.TEST.RESULTS_PER_IM, tf.size(scores)), sorted=False) filtered_selection = 
tf.gather(selected_indices, topk_indices) cat_ids, box_ids = tf.unstack(filtered_selection, axis=1) final_scores = tf.identity(topk_scores, name='scores') final_labels = tf.add(cat_ids, 1, name='labels') final_ids = tf.stack([cat_ids, box_ids], axis=1, name='all_ids') final_boxes = tf.gather_nd(boxes, final_ids, name='boxes') return final_boxes, final_scores, final_labels
[ "def", "fastrcnn_predictions", "(", "boxes", ",", "scores", ")", ":", "assert", "boxes", ".", "shape", "[", "1", "]", "==", "cfg", ".", "DATA", ".", "NUM_CLASS", "assert", "scores", ".", "shape", "[", "1", "]", "==", "cfg", ".", "DATA", ".", "NUM_CLASS", "boxes", "=", "tf", ".", "transpose", "(", "boxes", ",", "[", "1", ",", "0", ",", "2", "]", ")", "[", "1", ":", ",", ":", ",", ":", "]", "# #catxnx4", "scores", "=", "tf", ".", "transpose", "(", "scores", "[", ":", ",", "1", ":", "]", ",", "[", "1", ",", "0", "]", ")", "# #catxn", "def", "f", "(", "X", ")", ":", "\"\"\"\n prob: n probabilities\n box: nx4 boxes\n\n Returns: n boolean, the selection\n \"\"\"", "prob", ",", "box", "=", "X", "output_shape", "=", "tf", ".", "shape", "(", "prob", ",", "out_type", "=", "tf", ".", "int64", ")", "# filter by score threshold", "ids", "=", "tf", ".", "reshape", "(", "tf", ".", "where", "(", "prob", ">", "cfg", ".", "TEST", ".", "RESULT_SCORE_THRESH", ")", ",", "[", "-", "1", "]", ")", "prob", "=", "tf", ".", "gather", "(", "prob", ",", "ids", ")", "box", "=", "tf", ".", "gather", "(", "box", ",", "ids", ")", "# NMS within each class", "selection", "=", "tf", ".", "image", ".", "non_max_suppression", "(", "box", ",", "prob", ",", "cfg", ".", "TEST", ".", "RESULTS_PER_IM", ",", "cfg", ".", "TEST", ".", "FRCNN_NMS_THRESH", ")", "selection", "=", "tf", ".", "gather", "(", "ids", ",", "selection", ")", "if", "get_tf_version_tuple", "(", ")", ">=", "(", "1", ",", "13", ")", ":", "sorted_selection", "=", "tf", ".", "sort", "(", "selection", ",", "direction", "=", "'ASCENDING'", ")", "mask", "=", "tf", ".", "sparse", ".", "SparseTensor", "(", "indices", "=", "tf", ".", "expand_dims", "(", "sorted_selection", ",", "1", ")", ",", "values", "=", "tf", ".", "ones_like", "(", "sorted_selection", ",", "dtype", "=", "tf", ".", "bool", ")", ",", "dense_shape", "=", "output_shape", ")", "mask", "=", "tf", ".", "sparse", ".", "to_dense", "(", "mask", ",", "default_value", "=", "False", ")", 
"else", ":", "# this function is deprecated by TF", "sorted_selection", "=", "-", "tf", ".", "nn", ".", "top_k", "(", "-", "selection", ",", "k", "=", "tf", ".", "size", "(", "selection", ")", ")", "[", "0", "]", "mask", "=", "tf", ".", "sparse_to_dense", "(", "sparse_indices", "=", "sorted_selection", ",", "output_shape", "=", "output_shape", ",", "sparse_values", "=", "True", ",", "default_value", "=", "False", ")", "return", "mask", "# TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750", "buggy_tf", "=", "get_tf_version_tuple", "(", ")", "in", "[", "(", "1", ",", "11", ")", ",", "(", "1", ",", "12", ")", "]", "masks", "=", "tf", ".", "map_fn", "(", "f", ",", "(", "scores", ",", "boxes", ")", ",", "dtype", "=", "tf", ".", "bool", ",", "parallel_iterations", "=", "1", "if", "buggy_tf", "else", "10", ")", "# #cat x N", "selected_indices", "=", "tf", ".", "where", "(", "masks", ")", "# #selection x 2, each is (cat_id, box_id)", "scores", "=", "tf", ".", "boolean_mask", "(", "scores", ",", "masks", ")", "# filter again by sorting scores", "topk_scores", ",", "topk_indices", "=", "tf", ".", "nn", ".", "top_k", "(", "scores", ",", "tf", ".", "minimum", "(", "cfg", ".", "TEST", ".", "RESULTS_PER_IM", ",", "tf", ".", "size", "(", "scores", ")", ")", ",", "sorted", "=", "False", ")", "filtered_selection", "=", "tf", ".", "gather", "(", "selected_indices", ",", "topk_indices", ")", "cat_ids", ",", "box_ids", "=", "tf", ".", "unstack", "(", "filtered_selection", ",", "axis", "=", "1", ")", "final_scores", "=", "tf", ".", "identity", "(", "topk_scores", ",", "name", "=", "'scores'", ")", "final_labels", "=", "tf", ".", "add", "(", "cat_ids", ",", "1", ",", "name", "=", "'labels'", ")", "final_ids", "=", "tf", ".", "stack", "(", "[", "cat_ids", ",", "box_ids", "]", ",", "axis", "=", "1", ",", "name", "=", "'all_ids'", ")", "final_boxes", "=", "tf", ".", "gather_nd", "(", "boxes", ",", "final_ids", ",", "name", "=", "'boxes'", ")", "return", 
"final_boxes", ",", "final_scores", ",", "final_labels" ]
Generate final results from predictions of all proposals. Args: boxes: n#classx4 floatbox in float32 scores: nx#class Returns: boxes: Kx4 scores: K labels: K
[ "Generate", "final", "results", "from", "predictions", "of", "all", "proposals", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L176-L247
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
fastrcnn_2fc_head
def fastrcnn_2fc_head(feature): """ Args: feature (any shape): Returns: 2D head feature """ dim = cfg.FPN.FRCNN_FC_HEAD_DIM init = tf.variance_scaling_initializer() hidden = FullyConnected('fc6', feature, dim, kernel_initializer=init, activation=tf.nn.relu) hidden = FullyConnected('fc7', hidden, dim, kernel_initializer=init, activation=tf.nn.relu) return hidden
python
def fastrcnn_2fc_head(feature): """ Args: feature (any shape): Returns: 2D head feature """ dim = cfg.FPN.FRCNN_FC_HEAD_DIM init = tf.variance_scaling_initializer() hidden = FullyConnected('fc6', feature, dim, kernel_initializer=init, activation=tf.nn.relu) hidden = FullyConnected('fc7', hidden, dim, kernel_initializer=init, activation=tf.nn.relu) return hidden
[ "def", "fastrcnn_2fc_head", "(", "feature", ")", ":", "dim", "=", "cfg", ".", "FPN", ".", "FRCNN_FC_HEAD_DIM", "init", "=", "tf", ".", "variance_scaling_initializer", "(", ")", "hidden", "=", "FullyConnected", "(", "'fc6'", ",", "feature", ",", "dim", ",", "kernel_initializer", "=", "init", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", "hidden", "=", "FullyConnected", "(", "'fc7'", ",", "hidden", ",", "dim", ",", "kernel_initializer", "=", "init", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", "return", "hidden" ]
Args: feature (any shape): Returns: 2D head feature
[ "Args", ":", "feature", "(", "any", "shape", ")", ":" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L256-L268
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
fastrcnn_Xconv1fc_head
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None): """ Args: feature (NCHW): num_classes(int): num_category + 1 num_convs (int): number of conv layers norm (str or None): either None or 'GN' Returns: 2D head feature """ assert norm in [None, 'GN'], norm l = feature with argscope(Conv2D, data_format='channels_first', kernel_initializer=tf.variance_scaling_initializer( scale=2.0, mode='fan_out', distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')): for k in range(num_convs): l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu) if norm is not None: l = GroupNorm('gn{}'.format(k), l) l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM, kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu) return l
python
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None): """ Args: feature (NCHW): num_classes(int): num_category + 1 num_convs (int): number of conv layers norm (str or None): either None or 'GN' Returns: 2D head feature """ assert norm in [None, 'GN'], norm l = feature with argscope(Conv2D, data_format='channels_first', kernel_initializer=tf.variance_scaling_initializer( scale=2.0, mode='fan_out', distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')): for k in range(num_convs): l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu) if norm is not None: l = GroupNorm('gn{}'.format(k), l) l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM, kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu) return l
[ "def", "fastrcnn_Xconv1fc_head", "(", "feature", ",", "num_convs", ",", "norm", "=", "None", ")", ":", "assert", "norm", "in", "[", "None", ",", "'GN'", "]", ",", "norm", "l", "=", "feature", "with", "argscope", "(", "Conv2D", ",", "data_format", "=", "'channels_first'", ",", "kernel_initializer", "=", "tf", ".", "variance_scaling_initializer", "(", "scale", "=", "2.0", ",", "mode", "=", "'fan_out'", ",", "distribution", "=", "'untruncated_normal'", "if", "get_tf_version_tuple", "(", ")", ">=", "(", "1", ",", "12", ")", "else", "'normal'", ")", ")", ":", "for", "k", "in", "range", "(", "num_convs", ")", ":", "l", "=", "Conv2D", "(", "'conv{}'", ".", "format", "(", "k", ")", ",", "l", ",", "cfg", ".", "FPN", ".", "FRCNN_CONV_HEAD_DIM", ",", "3", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", "if", "norm", "is", "not", "None", ":", "l", "=", "GroupNorm", "(", "'gn{}'", ".", "format", "(", "k", ")", ",", "l", ")", "l", "=", "FullyConnected", "(", "'fc'", ",", "l", ",", "cfg", ".", "FPN", ".", "FRCNN_FC_HEAD_DIM", ",", "kernel_initializer", "=", "tf", ".", "variance_scaling_initializer", "(", ")", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ")", "return", "l" ]
Args: feature (NCHW): num_classes(int): num_category + 1 num_convs (int): number of conv layers norm (str or None): either None or 'GN' Returns: 2D head feature
[ "Args", ":", "feature", "(", "NCHW", ")", ":", "num_classes", "(", "int", ")", ":", "num_category", "+", "1", "num_convs", "(", "int", ")", ":", "number", "of", "conv", "layers", "norm", "(", "str", "or", "None", ")", ":", "either", "None", "or", "GN" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L272-L295
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
FastRCNNHead.fg_box_logits
def fg_box_logits(self): """ Returns: #fg x ? x 4 """ return tf.gather(self.box_logits, self.proposals.fg_inds(), name='fg_box_logits')
python
def fg_box_logits(self): """ Returns: #fg x ? x 4 """ return tf.gather(self.box_logits, self.proposals.fg_inds(), name='fg_box_logits')
[ "def", "fg_box_logits", "(", "self", ")", ":", "return", "tf", ".", "gather", "(", "self", ".", "box_logits", ",", "self", ".", "proposals", ".", "fg_inds", "(", ")", ",", "name", "=", "'fg_box_logits'", ")" ]
Returns: #fg x ? x 4
[ "Returns", ":", "#fg", "x", "?", "x", "4" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L358-L360
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
FastRCNNHead.decoded_output_boxes
def decoded_output_boxes(self): """ Returns: N x #class x 4 """ anchors = tf.tile(tf.expand_dims(self.proposals.boxes, 1), [1, cfg.DATA.NUM_CLASS, 1]) # N x #class x 4 decoded_boxes = decode_bbox_target( self.box_logits / self.bbox_regression_weights, anchors ) return decoded_boxes
python
def decoded_output_boxes(self): """ Returns: N x #class x 4 """ anchors = tf.tile(tf.expand_dims(self.proposals.boxes, 1), [1, cfg.DATA.NUM_CLASS, 1]) # N x #class x 4 decoded_boxes = decode_bbox_target( self.box_logits / self.bbox_regression_weights, anchors ) return decoded_boxes
[ "def", "decoded_output_boxes", "(", "self", ")", ":", "anchors", "=", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "self", ".", "proposals", ".", "boxes", ",", "1", ")", ",", "[", "1", ",", "cfg", ".", "DATA", ".", "NUM_CLASS", ",", "1", "]", ")", "# N x #class x 4", "decoded_boxes", "=", "decode_bbox_target", "(", "self", ".", "box_logits", "/", "self", ".", "bbox_regression_weights", ",", "anchors", ")", "return", "decoded_boxes" ]
Returns: N x #class x 4
[ "Returns", ":", "N", "x", "#class", "x", "4" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L373-L381
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
FastRCNNHead.decoded_output_boxes_class_agnostic
def decoded_output_boxes_class_agnostic(self): """ Returns: Nx4 """ assert self._bbox_class_agnostic box_logits = tf.reshape(self.box_logits, [-1, 4]) decoded = decode_bbox_target( box_logits / self.bbox_regression_weights, self.proposals.boxes ) return decoded
python
def decoded_output_boxes_class_agnostic(self): """ Returns: Nx4 """ assert self._bbox_class_agnostic box_logits = tf.reshape(self.box_logits, [-1, 4]) decoded = decode_bbox_target( box_logits / self.bbox_regression_weights, self.proposals.boxes ) return decoded
[ "def", "decoded_output_boxes_class_agnostic", "(", "self", ")", ":", "assert", "self", ".", "_bbox_class_agnostic", "box_logits", "=", "tf", ".", "reshape", "(", "self", ".", "box_logits", ",", "[", "-", "1", ",", "4", "]", ")", "decoded", "=", "decode_bbox_target", "(", "box_logits", "/", "self", ".", "bbox_regression_weights", ",", "self", ".", "proposals", ".", "boxes", ")", "return", "decoded" ]
Returns: Nx4
[ "Returns", ":", "Nx4" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L408-L416
train
tensorpack/tensorpack
examples/FasterRCNN/model_frcnn.py
FastRCNNHead.output_scores
def output_scores(self, name=None): """ Returns: N x #class scores, summed to one for each box.""" return tf.nn.softmax(self.label_logits, name=name)
python
def output_scores(self, name=None): """ Returns: N x #class scores, summed to one for each box.""" return tf.nn.softmax(self.label_logits, name=name)
[ "def", "output_scores", "(", "self", ",", "name", "=", "None", ")", ":", "return", "tf", ".", "nn", ".", "softmax", "(", "self", ".", "label_logits", ",", "name", "=", "name", ")" ]
Returns: N x #class scores, summed to one for each box.
[ "Returns", ":", "N", "x", "#class", "scores", "summed", "to", "one", "for", "each", "box", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_frcnn.py#L419-L421
train
tensorpack/tensorpack
examples/A3C-Gym/train-atari.py
MySimulatorMaster._on_state
def _on_state(self, state, client): """ Launch forward prediction for the new state given by some client. """ def cb(outputs): try: distrib, value = outputs.result() except CancelledError: logger.info("Client {} cancelled.".format(client.ident)) return assert np.all(np.isfinite(distrib)), distrib action = np.random.choice(len(distrib), p=distrib) client.memory.append(TransitionExperience( state, action, reward=None, value=value, prob=distrib[action])) self.send_queue.put([client.ident, dumps(action)]) self.async_predictor.put_task([state], cb)
python
def _on_state(self, state, client): """ Launch forward prediction for the new state given by some client. """ def cb(outputs): try: distrib, value = outputs.result() except CancelledError: logger.info("Client {} cancelled.".format(client.ident)) return assert np.all(np.isfinite(distrib)), distrib action = np.random.choice(len(distrib), p=distrib) client.memory.append(TransitionExperience( state, action, reward=None, value=value, prob=distrib[action])) self.send_queue.put([client.ident, dumps(action)]) self.async_predictor.put_task([state], cb)
[ "def", "_on_state", "(", "self", ",", "state", ",", "client", ")", ":", "def", "cb", "(", "outputs", ")", ":", "try", ":", "distrib", ",", "value", "=", "outputs", ".", "result", "(", ")", "except", "CancelledError", ":", "logger", ".", "info", "(", "\"Client {} cancelled.\"", ".", "format", "(", "client", ".", "ident", ")", ")", "return", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "distrib", ")", ")", ",", "distrib", "action", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "distrib", ")", ",", "p", "=", "distrib", ")", "client", ".", "memory", ".", "append", "(", "TransitionExperience", "(", "state", ",", "action", ",", "reward", "=", "None", ",", "value", "=", "value", ",", "prob", "=", "distrib", "[", "action", "]", ")", ")", "self", ".", "send_queue", ".", "put", "(", "[", "client", ".", "ident", ",", "dumps", "(", "action", ")", "]", ")", "self", ".", "async_predictor", ".", "put_task", "(", "[", "state", "]", ",", "cb", ")" ]
Launch forward prediction for the new state given by some client.
[ "Launch", "forward", "prediction", "for", "the", "new", "state", "given", "by", "some", "client", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/A3C-Gym/train-atari.py#L159-L174
train
tensorpack/tensorpack
examples/A3C-Gym/train-atari.py
MySimulatorMaster._process_msg
def _process_msg(self, client, state, reward, isOver): """ Process a message sent from some client. """ # in the first message, only state is valid, # reward&isOver should be discarded if len(client.memory) > 0: client.memory[-1].reward = reward if isOver: # should clear client's memory and put to queue self._parse_memory(0, client, True) else: if len(client.memory) == LOCAL_TIME_MAX + 1: R = client.memory[-1].value self._parse_memory(R, client, False) # feed state and return action self._on_state(state, client)
python
def _process_msg(self, client, state, reward, isOver): """ Process a message sent from some client. """ # in the first message, only state is valid, # reward&isOver should be discarded if len(client.memory) > 0: client.memory[-1].reward = reward if isOver: # should clear client's memory and put to queue self._parse_memory(0, client, True) else: if len(client.memory) == LOCAL_TIME_MAX + 1: R = client.memory[-1].value self._parse_memory(R, client, False) # feed state and return action self._on_state(state, client)
[ "def", "_process_msg", "(", "self", ",", "client", ",", "state", ",", "reward", ",", "isOver", ")", ":", "# in the first message, only state is valid,", "# reward&isOver should be discarded", "if", "len", "(", "client", ".", "memory", ")", ">", "0", ":", "client", ".", "memory", "[", "-", "1", "]", ".", "reward", "=", "reward", "if", "isOver", ":", "# should clear client's memory and put to queue", "self", ".", "_parse_memory", "(", "0", ",", "client", ",", "True", ")", "else", ":", "if", "len", "(", "client", ".", "memory", ")", "==", "LOCAL_TIME_MAX", "+", "1", ":", "R", "=", "client", ".", "memory", "[", "-", "1", "]", ".", "value", "self", ".", "_parse_memory", "(", "R", ",", "client", ",", "False", ")", "# feed state and return action", "self", ".", "_on_state", "(", "state", ",", "client", ")" ]
Process a message sent from some client.
[ "Process", "a", "message", "sent", "from", "some", "client", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/A3C-Gym/train-atari.py#L176-L192
train
tensorpack/tensorpack
examples/GAN/ConditionalGAN-mnist.py
Model.discriminator
def discriminator(self, imgs, y): """ return a (b, 1) logits""" yv = y y = tf.reshape(y, [-1, 1, 1, 10]) with argscope(Conv2D, kernel_size=5, strides=1): l = (LinearWrap(imgs) .ConcatWith(tf.tile(y, [1, 28, 28, 1]), 3) .Conv2D('conv0', 11) .tf.nn.leaky_relu() .ConcatWith(tf.tile(y, [1, 14, 14, 1]), 3) .Conv2D('conv1', 74) .BatchNorm('bn1') .tf.nn.leaky_relu() .apply(batch_flatten) .ConcatWith(yv, 1) .FullyConnected('fc1', 1024, activation=tf.identity) .BatchNorm('bn2') .tf.nn.leaky_relu() .ConcatWith(yv, 1) .FullyConnected('fct', 1, activation=tf.identity)()) return l
python
def discriminator(self, imgs, y): """ return a (b, 1) logits""" yv = y y = tf.reshape(y, [-1, 1, 1, 10]) with argscope(Conv2D, kernel_size=5, strides=1): l = (LinearWrap(imgs) .ConcatWith(tf.tile(y, [1, 28, 28, 1]), 3) .Conv2D('conv0', 11) .tf.nn.leaky_relu() .ConcatWith(tf.tile(y, [1, 14, 14, 1]), 3) .Conv2D('conv1', 74) .BatchNorm('bn1') .tf.nn.leaky_relu() .apply(batch_flatten) .ConcatWith(yv, 1) .FullyConnected('fc1', 1024, activation=tf.identity) .BatchNorm('bn2') .tf.nn.leaky_relu() .ConcatWith(yv, 1) .FullyConnected('fct', 1, activation=tf.identity)()) return l
[ "def", "discriminator", "(", "self", ",", "imgs", ",", "y", ")", ":", "yv", "=", "y", "y", "=", "tf", ".", "reshape", "(", "y", ",", "[", "-", "1", ",", "1", ",", "1", ",", "10", "]", ")", "with", "argscope", "(", "Conv2D", ",", "kernel_size", "=", "5", ",", "strides", "=", "1", ")", ":", "l", "=", "(", "LinearWrap", "(", "imgs", ")", ".", "ConcatWith", "(", "tf", ".", "tile", "(", "y", ",", "[", "1", ",", "28", ",", "28", ",", "1", "]", ")", ",", "3", ")", ".", "Conv2D", "(", "'conv0'", ",", "11", ")", ".", "tf", ".", "nn", ".", "leaky_relu", "(", ")", ".", "ConcatWith", "(", "tf", ".", "tile", "(", "y", ",", "[", "1", ",", "14", ",", "14", ",", "1", "]", ")", ",", "3", ")", ".", "Conv2D", "(", "'conv1'", ",", "74", ")", ".", "BatchNorm", "(", "'bn1'", ")", ".", "tf", ".", "nn", ".", "leaky_relu", "(", ")", ".", "apply", "(", "batch_flatten", ")", ".", "ConcatWith", "(", "yv", ",", "1", ")", ".", "FullyConnected", "(", "'fc1'", ",", "1024", ",", "activation", "=", "tf", ".", "identity", ")", ".", "BatchNorm", "(", "'bn2'", ")", ".", "tf", ".", "nn", ".", "leaky_relu", "(", ")", ".", "ConcatWith", "(", "yv", ",", "1", ")", ".", "FullyConnected", "(", "'fct'", ",", "1", ",", "activation", "=", "tf", ".", "identity", ")", "(", ")", ")", "return", "l" ]
return a (b, 1) logits
[ "return", "a", "(", "b", "1", ")", "logits" ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/GAN/ConditionalGAN-mnist.py#L62-L85
train
tensorpack/tensorpack
tensorpack/tfutils/export.py
ModelExporter.export_compact
def export_compact(self, filename, optimize=True, toco_compatible=False): """Create a self-contained inference-only graph and write final graph (in pb format) to disk. Args: filename (str): path to the output graph optimize (bool): whether to use TensorFlow's `optimize_for_inference` to prune and optimize the graph. This does not work on all types of graphs. toco_compatible (bool): See TensorFlow's `optimize_for_inference <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py>`_ for details. Only available after TF 1.8. """ if toco_compatible: assert optimize, "toco_compatible is only effective when optimize=True!" self.graph = self.config._maybe_create_graph() with self.graph.as_default(): input = PlaceholderInput() input.setup(self.config.input_signature) with PredictTowerContext(''): self.config.tower_func(*input.get_input_tensors()) input_tensors = get_tensors_by_names(self.config.input_names) output_tensors = get_tensors_by_names(self.config.output_names) self.config.session_init._setup_graph() # we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True)) self.config.session_init._run_init(sess) dtypes = [n.dtype for n in input_tensors] # freeze variables to constants frozen_graph_def = graph_util.convert_variables_to_constants( sess, self.graph.as_graph_def(), [n.name[:-2] for n in output_tensors], variable_names_whitelist=None, variable_names_blacklist=None) # prune unused nodes from graph if optimize: toco_args = () if get_tf_version_tuple() < (1, 8) else (toco_compatible, ) frozen_graph_def = optimize_for_inference_lib.optimize_for_inference( frozen_graph_def, [n.name[:-2] for n in input_tensors], [n.name[:-2] for n in output_tensors], [dtype.as_datatype_enum for dtype in dtypes], *toco_args) with gfile.FastGFile(filename, "wb") as f: f.write(frozen_graph_def.SerializeToString()) logger.info("Output 
graph written to {}.".format(filename))
python
def export_compact(self, filename, optimize=True, toco_compatible=False): """Create a self-contained inference-only graph and write final graph (in pb format) to disk. Args: filename (str): path to the output graph optimize (bool): whether to use TensorFlow's `optimize_for_inference` to prune and optimize the graph. This does not work on all types of graphs. toco_compatible (bool): See TensorFlow's `optimize_for_inference <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py>`_ for details. Only available after TF 1.8. """ if toco_compatible: assert optimize, "toco_compatible is only effective when optimize=True!" self.graph = self.config._maybe_create_graph() with self.graph.as_default(): input = PlaceholderInput() input.setup(self.config.input_signature) with PredictTowerContext(''): self.config.tower_func(*input.get_input_tensors()) input_tensors = get_tensors_by_names(self.config.input_names) output_tensors = get_tensors_by_names(self.config.output_names) self.config.session_init._setup_graph() # we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True)) self.config.session_init._run_init(sess) dtypes = [n.dtype for n in input_tensors] # freeze variables to constants frozen_graph_def = graph_util.convert_variables_to_constants( sess, self.graph.as_graph_def(), [n.name[:-2] for n in output_tensors], variable_names_whitelist=None, variable_names_blacklist=None) # prune unused nodes from graph if optimize: toco_args = () if get_tf_version_tuple() < (1, 8) else (toco_compatible, ) frozen_graph_def = optimize_for_inference_lib.optimize_for_inference( frozen_graph_def, [n.name[:-2] for n in input_tensors], [n.name[:-2] for n in output_tensors], [dtype.as_datatype_enum for dtype in dtypes], *toco_args) with gfile.FastGFile(filename, "wb") as f: f.write(frozen_graph_def.SerializeToString()) logger.info("Output 
graph written to {}.".format(filename))
[ "def", "export_compact", "(", "self", ",", "filename", ",", "optimize", "=", "True", ",", "toco_compatible", "=", "False", ")", ":", "if", "toco_compatible", ":", "assert", "optimize", ",", "\"toco_compatible is only effective when optimize=True!\"", "self", ".", "graph", "=", "self", ".", "config", ".", "_maybe_create_graph", "(", ")", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "input", "=", "PlaceholderInput", "(", ")", "input", ".", "setup", "(", "self", ".", "config", ".", "input_signature", ")", "with", "PredictTowerContext", "(", "''", ")", ":", "self", ".", "config", ".", "tower_func", "(", "*", "input", ".", "get_input_tensors", "(", ")", ")", "input_tensors", "=", "get_tensors_by_names", "(", "self", ".", "config", ".", "input_names", ")", "output_tensors", "=", "get_tensors_by_names", "(", "self", ".", "config", ".", "output_names", ")", "self", ".", "config", ".", "session_init", ".", "_setup_graph", "(", ")", "# we cannot use \"self.config.session_creator.create_session()\" here since it finalizes the graph", "sess", "=", "tfv1", ".", "Session", "(", "config", "=", "tfv1", ".", "ConfigProto", "(", "allow_soft_placement", "=", "True", ")", ")", "self", ".", "config", ".", "session_init", ".", "_run_init", "(", "sess", ")", "dtypes", "=", "[", "n", ".", "dtype", "for", "n", "in", "input_tensors", "]", "# freeze variables to constants", "frozen_graph_def", "=", "graph_util", ".", "convert_variables_to_constants", "(", "sess", ",", "self", ".", "graph", ".", "as_graph_def", "(", ")", ",", "[", "n", ".", "name", "[", ":", "-", "2", "]", "for", "n", "in", "output_tensors", "]", ",", "variable_names_whitelist", "=", "None", ",", "variable_names_blacklist", "=", "None", ")", "# prune unused nodes from graph", "if", "optimize", ":", "toco_args", "=", "(", ")", "if", "get_tf_version_tuple", "(", ")", "<", "(", "1", ",", "8", ")", "else", "(", "toco_compatible", ",", ")", "frozen_graph_def", "=", "optimize_for_inference_lib", ".", 
"optimize_for_inference", "(", "frozen_graph_def", ",", "[", "n", ".", "name", "[", ":", "-", "2", "]", "for", "n", "in", "input_tensors", "]", ",", "[", "n", ".", "name", "[", ":", "-", "2", "]", "for", "n", "in", "output_tensors", "]", ",", "[", "dtype", ".", "as_datatype_enum", "for", "dtype", "in", "dtypes", "]", ",", "*", "toco_args", ")", "with", "gfile", ".", "FastGFile", "(", "filename", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "frozen_graph_def", ".", "SerializeToString", "(", ")", ")", "logger", ".", "info", "(", "\"Output graph written to {}.\"", ".", "format", "(", "filename", ")", ")" ]
Create a self-contained inference-only graph and write final graph (in pb format) to disk. Args: filename (str): path to the output graph optimize (bool): whether to use TensorFlow's `optimize_for_inference` to prune and optimize the graph. This does not work on all types of graphs. toco_compatible (bool): See TensorFlow's `optimize_for_inference <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py>`_ for details. Only available after TF 1.8.
[ "Create", "a", "self", "-", "contained", "inference", "-", "only", "graph", "and", "write", "final", "graph", "(", "in", "pb", "format", ")", "to", "disk", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/export.py#L38-L89
train
tensorpack/tensorpack
tensorpack/tfutils/export.py
ModelExporter.export_serving
def export_serving(self, filename, tags=[tf.saved_model.SERVING if is_tfv2() else tf.saved_model.tag_constants.SERVING], signature_name='prediction_pipeline'): """ Converts a checkpoint and graph to a servable for TensorFlow Serving. Use TF's `SavedModelBuilder` to export a trained model without tensorpack dependency. Args: filename (str): path for export directory tags (list): list of user specified tags signature_name (str): name of signature for prediction Note: This produces .. code-block:: none variables/ # output from the vanilla Saver variables.data-?????-of-????? variables.index saved_model.pb # a `SavedModel` protobuf Currently, we only support a single signature, which is the general PredictSignatureDef: https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/signature_defs.md """ self.graph = self.config._maybe_create_graph() with self.graph.as_default(): input = PlaceholderInput() input.setup(self.config.input_signature) with PredictTowerContext(''): self.config.tower_func(*input.get_input_tensors()) input_tensors = get_tensors_by_names(self.config.input_names) saved_model = tfv1.saved_model.utils inputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in input_tensors} output_tensors = get_tensors_by_names(self.config.output_names) outputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in output_tensors} self.config.session_init._setup_graph() # we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True)) self.config.session_init._run_init(sess) builder = tfv1.saved_model.builder.SavedModelBuilder(filename) prediction_signature = tfv1.saved_model.signature_def_utils.build_signature_def( inputs=inputs_signatures, outputs=outputs_signatures, method_name=tfv1.saved_model.signature_constants.PREDICT_METHOD_NAME) builder.add_meta_graph_and_variables( sess, tags, signature_def_map={signature_name: 
prediction_signature}) builder.save() logger.info("SavedModel created at {}.".format(filename))
python
def export_serving(self, filename, tags=[tf.saved_model.SERVING if is_tfv2() else tf.saved_model.tag_constants.SERVING], signature_name='prediction_pipeline'): """ Converts a checkpoint and graph to a servable for TensorFlow Serving. Use TF's `SavedModelBuilder` to export a trained model without tensorpack dependency. Args: filename (str): path for export directory tags (list): list of user specified tags signature_name (str): name of signature for prediction Note: This produces .. code-block:: none variables/ # output from the vanilla Saver variables.data-?????-of-????? variables.index saved_model.pb # a `SavedModel` protobuf Currently, we only support a single signature, which is the general PredictSignatureDef: https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/signature_defs.md """ self.graph = self.config._maybe_create_graph() with self.graph.as_default(): input = PlaceholderInput() input.setup(self.config.input_signature) with PredictTowerContext(''): self.config.tower_func(*input.get_input_tensors()) input_tensors = get_tensors_by_names(self.config.input_names) saved_model = tfv1.saved_model.utils inputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in input_tensors} output_tensors = get_tensors_by_names(self.config.output_names) outputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in output_tensors} self.config.session_init._setup_graph() # we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True)) self.config.session_init._run_init(sess) builder = tfv1.saved_model.builder.SavedModelBuilder(filename) prediction_signature = tfv1.saved_model.signature_def_utils.build_signature_def( inputs=inputs_signatures, outputs=outputs_signatures, method_name=tfv1.saved_model.signature_constants.PREDICT_METHOD_NAME) builder.add_meta_graph_and_variables( sess, tags, signature_def_map={signature_name: 
prediction_signature}) builder.save() logger.info("SavedModel created at {}.".format(filename))
[ "def", "export_serving", "(", "self", ",", "filename", ",", "tags", "=", "[", "tf", ".", "saved_model", ".", "SERVING", "if", "is_tfv2", "(", ")", "else", "tf", ".", "saved_model", ".", "tag_constants", ".", "SERVING", "]", ",", "signature_name", "=", "'prediction_pipeline'", ")", ":", "self", ".", "graph", "=", "self", ".", "config", ".", "_maybe_create_graph", "(", ")", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "input", "=", "PlaceholderInput", "(", ")", "input", ".", "setup", "(", "self", ".", "config", ".", "input_signature", ")", "with", "PredictTowerContext", "(", "''", ")", ":", "self", ".", "config", ".", "tower_func", "(", "*", "input", ".", "get_input_tensors", "(", ")", ")", "input_tensors", "=", "get_tensors_by_names", "(", "self", ".", "config", ".", "input_names", ")", "saved_model", "=", "tfv1", ".", "saved_model", ".", "utils", "inputs_signatures", "=", "{", "t", ".", "name", ":", "saved_model", ".", "build_tensor_info", "(", "t", ")", "for", "t", "in", "input_tensors", "}", "output_tensors", "=", "get_tensors_by_names", "(", "self", ".", "config", ".", "output_names", ")", "outputs_signatures", "=", "{", "t", ".", "name", ":", "saved_model", ".", "build_tensor_info", "(", "t", ")", "for", "t", "in", "output_tensors", "}", "self", ".", "config", ".", "session_init", ".", "_setup_graph", "(", ")", "# we cannot use \"self.config.session_creator.create_session()\" here since it finalizes the graph", "sess", "=", "tfv1", ".", "Session", "(", "config", "=", "tfv1", ".", "ConfigProto", "(", "allow_soft_placement", "=", "True", ")", ")", "self", ".", "config", ".", "session_init", ".", "_run_init", "(", "sess", ")", "builder", "=", "tfv1", ".", "saved_model", ".", "builder", ".", "SavedModelBuilder", "(", "filename", ")", "prediction_signature", "=", "tfv1", ".", "saved_model", ".", "signature_def_utils", ".", "build_signature_def", "(", "inputs", "=", "inputs_signatures", ",", "outputs", "=", "outputs_signatures", ",", 
"method_name", "=", "tfv1", ".", "saved_model", ".", "signature_constants", ".", "PREDICT_METHOD_NAME", ")", "builder", ".", "add_meta_graph_and_variables", "(", "sess", ",", "tags", ",", "signature_def_map", "=", "{", "signature_name", ":", "prediction_signature", "}", ")", "builder", ".", "save", "(", ")", "logger", ".", "info", "(", "\"SavedModel created at {}.\"", ".", "format", "(", "filename", ")", ")" ]
Converts a checkpoint and graph to a servable for TensorFlow Serving. Use TF's `SavedModelBuilder` to export a trained model without tensorpack dependency. Args: filename (str): path for export directory tags (list): list of user specified tags signature_name (str): name of signature for prediction Note: This produces .. code-block:: none variables/ # output from the vanilla Saver variables.data-?????-of-????? variables.index saved_model.pb # a `SavedModel` protobuf Currently, we only support a single signature, which is the general PredictSignatureDef: https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/signature_defs.md
[ "Converts", "a", "checkpoint", "and", "graph", "to", "a", "servable", "for", "TensorFlow", "Serving", ".", "Use", "TF", "s", "SavedModelBuilder", "to", "export", "a", "trained", "model", "without", "tensorpack", "dependency", "." ]
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/export.py#L91-L146
train
modin-project/modin
modin/experimental/engines/pandas_on_ray/io_exp.py
_read_sql_with_offset_pandas_on_ray
def _read_sql_with_offset_pandas_on_ray( partition_column, start, end, num_splits, sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None, ): # pragma: no cover """Use a Ray task to read a chunk of SQL source. Note: Ray functions are not detected by codecov (thus pragma: no cover) """ from .sql import query_put_bounders query_with_bounders = query_put_bounders(sql, partition_column, start, end) pandas_df = pandas.read_sql( query_with_bounders, con, index_col=index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) index = len(pandas_df) return _split_result_for_readers(1, num_splits, pandas_df) + [index]
python
def _read_sql_with_offset_pandas_on_ray( partition_column, start, end, num_splits, sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None, ): # pragma: no cover """Use a Ray task to read a chunk of SQL source. Note: Ray functions are not detected by codecov (thus pragma: no cover) """ from .sql import query_put_bounders query_with_bounders = query_put_bounders(sql, partition_column, start, end) pandas_df = pandas.read_sql( query_with_bounders, con, index_col=index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) index = len(pandas_df) return _split_result_for_readers(1, num_splits, pandas_df) + [index]
[ "def", "_read_sql_with_offset_pandas_on_ray", "(", "partition_column", ",", "start", ",", "end", ",", "num_splits", ",", "sql", ",", "con", ",", "index_col", "=", "None", ",", "coerce_float", "=", "True", ",", "params", "=", "None", ",", "parse_dates", "=", "None", ",", "columns", "=", "None", ",", "chunksize", "=", "None", ",", ")", ":", "# pragma: no cover", "from", ".", "sql", "import", "query_put_bounders", "query_with_bounders", "=", "query_put_bounders", "(", "sql", ",", "partition_column", ",", "start", ",", "end", ")", "pandas_df", "=", "pandas", ".", "read_sql", "(", "query_with_bounders", ",", "con", ",", "index_col", "=", "index_col", ",", "coerce_float", "=", "coerce_float", ",", "params", "=", "params", ",", "parse_dates", "=", "parse_dates", ",", "columns", "=", "columns", ",", "chunksize", "=", "chunksize", ",", ")", "index", "=", "len", "(", "pandas_df", ")", "return", "_split_result_for_readers", "(", "1", ",", "num_splits", ",", "pandas_df", ")", "+", "[", "index", "]" ]
Use a Ray task to read a chunk of SQL source. Note: Ray functions are not detected by codecov (thus pragma: no cover)
[ "Use", "a", "Ray", "task", "to", "read", "a", "chunk", "of", "SQL", "source", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/io_exp.py#L119-L152
train
modin-project/modin
modin/experimental/engines/pandas_on_ray/io_exp.py
ExperimentalPandasOnRayIO.read_sql
def read_sql( cls, sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None, partition_column=None, lower_bound=None, upper_bound=None, max_sessions=None, ): """ Read SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params: List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. parse_dates: - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns: List of column names to select from SQL table (only used when reading a table). chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. 
partition_column: column used to share the data between the workers (MUST be a INTEGER column) lower_bound: the minimum value to be requested from the partition_column upper_bound: the maximum value to be requested from the partition_column max_sessions: the maximum number of simultaneous connections allowed to use Returns: Pandas Dataframe """ from .sql import is_distributed, get_query_info if not is_distributed(partition_column, lower_bound, upper_bound): warnings.warn("Defaulting to Modin core implementation") return PandasOnRayIO.read_sql( sql, con, index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) # starts the distributed alternative cols_names, query = get_query_info(sql, con, partition_column) num_parts = min(cls.frame_mgr_cls._compute_num_partitions(), max_sessions) num_splits = min(len(cols_names), num_parts) diff = (upper_bound - lower_bound) + 1 min_size = diff // num_parts rest = diff % num_parts partition_ids = [] index_ids = [] end = lower_bound - 1 for part in range(num_parts): if rest: size = min_size + 1 rest -= 1 else: size = min_size start = end + 1 end = start + size - 1 partition_id = _read_sql_with_offset_pandas_on_ray._remote( args=( partition_column, start, end, num_splits, query, con, index_col, coerce_float, params, parse_dates, columns, chunksize, ), num_return_vals=num_splits + 1, ) partition_ids.append( [PandasOnRayFramePartition(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) new_index = pandas.RangeIndex(sum(ray.get(index_ids))) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names ) return new_query_compiler
python
def read_sql( cls, sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None, partition_column=None, lower_bound=None, upper_bound=None, max_sessions=None, ): """ Read SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params: List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. parse_dates: - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns: List of column names to select from SQL table (only used when reading a table). chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. 
partition_column: column used to share the data between the workers (MUST be a INTEGER column) lower_bound: the minimum value to be requested from the partition_column upper_bound: the maximum value to be requested from the partition_column max_sessions: the maximum number of simultaneous connections allowed to use Returns: Pandas Dataframe """ from .sql import is_distributed, get_query_info if not is_distributed(partition_column, lower_bound, upper_bound): warnings.warn("Defaulting to Modin core implementation") return PandasOnRayIO.read_sql( sql, con, index_col, coerce_float=coerce_float, params=params, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) # starts the distributed alternative cols_names, query = get_query_info(sql, con, partition_column) num_parts = min(cls.frame_mgr_cls._compute_num_partitions(), max_sessions) num_splits = min(len(cols_names), num_parts) diff = (upper_bound - lower_bound) + 1 min_size = diff // num_parts rest = diff % num_parts partition_ids = [] index_ids = [] end = lower_bound - 1 for part in range(num_parts): if rest: size = min_size + 1 rest -= 1 else: size = min_size start = end + 1 end = start + size - 1 partition_id = _read_sql_with_offset_pandas_on_ray._remote( args=( partition_column, start, end, num_splits, query, con, index_col, coerce_float, params, parse_dates, columns, chunksize, ), num_return_vals=num_splits + 1, ) partition_ids.append( [PandasOnRayFramePartition(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) new_index = pandas.RangeIndex(sum(ray.get(index_ids))) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names ) return new_query_compiler
[ "def", "read_sql", "(", "cls", ",", "sql", ",", "con", ",", "index_col", "=", "None", ",", "coerce_float", "=", "True", ",", "params", "=", "None", ",", "parse_dates", "=", "None", ",", "columns", "=", "None", ",", "chunksize", "=", "None", ",", "partition_column", "=", "None", ",", "lower_bound", "=", "None", ",", "upper_bound", "=", "None", ",", "max_sessions", "=", "None", ",", ")", ":", "from", ".", "sql", "import", "is_distributed", ",", "get_query_info", "if", "not", "is_distributed", "(", "partition_column", ",", "lower_bound", ",", "upper_bound", ")", ":", "warnings", ".", "warn", "(", "\"Defaulting to Modin core implementation\"", ")", "return", "PandasOnRayIO", ".", "read_sql", "(", "sql", ",", "con", ",", "index_col", ",", "coerce_float", "=", "coerce_float", ",", "params", "=", "params", ",", "parse_dates", "=", "parse_dates", ",", "columns", "=", "columns", ",", "chunksize", "=", "chunksize", ",", ")", "# starts the distributed alternative", "cols_names", ",", "query", "=", "get_query_info", "(", "sql", ",", "con", ",", "partition_column", ")", "num_parts", "=", "min", "(", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", ",", "max_sessions", ")", "num_splits", "=", "min", "(", "len", "(", "cols_names", ")", ",", "num_parts", ")", "diff", "=", "(", "upper_bound", "-", "lower_bound", ")", "+", "1", "min_size", "=", "diff", "//", "num_parts", "rest", "=", "diff", "%", "num_parts", "partition_ids", "=", "[", "]", "index_ids", "=", "[", "]", "end", "=", "lower_bound", "-", "1", "for", "part", "in", "range", "(", "num_parts", ")", ":", "if", "rest", ":", "size", "=", "min_size", "+", "1", "rest", "-=", "1", "else", ":", "size", "=", "min_size", "start", "=", "end", "+", "1", "end", "=", "start", "+", "size", "-", "1", "partition_id", "=", "_read_sql_with_offset_pandas_on_ray", ".", "_remote", "(", "args", "=", "(", "partition_column", ",", "start", ",", "end", ",", "num_splits", ",", "query", ",", "con", ",", "index_col", ",", 
"coerce_float", ",", "params", ",", "parse_dates", ",", "columns", ",", "chunksize", ",", ")", ",", "num_return_vals", "=", "num_splits", "+", "1", ",", ")", "partition_ids", ".", "append", "(", "[", "PandasOnRayFramePartition", "(", "obj", ")", "for", "obj", "in", "partition_id", "[", ":", "-", "1", "]", "]", ")", "index_ids", ".", "append", "(", "partition_id", "[", "-", "1", "]", ")", "new_index", "=", "pandas", ".", "RangeIndex", "(", "sum", "(", "ray", ".", "get", "(", "index_ids", ")", ")", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "np", ".", "array", "(", "partition_ids", ")", ")", ",", "new_index", ",", "cols_names", ")", "return", "new_query_compiler" ]
Read SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params: List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. parse_dates: - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns: List of column names to select from SQL table (only used when reading a table). chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. partition_column: column used to share the data between the workers (MUST be a INTEGER column) lower_bound: the minimum value to be requested from the partition_column upper_bound: the maximum value to be requested from the partition_column max_sessions: the maximum number of simultaneous connections allowed to use Returns: Pandas Dataframe
[ "Read", "SQL", "query", "or", "database", "table", "into", "a", "DataFrame", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/io_exp.py#L12-L115
train
modin-project/modin
modin/pandas/utils.py
_inherit_docstrings
def _inherit_docstrings(parent, excluded=[]): """Creates a decorator which overwrites a decorated class' __doc__ attribute with parent's __doc__ attribute. Also overwrites __doc__ of methods and properties defined in the class with the __doc__ of matching methods and properties in parent. Args: parent (object): Class from which the decorated class inherits __doc__. excluded (list): List of parent objects from which the class does not inherit docstrings. Returns: function: decorator which replaces the decorated class' documentation parent's documentation. """ def decorator(cls): if parent not in excluded: cls.__doc__ = parent.__doc__ for attr, obj in cls.__dict__.items(): parent_obj = getattr(parent, attr, None) if parent_obj in excluded or ( not callable(parent_obj) and not isinstance(parent_obj, property) ): continue if callable(obj): obj.__doc__ = parent_obj.__doc__ elif isinstance(obj, property) and obj.fget is not None: p = property(obj.fget, obj.fset, obj.fdel, parent_obj.__doc__) setattr(cls, attr, p) return cls return decorator
python
def _inherit_docstrings(parent, excluded=[]): """Creates a decorator which overwrites a decorated class' __doc__ attribute with parent's __doc__ attribute. Also overwrites __doc__ of methods and properties defined in the class with the __doc__ of matching methods and properties in parent. Args: parent (object): Class from which the decorated class inherits __doc__. excluded (list): List of parent objects from which the class does not inherit docstrings. Returns: function: decorator which replaces the decorated class' documentation parent's documentation. """ def decorator(cls): if parent not in excluded: cls.__doc__ = parent.__doc__ for attr, obj in cls.__dict__.items(): parent_obj = getattr(parent, attr, None) if parent_obj in excluded or ( not callable(parent_obj) and not isinstance(parent_obj, property) ): continue if callable(obj): obj.__doc__ = parent_obj.__doc__ elif isinstance(obj, property) and obj.fget is not None: p = property(obj.fget, obj.fset, obj.fdel, parent_obj.__doc__) setattr(cls, attr, p) return cls return decorator
[ "def", "_inherit_docstrings", "(", "parent", ",", "excluded", "=", "[", "]", ")", ":", "def", "decorator", "(", "cls", ")", ":", "if", "parent", "not", "in", "excluded", ":", "cls", ".", "__doc__", "=", "parent", ".", "__doc__", "for", "attr", ",", "obj", "in", "cls", ".", "__dict__", ".", "items", "(", ")", ":", "parent_obj", "=", "getattr", "(", "parent", ",", "attr", ",", "None", ")", "if", "parent_obj", "in", "excluded", "or", "(", "not", "callable", "(", "parent_obj", ")", "and", "not", "isinstance", "(", "parent_obj", ",", "property", ")", ")", ":", "continue", "if", "callable", "(", "obj", ")", ":", "obj", ".", "__doc__", "=", "parent_obj", ".", "__doc__", "elif", "isinstance", "(", "obj", ",", "property", ")", "and", "obj", ".", "fget", "is", "not", "None", ":", "p", "=", "property", "(", "obj", ".", "fget", ",", "obj", ".", "fset", ",", "obj", ".", "fdel", ",", "parent_obj", ".", "__doc__", ")", "setattr", "(", "cls", ",", "attr", ",", "p", ")", "return", "cls", "return", "decorator" ]
Creates a decorator which overwrites a decorated class' __doc__ attribute with parent's __doc__ attribute. Also overwrites __doc__ of methods and properties defined in the class with the __doc__ of matching methods and properties in parent. Args: parent (object): Class from which the decorated class inherits __doc__. excluded (list): List of parent objects from which the class does not inherit docstrings. Returns: function: decorator which replaces the decorated class' documentation parent's documentation.
[ "Creates", "a", "decorator", "which", "overwrites", "a", "decorated", "class", "__doc__", "attribute", "with", "parent", "s", "__doc__", "attribute", ".", "Also", "overwrites", "__doc__", "of", "methods", "and", "properties", "defined", "in", "the", "class", "with", "the", "__doc__", "of", "matching", "methods", "and", "properties", "in", "parent", "." ]
5b77d242596560c646b8405340c9ce64acb183cb
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/utils.py#L33-L65
train