field             type           lengths / values
repo              stringlengths  7 - 55
path              stringlengths  4 - 223
func_name         stringlengths  1 - 134
original_string   stringlengths  75 - 104k
language          stringclasses  1 value
code              stringlengths  75 - 104k
code_tokens       listlengths    19 - 28.4k
docstring         stringlengths  1 - 46.9k
docstring_tokens  listlengths    1 - 1.97k
sha               stringlengths  40 - 40
url               stringlengths  87 - 315
partition         stringclasses  1 value
tensorflow/tensorboard
tensorboard/backend/event_processing/io_wrapper.py
ListDirectoryAbsolute
python
def ListDirectoryAbsolute(directory):
  """Yields all files in the given directory. The paths are absolute."""
  return (os.path.join(directory, path)
          for path in tf.io.gfile.listdir(directory))
[ "def", "ListDirectoryAbsolute", "(", "directory", ")", ":", "return", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "path", ")", "for", "path", "in", "tf", ".", "io", ".", "gfile", ".", "listdir", "(", "directory", ")", ")" ]
Yields all files in the given directory. The paths are absolute.
[ "Yields", "all", "files", "in", "the", "given", "directory", ".", "The", "paths", "are", "absolute", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/io_wrapper.py#L62-L65
train
tensorflow/tensorboard
tensorboard/backend/event_processing/io_wrapper.py
_EscapeGlobCharacters
python
def _EscapeGlobCharacters(path):
  """Escapes the glob characters in a path.

  Python 3 has a glob.escape method, but python 2 lacks it, so we manually
  implement this method.

  Args:
    path: The absolute path to escape.

  Returns:
    The escaped path string.
  """
  drive, path = os.path.splitdrive(path)
  return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\1]', path))
[ "def", "_EscapeGlobCharacters", "(", "path", ")", ":", "drive", ",", "path", "=", "os", ".", "path", ".", "splitdrive", "(", "path", ")", "return", "'%s%s'", "%", "(", "drive", ",", "_ESCAPE_GLOB_CHARACTERS_REGEX", ".", "sub", "(", "r'[\\1]'", ",", "path", ")", ")" ]
Escapes the glob characters in a path. Python 3 has a glob.escape method, but python 2 lacks it, so we manually implement this method. Args: path: The absolute path to escape. Returns: The escaped path string.
[ "Escapes", "the", "glob", "characters", "in", "a", "path", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/io_wrapper.py#L68-L81
train
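A quick, self-contained sanity check of the escaping behavior above. The module-level `_ESCAPE_GLOB_CHARACTERS_REGEX` is not shown in this excerpt; it is assumed here to match the three characters that are special in glob patterns (`*`, `?`, `[`).

import os
import re

# Assumed definition; the real constant lives elsewhere in io_wrapper.py.
_ESCAPE_GLOB_CHARACTERS_REGEX = re.compile('([*?[])')

def _EscapeGlobCharacters(path):
    drive, path = os.path.splitdrive(path)
    return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\1]', path))

print(_EscapeGlobCharacters('/logs/run[1]/step*'))  # -> /logs/run[[]1]/step[*]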
tensorflow/tensorboard
tensorboard/backend/event_processing/io_wrapper.py
ListRecursivelyViaGlobbing
python
def ListRecursivelyViaGlobbing(top):
  """Recursively lists all files within the directory.

  This method lists subdirectories in addition to regular files, and the
  file paths are all absolute. If the directory does not exist, this yields
  nothing.

  This method does so by glob-ing deeper and deeper directories, i.e.,
  foo/*, foo/*/*, foo/*/*/* and so on until all files are listed.

  For certain file systems, globbing via this method may prove significantly
  faster than recursively walking a directory. Specifically, TF file systems
  that implement TensorFlow's FileSystem.GetMatchingPaths method could save
  costly disk reads by using this method. However, for other file systems,
  this method might prove slower because the file system performs a walk per
  call to glob (in which case it might as well just perform 1 walk).

  Args:
    top: A path to a directory.

  Yields:
    A (dir_path, file_paths) tuple for each directory/subdirectory.
  """
  current_glob_string = os.path.join(_EscapeGlobCharacters(top), '*')
  level = 0
  while True:
    logger.info('GlobAndListFiles: Starting to glob level %d', level)
    glob = tf.io.gfile.glob(current_glob_string)
    logger.info(
        'GlobAndListFiles: %d files glob-ed at level %d', len(glob), level)

    if not glob:
      # This subdirectory level lacks files. Terminate.
      return

    # Map subdirectory to a list of files.
    pairs = collections.defaultdict(list)
    for file_path in glob:
      pairs[os.path.dirname(file_path)].append(file_path)
    for dir_name, file_paths in six.iteritems(pairs):
      yield (dir_name, tuple(file_paths))

    if len(pairs) == 1:
      # If at any point the glob returns files that are all in a single
      # directory, replace the current globbing path with that directory as the
      # literal prefix. This should improve efficiency in cases where a single
      # subdir is significantly deeper than the rest of the subdirs.
      current_glob_string = os.path.join(list(pairs.keys())[0], '*')

    # Iterate to the next level of subdirectories.
    current_glob_string = os.path.join(current_glob_string, '*')
    level += 1
[ "def", "ListRecursivelyViaGlobbing", "(", "top", ")", ":", "current_glob_string", "=", "os", ".", "path", ".", "join", "(", "_EscapeGlobCharacters", "(", "top", ")", ",", "'*'", ")", "level", "=", "0", "while", "True", ":", "logger", ".", "info", "(", "'GlobAndListFiles: Starting to glob level %d'", ",", "level", ")", "glob", "=", "tf", ".", "io", ".", "gfile", ".", "glob", "(", "current_glob_string", ")", "logger", ".", "info", "(", "'GlobAndListFiles: %d files glob-ed at level %d'", ",", "len", "(", "glob", ")", ",", "level", ")", "if", "not", "glob", ":", "# This subdirectory level lacks files. Terminate.", "return", "# Map subdirectory to a list of files.", "pairs", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "file_path", "in", "glob", ":", "pairs", "[", "os", ".", "path", ".", "dirname", "(", "file_path", ")", "]", ".", "append", "(", "file_path", ")", "for", "dir_name", ",", "file_paths", "in", "six", ".", "iteritems", "(", "pairs", ")", ":", "yield", "(", "dir_name", ",", "tuple", "(", "file_paths", ")", ")", "if", "len", "(", "pairs", ")", "==", "1", ":", "# If at any point the glob returns files that are all in a single", "# directory, replace the current globbing path with that directory as the", "# literal prefix. This should improve efficiency in cases where a single", "# subdir is significantly deeper than the rest of the sudirs.", "current_glob_string", "=", "os", ".", "path", ".", "join", "(", "list", "(", "pairs", ".", "keys", "(", ")", ")", "[", "0", "]", ",", "'*'", ")", "# Iterate to the next level of subdirectories.", "current_glob_string", "=", "os", ".", "path", ".", "join", "(", "current_glob_string", ",", "'*'", ")", "level", "+=", "1" ]
Recursively lists all files within the directory. This method lists subdirectories in addition to regular files, and the file paths are all absolute. If the directory does not exist, this yields nothing. This method does so by glob-ing deeper and deeper directories, i.e., foo/*, foo/*/*, foo/*/*/* and so on until all files are listed. For certain file systems, globbing via this method may prove significantly faster than recursively walking a directory. Specifically, TF file systems that implement TensorFlow's FileSystem.GetMatchingPaths method could save costly disk reads by using this method. However, for other file systems, this method might prove slower because the file system performs a walk per call to glob (in which case it might as well just perform 1 walk). Args: top: A path to a directory. Yields: A (dir_path, file_paths) tuple for each directory/subdirectory.
[ "Recursively", "lists", "all", "files", "within", "the", "directory", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/io_wrapper.py#L84-L137
train
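A hypothetical consumption pattern for ListRecursivelyViaGlobbing; the logdir path is illustrative. Each yielded tuple covers one directory discovered at some glob depth, and the file_paths tuple may include subdirectory paths as well:

for dir_path, file_paths in ListRecursivelyViaGlobbing('/tmp/logs'):
    # file_paths is a tuple of absolute paths; filter out directories here
    # if only regular files are wanted.
    print(dir_path, len(file_paths))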
tensorflow/tensorboard
tensorboard/backend/event_processing/io_wrapper.py
ListRecursivelyViaWalking
python
def ListRecursivelyViaWalking(top):
  """Walks a directory tree, yielding (dir_path, file_paths) tuples.

  For each of `top` and its subdirectories, yields a tuple containing the path
  to the directory and the path to each of the contained files. Note that
  unlike os.walk()/tf.io.gfile.walk()/ListRecursivelyViaGlobbing, this does not
  list subdirectories. The file paths are all absolute. If the directory does
  not exist, this yields nothing.

  Walking may be incredibly slow on certain file systems.

  Args:
    top: A path to a directory.

  Yields:
    A (dir_path, file_paths) tuple for each directory/subdirectory.
  """
  for dir_path, _, filenames in tf.io.gfile.walk(top, topdown=True):
    yield (dir_path,
           (os.path.join(dir_path, filename) for filename in filenames))
[ "def", "ListRecursivelyViaWalking", "(", "top", ")", ":", "for", "dir_path", ",", "_", ",", "filenames", "in", "tf", ".", "io", ".", "gfile", ".", "walk", "(", "top", ",", "topdown", "=", "True", ")", ":", "yield", "(", "dir_path", ",", "(", "os", ".", "path", ".", "join", "(", "dir_path", ",", "filename", ")", "for", "filename", "in", "filenames", ")", ")" ]
Walks a directory tree, yielding (dir_path, file_paths) tuples. For each of `top` and its subdirectories, yields a tuple containing the path to the directory and the path to each of the contained files. Note that unlike os.walk()/tf.io.gfile.walk()/ListRecursivelyViaGlobbing, this does not list subdirectories. The file paths are all absolute. If the directory does not exist, this yields nothing. Walking may be incredibly slow on certain file systems. Args: top: A path to a directory. Yields: A (dir_path, file_paths) tuple for each directory/subdirectory.
[ "Walks", "a", "directory", "tree", "yielding", "(", "dir_path", "file_paths", ")", "tuples", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/io_wrapper.py#L140-L159
train
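One detail worth noting in ListRecursivelyViaWalking: the inner file_paths is a generator expression, so it can be consumed only once. A hypothetical caller that needs repeated access should materialize it:

for dir_path, file_paths in ListRecursivelyViaWalking('/tmp/logs'):  # illustrative path
    files = list(file_paths)  # materialize; the generator is single-use
    print(dir_path, files)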
tensorflow/tensorboard
tensorboard/backend/event_processing/io_wrapper.py
GetLogdirSubdirectories
python
def GetLogdirSubdirectories(path):
  """Obtains all subdirectories with events files.

  The order of the subdirectories returned is unspecified. The internal logic
  that determines order varies by scenario.

  Args:
    path: The path to a directory under which to find subdirectories.

  Returns:
    A generator of absolute paths of all subdirectories, each with at least 1
    events file directly within it.

  Raises:
    ValueError: If the path passed to the method exists and is not a directory.
  """
  if not tf.io.gfile.exists(path):
    # No directory to traverse.
    return ()

  if not tf.io.gfile.isdir(path):
    raise ValueError('GetLogdirSubdirectories: path exists and is not a '
                     'directory, %s' % path)

  if IsCloudPath(path):
    # Glob-ing for files can be significantly faster than recursively
    # walking through directories for some file systems.
    logger.info(
        'GetLogdirSubdirectories: Starting to list directories via glob-ing.')
    traversal_method = ListRecursivelyViaGlobbing
  else:
    # For other file systems, the glob-ing based method might be slower because
    # each call to glob could involve performing a recursive walk.
    logger.info(
        'GetLogdirSubdirectories: Starting to list directories via walking.')
    traversal_method = ListRecursivelyViaWalking

  return (
      subdir
      for (subdir, files) in traversal_method(path)
      if any(IsTensorFlowEventsFile(f) for f in files)
  )
[ "def", "GetLogdirSubdirectories", "(", "path", ")", ":", "if", "not", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "path", ")", ":", "# No directory to traverse.", "return", "(", ")", "if", "not", "tf", ".", "io", ".", "gfile", ".", "isdir", "(", "path", ")", ":", "raise", "ValueError", "(", "'GetLogdirSubdirectories: path exists and is not a '", "'directory, %s'", "%", "path", ")", "if", "IsCloudPath", "(", "path", ")", ":", "# Glob-ing for files can be significantly faster than recursively", "# walking through directories for some file systems.", "logger", ".", "info", "(", "'GetLogdirSubdirectories: Starting to list directories via glob-ing.'", ")", "traversal_method", "=", "ListRecursivelyViaGlobbing", "else", ":", "# For other file systems, the glob-ing based method might be slower because", "# each call to glob could involve performing a recursive walk.", "logger", ".", "info", "(", "'GetLogdirSubdirectories: Starting to list directories via walking.'", ")", "traversal_method", "=", "ListRecursivelyViaWalking", "return", "(", "subdir", "for", "(", "subdir", ",", "files", ")", "in", "traversal_method", "(", "path", ")", "if", "any", "(", "IsTensorFlowEventsFile", "(", "f", ")", "for", "f", "in", "files", ")", ")" ]
Obtains all subdirectories with events files. The order of the subdirectories returned is unspecified. The internal logic that determines order varies by scenario. Args: path: The path to a directory under which to find subdirectories. Returns: A generator of absolute paths of all subdirectories, each with at least 1 events file directly within it. Raises: ValueError: If the path passed to the method exists and is not a directory.
[ "Obtains", "all", "subdirectories", "with", "events", "files", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/io_wrapper.py#L162-L203
train
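A hypothetical call to GetLogdirSubdirectories; the logdir path is illustrative, and the result is materialized because the function returns a generator:

run_dirs = list(GetLogdirSubdirectories('/tmp/logs'))
for run_dir in run_dirs:
    print('run directory with events files:', run_dir)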
tensorflow/tensorboard
tensorboard/plugins/audio/summary_v2.py
audio
python
def audio(name, data, sample_rate, step=None, max_outputs=3, encoding=None,
          description=None):
  """Write an audio summary.

  Arguments:
    name: A name for this summary. The summary tag used for TensorBoard will
      be this name prefixed by any active name scopes.
    data: A `Tensor` representing audio data with shape `[k, t, c]`, where
      `k` is the number of audio clips, `t` is the number of frames, and `c`
      is the number of channels. Elements should be floating-point values in
      `[-1.0, 1.0]`. Any of the dimensions may be statically unknown (i.e.,
      `None`).
    sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
      sample rate, in Hz. Must be positive.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which
      must not be None.
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this many
      audio clips will be emitted at each step. When more than `max_outputs`
      many clips are provided, the first `max_outputs` many clips will be
      used and the rest silently discarded.
    encoding: Optional constant `str` for the desired encoding. Only "wav" is
      currently supported, but this is not guaranteed to remain the default,
      so if you want "wav" in particular, set this explicitly.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.

  Returns:
    True on success, or false if no summary was emitted because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  audio_ops = getattr(tf, 'audio', None)
  if audio_ops is None:
    # Fallback for older versions of TF without tf.audio.
    from tensorflow.python.ops import gen_audio_ops as audio_ops
  if encoding is None:
    encoding = 'wav'
  if encoding != 'wav':
    raise ValueError('Unknown encoding: %r' % encoding)
  summary_metadata = metadata.create_summary_metadata(
      display_name=None,
      description=description,
      encoding=metadata.Encoding.Value('WAV'))
  inputs = [data, sample_rate, max_outputs, step]
  # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
  summary_scope = (
      getattr(tf.summary.experimental, 'summary_scope', None) or
      tf.summary.summary_scope)
  with summary_scope(name, 'audio_summary', values=inputs) as (tag, _):
    tf.debugging.assert_rank(data, 3)
    tf.debugging.assert_non_negative(max_outputs)
    limited_audio = data[:max_outputs]
    encode_fn = functools.partial(audio_ops.encode_wav,
                                  sample_rate=sample_rate)
    encoded_audio = tf.map_fn(encode_fn, limited_audio,
                              dtype=tf.string,
                              name='encode_each_audio')
    # Workaround for map_fn returning float dtype for an empty elems input.
    encoded_audio = tf.cond(
        tf.shape(input=encoded_audio)[0] > 0,
        lambda: encoded_audio, lambda: tf.constant([], tf.string))
    limited_labels = tf.tile([''], tf.shape(input=limited_audio)[:1])
    tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
    return tf.summary.write(
        tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
[ "def", "audio", "(", "name", ",", "data", ",", "sample_rate", ",", "step", "=", "None", ",", "max_outputs", "=", "3", ",", "encoding", "=", "None", ",", "description", "=", "None", ")", ":", "audio_ops", "=", "getattr", "(", "tf", ",", "'audio'", ",", "None", ")", "if", "audio_ops", "is", "None", ":", "# Fallback for older versions of TF without tf.audio.", "from", "tensorflow", ".", "python", ".", "ops", "import", "gen_audio_ops", "as", "audio_ops", "if", "encoding", "is", "None", ":", "encoding", "=", "'wav'", "if", "encoding", "!=", "'wav'", ":", "raise", "ValueError", "(", "'Unknown encoding: %r'", "%", "encoding", ")", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "None", ",", "description", "=", "description", ",", "encoding", "=", "metadata", ".", "Encoding", ".", "Value", "(", "'WAV'", ")", ")", "inputs", "=", "[", "data", ",", "sample_rate", ",", "max_outputs", ",", "step", "]", "# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback", "summary_scope", "=", "(", "getattr", "(", "tf", ".", "summary", ".", "experimental", ",", "'summary_scope'", ",", "None", ")", "or", "tf", ".", "summary", ".", "summary_scope", ")", "with", "summary_scope", "(", "name", ",", "'audio_summary'", ",", "values", "=", "inputs", ")", "as", "(", "tag", ",", "_", ")", ":", "tf", ".", "debugging", ".", "assert_rank", "(", "data", ",", "3", ")", "tf", ".", "debugging", ".", "assert_non_negative", "(", "max_outputs", ")", "limited_audio", "=", "data", "[", ":", "max_outputs", "]", "encode_fn", "=", "functools", ".", "partial", "(", "audio_ops", ".", "encode_wav", ",", "sample_rate", "=", "sample_rate", ")", "encoded_audio", "=", "tf", ".", "map_fn", "(", "encode_fn", ",", "limited_audio", ",", "dtype", "=", "tf", ".", "string", ",", "name", "=", "'encode_each_audio'", ")", "# Workaround for map_fn returning float dtype for an empty elems input.", "encoded_audio", "=", "tf", ".", "cond", "(", "tf", ".", "shape", "(", "input", "=", "encoded_audio", ")", "[", "0", "]", ">", "0", ",", "lambda", ":", "encoded_audio", ",", "lambda", ":", "tf", ".", "constant", "(", "[", "]", ",", "tf", ".", "string", ")", ")", "limited_labels", "=", "tf", ".", "tile", "(", "[", "''", "]", ",", "tf", ".", "shape", "(", "input", "=", "limited_audio", ")", "[", ":", "1", "]", ")", "tensor", "=", "tf", ".", "transpose", "(", "a", "=", "tf", ".", "stack", "(", "[", "encoded_audio", ",", "limited_labels", "]", ")", ")", "return", "tf", ".", "summary", ".", "write", "(", "tag", "=", "tag", ",", "tensor", "=", "tensor", ",", "step", "=", "step", ",", "metadata", "=", "summary_metadata", ")" ]
Write an audio summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A `Tensor` representing audio data with shape `[k, t, c]`, where `k` is the number of audio clips, `t` is the number of frames, and `c` is the number of channels. Elements should be floating-point values in `[-1.0, 1.0]`. Any of the dimensions may be statically unknown (i.e., `None`). sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the sample rate, in Hz. Must be positive. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this many audio clips will be emitted at each step. When more than `max_outputs` many clips are provided, the first `max_outputs` many clips will be used and the rest silently discarded. encoding: Optional constant `str` for the desired encoding. Only "wav" is currently supported, but this is not guaranteed to remain the default, so if you want "wav" in particular, set this explicitly. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
[ "Write", "an", "audio", "summary", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/summary_v2.py#L34-L109
train
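A minimal usage sketch of the audio summary op under TF 2.x eager execution, assuming the function above is in scope; the log directory and random clip data are illustrative:

import tensorflow as tf

# Two 1-second mono clips of noise in [-1, 1]: shape [k, t, c] = [2, 44100, 1].
data = tf.random.uniform([2, 44100, 1], minval=-1.0, maxval=1.0)

writer = tf.summary.create_file_writer('/tmp/audio_demo')  # illustrative path
with writer.as_default():
    audio('noise_clips', data, sample_rate=44100, step=0, max_outputs=2)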
tensorflow/tensorboard
tensorboard/plugins/debugger/numerics_alert.py
extract_numerics_alert
python
def extract_numerics_alert(event):
  """Determines whether a health pill event contains bad values.

  A bad value is one of NaN, -Inf, or +Inf.

  Args:
    event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`
      ops.

  Returns:
    An instance of `NumericsAlert`, if bad values are found.
    `None`, if no bad values are found.

  Raises:
    ValueError: if the event lacks the expected debugger plugin metadata or
      the debug op name does not end with the expected suffix.
  """
  value = event.summary.value[0]
  debugger_plugin_metadata_content = None
  if value.HasField("metadata"):
    plugin_data = value.metadata.plugin_data
    if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:
      debugger_plugin_metadata_content = plugin_data.content
  if not debugger_plugin_metadata_content:
    raise ValueError("Event proto input lacks debugger plugin SummaryMetadata.")

  debugger_plugin_metadata_content = tf.compat.as_text(
      debugger_plugin_metadata_content)
  try:
    content_object = json.loads(debugger_plugin_metadata_content)
    device_name = content_object["device"]
  except (KeyError, ValueError) as e:
    raise ValueError("Could not determine device from JSON string %r, %r" %
                     (debugger_plugin_metadata_content, e))

  debug_op_suffix = ":DebugNumericSummary"
  if not value.node_name.endswith(debug_op_suffix):
    raise ValueError(
        "Event proto input does not have the expected debug op suffix %s" %
        debug_op_suffix)
  tensor_name = value.node_name[:-len(debug_op_suffix)]

  elements = tf_debug.load_tensor_from_event(event)
  nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]
  neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]
  pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]
  if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0:
    return NumericsAlert(
        device_name, tensor_name, event.wall_time, nan_count, neg_inf_count,
        pos_inf_count)
  return None
[ "def", "extract_numerics_alert", "(", "event", ")", ":", "value", "=", "event", ".", "summary", ".", "value", "[", "0", "]", "debugger_plugin_metadata_content", "=", "None", "if", "value", ".", "HasField", "(", "\"metadata\"", ")", ":", "plugin_data", "=", "value", ".", "metadata", ".", "plugin_data", "if", "plugin_data", ".", "plugin_name", "==", "constants", ".", "DEBUGGER_PLUGIN_NAME", ":", "debugger_plugin_metadata_content", "=", "plugin_data", ".", "content", "if", "not", "debugger_plugin_metadata_content", ":", "raise", "ValueError", "(", "\"Event proto input lacks debugger plugin SummaryMetadata.\"", ")", "debugger_plugin_metadata_content", "=", "tf", ".", "compat", ".", "as_text", "(", "debugger_plugin_metadata_content", ")", "try", ":", "content_object", "=", "json", ".", "loads", "(", "debugger_plugin_metadata_content", ")", "device_name", "=", "content_object", "[", "\"device\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", "as", "e", ":", "raise", "ValueError", "(", "\"Could not determine device from JSON string %r, %r\"", "%", "(", "debugger_plugin_metadata_content", ",", "e", ")", ")", "debug_op_suffix", "=", "\":DebugNumericSummary\"", "if", "not", "value", ".", "node_name", ".", "endswith", "(", "debug_op_suffix", ")", ":", "raise", "ValueError", "(", "\"Event proto input does not have the expected debug op suffix %s\"", "%", "debug_op_suffix", ")", "tensor_name", "=", "value", ".", "node_name", "[", ":", "-", "len", "(", "debug_op_suffix", ")", "]", "elements", "=", "tf_debug", ".", "load_tensor_from_event", "(", "event", ")", "nan_count", "=", "elements", "[", "constants", ".", "NAN_NUMERIC_SUMMARY_OP_INDEX", "]", "neg_inf_count", "=", "elements", "[", "constants", ".", "NEG_INF_NUMERIC_SUMMARY_OP_INDEX", "]", "pos_inf_count", "=", "elements", "[", "constants", ".", "POS_INF_NUMERIC_SUMMARY_OP_INDEX", "]", "if", "nan_count", ">", "0", "or", "neg_inf_count", ">", "0", "or", "pos_inf_count", ">", "0", ":", "return", "NumericsAlert", "(", "device_name", ",", "tensor_name", ",", "event", ".", "wall_time", ",", "nan_count", ",", "neg_inf_count", ",", "pos_inf_count", ")", "return", "None" ]
Determines whether a health pill event contains bad values. A bad value is one of NaN, -Inf, or +Inf. Args: event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary` ops. Returns: An instance of `NumericsAlert`, if bad values are found. `None`, if no bad values are found. Raises: ValueError: if the event lacks the expected debugger plugin metadata or the debug op name does not end with the expected suffix.
[ "Determines", "whether", "a", "health", "pill", "event", "contains", "bad", "values", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/numerics_alert.py#L291-L342
train
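The actual bad-value counts come from the DebugNumericSummary tensor loaded via tf_debug.load_tensor_from_event. As a self-contained illustration of what is being counted, here is a minimal NumPy sketch of the same NaN/-Inf/+Inf tally (NumPy stands in for the debugger op):

import numpy as np

def count_bad_values(tensor):
    """Returns (nan_count, neg_inf_count, pos_inf_count) for an array."""
    arr = np.asarray(tensor, dtype=np.float64)
    return (int(np.isnan(arr).sum()),
            int(np.isneginf(arr).sum()),
            int(np.isposinf(arr).sum()))

print(count_bad_values([1.0, float('nan'), float('-inf')]))  # (1, 1, 0)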
tensorflow/tensorboard
tensorboard/plugins/debugger/numerics_alert.py
NumericsAlertHistory.first_timestamp
python
def first_timestamp(self, event_key=None):
  """Obtain the first timestamp.

  Args:
    event_key: the type key of the sought events (e.g., constants.NAN_KEY).
      If None, includes all event type keys.

  Returns:
    First (earliest) timestamp of all the events of the given type (or all
    event types if event_key is None).
  """
  if event_key is None:
    timestamps = [self._trackers[key].first_timestamp
                  for key in self._trackers]
    return min(timestamp for timestamp in timestamps if timestamp >= 0)
  else:
    return self._trackers[event_key].first_timestamp
[ "def", "first_timestamp", "(", "self", ",", "event_key", "=", "None", ")", ":", "if", "event_key", "is", "None", ":", "timestamps", "=", "[", "self", ".", "_trackers", "[", "key", "]", ".", "first_timestamp", "for", "key", "in", "self", ".", "_trackers", "]", "return", "min", "(", "timestamp", "for", "timestamp", "in", "timestamps", "if", "timestamp", ">=", "0", ")", "else", ":", "return", "self", ".", "_trackers", "[", "event_key", "]", ".", "first_timestamp" ]
Obtain the first timestamp. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: First (earliest) timestamp of all the events of the given type (or all event types if event_key is None).
[ "Obtain", "the", "first", "timestamp", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/numerics_alert.py#L136-L152
train
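The `timestamp >= 0` filter suggests that trackers report a negative sentinel (presumably -1) when they have recorded no events; a minimal illustration with made-up timestamps:

timestamps = [-1, 1553000010.2, 1553000000.5]   # -1: tracker saw no events yet
print(min(t for t in timestamps if t >= 0))     # 1553000000.5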
tensorflow/tensorboard
tensorboard/plugins/debugger/numerics_alert.py
NumericsAlertHistory.last_timestamp
python
def last_timestamp(self, event_key=None):
  """Obtain the last timestamp.

  Args:
    event_key: the type key of the sought events (e.g., constants.NAN_KEY).
      If None, includes all event type keys.

  Returns:
    Last (latest) timestamp of all the events of the given type (or all
    event types if event_key is None).
  """
  if event_key is None:
    timestamps = [self._trackers[key].last_timestamp
                  for key in self._trackers]
    return max(timestamp for timestamp in timestamps if timestamp >= 0)
  else:
    return self._trackers[event_key].last_timestamp
[ "def", "last_timestamp", "(", "self", ",", "event_key", "=", "None", ")", ":", "if", "event_key", "is", "None", ":", "timestamps", "=", "[", "self", ".", "_trackers", "[", "key", "]", ".", "first_timestamp", "for", "key", "in", "self", ".", "_trackers", "]", "return", "max", "(", "timestamp", "for", "timestamp", "in", "timestamps", "if", "timestamp", ">=", "0", ")", "else", ":", "return", "self", ".", "_trackers", "[", "event_key", "]", ".", "last_timestamp" ]
Obtain the last timestamp. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: Last (latest) timestamp of all the events of the given type (or all event types if event_key is None).
[ "Obtain", "the", "last", "timestamp", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/numerics_alert.py#L154-L170
train
tensorflow/tensorboard
tensorboard/plugins/debugger/numerics_alert.py
NumericsAlertHistory.create_jsonable_history
python
def create_jsonable_history(self):
  """Creates a JSON-able representation of this object.

  Returns:
    A dictionary mapping key to EventTrackerDescription (which can be used to
    create event trackers).
  """
  return {value_category_key: tracker.get_description()
          for (value_category_key, tracker) in self._trackers.items()}
[ "def", "create_jsonable_history", "(", "self", ")", ":", "return", "{", "value_category_key", ":", "tracker", ".", "get_description", "(", ")", "for", "(", "value_category_key", ",", "tracker", ")", "in", "self", ".", "_trackers", ".", "items", "(", ")", "}" ]
Creates a JSON-able representation of this object. Returns: A dictionary mapping key to EventTrackerDescription (which can be used to create event trackers).
[ "Creates", "a", "JSON", "-", "able", "representation", "of", "this", "object", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/numerics_alert.py#L185-L193
train
tensorflow/tensorboard
tensorboard/plugins/debugger/numerics_alert.py
NumericsAlertRegistry.register
python
def register(self, numerics_alert):
  """Register an alerting numeric event.

  If the registry is already at capacity, alerts for previously unseen
  device/tensor pairs are silently dropped.

  Args:
    numerics_alert: An instance of `NumericsAlert`.
  """
  key = (numerics_alert.device_name, numerics_alert.tensor_name)
  if key in self._data:
    self._data[key].add(numerics_alert)
  else:
    if len(self._data) < self._capacity:
      history = NumericsAlertHistory()
      history.add(numerics_alert)
      self._data[key] = history
[ "def", "register", "(", "self", ",", "numerics_alert", ")", ":", "key", "=", "(", "numerics_alert", ".", "device_name", ",", "numerics_alert", ".", "tensor_name", ")", "if", "key", "in", "self", ".", "_data", ":", "self", ".", "_data", "[", "key", "]", ".", "add", "(", "numerics_alert", ")", "else", ":", "if", "len", "(", "self", ".", "_data", ")", "<", "self", ".", "_capacity", ":", "history", "=", "NumericsAlertHistory", "(", ")", "history", ".", "add", "(", "numerics_alert", ")", "self", ".", "_data", "[", "key", "]", "=", "history" ]
Register an alerting numeric event. If the registry is already at capacity, alerts for previously unseen device/tensor pairs are silently dropped. Args: numerics_alert: An instance of `NumericsAlert`.
[ "Register", "an", "alerting", "numeric", "event", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/numerics_alert.py#L224-L237
train
tensorflow/tensorboard
tensorboard/plugins/debugger/numerics_alert.py
NumericsAlertRegistry.report
python
def report(self, device_name_filter=None, tensor_name_filter=None):
  """Get a report of offending device/tensor names.

  The report includes information about the device name, tensor name, first
  (earliest) timestamp of the alerting events from the tensor, in addition to
  counts of nan, positive inf and negative inf events.

  Args:
    device_name_filter: regex filter for device name, or None (not filtered).
    tensor_name_filter: regex filter for tensor name, or None (not filtered).

  Returns:
    A list of NumericsAlertReportRow, sorted by the first_timestamp in
    ascending order.
  """
  report = []
  for key in self._data:
    device_name, tensor_name = key
    history = self._data[key]
    report.append(
        NumericsAlertReportRow(
            device_name=device_name,
            tensor_name=tensor_name,
            first_timestamp=history.first_timestamp(),
            nan_event_count=history.event_count(constants.NAN_KEY),
            neg_inf_event_count=history.event_count(constants.NEG_INF_KEY),
            pos_inf_event_count=history.event_count(constants.POS_INF_KEY)))

  if device_name_filter:
    device_name_pattern = re.compile(device_name_filter)
    report = [item for item in report
              if device_name_pattern.match(item.device_name)]
  if tensor_name_filter:
    tensor_name_pattern = re.compile(tensor_name_filter)
    report = [item for item in report
              if tensor_name_pattern.match(item.tensor_name)]

  # Sort results chronologically.
  return sorted(report, key=lambda x: x.first_timestamp)
[ "def", "report", "(", "self", ",", "device_name_filter", "=", "None", ",", "tensor_name_filter", "=", "None", ")", ":", "report", "=", "[", "]", "for", "key", "in", "self", ".", "_data", ":", "device_name", ",", "tensor_name", "=", "key", "history", "=", "self", ".", "_data", "[", "key", "]", "report", ".", "append", "(", "NumericsAlertReportRow", "(", "device_name", "=", "device_name", ",", "tensor_name", "=", "tensor_name", ",", "first_timestamp", "=", "history", ".", "first_timestamp", "(", ")", ",", "nan_event_count", "=", "history", ".", "event_count", "(", "constants", ".", "NAN_KEY", ")", ",", "neg_inf_event_count", "=", "history", ".", "event_count", "(", "constants", ".", "NEG_INF_KEY", ")", ",", "pos_inf_event_count", "=", "history", ".", "event_count", "(", "constants", ".", "POS_INF_KEY", ")", ")", ")", "if", "device_name_filter", ":", "device_name_pattern", "=", "re", ".", "compile", "(", "device_name_filter", ")", "report", "=", "[", "item", "for", "item", "in", "report", "if", "device_name_pattern", ".", "match", "(", "item", ".", "device_name", ")", "]", "if", "tensor_name_filter", ":", "tensor_name_pattern", "=", "re", ".", "compile", "(", "tensor_name_filter", ")", "report", "=", "[", "item", "for", "item", "in", "report", "if", "tensor_name_pattern", ".", "match", "(", "item", ".", "tensor_name", ")", "]", "# Sort results chronologically.", "return", "sorted", "(", "report", ",", "key", "=", "lambda", "x", ":", "x", ".", "first_timestamp", ")" ]
Get a report of offending device/tensor names. The report includes information about the device name, tensor name, first (earliest) timestamp of the alerting events from the tensor, in addition to counts of nan, positive inf and negative inf events. Args: device_name_filter: regex filter for device name, or None (not filtered). tensor_name_filter: regex filter for tensor name, or None (not filtered). Returns: A list of NumericsAlertReportRow, sorted by the first_timestamp in ascending order.
[ "Get", "a", "report", "of", "offending", "device", "/", "tensor", "names", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/numerics_alert.py#L239-L276
train
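A hypothetical report call; registry is assumed to be a populated NumericsAlertRegistry. Since the filters use re.match, patterns are anchored at the start of the name, so write a leading `.*` for substring matching:

rows = registry.report(device_name_filter=r'.*GPU.*',
                       tensor_name_filter=r'layer1/.*')
for row in rows:
    print(row.device_name, row.tensor_name, row.nan_event_count)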
tensorflow/tensorboard
tensorboard/plugins/debugger/numerics_alert.py
NumericsAlertRegistry.create_jsonable_registry
python
def create_jsonable_registry(self):
  """Creates a JSON-able representation of this object.

  Returns:
    A list of triplets, each containing a device name, a tensor name, and a
    JSON-able representation of the corresponding NumericsAlertHistory.
  """
  # JSON does not support tuples as keys. Only strings. Therefore, we store
  # the device name, tensor name, and dictionary data within a 3-item list.
  return [HistoryTriplet(pair[0], pair[1], history.create_jsonable_history())
          for (pair, history) in self._data.items()]
[ "def", "create_jsonable_registry", "(", "self", ")", ":", "# JSON does not support tuples as keys. Only strings. Therefore, we store", "# the device name, tensor name, and dictionary data within a 3-item list.", "return", "[", "HistoryTriplet", "(", "pair", "[", "0", "]", ",", "pair", "[", "1", "]", ",", "history", ".", "create_jsonable_history", "(", ")", ")", "for", "(", "pair", ",", "history", ")", "in", "self", ".", "_data", ".", "items", "(", ")", "]" ]
Creates a JSON-able representation of this object. Returns: A list of triplets, each containing a device name, a tensor name, and a JSON-able representation of the corresponding NumericsAlertHistory.
[ "Creates", "a", "JSON", "-", "able", "representation", "of", "this", "object", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/numerics_alert.py#L278-L288
train
tensorflow/tensorboard
tensorboard/plugins/audio/audio_demo.py
run
python
def run(logdir, run_name, wave_name, wave_constructor):
  """Generate wave data of the given form.

  The provided function `wave_constructor` should accept a scalar tensor of
  type float32, representing the frequency (in Hz) at which to construct a
  wave, and return a tensor of shape [1, _samples(), `n`] representing audio
  data (for some number of channels `n`).

  Waves will be generated at frequencies ranging from A4 to A5.

  Arguments:
    logdir: the top-level directory into which to write summary data
    run_name: the name of this run; will be created as a subdirectory
      under logdir
    wave_name: the name of the wave being generated
    wave_constructor: see above
  """
  tf.compat.v1.reset_default_graph()
  tf.compat.v1.set_random_seed(0)

  # On each step `i`, we'll set this placeholder to `i`. This allows us
  # to know "what time it is" at each step.
  step_placeholder = tf.compat.v1.placeholder(tf.float32, shape=[])

  # We want to linearly interpolate a frequency between A4 (440 Hz) and
  # A5 (880 Hz).
  with tf.name_scope('compute_frequency'):
    f_min = 440.0
    f_max = 880.0
    t = step_placeholder / (FLAGS.steps - 1)
    frequency = f_min * (1.0 - t) + f_max * t

  # Let's log this frequency, just so that we can make sure that it's as
  # expected.
  tf.compat.v1.summary.scalar('frequency', frequency)

  # Now, we pass this to the wave constructor to get our waveform. Doing
  # so within a name scope means that any summaries that the wave
  # constructor produces will be namespaced.
  with tf.name_scope(wave_name):
    waveform = wave_constructor(frequency)

  # We also have the opportunity to annotate each audio clip with a
  # label. This is a good place to include the frequency, because it'll
  # be visible immediately next to the audio clip.
  with tf.name_scope('compute_labels'):
    samples = tf.shape(input=waveform)[0]
    wave_types = tf.tile(["*Wave type:* `%s`." % wave_name], [samples])
    frequencies = tf.strings.join([
        "*Frequency:* ",
        tf.tile([tf.as_string(frequency, precision=2)], [samples]),
        " Hz.",
    ])
    samples = tf.strings.join([
        "*Sample:* ",
        tf.as_string(tf.range(samples) + 1),
        " of ",
        tf.as_string(samples),
        ".",
    ])
    labels = tf.strings.join([wave_types, frequencies, samples], separator=" ")

  # We can place a description next to the summary in TensorBoard. This
  # is a good place to explain what the summary represents, methodology
  # for creating it, etc. Let's include the source code of the function
  # that generated the wave.
  source = '\n'.join(' %s' % line.rstrip()
                     for line in inspect.getsourcelines(wave_constructor)[0])
  description = ("A wave of type `%r`, generated via:\n\n%s"
                 % (wave_name, source))

  # Here's the crucial piece: we interpret this result as audio.
  summary.op('waveform', waveform, FLAGS.sample_rate,
             labels=labels,
             display_name=wave_name,
             description=description)

  # Now, we can collect up all the summaries and begin the run.
  summ = tf.compat.v1.summary.merge_all()

  sess = tf.compat.v1.Session()
  writer = tf.compat.v1.summary.FileWriter(os.path.join(logdir, run_name))
  writer.add_graph(sess.graph)
  sess.run(tf.compat.v1.global_variables_initializer())
  for step in xrange(FLAGS.steps):
    s = sess.run(summ, feed_dict={step_placeholder: float(step)})
    writer.add_summary(s, global_step=step)
  writer.close()
[ "def", "run", "(", "logdir", ",", "run_name", ",", "wave_name", ",", "wave_constructor", ")", ":", "tf", ".", "compat", ".", "v1", ".", "reset_default_graph", "(", ")", "tf", ".", "compat", ".", "v1", ".", "set_random_seed", "(", "0", ")", "# On each step `i`, we'll set this placeholder to `i`. This allows us", "# to know \"what time it is\" at each step.", "step_placeholder", "=", "tf", ".", "compat", ".", "v1", ".", "placeholder", "(", "tf", ".", "float32", ",", "shape", "=", "[", "]", ")", "# We want to linearly interpolate a frequency between A4 (440 Hz) and", "# A5 (880 Hz).", "with", "tf", ".", "name_scope", "(", "'compute_frequency'", ")", ":", "f_min", "=", "440.0", "f_max", "=", "880.0", "t", "=", "step_placeholder", "/", "(", "FLAGS", ".", "steps", "-", "1", ")", "frequency", "=", "f_min", "*", "(", "1.0", "-", "t", ")", "+", "f_max", "*", "t", "# Let's log this frequency, just so that we can make sure that it's as", "# expected.", "tf", ".", "compat", ".", "v1", ".", "summary", ".", "scalar", "(", "'frequency'", ",", "frequency", ")", "# Now, we pass this to the wave constructor to get our waveform. Doing", "# so within a name scope means that any summaries that the wave", "# constructor produces will be namespaced.", "with", "tf", ".", "name_scope", "(", "wave_name", ")", ":", "waveform", "=", "wave_constructor", "(", "frequency", ")", "# We also have the opportunity to annotate each audio clip with a", "# label. This is a good place to include the frequency, because it'll", "# be visible immediately next to the audio clip.", "with", "tf", ".", "name_scope", "(", "'compute_labels'", ")", ":", "samples", "=", "tf", ".", "shape", "(", "input", "=", "waveform", ")", "[", "0", "]", "wave_types", "=", "tf", ".", "tile", "(", "[", "\"*Wave type:* `%s`.\"", "%", "wave_name", "]", ",", "[", "samples", "]", ")", "frequencies", "=", "tf", ".", "strings", ".", "join", "(", "[", "\"*Frequency:* \"", ",", "tf", ".", "tile", "(", "[", "tf", ".", "as_string", "(", "frequency", ",", "precision", "=", "2", ")", "]", ",", "[", "samples", "]", ")", ",", "\" Hz.\"", ",", "]", ")", "samples", "=", "tf", ".", "strings", ".", "join", "(", "[", "\"*Sample:* \"", ",", "tf", ".", "as_string", "(", "tf", ".", "range", "(", "samples", ")", "+", "1", ")", ",", "\" of \"", ",", "tf", ".", "as_string", "(", "samples", ")", ",", "\".\"", ",", "]", ")", "labels", "=", "tf", ".", "strings", ".", "join", "(", "[", "wave_types", ",", "frequencies", ",", "samples", "]", ",", "separator", "=", "\" \"", ")", "# We can place a description next to the summary in TensorBoard. This", "# is a good place to explain what the summary represents, methodology", "# for creating it, etc. 
Let's include the source code of the function", "# that generated the wave.", "source", "=", "'\\n'", ".", "join", "(", "' %s'", "%", "line", ".", "rstrip", "(", ")", "for", "line", "in", "inspect", ".", "getsourcelines", "(", "wave_constructor", ")", "[", "0", "]", ")", "description", "=", "(", "\"A wave of type `%r`, generated via:\\n\\n%s\"", "%", "(", "wave_name", ",", "source", ")", ")", "# Here's the crucial piece: we interpret this result as audio.", "summary", ".", "op", "(", "'waveform'", ",", "waveform", ",", "FLAGS", ".", "sample_rate", ",", "labels", "=", "labels", ",", "display_name", "=", "wave_name", ",", "description", "=", "description", ")", "# Now, we can collect up all the summaries and begin the run.", "summ", "=", "tf", ".", "compat", ".", "v1", ".", "summary", ".", "merge_all", "(", ")", "sess", "=", "tf", ".", "compat", ".", "v1", ".", "Session", "(", ")", "writer", "=", "tf", ".", "summary", ".", "FileWriter", "(", "os", ".", "path", ".", "join", "(", "logdir", ",", "run_name", ")", ")", "writer", ".", "add_graph", "(", "sess", ".", "graph", ")", "sess", ".", "run", "(", "tf", ".", "compat", ".", "v1", ".", "global_variables_initializer", "(", ")", ")", "for", "step", "in", "xrange", "(", "FLAGS", ".", "steps", ")", ":", "s", "=", "sess", ".", "run", "(", "summ", ",", "feed_dict", "=", "{", "step_placeholder", ":", "float", "(", "step", ")", "}", ")", "writer", ".", "add_summary", "(", "s", ",", "global_step", "=", "step", ")", "writer", ".", "close", "(", ")" ]
Generate wave data of the given form. The provided function `wave_constructor` should accept a scalar tensor of type float32, representing the frequency (in Hz) at which to construct a wave, and return a tensor of shape [1, _samples(), `n`] representing audio data (for some number of channels `n`). Waves will be generated at frequencies ranging from A4 to A5. Arguments: logdir: the top-level directory into which to write summary data run_name: the name of this run; will be created as a subdirectory under logdir wave_name: the name of the wave being generated wave_constructor: see above
[ "Generate", "wave", "data", "of", "the", "given", "form", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/audio_demo.py#L49-L133
train
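The record above sweeps pitch linearly from A4 to A5 over the run. A minimal sketch of that interpolation in plain Python, with `steps` standing in for FLAGS.steps (the helper name here is hypothetical):

def interpolate_frequency(step, steps, f_min=440.0, f_max=880.0):
    # t sweeps 0 -> 1 across the run, exactly as in compute_frequency above.
    t = step / (steps - 1)
    return f_min * (1.0 - t) + f_max * t

print(interpolate_frequency(0, 50))   # 440.0 (A4)
print(interpolate_frequency(49, 50))  # 880.0 (A5)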
tensorflow/tensorboard
tensorboard/plugins/audio/audio_demo.py
sine_wave
def sine_wave(frequency): """Emit a sine wave at the given frequency.""" xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) ts = xs / FLAGS.sample_rate return tf.sin(2 * math.pi * frequency * ts)
python
def sine_wave(frequency): """Emit a sine wave at the given frequency.""" xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) ts = xs / FLAGS.sample_rate return tf.sin(2 * math.pi * frequency * ts)
[ "def", "sine_wave", "(", "frequency", ")", ":", "xs", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "_samples", "(", ")", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "[", "1", ",", "_samples", "(", ")", ",", "1", "]", ")", "ts", "=", "xs", "/", "FLAGS", ".", "sample_rate", "return", "tf", ".", "sin", "(", "2", "*", "math", ".", "pi", "*", "frequency", "*", "ts", ")" ]
Emit a sine wave at the given frequency.
[ "Emit", "a", "sine", "wave", "at", "the", "given", "frequency", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/audio_demo.py#L139-L143
train
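The same waveform is easy to reproduce without a TF graph. A NumPy sketch, with sample_rate and duration as assumed values rather than FLAGS:

import numpy as np

def sine_wave_np(frequency, sample_rate=44100, duration=2.0):
    # Sample times in seconds, shaped [batch, samples, channels] as in the record above.
    ts = np.arange(int(sample_rate * duration), dtype=np.float32) / sample_rate
    return np.sin(2 * np.pi * frequency * ts).reshape(1, -1, 1)

print(sine_wave_np(440.0).shape)  # (1, 88200, 1)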
tensorflow/tensorboard
tensorboard/plugins/audio/audio_demo.py
triangle_wave
def triangle_wave(frequency): """Emit a triangle wave at the given frequency.""" xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) ts = xs / FLAGS.sample_rate # # A triangle wave looks like this: # # /\ /\ # / \ / \ # \ / \ / # \/ \/ # # If we look at just half a period (the first four slashes in the # diagram above), we can see that it looks like a transformed absolute # value function. # # Let's start by computing the times relative to the start of each # half-wave pulse (each individual "mountain" or "valley", of which # there are four in the above diagram). half_pulse_index = ts * (frequency * 2) half_pulse_angle = half_pulse_index % 1.0 # in [0, 1] # # Now, we can see that each positive half-pulse ("mountain") has # amplitude given by A(z) = 0.5 - abs(z - 0.5), and then normalized: absolute_amplitude = (0.5 - tf.abs(half_pulse_angle - 0.5)) / 0.5 # # But every other half-pulse is negative, so we should invert these. half_pulse_parity = tf.sign(1 - (half_pulse_index % 2.0)) amplitude = half_pulse_parity * absolute_amplitude # # This is precisely the desired result, so we're done! return amplitude
python
def triangle_wave(frequency): """Emit a triangle wave at the given frequency.""" xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) ts = xs / FLAGS.sample_rate # # A triangle wave looks like this: # # /\ /\ # / \ / \ # \ / \ / # \/ \/ # # If we look at just half a period (the first four slashes in the # diagram above), we can see that it looks like a transformed absolute # value function. # # Let's start by computing the times relative to the start of each # half-wave pulse (each individual "mountain" or "valley", of which # there are four in the above diagram). half_pulse_index = ts * (frequency * 2) half_pulse_angle = half_pulse_index % 1.0 # in [0, 1] # # Now, we can see that each positive half-pulse ("mountain") has # amplitude given by A(z) = 0.5 - abs(z - 0.5), and then normalized: absolute_amplitude = (0.5 - tf.abs(half_pulse_angle - 0.5)) / 0.5 # # But every other half-pulse is negative, so we should invert these. half_pulse_parity = tf.sign(1 - (half_pulse_index % 2.0)) amplitude = half_pulse_parity * absolute_amplitude # # This is precisely the desired result, so we're done! return amplitude
[ "def", "triangle_wave", "(", "frequency", ")", ":", "xs", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "_samples", "(", ")", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "[", "1", ",", "_samples", "(", ")", ",", "1", "]", ")", "ts", "=", "xs", "/", "FLAGS", ".", "sample_rate", "#", "# A triangle wave looks like this:", "#", "# /\\ /\\", "# / \\ / \\", "# \\ / \\ /", "# \\/ \\/", "#", "# If we look at just half a period (the first four slashes in the", "# diagram above), we can see that it looks like a transformed absolute", "# value function.", "#", "# Let's start by computing the times relative to the start of each", "# half-wave pulse (each individual \"mountain\" or \"valley\", of which", "# there are four in the above diagram).", "half_pulse_index", "=", "ts", "*", "(", "frequency", "*", "2", ")", "half_pulse_angle", "=", "half_pulse_index", "%", "1.0", "# in [0, 1]", "#", "# Now, we can see that each positive half-pulse (\"mountain\") has", "# amplitude given by A(z) = 0.5 - abs(z - 0.5), and then normalized:", "absolute_amplitude", "=", "(", "0.5", "-", "tf", ".", "abs", "(", "half_pulse_angle", "-", "0.5", ")", ")", "/", "0.5", "#", "# But every other half-pulse is negative, so we should invert these.", "half_pulse_parity", "=", "tf", ".", "sign", "(", "1", "-", "(", "half_pulse_index", "%", "2.0", ")", ")", "amplitude", "=", "half_pulse_parity", "*", "absolute_amplitude", "#", "# This is precisely the desired result, so we're done!", "return", "amplitude" ]
Emit a triangle wave at the given frequency.
[ "Emit", "a", "triangle", "wave", "at", "the", "given", "frequency", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/audio_demo.py#L152-L183
train
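The half-pulse construction above ports directly to NumPy; this sketch keeps the same variable names and uses assumed sample_rate/duration values:

import numpy as np

def triangle_wave_np(frequency, sample_rate=44100, duration=2.0):
    ts = np.arange(int(sample_rate * duration), dtype=np.float32) / sample_rate
    half_pulse_index = ts * (frequency * 2)
    half_pulse_angle = half_pulse_index % 1.0                  # position within one half-pulse
    absolute_amplitude = (0.5 - np.abs(half_pulse_angle - 0.5)) / 0.5
    half_pulse_parity = np.sign(1 - (half_pulse_index % 2.0))  # alternates +1 (mountain) / -1 (valley)
    return half_pulse_parity * absolute_amplitude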
tensorflow/tensorboard
tensorboard/plugins/audio/audio_demo.py
bisine_wave
def bisine_wave(frequency): """Emit two sine waves, in stereo at different octaves.""" # # We can first use our existing sine generator to generate two different # waves. f_hi = frequency f_lo = frequency / 2.0 with tf.name_scope('hi'): sine_hi = sine_wave(f_hi) with tf.name_scope('lo'): sine_lo = sine_wave(f_lo) # # Now, we have two tensors of shape [1, _samples(), 1]. By concatenating # them along axis 2, we get a tensor of shape [1, _samples(), 2]---a # stereo waveform. return tf.concat([sine_lo, sine_hi], axis=2)
python
def bisine_wave(frequency): """Emit two sine waves, in stereo at different octaves.""" # # We can first use our existing sine generator to generate two different # waves. f_hi = frequency f_lo = frequency / 2.0 with tf.name_scope('hi'): sine_hi = sine_wave(f_hi) with tf.name_scope('lo'): sine_lo = sine_wave(f_lo) # # Now, we have two tensors of shape [1, _samples(), 1]. By concatenating # them along axis 2, we get a tensor of shape [1, _samples(), 2]---a # stereo waveform. return tf.concat([sine_lo, sine_hi], axis=2)
[ "def", "bisine_wave", "(", "frequency", ")", ":", "#", "# We can first our existing sine generator to generate two different", "# waves.", "f_hi", "=", "frequency", "f_lo", "=", "frequency", "/", "2.0", "with", "tf", ".", "name_scope", "(", "'hi'", ")", ":", "sine_hi", "=", "sine_wave", "(", "f_hi", ")", "with", "tf", ".", "name_scope", "(", "'lo'", ")", ":", "sine_lo", "=", "sine_wave", "(", "f_lo", ")", "#", "# Now, we have two tensors of shape [1, _samples(), 1]. By concatenating", "# them along axis 2, we get a tensor of shape [1, _samples(), 2]---a", "# stereo waveform.", "return", "tf", ".", "concat", "(", "[", "sine_lo", ",", "sine_hi", "]", ",", "axis", "=", "2", ")" ]
Emit two sine waves, in stereo at different octaves.
[ "Emit", "two", "sine", "waves", "in", "stereo", "at", "different", "octaves", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/audio_demo.py#L190-L205
train
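Concatenating two mono signals along the channel axis yields the stereo layout described above. A self-contained NumPy sketch (sample_rate/duration assumed):

import numpy as np

def bisine_wave_np(frequency, sample_rate=44100, duration=2.0):
    ts = np.arange(int(sample_rate * duration), dtype=np.float32) / sample_rate
    sine = lambda f: np.sin(2 * np.pi * f * ts).reshape(1, -1, 1)
    sine_hi = sine(frequency)        # [1, samples, 1]
    sine_lo = sine(frequency / 2.0)  # one octave down
    return np.concatenate([sine_lo, sine_hi], axis=2)  # [1, samples, 2]: stereo

print(bisine_wave_np(440.0).shape)  # (1, 88200, 2)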
tensorflow/tensorboard
tensorboard/plugins/audio/audio_demo.py
bisine_wahwah_wave
def bisine_wahwah_wave(frequency): """Emit two sine waves with balance oscillating left and right.""" # # This is clearly intended to build on the bisine wave defined above, # so we can start by generating that. waves_a = bisine_wave(frequency) # # Then, by reversing axis 2, we swap the stereo channels. By mixing # this with `waves_a`, we'll be able to create the desired effect. waves_b = tf.reverse(waves_a, axis=[2]) # # Let's have the balance oscillate from left to right four times. iterations = 4 # # Now, we compute the balance for each sample: `ts` has values # in [0, 1] that indicate how much we should use `waves_a`. xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) thetas = xs / _samples() * iterations ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2 # # Finally, we can mix the two together, and we're done. wave = ts * waves_a + (1.0 - ts) * waves_b # # Alternately, we can make the effect more pronounced by exaggerating # the sample data. Let's emit both variations. exaggerated_wave = wave ** 3.0 return tf.concat([wave, exaggerated_wave], axis=0)
python
def bisine_wahwah_wave(frequency): """Emit two sine waves with balance oscillating left and right.""" # # This is clearly intended to build on the bisine wave defined above, # so we can start by generating that. waves_a = bisine_wave(frequency) # # Then, by reversing axis 2, we swap the stereo channels. By mixing # this with `waves_a`, we'll be able to create the desired effect. waves_b = tf.reverse(waves_a, axis=[2]) # # Let's have the balance oscillate from left to right four times. iterations = 4 # # Now, we compute the balance for each sample: `ts` has values # in [0, 1] that indicate how much we should use `waves_a`. xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) thetas = xs / _samples() * iterations ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2 # # Finally, we can mix the two together, and we're done. wave = ts * waves_a + (1.0 - ts) * waves_b # # Alternately, we can make the effect more pronounced by exaggerating # the sample data. Let's emit both variations. exaggerated_wave = wave ** 3.0 return tf.concat([wave, exaggerated_wave], axis=0)
[ "def", "bisine_wahwah_wave", "(", "frequency", ")", ":", "#", "# This is clearly intended to build on the bisine wave defined above,", "# so we can start by generating that.", "waves_a", "=", "bisine_wave", "(", "frequency", ")", "#", "# Then, by reversing axis 2, we swap the stereo channels. By mixing", "# this with `waves_a`, we'll be able to create the desired effect.", "waves_b", "=", "tf", ".", "reverse", "(", "waves_a", ",", "axis", "=", "[", "2", "]", ")", "#", "# Let's have the balance oscillate from left to right four times.", "iterations", "=", "4", "#", "# Now, we compute the balance for each sample: `ts` has values", "# in [0, 1] that indicate how much we should use `waves_a`.", "xs", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "_samples", "(", ")", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "[", "1", ",", "_samples", "(", ")", ",", "1", "]", ")", "thetas", "=", "xs", "/", "_samples", "(", ")", "*", "iterations", "ts", "=", "(", "tf", ".", "sin", "(", "math", ".", "pi", "*", "2", "*", "thetas", ")", "+", "1", ")", "/", "2", "#", "# Finally, we can mix the two together, and we're done.", "wave", "=", "ts", "*", "waves_a", "+", "(", "1.0", "-", "ts", ")", "*", "waves_b", "#", "# Alternately, we can make the effect more pronounced by exaggerating", "# the sample data. Let's emit both variations.", "exaggerated_wave", "=", "wave", "**", "3.0", "return", "tf", ".", "concat", "(", "[", "wave", ",", "exaggerated_wave", "]", ",", "axis", "=", "0", ")" ]
Emit two sine waves with balance oscillating left and right.
[ "Emit", "two", "sine", "waves", "with", "balance", "oscillating", "left", "and", "right", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/audio_demo.py#L208-L234
train
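The wah-wah effect hinges on the balance envelope `ts`. Sketched in isolation with assumed sizes:

import numpy as np

samples, iterations = 88200, 4
xs = np.arange(samples, dtype=np.float32)
thetas = xs / samples * iterations
ts = (np.sin(2 * np.pi * thetas) + 1) / 2  # oscillates in [0, 1], four full cycles
# Mixing as `ts * waves_a + (1 - ts) * waves_b` then sweeps the balance
# between the two channel orderings, as in the record above.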
tensorflow/tensorboard
tensorboard/plugins/audio/audio_demo.py
run_all
def run_all(logdir, verbose=False): """Generate waves of the shapes defined above. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins """ waves = [sine_wave, square_wave, triangle_wave, bisine_wave, bisine_wahwah_wave] for (i, wave_constructor) in enumerate(waves): wave_name = wave_constructor.__name__ run_name = 'wave:%02d,%s' % (i + 1, wave_name) if verbose: print('--- Running: %s' % run_name) run(logdir, run_name, wave_name, wave_constructor)
python
def run_all(logdir, verbose=False): """Generate waves of the shapes defined above. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins """ waves = [sine_wave, square_wave, triangle_wave, bisine_wave, bisine_wahwah_wave] for (i, wave_constructor) in enumerate(waves): wave_name = wave_constructor.__name__ run_name = 'wave:%02d,%s' % (i + 1, wave_name) if verbose: print('--- Running: %s' % run_name) run(logdir, run_name, wave_name, wave_constructor)
[ "def", "run_all", "(", "logdir", ",", "verbose", "=", "False", ")", ":", "waves", "=", "[", "sine_wave", ",", "square_wave", ",", "triangle_wave", ",", "bisine_wave", ",", "bisine_wahwah_wave", "]", "for", "(", "i", ",", "wave_constructor", ")", "in", "enumerate", "(", "waves", ")", ":", "wave_name", "=", "wave_constructor", ".", "__name__", "run_name", "=", "'wave:%02d,%s'", "%", "(", "i", "+", "1", ",", "wave_name", ")", "if", "verbose", ":", "print", "(", "'--- Running: %s'", "%", "run_name", ")", "run", "(", "logdir", ",", "run_name", ",", "wave_name", ",", "wave_constructor", ")" ]
Generate waves of the shapes defined above. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins
[ "Generate", "waves", "of", "the", "shapes", "defined", "above", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/audio_demo.py#L237-L251
train
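A hypothetical invocation, assuming flags have been parsed (square_wave is defined in audio_demo.py but not shown in these records); the expected console lines follow from the 'wave:%02d,%s' format above:

run_all('/tmp/audio_demo', verbose=True)
# --- Running: wave:01,sine_wave
# --- Running: wave:02,square_wave
# --- Running: wave:03,triangle_wave
# --- Running: wave:04,bisine_wave
# --- Running: wave:05,bisine_wahwah_wave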
tensorflow/tensorboard
tensorboard/backend/process_graph.py
prepare_graph_for_ui
def prepare_graph_for_ui(graph, limit_attr_size=1024, large_attrs_key='_too_large_attrs'): """Prepares (modifies in-place) the graph to be served to the front-end. For now, it supports filtering out attributes that are too large to be shown in the graph UI. Args: graph: The GraphDef proto message. limit_attr_size: Maximum allowed size in bytes, before the attribute is considered large. Default is 1024 (1KB). Must be > 0 or None. If None, there will be no filtering. large_attrs_key: The attribute key that will be used for storing attributes that are too large. Default is '_too_large_attrs'. Must be != None if `limit_attr_size` is != None. Raises: ValueError: If `large_attrs_key is None` while `limit_attr_size != None`. ValueError: If `limit_attr_size` is defined, but <= 0. """ # Check input for validity. if limit_attr_size is not None: if large_attrs_key is None: raise ValueError('large_attrs_key must be != None when limit_attr_size' '!= None.') if limit_attr_size <= 0: raise ValueError('limit_attr_size must be > 0, but is %d' % limit_attr_size) # Filter only if a limit size is defined. if limit_attr_size is not None: for node in graph.node: # Go through all the attributes and filter out ones bigger than the # limit. keys = list(node.attr.keys()) for key in keys: size = node.attr[key].ByteSize() if size > limit_attr_size or size < 0: del node.attr[key] # Add the attribute key to the list of "too large" attributes. # This is used in the info card in the graph UI to show the user # that some attributes are too large to be shown. node.attr[large_attrs_key].list.s.append(tf.compat.as_bytes(key))
python
def prepare_graph_for_ui(graph, limit_attr_size=1024, large_attrs_key='_too_large_attrs'): """Prepares (modifies in-place) the graph to be served to the front-end. For now, it supports filtering out attributes that are too large to be shown in the graph UI. Args: graph: The GraphDef proto message. limit_attr_size: Maximum allowed size in bytes, before the attribute is considered large. Default is 1024 (1KB). Must be > 0 or None. If None, there will be no filtering. large_attrs_key: The attribute key that will be used for storing attributes that are too large. Default is '_too_large_attrs'. Must be != None if `limit_attr_size` is != None. Raises: ValueError: If `large_attrs_key is None` while `limit_attr_size != None`. ValueError: If `limit_attr_size` is defined, but <= 0. """ # Check input for validity. if limit_attr_size is not None: if large_attrs_key is None: raise ValueError('large_attrs_key must be != None when limit_attr_size' '!= None.') if limit_attr_size <= 0: raise ValueError('limit_attr_size must be > 0, but is %d' % limit_attr_size) # Filter only if a limit size is defined. if limit_attr_size is not None: for node in graph.node: # Go through all the attributes and filter out ones bigger than the # limit. keys = list(node.attr.keys()) for key in keys: size = node.attr[key].ByteSize() if size > limit_attr_size or size < 0: del node.attr[key] # Add the attribute key to the list of "too large" attributes. # This is used in the info card in the graph UI to show the user # that some attributes are too large to be shown. node.attr[large_attrs_key].list.s.append(tf.compat.as_bytes(key))
[ "def", "prepare_graph_for_ui", "(", "graph", ",", "limit_attr_size", "=", "1024", ",", "large_attrs_key", "=", "'_too_large_attrs'", ")", ":", "# Check input for validity.", "if", "limit_attr_size", "is", "not", "None", ":", "if", "large_attrs_key", "is", "None", ":", "raise", "ValueError", "(", "'large_attrs_key must be != None when limit_attr_size'", "'!= None.'", ")", "if", "limit_attr_size", "<=", "0", ":", "raise", "ValueError", "(", "'limit_attr_size must be > 0, but is %d'", "%", "limit_attr_size", ")", "# Filter only if a limit size is defined.", "if", "limit_attr_size", "is", "not", "None", ":", "for", "node", "in", "graph", ".", "node", ":", "# Go through all the attributes and filter out ones bigger than the", "# limit.", "keys", "=", "list", "(", "node", ".", "attr", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "size", "=", "node", ".", "attr", "[", "key", "]", ".", "ByteSize", "(", ")", "if", "size", ">", "limit_attr_size", "or", "size", "<", "0", ":", "del", "node", ".", "attr", "[", "key", "]", "# Add the attribute key to the list of \"too large\" attributes.", "# This is used in the info card in the graph UI to show the user", "# that some attributes are too large to be shown.", "node", ".", "attr", "[", "large_attrs_key", "]", ".", "list", ".", "s", ".", "append", "(", "tf", ".", "compat", ".", "as_bytes", "(", "key", ")", ")" ]
Prepares (modifies in-place) the graph to be served to the front-end. For now, it supports filtering out attributes that are too large to be shown in the graph UI. Args: graph: The GraphDef proto message. limit_attr_size: Maximum allowed size in bytes, before the attribute is considered large. Default is 1024 (1KB). Must be > 0 or None. If None, there will be no filtering. large_attrs_key: The attribute key that will be used for storing attributes that are too large. Default is '_too_large_attrs'. Must be != None if `limit_attr_size` is != None. Raises: ValueError: If `large_attrs_key is None` while `limit_attr_size != None`. ValueError: If `limit_attr_size` is defined, but <= 0.
[ "Prepares", "(", "modifies", "in", "-", "place", ")", "the", "graph", "to", "be", "served", "to", "the", "front", "-", "end", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/process_graph.py#L25-L68
train
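A sketch of the in-place filtering, using a contrived GraphDef with one oversized attribute:

from tensorflow.core.framework import graph_pb2

graph = graph_pb2.GraphDef()
node = graph.node.add(name='conv1', op='Conv2D')
node.attr['_large_blob'].s = b'x' * 4096     # ByteSize() > 1024, so it gets stripped
prepare_graph_for_ui(graph, limit_attr_size=1024)
print('_large_blob' in node.attr)            # False: the attribute was deleted
print(node.attr['_too_large_attrs'].list.s)  # [b'_large_blob']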
tensorflow/tensorboard
tensorboard/plugins/graph/graphs_plugin.py
GraphsPlugin.info_impl
def info_impl(self): """Returns a dict of all runs and tags and their data availabilities.""" result = {} def add_row_item(run, tag=None): run_item = result.setdefault(run, { 'run': run, 'tags': {}, # A run-wide GraphDef of ops. 'run_graph': False}) tag_item = None if tag: tag_item = run_item.get('tags').setdefault(tag, { 'tag': tag, 'conceptual_graph': False, # A tagged GraphDef of ops. 'op_graph': False, 'profile': False}) return (run_item, tag_item) mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA_WITH_GRAPH) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): # The Summary op is defined in TensorFlow and does not use a stringified proto # as a content of plugin data. It contains single string that denotes a version. # https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790 if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_RUN_METADATA contain # both op graph and profile information. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_KERAS_MODEL contain # serialized Keras model in JSON format. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_KERAS_MODEL) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['conceptual_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if run_data.get(event_accumulator.GRAPH): (run_item, _) = add_row_item(run_name, None) run_item['run_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if event_accumulator.RUN_METADATA in run_data: for tag in run_data[event_accumulator.RUN_METADATA]: (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True return result
python
def info_impl(self): """Returns a dict of all runs and tags and their data availabilities.""" result = {} def add_row_item(run, tag=None): run_item = result.setdefault(run, { 'run': run, 'tags': {}, # A run-wide GraphDef of ops. 'run_graph': False}) tag_item = None if tag: tag_item = run_item.get('tags').setdefault(tag, { 'tag': tag, 'conceptual_graph': False, # A tagged GraphDef of ops. 'op_graph': False, 'profile': False}) return (run_item, tag_item) mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA_WITH_GRAPH) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): # The Summary op is defined in TensorFlow and does not use a stringified proto # as a content of plugin data. It contains single string that denotes a version. # https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790 if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_RUN_METADATA contain # both op graph and profile information. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_RUN_METADATA) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True tag_item['op_graph'] = True # Tensors associated with plugin name _PLUGIN_NAME_KERAS_MODEL contain # serialized Keras model in JSON format. mapping = self._multiplexer.PluginRunToTagToContent( _PLUGIN_NAME_KERAS_MODEL) for run_name, tag_to_content in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): if content != b'1': logger.warn('Ignoring unrecognizable version of RunMetadata.') continue (_, tag_item) = add_row_item(run_name, tag) tag_item['conceptual_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if run_data.get(event_accumulator.GRAPH): (run_item, _) = add_row_item(run_name, None) run_item['run_graph'] = True for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()): if event_accumulator.RUN_METADATA in run_data: for tag in run_data[event_accumulator.RUN_METADATA]: (_, tag_item) = add_row_item(run_name, tag) tag_item['profile'] = True return result
[ "def", "info_impl", "(", "self", ")", ":", "result", "=", "{", "}", "def", "add_row_item", "(", "run", ",", "tag", "=", "None", ")", ":", "run_item", "=", "result", ".", "setdefault", "(", "run", ",", "{", "'run'", ":", "run", ",", "'tags'", ":", "{", "}", ",", "# A run-wide GraphDef of ops.", "'run_graph'", ":", "False", "}", ")", "tag_item", "=", "None", "if", "tag", ":", "tag_item", "=", "run_item", ".", "get", "(", "'tags'", ")", ".", "setdefault", "(", "tag", ",", "{", "'tag'", ":", "tag", ",", "'conceptual_graph'", ":", "False", ",", "# A tagged GraphDef of ops.", "'op_graph'", ":", "False", ",", "'profile'", ":", "False", "}", ")", "return", "(", "run_item", ",", "tag_item", ")", "mapping", "=", "self", ".", "_multiplexer", ".", "PluginRunToTagToContent", "(", "_PLUGIN_NAME_RUN_METADATA_WITH_GRAPH", ")", "for", "run_name", ",", "tag_to_content", "in", "six", ".", "iteritems", "(", "mapping", ")", ":", "for", "(", "tag", ",", "content", ")", "in", "six", ".", "iteritems", "(", "tag_to_content", ")", ":", "# The Summary op is defined in TensorFlow and does not use a stringified proto", "# as a content of plugin data. It contains single string that denotes a version.", "# https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790", "if", "content", "!=", "b'1'", ":", "logger", ".", "warn", "(", "'Ignoring unrecognizable version of RunMetadata.'", ")", "continue", "(", "_", ",", "tag_item", ")", "=", "add_row_item", "(", "run_name", ",", "tag", ")", "tag_item", "[", "'op_graph'", "]", "=", "True", "# Tensors associated with plugin name _PLUGIN_NAME_RUN_METADATA contain", "# both op graph and profile information.", "mapping", "=", "self", ".", "_multiplexer", ".", "PluginRunToTagToContent", "(", "_PLUGIN_NAME_RUN_METADATA", ")", "for", "run_name", ",", "tag_to_content", "in", "six", ".", "iteritems", "(", "mapping", ")", ":", "for", "(", "tag", ",", "content", ")", "in", "six", ".", "iteritems", "(", "tag_to_content", ")", ":", "if", "content", "!=", "b'1'", ":", "logger", ".", "warn", "(", "'Ignoring unrecognizable version of RunMetadata.'", ")", "continue", "(", "_", ",", "tag_item", ")", "=", "add_row_item", "(", "run_name", ",", "tag", ")", "tag_item", "[", "'profile'", "]", "=", "True", "tag_item", "[", "'op_graph'", "]", "=", "True", "# Tensors associated with plugin name _PLUGIN_NAME_KERAS_MODEL contain", "# serialized Keras model in JSON format.", "mapping", "=", "self", ".", "_multiplexer", ".", "PluginRunToTagToContent", "(", "_PLUGIN_NAME_KERAS_MODEL", ")", "for", "run_name", ",", "tag_to_content", "in", "six", ".", "iteritems", "(", "mapping", ")", ":", "for", "(", "tag", ",", "content", ")", "in", "six", ".", "iteritems", "(", "tag_to_content", ")", ":", "if", "content", "!=", "b'1'", ":", "logger", ".", "warn", "(", "'Ignoring unrecognizable version of RunMetadata.'", ")", "continue", "(", "_", ",", "tag_item", ")", "=", "add_row_item", "(", "run_name", ",", "tag", ")", "tag_item", "[", "'conceptual_graph'", "]", "=", "True", "for", "(", "run_name", ",", "run_data", ")", "in", "six", ".", "iteritems", "(", "self", ".", "_multiplexer", ".", "Runs", "(", ")", ")", ":", "if", "run_data", ".", "get", "(", "event_accumulator", ".", "GRAPH", ")", ":", "(", "run_item", ",", "_", ")", "=", "add_row_item", "(", "run_name", ",", "None", ")", "run_item", "[", "'run_graph'", "]", "=", "True", "for", "(", "run_name", ",", "run_data", ")", "in", "six", ".", "iteritems", "(", "self", ".", "_multiplexer", ".", 
"Runs", "(", ")", ")", ":", "if", "event_accumulator", ".", "RUN_METADATA", "in", "run_data", ":", "for", "tag", "in", "run_data", "[", "event_accumulator", ".", "RUN_METADATA", "]", ":", "(", "_", ",", "tag_item", ")", "=", "add_row_item", "(", "run_name", ",", "tag", ")", "tag_item", "[", "'profile'", "]", "=", "True", "return", "result" ]
Returns a dict of all runs and tags and their data availabilities.
[ "Returns", "a", "dict", "of", "all", "runs", "and", "tags", "and", "their", "data", "availabilities", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graphs_plugin.py#L74-L143
train
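For reference, a hypothetical value of the dict info_impl returns, for one run with a run-level graph plus a tagged op graph carrying profile data (run and tag names are contrived):

result = {
    'train': {
        'run': 'train',
        'run_graph': True,
        'tags': {
            'step_stats': {
                'tag': 'step_stats',
                'conceptual_graph': False,
                'op_graph': True,
                'profile': True,
            },
        },
    },
}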
tensorflow/tensorboard
tensorboard/plugins/graph/graphs_plugin.py
GraphsPlugin.graph_impl
def graph_impl(self, run, tag, is_conceptual, limit_attr_size=None, large_attrs_key=None): """Result of the form `(body, mime_type)`, or `None` if no graph exists.""" if is_conceptual: tensor_events = self._multiplexer.Tensors(run, tag) # Take the first event if there are multiple events written from different # steps. keras_model_config = json.loads(tensor_events[0].tensor_proto.string_val[0]) graph = keras_util.keras_model_to_graph_def(keras_model_config) elif tag: tensor_events = self._multiplexer.Tensors(run, tag) # Take the first event if there are multiple events written from different # steps. run_metadata = config_pb2.RunMetadata.FromString( tensor_events[0].tensor_proto.string_val[0]) graph = graph_pb2.GraphDef() for func_graph in run_metadata.function_graphs: graph_util.combine_graph_defs(graph, func_graph.pre_optimization_graph) else: graph = self._multiplexer.Graph(run) # This next line might raise a ValueError if the limit parameters # are invalid (size is negative, size present but key absent, etc.). process_graph.prepare_graph_for_ui(graph, limit_attr_size, large_attrs_key) return (str(graph), 'text/x-protobuf')
python
def graph_impl(self, run, tag, is_conceptual, limit_attr_size=None, large_attrs_key=None): """Result of the form `(body, mime_type)`, or `None` if no graph exists.""" if is_conceptual: tensor_events = self._multiplexer.Tensors(run, tag) # Take the first event if there are multiple events written from different # steps. keras_model_config = json.loads(tensor_events[0].tensor_proto.string_val[0]) graph = keras_util.keras_model_to_graph_def(keras_model_config) elif tag: tensor_events = self._multiplexer.Tensors(run, tag) # Take the first event if there are multiple events written from different # steps. run_metadata = config_pb2.RunMetadata.FromString( tensor_events[0].tensor_proto.string_val[0]) graph = graph_pb2.GraphDef() for func_graph in run_metadata.function_graphs: graph_util.combine_graph_defs(graph, func_graph.pre_optimization_graph) else: graph = self._multiplexer.Graph(run) # This next line might raise a ValueError if the limit parameters # are invalid (size is negative, size present but key absent, etc.). process_graph.prepare_graph_for_ui(graph, limit_attr_size, large_attrs_key) return (str(graph), 'text/x-protobuf')
[ "def", "graph_impl", "(", "self", ",", "run", ",", "tag", ",", "is_conceptual", ",", "limit_attr_size", "=", "None", ",", "large_attrs_key", "=", "None", ")", ":", "if", "is_conceptual", ":", "tensor_events", "=", "self", ".", "_multiplexer", ".", "Tensors", "(", "run", ",", "tag", ")", "# Take the first event if there are multiple events written from different", "# steps.", "keras_model_config", "=", "json", ".", "loads", "(", "tensor_events", "[", "0", "]", ".", "tensor_proto", ".", "string_val", "[", "0", "]", ")", "graph", "=", "keras_util", ".", "keras_model_to_graph_def", "(", "keras_model_config", ")", "elif", "tag", ":", "tensor_events", "=", "self", ".", "_multiplexer", ".", "Tensors", "(", "run", ",", "tag", ")", "# Take the first event if there are multiple events written from different", "# steps.", "run_metadata", "=", "config_pb2", ".", "RunMetadata", ".", "FromString", "(", "tensor_events", "[", "0", "]", ".", "tensor_proto", ".", "string_val", "[", "0", "]", ")", "graph", "=", "graph_pb2", ".", "GraphDef", "(", ")", "for", "func_graph", "in", "run_metadata", ".", "function_graphs", ":", "graph_util", ".", "combine_graph_defs", "(", "graph", ",", "func_graph", ".", "pre_optimization_graph", ")", "else", ":", "graph", "=", "self", ".", "_multiplexer", ".", "Graph", "(", "run", ")", "# This next line might raise a ValueError if the limit parameters", "# are invalid (size is negative, size present but key absent, etc.).", "process_graph", ".", "prepare_graph_for_ui", "(", "graph", ",", "limit_attr_size", ",", "large_attrs_key", ")", "return", "(", "str", "(", "graph", ")", ",", "'text/x-protobuf'", ")" ]
Result of the form `(body, mime_type)`, or `None` if no graph exists.
[ "Result", "of", "the", "form", "(", "body", "mime_type", ")", "or", "None", "if", "no", "graph", "exists", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graphs_plugin.py#L145-L169
train
tensorflow/tensorboard
tensorboard/plugins/graph/graphs_plugin.py
GraphsPlugin.run_metadata_impl
def run_metadata_impl(self, run, tag): """Result of the form `(body, mime_type)`, or `None` if no data exists.""" try: run_metadata = self._multiplexer.RunMetadata(run, tag) except ValueError: # TODO(stephanwlee): Should include whether FE is fetching for v1 or v2 RunMetadata # so we can remove this try/except. tensor_events = self._multiplexer.Tensors(run, tag) if tensor_events is None: return None # Take the first event if there are multiple events written from different # steps. run_metadata = config_pb2.RunMetadata.FromString( tensor_events[0].tensor_proto.string_val[0]) if run_metadata is None: return None return (str(run_metadata), 'text/x-protobuf')
python
def run_metadata_impl(self, run, tag): """Result of the form `(body, mime_type)`, or `None` if no data exists.""" try: run_metadata = self._multiplexer.RunMetadata(run, tag) except ValueError: # TODO(stephanwlee): Should include whether FE is fetching for v1 or v2 RunMetadata # so we can remove this try/except. tensor_events = self._multiplexer.Tensors(run, tag) if tensor_events is None: return None # Take the first event if there are multiple events written from different # steps. run_metadata = config_pb2.RunMetadata.FromString( tensor_events[0].tensor_proto.string_val[0]) if run_metadata is None: return None return (str(run_metadata), 'text/x-protobuf')
[ "def", "run_metadata_impl", "(", "self", ",", "run", ",", "tag", ")", ":", "try", ":", "run_metadata", "=", "self", ".", "_multiplexer", ".", "RunMetadata", "(", "run", ",", "tag", ")", "except", "ValueError", ":", "# TODO(stephanwlee): Should include whether FE is fetching for v1 or v2 RunMetadata", "# so we can remove this try/except.", "tensor_events", "=", "self", ".", "_multiplexer", ".", "Tensors", "(", "run", ",", "tag", ")", "if", "tensor_events", "is", "None", ":", "return", "None", "# Take the first event if there are multiple events written from different", "# steps.", "run_metadata", "=", "config_pb2", ".", "RunMetadata", ".", "FromString", "(", "tensor_events", "[", "0", "]", ".", "tensor_proto", ".", "string_val", "[", "0", "]", ")", "if", "run_metadata", "is", "None", ":", "return", "None", "return", "(", "str", "(", "run_metadata", ")", ",", "'text/x-protobuf'", ")" ]
Result of the form `(body, mime_type)`, or `None` if no data exists.
[ "Result", "of", "the", "form", "(", "body", "mime_type", ")", "or", "None", "if", "no", "data", "exists", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graphs_plugin.py#L171-L187
train
tensorflow/tensorboard
tensorboard/plugins/graph/graphs_plugin.py
GraphsPlugin.graph_route
def graph_route(self, request): """Given a single run, return the graph definition in protobuf format.""" run = request.args.get('run') tag = request.args.get('tag', '') conceptual_arg = request.args.get('conceptual', False) is_conceptual = True if conceptual_arg == 'true' else False if run is None: return http_util.Respond( request, 'query parameter "run" is required', 'text/plain', 400) limit_attr_size = request.args.get('limit_attr_size', None) if limit_attr_size is not None: try: limit_attr_size = int(limit_attr_size) except ValueError: return http_util.Respond( request, 'query parameter `limit_attr_size` must be an integer', 'text/plain', 400) large_attrs_key = request.args.get('large_attrs_key', None) try: result = self.graph_impl(run, tag, is_conceptual, limit_attr_size, large_attrs_key) except ValueError as e: return http_util.Respond(request, str(e), 'text/plain', code=400) else: if result is not None: (body, mime_type) = result # pylint: disable=unpacking-non-sequence return http_util.Respond(request, body, mime_type) else: return http_util.Respond(request, '404 Not Found', 'text/plain', code=404)
python
def graph_route(self, request): """Given a single run, return the graph definition in protobuf format.""" run = request.args.get('run') tag = request.args.get('tag', '') conceptual_arg = request.args.get('conceptual', False) is_conceptual = True if conceptual_arg == 'true' else False if run is None: return http_util.Respond( request, 'query parameter "run" is required', 'text/plain', 400) limit_attr_size = request.args.get('limit_attr_size', None) if limit_attr_size is not None: try: limit_attr_size = int(limit_attr_size) except ValueError: return http_util.Respond( request, 'query parameter `limit_attr_size` must be an integer', 'text/plain', 400) large_attrs_key = request.args.get('large_attrs_key', None) try: result = self.graph_impl(run, tag, is_conceptual, limit_attr_size, large_attrs_key) except ValueError as e: return http_util.Respond(request, str(e), 'text/plain', code=400) else: if result is not None: (body, mime_type) = result # pylint: disable=unpacking-non-sequence return http_util.Respond(request, body, mime_type) else: return http_util.Respond(request, '404 Not Found', 'text/plain', code=404)
[ "def", "graph_route", "(", "self", ",", "request", ")", ":", "run", "=", "request", ".", "args", ".", "get", "(", "'run'", ")", "tag", "=", "request", ".", "args", ".", "get", "(", "'tag'", ",", "''", ")", "conceptual_arg", "=", "request", ".", "args", ".", "get", "(", "'conceptual'", ",", "False", ")", "is_conceptual", "=", "True", "if", "conceptual_arg", "==", "'true'", "else", "False", "if", "run", "is", "None", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'query parameter \"run\" is required'", ",", "'text/plain'", ",", "400", ")", "limit_attr_size", "=", "request", ".", "args", ".", "get", "(", "'limit_attr_size'", ",", "None", ")", "if", "limit_attr_size", "is", "not", "None", ":", "try", ":", "limit_attr_size", "=", "int", "(", "limit_attr_size", ")", "except", "ValueError", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'query parameter `limit_attr_size` must be an integer'", ",", "'text/plain'", ",", "400", ")", "large_attrs_key", "=", "request", ".", "args", ".", "get", "(", "'large_attrs_key'", ",", "None", ")", "try", ":", "result", "=", "self", ".", "graph_impl", "(", "run", ",", "tag", ",", "is_conceptual", ",", "limit_attr_size", ",", "large_attrs_key", ")", "except", "ValueError", "as", "e", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "e", ".", "message", ",", "'text/plain'", ",", "code", "=", "400", ")", "else", ":", "if", "result", "is", "not", "None", ":", "(", "body", ",", "mime_type", ")", "=", "result", "# pylint: disable=unpacking-non-sequence", "return", "http_util", ".", "Respond", "(", "request", ",", "body", ",", "mime_type", ")", "else", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'404 Not Found'", ",", "'text/plain'", ",", "code", "=", "404", ")" ]
Given a single run, return the graph definition in protobuf format.
[ "Given", "a", "single", "run", "return", "the", "graph", "definition", "in", "protobuf", "format", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graphs_plugin.py#L195-L227
train
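A hypothetical request the handler above would serve (the route path prefix is registered elsewhere in the plugin and is assumed here):

# GET /data/plugin/graphs/graph?run=train&conceptual=false&limit_attr_size=1024&large_attrs_key=_too_large_attrs
# Missing `run` yields 400; a non-integer limit_attr_size yields 400; no matching graph yields 404.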
tensorflow/tensorboard
tensorboard/plugins/graph/graphs_plugin.py
GraphsPlugin.run_metadata_route
def run_metadata_route(self, request): """Given a tag and a run, return the session.run() metadata.""" tag = request.args.get('tag') run = request.args.get('run') if tag is None: return http_util.Respond( request, 'query parameter "tag" is required', 'text/plain', 400) if run is None: return http_util.Respond( request, 'query parameter "run" is required', 'text/plain', 400) result = self.run_metadata_impl(run, tag) if result is not None: (body, mime_type) = result # pylint: disable=unpacking-non-sequence return http_util.Respond(request, body, mime_type) else: return http_util.Respond(request, '404 Not Found', 'text/plain', code=404)
python
def run_metadata_route(self, request): """Given a tag and a run, return the session.run() metadata.""" tag = request.args.get('tag') run = request.args.get('run') if tag is None: return http_util.Respond( request, 'query parameter "tag" is required', 'text/plain', 400) if run is None: return http_util.Respond( request, 'query parameter "run" is required', 'text/plain', 400) result = self.run_metadata_impl(run, tag) if result is not None: (body, mime_type) = result # pylint: disable=unpacking-non-sequence return http_util.Respond(request, body, mime_type) else: return http_util.Respond(request, '404 Not Found', 'text/plain', code=404)
[ "def", "run_metadata_route", "(", "self", ",", "request", ")", ":", "tag", "=", "request", ".", "args", ".", "get", "(", "'tag'", ")", "run", "=", "request", ".", "args", ".", "get", "(", "'run'", ")", "if", "tag", "is", "None", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'query parameter \"tag\" is required'", ",", "'text/plain'", ",", "400", ")", "if", "run", "is", "None", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'query parameter \"run\" is required'", ",", "'text/plain'", ",", "400", ")", "result", "=", "self", ".", "run_metadata_impl", "(", "run", ",", "tag", ")", "if", "result", "is", "not", "None", ":", "(", "body", ",", "mime_type", ")", "=", "result", "# pylint: disable=unpacking-non-sequence", "return", "http_util", ".", "Respond", "(", "request", ",", "body", ",", "mime_type", ")", "else", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'404 Not Found'", ",", "'text/plain'", ",", "code", "=", "404", ")" ]
Given a tag and a run, return the session.run() metadata.
[ "Given", "a", "tag", "and", "a", "run", "return", "the", "session", ".", "run", "()", "metadata", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graphs_plugin.py#L230-L246
train
tensorflow/tensorboard
tensorboard/plugins/profile/profile_plugin_loader.py
ProfilePluginLoader.load
def load(self, context): """Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A ProfilePlugin instance or None if it couldn't be loaded. """ try: # pylint: disable=g-import-not-at-top,unused-import import tensorflow # Available in TensorFlow 1.14 or later, so do import check # pylint: disable=g-import-not-at-top,unused-import from tensorflow.python.eager import profiler_client except ImportError: return # pylint: disable=g-import-not-at-top from tensorboard.plugins.profile.profile_plugin import ProfilePlugin return ProfilePlugin(context)
python
def load(self, context): """Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A ProfilePlugin instance or None if it couldn't be loaded. """ try: # pylint: disable=g-import-not-at-top,unused-import import tensorflow # Available in TensorFlow 1.14 or later, so do import check # pylint: disable=g-import-not-at-top,unused-import from tensorflow.python.eager import profiler_client except ImportError: return # pylint: disable=g-import-not-at-top from tensorboard.plugins.profile.profile_plugin import ProfilePlugin return ProfilePlugin(context)
[ "def", "load", "(", "self", ",", "context", ")", ":", "try", ":", "# pylint: disable=g-import-not-at-top,unused-import", "import", "tensorflow", "# Available in TensorFlow 1.14 or later, so do import check", "# pylint: disable=g-import-not-at-top,unused-import", "from", "tensorflow", ".", "python", ".", "eager", "import", "profiler_client", "except", "ImportError", ":", "return", "# pylint: disable=g-import-not-at-top", "from", "tensorboard", ".", "plugins", ".", "profile", ".", "profile_plugin", "import", "ProfilePlugin", "return", "ProfilePlugin", "(", "context", ")" ]
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A ProfilePlugin instance or None if it couldn't be loaded.
[ "Returns", "the", "plugin", "if", "possible", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/profile/profile_plugin_loader.py#L42-L61
train
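The loader's guard pattern, sketched generically and runnably; numpy stands in here for the profiler_client availability probe above, and the return value stands in for a plugin instance:

def load_if_available(context):
    try:
        import numpy  # availability probe only, mirroring the profiler_client check
    except ImportError:
        return None   # plugin silently unavailable, as in ProfilePluginLoader.load
    return ('plugin-instance-stand-in', context)

print(load_if_available({'flag': True}))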
tensorflow/tensorboard
tensorboard/plugins/hparams/hparams_demo.py
model_fn
def model_fn(hparams, seed): """Create a Keras model with the given hyperparameters. Args: hparams: A dict mapping hyperparameters in `HPARAMS` to values. seed: A hashable object to be used as a random seed (e.g., to construct dropout layers in the model). Returns: A compiled Keras model. """ rng = random.Random(seed) model = tf.keras.models.Sequential() model.add(tf.keras.layers.Input(INPUT_SHAPE)) model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (1,))) # grayscale channel # Add convolutional layers. conv_filters = 8 for _ in xrange(hparams[HP_CONV_LAYERS]): model.add(tf.keras.layers.Conv2D( filters=conv_filters, kernel_size=hparams[HP_CONV_KERNEL_SIZE], padding="same", activation="relu", )) model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding="same")) conv_filters *= 2 model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random())) # Add fully connected layers. dense_neurons = 32 for _ in xrange(hparams[HP_DENSE_LAYERS]): model.add(tf.keras.layers.Dense(dense_neurons, activation="relu")) dense_neurons *= 2 # Add the final output layer. model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax")) model.compile( loss="sparse_categorical_crossentropy", optimizer=hparams[HP_OPTIMIZER], metrics=["accuracy"], ) return model
python
def model_fn(hparams, seed): """Create a Keras model with the given hyperparameters. Args: hparams: A dict mapping hyperparameters in `HPARAMS` to values. seed: A hashable object to be used as a random seed (e.g., to construct dropout layers in the model). Returns: A compiled Keras model. """ rng = random.Random(seed) model = tf.keras.models.Sequential() model.add(tf.keras.layers.Input(INPUT_SHAPE)) model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (1,))) # grayscale channel # Add convolutional layers. conv_filters = 8 for _ in xrange(hparams[HP_CONV_LAYERS]): model.add(tf.keras.layers.Conv2D( filters=conv_filters, kernel_size=hparams[HP_CONV_KERNEL_SIZE], padding="same", activation="relu", )) model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding="same")) conv_filters *= 2 model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random())) # Add fully connected layers. dense_neurons = 32 for _ in xrange(hparams[HP_DENSE_LAYERS]): model.add(tf.keras.layers.Dense(dense_neurons, activation="relu")) dense_neurons *= 2 # Add the final output layer. model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax")) model.compile( loss="sparse_categorical_crossentropy", optimizer=hparams[HP_OPTIMIZER], metrics=["accuracy"], ) return model
[ "def", "model_fn", "(", "hparams", ",", "seed", ")", ":", "rng", "=", "random", ".", "Random", "(", "seed", ")", "model", "=", "tf", ".", "keras", ".", "models", ".", "Sequential", "(", ")", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "Input", "(", "INPUT_SHAPE", ")", ")", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "Reshape", "(", "INPUT_SHAPE", "+", "(", "1", ",", ")", ")", ")", "# grayscale channel", "# Add convolutional layers.", "conv_filters", "=", "8", "for", "_", "in", "xrange", "(", "hparams", "[", "HP_CONV_LAYERS", "]", ")", ":", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "Conv2D", "(", "filters", "=", "conv_filters", ",", "kernel_size", "=", "hparams", "[", "HP_CONV_KERNEL_SIZE", "]", ",", "padding", "=", "\"same\"", ",", "activation", "=", "\"relu\"", ",", ")", ")", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "MaxPool2D", "(", "pool_size", "=", "2", ",", "padding", "=", "\"same\"", ")", ")", "conv_filters", "*=", "2", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "Flatten", "(", ")", ")", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "Dropout", "(", "hparams", "[", "HP_DROPOUT", "]", ",", "seed", "=", "rng", ".", "random", "(", ")", ")", ")", "# Add fully connected layers.", "dense_neurons", "=", "32", "for", "_", "in", "xrange", "(", "hparams", "[", "HP_DENSE_LAYERS", "]", ")", ":", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "Dense", "(", "dense_neurons", ",", "activation", "=", "\"relu\"", ")", ")", "dense_neurons", "*=", "2", "# Add the final output layer.", "model", ".", "add", "(", "tf", ".", "keras", ".", "layers", ".", "Dense", "(", "OUTPUT_CLASSES", ",", "activation", "=", "\"softmax\"", ")", ")", "model", ".", "compile", "(", "loss", "=", "\"sparse_categorical_crossentropy\"", ",", "optimizer", "=", "hparams", "[", "HP_OPTIMIZER", "]", ",", "metrics", "=", "[", "\"accuracy\"", "]", ",", ")", "return", "model" ]
Create a Keras model with the given hyperparameters. Args: hparams: A dict mapping hyperparameters in `HPARAMS` to values. seed: A hashable object to be used as a random seed (e.g., to construct dropout layers in the model). Returns: A compiled Keras model.
[ "Create", "a", "Keras", "model", "with", "the", "given", "hyperparameters", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/hparams_demo.py#L115-L161
train
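A hypothetical invocation inside hparams_demo.py, with sample values assumed to lie in each hyperparameter's domain (the HP_* constants are defined earlier in that file):

hparams = {
    HP_CONV_LAYERS: 2,
    HP_CONV_KERNEL_SIZE: 3,
    HP_DROPOUT: 0.2,
    HP_DENSE_LAYERS: 1,
    HP_OPTIMIZER: 'adam',
}
model = model_fn(hparams=hparams, seed=0)
model.summary()  # Conv2D/MaxPool2D blocks, Flatten, Dropout, Dense layers, softmax head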
tensorflow/tensorboard
tensorboard/plugins/hparams/hparams_demo.py
run
def run(data, base_logdir, session_id, group_id, hparams): """Run a training/validation session. Flags must have been parsed for this function to behave. Args: data: The data as loaded by `prepare_data()`. base_logdir: The top-level logdir to which to write summary data. session_id: A unique string ID for this session. group_id: The string ID of the session group that includes this session. hparams: A dict mapping hyperparameters in `HPARAMS` to values. """ model = model_fn(hparams=hparams, seed=session_id) logdir = os.path.join(base_logdir, session_id) callback = tf.keras.callbacks.TensorBoard( logdir, update_freq=flags.FLAGS.summary_freq, profile_batch=0, # workaround for issue #2084 ) hparams_callback = hp.KerasCallback(logdir, hparams, group_name=group_id) ((x_train, y_train), (x_test, y_test)) = data result = model.fit( x=x_train, y=y_train, epochs=flags.FLAGS.num_epochs, shuffle=False, validation_data=(x_test, y_test), callbacks=[callback, hparams_callback], )
python
def run(data, base_logdir, session_id, group_id, hparams): """Run a training/validation session. Flags must have been parsed for this function to behave. Args: data: The data as loaded by `prepare_data()`. base_logdir: The top-level logdir to which to write summary data. session_id: A unique string ID for this session. group_id: The string ID of the session group that includes this session. hparams: A dict mapping hyperparameters in `HPARAMS` to values. """ model = model_fn(hparams=hparams, seed=session_id) logdir = os.path.join(base_logdir, session_id) callback = tf.keras.callbacks.TensorBoard( logdir, update_freq=flags.FLAGS.summary_freq, profile_batch=0, # workaround for issue #2084 ) hparams_callback = hp.KerasCallback(logdir, hparams, group_name=group_id) ((x_train, y_train), (x_test, y_test)) = data result = model.fit( x=x_train, y=y_train, epochs=flags.FLAGS.num_epochs, shuffle=False, validation_data=(x_test, y_test), callbacks=[callback, hparams_callback], )
[ "def", "run", "(", "data", ",", "base_logdir", ",", "session_id", ",", "group_id", ",", "hparams", ")", ":", "model", "=", "model_fn", "(", "hparams", "=", "hparams", ",", "seed", "=", "session_id", ")", "logdir", "=", "os", ".", "path", ".", "join", "(", "base_logdir", ",", "session_id", ")", "callback", "=", "tf", ".", "keras", ".", "callbacks", ".", "TensorBoard", "(", "logdir", ",", "update_freq", "=", "flags", ".", "FLAGS", ".", "summary_freq", ",", "profile_batch", "=", "0", ",", "# workaround for issue #2084", ")", "hparams_callback", "=", "hp", ".", "KerasCallback", "(", "logdir", ",", "hparams", ",", "group_name", "=", "group_id", ")", "(", "(", "x_train", ",", "y_train", ")", ",", "(", "x_test", ",", "y_test", ")", ")", "=", "data", "result", "=", "model", ".", "fit", "(", "x", "=", "x_train", ",", "y", "=", "y_train", ",", "epochs", "=", "flags", ".", "FLAGS", ".", "num_epochs", ",", "shuffle", "=", "False", ",", "validation_data", "=", "(", "x_test", ",", "y_test", ")", ",", "callbacks", "=", "[", "callback", ",", "hparams_callback", "]", ",", ")" ]
Run a training/validation session. Flags must have been parsed for this function to behave. Args: data: The data as loaded by `prepare_data()`. base_logdir: The top-level logdir to which to write summary data. session_id: A unique string ID for this session. group_id: The string ID of the session group that includes this session. hparams: A dict mapping hyperparameters in `HPARAMS` to values.
[ "Run", "a", "training", "/", "validation", "session", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/hparams_demo.py#L164-L194
train
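The group_id argument is derived by hashing the stringified hyperparameters, as the run_all record below shows; a standalone sketch with a contrived dict:

import hashlib

hparams_string = str({'dropout': 0.2, 'optimizer': 'adam'})  # contrived hparams
group_id = hashlib.sha256(hparams_string.encode('utf-8')).hexdigest()
# Sessions that repeat the same configuration hash to the same group_id, so the
# HParams dashboard can aggregate them as one session group.
print(group_id[:16])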
tensorflow/tensorboard
tensorboard/plugins/hparams/hparams_demo.py
prepare_data
def prepare_data(): """Load and normalize data.""" ((x_train, y_train), (x_test, y_test)) = DATASET.load_data() x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train /= 255.0 x_test /= 255.0 return ((x_train, y_train), (x_test, y_test))
python
def prepare_data(): """Load and normalize data.""" ((x_train, y_train), (x_test, y_test)) = DATASET.load_data() x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train /= 255.0 x_test /= 255.0 return ((x_train, y_train), (x_test, y_test))
[ "def", "prepare_data", "(", ")", ":", "(", "(", "x_train", ",", "y_train", ")", ",", "(", "x_test", ",", "y_test", ")", ")", "=", "DATASET", ".", "load_data", "(", ")", "x_train", "=", "x_train", ".", "astype", "(", "\"float32\"", ")", "x_test", "=", "x_test", ".", "astype", "(", "\"float32\"", ")", "x_train", "/=", "255.0", "x_test", "/=", "255.0", "return", "(", "(", "x_train", ",", "y_train", ")", ",", "(", "x_test", ",", "y_test", ")", ")" ]
Load and normalize data.
[ "Load", "and", "normalize", "data", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/hparams_demo.py#L197-L204
train
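The astype-then-scale normalization in `prepare_data` is easy to verify in isolation; the synthetic uint8 pixels below stand in for `DATASET.load_data()`.

# Verifying the float32 [0, 1] scaling with made-up pixel values.
import numpy as np

x = np.array([[0, 128, 255]], dtype=np.uint8)
x = x.astype("float32")
x /= 255.0
print(x)  # [[0.        0.5019608 1.       ]]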
tensorflow/tensorboard
tensorboard/plugins/hparams/hparams_demo.py
run_all
def run_all(logdir, verbose=False): """Perform random search over the hyperparameter space. Arguments: logdir: The top-level directory into which to write data. This directory should be empty or nonexistent. verbose: If true, print out each run's name as it begins. """ data = prepare_data() rng = random.Random(0) base_writer = tf.summary.create_file_writer(logdir) with base_writer.as_default(): experiment = hp.Experiment(hparams=HPARAMS, metrics=METRICS) experiment_string = experiment.summary_pb().SerializeToString() tf.summary.experimental.write_raw_pb(experiment_string, step=0) base_writer.flush() base_writer.close() sessions_per_group = 2 num_sessions = flags.FLAGS.num_session_groups * sessions_per_group session_index = 0 # across all session groups for group_index in xrange(flags.FLAGS.num_session_groups): hparams = {h: sample_uniform(h.domain, rng) for h in HPARAMS} hparams_string = str(hparams) group_id = hashlib.sha256(hparams_string.encode("utf-8")).hexdigest() for repeat_index in xrange(sessions_per_group): session_id = str(session_index) session_index += 1 if verbose: print( "--- Running training session %d/%d" % (session_index, num_sessions) ) print(hparams_string) print("--- repeat #: %d" % (repeat_index + 1)) run( data=data, base_logdir=logdir, session_id=session_id, group_id=group_id, hparams=hparams, )
python
def run_all(logdir, verbose=False): """Perform random search over the hyperparameter space. Arguments: logdir: The top-level directory into which to write data. This directory should be empty or nonexistent. verbose: If true, print out each run's name as it begins. """ data = prepare_data() rng = random.Random(0) base_writer = tf.summary.create_file_writer(logdir) with base_writer.as_default(): experiment = hp.Experiment(hparams=HPARAMS, metrics=METRICS) experiment_string = experiment.summary_pb().SerializeToString() tf.summary.experimental.write_raw_pb(experiment_string, step=0) base_writer.flush() base_writer.close() sessions_per_group = 2 num_sessions = flags.FLAGS.num_session_groups * sessions_per_group session_index = 0 # across all session groups for group_index in xrange(flags.FLAGS.num_session_groups): hparams = {h: sample_uniform(h.domain, rng) for h in HPARAMS} hparams_string = str(hparams) group_id = hashlib.sha256(hparams_string.encode("utf-8")).hexdigest() for repeat_index in xrange(sessions_per_group): session_id = str(session_index) session_index += 1 if verbose: print( "--- Running training session %d/%d" % (session_index, num_sessions) ) print(hparams_string) print("--- repeat #: %d" % (repeat_index + 1)) run( data=data, base_logdir=logdir, session_id=session_id, group_id=group_id, hparams=hparams, )
[ "def", "run_all", "(", "logdir", ",", "verbose", "=", "False", ")", ":", "data", "=", "prepare_data", "(", ")", "rng", "=", "random", ".", "Random", "(", "0", ")", "base_writer", "=", "tf", ".", "summary", ".", "create_file_writer", "(", "logdir", ")", "with", "base_writer", ".", "as_default", "(", ")", ":", "experiment", "=", "hp", ".", "Experiment", "(", "hparams", "=", "HPARAMS", ",", "metrics", "=", "METRICS", ")", "experiment_string", "=", "experiment", ".", "summary_pb", "(", ")", ".", "SerializeToString", "(", ")", "tf", ".", "summary", ".", "experimental", ".", "write_raw_pb", "(", "experiment_string", ",", "step", "=", "0", ")", "base_writer", ".", "flush", "(", ")", "base_writer", ".", "close", "(", ")", "sessions_per_group", "=", "2", "num_sessions", "=", "flags", ".", "FLAGS", ".", "num_session_groups", "*", "sessions_per_group", "session_index", "=", "0", "# across all session groups", "for", "group_index", "in", "xrange", "(", "flags", ".", "FLAGS", ".", "num_session_groups", ")", ":", "hparams", "=", "{", "h", ":", "sample_uniform", "(", "h", ".", "domain", ",", "rng", ")", "for", "h", "in", "HPARAMS", "}", "hparams_string", "=", "str", "(", "hparams", ")", "group_id", "=", "hashlib", ".", "sha256", "(", "hparams_string", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")", "for", "repeat_index", "in", "xrange", "(", "sessions_per_group", ")", ":", "session_id", "=", "str", "(", "session_index", ")", "session_index", "+=", "1", "if", "verbose", ":", "print", "(", "\"--- Running training session %d/%d\"", "%", "(", "session_index", ",", "num_sessions", ")", ")", "print", "(", "hparams_string", ")", "print", "(", "\"--- repeat #: %d\"", "%", "(", "repeat_index", "+", "1", ")", ")", "run", "(", "data", "=", "data", ",", "base_logdir", "=", "logdir", ",", "session_id", "=", "session_id", ",", "group_id", "=", "group_id", ",", "hparams", "=", "hparams", ",", ")" ]
Perform random search over the hyperparameter space. Arguments: logdir: The top-level directory into which to write data. This directory should be empty or nonexistent. verbose: If true, print out each run's name as it begins.
[ "Perform", "random", "search", "over", "the", "hyperparameter", "space", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/hparams_demo.py#L207-L249
train
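One detail of `run_all` worth isolating is the grouping scheme: every repeat that shares a hyperparameter assignment derives the same group ID by hashing the stringified dict. A standalone sketch, with a made-up assignment, follows.

# The group-ID derivation used above: repeats of the same hyperparameter
# assignment hash to the same session group (assignment is made up).
import hashlib

hparams_string = str({"units": 32, "dropout": 0.2})
group_id = hashlib.sha256(hparams_string.encode("utf-8")).hexdigest()
print(group_id[:12])  # identical for every repeat within the group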
tensorflow/tensorboard
tensorboard/plugins/hparams/hparams_demo.py
sample_uniform
def sample_uniform(domain, rng): """Sample a value uniformly from a domain. Args: domain: An `IntInterval`, `RealInterval`, or `Discrete` domain. rng: A `random.Random` object; defaults to the `random` module. Raises: TypeError: If `domain` is not a known kind of domain. IndexError: If the domain is empty. """ if isinstance(domain, hp.IntInterval): return rng.randint(domain.min_value, domain.max_value) elif isinstance(domain, hp.RealInterval): return rng.uniform(domain.min_value, domain.max_value) elif isinstance(domain, hp.Discrete): return rng.choice(domain.values) else: raise TypeError("unknown domain type: %r" % (domain,))
python
def sample_uniform(domain, rng): """Sample a value uniformly from a domain. Args: domain: An `IntInterval`, `RealInterval`, or `Discrete` domain. rng: A `random.Random` object; defaults to the `random` module. Raises: TypeError: If `domain` is not a known kind of domain. IndexError: If the domain is empty. """ if isinstance(domain, hp.IntInterval): return rng.randint(domain.min_value, domain.max_value) elif isinstance(domain, hp.RealInterval): return rng.uniform(domain.min_value, domain.max_value) elif isinstance(domain, hp.Discrete): return rng.choice(domain.values) else: raise TypeError("unknown domain type: %r" % (domain,))
[ "def", "sample_uniform", "(", "domain", ",", "rng", ")", ":", "if", "isinstance", "(", "domain", ",", "hp", ".", "IntInterval", ")", ":", "return", "rng", ".", "randint", "(", "domain", ".", "min_value", ",", "domain", ".", "max_value", ")", "elif", "isinstance", "(", "domain", ",", "hp", ".", "RealInterval", ")", ":", "return", "rng", ".", "uniform", "(", "domain", ".", "min_value", ",", "domain", ".", "max_value", ")", "elif", "isinstance", "(", "domain", ",", "hp", ".", "Discrete", ")", ":", "return", "rng", ".", "choice", "(", "domain", ".", "values", ")", "else", ":", "raise", "TypeError", "(", "\"unknown domain type: %r\"", "%", "(", "domain", ",", ")", ")" ]
Sample a value uniformly from a domain. Args: domain: An `IntInterval`, `RealInterval`, or `Discrete` domain. rng: A `random.Random` object; defaults to the `random` module. Raises: TypeError: If `domain` is not a known kind of domain. IndexError: If the domain is empty.
[ "Sample", "a", "value", "uniformly", "from", "a", "domain", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/hparams_demo.py#L252-L270
train
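Assuming `sample_uniform` from the record above is in scope, each supported domain kind can be exercised directly; the domains below are illustrative, and the printed draws depend on the seed.

# Drawing one value from each supported domain type (example domains).
# Assumes sample_uniform, defined above, is importable or in scope.
import random
from tensorboard.plugins.hparams import api as hp

rng = random.Random(0)
print(sample_uniform(hp.IntInterval(1, 10), rng))         # e.g. 7
print(sample_uniform(hp.RealInterval(0.0, 1.0), rng))     # e.g. 0.45...
print(sample_uniform(hp.Discrete(["adam", "sgd"]), rng))  # e.g. 'sgd'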
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curves_plugin.py
PrCurvesPlugin.pr_curves_route
def pr_curves_route(self, request): """A route that returns a JSON mapping between runs and PR curve data. Returns: Given a tag and a comma-separated list of runs (both stored within GET parameters), fetches a JSON object that maps between run name and objects containing data required for PR curves for that run. Runs that either cannot be found or that lack tags will be excluded from the response. """ runs = request.args.getlist('run') if not runs: return http_util.Respond( request, 'No runs provided when fetching PR curve data', 400) tag = request.args.get('tag') if not tag: return http_util.Respond( request, 'No tag provided when fetching PR curve data', 400) try: response = http_util.Respond( request, self.pr_curves_impl(runs, tag), 'application/json') except ValueError as e: return http_util.Respond(request, str(e), 'text/plain', 400) return response
python
def pr_curves_route(self, request): """A route that returns a JSON mapping between runs and PR curve data. Returns: Given a tag and a comma-separated list of runs (both stored within GET parameters), fetches a JSON object that maps between run name and objects containing data required for PR curves for that run. Runs that either cannot be found or that lack tags will be excluded from the response. """ runs = request.args.getlist('run') if not runs: return http_util.Respond( request, 'No runs provided when fetching PR curve data', 400) tag = request.args.get('tag') if not tag: return http_util.Respond( request, 'No tag provided when fetching PR curve data', 400) try: response = http_util.Respond( request, self.pr_curves_impl(runs, tag), 'application/json') except ValueError as e: return http_util.Respond(request, str(e), 'text/plain', 400) return response
[ "def", "pr_curves_route", "(", "self", ",", "request", ")", ":", "runs", "=", "request", ".", "args", ".", "getlist", "(", "'run'", ")", "if", "not", "runs", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'No runs provided when fetching PR curve data'", ",", "400", ")", "tag", "=", "request", ".", "args", ".", "get", "(", "'tag'", ")", "if", "not", "tag", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "'No tag provided when fetching PR curve data'", ",", "400", ")", "try", ":", "response", "=", "http_util", ".", "Respond", "(", "request", ",", "self", ".", "pr_curves_impl", "(", "runs", ",", "tag", ")", ",", "'application/json'", ")", "except", "ValueError", "as", "e", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "str", "(", "e", ")", ",", "'text/plain'", ",", "400", ")", "return", "response" ]
A route that returns a JSON mapping between runs and PR curve data. Returns: Given a tag and a comma-separated list of runs (both stored within GET parameters), fetches a JSON object that maps between run name and objects containing data required for PR curves for that run. Runs that either cannot be found or that lack tags will be excluded from the response.
[ "A", "route", "that", "returns", "a", "JSON", "mapping", "between", "runs", "and", "PR", "curve", "data", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curves_plugin.py#L47-L72
train
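Note that the route reads repeated `run` query parameters via `getlist`, despite the docstring's mention of a comma-separated list. A client-side sketch of the expected query shape follows; the endpoint path and host are assumptions, not taken from the record.

# Building a query the route would accept: repeated `run` params plus
# a single `tag`. The /data/plugin/pr_curves/pr_curves path is assumed.
from six.moves.urllib.parse import urlencode

query = urlencode([("run", "train"), ("run", "eval"),
                   ("tag", "pr_curve/example")])
url = "http://localhost:6006/data/plugin/pr_curves/pr_curves?" + query
print(url)  # ...?run=train&run=eval&tag=pr_curve%2Fexample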
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curves_plugin.py
PrCurvesPlugin.pr_curves_impl
def pr_curves_impl(self, runs, tag): """Creates the JSON object for the PR curves response for a run-tag combo. Arguments: runs: A list of runs to fetch the curves for. tag: The tag to fetch the curves for. Raises: ValueError: If no PR curves could be fetched for a run and tag. Returns: The JSON object for the PR curves route response. """ if self._db_connection_provider: # Serve data from the database. db = self._db_connection_provider() # We select for steps greater than -1 because the writer inserts # placeholder rows en masse. The check for step filters out those rows. cursor = db.execute(''' SELECT Runs.run_name, Tensors.step, Tensors.computed_time, Tensors.data, Tensors.dtype, Tensors.shape, Tags.plugin_data FROM Tensors JOIN Tags ON Tensors.series = Tags.tag_id JOIN Runs ON Tags.run_id = Runs.run_id WHERE Runs.run_name IN (%s) AND Tags.tag_name = ? AND Tags.plugin_name = ? AND Tensors.step > -1 ORDER BY Tensors.step ''' % ','.join(['?'] * len(runs)), runs + [tag, metadata.PLUGIN_NAME]) response_mapping = {} for (run, step, wall_time, data, dtype, shape, plugin_data) in cursor: if run not in response_mapping: response_mapping[run] = [] buf = np.frombuffer(data, dtype=tf.DType(dtype).as_numpy_dtype) data_array = buf.reshape([int(i) for i in shape.split(',')]) plugin_data_proto = plugin_data_pb2.PrCurvePluginData() string_buffer = np.frombuffer(plugin_data, dtype=np.dtype('b')) plugin_data_proto.ParseFromString(tf.compat.as_bytes( string_buffer.tostring())) thresholds = self._compute_thresholds(plugin_data_proto.num_thresholds) entry = self._make_pr_entry(step, wall_time, data_array, thresholds) response_mapping[run].append(entry) else: # Serve data from events files. response_mapping = {} for run in runs: try: tensor_events = self._multiplexer.Tensors(run, tag) except KeyError: raise ValueError( 'No PR curves could be found for run %r and tag %r' % (run, tag)) content = self._multiplexer.SummaryMetadata( run, tag).plugin_data.content pr_curve_data = metadata.parse_plugin_metadata(content) thresholds = self._compute_thresholds(pr_curve_data.num_thresholds) response_mapping[run] = [ self._process_tensor_event(e, thresholds) for e in tensor_events] return response_mapping
python
def pr_curves_impl(self, runs, tag): """Creates the JSON object for the PR curves response for a run-tag combo. Arguments: runs: A list of runs to fetch the curves for. tag: The tag to fetch the curves for. Raises: ValueError: If no PR curves could be fetched for a run and tag. Returns: The JSON object for the PR curves route response. """ if self._db_connection_provider: # Serve data from the database. db = self._db_connection_provider() # We select for steps greater than -1 because the writer inserts # placeholder rows en masse. The check for step filters out those rows. cursor = db.execute(''' SELECT Runs.run_name, Tensors.step, Tensors.computed_time, Tensors.data, Tensors.dtype, Tensors.shape, Tags.plugin_data FROM Tensors JOIN Tags ON Tensors.series = Tags.tag_id JOIN Runs ON Tags.run_id = Runs.run_id WHERE Runs.run_name IN (%s) AND Tags.tag_name = ? AND Tags.plugin_name = ? AND Tensors.step > -1 ORDER BY Tensors.step ''' % ','.join(['?'] * len(runs)), runs + [tag, metadata.PLUGIN_NAME]) response_mapping = {} for (run, step, wall_time, data, dtype, shape, plugin_data) in cursor: if run not in response_mapping: response_mapping[run] = [] buf = np.frombuffer(data, dtype=tf.DType(dtype).as_numpy_dtype) data_array = buf.reshape([int(i) for i in shape.split(',')]) plugin_data_proto = plugin_data_pb2.PrCurvePluginData() string_buffer = np.frombuffer(plugin_data, dtype=np.dtype('b')) plugin_data_proto.ParseFromString(tf.compat.as_bytes( string_buffer.tostring())) thresholds = self._compute_thresholds(plugin_data_proto.num_thresholds) entry = self._make_pr_entry(step, wall_time, data_array, thresholds) response_mapping[run].append(entry) else: # Serve data from events files. response_mapping = {} for run in runs: try: tensor_events = self._multiplexer.Tensors(run, tag) except KeyError: raise ValueError( 'No PR curves could be found for run %r and tag %r' % (run, tag)) content = self._multiplexer.SummaryMetadata( run, tag).plugin_data.content pr_curve_data = metadata.parse_plugin_metadata(content) thresholds = self._compute_thresholds(pr_curve_data.num_thresholds) response_mapping[run] = [ self._process_tensor_event(e, thresholds) for e in tensor_events] return response_mapping
[ "def", "pr_curves_impl", "(", "self", ",", "runs", ",", "tag", ")", ":", "if", "self", ".", "_db_connection_provider", ":", "# Serve data from the database.", "db", "=", "self", ".", "_db_connection_provider", "(", ")", "# We select for steps greater than -1 because the writer inserts", "# placeholder rows en masse. The check for step filters out those rows.", "cursor", "=", "db", ".", "execute", "(", "'''\n SELECT\n Runs.run_name,\n Tensors.step,\n Tensors.computed_time,\n Tensors.data,\n Tensors.dtype,\n Tensors.shape,\n Tags.plugin_data\n FROM Tensors\n JOIN Tags\n ON Tensors.series = Tags.tag_id\n JOIN Runs\n ON Tags.run_id = Runs.run_id\n WHERE\n Runs.run_name IN (%s)\n AND Tags.tag_name = ?\n AND Tags.plugin_name = ?\n AND Tensors.step > -1\n ORDER BY Tensors.step\n '''", "%", "','", ".", "join", "(", "[", "'?'", "]", "*", "len", "(", "runs", ")", ")", ",", "runs", "+", "[", "tag", ",", "metadata", ".", "PLUGIN_NAME", "]", ")", "response_mapping", "=", "{", "}", "for", "(", "run", ",", "step", ",", "wall_time", ",", "data", ",", "dtype", ",", "shape", ",", "plugin_data", ")", "in", "cursor", ":", "if", "run", "not", "in", "response_mapping", ":", "response_mapping", "[", "run", "]", "=", "[", "]", "buf", "=", "np", ".", "frombuffer", "(", "data", ",", "dtype", "=", "tf", ".", "DType", "(", "dtype", ")", ".", "as_numpy_dtype", ")", "data_array", "=", "buf", ".", "reshape", "(", "[", "int", "(", "i", ")", "for", "i", "in", "shape", ".", "split", "(", "','", ")", "]", ")", "plugin_data_proto", "=", "plugin_data_pb2", ".", "PrCurvePluginData", "(", ")", "string_buffer", "=", "np", ".", "frombuffer", "(", "plugin_data", ",", "dtype", "=", "np", ".", "dtype", "(", "'b'", ")", ")", "plugin_data_proto", ".", "ParseFromString", "(", "tf", ".", "compat", ".", "as_bytes", "(", "string_buffer", ".", "tostring", "(", ")", ")", ")", "thresholds", "=", "self", ".", "_compute_thresholds", "(", "plugin_data_proto", ".", "num_thresholds", ")", "entry", "=", "self", ".", "_make_pr_entry", "(", "step", ",", "wall_time", ",", "data_array", ",", "thresholds", ")", "response_mapping", "[", "run", "]", ".", "append", "(", "entry", ")", "else", ":", "# Serve data from events files.", "response_mapping", "=", "{", "}", "for", "run", "in", "runs", ":", "try", ":", "tensor_events", "=", "self", ".", "_multiplexer", ".", "Tensors", "(", "run", ",", "tag", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "'No PR curves could be found for run %r and tag %r'", "%", "(", "run", ",", "tag", ")", ")", "content", "=", "self", ".", "_multiplexer", ".", "SummaryMetadata", "(", "run", ",", "tag", ")", ".", "plugin_data", ".", "content", "pr_curve_data", "=", "metadata", ".", "parse_plugin_metadata", "(", "content", ")", "thresholds", "=", "self", ".", "_compute_thresholds", "(", "pr_curve_data", ".", "num_thresholds", ")", "response_mapping", "[", "run", "]", "=", "[", "self", ".", "_process_tensor_event", "(", "e", ",", "thresholds", ")", "for", "e", "in", "tensor_events", "]", "return", "response_mapping" ]
Creates the JSON object for the PR curves response for a run-tag combo. Arguments: runs: A list of runs to fetch the curves for. tag: The tag to fetch the curves for. Raises: ValueError: If no PR curves could be fetched for a run and tag. Returns: The JSON object for the PR curves route response.
[ "Creates", "the", "JSON", "object", "for", "the", "PR", "curves", "response", "for", "a", "run", "-", "tag", "combo", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curves_plugin.py#L74-L143
train
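The query in `pr_curves_impl` expands a variable-length `IN` clause by joining one `?` placeholder per run, which keeps the run names parameterized rather than string-interpolated. The same pattern on a throwaway SQLite table:

# The IN-clause placeholder expansion, demonstrated on an in-memory
# SQLite table (table contents are made up).
import sqlite3

runs = ["train", "eval"]
db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE Runs (run_name TEXT)")
db.executemany("INSERT INTO Runs VALUES (?)",
               [(r,) for r in runs + ["other"]])
placeholders = ",".join(["?"] * len(runs))
cursor = db.execute(
    "SELECT run_name FROM Runs WHERE run_name IN (%s)" % placeholders, runs)
print(sorted(row[0] for row in cursor))  # ['eval', 'train']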
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curves_plugin.py
PrCurvesPlugin.tags_impl
def tags_impl(self): """Creates the JSON object for the tags route response. Returns: The JSON object for the tags route response. """ if self._db_connection_provider: # Read tags from the database. db = self._db_connection_provider() cursor = db.execute(''' SELECT Tags.tag_name, Tags.display_name, Runs.run_name FROM Tags JOIN Runs ON Tags.run_id = Runs.run_id WHERE Tags.plugin_name = ? ''', (metadata.PLUGIN_NAME,)) result = {} for (tag_name, display_name, run_name) in cursor: if run_name not in result: result[run_name] = {} result[run_name][tag_name] = { 'displayName': display_name, # TODO(chihuahua): Populate the description. Currently, the tags # table does not link with the description table. 'description': '', } else: # Read tags from events files. runs = self._multiplexer.Runs() result = {run: {} for run in runs} mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME) for (run, tag_to_content) in six.iteritems(mapping): for (tag, _) in six.iteritems(tag_to_content): summary_metadata = self._multiplexer.SummaryMetadata(run, tag) result[run][tag] = {'displayName': summary_metadata.display_name, 'description': plugin_util.markdown_to_safe_html( summary_metadata.summary_description)} return result
python
def tags_impl(self): """Creates the JSON object for the tags route response. Returns: The JSON object for the tags route response. """ if self._db_connection_provider: # Read tags from the database. db = self._db_connection_provider() cursor = db.execute(''' SELECT Tags.tag_name, Tags.display_name, Runs.run_name FROM Tags JOIN Runs ON Tags.run_id = Runs.run_id WHERE Tags.plugin_name = ? ''', (metadata.PLUGIN_NAME,)) result = {} for (tag_name, display_name, run_name) in cursor: if run_name not in result: result[run_name] = {} result[run_name][tag_name] = { 'displayName': display_name, # TODO(chihuahua): Populate the description. Currently, the tags # table does not link with the description table. 'description': '', } else: # Read tags from events files. runs = self._multiplexer.Runs() result = {run: {} for run in runs} mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME) for (run, tag_to_content) in six.iteritems(mapping): for (tag, _) in six.iteritems(tag_to_content): summary_metadata = self._multiplexer.SummaryMetadata(run, tag) result[run][tag] = {'displayName': summary_metadata.display_name, 'description': plugin_util.markdown_to_safe_html( summary_metadata.summary_description)} return result
[ "def", "tags_impl", "(", "self", ")", ":", "if", "self", ".", "_db_connection_provider", ":", "# Read tags from the database.", "db", "=", "self", ".", "_db_connection_provider", "(", ")", "cursor", "=", "db", ".", "execute", "(", "'''\n SELECT\n Tags.tag_name,\n Tags.display_name,\n Runs.run_name\n FROM Tags\n JOIN Runs\n ON Tags.run_id = Runs.run_id\n WHERE\n Tags.plugin_name = ?\n '''", ",", "(", "metadata", ".", "PLUGIN_NAME", ",", ")", ")", "result", "=", "{", "}", "for", "(", "tag_name", ",", "display_name", ",", "run_name", ")", "in", "cursor", ":", "if", "run_name", "not", "in", "result", ":", "result", "[", "run_name", "]", "=", "{", "}", "result", "[", "run_name", "]", "[", "tag_name", "]", "=", "{", "'displayName'", ":", "display_name", ",", "# TODO(chihuahua): Populate the description. Currently, the tags", "# table does not link with the description table.", "'description'", ":", "''", ",", "}", "else", ":", "# Read tags from events files.", "runs", "=", "self", ".", "_multiplexer", ".", "Runs", "(", ")", "result", "=", "{", "run", ":", "{", "}", "for", "run", "in", "runs", "}", "mapping", "=", "self", ".", "_multiplexer", ".", "PluginRunToTagToContent", "(", "metadata", ".", "PLUGIN_NAME", ")", "for", "(", "run", ",", "tag_to_content", ")", "in", "six", ".", "iteritems", "(", "mapping", ")", ":", "for", "(", "tag", ",", "_", ")", "in", "six", ".", "iteritems", "(", "tag_to_content", ")", ":", "summary_metadata", "=", "self", ".", "_multiplexer", ".", "SummaryMetadata", "(", "run", ",", "tag", ")", "result", "[", "run", "]", "[", "tag", "]", "=", "{", "'displayName'", ":", "summary_metadata", ".", "display_name", ",", "'description'", ":", "plugin_util", ".", "markdown_to_safe_html", "(", "summary_metadata", ".", "summary_description", ")", "}", "return", "result" ]
Creates the JSON object for the tags route response. Returns: The JSON object for the tags route response.
[ "Creates", "the", "JSON", "object", "for", "the", "tags", "route", "response", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curves_plugin.py#L175-L218
train
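Both branches of `tags_impl` build the same nested `{run: {tag: info}}` shape; a compact sketch with hypothetical cursor rows shows the structure being assembled.

# Assembling the {run: {tag: info}} response shape from hypothetical
# (tag_name, display_name, run_name) rows.
rows = [("pr/loss", "Loss", "train"), ("pr/loss", "Loss", "eval")]
result = {}
for tag_name, display_name, run_name in rows:
    result.setdefault(run_name, {})[tag_name] = {
        "displayName": display_name,
        "description": "",
    }
print(result)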
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curves_plugin.py
PrCurvesPlugin.available_time_entries_impl
def available_time_entries_impl(self): """Creates the JSON object for the available time entries route response. Returns: The JSON object for the available time entries route response. """ result = {} if self._db_connection_provider: db = self._db_connection_provider() # For each run, pick a tag. cursor = db.execute( ''' SELECT TagPickingTable.run_name, Tensors.step, Tensors.computed_time FROM (/* For each run, pick any tag. */ SELECT Runs.run_id AS run_id, Runs.run_name AS run_name, Tags.tag_id AS tag_id FROM Runs JOIN Tags ON Tags.run_id = Runs.run_id WHERE Tags.plugin_name = ? GROUP BY Runs.run_id) AS TagPickingTable JOIN Tensors ON Tensors.series = TagPickingTable.tag_id WHERE Tensors.step IS NOT NULL ORDER BY Tensors.step ''', (metadata.PLUGIN_NAME,)) for (run, step, wall_time) in cursor: if run not in result: result[run] = [] result[run].append(self._create_time_entry(step, wall_time)) else: # Read data from disk. all_runs = self._multiplexer.PluginRunToTagToContent( metadata.PLUGIN_NAME) for run, tag_to_content in all_runs.items(): if not tag_to_content: # This run lacks data for this plugin. continue # Just use the list of tensor events for any of the tags to determine # the steps to list for the run. The steps are often the same across # tags for each run, albeit the user may elect to sample certain tags # differently within the same run. If the latter occurs, TensorBoard # will show the actual step of each tag atop the card for the tag. tensor_events = self._multiplexer.Tensors( run, min(six.iterkeys(tag_to_content))) result[run] = [self._create_time_entry(e.step, e.wall_time) for e in tensor_events] return result
python
def available_time_entries_impl(self): """Creates the JSON object for the available time entries route response. Returns: The JSON object for the available time entries route response. """ result = {} if self._db_connection_provider: db = self._db_connection_provider() # For each run, pick a tag. cursor = db.execute( ''' SELECT TagPickingTable.run_name, Tensors.step, Tensors.computed_time FROM (/* For each run, pick any tag. */ SELECT Runs.run_id AS run_id, Runs.run_name AS run_name, Tags.tag_id AS tag_id FROM Runs JOIN Tags ON Tags.run_id = Runs.run_id WHERE Tags.plugin_name = ? GROUP BY Runs.run_id) AS TagPickingTable JOIN Tensors ON Tensors.series = TagPickingTable.tag_id WHERE Tensors.step IS NOT NULL ORDER BY Tensors.step ''', (metadata.PLUGIN_NAME,)) for (run, step, wall_time) in cursor: if run not in result: result[run] = [] result[run].append(self._create_time_entry(step, wall_time)) else: # Read data from disk. all_runs = self._multiplexer.PluginRunToTagToContent( metadata.PLUGIN_NAME) for run, tag_to_content in all_runs.items(): if not tag_to_content: # This run lacks data for this plugin. continue # Just use the list of tensor events for any of the tags to determine # the steps to list for the run. The steps are often the same across # tags for each run, albeit the user may elect to sample certain tags # differently within the same run. If the latter occurs, TensorBoard # will show the actual step of each tag atop the card for the tag. tensor_events = self._multiplexer.Tensors( run, min(six.iterkeys(tag_to_content))) result[run] = [self._create_time_entry(e.step, e.wall_time) for e in tensor_events] return result
[ "def", "available_time_entries_impl", "(", "self", ")", ":", "result", "=", "{", "}", "if", "self", ".", "_db_connection_provider", ":", "db", "=", "self", ".", "_db_connection_provider", "(", ")", "# For each run, pick a tag.", "cursor", "=", "db", ".", "execute", "(", "'''\n SELECT\n TagPickingTable.run_name,\n Tensors.step,\n Tensors.computed_time\n FROM (/* For each run, pick any tag. */\n SELECT\n Runs.run_id AS run_id,\n Runs.run_name AS run_name,\n Tags.tag_id AS tag_id\n FROM Runs\n JOIN Tags\n ON Tags.run_id = Runs.run_id\n WHERE\n Tags.plugin_name = ?\n GROUP BY Runs.run_id) AS TagPickingTable\n JOIN Tensors\n ON Tensors.series = TagPickingTable.tag_id\n WHERE Tensors.step IS NOT NULL\n ORDER BY Tensors.step\n '''", ",", "(", "metadata", ".", "PLUGIN_NAME", ",", ")", ")", "for", "(", "run", ",", "step", ",", "wall_time", ")", "in", "cursor", ":", "if", "run", "not", "in", "result", ":", "result", "[", "run", "]", "=", "[", "]", "result", "[", "run", "]", ".", "append", "(", "self", ".", "_create_time_entry", "(", "step", ",", "wall_time", ")", ")", "else", ":", "# Read data from disk.", "all_runs", "=", "self", ".", "_multiplexer", ".", "PluginRunToTagToContent", "(", "metadata", ".", "PLUGIN_NAME", ")", "for", "run", ",", "tag_to_content", "in", "all_runs", ".", "items", "(", ")", ":", "if", "not", "tag_to_content", ":", "# This run lacks data for this plugin.", "continue", "# Just use the list of tensor events for any of the tags to determine", "# the steps to list for the run. The steps are often the same across", "# tags for each run, albeit the user may elect to sample certain tags", "# differently within the same run. If the latter occurs, TensorBoard", "# will show the actual step of each tag atop the card for the tag.", "tensor_events", "=", "self", ".", "_multiplexer", ".", "Tensors", "(", "run", ",", "min", "(", "six", ".", "iterkeys", "(", "tag_to_content", ")", ")", ")", "result", "[", "run", "]", "=", "[", "self", ".", "_create_time_entry", "(", "e", ".", "step", ",", "e", ".", "wall_time", ")", "for", "e", "in", "tensor_events", "]", "return", "result" ]
Creates the JSON object for the available time entries route response. Returns: The JSON object for the available time entries route response.
[ "Creates", "the", "JSON", "object", "for", "the", "available", "time", "entries", "route", "response", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curves_plugin.py#L231-L284
train
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curves_plugin.py
PrCurvesPlugin.is_active
def is_active(self): """Determines whether this plugin is active. This plugin is active only if PR curve summary data is read by TensorBoard. Returns: Whether this plugin is active. """ if self._db_connection_provider: # The plugin is active if one relevant tag can be found in the database. db = self._db_connection_provider() cursor = db.execute( ''' SELECT 1 FROM Tags WHERE Tags.plugin_name = ? LIMIT 1 ''', (metadata.PLUGIN_NAME,)) return bool(list(cursor)) if not self._multiplexer: return False all_runs = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME) # The plugin is active if any of the runs has a tag relevant to the plugin. return any(six.itervalues(all_runs))
python
def is_active(self): """Determines whether this plugin is active. This plugin is active only if PR curve summary data is read by TensorBoard. Returns: Whether this plugin is active. """ if self._db_connection_provider: # The plugin is active if one relevant tag can be found in the database. db = self._db_connection_provider() cursor = db.execute( ''' SELECT 1 FROM Tags WHERE Tags.plugin_name = ? LIMIT 1 ''', (metadata.PLUGIN_NAME,)) return bool(list(cursor)) if not self._multiplexer: return False all_runs = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME) # The plugin is active if any of the runs has a tag relevant to the plugin. return any(six.itervalues(all_runs))
[ "def", "is_active", "(", "self", ")", ":", "if", "self", ".", "_db_connection_provider", ":", "# The plugin is active if one relevant tag can be found in the database.", "db", "=", "self", ".", "_db_connection_provider", "(", ")", "cursor", "=", "db", ".", "execute", "(", "'''\n SELECT 1\n FROM Tags\n WHERE Tags.plugin_name = ?\n LIMIT 1\n '''", ",", "(", "metadata", ".", "PLUGIN_NAME", ",", ")", ")", "return", "bool", "(", "list", "(", "cursor", ")", ")", "if", "not", "self", ".", "_multiplexer", ":", "return", "False", "all_runs", "=", "self", ".", "_multiplexer", ".", "PluginRunToTagToContent", "(", "metadata", ".", "PLUGIN_NAME", ")", "# The plugin is active if any of the runs has a tag relevant to the plugin.", "return", "any", "(", "six", ".", "itervalues", "(", "all_runs", ")", ")" ]
Determines whether this plugin is active. This plugin is active only if PR curve summary data is read by TensorBoard. Returns: Whether this plugin is active.
[ "Determines", "whether", "this", "plugin", "is", "active", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curves_plugin.py#L314-L341
train
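The final line of `is_active` leans on dict truthiness: a run maps to a nonempty tag dict only when it has data relevant to the plugin. A two-line illustration with made-up mappings:

# The activity idiom: any run with a nonempty tag mapping activates
# the plugin (mappings below are hypothetical).
import six

print(any(six.itervalues({"run1": {}, "run2": {}})))    # False
print(any(six.itervalues({"run1": {"tag": b"meta"}})))  # True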
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curves_plugin.py
PrCurvesPlugin._process_tensor_event
def _process_tensor_event(self, event, thresholds): """Converts a TensorEvent into a dict that encapsulates information on it. Args: event: The TensorEvent to convert. thresholds: An array of floats that ranges from 0 to 1 (in that direction and inclusive of 0 and 1). Returns: A JSON-able dictionary of PR curve data for 1 step. """ return self._make_pr_entry( event.step, event.wall_time, tensor_util.make_ndarray(event.tensor_proto), thresholds)
python
def _process_tensor_event(self, event, thresholds): """Converts a TensorEvent into a dict that encapsulates information on it. Args: event: The TensorEvent to convert. thresholds: An array of floats that ranges from 0 to 1 (in that direction and inclusive of 0 and 1). Returns: A JSON-able dictionary of PR curve data for 1 step. """ return self._make_pr_entry( event.step, event.wall_time, tensor_util.make_ndarray(event.tensor_proto), thresholds)
[ "def", "_process_tensor_event", "(", "self", ",", "event", ",", "thresholds", ")", ":", "return", "self", ".", "_make_pr_entry", "(", "event", ".", "step", ",", "event", ".", "wall_time", ",", "tensor_util", ".", "make_ndarray", "(", "event", ".", "tensor_proto", ")", ",", "thresholds", ")" ]
Converts a TensorEvent into a dict that encapsulates information on it. Args: event: The TensorEvent to convert. thresholds: An array of floats that ranges from 0 to 1 (in that direction and inclusive of 0 and 1). Returns: A JSON-able dictionary of PR curve data for 1 step.
[ "Converts", "a", "TensorEvent", "into", "a", "dict", "that", "encapsulates", "information", "on", "it", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curves_plugin.py#L343-L358
train
tensorflow/tensorboard
tensorboard/plugins/pr_curve/pr_curves_plugin.py
PrCurvesPlugin._make_pr_entry
def _make_pr_entry(self, step, wall_time, data_array, thresholds): """Creates an entry for PR curve data. Each entry corresponds to 1 step. Args: step: The step. wall_time: The wall time. data_array: A numpy array of PR curve data stored in the summary format. thresholds: An array of floating point thresholds. Returns: A PR curve entry. """ # Trim entries for which TP + FP = 0 (precision is undefined) at the tail of # the data. true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]] false_positives = [ int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]] tp_index = metadata.TRUE_POSITIVES_INDEX fp_index = metadata.FALSE_POSITIVES_INDEX positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0) end_index_inclusive = len(positives) - 1 while end_index_inclusive > 0 and positives[end_index_inclusive] == 0: end_index_inclusive -= 1 end_index = end_index_inclusive + 1 return { 'wall_time': wall_time, 'step': step, 'precision': data_array[metadata.PRECISION_INDEX, :end_index].tolist(), 'recall': data_array[metadata.RECALL_INDEX, :end_index].tolist(), 'true_positives': true_positives[:end_index], 'false_positives': false_positives[:end_index], 'true_negatives': [int(v) for v in data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]], 'false_negatives': [int(v) for v in data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]], 'thresholds': thresholds[:end_index], }
python
def _make_pr_entry(self, step, wall_time, data_array, thresholds): """Creates an entry for PR curve data. Each entry corresponds to 1 step. Args: step: The step. wall_time: The wall time. data_array: A numpy array of PR curve data stored in the summary format. thresholds: An array of floating point thresholds. Returns: A PR curve entry. """ # Trim entries for which TP + FP = 0 (precision is undefined) at the tail of # the data. true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]] false_positives = [ int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]] tp_index = metadata.TRUE_POSITIVES_INDEX fp_index = metadata.FALSE_POSITIVES_INDEX positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0) end_index_inclusive = len(positives) - 1 while end_index_inclusive > 0 and positives[end_index_inclusive] == 0: end_index_inclusive -= 1 end_index = end_index_inclusive + 1 return { 'wall_time': wall_time, 'step': step, 'precision': data_array[metadata.PRECISION_INDEX, :end_index].tolist(), 'recall': data_array[metadata.RECALL_INDEX, :end_index].tolist(), 'true_positives': true_positives[:end_index], 'false_positives': false_positives[:end_index], 'true_negatives': [int(v) for v in data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]], 'false_negatives': [int(v) for v in data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]], 'thresholds': thresholds[:end_index], }
[ "def", "_make_pr_entry", "(", "self", ",", "step", ",", "wall_time", ",", "data_array", ",", "thresholds", ")", ":", "# Trim entries for which TP + FP = 0 (precision is undefined) at the tail of", "# the data.", "true_positives", "=", "[", "int", "(", "v", ")", "for", "v", "in", "data_array", "[", "metadata", ".", "TRUE_POSITIVES_INDEX", "]", "]", "false_positives", "=", "[", "int", "(", "v", ")", "for", "v", "in", "data_array", "[", "metadata", ".", "FALSE_POSITIVES_INDEX", "]", "]", "tp_index", "=", "metadata", ".", "TRUE_POSITIVES_INDEX", "fp_index", "=", "metadata", ".", "FALSE_POSITIVES_INDEX", "positives", "=", "data_array", "[", "[", "tp_index", ",", "fp_index", "]", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "sum", "(", "axis", "=", "0", ")", "end_index_inclusive", "=", "len", "(", "positives", ")", "-", "1", "while", "end_index_inclusive", ">", "0", "and", "positives", "[", "end_index_inclusive", "]", "==", "0", ":", "end_index_inclusive", "-=", "1", "end_index", "=", "end_index_inclusive", "+", "1", "return", "{", "'wall_time'", ":", "wall_time", ",", "'step'", ":", "step", ",", "'precision'", ":", "data_array", "[", "metadata", ".", "PRECISION_INDEX", ",", ":", "end_index", "]", ".", "tolist", "(", ")", ",", "'recall'", ":", "data_array", "[", "metadata", ".", "RECALL_INDEX", ",", ":", "end_index", "]", ".", "tolist", "(", ")", ",", "'true_positives'", ":", "true_positives", "[", ":", "end_index", "]", ",", "'false_positives'", ":", "false_positives", "[", ":", "end_index", "]", ",", "'true_negatives'", ":", "[", "int", "(", "v", ")", "for", "v", "in", "data_array", "[", "metadata", ".", "TRUE_NEGATIVES_INDEX", "]", "[", ":", "end_index", "]", "]", ",", "'false_negatives'", ":", "[", "int", "(", "v", ")", "for", "v", "in", "data_array", "[", "metadata", ".", "FALSE_NEGATIVES_INDEX", "]", "[", ":", "end_index", "]", "]", ",", "'thresholds'", ":", "thresholds", "[", ":", "end_index", "]", ",", "}" ]
Creates an entry for PR curve data. Each entry corresponds to 1 step. Args: step: The step. wall_time: The wall time. data_array: A numpy array of PR curve data stored in the summary format. thresholds: An array of floating point thresholds. Returns: A PR curve entry.
[ "Creates", "an", "entry", "for", "PR", "curve", "data", ".", "Each", "entry", "corresponds", "to", "1", "step", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/pr_curves_plugin.py#L360-L399
train
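The trimming loop in `_make_pr_entry` is the subtle part: it walks back from the tail while TP + FP is zero, so trailing thresholds with undefined precision are dropped but interior zeros survive. The counts below are made up.

# The tail-trimming rule in isolation: drop trailing thresholds where
# TP + FP == 0, keeping everything up to the last defined point.
import numpy as np

tp = np.array([5, 3, 1, 0, 0])
fp = np.array([4, 2, 0, 0, 0])
positives = tp + fp
end = len(positives) - 1
while end > 0 and positives[end] == 0:
    end -= 1
print(tp[:end + 1], fp[:end + 1])  # [5 3 1] [4 2 0]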
tensorflow/tensorboard
tensorboard/plugins/hparams/api.py
_normalize_hparams
def _normalize_hparams(hparams): """Normalize a dict keyed by `HParam`s and/or raw strings. Args: hparams: A `dict` whose keys are `HParam` objects and/or strings representing hyperparameter names, and whose values are hyperparameter values. No two keys may have the same name. Returns: A `dict` whose keys are hyperparameter names (as strings) and whose values are the corresponding hyperparameter values. Raises: ValueError: If two entries in `hparams` share the same hyperparameter name. """ result = {} for (k, v) in six.iteritems(hparams): if isinstance(k, HParam): k = k.name if k in result: raise ValueError("multiple values specified for hparam %r" % (k,)) result[k] = v return result
python
def _normalize_hparams(hparams): """Normalize a dict keyed by `HParam`s and/or raw strings. Args: hparams: A `dict` whose keys are `HParam` objects and/or strings representing hyperparameter names, and whose values are hyperparameter values. No two keys may have the same name. Returns: A `dict` whose keys are hyperparameter names (as strings) and whose values are the corresponding hyperparameter values. Raises: ValueError: If two entries in `hparams` share the same hyperparameter name. """ result = {} for (k, v) in six.iteritems(hparams): if isinstance(k, HParam): k = k.name if k in result: raise ValueError("multiple values specified for hparam %r" % (k,)) result[k] = v return result
[ "def", "_normalize_hparams", "(", "hparams", ")", ":", "result", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "hparams", ")", ":", "if", "isinstance", "(", "k", ",", "HParam", ")", ":", "k", "=", "k", ".", "name", "if", "k", "in", "result", ":", "raise", "ValueError", "(", "\"multiple values specified for hparam %r\"", "%", "(", "k", ",", ")", ")", "result", "[", "k", "]", "=", "v", "return", "result" ]
Normalize a dict keyed by `HParam`s and/or raw strings. Args: hparams: A `dict` whose keys are `HParam` objects and/or strings representing hyperparameter names, and whose values are hyperparameter values. No two keys may have the same name. Returns: A `dict` whose keys are hyperparameter names (as strings) and whose values are the corresponding hyperparameter values. Raises: ValueError: If two entries in `hparams` share the same hyperparameter name.
[ "Normalize", "a", "dict", "keyed", "by", "HParam", "s", "and", "/", "or", "raw", "strings", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/api.py#L491-L514
train
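The flattening behavior of `_normalize_hparams` is easy to reproduce without the real `HParam` class, since the function only touches `.name`; the namedtuple below is a minimal stand-in.

# Reproducing the key normalization with a minimal HParam stand-in
# (only the .name attribute matters here).
import collections

HParam = collections.namedtuple("HParam", ["name"])
hparams = {HParam("units"): 32, "dropout": 0.1}
result = {}
for k, v in hparams.items():
    name = k.name if isinstance(k, HParam) else k
    if name in result:
        raise ValueError("multiple values specified for hparam %r" % (name,))
    result[name] = v
print(result)  # {'units': 32, 'dropout': 0.1}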
tensorflow/tensorboard
tensorboard/plugins/hparams/api.py
Experiment.summary_pb
def summary_pb(self): """Create a top-level experiment summary describing this experiment. The resulting summary should be written to a log directory that encloses all the individual sessions' log directories. Analogous to the low-level `experiment_pb` function in the `hparams.summary` module. """ hparam_infos = [] for hparam in self._hparams: info = api_pb2.HParamInfo( name=hparam.name, description=hparam.description, display_name=hparam.display_name, ) domain = hparam.domain if domain is not None: domain.update_hparam_info(info) hparam_infos.append(info) metric_infos = [metric.as_proto() for metric in self._metrics] return summary.experiment_pb( hparam_infos=hparam_infos, metric_infos=metric_infos, user=self._user, description=self._description, time_created_secs=self._time_created_secs, )
python
def summary_pb(self): """Create a top-level experiment summary describing this experiment. The resulting summary should be written to a log directory that encloses all the individual sessions' log directories. Analogous to the low-level `experiment_pb` function in the `hparams.summary` module. """ hparam_infos = [] for hparam in self._hparams: info = api_pb2.HParamInfo( name=hparam.name, description=hparam.description, display_name=hparam.display_name, ) domain = hparam.domain if domain is not None: domain.update_hparam_info(info) hparam_infos.append(info) metric_infos = [metric.as_proto() for metric in self._metrics] return summary.experiment_pb( hparam_infos=hparam_infos, metric_infos=metric_infos, user=self._user, description=self._description, time_created_secs=self._time_created_secs, )
[ "def", "summary_pb", "(", "self", ")", ":", "hparam_infos", "=", "[", "]", "for", "hparam", "in", "self", ".", "_hparams", ":", "info", "=", "api_pb2", ".", "HParamInfo", "(", "name", "=", "hparam", ".", "name", ",", "description", "=", "hparam", ".", "description", ",", "display_name", "=", "hparam", ".", "display_name", ",", ")", "domain", "=", "hparam", ".", "domain", "if", "domain", "is", "not", "None", ":", "domain", ".", "update_hparam_info", "(", "info", ")", "hparam_infos", ".", "append", "(", "info", ")", "metric_infos", "=", "[", "metric", ".", "as_proto", "(", ")", "for", "metric", "in", "self", ".", "_metrics", "]", "return", "summary", ".", "experiment_pb", "(", "hparam_infos", "=", "hparam_infos", ",", "metric_infos", "=", "metric_infos", ",", "user", "=", "self", ".", "_user", ",", "description", "=", "self", ".", "_description", ",", "time_created_secs", "=", "self", ".", "_time_created_secs", ",", ")" ]
Create a top-level experiment summary describing this experiment. The resulting summary should be written to a log directory that encloses all the individual sessions' log directories. Analogous to the low-level `experiment_pb` function in the `hparams.summary` module.
[ "Create", "a", "top", "-", "level", "experiment", "summary", "describing", "this", "experiment", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/api.py#L90-L117
train
tensorflow/tensorboard
tensorboard/plugins/histogram/histograms_demo.py
run_all
def run_all(logdir, verbose=False, num_summaries=400): """Generate a bunch of histogram data, and write it to logdir.""" del verbose tf.compat.v1.set_random_seed(0) k = tf.compat.v1.placeholder(tf.float32) # Make a normal distribution, with a shifting mean mean_moving_normal = tf.random.normal(shape=[1000], mean=(5*k), stddev=1) # Record that distribution into a histogram summary histogram_summary.op("normal/moving_mean", mean_moving_normal, description="A normal distribution whose mean changes " "over time.") # Make a normal distribution with shrinking variance shrinking_normal = tf.random.normal(shape=[1000], mean=0, stddev=1-(k)) # Record that distribution too histogram_summary.op("normal/shrinking_variance", shrinking_normal, description="A normal distribution whose variance " "shrinks over time.") # Let's combine both of those distributions into one dataset normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0) # We add another histogram summary to record the combined distribution histogram_summary.op("normal/bimodal", normal_combined, description="A combination of two normal distributions, " "one with a moving mean and one with " "shrinking variance. The result is a " "distribution that starts as unimodal and " "becomes more and more bimodal over time.") # Add a gamma distribution gamma = tf.random.gamma(shape=[1000], alpha=k) histogram_summary.op("gamma", gamma, description="A gamma distribution whose shape " "parameter, α, changes over time.") # And a poisson distribution poisson = tf.compat.v1.random_poisson(shape=[1000], lam=k) histogram_summary.op("poisson", poisson, description="A Poisson distribution, which only " "takes on integer values.") # And a uniform distribution uniform = tf.random.uniform(shape=[1000], maxval=k*10) histogram_summary.op("uniform", uniform, description="A simple uniform distribution.") # Finally, combine everything together! all_distributions = [mean_moving_normal, shrinking_normal, gamma, poisson, uniform] all_combined = tf.concat(all_distributions, 0) histogram_summary.op("all_combined", all_combined, description="An amalgamation of five distributions: a " "uniform distribution, a gamma " "distribution, a Poisson distribution, and " "two normal distributions.") summaries = tf.compat.v1.summary.merge_all() # Setup a session and summary writer sess = tf.compat.v1.Session() writer = tf.summary.FileWriter(logdir) # Setup a loop and write the summaries to disk N = num_summaries for step in xrange(N): k_val = step/float(N) summ = sess.run(summaries, feed_dict={k: k_val}) writer.add_summary(summ, global_step=step)
python
def run_all(logdir, verbose=False, num_summaries=400): """Generate a bunch of histogram data, and write it to logdir.""" del verbose tf.compat.v1.set_random_seed(0) k = tf.compat.v1.placeholder(tf.float32) # Make a normal distribution, with a shifting mean mean_moving_normal = tf.random.normal(shape=[1000], mean=(5*k), stddev=1) # Record that distribution into a histogram summary histogram_summary.op("normal/moving_mean", mean_moving_normal, description="A normal distribution whose mean changes " "over time.") # Make a normal distribution with shrinking variance shrinking_normal = tf.random.normal(shape=[1000], mean=0, stddev=1-(k)) # Record that distribution too histogram_summary.op("normal/shrinking_variance", shrinking_normal, description="A normal distribution whose variance " "shrinks over time.") # Let's combine both of those distributions into one dataset normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0) # We add another histogram summary to record the combined distribution histogram_summary.op("normal/bimodal", normal_combined, description="A combination of two normal distributions, " "one with a moving mean and one with " "shrinking variance. The result is a " "distribution that starts as unimodal and " "becomes more and more bimodal over time.") # Add a gamma distribution gamma = tf.random.gamma(shape=[1000], alpha=k) histogram_summary.op("gamma", gamma, description="A gamma distribution whose shape " "parameter, α, changes over time.") # And a poisson distribution poisson = tf.compat.v1.random_poisson(shape=[1000], lam=k) histogram_summary.op("poisson", poisson, description="A Poisson distribution, which only " "takes on integer values.") # And a uniform distribution uniform = tf.random.uniform(shape=[1000], maxval=k*10) histogram_summary.op("uniform", uniform, description="A simple uniform distribution.") # Finally, combine everything together! all_distributions = [mean_moving_normal, shrinking_normal, gamma, poisson, uniform] all_combined = tf.concat(all_distributions, 0) histogram_summary.op("all_combined", all_combined, description="An amalgamation of five distributions: a " "uniform distribution, a gamma " "distribution, a Poisson distribution, and " "two normal distributions.") summaries = tf.compat.v1.summary.merge_all() # Setup a session and summary writer sess = tf.compat.v1.Session() writer = tf.summary.FileWriter(logdir) # Setup a loop and write the summaries to disk N = num_summaries for step in xrange(N): k_val = step/float(N) summ = sess.run(summaries, feed_dict={k: k_val}) writer.add_summary(summ, global_step=step)
[ "def", "run_all", "(", "logdir", ",", "verbose", "=", "False", ",", "num_summaries", "=", "400", ")", ":", "del", "verbose", "tf", ".", "compat", ".", "v1", ".", "set_random_seed", "(", "0", ")", "k", "=", "tf", ".", "compat", ".", "v1", ".", "placeholder", "(", "tf", ".", "float32", ")", "# Make a normal distribution, with a shifting mean", "mean_moving_normal", "=", "tf", ".", "random", ".", "normal", "(", "shape", "=", "[", "1000", "]", ",", "mean", "=", "(", "5", "*", "k", ")", ",", "stddev", "=", "1", ")", "# Record that distribution into a histogram summary", "histogram_summary", ".", "op", "(", "\"normal/moving_mean\"", ",", "mean_moving_normal", ",", "description", "=", "\"A normal distribution whose mean changes \"", "\"over time.\"", ")", "# Make a normal distribution with shrinking variance", "shrinking_normal", "=", "tf", ".", "random", ".", "normal", "(", "shape", "=", "[", "1000", "]", ",", "mean", "=", "0", ",", "stddev", "=", "1", "-", "(", "k", ")", ")", "# Record that distribution too", "histogram_summary", ".", "op", "(", "\"normal/shrinking_variance\"", ",", "shrinking_normal", ",", "description", "=", "\"A normal distribution whose variance \"", "\"shrinks over time.\"", ")", "# Let's combine both of those distributions into one dataset", "normal_combined", "=", "tf", ".", "concat", "(", "[", "mean_moving_normal", ",", "shrinking_normal", "]", ",", "0", ")", "# We add another histogram summary to record the combined distribution", "histogram_summary", ".", "op", "(", "\"normal/bimodal\"", ",", "normal_combined", ",", "description", "=", "\"A combination of two normal distributions, \"", "\"one with a moving mean and one with \"", "\"shrinking variance. The result is a \"", "\"distribution that starts as unimodal and \"", "\"becomes more and more bimodal over time.\"", ")", "# Add a gamma distribution", "gamma", "=", "tf", ".", "random", ".", "gamma", "(", "shape", "=", "[", "1000", "]", ",", "alpha", "=", "k", ")", "histogram_summary", ".", "op", "(", "\"gamma\"", ",", "gamma", ",", "description", "=", "\"A gamma distribution whose shape \"", "\"parameter, α, changes over time.\")", "", "# And a poisson distribution", "poisson", "=", "tf", ".", "compat", ".", "v1", ".", "random_poisson", "(", "shape", "=", "[", "1000", "]", ",", "lam", "=", "k", ")", "histogram_summary", ".", "op", "(", "\"poisson\"", ",", "poisson", ",", "description", "=", "\"A Poisson distribution, which only \"", "\"takes on integer values.\"", ")", "# And a uniform distribution", "uniform", "=", "tf", ".", "random", ".", "uniform", "(", "shape", "=", "[", "1000", "]", ",", "maxval", "=", "k", "*", "10", ")", "histogram_summary", ".", "op", "(", "\"uniform\"", ",", "uniform", ",", "description", "=", "\"A simple uniform distribution.\"", ")", "# Finally, combine everything together!", "all_distributions", "=", "[", "mean_moving_normal", ",", "shrinking_normal", ",", "gamma", ",", "poisson", ",", "uniform", "]", "all_combined", "=", "tf", ".", "concat", "(", "all_distributions", ",", "0", ")", "histogram_summary", ".", "op", "(", "\"all_combined\"", ",", "all_combined", ",", "description", "=", "\"An amalgamation of five distributions: a \"", "\"uniform distribution, a gamma \"", "\"distribution, a Poisson distribution, and \"", "\"two normal distributions.\"", ")", "summaries", "=", "tf", ".", "compat", ".", "v1", ".", "summary", ".", "merge_all", "(", ")", "# Setup a session and summary writer", "sess", "=", "tf", ".", "compat", ".", "v1", ".", "Session", "(", ")", "writer", "=", "tf", ".", 
"summary", ".", "FileWriter", "(", "logdir", ")", "# Setup a loop and write the summaries to disk", "N", "=", "num_summaries", "for", "step", "in", "xrange", "(", "N", ")", ":", "k_val", "=", "step", "/", "float", "(", "N", ")", "summ", "=", "sess", ".", "run", "(", "summaries", ",", "feed_dict", "=", "{", "k", ":", "k_val", "}", ")", "writer", ".", "add_summary", "(", "summ", ",", "global_step", "=", "step", ")" ]
Generate a bunch of histogram data, and write it to logdir.
[ "Generate", "a", "bunch", "of", "histogram", "data", "and", "write", "it", "to", "logdir", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/histograms_demo.py#L32-L103
train
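The demo drives every distribution with a single placeholder `k` swept from 0 toward 1 across the steps. For readers on TF2 eager mode, a hedged equivalent of the moving-mean histogram, using `tf.summary.histogram` rather than the demo's graph-mode summary op, might look like this (the logdir is an assumption):

# A TF2 eager-mode sketch of the moving-mean histogram: k sweeps from
# 0 toward 1, shifting the distribution's mean as the step advances.
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/histograms_sketch")
with writer.as_default():
    for step in range(10):
        k = step / 10.0
        data = tf.random.normal([1000], mean=5 * k, stddev=1.0)
        tf.summary.histogram("normal/moving_mean", data, step=step)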
tensorflow/tensorboard
tensorboard/plugins/projector/projector_plugin.py
_parse_positive_int_param
def _parse_positive_int_param(request, param_name): """Parses and asserts a positive (>0) integer query parameter. Args: request: The Werkzeug Request object param_name: Name of the parameter. Returns: Param, or None, or -1 if parameter is not a positive integer. """ param = request.args.get(param_name) if not param: return None try: param = int(param) if param <= 0: raise ValueError() return param except ValueError: return -1
python
def _parse_positive_int_param(request, param_name): """Parses and asserts a positive (>0) integer query parameter. Args: request: The Werkzeug Request object param_name: Name of the parameter. Returns: Param, or None, or -1 if parameter is not a positive integer. """ param = request.args.get(param_name) if not param: return None try: param = int(param) if param <= 0: raise ValueError() return param except ValueError: return -1
[ "def", "_parse_positive_int_param", "(", "request", ",", "param_name", ")", ":", "param", "=", "request", ".", "args", ".", "get", "(", "param_name", ")", "if", "not", "param", ":", "return", "None", "try", ":", "param", "=", "int", "(", "param", ")", "if", "param", "<=", "0", ":", "raise", "ValueError", "(", ")", "return", "param", "except", "ValueError", ":", "return", "-", "1" ]
Parses and asserts a positive (>0) integer query parameter. Args: request: The Werkzeug Request object param_name: Name of the parameter. Returns: Param, or None, or -1 if parameter is not a positive integer.
[ "Parses", "and", "asserts", "a", "positive", "(", ">", "0", ")", "integer", "query", "parameter", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/projector/projector_plugin.py#L189-L208
train
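The parsing contract, illustrated with a hypothetical stand-in for the Werkzeug request (only the .args mapping is consulted):

    class _FakeRequest(object):
      def __init__(self, args):
        self.args = args

    assert _parse_positive_int_param(_FakeRequest({'limit': '10'}), 'limit') == 10
    assert _parse_positive_int_param(_FakeRequest({}), 'limit') is None
    assert _parse_positive_int_param(_FakeRequest({'limit': '-3'}), 'limit') == -1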
tensorflow/tensorboard
tensorboard/plugins/projector/projector_plugin.py
EmbeddingMetadata.add_column
def add_column(self, column_name, column_values): """Adds a named column of metadata values. Args: column_name: Name of the column. column_values: 1D array/list/iterable holding the column values. Must be of length `num_points`. The i-th value corresponds to the i-th point. Raises: ValueError: If `column_values` is not 1D array, or of length `num_points`, or the `name` is already used. """ # Sanity checks. if isinstance(column_values, list) and isinstance(column_values[0], list): raise ValueError('"column_values" must be a flat list, but we detected ' 'that its first entry is a list') if isinstance(column_values, np.ndarray) and column_values.ndim != 1: raise ValueError('"column_values" should be of rank 1, ' 'but is of rank %d' % column_values.ndim) if len(column_values) != self.num_points: raise ValueError('"column_values" should be of length %d, but is of ' 'length %d' % (self.num_points, len(column_values))) if column_name in self.name_to_values: raise ValueError('The column name "%s" is already used' % column_name) self.column_names.append(column_name) self.name_to_values[column_name] = column_values
python
def add_column(self, column_name, column_values): """Adds a named column of metadata values. Args: column_name: Name of the column. column_values: 1D array/list/iterable holding the column values. Must be of length `num_points`. The i-th value corresponds to the i-th point. Raises: ValueError: If `column_values` is not 1D array, or of length `num_points`, or the `name` is already used. """ # Sanity checks. if isinstance(column_values, list) and isinstance(column_values[0], list): raise ValueError('"column_values" must be a flat list, but we detected ' 'that its first entry is a list') if isinstance(column_values, np.ndarray) and column_values.ndim != 1: raise ValueError('"column_values" should be of rank 1, ' 'but is of rank %d' % column_values.ndim) if len(column_values) != self.num_points: raise ValueError('"column_values" should be of length %d, but is of ' 'length %d' % (self.num_points, len(column_values))) if column_name in self.name_to_values: raise ValueError('The column name "%s" is already used' % column_name) self.column_names.append(column_name) self.name_to_values[column_name] = column_values
[ "def", "add_column", "(", "self", ",", "column_name", ",", "column_values", ")", ":", "# Sanity checks.", "if", "isinstance", "(", "column_values", ",", "list", ")", "and", "isinstance", "(", "column_values", "[", "0", "]", ",", "list", ")", ":", "raise", "ValueError", "(", "'\"column_values\" must be a flat list, but we detected '", "'that its first entry is a list'", ")", "if", "isinstance", "(", "column_values", ",", "np", ".", "ndarray", ")", "and", "column_values", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "'\"column_values\" should be of rank 1, '", "'but is of rank %d'", "%", "column_values", ".", "ndim", ")", "if", "len", "(", "column_values", ")", "!=", "self", ".", "num_points", ":", "raise", "ValueError", "(", "'\"column_values\" should be of length %d, but is of '", "'length %d'", "%", "(", "self", ".", "num_points", ",", "len", "(", "column_values", ")", ")", ")", "if", "column_name", "in", "self", ".", "name_to_values", ":", "raise", "ValueError", "(", "'The column name \"%s\" is already used'", "%", "column_name", ")", "self", ".", "column_names", ".", "append", "(", "column_name", ")", "self", ".", "name_to_values", "[", "column_name", "]", "=", "column_values" ]
Adds a named column of metadata values. Args: column_name: Name of the column. column_values: 1D array/list/iterable holding the column values. Must be of length `num_points`. The i-th value corresponds to the i-th point. Raises: ValueError: If `column_values` is not 1D array, or of length `num_points`, or the `name` is already used.
[ "Adds", "a", "named", "column", "of", "metadata", "values", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/projector/projector_plugin.py#L118-L145
train
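A sketch of building up metadata; the assumption that EmbeddingMetadata is constructed with the number of points follows from the length check above, and the import path follows from this record's path field:

    import numpy as np
    from tensorboard.plugins.projector.projector_plugin import EmbeddingMetadata

    metadata = EmbeddingMetadata(3)
    metadata.add_column('labels', ['cat', 'dog', 'bird'])
    metadata.add_column('weights', np.array([0.1, 0.2, 0.7]))
    # A repeated name, a nested list, or a length mismatch raises ValueError.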
tensorflow/tensorboard
tensorboard/plugins/projector/projector_plugin.py
ProjectorPlugin.is_active
def is_active(self): """Determines whether this plugin is active. This plugin is only active if any run has an embedding. Returns: Whether any run has embedding data to show in the projector. """ if not self.multiplexer: return False if self._is_active: # We have already determined that the projector plugin should be active. # Do not re-compute that. We have no reason to later set this plugin to be # inactive. return True if self._thread_for_determining_is_active: # We are currently determining whether the plugin is active. Do not start # a separate thread. return self._is_active # The plugin is currently not active. The frontend might check again later. # For now, spin off a separate thread to determine whether the plugin is # active. new_thread = threading.Thread( target=self._determine_is_active, name='ProjectorPluginIsActiveThread') self._thread_for_determining_is_active = new_thread new_thread.start() return False
python
def is_active(self): """Determines whether this plugin is active. This plugin is only active if any run has an embedding. Returns: Whether any run has embedding data to show in the projector. """ if not self.multiplexer: return False if self._is_active: # We have already determined that the projector plugin should be active. # Do not re-compute that. We have no reason to later set this plugin to be # inactive. return True if self._thread_for_determining_is_active: # We are currently determining whether the plugin is active. Do not start # a separate thread. return self._is_active # The plugin is currently not active. The frontend might check again later. # For now, spin off a separate thread to determine whether the plugin is # active. new_thread = threading.Thread( target=self._determine_is_active, name='ProjectorPluginIsActiveThread') self._thread_for_determining_is_active = new_thread new_thread.start() return False
[ "def", "is_active", "(", "self", ")", ":", "if", "not", "self", ".", "multiplexer", ":", "return", "False", "if", "self", ".", "_is_active", ":", "# We have already determined that the projector plugin should be active.", "# Do not re-compute that. We have no reason to later set this plugin to be", "# inactive.", "return", "True", "if", "self", ".", "_thread_for_determining_is_active", ":", "# We are currently determining whether the plugin is active. Do not start", "# a separate thread.", "return", "self", ".", "_is_active", "# The plugin is currently not active. The frontend might check again later.", "# For now, spin off a separate thread to determine whether the plugin is", "# active.", "new_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_determine_is_active", ",", "name", "=", "'ProjectorPluginIsActiveThread'", ")", "self", ".", "_thread_for_determining_is_active", "=", "new_thread", "new_thread", ".", "start", "(", ")", "return", "False" ]
Determines whether this plugin is active. This plugin is only active if any run has an embedding. Returns: Whether any run has embedding data to show in the projector.
[ "Determines", "whether", "this", "plugin", "is", "active", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/projector/projector_plugin.py#L267-L297
train
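The check is deliberately non-blocking: a first call may report False while the background thread scans runs for embeddings, and a later call reports True once one is found. A hypothetical polling loop, with plugin assumed to be an already-constructed ProjectorPlugin:

    import time

    while not plugin.is_active():
      time.sleep(1)  # the frontend re-polls in roughly this fashion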
tensorflow/tensorboard
tensorboard/plugins/projector/projector_plugin.py
ProjectorPlugin.configs
def configs(self): """Returns a map of run paths to `ProjectorConfig` protos.""" run_path_pairs = list(self.run_paths.items()) self._append_plugin_asset_directories(run_path_pairs) # If there are no summary event files, the projector should still work, # treating the `logdir` as the model checkpoint directory. if not run_path_pairs: run_path_pairs.append(('.', self.logdir)) if (self._run_paths_changed() or _latest_checkpoints_changed(self._configs, run_path_pairs)): self.readers = {} self._configs, self.config_fpaths = self._read_latest_config_files( run_path_pairs) self._augment_configs_with_checkpoint_info() return self._configs
python
def configs(self): """Returns a map of run paths to `ProjectorConfig` protos.""" run_path_pairs = list(self.run_paths.items()) self._append_plugin_asset_directories(run_path_pairs) # If there are no summary event files, the projector should still work, # treating the `logdir` as the model checkpoint directory. if not run_path_pairs: run_path_pairs.append(('.', self.logdir)) if (self._run_paths_changed() or _latest_checkpoints_changed(self._configs, run_path_pairs)): self.readers = {} self._configs, self.config_fpaths = self._read_latest_config_files( run_path_pairs) self._augment_configs_with_checkpoint_info() return self._configs
[ "def", "configs", "(", "self", ")", ":", "run_path_pairs", "=", "list", "(", "self", ".", "run_paths", ".", "items", "(", ")", ")", "self", ".", "_append_plugin_asset_directories", "(", "run_path_pairs", ")", "# If there are no summary event files, the projector should still work,", "# treating the `logdir` as the model checkpoint directory.", "if", "not", "run_path_pairs", ":", "run_path_pairs", ".", "append", "(", "(", "'.'", ",", "self", ".", "logdir", ")", ")", "if", "(", "self", ".", "_run_paths_changed", "(", ")", "or", "_latest_checkpoints_changed", "(", "self", ".", "_configs", ",", "run_path_pairs", ")", ")", ":", "self", ".", "readers", "=", "{", "}", "self", ".", "_configs", ",", "self", ".", "config_fpaths", "=", "self", ".", "_read_latest_config_files", "(", "run_path_pairs", ")", "self", ".", "_augment_configs_with_checkpoint_info", "(", ")", "return", "self", ".", "_configs" ]
Returns a map of run paths to `ProjectorConfig` protos.
[ "Returns", "a", "map", "of", "run", "paths", "to", "ProjectorConfig", "protos", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/projector/projector_plugin.py#L311-L325
train
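A sketch of consuming the result; depending on the class definition this may be exposed as a read-only property rather than a plain method, so both spellings are noted:

    configs = plugin.configs  # or plugin.configs() if it is a plain method
    for run, config in configs.items():
      for embedding in config.embeddings:
        print(run, embedding.tensor_name)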
tensorflow/tensorboard
tensorboard/backend/event_processing/event_multiplexer.py
EventMultiplexer.Reload
def Reload(self): """Call `Reload` on every `EventAccumulator`.""" logger.info('Beginning EventMultiplexer.Reload()') self._reload_called = True # Build a list so we're safe even if the list of accumulators is modified # even while we're reloading. with self._accumulators_mutex: items = list(self._accumulators.items()) names_to_delete = set() for name, accumulator in items: try: accumulator.Reload() except (OSError, IOError) as e: logger.error("Unable to reload accumulator '%s': %s", name, e) except directory_watcher.DirectoryDeletedError: names_to_delete.add(name) with self._accumulators_mutex: for name in names_to_delete: logger.warn("Deleting accumulator '%s'", name) del self._accumulators[name] logger.info('Finished with EventMultiplexer.Reload()') return self
python
def Reload(self): """Call `Reload` on every `EventAccumulator`.""" logger.info('Beginning EventMultiplexer.Reload()') self._reload_called = True # Build a list so we're safe even if the list of accumulators is modified # even while we're reloading. with self._accumulators_mutex: items = list(self._accumulators.items()) names_to_delete = set() for name, accumulator in items: try: accumulator.Reload() except (OSError, IOError) as e: logger.error("Unable to reload accumulator '%s': %s", name, e) except directory_watcher.DirectoryDeletedError: names_to_delete.add(name) with self._accumulators_mutex: for name in names_to_delete: logger.warn("Deleting accumulator '%s'", name) del self._accumulators[name] logger.info('Finished with EventMultiplexer.Reload()') return self
[ "def", "Reload", "(", "self", ")", ":", "logger", ".", "info", "(", "'Beginning EventMultiplexer.Reload()'", ")", "self", ".", "_reload_called", "=", "True", "# Build a list so we're safe even if the list of accumulators is modified", "# even while we're reloading.", "with", "self", ".", "_accumulators_mutex", ":", "items", "=", "list", "(", "self", ".", "_accumulators", ".", "items", "(", ")", ")", "names_to_delete", "=", "set", "(", ")", "for", "name", ",", "accumulator", "in", "items", ":", "try", ":", "accumulator", ".", "Reload", "(", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "e", ":", "logger", ".", "error", "(", "\"Unable to reload accumulator '%s': %s\"", ",", "name", ",", "e", ")", "except", "directory_watcher", ".", "DirectoryDeletedError", ":", "names_to_delete", ".", "add", "(", "name", ")", "with", "self", ".", "_accumulators_mutex", ":", "for", "name", "in", "names_to_delete", ":", "logger", ".", "warn", "(", "\"Deleting accumulator '%s'\"", ",", "name", ")", "del", "self", ".", "_accumulators", "[", "name", "]", "logger", ".", "info", "(", "'Finished with EventMultiplexer.Reload()'", ")", "return", "self" ]
Call `Reload` on every `EventAccumulator`.
[ "Call", "Reload", "on", "every", "EventAccumulator", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_multiplexer.py#L179-L202
train
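A typical multiplexer lifecycle, as a sketch (the log directory is a placeholder):

    from tensorboard.backend.event_processing import event_multiplexer

    multiplexer = event_multiplexer.EventMultiplexer()
    multiplexer.AddRunsFromDirectory('/tmp/logdir')
    multiplexer.Reload()  # returns self, so the call can be chained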
tensorflow/tensorboard
tensorboard/backend/event_processing/event_multiplexer.py
EventMultiplexer.Histograms
def Histograms(self, run, tag): """Retrieve the histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.HistogramEvents`. """ accumulator = self.GetAccumulator(run) return accumulator.Histograms(tag)
python
def Histograms(self, run, tag): """Retrieve the histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.HistogramEvents`. """ accumulator = self.GetAccumulator(run) return accumulator.Histograms(tag)
[ "def", "Histograms", "(", "self", ",", "run", ",", "tag", ")", ":", "accumulator", "=", "self", ".", "GetAccumulator", "(", "run", ")", "return", "accumulator", ".", "Histograms", "(", "tag", ")" ]
Retrieve the histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.HistogramEvents`.
[ "Retrieve", "the", "histogram", "events", "associated", "with", "a", "run", "and", "tag", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_multiplexer.py#L323-L338
train
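Fetching histogram events once the multiplexer (as constructed above) has been reloaded; the run and tag names are placeholders, and an unknown run or tag raises KeyError:

    for event in multiplexer.Histograms('run_1', 'normal/moving_mean'):
      print(event.step, event.wall_time, event.histogram_value)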
tensorflow/tensorboard
tensorboard/backend/event_processing/event_multiplexer.py
EventMultiplexer.CompressedHistograms
def CompressedHistograms(self, run, tag): """Retrieve the compressed histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.CompressedHistogramEvents`. """ accumulator = self.GetAccumulator(run) return accumulator.CompressedHistograms(tag)
python
def CompressedHistograms(self, run, tag): """Retrieve the compressed histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.CompressedHistogramEvents`. """ accumulator = self.GetAccumulator(run) return accumulator.CompressedHistograms(tag)
[ "def", "CompressedHistograms", "(", "self", ",", "run", ",", "tag", ")", ":", "accumulator", "=", "self", ".", "GetAccumulator", "(", "run", ")", "return", "accumulator", ".", "CompressedHistograms", "(", "tag", ")" ]
Retrieve the compressed histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.CompressedHistogramEvents`.
[ "Retrieve", "the", "compressed", "histogram", "events", "associated", "with", "a", "run", "and", "tag", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_multiplexer.py#L340-L355
train
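The access pattern mirrors Histograms(); each event carries compressed percentile values rather than raw buckets (run and tag are again placeholders):

    for event in multiplexer.CompressedHistograms('run_1', 'normal/moving_mean'):
      print(event.step, event.compressed_histogram_values)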
tensorflow/tensorboard
tensorboard/backend/event_processing/event_multiplexer.py
EventMultiplexer.Images
def Images(self, run, tag): """Retrieve the image events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ImageEvents`. """ accumulator = self.GetAccumulator(run) return accumulator.Images(tag)
python
def Images(self, run, tag): """Retrieve the image events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ImageEvents`. """ accumulator = self.GetAccumulator(run) return accumulator.Images(tag)
[ "def", "Images", "(", "self", ",", "run", ",", "tag", ")", ":", "accumulator", "=", "self", ".", "GetAccumulator", "(", "run", ")", "return", "accumulator", ".", "Images", "(", "tag", ")" ]
Retrieve the image events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ImageEvents`.
[ "Retrieve", "the", "image", "events", "associated", "with", "a", "run", "and", "tag", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_multiplexer.py#L357-L372
train
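Image events bundle encoded image bytes with their dimensions; a sketch that writes each step to disk (the tag is a placeholder):

    for event in multiplexer.Images('run_1', 'input_image/image/0'):
      with open('step_%d.png' % event.step, 'wb') as f:
        f.write(event.encoded_image_string)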
tensorflow/tensorboard
tensorboard/plugins/histogram/summary_v2.py
histogram
def histogram(name, data, step=None, buckets=None, description=None): """Write a histogram summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A `Tensor` of any shape. Must be castable to `float64`. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. """ summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback summary_scope = ( getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope) with summary_scope( name, 'histogram_summary', values=[data, buckets, step]) as (tag, _): tensor = _buckets(data, bucket_count=buckets) return tf.summary.write( tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
python
def histogram(name, data, step=None, buckets=None, description=None): """Write a histogram summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A `Tensor` of any shape. Must be castable to `float64`. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. """ summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback summary_scope = ( getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope) with summary_scope( name, 'histogram_summary', values=[data, buckets, step]) as (tag, _): tensor = _buckets(data, bucket_count=buckets) return tf.summary.write( tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
[ "def", "histogram", "(", "name", ",", "data", ",", "step", "=", "None", ",", "buckets", "=", "None", ",", "description", "=", "None", ")", ":", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "None", ",", "description", "=", "description", ")", "# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback", "summary_scope", "=", "(", "getattr", "(", "tf", ".", "summary", ".", "experimental", ",", "'summary_scope'", ",", "None", ")", "or", "tf", ".", "summary", ".", "summary_scope", ")", "with", "summary_scope", "(", "name", ",", "'histogram_summary'", ",", "values", "=", "[", "data", ",", "buckets", ",", "step", "]", ")", "as", "(", "tag", ",", "_", ")", ":", "tensor", "=", "_buckets", "(", "data", ",", "bucket_count", "=", "buckets", ")", "return", "tf", ".", "summary", ".", "write", "(", "tag", "=", "tag", ",", "tensor", "=", "tensor", ",", "step", "=", "step", ",", "metadata", "=", "summary_metadata", ")" ]
Write a histogram summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A `Tensor` of any shape. Must be castable to `float64`. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
[ "Write", "a", "histogram", "summary", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/summary_v2.py#L43-L79
train
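TF2-style usage, as a sketch; the writer, log directory, and tag are assumptions:

    import tensorflow as tf
    from tensorboard.plugins.histogram import summary_v2

    writer = tf.summary.create_file_writer('/tmp/histogram_demo')
    with writer.as_default():
      for step in range(100):
        data = tf.random.normal([1000], mean=step / 100.0)
        summary_v2.histogram('activations', data, step=step, buckets=30)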
tensorflow/tensorboard
tensorboard/plugins/histogram/summary_v2.py
histogram_pb
def histogram_pb(tag, data, buckets=None, description=None): """Create a histogram summary protobuf. Arguments: tag: String tag for the summary. data: A `np.array` or array-like form of any shape. Must have type castable to `float`. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `summary_pb2.Summary` protobuf object. """ bucket_count = DEFAULT_BUCKET_COUNT if buckets is None else buckets data = np.array(data).flatten().astype(float) if data.size == 0: buckets = np.array([]).reshape((0, 3)) else: min_ = np.min(data) max_ = np.max(data) range_ = max_ - min_ if range_ == 0: center = min_ buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]]) else: bucket_width = range_ / bucket_count offsets = data - min_ bucket_indices = np.floor(offsets / bucket_width).astype(int) clamped_indices = np.minimum(bucket_indices, bucket_count - 1) one_hots = (np.array([clamped_indices]).transpose() == np.arange(0, bucket_count)) # broadcast assert one_hots.shape == (data.size, bucket_count), ( one_hots.shape, (data.size, bucket_count)) bucket_counts = np.sum(one_hots, axis=0) edges = np.linspace(min_, max_, bucket_count + 1) left_edges = edges[:-1] right_edges = edges[1:] buckets = np.array([left_edges, right_edges, bucket_counts]).transpose() tensor = tensor_util.make_tensor_proto(buckets, dtype=np.float64) summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) summary = summary_pb2.Summary() summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor) return summary
python
def histogram_pb(tag, data, buckets=None, description=None): """Create a histogram summary protobuf. Arguments: tag: String tag for the summary. data: A `np.array` or array-like form of any shape. Must have type castable to `float`. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `summary_pb2.Summary` protobuf object. """ bucket_count = DEFAULT_BUCKET_COUNT if buckets is None else buckets data = np.array(data).flatten().astype(float) if data.size == 0: buckets = np.array([]).reshape((0, 3)) else: min_ = np.min(data) max_ = np.max(data) range_ = max_ - min_ if range_ == 0: center = min_ buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]]) else: bucket_width = range_ / bucket_count offsets = data - min_ bucket_indices = np.floor(offsets / bucket_width).astype(int) clamped_indices = np.minimum(bucket_indices, bucket_count - 1) one_hots = (np.array([clamped_indices]).transpose() == np.arange(0, bucket_count)) # broadcast assert one_hots.shape == (data.size, bucket_count), ( one_hots.shape, (data.size, bucket_count)) bucket_counts = np.sum(one_hots, axis=0) edges = np.linspace(min_, max_, bucket_count + 1) left_edges = edges[:-1] right_edges = edges[1:] buckets = np.array([left_edges, right_edges, bucket_counts]).transpose() tensor = tensor_util.make_tensor_proto(buckets, dtype=np.float64) summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) summary = summary_pb2.Summary() summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor) return summary
[ "def", "histogram_pb", "(", "tag", ",", "data", ",", "buckets", "=", "None", ",", "description", "=", "None", ")", ":", "bucket_count", "=", "DEFAULT_BUCKET_COUNT", "if", "buckets", "is", "None", "else", "buckets", "data", "=", "np", ".", "array", "(", "data", ")", ".", "flatten", "(", ")", ".", "astype", "(", "float", ")", "if", "data", ".", "size", "==", "0", ":", "buckets", "=", "np", ".", "array", "(", "[", "]", ")", ".", "reshape", "(", "(", "0", ",", "3", ")", ")", "else", ":", "min_", "=", "np", ".", "min", "(", "data", ")", "max_", "=", "np", ".", "max", "(", "data", ")", "range_", "=", "max_", "-", "min_", "if", "range_", "==", "0", ":", "center", "=", "min_", "buckets", "=", "np", ".", "array", "(", "[", "[", "center", "-", "0.5", ",", "center", "+", "0.5", ",", "float", "(", "data", ".", "size", ")", "]", "]", ")", "else", ":", "bucket_width", "=", "range_", "/", "bucket_count", "offsets", "=", "data", "-", "min_", "bucket_indices", "=", "np", ".", "floor", "(", "offsets", "/", "bucket_width", ")", ".", "astype", "(", "int", ")", "clamped_indices", "=", "np", ".", "minimum", "(", "bucket_indices", ",", "bucket_count", "-", "1", ")", "one_hots", "=", "(", "np", ".", "array", "(", "[", "clamped_indices", "]", ")", ".", "transpose", "(", ")", "==", "np", ".", "arange", "(", "0", ",", "bucket_count", ")", ")", "# broadcast", "assert", "one_hots", ".", "shape", "==", "(", "data", ".", "size", ",", "bucket_count", ")", ",", "(", "one_hots", ".", "shape", ",", "(", "data", ".", "size", ",", "bucket_count", ")", ")", "bucket_counts", "=", "np", ".", "sum", "(", "one_hots", ",", "axis", "=", "0", ")", "edges", "=", "np", ".", "linspace", "(", "min_", ",", "max_", ",", "bucket_count", "+", "1", ")", "left_edges", "=", "edges", "[", ":", "-", "1", "]", "right_edges", "=", "edges", "[", "1", ":", "]", "buckets", "=", "np", ".", "array", "(", "[", "left_edges", ",", "right_edges", ",", "bucket_counts", "]", ")", ".", "transpose", "(", ")", "tensor", "=", "tensor_util", ".", "make_tensor_proto", "(", "buckets", ",", "dtype", "=", "np", ".", "float64", ")", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "None", ",", "description", "=", "description", ")", "summary", "=", "summary_pb2", ".", "Summary", "(", ")", "summary", ".", "value", ".", "add", "(", "tag", "=", "tag", ",", "metadata", "=", "summary_metadata", ",", "tensor", "=", "tensor", ")", "return", "summary" ]
Create a histogram summary protobuf. Arguments: tag: String tag for the summary. data: A `np.array` or array-like form of any shape. Must have type castable to `float`. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `summary_pb2.Summary` protobuf object.
[ "Create", "a", "histogram", "summary", "protobuf", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/summary_v2.py#L142-L193
train
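Because the proto is built directly, no TensorFlow session is involved; a sketch, with the import path taken from this record's path field:

    import numpy as np
    from tensorboard.plugins.histogram.summary_v2 import histogram_pb

    summary = histogram_pb('weights', np.random.normal(size=1000), buckets=20)
    # summary.value[0].tensor holds (buckets, 3) rows of
    # [left_edge, right_edge, count].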
tensorflow/tensorboard
tensorboard/program.py
setup_environment
def setup_environment(): """Makes recommended modifications to the environment. This function changes global state in the Python process. Calling this function is a good idea, but it can't appropriately be called from library routines. """ absl.logging.set_verbosity(absl.logging.WARNING) # The default is HTTP/1.0 for some strange reason. If we don't use # HTTP/1.1 then a new TCP socket and Python thread is created for # each HTTP request. The tradeoff is we must always specify the # Content-Length header, or do chunked encoding for streaming. serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
python
def setup_environment(): """Makes recommended modifications to the environment. This function changes global state in the Python process. Calling this function is a good idea, but it can't appropriately be called from library routines. """ absl.logging.set_verbosity(absl.logging.WARNING) # The default is HTTP/1.0 for some strange reason. If we don't use # HTTP/1.1 then a new TCP socket and Python thread is created for # each HTTP request. The tradeoff is we must always specify the # Content-Length header, or do chunked encoding for streaming. serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
[ "def", "setup_environment", "(", ")", ":", "absl", ".", "logging", ".", "set_verbosity", "(", "absl", ".", "logging", ".", "WARNING", ")", "# The default is HTTP/1.0 for some strange reason. If we don't use", "# HTTP/1.1 then a new TCP socket and Python thread is created for", "# each HTTP request. The tradeoff is we must always specify the", "# Content-Length header, or do chunked encoding for streaming.", "serving", ".", "WSGIRequestHandler", ".", "protocol_version", "=", "'HTTP/1.1'" ]
Makes recommended modifications to the environment. This function changes global state in the Python process. Calling this function is a good idea, but it can't appropriately be called from library routines.
[ "Makes", "recommended", "modifications", "to", "the", "environment", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L71-L84
train
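Intended to be called once at program startup, before any server is constructed; a sketch:

    from tensorboard import program

    program.setup_environment()
    tb = program.TensorBoard()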
tensorflow/tensorboard
tensorboard/program.py
get_default_assets_zip_provider
def get_default_assets_zip_provider(): """Opens stock TensorBoard web assets collection. Returns: A function that returns a newly opened file handle to a zip file containing static assets for stock TensorBoard, or None if webfiles.zip could not be found. The value the callback returns must be closed. The paths inside the zip file are considered absolute paths on the web server. """ path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))), 'webfiles.zip') if not os.path.exists(path): logger.warning('webfiles.zip static assets not found: %s', path) return None return lambda: open(path, 'rb')
python
def get_default_assets_zip_provider(): """Opens stock TensorBoard web assets collection. Returns: A function that returns a newly opened file handle to a zip file containing static assets for stock TensorBoard, or None if webfiles.zip could not be found. The value the callback returns must be closed. The paths inside the zip file are considered absolute paths on the web server. """ path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))), 'webfiles.zip') if not os.path.exists(path): logger.warning('webfiles.zip static assets not found: %s', path) return None return lambda: open(path, 'rb')
[ "def", "get_default_assets_zip_provider", "(", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "inspect", ".", "getfile", "(", "sys", ".", "_getframe", "(", "1", ")", ")", ")", ",", "'webfiles.zip'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "logger", ".", "warning", "(", "'webfiles.zip static assets not found: %s'", ",", "path", ")", "return", "None", "return", "lambda", ":", "open", "(", "path", ",", "'rb'", ")" ]
Opens stock TensorBoard web assets collection. Returns: A function that returns a newly opened file handle to a zip file containing static assets for stock TensorBoard, or None if webfiles.zip could not be found. The value the callback returns must be closed. The paths inside the zip file are considered absolute paths on the web server.
[ "Opens", "stock", "TensorBoard", "web", "assets", "collection", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L86-L100
train
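The provider is a zero-argument callable, and the caller owns the returned handle; a sketch:

    from tensorboard.program import get_default_assets_zip_provider

    provider = get_default_assets_zip_provider()
    if provider is not None:
      with provider() as zip_file:
        assets = zip_file.read()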
tensorflow/tensorboard
tensorboard/program.py
with_port_scanning
def with_port_scanning(cls): """Create a server factory that performs port scanning. This function returns a callable whose signature matches the specification of `TensorBoardServer.__init__`, using `cls` as an underlying implementation. It passes through `flags` unchanged except in the case that `flags.port is None`, in which case it repeatedly instantiates the underlying server with new port suggestions. Args: cls: A valid implementation of `TensorBoardServer`. This class's initializer should raise a `TensorBoardPortInUseError` upon failing to bind to a port when it is expected that binding to another nearby port might succeed. The initializer for `cls` will only ever be invoked with `flags` such that `flags.port is not None`. Returns: A function that implements the `__init__` contract of `TensorBoardServer`. """ def init(wsgi_app, flags): # base_port: what's the first port to which we should try to bind? # should_scan: if that fails, shall we try additional ports? # max_attempts: how many ports shall we try? should_scan = flags.port is None base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port max_attempts = 10 if should_scan else 1 if base_port > 0xFFFF: raise TensorBoardServerException( 'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF) ) max_attempts = 10 if should_scan else 1 base_port = min(base_port + max_attempts, 0x10000) - max_attempts for port in xrange(base_port, base_port + max_attempts): subflags = argparse.Namespace(**vars(flags)) subflags.port = port try: return cls(wsgi_app=wsgi_app, flags=subflags) except TensorBoardPortInUseError: if not should_scan: raise # All attempts failed to bind. raise TensorBoardServerException( 'TensorBoard could not bind to any port around %s ' '(tried %d times)' % (base_port, max_attempts)) return init
python
def with_port_scanning(cls): """Create a server factory that performs port scanning. This function returns a callable whose signature matches the specification of `TensorBoardServer.__init__`, using `cls` as an underlying implementation. It passes through `flags` unchanged except in the case that `flags.port is None`, in which case it repeatedly instantiates the underlying server with new port suggestions. Args: cls: A valid implementation of `TensorBoardServer`. This class's initializer should raise a `TensorBoardPortInUseError` upon failing to bind to a port when it is expected that binding to another nearby port might succeed. The initializer for `cls` will only ever be invoked with `flags` such that `flags.port is not None`. Returns: A function that implements the `__init__` contract of `TensorBoardServer`. """ def init(wsgi_app, flags): # base_port: what's the first port to which we should try to bind? # should_scan: if that fails, shall we try additional ports? # max_attempts: how many ports shall we try? should_scan = flags.port is None base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port max_attempts = 10 if should_scan else 1 if base_port > 0xFFFF: raise TensorBoardServerException( 'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF) ) max_attempts = 10 if should_scan else 1 base_port = min(base_port + max_attempts, 0x10000) - max_attempts for port in xrange(base_port, base_port + max_attempts): subflags = argparse.Namespace(**vars(flags)) subflags.port = port try: return cls(wsgi_app=wsgi_app, flags=subflags) except TensorBoardPortInUseError: if not should_scan: raise # All attempts failed to bind. raise TensorBoardServerException( 'TensorBoard could not bind to any port around %s ' '(tried %d times)' % (base_port, max_attempts)) return init
[ "def", "with_port_scanning", "(", "cls", ")", ":", "def", "init", "(", "wsgi_app", ",", "flags", ")", ":", "# base_port: what's the first port to which we should try to bind?", "# should_scan: if that fails, shall we try additional ports?", "# max_attempts: how many ports shall we try?", "should_scan", "=", "flags", ".", "port", "is", "None", "base_port", "=", "core_plugin", ".", "DEFAULT_PORT", "if", "flags", ".", "port", "is", "None", "else", "flags", ".", "port", "max_attempts", "=", "10", "if", "should_scan", "else", "1", "if", "base_port", ">", "0xFFFF", ":", "raise", "TensorBoardServerException", "(", "'TensorBoard cannot bind to port %d > %d'", "%", "(", "base_port", ",", "0xFFFF", ")", ")", "max_attempts", "=", "10", "if", "should_scan", "else", "1", "base_port", "=", "min", "(", "base_port", "+", "max_attempts", ",", "0x10000", ")", "-", "max_attempts", "for", "port", "in", "xrange", "(", "base_port", ",", "base_port", "+", "max_attempts", ")", ":", "subflags", "=", "argparse", ".", "Namespace", "(", "*", "*", "vars", "(", "flags", ")", ")", "subflags", ".", "port", "=", "port", "try", ":", "return", "cls", "(", "wsgi_app", "=", "wsgi_app", ",", "flags", "=", "subflags", ")", "except", "TensorBoardPortInUseError", ":", "if", "not", "should_scan", ":", "raise", "# All attempts failed to bind.", "raise", "TensorBoardServerException", "(", "'TensorBoard could not bind to any port around %s '", "'(tried %d times)'", "%", "(", "base_port", ",", "max_attempts", ")", ")", "return", "init" ]
Create a server factory that performs port scanning. This function returns a callable whose signature matches the specification of `TensorBoardServer.__init__`, using `cls` as an underlying implementation. It passes through `flags` unchanged except in the case that `flags.port is None`, in which case it repeatedly instantiates the underlying server with new port suggestions. Args: cls: A valid implementation of `TensorBoardServer`. This class's initializer should raise a `TensorBoardPortInUseError` upon failing to bind to a port when it is expected that binding to another nearby port might succeed. The initializer for `cls` will only ever be invoked with `flags` such that `flags.port is not None`. Returns: A function that implements the `__init__` contract of `TensorBoardServer`.
[ "Create", "a", "server", "factory", "that", "performs", "port", "scanning", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L358-L410
train
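A sketch of wrapping a concrete server class; WerkzeugServer stands in for any TensorBoardServer implementation in this module, and wsgi_app and flags are assumed to be in scope:

    factory = with_port_scanning(WerkzeugServer)
    server = factory(wsgi_app, flags)
    # With flags.port = None this scans ten ports starting at the default.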
tensorflow/tensorboard
tensorboard/program.py
TensorBoard.configure
def configure(self, argv=('',), **kwargs): """Configures TensorBoard behavior via flags. This method will populate the "flags" property with an argparse.Namespace representing flag values parsed from the provided argv list, overridden by explicit flags from remaining keyword arguments. Args: argv: Can be set to CLI args equivalent to sys.argv; the first arg is taken to be the name of the path being executed. kwargs: Additional arguments will override what was parsed from argv. They must be passed as Python data structures, e.g. `foo=1` rather than `foo="1"`. Returns: Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism for absl.app.run() compatibility. Raises: ValueError: If flag values are invalid. """ parser = argparse_flags.ArgumentParser( prog='tensorboard', description=('TensorBoard is a suite of web applications for ' 'inspecting and understanding your TensorFlow runs ' 'and graphs. https://github.com/tensorflow/tensorboard ')) for loader in self.plugin_loaders: loader.define_flags(parser) arg0 = argv[0] if argv else '' flags = parser.parse_args(argv[1:]) # Strip binary name from argv. self.cache_key = manager.cache_key( working_directory=os.getcwd(), arguments=argv[1:], configure_kwargs=kwargs, ) if absl_flags and arg0: # Only expose main module Abseil flags as TensorBoard native flags. # This is the same logic Abseil's ArgumentParser uses for determining # which Abseil flags to include in the short helpstring. for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)): if hasattr(flags, flag.name): raise ValueError('Conflicting Abseil flag: %s' % flag.name) setattr(flags, flag.name, flag.value) for k, v in kwargs.items(): if not hasattr(flags, k): raise ValueError('Unknown TensorBoard flag: %s' % k) setattr(flags, k, v) for loader in self.plugin_loaders: loader.fix_flags(flags) self.flags = flags return [arg0]
python
def configure(self, argv=('',), **kwargs): """Configures TensorBoard behavior via flags. This method will populate the "flags" property with an argparse.Namespace representing flag values parsed from the provided argv list, overridden by explicit flags from remaining keyword arguments. Args: argv: Can be set to CLI args equivalent to sys.argv; the first arg is taken to be the name of the path being executed. kwargs: Additional arguments will override what was parsed from argv. They must be passed as Python data structures, e.g. `foo=1` rather than `foo="1"`. Returns: Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism for absl.app.run() compatibility. Raises: ValueError: If flag values are invalid. """ parser = argparse_flags.ArgumentParser( prog='tensorboard', description=('TensorBoard is a suite of web applications for ' 'inspecting and understanding your TensorFlow runs ' 'and graphs. https://github.com/tensorflow/tensorboard ')) for loader in self.plugin_loaders: loader.define_flags(parser) arg0 = argv[0] if argv else '' flags = parser.parse_args(argv[1:]) # Strip binary name from argv. self.cache_key = manager.cache_key( working_directory=os.getcwd(), arguments=argv[1:], configure_kwargs=kwargs, ) if absl_flags and arg0: # Only expose main module Abseil flags as TensorBoard native flags. # This is the same logic Abseil's ArgumentParser uses for determining # which Abseil flags to include in the short helpstring. for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)): if hasattr(flags, flag.name): raise ValueError('Conflicting Abseil flag: %s' % flag.name) setattr(flags, flag.name, flag.value) for k, v in kwargs.items(): if not hasattr(flags, k): raise ValueError('Unknown TensorBoard flag: %s' % k) setattr(flags, k, v) for loader in self.plugin_loaders: loader.fix_flags(flags) self.flags = flags return [arg0]
[ "def", "configure", "(", "self", ",", "argv", "=", "(", "''", ",", ")", ",", "*", "*", "kwargs", ")", ":", "parser", "=", "argparse_flags", ".", "ArgumentParser", "(", "prog", "=", "'tensorboard'", ",", "description", "=", "(", "'TensorBoard is a suite of web applications for '", "'inspecting and understanding your TensorFlow runs '", "'and graphs. https://github.com/tensorflow/tensorboard '", ")", ")", "for", "loader", "in", "self", ".", "plugin_loaders", ":", "loader", ".", "define_flags", "(", "parser", ")", "arg0", "=", "argv", "[", "0", "]", "if", "argv", "else", "''", "flags", "=", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")", "# Strip binary name from argv.", "self", ".", "cache_key", "=", "manager", ".", "cache_key", "(", "working_directory", "=", "os", ".", "getcwd", "(", ")", ",", "arguments", "=", "argv", "[", "1", ":", "]", ",", "configure_kwargs", "=", "kwargs", ",", ")", "if", "absl_flags", "and", "arg0", ":", "# Only expose main module Abseil flags as TensorBoard native flags.", "# This is the same logic Abseil's ArgumentParser uses for determining", "# which Abseil flags to include in the short helpstring.", "for", "flag", "in", "set", "(", "absl_flags", ".", "FLAGS", ".", "get_key_flags_for_module", "(", "arg0", ")", ")", ":", "if", "hasattr", "(", "flags", ",", "flag", ".", "name", ")", ":", "raise", "ValueError", "(", "'Conflicting Abseil flag: %s'", "%", "flag", ".", "name", ")", "setattr", "(", "flags", ",", "flag", ".", "name", ",", "flag", ".", "value", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "not", "hasattr", "(", "flags", ",", "k", ")", ":", "raise", "ValueError", "(", "'Unknown TensorBoard flag: %s'", "%", "k", ")", "setattr", "(", "flags", ",", "k", ",", "v", ")", "for", "loader", "in", "self", ".", "plugin_loaders", ":", "loader", ".", "fix_flags", "(", "flags", ")", "self", ".", "flags", "=", "flags", "return", "[", "arg0", "]" ]
Configures TensorBoard behavior via flags. This method will populate the "flags" property with an argparse.Namespace representing flag values parsed from the provided argv list, overridden by explicit flags from remaining keyword arguments. Args: argv: Can be set to CLI args equivalent to sys.argv; the first arg is taken to be the name of the path being executed. kwargs: Additional arguments will override what was parsed from argv. They must be passed as Python data structures, e.g. `foo=1` rather than `foo="1"`. Returns: Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism for absl.app.run() compatibility. Raises: ValueError: If flag values are invalid.
[ "Configures", "TensorBoard", "behavior", "via", "flags", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L149-L199
train
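Programmatic configuration, as a sketch; keyword arguments override anything parsed from argv and must be real Python values (port=6006, not port='6006'):

    from tensorboard import program

    tb = program.TensorBoard()
    tb.configure(argv=[''], logdir='/tmp/logs', port=6006)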
tensorflow/tensorboard
tensorboard/program.py
TensorBoard.main
def main(self, ignored_argv=('',)): """Blocking main function for TensorBoard. This method is called by `tensorboard.main.run_main`, which is the standard entrypoint for the tensorboard command line program. The configure() method must be called first. Args: ignored_argv: Do not pass. Required for Abseil compatibility. Returns: Process exit code, i.e. 0 if successful or non-zero on failure. In practice, an exception will most likely be raised instead of returning non-zero. :rtype: int """ self._install_signal_handler(signal.SIGTERM, "SIGTERM") if self.flags.inspect: logger.info('Not bringing up TensorBoard, but inspecting event files.') event_file = os.path.expanduser(self.flags.event_file) efi.inspect(self.flags.logdir, event_file, self.flags.tag) return 0 if self.flags.version_tb: print(version.VERSION) return 0 try: server = self._make_server() sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' % (version.VERSION, server.get_url())) sys.stderr.flush() self._register_info(server) server.serve_forever() return 0 except TensorBoardServerException as e: logger.error(e.msg) sys.stderr.write('ERROR: %s\n' % e.msg) sys.stderr.flush() return -1
python
def main(self, ignored_argv=('',)): """Blocking main function for TensorBoard. This method is called by `tensorboard.main.run_main`, which is the standard entrypoint for the tensorboard command line program. The configure() method must be called first. Args: ignored_argv: Do not pass. Required for Abseil compatibility. Returns: Process exit code, i.e. 0 if successful or non-zero on failure. In practice, an exception will most likely be raised instead of returning non-zero. :rtype: int """ self._install_signal_handler(signal.SIGTERM, "SIGTERM") if self.flags.inspect: logger.info('Not bringing up TensorBoard, but inspecting event files.') event_file = os.path.expanduser(self.flags.event_file) efi.inspect(self.flags.logdir, event_file, self.flags.tag) return 0 if self.flags.version_tb: print(version.VERSION) return 0 try: server = self._make_server() sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' % (version.VERSION, server.get_url())) sys.stderr.flush() self._register_info(server) server.serve_forever() return 0 except TensorBoardServerException as e: logger.error(e.msg) sys.stderr.write('ERROR: %s\n' % e.msg) sys.stderr.flush() return -1
[ "def", "main", "(", "self", ",", "ignored_argv", "=", "(", "''", ",", ")", ")", ":", "self", ".", "_install_signal_handler", "(", "signal", ".", "SIGTERM", ",", "\"SIGTERM\"", ")", "if", "self", ".", "flags", ".", "inspect", ":", "logger", ".", "info", "(", "'Not bringing up TensorBoard, but inspecting event files.'", ")", "event_file", "=", "os", ".", "path", ".", "expanduser", "(", "self", ".", "flags", ".", "event_file", ")", "efi", ".", "inspect", "(", "self", ".", "flags", ".", "logdir", ",", "event_file", ",", "self", ".", "flags", ".", "tag", ")", "return", "0", "if", "self", ".", "flags", ".", "version_tb", ":", "print", "(", "version", ".", "VERSION", ")", "return", "0", "try", ":", "server", "=", "self", ".", "_make_server", "(", ")", "sys", ".", "stderr", ".", "write", "(", "'TensorBoard %s at %s (Press CTRL+C to quit)\\n'", "%", "(", "version", ".", "VERSION", ",", "server", ".", "get_url", "(", ")", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "self", ".", "_register_info", "(", "server", ")", "server", ".", "serve_forever", "(", ")", "return", "0", "except", "TensorBoardServerException", "as", "e", ":", "logger", ".", "error", "(", "e", ".", "msg", ")", "sys", ".", "stderr", ".", "write", "(", "'ERROR: %s\\n'", "%", "e", ".", "msg", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "return", "-", "1" ]
Blocking main function for TensorBoard. This method is called by `tensorboard.main.run_main`, which is the standard entrypoint for the tensorboard command line program. The configure() method must be called first. Args: ignored_argv: Do not pass. Required for Abseil compatibility. Returns: Process exit code, i.e. 0 if successful or non-zero on failure. In practice, an exception will most likely be raised instead of returning non-zero. :rtype: int
[ "Blocking", "main", "function", "for", "TensorBoard", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L201-L239
train
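The blocking, CLI-equivalent entry point, sketched:

    import sys
    from tensorboard import program

    tb = program.TensorBoard()
    tb.configure(argv=['', '--logdir', '/tmp/logs'])
    sys.exit(tb.main())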
tensorflow/tensorboard
tensorboard/program.py
TensorBoard.launch
def launch(self): """Python API for launching TensorBoard. This method is the same as main() except it launches TensorBoard in a separate permanent thread. The configure() method must be called first. Returns: The URL of the TensorBoard web server. :rtype: str """ # Make it easy to run TensorBoard inside other programs, e.g. Colab. server = self._make_server() thread = threading.Thread(target=server.serve_forever, name='TensorBoard') thread.daemon = True thread.start() return server.get_url()
python
def launch(self): """Python API for launching TensorBoard. This method is the same as main() except it launches TensorBoard in a separate permanent thread. The configure() method must be called first. Returns: The URL of the TensorBoard web server. :rtype: str """ # Make it easy to run TensorBoard inside other programs, e.g. Colab. server = self._make_server() thread = threading.Thread(target=server.serve_forever, name='TensorBoard') thread.daemon = True thread.start() return server.get_url()
[ "def", "launch", "(", "self", ")", ":", "# Make it easy to run TensorBoard inside other programs, e.g. Colab.", "server", "=", "self", ".", "_make_server", "(", ")", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "server", ".", "serve_forever", ",", "name", "=", "'TensorBoard'", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "start", "(", ")", "return", "server", ".", "get_url", "(", ")" ]
Python API for launching TensorBoard. This method is the same as main() except it launches TensorBoard in a separate permanent thread. The configure() method must be called first. Returns: The URL of the TensorBoard web server. :rtype: str
[ "Python", "API", "for", "launching", "TensorBoard", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L241-L258
train
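For embedding TensorBoard in another program (e.g. a notebook), the non-blocking `launch()` path is the usual pattern. A sketch under the same assumptions as above:

```python
from tensorboard import program

tb = program.TensorBoard()
tb.configure(argv=[None, '--logdir', '/tmp/demo_logs'])
url = tb.launch()  # returns immediately; server runs on a daemon thread
print('TensorBoard listening on %s' % url)
```

Because the serving thread is a daemon, the server dies with the host process; this API exposes no explicit shutdown handle.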
tensorflow/tensorboard
tensorboard/program.py
TensorBoard._register_info
def _register_info(self, server): """Write a TensorBoardInfo file and arrange for its cleanup. Args: server: The result of `self._make_server()`. """ server_url = urllib.parse.urlparse(server.get_url()) info = manager.TensorBoardInfo( version=version.VERSION, start_time=int(time.time()), port=server_url.port, pid=os.getpid(), path_prefix=self.flags.path_prefix, logdir=self.flags.logdir, db=self.flags.db, cache_key=self.cache_key, ) atexit.register(manager.remove_info_file) manager.write_info_file(info)
python
def _register_info(self, server): """Write a TensorBoardInfo file and arrange for its cleanup. Args: server: The result of `self._make_server()`. """ server_url = urllib.parse.urlparse(server.get_url()) info = manager.TensorBoardInfo( version=version.VERSION, start_time=int(time.time()), port=server_url.port, pid=os.getpid(), path_prefix=self.flags.path_prefix, logdir=self.flags.logdir, db=self.flags.db, cache_key=self.cache_key, ) atexit.register(manager.remove_info_file) manager.write_info_file(info)
[ "def", "_register_info", "(", "self", ",", "server", ")", ":", "server_url", "=", "urllib", ".", "parse", ".", "urlparse", "(", "server", ".", "get_url", "(", ")", ")", "info", "=", "manager", ".", "TensorBoardInfo", "(", "version", "=", "version", ".", "VERSION", ",", "start_time", "=", "int", "(", "time", ".", "time", "(", ")", ")", ",", "port", "=", "server_url", ".", "port", ",", "pid", "=", "os", ".", "getpid", "(", ")", ",", "path_prefix", "=", "self", ".", "flags", ".", "path_prefix", ",", "logdir", "=", "self", ".", "flags", ".", "logdir", ",", "db", "=", "self", ".", "flags", ".", "db", ",", "cache_key", "=", "self", ".", "cache_key", ",", ")", "atexit", ".", "register", "(", "manager", ".", "remove_info_file", ")", "manager", ".", "write_info_file", "(", "info", ")" ]
Write a TensorBoardInfo file and arrange for its cleanup. Args: server: The result of `self._make_server()`.
[ "Write", "a", "TensorBoardInfo", "file", "and", "arrange", "for", "its", "cleanup", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L260-L278
train
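The info file written by `_register_info` is what `tensorboard.manager` uses for instance discovery. A sketch of reading those records back, assuming the `get_all()` helper in that module at this revision:

```python
from tensorboard import manager

# Each entry is a TensorBoardInfo record, one per TensorBoard process
# that wrote (and has not yet cleaned up) an info file.
for info in manager.get_all():
    print(info.pid, info.port, info.logdir)
```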
tensorflow/tensorboard
tensorboard/program.py
TensorBoard._install_signal_handler
def _install_signal_handler(self, signal_number, signal_name): """Set a signal handler to gracefully exit on the given signal. When this process receives the given signal, it will run `atexit` handlers and then exit with `0`. Args: signal_number: The numeric code for the signal to handle, like `signal.SIGTERM`. signal_name: The human-readable signal name. """ old_signal_handler = None # set below def handler(handled_signal_number, frame): # In case we catch this signal again while running atexit # handlers, take the hint and actually die. signal.signal(signal_number, signal.SIG_DFL) sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name) # The main thread is the only non-daemon thread, so it suffices to # exit hence. if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL): old_signal_handler(handled_signal_number, frame) sys.exit(0) old_signal_handler = signal.signal(signal_number, handler)
python
def _install_signal_handler(self, signal_number, signal_name): """Set a signal handler to gracefully exit on the given signal. When this process receives the given signal, it will run `atexit` handlers and then exit with `0`. Args: signal_number: The numeric code for the signal to handle, like `signal.SIGTERM`. signal_name: The human-readable signal name. """ old_signal_handler = None # set below def handler(handled_signal_number, frame): # In case we catch this signal again while running atexit # handlers, take the hint and actually die. signal.signal(signal_number, signal.SIG_DFL) sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name) # The main thread is the only non-daemon thread, so it suffices to # exit hence. if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL): old_signal_handler(handled_signal_number, frame) sys.exit(0) old_signal_handler = signal.signal(signal_number, handler)
[ "def", "_install_signal_handler", "(", "self", ",", "signal_number", ",", "signal_name", ")", ":", "old_signal_handler", "=", "None", "# set below", "def", "handler", "(", "handled_signal_number", ",", "frame", ")", ":", "# In case we catch this signal again while running atexit", "# handlers, take the hint and actually die.", "signal", ".", "signal", "(", "signal_number", ",", "signal", ".", "SIG_DFL", ")", "sys", ".", "stderr", ".", "write", "(", "\"TensorBoard caught %s; exiting...\\n\"", "%", "signal_name", ")", "# The main thread is the only non-daemon thread, so it suffices to", "# exit hence.", "if", "old_signal_handler", "not", "in", "(", "signal", ".", "SIG_IGN", ",", "signal", ".", "SIG_DFL", ")", ":", "old_signal_handler", "(", "handled_signal_number", ",", "frame", ")", "sys", ".", "exit", "(", "0", ")", "old_signal_handler", "=", "signal", ".", "signal", "(", "signal_number", ",", "handler", ")" ]
Set a signal handler to gracefully exit on the given signal. When this process receives the given signal, it will run `atexit` handlers and then exit with `0`. Args: signal_number: The numeric code for the signal to handle, like `signal.SIGTERM`. signal_name: The human-readable signal name.
[ "Set", "a", "signal", "handler", "to", "gracefully", "exit", "on", "the", "given", "signal", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L280-L302
train
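The handler-chaining trick above (restore `SIG_DFL` before delegating, so a second signal kills the process outright) works in any long-running script. A self-contained sketch of the same idea, with hypothetical names:

```python
import signal
import sys

def install_chained_handler(signum, name):
    def handler(received_signum, frame):
        # A second signal while cleanup runs should kill us outright.
        signal.signal(signum, signal.SIG_DFL)
        sys.stderr.write('caught %s; exiting...\n' % name)
        if callable(previous):  # skips SIG_IGN / SIG_DFL / None
            previous(received_signum, frame)
        sys.exit(0)  # from the main thread, this runs atexit handlers

    previous = signal.signal(signum, handler)

install_chained_handler(signal.SIGTERM, 'SIGTERM')
```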
tensorflow/tensorboard
tensorboard/program.py
TensorBoard._make_server
def _make_server(self): """Constructs the TensorBoard WSGI app and instantiates the server.""" app = application.standard_tensorboard_wsgi(self.flags, self.plugin_loaders, self.assets_zip_provider) return self.server_class(app, self.flags)
python
def _make_server(self): """Constructs the TensorBoard WSGI app and instantiates the server.""" app = application.standard_tensorboard_wsgi(self.flags, self.plugin_loaders, self.assets_zip_provider) return self.server_class(app, self.flags)
[ "def", "_make_server", "(", "self", ")", ":", "app", "=", "application", ".", "standard_tensorboard_wsgi", "(", "self", ".", "flags", ",", "self", ".", "plugin_loaders", ",", "self", ".", "assets_zip_provider", ")", "return", "self", ".", "server_class", "(", "app", ",", "self", ".", "flags", ")" ]
Constructs the TensorBoard WSGI app and instantiates the server.
[ "Constructs", "the", "TensorBoard", "WSGI", "app", "and", "instantiates", "the", "server", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L305-L310
train
tensorflow/tensorboard
tensorboard/program.py
WerkzeugServer._get_wildcard_address
def _get_wildcard_address(self, port): """Returns a wildcard address for the port in question. This will attempt to follow the best practice of calling getaddrinfo() with a null host and AI_PASSIVE to request a server-side socket wildcard address. If that succeeds, this returns the first IPv6 address found, or if none, then returns the first IPv4 address. If that fails, then this returns the hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0". """ fallback_address = '::' if socket.has_ipv6 else '0.0.0.0' if hasattr(socket, 'AI_PASSIVE'): try: addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_PASSIVE) except socket.gaierror as e: logger.warn('Failed to auto-detect wildcard address, assuming %s: %s', fallback_address, str(e)) return fallback_address addrs_by_family = defaultdict(list) for family, _, _, _, sockaddr in addrinfos: # Format of the "sockaddr" socket address varies by address family, # but [0] is always the IP address portion. addrs_by_family[family].append(sockaddr[0]) if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]: return addrs_by_family[socket.AF_INET6][0] if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]: return addrs_by_family[socket.AF_INET][0] logger.warn('Failed to auto-detect wildcard address, assuming %s', fallback_address) return fallback_address
python
def _get_wildcard_address(self, port): """Returns a wildcard address for the port in question. This will attempt to follow the best practice of calling getaddrinfo() with a null host and AI_PASSIVE to request a server-side socket wildcard address. If that succeeds, this returns the first IPv6 address found, or if none, then returns the first IPv4 address. If that fails, then this returns the hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0". """ fallback_address = '::' if socket.has_ipv6 else '0.0.0.0' if hasattr(socket, 'AI_PASSIVE'): try: addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_PASSIVE) except socket.gaierror as e: logger.warn('Failed to auto-detect wildcard address, assuming %s: %s', fallback_address, str(e)) return fallback_address addrs_by_family = defaultdict(list) for family, _, _, _, sockaddr in addrinfos: # Format of the "sockaddr" socket address varies by address family, # but [0] is always the IP address portion. addrs_by_family[family].append(sockaddr[0]) if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]: return addrs_by_family[socket.AF_INET6][0] if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]: return addrs_by_family[socket.AF_INET][0] logger.warn('Failed to auto-detect wildcard address, assuming %s', fallback_address) return fallback_address
[ "def", "_get_wildcard_address", "(", "self", ",", "port", ")", ":", "fallback_address", "=", "'::'", "if", "socket", ".", "has_ipv6", "else", "'0.0.0.0'", "if", "hasattr", "(", "socket", ",", "'AI_PASSIVE'", ")", ":", "try", ":", "addrinfos", "=", "socket", ".", "getaddrinfo", "(", "None", ",", "port", ",", "socket", ".", "AF_UNSPEC", ",", "socket", ".", "SOCK_STREAM", ",", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "AI_PASSIVE", ")", "except", "socket", ".", "gaierror", "as", "e", ":", "logger", ".", "warn", "(", "'Failed to auto-detect wildcard address, assuming %s: %s'", ",", "fallback_address", ",", "str", "(", "e", ")", ")", "return", "fallback_address", "addrs_by_family", "=", "defaultdict", "(", "list", ")", "for", "family", ",", "_", ",", "_", ",", "_", ",", "sockaddr", "in", "addrinfos", ":", "# Format of the \"sockaddr\" socket address varies by address family,", "# but [0] is always the IP address portion.", "addrs_by_family", "[", "family", "]", ".", "append", "(", "sockaddr", "[", "0", "]", ")", "if", "hasattr", "(", "socket", ",", "'AF_INET6'", ")", "and", "addrs_by_family", "[", "socket", ".", "AF_INET6", "]", ":", "return", "addrs_by_family", "[", "socket", ".", "AF_INET6", "]", "[", "0", "]", "if", "hasattr", "(", "socket", ",", "'AF_INET'", ")", "and", "addrs_by_family", "[", "socket", ".", "AF_INET", "]", ":", "return", "addrs_by_family", "[", "socket", ".", "AF_INET", "]", "[", "0", "]", "logger", ".", "warn", "(", "'Failed to auto-detect wildcard address, assuming %s'", ",", "fallback_address", ")", "return", "fallback_address" ]
Returns a wildcard address for the port in question. This will attempt to follow the best practice of calling getaddrinfo() with a null host and AI_PASSIVE to request a server-side socket wildcard address. If that succeeds, this returns the first IPv6 address found, or if none, then returns the first IPv4 address. If that fails, then this returns the hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0".
[ "Returns", "a", "wildcard", "address", "for", "the", "port", "in", "question", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L456-L486
train
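The `AI_PASSIVE` lookup above is the portable way to ask the OS for a bind()-suitable wildcard address. A condensed standalone sketch of the same IPv6-preferring logic:

```python
import socket

def wildcard_address(port):
    # Null host + AI_PASSIVE requests server-side wildcard addresses.
    infos = socket.getaddrinfo(None, port, socket.AF_UNSPEC,
                               socket.SOCK_STREAM, socket.IPPROTO_TCP,
                               socket.AI_PASSIVE)
    v4 = None
    for family, _, _, _, sockaddr in infos:
        if family == socket.AF_INET6:
            return sockaddr[0]  # prefer '::'; often dual-stack capable
        if family == socket.AF_INET and v4 is None:
            v4 = sockaddr[0]
    return v4 or ('::' if socket.has_ipv6 else '0.0.0.0')

print(wildcard_address(6006))
```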
tensorflow/tensorboard
tensorboard/program.py
WerkzeugServer.server_bind
def server_bind(self): """Override to enable IPV4 mapping for IPV6 sockets when desired. The main use case for this is so that when no host is specified, TensorBoard can listen on all interfaces for both IPv4 and IPv6 connections, rather than having to choose v4 or v6 and hope the browser didn't choose the other one. """ socket_is_v6 = ( hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6) has_v6only_option = ( hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY')) if self._auto_wildcard and socket_is_v6 and has_v6only_option: try: self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except socket.error as e: # Log a warning on failure to dual-bind, except for EAFNOSUPPORT # since that's expected if IPv4 isn't supported at all (IPv6-only). if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT: logger.warn('Failed to dual-bind to IPv4 wildcard: %s', str(e)) super(WerkzeugServer, self).server_bind()
python
def server_bind(self): """Override to enable IPV4 mapping for IPV6 sockets when desired. The main use case for this is so that when no host is specified, TensorBoard can listen on all interfaces for both IPv4 and IPv6 connections, rather than having to choose v4 or v6 and hope the browser didn't choose the other one. """ socket_is_v6 = ( hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6) has_v6only_option = ( hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY')) if self._auto_wildcard and socket_is_v6 and has_v6only_option: try: self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except socket.error as e: # Log a warning on failure to dual-bind, except for EAFNOSUPPORT # since that's expected if IPv4 isn't supported at all (IPv6-only). if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT: logger.warn('Failed to dual-bind to IPv4 wildcard: %s', str(e)) super(WerkzeugServer, self).server_bind()
[ "def", "server_bind", "(", "self", ")", ":", "socket_is_v6", "=", "(", "hasattr", "(", "socket", ",", "'AF_INET6'", ")", "and", "self", ".", "socket", ".", "family", "==", "socket", ".", "AF_INET6", ")", "has_v6only_option", "=", "(", "hasattr", "(", "socket", ",", "'IPPROTO_IPV6'", ")", "and", "hasattr", "(", "socket", ",", "'IPV6_V6ONLY'", ")", ")", "if", "self", ".", "_auto_wildcard", "and", "socket_is_v6", "and", "has_v6only_option", ":", "try", ":", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_IPV6", ",", "socket", ".", "IPV6_V6ONLY", ",", "0", ")", "except", "socket", ".", "error", "as", "e", ":", "# Log a warning on failure to dual-bind, except for EAFNOSUPPORT", "# since that's expected if IPv4 isn't supported at all (IPv6-only).", "if", "hasattr", "(", "errno", ",", "'EAFNOSUPPORT'", ")", "and", "e", ".", "errno", "!=", "errno", ".", "EAFNOSUPPORT", ":", "logger", ".", "warn", "(", "'Failed to dual-bind to IPv4 wildcard: %s'", ",", "str", "(", "e", ")", ")", "super", "(", "WerkzeugServer", ",", "self", ")", ".", "server_bind", "(", ")" ]
Override to enable IPV4 mapping for IPV6 sockets when desired. The main use case for this is so that when no host is specified, TensorBoard can listen on all interfaces for both IPv4 and IPv6 connections, rather than having to choose v4 or v6 and hope the browser didn't choose the other one.
[ "Override", "to", "enable", "IPV4", "mapping", "for", "IPV6", "sockets", "when", "desired", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L488-L507
train
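Clearing `IPV6_V6ONLY` is what lets a single `::` socket also accept IPv4 connections (as IPv4-mapped addresses). A minimal sketch outside of Werkzeug, assuming a platform that exposes the option:

```python
import socket

sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
# 0 = accept both native IPv6 and IPv4-mapped (::ffff:a.b.c.d) peers.
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
sock.bind(('::', 6006))
sock.listen(5)
```

On IPv6-only hosts this setsockopt can fail with `EAFNOSUPPORT`, which is exactly the case the `server_bind` override above tolerates silently.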
tensorflow/tensorboard
tensorboard/program.py
WerkzeugServer.handle_error
def handle_error(self, request, client_address): """Override to get rid of noisy EPIPE errors.""" del request # unused # Kludge to override a SocketServer.py method so we can get rid of noisy # EPIPE errors. They're kind of a red herring as far as errors go. For # example, `curl -N http://localhost:6006/ | head` will cause an EPIPE. exc_info = sys.exc_info() e = exc_info[1] if isinstance(e, IOError) and e.errno == errno.EPIPE: logger.warn('EPIPE caused by %s in HTTP serving' % str(client_address)) else: logger.error('HTTP serving error', exc_info=exc_info)
python
def handle_error(self, request, client_address): """Override to get rid of noisy EPIPE errors.""" del request # unused # Kludge to override a SocketServer.py method so we can get rid of noisy # EPIPE errors. They're kind of a red herring as far as errors go. For # example, `curl -N http://localhost:6006/ | head` will cause an EPIPE. exc_info = sys.exc_info() e = exc_info[1] if isinstance(e, IOError) and e.errno == errno.EPIPE: logger.warn('EPIPE caused by %s in HTTP serving' % str(client_address)) else: logger.error('HTTP serving error', exc_info=exc_info)
[ "def", "handle_error", "(", "self", ",", "request", ",", "client_address", ")", ":", "del", "request", "# unused", "# Kludge to override a SocketServer.py method so we can get rid of noisy", "# EPIPE errors. They're kind of a red herring as far as errors go. For", "# example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "e", "=", "exc_info", "[", "1", "]", "if", "isinstance", "(", "e", ",", "IOError", ")", "and", "e", ".", "errno", "==", "errno", ".", "EPIPE", ":", "logger", ".", "warn", "(", "'EPIPE caused by %s in HTTP serving'", "%", "str", "(", "client_address", ")", ")", "else", ":", "logger", ".", "error", "(", "'HTTP serving error'", ",", "exc_info", "=", "exc_info", ")" ]
Override to get rid of noisy EPIPE errors.
[ "Override", "to", "get", "rid", "of", "noisy", "EPIPE", "errors", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/program.py#L509-L520
train
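The same EPIPE-quieting override can be applied to any `socketserver`-based server. A sketch with a hypothetical server class:

```python
import errno
import logging
import socketserver  # named 'SocketServer' on Python 2
import sys

class QuietServer(socketserver.ThreadingTCPServer):
    def handle_error(self, request, client_address):
        exc = sys.exc_info()[1]
        if isinstance(exc, IOError) and exc.errno == errno.EPIPE:
            # Client hung up mid-response (e.g. `curl ... | head`).
            logging.warning('EPIPE from %s', client_address)
        else:
            logging.error('error serving %s', client_address, exc_info=True)
```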
tensorflow/tensorboard
tensorboard/plugins/profile/trace_events_json.py
TraceEventsJsonStream._events
def _events(self): """Iterator over all catapult trace events, as python values.""" for did, device in sorted(six.iteritems(self._proto.devices)): if device.name: yield dict( ph=_TYPE_METADATA, pid=did, name='process_name', args=dict(name=device.name)) yield dict( ph=_TYPE_METADATA, pid=did, name='process_sort_index', args=dict(sort_index=did)) for rid, resource in sorted(six.iteritems(device.resources)): if resource.name: yield dict( ph=_TYPE_METADATA, pid=did, tid=rid, name='thread_name', args=dict(name=resource.name)) yield dict( ph=_TYPE_METADATA, pid=did, tid=rid, name='thread_sort_index', args=dict(sort_index=rid)) # TODO(sammccall): filtering and downsampling? for event in self._proto.trace_events: yield self._event(event)
python
def _events(self): """Iterator over all catapult trace events, as python values.""" for did, device in sorted(six.iteritems(self._proto.devices)): if device.name: yield dict( ph=_TYPE_METADATA, pid=did, name='process_name', args=dict(name=device.name)) yield dict( ph=_TYPE_METADATA, pid=did, name='process_sort_index', args=dict(sort_index=did)) for rid, resource in sorted(six.iteritems(device.resources)): if resource.name: yield dict( ph=_TYPE_METADATA, pid=did, tid=rid, name='thread_name', args=dict(name=resource.name)) yield dict( ph=_TYPE_METADATA, pid=did, tid=rid, name='thread_sort_index', args=dict(sort_index=rid)) # TODO(sammccall): filtering and downsampling? for event in self._proto.trace_events: yield self._event(event)
[ "def", "_events", "(", "self", ")", ":", "for", "did", ",", "device", "in", "sorted", "(", "six", ".", "iteritems", "(", "self", ".", "_proto", ".", "devices", ")", ")", ":", "if", "device", ".", "name", ":", "yield", "dict", "(", "ph", "=", "_TYPE_METADATA", ",", "pid", "=", "did", ",", "name", "=", "'process_name'", ",", "args", "=", "dict", "(", "name", "=", "device", ".", "name", ")", ")", "yield", "dict", "(", "ph", "=", "_TYPE_METADATA", ",", "pid", "=", "did", ",", "name", "=", "'process_sort_index'", ",", "args", "=", "dict", "(", "sort_index", "=", "did", ")", ")", "for", "rid", ",", "resource", "in", "sorted", "(", "six", ".", "iteritems", "(", "device", ".", "resources", ")", ")", ":", "if", "resource", ".", "name", ":", "yield", "dict", "(", "ph", "=", "_TYPE_METADATA", ",", "pid", "=", "did", ",", "tid", "=", "rid", ",", "name", "=", "'thread_name'", ",", "args", "=", "dict", "(", "name", "=", "resource", ".", "name", ")", ")", "yield", "dict", "(", "ph", "=", "_TYPE_METADATA", ",", "pid", "=", "did", ",", "tid", "=", "rid", ",", "name", "=", "'thread_sort_index'", ",", "args", "=", "dict", "(", "sort_index", "=", "rid", ")", ")", "# TODO(sammccall): filtering and downsampling?", "for", "event", "in", "self", ".", "_proto", ".", "trace_events", ":", "yield", "self", ".", "_event", "(", "event", ")" ]
Iterator over all catapult trace events, as python values.
[ "Iterator", "over", "all", "catapult", "trace", "events", "as", "python", "values", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/profile/trace_events_json.py#L47-L77
train
tensorflow/tensorboard
tensorboard/plugins/profile/trace_events_json.py
TraceEventsJsonStream._event
def _event(self, event): """Converts a TraceEvent proto into a catapult trace event python value.""" result = dict( pid=event.device_id, tid=event.resource_id, name=event.name, ts=event.timestamp_ps / 1000000.0) if event.duration_ps: result['ph'] = _TYPE_COMPLETE result['dur'] = event.duration_ps / 1000000.0 else: result['ph'] = _TYPE_INSTANT result['s'] = _SCOPE_THREAD for key in dict(event.args): if 'args' not in result: result['args'] = {} result['args'][key] = event.args[key] return result
python
def _event(self, event): """Converts a TraceEvent proto into a catapult trace event python value.""" result = dict( pid=event.device_id, tid=event.resource_id, name=event.name, ts=event.timestamp_ps / 1000000.0) if event.duration_ps: result['ph'] = _TYPE_COMPLETE result['dur'] = event.duration_ps / 1000000.0 else: result['ph'] = _TYPE_INSTANT result['s'] = _SCOPE_THREAD for key in dict(event.args): if 'args' not in result: result['args'] = {} result['args'][key] = event.args[key] return result
[ "def", "_event", "(", "self", ",", "event", ")", ":", "result", "=", "dict", "(", "pid", "=", "event", ".", "device_id", ",", "tid", "=", "event", ".", "resource_id", ",", "name", "=", "event", ".", "name", ",", "ts", "=", "event", ".", "timestamp_ps", "/", "1000000.0", ")", "if", "event", ".", "duration_ps", ":", "result", "[", "'ph'", "]", "=", "_TYPE_COMPLETE", "result", "[", "'dur'", "]", "=", "event", ".", "duration_ps", "/", "1000000.0", "else", ":", "result", "[", "'ph'", "]", "=", "_TYPE_INSTANT", "result", "[", "'s'", "]", "=", "_SCOPE_THREAD", "for", "key", "in", "dict", "(", "event", ".", "args", ")", ":", "if", "'args'", "not", "in", "result", ":", "result", "[", "'args'", "]", "=", "{", "}", "result", "[", "'args'", "]", "[", "key", "]", "=", "event", ".", "args", "[", "key", "]", "return", "result" ]
Converts a TraceEvent proto into a catapult trace event python value.
[ "Converts", "a", "TraceEvent", "proto", "into", "a", "catapult", "trace", "event", "python", "value", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/profile/trace_events_json.py#L79-L96
train
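Concretely, the two converters above emit Chrome/catapult trace-event dicts like the hand-built sketch below (device and op names are placeholders). The `'M'` and `'X'` phase codes match the module's `_TYPE_METADATA` and `_TYPE_COMPLETE` constants, and `ts`/`dur` are in microseconds, hence the picosecond fields divided by 1e6 in `_event()`:

```python
import json

trace = {'traceEvents': [
    # 'M' (metadata) events label the rows that the trace viewer draws.
    {'ph': 'M', 'pid': 1, 'name': 'process_name',
     'args': {'name': '/device:GPU:0'}},
    {'ph': 'M', 'pid': 1, 'tid': 0, 'name': 'thread_name',
     'args': {'name': 'stream'}},
    # 'X' (complete) event: 5000000 ps duration -> 5.0 us.
    {'ph': 'X', 'pid': 1, 'tid': 0, 'name': 'MatMul',
     'ts': 2000000000 / 1000000.0, 'dur': 5000000 / 1000000.0},
]}
print(json.dumps(trace))
```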
tensorflow/tensorboard
tensorboard/plugins/scalar/summary.py
op
def op(name, data, display_name=None, description=None, collections=None): """Create a legacy scalar summary op. Arguments: name: A unique name for the generated summary node. data: A real numeric rank-0 `Tensor`. Must have `dtype` castable to `float32`. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A TensorFlow summary op. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf if display_name is None: display_name = name summary_metadata = metadata.create_summary_metadata( display_name=display_name, description=description) with tf.name_scope(name): with tf.control_dependencies([tf.assert_scalar(data)]): return tf.summary.tensor_summary(name='scalar_summary', tensor=tf.cast(data, tf.float32), collections=collections, summary_metadata=summary_metadata)
python
def op(name, data, display_name=None, description=None, collections=None): """Create a legacy scalar summary op. Arguments: name: A unique name for the generated summary node. data: A real numeric rank-0 `Tensor`. Must have `dtype` castable to `float32`. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A TensorFlow summary op. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf if display_name is None: display_name = name summary_metadata = metadata.create_summary_metadata( display_name=display_name, description=description) with tf.name_scope(name): with tf.control_dependencies([tf.assert_scalar(data)]): return tf.summary.tensor_summary(name='scalar_summary', tensor=tf.cast(data, tf.float32), collections=collections, summary_metadata=summary_metadata)
[ "def", "op", "(", "name", ",", "data", ",", "display_name", "=", "None", ",", "description", "=", "None", ",", "collections", "=", "None", ")", ":", "# TODO(nickfelt): remove on-demand imports once dep situation is fixed.", "import", "tensorflow", ".", "compat", ".", "v1", "as", "tf", "if", "display_name", "is", "None", ":", "display_name", "=", "name", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "display_name", ",", "description", "=", "description", ")", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "with", "tf", ".", "control_dependencies", "(", "[", "tf", ".", "assert_scalar", "(", "data", ")", "]", ")", ":", "return", "tf", ".", "summary", ".", "tensor_summary", "(", "name", "=", "'scalar_summary'", ",", "tensor", "=", "tf", ".", "cast", "(", "data", ",", "tf", ".", "float32", ")", ",", "collections", "=", "collections", ",", "summary_metadata", "=", "summary_metadata", ")" ]
Create a legacy scalar summary op. Arguments: name: A unique name for the generated summary node. data: A real numeric rank-0 `Tensor`. Must have `dtype` castable to `float32`. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A TensorFlow summary op.
[ "Create", "a", "legacy", "scalar", "summary", "op", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/scalar/summary.py#L35-L69
train
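A sketch of using the op in TF 1.x graph mode, assuming a session-based workflow; the logdir is a placeholder, and the module's import path follows the record's `path` field:

```python
import tensorflow.compat.v1 as tf
from tensorboard.plugins.scalar import summary as scalar_summary

loss = tf.constant(0.25)
summ = scalar_summary.op('loss', loss, description='training loss')

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/demo_logs', sess.graph)
    writer.add_summary(sess.run(summ), global_step=0)
    writer.close()
```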
tensorflow/tensorboard
tensorboard/plugins/scalar/summary.py
pb
def pb(name, data, display_name=None, description=None): """Create a legacy scalar summary protobuf. Arguments: name: A unique name for the generated summary, including any desired name scopes. data: A rank-0 `np.array` or array-like form (so raw `int`s and `float`s are fine, too). display_name: Optional name for this summary in TensorBoard, as a `str`. Defaults to `name`. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `tf.Summary` protobuf object. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf data = np.array(data) if data.shape != (): raise ValueError('Expected scalar shape for data, saw shape: %s.' % data.shape) if data.dtype.kind not in ('b', 'i', 'u', 'f'): # bool, int, uint, float raise ValueError('Cast %s to float is not supported' % data.dtype.name) tensor = tf.make_tensor_proto(data.astype(np.float32)) if display_name is None: display_name = name summary_metadata = metadata.create_summary_metadata( display_name=display_name, description=description) tf_summary_metadata = tf.SummaryMetadata.FromString( summary_metadata.SerializeToString()) summary = tf.Summary() summary.value.add(tag='%s/scalar_summary' % name, metadata=tf_summary_metadata, tensor=tensor) return summary
python
def pb(name, data, display_name=None, description=None): """Create a legacy scalar summary protobuf. Arguments: name: A unique name for the generated summary, including any desired name scopes. data: A rank-0 `np.array` or array-like form (so raw `int`s and `float`s are fine, too). display_name: Optional name for this summary in TensorBoard, as a `str`. Defaults to `name`. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `tf.Summary` protobuf object. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf data = np.array(data) if data.shape != (): raise ValueError('Expected scalar shape for data, saw shape: %s.' % data.shape) if data.dtype.kind not in ('b', 'i', 'u', 'f'): # bool, int, uint, float raise ValueError('Cast %s to float is not supported' % data.dtype.name) tensor = tf.make_tensor_proto(data.astype(np.float32)) if display_name is None: display_name = name summary_metadata = metadata.create_summary_metadata( display_name=display_name, description=description) tf_summary_metadata = tf.SummaryMetadata.FromString( summary_metadata.SerializeToString()) summary = tf.Summary() summary.value.add(tag='%s/scalar_summary' % name, metadata=tf_summary_metadata, tensor=tensor) return summary
[ "def", "pb", "(", "name", ",", "data", ",", "display_name", "=", "None", ",", "description", "=", "None", ")", ":", "# TODO(nickfelt): remove on-demand imports once dep situation is fixed.", "import", "tensorflow", ".", "compat", ".", "v1", "as", "tf", "data", "=", "np", ".", "array", "(", "data", ")", "if", "data", ".", "shape", "!=", "(", ")", ":", "raise", "ValueError", "(", "'Expected scalar shape for data, saw shape: %s.'", "%", "data", ".", "shape", ")", "if", "data", ".", "dtype", ".", "kind", "not", "in", "(", "'b'", ",", "'i'", ",", "'u'", ",", "'f'", ")", ":", "# bool, int, uint, float", "raise", "ValueError", "(", "'Cast %s to float is not supported'", "%", "data", ".", "dtype", ".", "name", ")", "tensor", "=", "tf", ".", "make_tensor_proto", "(", "data", ".", "astype", "(", "np", ".", "float32", ")", ")", "if", "display_name", "is", "None", ":", "display_name", "=", "name", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "display_name", ",", "description", "=", "description", ")", "tf_summary_metadata", "=", "tf", ".", "SummaryMetadata", ".", "FromString", "(", "summary_metadata", ".", "SerializeToString", "(", ")", ")", "summary", "=", "tf", ".", "Summary", "(", ")", "summary", ".", "value", ".", "add", "(", "tag", "=", "'%s/scalar_summary'", "%", "name", ",", "metadata", "=", "tf_summary_metadata", ",", "tensor", "=", "tensor", ")", "return", "summary" ]
Create a legacy scalar summary protobuf. Arguments: name: A unique name for the generated summary, including any desired name scopes. data: A rank-0 `np.array` or array-like form (so raw `int`s and `float`s are fine, too). display_name: Optional name for this summary in TensorBoard, as a `str`. Defaults to `name`. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `tf.Summary` protobuf object.
[ "Create", "a", "legacy", "scalar", "summary", "protobuf", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/scalar/summary.py#L72-L109
train
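The `pb` variant needs no graph or session at all, which makes it handy for logging scalars from plain Python values. A sketch, with a placeholder logdir:

```python
import tensorflow.compat.v1 as tf
from tensorboard.plugins.scalar import summary as scalar_summary

writer = tf.summary.FileWriter('/tmp/demo_logs')
for step, value in enumerate([0.9, 0.5, 0.25]):
    # pb() builds a tf.Summary proto; add_summary accepts it directly.
    writer.add_summary(scalar_summary.pb('loss', value), global_step=step)
writer.close()
```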
tensorflow/tensorboard
tensorboard/scripts/execrooter.py
run
def run(inputs, program, outputs): """Creates temp symlink tree, runs program, and copies back outputs. Args: inputs: List of fake paths to real paths, which are used for symlink tree. program: List containing real path of program and its arguments. The execroot directory will be appended as the last argument. outputs: List of fake outputted paths to copy back to real paths. Returns: 0 if succeeded or nonzero if failed. """ root = tempfile.mkdtemp() try: cwd = os.getcwd() for fake, real in inputs: parent = os.path.join(root, os.path.dirname(fake)) if not os.path.exists(parent): os.makedirs(parent) # Use symlink if possible and not on Windows, since on Windows 10 # symlinks exist but they require administrator privileges to use. if hasattr(os, 'symlink') and not os.name == 'nt': os.symlink(os.path.join(cwd, real), os.path.join(root, fake)) else: shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake)) if subprocess.call(program + [root]) != 0: return 1 for fake, real in outputs: shutil.copyfile(os.path.join(root, fake), real) return 0 finally: try: shutil.rmtree(root) except EnvironmentError: # Ignore "file in use" errors on Windows; ok since it's just a tmpdir. pass
python
def run(inputs, program, outputs): """Creates temp symlink tree, runs program, and copies back outputs. Args: inputs: List of fake paths to real paths, which are used for symlink tree. program: List containing real path of program and its arguments. The execroot directory will be appended as the last argument. outputs: List of fake outputted paths to copy back to real paths. Returns: 0 if succeeded or nonzero if failed. """ root = tempfile.mkdtemp() try: cwd = os.getcwd() for fake, real in inputs: parent = os.path.join(root, os.path.dirname(fake)) if not os.path.exists(parent): os.makedirs(parent) # Use symlink if possible and not on Windows, since on Windows 10 # symlinks exist but they require administrator privileges to use. if hasattr(os, 'symlink') and not os.name == 'nt': os.symlink(os.path.join(cwd, real), os.path.join(root, fake)) else: shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake)) if subprocess.call(program + [root]) != 0: return 1 for fake, real in outputs: shutil.copyfile(os.path.join(root, fake), real) return 0 finally: try: shutil.rmtree(root) except EnvironmentError: # Ignore "file in use" errors on Windows; ok since it's just a tmpdir. pass
[ "def", "run", "(", "inputs", ",", "program", ",", "outputs", ")", ":", "root", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "for", "fake", ",", "real", "in", "inputs", ":", "parent", "=", "os", ".", "path", ".", "join", "(", "root", ",", "os", ".", "path", ".", "dirname", "(", "fake", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "parent", ")", ":", "os", ".", "makedirs", "(", "parent", ")", "# Use symlink if possible and not on Windows, since on Windows 10", "# symlinks exist but they require administrator privileges to use.", "if", "hasattr", "(", "os", ",", "'symlink'", ")", "and", "not", "os", ".", "name", "==", "'nt'", ":", "os", ".", "symlink", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "real", ")", ",", "os", ".", "path", ".", "join", "(", "root", ",", "fake", ")", ")", "else", ":", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "real", ")", ",", "os", ".", "path", ".", "join", "(", "root", ",", "fake", ")", ")", "if", "subprocess", ".", "call", "(", "program", "+", "[", "root", "]", ")", "!=", "0", ":", "return", "1", "for", "fake", ",", "real", "in", "outputs", ":", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "root", ",", "fake", ")", ",", "real", ")", "return", "0", "finally", ":", "try", ":", "shutil", ".", "rmtree", "(", "root", ")", "except", "EnvironmentError", ":", "# Ignore \"file in use\" errors on Windows; ok since it's just a tmpdir.", "pass" ]
Creates temp symlink tree, runs program, and copies back outputs. Args: inputs: List of fake paths to real paths, which are used for symlink tree. program: List containing real path of program and its arguments. The execroot directory will be appended as the last argument. outputs: List of fake outputted paths to copy back to real paths. Returns: 0 if succeeded or nonzero if failed.
[ "Creates", "temp", "symlink", "tree", "runs", "program", "and", "copies", "back", "outputs", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/scripts/execrooter.py#L28-L62
train
tensorflow/tensorboard
tensorboard/scripts/execrooter.py
main
def main(args): """Invokes run function using a JSON file config. Args: args: CLI args, which can be a JSON file containing an object whose attributes are the parameters to the run function. If multiple JSON files are passed, their contents are concatenated. Returns: 0 if succeeded or nonzero if failed. Raises: Exception: If input data is missing. """ if not args: raise Exception('Please specify at least one JSON config path') inputs = [] program = [] outputs = [] for arg in args: with open(arg) as fd: config = json.load(fd) inputs.extend(config.get('inputs', [])) program.extend(config.get('program', [])) outputs.extend(config.get('outputs', [])) if not program: raise Exception('Please specify a program') return run(inputs, program, outputs)
python
def main(args): """Invokes run function using a JSON file config. Args: args: CLI args, which can be a JSON file containing an object whose attributes are the parameters to the run function. If multiple JSON files are passed, their contents are concatenated. Returns: 0 if succeeded or nonzero if failed. Raises: Exception: If input data is missing. """ if not args: raise Exception('Please specify at least one JSON config path') inputs = [] program = [] outputs = [] for arg in args: with open(arg) as fd: config = json.load(fd) inputs.extend(config.get('inputs', [])) program.extend(config.get('program', [])) outputs.extend(config.get('outputs', [])) if not program: raise Exception('Please specify a program') return run(inputs, program, outputs)
[ "def", "main", "(", "args", ")", ":", "if", "not", "args", ":", "raise", "Exception", "(", "'Please specify at least one JSON config path'", ")", "inputs", "=", "[", "]", "program", "=", "[", "]", "outputs", "=", "[", "]", "for", "arg", "in", "args", ":", "with", "open", "(", "arg", ")", "as", "fd", ":", "config", "=", "json", ".", "load", "(", "fd", ")", "inputs", ".", "extend", "(", "config", ".", "get", "(", "'inputs'", ",", "[", "]", ")", ")", "program", ".", "extend", "(", "config", ".", "get", "(", "'program'", ",", "[", "]", ")", ")", "outputs", ".", "extend", "(", "config", ".", "get", "(", "'outputs'", ",", "[", "]", ")", ")", "if", "not", "program", ":", "raise", "Exception", "(", "'Please specify a program'", ")", "return", "run", "(", "inputs", ",", "program", ",", "outputs", ")" ]
Invokes run function using a JSON file config. Args: args: CLI args, which can be a JSON file containing an object whose attributes are the parameters to the run function. If multiple JSON files are passed, their contents are concatenated. Returns: 0 if succeeded or nonzero if failed. Raises: Exception: If input data is missing.
[ "Invokes", "run", "function", "using", "a", "JSON", "file", "config", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/scripts/execrooter.py#L65-L90
train
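Putting the two functions together: the CLI expects one or more JSON files whose keys match `run()`'s parameters. A sketch of a config and its invocation; all paths below are placeholders:

```python
import json
import tempfile

from tensorboard.scripts import execrooter

config = {
    # [fake-path-in-tree, real-path] pairs symlinked (or copied) in.
    'inputs': [['pkg/data.txt', '/real/path/to/data.txt']],
    # The temp execroot directory is appended as the last argument.
    'program': ['/bin/ls', '-l'],
    'outputs': [],  # [fake, real] pairs copied back out afterwards
}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fd:
    json.dump(config, fd)
raise SystemExit(execrooter.main([fd.name]))
```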
tensorflow/tensorboard
tensorboard/backend/event_processing/sqlite_writer.py
initialize_schema
def initialize_schema(connection): """Initializes the TensorBoard sqlite schema using the given connection. Args: connection: A sqlite DB connection. """ cursor = connection.cursor() cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID)) cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION)) with connection: for statement in _SCHEMA_STATEMENTS: lines = statement.strip('\n').split('\n') message = lines[0] + ('...' if len(lines) > 1 else '') logger.debug('Running DB init statement: %s', message) cursor.execute(statement)
python
def initialize_schema(connection): """Initializes the TensorBoard sqlite schema using the given connection. Args: connection: A sqlite DB connection. """ cursor = connection.cursor() cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID)) cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION)) with connection: for statement in _SCHEMA_STATEMENTS: lines = statement.strip('\n').split('\n') message = lines[0] + ('...' if len(lines) > 1 else '') logger.debug('Running DB init statement: %s', message) cursor.execute(statement)
[ "def", "initialize_schema", "(", "connection", ")", ":", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"PRAGMA application_id={}\"", ".", "format", "(", "_TENSORBOARD_APPLICATION_ID", ")", ")", "cursor", ".", "execute", "(", "\"PRAGMA user_version={}\"", ".", "format", "(", "_TENSORBOARD_USER_VERSION", ")", ")", "with", "connection", ":", "for", "statement", "in", "_SCHEMA_STATEMENTS", ":", "lines", "=", "statement", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "'\\n'", ")", "message", "=", "lines", "[", "0", "]", "+", "(", "'...'", "if", "len", "(", "lines", ")", ">", "1", "else", "''", ")", "logger", ".", "debug", "(", "'Running DB init statement: %s'", ",", "message", ")", "cursor", ".", "execute", "(", "statement", ")" ]
Initializes the TensorBoard sqlite schema using the given connection. Args: connection: A sqlite DB connection.
[ "Initializes", "the", "TensorBoard", "sqlite", "schema", "using", "the", "given", "connection", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L416-L430
train
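A sketch of bootstrapping a fresh database with the schema above and confirming the PRAGMA stamps, assuming the module path shown in this record; the DB path is a placeholder:

```python
import sqlite3

from tensorboard.backend.event_processing import sqlite_writer

conn = sqlite3.connect('/tmp/tb_demo.sqlite')
sqlite_writer.initialize_schema(conn)
print(conn.execute('PRAGMA application_id').fetchone())
print(conn.execute('PRAGMA user_version').fetchone())
```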
tensorflow/tensorboard
tensorboard/backend/event_processing/sqlite_writer.py
SqliteWriter._create_id
def _create_id(self): """Returns a freshly created DB-wide unique ID.""" cursor = self._db.cursor() cursor.execute('INSERT INTO Ids DEFAULT VALUES') return cursor.lastrowid
python
def _create_id(self): """Returns a freshly created DB-wide unique ID.""" cursor = self._db.cursor() cursor.execute('INSERT INTO Ids DEFAULT VALUES') return cursor.lastrowid
[ "def", "_create_id", "(", "self", ")", ":", "cursor", "=", "self", ".", "_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'INSERT INTO Ids DEFAULT VALUES'", ")", "return", "cursor", ".", "lastrowid" ]
Returns a freshly created DB-wide unique ID.
[ "Returns", "a", "freshly", "created", "DB", "-", "wide", "unique", "ID", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L58-L62
train
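The `Ids` pattern gives DB-wide unique IDs in SQLite without a dedicated sequence object, so users, experiments, runs, and tags can all draw from one counter. A self-contained sketch of the same idea (the `AUTOINCREMENT` column here is an illustrative choice, not necessarily the real schema's):

```python
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE Ids (id INTEGER PRIMARY KEY AUTOINCREMENT)')

def create_id(db):
    # Each INSERT burns one monotonically increasing rowid, shared by
    # every entity type that draws IDs from this table.
    cur = db.execute('INSERT INTO Ids DEFAULT VALUES')
    return cur.lastrowid

print(create_id(db), create_id(db))  # 1 2
```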
tensorflow/tensorboard
tensorboard/backend/event_processing/sqlite_writer.py
SqliteWriter._maybe_init_user
def _maybe_init_user(self): """Returns the ID for the current user, creating the row if needed.""" user_name = os.environ.get('USER', '') or os.environ.get('USERNAME', '') cursor = self._db.cursor() cursor.execute('SELECT user_id FROM Users WHERE user_name = ?', (user_name,)) row = cursor.fetchone() if row: return row[0] user_id = self._create_id() cursor.execute( """ INSERT INTO USERS (user_id, user_name, inserted_time) VALUES (?, ?, ?) """, (user_id, user_name, time.time())) return user_id
python
def _maybe_init_user(self): """Returns the ID for the current user, creating the row if needed.""" user_name = os.environ.get('USER', '') or os.environ.get('USERNAME', '') cursor = self._db.cursor() cursor.execute('SELECT user_id FROM Users WHERE user_name = ?', (user_name,)) row = cursor.fetchone() if row: return row[0] user_id = self._create_id() cursor.execute( """ INSERT INTO USERS (user_id, user_name, inserted_time) VALUES (?, ?, ?) """, (user_id, user_name, time.time())) return user_id
[ "def", "_maybe_init_user", "(", "self", ")", ":", "user_name", "=", "os", ".", "environ", ".", "get", "(", "'USER'", ",", "''", ")", "or", "os", ".", "environ", ".", "get", "(", "'USERNAME'", ",", "''", ")", "cursor", "=", "self", ".", "_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'SELECT user_id FROM Users WHERE user_name = ?'", ",", "(", "user_name", ",", ")", ")", "row", "=", "cursor", ".", "fetchone", "(", ")", "if", "row", ":", "return", "row", "[", "0", "]", "user_id", "=", "self", ".", "_create_id", "(", ")", "cursor", ".", "execute", "(", "\"\"\"\n INSERT INTO USERS (user_id, user_name, inserted_time)\n VALUES (?, ?, ?)\n \"\"\"", ",", "(", "user_id", ",", "user_name", ",", "time", ".", "time", "(", ")", ")", ")", "return", "user_id" ]
Returns the ID for the current user, creating the row if needed.
[ "Returns", "the", "ID", "for", "the", "current", "user", "creating", "the", "row", "if", "needed", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L64-L80
train
tensorflow/tensorboard
tensorboard/backend/event_processing/sqlite_writer.py
SqliteWriter._maybe_init_experiment
def _maybe_init_experiment(self, experiment_name): """Returns the ID for the given experiment, creating the row if needed. Args: experiment_name: name of experiment. """ user_id = self._maybe_init_user() cursor = self._db.cursor() cursor.execute( """ SELECT experiment_id FROM Experiments WHERE user_id = ? AND experiment_name = ? """, (user_id, experiment_name)) row = cursor.fetchone() if row: return row[0] experiment_id = self._create_id() # TODO: track computed time from run start times computed_time = 0 cursor.execute( """ INSERT INTO Experiments ( user_id, experiment_id, experiment_name, inserted_time, started_time, is_watching ) VALUES (?, ?, ?, ?, ?, ?) """, (user_id, experiment_id, experiment_name, time.time(), computed_time, False)) return experiment_id
python
def _maybe_init_experiment(self, experiment_name): """Returns the ID for the given experiment, creating the row if needed. Args: experiment_name: name of experiment. """ user_id = self._maybe_init_user() cursor = self._db.cursor() cursor.execute( """ SELECT experiment_id FROM Experiments WHERE user_id = ? AND experiment_name = ? """, (user_id, experiment_name)) row = cursor.fetchone() if row: return row[0] experiment_id = self._create_id() # TODO: track computed time from run start times computed_time = 0 cursor.execute( """ INSERT INTO Experiments ( user_id, experiment_id, experiment_name, inserted_time, started_time, is_watching ) VALUES (?, ?, ?, ?, ?, ?) """, (user_id, experiment_id, experiment_name, time.time(), computed_time, False)) return experiment_id
[ "def", "_maybe_init_experiment", "(", "self", ",", "experiment_name", ")", ":", "user_id", "=", "self", ".", "_maybe_init_user", "(", ")", "cursor", "=", "self", ".", "_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"\"\"\n SELECT experiment_id FROM Experiments\n WHERE user_id = ? AND experiment_name = ?\n \"\"\"", ",", "(", "user_id", ",", "experiment_name", ")", ")", "row", "=", "cursor", ".", "fetchone", "(", ")", "if", "row", ":", "return", "row", "[", "0", "]", "experiment_id", "=", "self", ".", "_create_id", "(", ")", "# TODO: track computed time from run start times", "computed_time", "=", "0", "cursor", ".", "execute", "(", "\"\"\"\n INSERT INTO Experiments (\n user_id, experiment_id, experiment_name,\n inserted_time, started_time, is_watching\n ) VALUES (?, ?, ?, ?, ?, ?)\n \"\"\"", ",", "(", "user_id", ",", "experiment_id", ",", "experiment_name", ",", "time", ".", "time", "(", ")", ",", "computed_time", ",", "False", ")", ")", "return", "experiment_id" ]
Returns the ID for the given experiment, creating the row if needed. Args: experiment_name: name of experiment.
[ "Returns", "the", "ID", "for", "the", "given", "experiment", "creating", "the", "row", "if", "needed", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L82-L111
train
tensorflow/tensorboard
tensorboard/backend/event_processing/sqlite_writer.py
SqliteWriter._maybe_init_run
def _maybe_init_run(self, experiment_name, run_name): """Returns the ID for the given run, creating the row if needed. Args: experiment_name: name of experiment containing this run. run_name: name of run. """ experiment_id = self._maybe_init_experiment(experiment_name) cursor = self._db.cursor() cursor.execute( """ SELECT run_id FROM Runs WHERE experiment_id = ? AND run_name = ? """, (experiment_id, run_name)) row = cursor.fetchone() if row: return row[0] run_id = self._create_id() # TODO: track actual run start times started_time = 0 cursor.execute( """ INSERT INTO Runs ( experiment_id, run_id, run_name, inserted_time, started_time ) VALUES (?, ?, ?, ?, ?) """, (experiment_id, run_id, run_name, time.time(), started_time)) return run_id
python
def _maybe_init_run(self, experiment_name, run_name): """Returns the ID for the given run, creating the row if needed. Args: experiment_name: name of experiment containing this run. run_name: name of run. """ experiment_id = self._maybe_init_experiment(experiment_name) cursor = self._db.cursor() cursor.execute( """ SELECT run_id FROM Runs WHERE experiment_id = ? AND run_name = ? """, (experiment_id, run_name)) row = cursor.fetchone() if row: return row[0] run_id = self._create_id() # TODO: track actual run start times started_time = 0 cursor.execute( """ INSERT INTO Runs ( experiment_id, run_id, run_name, inserted_time, started_time ) VALUES (?, ?, ?, ?, ?) """, (experiment_id, run_id, run_name, time.time(), started_time)) return run_id
[ "def", "_maybe_init_run", "(", "self", ",", "experiment_name", ",", "run_name", ")", ":", "experiment_id", "=", "self", ".", "_maybe_init_experiment", "(", "experiment_name", ")", "cursor", "=", "self", ".", "_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"\"\"\n SELECT run_id FROM Runs\n WHERE experiment_id = ? AND run_name = ?\n \"\"\"", ",", "(", "experiment_id", ",", "run_name", ")", ")", "row", "=", "cursor", ".", "fetchone", "(", ")", "if", "row", ":", "return", "row", "[", "0", "]", "run_id", "=", "self", ".", "_create_id", "(", ")", "# TODO: track actual run start times", "started_time", "=", "0", "cursor", ".", "execute", "(", "\"\"\"\n INSERT INTO Runs (\n experiment_id, run_id, run_name, inserted_time, started_time\n ) VALUES (?, ?, ?, ?, ?)\n \"\"\"", ",", "(", "experiment_id", ",", "run_id", ",", "run_name", ",", "time", ".", "time", "(", ")", ",", "started_time", ")", ")", "return", "run_id" ]
Returns the ID for the given run, creating the row if needed. Args: experiment_name: name of experiment containing this run. run_name: name of run.
[ "Returns", "the", "ID", "for", "the", "given", "run", "creating", "the", "row", "if", "needed", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L113-L141
train
tensorflow/tensorboard
tensorboard/backend/event_processing/sqlite_writer.py
SqliteWriter._maybe_init_tags
def _maybe_init_tags(self, run_id, tag_to_metadata): """Returns a tag-to-ID map for the given tags, creating rows if needed. Args: run_id: the ID of the run to which these tags belong. tag_to_metadata: map of tag name to SummaryMetadata for the tag. """ cursor = self._db.cursor() # TODO: for huge numbers of tags (e.g. 1000+), this is slower than just # querying for the known tag names explicitly; find a better tradeoff. cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?', (run_id,)) tag_to_id = {row[0]: row[1] for row in cursor.fetchall() if row[0] in tag_to_metadata} new_tag_data = [] for tag, metadata in six.iteritems(tag_to_metadata): if tag not in tag_to_id: tag_id = self._create_id() tag_to_id[tag] = tag_id new_tag_data.append((run_id, tag_id, tag, time.time(), metadata.display_name, metadata.plugin_data.plugin_name, self._make_blob(metadata.plugin_data.content))) cursor.executemany( """ INSERT INTO Tags ( run_id, tag_id, tag_name, inserted_time, display_name, plugin_name, plugin_data ) VALUES (?, ?, ?, ?, ?, ?, ?) """, new_tag_data) return tag_to_id
python
def _maybe_init_tags(self, run_id, tag_to_metadata): """Returns a tag-to-ID map for the given tags, creating rows if needed. Args: run_id: the ID of the run to which these tags belong. tag_to_metadata: map of tag name to SummaryMetadata for the tag. """ cursor = self._db.cursor() # TODO: for huge numbers of tags (e.g. 1000+), this is slower than just # querying for the known tag names explicitly; find a better tradeoff. cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?', (run_id,)) tag_to_id = {row[0]: row[1] for row in cursor.fetchall() if row[0] in tag_to_metadata} new_tag_data = [] for tag, metadata in six.iteritems(tag_to_metadata): if tag not in tag_to_id: tag_id = self._create_id() tag_to_id[tag] = tag_id new_tag_data.append((run_id, tag_id, tag, time.time(), metadata.display_name, metadata.plugin_data.plugin_name, self._make_blob(metadata.plugin_data.content))) cursor.executemany( """ INSERT INTO Tags ( run_id, tag_id, tag_name, inserted_time, display_name, plugin_name, plugin_data ) VALUES (?, ?, ?, ?, ?, ?, ?) """, new_tag_data) return tag_to_id
[ "def", "_maybe_init_tags", "(", "self", ",", "run_id", ",", "tag_to_metadata", ")", ":", "cursor", "=", "self", ".", "_db", ".", "cursor", "(", ")", "# TODO: for huge numbers of tags (e.g. 1000+), this is slower than just", "# querying for the known tag names explicitly; find a better tradeoff.", "cursor", ".", "execute", "(", "'SELECT tag_name, tag_id FROM Tags WHERE run_id = ?'", ",", "(", "run_id", ",", ")", ")", "tag_to_id", "=", "{", "row", "[", "0", "]", ":", "row", "[", "1", "]", "for", "row", "in", "cursor", ".", "fetchall", "(", ")", "if", "row", "[", "0", "]", "in", "tag_to_metadata", "}", "new_tag_data", "=", "[", "]", "for", "tag", ",", "metadata", "in", "six", ".", "iteritems", "(", "tag_to_metadata", ")", ":", "if", "tag", "not", "in", "tag_to_id", ":", "tag_id", "=", "self", ".", "_create_id", "(", ")", "tag_to_id", "[", "tag", "]", "=", "tag_id", "new_tag_data", ".", "append", "(", "(", "run_id", ",", "tag_id", ",", "tag", ",", "time", ".", "time", "(", ")", ",", "metadata", ".", "display_name", ",", "metadata", ".", "plugin_data", ".", "plugin_name", ",", "self", ".", "_make_blob", "(", "metadata", ".", "plugin_data", ".", "content", ")", ")", ")", "cursor", ".", "executemany", "(", "\"\"\"\n INSERT INTO Tags (\n run_id, tag_id, tag_name, inserted_time, display_name, plugin_name,\n plugin_data\n ) VALUES (?, ?, ?, ?, ?, ?, ?)\n \"\"\"", ",", "new_tag_data", ")", "return", "tag_to_id" ]
Returns a tag-to-ID map for the given tags, creating rows if needed. Args: run_id: the ID of the run to which these tags belong. tag_to_metadata: map of tag name to SummaryMetadata for the tag.
[ "Returns", "a", "tag", "-", "to", "-", "ID", "map", "for", "the", "given", "tags", "creating", "rows", "if", "needed", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L143-L174
train
tensorflow/tensorboard
tensorboard/backend/event_processing/sqlite_writer.py
SqliteWriter.write_summaries
def write_summaries(self, tagged_data, experiment_name, run_name): """Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run. """ logger.debug('Writing summaries for %s tags', len(tagged_data)) # Connection used as context manager for auto commit/rollback on exit. # We still need an explicit BEGIN, because it doesn't do one on enter, # it waits until the first DML command - which is totally broken. # See: https://stackoverflow.com/a/44448465/1179226 with self._db: self._db.execute('BEGIN TRANSACTION') run_id = self._maybe_init_run(experiment_name, run_name) tag_to_metadata = { tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data) } tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata) tensor_values = [] for tag, tagdata in six.iteritems(tagged_data): tag_id = tag_to_id[tag] for step, wall_time, tensor_proto in tagdata.values: dtype = tensor_proto.dtype shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim) # Use tensor_proto.tensor_content if it's set, to skip relatively # expensive extraction into intermediate ndarray. data = self._make_blob( tensor_proto.tensor_content or tensor_util.make_ndarray(tensor_proto).tobytes()) tensor_values.append((tag_id, step, wall_time, dtype, shape, data)) self._db.executemany( """ INSERT OR REPLACE INTO Tensors ( series, step, computed_time, dtype, shape, data ) VALUES (?, ?, ?, ?, ?, ?) """, tensor_values)
python
def write_summaries(self, tagged_data, experiment_name, run_name): """Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run. """ logger.debug('Writing summaries for %s tags', len(tagged_data)) # Connection used as context manager for auto commit/rollback on exit. # We still need an explicit BEGIN, because it doesn't do one on enter, # it waits until the first DML command - which is totally broken. # See: https://stackoverflow.com/a/44448465/1179226 with self._db: self._db.execute('BEGIN TRANSACTION') run_id = self._maybe_init_run(experiment_name, run_name) tag_to_metadata = { tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data) } tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata) tensor_values = [] for tag, tagdata in six.iteritems(tagged_data): tag_id = tag_to_id[tag] for step, wall_time, tensor_proto in tagdata.values: dtype = tensor_proto.dtype shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim) # Use tensor_proto.tensor_content if it's set, to skip relatively # expensive extraction into intermediate ndarray. data = self._make_blob( tensor_proto.tensor_content or tensor_util.make_ndarray(tensor_proto).tobytes()) tensor_values.append((tag_id, step, wall_time, dtype, shape, data)) self._db.executemany( """ INSERT OR REPLACE INTO Tensors ( series, step, computed_time, dtype, shape, data ) VALUES (?, ?, ?, ?, ?, ?) """, tensor_values)
[ "def", "write_summaries", "(", "self", ",", "tagged_data", ",", "experiment_name", ",", "run_name", ")", ":", "logger", ".", "debug", "(", "'Writing summaries for %s tags'", ",", "len", "(", "tagged_data", ")", ")", "# Connection used as context manager for auto commit/rollback on exit.", "# We still need an explicit BEGIN, because it doesn't do one on enter,", "# it waits until the first DML command - which is totally broken.", "# See: https://stackoverflow.com/a/44448465/1179226", "with", "self", ".", "_db", ":", "self", ".", "_db", ".", "execute", "(", "'BEGIN TRANSACTION'", ")", "run_id", "=", "self", ".", "_maybe_init_run", "(", "experiment_name", ",", "run_name", ")", "tag_to_metadata", "=", "{", "tag", ":", "tagdata", ".", "metadata", "for", "tag", ",", "tagdata", "in", "six", ".", "iteritems", "(", "tagged_data", ")", "}", "tag_to_id", "=", "self", ".", "_maybe_init_tags", "(", "run_id", ",", "tag_to_metadata", ")", "tensor_values", "=", "[", "]", "for", "tag", ",", "tagdata", "in", "six", ".", "iteritems", "(", "tagged_data", ")", ":", "tag_id", "=", "tag_to_id", "[", "tag", "]", "for", "step", ",", "wall_time", ",", "tensor_proto", "in", "tagdata", ".", "values", ":", "dtype", "=", "tensor_proto", ".", "dtype", "shape", "=", "','", ".", "join", "(", "str", "(", "d", ".", "size", ")", "for", "d", "in", "tensor_proto", ".", "tensor_shape", ".", "dim", ")", "# Use tensor_proto.tensor_content if it's set, to skip relatively", "# expensive extraction into intermediate ndarray.", "data", "=", "self", ".", "_make_blob", "(", "tensor_proto", ".", "tensor_content", "or", "tensor_util", ".", "make_ndarray", "(", "tensor_proto", ")", ".", "tobytes", "(", ")", ")", "tensor_values", ".", "append", "(", "(", "tag_id", ",", "step", ",", "wall_time", ",", "dtype", ",", "shape", ",", "data", ")", ")", "self", ".", "_db", ".", "executemany", "(", "\"\"\"\n INSERT OR REPLACE INTO Tensors (\n series, step, computed_time, dtype, shape, data\n ) VALUES (?, ?, ?, ?, ?, ?)\n \"\"\"", ",", "tensor_values", ")" ]
Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run.
[ "Transactionally", "writes", "the", "given", "tagged", "summary", "data", "to", "the", "DB", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L176-L214
train
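The transaction comment in `write_summaries` deserves emphasis: sqlite3's connection context manager commits or rolls back on exit, but it never issues BEGIN on entry, so the explicit BEGIN is what makes the read-then-write sequence atomic. A self-contained sketch of that pattern with a toy one-table schema (not TensorBoard's real Tensors table):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE Tensors (series INTEGER, step INTEGER, data BLOB, '
             'PRIMARY KEY (series, step))')

rows = [(1, 0, b'\x00'), (1, 1, b'\x01'), (1, 1, b'\x02')]
with conn:  # commits on success, rolls back on exception...
    conn.execute('BEGIN TRANSACTION')  # ...but BEGIN must still be explicit
    conn.executemany(
        'INSERT OR REPLACE INTO Tensors (series, step, data) VALUES (?, ?, ?)',
        rows)

# INSERT OR REPLACE keeps the last write for a duplicated (series, step) key.
print(conn.execute('SELECT * FROM Tensors ORDER BY step').fetchall())
# [(1, 0, b'\x00'), (1, 1, b'\x02')]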
tensorflow/tensorboard
tensorboard/plugins/image/images_demo.py
image_data
def image_data(verbose=False): """Get the raw encoded image data, downloading it if necessary.""" # This is a principled use of the `global` statement; don't lint me. global _IMAGE_DATA # pylint: disable=global-statement if _IMAGE_DATA is None: if verbose: logger.info("--- Downloading image.") with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile: _IMAGE_DATA = infile.read() return _IMAGE_DATA
python
def image_data(verbose=False): """Get the raw encoded image data, downloading it if necessary.""" # This is a principled use of the `global` statement; don't lint me. global _IMAGE_DATA # pylint: disable=global-statement if _IMAGE_DATA is None: if verbose: logger.info("--- Downloading image.") with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile: _IMAGE_DATA = infile.read() return _IMAGE_DATA
[ "def", "image_data", "(", "verbose", "=", "False", ")", ":", "# This is a principled use of the `global` statement; don't lint me.", "global", "_IMAGE_DATA", "# pylint: disable=global-statement", "if", "_IMAGE_DATA", "is", "None", ":", "if", "verbose", ":", "logger", ".", "info", "(", "\"--- Downloading image.\"", ")", "with", "contextlib", ".", "closing", "(", "urllib", ".", "request", ".", "urlopen", "(", "IMAGE_URL", ")", ")", "as", "infile", ":", "_IMAGE_DATA", "=", "infile", ".", "read", "(", ")", "return", "_IMAGE_DATA" ]
Get the raw encoded image data, downloading it if necessary.
[ "Get", "the", "raw", "encoded", "image", "data", "downloading", "it", "if", "necessary", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_demo.py#L56-L65
train
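`image_data` is a straightforward download-once memoization via a module-level global. A generic sketch of the same pattern, with a caller-supplied URL rather than this module's IMAGE_URL constant, looks like:

import contextlib
import urllib.request

_CACHE = None  # module-level cache, mirroring _IMAGE_DATA above

def cached_download(url):
    global _CACHE
    if _CACHE is None:  # hit the network only on the first call
        with contextlib.closing(urllib.request.urlopen(url)) as infile:
            _CACHE = infile.read()
    return _CACHE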
tensorflow/tensorboard
tensorboard/plugins/image/images_demo.py
convolve
def convolve(image, pixel_filter, channels=3, name=None): """Perform a 2D pixel convolution on the given image. Arguments: image: A 3D `float32` `Tensor` of shape `[height, width, channels]`, where `channels` is the third argument to this function and the first two dimensions are arbitrary. pixel_filter: A 2D `Tensor`, representing pixel weightings for the kernel. This will be used to create a 4D kernel---the extra two dimensions are for channels (see `tf.nn.conv2d` documentation), and the kernel will be constructed so that the channels are independent: each channel only observes the data from neighboring pixels of the same channel. channels: An integer representing the number of channels in the image (e.g., 3 for RGB). Returns: A 3D `float32` `Tensor` of the same shape as the input. """ with tf.name_scope(name, 'convolve'): tf.compat.v1.assert_type(image, tf.float32) channel_filter = tf.eye(channels) filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) * tf.expand_dims(tf.expand_dims(channel_filter, 0), 0)) result_batch = tf.nn.conv2d(tf.stack([image]), # batch filter=filter_, strides=[1, 1, 1, 1], padding='SAME') return result_batch[0]
python
def convolve(image, pixel_filter, channels=3, name=None): """Perform a 2D pixel convolution on the given image. Arguments: image: A 3D `float32` `Tensor` of shape `[height, width, channels]`, where `channels` is the third argument to this function and the first two dimensions are arbitrary. pixel_filter: A 2D `Tensor`, representing pixel weightings for the kernel. This will be used to create a 4D kernel---the extra two dimensions are for channels (see `tf.nn.conv2d` documentation), and the kernel will be constructed so that the channels are independent: each channel only observes the data from neighboring pixels of the same channel. channels: An integer representing the number of channels in the image (e.g., 3 for RGB). Returns: A 3D `float32` `Tensor` of the same shape as the input. """ with tf.name_scope(name, 'convolve'): tf.compat.v1.assert_type(image, tf.float32) channel_filter = tf.eye(channels) filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) * tf.expand_dims(tf.expand_dims(channel_filter, 0), 0)) result_batch = tf.nn.conv2d(tf.stack([image]), # batch filter=filter_, strides=[1, 1, 1, 1], padding='SAME') return result_batch[0]
[ "def", "convolve", "(", "image", ",", "pixel_filter", ",", "channels", "=", "3", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "'convolve'", ")", ":", "tf", ".", "compat", ".", "v1", ".", "assert_type", "(", "image", ",", "tf", ".", "float32", ")", "channel_filter", "=", "tf", ".", "eye", "(", "channels", ")", "filter_", "=", "(", "tf", ".", "expand_dims", "(", "tf", ".", "expand_dims", "(", "pixel_filter", ",", "-", "1", ")", ",", "-", "1", ")", "*", "tf", ".", "expand_dims", "(", "tf", ".", "expand_dims", "(", "channel_filter", ",", "0", ")", ",", "0", ")", ")", "result_batch", "=", "tf", ".", "nn", ".", "conv2d", "(", "tf", ".", "stack", "(", "[", "image", "]", ")", ",", "# batch", "filter", "=", "filter_", ",", "strides", "=", "[", "1", ",", "1", ",", "1", ",", "1", "]", ",", "padding", "=", "'SAME'", ")", "return", "result_batch", "[", "0", "]" ]
Perform a 2D pixel convolution on the given image. Arguments: image: A 3D `float32` `Tensor` of shape `[height, width, channels]`, where `channels` is the third argument to this function and the first two dimensions are arbitrary. pixel_filter: A 2D `Tensor`, representing pixel weightings for the kernel. This will be used to create a 4D kernel---the extra two dimensions are for channels (see `tf.nn.conv2d` documentation), and the kernel will be constructed so that the channels are independent: each channel only observes the data from neighboring pixels of the same channel. channels: An integer representing the number of channels in the image (e.g., 3 for RGB). Returns: A 3D `float32` `Tensor` of the same shape as the input.
[ "Perform", "a", "2D", "pixel", "convolution", "on", "the", "given", "image", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_demo.py#L68-L96
train
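To see concretely how the kernel construction in `convolve` keeps channels independent, here is the same broadcast written in plain NumPy (a sketch, not the TF graph code): the 2-D pixel filter is multiplied against an identity matrix over the two channel dimensions, so output channel c only ever reads input channel c.

import numpy as np

channels = 3
pixel_filter = np.ones((3, 3)) / 9.0           # 3x3 box filter
channel_filter = np.eye(channels)
kernel = (pixel_filter[:, :, None, None] *
          channel_filter[None, None, :, :])    # shape (3, 3, 3, 3)

# Off-diagonal channel pairs get zero weight; diagonal pairs get the filter.
assert kernel[1, 1, 0, 1] == 0.0
assert kernel[1, 1, 0, 0] == pixel_filter[1, 1]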
tensorflow/tensorboard
tensorboard/plugins/image/images_demo.py
get_image
def get_image(verbose=False): """Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.""" base_data = tf.constant(image_data(verbose=verbose)) base_image = tf.image.decode_image(base_data, channels=3) base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8) return parsed_image
python
def get_image(verbose=False): """Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.""" base_data = tf.constant(image_data(verbose=verbose)) base_image = tf.image.decode_image(base_data, channels=3) base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8) return parsed_image
[ "def", "get_image", "(", "verbose", "=", "False", ")", ":", "base_data", "=", "tf", ".", "constant", "(", "image_data", "(", "verbose", "=", "verbose", ")", ")", "base_image", "=", "tf", ".", "image", ".", "decode_image", "(", "base_data", ",", "channels", "=", "3", ")", "base_image", ".", "set_shape", "(", "(", "IMAGE_HEIGHT", ",", "IMAGE_WIDTH", ",", "3", ")", ")", "parsed_image", "=", "tf", ".", "Variable", "(", "base_image", ",", "name", "=", "'image'", ",", "dtype", "=", "tf", ".", "uint8", ")", "return", "parsed_image" ]
Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.
[ "Get", "the", "image", "as", "a", "TensorFlow", "variable", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_demo.py#L99-L109
train
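A minimal hypothetical usage of `get_image`, assuming the TF 1.x graph/session style this demo module is written in:

import tensorflow as tf

image = get_image()  # the function documented above
with tf.compat.v1.Session() as sess:
    sess.run(image.initializer)  # the docstring's required initialization step
    pixels = sess.run(image)     # uint8 array of shape (IMAGE_HEIGHT, IMAGE_WIDTH, 3)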
tensorflow/tensorboard
tensorboard/plugins/image/images_demo.py
run_box_to_gaussian
def run_box_to_gaussian(logdir, verbose=False): """Run a box-blur-to-Gaussian-blur demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output. """ if verbose: logger.info('--- Starting run: box_to_gaussian') tf.compat.v1.reset_default_graph() tf.compat.v1.set_random_seed(0) image = get_image(verbose=verbose) blur_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32) with tf.name_scope('filter'): blur_side_length = blur_radius * 2 + 1 pixel_filter = tf.ones((blur_side_length, blur_side_length)) pixel_filter = (pixel_filter / tf.cast(tf.size(input=pixel_filter), tf.float32)) # normalize iterations = 4 images = [tf.cast(image, tf.float32) / 255.0] for _ in xrange(iterations): images.append(convolve(images[-1], pixel_filter)) with tf.name_scope('convert_to_uint8'): images = tf.stack( [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8) for image_ in images]) summ = image_summary.op( 'box_to_gaussian', images, max_outputs=iterations, display_name='Gaussian blur as a limit process of box blurs', description=('Demonstration of forming a Gaussian blur by ' 'composing box blurs, each of which can be expressed ' 'as a 2D convolution.\n\n' 'A Gaussian blur is formed by convolving a Gaussian ' 'kernel over an image. But a Gaussian kernel is ' 'itself the limit of convolving a constant kernel ' 'with itself many times. Thus, while applying ' 'a box-filter convolution just once produces ' 'results that are noticeably different from those ' 'of a Gaussian blur, repeating the same convolution ' 'just a few times causes the result to rapidly ' 'converge to an actual Gaussian blur.\n\n' 'Here, the step value controls the blur radius, ' 'and the image sample controls the number of times ' 'that the convolution is applied (plus one). ' 'So, when *sample*=1, the original image is shown; ' '*sample*=2 shows a box blur; and a hypothetical ' '*sample*=&infin; would show a true Gaussian blur.\n\n' 'This is one ingredient in a recipe to compute very ' 'fast Gaussian blurs. The other pieces require ' 'special treatment for the box blurs themselves ' '(decomposition to dual one-dimensional box blurs, ' 'each of which is computed with a sliding window); ' 'we don&rsquo;t perform those optimizations here.\n\n' '[Here are some slides describing the full process.]' '(%s)\n\n' '%s' % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf', IMAGE_CREDIT))) with tf.compat.v1.Session() as sess: sess.run(image.initializer) writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian')) writer.add_graph(sess.graph) for step in xrange(8): if verbose: logger.info('--- box_to_gaussian: step: %s' % step) feed_dict = {blur_radius: step} run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() s = sess.run(summ, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata) writer.add_summary(s, global_step=step) writer.add_run_metadata(run_metadata, 'step_%04d' % step) writer.close()
python
def run_box_to_gaussian(logdir, verbose=False): """Run a box-blur-to-Gaussian-blur demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output. """ if verbose: logger.info('--- Starting run: box_to_gaussian') tf.compat.v1.reset_default_graph() tf.compat.v1.set_random_seed(0) image = get_image(verbose=verbose) blur_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32) with tf.name_scope('filter'): blur_side_length = blur_radius * 2 + 1 pixel_filter = tf.ones((blur_side_length, blur_side_length)) pixel_filter = (pixel_filter / tf.cast(tf.size(input=pixel_filter), tf.float32)) # normalize iterations = 4 images = [tf.cast(image, tf.float32) / 255.0] for _ in xrange(iterations): images.append(convolve(images[-1], pixel_filter)) with tf.name_scope('convert_to_uint8'): images = tf.stack( [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8) for image_ in images]) summ = image_summary.op( 'box_to_gaussian', images, max_outputs=iterations, display_name='Gaussian blur as a limit process of box blurs', description=('Demonstration of forming a Gaussian blur by ' 'composing box blurs, each of which can be expressed ' 'as a 2D convolution.\n\n' 'A Gaussian blur is formed by convolving a Gaussian ' 'kernel over an image. But a Gaussian kernel is ' 'itself the limit of convolving a constant kernel ' 'with itself many times. Thus, while applying ' 'a box-filter convolution just once produces ' 'results that are noticeably different from those ' 'of a Gaussian blur, repeating the same convolution ' 'just a few times causes the result to rapidly ' 'converge to an actual Gaussian blur.\n\n' 'Here, the step value controls the blur radius, ' 'and the image sample controls the number of times ' 'that the convolution is applied (plus one). ' 'So, when *sample*=1, the original image is shown; ' '*sample*=2 shows a box blur; and a hypothetical ' '*sample*=&infin; would show a true Gaussian blur.\n\n' 'This is one ingredient in a recipe to compute very ' 'fast Gaussian blurs. The other pieces require ' 'special treatment for the box blurs themselves ' '(decomposition to dual one-dimensional box blurs, ' 'each of which is computed with a sliding window); ' 'we don&rsquo;t perform those optimizations here.\n\n' '[Here are some slides describing the full process.]' '(%s)\n\n' '%s' % ('http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf', IMAGE_CREDIT))) with tf.compat.v1.Session() as sess: sess.run(image.initializer) writer = tf.summary.FileWriter(os.path.join(logdir, 'box_to_gaussian')) writer.add_graph(sess.graph) for step in xrange(8): if verbose: logger.info('--- box_to_gaussian: step: %s' % step) feed_dict = {blur_radius: step} run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() s = sess.run(summ, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata) writer.add_summary(s, global_step=step) writer.add_run_metadata(run_metadata, 'step_%04d' % step) writer.close()
[ "def", "run_box_to_gaussian", "(", "logdir", ",", "verbose", "=", "False", ")", ":", "if", "verbose", ":", "logger", ".", "info", "(", "'--- Starting run: box_to_gaussian'", ")", "tf", ".", "compat", ".", "v1", ".", "reset_default_graph", "(", ")", "tf", ".", "compat", ".", "v1", ".", "set_random_seed", "(", "0", ")", "image", "=", "get_image", "(", "verbose", "=", "verbose", ")", "blur_radius", "=", "tf", ".", "compat", ".", "v1", ".", "placeholder", "(", "shape", "=", "(", ")", ",", "dtype", "=", "tf", ".", "int32", ")", "with", "tf", ".", "name_scope", "(", "'filter'", ")", ":", "blur_side_length", "=", "blur_radius", "*", "2", "+", "1", "pixel_filter", "=", "tf", ".", "ones", "(", "(", "blur_side_length", ",", "blur_side_length", ")", ")", "pixel_filter", "=", "(", "pixel_filter", "/", "tf", ".", "cast", "(", "tf", ".", "size", "(", "input", "=", "pixel_filter", ")", ",", "tf", ".", "float32", ")", ")", "# normalize", "iterations", "=", "4", "images", "=", "[", "tf", ".", "cast", "(", "image", ",", "tf", ".", "float32", ")", "/", "255.0", "]", "for", "_", "in", "xrange", "(", "iterations", ")", ":", "images", ".", "append", "(", "convolve", "(", "images", "[", "-", "1", "]", ",", "pixel_filter", ")", ")", "with", "tf", ".", "name_scope", "(", "'convert_to_uint8'", ")", ":", "images", "=", "tf", ".", "stack", "(", "[", "tf", ".", "cast", "(", "255", "*", "tf", ".", "clip_by_value", "(", "image_", ",", "0.0", ",", "1.0", ")", ",", "tf", ".", "uint8", ")", "for", "image_", "in", "images", "]", ")", "summ", "=", "image_summary", ".", "op", "(", "'box_to_gaussian'", ",", "images", ",", "max_outputs", "=", "iterations", ",", "display_name", "=", "'Gaussian blur as a limit process of box blurs'", ",", "description", "=", "(", "'Demonstration of forming a Gaussian blur by '", "'composing box blurs, each of which can be expressed '", "'as a 2D convolution.\\n\\n'", "'A Gaussian blur is formed by convolving a Gaussian '", "'kernel over an image. But a Gaussian kernel is '", "'itself the limit of convolving a constant kernel '", "'with itself many times. Thus, while applying '", "'a box-filter convolution just once produces '", "'results that are noticeably different from those '", "'of a Gaussian blur, repeating the same convolution '", "'just a few times causes the result to rapidly '", "'converge to an actual Gaussian blur.\\n\\n'", "'Here, the step value controls the blur radius, '", "'and the image sample controls the number of times '", "'that the convolution is applied (plus one). '", "'So, when *sample*=1, the original image is shown; '", "'*sample*=2 shows a box blur; and a hypothetical '", "'*sample*=&infin; would show a true Gaussian blur.\\n\\n'", "'This is one ingredient in a recipe to compute very '", "'fast Gaussian blurs. 
The other pieces require '", "'special treatment for the box blurs themselves '", "'(decomposition to dual one-dimensional box blurs, '", "'each of which is computed with a sliding window); '", "'we don&rsquo;t perform those optimizations here.\\n\\n'", "'[Here are some slides describing the full process.]'", "'(%s)\\n\\n'", "'%s'", "%", "(", "'http://elynxsdk.free.fr/ext-docs/Blur/Fast_box_blur.pdf'", ",", "IMAGE_CREDIT", ")", ")", ")", "with", "tf", ".", "compat", ".", "v1", ".", "Session", "(", ")", "as", "sess", ":", "sess", ".", "run", "(", "image", ".", "initializer", ")", "writer", "=", "tf", ".", "summary", ".", "FileWriter", "(", "os", ".", "path", ".", "join", "(", "logdir", ",", "'box_to_gaussian'", ")", ")", "writer", ".", "add_graph", "(", "sess", ".", "graph", ")", "for", "step", "in", "xrange", "(", "8", ")", ":", "if", "verbose", ":", "logger", ".", "info", "(", "'--- box_to_gaussian: step: %s'", "%", "step", ")", "feed_dict", "=", "{", "blur_radius", ":", "step", "}", "run_options", "=", "tf", ".", "compat", ".", "v1", ".", "RunOptions", "(", "trace_level", "=", "tf", ".", "compat", ".", "v1", ".", "RunOptions", ".", "FULL_TRACE", ")", "run_metadata", "=", "config_pb2", ".", "RunMetadata", "(", ")", "s", "=", "sess", ".", "run", "(", "summ", ",", "feed_dict", "=", "feed_dict", ",", "options", "=", "run_options", ",", "run_metadata", "=", "run_metadata", ")", "writer", ".", "add_summary", "(", "s", ",", "global_step", "=", "step", ")", "writer", ".", "add_run_metadata", "(", "run_metadata", ",", "'step_%04d'", "%", "step", ")", "writer", ".", "close", "(", ")" ]
Run a box-blur-to-Gaussian-blur demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output.
[ "Run", "a", "box", "-", "blur", "-", "to", "-", "Gaussian", "-", "blur", "demonstration", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_demo.py#L112-L191
train
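The claim in the summary description, that repeated box blurs rapidly converge to a Gaussian, is just the central limit theorem, and it is easy to check numerically. This 1-D NumPy sketch (independent of the demo code) convolves a box kernel with itself a few times and compares against a Gaussian of matching variance; a length-n unit-spaced box kernel has variance (n**2 - 1) / 12, and convolution adds variances:

import numpy as np

box = np.ones(5) / 5.0
kernel = box.copy()
for _ in range(3):          # four box blurs in total
    kernel = np.convolve(kernel, box)

var = 4 * (5 ** 2 - 1) / 12.0                       # summed variance of 4 boxes
x = np.arange(len(kernel)) - (len(kernel) - 1) / 2.0
gauss = np.exp(-x ** 2 / (2 * var))
gauss /= gauss.sum()
print(np.abs(kernel - gauss).max())  # already small after just 4 box blurs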
tensorflow/tensorboard
tensorboard/plugins/image/images_demo.py
run_sobel
def run_sobel(logdir, verbose=False): """Run a Sobel edge detection demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output. """ if verbose: logger.info('--- Starting run: sobel') tf.compat.v1.reset_default_graph() tf.compat.v1.set_random_seed(0) image = get_image(verbose=verbose) kernel_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32) with tf.name_scope('horizontal_kernel'): kernel_side_length = kernel_radius * 2 + 1 # Drop off influence for pixels further away from the center. weighting_kernel = ( 1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length))) differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length) horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1), tf.expand_dims(differentiation_kernel, 0)) with tf.name_scope('vertical_kernel'): vertical_kernel = tf.transpose(a=horizontal_kernel) float_image = tf.cast(image, tf.float32) dx = convolve(float_image, horizontal_kernel, name='convolve_dx') dy = convolve(float_image, vertical_kernel, name='convolve_dy') gradient_magnitude = tf.norm(tensor=[dx, dy], axis=0, name='gradient_magnitude') with tf.name_scope('normalized_gradient'): normalized_gradient = gradient_magnitude / tf.reduce_max(input_tensor=gradient_magnitude) with tf.name_scope('output_image'): output_image = tf.cast(255 * normalized_gradient, tf.uint8) summ = image_summary.op( 'sobel', tf.stack([output_image]), display_name='Sobel edge detection', description=(u'Demonstration of [Sobel edge detection]. The step ' 'parameter adjusts the radius of the kernel. ' 'The kernel can be of arbitrary size, and considers ' u'nearby pixels with \u2113\u2082-linear falloff.\n\n' # (that says ``$\ell_2$-linear falloff'') 'Edge detection is done on a per-channel basis, so ' 'you can observe which edges are &ldquo;mostly red ' 'edges,&rdquo; for instance.\n\n' 'For practical edge detection, a small kernel ' '(usually not more than *r*=2) is best.\n\n' '[Sobel edge detection]: %s\n\n' "%s" % ('https://en.wikipedia.org/wiki/Sobel_operator', IMAGE_CREDIT))) with tf.compat.v1.Session() as sess: sess.run(image.initializer) writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel')) writer.add_graph(sess.graph) for step in xrange(8): if verbose: logger.info("--- sobel: step: %s" % step) feed_dict = {kernel_radius: step} run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() s = sess.run(summ, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata) writer.add_summary(s, global_step=step) writer.add_run_metadata(run_metadata, 'step_%04d' % step) writer.close()
python
def run_sobel(logdir, verbose=False): """Run a Sobel edge detection demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output. """ if verbose: logger.info('--- Starting run: sobel') tf.compat.v1.reset_default_graph() tf.compat.v1.set_random_seed(0) image = get_image(verbose=verbose) kernel_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32) with tf.name_scope('horizontal_kernel'): kernel_side_length = kernel_radius * 2 + 1 # Drop off influence for pixels further away from the center. weighting_kernel = ( 1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length))) differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length) horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1), tf.expand_dims(differentiation_kernel, 0)) with tf.name_scope('vertical_kernel'): vertical_kernel = tf.transpose(a=horizontal_kernel) float_image = tf.cast(image, tf.float32) dx = convolve(float_image, horizontal_kernel, name='convolve_dx') dy = convolve(float_image, vertical_kernel, name='convolve_dy') gradient_magnitude = tf.norm(tensor=[dx, dy], axis=0, name='gradient_magnitude') with tf.name_scope('normalized_gradient'): normalized_gradient = gradient_magnitude / tf.reduce_max(input_tensor=gradient_magnitude) with tf.name_scope('output_image'): output_image = tf.cast(255 * normalized_gradient, tf.uint8) summ = image_summary.op( 'sobel', tf.stack([output_image]), display_name='Sobel edge detection', description=(u'Demonstration of [Sobel edge detection]. The step ' 'parameter adjusts the radius of the kernel. ' 'The kernel can be of arbitrary size, and considers ' u'nearby pixels with \u2113\u2082-linear falloff.\n\n' # (that says ``$\ell_2$-linear falloff'') 'Edge detection is done on a per-channel basis, so ' 'you can observe which edges are &ldquo;mostly red ' 'edges,&rdquo; for instance.\n\n' 'For practical edge detection, a small kernel ' '(usually not more than *r*=2) is best.\n\n' '[Sobel edge detection]: %s\n\n' "%s" % ('https://en.wikipedia.org/wiki/Sobel_operator', IMAGE_CREDIT))) with tf.compat.v1.Session() as sess: sess.run(image.initializer) writer = tf.summary.FileWriter(os.path.join(logdir, 'sobel')) writer.add_graph(sess.graph) for step in xrange(8): if verbose: logger.info("--- sobel: step: %s" % step) feed_dict = {kernel_radius: step} run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() s = sess.run(summ, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata) writer.add_summary(s, global_step=step) writer.add_run_metadata(run_metadata, 'step_%04d' % step) writer.close()
[ "def", "run_sobel", "(", "logdir", ",", "verbose", "=", "False", ")", ":", "if", "verbose", ":", "logger", ".", "info", "(", "'--- Starting run: sobel'", ")", "tf", ".", "compat", ".", "v1", ".", "reset_default_graph", "(", ")", "tf", ".", "compat", ".", "v1", ".", "set_random_seed", "(", "0", ")", "image", "=", "get_image", "(", "verbose", "=", "verbose", ")", "kernel_radius", "=", "tf", ".", "compat", ".", "v1", ".", "placeholder", "(", "shape", "=", "(", ")", ",", "dtype", "=", "tf", ".", "int32", ")", "with", "tf", ".", "name_scope", "(", "'horizontal_kernel'", ")", ":", "kernel_side_length", "=", "kernel_radius", "*", "2", "+", "1", "# Drop off influence for pixels further away from the center.", "weighting_kernel", "=", "(", "1.0", "-", "tf", ".", "abs", "(", "tf", ".", "linspace", "(", "-", "1.0", ",", "1.0", ",", "num", "=", "kernel_side_length", ")", ")", ")", "differentiation_kernel", "=", "tf", ".", "linspace", "(", "-", "1.0", ",", "1.0", ",", "num", "=", "kernel_side_length", ")", "horizontal_kernel", "=", "tf", ".", "matmul", "(", "tf", ".", "expand_dims", "(", "weighting_kernel", ",", "1", ")", ",", "tf", ".", "expand_dims", "(", "differentiation_kernel", ",", "0", ")", ")", "with", "tf", ".", "name_scope", "(", "'vertical_kernel'", ")", ":", "vertical_kernel", "=", "tf", ".", "transpose", "(", "a", "=", "horizontal_kernel", ")", "float_image", "=", "tf", ".", "cast", "(", "image", ",", "tf", ".", "float32", ")", "dx", "=", "convolve", "(", "float_image", ",", "horizontal_kernel", ",", "name", "=", "'convolve_dx'", ")", "dy", "=", "convolve", "(", "float_image", ",", "vertical_kernel", ",", "name", "=", "'convolve_dy'", ")", "gradient_magnitude", "=", "tf", ".", "norm", "(", "tensor", "=", "[", "dx", ",", "dy", "]", ",", "axis", "=", "0", ",", "name", "=", "'gradient_magnitude'", ")", "with", "tf", ".", "name_scope", "(", "'normalized_gradient'", ")", ":", "normalized_gradient", "=", "gradient_magnitude", "/", "tf", ".", "reduce_max", "(", "input_tensor", "=", "gradient_magnitude", ")", "with", "tf", ".", "name_scope", "(", "'output_image'", ")", ":", "output_image", "=", "tf", ".", "cast", "(", "255", "*", "normalized_gradient", ",", "tf", ".", "uint8", ")", "summ", "=", "image_summary", ".", "op", "(", "'sobel'", ",", "tf", ".", "stack", "(", "[", "output_image", "]", ")", ",", "display_name", "=", "'Sobel edge detection'", ",", "description", "=", "(", "u'Demonstration of [Sobel edge detection]. The step '", "'parameter adjusts the radius of the kernel. 
'", "'The kernel can be of arbitrary size, and considers '", "u'nearby pixels with \\u2113\\u2082-linear falloff.\\n\\n'", "# (that says ``$\\ell_2$-linear falloff'')", "'Edge detection is done on a per-channel basis, so '", "'you can observe which edges are &ldquo;mostly red '", "'edges,&rdquo; for instance.\\n\\n'", "'For practical edge detection, a small kernel '", "'(usually not more than more than *r*=2) is best.\\n\\n'", "'[Sobel edge detection]: %s\\n\\n'", "\"%s\"", "%", "(", "'https://en.wikipedia.org/wiki/Sobel_operator'", ",", "IMAGE_CREDIT", ")", ")", ")", "with", "tf", ".", "compat", ".", "v1", ".", "Session", "(", ")", "as", "sess", ":", "sess", ".", "run", "(", "image", ".", "initializer", ")", "writer", "=", "tf", ".", "summary", ".", "FileWriter", "(", "os", ".", "path", ".", "join", "(", "logdir", ",", "'sobel'", ")", ")", "writer", ".", "add_graph", "(", "sess", ".", "graph", ")", "for", "step", "in", "xrange", "(", "8", ")", ":", "if", "verbose", ":", "logger", ".", "info", "(", "\"--- sobel: step: %s\"", "%", "step", ")", "feed_dict", "=", "{", "kernel_radius", ":", "step", "}", "run_options", "=", "tf", ".", "compat", ".", "v1", ".", "RunOptions", "(", "trace_level", "=", "tf", ".", "compat", ".", "v1", ".", "RunOptions", ".", "FULL_TRACE", ")", "run_metadata", "=", "config_pb2", ".", "RunMetadata", "(", ")", "s", "=", "sess", ".", "run", "(", "summ", ",", "feed_dict", "=", "feed_dict", ",", "options", "=", "run_options", ",", "run_metadata", "=", "run_metadata", ")", "writer", ".", "add_summary", "(", "s", ",", "global_step", "=", "step", ")", "writer", ".", "add_run_metadata", "(", "run_metadata", ",", "'step_%04d'", "%", "step", ")", "writer", ".", "close", "(", ")" ]
Run a Sobel edge detection demonstration. See the summary description for more details. Arguments: logdir: Directory into which to write event logs. verbose: Boolean; whether to log any output.
[ "Run", "a", "Sobel", "edge", "detection", "demonstration", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_demo.py#L194-L265
train
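For intuition about the kernel built in `run_sobel`, here is the same separable construction in plain NumPy (a sketch outside the TF graph). Note that at radius 1 it reduces to a central-difference row rather than the classic [[-1,0,1],[-2,0,2],[-1,0,1]] Sobel kernel; the triangular weighting only spreads across rows once the radius exceeds 1:

import numpy as np

r = 1
n = 2 * r + 1
weighting = 1.0 - np.abs(np.linspace(-1.0, 1.0, n))   # [0., 1., 0.] for r=1
differentiation = np.linspace(-1.0, 1.0, n)           # [-1., 0., 1.]
horizontal_kernel = np.outer(weighting, differentiation)
vertical_kernel = horizontal_kernel.T
print(horizontal_kernel)
# [[ 0.  0.  0.]
#  [-1.  0.  1.]
#  [ 0.  0.  0.]]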
tensorflow/tensorboard
tensorboard/plugins/image/images_demo.py
run_all
def run_all(logdir, verbose=False): """Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins """ run_box_to_gaussian(logdir, verbose=verbose) run_sobel(logdir, verbose=verbose)
python
def run_all(logdir, verbose=False): """Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins """ run_box_to_gaussian(logdir, verbose=verbose) run_sobel(logdir, verbose=verbose)
[ "def", "run_all", "(", "logdir", ",", "verbose", "=", "False", ")", ":", "run_box_to_gaussian", "(", "logdir", ",", "verbose", "=", "verbose", ")", "run_sobel", "(", "logdir", ",", "verbose", "=", "verbose", ")" ]
Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins
[ "Run", "simulations", "on", "a", "reasonable", "set", "of", "parameters", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_demo.py#L268-L276
train
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
proto_value_for_feature
def proto_value_for_feature(example, feature_name): """Get the value of a feature from Example regardless of feature type.""" feature = get_example_features(example)[feature_name] if feature is None: raise ValueError('Feature {} is not on example proto.'.format(feature_name)) feature_type = feature.WhichOneof('kind') if feature_type is None: raise ValueError('Feature {} on example proto has no declared type.'.format( feature_name)) return getattr(feature, feature_type).value
python
def proto_value_for_feature(example, feature_name): """Get the value of a feature from Example regardless of feature type.""" feature = get_example_features(example)[feature_name] if feature is None: raise ValueError('Feature {} is not on example proto.'.format(feature_name)) feature_type = feature.WhichOneof('kind') if feature_type is None: raise ValueError('Feature {} on example proto has no declared type.'.format( feature_name)) return getattr(feature, feature_type).value
[ "def", "proto_value_for_feature", "(", "example", ",", "feature_name", ")", ":", "feature", "=", "get_example_features", "(", "example", ")", "[", "feature_name", "]", "if", "feature", "is", "None", ":", "raise", "ValueError", "(", "'Feature {} is not on example proto.'", ".", "format", "(", "feature_name", ")", ")", "feature_type", "=", "feature", ".", "WhichOneof", "(", "'kind'", ")", "if", "feature_type", "is", "None", ":", "raise", "ValueError", "(", "'Feature {} on example proto has no declared type.'", ".", "format", "(", "feature_name", ")", ")", "return", "getattr", "(", "feature", ",", "feature_type", ")", ".", "value" ]
Get the value of a feature from Example regardless of feature type.
[ "Get", "the", "value", "of", "a", "feature", "from", "Example", "regardless", "of", "feature", "type", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L225-L234
train
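The type-agnostic lookup in `proto_value_for_feature` relies on protobuf's WhichOneof, which reports which of a Feature's kind fields (bytes_list, float_list, int64_list) is actually set. A small hypothetical example, assuming TensorFlow is available for the tf.train proto classes:

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'age': tf.train.Feature(int64_list=tf.train.Int64List(value=[39])),
    'name': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'Ada'])),
}))

feature = example.features.feature['age']
kind = feature.WhichOneof('kind')                # 'int64_list'
print(kind, list(getattr(feature, kind).value))  # int64_list [39]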
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
parse_original_feature_from_example
def parse_original_feature_from_example(example, feature_name): """Returns an `OriginalFeatureList` for the specified feature_name. Args: example: An example. feature_name: A string feature name. Returns: A filled in `OriginalFeatureList` object representing the feature. """ feature = get_example_features(example)[feature_name] feature_type = feature.WhichOneof('kind') original_value = proto_value_for_feature(example, feature_name) return OriginalFeatureList(feature_name, original_value, feature_type)
python
def parse_original_feature_from_example(example, feature_name): """Returns an `OriginalFeatureList` for the specified feature_name. Args: example: An example. feature_name: A string feature name. Returns: A filled in `OriginalFeatureList` object representing the feature. """ feature = get_example_features(example)[feature_name] feature_type = feature.WhichOneof('kind') original_value = proto_value_for_feature(example, feature_name) return OriginalFeatureList(feature_name, original_value, feature_type)
[ "def", "parse_original_feature_from_example", "(", "example", ",", "feature_name", ")", ":", "feature", "=", "get_example_features", "(", "example", ")", "[", "feature_name", "]", "feature_type", "=", "feature", ".", "WhichOneof", "(", "'kind'", ")", "original_value", "=", "proto_value_for_feature", "(", "example", ",", "feature_name", ")", "return", "OriginalFeatureList", "(", "feature_name", ",", "original_value", ",", "feature_type", ")" ]
Returns an `OriginalFeatureList` for the specified feature_name. Args: example: An example. feature_name: A string feature name. Returns: A filled in `OriginalFeatureList` object representing the feature.
[ "Returns", "an", "OriginalFeatureList", "for", "the", "specified", "feature_name", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L237-L251
train
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
wrap_inference_results
def wrap_inference_results(inference_result_proto): """Returns packaged inference results from the provided proto. Args: inference_result_proto: The classification or regression response proto. Returns: An InferenceResult proto with the result from the response. """ inference_proto = inference_pb2.InferenceResult() if isinstance(inference_result_proto, classification_pb2.ClassificationResponse): inference_proto.classification_result.CopyFrom( inference_result_proto.result) elif isinstance(inference_result_proto, regression_pb2.RegressionResponse): inference_proto.regression_result.CopyFrom(inference_result_proto.result) return inference_proto
python
def wrap_inference_results(inference_result_proto): """Returns packaged inference results from the provided proto. Args: inference_result_proto: The classification or regression response proto. Returns: An InferenceResult proto with the result from the response. """ inference_proto = inference_pb2.InferenceResult() if isinstance(inference_result_proto, classification_pb2.ClassificationResponse): inference_proto.classification_result.CopyFrom( inference_result_proto.result) elif isinstance(inference_result_proto, regression_pb2.RegressionResponse): inference_proto.regression_result.CopyFrom(inference_result_proto.result) return inference_proto
[ "def", "wrap_inference_results", "(", "inference_result_proto", ")", ":", "inference_proto", "=", "inference_pb2", ".", "InferenceResult", "(", ")", "if", "isinstance", "(", "inference_result_proto", ",", "classification_pb2", ".", "ClassificationResponse", ")", ":", "inference_proto", ".", "classification_result", ".", "CopyFrom", "(", "inference_result_proto", ".", "result", ")", "elif", "isinstance", "(", "inference_result_proto", ",", "regression_pb2", ".", "RegressionResponse", ")", ":", "inference_proto", ".", "regression_result", ".", "CopyFrom", "(", "inference_result_proto", ".", "result", ")", "return", "inference_proto" ]
Returns packaged inference results from the provided proto. Args: inference_result_proto: The classification or regression response proto. Returns: An InferenceResult proto with the result from the response.
[ "Returns", "packaged", "inference", "results", "from", "the", "provided", "proto", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L254-L270
train
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
get_numeric_feature_names
def get_numeric_feature_names(example): """Returns a list of feature names for float and int64 type features. Args: example: An example. Returns: A list of strings of the names of numeric features. """ numeric_features = ('float_list', 'int64_list') features = get_example_features(example) return sorted([ feature_name for feature_name in features if features[feature_name].WhichOneof('kind') in numeric_features ])
python
def get_numeric_feature_names(example): """Returns a list of feature names for float and int64 type features. Args: example: An example. Returns: A list of strings of the names of numeric features. """ numeric_features = ('float_list', 'int64_list') features = get_example_features(example) return sorted([ feature_name for feature_name in features if features[feature_name].WhichOneof('kind') in numeric_features ])
[ "def", "get_numeric_feature_names", "(", "example", ")", ":", "numeric_features", "=", "(", "'float_list'", ",", "'int64_list'", ")", "features", "=", "get_example_features", "(", "example", ")", "return", "sorted", "(", "[", "feature_name", "for", "feature_name", "in", "features", "if", "features", "[", "feature_name", "]", ".", "WhichOneof", "(", "'kind'", ")", "in", "numeric_features", "]", ")" ]
Returns a list of feature names for float and int64 type features. Args: example: An example. Returns: A list of strings of the names of numeric features.
[ "Returns", "a", "list", "of", "feature", "names", "for", "float", "and", "int64", "type", "features", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L273-L287
train
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
get_categorical_feature_names
def get_categorical_feature_names(example): """Returns a list of feature names for byte type features. Args: example: An example. Returns: A list of categorical feature names (e.g. ['education', 'marital_status'] ) """ features = get_example_features(example) return sorted([ feature_name for feature_name in features if features[feature_name].WhichOneof('kind') == 'bytes_list' ])
python
def get_categorical_feature_names(example): """Returns a list of feature names for byte type features. Args: example: An example. Returns: A list of categorical feature names (e.g. ['education', 'marital_status'] ) """ features = get_example_features(example) return sorted([ feature_name for feature_name in features if features[feature_name].WhichOneof('kind') == 'bytes_list' ])
[ "def", "get_categorical_feature_names", "(", "example", ")", ":", "features", "=", "get_example_features", "(", "example", ")", "return", "sorted", "(", "[", "feature_name", "for", "feature_name", "in", "features", "if", "features", "[", "feature_name", "]", ".", "WhichOneof", "(", "'kind'", ")", "==", "'bytes_list'", "]", ")" ]
Returns a list of feature names for byte type features. Args: example: An example. Returns: A list of categorical feature names (e.g. ['education', 'marital_status'] )
[ "Returns", "a", "list", "of", "feature", "names", "for", "byte", "type", "features", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L290-L303
train
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
get_numeric_features_to_observed_range
def get_numeric_features_to_observed_range(examples): """Returns numerical features and their observed ranges. Args: examples: Examples to read to get ranges. Returns: A dict mapping feature_name -> {'observedMin': 'observedMax': } dicts, with a key for each numerical feature. """ observed_features = collections.defaultdict(list) # name -> [value, ] for example in examples: for feature_name in get_numeric_feature_names(example): original_feature = parse_original_feature_from_example( example, feature_name) observed_features[feature_name].extend(original_feature.original_value) return { feature_name: { 'observedMin': min(feature_values), 'observedMax': max(feature_values), } for feature_name, feature_values in iteritems(observed_features) }
python
def get_numeric_features_to_observed_range(examples): """Returns numerical features and their observed ranges. Args: examples: Examples to read to get ranges. Returns: A dict mapping feature_name -> {'observedMin': 'observedMax': } dicts, with a key for each numerical feature. """ observed_features = collections.defaultdict(list) # name -> [value, ] for example in examples: for feature_name in get_numeric_feature_names(example): original_feature = parse_original_feature_from_example( example, feature_name) observed_features[feature_name].extend(original_feature.original_value) return { feature_name: { 'observedMin': min(feature_values), 'observedMax': max(feature_values), } for feature_name, feature_values in iteritems(observed_features) }
[ "def", "get_numeric_features_to_observed_range", "(", "examples", ")", ":", "observed_features", "=", "collections", ".", "defaultdict", "(", "list", ")", "# name -> [value, ]", "for", "example", "in", "examples", ":", "for", "feature_name", "in", "get_numeric_feature_names", "(", "example", ")", ":", "original_feature", "=", "parse_original_feature_from_example", "(", "example", ",", "feature_name", ")", "observed_features", "[", "feature_name", "]", ".", "extend", "(", "original_feature", ".", "original_value", ")", "return", "{", "feature_name", ":", "{", "'observedMin'", ":", "min", "(", "feature_values", ")", ",", "'observedMax'", ":", "max", "(", "feature_values", ")", ",", "}", "for", "feature_name", ",", "feature_values", "in", "iteritems", "(", "observed_features", ")", "}" ]
Returns numerical features and their observed ranges. Args: examples: Examples to read to get ranges. Returns: A dict mapping feature_name -> {'observedMin': 'observedMax': } dicts, with a key for each numerical feature.
[ "Returns", "numerical", "features", "and", "their", "observed", "ranges", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L306-L328
train
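The aggregation in `get_numeric_features_to_observed_range` is a defaultdict of value lists reduced to min/max. A toy illustration, with hypothetical feature dicts standing in for parsed examples:

import collections

observed = collections.defaultdict(list)
for ex in [{'age': [39]}, {'age': [52]}, {'age': [28]}]:
    for name, values in ex.items():
        observed[name].extend(values)
ranges = {name: {'observedMin': min(v), 'observedMax': max(v)}
          for name, v in observed.items()}
print(ranges)  # {'age': {'observedMin': 28, 'observedMax': 52}}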
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
get_categorical_features_to_sampling
def get_categorical_features_to_sampling(examples, top_k): """Returns categorical features and a sampling of their most-common values. The results of this slow function are used by the visualization repeatedly, so the results are cached. Args: examples: Examples to read to get feature samples. top_k: Max number of samples to return per feature. Returns: A dict of feature_name -> {'samples': ['Married-civ-spouse', 'Never-married', 'Divorced']}. There is one key for each categorical feature. Currently, the inner dict just has one key, but this structure leaves room for further expansion, and mirrors the structure used by `get_numeric_features_to_observed_range`. """ observed_features = collections.defaultdict(list) # name -> [value, ] for example in examples: for feature_name in get_categorical_feature_names(example): original_feature = parse_original_feature_from_example( example, feature_name) observed_features[feature_name].extend(original_feature.original_value) result = {} for feature_name, feature_values in sorted(iteritems(observed_features)): samples = [ word for word, count in collections.Counter(feature_values).most_common( top_k) if count > 1 ] if samples: result[feature_name] = {'samples': samples} return result
python
def get_categorical_features_to_sampling(examples, top_k): """Returns categorical features and a sampling of their most-common values. The results of this slow function are used by the visualization repeatedly, so the results are cached. Args: examples: Examples to read to get feature samples. top_k: Max number of samples to return per feature. Returns: A dict of feature_name -> {'samples': ['Married-civ-spouse', 'Never-married', 'Divorced']}. There is one key for each categorical feature. Currently, the inner dict just has one key, but this structure leaves room for further expansion, and mirrors the structure used by `get_numeric_features_to_observed_range`. """ observed_features = collections.defaultdict(list) # name -> [value, ] for example in examples: for feature_name in get_categorical_feature_names(example): original_feature = parse_original_feature_from_example( example, feature_name) observed_features[feature_name].extend(original_feature.original_value) result = {} for feature_name, feature_values in sorted(iteritems(observed_features)): samples = [ word for word, count in collections.Counter(feature_values).most_common( top_k) if count > 1 ] if samples: result[feature_name] = {'samples': samples} return result
[ "def", "get_categorical_features_to_sampling", "(", "examples", ",", "top_k", ")", ":", "observed_features", "=", "collections", ".", "defaultdict", "(", "list", ")", "# name -> [value, ]", "for", "example", "in", "examples", ":", "for", "feature_name", "in", "get_categorical_feature_names", "(", "example", ")", ":", "original_feature", "=", "parse_original_feature_from_example", "(", "example", ",", "feature_name", ")", "observed_features", "[", "feature_name", "]", ".", "extend", "(", "original_feature", ".", "original_value", ")", "result", "=", "{", "}", "for", "feature_name", ",", "feature_values", "in", "sorted", "(", "iteritems", "(", "observed_features", ")", ")", ":", "samples", "=", "[", "word", "for", "word", ",", "count", "in", "collections", ".", "Counter", "(", "feature_values", ")", ".", "most_common", "(", "top_k", ")", "if", "count", ">", "1", "]", "if", "samples", ":", "result", "[", "feature_name", "]", "=", "{", "'samples'", ":", "samples", "}", "return", "result" ]
Returns categorical features and a sampling of their most-common values. The results of this slow function are used by the visualization repeatedly, so the results are cached. Args: examples: Examples to read to get feature samples. top_k: Max number of samples to return per feature. Returns: A dict of feature_name -> {'samples': ['Married-civ-spouse', 'Never-married', 'Divorced']}. There is one key for each categorical feature. Currently, the inner dict just has one key, but this structure leaves room for further expansion, and mirrors the structure used by `get_numeric_features_to_observed_range`.
[ "Returns", "categorical", "features", "and", "a", "sampling", "of", "their", "most", "-", "common", "values", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L331-L367
train
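The sampling step in `get_categorical_features_to_sampling` combines Counter.most_common with a count > 1 filter, so singleton values never make it into the returned samples. A small standalone check:

import collections

values = ['Divorced', 'Never-married', 'Never-married',
          'Married-civ-spouse', 'Married-civ-spouse', 'Married-civ-spouse']
samples = [word for word, count in
           collections.Counter(values).most_common(2) if count > 1]
print(samples)  # ['Married-civ-spouse', 'Never-married']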
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
make_mutant_features
def make_mutant_features(original_feature, index_to_mutate, viz_params): """Return a list of `MutantFeatureValue`s that are variants of original.""" lower = viz_params.x_min upper = viz_params.x_max examples = viz_params.examples num_mutants = viz_params.num_mutants if original_feature.feature_type == 'float_list': return [ MutantFeatureValue(original_feature, index_to_mutate, value) for value in np.linspace(lower, upper, num_mutants) ] elif original_feature.feature_type == 'int64_list': mutant_values = np.linspace(int(lower), int(upper), num_mutants).astype(int).tolist() # Remove duplicates that can occur due to integer constraint. mutant_values = sorted(set(mutant_values)) return [ MutantFeatureValue(original_feature, index_to_mutate, value) for value in mutant_values ] elif original_feature.feature_type == 'bytes_list': feature_to_samples = get_categorical_features_to_sampling( examples, num_mutants) # `mutant_values` looks like: # [['Married-civ-spouse'], ['Never-married'], ['Divorced'], ['Separated']] mutant_values = feature_to_samples[original_feature.feature_name]['samples'] return [ MutantFeatureValue(original_feature, None, value) for value in mutant_values ] else: raise ValueError('Malformed original feature had type of: ' + original_feature.feature_type)
python
def make_mutant_features(original_feature, index_to_mutate, viz_params): """Return a list of `MutantFeatureValue`s that are variants of original.""" lower = viz_params.x_min upper = viz_params.x_max examples = viz_params.examples num_mutants = viz_params.num_mutants if original_feature.feature_type == 'float_list': return [ MutantFeatureValue(original_feature, index_to_mutate, value) for value in np.linspace(lower, upper, num_mutants) ] elif original_feature.feature_type == 'int64_list': mutant_values = np.linspace(int(lower), int(upper), num_mutants).astype(int).tolist() # Remove duplicates that can occur due to integer constraint. mutant_values = sorted(set(mutant_values)) return [ MutantFeatureValue(original_feature, index_to_mutate, value) for value in mutant_values ] elif original_feature.feature_type == 'bytes_list': feature_to_samples = get_categorical_features_to_sampling( examples, num_mutants) # `mutant_values` looks like: # [['Married-civ-spouse'], ['Never-married'], ['Divorced'], ['Separated']] mutant_values = feature_to_samples[original_feature.feature_name]['samples'] return [ MutantFeatureValue(original_feature, None, value) for value in mutant_values ] else: raise ValueError('Malformed original feature had type of: ' + original_feature.feature_type)
[ "def", "make_mutant_features", "(", "original_feature", ",", "index_to_mutate", ",", "viz_params", ")", ":", "lower", "=", "viz_params", ".", "x_min", "upper", "=", "viz_params", ".", "x_max", "examples", "=", "viz_params", ".", "examples", "num_mutants", "=", "viz_params", ".", "num_mutants", "if", "original_feature", ".", "feature_type", "==", "'float_list'", ":", "return", "[", "MutantFeatureValue", "(", "original_feature", ",", "index_to_mutate", ",", "value", ")", "for", "value", "in", "np", ".", "linspace", "(", "lower", ",", "upper", ",", "num_mutants", ")", "]", "elif", "original_feature", ".", "feature_type", "==", "'int64_list'", ":", "mutant_values", "=", "np", ".", "linspace", "(", "int", "(", "lower", ")", ",", "int", "(", "upper", ")", ",", "num_mutants", ")", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "# Remove duplicates that can occur due to integer constraint.", "mutant_values", "=", "sorted", "(", "set", "(", "mutant_values", ")", ")", "return", "[", "MutantFeatureValue", "(", "original_feature", ",", "index_to_mutate", ",", "value", ")", "for", "value", "in", "mutant_values", "]", "elif", "original_feature", ".", "feature_type", "==", "'bytes_list'", ":", "feature_to_samples", "=", "get_categorical_features_to_sampling", "(", "examples", ",", "num_mutants", ")", "# `mutant_values` looks like:", "# [['Married-civ-spouse'], ['Never-married'], ['Divorced'], ['Separated']]", "mutant_values", "=", "feature_to_samples", "[", "original_feature", ".", "feature_name", "]", "[", "'samples'", "]", "return", "[", "MutantFeatureValue", "(", "original_feature", ",", "None", ",", "value", ")", "for", "value", "in", "mutant_values", "]", "else", ":", "raise", "ValueError", "(", "'Malformed original feature had type of: '", "+", "original_feature", ".", "feature_type", ")" ]
Return a list of `MutantFeatureValue`s that are variants of original.
[ "Return", "a", "list", "of", "MutantFeatureValue", "s", "that", "are", "variants", "of", "original", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L370-L404
train
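Illustrative sketch for the row above (not part of the dataset or the TensorBoard source; the helper name `int_mutant_values` is hypothetical): the int64_list branch spaces values evenly, casts to int, and deduplicates, since casting a narrow range can repeat values.

import numpy as np

def int_mutant_values(lower, upper, num_mutants):
  # Evenly spaced values, truncated to int, can collide; dedupe and sort.
  values = np.linspace(int(lower), int(upper), num_mutants).astype(int).tolist()
  return sorted(set(values))

print(int_mutant_values(0, 3, 10))  # [0, 1, 2, 3]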
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
make_mutant_tuples
def make_mutant_tuples(example_protos, original_feature, index_to_mutate,
                       viz_params):
  """Return a list of `MutantFeatureValue`s and a list of mutant Examples.

  Args:
    example_protos: The examples to mutate.
    original_feature: A `OriginalFeatureList` that encapsulates the feature to
      mutate.
    index_to_mutate: The index of the int64_list or float_list to mutate.
    viz_params: A `VizParams` object that contains the UI state of the request.

  Returns:
    A list of `MutantFeatureValue`s and a list of mutant examples.
  """
  mutant_features = make_mutant_features(original_feature, index_to_mutate,
                                         viz_params)
  mutant_examples = []
  for example_proto in example_protos:
    for mutant_feature in mutant_features:
      copied_example = copy.deepcopy(example_proto)
      feature_name = mutant_feature.original_feature.feature_name

      try:
        feature_list = proto_value_for_feature(copied_example, feature_name)
        if index_to_mutate is None:
          new_values = mutant_feature.mutant_value
        else:
          new_values = list(feature_list)
          new_values[index_to_mutate] = mutant_feature.mutant_value

        del feature_list[:]
        feature_list.extend(new_values)
        mutant_examples.append(copied_example)
      except (ValueError, IndexError):
        # If the mutant value can't be set, still add the example to the
        # mutant_example even though no change was made. This is necessary to
        # allow for computation of global PD plots when not all examples have
        # the same number of feature values for a feature.
        mutant_examples.append(copied_example)

  return mutant_features, mutant_examples
python
def make_mutant_tuples(example_protos, original_feature, index_to_mutate,
                       viz_params):
  """Return a list of `MutantFeatureValue`s and a list of mutant Examples.

  Args:
    example_protos: The examples to mutate.
    original_feature: A `OriginalFeatureList` that encapsulates the feature to
      mutate.
    index_to_mutate: The index of the int64_list or float_list to mutate.
    viz_params: A `VizParams` object that contains the UI state of the request.

  Returns:
    A list of `MutantFeatureValue`s and a list of mutant examples.
  """
  mutant_features = make_mutant_features(original_feature, index_to_mutate,
                                         viz_params)
  mutant_examples = []
  for example_proto in example_protos:
    for mutant_feature in mutant_features:
      copied_example = copy.deepcopy(example_proto)
      feature_name = mutant_feature.original_feature.feature_name

      try:
        feature_list = proto_value_for_feature(copied_example, feature_name)
        if index_to_mutate is None:
          new_values = mutant_feature.mutant_value
        else:
          new_values = list(feature_list)
          new_values[index_to_mutate] = mutant_feature.mutant_value

        del feature_list[:]
        feature_list.extend(new_values)
        mutant_examples.append(copied_example)
      except (ValueError, IndexError):
        # If the mutant value can't be set, still add the example to the
        # mutant_example even though no change was made. This is necessary to
        # allow for computation of global PD plots when not all examples have
        # the same number of feature values for a feature.
        mutant_examples.append(copied_example)

  return mutant_features, mutant_examples
[ "def", "make_mutant_tuples", "(", "example_protos", ",", "original_feature", ",", "index_to_mutate", ",", "viz_params", ")", ":", "mutant_features", "=", "make_mutant_features", "(", "original_feature", ",", "index_to_mutate", ",", "viz_params", ")", "mutant_examples", "=", "[", "]", "for", "example_proto", "in", "example_protos", ":", "for", "mutant_feature", "in", "mutant_features", ":", "copied_example", "=", "copy", ".", "deepcopy", "(", "example_proto", ")", "feature_name", "=", "mutant_feature", ".", "original_feature", ".", "feature_name", "try", ":", "feature_list", "=", "proto_value_for_feature", "(", "copied_example", ",", "feature_name", ")", "if", "index_to_mutate", "is", "None", ":", "new_values", "=", "mutant_feature", ".", "mutant_value", "else", ":", "new_values", "=", "list", "(", "feature_list", ")", "new_values", "[", "index_to_mutate", "]", "=", "mutant_feature", ".", "mutant_value", "del", "feature_list", "[", ":", "]", "feature_list", ".", "extend", "(", "new_values", ")", "mutant_examples", ".", "append", "(", "copied_example", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "# If the mutant value can't be set, still add the example to the", "# mutant_example even though no change was made. This is necessary to", "# allow for computation of global PD plots when not all examples have", "# the same number of feature values for a feature.", "mutant_examples", ".", "append", "(", "copied_example", ")", "return", "mutant_features", ",", "mutant_examples" ]
Return a list of `MutantFeatureValue`s and a list of mutant Examples.

Args:
  example_protos: The examples to mutate.
  original_feature: A `OriginalFeatureList` that encapsulates the feature to
    mutate.
  index_to_mutate: The index of the int64_list or float_list to mutate.
  viz_params: A `VizParams` object that contains the UI state of the request.

Returns:
  A list of `MutantFeatureValue`s and a list of mutant examples.
[ "Return", "a", "list", "of", "MutantFeatureValue", "s", "and", "a", "list", "of", "mutant", "Examples", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L407-L447
train
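Standalone sketch of the per-mutant copy-and-overwrite step above (assumes TensorFlow is installed; feature name and values are made up): the proto is deep-copied, then the repeated field is cleared and extended so the change lands on the copy only.

import copy
import tensorflow as tf

example = tf.train.Example()
example.features.feature['age'].int64_list.value.append(38)

mutant = copy.deepcopy(example)
feature_list = mutant.features.feature['age'].int64_list.value
new_values = list(feature_list)
new_values[0] = 45  # a hypothetical mutant value for index 0
del feature_list[:]  # clear, then extend, as in make_mutant_tuples
feature_list.extend(new_values)

print(example.features.feature['age'].int64_list.value[:])  # [38]
print(mutant.features.feature['age'].int64_list.value[:])   # [45]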
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
mutant_charts_for_feature
def mutant_charts_for_feature(example_protos, feature_name, serving_bundles,
                              viz_params):
  """Returns JSON formatted for rendering all charts for a feature.

  Args:
    example_protos: The example protos to mutate.
    feature_name: The string feature name to mutate.
    serving_bundles: One `ServingBundle` object per model, that contains the
      information to make the serving request.
    viz_params: A `VizParams` object that contains the UI state of the
      request.

  Raises:
    InvalidUserInputError if `viz_params.feature_index_pattern` requests out of
    range indices for `feature_name` within `example_protos`.

  Returns:
    A JSON-able dict for rendering a single mutant chart, parsed in
    `tf-inference-dashboard.html`.
    {
      'chartType': 'numeric', # oneof('numeric', 'categorical')
      'data': [A list of data] # parseable by vz-line-chart or vz-bar-chart
    }
  """

  def chart_for_index(index_to_mutate):
    mutant_features, mutant_examples = make_mutant_tuples(
        example_protos, original_feature, index_to_mutate, viz_params)

    charts = []
    for serving_bundle in serving_bundles:
      inference_result_proto = run_inference(mutant_examples, serving_bundle)
      charts.append(make_json_formatted_for_single_chart(
          mutant_features, inference_result_proto, index_to_mutate))
    return charts

  try:
    original_feature = parse_original_feature_from_example(
        example_protos[0], feature_name)
  except ValueError as e:
    return {
        'chartType': 'categorical',
        'data': []
    }

  indices_to_mutate = viz_params.feature_indices or range(
      original_feature.length)
  chart_type = ('categorical' if original_feature.feature_type == 'bytes_list'
                else 'numeric')

  try:
    return {
        'chartType': chart_type,
        'data': [
            chart_for_index(index_to_mutate)
            for index_to_mutate in indices_to_mutate
        ]
    }
  except IndexError as e:
    raise common_utils.InvalidUserInputError(e)
python
def mutant_charts_for_feature(example_protos, feature_name, serving_bundles,
                              viz_params):
  """Returns JSON formatted for rendering all charts for a feature.

  Args:
    example_protos: The example protos to mutate.
    feature_name: The string feature name to mutate.
    serving_bundles: One `ServingBundle` object per model, that contains the
      information to make the serving request.
    viz_params: A `VizParams` object that contains the UI state of the
      request.

  Raises:
    InvalidUserInputError if `viz_params.feature_index_pattern` requests out of
    range indices for `feature_name` within `example_protos`.

  Returns:
    A JSON-able dict for rendering a single mutant chart, parsed in
    `tf-inference-dashboard.html`.
    {
      'chartType': 'numeric', # oneof('numeric', 'categorical')
      'data': [A list of data] # parseable by vz-line-chart or vz-bar-chart
    }
  """

  def chart_for_index(index_to_mutate):
    mutant_features, mutant_examples = make_mutant_tuples(
        example_protos, original_feature, index_to_mutate, viz_params)

    charts = []
    for serving_bundle in serving_bundles:
      inference_result_proto = run_inference(mutant_examples, serving_bundle)
      charts.append(make_json_formatted_for_single_chart(
          mutant_features, inference_result_proto, index_to_mutate))
    return charts

  try:
    original_feature = parse_original_feature_from_example(
        example_protos[0], feature_name)
  except ValueError as e:
    return {
        'chartType': 'categorical',
        'data': []
    }

  indices_to_mutate = viz_params.feature_indices or range(
      original_feature.length)
  chart_type = ('categorical' if original_feature.feature_type == 'bytes_list'
                else 'numeric')

  try:
    return {
        'chartType': chart_type,
        'data': [
            chart_for_index(index_to_mutate)
            for index_to_mutate in indices_to_mutate
        ]
    }
  except IndexError as e:
    raise common_utils.InvalidUserInputError(e)
[ "def", "mutant_charts_for_feature", "(", "example_protos", ",", "feature_name", ",", "serving_bundles", ",", "viz_params", ")", ":", "def", "chart_for_index", "(", "index_to_mutate", ")", ":", "mutant_features", ",", "mutant_examples", "=", "make_mutant_tuples", "(", "example_protos", ",", "original_feature", ",", "index_to_mutate", ",", "viz_params", ")", "charts", "=", "[", "]", "for", "serving_bundle", "in", "serving_bundles", ":", "inference_result_proto", "=", "run_inference", "(", "mutant_examples", ",", "serving_bundle", ")", "charts", ".", "append", "(", "make_json_formatted_for_single_chart", "(", "mutant_features", ",", "inference_result_proto", ",", "index_to_mutate", ")", ")", "return", "charts", "try", ":", "original_feature", "=", "parse_original_feature_from_example", "(", "example_protos", "[", "0", "]", ",", "feature_name", ")", "except", "ValueError", "as", "e", ":", "return", "{", "'chartType'", ":", "'categorical'", ",", "'data'", ":", "[", "]", "}", "indices_to_mutate", "=", "viz_params", ".", "feature_indices", "or", "range", "(", "original_feature", ".", "length", ")", "chart_type", "=", "(", "'categorical'", "if", "original_feature", ".", "feature_type", "==", "'bytes_list'", "else", "'numeric'", ")", "try", ":", "return", "{", "'chartType'", ":", "chart_type", ",", "'data'", ":", "[", "chart_for_index", "(", "index_to_mutate", ")", "for", "index_to_mutate", "in", "indices_to_mutate", "]", "}", "except", "IndexError", "as", "e", ":", "raise", "common_utils", ".", "InvalidUserInputError", "(", "e", ")" ]
Returns JSON formatted for rendering all charts for a feature.

Args:
  example_protos: The example protos to mutate.
  feature_name: The string feature name to mutate.
  serving_bundles: One `ServingBundle` object per model, that contains the
    information to make the serving request.
  viz_params: A `VizParams` object that contains the UI state of the request.

Raises:
  InvalidUserInputError if `viz_params.feature_index_pattern` requests out of
  range indices for `feature_name` within `example_protos`.

Returns:
  A JSON-able dict for rendering a single mutant chart, parsed in
  `tf-inference-dashboard.html`.
  {
    'chartType': 'numeric', # oneof('numeric', 'categorical')
    'data': [A list of data] # parseable by vz-line-chart or vz-bar-chart
  }
[ "Returns", "JSON", "formatted", "for", "rendering", "all", "charts", "for", "a", "feature", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L450-L507
train
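Illustrative only (made-up numbers, inferred from the docstring and from make_json_formatted_for_single_chart's regression branch): a plausible return value for a numeric feature with two mutated indices and one model per index.

chart = {
    'chartType': 'numeric',
    'data': [
        # index 0 of the feature; one inner entry per serving bundle
        [{'value': [{'step': 0.0, 'scalar': 0.12},
                    {'step': 1.0, 'scalar': 0.34}]}],
        # index 1 of the feature
        [{'value (index 1)': [{'step': 0.0, 'scalar': 0.55},
                              {'step': 1.0, 'scalar': 0.41}]}],
    ],
}
print(chart['chartType'], len(chart['data']))  # numeric 2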
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
make_json_formatted_for_single_chart
def make_json_formatted_for_single_chart(mutant_features,
                                         inference_result_proto,
                                         index_to_mutate):
  """Returns JSON formatted for a single mutant chart.

  Args:
    mutant_features: An iterable of `MutantFeatureValue`s representing the
      X-axis.
    inference_result_proto: A ClassificationResponse or RegressionResponse
      returned by Servo, representing the Y-axis. It contains one
      'classification' or 'regression' for every Example that was sent for
      inference. The length of that field should be the same length as
      mutant_features.
    index_to_mutate: The index of the feature being mutated for this chart.

  Returns:
    A JSON-able dict for rendering a single mutant chart, parseable by
    `vz-line-chart` or `vz-bar-chart`.
  """
  x_label = 'step'
  y_label = 'scalar'

  if isinstance(inference_result_proto,
                classification_pb2.ClassificationResponse):
    # classification_label -> [{x_label: y_label:}]
    series = {}

    # ClassificationResponse has a separate probability for each label
    for idx, classification in enumerate(
        inference_result_proto.result.classifications):
      # For each example to use for mutant inference, we create a copied example
      # with the feature in question changed to each possible mutant value. So
      # when we get the inferences back, we get num_examples*num_mutants
      # results. So, modding by len(mutant_features) allows us to correctly
      # lookup the mutant value for each inference.
      mutant_feature = mutant_features[idx % len(mutant_features)]

      for class_index, classification_class in enumerate(
          classification.classes):
        # Fill in class index when labels are missing
        if classification_class.label == '':
          classification_class.label = str(class_index)
        # Special case to not include the "0" class in binary classification.
        # Since that just results in a chart that is symmetric around 0.5.
        if len(
            classification.classes) == 2 and classification_class.label == '0':
          continue
        key = classification_class.label
        if index_to_mutate:
          key += ' (index %d)' % index_to_mutate

        if not key in series:
          series[key] = {}
        if not mutant_feature.mutant_value in series[key]:
          series[key][mutant_feature.mutant_value] = []
        series[key][mutant_feature.mutant_value].append(
            classification_class.score)

    # Post-process points to have separate list for each class
    return_series = collections.defaultdict(list)
    for key, mutant_values in iteritems(series):
      for value, y_list in iteritems(mutant_values):
        return_series[key].append({
            x_label: value,
            y_label: sum(y_list) / float(len(y_list))
        })
      return_series[key].sort(key=lambda p: p[x_label])
    return return_series

  elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
    points = {}

    for idx, regression in enumerate(inference_result_proto.result.regressions):
      # For each example to use for mutant inference, we create a copied example
      # with the feature in question changed to each possible mutant value. So
      # when we get the inferences back, we get num_examples*num_mutants
      # results. So, modding by len(mutant_features) allows us to correctly
      # lookup the mutant value for each inference.
      mutant_feature = mutant_features[idx % len(mutant_features)]
      if not mutant_feature.mutant_value in points:
        points[mutant_feature.mutant_value] = []
      points[mutant_feature.mutant_value].append(regression.value)

    key = 'value'
    if (index_to_mutate != 0):
      key += ' (index %d)' % index_to_mutate
    list_of_points = []
    for value, y_list in iteritems(points):
      list_of_points.append({
          x_label: value,
          y_label: sum(y_list) / float(len(y_list))
      })
    list_of_points.sort(key=lambda p: p[x_label])
    return {key: list_of_points}

  else:
    raise NotImplementedError('Only classification and regression implemented.')
python
def make_json_formatted_for_single_chart(mutant_features,
                                         inference_result_proto,
                                         index_to_mutate):
  """Returns JSON formatted for a single mutant chart.

  Args:
    mutant_features: An iterable of `MutantFeatureValue`s representing the
      X-axis.
    inference_result_proto: A ClassificationResponse or RegressionResponse
      returned by Servo, representing the Y-axis. It contains one
      'classification' or 'regression' for every Example that was sent for
      inference. The length of that field should be the same length as
      mutant_features.
    index_to_mutate: The index of the feature being mutated for this chart.

  Returns:
    A JSON-able dict for rendering a single mutant chart, parseable by
    `vz-line-chart` or `vz-bar-chart`.
  """
  x_label = 'step'
  y_label = 'scalar'

  if isinstance(inference_result_proto,
                classification_pb2.ClassificationResponse):
    # classification_label -> [{x_label: y_label:}]
    series = {}

    # ClassificationResponse has a separate probability for each label
    for idx, classification in enumerate(
        inference_result_proto.result.classifications):
      # For each example to use for mutant inference, we create a copied example
      # with the feature in question changed to each possible mutant value. So
      # when we get the inferences back, we get num_examples*num_mutants
      # results. So, modding by len(mutant_features) allows us to correctly
      # lookup the mutant value for each inference.
      mutant_feature = mutant_features[idx % len(mutant_features)]

      for class_index, classification_class in enumerate(
          classification.classes):
        # Fill in class index when labels are missing
        if classification_class.label == '':
          classification_class.label = str(class_index)
        # Special case to not include the "0" class in binary classification.
        # Since that just results in a chart that is symmetric around 0.5.
        if len(
            classification.classes) == 2 and classification_class.label == '0':
          continue
        key = classification_class.label
        if index_to_mutate:
          key += ' (index %d)' % index_to_mutate

        if not key in series:
          series[key] = {}
        if not mutant_feature.mutant_value in series[key]:
          series[key][mutant_feature.mutant_value] = []
        series[key][mutant_feature.mutant_value].append(
            classification_class.score)

    # Post-process points to have separate list for each class
    return_series = collections.defaultdict(list)
    for key, mutant_values in iteritems(series):
      for value, y_list in iteritems(mutant_values):
        return_series[key].append({
            x_label: value,
            y_label: sum(y_list) / float(len(y_list))
        })
      return_series[key].sort(key=lambda p: p[x_label])
    return return_series

  elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
    points = {}

    for idx, regression in enumerate(inference_result_proto.result.regressions):
      # For each example to use for mutant inference, we create a copied example
      # with the feature in question changed to each possible mutant value. So
      # when we get the inferences back, we get num_examples*num_mutants
      # results. So, modding by len(mutant_features) allows us to correctly
      # lookup the mutant value for each inference.
      mutant_feature = mutant_features[idx % len(mutant_features)]
      if not mutant_feature.mutant_value in points:
        points[mutant_feature.mutant_value] = []
      points[mutant_feature.mutant_value].append(regression.value)

    key = 'value'
    if (index_to_mutate != 0):
      key += ' (index %d)' % index_to_mutate
    list_of_points = []
    for value, y_list in iteritems(points):
      list_of_points.append({
          x_label: value,
          y_label: sum(y_list) / float(len(y_list))
      })
    list_of_points.sort(key=lambda p: p[x_label])
    return {key: list_of_points}

  else:
    raise NotImplementedError('Only classification and regression implemented.')
[ "def", "make_json_formatted_for_single_chart", "(", "mutant_features", ",", "inference_result_proto", ",", "index_to_mutate", ")", ":", "x_label", "=", "'step'", "y_label", "=", "'scalar'", "if", "isinstance", "(", "inference_result_proto", ",", "classification_pb2", ".", "ClassificationResponse", ")", ":", "# classification_label -> [{x_label: y_label:}]", "series", "=", "{", "}", "# ClassificationResponse has a separate probability for each label", "for", "idx", ",", "classification", "in", "enumerate", "(", "inference_result_proto", ".", "result", ".", "classifications", ")", ":", "# For each example to use for mutant inference, we create a copied example", "# with the feature in question changed to each possible mutant value. So", "# when we get the inferences back, we get num_examples*num_mutants", "# results. So, modding by len(mutant_features) allows us to correctly", "# lookup the mutant value for each inference.", "mutant_feature", "=", "mutant_features", "[", "idx", "%", "len", "(", "mutant_features", ")", "]", "for", "class_index", ",", "classification_class", "in", "enumerate", "(", "classification", ".", "classes", ")", ":", "# Fill in class index when labels are missing", "if", "classification_class", ".", "label", "==", "''", ":", "classification_class", ".", "label", "=", "str", "(", "class_index", ")", "# Special case to not include the \"0\" class in binary classification.", "# Since that just results in a chart that is symmetric around 0.5.", "if", "len", "(", "classification", ".", "classes", ")", "==", "2", "and", "classification_class", ".", "label", "==", "'0'", ":", "continue", "key", "=", "classification_class", ".", "label", "if", "index_to_mutate", ":", "key", "+=", "' (index %d)'", "%", "index_to_mutate", "if", "not", "key", "in", "series", ":", "series", "[", "key", "]", "=", "{", "}", "if", "not", "mutant_feature", ".", "mutant_value", "in", "series", "[", "key", "]", ":", "series", "[", "key", "]", "[", "mutant_feature", ".", "mutant_value", "]", "=", "[", "]", "series", "[", "key", "]", "[", "mutant_feature", ".", "mutant_value", "]", ".", "append", "(", "classification_class", ".", "score", ")", "# Post-process points to have separate list for each class", "return_series", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "key", ",", "mutant_values", "in", "iteritems", "(", "series", ")", ":", "for", "value", ",", "y_list", "in", "iteritems", "(", "mutant_values", ")", ":", "return_series", "[", "key", "]", ".", "append", "(", "{", "x_label", ":", "value", ",", "y_label", ":", "sum", "(", "y_list", ")", "/", "float", "(", "len", "(", "y_list", ")", ")", "}", ")", "return_series", "[", "key", "]", ".", "sort", "(", "key", "=", "lambda", "p", ":", "p", "[", "x_label", "]", ")", "return", "return_series", "elif", "isinstance", "(", "inference_result_proto", ",", "regression_pb2", ".", "RegressionResponse", ")", ":", "points", "=", "{", "}", "for", "idx", ",", "regression", "in", "enumerate", "(", "inference_result_proto", ".", "result", ".", "regressions", ")", ":", "# For each example to use for mutant inference, we create a copied example", "# with the feature in question changed to each possible mutant value. So", "# when we get the inferences back, we get num_examples*num_mutants", "# results. 
So, modding by len(mutant_features) allows us to correctly", "# lookup the mutant value for each inference.", "mutant_feature", "=", "mutant_features", "[", "idx", "%", "len", "(", "mutant_features", ")", "]", "if", "not", "mutant_feature", ".", "mutant_value", "in", "points", ":", "points", "[", "mutant_feature", ".", "mutant_value", "]", "=", "[", "]", "points", "[", "mutant_feature", ".", "mutant_value", "]", ".", "append", "(", "regression", ".", "value", ")", "key", "=", "'value'", "if", "(", "index_to_mutate", "!=", "0", ")", ":", "key", "+=", "' (index %d)'", "%", "index_to_mutate", "list_of_points", "=", "[", "]", "for", "value", ",", "y_list", "in", "iteritems", "(", "points", ")", ":", "list_of_points", ".", "append", "(", "{", "x_label", ":", "value", ",", "y_label", ":", "sum", "(", "y_list", ")", "/", "float", "(", "len", "(", "y_list", ")", ")", "}", ")", "list_of_points", ".", "sort", "(", "key", "=", "lambda", "p", ":", "p", "[", "x_label", "]", ")", "return", "{", "key", ":", "list_of_points", "}", "else", ":", "raise", "NotImplementedError", "(", "'Only classification and regression implemented.'", ")" ]
Returns JSON formatted for a single mutant chart.

Args:
  mutant_features: An iterable of `MutantFeatureValue`s representing the
    X-axis.
  inference_result_proto: A ClassificationResponse or RegressionResponse
    returned by Servo, representing the Y-axis. It contains one
    'classification' or 'regression' for every Example that was sent for
    inference. The length of that field should be the same length as
    mutant_features.
  index_to_mutate: The index of the feature being mutated for this chart.

Returns:
  A JSON-able dict for rendering a single mutant chart, parseable by
  `vz-line-chart` or `vz-bar-chart`.
[ "Returns", "JSON", "formatted", "for", "a", "single", "mutant", "chart", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L510-L603
train
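Standalone sketch of the aggregation in the row above (pure Python; values are made up): results arrive in num_examples * num_mutants order, so result i maps back to mutant i % num_mutants, and scores for the same mutant value are averaged.

import collections

mutant_values = [0.0, 1.0, 2.0]           # num_mutants = 3
scores = [0.2, 0.4, 0.6, 0.4, 0.6, 0.8]   # two examples x three mutants

points = collections.defaultdict(list)
for i, score in enumerate(scores):
  # Mod by the mutant count to recover which mutant produced this result.
  points[mutant_values[i % len(mutant_values)]].append(score)

series = [{'step': v, 'scalar': sum(ys) / float(len(ys))}
          for v, ys in points.items()]
series.sort(key=lambda p: p['step'])
print(series)  # steps 0.0/1.0/2.0 with averaged scalars 0.3/0.5/0.7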
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
get_example_features
def get_example_features(example):
  """Returns the non-sequence features from the provided example."""
  return (example.features.feature if isinstance(example, tf.train.Example)
          else example.context.feature)
python
def get_example_features(example):
  """Returns the non-sequence features from the provided example."""
  return (example.features.feature if isinstance(example, tf.train.Example)
          else example.context.feature)
[ "def", "get_example_features", "(", "example", ")", ":", "return", "(", "example", ".", "features", ".", "feature", "if", "isinstance", "(", "example", ",", "tf", ".", "train", ".", "Example", ")", "else", "example", ".", "context", ".", "feature", ")" ]
Returns the non-sequence features from the provided example.
[ "Returns", "the", "non", "-", "sequence", "features", "from", "the", "provided", "example", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L606-L609
train
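Sketch of why the isinstance branch above exists (assumes TensorFlow): tf.train.Example keeps its feature map under `features`, while tf.train.SequenceExample keeps its non-sequence (context) features under `context`.

import tensorflow as tf

ex = tf.train.Example()
ex.features.feature['f'].float_list.value.append(1.0)
seq = tf.train.SequenceExample()
seq.context.feature['f'].float_list.value.append(2.0)

for proto in (ex, seq):
  # Same branch as get_example_features above.
  fmap = (proto.features.feature if isinstance(proto, tf.train.Example)
          else proto.context.feature)
  print(fmap['f'].float_list.value[:])  # [1.0] then [2.0]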
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
run_inference_for_inference_results
def run_inference_for_inference_results(examples, serving_bundle):
  """Calls servo and wraps the inference results."""
  inference_result_proto = run_inference(examples, serving_bundle)
  inferences = wrap_inference_results(inference_result_proto)
  infer_json = json_format.MessageToJson(
      inferences, including_default_value_fields=True)
  return json.loads(infer_json)
python
def run_inference_for_inference_results(examples, serving_bundle):
  """Calls servo and wraps the inference results."""
  inference_result_proto = run_inference(examples, serving_bundle)
  inferences = wrap_inference_results(inference_result_proto)
  infer_json = json_format.MessageToJson(
      inferences, including_default_value_fields=True)
  return json.loads(infer_json)
[ "def", "run_inference_for_inference_results", "(", "examples", ",", "serving_bundle", ")", ":", "inference_result_proto", "=", "run_inference", "(", "examples", ",", "serving_bundle", ")", "inferences", "=", "wrap_inference_results", "(", "inference_result_proto", ")", "infer_json", "=", "json_format", ".", "MessageToJson", "(", "inferences", ",", "including_default_value_fields", "=", "True", ")", "return", "json", ".", "loads", "(", "infer_json", ")" ]
Calls servo and wraps the inference results.
[ "Calls", "servo", "and", "wraps", "the", "inference", "results", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L611-L617
train
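Minimal sketch of the proto-to-JSON round trip used above (assumes TensorFlow and protobuf; applied here to an Example proto since no Servo response is available in isolation): including_default_value_fields keeps zero-valued fields so the front end sees a stable schema.

import json
from google.protobuf import json_format
import tensorflow as tf

ex = tf.train.Example()
ex.features.feature['score'].float_list.value.append(0.0)
as_json = json_format.MessageToJson(ex, including_default_value_fields=True)
print(json.loads(as_json))  # a plain dict, ready to serve to the UI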
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
get_eligible_features
def get_eligible_features(examples, num_mutants):
  """Returns a list of JSON objects for each feature in the examples.

  This list is used to drive partial dependence plots in the plugin.

  Args:
    examples: Examples to examine to determine the eligible features.
    num_mutants: The number of mutations to make over each feature.

  Returns:
    A list with a JSON object for each feature.
    Numeric features are represented as {name: observedMin: observedMax:}.
    Categorical features are represented as {name: samples:[]}.
  """
  features_dict = (
      get_numeric_features_to_observed_range(
          examples))

  features_dict.update(
      get_categorical_features_to_sampling(
          examples, num_mutants))

  # Massage the features_dict into a sorted list before returning because
  # Polymer dom-repeat needs a list.
  features_list = []
  for k, v in sorted(features_dict.items()):
    v['name'] = k
    features_list.append(v)

  return features_list
python
def get_eligible_features(examples, num_mutants):
  """Returns a list of JSON objects for each feature in the examples.

  This list is used to drive partial dependence plots in the plugin.

  Args:
    examples: Examples to examine to determine the eligible features.
    num_mutants: The number of mutations to make over each feature.

  Returns:
    A list with a JSON object for each feature.
    Numeric features are represented as {name: observedMin: observedMax:}.
    Categorical features are represented as {name: samples:[]}.
  """
  features_dict = (
      get_numeric_features_to_observed_range(
          examples))

  features_dict.update(
      get_categorical_features_to_sampling(
          examples, num_mutants))

  # Massage the features_dict into a sorted list before returning because
  # Polymer dom-repeat needs a list.
  features_list = []
  for k, v in sorted(features_dict.items()):
    v['name'] = k
    features_list.append(v)

  return features_list
[ "def", "get_eligible_features", "(", "examples", ",", "num_mutants", ")", ":", "features_dict", "=", "(", "get_numeric_features_to_observed_range", "(", "examples", ")", ")", "features_dict", ".", "update", "(", "get_categorical_features_to_sampling", "(", "examples", ",", "num_mutants", ")", ")", "# Massage the features_dict into a sorted list before returning because", "# Polymer dom-repeat needs a list.", "features_list", "=", "[", "]", "for", "k", ",", "v", "in", "sorted", "(", "features_dict", ".", "items", "(", ")", ")", ":", "v", "[", "'name'", "]", "=", "k", "features_list", ".", "append", "(", "v", ")", "return", "features_list" ]
Returns a list of JSON objects for each feature in the examples.

This list is used to drive partial dependence plots in the plugin.

Args:
  examples: Examples to examine to determine the eligible features.
  num_mutants: The number of mutations to make over each feature.

Returns:
  A list with a JSON object for each feature.
  Numeric features are represented as {name: observedMin: observedMax:}.
  Categorical features are represented as {name: samples:[]}.
[ "Returns", "a", "list", "of", "JSON", "objects", "for", "each", "feature", "in", "the", "examples", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L619-L647
train
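Standalone sketch of the final "massage" step above (feature names and ranges are made up): fold each feature name into its JSON object and emit a name-sorted list for Polymer's dom-repeat.

features_dict = {
    'capital-gain': {'observedMin': 0, 'observedMax': 99999},
    'workclass': {'samples': ['Private', 'Self-emp']},
    'age': {'observedMin': 17, 'observedMax': 90},
}
features_list = []
for k, v in sorted(features_dict.items()):
  v['name'] = k
  features_list.append(v)
print([f['name'] for f in features_list])  # ['age', 'capital-gain', 'workclass']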
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
get_label_vocab
def get_label_vocab(vocab_path):
  """Returns a list of label strings loaded from the provided path."""
  if vocab_path:
    try:
      with tf.io.gfile.GFile(vocab_path, 'r') as f:
        return [line.rstrip('\n') for line in f]
    except tf.errors.NotFoundError as err:
      tf.logging.error('error reading vocab file: %s', err)
  return []
python
def get_label_vocab(vocab_path):
  """Returns a list of label strings loaded from the provided path."""
  if vocab_path:
    try:
      with tf.io.gfile.GFile(vocab_path, 'r') as f:
        return [line.rstrip('\n') for line in f]
    except tf.errors.NotFoundError as err:
      tf.logging.error('error reading vocab file: %s', err)
  return []
[ "def", "get_label_vocab", "(", "vocab_path", ")", ":", "if", "vocab_path", ":", "try", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "vocab_path", ",", "'r'", ")", "as", "f", ":", "return", "[", "line", ".", "rstrip", "(", "'\\n'", ")", "for", "line", "in", "f", "]", "except", "tf", ".", "errors", ".", "NotFoundError", "as", "err", ":", "tf", ".", "logging", ".", "error", "(", "'error reading vocab file: %s'", ",", "err", ")", "return", "[", "]" ]
Returns a list of label strings loaded from the provided path.
[ "Returns", "a", "list", "of", "label", "strings", "loaded", "from", "the", "provided", "path", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L649-L657
train
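Behavioral sketch of the function above using plain file I/O instead of tf.io.gfile (the name `get_label_vocab_local` is hypothetical): one label per line, and an empty list when the path is unset or unreadable; the real function also logs the read error.

def get_label_vocab_local(vocab_path):
  if vocab_path:
    try:
      with open(vocab_path, 'r') as f:
        return [line.rstrip('\n') for line in f]
    except IOError:
      pass  # the real function logs the error here
  return []

print(get_label_vocab_local(None))  # []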
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
create_sprite_image
def create_sprite_image(examples):
  """Returns an encoded sprite image for use in Facets Dive.

  Args:
    examples: A list of serialized example protos to get images for.

  Returns:
    An encoded PNG.
  """

  def generate_image_from_thubnails(thumbnails, thumbnail_dims):
    """Generates a sprite atlas image from a set of thumbnails."""
    num_thumbnails = tf.shape(thumbnails)[0].eval()
    images_per_row = int(math.ceil(math.sqrt(num_thumbnails)))
    thumb_height = thumbnail_dims[0]
    thumb_width = thumbnail_dims[1]
    master_height = images_per_row * thumb_height
    master_width = images_per_row * thumb_width
    num_channels = 3
    master = np.zeros([master_height, master_width, num_channels])
    for idx, image in enumerate(thumbnails.eval()):
      left_idx = idx % images_per_row
      top_idx = int(math.floor(idx / images_per_row))
      left_start = left_idx * thumb_width
      left_end = left_start + thumb_width
      top_start = top_idx * thumb_height
      top_end = top_start + thumb_height
      master[top_start:top_end, left_start:left_end, :] = image
    return tf.image.encode_png(master)

  image_feature_name = 'image/encoded'
  sprite_thumbnail_dim_px = 32
  with tf.compat.v1.Session():
    keys_to_features = {
        image_feature_name:
            tf.FixedLenFeature((), tf.string, default_value=''),
    }
    parsed = tf.parse_example(examples, keys_to_features)
    images = tf.zeros([1, 1, 1, 1], tf.float32)
    i = tf.constant(0)
    thumbnail_dims = (sprite_thumbnail_dim_px,
                      sprite_thumbnail_dim_px)
    num_examples = tf.constant(len(examples))
    encoded_images = parsed[image_feature_name]

    # Loop over all examples, decoding the image feature value, resizing
    # and appending to a list of all images.
    def loop_body(i, encoded_images, images):
      encoded_image = encoded_images[i]
      image = tf.image.decode_jpeg(encoded_image, channels=3)
      resized_image = tf.image.resize(image, thumbnail_dims)
      expanded_image = tf.expand_dims(resized_image, 0)
      images = tf.cond(
          tf.equal(i, 0), lambda: expanded_image,
          lambda: tf.concat([images, expanded_image], 0))
      return i + 1, encoded_images, images

    loop_out = tf.while_loop(
        lambda i, encoded_images, images: tf.less(i, num_examples),
        loop_body, [i, encoded_images, images],
        shape_invariants=[
            i.get_shape(),
            encoded_images.get_shape(),
            tf.TensorShape(None)
        ])

    # Create the single sprite atlas image from these thumbnails.
    sprite = generate_image_from_thubnails(loop_out[2], thumbnail_dims)
    return sprite.eval()
python
def create_sprite_image(examples):
  """Returns an encoded sprite image for use in Facets Dive.

  Args:
    examples: A list of serialized example protos to get images for.

  Returns:
    An encoded PNG.
  """

  def generate_image_from_thubnails(thumbnails, thumbnail_dims):
    """Generates a sprite atlas image from a set of thumbnails."""
    num_thumbnails = tf.shape(thumbnails)[0].eval()
    images_per_row = int(math.ceil(math.sqrt(num_thumbnails)))
    thumb_height = thumbnail_dims[0]
    thumb_width = thumbnail_dims[1]
    master_height = images_per_row * thumb_height
    master_width = images_per_row * thumb_width
    num_channels = 3
    master = np.zeros([master_height, master_width, num_channels])
    for idx, image in enumerate(thumbnails.eval()):
      left_idx = idx % images_per_row
      top_idx = int(math.floor(idx / images_per_row))
      left_start = left_idx * thumb_width
      left_end = left_start + thumb_width
      top_start = top_idx * thumb_height
      top_end = top_start + thumb_height
      master[top_start:top_end, left_start:left_end, :] = image
    return tf.image.encode_png(master)

  image_feature_name = 'image/encoded'
  sprite_thumbnail_dim_px = 32
  with tf.compat.v1.Session():
    keys_to_features = {
        image_feature_name:
            tf.FixedLenFeature((), tf.string, default_value=''),
    }
    parsed = tf.parse_example(examples, keys_to_features)
    images = tf.zeros([1, 1, 1, 1], tf.float32)
    i = tf.constant(0)
    thumbnail_dims = (sprite_thumbnail_dim_px,
                      sprite_thumbnail_dim_px)
    num_examples = tf.constant(len(examples))
    encoded_images = parsed[image_feature_name]

    # Loop over all examples, decoding the image feature value, resizing
    # and appending to a list of all images.
    def loop_body(i, encoded_images, images):
      encoded_image = encoded_images[i]
      image = tf.image.decode_jpeg(encoded_image, channels=3)
      resized_image = tf.image.resize(image, thumbnail_dims)
      expanded_image = tf.expand_dims(resized_image, 0)
      images = tf.cond(
          tf.equal(i, 0), lambda: expanded_image,
          lambda: tf.concat([images, expanded_image], 0))
      return i + 1, encoded_images, images

    loop_out = tf.while_loop(
        lambda i, encoded_images, images: tf.less(i, num_examples),
        loop_body, [i, encoded_images, images],
        shape_invariants=[
            i.get_shape(),
            encoded_images.get_shape(),
            tf.TensorShape(None)
        ])

    # Create the single sprite atlas image from these thumbnails.
    sprite = generate_image_from_thubnails(loop_out[2], thumbnail_dims)
    return sprite.eval()
[ "def", "create_sprite_image", "(", "examples", ")", ":", "def", "generate_image_from_thubnails", "(", "thumbnails", ",", "thumbnail_dims", ")", ":", "\"\"\"Generates a sprite atlas image from a set of thumbnails.\"\"\"", "num_thumbnails", "=", "tf", ".", "shape", "(", "thumbnails", ")", "[", "0", "]", ".", "eval", "(", ")", "images_per_row", "=", "int", "(", "math", ".", "ceil", "(", "math", ".", "sqrt", "(", "num_thumbnails", ")", ")", ")", "thumb_height", "=", "thumbnail_dims", "[", "0", "]", "thumb_width", "=", "thumbnail_dims", "[", "1", "]", "master_height", "=", "images_per_row", "*", "thumb_height", "master_width", "=", "images_per_row", "*", "thumb_width", "num_channels", "=", "3", "master", "=", "np", ".", "zeros", "(", "[", "master_height", ",", "master_width", ",", "num_channels", "]", ")", "for", "idx", ",", "image", "in", "enumerate", "(", "thumbnails", ".", "eval", "(", ")", ")", ":", "left_idx", "=", "idx", "%", "images_per_row", "top_idx", "=", "int", "(", "math", ".", "floor", "(", "idx", "/", "images_per_row", ")", ")", "left_start", "=", "left_idx", "*", "thumb_width", "left_end", "=", "left_start", "+", "thumb_width", "top_start", "=", "top_idx", "*", "thumb_height", "top_end", "=", "top_start", "+", "thumb_height", "master", "[", "top_start", ":", "top_end", ",", "left_start", ":", "left_end", ",", ":", "]", "=", "image", "return", "tf", ".", "image", ".", "encode_png", "(", "master", ")", "image_feature_name", "=", "'image/encoded'", "sprite_thumbnail_dim_px", "=", "32", "with", "tf", ".", "compat", ".", "v1", ".", "Session", "(", ")", ":", "keys_to_features", "=", "{", "image_feature_name", ":", "tf", ".", "FixedLenFeature", "(", "(", ")", ",", "tf", ".", "string", ",", "default_value", "=", "''", ")", ",", "}", "parsed", "=", "tf", ".", "parse_example", "(", "examples", ",", "keys_to_features", ")", "images", "=", "tf", ".", "zeros", "(", "[", "1", ",", "1", ",", "1", ",", "1", "]", ",", "tf", ".", "float32", ")", "i", "=", "tf", ".", "constant", "(", "0", ")", "thumbnail_dims", "=", "(", "sprite_thumbnail_dim_px", ",", "sprite_thumbnail_dim_px", ")", "num_examples", "=", "tf", ".", "constant", "(", "len", "(", "examples", ")", ")", "encoded_images", "=", "parsed", "[", "image_feature_name", "]", "# Loop over all examples, decoding the image feature value, resizing", "# and appending to a list of all images.", "def", "loop_body", "(", "i", ",", "encoded_images", ",", "images", ")", ":", "encoded_image", "=", "encoded_images", "[", "i", "]", "image", "=", "tf", ".", "image", ".", "decode_jpeg", "(", "encoded_image", ",", "channels", "=", "3", ")", "resized_image", "=", "tf", ".", "image", ".", "resize", "(", "image", ",", "thumbnail_dims", ")", "expanded_image", "=", "tf", ".", "expand_dims", "(", "resized_image", ",", "0", ")", "images", "=", "tf", ".", "cond", "(", "tf", ".", "equal", "(", "i", ",", "0", ")", ",", "lambda", ":", "expanded_image", ",", "lambda", ":", "tf", ".", "concat", "(", "[", "images", ",", "expanded_image", "]", ",", "0", ")", ")", "return", "i", "+", "1", ",", "encoded_images", ",", "images", "loop_out", "=", "tf", ".", "while_loop", "(", "lambda", "i", ",", "encoded_images", ",", "images", ":", "tf", ".", "less", "(", "i", ",", "num_examples", ")", ",", "loop_body", ",", "[", "i", ",", "encoded_images", ",", "images", "]", ",", "shape_invariants", "=", "[", "i", ".", "get_shape", "(", ")", ",", "encoded_images", ".", "get_shape", "(", ")", ",", "tf", ".", "TensorShape", "(", "None", ")", "]", ")", "# Create the single sprite atlas image 
from these thumbnails.", "sprite", "=", "generate_image_from_thubnails", "(", "loop_out", "[", "2", "]", ",", "thumbnail_dims", ")", "return", "sprite", ".", "eval", "(", ")" ]
Returns an encoded sprite image for use in Facets Dive.

Args:
  examples: A list of serialized example protos to get images for.

Returns:
  An encoded PNG.
[ "Returns", "an", "encoded", "sprite", "image", "for", "use", "in", "Facets", "Dive", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L659-L727
train
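Pure-NumPy sketch of the atlas layout math in the row above (the helper name `layout_atlas` is hypothetical; PNG encoding is omitted): n thumbnails are placed row-major on a ceil(sqrt(n)) x ceil(sqrt(n)) grid inside one master array.

import math
import numpy as np

def layout_atlas(thumbnails):  # thumbnails shaped [n, height, width, channels]
  n, h, w, c = thumbnails.shape
  per_row = int(math.ceil(math.sqrt(n)))
  master = np.zeros([per_row * h, per_row * w, c], thumbnails.dtype)
  for idx, image in enumerate(thumbnails):
    # Row-major placement, matching the idx % / idx // arithmetic above.
    row, col = idx // per_row, idx % per_row
    master[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = image
  return master

print(layout_atlas(np.ones([5, 32, 32, 3])).shape)  # (96, 96, 3)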
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/utils/inference_utils.py
run_inference
def run_inference(examples, serving_bundle):
  """Run inference on examples given model information.

  Args:
    examples: A list of examples that matches the model spec.
    serving_bundle: A `ServingBundle` object that contains the information to
      make the inference request.

  Returns:
    A ClassificationResponse or RegressionResponse proto.
  """
  batch_size = 64
  if serving_bundle.estimator and serving_bundle.feature_spec:
    # If provided an estimator and feature spec then run inference locally.
    preds = serving_bundle.estimator.predict(
        lambda: tf.data.Dataset.from_tensor_slices(
            tf.parse_example([ex.SerializeToString() for ex in examples],
                             serving_bundle.feature_spec)).batch(batch_size))

    if serving_bundle.use_predict:
      preds_key = serving_bundle.predict_output_tensor
    elif serving_bundle.model_type == 'regression':
      preds_key = 'predictions'
    else:
      preds_key = 'probabilities'

    values = []
    for pred in preds:
      values.append(pred[preds_key])
    return common_utils.convert_prediction_values(values, serving_bundle)
  elif serving_bundle.custom_predict_fn:
    # If custom_predict_fn is provided, pass examples directly for local
    # inference.
    values = serving_bundle.custom_predict_fn(examples)
    return common_utils.convert_prediction_values(values, serving_bundle)
  else:
    return platform_utils.call_servo(examples, serving_bundle)
python
def run_inference(examples, serving_bundle):
  """Run inference on examples given model information.

  Args:
    examples: A list of examples that matches the model spec.
    serving_bundle: A `ServingBundle` object that contains the information to
      make the inference request.

  Returns:
    A ClassificationResponse or RegressionResponse proto.
  """
  batch_size = 64
  if serving_bundle.estimator and serving_bundle.feature_spec:
    # If provided an estimator and feature spec then run inference locally.
    preds = serving_bundle.estimator.predict(
        lambda: tf.data.Dataset.from_tensor_slices(
            tf.parse_example([ex.SerializeToString() for ex in examples],
                             serving_bundle.feature_spec)).batch(batch_size))

    if serving_bundle.use_predict:
      preds_key = serving_bundle.predict_output_tensor
    elif serving_bundle.model_type == 'regression':
      preds_key = 'predictions'
    else:
      preds_key = 'probabilities'

    values = []
    for pred in preds:
      values.append(pred[preds_key])
    return common_utils.convert_prediction_values(values, serving_bundle)
  elif serving_bundle.custom_predict_fn:
    # If custom_predict_fn is provided, pass examples directly for local
    # inference.
    values = serving_bundle.custom_predict_fn(examples)
    return common_utils.convert_prediction_values(values, serving_bundle)
  else:
    return platform_utils.call_servo(examples, serving_bundle)
[ "def", "run_inference", "(", "examples", ",", "serving_bundle", ")", ":", "batch_size", "=", "64", "if", "serving_bundle", ".", "estimator", "and", "serving_bundle", ".", "feature_spec", ":", "# If provided an estimator and feature spec then run inference locally.", "preds", "=", "serving_bundle", ".", "estimator", ".", "predict", "(", "lambda", ":", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "tf", ".", "parse_example", "(", "[", "ex", ".", "SerializeToString", "(", ")", "for", "ex", "in", "examples", "]", ",", "serving_bundle", ".", "feature_spec", ")", ")", ".", "batch", "(", "batch_size", ")", ")", "if", "serving_bundle", ".", "use_predict", ":", "preds_key", "=", "serving_bundle", ".", "predict_output_tensor", "elif", "serving_bundle", ".", "model_type", "==", "'regression'", ":", "preds_key", "=", "'predictions'", "else", ":", "preds_key", "=", "'probabilities'", "values", "=", "[", "]", "for", "pred", "in", "preds", ":", "values", ".", "append", "(", "pred", "[", "preds_key", "]", ")", "return", "common_utils", ".", "convert_prediction_values", "(", "values", ",", "serving_bundle", ")", "elif", "serving_bundle", ".", "custom_predict_fn", ":", "# If custom_predict_fn is provided, pass examples directly for local", "# inference.", "values", "=", "serving_bundle", ".", "custom_predict_fn", "(", "examples", ")", "return", "common_utils", ".", "convert_prediction_values", "(", "values", ",", "serving_bundle", ")", "else", ":", "return", "platform_utils", ".", "call_servo", "(", "examples", ",", "serving_bundle", ")" ]
Run inference on examples given model information.

Args:
  examples: A list of examples that matches the model spec.
  serving_bundle: A `ServingBundle` object that contains the information to
    make the inference request.

Returns:
  A ClassificationResponse or RegressionResponse proto.
[ "Run", "inference", "on", "examples", "given", "model", "information" ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L729-L765
train
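Standalone sketch of the key-selection logic in the local Estimator branch above, with a stubbed prediction iterator in place of estimator.predict(); the function name and its arguments are stand-ins mirroring ServingBundle fields, not the plugin API.

def extract_values(preds, model_type, use_predict=False, output_tensor=None):
  # Same precedence as run_inference: explicit output tensor, then
  # regression predictions, then classification probabilities.
  if use_predict:
    preds_key = output_tensor
  elif model_type == 'regression':
    preds_key = 'predictions'
  else:
    preds_key = 'probabilities'
  return [pred[preds_key] for pred in preds]

fake_preds = [{'probabilities': [0.9, 0.1]}, {'probabilities': [0.2, 0.8]}]
print(extract_values(fake_preds, model_type='classification'))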
tensorflow/tensorboard
tensorboard/backend/event_processing/reservoir.py
Reservoir.Items
def Items(self, key):
  """Return items associated with given key.

  Args:
    key: The key for which we are finding associated items.

  Raises:
    KeyError: If the key is not found in the reservoir.

  Returns:
    [list, of, items] associated with that key.
  """
  with self._mutex:
    if key not in self._buckets:
      raise KeyError('Key %s was not found in Reservoir' % key)
    bucket = self._buckets[key]
    return bucket.Items()
python
def Items(self, key):
  """Return items associated with given key.

  Args:
    key: The key for which we are finding associated items.

  Raises:
    KeyError: If the key is not found in the reservoir.

  Returns:
    [list, of, items] associated with that key.
  """
  with self._mutex:
    if key not in self._buckets:
      raise KeyError('Key %s was not found in Reservoir' % key)
    bucket = self._buckets[key]
    return bucket.Items()
[ "def", "Items", "(", "self", ",", "key", ")", ":", "with", "self", ".", "_mutex", ":", "if", "key", "not", "in", "self", ".", "_buckets", ":", "raise", "KeyError", "(", "'Key %s was not found in Reservoir'", "%", "key", ")", "bucket", "=", "self", ".", "_buckets", "[", "key", "]", "return", "bucket", ".", "Items", "(", ")" ]
Return items associated with given key.

Args:
  key: The key for which we are finding associated items.

Raises:
  KeyError: If the key is not found in the reservoir.

Returns:
  [list, of, items] associated with that key.
[ "Return", "items", "associated", "with", "given", "key", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/reservoir.py#L96-L112
train
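Minimal sketch of the locking pattern in Reservoir.Items: hold the mutex across both the membership check and the bucket read so a concurrent writer cannot race between them. TinyReservoir is a stand-in for illustration, not the real class (whose buckets carry their own locks).

import threading

class TinyReservoir(object):
  def __init__(self):
    self._mutex = threading.Lock()
    self._buckets = {}

  def Items(self, key):
    with self._mutex:
      if key not in self._buckets:
        raise KeyError('Key %s was not found in Reservoir' % key)
      return list(self._buckets[key])

r = TinyReservoir()
r._buckets['loss'] = [0.5, 0.25]
print(r.Items('loss'))  # [0.5, 0.25]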