repo | path | func_name | original_string | language | docstring | sha | url | partition
|---|---|---|---|---|---|---|---|---|
tensorflow/tensorboard | tensorboard/plugins/core/core_plugin.py | CorePluginLoader.define_flags | def define_flags(self, parser):
"""Adds standard TensorBoard CLI flags to parser."""
parser.add_argument(
'--logdir',
metavar='PATH',
type=str,
default='',
help='''\
Directory where TensorBoard will look to find TensorFlow event files
that it can display. TensorBoard will recursively walk the directory
structure rooted at logdir, looking for .*tfevents.* files.
You may also pass a comma separated list of log directories, and
TensorBoard will watch each directory. You can also assign names to
individual log directories by putting a colon between the name and the
path, as in:
`tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2`\
''')
parser.add_argument(
'--host',
metavar='ADDR',
type=str,
default='',
help='''\
What host to listen to. Defaults to serving on all interfaces. Other
commonly used values are 127.0.0.1 (localhost) and :: (for IPv6).\
''')
parser.add_argument(
'--port',
metavar='PORT',
type=lambda s: (None if s == "default" else int(s)),
default="default",
help='''\
Port to serve TensorBoard on. Pass 0 to request an unused port selected
by the operating system, or pass "default" to try to bind to the default
port (%s) but search for a nearby free port if the default port is
unavailable. (default: "default").\
''' % DEFAULT_PORT)
parser.add_argument(
'--purge_orphaned_data',
metavar='BOOL',
# Custom str-to-bool converter since regular bool() doesn't work.
type=lambda v: {'true': True, 'false': False}.get(v.lower(), v),
choices=[True, False],
default=True,
help='''\
Whether to purge data that may have been orphaned due to TensorBoard
restarts. Setting --purge_orphaned_data=False can be used to debug data
disappearance. (default: %(default)s)\
''')
parser.add_argument(
'--reload_interval',
metavar='SECONDS',
type=float,
default=5.0,
help='''\
How often the backend should load more data, in seconds. Set to 0 to
load just once at startup and a negative number to never reload at all.
Not relevant for DB read-only mode. (default: %(default)s)\
''')
parser.add_argument(
'--db',
metavar='URI',
type=str,
default='',
help='''\
[experimental] sets SQL database URI and enables DB backend mode, which is
read-only unless --db_import is also passed.\
''')
parser.add_argument(
'--db_import',
action='store_true',
help='''\
[experimental] enables DB read-and-import mode, which in combination with
--logdir imports event files into a DB backend on the fly. The backing DB is
temporary unless --db is also passed to specify a DB path to use.\
''')
parser.add_argument(
'--db_import_use_op',
action='store_true',
help='''\
[experimental] in combination with --db_import, if passed, use TensorFlow's
import_event() op for importing event data, otherwise use TensorBoard's own
sqlite ingestion logic.\
''')
parser.add_argument(
'--inspect',
action='store_true',
help='''\
Prints digests of event files to command line.
This is useful when no data is shown on TensorBoard, or the data shown
looks weird.
Must specify one of the `logdir` or `event_file` flags.
Example usage:
`tensorboard --inspect --logdir mylogdir --tag loss`
See tensorboard/backend/event_processing/event_file_inspector.py for more info.\
''')
# This flag has a "_tb" suffix to avoid conflicting with an internal flag
# named --version. Note that due to argparse auto-expansion of unambiguous
# flag prefixes, you can still invoke this as `tensorboard --version`.
parser.add_argument(
'--version_tb',
action='store_true',
help='Prints the version of TensorBoard')
parser.add_argument(
'--tag',
metavar='TAG',
type=str,
default='',
help='tag to query for; used with --inspect')
parser.add_argument(
'--event_file',
metavar='PATH',
type=str,
default='',
help='''\
The particular event file to query for. Only used if --inspect is
present and --logdir is not specified.\
''')
parser.add_argument(
'--path_prefix',
metavar='PATH',
type=str,
default='',
help='''\
An optional, relative prefix to the path, e.g. "/path/to/tensorboard",
resulting in the new base URL being located at
localhost:6006/path/to/tensorboard under default settings. A leading
slash is required when specifying the path_prefix; trailing slashes can
be omitted. The path_prefix can be leveraged for path-based routing of
an ELB when the website base_url is not available, e.g.
"example.site.com/path/to/tensorboard/".\
''')
parser.add_argument(
'--window_title',
metavar='TEXT',
type=str,
default='',
help='changes title of browser window')
parser.add_argument(
'--max_reload_threads',
metavar='COUNT',
type=int,
default=1,
help='''\
The max number of threads that TensorBoard can use to reload runs. Not
relevant for db read-only mode. Each thread reloads one run at a time.
(default: %(default)s)\
''')
parser.add_argument(
'--reload_task',
metavar='TYPE',
type=str,
default='auto',
choices=['auto', 'thread', 'process', 'blocking'],
help='''\
[experimental] The mechanism to use for the background data reload task.
The default "auto" option will conditionally use threads for legacy reloading
and a child process for DB import reloading. The "process" option is only
useful with DB import mode. The "blocking" option will block startup until
reload finishes, and requires --reload_interval=0. (default: %(default)s)\
''')
parser.add_argument(
'--samples_per_plugin',
type=str,
default='',
help='''\
An optional comma separated list of plugin_name=num_samples pairs to
explicitly specify how many samples to keep per tag for that plugin. For
unspecified plugins, TensorBoard randomly downsamples logged summaries
to reasonable values to prevent out-of-memory errors for long running
jobs. This flag allows fine control over that downsampling. Note that 0
means keep all samples of that type. For instance "scalars=500,images=0"
keeps 500 scalars and all images. Most users should not need to set this
flag.\
''') | python | Adds standard TensorBoard CLI flags to parser. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/core/core_plugin.py#L269-L465 | train |
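
The `--purge_orphaned_data` flag above relies on a small trick: `type=bool` is useless here because `bool('false')` is `True`, so a dict lookup converts the strings `'true'`/`'false'` to real booleans and `choices` rejects everything else. A minimal standalone sketch of the same pattern (plain argparse, not TensorBoard code):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--purge_orphaned_data',
    metavar='BOOL',
    # 'true'/'false' become real booleans; any other string passes through
    # unchanged, so choices=[True, False] rejects it with a clear error.
    type=lambda v: {'true': True, 'false': False}.get(v.lower(), v),
    choices=[True, False],
    default=True)

print(parser.parse_args([]).purge_orphaned_data)  # True (the default)
print(parser.parse_args(
    ['--purge_orphaned_data', 'false']).purge_orphaned_data)  # False
```
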
tensorflow/tensorboard | tensorboard/plugins/core/core_plugin.py | CorePluginLoader.fix_flags | def fix_flags(self, flags):
"""Fixes standard TensorBoard CLI flags to parser."""
FlagsError = base_plugin.FlagsError
if flags.version_tb:
pass
elif flags.inspect:
if flags.logdir and flags.event_file:
raise FlagsError(
'Must specify either --logdir or --event_file, but not both.')
if not (flags.logdir or flags.event_file):
raise FlagsError('Must specify either --logdir or --event_file.')
elif not flags.db and not flags.logdir:
raise FlagsError('A logdir or db must be specified. '
'For example `tensorboard --logdir mylogdir` '
'or `tensorboard --db sqlite:~/.tensorboard.db`. '
'Run `tensorboard --helpfull` for details and examples.')
if flags.path_prefix.endswith('/'):
flags.path_prefix = flags.path_prefix[:-1] | python | Fixes standard TensorBoard CLI flags after parsing. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/core/core_plugin.py#L467-L485 | train |
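
A self-contained sketch of the checks `fix_flags` performs, using a plain `argparse.Namespace` in place of parsed flags (field names mirror the flags defined above; the `--inspect` and `--version_tb` branches are omitted for brevity):

```python
import argparse

def check_flags(flags):
    # Mirrors fix_flags: require a data source, then normalize path_prefix.
    if not flags.db and not flags.logdir:
        raise ValueError('A logdir or db must be specified.')
    if flags.path_prefix.endswith('/'):
        flags.path_prefix = flags.path_prefix[:-1]
    return flags

flags = argparse.Namespace(db='', logdir='/tmp/logs',
                           path_prefix='/tensorboard/')
print(check_flags(flags).path_prefix)  # '/tensorboard'
```
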
tensorflow/tensorboard | tensorboard/plugins/debugger/comm_channel.py | CommChannel.put | def put(self, message):
"""Put a message into the outgoing message stack.
Outgoing messages will be stored indefinitely to support multiple users.
"""
with self._outgoing_lock:
self._outgoing.append(message)
self._outgoing_counter += 1
# Check to see if there are pending queues waiting for the item.
if self._outgoing_counter in self._outgoing_pending_queues:
for q in self._outgoing_pending_queues[self._outgoing_counter]:
q.put(message)
del self._outgoing_pending_queues[self._outgoing_counter] | python | Put a message into the outgoing message stack.
Outgoing messages will be stored indefinitely to support multiple users. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/comm_channel.py#L52-L65 | train |
tensorflow/tensorboard | tensorboard/plugins/debugger/comm_channel.py | CommChannel.get | def get(self, pos):
"""Get message(s) from the outgoing message stack.
Blocks until an item at stack position pos becomes available.
This method is thread safe.
Args:
pos: An int specifying the top position of the message stack to access.
For example, if the stack counter is at 3 and pos == 2, then the 2nd
item on the stack will be returned, together with an int that indicates
the current stack height (3 in this case).
Returns:
1. The item at stack position pos.
2. The height of the stack when the return values are generated.
Raises:
ValueError: If input `pos` is zero or negative.
"""
if pos <= 0:
raise ValueError('Invalid pos %d: pos must be > 0' % pos)
with self._outgoing_lock:
if self._outgoing_counter >= pos:
# If the stack already has the requested position, return the value
# immediately.
return self._outgoing[pos - 1], self._outgoing_counter
else:
# If the stack has not reached the requested position yet, create a
# queue and block on get().
if pos not in self._outgoing_pending_queues:
self._outgoing_pending_queues[pos] = []
q = queue.Queue(maxsize=1)
self._outgoing_pending_queues[pos].append(q)
value = q.get()
with self._outgoing_lock:
return value, self._outgoing_counter | python | Get message(s) from the outgoing message stack.
Blocks until an item at stack position pos becomes available.
This method is thread safe.
Args:
pos: An int specifying the top position of the message stack to access.
For example, if the stack counter is at 3 and pos == 2, then the 2nd
item on the stack will be returned, together with an int that indicates
the current stack heigh (3 in this case).
Returns:
1. The item at stack position pos.
2. The height of the stack when the retun values are generated.
Raises:
ValueError: If input `pos` is zero or negative. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/comm_channel.py#L67-L103 | train |
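
Hypothetical usage of `put` and `get` together, assuming `CommChannel` is importable from the module path shown in these rows and takes no constructor arguments: a consumer thread blocks on stack position 1 until the producer publishes the first message.

```python
import threading

from tensorboard.plugins.debugger.comm_channel import CommChannel

channel = CommChannel()  # assumed zero-argument constructor

def consumer():
    message, height = channel.get(1)  # blocks until the first put()
    print('got %r at stack height %d' % (message, height))

t = threading.Thread(target=consumer)
t.start()
channel.put({'event': 'step_done'})  # unblocks the consumer
t.join()
```
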
tensorflow/tensorboard | tensorboard/plugins/custom_scalar/custom_scalar_demo.py | run | def run():
"""Run custom scalar demo and generate event files."""
step = tf.compat.v1.placeholder(tf.float32, shape=[])
with tf.name_scope('loss'):
# Specify 2 different loss values, each tagged differently.
summary_lib.scalar('foo', tf.pow(0.9, step))
summary_lib.scalar('bar', tf.pow(0.85, step + 2))
# Log metric baz as well as upper and lower bounds for a margin chart.
middle_baz_value = step + 4 * tf.random.uniform([]) - 2
summary_lib.scalar('baz', middle_baz_value)
summary_lib.scalar('baz_lower',
middle_baz_value - 6.42 - tf.random.uniform([]))
summary_lib.scalar('baz_upper',
middle_baz_value + 6.42 + tf.random.uniform([]))
with tf.name_scope('trigFunctions'):
summary_lib.scalar('cosine', tf.cos(step))
summary_lib.scalar('sine', tf.sin(step))
summary_lib.scalar('tangent', tf.tan(step))
merged_summary = tf.compat.v1.summary.merge_all()
with tf.compat.v1.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
# We only need to specify the layout once (instead of per step).
layout_summary = summary_lib.custom_scalar_pb(
layout_pb2.Layout(category=[
layout_pb2.Category(
title='losses',
chart=[
layout_pb2.Chart(
title='losses',
multiline=layout_pb2.MultilineChartContent(
tag=[r'loss(?!.*margin.*)'],)),
layout_pb2.Chart(
title='baz',
margin=layout_pb2.MarginChartContent(
series=[
layout_pb2.MarginChartContent.Series(
value='loss/baz/scalar_summary',
lower='loss/baz_lower/scalar_summary',
upper='loss/baz_upper/scalar_summary'
),
],)),
]),
layout_pb2.Category(
title='trig functions',
chart=[
layout_pb2.Chart(
title='wave trig functions',
multiline=layout_pb2.MultilineChartContent(
tag=[
r'trigFunctions/cosine', r'trigFunctions/sine'
],)),
# The range of tangent is different. Give it its own chart.
layout_pb2.Chart(
title='tan',
multiline=layout_pb2.MultilineChartContent(
tag=[r'trigFunctions/tangent'],)),
],
# This category we care less about. Make it initially closed.
closed=True),
]))
writer.add_summary(layout_summary)
for i in xrange(42):
summary = sess.run(merged_summary, feed_dict={step: i})
writer.add_summary(summary, global_step=i) | python | Run custom scalar demo and generate event files. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/custom_scalar/custom_scalar_demo.py#L35-L103 | train |
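
The layout proto is the heart of the demo above. A smaller sketch of the same API (import paths assumed to match the demo's): one category holding a single chart whose regex is matched against scalar tags.

```python
from tensorboard import summary as summary_lib  # assumed import path
from tensorboard.plugins.custom_scalar import layout_pb2

layout_summary = summary_lib.custom_scalar_pb(
    layout_pb2.Layout(category=[
        layout_pb2.Category(
            title='losses',
            chart=[
                layout_pb2.Chart(
                    title='all losses',
                    multiline=layout_pb2.MultilineChartContent(
                        tag=[r'loss.*'])),  # regex over scalar tags
            ]),
    ]))
# As in the demo, write it once: writer.add_summary(layout_summary)
```
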
tensorflow/tensorboard | tensorboard/plugins/interactive_inference/interactive_inference_plugin_loader.py | InteractiveInferencePluginLoader.load | def load(self, context):
"""Returns the plugin, if possible.
Args:
context: The TBContext flags.
Returns:
An InteractiveInferencePlugin instance, or None if it couldn't be loaded.
"""
try:
# pylint: disable=g-import-not-at-top,unused-import
import tensorflow
except ImportError:
return
# pylint: disable=line-too-long,g-import-not-at-top
from tensorboard.plugins.interactive_inference.interactive_inference_plugin import InteractiveInferencePlugin
return InteractiveInferencePlugin(context) | python | Returns the plugin, if possible.
Args:
context: The TBContext flags.
Returns:
An InteractiveInferencePlugin instance, or None if it couldn't be loaded. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/interactive_inference_plugin_loader.py#L30-L46 | train |
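
The guarded import in `load` is a general pattern for optional dependencies: probe for the heavy module first and return `None` so the host application can skip the plugin instead of crashing at import time. In miniature (module names hypothetical):

```python
def load_plugin():
    try:
        import tensorflow  # noqa: F401 -- probe only, deliberately unused
    except ImportError:
        return None  # dependency missing: signal "plugin unavailable"
    from my_project.plugin import Plugin  # hypothetical plugin module
    return Plugin()
```
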
tensorflow/tensorboard | tensorboard/plugins/projector/__init__.py | visualize_embeddings | def visualize_embeddings(summary_writer, config):
"""Stores a config file used by the embedding projector.
Args:
summary_writer: The summary writer used for writing events.
config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
proto that holds the configuration for the projector such as paths to
checkpoint files and metadata files for the embeddings. If
`config.model_checkpoint_path` is `None`, it defaults to the
`logdir` used by the summary_writer.
Raises:
ValueError: If the summary writer does not have a `logdir`.
"""
logdir = summary_writer.get_logdir()
# Sanity checks.
if logdir is None:
raise ValueError('Summary writer must have a logdir')
# Saving the config file in the logdir.
config_pbtxt = _text_format.MessageToString(config)
path = os.path.join(logdir, _projector_plugin.PROJECTOR_FILENAME)
with tf.io.gfile.GFile(path, 'w') as f:
f.write(config_pbtxt) | python | Stores a config file used by the embedding projector.
Args:
summary_writer: The summary writer used for writing events.
config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
proto that holds the configuration for the projector such as paths to
checkpoint files and metadata files for the embeddings. If
`config.model_checkpoint_path` is none, it defaults to the
`logdir` used by the summary_writer.
Raises:
ValueError: If the summary writer does not have a `logdir`. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/projector/__init__.py#L38-L62 | train |
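
A sketch of calling `visualize_embeddings` end to end, assuming `ProjectorConfig` is re-exported by this package (tensor and metadata names are illustrative). The writer must be a TF1-style `FileWriter`, since the function calls `get_logdir()` on it.

```python
import tensorflow as tf
from tensorboard.plugins.projector import (  # assumed re-exports
    ProjectorConfig, visualize_embeddings)

writer = tf.compat.v1.summary.FileWriter('/tmp/logs')
config = ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = 'word_embedding'            # illustrative name
embedding.metadata_path = '/tmp/logs/metadata.tsv'  # illustrative path
visualize_embeddings(writer, config)  # writes projector config to logdir
```
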
tensorflow/tensorboard | tensorboard/compat/tensorflow_stub/flags.py | _wrap_define_function | def _wrap_define_function(original_function):
"""Wraps absl.flags's define functions so tf.flags accepts old names."""
def wrapper(*args, **kwargs):
"""Wrapper function that turns old keyword names to new ones."""
has_old_names = False
for old_name, new_name in _six.iteritems(_RENAMED_ARGUMENTS):
if old_name in kwargs:
has_old_names = True
value = kwargs.pop(old_name)
kwargs[new_name] = value
if has_old_names:
_logging.warning(
"Use of the keyword argument names (flag_name, default_value, "
"docstring) is deprecated, please use (name, default, help) instead."
)
return original_function(*args, **kwargs)
return wrapper | python | Wraps absl.flags's define functions so tf.flags accepts old names. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/compat/tensorflow_stub/flags.py#L41-L59 | train |
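
A self-contained sketch of the renaming shim. The old-to-new mapping is inferred from the warning text above (`flag_name` -> `name`, `default_value` -> `default`, `docstring` -> `help`); the real `_RENAMED_ARGUMENTS` is defined elsewhere in the module.

```python
import logging

_RENAMED = {'flag_name': 'name', 'default_value': 'default',
            'docstring': 'help'}

def wrap(fn):
    def wrapper(*args, **kwargs):
        for old, new in _RENAMED.items():
            if old in kwargs:
                logging.warning('%r is deprecated; use %r instead', old, new)
                kwargs[new] = kwargs.pop(old)
        return fn(*args, **kwargs)
    return wrapper

@wrap
def define_string(name, default, help):  # mimics an absl define function
    print(name, default, help)

define_string(flag_name='logdir', default_value='', docstring='Log dir.')
```
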
tensorflow/tensorboard | tensorboard/plugins/hparams/metrics.py | run_tag_from_session_and_metric | def run_tag_from_session_and_metric(session_name, metric_name):
"""Returns a (run,tag) tuple storing the evaluations of the specified metric.
Args:
session_name: str.
metric_name: MetricName protobuf.
Returns: (run, tag) tuple.
"""
assert isinstance(session_name, six.string_types)
assert isinstance(metric_name, api_pb2.MetricName)
# os.path.join() will append a final slash if the group is empty; it seems
# like multiplexer.Tensors won't recognize paths that end with a '/' so
# we normalize the result of os.path.join() to remove the final '/' in that
# case.
run = os.path.normpath(os.path.join(session_name, metric_name.group))
tag = metric_name.tag
return run, tag | python | Returns a (run,tag) tuple storing the evaluations of the specified metric.
Args:
session_name: str.
metric_name: MetricName protobuf.
Returns: (run, tag) tuple. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/metrics.py#L29-L45 | train |
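
A worked example of the `normpath` detail called out in the comment: with an empty metric group, `os.path.join` leaves a trailing slash, which `normpath` strips, so the run name equals the session name.

```python
import os

print(os.path.normpath(os.path.join('session_1', '')))      # session_1
print(os.path.normpath(os.path.join('session_1', 'eval')))  # session_1/eval
```
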
tensorflow/tensorboard | tensorboard/plugins/hparams/metrics.py | last_metric_eval | def last_metric_eval(multiplexer, session_name, metric_name):
"""Returns the last evaluations of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Returns:
A 3-tuple of the form [wall-time, step, value], denoting
the last evaluation of the metric, where wall-time denotes the wall time
in seconds since UNIX epoch of the time of the evaluation, step denotes
the training step at which the model is evaluated, and value denotes the
(scalar real) value of the metric.
Raises:
KeyError if the given session does not have the metric.
"""
try:
run, tag = run_tag_from_session_and_metric(session_name, metric_name)
tensor_events = multiplexer.Tensors(run=run, tag=tag)
except KeyError as e:
raise KeyError(
'Can\'t find metric %s for session: %s. Underlying error message: %s'
% (metric_name, session_name, e))
last_event = tensor_events[-1]
# TODO(erez): Raise HParamsError if the tensor is not a 0-D real scalar.
return (last_event.wall_time,
last_event.step,
tf.make_ndarray(last_event.tensor_proto).item()) | python | Returns the last evaluation of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Returns:
A 3-tuple of the form [wall-time, step, value], denoting
the last evaluation of the metric, where wall-time denotes the wall time
in seconds since UNIX epoch of the time of the evaluation, step denotes
the training step at which the model is evaluated, and value denotes the
(scalar real) value of the metric.
Raises:
KeyError if the given session does not have the metric. | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/metrics.py#L48-L79 | train |
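
The final extraction step in isolation: `tf.make_ndarray(...).item()` turns a 0-D tensor proto back into a plain Python scalar (round trip shown here with `tf.make_tensor_proto`).

```python
import tensorflow as tf

proto = tf.make_tensor_proto(0.25)     # a 0-D float TensorProto
value = tf.make_ndarray(proto).item()  # 0-D ndarray -> Python float
print(value)                           # 0.25
```
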
tensorflow/tensorboard | tensorboard/plugins/scalar/scalars_plugin.py | ScalarsPlugin.index_impl | def index_impl(self):
"""Return {runName: {tagName: {displayName: ..., description: ...}}}."""
if self._db_connection_provider:
# Read tags from the database.
db = self._db_connection_provider()
cursor = db.execute('''
SELECT
Tags.tag_name,
Tags.display_name,
Runs.run_name
FROM Tags
JOIN Runs
ON Tags.run_id = Runs.run_id
WHERE
Tags.plugin_name = ?
''', (metadata.PLUGIN_NAME,))
result = collections.defaultdict(dict)
for row in cursor:
tag_name, display_name, run_name = row
result[run_name][tag_name] = {
'displayName': display_name,
# TODO(chihuahua): Populate the description. Currently, the tags
# table does not link with the description table.
'description': '',
}
return result
runs = self._multiplexer.Runs()
result = {run: {} for run in runs}
mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
content = metadata.parse_plugin_metadata(content)
summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
result[run][tag] = {'displayName': summary_metadata.display_name,
'description': plugin_util.markdown_to_safe_html(
summary_metadata.summary_description)}
return result | python | def index_impl(self):
"""Return {runName: {tagName: {displayName: ..., description: ...}}}."""
if self._db_connection_provider:
# Read tags from the database.
db = self._db_connection_provider()
cursor = db.execute('''
SELECT
Tags.tag_name,
Tags.display_name,
Runs.run_name
FROM Tags
JOIN Runs
ON Tags.run_id = Runs.run_id
WHERE
Tags.plugin_name = ?
''', (metadata.PLUGIN_NAME,))
result = collections.defaultdict(dict)
for row in cursor:
tag_name, display_name, run_name = row
result[run_name][tag_name] = {
'displayName': display_name,
# TODO(chihuahua): Populate the description. Currently, the tags
# table does not link with the description table.
'description': '',
}
return result
runs = self._multiplexer.Runs()
result = {run: {} for run in runs}
mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
content = metadata.parse_plugin_metadata(content)
summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
result[run][tag] = {'displayName': summary_metadata.display_name,
'description': plugin_util.markdown_to_safe_html(
summary_metadata.summary_description)}
return result | [
"def",
"index_impl",
"(",
"self",
")",
":",
"if",
"self",
".",
"_db_connection_provider",
":",
"# Read tags from the database.",
"db",
"=",
"self",
".",
"_db_connection_provider",
"(",
")",
"cursor",
"=",
"db",
".",
"execute",
"(",
"'''\n SELECT\n Tags.tag_name,\n Tags.display_name,\n Runs.run_name\n FROM Tags\n JOIN Runs\n ON Tags.run_id = Runs.run_id\n WHERE\n Tags.plugin_name = ?\n '''",
",",
"(",
"metadata",
".",
"PLUGIN_NAME",
",",
")",
")",
"result",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"for",
"row",
"in",
"cursor",
":",
"tag_name",
",",
"display_name",
",",
"run_name",
"=",
"row",
"result",
"[",
"run_name",
"]",
"[",
"tag_name",
"]",
"=",
"{",
"'displayName'",
":",
"display_name",
",",
"# TODO(chihuahua): Populate the description. Currently, the tags",
"# table does not link with the description table.",
"'description'",
":",
"''",
",",
"}",
"return",
"result",
"runs",
"=",
"self",
".",
"_multiplexer",
".",
"Runs",
"(",
")",
"result",
"=",
"{",
"run",
":",
"{",
"}",
"for",
"run",
"in",
"runs",
"}",
"mapping",
"=",
"self",
".",
"_multiplexer",
".",
"PluginRunToTagToContent",
"(",
"metadata",
".",
"PLUGIN_NAME",
")",
"for",
"(",
"run",
",",
"tag_to_content",
")",
"in",
"six",
".",
"iteritems",
"(",
"mapping",
")",
":",
"for",
"(",
"tag",
",",
"content",
")",
"in",
"six",
".",
"iteritems",
"(",
"tag_to_content",
")",
":",
"content",
"=",
"metadata",
".",
"parse_plugin_metadata",
"(",
"content",
")",
"summary_metadata",
"=",
"self",
".",
"_multiplexer",
".",
"SummaryMetadata",
"(",
"run",
",",
"tag",
")",
"result",
"[",
"run",
"]",
"[",
"tag",
"]",
"=",
"{",
"'displayName'",
":",
"summary_metadata",
".",
"display_name",
",",
"'description'",
":",
"plugin_util",
".",
"markdown_to_safe_html",
"(",
"summary_metadata",
".",
"summary_description",
")",
"}",
"return",
"result"
] | Return {runName: {tagName: {displayName: ..., description: ...}}}. | [
"Return",
"{",
"runName",
":",
"{",
"tagName",
":",
"{",
"displayName",
":",
"...",
"description",
":",
"...",
"}}}",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/scalar/scalars_plugin.py#L86-L125 | train |
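For context, a minimal sketch of consuming the mapping index_impl returns; `scalars_plugin` is a hypothetical, already-constructed ScalarsPlugin instance, not something defined in the row above.

# Hypothetical usage sketch; `scalars_plugin` stands in for a constructed
# ScalarsPlugin backed by either a DB connection provider or a multiplexer.
index = scalars_plugin.index_impl()
for run, tags in index.items():
    for tag, info in tags.items():
        # Every entry carries a display name plus an HTML-safe description
        # (empty in the DB branch, per the TODO in the function body).
        print(run, tag, info['displayName'], repr(info['description']))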
tensorflow/tensorboard | tensorboard/plugins/scalar/scalars_plugin.py | ScalarsPlugin.scalars_impl | def scalars_impl(self, tag, run, experiment, output_format):
"""Result of the form `(body, mime_type)`."""
if self._db_connection_provider:
db = self._db_connection_provider()
# We select for steps greater than -1 because the writer inserts
# placeholder rows en masse. The check for step filters out those rows.
cursor = db.execute('''
SELECT
Tensors.step,
Tensors.computed_time,
Tensors.data,
Tensors.dtype
FROM Tensors
JOIN Tags
ON Tensors.series = Tags.tag_id
JOIN Runs
ON Tags.run_id = Runs.run_id
WHERE
/* For backwards compatibility, ignore the experiment id
for matching purposes if it is empty. */
(:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))
AND Runs.run_name = :run
AND Tags.tag_name = :tag
AND Tags.plugin_name = :plugin
AND Tensors.shape = ''
AND Tensors.step > -1
ORDER BY Tensors.step
''', dict(exp=experiment, run=run, tag=tag, plugin=metadata.PLUGIN_NAME))
values = [(wall_time, step, self._get_value(data, dtype_enum))
for (step, wall_time, data, dtype_enum) in cursor]
else:
tensor_events = self._multiplexer.Tensors(run, tag)
values = [(tensor_event.wall_time,
tensor_event.step,
tensor_util.make_ndarray(tensor_event.tensor_proto).item())
for tensor_event in tensor_events]
if output_format == OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return (string_io.getvalue(), 'text/csv')
else:
return (values, 'application/json') | python | def scalars_impl(self, tag, run, experiment, output_format):
"""Result of the form `(body, mime_type)`."""
if self._db_connection_provider:
db = self._db_connection_provider()
# We select for steps greater than -1 because the writer inserts
# placeholder rows en masse. The check for step filters out those rows.
cursor = db.execute('''
SELECT
Tensors.step,
Tensors.computed_time,
Tensors.data,
Tensors.dtype
FROM Tensors
JOIN Tags
ON Tensors.series = Tags.tag_id
JOIN Runs
ON Tags.run_id = Runs.run_id
WHERE
/* For backwards compatibility, ignore the experiment id
for matching purposes if it is empty. */
(:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))
AND Runs.run_name = :run
AND Tags.tag_name = :tag
AND Tags.plugin_name = :plugin
AND Tensors.shape = ''
AND Tensors.step > -1
ORDER BY Tensors.step
''', dict(exp=experiment, run=run, tag=tag, plugin=metadata.PLUGIN_NAME))
values = [(wall_time, step, self._get_value(data, dtype_enum))
for (step, wall_time, data, dtype_enum) in cursor]
else:
tensor_events = self._multiplexer.Tensors(run, tag)
values = [(tensor_event.wall_time,
tensor_event.step,
tensor_util.make_ndarray(tensor_event.tensor_proto).item())
for tensor_event in tensor_events]
if output_format == OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return (string_io.getvalue(), 'text/csv')
else:
return (values, 'application/json') | [
"def",
"scalars_impl",
"(",
"self",
",",
"tag",
",",
"run",
",",
"experiment",
",",
"output_format",
")",
":",
"if",
"self",
".",
"_db_connection_provider",
":",
"db",
"=",
"self",
".",
"_db_connection_provider",
"(",
")",
"# We select for steps greater than -1 because the writer inserts",
"# placeholder rows en masse. The check for step filters out those rows.",
"cursor",
"=",
"db",
".",
"execute",
"(",
"'''\n SELECT\n Tensors.step,\n Tensors.computed_time,\n Tensors.data,\n Tensors.dtype\n FROM Tensors\n JOIN Tags\n ON Tensors.series = Tags.tag_id\n JOIN Runs\n ON Tags.run_id = Runs.run_id\n WHERE\n /* For backwards compatibility, ignore the experiment id\n for matching purposes if it is empty. */\n (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))\n AND Runs.run_name = :run\n AND Tags.tag_name = :tag\n AND Tags.plugin_name = :plugin\n AND Tensors.shape = ''\n AND Tensors.step > -1\n ORDER BY Tensors.step\n '''",
",",
"dict",
"(",
"exp",
"=",
"experiment",
",",
"run",
"=",
"run",
",",
"tag",
"=",
"tag",
",",
"plugin",
"=",
"metadata",
".",
"PLUGIN_NAME",
")",
")",
"values",
"=",
"[",
"(",
"wall_time",
",",
"step",
",",
"self",
".",
"_get_value",
"(",
"data",
",",
"dtype_enum",
")",
")",
"for",
"(",
"step",
",",
"wall_time",
",",
"data",
",",
"dtype_enum",
")",
"in",
"cursor",
"]",
"else",
":",
"tensor_events",
"=",
"self",
".",
"_multiplexer",
".",
"Tensors",
"(",
"run",
",",
"tag",
")",
"values",
"=",
"[",
"(",
"tensor_event",
".",
"wall_time",
",",
"tensor_event",
".",
"step",
",",
"tensor_util",
".",
"make_ndarray",
"(",
"tensor_event",
".",
"tensor_proto",
")",
".",
"item",
"(",
")",
")",
"for",
"tensor_event",
"in",
"tensor_events",
"]",
"if",
"output_format",
"==",
"OutputFormat",
".",
"CSV",
":",
"string_io",
"=",
"StringIO",
"(",
")",
"writer",
"=",
"csv",
".",
"writer",
"(",
"string_io",
")",
"writer",
".",
"writerow",
"(",
"[",
"'Wall time'",
",",
"'Step'",
",",
"'Value'",
"]",
")",
"writer",
".",
"writerows",
"(",
"values",
")",
"return",
"(",
"string_io",
".",
"getvalue",
"(",
")",
",",
"'text/csv'",
")",
"else",
":",
"return",
"(",
"values",
",",
"'application/json'",
")"
] | Result of the form `(body, mime_type)`. | [
"Result",
"of",
"the",
"form",
"(",
"body",
"mime_type",
")",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/scalar/scalars_plugin.py#L127-L171 | train |
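A hedged sketch of calling scalars_impl in both output modes, assuming the plugin module's OutputFormat constants are importable alongside the class; `scalars_plugin` is again an already-constructed instance.

# Hypothetical sketch: fetch one tag's scalars as CSV, then as raw triples.
body, mime = scalars_plugin.scalars_impl('loss', 'run_1', '', OutputFormat.CSV)
assert mime == 'text/csv'          # CSV branch returns serialized text
values, mime = scalars_plugin.scalars_impl('loss', 'run_1', '', None)
assert mime == 'application/json'  # default branch returns raw triples
for wall_time, step, value in values:
    print(step, value)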
tensorflow/tensorboard | tensorboard/plugins/scalar/scalars_plugin.py | ScalarsPlugin._get_value | def _get_value(self, scalar_data_blob, dtype_enum):
"""Obtains value for scalar event given blob and dtype enum.
Args:
scalar_data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
Returns:
The scalar value.
"""
tensorflow_dtype = tf.DType(dtype_enum)
buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
return np.asscalar(buf) | python | def _get_value(self, scalar_data_blob, dtype_enum):
"""Obtains value for scalar event given blob and dtype enum.
Args:
scalar_data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
Returns:
The scalar value.
"""
tensorflow_dtype = tf.DType(dtype_enum)
buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
return np.asscalar(buf) | [
"def",
"_get_value",
"(",
"self",
",",
"scalar_data_blob",
",",
"dtype_enum",
")",
":",
"tensorflow_dtype",
"=",
"tf",
".",
"DType",
"(",
"dtype_enum",
")",
"buf",
"=",
"np",
".",
"frombuffer",
"(",
"scalar_data_blob",
",",
"dtype",
"=",
"tensorflow_dtype",
".",
"as_numpy_dtype",
")",
"return",
"np",
".",
"asscalar",
"(",
"buf",
")"
] | Obtains value for scalar event given blob and dtype enum.
Args:
scalar_data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
Returns:
The scalar value. | [
"Obtains",
"value",
"for",
"scalar",
"event",
"given",
"blob",
"and",
"dtype",
"enum",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/scalar/scalars_plugin.py#L173-L185 | train |
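Note that np.asscalar was deprecated in NumPy 1.16 and removed in 1.23; a self-contained sketch of the same decode using the equivalent ndarray.item():

import numpy as np
import tensorflow as tf

def get_value(scalar_data_blob, dtype_enum):
    # Same decode as _get_value above, with buf.item() standing in for the
    # now-removed np.asscalar(buf).
    tensorflow_dtype = tf.DType(dtype_enum)
    buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
    return buf.item()

blob = np.float32(3.14).tobytes()
print(get_value(blob, tf.float32.as_datatype_enum))  # ~3.14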
tensorflow/tensorboard | tensorboard/plugins/scalar/scalars_plugin.py | ScalarsPlugin.scalars_route | def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO: return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
experiment = request.args.get('experiment')
output_format = request.args.get('format')
(body, mime_type) = self.scalars_impl(tag, run, experiment, output_format)
return http_util.Respond(request, body, mime_type) | python | def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO: return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
experiment = request.args.get('experiment')
output_format = request.args.get('format')
(body, mime_type) = self.scalars_impl(tag, run, experiment, output_format)
return http_util.Respond(request, body, mime_type) | [
"def",
"scalars_route",
"(",
"self",
",",
"request",
")",
":",
"# TODO: return HTTP status code for malformed requests",
"tag",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'tag'",
")",
"run",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'run'",
")",
"experiment",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'experiment'",
")",
"output_format",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'format'",
")",
"(",
"body",
",",
"mime_type",
")",
"=",
"self",
".",
"scalars_impl",
"(",
"tag",
",",
"run",
",",
"experiment",
",",
"output_format",
")",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"body",
",",
"mime_type",
")"
] | Given a tag and single run, return array of ScalarEvents. | [
"Given",
"a",
"tag",
"and",
"single",
"run",
"return",
"array",
"of",
"ScalarEvents",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/scalar/scalars_plugin.py#L193-L201 | train |
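A sketch of exercising the handler with werkzeug's test helpers, calling it directly with a Request per the signature shown; in the real plugin the method is typically wrapped as a WSGI application, so this deliberately bypasses that layer.

from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request

# Hypothetical direct call; query parameters mirror request.args above.
environ = EnvironBuilder(query_string={
    'tag': 'loss', 'run': 'run_1', 'experiment': '', 'format': 'csv',
}).get_environ()
response = scalars_plugin.scalars_route(Request(environ))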
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.AddRun | def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
name = name or path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(@dandelionmane) - Make it impossible to overwrite an old path
# with a new path (just give the new path a distinct name)
logger.warn('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logger.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
tensor_size_guidance=self._tensor_size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self | python | def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
name = name or path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(@dandelionmane) - Make it impossible to overwrite an old path
# with a new path (just give the new path a distinct name)
logger.warn('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logger.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
tensor_size_guidance=self._tensor_size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self | [
"def",
"AddRun",
"(",
"self",
",",
"path",
",",
"name",
"=",
"None",
")",
":",
"name",
"=",
"name",
"or",
"path",
"accumulator",
"=",
"None",
"with",
"self",
".",
"_accumulators_mutex",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_accumulators",
"or",
"self",
".",
"_paths",
"[",
"name",
"]",
"!=",
"path",
":",
"if",
"name",
"in",
"self",
".",
"_paths",
"and",
"self",
".",
"_paths",
"[",
"name",
"]",
"!=",
"path",
":",
"# TODO(@dandelionmane) - Make it impossible to overwrite an old path",
"# with a new path (just give the new path a distinct name)",
"logger",
".",
"warn",
"(",
"'Conflict for name %s: old path %s, new path %s'",
",",
"name",
",",
"self",
".",
"_paths",
"[",
"name",
"]",
",",
"path",
")",
"logger",
".",
"info",
"(",
"'Constructing EventAccumulator for %s'",
",",
"path",
")",
"accumulator",
"=",
"event_accumulator",
".",
"EventAccumulator",
"(",
"path",
",",
"size_guidance",
"=",
"self",
".",
"_size_guidance",
",",
"tensor_size_guidance",
"=",
"self",
".",
"_tensor_size_guidance",
",",
"purge_orphaned_data",
"=",
"self",
".",
"purge_orphaned_data",
")",
"self",
".",
"_accumulators",
"[",
"name",
"]",
"=",
"accumulator",
"self",
".",
"_paths",
"[",
"name",
"]",
"=",
"path",
"if",
"accumulator",
":",
"if",
"self",
".",
"_reload_called",
":",
"accumulator",
".",
"Reload",
"(",
")",
"return",
"self"
] | Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`. | [
"Add",
"a",
"run",
"to",
"the",
"multiplexer",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L114-L153 | train |
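A minimal sketch of building a multiplexer and adding runs by hand; the log paths are placeholders.

from tensorboard.backend.event_processing import plugin_event_multiplexer

multiplexer = plugin_event_multiplexer.EventMultiplexer()
multiplexer.AddRun('/tmp/logs/run_1')                   # name defaults to path
multiplexer.AddRun('/tmp/logs/run_2', name='baseline')  # explicit run name
multiplexer.Reload()  # afterwards, AddRun() reloads new accumulators itself
print(sorted(multiplexer.Runs()))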
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.AddRunsFromDirectory | def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
called "name" is added, containing the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
logger.info('Starting AddRunsFromDirectory: %s', path)
for subdir in io_wrapper.GetLogdirSubdirectories(path):
logger.info('Adding run from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
logger.info('Done with AddRunsFromDirectory: %s', path)
return self | python | def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
called "name" is added, containing the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
logger.info('Starting AddRunsFromDirectory: %s', path)
for subdir in io_wrapper.GetLogdirSubdirectories(path):
logger.info('Adding run from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
logger.info('Done with AddRunsFromDirectory: %s', path)
return self | [
"def",
"AddRunsFromDirectory",
"(",
"self",
",",
"path",
",",
"name",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"'Starting AddRunsFromDirectory: %s'",
",",
"path",
")",
"for",
"subdir",
"in",
"io_wrapper",
".",
"GetLogdirSubdirectories",
"(",
"path",
")",
":",
"logger",
".",
"info",
"(",
"'Adding run from directory %s'",
",",
"subdir",
")",
"rpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"subdir",
",",
"path",
")",
"subname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"name",
",",
"rpath",
")",
"if",
"name",
"else",
"rpath",
"self",
".",
"AddRun",
"(",
"subdir",
",",
"name",
"=",
"subname",
")",
"logger",
".",
"info",
"(",
"'Done with AddRunsFromDirectory: %s'",
",",
"path",
")",
"return",
"self"
] | Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
called "name" is added, containing the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`. | [
"Load",
"runs",
"from",
"a",
"directory",
";",
"recursively",
"walks",
"subdirectories",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L155-L189 | train |
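A sketch of the recursive variant, assuming /tmp/logs/exp_a and /tmp/logs/exp_b each contain event files.

multiplexer = plugin_event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory('/tmp/logs', name='sweep')
multiplexer.Reload()
# Subruns are named parent-name/relative-path, e.g. 'sweep/exp_a'.
print(sorted(multiplexer.Runs()))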
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.Reload | def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
logger.info('Beginning EventMultiplexer.Reload()')
self._reload_called = True
# Build a list so we're safe even if the list of accumulators is modified
# while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
items_queue = queue.Queue()
for item in items:
items_queue.put(item)
# Methods of built-in python containers are thread-safe so long as the GIL
# for the thread exists, but we might as well be careful.
names_to_delete = set()
names_to_delete_mutex = threading.Lock()
def Worker():
"""Keeps reloading accumulators til none are left."""
while True:
try:
name, accumulator = items_queue.get(block=False)
except queue.Empty:
# No more runs to reload.
break
try:
accumulator.Reload()
except (OSError, IOError) as e:
logger.error('Unable to reload accumulator %r: %s', name, e)
except directory_watcher.DirectoryDeletedError:
with names_to_delete_mutex:
names_to_delete.add(name)
finally:
items_queue.task_done()
if self._max_reload_threads > 1:
num_threads = min(
self._max_reload_threads, len(items))
logger.info('Starting %d threads to reload runs', num_threads)
for i in xrange(num_threads):
thread = threading.Thread(target=Worker, name='Reloader %d' % i)
thread.daemon = True
thread.start()
items_queue.join()
else:
logger.info(
'Reloading runs serially (one after another) on the main '
'thread.')
Worker()
with self._accumulators_mutex:
for name in names_to_delete:
logger.warn('Deleting accumulator %r', name)
del self._accumulators[name]
logger.info('Finished with EventMultiplexer.Reload()')
return self | python | def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
logger.info('Beginning EventMultiplexer.Reload()')
self._reload_called = True
# Build a list so we're safe even if the list of accumulators is modified
# while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
items_queue = queue.Queue()
for item in items:
items_queue.put(item)
# Methods of built-in python containers are thread-safe so long as the GIL
# for the thread exists, but we might as well be careful.
names_to_delete = set()
names_to_delete_mutex = threading.Lock()
def Worker():
"""Keeps reloading accumulators til none are left."""
while True:
try:
name, accumulator = items_queue.get(block=False)
except queue.Empty:
# No more runs to reload.
break
try:
accumulator.Reload()
except (OSError, IOError) as e:
logger.error('Unable to reload accumulator %r: %s', name, e)
except directory_watcher.DirectoryDeletedError:
with names_to_delete_mutex:
names_to_delete.add(name)
finally:
items_queue.task_done()
if self._max_reload_threads > 1:
num_threads = min(
self._max_reload_threads, len(items))
logger.info('Starting %d threads to reload runs', num_threads)
for i in xrange(num_threads):
thread = threading.Thread(target=Worker, name='Reloader %d' % i)
thread.daemon = True
thread.start()
items_queue.join()
else:
logger.info(
'Reloading runs serially (one after another) on the main '
'thread.')
Worker()
with self._accumulators_mutex:
for name in names_to_delete:
logger.warn('Deleting accumulator %r', name)
del self._accumulators[name]
logger.info('Finished with EventMultiplexer.Reload()')
return self | [
"def",
"Reload",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Beginning EventMultiplexer.Reload()'",
")",
"self",
".",
"_reload_called",
"=",
"True",
"# Build a list so we're safe even if the list of accumulators is modified",
"# even while we're reloading.",
"with",
"self",
".",
"_accumulators_mutex",
":",
"items",
"=",
"list",
"(",
"self",
".",
"_accumulators",
".",
"items",
"(",
")",
")",
"items_queue",
"=",
"queue",
".",
"Queue",
"(",
")",
"for",
"item",
"in",
"items",
":",
"items_queue",
".",
"put",
"(",
"item",
")",
"# Methods of built-in python containers are thread-safe so long as the GIL",
"# for the thread exists, but we might as well be careful.",
"names_to_delete",
"=",
"set",
"(",
")",
"names_to_delete_mutex",
"=",
"threading",
".",
"Lock",
"(",
")",
"def",
"Worker",
"(",
")",
":",
"\"\"\"Keeps reloading accumulators til none are left.\"\"\"",
"while",
"True",
":",
"try",
":",
"name",
",",
"accumulator",
"=",
"items_queue",
".",
"get",
"(",
"block",
"=",
"False",
")",
"except",
"queue",
".",
"Empty",
":",
"# No more runs to reload.",
"break",
"try",
":",
"accumulator",
".",
"Reload",
"(",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Unable to reload accumulator %r: %s'",
",",
"name",
",",
"e",
")",
"except",
"directory_watcher",
".",
"DirectoryDeletedError",
":",
"with",
"names_to_delete_mutex",
":",
"names_to_delete",
".",
"add",
"(",
"name",
")",
"finally",
":",
"items_queue",
".",
"task_done",
"(",
")",
"if",
"self",
".",
"_max_reload_threads",
">",
"1",
":",
"num_threads",
"=",
"min",
"(",
"self",
".",
"_max_reload_threads",
",",
"len",
"(",
"items",
")",
")",
"logger",
".",
"info",
"(",
"'Starting %d threads to reload runs'",
",",
"num_threads",
")",
"for",
"i",
"in",
"xrange",
"(",
"num_threads",
")",
":",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"Worker",
",",
"name",
"=",
"'Reloader %d'",
"%",
"i",
")",
"thread",
".",
"daemon",
"=",
"True",
"thread",
".",
"start",
"(",
")",
"items_queue",
".",
"join",
"(",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Reloading runs serially (one after another) on the main '",
"'thread.'",
")",
"Worker",
"(",
")",
"with",
"self",
".",
"_accumulators_mutex",
":",
"for",
"name",
"in",
"names_to_delete",
":",
"logger",
".",
"warn",
"(",
"'Deleting accumulator %r'",
",",
"name",
")",
"del",
"self",
".",
"_accumulators",
"[",
"name",
"]",
"logger",
".",
"info",
"(",
"'Finished with EventMultiplexer.Reload()'",
")",
"return",
"self"
] | Call `Reload` on every `EventAccumulator`. | [
"Call",
"Reload",
"on",
"every",
"EventAccumulator",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L191-L247 | train |
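The interesting part of Reload() is the drain-a-queue worker pattern; a generic, standalone Python 3 sketch of the same shape:

import threading
import queue

work = queue.Queue()
for name in ['run_a', 'run_b', 'run_c']:
    work.put(name)

def worker():
    # Pop until empty, mirroring Worker() above; task_done() in a finally
    # block guarantees the join() below cannot deadlock on a failed item.
    while True:
        try:
            name = work.get(block=False)
        except queue.Empty:
            break
        try:
            print('reloading', name)  # stand-in for accumulator.Reload()
        finally:
            work.task_done()

for i in range(2):
    thread = threading.Thread(target=worker, name='Reloader %d' % i)
    thread.daemon = True
    thread.start()
work.join()  # blocks until every queued item is marked done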
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.PluginAssets | def PluginAssets(self, plugin_name):
"""Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run.
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run: accum.PluginAssets(plugin_name) for run, accum in items} | python | def PluginAssets(self, plugin_name):
"""Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run.
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run: accum.PluginAssets(plugin_name) for run, accum in items} | [
"def",
"PluginAssets",
"(",
"self",
",",
"plugin_name",
")",
":",
"with",
"self",
".",
"_accumulators_mutex",
":",
"# To avoid nested locks, we construct a copy of the run-accumulator map",
"items",
"=",
"list",
"(",
"six",
".",
"iteritems",
"(",
"self",
".",
"_accumulators",
")",
")",
"return",
"{",
"run",
":",
"accum",
".",
"PluginAssets",
"(",
"plugin_name",
")",
"for",
"run",
",",
"accum",
"in",
"items",
"}"
] | Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run. | [
"Get",
"index",
"of",
"runs",
"and",
"assets",
"for",
"a",
"given",
"plugin",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L249-L263 | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.RetrievePluginAsset | def RetrievePluginAsset(self, run, plugin_name, asset_name):
"""Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RetrievePluginAsset(plugin_name, asset_name) | python | def RetrievePluginAsset(self, run, plugin_name, asset_name):
"""Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RetrievePluginAsset(plugin_name, asset_name) | [
"def",
"RetrievePluginAsset",
"(",
"self",
",",
"run",
",",
"plugin_name",
",",
"asset_name",
")",
":",
"accumulator",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
"return",
"accumulator",
".",
"RetrievePluginAsset",
"(",
"plugin_name",
",",
"asset_name",
")"
] | Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available. | [
"Return",
"the",
"contents",
"for",
"a",
"specific",
"plugin",
"asset",
"from",
"a",
"run",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L265-L280 | train |
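A sketch combining the two asset methods above; 'projector' is only an illustrative plugin name, and `multiplexer` is an already-loaded EventMultiplexer.

assets_by_run = multiplexer.PluginAssets('projector')  # hypothetical plugin
for run, assets in assets_by_run.items():
    for asset_name in assets:
        try:
            contents = multiplexer.RetrievePluginAsset(
                run, 'projector', asset_name)
        except KeyError:
            continue  # asset disappeared between listing and retrieval
        print(run, asset_name, len(contents))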
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.Scalars | def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Scalars(tag) | python | def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Scalars(tag) | [
"def",
"Scalars",
"(",
"self",
",",
"run",
",",
"tag",
")",
":",
"accumulator",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
"return",
"accumulator",
".",
"Scalars",
"(",
"tag",
")"
] | Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`. | [
"Retrieve",
"the",
"scalar",
"events",
"associated",
"with",
"a",
"run",
"and",
"tag",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L302-L317 | train |
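A sketch of reading scalars back out, guarding the documented KeyError:

try:
    events = multiplexer.Scalars('run_1', 'loss')
except KeyError:
    events = []  # unknown run or tag
for event in events:
    # ScalarEvent fields per the accumulator: wall_time, step, value.
    print(event.wall_time, event.step, event.value)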
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.RunMetadata | def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RunMetadata(tag) | python | def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RunMetadata(tag) | [
"def",
"RunMetadata",
"(",
"self",
",",
"run",
",",
"tag",
")",
":",
"accumulator",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
"return",
"accumulator",
".",
"RunMetadata",
"(",
"tag",
")"
] | Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure. | [
"Get",
"the",
"session",
".",
"run",
"()",
"metadata",
"associated",
"with",
"a",
"TensorFlow",
"run",
"and",
"tag",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L351-L366 | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.Audio | def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag) | python | def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag) | [
"def",
"Audio",
"(",
"self",
",",
"run",
",",
"tag",
")",
":",
"accumulator",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
"return",
"accumulator",
".",
"Audio",
"(",
"tag",
")"
] | Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`. | [
"Retrieve",
"the",
"audio",
"events",
"associated",
"with",
"a",
"run",
"and",
"tag",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L368-L383 | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.Tensors | def Tensors(self, run, tag):
"""Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Tensors(tag) | python | def Tensors(self, run, tag):
"""Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Tensors(tag) | [
"def",
"Tensors",
"(",
"self",
",",
"run",
",",
"tag",
")",
":",
"accumulator",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
"return",
"accumulator",
".",
"Tensors",
"(",
"tag",
")"
] | Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s. | [
"Retrieve",
"the",
"tensor",
"events",
"associated",
"with",
"a",
"run",
"and",
"tag",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L385-L400 | train |
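Tensor events round-trip through TensorProto; a sketch using the same make_ndarray helper the scalars plugin uses above (the import path is an assumption of this sketch):

from tensorboard.util import tensor_util  # path assumed for this sketch

for event in multiplexer.Tensors('run_1', 'my_tensor'):
    array = tensor_util.make_ndarray(event.tensor_proto)
    print(event.step, array.dtype, array.shape)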
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.PluginRunToTagToContent | def PluginRunToTagToContent(self, plugin_name):
"""Returns a 2-layer dictionary of the form {run: {tag: content}}.
The `content` referred above is the content field of the PluginData proto
for the specified plugin within a Summary.Value proto.
Args:
plugin_name: The name of the plugin for which to fetch content.
Returns:
A dictionary of the form {run: {tag: content}}.
"""
mapping = {}
for run in self.Runs():
try:
tag_to_content = self.GetAccumulator(run).PluginTagToContent(
plugin_name)
except KeyError:
# This run lacks content for the plugin. Try the next run.
continue
mapping[run] = tag_to_content
return mapping | python | def PluginRunToTagToContent(self, plugin_name):
"""Returns a 2-layer dictionary of the form {run: {tag: content}}.
The `content` referred above is the content field of the PluginData proto
for the specified plugin within a Summary.Value proto.
Args:
plugin_name: The name of the plugin for which to fetch content.
Returns:
A dictionary of the form {run: {tag: content}}.
"""
mapping = {}
for run in self.Runs():
try:
tag_to_content = self.GetAccumulator(run).PluginTagToContent(
plugin_name)
except KeyError:
# This run lacks content for the plugin. Try the next run.
continue
mapping[run] = tag_to_content
return mapping | [
"def",
"PluginRunToTagToContent",
"(",
"self",
",",
"plugin_name",
")",
":",
"mapping",
"=",
"{",
"}",
"for",
"run",
"in",
"self",
".",
"Runs",
"(",
")",
":",
"try",
":",
"tag_to_content",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
".",
"PluginTagToContent",
"(",
"plugin_name",
")",
"except",
"KeyError",
":",
"# This run lacks content for the plugin. Try the next run.",
"continue",
"mapping",
"[",
"run",
"]",
"=",
"tag_to_content",
"return",
"mapping"
] | Returns a 2-layer dictionary of the form {run: {tag: content}}.
The `content` referred above is the content field of the PluginData proto
for the specified plugin within a Summary.Value proto.
Args:
plugin_name: The name of the plugin for which to fetch content.
Returns:
A dictionary of the form {run: {tag: content}}. | [
"Returns",
"a",
"2",
"-",
"layer",
"dictionary",
"of",
"the",
"form",
"{",
"run",
":",
"{",
"tag",
":",
"content",
"}}",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L402-L423 | train |
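A sketch of walking the two-level mapping; each content value is the raw serialized PluginData.content bytes and normally needs the owning plugin's parse_plugin_metadata() to decode, as index_impl does above.

mapping = multiplexer.PluginRunToTagToContent('scalars')
for run, tag_to_content in mapping.items():
    for tag, content in tag_to_content.items():
        print(run, tag, '%d bytes of plugin content' % len(content))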
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.SummaryMetadata | def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf.
"""
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag) | python | def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf.
"""
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag) | [
"def",
"SummaryMetadata",
"(",
"self",
",",
"run",
",",
"tag",
")",
":",
"accumulator",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
"return",
"accumulator",
".",
"SummaryMetadata",
"(",
"tag",
")"
] | Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf. | [
"Return",
"the",
"summary",
"metadata",
"for",
"the",
"given",
"tag",
"on",
"the",
"given",
"run",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L425-L442 | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/plugin_event_multiplexer.py | EventMultiplexer.Runs | def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items} | python | def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items} | [
"def",
"Runs",
"(",
"self",
")",
":",
"with",
"self",
".",
"_accumulators_mutex",
":",
"# To avoid nested locks, we construct a copy of the run-accumulator map",
"items",
"=",
"list",
"(",
"six",
".",
"iteritems",
"(",
"self",
".",
"_accumulators",
")",
")",
"return",
"{",
"run_name",
":",
"accumulator",
".",
"Tags",
"(",
")",
"for",
"run_name",
",",
"accumulator",
"in",
"items",
"}"
] | Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
``` | [
"Return",
"all",
"the",
"run",
"names",
"in",
"the",
"EventMultiplexer",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/plugin_event_multiplexer.py#L444-L456 | train |
tensorflow/tensorboard | tensorboard/plugins/text/summary_v2.py | text | def text(name, data, step=None, description=None):
"""Write a text summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A UTF-8 string tensor value.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, 'summary_scope', None) or
tf.summary.summary_scope)
with summary_scope(
name, 'text_summary', values=[data, step]) as (tag, _):
tf.debugging.assert_type(data, tf.string)
return tf.summary.write(
tag=tag, tensor=data, step=step, metadata=summary_metadata) | python | def text(name, data, step=None, description=None):
"""Write a text summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A UTF-8 string tensor value.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, 'summary_scope', None) or
tf.summary.summary_scope)
with summary_scope(
name, 'text_summary', values=[data, step]) as (tag, _):
tf.debugging.assert_type(data, tf.string)
return tf.summary.write(
tag=tag, tensor=data, step=step, metadata=summary_metadata) | [
"def",
"text",
"(",
"name",
",",
"data",
",",
"step",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"None",
",",
"description",
"=",
"description",
")",
"# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback",
"summary_scope",
"=",
"(",
"getattr",
"(",
"tf",
".",
"summary",
".",
"experimental",
",",
"'summary_scope'",
",",
"None",
")",
"or",
"tf",
".",
"summary",
".",
"summary_scope",
")",
"with",
"summary_scope",
"(",
"name",
",",
"'text_summary'",
",",
"values",
"=",
"[",
"data",
",",
"step",
"]",
")",
"as",
"(",
"tag",
",",
"_",
")",
":",
"tf",
".",
"debugging",
".",
"assert_type",
"(",
"data",
",",
"tf",
".",
"string",
")",
"return",
"tf",
".",
"summary",
".",
"write",
"(",
"tag",
"=",
"tag",
",",
"tensor",
"=",
"data",
",",
"step",
"=",
"step",
",",
"metadata",
"=",
"summary_metadata",
")"
] | Write a text summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A UTF-8 string tensor value.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None. | [
"Write",
"a",
"text",
"summary",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/summary_v2.py#L29-L60 | train |
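A sketch of calling this from TF2 eager code with an explicit step, so the get_step() fallback never triggers; the logdir is a placeholder.

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs/text_demo')
with writer.as_default():
    ok = text('greeting', tf.constant('hello *world*'), step=0,
              description='Rendered as Markdown in TensorBoard.')
print(ok)  # True once the default writer accepted the summary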
tensorflow/tensorboard | tensorboard/plugins/text/summary_v2.py | text_pb | def text_pb(tag, data, description=None):
"""Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
try:
tensor = tensor_util.make_tensor_proto(data, dtype=np.object)
except TypeError as e:
raise TypeError('tensor must be of type string', e)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
metadata=summary_metadata,
tensor=tensor)
return summary | python | def text_pb(tag, data, description=None):
"""Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
try:
tensor = tensor_util.make_tensor_proto(data, dtype=np.object)
except TypeError as e:
raise TypeError('tensor must be of type string', e)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
metadata=summary_metadata,
tensor=tensor)
return summary | [
"def",
"text_pb",
"(",
"tag",
",",
"data",
",",
"description",
"=",
"None",
")",
":",
"try",
":",
"tensor",
"=",
"tensor_util",
".",
"make_tensor_proto",
"(",
"data",
",",
"dtype",
"=",
"np",
".",
"object",
")",
"except",
"TypeError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"'tensor must be of type string'",
",",
"e",
")",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"None",
",",
"description",
"=",
"description",
")",
"summary",
"=",
"summary_pb2",
".",
"Summary",
"(",
")",
"summary",
".",
"value",
".",
"add",
"(",
"tag",
"=",
"tag",
",",
"metadata",
"=",
"summary_metadata",
",",
"tensor",
"=",
"tensor",
")",
"return",
"summary"
] | Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object. | [
"Create",
"a",
"text",
"tf",
".",
"Summary",
"protobuf",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/summary_v2.py#L63-L89 | train |
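A sketch writing the proto via the TF1-style FileWriter; serializing first sidesteps any mismatch between TensorBoard's and TensorFlow's generated Summary classes.

import tensorflow.compat.v1 as tf1

pb = text_pb('notes/intro', u'Plain text or **markdown**')
writer = tf1.summary.FileWriter('/tmp/logs/text_pb_demo')
writer.add_summary(pb.SerializeToString(), global_step=0)
writer.close()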
tensorflow/tensorboard | tensorboard/main.py | run_main | def run_main():
"""Initializes flags and calls main()."""
program.setup_environment()
if getattr(tf, '__version__', 'stub') == 'stub':
print("TensorFlow installation not found - running with reduced feature set.",
file=sys.stderr)
tensorboard = program.TensorBoard(default.get_plugins(),
program.get_default_assets_zip_provider())
try:
from absl import app
# Import this to check that app.run() will accept the flags_parser argument.
from absl.flags import argparse_flags
app.run(tensorboard.main, flags_parser=tensorboard.configure)
raise AssertionError("absl.app.run() shouldn't return")
except ImportError:
pass
except base_plugin.FlagsError as e:
print("Error: %s" % e, file=sys.stderr)
sys.exit(1)
tensorboard.configure(sys.argv)
sys.exit(tensorboard.main()) | python | def run_main():
"""Initializes flags and calls main()."""
program.setup_environment()
if getattr(tf, '__version__', 'stub') == 'stub':
print("TensorFlow installation not found - running with reduced feature set.",
file=sys.stderr)
tensorboard = program.TensorBoard(default.get_plugins(),
program.get_default_assets_zip_provider())
try:
from absl import app
# Import this to check that app.run() will accept the flags_parser argument.
from absl.flags import argparse_flags
app.run(tensorboard.main, flags_parser=tensorboard.configure)
raise AssertionError("absl.app.run() shouldn't return")
except ImportError:
pass
except base_plugin.FlagsError as e:
print("Error: %s" % e, file=sys.stderr)
sys.exit(1)
tensorboard.configure(sys.argv)
sys.exit(tensorboard.main()) | [
"def",
"run_main",
"(",
")",
":",
"program",
".",
"setup_environment",
"(",
")",
"if",
"getattr",
"(",
"tf",
",",
"'__version__'",
",",
"'stub'",
")",
"==",
"'stub'",
":",
"print",
"(",
"\"TensorFlow installation not found - running with reduced feature set.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"tensorboard",
"=",
"program",
".",
"TensorBoard",
"(",
"default",
".",
"get_plugins",
"(",
")",
",",
"program",
".",
"get_default_assets_zip_provider",
"(",
")",
")",
"try",
":",
"from",
"absl",
"import",
"app",
"# Import this to check that app.run() will accept the flags_parser argument.",
"from",
"absl",
".",
"flags",
"import",
"argparse_flags",
"app",
".",
"run",
"(",
"tensorboard",
".",
"main",
",",
"flags_parser",
"=",
"tensorboard",
".",
"configure",
")",
"raise",
"AssertionError",
"(",
"\"absl.app.run() shouldn't return\"",
")",
"except",
"ImportError",
":",
"pass",
"except",
"base_plugin",
".",
"FlagsError",
"as",
"e",
":",
"print",
"(",
"\"Error: %s\"",
"%",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"tensorboard",
".",
"configure",
"(",
"sys",
".",
"argv",
")",
"sys",
".",
"exit",
"(",
"tensorboard",
".",
"main",
"(",
")",
")"
] | Initializes flags and calls main(). | [
"Initializes",
"flags",
"and",
"calls",
"main",
"()",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/main.py#L49-L72 | train |
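run_main() is the console entry point behind the tensorboard command; a sketch of invoking it programmatically (the argv contents are placeholders):

import sys
from tensorboard import main as tb_main

sys.argv = ['tensorboard', '--logdir', '/tmp/logs']
tb_main.run_main()  # never returns normally; terminates via sys.exit()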
tensorflow/tensorboard | tensorboard/plugins/image/metadata.py | create_summary_metadata | def create_summary_metadata(display_name, description):
"""Create a `summary_pb2.SummaryMetadata` proto for image plugin data.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object.
"""
content = plugin_data_pb2.ImagePluginData(version=PROTO_VERSION)
metadata = summary_pb2.SummaryMetadata(
display_name=display_name,
summary_description=description,
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME,
content=content.SerializeToString()))
return metadata | python | def create_summary_metadata(display_name, description):
"""Create a `summary_pb2.SummaryMetadata` proto for image plugin data.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object.
"""
content = plugin_data_pb2.ImagePluginData(version=PROTO_VERSION)
metadata = summary_pb2.SummaryMetadata(
display_name=display_name,
summary_description=description,
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME,
content=content.SerializeToString()))
return metadata | [
"def",
"create_summary_metadata",
"(",
"display_name",
",",
"description",
")",
":",
"content",
"=",
"plugin_data_pb2",
".",
"ImagePluginData",
"(",
"version",
"=",
"PROTO_VERSION",
")",
"metadata",
"=",
"summary_pb2",
".",
"SummaryMetadata",
"(",
"display_name",
"=",
"display_name",
",",
"summary_description",
"=",
"description",
",",
"plugin_data",
"=",
"summary_pb2",
".",
"SummaryMetadata",
".",
"PluginData",
"(",
"plugin_name",
"=",
"PLUGIN_NAME",
",",
"content",
"=",
"content",
".",
"SerializeToString",
"(",
")",
")",
")",
"return",
"metadata"
] | Create a `summary_pb2.SummaryMetadata` proto for image plugin data.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object. | [
"Create",
"a",
"summary_pb2",
".",
"SummaryMetadata",
"proto",
"for",
"image",
"plugin",
"data",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/metadata.py#L34-L47 | train |
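A sketch round-tripping the proto to show what it carries; FromString is the standard protobuf deserializer.

md = create_summary_metadata('my images', 'sample batch')
content = plugin_data_pb2.ImagePluginData.FromString(md.plugin_data.content)
print(md.plugin_data.plugin_name, content.version)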
tensorflow/tensorboard | tensorboard/plugins/audio/summary.py | op | def op(name,
audio,
sample_rate,
labels=None,
max_outputs=3,
encoding=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy audio summary op for use in a TensorFlow graph.
Arguments:
name: A unique name for the generated summary node.
audio: A `Tensor` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
be statically unknown (i.e., `None`).
sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
sample rate, in Hz. Must be positive.
labels: Optional `string` `Tensor`, a vector whose length is the
first dimension of `audio`, where `labels[i]` contains arbitrary
textual information about `audio[i]`. (For instance, this could be
some text that a TTS system was supposed to produce.) Markdown is
supported. Contents should be UTF-8.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many audio clips will be emitted at each step. When more than
`max_outputs` many clips are provided, the first `max_outputs`
many clips will be used and the rest silently discarded.
encoding: A constant `str` (not string tensor) indicating the
desired encoding. You can choose any format you like, as long as
it's "wav". Please see the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow # for contrib
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
if encoding is None:
encoding = 'wav'
if encoding == 'wav':
encoding = metadata.Encoding.Value('WAV')
encoder = functools.partial(tensorflow.contrib.ffmpeg.encode_audio,
samples_per_second=sample_rate,
file_format='wav')
else:
raise ValueError('Unknown encoding: %r' % encoding)
with tf.name_scope(name), \
tf.control_dependencies([tf.assert_rank(audio, 3)]):
limited_audio = audio[:max_outputs]
encoded_audio = tf.map_fn(encoder, limited_audio,
dtype=tf.string,
name='encode_each_audio')
if labels is None:
limited_labels = tf.tile([''], tf.shape(input=limited_audio)[:1])
else:
limited_labels = labels[:max_outputs]
tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
summary_metadata = metadata.create_summary_metadata(
display_name=display_name,
description=description,
encoding=encoding)
return tf.summary.tensor_summary(name='audio_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | python | def op(name,
audio,
sample_rate,
labels=None,
max_outputs=3,
encoding=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy audio summary op for use in a TensorFlow graph.
Arguments:
name: A unique name for the generated summary node.
audio: A `Tensor` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
be statically unknown (i.e., `None`).
sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
sample rate, in Hz. Must be positive.
labels: Optional `string` `Tensor`, a vector whose length is the
first dimension of `audio`, where `labels[i]` contains arbitrary
textual information about `audio[i]`. (For instance, this could be
some text that a TTS system was supposed to produce.) Markdown is
supported. Contents should be UTF-8.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many audio clips will be emitted at each step. When more than
`max_outputs` many clips are provided, the first `max_outputs`
many clips will be used and the rest silently discarded.
encoding: A constant `str` (not string tensor) indicating the
desired encoding. You can choose any format you like, as long as
it's "wav". Please see the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow # for contrib
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
if encoding is None:
encoding = 'wav'
if encoding == 'wav':
encoding = metadata.Encoding.Value('WAV')
encoder = functools.partial(tensorflow.contrib.ffmpeg.encode_audio,
samples_per_second=sample_rate,
file_format='wav')
else:
raise ValueError('Unknown encoding: %r' % encoding)
with tf.name_scope(name), \
tf.control_dependencies([tf.assert_rank(audio, 3)]):
limited_audio = audio[:max_outputs]
encoded_audio = tf.map_fn(encoder, limited_audio,
dtype=tf.string,
name='encode_each_audio')
if labels is None:
limited_labels = tf.tile([''], tf.shape(input=limited_audio)[:1])
else:
limited_labels = labels[:max_outputs]
tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
summary_metadata = metadata.create_summary_metadata(
display_name=display_name,
description=description,
encoding=encoding)
return tf.summary.tensor_summary(name='audio_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | [
"def",
"op",
"(",
"name",
",",
"audio",
",",
"sample_rate",
",",
"labels",
"=",
"None",
",",
"max_outputs",
"=",
"3",
",",
"encoding",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"collections",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
"# for contrib",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"display_name",
"is",
"None",
":",
"display_name",
"=",
"name",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"'wav'",
"if",
"encoding",
"==",
"'wav'",
":",
"encoding",
"=",
"metadata",
".",
"Encoding",
".",
"Value",
"(",
"'WAV'",
")",
"encoder",
"=",
"functools",
".",
"partial",
"(",
"tensorflow",
".",
"contrib",
".",
"ffmpeg",
".",
"encode_audio",
",",
"samples_per_second",
"=",
"sample_rate",
",",
"file_format",
"=",
"'wav'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown encoding: %r'",
"%",
"encoding",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
",",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"assert_rank",
"(",
"audio",
",",
"3",
")",
"]",
")",
":",
"limited_audio",
"=",
"audio",
"[",
":",
"max_outputs",
"]",
"encoded_audio",
"=",
"tf",
".",
"map_fn",
"(",
"encoder",
",",
"limited_audio",
",",
"dtype",
"=",
"tf",
".",
"string",
",",
"name",
"=",
"'encode_each_audio'",
")",
"if",
"labels",
"is",
"None",
":",
"limited_labels",
"=",
"tf",
".",
"tile",
"(",
"[",
"''",
"]",
",",
"tf",
".",
"shape",
"(",
"input",
"=",
"limited_audio",
")",
"[",
":",
"1",
"]",
")",
"else",
":",
"limited_labels",
"=",
"labels",
"[",
":",
"max_outputs",
"]",
"tensor",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"tf",
".",
"stack",
"(",
"[",
"encoded_audio",
",",
"limited_labels",
"]",
")",
")",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"display_name",
",",
"description",
"=",
"description",
",",
"encoding",
"=",
"encoding",
")",
"return",
"tf",
".",
"summary",
".",
"tensor_summary",
"(",
"name",
"=",
"'audio_summary'",
",",
"tensor",
"=",
"tensor",
",",
"collections",
"=",
"collections",
",",
"summary_metadata",
"=",
"summary_metadata",
")"
] | Create a legacy audio summary op for use in a TensorFlow graph.
Arguments:
name: A unique name for the generated summary node.
audio: A `Tensor` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
be statically unknown (i.e., `None`).
sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
sample rate, in Hz. Must be positive.
labels: Optional `string` `Tensor`, a vector whose length is the
first dimension of `audio`, where `labels[i]` contains arbitrary
textual information about `audio[i]`. (For instance, this could be
some text that a TTS system was supposed to produce.) Markdown is
supported. Contents should be UTF-8.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many audio clips will be emitted at each step. When more than
`max_outputs` many clips are provided, the first `max_outputs`
many clips will be used and the rest silently discarded.
encoding: A constant `str` (not string tensor) indicating the
desired encoding. You can choose any format you like, as long as
it's "wav". Please see the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly. | [
"Create",
"a",
"legacy",
"audio",
"summary",
"op",
"for",
"use",
"in",
"a",
"TensorFlow",
"graph",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/summary.py#L44-L128 | train |
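A usage sketch for the graph-mode op above. It assumes a TF 1.x build that still ships `tf.contrib.ffmpeg` (needed by the encoder); the shapes, tag, and sample rate are illustrative.

# Usage sketch (TF 1.x graph mode; requires a build with tf.contrib.ffmpeg).
import numpy as np
import tensorflow.compat.v1 as tf
from tensorboard.plugins.audio import summary as audio_summary

waveform = tf.placeholder(tf.float32, shape=[None, 44100, 1])  # [k, t, c]
summ = audio_summary.op('tone', waveform, sample_rate=44100, max_outputs=2)
with tf.Session() as sess:
    data = np.sin(np.linspace(0.0, 100.0, 44100))
    data = data.reshape(1, -1, 1).astype(np.float32)
    serialized = sess.run(summ, feed_dict={waveform: data})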
tensorflow/tensorboard | tensorboard/plugins/audio/summary.py | pb | def pb(name,
audio,
sample_rate,
labels=None,
max_outputs=3,
encoding=None,
display_name=None,
description=None):
"""Create a legacy audio summary protobuf.
This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute
that summary op in a TensorFlow session.
Arguments:
name: A unique name for the generated summary node.
audio: An `np.array` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`.
sample_rate: An `int` that represents the sample rate, in Hz.
Must be positive.
labels: Optional list (or rank-1 `np.array`) of text strings or UTF-8
bytestrings whose length is the first dimension of `audio`, where
`labels[i]` contains arbitrary textual information about
`audio[i]`. (For instance, this could be some text that a TTS
system was supposed to produce.) Markdown is supported.
max_outputs: Optional `int`. At most this many audio clips will be
emitted. When more than `max_outputs` many clips are provided, the
first `max_outputs` many clips will be used and the rest silently
discarded.
encoding: A constant `str` indicating the desired encoding. You
can choose any format you like, as long as it's "wav". Please see
the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
audio = np.array(audio)
if audio.ndim != 3:
raise ValueError('Shape %r must have rank 3' % (audio.shape,))
if encoding is None:
encoding = 'wav'
if encoding == 'wav':
encoding = metadata.Encoding.Value('WAV')
encoder = functools.partial(encoder_util.encode_wav,
samples_per_second=sample_rate)
else:
raise ValueError('Unknown encoding: %r' % encoding)
limited_audio = audio[:max_outputs]
if labels is None:
limited_labels = [b''] * len(limited_audio)
else:
limited_labels = [tf.compat.as_bytes(label)
for label in labels[:max_outputs]]
encoded_audio = [encoder(a) for a in limited_audio]
content = np.array([encoded_audio, limited_labels]).transpose()
tensor = tf.make_tensor_proto(content, dtype=tf.string)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name,
description=description,
encoding=encoding)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/audio_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | python | def pb(name,
audio,
sample_rate,
labels=None,
max_outputs=3,
encoding=None,
display_name=None,
description=None):
"""Create a legacy audio summary protobuf.
This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute
that summary op in a TensorFlow session.
Arguments:
name: A unique name for the generated summary node.
audio: An `np.array` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`.
sample_rate: An `int` that represents the sample rate, in Hz.
Must be positive.
labels: Optional list (or rank-1 `np.array`) of text strings or UTF-8
bytestrings whose length is the first dimension of `audio`, where
`labels[i]` contains arbitrary textual information about
`audio[i]`. (For instance, this could be some text that a TTS
system was supposed to produce.) Markdown is supported.
max_outputs: Optional `int`. At most this many audio clips will be
emitted. When more than `max_outputs` many clips are provided, the
first `max_outputs` many clips will be used and the rest silently
discarded.
encoding: A constant `str` indicating the desired encoding. You
can choose any format you like, as long as it's "wav". Please see
the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
audio = np.array(audio)
if audio.ndim != 3:
raise ValueError('Shape %r must have rank 3' % (audio.shape,))
if encoding is None:
encoding = 'wav'
if encoding == 'wav':
encoding = metadata.Encoding.Value('WAV')
encoder = functools.partial(encoder_util.encode_wav,
samples_per_second=sample_rate)
else:
raise ValueError('Unknown encoding: %r' % encoding)
limited_audio = audio[:max_outputs]
if labels is None:
limited_labels = [b''] * len(limited_audio)
else:
limited_labels = [tf.compat.as_bytes(label)
for label in labels[:max_outputs]]
encoded_audio = [encoder(a) for a in limited_audio]
content = np.array([encoded_audio, limited_labels]).transpose()
tensor = tf.make_tensor_proto(content, dtype=tf.string)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name,
description=description,
encoding=encoding)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/audio_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | [
"def",
"pb",
"(",
"name",
",",
"audio",
",",
"sample_rate",
",",
"labels",
"=",
"None",
",",
"max_outputs",
"=",
"3",
",",
"encoding",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"audio",
"=",
"np",
".",
"array",
"(",
"audio",
")",
"if",
"audio",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Shape %r must have rank 3'",
"%",
"(",
"audio",
".",
"shape",
",",
")",
")",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"'wav'",
"if",
"encoding",
"==",
"'wav'",
":",
"encoding",
"=",
"metadata",
".",
"Encoding",
".",
"Value",
"(",
"'WAV'",
")",
"encoder",
"=",
"functools",
".",
"partial",
"(",
"encoder_util",
".",
"encode_wav",
",",
"samples_per_second",
"=",
"sample_rate",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown encoding: %r'",
"%",
"encoding",
")",
"limited_audio",
"=",
"audio",
"[",
":",
"max_outputs",
"]",
"if",
"labels",
"is",
"None",
":",
"limited_labels",
"=",
"[",
"b''",
"]",
"*",
"len",
"(",
"limited_audio",
")",
"else",
":",
"limited_labels",
"=",
"[",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"label",
")",
"for",
"label",
"in",
"labels",
"[",
":",
"max_outputs",
"]",
"]",
"encoded_audio",
"=",
"[",
"encoder",
"(",
"a",
")",
"for",
"a",
"in",
"limited_audio",
"]",
"content",
"=",
"np",
".",
"array",
"(",
"[",
"encoded_audio",
",",
"limited_labels",
"]",
")",
".",
"transpose",
"(",
")",
"tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"content",
",",
"dtype",
"=",
"tf",
".",
"string",
")",
"if",
"display_name",
"is",
"None",
":",
"display_name",
"=",
"name",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"display_name",
",",
"description",
"=",
"description",
",",
"encoding",
"=",
"encoding",
")",
"tf_summary_metadata",
"=",
"tf",
".",
"SummaryMetadata",
".",
"FromString",
"(",
"summary_metadata",
".",
"SerializeToString",
"(",
")",
")",
"summary",
"=",
"tf",
".",
"Summary",
"(",
")",
"summary",
".",
"value",
".",
"add",
"(",
"tag",
"=",
"'%s/audio_summary'",
"%",
"name",
",",
"metadata",
"=",
"tf_summary_metadata",
",",
"tensor",
"=",
"tensor",
")",
"return",
"summary"
] | Create a legacy audio summary protobuf.
This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute
that summary op in a TensorFlow session.
Arguments:
name: A unique name for the generated summary node.
audio: An `np.array` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`.
sample_rate: An `int` that represents the sample rate, in Hz.
Must be positive.
labels: Optional list (or rank-1 `np.array`) of text strings or UTF-8
bytestrings whose length is the first dimension of `audio`, where
`labels[i]` contains arbitrary textual information about
`audio[i]`. (For instance, this could be some text that a TTS
system was supposed to produce.) Markdown is supported.
max_outputs: Optional `int`. At most this many audio clips will be
emitted. When more than `max_outputs` many clips are provided, the
first `max_outputs` many clips will be used and the rest silently
discarded.
encoding: A constant `str` indicating the desired encoding. You
can choose any format you like, as long as it's "wav". Please see
the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly. | [
"Create",
"a",
"legacy",
"audio",
"summary",
"protobuf",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/audio/summary.py#L131-L219 | train |
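Unlike the graph op, `pb` can be exercised eagerly on numpy arrays; a hedged sketch with illustrative values:

# Usage sketch: pb() builds a tf.Summary directly, no session required.
import numpy as np
from tensorboard.plugins.audio import summary as audio_summary

audio = np.zeros([2, 8000, 1], dtype=np.float32)  # 2 clips, 8000 frames, mono
proto = audio_summary.pb('silence', audio, sample_rate=8000,
                         labels=['clip one', 'clip two'])
# `proto` is a tf.Summary that a tf.summary.FileWriter can write as-is.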
tensorflow/tensorboard | tensorboard/plugins/pr_curve/summary.py | op | def op(
name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None,
collections=None):
"""Create a PR curve summary op for a single binary classifier.
Computes true/false positive/negative values for the given `predictions`
against the ground truth `labels`, against a list of evenly distributed
threshold values in `[0, 1]` of length `num_thresholds`.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn
value at each threshold. These counts are then multiplied by `weights`, which
can be used to reweight certain values or, more commonly, to mask values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values. A Tensor of `bool` values with arbitrary
shape.
predictions: A float32 `Tensor` whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
weights: Optional float32 `Tensor`. Individual counts are multiplied by this
value. This tensor must be either the same shape as or broadcastable to
the `labels` tensor.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. The float32 tensor
produced by the summary operation is of dimension (6, num_thresholds). The
first dimension (of length 6) is of the order: true positives,
false positives, true negatives, false negatives, precision, recall.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
weights = 1.0
dtype = predictions.dtype
with tf.name_scope(name, values=[labels, predictions, weights]):
tf.assert_type(labels, tf.bool)
# We cast to float to ensure we have 0.0 or 1.0.
f_labels = tf.cast(labels, dtype)
# Ensure predictions are all in range [0.0, 1.0].
predictions = tf.minimum(1.0, tf.maximum(0.0, predictions))
# Get weighted true/false labels.
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Before we begin, flatten predictions.
predictions = tf.reshape(predictions, [-1])
# Shape the labels so they are broadcast-able for later multiplication.
true_labels = tf.reshape(true_labels, [-1, 1])
false_labels = tf.reshape(false_labels, [-1, 1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
# B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can bucket each prediction with tf.one_hot() and sum in one pass.
# Compute the bucket indices for each prediction value.
bucket_indices = tf.cast(
tf.floor(predictions * (num_thresholds - 1)), tf.int32)
# Bucket predictions.
tp_buckets = tf.reduce_sum(
input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels,
axis=0)
fp_buckets = tf.reduce_sum(
input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels,
axis=0)
# Set up the cumulative sums to compute the actual metrics.
tp = tf.cumsum(tp_buckets, reverse=True, name='tp')
fp = tf.cumsum(fp_buckets, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
return _create_tensor_summary(
name,
tp,
fp,
tn,
fn,
precision,
recall,
num_thresholds,
display_name,
description,
collections) | python | def op(
name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None,
collections=None):
"""Create a PR curve summary op for a single binary classifier.
Computes true/false positive/negative values for the given `predictions`
against the ground truth `labels`, against a list of evenly distributed
threshold values in `[0, 1]` of length `num_thresholds`.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn
value at each threshold. These counts are then multiplied by `weights`, which
can be used to reweight certain values or, more commonly, to mask values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values. A Tensor of `bool` values with arbitrary
shape.
predictions: A float32 `Tensor` whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
weights: Optional float32 `Tensor`. Individual counts are multiplied by this
value. This tensor must be either the same shape as or broadcastable to
the `labels` tensor.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. The float32 tensor
produced by the summary operation is of dimension (6, num_thresholds). The
first dimension (of length 6) is of the order: true positives,
false positives, true negatives, false negatives, precision, recall.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
weights = 1.0
dtype = predictions.dtype
with tf.name_scope(name, values=[labels, predictions, weights]):
tf.assert_type(labels, tf.bool)
# We cast to float to ensure we have 0.0 or 1.0.
f_labels = tf.cast(labels, dtype)
# Ensure predictions are all in range [0.0, 1.0].
predictions = tf.minimum(1.0, tf.maximum(0.0, predictions))
# Get weighted true/false labels.
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Before we begin, flatten predictions.
predictions = tf.reshape(predictions, [-1])
# Shape the labels so they are broadcast-able for later multiplication.
true_labels = tf.reshape(true_labels, [-1, 1])
false_labels = tf.reshape(false_labels, [-1, 1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
# B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can bucket each prediction with tf.one_hot() and sum in one pass.
# Compute the bucket indices for each prediction value.
bucket_indices = tf.cast(
tf.floor(predictions * (num_thresholds - 1)), tf.int32)
# Bucket predictions.
tp_buckets = tf.reduce_sum(
input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels,
axis=0)
fp_buckets = tf.reduce_sum(
input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels,
axis=0)
# Set up the cumulative sums to compute the actual metrics.
tp = tf.cumsum(tp_buckets, reverse=True, name='tp')
fp = tf.cumsum(fp_buckets, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
return _create_tensor_summary(
name,
tp,
fp,
tn,
fn,
precision,
recall,
num_thresholds,
display_name,
description,
collections) | [
"def",
"op",
"(",
"name",
",",
"labels",
",",
"predictions",
",",
"num_thresholds",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"collections",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"num_thresholds",
"is",
"None",
":",
"num_thresholds",
"=",
"_DEFAULT_NUM_THRESHOLDS",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"1.0",
"dtype",
"=",
"predictions",
".",
"dtype",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"labels",
",",
"predictions",
",",
"weights",
"]",
")",
":",
"tf",
".",
"assert_type",
"(",
"labels",
",",
"tf",
".",
"bool",
")",
"# We cast to float to ensure we have 0.0 or 1.0.",
"f_labels",
"=",
"tf",
".",
"cast",
"(",
"labels",
",",
"dtype",
")",
"# Ensure predictions are all in range [0.0, 1.0].",
"predictions",
"=",
"tf",
".",
"minimum",
"(",
"1.0",
",",
"tf",
".",
"maximum",
"(",
"0.0",
",",
"predictions",
")",
")",
"# Get weighted true/false labels.",
"true_labels",
"=",
"f_labels",
"*",
"weights",
"false_labels",
"=",
"(",
"1.0",
"-",
"f_labels",
")",
"*",
"weights",
"# Before we begin, flatten predictions.",
"predictions",
"=",
"tf",
".",
"reshape",
"(",
"predictions",
",",
"[",
"-",
"1",
"]",
")",
"# Shape the labels so they are broadcast-able for later multiplication.",
"true_labels",
"=",
"tf",
".",
"reshape",
"(",
"true_labels",
",",
"[",
"-",
"1",
",",
"1",
"]",
")",
"false_labels",
"=",
"tf",
".",
"reshape",
"(",
"false_labels",
",",
"[",
"-",
"1",
",",
"1",
"]",
")",
"# To compute TP/FP/TN/FN, we are measuring a binary classifier",
"# C(t) = (predictions >= t)",
"# at each threshold 't'. So we have",
"# TP(t) = sum( C(t) * true_labels )",
"# FP(t) = sum( C(t) * false_labels )",
"#",
"# But, computing C(t) requires computation for each t. To make it fast,",
"# observe that C(t) is a cumulative integral, and so if we have",
"# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}",
"# where n = num_thresholds, and if we can compute the bucket function",
"# B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )",
"# then we get",
"# C(t_i) = sum( B(j), j >= i )",
"# which is the reversed cumulative sum in tf.cumsum().",
"#",
"# We can compute B(i) efficiently by taking advantage of the fact that",
"# our thresholds are evenly distributed, in that",
"# width = 1.0 / (num_thresholds - 1)",
"# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]",
"# Given a prediction value p, we can map it to its bucket by",
"# bucket_index(p) = floor( p * (num_thresholds - 1) )",
"# so we can use tf.scatter_add() to update the buckets in one pass.",
"# Compute the bucket indices for each prediction value.",
"bucket_indices",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"floor",
"(",
"predictions",
"*",
"(",
"num_thresholds",
"-",
"1",
")",
")",
",",
"tf",
".",
"int32",
")",
"# Bucket predictions.",
"tp_buckets",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"one_hot",
"(",
"bucket_indices",
",",
"depth",
"=",
"num_thresholds",
")",
"*",
"true_labels",
",",
"axis",
"=",
"0",
")",
"fp_buckets",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"one_hot",
"(",
"bucket_indices",
",",
"depth",
"=",
"num_thresholds",
")",
"*",
"false_labels",
",",
"axis",
"=",
"0",
")",
"# Set up the cumulative sums to compute the actual metrics.",
"tp",
"=",
"tf",
".",
"cumsum",
"(",
"tp_buckets",
",",
"reverse",
"=",
"True",
",",
"name",
"=",
"'tp'",
")",
"fp",
"=",
"tf",
".",
"cumsum",
"(",
"fp_buckets",
",",
"reverse",
"=",
"True",
",",
"name",
"=",
"'fp'",
")",
"# fn = sum(true_labels) - tp",
"# = sum(tp_buckets) - tp",
"# = tp[0] - tp",
"# Similarly,",
"# tn = fp[0] - fp",
"tn",
"=",
"fp",
"[",
"0",
"]",
"-",
"fp",
"fn",
"=",
"tp",
"[",
"0",
"]",
"-",
"tp",
"precision",
"=",
"tp",
"/",
"tf",
".",
"maximum",
"(",
"_MINIMUM_COUNT",
",",
"tp",
"+",
"fp",
")",
"recall",
"=",
"tp",
"/",
"tf",
".",
"maximum",
"(",
"_MINIMUM_COUNT",
",",
"tp",
"+",
"fn",
")",
"return",
"_create_tensor_summary",
"(",
"name",
",",
"tp",
",",
"fp",
",",
"tn",
",",
"fn",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
",",
"display_name",
",",
"description",
",",
"collections",
")"
] | Create a PR curve summary op for a single binary classifier.
Computes true/false positive/negative values for the given `predictions`
against the ground truth `labels`, against a list of evenly distributed
threshold values in `[0, 1]` of length `num_thresholds`.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn
value at each threshold. These counts are then multiplied by `weights`, which
can be used to reweight certain values or, more commonly, to mask values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values. A Tensor of `bool` values with arbitrary
shape.
predictions: A float32 `Tensor` whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
weights: Optional float32 `Tensor`. Individual counts are multiplied by this
value. This tensor must be either the same shape as or broadcastable to
the `labels` tensor.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. The float32 tensor
produced by the summary operation is of dimension (6, num_thresholds). The
first dimension (of length 6) is of the order: true positives,
false positives, true negatives, false negatives, precision, recall. | [
"Create",
"a",
"PR",
"curve",
"summary",
"op",
"for",
"a",
"single",
"binary",
"classifier",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/summary.py#L37-L172 | train |
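A minimal graph-mode sketch exercising the op above; the toy labels and predictions are made up, and `num_thresholds=11` just keeps the output small.

# Usage sketch (TF 1.x graph mode).
import tensorflow.compat.v1 as tf
from tensorboard.plugins.pr_curve import summary as pr_summary

labels = tf.constant([True, False, True, False])
predictions = tf.constant([0.9, 0.8, 0.4, 0.1])
summ = pr_summary.op('pr', labels, predictions, num_thresholds=11)
with tf.Session() as sess:
    serialized = sess.run(summ)  # wraps a float32 tensor of shape (6, 11)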
tensorflow/tensorboard | tensorboard/plugins/pr_curve/summary.py | pb | def pb(name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
labels: The ground truth values. A bool numpy array.
predictions: A float32 numpy array whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Optional number of thresholds, evenly distributed in
`[0, 1]`, to compute PR metrics for. When provided, should be an int of
value at least 2. Defaults to 201.
weights: Optional float or float32 numpy array. Individual counts are
multiplied by this value. This tensor must be either the same shape as
or broadcastable to the `labels` numpy array.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float64)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=float_labels * weights)
fp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=(1.0 - float_labels) * weights)
# Obtain the reverse cumulative sum.
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return raw_data_pb(name,
true_positive_counts=tp,
false_positive_counts=fp,
true_negative_counts=tn,
false_negative_counts=fn,
precision=precision,
recall=recall,
num_thresholds=num_thresholds,
display_name=display_name,
description=description) | python | def pb(name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
labels: The ground truth values. A bool numpy array.
predictions: A float32 numpy array whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Optional number of thresholds, evenly distributed in
`[0, 1]`, to compute PR metrics for. When provided, should be an int of
value at least 2. Defaults to 201.
weights: Optional float or float32 numpy array. Individual counts are
multiplied by this value. This tensor must be either the same shape as
or broadcastable to the `labels` numpy array.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float64)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=float_labels * weights)
fp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=(1.0 - float_labels) * weights)
# Obtain the reverse cumulative sum.
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return raw_data_pb(name,
true_positive_counts=tp,
false_positive_counts=fp,
true_negative_counts=tn,
false_negative_counts=fn,
precision=precision,
recall=recall,
num_thresholds=num_thresholds,
display_name=display_name,
description=description) | [
"def",
"pb",
"(",
"name",
",",
"labels",
",",
"predictions",
",",
"num_thresholds",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"num_thresholds",
"is",
"None",
":",
"num_thresholds",
"=",
"_DEFAULT_NUM_THRESHOLDS",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"1.0",
"# Compute bins of true positives and false positives.",
"bucket_indices",
"=",
"np",
".",
"int32",
"(",
"np",
".",
"floor",
"(",
"predictions",
"*",
"(",
"num_thresholds",
"-",
"1",
")",
")",
")",
"float_labels",
"=",
"labels",
".",
"astype",
"(",
"np",
".",
"float",
")",
"histogram_range",
"=",
"(",
"0",
",",
"num_thresholds",
"-",
"1",
")",
"tp_buckets",
",",
"_",
"=",
"np",
".",
"histogram",
"(",
"bucket_indices",
",",
"bins",
"=",
"num_thresholds",
",",
"range",
"=",
"histogram_range",
",",
"weights",
"=",
"float_labels",
"*",
"weights",
")",
"fp_buckets",
",",
"_",
"=",
"np",
".",
"histogram",
"(",
"bucket_indices",
",",
"bins",
"=",
"num_thresholds",
",",
"range",
"=",
"histogram_range",
",",
"weights",
"=",
"(",
"1.0",
"-",
"float_labels",
")",
"*",
"weights",
")",
"# Obtain the reverse cumulative sum.",
"tp",
"=",
"np",
".",
"cumsum",
"(",
"tp_buckets",
"[",
":",
":",
"-",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"fp",
"=",
"np",
".",
"cumsum",
"(",
"fp_buckets",
"[",
":",
":",
"-",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"tn",
"=",
"fp",
"[",
"0",
"]",
"-",
"fp",
"fn",
"=",
"tp",
"[",
"0",
"]",
"-",
"tp",
"precision",
"=",
"tp",
"/",
"np",
".",
"maximum",
"(",
"_MINIMUM_COUNT",
",",
"tp",
"+",
"fp",
")",
"recall",
"=",
"tp",
"/",
"np",
".",
"maximum",
"(",
"_MINIMUM_COUNT",
",",
"tp",
"+",
"fn",
")",
"return",
"raw_data_pb",
"(",
"name",
",",
"true_positive_counts",
"=",
"tp",
",",
"false_positive_counts",
"=",
"fp",
",",
"true_negative_counts",
"=",
"tn",
",",
"false_negative_counts",
"=",
"fn",
",",
"precision",
"=",
"precision",
",",
"recall",
"=",
"recall",
",",
"num_thresholds",
"=",
"num_thresholds",
",",
"display_name",
"=",
"display_name",
",",
"description",
"=",
"description",
")"
] | Create a PR curves summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
labels: The ground truth values. A bool numpy array.
predictions: A float32 numpy array whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Optional number of thresholds, evenly distributed in
`[0, 1]`, to compute PR metrics for. When provided, should be an int of
value at least 2. Defaults to 201.
weights: Optional float or float32 numpy array. Individual counts are
multiplied by this value. This tensor must be either the same shape as
or broadcastable to the `labels` numpy array.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty. | [
"Create",
"a",
"PR",
"curves",
"summary",
"protobuf",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/summary.py#L174-L241 | train |
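The numpy variant mirrors the graph op; a hedged sketch with the same toy data:

# Usage sketch: pb() computes the same (6, num_thresholds) payload in numpy.
import numpy as np
from tensorboard.plugins.pr_curve import summary as pr_summary

labels = np.array([True, False, True, False])
predictions = np.array([0.9, 0.8, 0.4, 0.1], dtype=np.float32)
proto = pr_summary.pb('pr', labels, predictions, num_thresholds=11)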
tensorflow/tensorboard | tensorboard/plugins/pr_curve/summary.py | streaming_op | def streaming_op(name,
labels,
predictions,
num_thresholds=None,
weights=None,
metrics_collections=None,
updates_collections=None,
display_name=None,
description=None):
"""Computes a precision-recall curve summary across batches of data.
This function is similar to op() above, but can be used to compute the PR
curve across multiple batches of labels and predictions, in the same style
as the metrics found in tf.metrics.
This function creates multiple local variables for storing true positives,
true negatives, etc., accumulated over each batch of data, and uses these local
variables for computing the final PR curve summary. These variables can be
updated with the returned update_op.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
num_thresholds: The number of evenly spaced thresholds to generate for
computing the PR curve. Defaults to 201.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the PR curve
summary tensor should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
pr_curve: A string `Tensor` containing a single value: the
serialized PR curve Tensor summary. The summary contains a
float32 `Tensor` of dimension (6, num_thresholds). The first
dimension (of length 6) is of the order: true positives, false
positives, true negatives, false negatives, precision, recall.
update_op: An operation that updates the summary with the latest data.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
thresholds = [i / float(num_thresholds - 1)
for i in range(num_thresholds)]
with tf.name_scope(name, values=[labels, predictions, weights]):
tp, update_tp = tf.metrics.true_positives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
fp, update_fp = tf.metrics.false_positives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
tn, update_tn = tf.metrics.true_negatives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
fn, update_fn = tf.metrics.false_negatives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
def compute_summary(tp, fp, tn, fn, collections):
precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
return _create_tensor_summary(
name,
tp,
fp,
tn,
fn,
precision,
recall,
num_thresholds,
display_name,
description,
collections)
pr_curve = compute_summary(tp, fp, tn, fn, metrics_collections)
update_op = tf.group(update_tp, update_fp, update_tn, update_fn)
if updates_collections:
for collection in updates_collections:
tf.add_to_collection(collection, update_op)
return pr_curve, update_op | python | def streaming_op(name,
labels,
predictions,
num_thresholds=None,
weights=None,
metrics_collections=None,
updates_collections=None,
display_name=None,
description=None):
"""Computes a precision-recall curve summary across batches of data.
This function is similar to op() above, but can be used to compute the PR
curve across multiple batches of labels and predictions, in the same style
as the metrics found in tf.metrics.
This function creates multiple local variables for storing true positives,
true negatives, etc., accumulated over each batch of data, and uses these local
variables for computing the final PR curve summary. These variables can be
updated with the returned update_op.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
num_thresholds: The number of evenly spaced thresholds to generate for
computing the PR curve. Defaults to 201.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the PR curve
summary tensor should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
pr_curve: A string `Tensor` containing a single value: the
serialized PR curve Tensor summary. The summary contains a
float32 `Tensor` of dimension (6, num_thresholds). The first
dimension (of length 6) is of the order: true positives, false
positives, true negatives, false negatives, precision, recall.
update_op: An operation that updates the summary with the latest data.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
thresholds = [i / float(num_thresholds - 1)
for i in range(num_thresholds)]
with tf.name_scope(name, values=[labels, predictions, weights]):
tp, update_tp = tf.metrics.true_positives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
fp, update_fp = tf.metrics.false_positives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
tn, update_tn = tf.metrics.true_negatives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
fn, update_fn = tf.metrics.false_negatives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
def compute_summary(tp, fp, tn, fn, collections):
precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
return _create_tensor_summary(
name,
tp,
fp,
tn,
fn,
precision,
recall,
num_thresholds,
display_name,
description,
collections)
pr_curve = compute_summary(tp, fp, tn, fn, metrics_collections)
update_op = tf.group(update_tp, update_fp, update_tn, update_fn)
if updates_collections:
for collection in updates_collections:
tf.add_to_collection(collection, update_op)
return pr_curve, update_op | [
"def",
"streaming_op",
"(",
"name",
",",
"labels",
",",
"predictions",
",",
"num_thresholds",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"num_thresholds",
"is",
"None",
":",
"num_thresholds",
"=",
"_DEFAULT_NUM_THRESHOLDS",
"thresholds",
"=",
"[",
"i",
"/",
"float",
"(",
"num_thresholds",
"-",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"num_thresholds",
")",
"]",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"labels",
",",
"predictions",
",",
"weights",
"]",
")",
":",
"tp",
",",
"update_tp",
"=",
"tf",
".",
"metrics",
".",
"true_positives_at_thresholds",
"(",
"labels",
"=",
"labels",
",",
"predictions",
"=",
"predictions",
",",
"thresholds",
"=",
"thresholds",
",",
"weights",
"=",
"weights",
")",
"fp",
",",
"update_fp",
"=",
"tf",
".",
"metrics",
".",
"false_positives_at_thresholds",
"(",
"labels",
"=",
"labels",
",",
"predictions",
"=",
"predictions",
",",
"thresholds",
"=",
"thresholds",
",",
"weights",
"=",
"weights",
")",
"tn",
",",
"update_tn",
"=",
"tf",
".",
"metrics",
".",
"true_negatives_at_thresholds",
"(",
"labels",
"=",
"labels",
",",
"predictions",
"=",
"predictions",
",",
"thresholds",
"=",
"thresholds",
",",
"weights",
"=",
"weights",
")",
"fn",
",",
"update_fn",
"=",
"tf",
".",
"metrics",
".",
"false_negatives_at_thresholds",
"(",
"labels",
"=",
"labels",
",",
"predictions",
"=",
"predictions",
",",
"thresholds",
"=",
"thresholds",
",",
"weights",
"=",
"weights",
")",
"def",
"compute_summary",
"(",
"tp",
",",
"fp",
",",
"tn",
",",
"fn",
",",
"collections",
")",
":",
"precision",
"=",
"tp",
"/",
"tf",
".",
"maximum",
"(",
"_MINIMUM_COUNT",
",",
"tp",
"+",
"fp",
")",
"recall",
"=",
"tp",
"/",
"tf",
".",
"maximum",
"(",
"_MINIMUM_COUNT",
",",
"tp",
"+",
"fn",
")",
"return",
"_create_tensor_summary",
"(",
"name",
",",
"tp",
",",
"fp",
",",
"tn",
",",
"fn",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
",",
"display_name",
",",
"description",
",",
"collections",
")",
"pr_curve",
"=",
"compute_summary",
"(",
"tp",
",",
"fp",
",",
"tn",
",",
"fn",
",",
"metrics_collections",
")",
"update_op",
"=",
"tf",
".",
"group",
"(",
"update_tp",
",",
"update_fp",
",",
"update_tn",
",",
"update_fn",
")",
"if",
"updates_collections",
":",
"for",
"collection",
"in",
"updates_collections",
":",
"tf",
".",
"add_to_collection",
"(",
"collection",
",",
"update_op",
")",
"return",
"pr_curve",
",",
"update_op"
] | Computes a precision-recall curve summary across batches of data.
This function is similar to op() above, but can be used to compute the PR
curve across multiple batches of labels and predictions, in the same style
as the metrics found in tf.metrics.
This function creates multiple local variables for storing true positives,
true negatives, etc., accumulated over each batch of data, and uses these local
variables for computing the final PR curve summary. These variables can be
updated with the returned update_op.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
num_thresholds: The number of evenly spaced thresholds to generate for
computing the PR curve. Defaults to 201.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the PR curve
summary tensor should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
pr_curve: A string `Tensor` containing a single value: the
serialized PR curve Tensor summary. The summary contains a
float32 `Tensor` of dimension (6, num_thresholds). The first
dimension (of length 6) is of the order: true positives, false
positives, true negatives, false negatives, precision, recall.
update_op: An operation that updates the summary with the latest data. | [
"Computes",
"a",
"precision",
"-",
"recall",
"curve",
"summary",
"across",
"batches",
"of",
"data",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/summary.py#L243-L345 | train |
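Because the counts live in local variables, a caller initializes them and runs `update_op` once per batch before fetching the summary; a hedged sketch with made-up batches:

# Usage sketch (TF 1.x): accumulate tp/fp/tn/fn across batches, then fetch.
import tensorflow.compat.v1 as tf
from tensorboard.plugins.pr_curve import summary as pr_summary

labels = tf.placeholder(tf.bool, [None])
predictions = tf.placeholder(tf.float32, [None])
pr_curve, update_op = pr_summary.streaming_op(
    'pr', labels, predictions, num_thresholds=11)
batches = [([True, False], [0.9, 0.2]),  # made-up data
           ([True, True], [0.7, 0.6])]
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    for batch_labels, batch_preds in batches:
        sess.run(update_op, {labels: batch_labels, predictions: batch_preds})
    serialized = sess.run(pr_curve)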
tensorflow/tensorboard | tensorboard/plugins/pr_curve/summary.py | raw_data_op | def raw_data_op(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""Create an op that collects data for visualizing PR curves.
Unlike the op above, this one avoids computing precision, recall, and the
intermediate counts. Instead, it accepts those tensors as arguments and
relies on the caller to ensure that the calculations are correct (and the
counts yield the provided precision and recall values).
This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 tensor of true positive counts. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
false_positive_counts: A rank-1 tensor of false positive counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
true_negative_counts: A rank-1 tensor of true negative counts. Must contain
`num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
false_negative_counts: A rank-1 tensor of false negative counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
precision: A rank-1 tensor of precision values. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
elements and be castable to float32. Values correspond to thresholds
that increase from left to right (from 0 to 1).
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collection keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
with tf.name_scope(name, values=[
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
]):
return _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds,
display_name,
description,
collections) | python | def raw_data_op(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""Create an op that collects data for visualizing PR curves.
Unlike the op above, this one avoids computing precision, recall, and the
intermediate counts. Instead, it accepts those tensors as arguments and
relies on the caller to ensure that the calculations are correct (and the
counts yield the provided precision and recall values).
This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 tensor of true positive counts. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
false_positive_counts: A rank-1 tensor of false positive counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
true_negative_counts: A rank-1 tensor of true negative counts. Must contain
`num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
false_negative_counts: A rank-1 tensor of false negative counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
precision: A rank-1 tensor of precision values. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
elements and be castable to float32. Values correspond to thresholds
that increase from left to right (from 0 to 1).
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collection keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
with tf.name_scope(name, values=[
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
]):
return _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds,
display_name,
description,
collections) | [
"def",
"raw_data_op",
"(",
"name",
",",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"collections",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"]",
")",
":",
"return",
"_create_tensor_summary",
"(",
"name",
",",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
",",
"display_name",
",",
"description",
",",
"collections",
")"
] | Create an op that collects data for visualizing PR curves.
Unlike the op above, this one avoids computing precision, recall, and the
intermediate counts. Instead, it accepts those tensors as arguments and
relies on the caller to ensure that the calculations are correct (and the
counts yield the provided precision and recall values).
This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 tensor of true positive counts. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
false_positive_counts: A rank-1 tensor of false positive counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
true_negative_counts: A rank-1 tensor of true negative counts. Must contain
`num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
false_negative_counts: A rank-1 tensor of false negative counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
precision: A rank-1 tensor of precision values. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
elements and be castable to float32. Values correspond to thresholds
that increase from left to right (from 0 to 1).
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collection keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary. | [
"Create",
"an",
"op",
"that",
"collects",
"data",
"for",
"visualizing",
"PR",
"curves",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/summary.py#L347-L426 | train |
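Since `raw_data_op` trusts the caller's arithmetic, a short sketch may help; all count values below are invented for illustration, and `num_thresholds` must equal the length of every rank-1 input:

```python
# Sketch: feeding externally computed counts to raw_data_op. Values are
# made up; only the constraints from the docstring (equal lengths, counts
# castable to float32) are exercised.
import tensorflow.compat.v1 as tf
from tensorboard.plugins.pr_curve import summary as pr_summary

tp = tf.constant([9.0, 5.0, 1.0])
fp = tf.constant([6.0, 2.0, 0.0])
tn = tf.constant([0.0, 4.0, 6.0])
fn = tf.constant([0.0, 4.0, 8.0])
precision = tp / tf.maximum(1e-7, tp + fp)
recall = tp / tf.maximum(1e-7, tp + fn)

summary_op = pr_summary.raw_data_op(
    name='manual_pr_curve',
    true_positive_counts=tp,
    false_positive_counts=fp,
    true_negative_counts=tn,
    false_negative_counts=fn,
    precision=precision,
    recall=recall,
    num_thresholds=3)
```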
tensorflow/tensorboard | tensorboard/plugins/pr_curve/summary.py | raw_data_pb | def raw_data_pb(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf from raw data values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 numpy array of true positive counts. Must
contain `num_thresholds` elements and be castable to float32.
false_positive_counts: A rank-1 numpy array of false positive counts. Must
contain `num_thresholds` elements and be castable to float32.
true_negative_counts: A rank-1 numpy array of true negative counts. Must
contain `num_thresholds` elements and be castable to float32.
false_negative_counts: A rank-1 numpy array of false negative counts. Must
contain `num_thresholds` elements and be castable to float32.
precision: A rank-1 numpy array of precision values. Must contain
`num_thresholds` elements and be castable to float32.
recall: A rank-1 numpy array of recall values. Must contain `num_thresholds`
elements and be castable to float32.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be an int `>= 2`.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf containing the PR curves data. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name if display_name is not None else name,
description=description or '',
num_thresholds=num_thresholds)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
data = np.stack(
(true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall))
tensor = tf.make_tensor_proto(np.float32(data), dtype=tf.float32)
summary.value.add(tag='%s/pr_curves' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | python | def raw_data_pb(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf from raw data values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 numpy array of true positive counts. Must
contain `num_thresholds` elements and be castable to float32.
false_positive_counts: A rank-1 numpy array of false positive counts. Must
contain `num_thresholds` elements and be castable to float32.
true_negative_counts: A rank-1 numpy array of true negative counts. Must
contain `num_thresholds` elements and be castable to float32.
false_negative_counts: A rank-1 numpy array of false negative counts. Must
contain `num_thresholds` elements and be castable to float32.
precision: A rank-1 numpy array of precision values. Must contain
`num_thresholds` elements and be castable to float32.
recall: A rank-1 numpy array of recall values. Must contain `num_thresholds`
elements and be castable to float32.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be an int `>= 2`.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf containing the PR curves data. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name if display_name is not None else name,
description=description or '',
num_thresholds=num_thresholds)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
data = np.stack(
(true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall))
tensor = tf.make_tensor_proto(np.float32(data), dtype=tf.float32)
summary.value.add(tag='%s/pr_curves' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | [
"def",
"raw_data_pb",
"(",
"name",
",",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"display_name",
"is",
"None",
":",
"display_name",
"=",
"name",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"display_name",
"if",
"display_name",
"is",
"not",
"None",
"else",
"name",
",",
"description",
"=",
"description",
"or",
"''",
",",
"num_thresholds",
"=",
"num_thresholds",
")",
"tf_summary_metadata",
"=",
"tf",
".",
"SummaryMetadata",
".",
"FromString",
"(",
"summary_metadata",
".",
"SerializeToString",
"(",
")",
")",
"summary",
"=",
"tf",
".",
"Summary",
"(",
")",
"data",
"=",
"np",
".",
"stack",
"(",
"(",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
")",
")",
"tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"np",
".",
"float32",
"(",
"data",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"summary",
".",
"value",
".",
"add",
"(",
"tag",
"=",
"'%s/pr_curves'",
"%",
"name",
",",
"metadata",
"=",
"tf_summary_metadata",
",",
"tensor",
"=",
"tensor",
")",
"return",
"summary"
] | Create a PR curves summary protobuf from raw data values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 numpy array of true positive counts. Must
contain `num_thresholds` elements and be castable to float32.
false_positive_counts: A rank-1 numpy array of false positive counts. Must
contain `num_thresholds` elements and be castable to float32.
true_negative_counts: A rank-1 numpy array of true negative counts. Must
contain `num_thresholds` elements and be castable to float32.
false_negative_counts: A rank-1 numpy array of false negative counts. Must
contain `num_thresholds` elements and be castable to float32.
precision: A rank-1 numpy array of precision values. Must contain
`num_thresholds` elements and be castable to float32.
recall: A rank-1 numpy array of recall values. Must contain `num_thresholds`
elements and be castable to float32.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be an int `>= 2`.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf containing the PR curves data. See docs for the `op`
method for details on the float32 tensor produced by this summary. | [
"Create",
"a",
"PR",
"curves",
"summary",
"protobuf",
"from",
"raw",
"data",
"values",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/summary.py#L428-L489 | train |
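`raw_data_pb` is handy for writing PR curves without building a graph at all. A sketch, with illustrative arrays and a hypothetical logdir:

```python
# Sketch: building the PR-curve protobuf offline and writing it straight
# to an event file. Arrays are illustrative; '/tmp/pr_demo' is a
# hypothetical logdir.
import numpy as np
import tensorflow.compat.v1 as tf
from tensorboard.plugins.pr_curve import summary as pr_summary

pb = pr_summary.raw_data_pb(
    name='offline_pr_curve',
    true_positive_counts=np.array([9.0, 5.0, 1.0]),
    false_positive_counts=np.array([6.0, 2.0, 0.0]),
    true_negative_counts=np.array([0.0, 4.0, 6.0]),
    false_negative_counts=np.array([0.0, 4.0, 8.0]),
    precision=np.array([0.6, 0.714, 1.0]),
    recall=np.array([1.0, 0.556, 0.111]),
    num_thresholds=3)

writer = tf.summary.FileWriter('/tmp/pr_demo')
writer.add_summary(pb, global_step=0)
writer.close()
```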
tensorflow/tensorboard | tensorboard/plugins/pr_curve/summary.py | _create_tensor_summary | def _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""A private helper method for generating a tensor summary.
We use a helper method instead of having `op` directly call `raw_data_op`
to prevent the scope of `raw_data_op` from being embedded within `op`.
Arguments are the same as for raw_data_op.
Returns:
A tensor summary that collects data for PR curves.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
# Store the number of thresholds within the summary metadata because
# that value is constant for all pr curve summaries with the same tag.
summary_metadata = metadata.create_summary_metadata(
display_name=display_name if display_name is not None else name,
description=description or '',
num_thresholds=num_thresholds)
# Store values within a tensor. We store them in the order:
# true positives, false positives, true negatives, false
# negatives, precision, and recall.
combined_data = tf.stack([
tf.cast(true_positive_counts, tf.float32),
tf.cast(false_positive_counts, tf.float32),
tf.cast(true_negative_counts, tf.float32),
tf.cast(false_negative_counts, tf.float32),
tf.cast(precision, tf.float32),
tf.cast(recall, tf.float32)])
return tf.summary.tensor_summary(
name='pr_curves',
tensor=combined_data,
collections=collections,
summary_metadata=summary_metadata) | python | def _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""A private helper method for generating a tensor summary.
We use a helper method instead of having `op` directly call `raw_data_op`
to prevent the scope of `raw_data_op` from being embedded within `op`.
Arguments are the same as for raw_data_op.
Returns:
A tensor summary that collects data for PR curves.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
# Store the number of thresholds within the summary metadata because
# that value is constant for all pr curve summaries with the same tag.
summary_metadata = metadata.create_summary_metadata(
display_name=display_name if display_name is not None else name,
description=description or '',
num_thresholds=num_thresholds)
# Store values within a tensor. We store them in the order:
# true positives, false positives, true negatives, false
# negatives, precision, and recall.
combined_data = tf.stack([
tf.cast(true_positive_counts, tf.float32),
tf.cast(false_positive_counts, tf.float32),
tf.cast(true_negative_counts, tf.float32),
tf.cast(false_negative_counts, tf.float32),
tf.cast(precision, tf.float32),
tf.cast(recall, tf.float32)])
return tf.summary.tensor_summary(
name='pr_curves',
tensor=combined_data,
collections=collections,
summary_metadata=summary_metadata) | [
"def",
"_create_tensor_summary",
"(",
"name",
",",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"collections",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"# Store the number of thresholds within the summary metadata because",
"# that value is constant for all pr curve summaries with the same tag.",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"display_name",
"if",
"display_name",
"is",
"not",
"None",
"else",
"name",
",",
"description",
"=",
"description",
"or",
"''",
",",
"num_thresholds",
"=",
"num_thresholds",
")",
"# Store values within a tensor. We store them in the order:",
"# true positives, false positives, true negatives, false",
"# negatives, precision, and recall.",
"combined_data",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"cast",
"(",
"true_positive_counts",
",",
"tf",
".",
"float32",
")",
",",
"tf",
".",
"cast",
"(",
"false_positive_counts",
",",
"tf",
".",
"float32",
")",
",",
"tf",
".",
"cast",
"(",
"true_negative_counts",
",",
"tf",
".",
"float32",
")",
",",
"tf",
".",
"cast",
"(",
"false_negative_counts",
",",
"tf",
".",
"float32",
")",
",",
"tf",
".",
"cast",
"(",
"precision",
",",
"tf",
".",
"float32",
")",
",",
"tf",
".",
"cast",
"(",
"recall",
",",
"tf",
".",
"float32",
")",
"]",
")",
"return",
"tf",
".",
"summary",
".",
"tensor_summary",
"(",
"name",
"=",
"'pr_curves'",
",",
"tensor",
"=",
"combined_data",
",",
"collections",
"=",
"collections",
",",
"summary_metadata",
"=",
"summary_metadata",
")"
] | A private helper method for generating a tensor summary.
We use a helper method instead of having `op` directly call `raw_data_op`
to prevent the scope of `raw_data_op` from being embedded within `op`.
Arguments are the same as for raw_data_op.
Returns:
A tensor summary that collects data for PR curves. | [
"A",
"private",
"helper",
"method",
"for",
"generating",
"a",
"tensor",
"summary",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/summary.py#L491-L538 | train |
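The stacking convention that `_create_tensor_summary` enforces is easy to verify in plain numpy; this sketch just mirrors the (6, num_thresholds) layout described in its comments:

```python
# Numpy-only sketch of the (6, num_thresholds) tensor layout stacked by
# _create_tensor_summary, in row order: TP, FP, TN, FN, precision, recall.
import numpy as np

tp = np.array([9.0, 5.0, 1.0])
fp = np.array([6.0, 2.0, 0.0])
tn = np.array([0.0, 4.0, 6.0])
fn = np.array([0.0, 4.0, 8.0])
precision = tp / np.maximum(1e-7, tp + fp)
recall = tp / np.maximum(1e-7, tp + fn)

combined = np.stack([tp, fp, tn, fn, precision, recall]).astype(np.float32)
assert combined.shape == (6, 3)  # 6 rows, num_thresholds columns
```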
tensorflow/tensorboard | tensorboard/plugins/hparams/list_metric_evals.py | Handler.run | def run(self):
"""Executes the request.
Returns:
An array of tuples representing the metric evaluations--each of the form
(<wall time in secs>, <training step>, <metric value>).
"""
run, tag = metrics.run_tag_from_session_and_metric(
self._request.session_name, self._request.metric_name)
body, _ = self._scalars_plugin_instance.scalars_impl(
tag, run, None, scalars_plugin.OutputFormat.JSON)
return body | python | def run(self):
"""Executes the request.
Returns:
An array of tuples representing the metric evaluations--each of the form
(<wall time in secs>, <training step>, <metric value>).
"""
run, tag = metrics.run_tag_from_session_and_metric(
self._request.session_name, self._request.metric_name)
body, _ = self._scalars_plugin_instance.scalars_impl(
tag, run, None, scalars_plugin.OutputFormat.JSON)
return body | [
"def",
"run",
"(",
"self",
")",
":",
"run",
",",
"tag",
"=",
"metrics",
".",
"run_tag_from_session_and_metric",
"(",
"self",
".",
"_request",
".",
"session_name",
",",
"self",
".",
"_request",
".",
"metric_name",
")",
"body",
",",
"_",
"=",
"self",
".",
"_scalars_plugin_instance",
".",
"scalars_impl",
"(",
"tag",
",",
"run",
",",
"None",
",",
"scalars_plugin",
".",
"OutputFormat",
".",
"JSON",
")",
"return",
"body"
] | Executes the request.
Returns:
An array of tuples representing the metric evaluations--each of the form
(<wall time in secs>, <training step>, <metric value>). | [
"Executes",
"the",
"request",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_metric_evals.py#L38-L49 | train |
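`Handler.run` leans on `metrics.run_tag_from_session_and_metric` to translate an hparams session/metric pair into a scalars (run, tag) pair. The real helper lives in `tensorboard/plugins/hparams/metrics.py`; the sketch below is only a plausible reconstruction, assuming a metric is identified by a group plus a tag:

```python
# Hedged reconstruction of the session/metric -> (run, tag) mapping used
# by Handler.run. The group/tag split mirrors the hparams MetricName
# proto; the actual implementation may differ.
import posixpath

def run_tag_from_session_and_metric(session_name, metric_group, metric_tag):
  # A metric group typically names a subdirectory of the session run
  # (e.g. 'eval'); an empty group means the session run itself.
  if metric_group:
    return posixpath.join(session_name, metric_group), metric_tag
  return session_name, metric_tag

print(run_tag_from_session_and_metric('session_1', 'eval', 'loss'))
# -> ('session_1/eval', 'loss')
```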
tensorflow/tensorboard | tensorboard/plugins/histogram/histograms_plugin.py | HistogramsPlugin.is_active | def is_active(self):
"""This plugin is active iff any run has at least one histograms tag."""
if self._db_connection_provider:
# The plugin is active if one relevant tag can be found in the database.
db = self._db_connection_provider()
cursor = db.execute('''
SELECT
1
FROM Tags
WHERE Tags.plugin_name = ?
LIMIT 1
''', (metadata.PLUGIN_NAME,))
return bool(list(cursor))
return bool(self._multiplexer) and any(self.index_impl().values()) | python | def is_active(self):
"""This plugin is active iff any run has at least one histograms tag."""
if self._db_connection_provider:
# The plugin is active if one relevant tag can be found in the database.
db = self._db_connection_provider()
cursor = db.execute('''
SELECT
1
FROM Tags
WHERE Tags.plugin_name = ?
LIMIT 1
''', (metadata.PLUGIN_NAME,))
return bool(list(cursor))
return bool(self._multiplexer) and any(self.index_impl().values()) | [
"def",
"is_active",
"(",
"self",
")",
":",
"if",
"self",
".",
"_db_connection_provider",
":",
"# The plugin is active if one relevant tag can be found in the database.",
"db",
"=",
"self",
".",
"_db_connection_provider",
"(",
")",
"cursor",
"=",
"db",
".",
"execute",
"(",
"'''\n SELECT\n 1\n FROM Tags\n WHERE Tags.plugin_name = ?\n LIMIT 1\n '''",
",",
"(",
"metadata",
".",
"PLUGIN_NAME",
",",
")",
")",
"return",
"bool",
"(",
"list",
"(",
"cursor",
")",
")",
"return",
"bool",
"(",
"self",
".",
"_multiplexer",
")",
"and",
"any",
"(",
"self",
".",
"index_impl",
"(",
")",
".",
"values",
"(",
")",
")"
] | This plugin is active iff any run has at least one histograms tag. | [
"This",
"plugin",
"is",
"active",
"iff",
"any",
"run",
"has",
"at",
"least",
"one",
"histograms",
"tag",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/histograms_plugin.py#L70-L84 | train |
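The DB branch of `is_active` is a simple existence probe. A standalone sketch with Python's `sqlite3`, assuming a database that already has the `Tags` table from TensorBoard's schema:

```python
# Standalone sketch of the activity probe above. The Tags.plugin_name
# column comes from TensorBoard's DB schema; db_path is hypothetical.
import sqlite3

def has_plugin_tags(db_path, plugin_name='histograms'):
  db = sqlite3.connect(db_path)
  try:
    cursor = db.execute(
        'SELECT 1 FROM Tags WHERE Tags.plugin_name = ? LIMIT 1',
        (plugin_name,))
    return cursor.fetchone() is not None  # same as bool(list(cursor))
  finally:
    db.close()
```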
tensorflow/tensorboard | tensorboard/plugins/histogram/histograms_plugin.py | HistogramsPlugin.histograms_impl | def histograms_impl(self, tag, run, downsample_to=None):
"""Result of the form `(body, mime_type)`, or `ValueError`.
At most `downsample_to` events will be returned. If this value is
`None`, then no downsampling will be performed.
"""
if self._db_connection_provider:
# Serve data from the database.
db = self._db_connection_provider()
cursor = db.cursor()
# Prefetch the tag ID matching this run and tag.
cursor.execute(
'''
SELECT
tag_id
FROM Tags
JOIN Runs USING (run_id)
WHERE
Runs.run_name = :run
AND Tags.tag_name = :tag
AND Tags.plugin_name = :plugin
''',
{'run': run, 'tag': tag, 'plugin': metadata.PLUGIN_NAME})
row = cursor.fetchone()
if not row:
raise ValueError('No histogram tag %r for run %r' % (tag, run))
(tag_id,) = row
# Fetch tensor values, optionally with linear-spaced sampling by step.
# For steps ranging from s_min to s_max and sample size k, this query
# divides the range into k - 1 equal-sized intervals and returns the
# lowest step at or above each of the k interval boundaries (which always
# includes s_min and s_max, and may be fewer than k results if there are
# intervals where no steps are present). For contiguous steps the results
# can be formally expressed as the following:
# [s_min + math.ceil(i / (k - 1) * (s_max - s_min)) for i in range(k)]
cursor.execute(
'''
SELECT
MIN(step) AS step,
computed_time,
data,
dtype,
shape
FROM Tensors
INNER JOIN (
SELECT
MIN(step) AS min_step,
MAX(step) AS max_step
FROM Tensors
/* Filter out NULL so we can use TensorSeriesStepIndex. */
WHERE series = :tag_id AND step IS NOT NULL
)
/* Ensure we omit reserved rows, which have NULL step values. */
WHERE series = :tag_id AND step IS NOT NULL
/* Bucket rows into sample_size linearly spaced buckets, or do
no sampling if sample_size is NULL. */
GROUP BY
IFNULL(:sample_size - 1, max_step - min_step)
* (step - min_step) / (max_step - min_step)
ORDER BY step
''',
{'tag_id': tag_id, 'sample_size': downsample_to})
events = [(computed_time, step, self._get_values(data, dtype, shape))
for step, computed_time, data, dtype, shape in cursor]
else:
# Serve data from events files.
try:
tensor_events = self._multiplexer.Tensors(run, tag)
except KeyError:
raise ValueError('No histogram tag %r for run %r' % (tag, run))
if downsample_to is not None and len(tensor_events) > downsample_to:
rand_indices = random.Random(0).sample(
six.moves.xrange(len(tensor_events)), downsample_to)
indices = sorted(rand_indices)
tensor_events = [tensor_events[i] for i in indices]
events = [[e.wall_time, e.step, tensor_util.make_ndarray(e.tensor_proto).tolist()]
for e in tensor_events]
return (events, 'application/json') | python | def histograms_impl(self, tag, run, downsample_to=None):
"""Result of the form `(body, mime_type)`, or `ValueError`.
At most `downsample_to` events will be returned. If this value is
`None`, then no downsampling will be performed.
"""
if self._db_connection_provider:
# Serve data from the database.
db = self._db_connection_provider()
cursor = db.cursor()
# Prefetch the tag ID matching this run and tag.
cursor.execute(
'''
SELECT
tag_id
FROM Tags
JOIN Runs USING (run_id)
WHERE
Runs.run_name = :run
AND Tags.tag_name = :tag
AND Tags.plugin_name = :plugin
''',
{'run': run, 'tag': tag, 'plugin': metadata.PLUGIN_NAME})
row = cursor.fetchone()
if not row:
raise ValueError('No histogram tag %r for run %r' % (tag, run))
(tag_id,) = row
# Fetch tensor values, optionally with linear-spaced sampling by step.
# For steps ranging from s_min to s_max and sample size k, this query
# divides the range into k - 1 equal-sized intervals and returns the
# lowest step at or above each of the k interval boundaries (which always
# includes s_min and s_max, and may be fewer than k results if there are
# intervals where no steps are present). For contiguous steps the results
# can be formally expressed as the following:
# [s_min + math.ceil(i / (k - 1) * (s_max - s_min)) for i in range(k)]
cursor.execute(
'''
SELECT
MIN(step) AS step,
computed_time,
data,
dtype,
shape
FROM Tensors
INNER JOIN (
SELECT
MIN(step) AS min_step,
MAX(step) AS max_step
FROM Tensors
/* Filter out NULL so we can use TensorSeriesStepIndex. */
WHERE series = :tag_id AND step IS NOT NULL
)
/* Ensure we omit reserved rows, which have NULL step values. */
WHERE series = :tag_id AND step IS NOT NULL
/* Bucket rows into sample_size linearly spaced buckets, or do
no sampling if sample_size is NULL. */
GROUP BY
IFNULL(:sample_size - 1, max_step - min_step)
* (step - min_step) / (max_step - min_step)
ORDER BY step
''',
{'tag_id': tag_id, 'sample_size': downsample_to})
events = [(computed_time, step, self._get_values(data, dtype, shape))
for step, computed_time, data, dtype, shape in cursor]
else:
# Serve data from events files.
try:
tensor_events = self._multiplexer.Tensors(run, tag)
except KeyError:
raise ValueError('No histogram tag %r for run %r' % (tag, run))
if downsample_to is not None and len(tensor_events) > downsample_to:
rand_indices = random.Random(0).sample(
six.moves.xrange(len(tensor_events)), downsample_to)
indices = sorted(rand_indices)
tensor_events = [tensor_events[i] for i in indices]
events = [[e.wall_time, e.step, tensor_util.make_ndarray(e.tensor_proto).tolist()]
for e in tensor_events]
return (events, 'application/json') | [
"def",
"histograms_impl",
"(",
"self",
",",
"tag",
",",
"run",
",",
"downsample_to",
"=",
"None",
")",
":",
"if",
"self",
".",
"_db_connection_provider",
":",
"# Serve data from the database.",
"db",
"=",
"self",
".",
"_db_connection_provider",
"(",
")",
"cursor",
"=",
"db",
".",
"cursor",
"(",
")",
"# Prefetch the tag ID matching this run and tag.",
"cursor",
".",
"execute",
"(",
"'''\n SELECT\n tag_id\n FROM Tags\n JOIN Runs USING (run_id)\n WHERE\n Runs.run_name = :run\n AND Tags.tag_name = :tag\n AND Tags.plugin_name = :plugin\n '''",
",",
"{",
"'run'",
":",
"run",
",",
"'tag'",
":",
"tag",
",",
"'plugin'",
":",
"metadata",
".",
"PLUGIN_NAME",
"}",
")",
"row",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"if",
"not",
"row",
":",
"raise",
"ValueError",
"(",
"'No histogram tag %r for run %r'",
"%",
"(",
"tag",
",",
"run",
")",
")",
"(",
"tag_id",
",",
")",
"=",
"row",
"# Fetch tensor values, optionally with linear-spaced sampling by step.",
"# For steps ranging from s_min to s_max and sample size k, this query",
"# divides the range into k - 1 equal-sized intervals and returns the",
"# lowest step at or above each of the k interval boundaries (which always",
"# includes s_min and s_max, and may be fewer than k results if there are",
"# intervals where no steps are present). For contiguous steps the results",
"# can be formally expressed as the following:",
"# [s_min + math.ceil(i / k * (s_max - s_min)) for i in range(0, k + 1)]",
"cursor",
".",
"execute",
"(",
"'''\n SELECT\n MIN(step) AS step,\n computed_time,\n data,\n dtype,\n shape\n FROM Tensors\n INNER JOIN (\n SELECT\n MIN(step) AS min_step,\n MAX(step) AS max_step\n FROM Tensors\n /* Filter out NULL so we can use TensorSeriesStepIndex. */\n WHERE series = :tag_id AND step IS NOT NULL\n )\n /* Ensure we omit reserved rows, which have NULL step values. */\n WHERE series = :tag_id AND step IS NOT NULL\n /* Bucket rows into sample_size linearly spaced buckets, or do\n no sampling if sample_size is NULL. */\n GROUP BY\n IFNULL(:sample_size - 1, max_step - min_step)\n * (step - min_step) / (max_step - min_step)\n ORDER BY step\n '''",
",",
"{",
"'tag_id'",
":",
"tag_id",
",",
"'sample_size'",
":",
"downsample_to",
"}",
")",
"events",
"=",
"[",
"(",
"computed_time",
",",
"step",
",",
"self",
".",
"_get_values",
"(",
"data",
",",
"dtype",
",",
"shape",
")",
")",
"for",
"step",
",",
"computed_time",
",",
"data",
",",
"dtype",
",",
"shape",
"in",
"cursor",
"]",
"else",
":",
"# Serve data from events files.",
"try",
":",
"tensor_events",
"=",
"self",
".",
"_multiplexer",
".",
"Tensors",
"(",
"run",
",",
"tag",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'No histogram tag %r for run %r'",
"%",
"(",
"tag",
",",
"run",
")",
")",
"if",
"downsample_to",
"is",
"not",
"None",
"and",
"len",
"(",
"tensor_events",
")",
">",
"downsample_to",
":",
"rand_indices",
"=",
"random",
".",
"Random",
"(",
"0",
")",
".",
"sample",
"(",
"six",
".",
"moves",
".",
"xrange",
"(",
"len",
"(",
"tensor_events",
")",
")",
",",
"downsample_to",
")",
"indices",
"=",
"sorted",
"(",
"rand_indices",
")",
"tensor_events",
"=",
"[",
"tensor_events",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
"]",
"events",
"=",
"[",
"[",
"e",
".",
"wall_time",
",",
"e",
".",
"step",
",",
"tensor_util",
".",
"make_ndarray",
"(",
"e",
".",
"tensor_proto",
")",
".",
"tolist",
"(",
")",
"]",
"for",
"e",
"in",
"tensor_events",
"]",
"return",
"(",
"events",
",",
"'application/json'",
")"
] | Returns a result of the form `(body, mime_type)`, or raises `ValueError`.
At most `downsample_to` events will be returned. If this value is
`None`, then no downsampling will be performed. | [
"Result",
"of",
"the",
"form",
"(",
"body",
"mime_type",
")",
"or",
"ValueError",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/histograms_plugin.py#L127-L204 | train |
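The step-sampling arithmetic in `histograms_impl` is worth seeing outside SQL. This pure-Python sketch implements the boundary formula implied by the GROUP BY clause, for contiguous steps and `k >= 2` samples:

```python
# Pure-Python sketch of the linearly spaced sampling performed by the
# GROUP BY above: pick the lowest step at or above each of k boundaries
# spanning k - 1 equal intervals of [s_min, s_max].
import math

def sample_steps(s_min, s_max, k):
  assert k >= 2, 'need at least the two endpoints'
  span = s_max - s_min
  return [s_min + int(math.ceil(i * span / float(k - 1))) for i in range(k)]

print(sample_steps(0, 100, 5))  # [0, 25, 50, 75, 100]
```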
tensorflow/tensorboard | tensorboard/plugins/histogram/histograms_plugin.py | HistogramsPlugin._get_values | def _get_values(self, data_blob, dtype_enum, shape_string):
"""Obtains values for histogram data given blob and dtype enum.
Args:
data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
shape_string: A comma-separated string of numbers denoting shape.
Returns:
The histogram values as a list served to the frontend.
"""
buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
return buf.reshape([int(i) for i in shape_string.split(',')]).tolist() | python | def _get_values(self, data_blob, dtype_enum, shape_string):
"""Obtains values for histogram data given blob and dtype enum.
Args:
data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
shape_string: A comma-separated string of numbers denoting shape.
Returns:
The histogram values as a list served to the frontend.
"""
buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
return buf.reshape([int(i) for i in shape_string.split(',')]).tolist() | [
"def",
"_get_values",
"(",
"self",
",",
"data_blob",
",",
"dtype_enum",
",",
"shape_string",
")",
":",
"buf",
"=",
"np",
".",
"frombuffer",
"(",
"data_blob",
",",
"dtype",
"=",
"tf",
".",
"DType",
"(",
"dtype_enum",
")",
".",
"as_numpy_dtype",
")",
"return",
"buf",
".",
"reshape",
"(",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"shape_string",
".",
"split",
"(",
"','",
")",
"]",
")",
".",
"tolist",
"(",
")"
] | Obtains values for histogram data given blob and dtype enum.
Args:
data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
shape_string: A comma-separated string of numbers denoting shape.
Returns:
The histogram values as a list served to the frontend. | [
"Obtains",
"values",
"for",
"histogram",
"data",
"given",
"blob",
"and",
"dtype",
"enum",
".",
"Args",
":",
"data_blob",
":",
"The",
"blob",
"obtained",
"from",
"the",
"database",
".",
"dtype_enum",
":",
"The",
"enum",
"representing",
"the",
"dtype",
".",
"shape_string",
":",
"A",
"comma",
"-",
"separated",
"string",
"of",
"numbers",
"denoting",
"shape",
".",
"Returns",
":",
"The",
"histogram",
"values",
"as",
"a",
"list",
"served",
"to",
"the",
"frontend",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/histograms_plugin.py#L206-L216 | train |
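A quick round-trip makes `_get_values` concrete; this sketch fixes the dtype to float32 instead of going through the `tf.DType` enum lookup the method performs:

```python
# Round-trip sketch for _get_values: serialize an array to bytes, then
# decode with np.frombuffer and reshape from the comma-separated shape
# string, exactly as the method does (minus the dtype-enum lookup).
import numpy as np

original = np.arange(12, dtype=np.float32).reshape(3, 4)
data_blob = original.tobytes()
shape_string = '3,4'

buf = np.frombuffer(data_blob, dtype=np.float32)
values = buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
assert values == original.tolist()
```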
tensorflow/tensorboard | tensorboard/plugins/histogram/histograms_plugin.py | HistogramsPlugin.histograms_route | def histograms_route(self, request):
"""Given a tag and single run, return array of histogram values."""
tag = request.args.get('tag')
run = request.args.get('run')
try:
(body, mime_type) = self.histograms_impl(
tag, run, downsample_to=self.SAMPLE_SIZE)
code = 200
except ValueError as e:
(body, mime_type) = (str(e), 'text/plain')
code = 400
return http_util.Respond(request, body, mime_type, code=code) | python | def histograms_route(self, request):
"""Given a tag and single run, return array of histogram values."""
tag = request.args.get('tag')
run = request.args.get('run')
try:
(body, mime_type) = self.histograms_impl(
tag, run, downsample_to=self.SAMPLE_SIZE)
code = 200
except ValueError as e:
(body, mime_type) = (str(e), 'text/plain')
code = 400
return http_util.Respond(request, body, mime_type, code=code) | [
"def",
"histograms_route",
"(",
"self",
",",
"request",
")",
":",
"tag",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'tag'",
")",
"run",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'run'",
")",
"try",
":",
"(",
"body",
",",
"mime_type",
")",
"=",
"self",
".",
"histograms_impl",
"(",
"tag",
",",
"run",
",",
"downsample_to",
"=",
"self",
".",
"SAMPLE_SIZE",
")",
"code",
"=",
"200",
"except",
"ValueError",
"as",
"e",
":",
"(",
"body",
",",
"mime_type",
")",
"=",
"(",
"str",
"(",
"e",
")",
",",
"'text/plain'",
")",
"code",
"=",
"400",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"body",
",",
"mime_type",
",",
"code",
"=",
"code",
")"
] | Given a tag and single run, return array of histogram values. | [
"Given",
"a",
"tag",
"and",
"single",
"run",
"return",
"array",
"of",
"histogram",
"values",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/histograms_plugin.py#L224-L235 | train |
tensorflow/tensorboard | tensorboard/util/op_evaluator.py | PersistentOpEvaluator._lazily_initialize | def _lazily_initialize(self):
"""Initialize the graph and session, if this has not yet been done."""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
with self._initialization_lock:
if self._session:
return
graph = tf.Graph()
with graph.as_default():
self.initialize_graph()
# Don't reserve GPU because libpng can't run on GPU.
config = tf.ConfigProto(device_count={'GPU': 0})
self._session = tf.Session(graph=graph, config=config) | python | def _lazily_initialize(self):
"""Initialize the graph and session, if this has not yet been done."""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
with self._initialization_lock:
if self._session:
return
graph = tf.Graph()
with graph.as_default():
self.initialize_graph()
# Don't reserve GPU because libpng can't run on GPU.
config = tf.ConfigProto(device_count={'GPU': 0})
self._session = tf.Session(graph=graph, config=config) | [
"def",
"_lazily_initialize",
"(",
"self",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"with",
"self",
".",
"_initialization_lock",
":",
"if",
"self",
".",
"_session",
":",
"return",
"graph",
"=",
"tf",
".",
"Graph",
"(",
")",
"with",
"graph",
".",
"as_default",
"(",
")",
":",
"self",
".",
"initialize_graph",
"(",
")",
"# Don't reserve GPU because libpng can't run on GPU.",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"device_count",
"=",
"{",
"'GPU'",
":",
"0",
"}",
")",
"self",
".",
"_session",
"=",
"tf",
".",
"Session",
"(",
"graph",
"=",
"graph",
",",
"config",
"=",
"config",
")"
] | Initialize the graph and session, if this has not yet been done. | [
"Initialize",
"the",
"graph",
"and",
"session",
"if",
"this",
"has",
"not",
"yet",
"been",
"done",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/util/op_evaluator.py#L70-L82 | train |
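`_lazily_initialize` is a double-checked-style lazy initializer guarded by a lock. A framework-free sketch of the same pattern:

```python
# Minimal sketch of the lock-guarded lazy initialization used above:
# check under the lock, construct once, then publish.
import threading

class LazyResource(object):
  def __init__(self):
    self._initialization_lock = threading.Lock()
    self._session = None

  def _lazily_initialize(self):
    with self._initialization_lock:
      if self._session:
        return  # another caller already initialized
      self._session = object()  # stands in for the expensive tf.Session
```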
tensorflow/tensorboard | tensorboard/plugins/custom_scalar/custom_scalars_plugin.py | CustomScalarsPlugin._get_scalars_plugin | def _get_scalars_plugin(self):
"""Tries to get the scalars plugin.
Returns:
The scalars plugin, or None if it is not yet registered.
"""
if scalars_metadata.PLUGIN_NAME in self._plugin_name_to_instance:
# The plugin is registered.
return self._plugin_name_to_instance[scalars_metadata.PLUGIN_NAME]
# The plugin is not yet registered.
return None | python | def _get_scalars_plugin(self):
"""Tries to get the scalars plugin.
Returns:
The scalars plugin, or None if it is not yet registered.
"""
if scalars_metadata.PLUGIN_NAME in self._plugin_name_to_instance:
# The plugin is registered.
return self._plugin_name_to_instance[scalars_metadata.PLUGIN_NAME]
# The plugin is not yet registered.
return None | [
"def",
"_get_scalars_plugin",
"(",
"self",
")",
":",
"if",
"scalars_metadata",
".",
"PLUGIN_NAME",
"in",
"self",
".",
"_plugin_name_to_instance",
":",
"# The plugin is registered.",
"return",
"self",
".",
"_plugin_name_to_instance",
"[",
"scalars_metadata",
".",
"PLUGIN_NAME",
"]",
"# The plugin is not yet registered.",
"return",
"None"
] | Tries to get the scalars plugin.
Returns:
The scalars plugin, or None if it is not yet registered. | [
"Tries",
"to",
"get",
"the",
"scalars",
"plugin",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py#L72-L82 | train |
tensorflow/tensorboard | tensorboard/plugins/custom_scalar/custom_scalars_plugin.py | CustomScalarsPlugin.is_active | def is_active(self):
"""This plugin is active if 2 conditions hold.
1. The scalars plugin is registered and active.
2. There is a custom layout for the dashboard.
Returns: A boolean indicating whether the plugin is active.
"""
if not self._multiplexer:
return False
scalars_plugin_instance = self._get_scalars_plugin()
if not (scalars_plugin_instance and
scalars_plugin_instance.is_active()):
return False
# This plugin is active if any run has a layout.
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)) | python | def is_active(self):
"""This plugin is active if 2 conditions hold.
1. The scalars plugin is registered and active.
2. There is a custom layout for the dashboard.
Returns: A boolean indicating whether the plugin is active.
"""
if not self._multiplexer:
return False
scalars_plugin_instance = self._get_scalars_plugin()
if not (scalars_plugin_instance and
scalars_plugin_instance.is_active()):
return False
# This plugin is active if any run has a layout.
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)) | [
"def",
"is_active",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_multiplexer",
":",
"return",
"False",
"scalars_plugin_instance",
"=",
"self",
".",
"_get_scalars_plugin",
"(",
")",
"if",
"not",
"(",
"scalars_plugin_instance",
"and",
"scalars_plugin_instance",
".",
"is_active",
"(",
")",
")",
":",
"return",
"False",
"# This plugin is active if any run has a layout.",
"return",
"bool",
"(",
"self",
".",
"_multiplexer",
".",
"PluginRunToTagToContent",
"(",
"metadata",
".",
"PLUGIN_NAME",
")",
")"
] | This plugin is active if 2 conditions hold.
1. The scalars plugin is registered and active.
2. There is a custom layout for the dashboard.
Returns: A boolean indicating whether the plugin is active. | [
"This",
"plugin",
"is",
"active",
"if",
"2",
"conditions",
"hold",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py#L91-L108 | train |
tensorflow/tensorboard | tensorboard/plugins/custom_scalar/custom_scalars_plugin.py | CustomScalarsPlugin.download_data_impl | def download_data_impl(self, run, tag, response_format):
"""Provides a response for downloading scalars data for a data series.
Args:
run: The run.
tag: The specific tag.
response_format: A string. One of the values of the OutputFormat enum of
the scalar plugin.
Raises:
ValueError: If the scalars plugin is not registered.
Returns:
2 entities:
- A JSON object response body.
- A mime type (string) for the response.
"""
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
raise ValueError(('Failed to respond to request for /download_data. '
'The scalars plugin is oddly not registered.'))
body, mime_type = scalars_plugin_instance.scalars_impl(
tag, run, None, response_format)
return body, mime_type | python | def download_data_impl(self, run, tag, response_format):
"""Provides a response for downloading scalars data for a data series.
Args:
run: The run.
tag: The specific tag.
response_format: A string. One of the values of the OutputFormat enum of
the scalar plugin.
Raises:
ValueError: If the scalars plugin is not registered.
Returns:
2 entities:
- A JSON object response body.
- A mime type (string) for the response.
"""
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
raise ValueError(('Failed to respond to request for /download_data. '
'The scalars plugin is oddly not registered.'))
body, mime_type = scalars_plugin_instance.scalars_impl(
tag, run, None, response_format)
return body, mime_type | [
"def",
"download_data_impl",
"(",
"self",
",",
"run",
",",
"tag",
",",
"response_format",
")",
":",
"scalars_plugin_instance",
"=",
"self",
".",
"_get_scalars_plugin",
"(",
")",
"if",
"not",
"scalars_plugin_instance",
":",
"raise",
"ValueError",
"(",
"(",
"'Failed to respond to request for /download_data. '",
"'The scalars plugin is oddly not registered.'",
")",
")",
"body",
",",
"mime_type",
"=",
"scalars_plugin_instance",
".",
"scalars_impl",
"(",
"tag",
",",
"run",
",",
"None",
",",
"response_format",
")",
"return",
"body",
",",
"mime_type"
] | Provides a response for downloading scalars data for a data series.
Args:
run: The run.
tag: The specific tag.
response_format: A string. One of the values of the OutputFormat enum of
the scalar plugin.
Raises:
ValueError: If the scalars plugin is not registered.
Returns:
2 entities:
- A JSON object response body.
- A mime type (string) for the response. | [
"Provides",
"a",
"response",
"for",
"downloading",
"scalars",
"data",
"for",
"a",
"data",
"series",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py#L125-L149 | train |
tensorflow/tensorboard | tensorboard/plugins/custom_scalar/custom_scalars_plugin.py | CustomScalarsPlugin.scalars_route | def scalars_route(self, request):
"""Given a tag regex and single run, return ScalarEvents.
This route takes 2 GET params:
run: A run string to find tags for.
tag: A string that is a regex used to find matching tags.
The response is a JSON object:
{
// Whether the regular expression is valid. Also false if empty.
regexValid: boolean,
// An object mapping tag name to a list of ScalarEvents.
payload: Object<string, ScalarEvent[]>,
}
"""
# TODO: return HTTP status code for malformed requests
tag_regex_string = request.args.get('tag')
run = request.args.get('run')
mime_type = 'application/json'
try:
body = self.scalars_impl(run, tag_regex_string)
except ValueError as e:
return http_util.Respond(
request=request,
content=str(e),
content_type='text/plain',
code=500)
# Produce the response.
return http_util.Respond(request, body, mime_type) | python | def scalars_route(self, request):
"""Given a tag regex and single run, return ScalarEvents.
This route takes 2 GET params:
run: A run string to find tags for.
tag: A string that is a regex used to find matching tags.
The response is a JSON object:
{
// Whether the regular expression is valid. Also false if empty.
regexValid: boolean,
// An object mapping tag name to a list of ScalarEvents.
payload: Object<string, ScalarEvent[]>,
}
"""
# TODO: return HTTP status code for malformed requests
tag_regex_string = request.args.get('tag')
run = request.args.get('run')
mime_type = 'application/json'
try:
body = self.scalars_impl(run, tag_regex_string)
except ValueError as e:
return http_util.Respond(
request=request,
content=str(e),
content_type='text/plain',
code=500)
# Produce the response.
return http_util.Respond(request, body, mime_type) | [
"def",
"scalars_route",
"(",
"self",
",",
"request",
")",
":",
"# TODO: return HTTP status code for malformed requests",
"tag_regex_string",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'tag'",
")",
"run",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'run'",
")",
"mime_type",
"=",
"'application/json'",
"try",
":",
"body",
"=",
"self",
".",
"scalars_impl",
"(",
"run",
",",
"tag_regex_string",
")",
"except",
"ValueError",
"as",
"e",
":",
"return",
"http_util",
".",
"Respond",
"(",
"request",
"=",
"request",
",",
"content",
"=",
"str",
"(",
"e",
")",
",",
"content_type",
"=",
"'text/plain'",
",",
"code",
"=",
"500",
")",
"# Produce the response.",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"body",
",",
"mime_type",
")"
] | Given a tag regex and single run, return ScalarEvents.
This route takes 2 GET params:
run: A run string to find tags for.
tag: A string that is a regex used to find matching tags.
The response is a JSON object:
{
// Whether the regular expression is valid. Also false if empty.
regexValid: boolean,
// An object mapping tag name to a list of ScalarEvents.
payload: Object<string, ScalarEvent[]>,
} | [
"Given",
"a",
"tag",
"regex",
"and",
"single",
"run",
"return",
"ScalarEvents",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py#L152-L182 | train |
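The response contract documented in `scalars_route` is easiest to read as a literal. An illustrative body with made-up values, where each ScalarEvent is `[wall_time, step, value]`:

```python
# Illustrative /scalars response body; all numbers are invented.
response = {
    'regexValid': True,
    'payload': {
        'loss/train': [
            [1546300800.0, 0, 2.31],    # [wall time in secs, step, value]
            [1546300860.0, 100, 1.07],
        ],
    },
}
```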
tensorflow/tensorboard | tensorboard/plugins/custom_scalar/custom_scalars_plugin.py | CustomScalarsPlugin.scalars_impl | def scalars_impl(self, run, tag_regex_string):
"""Given a tag regex and single run, return ScalarEvents.
Args:
run: A run string.
tag_regex_string: A regular expression that captures portions of tags.
Raises:
ValueError: if the scalars plugin is not registered.
Returns:
A dictionary that is the JSON-able response.
"""
if not tag_regex_string:
# The user provided no regex.
return {
_REGEX_VALID_PROPERTY: False,
_TAG_TO_EVENTS_PROPERTY: {},
}
# Construct the regex.
try:
regex = re.compile(tag_regex_string)
except re.error:
return {
_REGEX_VALID_PROPERTY: False,
_TAG_TO_EVENTS_PROPERTY: {},
}
# Fetch the tags for the run. Filter for tags that match the regex.
run_to_data = self._multiplexer.PluginRunToTagToContent(
scalars_metadata.PLUGIN_NAME)
tag_to_data = None
try:
tag_to_data = run_to_data[run]
except KeyError:
# The run could not be found. Perhaps a configuration specified a run that
# TensorBoard has not read from disk yet.
payload = {}
if tag_to_data:
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
raise ValueError(('Failed to respond to request for /scalars. '
'The scalars plugin is oddly not registered.'))
form = scalars_plugin.OutputFormat.JSON
payload = {
tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0]
for tag in tag_to_data.keys()
if regex.match(tag)
}
return {
_REGEX_VALID_PROPERTY: True,
_TAG_TO_EVENTS_PROPERTY: payload,
} | python | def scalars_impl(self, run, tag_regex_string):
"""Given a tag regex and single run, return ScalarEvents.
Args:
run: A run string.
tag_regex_string: A regular expression that captures portions of tags.
Raises:
ValueError: if the scalars plugin is not registered.
Returns:
A dictionary that is the JSON-able response.
"""
if not tag_regex_string:
# The user provided no regex.
return {
_REGEX_VALID_PROPERTY: False,
_TAG_TO_EVENTS_PROPERTY: {},
}
# Construct the regex.
try:
regex = re.compile(tag_regex_string)
except re.error:
return {
_REGEX_VALID_PROPERTY: False,
_TAG_TO_EVENTS_PROPERTY: {},
}
# Fetch the tags for the run. Filter for tags that match the regex.
run_to_data = self._multiplexer.PluginRunToTagToContent(
scalars_metadata.PLUGIN_NAME)
tag_to_data = None
try:
tag_to_data = run_to_data[run]
except KeyError:
# The run could not be found. Perhaps a configuration specified a run that
# TensorBoard has not read from disk yet.
payload = {}
if tag_to_data:
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
raise ValueError(('Failed to respond to request for /scalars. '
'The scalars plugin is oddly not registered.'))
form = scalars_plugin.OutputFormat.JSON
payload = {
tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0]
for tag in tag_to_data.keys()
if regex.match(tag)
}
return {
_REGEX_VALID_PROPERTY: True,
_TAG_TO_EVENTS_PROPERTY: payload,
} | [
"def",
"scalars_impl",
"(",
"self",
",",
"run",
",",
"tag_regex_string",
")",
":",
"if",
"not",
"tag_regex_string",
":",
"# The user provided no regex.",
"return",
"{",
"_REGEX_VALID_PROPERTY",
":",
"False",
",",
"_TAG_TO_EVENTS_PROPERTY",
":",
"{",
"}",
",",
"}",
"# Construct the regex.",
"try",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"tag_regex_string",
")",
"except",
"re",
".",
"error",
":",
"return",
"{",
"_REGEX_VALID_PROPERTY",
":",
"False",
",",
"_TAG_TO_EVENTS_PROPERTY",
":",
"{",
"}",
",",
"}",
"# Fetch the tags for the run. Filter for tags that match the regex.",
"run_to_data",
"=",
"self",
".",
"_multiplexer",
".",
"PluginRunToTagToContent",
"(",
"scalars_metadata",
".",
"PLUGIN_NAME",
")",
"tag_to_data",
"=",
"None",
"try",
":",
"tag_to_data",
"=",
"run_to_data",
"[",
"run",
"]",
"except",
"KeyError",
":",
"# The run could not be found. Perhaps a configuration specified a run that",
"# TensorBoard has not read from disk yet.",
"payload",
"=",
"{",
"}",
"if",
"tag_to_data",
":",
"scalars_plugin_instance",
"=",
"self",
".",
"_get_scalars_plugin",
"(",
")",
"if",
"not",
"scalars_plugin_instance",
":",
"raise",
"ValueError",
"(",
"(",
"'Failed to respond to request for /scalars. '",
"'The scalars plugin is oddly not registered.'",
")",
")",
"form",
"=",
"scalars_plugin",
".",
"OutputFormat",
".",
"JSON",
"payload",
"=",
"{",
"tag",
":",
"scalars_plugin_instance",
".",
"scalars_impl",
"(",
"tag",
",",
"run",
",",
"None",
",",
"form",
")",
"[",
"0",
"]",
"for",
"tag",
"in",
"tag_to_data",
".",
"keys",
"(",
")",
"if",
"regex",
".",
"match",
"(",
"tag",
")",
"}",
"return",
"{",
"_REGEX_VALID_PROPERTY",
":",
"True",
",",
"_TAG_TO_EVENTS_PROPERTY",
":",
"payload",
",",
"}"
] | Given a tag regex and single run, return ScalarEvents.
Args:
run: A run string.
tag_regex_string: A regular expression that captures portions of tags.
Raises:
ValueError: if the scalars plugin is not registered.
Returns:
A dictionary that is the JSON-able response. | [
"Given",
"a",
"tag",
"regex",
"and",
"single",
"run",
"return",
"ScalarEvents",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py#L184-L241 | train |
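The validate-then-filter flow in `scalars_impl` is easy to exercise standalone. A minimal sketch with plain `re`, using hypothetical stand-ins for the module's response-key constants (`_REGEX_VALID_PROPERTY` and `_TAG_TO_EVENTS_PROPERTY` are module-level names not shown here):

```python
import re

# Hypothetical stand-ins for the module-level response keys.
REGEX_VALID = 'regexValid'
TAG_TO_EVENTS = 'tagToEvents'

def filter_tags(tag_regex_string, tags):
    """Mirrors scalars_impl's validate-then-filter flow over tag names."""
    if not tag_regex_string:
        return {REGEX_VALID: False, TAG_TO_EVENTS: {}}
    try:
        regex = re.compile(tag_regex_string)
    except re.error:
        return {REGEX_VALID: False, TAG_TO_EVENTS: {}}
    # Note: regex.match anchors at the start of the tag, not anywhere in it.
    return {REGEX_VALID: True,
            TAG_TO_EVENTS: {tag: [] for tag in tags if regex.match(tag)}}

print(filter_tags('loss.*', ['loss/total', 'accuracy']))
# {'regexValid': True, 'tagToEvents': {'loss/total': []}}
print(filter_tags('(unbalanced', ['loss/total']))
# {'regexValid': False, 'tagToEvents': {}}
```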
tensorflow/tensorboard | tensorboard/plugins/custom_scalar/custom_scalars_plugin.py | CustomScalarsPlugin.layout_route | def layout_route(self, request):
r"""Fetches the custom layout specified by the config file in the logdir.
If more than 1 run contains a layout, this method merges the layouts by
merging charts within individual categories. If 2 categories with the same
name are found, the charts within are merged. The merging is based on the
order of the runs to which the layouts are written.
The response is a JSON object mirroring properties of the Layout proto if a
layout for any run is found.
The response is an empty object if no layout could be found.
"""
body = self.layout_impl()
return http_util.Respond(request, body, 'application/json') | python | def layout_route(self, request):
r"""Fetches the custom layout specified by the config file in the logdir.
If more than 1 run contains a layout, this method merges the layouts by
merging charts within individual categories. If 2 categories with the same
name are found, the charts within are merged. The merging is based on the
order of the runs to which the layouts are written.
The response is a JSON object mirroring properties of the Layout proto if a
layout for any run is found.
The response is an empty object if no layout could be found.
"""
body = self.layout_impl()
return http_util.Respond(request, body, 'application/json') | [
"def",
"layout_route",
"(",
"self",
",",
"request",
")",
":",
"body",
"=",
"self",
".",
"layout_impl",
"(",
")",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"body",
",",
"'application/json'",
")"
] | r"""Fetches the custom layout specified by the config file in the logdir.
If more than 1 run contains a layout, this method merges the layouts by
merging charts within individual categories. If 2 categories with the same
name are found, the charts within are merged. The merging is based on the
order of the runs to which the layouts are written.
The response is a JSON object mirroring properties of the Layout proto if a
layout for any run is found.
The response is an empty object if no layout could be found. | [
"r",
"Fetches",
"the",
"custom",
"layout",
"specified",
"by",
"the",
"config",
"file",
"in",
"the",
"logdir",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py#L244-L258 | train |
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | make_table_row | def make_table_row(contents, tag='td'):
"""Given an iterable of string contents, make a table row.
Args:
contents: An iterable yielding strings.
tag: The tag to place contents in. Defaults to 'td'; you might want 'th'.
Returns:
A string containing the content strings, organized into a table row.
Example: make_table_row(['one', 'two', 'three']) == '''
<tr>
<td>one</td>
<td>two</td>
<td>three</td>
</tr>'''
"""
columns = ('<%s>%s</%s>\n' % (tag, s, tag) for s in contents)
return '<tr>\n' + ''.join(columns) + '</tr>\n' | python | def make_table_row(contents, tag='td'):
"""Given an iterable of string contents, make a table row.
Args:
contents: An iterable yielding strings.
tag: The tag to place contents in. Defaults to 'td'; you might want 'th'.
Returns:
A string containing the content strings, organized into a table row.
Example: make_table_row(['one', 'two', 'three']) == '''
<tr>
<td>one</td>
<td>two</td>
<td>three</td>
</tr>'''
"""
columns = ('<%s>%s</%s>\n' % (tag, s, tag) for s in contents)
return '<tr>\n' + ''.join(columns) + '</tr>\n' | [
"def",
"make_table_row",
"(",
"contents",
",",
"tag",
"=",
"'td'",
")",
":",
"columns",
"=",
"(",
"'<%s>%s</%s>\\n'",
"%",
"(",
"tag",
",",
"s",
",",
"tag",
")",
"for",
"s",
"in",
"contents",
")",
"return",
"'<tr>\\n'",
"+",
"''",
".",
"join",
"(",
"columns",
")",
"+",
"'</tr>\\n'"
] | Given an iterable of string contents, make a table row.
Args:
contents: An iterable yielding strings.
tag: The tag to place contents in. Defaults to 'td'; you might want 'th'.
Returns:
A string containing the content strings, organized into a table row.
Example: make_table_row(['one', 'two', 'three']) == '''
<tr>
<td>one</td>
<td>two</td>
<td>three</td>
</tr>''' | [
"Given",
"an",
"iterable",
"of",
"string",
"contents",
"make",
"a",
"table",
"row",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L54-L72 | train |
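Running the function as documented reproduces the docstring's own example; the `tag='th'` variant emits header cells:

```python
print(make_table_row(['one', 'two', 'three']))
# <tr>
# <td>one</td>
# <td>two</td>
# <td>three</td>
# </tr>

print(make_table_row(['name', 'value'], tag='th'))
# <tr>
# <th>name</th>
# <th>value</th>
# </tr>
```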
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | make_table | def make_table(contents, headers=None):
"""Given a numpy ndarray of strings, concatenate them into an HTML table.
Args:
contents: A np.ndarray of strings. May be 1d or 2d. In the 1d case, the
table is laid out vertically (i.e. row-major).
headers: A np.ndarray or list of string header names for the table.
Returns:
A string containing all of the content strings, organized into a table.
Raises:
ValueError: If contents is not a np.ndarray.
ValueError: If contents is not 1d or 2d.
ValueError: If contents is empty.
ValueError: If headers is present and not a list, tuple, or ndarray.
ValueError: If headers is not 1d.
ValueError: If number of elements in headers does not correspond to number
of columns in contents.
"""
if not isinstance(contents, np.ndarray):
raise ValueError('make_table contents must be a numpy ndarray')
if contents.ndim not in [1, 2]:
raise ValueError('make_table requires a 1d or 2d numpy array, was %dd' %
contents.ndim)
if headers:
if isinstance(headers, (list, tuple)):
headers = np.array(headers)
if not isinstance(headers, np.ndarray):
raise ValueError('Could not convert headers %s into np.ndarray' % headers)
if headers.ndim != 1:
raise ValueError('Headers must be 1d, is %dd' % headers.ndim)
expected_n_columns = contents.shape[1] if contents.ndim == 2 else 1
if headers.shape[0] != expected_n_columns:
raise ValueError('Number of headers %d must match number of columns %d' %
(headers.shape[0], expected_n_columns))
header = '<thead>\n%s</thead>\n' % make_table_row(headers, tag='th')
else:
header = ''
n_rows = contents.shape[0]
if contents.ndim == 1:
# If it's a vector, we need to wrap each element in a new list, otherwise
# we would turn the string itself into a row (see test code)
rows = (make_table_row([contents[i]]) for i in range(n_rows))
else:
rows = (make_table_row(contents[i, :]) for i in range(n_rows))
return '<table>\n%s<tbody>\n%s</tbody>\n</table>' % (header, ''.join(rows)) | python | def make_table(contents, headers=None):
"""Given a numpy ndarray of strings, concatenate them into an HTML table.
Args:
contents: A np.ndarray of strings. May be 1d or 2d. In the 1d case, the
table is laid out vertically (i.e. row-major).
headers: A np.ndarray or list of string header names for the table.
Returns:
A string containing all of the content strings, organized into a table.
Raises:
ValueError: If contents is not a np.ndarray.
ValueError: If contents is not 1d or 2d.
ValueError: If contents is empty.
ValueError: If headers is present and not a list, tuple, or ndarray.
ValueError: If headers is not 1d.
ValueError: If number of elements in headers does not correspond to number
of columns in contents.
"""
if not isinstance(contents, np.ndarray):
raise ValueError('make_table contents must be a numpy ndarray')
if contents.ndim not in [1, 2]:
raise ValueError('make_table requires a 1d or 2d numpy array, was %dd' %
contents.ndim)
if headers:
if isinstance(headers, (list, tuple)):
headers = np.array(headers)
if not isinstance(headers, np.ndarray):
raise ValueError('Could not convert headers %s into np.ndarray' % headers)
if headers.ndim != 1:
raise ValueError('Headers must be 1d, is %dd' % headers.ndim)
expected_n_columns = contents.shape[1] if contents.ndim == 2 else 1
if headers.shape[0] != expected_n_columns:
raise ValueError('Number of headers %d must match number of columns %d' %
(headers.shape[0], expected_n_columns))
header = '<thead>\n%s</thead>\n' % make_table_row(headers, tag='th')
else:
header = ''
n_rows = contents.shape[0]
if contents.ndim == 1:
# If it's a vector, we need to wrap each element in a new list, otherwise
# we would turn the string itself into a row (see test code)
rows = (make_table_row([contents[i]]) for i in range(n_rows))
else:
rows = (make_table_row(contents[i, :]) for i in range(n_rows))
return '<table>\n%s<tbody>\n%s</tbody>\n</table>' % (header, ''.join(rows)) | [
"def",
"make_table",
"(",
"contents",
",",
"headers",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"contents",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"'make_table contents must be a numpy ndarray'",
")",
"if",
"contents",
".",
"ndim",
"not",
"in",
"[",
"1",
",",
"2",
"]",
":",
"raise",
"ValueError",
"(",
"'make_table requires a 1d or 2d numpy array, was %dd'",
"%",
"contents",
".",
"ndim",
")",
"if",
"headers",
":",
"if",
"isinstance",
"(",
"headers",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"headers",
"=",
"np",
".",
"array",
"(",
"headers",
")",
"if",
"not",
"isinstance",
"(",
"headers",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"'Could not convert headers %s into np.ndarray'",
"%",
"headers",
")",
"if",
"headers",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Headers must be 1d, is %dd'",
"%",
"headers",
".",
"ndim",
")",
"expected_n_columns",
"=",
"contents",
".",
"shape",
"[",
"1",
"]",
"if",
"contents",
".",
"ndim",
"==",
"2",
"else",
"1",
"if",
"headers",
".",
"shape",
"[",
"0",
"]",
"!=",
"expected_n_columns",
":",
"raise",
"ValueError",
"(",
"'Number of headers %d must match number of columns %d'",
"%",
"(",
"headers",
".",
"shape",
"[",
"0",
"]",
",",
"expected_n_columns",
")",
")",
"header",
"=",
"'<thead>\\n%s</thead>\\n'",
"%",
"make_table_row",
"(",
"headers",
",",
"tag",
"=",
"'th'",
")",
"else",
":",
"header",
"=",
"''",
"n_rows",
"=",
"contents",
".",
"shape",
"[",
"0",
"]",
"if",
"contents",
".",
"ndim",
"==",
"1",
":",
"# If it's a vector, we need to wrap each element in a new list, otherwise",
"# we would turn the string itself into a row (see test code)",
"rows",
"=",
"(",
"make_table_row",
"(",
"[",
"contents",
"[",
"i",
"]",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"n_rows",
")",
")",
"else",
":",
"rows",
"=",
"(",
"make_table_row",
"(",
"contents",
"[",
"i",
",",
":",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"n_rows",
")",
")",
"return",
"'<table>\\n%s<tbody>\\n%s</tbody>\\n</table>'",
"%",
"(",
"header",
",",
"''",
".",
"join",
"(",
"rows",
")",
")"
] | Given a numpy ndarray of strings, concatenate them into an HTML table.
Args:
contents: A np.ndarray of strings. May be 1d or 2d. In the 1d case, the
table is laid out vertically (i.e. row-major).
headers: A np.ndarray or list of string header names for the table.
Returns:
A string containing all of the content strings, organized into a table.
Raises:
ValueError: If contents is not a np.ndarray.
ValueError: If contents is not 1d or 2d.
ValueError: If contents is empty.
ValueError: If headers is present and not a list, tuple, or ndarray.
ValueError: If headers is not 1d.
ValueError: If number of elements in headers does not correspond to number
of columns in contents. | [
"Given",
"a",
"numpy",
"ndarray",
"of",
"strings",
"concatenate",
"them",
"into",
"an",
"HTML",
"table",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L75-L125 | train |
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | reduce_to_2d | def reduce_to_2d(arr):
"""Given a np.ndarray with nDims > 2, reduce it to 2d.
It does this by selecting the zeroth coordinate for every dimension greater
than two.
Args:
arr: a numpy ndarray of dimension at least 2.
Returns:
A two-dimensional subarray from the input array.
Raises:
ValueError: If the argument is not a numpy ndarray, or the dimensionality
is too low.
"""
if not isinstance(arr, np.ndarray):
raise ValueError('reduce_to_2d requires a numpy.ndarray')
ndims = len(arr.shape)
if ndims < 2:
raise ValueError('reduce_to_2d requires an array of dimensionality >=2')
# slice(None) is equivalent to `:`, so we take arr[0,0,...0,:,:]
slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]
return arr[slices] | python | def reduce_to_2d(arr):
"""Given a np.ndarray with nDims > 2, reduce it to 2d.
It does this by selecting the zeroth coordinate for every dimension greater
than two.
Args:
arr: a numpy ndarray of dimension at least 2.
Returns:
A two-dimensional subarray from the input array.
Raises:
ValueError: If the argument is not a numpy ndarray, or the dimensionality
is too low.
"""
if not isinstance(arr, np.ndarray):
raise ValueError('reduce_to_2d requires a numpy.ndarray')
ndims = len(arr.shape)
if ndims < 2:
raise ValueError('reduce_to_2d requires an array of dimensionality >=2')
# slice(None) is equivalent to `:`, so we take arr[0,0,...0,:,:]
slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]
return arr[slices] | [
"def",
"reduce_to_2d",
"(",
"arr",
")",
":",
"if",
"not",
"isinstance",
"(",
"arr",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"'reduce_to_2d requires a numpy.ndarray'",
")",
"ndims",
"=",
"len",
"(",
"arr",
".",
"shape",
")",
"if",
"ndims",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'reduce_to_2d requires an array of dimensionality >=2'",
")",
"# slice(None) is equivalent to `:`, so we take arr[0,0,...0,:,:]",
"slices",
"=",
"(",
"[",
"0",
"]",
"*",
"(",
"ndims",
"-",
"2",
")",
")",
"+",
"[",
"slice",
"(",
"None",
")",
",",
"slice",
"(",
"None",
")",
"]",
"return",
"arr",
"[",
"slices",
"]"
] | Given a np.ndarray with nDims > 2, reduce it to 2d.
It does this by selecting the zeroth coordinate for every dimension greater
than two.
Args:
arr: a numpy ndarray of dimension at least 2.
Returns:
A two-dimensional subarray from the input array.
Raises:
ValueError: If the argument is not a numpy ndarray, or the dimensionality
is too low. | [
"Given",
"a",
"np",
".",
"ndarray",
"with",
"nDims",
">",
"2",
"reduce",
"it",
"to",
"2d",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L128-L152 | train |
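For example, a 4-d input is reduced by pinning the leading indices at zero. One portability caveat: indexing with a Python list of slices (`arr[slices]`) was deprecated in NumPy 1.15 and errors on recent NumPy releases, where the equivalent selection is `arr[tuple(slices)]`:

```python
import numpy as np

arr = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
out = reduce_to_2d(arr)          # selects arr[0, 0, :, :]
print(out.shape)                 # (4, 5)
assert np.array_equal(out, arr[0, 0])
```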
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | text_array_to_html | def text_array_to_html(text_arr):
"""Take a numpy.ndarray containing strings, and convert it into html.
If the ndarray contains a single scalar string, that string is converted to
html via our sanitized markdown parser. If it contains an array of strings,
the strings are individually converted to html and then composed into a table
using make_table. If the array contains dimensionality greater than 2,
all but two of the dimensions are removed, and a warning message is prefixed
to the table.
Args:
text_arr: A numpy.ndarray containing strings.
Returns:
The array converted to html.
"""
if not text_arr.shape:
# It is a scalar. No need to put it in a table, just apply markdown
return plugin_util.markdown_to_safe_html(np.asscalar(text_arr))
warning = ''
if len(text_arr.shape) > 2:
warning = plugin_util.markdown_to_safe_html(WARNING_TEMPLATE
% len(text_arr.shape))
text_arr = reduce_to_2d(text_arr)
html_arr = [plugin_util.markdown_to_safe_html(x)
for x in text_arr.reshape(-1)]
html_arr = np.array(html_arr).reshape(text_arr.shape)
return warning + make_table(html_arr) | python | def text_array_to_html(text_arr):
"""Take a numpy.ndarray containing strings, and convert it into html.
If the ndarray contains a single scalar string, that string is converted to
html via our sanitized markdown parser. If it contains an array of strings,
the strings are individually converted to html and then composed into a table
using make_table. If the array contains dimensionality greater than 2,
all but two of the dimensions are removed, and a warning message is prefixed
to the table.
Args:
text_arr: A numpy.ndarray containing strings.
Returns:
The array converted to html.
"""
if not text_arr.shape:
# It is a scalar. No need to put it in a table, just apply markdown
return plugin_util.markdown_to_safe_html(np.asscalar(text_arr))
warning = ''
if len(text_arr.shape) > 2:
warning = plugin_util.markdown_to_safe_html(WARNING_TEMPLATE
% len(text_arr.shape))
text_arr = reduce_to_2d(text_arr)
html_arr = [plugin_util.markdown_to_safe_html(x)
for x in text_arr.reshape(-1)]
html_arr = np.array(html_arr).reshape(text_arr.shape)
return warning + make_table(html_arr) | [
"def",
"text_array_to_html",
"(",
"text_arr",
")",
":",
"if",
"not",
"text_arr",
".",
"shape",
":",
"# It is a scalar. No need to put it in a table, just apply markdown",
"return",
"plugin_util",
".",
"markdown_to_safe_html",
"(",
"np",
".",
"asscalar",
"(",
"text_arr",
")",
")",
"warning",
"=",
"''",
"if",
"len",
"(",
"text_arr",
".",
"shape",
")",
">",
"2",
":",
"warning",
"=",
"plugin_util",
".",
"markdown_to_safe_html",
"(",
"WARNING_TEMPLATE",
"%",
"len",
"(",
"text_arr",
".",
"shape",
")",
")",
"text_arr",
"=",
"reduce_to_2d",
"(",
"text_arr",
")",
"html_arr",
"=",
"[",
"plugin_util",
".",
"markdown_to_safe_html",
"(",
"x",
")",
"for",
"x",
"in",
"text_arr",
".",
"reshape",
"(",
"-",
"1",
")",
"]",
"html_arr",
"=",
"np",
".",
"array",
"(",
"html_arr",
")",
".",
"reshape",
"(",
"text_arr",
".",
"shape",
")",
"return",
"warning",
"+",
"make_table",
"(",
"html_arr",
")"
] | Take a numpy.ndarray containing strings, and convert it into html.
If the ndarray contains a single scalar string, that string is converted to
html via our sanitized markdown parser. If it contains an array of strings,
the strings are individually converted to html and then composed into a table
using make_table. If the array contains dimensionality greater than 2,
all but two of the dimensions are removed, and a warning message is prefixed
to the table.
Args:
text_arr: A numpy.ndarray containing strings.
Returns:
The array converted to html. | [
"Take",
"a",
"numpy",
".",
"ndarray",
"containing",
"strings",
"and",
"convert",
"it",
"into",
"html",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L155-L184 | train |
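A rough usage sketch for `text_array_to_html`; it needs TensorBoard's `plugin_util` markdown sanitizer on the import path. Note also that `np.asscalar`, used for the scalar branch above, was removed in NumPy 1.23 (`text_arr.item()` is the modern spelling):

```python
import numpy as np

# Scalar string: rendered straight through the markdown sanitizer.
print(text_array_to_html(np.array('**bold** text')))

# 2-d array: each cell sanitized, then laid out with make_table.
print(text_array_to_html(np.array([['a', 'b'], ['c', 'd']])))

# >2-d input: reduced to 2-d, with a warning message prepended.
print(text_array_to_html(np.array([[['deep', 'tensor']]])))
```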
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | process_string_tensor_event | def process_string_tensor_event(event):
"""Convert a TensorEvent into a JSON-compatible response."""
string_arr = tensor_util.make_ndarray(event.tensor_proto)
html = text_array_to_html(string_arr)
return {
'wall_time': event.wall_time,
'step': event.step,
'text': html,
} | python | def process_string_tensor_event(event):
"""Convert a TensorEvent into a JSON-compatible response."""
string_arr = tensor_util.make_ndarray(event.tensor_proto)
html = text_array_to_html(string_arr)
return {
'wall_time': event.wall_time,
'step': event.step,
'text': html,
} | [
"def",
"process_string_tensor_event",
"(",
"event",
")",
":",
"string_arr",
"=",
"tensor_util",
".",
"make_ndarray",
"(",
"event",
".",
"tensor_proto",
")",
"html",
"=",
"text_array_to_html",
"(",
"string_arr",
")",
"return",
"{",
"'wall_time'",
":",
"event",
".",
"wall_time",
",",
"'step'",
":",
"event",
".",
"step",
",",
"'text'",
":",
"html",
",",
"}"
] | Convert a TensorEvent into a JSON-compatible response. | [
"Convert",
"a",
"TensorEvent",
"into",
"a",
"JSON",
"-",
"compatible",
"response",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L187-L195 | train |
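A sketch of driving the converter by hand. `TensorEvent` is a TensorBoard-internal type, so a namedtuple stand-in with the same three fields is used here, and the proto comes from TensorFlow's `tf.make_tensor_proto`; treat this wiring as an assumption, not the plugin's real plumbing:

```python
import collections
import tensorflow as tf

# Hypothetical stand-in for TensorBoard's internal TensorEvent type.
FakeEvent = collections.namedtuple(
    'FakeEvent', ['wall_time', 'step', 'tensor_proto'])

proto = tf.make_tensor_proto(['hello *world*'])
event = FakeEvent(wall_time=1234.5, step=7, tensor_proto=proto)
print(process_string_tensor_event(event))
# {'wall_time': 1234.5, 'step': 7, 'text': '<table>...</table>'}
```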
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | TextPlugin.is_active | def is_active(self):
"""Determines whether this plugin is active.
This plugin is only active if TensorBoard sampled any text summaries.
Returns:
Whether this plugin is active.
"""
if not self._multiplexer:
return False
if self._index_cached is not None:
# If we already have computed the index, use it to determine whether
# the plugin should be active, and if so, return immediately.
if any(self._index_cached.values()):
return True
if self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME):
# Text data is present in the multiplexer. No need to further check for
# data stored via the outdated plugin assets method.
return True
# We haven't conclusively determined if the plugin should be active. Launch
# a thread to compute index_impl() and return False to avoid blocking.
self._maybe_launch_index_impl_thread()
return False | python | def is_active(self):
"""Determines whether this plugin is active.
This plugin is only active if TensorBoard sampled any text summaries.
Returns:
Whether this plugin is active.
"""
if not self._multiplexer:
return False
if self._index_cached is not None:
# If we already have computed the index, use it to determine whether
# the plugin should be active, and if so, return immediately.
if any(self._index_cached.values()):
return True
if self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME):
# Text data is present in the multiplexer. No need to further check for
# data stored via the outdated plugin assets method.
return True
# We haven't conclusively determined if the plugin should be active. Launch
# a thread to compute index_impl() and return False to avoid blocking.
self._maybe_launch_index_impl_thread()
return False | [
"def",
"is_active",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_multiplexer",
":",
"return",
"False",
"if",
"self",
".",
"_index_cached",
"is",
"not",
"None",
":",
"# If we already have computed the index, use it to determine whether",
"# the plugin should be active, and if so, return immediately.",
"if",
"any",
"(",
"self",
".",
"_index_cached",
".",
"values",
"(",
")",
")",
":",
"return",
"True",
"if",
"self",
".",
"_multiplexer",
".",
"PluginRunToTagToContent",
"(",
"metadata",
".",
"PLUGIN_NAME",
")",
":",
"# Text data is present in the multiplexer. No need to further check for",
"# data stored via the outdated plugin assets method.",
"return",
"True",
"# We haven't conclusively determined if the plugin should be active. Launch",
"# a thread to compute index_impl() and return False to avoid blocking.",
"self",
".",
"_maybe_launch_index_impl_thread",
"(",
")",
"return",
"False"
] | Determines whether this plugin is active.
This plugin is only active if TensorBoard sampled any text summaries.
Returns:
Whether this plugin is active. | [
"Determines",
"whether",
"this",
"plugin",
"is",
"active",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L224-L250 | train |
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | TextPlugin._maybe_launch_index_impl_thread | def _maybe_launch_index_impl_thread(self):
"""Attempts to launch a thread to compute index_impl().
This may not launch a new thread if one is already running to compute
index_impl(); in that case, this function is a no-op.
"""
# Try to acquire the lock for computing index_impl(), without blocking.
if self._index_impl_lock.acquire(False):
# We got the lock. Start the thread, which will unlock the lock when done.
self._index_impl_thread = threading.Thread(
target=self._async_index_impl,
name='TextPluginIndexImplThread')
self._index_impl_thread.start() | python | def _maybe_launch_index_impl_thread(self):
"""Attempts to launch a thread to compute index_impl().
This may not launch a new thread if one is already running to compute
index_impl(); in that case, this function is a no-op.
"""
# Try to acquire the lock for computing index_impl(), without blocking.
if self._index_impl_lock.acquire(False):
# We got the lock. Start the thread, which will unlock the lock when done.
self._index_impl_thread = threading.Thread(
target=self._async_index_impl,
name='TextPluginIndexImplThread')
self._index_impl_thread.start() | [
"def",
"_maybe_launch_index_impl_thread",
"(",
"self",
")",
":",
"# Try to acquire the lock for computing index_impl(), without blocking.",
"if",
"self",
".",
"_index_impl_lock",
".",
"acquire",
"(",
"False",
")",
":",
"# We got the lock. Start the thread, which will unlock the lock when done.",
"self",
".",
"_index_impl_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_async_index_impl",
",",
"name",
"=",
"'TextPluginIndexImplThread'",
")",
"self",
".",
"_index_impl_thread",
".",
"start",
"(",
")"
] | Attempts to launch a thread to compute index_impl().
This may not launch a new thread if one is already running to compute
index_impl(); in that case, this function is a no-op. | [
"Attempts",
"to",
"launch",
"a",
"thread",
"to",
"compute",
"index_impl",
"()",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L252-L264 | train |
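The guard is the classic launch-at-most-one-worker idiom built on `Lock.acquire(blocking=False)`. A standalone sketch of the same pattern, with the release folded into a try/finally rather than living in a separate method as it does in the plugin:

```python
import threading

_lock = threading.Lock()

def maybe_launch(work):
    # acquire(False) never blocks: True means we own the lock and may
    # start a worker; False means a worker is already running.
    if _lock.acquire(False):
        def runner():
            try:
                work()
            finally:
                _lock.release()  # allow a later call to relaunch
        threading.Thread(target=runner, name='WorkThread').start()
```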
tensorflow/tensorboard | tensorboard/plugins/text/text_plugin.py | TextPlugin._async_index_impl | def _async_index_impl(self):
"""Computes index_impl() asynchronously on a separate thread."""
start = time.time()
logger.info('TextPlugin computing index_impl() in a new thread')
self._index_cached = self.index_impl()
self._index_impl_thread = None
self._index_impl_lock.release()
elapsed = time.time() - start
logger.info(
'TextPlugin index_impl() thread ending after %0.3f sec', elapsed) | python | def _async_index_impl(self):
"""Computes index_impl() asynchronously on a separate thread."""
start = time.time()
logger.info('TextPlugin computing index_impl() in a new thread')
self._index_cached = self.index_impl()
self._index_impl_thread = None
self._index_impl_lock.release()
elapsed = time.time() - start
logger.info(
'TextPlugin index_impl() thread ending after %0.3f sec', elapsed) | [
"def",
"_async_index_impl",
"(",
"self",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"logger",
".",
"info",
"(",
"'TextPlugin computing index_impl() in a new thread'",
")",
"self",
".",
"_index_cached",
"=",
"self",
".",
"index_impl",
"(",
")",
"self",
".",
"_index_impl_thread",
"=",
"None",
"self",
".",
"_index_impl_lock",
".",
"release",
"(",
")",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"logger",
".",
"info",
"(",
"'TextPlugin index_impl() thread ending after %0.3f sec'",
",",
"elapsed",
")"
] | Computes index_impl() asynchronously on a separate thread. | [
"Computes",
"index_impl",
"()",
"asynchronously",
"on",
"a",
"separate",
"thread",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/text_plugin.py#L266-L275 | train |
tensorflow/tensorboard | tensorboard/plugins/pr_curve/metadata.py | create_summary_metadata | def create_summary_metadata(display_name, description, num_thresholds):
"""Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data.
Arguments:
display_name: The display name used in TensorBoard.
description: The description to show in TensorBoard.
num_thresholds: The number of thresholds to use for PR curves.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object.
"""
pr_curve_plugin_data = plugin_data_pb2.PrCurvePluginData(
version=PROTO_VERSION, num_thresholds=num_thresholds)
content = pr_curve_plugin_data.SerializeToString()
return summary_pb2.SummaryMetadata(
display_name=display_name,
summary_description=description,
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME,
content=content)) | python | def create_summary_metadata(display_name, description, num_thresholds):
"""Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data.
Arguments:
display_name: The display name used in TensorBoard.
description: The description to show in TensorBoard.
num_thresholds: The number of thresholds to use for PR curves.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object.
"""
pr_curve_plugin_data = plugin_data_pb2.PrCurvePluginData(
version=PROTO_VERSION, num_thresholds=num_thresholds)
content = pr_curve_plugin_data.SerializeToString()
return summary_pb2.SummaryMetadata(
display_name=display_name,
summary_description=description,
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME,
content=content)) | [
"def",
"create_summary_metadata",
"(",
"display_name",
",",
"description",
",",
"num_thresholds",
")",
":",
"pr_curve_plugin_data",
"=",
"plugin_data_pb2",
".",
"PrCurvePluginData",
"(",
"version",
"=",
"PROTO_VERSION",
",",
"num_thresholds",
"=",
"num_thresholds",
")",
"content",
"=",
"pr_curve_plugin_data",
".",
"SerializeToString",
"(",
")",
"return",
"summary_pb2",
".",
"SummaryMetadata",
"(",
"display_name",
"=",
"display_name",
",",
"summary_description",
"=",
"description",
",",
"plugin_data",
"=",
"summary_pb2",
".",
"SummaryMetadata",
".",
"PluginData",
"(",
"plugin_name",
"=",
"PLUGIN_NAME",
",",
"content",
"=",
"content",
")",
")"
] | Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data.
Arguments:
display_name: The display name used in TensorBoard.
description: The description to show in TensorBoard.
num_thresholds: The number of thresholds to use for PR curves.
Returns:
A `summary_pb2.SummaryMetadata` protobuf object. | [
"Create",
"a",
"summary_pb2",
".",
"SummaryMetadata",
"proto",
"for",
"pr_curves",
"plugin",
"data",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/metadata.py#L41-L60 | train |
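Usage is a single call; the plugin payload lands, serialized, in `plugin_data.content`:

```python
metadata = create_summary_metadata(
    display_name='PR curve',
    description='Precision/recall at 201 thresholds',
    num_thresholds=201)
print(metadata.display_name)             # 'PR curve'
print(metadata.plugin_data.plugin_name)  # the pr_curves plugin name
raw = metadata.plugin_data.content       # serialized PrCurvePluginData bytes
```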
tensorflow/tensorboard | tensorboard/plugins/pr_curve/metadata.py | parse_plugin_metadata | def parse_plugin_metadata(content):
"""Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvesPlugin` protobuf object.
"""
if not isinstance(content, bytes):
raise TypeError('Content type must be bytes')
result = plugin_data_pb2.PrCurvePluginData.FromString(content)
if result.version == 0:
return result
else:
logger.warn(
'Unknown metadata version: %s. The latest version known to '
'this build of TensorBoard is %s; perhaps a newer build is '
'available?', result.version, PROTO_VERSION)
return result | python | def parse_plugin_metadata(content):
"""Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvePluginData` protobuf object.
"""
if not isinstance(content, bytes):
raise TypeError('Content type must be bytes')
result = plugin_data_pb2.PrCurvePluginData.FromString(content)
if result.version == 0:
return result
else:
logger.warn(
'Unknown metadata version: %s. The latest version known to '
'this build of TensorBoard is %s; perhaps a newer build is '
'available?', result.version, PROTO_VERSION)
return result | [
"def",
"parse_plugin_metadata",
"(",
"content",
")",
":",
"if",
"not",
"isinstance",
"(",
"content",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"'Content type must be bytes'",
")",
"result",
"=",
"plugin_data_pb2",
".",
"PrCurvePluginData",
".",
"FromString",
"(",
"content",
")",
"if",
"result",
".",
"version",
"==",
"0",
":",
"return",
"result",
"else",
":",
"logger",
".",
"warn",
"(",
"'Unknown metadata version: %s. The latest version known to '",
"'this build of TensorBoard is %s; perhaps a newer build is '",
"'available?'",
",",
"result",
".",
"version",
",",
"PROTO_VERSION",
")",
"return",
"result"
] | Parse summary metadata to a Python object.
Arguments:
content: The `content` field of a `SummaryMetadata` proto
corresponding to the pr_curves plugin.
Returns:
A `PrCurvePluginData` protobuf object. | [
"Parse",
"summary",
"metadata",
"to",
"a",
"Python",
"object",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/metadata.py#L63-L83 | train |
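The two functions round-trip: the `content` produced by `create_summary_metadata` parses back into a proto carrying the original threshold count, and non-bytes input is rejected up front:

```python
metadata = create_summary_metadata('PR curve', 'demo', num_thresholds=201)
plugin_data = parse_plugin_metadata(metadata.plugin_data.content)
print(plugin_data.num_thresholds)  # 201

try:
    parse_plugin_metadata(u'not-bytes')
except TypeError as e:
    print(e)  # Content type must be bytes
```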
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | get_field_to_observations_map | def get_field_to_observations_map(generator, query_for_tag=''):
"""Return a field to `Observations` dict for the event generator.
Args:
generator: A generator over event protos.
query_for_tag: A string; if specified, observations are only created for
events with this tag name.
Returns:
A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
"""
def increment(stat, event, tag=''):
assert stat in TRACKED_FIELDS
field_to_obs[stat].append(Observation(step=event.step,
wall_time=event.wall_time,
tag=tag)._asdict())
field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])
for event in generator:
## Process the event
if event.HasField('graph_def') and (not query_for_tag):
increment('graph', event)
if event.HasField('session_log') and (not query_for_tag):
status = event.session_log.status
if status == event_pb2.SessionLog.START:
increment('sessionlog:start', event)
elif status == event_pb2.SessionLog.STOP:
increment('sessionlog:stop', event)
elif status == event_pb2.SessionLog.CHECKPOINT:
increment('sessionlog:checkpoint', event)
elif event.HasField('summary'):
for value in event.summary.value:
if query_for_tag and value.tag != query_for_tag:
continue
for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():
if value.HasField(proto_name):
increment(display_name, event, value.tag)
return field_to_obs | python | def get_field_to_observations_map(generator, query_for_tag=''):
"""Return a field to `Observations` dict for the event generator.
Args:
generator: A generator over event protos.
query_for_tag: A string; if specified, observations are only created for
events with this tag name.
Returns:
A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
"""
def increment(stat, event, tag=''):
assert stat in TRACKED_FIELDS
field_to_obs[stat].append(Observation(step=event.step,
wall_time=event.wall_time,
tag=tag)._asdict())
field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])
for event in generator:
## Process the event
if event.HasField('graph_def') and (not query_for_tag):
increment('graph', event)
if event.HasField('session_log') and (not query_for_tag):
status = event.session_log.status
if status == event_pb2.SessionLog.START:
increment('sessionlog:start', event)
elif status == event_pb2.SessionLog.STOP:
increment('sessionlog:stop', event)
elif status == event_pb2.SessionLog.CHECKPOINT:
increment('sessionlog:checkpoint', event)
elif event.HasField('summary'):
for value in event.summary.value:
if query_for_tag and value.tag != query_for_tag:
continue
for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():
if value.HasField(proto_name):
increment(display_name, event, value.tag)
return field_to_obs | [
"def",
"get_field_to_observations_map",
"(",
"generator",
",",
"query_for_tag",
"=",
"''",
")",
":",
"def",
"increment",
"(",
"stat",
",",
"event",
",",
"tag",
"=",
"''",
")",
":",
"assert",
"stat",
"in",
"TRACKED_FIELDS",
"field_to_obs",
"[",
"stat",
"]",
".",
"append",
"(",
"Observation",
"(",
"step",
"=",
"event",
".",
"step",
",",
"wall_time",
"=",
"event",
".",
"wall_time",
",",
"tag",
"=",
"tag",
")",
".",
"_asdict",
"(",
")",
")",
"field_to_obs",
"=",
"dict",
"(",
"[",
"(",
"t",
",",
"[",
"]",
")",
"for",
"t",
"in",
"TRACKED_FIELDS",
"]",
")",
"for",
"event",
"in",
"generator",
":",
"## Process the event",
"if",
"event",
".",
"HasField",
"(",
"'graph_def'",
")",
"and",
"(",
"not",
"query_for_tag",
")",
":",
"increment",
"(",
"'graph'",
",",
"event",
")",
"if",
"event",
".",
"HasField",
"(",
"'session_log'",
")",
"and",
"(",
"not",
"query_for_tag",
")",
":",
"status",
"=",
"event",
".",
"session_log",
".",
"status",
"if",
"status",
"==",
"event_pb2",
".",
"SessionLog",
".",
"START",
":",
"increment",
"(",
"'sessionlog:start'",
",",
"event",
")",
"elif",
"status",
"==",
"event_pb2",
".",
"SessionLog",
".",
"STOP",
":",
"increment",
"(",
"'sessionlog:stop'",
",",
"event",
")",
"elif",
"status",
"==",
"event_pb2",
".",
"SessionLog",
".",
"CHECKPOINT",
":",
"increment",
"(",
"'sessionlog:checkpoint'",
",",
"event",
")",
"elif",
"event",
".",
"HasField",
"(",
"'summary'",
")",
":",
"for",
"value",
"in",
"event",
".",
"summary",
".",
"value",
":",
"if",
"query_for_tag",
"and",
"value",
".",
"tag",
"!=",
"query_for_tag",
":",
"continue",
"for",
"proto_name",
",",
"display_name",
"in",
"SUMMARY_TYPE_TO_FIELD",
".",
"items",
"(",
")",
":",
"if",
"value",
".",
"HasField",
"(",
"proto_name",
")",
":",
"increment",
"(",
"display_name",
",",
"event",
",",
"value",
".",
"tag",
")",
"return",
"field_to_obs"
] | Return a field to `Observations` dict for the event generator.
Args:
generator: A generator over event protos.
query_for_tag: A string; if specified, observations are only created for
events with this tag name.
Returns:
A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list. | [
"Return",
"a",
"field",
"to",
"Observations",
"dict",
"for",
"the",
"event",
"generator",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L168-L208 | train |
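A sketch of feeding the generator with hand-built event protos. It assumes TensorBoard's bundled `event_pb2` (the exact import path varies by version) and that `SUMMARY_TYPE_TO_FIELD` maps the old-style `simple_value` summary field to a display name such as 'scalars':

```python
from tensorboard.compat.proto import event_pb2  # import path is an assumption

def make_scalar_event(step, tag, value):
    event = event_pb2.Event(step=step, wall_time=float(step))
    event.summary.value.add(tag=tag, simple_value=value)
    return event

events = [make_scalar_event(s, 'loss', 0.1 * s) for s in range(3)]
field_to_obs = get_field_to_observations_map(iter(events))
# Each matching event becomes a dict with 'step', 'wall_time', 'tag' keys.
print(len(field_to_obs.get('scalars', [])))  # 3, under the assumed mapping
```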
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | get_unique_tags | def get_unique_tags(field_to_obs):
"""Returns a dictionary of tags that a user could query over.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict that maps keys in `TAG_FIELDS` to a list of string tags present in
the event files. If the dict does not have any observations of the type,
maps to an empty list so that we can render this to console.
"""
return {field: sorted(set([x.get('tag', '') for x in observations]))
for field, observations in field_to_obs.items()
if field in TAG_FIELDS} | python | def get_unique_tags(field_to_obs):
"""Returns a dictionary of tags that a user could query over.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict that maps keys in `TAG_FIELDS` to a list of string tags present in
the event files. If the dict does not have any observations of the type,
maps to an empty list so that we can render this to console.
"""
return {field: sorted(set([x.get('tag', '') for x in observations]))
for field, observations in field_to_obs.items()
if field in TAG_FIELDS} | [
"def",
"get_unique_tags",
"(",
"field_to_obs",
")",
":",
"return",
"{",
"field",
":",
"sorted",
"(",
"set",
"(",
"[",
"x",
".",
"get",
"(",
"'tag'",
",",
"''",
")",
"for",
"x",
"in",
"observations",
"]",
")",
")",
"for",
"field",
",",
"observations",
"in",
"field_to_obs",
".",
"items",
"(",
")",
"if",
"field",
"in",
"TAG_FIELDS",
"}"
] | Returns a dictionary of tags that a user could query over.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict that maps keys in `TAG_FIELDS` to a list of string tags present in
the event files. If the dict does not have any observations of the type,
maps to an empty list so that we can render this to console. | [
"Returns",
"a",
"dictionary",
"of",
"tags",
"that",
"a",
"user",
"could",
"query",
"over",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L211-L224 | train |
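For instance, given observations shaped like those produced above (and assuming 'scalars' is among the module's `TAG_FIELDS` while 'graph' is not):

```python
field_to_obs = {
    'scalars': [{'step': 0, 'wall_time': 0.0, 'tag': 'loss'},
                {'step': 1, 'wall_time': 1.0, 'tag': 'loss'},
                {'step': 0, 'wall_time': 0.0, 'tag': 'accuracy'}],
    'graph': [{'step': 0, 'wall_time': 0.0, 'tag': ''}],
}
print(get_unique_tags(field_to_obs))
# {'scalars': ['accuracy', 'loss']}  -- duplicates dropped and sorted;
# fields outside TAG_FIELDS (here, 'graph') are omitted.
```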
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | print_dict | def print_dict(d, show_missing=True):
"""Prints a shallow dict to console.
Args:
d: Dict to print.
show_missing: Whether to show keys with empty values.
"""
for k, v in sorted(d.items()):
if (not v) and show_missing:
# No instances of the key, so print missing symbol.
print('{} -'.format(k))
elif isinstance(v, list):
# Value is a list, so print each item of the list.
print(k)
for item in v:
print(' {}'.format(item))
elif isinstance(v, dict):
# Value is a dict, so print each (key, value) pair of the dict.
print(k)
for kk, vv in sorted(v.items()):
print(' {:<20} {}'.format(kk, vv)) | python | def print_dict(d, show_missing=True):
"""Prints a shallow dict to console.
Args:
d: Dict to print.
show_missing: Whether to show keys with empty values.
"""
for k, v in sorted(d.items()):
if (not v) and show_missing:
# No instances of the key, so print missing symbol.
print('{} -'.format(k))
elif isinstance(v, list):
# Value is a list, so print each item of the list.
print(k)
for item in v:
print(' {}'.format(item))
elif isinstance(v, dict):
# Value is a dict, so print each (key, value) pair of the dict.
print(k)
for kk, vv in sorted(v.items()):
print(' {:<20} {}'.format(kk, vv)) | [
"def",
"print_dict",
"(",
"d",
",",
"show_missing",
"=",
"True",
")",
":",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"d",
".",
"items",
"(",
")",
")",
":",
"if",
"(",
"not",
"v",
")",
"and",
"show_missing",
":",
"# No instances of the key, so print missing symbol.",
"print",
"(",
"'{} -'",
".",
"format",
"(",
"k",
")",
")",
"elif",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"# Value is a list, so print each item of the list.",
"print",
"(",
"k",
")",
"for",
"item",
"in",
"v",
":",
"print",
"(",
"' {}'",
".",
"format",
"(",
"item",
")",
")",
"elif",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"# Value is a dict, so print each (key, value) pair of the dict.",
"print",
"(",
"k",
")",
"for",
"kk",
",",
"vv",
"in",
"sorted",
"(",
"v",
".",
"items",
"(",
")",
")",
":",
"print",
"(",
"' {:<20} {}'",
".",
"format",
"(",
"kk",
",",
"vv",
")",
")"
] | Prints a shallow dict to console.
Args:
d: Dict to print.
show_missing: Whether to show keys with empty values. | [
"Prints",
"a",
"shallow",
"dict",
"to",
"console",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L227-L247 | train |
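Each value type gets its own layout, so a single call covers all three branches:

```python
print_dict({
    'graph': None,                       # falsy -> printed as "graph -"
    'tags': ['loss', 'accuracy'],        # list -> one indented line per item
    'scalars': {'num_steps': 3,          # dict -> aligned key/value pairs
                'first_step': 0},
})
# graph -
# scalars
#   first_step           0
#   num_steps            3
# tags
#   loss
#   accuracy
```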
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | get_dict_to_print | def get_dict_to_print(field_to_obs):
"""Transform the field-to-obs mapping into a printable dictionary.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict with the keys and values to print to console.
"""
def compressed_steps(steps):
return {'num_steps': len(set(steps)),
'min_step': min(steps),
'max_step': max(steps),
'last_step': steps[-1],
'first_step': steps[0],
'outoforder_steps': get_out_of_order(steps)}
def full_steps(steps):
return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}
output = {}
for field, observations in field_to_obs.items():
if not observations:
output[field] = None
continue
steps = [x['step'] for x in observations]
if field in SHORT_FIELDS:
output[field] = compressed_steps(steps)
if field in LONG_FIELDS:
output[field] = full_steps(steps)
return output | python | def get_dict_to_print(field_to_obs):
"""Transform the field-to-obs mapping into a printable dictionary.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict with the keys and values to print to console.
"""
def compressed_steps(steps):
return {'num_steps': len(set(steps)),
'min_step': min(steps),
'max_step': max(steps),
'last_step': steps[-1],
'first_step': steps[0],
'outoforder_steps': get_out_of_order(steps)}
def full_steps(steps):
return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}
output = {}
for field, observations in field_to_obs.items():
if not observations:
output[field] = None
continue
steps = [x['step'] for x in observations]
if field in SHORT_FIELDS:
output[field] = compressed_steps(steps)
if field in LONG_FIELDS:
output[field] = full_steps(steps)
return output | [
"def",
"get_dict_to_print",
"(",
"field_to_obs",
")",
":",
"def",
"compressed_steps",
"(",
"steps",
")",
":",
"return",
"{",
"'num_steps'",
":",
"len",
"(",
"set",
"(",
"steps",
")",
")",
",",
"'min_step'",
":",
"min",
"(",
"steps",
")",
",",
"'max_step'",
":",
"max",
"(",
"steps",
")",
",",
"'last_step'",
":",
"steps",
"[",
"-",
"1",
"]",
",",
"'first_step'",
":",
"steps",
"[",
"0",
"]",
",",
"'outoforder_steps'",
":",
"get_out_of_order",
"(",
"steps",
")",
"}",
"def",
"full_steps",
"(",
"steps",
")",
":",
"return",
"{",
"'steps'",
":",
"steps",
",",
"'outoforder_steps'",
":",
"get_out_of_order",
"(",
"steps",
")",
"}",
"output",
"=",
"{",
"}",
"for",
"field",
",",
"observations",
"in",
"field_to_obs",
".",
"items",
"(",
")",
":",
"if",
"not",
"observations",
":",
"output",
"[",
"field",
"]",
"=",
"None",
"continue",
"steps",
"=",
"[",
"x",
"[",
"'step'",
"]",
"for",
"x",
"in",
"observations",
"]",
"if",
"field",
"in",
"SHORT_FIELDS",
":",
"output",
"[",
"field",
"]",
"=",
"compressed_steps",
"(",
"steps",
")",
"if",
"field",
"in",
"LONG_FIELDS",
":",
"output",
"[",
"field",
"]",
"=",
"full_steps",
"(",
"steps",
")",
"return",
"output"
] | Transform the field-to-obs mapping into a printable dictionary.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict with the keys and values to print to console. | [
"Transform",
"the",
"field",
"-",
"to",
"-",
"obs",
"mapping",
"into",
"a",
"printable",
"dictionary",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L250-L283 | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | get_out_of_order | def get_out_of_order(list_of_numbers):
"""Returns elements that break the monotonically non-decreasing trend.
This is used to find instances of global step values that are "out-of-order",
which may trigger TensorBoard event discarding logic.
Args:
list_of_numbers: A list of numbers.
Returns:
A list of tuples in which each tuple contains two adjacent elements, where
the second element is lower than the first.
"""
# TODO: Consider changing this to only check for out-of-order
# steps within a particular tag.
result = []
# pylint: disable=consider-using-enumerate
for i in range(len(list_of_numbers)):
if i == 0:
continue
if list_of_numbers[i] < list_of_numbers[i - 1]:
result.append((list_of_numbers[i - 1], list_of_numbers[i]))
return result | python | def get_out_of_order(list_of_numbers):
"""Returns elements that break the monotonically non-decreasing trend.
This is used to find instances of global step values that are "out-of-order",
which may trigger TensorBoard event discarding logic.
Args:
list_of_numbers: A list of numbers.
Returns:
A list of tuples in which each tuple contains two adjacent elements, where
the second element is lower than the first.
"""
# TODO: Consider changing this to only check for out-of-order
# steps within a particular tag.
result = []
# pylint: disable=consider-using-enumerate
for i in range(len(list_of_numbers)):
if i == 0:
continue
if list_of_numbers[i] < list_of_numbers[i - 1]:
result.append((list_of_numbers[i - 1], list_of_numbers[i]))
return result | [
"def",
"get_out_of_order",
"(",
"list_of_numbers",
")",
":",
"# TODO: Consider changing this to only check for out-of-order",
"# steps within a particular tag.",
"result",
"=",
"[",
"]",
"# pylint: disable=consider-using-enumerate",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"list_of_numbers",
")",
")",
":",
"if",
"i",
"==",
"0",
":",
"continue",
"if",
"list_of_numbers",
"[",
"i",
"]",
"<",
"list_of_numbers",
"[",
"i",
"-",
"1",
"]",
":",
"result",
".",
"append",
"(",
"(",
"list_of_numbers",
"[",
"i",
"-",
"1",
"]",
",",
"list_of_numbers",
"[",
"i",
"]",
")",
")",
"return",
"result"
] | Returns elements that break the monotonically non-decreasing trend.
This is used to find instances of global step values that are "out-of-order",
which may trigger TensorBoard event discarding logic.
Args:
list_of_numbers: A list of numbers.
Returns:
A list of tuples in which each tuple contains two adjacent elements, where
the second element is lower than the first. | [
"Returns",
"elements",
"that",
"break",
"the",
"monotonically",
"non",
"-",
"decreasing",
"trend",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L286-L308 | train |
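A few concrete calls make the contract clear: equal consecutive steps count as in order, and every descending adjacent pair is reported.

```python
print(get_out_of_order([0, 1, 2, 1, 3]))  # [(2, 1)]
print(get_out_of_order([5, 5, 6]))        # []  (non-decreasing is fine)
print(get_out_of_order([3, 2, 1]))        # [(3, 2), (2, 1)]
```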
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | generators_from_logdir | def generators_from_logdir(logdir):
"""Returns a list of event generators for subdirectories with event files.
The number of generators returned should equal the number of directories
within logdir that contain event files. If only logdir contains event files,
returns a list of length one.
Args:
logdir: A log directory that contains event files.
Returns:
List of event generators for each subdirectory with event files.
"""
subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
generators = [
itertools.chain(*[
generator_from_event_file(os.path.join(subdir, f))
for f in tf.io.gfile.listdir(subdir)
if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
]) for subdir in subdirs
]
return generators | python | def generators_from_logdir(logdir):
"""Returns a list of event generators for subdirectories with event files.
The number of generators returned should equal the number of directories
within logdir that contain event files. If only logdir contains event files,
returns a list of length one.
Args:
logdir: A log directory that contains event files.
Returns:
List of event generators for each subdirectory with event files.
"""
subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
generators = [
itertools.chain(*[
generator_from_event_file(os.path.join(subdir, f))
for f in tf.io.gfile.listdir(subdir)
if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
]) for subdir in subdirs
]
return generators | [
"def",
"generators_from_logdir",
"(",
"logdir",
")",
":",
"subdirs",
"=",
"io_wrapper",
".",
"GetLogdirSubdirectories",
"(",
"logdir",
")",
"generators",
"=",
"[",
"itertools",
".",
"chain",
"(",
"*",
"[",
"generator_from_event_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"subdir",
",",
"f",
")",
")",
"for",
"f",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"subdir",
")",
"if",
"io_wrapper",
".",
"IsTensorFlowEventsFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"subdir",
",",
"f",
")",
")",
"]",
")",
"for",
"subdir",
"in",
"subdirs",
"]",
"return",
"generators"
] | Returns a list of event generators for subdirectories with event files.
The number of generators returned should equal the number of directories
within logdir that contain event files. If only logdir contains event files,
returns a list of length one.
Args:
logdir: A log directory that contains event files.
Returns:
List of event generators for each subdirectory with event files. | [
"Returns",
"a",
"list",
"of",
"event",
"generators",
"for",
"subdirectories",
"with",
"event",
"files",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L311-L332 | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | get_inspection_units | def get_inspection_units(logdir='', event_file='', tag=''):
"""Returns a list of InspectionUnit objects given either logdir or event_file.
If logdir is given, the number of InspectionUnits should equal the
number of directories or subdirectories that contain event files.
If event_file is given, the number of InspectionUnits should be 1.
Args:
logdir: A log directory that contains event files.
event_file: Or, a particular event file path.
tag: An optional tag name to query for.
Returns:
A list of InspectionUnit objects.
"""
if logdir:
subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
inspection_units = []
for subdir in subdirs:
generator = itertools.chain(*[
generator_from_event_file(os.path.join(subdir, f))
for f in tf.io.gfile.listdir(subdir)
if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
])
inspection_units.append(InspectionUnit(
name=subdir,
generator=generator,
field_to_obs=get_field_to_observations_map(generator, tag)))
if inspection_units:
print('Found event files in:\n{}\n'.format('\n'.join(
[u.name for u in inspection_units])))
elif io_wrapper.IsTensorFlowEventsFile(logdir):
print(
'It seems that {} may be an event file instead of a logdir. If this '
'is the case, use --event_file instead of --logdir to pass '
'it in.'.format(logdir))
else:
print('No event files found within logdir {}'.format(logdir))
return inspection_units
elif event_file:
generator = generator_from_event_file(event_file)
return [InspectionUnit(
name=event_file,
generator=generator,
field_to_obs=get_field_to_observations_map(generator, tag))]
return [] | python | def get_inspection_units(logdir='', event_file='', tag=''):
"""Returns a list of InspectionUnit objects given either logdir or event_file.
If logdir is given, the number of InspectionUnits should equal the
number of directories or subdirectories that contain event files.
If event_file is given, the number of InspectionUnits should be 1.
Args:
logdir: A log directory that contains event files.
event_file: Or, a particular event file path.
tag: An optional tag name to query for.
Returns:
A list of InspectionUnit objects.
"""
if logdir:
subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
inspection_units = []
for subdir in subdirs:
generator = itertools.chain(*[
generator_from_event_file(os.path.join(subdir, f))
for f in tf.io.gfile.listdir(subdir)
if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
])
inspection_units.append(InspectionUnit(
name=subdir,
generator=generator,
field_to_obs=get_field_to_observations_map(generator, tag)))
if inspection_units:
print('Found event files in:\n{}\n'.format('\n'.join(
[u.name for u in inspection_units])))
elif io_wrapper.IsTensorFlowEventsFile(logdir):
print(
'It seems that {} may be an event file instead of a logdir. If this '
'is the case, use --event_file instead of --logdir to pass '
'it in.'.format(logdir))
else:
print('No event files found within logdir {}'.format(logdir))
return inspection_units
elif event_file:
generator = generator_from_event_file(event_file)
return [InspectionUnit(
name=event_file,
generator=generator,
field_to_obs=get_field_to_observations_map(generator, tag))]
return [] | [
"def",
"get_inspection_units",
"(",
"logdir",
"=",
"''",
",",
"event_file",
"=",
"''",
",",
"tag",
"=",
"''",
")",
":",
"if",
"logdir",
":",
"subdirs",
"=",
"io_wrapper",
".",
"GetLogdirSubdirectories",
"(",
"logdir",
")",
"inspection_units",
"=",
"[",
"]",
"for",
"subdir",
"in",
"subdirs",
":",
"generator",
"=",
"itertools",
".",
"chain",
"(",
"*",
"[",
"generator_from_event_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"subdir",
",",
"f",
")",
")",
"for",
"f",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"subdir",
")",
"if",
"io_wrapper",
".",
"IsTensorFlowEventsFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"subdir",
",",
"f",
")",
")",
"]",
")",
"inspection_units",
".",
"append",
"(",
"InspectionUnit",
"(",
"name",
"=",
"subdir",
",",
"generator",
"=",
"generator",
",",
"field_to_obs",
"=",
"get_field_to_observations_map",
"(",
"generator",
",",
"tag",
")",
")",
")",
"if",
"inspection_units",
":",
"print",
"(",
"'Found event files in:\\n{}\\n'",
".",
"format",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"u",
".",
"name",
"for",
"u",
"in",
"inspection_units",
"]",
")",
")",
")",
"elif",
"io_wrapper",
".",
"IsTensorFlowEventsFile",
"(",
"logdir",
")",
":",
"print",
"(",
"'It seems that {} may be an event file instead of a logdir. If this '",
"'is the case, use --event_file instead of --logdir to pass '",
"'it in.'",
".",
"format",
"(",
"logdir",
")",
")",
"else",
":",
"print",
"(",
"'No event files found within logdir {}'",
".",
"format",
"(",
"logdir",
")",
")",
"return",
"inspection_units",
"elif",
"event_file",
":",
"generator",
"=",
"generator_from_event_file",
"(",
"event_file",
")",
"return",
"[",
"InspectionUnit",
"(",
"name",
"=",
"event_file",
",",
"generator",
"=",
"generator",
",",
"field_to_obs",
"=",
"get_field_to_observations_map",
"(",
"generator",
",",
"tag",
")",
")",
"]",
"return",
"[",
"]"
] | Returns a list of InspectionUnit objects given either logdir or event_file.
If logdir is given, the number of InspectionUnits should equal the
number of directories or subdirectories that contain event files.
If event_file is given, the number of InspectionUnits should be 1.
Args:
logdir: A log directory that contains event files.
event_file: Alternatively, a particular event file path.
tag: An optional tag name to query for.
Returns:
A list of InspectionUnit objects. | [
"Returns",
"a",
"list",
"of",
"InspectionUnit",
"objects",
"given",
"either",
"logdir",
"or",
"event_file",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L340-L386 | train |
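A usage sketch for `get_inspection_units`, offered with caveats: it assumes the module is importable at the path shown in this row's URL, and the logdir value is a placeholder.

```python
from tensorboard.backend.event_processing import event_file_inspector as efi

units = efi.get_inspection_units(logdir='/tmp/my_logs')  # placeholder path
for unit in units:
    # Each InspectionUnit pairs a subdirectory name with an event generator
    # and the field -> observations map built over that generator.
    print(unit.name, sorted(unit.field_to_obs.keys()))
```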
tensorflow/tensorboard | tensorboard/backend/event_processing/event_file_inspector.py | inspect | def inspect(logdir='', event_file='', tag=''):
"""Main function for inspector that prints out a digest of event files.
Args:
logdir: A log directory that contains event files.
event_file: Alternatively, a particular event file path.
tag: An optional tag name to query for.
Raises:
ValueError: If neither logdir nor event_file is given, or both are given.
"""
print(PRINT_SEPARATOR +
'Processing event files... (this can take a few minutes)\n' +
PRINT_SEPARATOR)
inspection_units = get_inspection_units(logdir, event_file, tag)
for unit in inspection_units:
if tag:
print('Event statistics for tag {} in {}:'.format(tag, unit.name))
else:
# If the user is not inspecting a particular tag, also print the list of
# all available tags that they can query.
print('These tags are in {}:'.format(unit.name))
print_dict(get_unique_tags(unit.field_to_obs))
print(PRINT_SEPARATOR)
print('Event statistics for {}:'.format(unit.name))
print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))
print(PRINT_SEPARATOR) | python | def inspect(logdir='', event_file='', tag=''):
"""Main function for inspector that prints out a digest of event files.
Args:
logdir: A log directory that contains event files.
event_file: Alternatively, a particular event file path.
tag: An optional tag name to query for.
Raises:
ValueError: If neither logdir nor event_file is given, or both are given.
"""
print(PRINT_SEPARATOR +
'Processing event files... (this can take a few minutes)\n' +
PRINT_SEPARATOR)
inspection_units = get_inspection_units(logdir, event_file, tag)
for unit in inspection_units:
if tag:
print('Event statistics for tag {} in {}:'.format(tag, unit.name))
else:
# If the user is not inspecting a particular tag, also print the list of
# all available tags that they can query.
print('These tags are in {}:'.format(unit.name))
print_dict(get_unique_tags(unit.field_to_obs))
print(PRINT_SEPARATOR)
print('Event statistics for {}:'.format(unit.name))
print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))
print(PRINT_SEPARATOR) | [
"def",
"inspect",
"(",
"logdir",
"=",
"''",
",",
"event_file",
"=",
"''",
",",
"tag",
"=",
"''",
")",
":",
"print",
"(",
"PRINT_SEPARATOR",
"+",
"'Processing event files... (this can take a few minutes)\\n'",
"+",
"PRINT_SEPARATOR",
")",
"inspection_units",
"=",
"get_inspection_units",
"(",
"logdir",
",",
"event_file",
",",
"tag",
")",
"for",
"unit",
"in",
"inspection_units",
":",
"if",
"tag",
":",
"print",
"(",
"'Event statistics for tag {} in {}:'",
".",
"format",
"(",
"tag",
",",
"unit",
".",
"name",
")",
")",
"else",
":",
"# If the user is not inspecting a particular tag, also print the list of",
"# all available tags that they can query.",
"print",
"(",
"'These tags are in {}:'",
".",
"format",
"(",
"unit",
".",
"name",
")",
")",
"print_dict",
"(",
"get_unique_tags",
"(",
"unit",
".",
"field_to_obs",
")",
")",
"print",
"(",
"PRINT_SEPARATOR",
")",
"print",
"(",
"'Event statistics for {}:'",
".",
"format",
"(",
"unit",
".",
"name",
")",
")",
"print_dict",
"(",
"get_dict_to_print",
"(",
"unit",
".",
"field_to_obs",
")",
",",
"show_missing",
"=",
"(",
"not",
"tag",
")",
")",
"print",
"(",
"PRINT_SEPARATOR",
")"
] | Main function for inspector that prints out a digest of event files.
Args:
logdir: A log directory that contains event files.
event_file: Alternatively, a particular event file path.
tag: An optional tag name to query for.
Raises:
ValueError: If neither logdir nor event_file is given, or both are given. | [
"Main",
"function",
"for",
"inspector",
"that",
"prints",
"out",
"a",
"digest",
"of",
"event",
"files",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_file_inspector.py#L389-L417 | train |
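A minimal sketch of calling `inspect` directly, mirroring the CLI invocation `tensorboard --inspect --logdir mylogdir --tag loss`; the directory and tag names here are placeholders.

```python
from tensorboard.backend.event_processing import event_file_inspector as efi

efi.inspect(logdir='mylogdir', tag='loss')  # prints per-tag event statistics
```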
tensorflow/tensorboard | tensorboard/plugins/debugger/debugger_plugin_loader.py | DebuggerPluginLoader.define_flags | def define_flags(self, parser):
"""Adds DebuggerPlugin CLI flags to parser."""
group = parser.add_argument_group('debugger plugin')
group.add_argument(
'--debugger_data_server_grpc_port',
metavar='PORT',
type=int,
default=-1,
help='''\
The port at which the non-interactive debugger data server should
receive debugging data via gRPC from one or more debugger-enabled
TensorFlow runtimes. No debugger plugin or debugger data server will be
started if this flag is not provided. This flag differs from the
`--debugger_port` flag in that it starts a non-interactive mode. It is
for use with the "health pills" feature of the Graph Dashboard. This
flag is mutually exclusive with `--debugger_port`.\
''')
group.add_argument(
'--debugger_port',
metavar='PORT',
type=int,
default=-1,
help='''\
The port at which the interactive debugger data server (to be started by
the debugger plugin) should receive debugging data via gRPC from one or
more debugger-enabled TensorFlow runtimes. No debugger plugin or
debugger data server will be started if this flag is not provided. This
flag differs from the `--debugger_data_server_grpc_port` flag in that it
starts an interactive mode that allows user to pause at selected nodes
inside a TensorFlow Graph or between Session.runs. It is for use with
the interactive Debugger Dashboard. This flag is mutually exclusive with
`--debugger_data_server_grpc_port`.\
''') | python | def define_flags(self, parser):
"""Adds DebuggerPlugin CLI flags to parser."""
group = parser.add_argument_group('debugger plugin')
group.add_argument(
'--debugger_data_server_grpc_port',
metavar='PORT',
type=int,
default=-1,
help='''\
The port at which the non-interactive debugger data server should
receive debugging data via gRPC from one or more debugger-enabled
TensorFlow runtimes. No debugger plugin or debugger data server will be
started if this flag is not provided. This flag differs from the
`--debugger_port` flag in that it starts a non-interactive mode. It is
for use with the "health pills" feature of the Graph Dashboard. This
flag is mutually exclusive with `--debugger_port`.\
''')
group.add_argument(
'--debugger_port',
metavar='PORT',
type=int,
default=-1,
help='''\
The port at which the interactive debugger data server (to be started by
the debugger plugin) should receive debugging data via gRPC from one or
more debugger-enabled TensorFlow runtimes. No debugger plugin or
debugger data server will be started if this flag is not provided. This
flag differs from the `--debugger_data_server_grpc_port` flag in that it
starts an interactive mode that allows user to pause at selected nodes
inside a TensorFlow Graph or between Session.runs. It is for use with
the interactive Debugger Dashboard. This flag is mutually exclusive with
`--debugger_data_server_grpc_port`.\
''') | [
"def",
"define_flags",
"(",
"self",
",",
"parser",
")",
":",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'debugger plugin'",
")",
"group",
".",
"add_argument",
"(",
"'--debugger_data_server_grpc_port'",
",",
"metavar",
"=",
"'PORT'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"'''\\\nThe port at which the non-interactive debugger data server should\nreceive debugging data via gRPC from one or more debugger-enabled\nTensorFlow runtimes. No debugger plugin or debugger data server will be\nstarted if this flag is not provided. This flag differs from the\n`--debugger_port` flag in that it starts a non-interactive mode. It is\nfor use with the \"health pills\" feature of the Graph Dashboard. This\nflag is mutually exclusive with `--debugger_port`.\\\n'''",
")",
"group",
".",
"add_argument",
"(",
"'--debugger_port'",
",",
"metavar",
"=",
"'PORT'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"'''\\\nThe port at which the interactive debugger data server (to be started by\nthe debugger plugin) should receive debugging data via gRPC from one or\nmore debugger-enabled TensorFlow runtimes. No debugger plugin or\ndebugger data server will be started if this flag is not provided. This\nflag differs from the `--debugger_data_server_grpc_port` flag in that it\nstarts an interactive mode that allows user to pause at selected nodes\ninside a TensorFlow Graph or between Session.runs. It is for use with\nthe interactive Debugger Dashboard. This flag is mutually exclusive with\n`--debugger_data_server_grpc_port`.\\\n'''",
")"
] | Adds DebuggerPlugin CLI flags to parser. | [
"Adds",
"DebuggerPlugin",
"CLI",
"flags",
"to",
"parser",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin_loader.py#L38-L70 | train |
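A self-contained sketch of how flags defined this way behave when parsed, using only the standard library; the parser here is a stand-in, not the real TensorBoard CLI wiring.

```python
import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('debugger plugin')
group.add_argument('--debugger_port', metavar='PORT', type=int, default=-1)
group.add_argument('--debugger_data_server_grpc_port', metavar='PORT',
                   type=int, default=-1)

flags = parser.parse_args(['--debugger_port', '7000'])
assert flags.debugger_port == 7000
assert flags.debugger_data_server_grpc_port == -1  # -1 means "not enabled"
```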
tensorflow/tensorboard | tensorboard/plugins/debugger/debugger_plugin_loader.py | DebuggerPluginLoader.load | def load(self, context):
"""Returns the debugger plugin, if possible.
Args:
context: The TBContext flags including `add_arguments`.
Returns:
A DebuggerPlugin instance or None if it couldn't be loaded.
"""
if not (context.flags.debugger_data_server_grpc_port > 0 or
context.flags.debugger_port > 0):
return None
flags = context.flags
try:
# pylint: disable=g-import-not-at-top,unused-import
import tensorflow
except ImportError:
raise ImportError(
'To use the debugger plugin, you need to have TensorFlow installed:\n'
' pip install tensorflow')
try:
# pylint: disable=line-too-long,g-import-not-at-top
from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib
from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib
# pylint: enable=line-too-long,g-import-not-at-top
except ImportError as e:
e_type, e_value, e_traceback = sys.exc_info()
message = e.msg if hasattr(e, 'msg') else e.message # Handle py2 vs py3
if 'grpc' in message:
e_value = ImportError(
message +
'\n\nTo use the debugger plugin, you need to have '
'gRPC installed:\n pip install grpcio')
six.reraise(e_type, e_value, e_traceback)
if flags.debugger_port > 0:
interactive_plugin = (
interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context))
logger.info('Starting Interactive Debugger Plugin at gRPC port %d',
flags.debugger_port)
interactive_plugin.listen(flags.debugger_port)
return interactive_plugin
elif flags.debugger_data_server_grpc_port > 0:
noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)
logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d',
flags.debugger_data_server_grpc_port)
noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)
return noninteractive_plugin
raise AssertionError() | python | def load(self, context):
"""Returns the debugger plugin, if possible.
Args:
context: The TBContext flags including `add_arguments`.
Returns:
A DebuggerPlugin instance or None if it couldn't be loaded.
"""
if not (context.flags.debugger_data_server_grpc_port > 0 or
context.flags.debugger_port > 0):
return None
flags = context.flags
try:
# pylint: disable=g-import-not-at-top,unused-import
import tensorflow
except ImportError:
raise ImportError(
'To use the debugger plugin, you need to have TensorFlow installed:\n'
' pip install tensorflow')
try:
# pylint: disable=line-too-long,g-import-not-at-top
from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib
from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib
# pylint: enable=line-too-long,g-import-not-at-top
except ImportError as e:
e_type, e_value, e_traceback = sys.exc_info()
message = e.msg if hasattr(e, 'msg') else e.message # Handle py2 vs py3
if 'grpc' in message:
e_value = ImportError(
message +
'\n\nTo use the debugger plugin, you need to have '
'gRPC installed:\n pip install grpcio')
six.reraise(e_type, e_value, e_traceback)
if flags.debugger_port > 0:
interactive_plugin = (
interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context))
logger.info('Starting Interactive Debugger Plugin at gRPC port %d',
flags.debugger_port)
interactive_plugin.listen(flags.debugger_port)
return interactive_plugin
elif flags.debugger_data_server_grpc_port > 0:
noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)
logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d',
flags.debugger_data_server_grpc_port)
noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)
return noninteractive_plugin
raise AssertionError() | [
"def",
"load",
"(",
"self",
",",
"context",
")",
":",
"if",
"not",
"(",
"context",
".",
"flags",
".",
"debugger_data_server_grpc_port",
">",
"0",
"or",
"context",
".",
"flags",
".",
"debugger_port",
">",
"0",
")",
":",
"return",
"None",
"flags",
"=",
"context",
".",
"flags",
"try",
":",
"# pylint: disable=g-import-not-at-top,unused-import",
"import",
"tensorflow",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'To use the debugger plugin, you need to have TensorFlow installed:\\n'",
"' pip install tensorflow'",
")",
"try",
":",
"# pylint: disable=line-too-long,g-import-not-at-top",
"from",
"tensorboard",
".",
"plugins",
".",
"debugger",
"import",
"debugger_plugin",
"as",
"debugger_plugin_lib",
"from",
"tensorboard",
".",
"plugins",
".",
"debugger",
"import",
"interactive_debugger_plugin",
"as",
"interactive_debugger_plugin_lib",
"# pylint: enable=line-too-long,g-import-not-at-top",
"except",
"ImportError",
"as",
"e",
":",
"e_type",
",",
"e_value",
",",
"e_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"message",
"=",
"e",
".",
"msg",
"if",
"hasattr",
"(",
"e",
",",
"'msg'",
")",
"else",
"e",
".",
"message",
"# Handle py2 vs py3",
"if",
"'grpc'",
"in",
"message",
":",
"e_value",
"=",
"ImportError",
"(",
"message",
"+",
"'\\n\\nTo use the debugger plugin, you need to have '",
"'gRPC installed:\\n pip install grpcio'",
")",
"six",
".",
"reraise",
"(",
"e_type",
",",
"e_value",
",",
"e_traceback",
")",
"if",
"flags",
".",
"debugger_port",
">",
"0",
":",
"interactive_plugin",
"=",
"(",
"interactive_debugger_plugin_lib",
".",
"InteractiveDebuggerPlugin",
"(",
"context",
")",
")",
"logger",
".",
"info",
"(",
"'Starting Interactive Debugger Plugin at gRPC port %d'",
",",
"flags",
".",
"debugger_data_server_grpc_port",
")",
"interactive_plugin",
".",
"listen",
"(",
"flags",
".",
"debugger_port",
")",
"return",
"interactive_plugin",
"elif",
"flags",
".",
"debugger_data_server_grpc_port",
">",
"0",
":",
"noninteractive_plugin",
"=",
"debugger_plugin_lib",
".",
"DebuggerPlugin",
"(",
"context",
")",
"logger",
".",
"info",
"(",
"'Starting Non-interactive Debugger Plugin at gRPC port %d'",
",",
"flags",
".",
"debugger_data_server_grpc_port",
")",
"noninteractive_plugin",
".",
"listen",
"(",
"flags",
".",
"debugger_data_server_grpc_port",
")",
"return",
"noninteractive_plugin",
"raise",
"AssertionError",
"(",
")"
] | Returns the debugger plugin, if possible.
Args:
context: The TBContext flags including `add_arguments`.
Returns:
A DebuggerPlugin instance or None if it couldn't be loaded. | [
"Returns",
"the",
"debugger",
"plugin",
"if",
"possible",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin_loader.py#L85-L132 | train |
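A structure-only restatement of the mode-selection branch above; `choose_debugger_mode` is an invented helper, shown only to make the port precedence explicit.

```python
def choose_debugger_mode(debugger_port=-1, grpc_port=-1):
    """Interactive mode wins when both ports are set; None means no plugin."""
    if not (grpc_port > 0 or debugger_port > 0):
        return None
    if debugger_port > 0:
        return 'interactive'
    return 'non-interactive'

assert choose_debugger_mode() is None
assert choose_debugger_mode(debugger_port=7000) == 'interactive'
assert choose_debugger_mode(grpc_port=6064) == 'non-interactive'
```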
tensorflow/tensorboard | tensorboard/plugins/hparams/metadata.py | create_summary_metadata | def create_summary_metadata(hparams_plugin_data_pb):
"""Returns a summary metadata for the HParams plugin.
Returns a summary_pb2.SummaryMetadata holding a copy of the given
HParamsPluginData message in its plugin_data.content field.
Sets the version field of the hparams_plugin_data_pb copy to
PLUGIN_DATA_VERSION.
Args:
hparams_plugin_data_pb: the HParamsPluginData protobuf to use.
"""
if not isinstance(hparams_plugin_data_pb, plugin_data_pb2.HParamsPluginData):
raise TypeError('Needed an instance of plugin_data_pb2.HParamsPluginData.'
' Got: %s' % type(hparams_plugin_data_pb))
content = plugin_data_pb2.HParamsPluginData()
content.CopyFrom(hparams_plugin_data_pb)
content.version = PLUGIN_DATA_VERSION
return tf.compat.v1.SummaryMetadata(
plugin_data=tf.compat.v1.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME, content=content.SerializeToString())) | python | def create_summary_metadata(hparams_plugin_data_pb):
"""Returns a summary metadata for the HParams plugin.
Returns a summary_pb2.SummaryMetadata holding a copy of the given
HParamsPluginData message in its plugin_data.content field.
Sets the version field of the hparams_plugin_data_pb copy to
PLUGIN_DATA_VERSION.
Args:
hparams_plugin_data_pb: the HParamsPluginData protobuf to use.
"""
if not isinstance(hparams_plugin_data_pb, plugin_data_pb2.HParamsPluginData):
raise TypeError('Needed an instance of plugin_data_pb2.HParamsPluginData.'
' Got: %s' % type(hparams_plugin_data_pb))
content = plugin_data_pb2.HParamsPluginData()
content.CopyFrom(hparams_plugin_data_pb)
content.version = PLUGIN_DATA_VERSION
return tf.compat.v1.SummaryMetadata(
plugin_data=tf.compat.v1.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME, content=content.SerializeToString())) | [
"def",
"create_summary_metadata",
"(",
"hparams_plugin_data_pb",
")",
":",
"if",
"not",
"isinstance",
"(",
"hparams_plugin_data_pb",
",",
"plugin_data_pb2",
".",
"HParamsPluginData",
")",
":",
"raise",
"TypeError",
"(",
"'Needed an instance of plugin_data_pb2.HParamsPluginData.'",
"' Got: %s'",
"%",
"type",
"(",
"hparams_plugin_data_pb",
")",
")",
"content",
"=",
"plugin_data_pb2",
".",
"HParamsPluginData",
"(",
")",
"content",
".",
"CopyFrom",
"(",
"hparams_plugin_data_pb",
")",
"content",
".",
"version",
"=",
"PLUGIN_DATA_VERSION",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"SummaryMetadata",
"(",
"plugin_data",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"SummaryMetadata",
".",
"PluginData",
"(",
"plugin_name",
"=",
"PLUGIN_NAME",
",",
"content",
"=",
"content",
".",
"SerializeToString",
"(",
")",
")",
")"
] | Returns a summary metadata for the HParams plugin.
Returns a summary_pb2.SummaryMetadata holding a copy of the given
HParamsPluginData message in its plugin_data.content field.
Sets the version field of the hparams_plugin_data_pb copy to
PLUGIN_DATA_VERSION.
Args:
hparams_plugin_data_pb: the HParamsPluginData protobuf to use. | [
"Returns",
"a",
"summary",
"metadata",
"for",
"the",
"HParams",
"plugin",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/metadata.py#L36-L55 | train |
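A hedged round-trip sketch: it assumes the module is importable as `tensorboard.plugins.hparams.metadata` (per this row's path) and that an empty `HParamsPluginData` is acceptable input; real callers set one of its data oneof fields first.

```python
from tensorboard.plugins.hparams import metadata, plugin_data_pb2

pb = plugin_data_pb2.HParamsPluginData()  # empty; real callers populate it
summary_metadata = metadata.create_summary_metadata(pb)
print(summary_metadata.plugin_data.plugin_name)  # expected: the hparams name
```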
tensorflow/tensorboard | tensorboard/plugins/hparams/metadata.py | _parse_plugin_data_as | def _parse_plugin_data_as(content, data_oneof_field):
"""Returns a data oneof's field from plugin_data.content.
Raises HParamsError if the content doesn't have 'data_oneof_field' set or
this file is incompatible with the version of the metadata stored.
Args:
content: The SummaryMetadata.plugin_data.content to use.
data_oneof_field: string. The name of the data oneof field to return.
"""
plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content)
if plugin_data.version != PLUGIN_DATA_VERSION:
raise error.HParamsError(
'Only supports plugin_data version: %s; found: %s in: %s' %
(PLUGIN_DATA_VERSION, plugin_data.version, plugin_data))
if not plugin_data.HasField(data_oneof_field):
raise error.HParamsError(
'Expected plugin_data.%s to be set. Got: %s' %
(data_oneof_field, plugin_data))
return getattr(plugin_data, data_oneof_field) | python | def _parse_plugin_data_as(content, data_oneof_field):
"""Returns a data oneof's field from plugin_data.content.
Raises HParamsError if the content doesn't have 'data_oneof_field' set or
this file is incompatible with the version of the metadata stored.
Args:
content: The SummaryMetadata.plugin_data.content to use.
data_oneof_field: string. The name of the data oneof field to return.
"""
plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content)
if plugin_data.version != PLUGIN_DATA_VERSION:
raise error.HParamsError(
'Only supports plugin_data version: %s; found: %s in: %s' %
(PLUGIN_DATA_VERSION, plugin_data.version, plugin_data))
if not plugin_data.HasField(data_oneof_field):
raise error.HParamsError(
'Expected plugin_data.%s to be set. Got: %s' %
(data_oneof_field, plugin_data))
return getattr(plugin_data, data_oneof_field) | [
"def",
"_parse_plugin_data_as",
"(",
"content",
",",
"data_oneof_field",
")",
":",
"plugin_data",
"=",
"plugin_data_pb2",
".",
"HParamsPluginData",
".",
"FromString",
"(",
"content",
")",
"if",
"plugin_data",
".",
"version",
"!=",
"PLUGIN_DATA_VERSION",
":",
"raise",
"error",
".",
"HParamsError",
"(",
"'Only supports plugin_data version: %s; found: %s in: %s'",
"%",
"(",
"PLUGIN_DATA_VERSION",
",",
"plugin_data",
".",
"version",
",",
"plugin_data",
")",
")",
"if",
"not",
"plugin_data",
".",
"HasField",
"(",
"data_oneof_field",
")",
":",
"raise",
"error",
".",
"HParamsError",
"(",
"'Expected plugin_data.%s to be set. Got: %s'",
"%",
"(",
"data_oneof_field",
",",
"plugin_data",
")",
")",
"return",
"getattr",
"(",
"plugin_data",
",",
"data_oneof_field",
")"
] | Returns a data oneof's field from plugin_data.content.
Raises HParamsError if the content doesn't have 'data_oneof_field' set or
this file is incompatible with the version of the metadata stored.
Args:
content: The SummaryMetadata.plugin_data.content to use.
data_oneof_field: string. The name of the data oneof field to return. | [
"Returns",
"a",
"data",
"oneof",
"s",
"field",
"from",
"plugin_data",
".",
"content",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/metadata.py#L94-L113 | train |
tensorflow/tensorboard | tensorboard/plugins/debugger/events_writer_manager.py | EventsWriterManager.write_event | def write_event(self, event):
"""Writes an event proto to disk.
This method is threadsafe with respect to invocations of itself.
Args:
event: The event proto.
Raises:
IOError: If writing the event proto to disk fails.
"""
self._lock.acquire()
try:
self._events_writer.WriteEvent(event)
self._event_count += 1
if self._always_flush:
# We flush on every event within the integration test.
self._events_writer.Flush()
if self._event_count == self._check_this_often:
# Every so often, we check whether the size of the file is too big.
self._event_count = 0
# Flush to get an accurate size check.
self._events_writer.Flush()
file_path = os.path.join(self._events_directory,
self.get_current_file_name())
if not tf.io.gfile.exists(file_path):
# The events file does not exist. Perhaps the user had manually
# deleted it after training began. Create a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(
self._events_directory)
elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes:
# The current events file has gotten too big. Close the previous
# events writer. Make a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(
self._events_directory)
except IOError as err:
logger.error(
"Writing to %s failed: %s", self.get_current_file_name(), err)
self._lock.release() | python | def write_event(self, event):
"""Writes an event proto to disk.
This method is threadsafe with respect to invocations of itself.
Args:
event: The event proto.
Raises:
IOError: If writing the event proto to disk fails.
"""
self._lock.acquire()
try:
self._events_writer.WriteEvent(event)
self._event_count += 1
if self._always_flush:
# We flush on every event within the integration test.
self._events_writer.Flush()
if self._event_count == self._check_this_often:
# Every so often, we check whether the size of the file is too big.
self._event_count = 0
# Flush to get an accurate size check.
self._events_writer.Flush()
file_path = os.path.join(self._events_directory,
self.get_current_file_name())
if not tf.io.gfile.exists(file_path):
# The events file does not exist. Perhaps the user had manually
# deleted it after training began. Create a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(
self._events_directory)
elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes:
# The current events file has gotten too big. Close the previous
# events writer. Make a new one.
self._events_writer.Close()
self._events_writer = self._create_events_writer(
self._events_directory)
except IOError as err:
logger.error(
"Writing to %s failed: %s", self.get_current_file_name(), err)
self._lock.release() | [
"def",
"write_event",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"self",
".",
"_events_writer",
".",
"WriteEvent",
"(",
"event",
")",
"self",
".",
"_event_count",
"+=",
"1",
"if",
"self",
".",
"_always_flush",
":",
"# We flush on every event within the integration test.",
"self",
".",
"_events_writer",
".",
"Flush",
"(",
")",
"if",
"self",
".",
"_event_count",
"==",
"self",
".",
"_check_this_often",
":",
"# Every so often, we check whether the size of the file is too big.",
"self",
".",
"_event_count",
"=",
"0",
"# Flush to get an accurate size check.",
"self",
".",
"_events_writer",
".",
"Flush",
"(",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_events_directory",
",",
"self",
".",
"get_current_file_name",
"(",
")",
")",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"file_path",
")",
":",
"# The events file does not exist. Perhaps the user had manually",
"# deleted it after training began. Create a new one.",
"self",
".",
"_events_writer",
".",
"Close",
"(",
")",
"self",
".",
"_events_writer",
"=",
"self",
".",
"_create_events_writer",
"(",
"self",
".",
"_events_directory",
")",
"elif",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"(",
"file_path",
")",
".",
"length",
">",
"self",
".",
"_single_file_size_cap_bytes",
":",
"# The current events file has gotten too big. Close the previous",
"# events writer. Make a new one.",
"self",
".",
"_events_writer",
".",
"Close",
"(",
")",
"self",
".",
"_events_writer",
"=",
"self",
".",
"_create_events_writer",
"(",
"self",
".",
"_events_directory",
")",
"except",
"IOError",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"\"Writing to %s failed: %s\"",
",",
"self",
".",
"get_current_file_name",
"(",
")",
",",
"err",
")",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] | Writes an event proto to disk.
This method is threadsafe with respect to invocations of itself.
Args:
event: The event proto.
Raises:
IOError: If writing the event proto to disk fails. | [
"Writes",
"an",
"event",
"proto",
"to",
"disk",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/events_writer_manager.py#L109-L152 | train |
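A self-contained sketch of the same lock-plus-periodic-flush pattern; the class and its names are invented, and it uses a `with` block so the lock is released even on unexpected exceptions (a hardening of the explicit acquire/release above).

```python
import threading

class PeriodicFlushWriter(object):
    """Illustrative stand-in; not the EventsWriterManager API."""

    def __init__(self, sink, check_every=100):
        self._sink = sink  # any object exposing write() and flush()
        self._lock = threading.Lock()
        self._count = 0
        self._check_every = check_every

    def write(self, record):
        with self._lock:  # released even if write() raises
            self._sink.write(record)
            self._count += 1
            if self._count >= self._check_every:
                self._count = 0
                self._sink.flush()  # flush before any size checks would run
```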
tensorflow/tensorboard | tensorboard/plugins/debugger/events_writer_manager.py | EventsWriterManager.dispose | def dispose(self):
"""Disposes of this events writer manager, making it no longer usable.
Call this method when this object is done being used in order to clean up
resources and handlers. This method should only ever be called once.
"""
self._lock.acquire()
self._events_writer.Close()
self._events_writer = None
self._lock.release() | python | def dispose(self):
"""Disposes of this events writer manager, making it no longer usable.
Call this method when this object is done being used in order to clean up
resources and handlers. This method should only ever be called once.
"""
self._lock.acquire()
self._events_writer.Close()
self._events_writer = None
self._lock.release() | [
"def",
"dispose",
"(",
"self",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"self",
".",
"_events_writer",
".",
"Close",
"(",
")",
"self",
".",
"_events_writer",
"=",
"None",
"self",
".",
"_lock",
".",
"release",
"(",
")"
] | Disposes of this events writer manager, making it no longer usable.
Call this method when this object is done being used in order to clean up
resources and handlers. This method should only ever be called once. | [
"Disposes",
"of",
"this",
"events",
"writer",
"manager",
"making",
"it",
"no",
"longer",
"usable",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/events_writer_manager.py#L162-L171 | train |
tensorflow/tensorboard | tensorboard/plugins/debugger/events_writer_manager.py | EventsWriterManager._create_events_writer | def _create_events_writer(self, directory):
"""Creates a new events writer.
Args:
directory: The directory in which to write files containing events.
Returns:
A new events writer, which corresponds to a new events file.
"""
total_size = 0
events_files = self._fetch_events_files_on_disk()
for file_name in events_files:
file_path = os.path.join(self._events_directory, file_name)
total_size += tf.io.gfile.stat(file_path).length
if total_size >= self.total_file_size_cap_bytes:
# The total size written to disk is too big. Delete events files until
# the size is below the cap.
for file_name in events_files:
if total_size < self.total_file_size_cap_bytes:
break
file_path = os.path.join(self._events_directory, file_name)
file_size = tf.io.gfile.stat(file_path).length
try:
tf.io.gfile.remove(file_path)
total_size -= file_size
logger.info(
"Deleted %s because events files take up over %d bytes",
file_path, self.total_file_size_cap_bytes)
except IOError as err:
logger.error("Deleting %s failed: %s", file_path, err)
# We increment this index because each events writer must differ in prefix.
self._events_file_count += 1
file_path = "%s.%d.%d" % (
os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT),
time.time(), self._events_file_count)
logger.info("Creating events file %s", file_path)
return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path)) | python | def _create_events_writer(self, directory):
"""Creates a new events writer.
Args:
directory: The directory in which to write files containing events.
Returns:
A new events writer, which corresponds to a new events file.
"""
total_size = 0
events_files = self._fetch_events_files_on_disk()
for file_name in events_files:
file_path = os.path.join(self._events_directory, file_name)
total_size += tf.io.gfile.stat(file_path).length
if total_size >= self.total_file_size_cap_bytes:
# The total size written to disk is too big. Delete events files until
# the size is below the cap.
for file_name in events_files:
if total_size < self.total_file_size_cap_bytes:
break
file_path = os.path.join(self._events_directory, file_name)
file_size = tf.io.gfile.stat(file_path).length
try:
tf.io.gfile.remove(file_path)
total_size -= file_size
logger.info(
"Deleted %s because events files take up over %d bytes",
file_path, self.total_file_size_cap_bytes)
except IOError as err:
logger.error("Deleting %s failed: %s", file_path, err)
# We increment this index because each events writer must differ in prefix.
self._events_file_count += 1
file_path = "%s.%d.%d" % (
os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT),
time.time(), self._events_file_count)
logger.info("Creating events file %s", file_path)
return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path)) | [
"def",
"_create_events_writer",
"(",
"self",
",",
"directory",
")",
":",
"total_size",
"=",
"0",
"events_files",
"=",
"self",
".",
"_fetch_events_files_on_disk",
"(",
")",
"for",
"file_name",
"in",
"events_files",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_events_directory",
",",
"file_name",
")",
"total_size",
"+=",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"(",
"file_path",
")",
".",
"length",
"if",
"total_size",
">=",
"self",
".",
"total_file_size_cap_bytes",
":",
"# The total size written to disk is too big. Delete events files until",
"# the size is below the cap.",
"for",
"file_name",
"in",
"events_files",
":",
"if",
"total_size",
"<",
"self",
".",
"total_file_size_cap_bytes",
":",
"break",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_events_directory",
",",
"file_name",
")",
"file_size",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"(",
"file_path",
")",
".",
"length",
"try",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"remove",
"(",
"file_path",
")",
"total_size",
"-=",
"file_size",
"logger",
".",
"info",
"(",
"\"Deleted %s because events files take up over %d bytes\"",
",",
"file_path",
",",
"self",
".",
"total_file_size_cap_bytes",
")",
"except",
"IOError",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"\"Deleting %s failed: %s\"",
",",
"file_path",
",",
"err",
")",
"# We increment this index because each events writer must differ in prefix.",
"self",
".",
"_events_file_count",
"+=",
"1",
"file_path",
"=",
"\"%s.%d.%d\"",
"%",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"DEBUGGER_EVENTS_FILE_STARTING_TEXT",
")",
",",
"time",
".",
"time",
"(",
")",
",",
"self",
".",
"_events_file_count",
")",
"logger",
".",
"info",
"(",
"\"Creating events file %s\"",
",",
"file_path",
")",
"return",
"pywrap_tensorflow",
".",
"EventsWriter",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"file_path",
")",
")"
] | Creates a new events writer.
Args:
directory: The directory in which to write files containing events.
Returns:
A new events writer, which corresponds to a new events file. | [
"Creates",
"a",
"new",
"events",
"writer",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/events_writer_manager.py#L173-L212 | train |
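A structure-only sketch of the size-cap pruning loop above, on the local filesystem instead of `tf.io.gfile`; `prune_oldest` is an invented name and assumes `paths` is ordered oldest-first.

```python
import os

def prune_oldest(paths, size_cap_bytes):
    """Delete files from the front of `paths` until the total is under the cap."""
    total = sum(os.path.getsize(p) for p in paths)
    for path in paths:
        if total < size_cap_bytes:
            break
        size = os.path.getsize(path)
        os.remove(path)  # errors deliberately unhandled in this sketch
        total -= size
    return total
```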
tensorflow/tensorboard | tensorboard/plugins/debugger/events_writer_manager.py | EventsWriterManager._fetch_events_files_on_disk | def _fetch_events_files_on_disk(self):
"""Obtains the names of debugger-related events files within the directory.
Returns:
The names of the debugger-related events files written to disk. The names
are sorted in increasing events file index.
"""
all_files = tf.io.gfile.listdir(self._events_directory)
relevant_files = [
file_name for file_name in all_files
if _DEBUGGER_EVENTS_FILE_NAME_REGEX.match(file_name)
]
return sorted(relevant_files, key=self._obtain_file_index) | python | def _fetch_events_files_on_disk(self):
"""Obtains the names of debugger-related events files within the directory.
Returns:
The names of the debugger-related events files written to disk. The names
are sorted in increasing events file index.
"""
all_files = tf.io.gfile.listdir(self._events_directory)
relevant_files = [
file_name for file_name in all_files
if _DEBUGGER_EVENTS_FILE_NAME_REGEX.match(file_name)
]
return sorted(relevant_files, key=self._obtain_file_index) | [
"def",
"_fetch_events_files_on_disk",
"(",
"self",
")",
":",
"all_files",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"self",
".",
"_events_directory",
")",
"relevant_files",
"=",
"[",
"file_name",
"for",
"file_name",
"in",
"all_files",
"if",
"_DEBUGGER_EVENTS_FILE_NAME_REGEX",
".",
"match",
"(",
"file_name",
")",
"]",
"return",
"sorted",
"(",
"relevant_files",
",",
"key",
"=",
"self",
".",
"_obtain_file_index",
")"
] | Obtains the names of debugger-related events files within the directory.
Returns:
The names of the debugger-related events files written to disk. The names
are sorted in increasing events file index. | [
"Obtains",
"the",
"names",
"of",
"debugger",
"-",
"related",
"events",
"files",
"within",
"the",
"directory",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/events_writer_manager.py#L214-L226 | train |
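A self-contained sketch of the filter-then-sort-by-index idiom used above; the regex is hypothetical and stands in for `_DEBUGGER_EVENTS_FILE_NAME_REGEX`, whose real pattern is not shown in this row.

```python
import re

_NAME_RE = re.compile(r'^events\.debugger\.\d+\.(\d+)$')  # hypothetical pattern

def sorted_debugger_files(file_names):
    matching = [n for n in file_names if _NAME_RE.match(n)]
    return sorted(matching, key=lambda n: int(_NAME_RE.match(n).group(1)))

print(sorted_debugger_files(
    ['events.debugger.170.10', 'events.debugger.150.2', 'notes.txt']))
# -> ['events.debugger.150.2', 'events.debugger.170.10']
```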
tensorflow/tensorboard | tensorboard/summary/_tf/summary/__init__.py | reexport_tf_summary | def reexport_tf_summary():
"""Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage.
"""
import sys # pylint: disable=g-import-not-at-top
# API packages to check for the original V2 summary API, in preference order
# to avoid going "under the hood" to the _api packages unless necessary.
packages = [
'tensorflow',
'tensorflow.compat.v2',
'tensorflow._api.v2',
'tensorflow._api.v2.compat.v2',
'tensorflow._api.v1.compat.v2',
]
# If we aren't sure we're on V2, don't use tf.summary since it could be V1.
# Note there may be false positives since the __version__ attribute may not be
# defined at this point in the import process.
if not getattr(tf, '__version__', '').startswith('2.'): # noqa: F821
packages.remove('tensorflow')
def dynamic_wildcard_import(module):
"""Implements the logic of "from module import *" for the given module."""
symbols = getattr(module, '__all__', None)
if symbols is None:
symbols = [k for k in module.__dict__.keys() if not k.startswith('_')]
globals().update({symbol: getattr(module, symbol) for symbol in symbols})
notfound = object() # sentinel value
for package_name in packages:
package = sys.modules.get(package_name, notfound)
if package is notfound:
# Either it isn't in this installation at all (e.g. the _api.vX packages
# are only in API version X), it isn't imported yet, or it was imported
# but not inserted into sys.modules under its user-facing name (for the
# non-'_api' packages), at which point we continue down the list to look
# "under the hood" for it via its '_api' package name.
continue
module = getattr(package, 'summary', None)
if module is None:
# This happens if the package hasn't been fully imported yet. For example,
# the 'tensorflow' package won't yet have 'summary' attribute if we are
# loading this code via the 'tensorflow.compat...' path and 'compat' is
# imported before 'summary' in the 'tensorflow' __init__.py file.
continue
# Success, we hope. Import all the public symbols into this module.
dynamic_wildcard_import(module)
return | python | def reexport_tf_summary():
"""Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage.
"""
import sys # pylint: disable=g-import-not-at-top
# API packages to check for the original V2 summary API, in preference order
# to avoid going "under the hood" to the _api packages unless necessary.
packages = [
'tensorflow',
'tensorflow.compat.v2',
'tensorflow._api.v2',
'tensorflow._api.v2.compat.v2',
'tensorflow._api.v1.compat.v2',
]
# If we aren't sure we're on V2, don't use tf.summary since it could be V1.
# Note there may be false positives since the __version__ attribute may not be
# defined at this point in the import process.
if not getattr(tf, '__version__', '').startswith('2.'): # noqa: F821
packages.remove('tensorflow')
def dynamic_wildcard_import(module):
"""Implements the logic of "from module import *" for the given module."""
symbols = getattr(module, '__all__', None)
if symbols is None:
symbols = [k for k in module.__dict__.keys() if not k.startswith('_')]
globals().update({symbol: getattr(module, symbol) for symbol in symbols})
notfound = object() # sentinel value
for package_name in packages:
package = sys.modules.get(package_name, notfound)
if package is notfound:
# Either it isn't in this installation at all (e.g. the _api.vX packages
# are only in API version X), it isn't imported yet, or it was imported
# but not inserted into sys.modules under its user-facing name (for the
# non-'_api' packages), at which point we continue down the list to look
# "under the hood" for it via its '_api' package name.
continue
module = getattr(package, 'summary', None)
if module is None:
# This happens if the package hasn't been fully imported yet. For example,
# the 'tensorflow' package won't yet have 'summary' attribute if we are
# loading this code via the 'tensorflow.compat...' path and 'compat' is
# imported before 'summary' in the 'tensorflow' __init__.py file.
continue
# Success, we hope. Import all the public symbols into this module.
dynamic_wildcard_import(module)
return | [
"def",
"reexport_tf_summary",
"(",
")",
":",
"import",
"sys",
"# pylint: disable=g-import-not-at-top",
"# API packages to check for the original V2 summary API, in preference order",
"# to avoid going \"under the hood\" to the _api packages unless necessary.",
"packages",
"=",
"[",
"'tensorflow'",
",",
"'tensorflow.compat.v2'",
",",
"'tensorflow._api.v2'",
",",
"'tensorflow._api.v2.compat.v2'",
",",
"'tensorflow._api.v1.compat.v2'",
",",
"]",
"# If we aren't sure we're on V2, don't use tf.summary since it could be V1.",
"# Note there may be false positives since the __version__ attribute may not be",
"# defined at this point in the import process.",
"if",
"not",
"getattr",
"(",
"tf",
",",
"'__version__'",
",",
"''",
")",
".",
"startswith",
"(",
"'2.'",
")",
":",
"# noqa: F821",
"packages",
".",
"remove",
"(",
"'tensorflow'",
")",
"def",
"dynamic_wildcard_import",
"(",
"module",
")",
":",
"\"\"\"Implements the logic of \"from module import *\" for the given module.\"\"\"",
"symbols",
"=",
"getattr",
"(",
"module",
",",
"'__all__'",
",",
"None",
")",
"if",
"symbols",
"is",
"None",
":",
"symbols",
"=",
"[",
"k",
"for",
"k",
"in",
"module",
".",
"__dict__",
".",
"keys",
"(",
")",
"if",
"not",
"k",
".",
"startswith",
"(",
"'_'",
")",
"]",
"globals",
"(",
")",
".",
"update",
"(",
"{",
"symbol",
":",
"getattr",
"(",
"module",
",",
"symbol",
")",
"for",
"symbol",
"in",
"symbols",
"}",
")",
"notfound",
"=",
"object",
"(",
")",
"# sentinel value",
"for",
"package_name",
"in",
"packages",
":",
"package",
"=",
"sys",
".",
"modules",
".",
"get",
"(",
"package_name",
",",
"notfound",
")",
"if",
"package",
"is",
"notfound",
":",
"# Either it isn't in this installation at all (e.g. the _api.vX packages",
"# are only in API version X), it isn't imported yet, or it was imported",
"# but not inserted into sys.modules under its user-facing name (for the",
"# non-'_api' packages), at which point we continue down the list to look",
"# \"under the hood\" for it via its '_api' package name.",
"continue",
"module",
"=",
"getattr",
"(",
"package",
",",
"'summary'",
",",
"None",
")",
"if",
"module",
"is",
"None",
":",
"# This happens if the package hasn't been fully imported yet. For example,",
"# the 'tensorflow' package won't yet have 'summary' attribute if we are",
"# loading this code via the 'tensorflow.compat...' path and 'compat' is",
"# imported before 'summary' in the 'tensorflow' __init__.py file.",
"continue",
"# Success, we hope. Import all the public symbols into this module.",
"dynamic_wildcard_import",
"(",
"module",
")",
"return"
] | Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage. | [
"Re",
"-",
"export",
"all",
"symbols",
"from",
"the",
"original",
"tf",
".",
"summary",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/summary/_tf/summary/__init__.py#L90-L167 | train |
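A minimal, self-contained demo of the `dynamic_wildcard_import` idea above, applied to an ordinary module instead of a TF API package; `wildcard_import_into` is an invented name.

```python
import math

def wildcard_import_into(module, namespace):
    """Copy a module's public symbols into `namespace`, honoring __all__."""
    symbols = getattr(module, '__all__', None)
    if symbols is None:
        symbols = [k for k in module.__dict__ if not k.startswith('_')]
    namespace.update({name: getattr(module, name) for name in symbols})

ns = {}
wildcard_import_into(math, ns)
assert ns['pi'] == math.pi
```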
tensorflow/tensorboard | tensorboard/encode_png_benchmark.py | bench | def bench(image, thread_count):
"""Encode `image` to PNG on `thread_count` threads in parallel.
Returns:
A `float` representing the number of seconds that it takes all threads
to finish encoding `image`.
"""
threads = [threading.Thread(target=lambda: encoder.encode_png(image))
for _ in xrange(thread_count)]
start_time = datetime.datetime.now()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
end_time = datetime.datetime.now()
delta = (end_time - start_time).total_seconds()
return delta | python | def bench(image, thread_count):
"""Encode `image` to PNG on `thread_count` threads in parallel.
Returns:
A `float` representing the number of seconds that it takes all threads
to finish encoding `image`.
"""
threads = [threading.Thread(target=lambda: encoder.encode_png(image))
for _ in xrange(thread_count)]
start_time = datetime.datetime.now()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
end_time = datetime.datetime.now()
delta = (end_time - start_time).total_seconds()
return delta | [
"def",
"bench",
"(",
"image",
",",
"thread_count",
")",
":",
"threads",
"=",
"[",
"threading",
".",
"Thread",
"(",
"target",
"=",
"lambda",
":",
"encoder",
".",
"encode_png",
"(",
"image",
")",
")",
"for",
"_",
"in",
"xrange",
"(",
"thread_count",
")",
"]",
"start_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"for",
"thread",
"in",
"threads",
":",
"thread",
".",
"start",
"(",
")",
"for",
"thread",
"in",
"threads",
":",
"thread",
".",
"join",
"(",
")",
"end_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"delta",
"=",
"(",
"end_time",
"-",
"start_time",
")",
".",
"total_seconds",
"(",
")",
"return",
"delta"
] | Encode `image` to PNG on `thread_count` threads in parallel.
Returns:
A `float` representing number of seconds that it takes all threads
to finish encoding `image`. | [
"Encode",
"image",
"to",
"PNG",
"on",
"thread_count",
"threads",
"in",
"parallel",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/encode_png_benchmark.py#L66-L82 | train |
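A self-contained restatement of the benchmark's timing loop, with a toy CPU-bound workload standing in for PNG encoding and `time.time()` replacing the `datetime` arithmetic; `time_threads` is an invented name.

```python
import threading
import time

def time_threads(target, thread_count):
    """Seconds for `thread_count` threads to all finish running `target`."""
    threads = [threading.Thread(target=target) for _ in range(thread_count)]
    start = time.time()
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return time.time() - start

print(time_threads(lambda: sum(i * i for i in range(10 ** 5)), thread_count=4))
```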
tensorflow/tensorboard | tensorboard/encode_png_benchmark.py | _image_of_size | def _image_of_size(image_size):
"""Generate a square RGB test image of the given side length."""
return np.random.uniform(0, 256, [image_size, image_size, 3]).astype(np.uint8) | python | def _image_of_size(image_size):
"""Generate a square RGB test image of the given side length."""
return np.random.uniform(0, 256, [image_size, image_size, 3]).astype(np.uint8) | [
"def",
"_image_of_size",
"(",
"image_size",
")",
":",
"return",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"256",
",",
"[",
"image_size",
",",
"image_size",
",",
"3",
"]",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")"
] | Generate a square RGB test image of the given side length. | [
"Generate",
"a",
"square",
"RGB",
"test",
"image",
"of",
"the",
"given",
"side",
"length",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/encode_png_benchmark.py#L85-L87 | train |
tensorflow/tensorboard | tensorboard/encode_png_benchmark.py | _format_line | def _format_line(headers, fields):
"""Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
"""
assert len(fields) == len(headers), (fields, headers)
fields = ["%2.4f" % field if isinstance(field, float) else str(field)
for field in fields]
return ' '.join(' ' * max(0, len(header) - len(field)) + field
for (header, field) in zip(headers, fields)) | python | def _format_line(headers, fields):
"""Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
"""
assert len(fields) == len(headers), (fields, headers)
fields = ["%2.4f" % field if isinstance(field, float) else str(field)
for field in fields]
return ' '.join(' ' * max(0, len(header) - len(field)) + field
for (header, field) in zip(headers, fields)) | [
"def",
"_format_line",
"(",
"headers",
",",
"fields",
")",
":",
"assert",
"len",
"(",
"fields",
")",
"==",
"len",
"(",
"headers",
")",
",",
"(",
"fields",
",",
"headers",
")",
"fields",
"=",
"[",
"\"%2.4f\"",
"%",
"field",
"if",
"isinstance",
"(",
"field",
",",
"float",
")",
"else",
"str",
"(",
"field",
")",
"for",
"field",
"in",
"fields",
"]",
"return",
"' '",
".",
"join",
"(",
"' '",
"*",
"max",
"(",
"0",
",",
"len",
"(",
"header",
")",
"-",
"len",
"(",
"field",
")",
")",
"+",
"field",
"for",
"(",
"header",
",",
"field",
")",
"in",
"zip",
"(",
"headers",
",",
"fields",
")",
")"
] | Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string. | [
"Format",
"a",
"line",
"of",
"a",
"table",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/encode_png_benchmark.py#L90-L106 | train |
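A worked example of the alignment behavior, using an adapted local copy of `_format_line` so the snippet runs standalone: floats render via `%2.4f`, and each field is left-padded with spaces to its header's width (right-aligned).

```python
def _format_line(headers, fields):  # adapted copy of the function above
    fields = ['%2.4f' % f if isinstance(f, float) else str(f) for f in fields]
    return ' '.join(' ' * max(0, len(h) - len(f)) + f
                    for (h, f) in zip(headers, fields))

headers = ['threads', 'seconds']
print(_format_line(headers, headers))
print(_format_line(headers, [1, 0.52]))
# threads seconds
#       1  0.5200
```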
tensorflow/tensorboard | tensorboard/plugins/debugger/debug_graphs_helper.py | DebugGraphWrapper.get_gated_grpc_tensors | def get_gated_grpc_tensors(self, matching_debug_op=None):
"""Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
graph_def: A tf.GraphDef proto.
matching_debug_op: Return tensors and nodes with only matching the
specified debug op name (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples.
"""
with self._grpc_gated_lock:
matching_debug_op = matching_debug_op or 'DebugIdentity'
if matching_debug_op not in self._grpc_gated_tensors:
# First, construct a map from node name to op type.
node_name_to_op_type = dict(
(node.name, node.op) for node in self._graph_def.node)
# Second, populate the output list.
gated = []
for node in self._graph_def.node:
if node.op == matching_debug_op:
for attr_key in node.attr:
if attr_key == 'gated_grpc' and node.attr[attr_key].b:
node_name, output_slot, _, debug_op = (
debug_graphs.parse_debug_node_name(node.name))
gated.append(
(node_name, node_name_to_op_type[node_name], output_slot,
debug_op))
break
self._grpc_gated_tensors[matching_debug_op] = gated
return self._grpc_gated_tensors[matching_debug_op] | python | def get_gated_grpc_tensors(self, matching_debug_op=None):
"""Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
    matching_debug_op: Return only tensors and nodes matching the
specified debug op name (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples.
"""
with self._grpc_gated_lock:
matching_debug_op = matching_debug_op or 'DebugIdentity'
if matching_debug_op not in self._grpc_gated_tensors:
# First, construct a map from node name to op type.
node_name_to_op_type = dict(
(node.name, node.op) for node in self._graph_def.node)
# Second, populate the output list.
gated = []
for node in self._graph_def.node:
if node.op == matching_debug_op:
for attr_key in node.attr:
if attr_key == 'gated_grpc' and node.attr[attr_key].b:
node_name, output_slot, _, debug_op = (
debug_graphs.parse_debug_node_name(node.name))
gated.append(
(node_name, node_name_to_op_type[node_name], output_slot,
debug_op))
break
self._grpc_gated_tensors[matching_debug_op] = gated
return self._grpc_gated_tensors[matching_debug_op] | [
"def",
"get_gated_grpc_tensors",
"(",
"self",
",",
"matching_debug_op",
"=",
"None",
")",
":",
"with",
"self",
".",
"_grpc_gated_lock",
":",
"matching_debug_op",
"=",
"matching_debug_op",
"or",
"'DebugIdentity'",
"if",
"matching_debug_op",
"not",
"in",
"self",
".",
"_grpc_gated_tensors",
":",
"# First, construct a map from node name to op type.",
"node_name_to_op_type",
"=",
"dict",
"(",
"(",
"node",
".",
"name",
",",
"node",
".",
"op",
")",
"for",
"node",
"in",
"self",
".",
"_graph_def",
".",
"node",
")",
"# Second, populate the output list.",
"gated",
"=",
"[",
"]",
"for",
"node",
"in",
"self",
".",
"_graph_def",
".",
"node",
":",
"if",
"node",
".",
"op",
"==",
"matching_debug_op",
":",
"for",
"attr_key",
"in",
"node",
".",
"attr",
":",
"if",
"attr_key",
"==",
"'gated_grpc'",
"and",
"node",
".",
"attr",
"[",
"attr_key",
"]",
".",
"b",
":",
"node_name",
",",
"output_slot",
",",
"_",
",",
"debug_op",
"=",
"(",
"debug_graphs",
".",
"parse_debug_node_name",
"(",
"node",
".",
"name",
")",
")",
"gated",
".",
"append",
"(",
"(",
"node_name",
",",
"node_name_to_op_type",
"[",
"node_name",
"]",
",",
"output_slot",
",",
"debug_op",
")",
")",
"break",
"self",
".",
"_grpc_gated_tensors",
"[",
"matching_debug_op",
"]",
"=",
"gated",
"return",
"self",
".",
"_grpc_gated_tensors",
"[",
"matching_debug_op",
"]"
] | Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
    matching_debug_op: Return only tensors and nodes matching the
specified debug op name (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples. | [
"Extract",
"all",
"nodes",
"with",
"gated",
"-",
"gRPC",
"debug",
"ops",
"attached",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debug_graphs_helper.py#L37-L73 | train |
tensorflow/tensorboard | tensorboard/plugins/debugger/debug_graphs_helper.py | DebugGraphWrapper.maybe_base_expanded_node_name | def maybe_base_expanded_node_name(self, node_name):
"""Expand the base name if there are node names nested under the node.
For example, if there are two nodes in the graph, "a" and "a/read", then
calling this function on "a" will give "a/(a)", a form that points at
a leaf node in the nested TensorBoard graph. Calling this function on
"a/read" will just return "a/read", because there is no node nested under
it.
This method is thread-safe.
Args:
node_name: Name of the node.
Returns:
Possibly base-expanded node name.
"""
with self._node_name_lock:
# Lazily populate the map from original node name to base-expanded ones.
if self._maybe_base_expanded_node_names is None:
self._maybe_base_expanded_node_names = dict()
# Sort all the node names.
sorted_names = sorted(node.name for node in self._graph_def.node)
for i, name in enumerate(sorted_names):
j = i + 1
while j < len(sorted_names) and sorted_names[j].startswith(name):
if sorted_names[j].startswith(name + '/'):
self._maybe_base_expanded_node_names[name] = (
name + '/(' + name.split('/')[-1] + ')')
break
j += 1
return self._maybe_base_expanded_node_names.get(node_name, node_name) | python | def maybe_base_expanded_node_name(self, node_name):
"""Expand the base name if there are node names nested under the node.
For example, if there are two nodes in the graph, "a" and "a/read", then
calling this function on "a" will give "a/(a)", a form that points at
a leaf node in the nested TensorBoard graph. Calling this function on
"a/read" will just return "a/read", because there is no node nested under
it.
This method is thread-safe.
Args:
node_name: Name of the node.
Returns:
Possibly base-expanded node name.
"""
with self._node_name_lock:
# Lazily populate the map from original node name to base-expanded ones.
if self._maybe_base_expanded_node_names is None:
self._maybe_base_expanded_node_names = dict()
# Sort all the node names.
sorted_names = sorted(node.name for node in self._graph_def.node)
for i, name in enumerate(sorted_names):
j = i + 1
while j < len(sorted_names) and sorted_names[j].startswith(name):
if sorted_names[j].startswith(name + '/'):
self._maybe_base_expanded_node_names[name] = (
name + '/(' + name.split('/')[-1] + ')')
break
j += 1
return self._maybe_base_expanded_node_names.get(node_name, node_name) | [
"def",
"maybe_base_expanded_node_name",
"(",
"self",
",",
"node_name",
")",
":",
"with",
"self",
".",
"_node_name_lock",
":",
"# Lazily populate the map from original node name to base-expanded ones.",
"if",
"self",
".",
"_maybe_base_expanded_node_names",
"is",
"None",
":",
"self",
".",
"_maybe_base_expanded_node_names",
"=",
"dict",
"(",
")",
"# Sort all the node names.",
"sorted_names",
"=",
"sorted",
"(",
"node",
".",
"name",
"for",
"node",
"in",
"self",
".",
"_graph_def",
".",
"node",
")",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"sorted_names",
")",
":",
"j",
"=",
"i",
"+",
"1",
"while",
"j",
"<",
"len",
"(",
"sorted_names",
")",
"and",
"sorted_names",
"[",
"j",
"]",
".",
"startswith",
"(",
"name",
")",
":",
"if",
"sorted_names",
"[",
"j",
"]",
".",
"startswith",
"(",
"name",
"+",
"'/'",
")",
":",
"self",
".",
"_maybe_base_expanded_node_names",
"[",
"name",
"]",
"=",
"(",
"name",
"+",
"'/('",
"+",
"name",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"+",
"')'",
")",
"break",
"j",
"+=",
"1",
"return",
"self",
".",
"_maybe_base_expanded_node_names",
".",
"get",
"(",
"node_name",
",",
"node_name",
")"
] | Expand the base name if there are node names nested under the node.
For example, if there are two nodes in the graph, "a" and "a/read", then
calling this function on "a" will give "a/(a)", a form that points at
a leaf node in the nested TensorBoard graph. Calling this function on
"a/read" will just return "a/read", because there is no node nested under
it.
This method is thread-safe.
Args:
node_name: Name of the node.
Returns:
Possibly base-expanded node name. | [
"Expand",
"the",
"base",
"name",
"if",
"there",
"are",
"node",
"names",
"nested",
"under",
"the",
"node",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debug_graphs_helper.py#L75-L107 | train |
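The expansion rule is easy to check with plain strings; this is the same scan as above, minus the lock and the cached class state:

```python
def base_expanded_names(node_names):
    expanded = {}
    sorted_names = sorted(node_names)
    for i, name in enumerate(sorted_names):
        j = i + 1
        # A later sorted name starting with "name/" means `name` is also a
        # namespace, so point at the leaf "name/(basename)" instead.
        while j < len(sorted_names) and sorted_names[j].startswith(name):
            if sorted_names[j].startswith(name + '/'):
                expanded[name] = name + '/(' + name.split('/')[-1] + ')'
                break
            j += 1
    return expanded

print(base_expanded_names(['a', 'a/read', 'b']))  # {'a': 'a/(a)'}
```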
tensorflow/tensorboard | tensorboard/backend/event_processing/db_import_multiplexer.py | DbImportMultiplexer.AddRunsFromDirectory | def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
Args:
path: A string path to a directory to load runs from.
name: Optional, specifies a name for the experiment under which the
runs from this directory hierarchy will be imported. If omitted, the
path will be used as the name.
Raises:
ValueError: If the path exists and isn't a directory.
"""
logger.info('Starting AddRunsFromDirectory: %s (as %s)', path, name)
for subdir in io_wrapper.GetLogdirSubdirectories(path):
logger.info('Processing directory %s', subdir)
if subdir not in self._run_loaders:
logger.info('Creating DB loader for directory %s', subdir)
names = self._get_exp_and_run_names(path, subdir, name)
experiment_name, run_name = names
self._run_loaders[subdir] = _RunLoader(
subdir=subdir,
experiment_name=experiment_name,
run_name=run_name)
logger.info('Done with AddRunsFromDirectory: %s', path) | python | def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
Args:
path: A string path to a directory to load runs from.
name: Optional, specifies a name for the experiment under which the
runs from this directory hierarchy will be imported. If omitted, the
path will be used as the name.
Raises:
ValueError: If the path exists and isn't a directory.
"""
logger.info('Starting AddRunsFromDirectory: %s (as %s)', path, name)
for subdir in io_wrapper.GetLogdirSubdirectories(path):
logger.info('Processing directory %s', subdir)
if subdir not in self._run_loaders:
logger.info('Creating DB loader for directory %s', subdir)
names = self._get_exp_and_run_names(path, subdir, name)
experiment_name, run_name = names
self._run_loaders[subdir] = _RunLoader(
subdir=subdir,
experiment_name=experiment_name,
run_name=run_name)
logger.info('Done with AddRunsFromDirectory: %s', path) | [
"def",
"AddRunsFromDirectory",
"(",
"self",
",",
"path",
",",
"name",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"'Starting AddRunsFromDirectory: %s (as %s)'",
",",
"path",
",",
"name",
")",
"for",
"subdir",
"in",
"io_wrapper",
".",
"GetLogdirSubdirectories",
"(",
"path",
")",
":",
"logger",
".",
"info",
"(",
"'Processing directory %s'",
",",
"subdir",
")",
"if",
"subdir",
"not",
"in",
"self",
".",
"_run_loaders",
":",
"logger",
".",
"info",
"(",
"'Creating DB loader for directory %s'",
",",
"subdir",
")",
"names",
"=",
"self",
".",
"_get_exp_and_run_names",
"(",
"path",
",",
"subdir",
",",
"name",
")",
"experiment_name",
",",
"run_name",
"=",
"names",
"self",
".",
"_run_loaders",
"[",
"subdir",
"]",
"=",
"_RunLoader",
"(",
"subdir",
"=",
"subdir",
",",
"experiment_name",
"=",
"experiment_name",
",",
"run_name",
"=",
"run_name",
")",
"logger",
".",
"info",
"(",
"'Done with AddRunsFromDirectory: %s'",
",",
"path",
")"
] | Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
Args:
path: A string path to a directory to load runs from.
name: Optional, specifies a name for the experiment under which the
runs from this directory hierarchy will be imported. If omitted, the
path will be used as the name.
Raises:
ValueError: If the path exists and isn't a directory. | [
"Load",
"runs",
"from",
"a",
"directory",
";",
"recursively",
"walks",
"subdirectories",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/db_import_multiplexer.py#L95-L121 | train |
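A rough sketch of the directory scan this method builds on, with `os.walk` standing in for `io_wrapper.GetLogdirSubdirectories` (which yields only subdirectories that actually contain event files); the `/tmp/logs` path is hypothetical:

```python
import os

def logdir_subdirectories(path):
    # Stand-in: yield each directory under `path` that holds a file whose
    # name contains the tfevents pattern TensorBoard scans for.
    for dirpath, _, filenames in os.walk(path):
        if any('tfevents' in f for f in filenames):
            yield dirpath

run_loaders = {}
for subdir in logdir_subdirectories('/tmp/logs'):
    # Register a loader only once per subdirectory, as above.
    run_loaders.setdefault(subdir, 'loader for %s' % subdir)
print(sorted(run_loaders))
```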
tensorflow/tensorboard | tensorboard/backend/event_processing/db_import_multiplexer.py | DbImportMultiplexer.Reload | def Reload(self):
"""Load events from every detected run."""
logger.info('Beginning DbImportMultiplexer.Reload()')
# Defer event sink creation until needed; this ensures it will only exist in
# the thread that calls Reload(), since DB connections must be thread-local.
if not self._event_sink:
self._event_sink = self._CreateEventSink()
# Use collections.deque() for speed when we don't need blocking since it
# also has thread-safe appends/pops.
loader_queue = collections.deque(six.itervalues(self._run_loaders))
loader_delete_queue = collections.deque()
def batch_generator():
while True:
try:
loader = loader_queue.popleft()
except IndexError:
return
try:
for batch in loader.load_batches():
yield batch
except directory_watcher.DirectoryDeletedError:
loader_delete_queue.append(loader)
except (OSError, IOError) as e:
logger.error('Unable to load run %r: %s', loader.subdir, e)
num_threads = min(self._max_reload_threads, len(self._run_loaders))
if num_threads <= 1:
logger.info('Importing runs serially on a single thread')
for batch in batch_generator():
self._event_sink.write_batch(batch)
else:
output_queue = queue.Queue()
sentinel = object()
def producer():
try:
for batch in batch_generator():
output_queue.put(batch)
finally:
output_queue.put(sentinel)
logger.info('Starting %d threads to import runs', num_threads)
for i in xrange(num_threads):
thread = threading.Thread(target=producer, name='Loader %d' % i)
thread.daemon = True
thread.start()
num_live_threads = num_threads
while num_live_threads > 0:
output = output_queue.get()
if output == sentinel:
num_live_threads -= 1
continue
self._event_sink.write_batch(output)
for loader in loader_delete_queue:
logger.warn('Deleting loader %r', loader.subdir)
del self._run_loaders[loader.subdir]
logger.info('Finished with DbImportMultiplexer.Reload()') | python | def Reload(self):
"""Load events from every detected run."""
logger.info('Beginning DbImportMultiplexer.Reload()')
# Defer event sink creation until needed; this ensures it will only exist in
# the thread that calls Reload(), since DB connections must be thread-local.
if not self._event_sink:
self._event_sink = self._CreateEventSink()
# Use collections.deque() for speed when we don't need blocking since it
# also has thread-safe appends/pops.
loader_queue = collections.deque(six.itervalues(self._run_loaders))
loader_delete_queue = collections.deque()
def batch_generator():
while True:
try:
loader = loader_queue.popleft()
except IndexError:
return
try:
for batch in loader.load_batches():
yield batch
except directory_watcher.DirectoryDeletedError:
loader_delete_queue.append(loader)
except (OSError, IOError) as e:
logger.error('Unable to load run %r: %s', loader.subdir, e)
num_threads = min(self._max_reload_threads, len(self._run_loaders))
if num_threads <= 1:
logger.info('Importing runs serially on a single thread')
for batch in batch_generator():
self._event_sink.write_batch(batch)
else:
output_queue = queue.Queue()
sentinel = object()
def producer():
try:
for batch in batch_generator():
output_queue.put(batch)
finally:
output_queue.put(sentinel)
logger.info('Starting %d threads to import runs', num_threads)
for i in xrange(num_threads):
thread = threading.Thread(target=producer, name='Loader %d' % i)
thread.daemon = True
thread.start()
num_live_threads = num_threads
while num_live_threads > 0:
output = output_queue.get()
if output == sentinel:
num_live_threads -= 1
continue
self._event_sink.write_batch(output)
for loader in loader_delete_queue:
logger.warn('Deleting loader %r', loader.subdir)
del self._run_loaders[loader.subdir]
logger.info('Finished with DbImportMultiplexer.Reload()') | [
"def",
"Reload",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Beginning DbImportMultiplexer.Reload()'",
")",
"# Defer event sink creation until needed; this ensures it will only exist in",
"# the thread that calls Reload(), since DB connections must be thread-local.",
"if",
"not",
"self",
".",
"_event_sink",
":",
"self",
".",
"_event_sink",
"=",
"self",
".",
"_CreateEventSink",
"(",
")",
"# Use collections.deque() for speed when we don't need blocking since it",
"# also has thread-safe appends/pops.",
"loader_queue",
"=",
"collections",
".",
"deque",
"(",
"six",
".",
"itervalues",
"(",
"self",
".",
"_run_loaders",
")",
")",
"loader_delete_queue",
"=",
"collections",
".",
"deque",
"(",
")",
"def",
"batch_generator",
"(",
")",
":",
"while",
"True",
":",
"try",
":",
"loader",
"=",
"loader_queue",
".",
"popleft",
"(",
")",
"except",
"IndexError",
":",
"return",
"try",
":",
"for",
"batch",
"in",
"loader",
".",
"load_batches",
"(",
")",
":",
"yield",
"batch",
"except",
"directory_watcher",
".",
"DirectoryDeletedError",
":",
"loader_delete_queue",
".",
"append",
"(",
"loader",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Unable to load run %r: %s'",
",",
"loader",
".",
"subdir",
",",
"e",
")",
"num_threads",
"=",
"min",
"(",
"self",
".",
"_max_reload_threads",
",",
"len",
"(",
"self",
".",
"_run_loaders",
")",
")",
"if",
"num_threads",
"<=",
"1",
":",
"logger",
".",
"info",
"(",
"'Importing runs serially on a single thread'",
")",
"for",
"batch",
"in",
"batch_generator",
"(",
")",
":",
"self",
".",
"_event_sink",
".",
"write_batch",
"(",
"batch",
")",
"else",
":",
"output_queue",
"=",
"queue",
".",
"Queue",
"(",
")",
"sentinel",
"=",
"object",
"(",
")",
"def",
"producer",
"(",
")",
":",
"try",
":",
"for",
"batch",
"in",
"batch_generator",
"(",
")",
":",
"output_queue",
".",
"put",
"(",
"batch",
")",
"finally",
":",
"output_queue",
".",
"put",
"(",
"sentinel",
")",
"logger",
".",
"info",
"(",
"'Starting %d threads to import runs'",
",",
"num_threads",
")",
"for",
"i",
"in",
"xrange",
"(",
"num_threads",
")",
":",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"producer",
",",
"name",
"=",
"'Loader %d'",
"%",
"i",
")",
"thread",
".",
"daemon",
"=",
"True",
"thread",
".",
"start",
"(",
")",
"num_live_threads",
"=",
"num_threads",
"while",
"num_live_threads",
">",
"0",
":",
"output",
"=",
"output_queue",
".",
"get",
"(",
")",
"if",
"output",
"==",
"sentinel",
":",
"num_live_threads",
"-=",
"1",
"continue",
"self",
".",
"_event_sink",
".",
"write_batch",
"(",
"output",
")",
"for",
"loader",
"in",
"loader_delete_queue",
":",
"logger",
".",
"warn",
"(",
"'Deleting loader %r'",
",",
"loader",
".",
"subdir",
")",
"del",
"self",
".",
"_run_loaders",
"[",
"loader",
".",
"subdir",
"]",
"logger",
".",
"info",
"(",
"'Finished with DbImportMultiplexer.Reload()'",
")"
] | Load events from every detected run. | [
"Load",
"events",
"from",
"every",
"detected",
"run",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/db_import_multiplexer.py#L123-L178 | train |
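The threaded branch is a standard producer/consumer arrangement: N producers share one work deque, push results into a `queue.Queue`, and each enqueues a sentinel on exit so the consumer knows when every thread is done. A minimal runnable sketch of that shutdown protocol, with squaring standing in for `loader.load_batches()`:

```python
import collections
import queue
import threading

work = collections.deque(range(10))  # popleft() is thread-safe
out = queue.Queue()
sentinel = object()
NUM_THREADS = 3

def producer():
    try:
        while True:
            try:
                item = work.popleft()
            except IndexError:
                return
            out.put(item * item)
    finally:
        out.put(sentinel)  # always announce completion, even on error

for i in range(NUM_THREADS):
    threading.Thread(target=producer, daemon=True).start()

live, results = NUM_THREADS, []
while live:
    item = out.get()
    if item is sentinel:
        live -= 1
    else:
        results.append(item)
print(sorted(results))  # [0, 1, 4, ..., 81]
```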
tensorflow/tensorboard | tensorboard/backend/event_processing/db_import_multiplexer.py | _RunLoader.load_batches | def load_batches(self):
"""Returns a batched event iterator over the run directory event files."""
event_iterator = self._directory_watcher.Load()
while True:
events = []
event_bytes = 0
start = time.time()
for event_proto in event_iterator:
events.append(event_proto)
event_bytes += len(event_proto)
if len(events) >= self._BATCH_COUNT or event_bytes >= self._BATCH_BYTES:
break
elapsed = time.time() - start
logger.debug('RunLoader.load_batch() yielded in %0.3f sec for %s',
elapsed, self._subdir)
if not events:
return
yield _EventBatch(
events=events,
experiment_name=self._experiment_name,
run_name=self._run_name) | python | def load_batches(self):
"""Returns a batched event iterator over the run directory event files."""
event_iterator = self._directory_watcher.Load()
while True:
events = []
event_bytes = 0
start = time.time()
for event_proto in event_iterator:
events.append(event_proto)
event_bytes += len(event_proto)
if len(events) >= self._BATCH_COUNT or event_bytes >= self._BATCH_BYTES:
break
elapsed = time.time() - start
logger.debug('RunLoader.load_batch() yielded in %0.3f sec for %s',
elapsed, self._subdir)
if not events:
return
yield _EventBatch(
events=events,
experiment_name=self._experiment_name,
run_name=self._run_name) | [
"def",
"load_batches",
"(",
"self",
")",
":",
"event_iterator",
"=",
"self",
".",
"_directory_watcher",
".",
"Load",
"(",
")",
"while",
"True",
":",
"events",
"=",
"[",
"]",
"event_bytes",
"=",
"0",
"start",
"=",
"time",
".",
"time",
"(",
")",
"for",
"event_proto",
"in",
"event_iterator",
":",
"events",
".",
"append",
"(",
"event_proto",
")",
"event_bytes",
"+=",
"len",
"(",
"event_proto",
")",
"if",
"len",
"(",
"events",
")",
">=",
"self",
".",
"_BATCH_COUNT",
"or",
"event_bytes",
">=",
"self",
".",
"_BATCH_BYTES",
":",
"break",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"logger",
".",
"debug",
"(",
"'RunLoader.load_batch() yielded in %0.3f sec for %s'",
",",
"elapsed",
",",
"self",
".",
"_subdir",
")",
"if",
"not",
"events",
":",
"return",
"yield",
"_EventBatch",
"(",
"events",
"=",
"events",
",",
"experiment_name",
"=",
"self",
".",
"_experiment_name",
",",
"run_name",
"=",
"self",
".",
"_run_name",
")"
] | Returns a batched event iterator over the run directory event files. | [
"Returns",
"a",
"batched",
"event",
"iterator",
"over",
"the",
"run",
"directory",
"event",
"files",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/db_import_multiplexer.py#L221-L241 | train |
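The batching pattern (drain the iterator until a count or byte budget is hit, yield, repeat until exhausted) in a self-contained sketch, with string length standing in for proto byte size:

```python
def load_batches(items, batch_count=5, batch_bytes=20):
    it = iter(items)
    while True:
        batch, nbytes = [], 0
        for item in it:
            batch.append(item)
            nbytes += len(item)
            if len(batch) >= batch_count or nbytes >= batch_bytes:
                break
        if not batch:  # the shared iterator is exhausted
            return
        yield batch

for batch in load_batches(['a' * n for n in (3, 9, 12, 2, 2, 2, 2, 2)]):
    print([len(s) for s in batch])  # [3, 9, 12] then [2, 2, 2, 2, 2]
```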
tensorflow/tensorboard | tensorboard/backend/event_processing/db_import_multiplexer.py | _SqliteWriterEventSink._process_event | def _process_event(self, event, tagged_data):
"""Processes a single tf.Event and records it in tagged_data."""
event_type = event.WhichOneof('what')
# Handle the most common case first.
if event_type == 'summary':
for value in event.summary.value:
value = data_compat.migrate_value(value)
tag, metadata, values = tagged_data.get(value.tag, (None, None, []))
values.append((event.step, event.wall_time, value.tensor))
if tag is None:
# Store metadata only from the first event.
tagged_data[value.tag] = sqlite_writer.TagData(
value.tag, value.metadata, values)
elif event_type == 'file_version':
pass # TODO: reject file version < 2 (at loader level)
elif event_type == 'session_log':
if event.session_log.status == event_pb2.SessionLog.START:
pass # TODO: implement purging via sqlite writer truncation method
elif event_type in ('graph_def', 'meta_graph_def'):
pass # TODO: support graphs
elif event_type == 'tagged_run_metadata':
pass | python | def _process_event(self, event, tagged_data):
"""Processes a single tf.Event and records it in tagged_data."""
event_type = event.WhichOneof('what')
# Handle the most common case first.
if event_type == 'summary':
for value in event.summary.value:
value = data_compat.migrate_value(value)
tag, metadata, values = tagged_data.get(value.tag, (None, None, []))
values.append((event.step, event.wall_time, value.tensor))
if tag is None:
# Store metadata only from the first event.
tagged_data[value.tag] = sqlite_writer.TagData(
value.tag, value.metadata, values)
elif event_type == 'file_version':
pass # TODO: reject file version < 2 (at loader level)
elif event_type == 'session_log':
if event.session_log.status == event_pb2.SessionLog.START:
pass # TODO: implement purging via sqlite writer truncation method
elif event_type in ('graph_def', 'meta_graph_def'):
pass # TODO: support graphs
elif event_type == 'tagged_run_metadata':
pass | [
"def",
"_process_event",
"(",
"self",
",",
"event",
",",
"tagged_data",
")",
":",
"event_type",
"=",
"event",
".",
"WhichOneof",
"(",
"'what'",
")",
"# Handle the most common case first.",
"if",
"event_type",
"==",
"'summary'",
":",
"for",
"value",
"in",
"event",
".",
"summary",
".",
"value",
":",
"value",
"=",
"data_compat",
".",
"migrate_value",
"(",
"value",
")",
"tag",
",",
"metadata",
",",
"values",
"=",
"tagged_data",
".",
"get",
"(",
"value",
".",
"tag",
",",
"(",
"None",
",",
"None",
",",
"[",
"]",
")",
")",
"values",
".",
"append",
"(",
"(",
"event",
".",
"step",
",",
"event",
".",
"wall_time",
",",
"value",
".",
"tensor",
")",
")",
"if",
"tag",
"is",
"None",
":",
"# Store metadata only from the first event.",
"tagged_data",
"[",
"value",
".",
"tag",
"]",
"=",
"sqlite_writer",
".",
"TagData",
"(",
"value",
".",
"tag",
",",
"value",
".",
"metadata",
",",
"values",
")",
"elif",
"event_type",
"==",
"'file_version'",
":",
"pass",
"# TODO: reject file version < 2 (at loader level)",
"elif",
"event_type",
"==",
"'session_log'",
":",
"if",
"event",
".",
"session_log",
".",
"status",
"==",
"event_pb2",
".",
"SessionLog",
".",
"START",
":",
"pass",
"# TODO: implement purging via sqlite writer truncation method",
"elif",
"event_type",
"in",
"(",
"'graph_def'",
",",
"'meta_graph_def'",
")",
":",
"pass",
"# TODO: support graphs",
"elif",
"event_type",
"==",
"'tagged_run_metadata'",
":",
"pass"
] | Processes a single tf.Event and records it in tagged_data. | [
"Processes",
"a",
"single",
"tf",
".",
"Event",
"and",
"records",
"it",
"in",
"tagged_data",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/db_import_multiplexer.py#L329-L350 | train |
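The summary branch accumulates `(step, wall_time, tensor)` triples per tag and keeps metadata only from the first event for each tag. The same bookkeeping with plain Python objects in place of the protos (this `TagData` mirrors the `sqlite_writer.TagData` tuple; the values are illustrative):

```python
from collections import namedtuple

TagData = namedtuple('TagData', ['tag', 'metadata', 'values'])

def record_value(tagged_data, tag, metadata, step, wall_time, tensor):
    _, _, values = tagged_data.get(tag, (None, None, []))
    values.append((step, wall_time, tensor))
    if tag not in tagged_data:
        # Metadata is taken from the first event seen for this tag.
        tagged_data[tag] = TagData(tag, metadata, values)

tagged = {}
record_value(tagged, 'loss', 'scalars-metadata', 0, 1.0, 0.9)
record_value(tagged, 'loss', 'ignored-metadata', 1, 2.0, 0.5)
print(tagged['loss'].metadata)  # scalars-metadata
print(tagged['loss'].values)    # [(0, 1.0, 0.9), (1, 2.0, 0.5)]
```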
tensorflow/tensorboard | tensorboard/plugins/histogram/summary.py | _buckets | def _buckets(data, bucket_count=None):
"""Create a TensorFlow op to group data into histogram buckets.
Arguments:
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int` or scalar `int32` `Tensor`.
Returns:
A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is
a triple `[left_edge, right_edge, count]` for a single bucket.
The value of `k` is either `bucket_count` or `1` or `0`.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if bucket_count is None:
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
with tf.name_scope('buckets', values=[data, bucket_count]), \
tf.control_dependencies([tf.assert_scalar(bucket_count),
tf.assert_type(bucket_count, tf.int32)]):
data = tf.reshape(data, shape=[-1]) # flatten
data = tf.cast(data, tf.float64)
is_empty = tf.equal(tf.size(input=data), 0)
def when_empty():
return tf.constant([], shape=(0, 3), dtype=tf.float64)
def when_nonempty():
min_ = tf.reduce_min(input_tensor=data)
max_ = tf.reduce_max(input_tensor=data)
range_ = max_ - min_
is_singular = tf.equal(range_, 0)
def when_nonsingular():
bucket_width = range_ / tf.cast(bucket_count, tf.float64)
offsets = data - min_
bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
dtype=tf.int32)
clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0),
dtype=tf.float64)
edges = tf.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
return tf.transpose(a=tf.stack(
[left_edges, right_edges, bucket_counts]))
def when_singular():
center = min_
bucket_starts = tf.stack([center - 0.5])
bucket_ends = tf.stack([center + 0.5])
bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])
return tf.transpose(
a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))
return tf.cond(is_singular, when_singular, when_nonsingular)
return tf.cond(is_empty, when_empty, when_nonempty) | python | def _buckets(data, bucket_count=None):
"""Create a TensorFlow op to group data into histogram buckets.
Arguments:
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int` or scalar `int32` `Tensor`.
Returns:
A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is
a triple `[left_edge, right_edge, count]` for a single bucket.
The value of `k` is either `bucket_count` or `1` or `0`.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if bucket_count is None:
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
with tf.name_scope('buckets', values=[data, bucket_count]), \
tf.control_dependencies([tf.assert_scalar(bucket_count),
tf.assert_type(bucket_count, tf.int32)]):
data = tf.reshape(data, shape=[-1]) # flatten
data = tf.cast(data, tf.float64)
is_empty = tf.equal(tf.size(input=data), 0)
def when_empty():
return tf.constant([], shape=(0, 3), dtype=tf.float64)
def when_nonempty():
min_ = tf.reduce_min(input_tensor=data)
max_ = tf.reduce_max(input_tensor=data)
range_ = max_ - min_
is_singular = tf.equal(range_, 0)
def when_nonsingular():
bucket_width = range_ / tf.cast(bucket_count, tf.float64)
offsets = data - min_
bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
dtype=tf.int32)
clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0),
dtype=tf.float64)
edges = tf.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
return tf.transpose(a=tf.stack(
[left_edges, right_edges, bucket_counts]))
def when_singular():
center = min_
bucket_starts = tf.stack([center - 0.5])
bucket_ends = tf.stack([center + 0.5])
bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])
return tf.transpose(
a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))
return tf.cond(is_singular, when_singular, when_nonsingular)
return tf.cond(is_empty, when_empty, when_nonempty) | [
"def",
"_buckets",
"(",
"data",
",",
"bucket_count",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"bucket_count",
"is",
"None",
":",
"bucket_count",
"=",
"summary_v2",
".",
"DEFAULT_BUCKET_COUNT",
"with",
"tf",
".",
"name_scope",
"(",
"'buckets'",
",",
"values",
"=",
"[",
"data",
",",
"bucket_count",
"]",
")",
",",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"assert_scalar",
"(",
"bucket_count",
")",
",",
"tf",
".",
"assert_type",
"(",
"bucket_count",
",",
"tf",
".",
"int32",
")",
"]",
")",
":",
"data",
"=",
"tf",
".",
"reshape",
"(",
"data",
",",
"shape",
"=",
"[",
"-",
"1",
"]",
")",
"# flatten",
"data",
"=",
"tf",
".",
"cast",
"(",
"data",
",",
"tf",
".",
"float64",
")",
"is_empty",
"=",
"tf",
".",
"equal",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"data",
")",
",",
"0",
")",
"def",
"when_empty",
"(",
")",
":",
"return",
"tf",
".",
"constant",
"(",
"[",
"]",
",",
"shape",
"=",
"(",
"0",
",",
"3",
")",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"def",
"when_nonempty",
"(",
")",
":",
"min_",
"=",
"tf",
".",
"reduce_min",
"(",
"input_tensor",
"=",
"data",
")",
"max_",
"=",
"tf",
".",
"reduce_max",
"(",
"input_tensor",
"=",
"data",
")",
"range_",
"=",
"max_",
"-",
"min_",
"is_singular",
"=",
"tf",
".",
"equal",
"(",
"range_",
",",
"0",
")",
"def",
"when_nonsingular",
"(",
")",
":",
"bucket_width",
"=",
"range_",
"/",
"tf",
".",
"cast",
"(",
"bucket_count",
",",
"tf",
".",
"float64",
")",
"offsets",
"=",
"data",
"-",
"min_",
"bucket_indices",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"floor",
"(",
"offsets",
"/",
"bucket_width",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"clamped_indices",
"=",
"tf",
".",
"minimum",
"(",
"bucket_indices",
",",
"bucket_count",
"-",
"1",
")",
"one_hots",
"=",
"tf",
".",
"one_hot",
"(",
"clamped_indices",
",",
"depth",
"=",
"bucket_count",
")",
"bucket_counts",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"one_hots",
",",
"axis",
"=",
"0",
")",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"edges",
"=",
"tf",
".",
"linspace",
"(",
"min_",
",",
"max_",
",",
"bucket_count",
"+",
"1",
")",
"left_edges",
"=",
"edges",
"[",
":",
"-",
"1",
"]",
"right_edges",
"=",
"edges",
"[",
"1",
":",
"]",
"return",
"tf",
".",
"transpose",
"(",
"a",
"=",
"tf",
".",
"stack",
"(",
"[",
"left_edges",
",",
"right_edges",
",",
"bucket_counts",
"]",
")",
")",
"def",
"when_singular",
"(",
")",
":",
"center",
"=",
"min_",
"bucket_starts",
"=",
"tf",
".",
"stack",
"(",
"[",
"center",
"-",
"0.5",
"]",
")",
"bucket_ends",
"=",
"tf",
".",
"stack",
"(",
"[",
"center",
"+",
"0.5",
"]",
")",
"bucket_counts",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"data",
")",
",",
"tf",
".",
"float64",
")",
"]",
")",
"return",
"tf",
".",
"transpose",
"(",
"a",
"=",
"tf",
".",
"stack",
"(",
"[",
"bucket_starts",
",",
"bucket_ends",
",",
"bucket_counts",
"]",
")",
")",
"return",
"tf",
".",
"cond",
"(",
"is_singular",
",",
"when_singular",
",",
"when_nonsingular",
")",
"return",
"tf",
".",
"cond",
"(",
"is_empty",
",",
"when_empty",
",",
"when_nonempty",
")"
] | Create a TensorFlow op to group data into histogram buckets.
Arguments:
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int` or scalar `int32` `Tensor`.
Returns:
A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is
a triple `[left_edge, right_edge, count]` for a single bucket.
The value of `k` is either `bucket_count` or `1` or `0`. | [
"Create",
"a",
"TensorFlow",
"op",
"to",
"group",
"data",
"into",
"histogram",
"buckets",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/summary.py#L46-L102 | train |
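What the op computes can be cross-checked in NumPy. Note the clamp on the last index: the maximum element floor-divides to exactly `bucket_count`, so it must be folded into the final bucket. A hedged sketch of the same math:

```python
import numpy as np

def buckets(data, bucket_count=4):
    data = np.asarray(data, dtype=np.float64).ravel()
    if data.size == 0:
        return np.zeros((0, 3))
    lo, hi = data.min(), data.max()
    if hi == lo:  # singular: one unit-wide bucket centered on the value
        return np.array([[lo - 0.5, lo + 0.5, float(data.size)]])
    width = (hi - lo) / bucket_count
    idx = np.minimum(np.floor((data - lo) / width).astype(int),
                     bucket_count - 1)  # clamp the max element
    counts = np.bincount(idx, minlength=bucket_count).astype(float)
    edges = np.linspace(lo, hi, bucket_count + 1)
    return np.stack([edges[:-1], edges[1:], counts], axis=1)

# Counts sum to 5; the max value 4.0 lands in the last bucket.
print(buckets([0.0, 1.0, 2.0, 3.0, 4.0]))
```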
tensorflow/tensorboard | tensorboard/plugins/histogram/summary.py | op | def op(name,
data,
bucket_count=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
    `[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
tensor = _buckets(data, bucket_count=bucket_count)
return tf.summary.tensor_summary(name='histogram_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | python | def op(name,
data,
bucket_count=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
    `[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
tensor = _buckets(data, bucket_count=bucket_count)
return tf.summary.tensor_summary(name='histogram_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | [
"def",
"op",
"(",
"name",
",",
"data",
",",
"bucket_count",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"collections",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"display_name",
"is",
"None",
":",
"display_name",
"=",
"name",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"display_name",
",",
"description",
"=",
"description",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"tensor",
"=",
"_buckets",
"(",
"data",
",",
"bucket_count",
"=",
"bucket_count",
")",
"return",
"tf",
".",
"summary",
".",
"tensor_summary",
"(",
"name",
"=",
"'histogram_summary'",
",",
"tensor",
"=",
"tensor",
",",
"collections",
"=",
"collections",
",",
"summary_metadata",
"=",
"summary_metadata",
")"
] | Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
    `[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op. | [
"Create",
"a",
"legacy",
"histogram",
"summary",
"op",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/summary.py#L105-L144 | train |
tensorflow/tensorboard | tensorboard/plugins/histogram/summary.py | pb | def pb(name, data, bucket_count=None, display_name=None, description=None):
"""Create a legacy histogram summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if bucket_count is None:
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
data = np.array(data).flatten().astype(float)
if data.size == 0:
buckets = np.array([]).reshape((0, 3))
else:
min_ = np.min(data)
max_ = np.max(data)
range_ = max_ - min_
if range_ == 0:
center = min_
buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]])
else:
bucket_width = range_ / bucket_count
offsets = data - min_
bucket_indices = np.floor(offsets / bucket_width).astype(int)
clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
one_hots = (np.array([clamped_indices]).transpose()
== np.arange(0, bucket_count)) # broadcast
assert one_hots.shape == (data.size, bucket_count), (
one_hots.shape, (data.size, bucket_count))
bucket_counts = np.sum(one_hots, axis=0)
edges = np.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
tensor = tf.make_tensor_proto(buckets, dtype=tf.float64)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/histogram_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | python | def pb(name, data, bucket_count=None, display_name=None, description=None):
"""Create a legacy histogram summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if bucket_count is None:
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
data = np.array(data).flatten().astype(float)
if data.size == 0:
buckets = np.array([]).reshape((0, 3))
else:
min_ = np.min(data)
max_ = np.max(data)
range_ = max_ - min_
if range_ == 0:
center = min_
buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]])
else:
bucket_width = range_ / bucket_count
offsets = data - min_
bucket_indices = np.floor(offsets / bucket_width).astype(int)
clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
one_hots = (np.array([clamped_indices]).transpose()
== np.arange(0, bucket_count)) # broadcast
assert one_hots.shape == (data.size, bucket_count), (
one_hots.shape, (data.size, bucket_count))
bucket_counts = np.sum(one_hots, axis=0)
edges = np.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
tensor = tf.make_tensor_proto(buckets, dtype=tf.float64)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/histogram_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | [
"def",
"pb",
"(",
"name",
",",
"data",
",",
"bucket_count",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"if",
"bucket_count",
"is",
"None",
":",
"bucket_count",
"=",
"summary_v2",
".",
"DEFAULT_BUCKET_COUNT",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
".",
"flatten",
"(",
")",
".",
"astype",
"(",
"float",
")",
"if",
"data",
".",
"size",
"==",
"0",
":",
"buckets",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
".",
"reshape",
"(",
"(",
"0",
",",
"3",
")",
")",
"else",
":",
"min_",
"=",
"np",
".",
"min",
"(",
"data",
")",
"max_",
"=",
"np",
".",
"max",
"(",
"data",
")",
"range_",
"=",
"max_",
"-",
"min_",
"if",
"range_",
"==",
"0",
":",
"center",
"=",
"min_",
"buckets",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"center",
"-",
"0.5",
",",
"center",
"+",
"0.5",
",",
"float",
"(",
"data",
".",
"size",
")",
"]",
"]",
")",
"else",
":",
"bucket_width",
"=",
"range_",
"/",
"bucket_count",
"offsets",
"=",
"data",
"-",
"min_",
"bucket_indices",
"=",
"np",
".",
"floor",
"(",
"offsets",
"/",
"bucket_width",
")",
".",
"astype",
"(",
"int",
")",
"clamped_indices",
"=",
"np",
".",
"minimum",
"(",
"bucket_indices",
",",
"bucket_count",
"-",
"1",
")",
"one_hots",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"clamped_indices",
"]",
")",
".",
"transpose",
"(",
")",
"==",
"np",
".",
"arange",
"(",
"0",
",",
"bucket_count",
")",
")",
"# broadcast",
"assert",
"one_hots",
".",
"shape",
"==",
"(",
"data",
".",
"size",
",",
"bucket_count",
")",
",",
"(",
"one_hots",
".",
"shape",
",",
"(",
"data",
".",
"size",
",",
"bucket_count",
")",
")",
"bucket_counts",
"=",
"np",
".",
"sum",
"(",
"one_hots",
",",
"axis",
"=",
"0",
")",
"edges",
"=",
"np",
".",
"linspace",
"(",
"min_",
",",
"max_",
",",
"bucket_count",
"+",
"1",
")",
"left_edges",
"=",
"edges",
"[",
":",
"-",
"1",
"]",
"right_edges",
"=",
"edges",
"[",
"1",
":",
"]",
"buckets",
"=",
"np",
".",
"array",
"(",
"[",
"left_edges",
",",
"right_edges",
",",
"bucket_counts",
"]",
")",
".",
"transpose",
"(",
")",
"tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"buckets",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"if",
"display_name",
"is",
"None",
":",
"display_name",
"=",
"name",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"display_name",
",",
"description",
"=",
"description",
")",
"tf_summary_metadata",
"=",
"tf",
".",
"SummaryMetadata",
".",
"FromString",
"(",
"summary_metadata",
".",
"SerializeToString",
"(",
")",
")",
"summary",
"=",
"tf",
".",
"Summary",
"(",
")",
"summary",
".",
"value",
".",
"add",
"(",
"tag",
"=",
"'%s/histogram_summary'",
"%",
"name",
",",
"metadata",
"=",
"tf_summary_metadata",
",",
"tensor",
"=",
"tensor",
")",
"return",
"summary"
] | Create a legacy histogram summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object. | [
"Create",
"a",
"legacy",
"histogram",
"summary",
"protobuf",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/summary.py#L147-L210 | train |
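The one-hot broadcast in `pb` (compare each clamped index against `arange(bucket_count)`, then sum over axis 0) is an explicit spelling of a bincount; a short check that the two agree:

```python
import numpy as np

clamped = np.array([0, 1, 1, 3, 3, 3])
bucket_count = 4

one_hots = clamped[:, None] == np.arange(bucket_count)  # shape (n, bucket_count)
via_broadcast = one_hots.sum(axis=0)
via_bincount = np.bincount(clamped, minlength=bucket_count)
assert (via_broadcast == via_bincount).all()
print(via_broadcast)  # [1 2 0 3]
```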
tensorflow/tensorboard | tensorboard/plugins/debugger/tensor_store.py | _WatchStore.add | def add(self, value):
"""Add a tensor the watch store."""
if self._disposed:
raise ValueError(
'Cannot add value: this _WatchStore instance is already disposed')
self._data.append(value)
if hasattr(value, 'nbytes'):
self._in_mem_bytes += value.nbytes
self._ensure_bytes_limits() | python | def add(self, value):
"""Add a tensor the watch store."""
if self._disposed:
raise ValueError(
'Cannot add value: this _WatchStore instance is already disposed')
self._data.append(value)
if hasattr(value, 'nbytes'):
self._in_mem_bytes += value.nbytes
self._ensure_bytes_limits() | [
"def",
"add",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_disposed",
":",
"raise",
"ValueError",
"(",
"'Cannot add value: this _WatchStore instance is already disposed'",
")",
"self",
".",
"_data",
".",
"append",
"(",
"value",
")",
"if",
"hasattr",
"(",
"value",
",",
"'nbytes'",
")",
":",
"self",
".",
"_in_mem_bytes",
"+=",
"value",
".",
"nbytes",
"self",
".",
"_ensure_bytes_limits",
"(",
")"
] | Add a tensor to the watch store. | [
"Add",
"a",
"tensor",
"the",
"watch",
"store",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L83-L91 | train |
tensorflow/tensorboard | tensorboard/plugins/debugger/tensor_store.py | _WatchStore.num_in_memory | def num_in_memory(self):
"""Get number of values in memory."""
n = len(self._data) - 1
while n >= 0:
if isinstance(self._data[n], _TensorValueDiscarded):
break
n -= 1
return len(self._data) - 1 - n | python | def num_in_memory(self):
"""Get number of values in memory."""
n = len(self._data) - 1
while n >= 0:
if isinstance(self._data[n], _TensorValueDiscarded):
break
n -= 1
return len(self._data) - 1 - n | [
"def",
"num_in_memory",
"(",
"self",
")",
":",
"n",
"=",
"len",
"(",
"self",
".",
"_data",
")",
"-",
"1",
"while",
"n",
">=",
"0",
":",
"if",
"isinstance",
"(",
"self",
".",
"_data",
"[",
"n",
"]",
",",
"_TensorValueDiscarded",
")",
":",
"break",
"n",
"-=",
"1",
"return",
"len",
"(",
"self",
".",
"_data",
")",
"-",
"1",
"-",
"n"
] | Get number of values in memory. | [
"Get",
"number",
"of",
"values",
"in",
"memory",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L119-L126 | train |
tensorflow/tensorboard | tensorboard/plugins/debugger/tensor_store.py | _WatchStore.num_discarded | def num_discarded(self):
"""Get the number of values discarded due to exceeding both limits."""
if not self._data:
return 0
n = 0
while n < len(self._data):
if not isinstance(self._data[n], _TensorValueDiscarded):
break
n += 1
return n | python | def num_discarded(self):
"""Get the number of values discarded due to exceeding both limits."""
if not self._data:
return 0
n = 0
while n < len(self._data):
if not isinstance(self._data[n], _TensorValueDiscarded):
break
n += 1
return n | [
"def",
"num_discarded",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_data",
":",
"return",
"0",
"n",
"=",
"0",
"while",
"n",
"<",
"len",
"(",
"self",
".",
"_data",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_data",
"[",
"n",
"]",
",",
"_TensorValueDiscarded",
")",
":",
"break",
"n",
"+=",
"1",
"return",
"n"
] | Get the number of values discarded due to exceeding both limits. | [
"Get",
"the",
"number",
"of",
"values",
"discarded",
"due",
"to",
"exceeding",
"both",
"limits",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L128-L137 | train |
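Both counters rely on the invariant that values are only ever discarded from the front of `_data`: `num_discarded` scans forward from the start, `num_in_memory` scans backward from the end, and the two partitions meet. A sketch with a stand-in sentinel class:

```python
class Discarded(object):
    """Stand-in for _TensorValueDiscarded."""

def num_discarded(data):
    n = 0
    while n < len(data) and isinstance(data[n], Discarded):
        n += 1
    return n

def num_in_memory(data):
    n = len(data) - 1
    while n >= 0 and not isinstance(data[n], Discarded):
        n -= 1
    return len(data) - 1 - n

data = [Discarded(), Discarded(), 3.0, 4.0, 5.0]
print(num_discarded(data), num_in_memory(data))  # 2 3
```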
tensorflow/tensorboard | tensorboard/plugins/debugger/tensor_store.py | _WatchStore.query | def query(self, time_indices):
"""Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded).
"""
if self._disposed:
raise ValueError(
'Cannot query: this _WatchStore instance is already disposed')
if not isinstance(time_indices, (tuple, list)):
time_indices = [time_indices]
output = []
for time_index in time_indices:
if isinstance(self._data[time_index], _TensorValueDiscarded):
output.append(None)
else:
data_item = self._data[time_index]
if (hasattr(data_item, 'dtype') and
tensor_helper.translate_dtype(data_item.dtype) == 'string'):
_, _, data_item = tensor_helper.array_view(data_item)
data_item = np.array(
tensor_helper.process_buffers_for_display(data_item),
dtype=np.object)
output.append(data_item)
return output | python | def query(self, time_indices):
"""Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded).
"""
if self._disposed:
raise ValueError(
'Cannot query: this _WatchStore instance is already disposed')
if not isinstance(time_indices, (tuple, list)):
time_indices = [time_indices]
output = []
for time_index in time_indices:
if isinstance(self._data[time_index], _TensorValueDiscarded):
output.append(None)
else:
data_item = self._data[time_index]
if (hasattr(data_item, 'dtype') and
tensor_helper.translate_dtype(data_item.dtype) == 'string'):
_, _, data_item = tensor_helper.array_view(data_item)
data_item = np.array(
tensor_helper.process_buffers_for_display(data_item),
dtype=np.object)
output.append(data_item)
return output | [
"def",
"query",
"(",
"self",
",",
"time_indices",
")",
":",
"if",
"self",
".",
"_disposed",
":",
"raise",
"ValueError",
"(",
"'Cannot query: this _WatchStore instance is already disposed'",
")",
"if",
"not",
"isinstance",
"(",
"time_indices",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"time_indices",
"=",
"[",
"time_indices",
"]",
"output",
"=",
"[",
"]",
"for",
"time_index",
"in",
"time_indices",
":",
"if",
"isinstance",
"(",
"self",
".",
"_data",
"[",
"time_index",
"]",
",",
"_TensorValueDiscarded",
")",
":",
"output",
".",
"append",
"(",
"None",
")",
"else",
":",
"data_item",
"=",
"self",
".",
"_data",
"[",
"time_index",
"]",
"if",
"(",
"hasattr",
"(",
"data_item",
",",
"'dtype'",
")",
"and",
"tensor_helper",
".",
"translate_dtype",
"(",
"data_item",
".",
"dtype",
")",
"==",
"'string'",
")",
":",
"_",
",",
"_",
",",
"data_item",
"=",
"tensor_helper",
".",
"array_view",
"(",
"data_item",
")",
"data_item",
"=",
"np",
".",
"array",
"(",
"tensor_helper",
".",
"process_buffers_for_display",
"(",
"data_item",
")",
",",
"dtype",
"=",
"np",
".",
"object",
")",
"output",
".",
"append",
"(",
"data_item",
")",
"return",
"output"
] | Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded). | [
"Query",
"the",
"values",
"at",
"given",
"time",
"indices",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L139-L168 | train |
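The time-index handling reduces to applying a numpy-style index string to `range(num_total)`. A toy version of that step, with a crude parser standing in for `tensor_helper.parse_time_indices` (which the real code delegates to):

```python
def parse_time_indices(s):
    # Crude stand-in: accepts "k", "a:b", or "a:b:c" (parts may be empty).
    s = s.strip('[]')
    if ':' not in s:
        return int(s)
    parts = [int(p) if p else None for p in s.split(':')]
    return slice(*parts)

def select_time_indices(num_total, time_indices='-1'):
    picked = list(range(num_total))[parse_time_indices(time_indices)]
    return picked if isinstance(picked, list) else [picked]

print(select_time_indices(6))           # [5]
print(select_time_indices(6, ':-2'))    # [0, 1, 2, 3]
print(select_time_indices(6, '[::2]'))  # [0, 2, 4]
```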
tensorflow/tensorboard | tensorboard/plugins/debugger/tensor_store.py | TensorStore.add | def add(self, watch_key, tensor_value):
"""Add a tensor value.
Args:
watch_key: A string representing the debugger tensor watch, e.g.,
'Dense_1/BiasAdd:0:DebugIdentity'.
tensor_value: The value of the tensor as a numpy.ndarray.
"""
if watch_key not in self._tensor_data:
self._tensor_data[watch_key] = _WatchStore(
watch_key,
mem_bytes_limit=self._watch_mem_bytes_limit)
self._tensor_data[watch_key].add(tensor_value) | python | def add(self, watch_key, tensor_value):
"""Add a tensor value.
Args:
watch_key: A string representing the debugger tensor watch, e.g.,
'Dense_1/BiasAdd:0:DebugIdentity'.
tensor_value: The value of the tensor as a numpy.ndarray.
"""
if watch_key not in self._tensor_data:
self._tensor_data[watch_key] = _WatchStore(
watch_key,
mem_bytes_limit=self._watch_mem_bytes_limit)
self._tensor_data[watch_key].add(tensor_value) | [
"def",
"add",
"(",
"self",
",",
"watch_key",
",",
"tensor_value",
")",
":",
"if",
"watch_key",
"not",
"in",
"self",
".",
"_tensor_data",
":",
"self",
".",
"_tensor_data",
"[",
"watch_key",
"]",
"=",
"_WatchStore",
"(",
"watch_key",
",",
"mem_bytes_limit",
"=",
"self",
".",
"_watch_mem_bytes_limit",
")",
"self",
".",
"_tensor_data",
"[",
"watch_key",
"]",
".",
"add",
"(",
"tensor_value",
")"
] | Add a tensor value.
Args:
watch_key: A string representing the debugger tensor watch, e.g.,
'Dense_1/BiasAdd:0:DebugIdentity'.
tensor_value: The value of the tensor as a numpy.ndarray. | [
"Add",
"a",
"tensor",
"value",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L186-L198 | train |
tensorflow/tensorboard | tensorboard/plugins/debugger/tensor_store.py | TensorStore.query | def query(self,
watch_key,
time_indices=None,
slicing=None,
mapping=None):
"""Query tensor store for a given watch_key.
Args:
watch_key: The watch key to query.
time_indices: A numpy-style slicing string for time indices. E.g.,
`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
slicing: A numpy-style slicing string for individual time steps.
    mapping: A mapping string or a list of them. Supported mappings:
`{None, 'image/png', 'health-pill'}`.
Returns:
    The potentially sliced values, as a nested `list` of values or their
      mapped format.
  Raises:
    ValueError: If the shape of the sliced array is incompatible with the
      mapping mode, or if the mapping type is invalid.
"""
if watch_key not in self._tensor_data:
raise KeyError("watch_key not found: %s" % watch_key)
if time_indices is None:
time_indices = '-1'
time_slicing = tensor_helper.parse_time_indices(time_indices)
all_time_indices = list(range(self._tensor_data[watch_key].num_total()))
sliced_time_indices = all_time_indices[time_slicing]
if not isinstance(sliced_time_indices, list):
sliced_time_indices = [sliced_time_indices]
recombine_and_map = False
step_mapping = mapping
if len(sliced_time_indices) > 1 and mapping not in (None, ):
recombine_and_map = True
step_mapping = None
output = []
for index in sliced_time_indices:
value = self._tensor_data[watch_key].query(index)[0]
if (value is not None and
not isinstance(value, debug_data.InconvertibleTensorProto)):
output.append(tensor_helper.array_view(
value, slicing=slicing, mapping=step_mapping)[2])
else:
output.append(None)
if recombine_and_map:
if mapping == 'image/png':
output = tensor_helper.array_to_base64_png(output)
elif mapping and mapping != 'none':
logger.warn(
'Unsupported mapping mode after recombining time steps: %s',
mapping)
return output | python | def query(self,
watch_key,
time_indices=None,
slicing=None,
mapping=None):
"""Query tensor store for a given watch_key.
Args:
watch_key: The watch key to query.
time_indices: A numpy-style slicing string for time indices. E.g.,
`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
slicing: A numpy-style slicing string for individual time steps.
mapping: A mapping string or a list of them. Supported mappings:
`{None, 'image/png', 'health-pill'}`.
Returns:
The potentially sliced values as a nested list of values or its mapped
format. A `list` of nested `list` of values.
Raises:
ValueError: If the shape of the sliced array is incompatible with mapping
mode. Or if the mapping type is invalid.
"""
if watch_key not in self._tensor_data:
raise KeyError("watch_key not found: %s" % watch_key)
if time_indices is None:
time_indices = '-1'
time_slicing = tensor_helper.parse_time_indices(time_indices)
all_time_indices = list(range(self._tensor_data[watch_key].num_total()))
sliced_time_indices = all_time_indices[time_slicing]
if not isinstance(sliced_time_indices, list):
sliced_time_indices = [sliced_time_indices]
recombine_and_map = False
step_mapping = mapping
if len(sliced_time_indices) > 1 and mapping not in (None, ):
recombine_and_map = True
step_mapping = None
output = []
for index in sliced_time_indices:
value = self._tensor_data[watch_key].query(index)[0]
if (value is not None and
not isinstance(value, debug_data.InconvertibleTensorProto)):
output.append(tensor_helper.array_view(
value, slicing=slicing, mapping=step_mapping)[2])
else:
output.append(None)
if recombine_and_map:
if mapping == 'image/png':
output = tensor_helper.array_to_base64_png(output)
elif mapping and mapping != 'none':
logger.warn(
'Unsupported mapping mode after recombining time steps: %s',
mapping)
return output | [
"def",
"query",
"(",
"self",
",",
"watch_key",
",",
"time_indices",
"=",
"None",
",",
"slicing",
"=",
"None",
",",
"mapping",
"=",
"None",
")",
":",
"if",
"watch_key",
"not",
"in",
"self",
".",
"_tensor_data",
":",
"raise",
"KeyError",
"(",
"\"watch_key not found: %s\"",
"%",
"watch_key",
")",
"if",
"time_indices",
"is",
"None",
":",
"time_indices",
"=",
"'-1'",
"time_slicing",
"=",
"tensor_helper",
".",
"parse_time_indices",
"(",
"time_indices",
")",
"all_time_indices",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"_tensor_data",
"[",
"watch_key",
"]",
".",
"num_total",
"(",
")",
")",
")",
"sliced_time_indices",
"=",
"all_time_indices",
"[",
"time_slicing",
"]",
"if",
"not",
"isinstance",
"(",
"sliced_time_indices",
",",
"list",
")",
":",
"sliced_time_indices",
"=",
"[",
"sliced_time_indices",
"]",
"recombine_and_map",
"=",
"False",
"step_mapping",
"=",
"mapping",
"if",
"len",
"(",
"sliced_time_indices",
")",
">",
"1",
"and",
"mapping",
"not",
"in",
"(",
"None",
",",
")",
":",
"recombine_and_map",
"=",
"True",
"step_mapping",
"=",
"None",
"output",
"=",
"[",
"]",
"for",
"index",
"in",
"sliced_time_indices",
":",
"value",
"=",
"self",
".",
"_tensor_data",
"[",
"watch_key",
"]",
".",
"query",
"(",
"index",
")",
"[",
"0",
"]",
"if",
"(",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"debug_data",
".",
"InconvertibleTensorProto",
")",
")",
":",
"output",
".",
"append",
"(",
"tensor_helper",
".",
"array_view",
"(",
"value",
",",
"slicing",
"=",
"slicing",
",",
"mapping",
"=",
"step_mapping",
")",
"[",
"2",
"]",
")",
"else",
":",
"output",
".",
"append",
"(",
"None",
")",
"if",
"recombine_and_map",
":",
"if",
"mapping",
"==",
"'image/png'",
":",
"output",
"=",
"tensor_helper",
".",
"array_to_base64_png",
"(",
"output",
")",
"elif",
"mapping",
"and",
"mapping",
"!=",
"'none'",
":",
"logger",
".",
"warn",
"(",
"'Unsupported mapping mode after recomining time steps: %s'",
",",
"mapping",
")",
"return",
"output"
] | Query tensor store for a given watch_key.
Args:
watch_key: The watch key to query.
time_indices: A numpy-style slicing string for time indices. E.g.,
`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
slicing: A numpy-style slicing string for individual time steps.
mapping: A mapping string or a list of them. Supported mappings:
`{None, 'image/png', 'health-pill'}`.
Returns:
The potentially sliced values as a nested list of values or its mapped
format. A `list` of nested `list` of values.
Raises:
ValueError: If the shape of the sliced array is incompatible with mapping
mode. Or if the mapping type is invalid. | [
"Query",
"tensor",
"store",
"for",
"a",
"given",
"watch_key",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/tensor_store.py#L200-L257 | train |
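A sketch exercising the query parameters documented above. The index and slicing strings follow the docstring's numpy-style convention; the constructor is assumed as in the previous sketch:

```python
import numpy as np

from tensorboard.plugins.debugger.tensor_store import TensorStore

store = TensorStore()
watch_key = 'Dense_1/BiasAdd:0:DebugIdentity'
for step in range(4):
    store.add(watch_key, np.arange(4.0).reshape(2, 2) * step)

latest = store.query(watch_key)                        # time_indices defaults to '-1'
first_two = store.query(watch_key, time_indices=':2')  # steps 0 and 1
strided = store.query(watch_key, time_indices='[::2]', slicing='[:1,:1]')
# store.query('no/such/watch')  # would raise KeyError
```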
tensorflow/tensorboard | tensorboard/plugins/debugger/debugger_plugin.py | DebuggerPlugin.listen | def listen(self, grpc_port):
"""Start listening on the given gRPC port.
This method of an instance of DebuggerPlugin can be invoked at most once.
This method is not thread safe.
Args:
grpc_port: port number to listen at.
Raises:
ValueError: If this instance is already listening at a gRPC port.
"""
if self._grpc_port:
raise ValueError(
"This DebuggerPlugin instance is already listening at gRPC port %d" %
self._grpc_port)
self._grpc_port = grpc_port
sys.stderr.write('Creating DebuggerDataServer at port %d and logdir %s\n' %
(self._grpc_port, self._logdir))
sys.stderr.flush()
self._debugger_data_server = debugger_server_lib.DebuggerDataServer(
self._grpc_port, self._logdir)
threading.Thread(target=self._debugger_data_server.
start_the_debugger_data_receiving_server).start() | python | def listen(self, grpc_port):
"""Start listening on the given gRPC port.
This method of an instance of DebuggerPlugin can be invoked at most once.
This method is not thread safe.
Args:
grpc_port: port number to listen at.
Raises:
ValueError: If this instance is already listening at a gRPC port.
"""
if self._grpc_port:
raise ValueError(
"This DebuggerPlugin instance is already listening at gRPC port %d" %
self._grpc_port)
self._grpc_port = grpc_port
sys.stderr.write('Creating DebuggerDataServer at port %d and logdir %s\n' %
(self._grpc_port, self._logdir))
sys.stderr.flush()
self._debugger_data_server = debugger_server_lib.DebuggerDataServer(
self._grpc_port, self._logdir)
threading.Thread(target=self._debugger_data_server.
start_the_debugger_data_receiving_server).start() | [
"def",
"listen",
"(",
"self",
",",
"grpc_port",
")",
":",
"if",
"self",
".",
"_grpc_port",
":",
"raise",
"ValueError",
"(",
"\"This DebuggerPlugin instance is already listening at gRPC port %d\"",
"%",
"self",
".",
"_grpc_port",
")",
"self",
".",
"_grpc_port",
"=",
"grpc_port",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Creating DebuggerDataServer at port %d and logdir %s\\n'",
"%",
"(",
"self",
".",
"_grpc_port",
",",
"self",
".",
"_logdir",
")",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"self",
".",
"_debugger_data_server",
"=",
"debugger_server_lib",
".",
"DebuggerDataServer",
"(",
"self",
".",
"_grpc_port",
",",
"self",
".",
"_logdir",
")",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_debugger_data_server",
".",
"start_the_debugger_data_receiving_server",
")",
".",
"start",
"(",
")"
] | Start listening on the given gRPC port.
This method of an instance of DebuggerPlugin can be invoked at most once.
This method is not thread safe.
Args:
grpc_port: port number to listen at.
Raises:
ValueError: If this instance is already listening at a gRPC port. | [
"Start",
"listening",
"on",
"the",
"given",
"gRPC",
"port",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin.py#L97-L122 | train |
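A construction sketch for the plugin above. Wiring a `DebuggerPlugin` by hand is unusual (TensorBoard normally does this during startup), and the `TBContext` fields used here are assumptions:

```python
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.plugins import base_plugin
from tensorboard.plugins.debugger import debugger_plugin

multiplexer = plugin_event_multiplexer.EventMultiplexer()
context = base_plugin.TBContext(logdir='/tmp/logdir', multiplexer=multiplexer)

plugin = debugger_plugin.DebuggerPlugin(context)
plugin.listen(6064)    # spawns the gRPC receiving server on a background thread
# plugin.listen(6064)  # a second call would raise ValueError
```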
tensorflow/tensorboard | tensorboard/plugins/debugger/debugger_plugin.py | DebuggerPlugin.is_active | def is_active(self):
"""Determines whether this plugin is active.
This plugin is active if any health pill information is present for any
run.
Returns:
A boolean. Whether this plugin is active.
"""
return bool(
self._grpc_port is not None and
self._event_multiplexer and
self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME)) | python | def is_active(self):
"""Determines whether this plugin is active.
This plugin is active if any health pill information is present for any
run.
Returns:
A boolean. Whether this plugin is active.
"""
return bool(
self._grpc_port is not None and
self._event_multiplexer and
self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME)) | [
"def",
"is_active",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"_grpc_port",
"is",
"not",
"None",
"and",
"self",
".",
"_event_multiplexer",
"and",
"self",
".",
"_event_multiplexer",
".",
"PluginRunToTagToContent",
"(",
"constants",
".",
"DEBUGGER_PLUGIN_NAME",
")",
")"
] | Determines whether this plugin is active.
This plugin is active if any health pill information is present for any
run.
Returns:
A boolean. Whether this plugin is active. | [
"Determines",
"whether",
"this",
"plugin",
"is",
"active",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin.py#L139-L152 | train |
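Continuing the sketch above: the predicate requires a gRPC port, a multiplexer, and debugger-tagged summary data, so it stays `False` until all three are in place. `plugin` and `multiplexer` are reused from the `listen()` sketch:

```python
multiplexer.AddRunsFromDirectory('/tmp/logdir')
multiplexer.Reload()
print(plugin.is_active())  # True only once debugger health-pill tags exist
```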
tensorflow/tensorboard | tensorboard/plugins/debugger/debugger_plugin.py | DebuggerPlugin._serve_health_pills_handler | def _serve_health_pills_handler(self, request):
"""A (wrapped) werkzeug handler for serving health pills.
Accepts POST requests and responds with health pills. The request accepts
several POST parameters:
node_names: (required string) A JSON-ified list of node names for which
the client would like to request health pills.
run: (optional string) The run to retrieve health pills for. Defaults to
'.'. This data is sent via POST (not GET) since URL length is limited.
step: (optional integer) The session run step for which to
retrieve health pills. If provided, the handler reads the health pills
of that step from disk (which is slow) and produces a response with
only health pills at that step. If not provided, the handler returns a
response with health pills at all steps sampled by the event
multiplexer (the fast path). The motivation here is that, sometimes,
one desires to examine health pills at a specific step (to, say, find
the first step that causes a model to blow up with NaNs).
get_plugin_apps must be called before this slower feature is used
because that method passes the logdir (directory path) to this plugin.
This handler responds with a JSON-ified object mapping from node names to a
list (of size 1) of health pill event objects, each of which has these
properties.
{
'wall_time': float,
'step': int,
'node_name': string,
'output_slot': int,
# A list of 12 floats that summarizes the elements of the tensor.
'value': float[],
}
Node names for which there are no health pills to be found are excluded from
the mapping.
Args:
request: The request issued by the client for health pills.
Returns:
A werkzeug BaseResponse object.
"""
if request.method != 'POST':
return wrappers.Response(response=(
'%s requests are forbidden by the debugger plugin.' %
request.method), status=405)
if _NODE_NAMES_POST_KEY not in request.form:
return wrappers.Response(response=(
'The %r POST key was not found in the request for health pills.' %
_NODE_NAMES_POST_KEY), status=400)
jsonified_node_names = request.form[_NODE_NAMES_POST_KEY]
try:
node_names = json.loads(tf.compat.as_text(jsonified_node_names))
except Exception as e: # pylint: disable=broad-except
# Different JSON libs raise different exceptions, so we just do a
# catch-all here. This problem is complicated by how TensorBoard might be
# run in many different environments, as it is open-source.
# TODO(@caisq, @chihuahua): Create platform-dependent adapter to catch
# specific types of exceptions, instead of the broad catching here.
logger.error('Could not decode node name JSON string %r: %s',
jsonified_node_names, e)
return wrappers.Response(status=400)
if not isinstance(node_names, list):
logger.error('%r is not a JSON list of node names.',
jsonified_node_names)
return wrappers.Response(status=400)
run = request.form.get(_RUN_POST_KEY, _DEFAULT_RUN)
step_string = request.form.get(_STEP_POST_KEY, None)
if step_string is None:
# Use all steps sampled by the event multiplexer (Relatively fast).
mapping = self._obtain_sampled_health_pills(run, node_names)
else:
# Read disk to obtain the health pills for that step (Relatively slow).
# Make sure that the directory for the run exists.
# Determine the directory of events file to read.
events_directory = self._logdir
if run != _DEFAULT_RUN:
# Use the directory for the specific run.
events_directory = os.path.join(events_directory, run)
step = int(step_string)
try:
mapping = self._obtain_health_pills_at_step(
events_directory, node_names, step)
except IOError as error:
logger.error(
'Error retrieving health pills for step %d: %s', step, error)
return wrappers.Response(status=404)
# Convert event_accumulator.HealthPillEvents to JSON-able dicts.
jsonable_mapping = {}
for node_name, events in mapping.items():
jsonable_mapping[node_name] = [e._asdict() for e in events]
return http_util.Respond(request, jsonable_mapping, 'application/json') | python | def _serve_health_pills_handler(self, request):
"""A (wrapped) werkzeug handler for serving health pills.
Accepts POST requests and responds with health pills. The request accepts
several POST parameters:
node_names: (required string) A JSON-ified list of node names for which
the client would like to request health pills.
run: (optional string) The run to retrieve health pills for. Defaults to
'.'. This data is sent via POST (not GET) since URL length is limited.
step: (optional integer) The session run step for which to
retrieve health pills. If provided, the handler reads the health pills
of that step from disk (which is slow) and produces a response with
only health pills at that step. If not provided, the handler returns a
response with health pills at all steps sampled by the event
multiplexer (the fast path). The motivation here is that, sometimes,
one desires to examine health pills at a specific step (to, say, find
the first step that causes a model to blow up with NaNs).
get_plugin_apps must be called before this slower feature is used
because that method passes the logdir (directory path) to this plugin.
This handler responds with a JSON-ified object mapping from node names to a
list (of size 1) of health pill event objects, each of which has these
properties.
{
'wall_time': float,
'step': int,
'node_name': string,
'output_slot': int,
# A list of 12 floats that summarizes the elements of the tensor.
'value': float[],
}
Node names for which there are no health pills to be found are excluded from
the mapping.
Args:
request: The request issued by the client for health pills.
Returns:
A werkzeug BaseResponse object.
"""
if request.method != 'POST':
return wrappers.Response(response=(
'%s requests are forbidden by the debugger plugin.' %
request.method), status=405)
if _NODE_NAMES_POST_KEY not in request.form:
return wrappers.Response(response=(
'The %r POST key was not found in the request for health pills.' %
_NODE_NAMES_POST_KEY), status=400)
jsonified_node_names = request.form[_NODE_NAMES_POST_KEY]
try:
node_names = json.loads(tf.compat.as_text(jsonified_node_names))
except Exception as e: # pylint: disable=broad-except
# Different JSON libs raise different exceptions, so we just do a
# catch-all here. This problem is complicated by how TensorBoard might be
# run in many different environments, as it is open-source.
# TODO(@caisq, @chihuahua): Create platform-dependent adapter to catch
# specific types of exceptions, instead of the broad catching here.
logger.error('Could not decode node name JSON string %r: %s',
jsonified_node_names, e)
return wrappers.Response(status=400)
if not isinstance(node_names, list):
logger.error('%r is not a JSON list of node names.',
jsonified_node_names)
return wrappers.Response(status=400)
run = request.form.get(_RUN_POST_KEY, _DEFAULT_RUN)
step_string = request.form.get(_STEP_POST_KEY, None)
if step_string is None:
# Use all steps sampled by the event multiplexer (Relatively fast).
mapping = self._obtain_sampled_health_pills(run, node_names)
else:
# Read disk to obtain the health pills for that step (Relatively slow).
# Make sure that the directory for the run exists.
# Determine the directory of events file to read.
events_directory = self._logdir
if run != _DEFAULT_RUN:
# Use the directory for the specific run.
events_directory = os.path.join(events_directory, run)
step = int(step_string)
try:
mapping = self._obtain_health_pills_at_step(
events_directory, node_names, step)
except IOError as error:
logger.error(
'Error retrieving health pills for step %d: %s', step, error)
return wrappers.Response(status=404)
# Convert event_accumulator.HealthPillEvents to JSON-able dicts.
jsonable_mapping = {}
for node_name, events in mapping.items():
jsonable_mapping[node_name] = [e._asdict() for e in events]
return http_util.Respond(request, jsonable_mapping, 'application/json') | [
"def",
"_serve_health_pills_handler",
"(",
"self",
",",
"request",
")",
":",
"if",
"request",
".",
"method",
"!=",
"'POST'",
":",
"return",
"wrappers",
".",
"Response",
"(",
"response",
"=",
"(",
"'%s requests are forbidden by the debugger plugin.'",
"%",
"request",
".",
"method",
")",
",",
"status",
"=",
"405",
")",
"if",
"_NODE_NAMES_POST_KEY",
"not",
"in",
"request",
".",
"form",
":",
"return",
"wrappers",
".",
"Response",
"(",
"response",
"=",
"(",
"'The %r POST key was not found in the request for health pills.'",
"%",
"_NODE_NAMES_POST_KEY",
")",
",",
"status",
"=",
"400",
")",
"jsonified_node_names",
"=",
"request",
".",
"form",
"[",
"_NODE_NAMES_POST_KEY",
"]",
"try",
":",
"node_names",
"=",
"json",
".",
"loads",
"(",
"tf",
".",
"compat",
".",
"as_text",
"(",
"jsonified_node_names",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"# Different JSON libs raise different exceptions, so we just do a",
"# catch-all here. This problem is complicated by how Tensorboard might be",
"# run in many different environments, as it is open-source.",
"# TODO(@caisq, @chihuahua): Create platform-dependent adapter to catch",
"# specific types of exceptions, instead of the broad catching here.",
"logger",
".",
"error",
"(",
"'Could not decode node name JSON string %r: %s'",
",",
"jsonified_node_names",
",",
"e",
")",
"return",
"wrappers",
".",
"Response",
"(",
"status",
"=",
"400",
")",
"if",
"not",
"isinstance",
"(",
"node_names",
",",
"list",
")",
":",
"logger",
".",
"error",
"(",
"'%r is not a JSON list of node names:'",
",",
"jsonified_node_names",
")",
"return",
"wrappers",
".",
"Response",
"(",
"status",
"=",
"400",
")",
"run",
"=",
"request",
".",
"form",
".",
"get",
"(",
"_RUN_POST_KEY",
",",
"_DEFAULT_RUN",
")",
"step_string",
"=",
"request",
".",
"form",
".",
"get",
"(",
"_STEP_POST_KEY",
",",
"None",
")",
"if",
"step_string",
"is",
"None",
":",
"# Use all steps sampled by the event multiplexer (Relatively fast).",
"mapping",
"=",
"self",
".",
"_obtain_sampled_health_pills",
"(",
"run",
",",
"node_names",
")",
"else",
":",
"# Read disk to obtain the health pills for that step (Relatively slow).",
"# Make sure that the directory for the run exists.",
"# Determine the directory of events file to read.",
"events_directory",
"=",
"self",
".",
"_logdir",
"if",
"run",
"!=",
"_DEFAULT_RUN",
":",
"# Use the directory for the specific run.",
"events_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"events_directory",
",",
"run",
")",
"step",
"=",
"int",
"(",
"step_string",
")",
"try",
":",
"mapping",
"=",
"self",
".",
"_obtain_health_pills_at_step",
"(",
"events_directory",
",",
"node_names",
",",
"step",
")",
"except",
"IOError",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"'Error retrieving health pills for step %d: %s'",
",",
"step",
",",
"error",
")",
"return",
"wrappers",
".",
"Response",
"(",
"status",
"=",
"404",
")",
"# Convert event_accumulator.HealthPillEvents to JSON-able dicts.",
"jsonable_mapping",
"=",
"{",
"}",
"for",
"node_name",
",",
"events",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"jsonable_mapping",
"[",
"node_name",
"]",
"=",
"[",
"e",
".",
"_asdict",
"(",
")",
"for",
"e",
"in",
"events",
"]",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"jsonable_mapping",
",",
"'application/json'",
")"
] | A (wrapped) werkzeug handler for serving health pills.
Accepts POST requests and responds with health pills. The request accepts
several POST parameters:
node_names: (required string) A JSON-ified list of node names for which
the client would like to request health pills.
run: (optional string) The run to retrieve health pills for. Defaults to
'.'. This data is sent via POST (not GET) since URL length is limited.
step: (optional integer) The session run step for which to
retrieve health pills. If provided, the handler reads the health pills
of that step from disk (which is slow) and produces a response with
only health pills at that step. If not provided, the handler returns a
response with health pills at all steps sampled by the event
multiplexer (the fast path). The motivation here is that, sometimes,
one desires to examine health pills at a specific step (to, say, find
the first step that causes a model to blow up with NaNs).
get_plugin_apps must be called before this slower feature is used
because that method passes the logdir (directory path) to this plugin.
This handler responds with a JSON-ified object mapping from node names to a
list (of size 1) of health pill event objects, each of which has these
properties.
{
'wall_time': float,
'step': int,
'node_name': string,
'output_slot': int,
# A list of 12 floats that summarizes the elements of the tensor.
'value': float[],
}
Node names for which there are no health pills to be found are excluded from
the mapping.
Args:
request: The request issued by the client for health pills.
Returns:
A werkzeug BaseResponse object. | [
"A",
"(",
"wrapped",
")",
"werkzeug",
"handler",
"for",
"serving",
"health",
"pills",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin.py#L155-L253 | train |
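A hypothetical client call against the handler above. The URL route is an assumption (this row documents the handler, not where it is mounted), and the form field names follow the docstring's parameter names; the `_*_POST_KEY` constants' values are not shown in this row:

```python
import json

import requests  # third-party HTTP client

resp = requests.post(
    'http://localhost:6006/data/plugin/debugger/health_pills',  # assumed route
    data={
        'node_names': json.dumps(['Dense_1/BiasAdd']),  # required JSON list
        'run': '.',                                     # optional, default '.'
        # 'step': '7',  # optional; forces the slower read-from-disk path
    })
print(resp.status_code)  # 405 for non-POST, 400 for malformed node_names
print(resp.json())       # {node_name: [health pill event dicts]}
```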
tensorflow/tensorboard | tensorboard/plugins/debugger/debugger_plugin.py | DebuggerPlugin._obtain_sampled_health_pills | def _obtain_sampled_health_pills(self, run, node_names):
"""Obtains the health pills for a run sampled by the event multiplexer.
This is much faster than the alternative path of reading health pills from
disk.
Args:
run: The run to fetch health pills for.
node_names: A list of node names for which to retrieve health pills.
Returns:
A dictionary mapping from node name to a list of
event_accumulator.HealthPillEvents.
"""
runs_to_tags_to_content = self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME)
if run not in runs_to_tags_to_content:
# The run lacks health pills.
return {}
# This is also a mapping between node name and plugin content because this
# plugin tags by node name.
tags_to_content = runs_to_tags_to_content[run]
mapping = {}
for node_name in node_names:
if node_name not in tags_to_content:
# This node lacks health pill data.
continue
health_pills = []
for tensor_event in self._event_multiplexer.Tensors(run, node_name):
json_string = tags_to_content[node_name]
try:
content_object = json.loads(tf.compat.as_text(json_string))
device_name = content_object['device']
output_slot = content_object['outputSlot']
health_pills.append(
self._tensor_proto_to_health_pill(tensor_event, node_name,
device_name, output_slot))
except (KeyError, ValueError) as e:
logger.error('Could not determine device from JSON string '
'%r: %r', json_string, e)
mapping[node_name] = health_pills
return mapping | python | def _obtain_sampled_health_pills(self, run, node_names):
"""Obtains the health pills for a run sampled by the event multiplexer.
This is much faster than the alternative path of reading health pills from
disk.
Args:
run: The run to fetch health pills for.
node_names: A list of node names for which to retrieve health pills.
Returns:
A dictionary mapping from node name to a list of
event_accumulator.HealthPillEvents.
"""
runs_to_tags_to_content = self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME)
if run not in runs_to_tags_to_content:
# The run lacks health pills.
return {}
# This is also a mapping between node name and plugin content because this
# plugin tags by node name.
tags_to_content = runs_to_tags_to_content[run]
mapping = {}
for node_name in node_names:
if node_name not in tags_to_content:
# This node lacks health pill data.
continue
health_pills = []
for tensor_event in self._event_multiplexer.Tensors(run, node_name):
json_string = tags_to_content[node_name]
try:
content_object = json.loads(tf.compat.as_text(json_string))
device_name = content_object['device']
output_slot = content_object['outputSlot']
health_pills.append(
self._tensor_proto_to_health_pill(tensor_event, node_name,
device_name, output_slot))
except (KeyError, ValueError) as e:
logger.error('Could not determine device from JSON string '
'%r: %r', json_string, e)
mapping[node_name] = health_pills
return mapping | [
"def",
"_obtain_sampled_health_pills",
"(",
"self",
",",
"run",
",",
"node_names",
")",
":",
"runs_to_tags_to_content",
"=",
"self",
".",
"_event_multiplexer",
".",
"PluginRunToTagToContent",
"(",
"constants",
".",
"DEBUGGER_PLUGIN_NAME",
")",
"if",
"run",
"not",
"in",
"runs_to_tags_to_content",
":",
"# The run lacks health pills.",
"return",
"{",
"}",
"# This is also a mapping between node name and plugin content because this",
"# plugin tags by node name.",
"tags_to_content",
"=",
"runs_to_tags_to_content",
"[",
"run",
"]",
"mapping",
"=",
"{",
"}",
"for",
"node_name",
"in",
"node_names",
":",
"if",
"node_name",
"not",
"in",
"tags_to_content",
":",
"# This node lacks health pill data.",
"continue",
"health_pills",
"=",
"[",
"]",
"for",
"tensor_event",
"in",
"self",
".",
"_event_multiplexer",
".",
"Tensors",
"(",
"run",
",",
"node_name",
")",
":",
"json_string",
"=",
"tags_to_content",
"[",
"node_name",
"]",
"try",
":",
"content_object",
"=",
"json",
".",
"loads",
"(",
"tf",
".",
"compat",
".",
"as_text",
"(",
"json_string",
")",
")",
"device_name",
"=",
"content_object",
"[",
"'device'",
"]",
"output_slot",
"=",
"content_object",
"[",
"'outputSlot'",
"]",
"health_pills",
".",
"append",
"(",
"self",
".",
"_tensor_proto_to_health_pill",
"(",
"tensor_event",
",",
"node_name",
",",
"device_name",
",",
"output_slot",
")",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Could not determine device from JSON string '",
"'%r: %r'",
",",
"json_string",
",",
"e",
")",
"mapping",
"[",
"node_name",
"]",
"=",
"health_pills",
"return",
"mapping"
] | Obtains the health pills for a run sampled by the event multiplexer.
This is much faster than the alternative path of reading health pills from
disk.
Args:
run: The run to fetch health pills for.
node_names: A list of node names for which to retrieve health pills.
Returns:
A dictionary mapping from node name to a list of
event_accumulator.HealthPillEvents. | [
"Obtains",
"the",
"health",
"pills",
"for",
"a",
"run",
"sampled",
"by",
"the",
"event",
"multiplexer",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin.py#L255-L302 | train |
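The method above walks two nested mappings whose shapes are easy to lose track of. An illustrative (made-up) instance of the structures it reads, with the parsing step it applies per tensor event:

```python
import json

# Illustrative shapes only; run and tag names are invented.
runs_to_tags_to_content = {
    '.': {  # run -> tag (node name) -> summary-metadata plugin content
        'Dense_1/BiasAdd': b'{"device": "/cpu:0", "outputSlot": 0}',
    },
}
content = json.loads(runs_to_tags_to_content['.']['Dense_1/BiasAdd'])
device_name, output_slot = content['device'], content['outputSlot']
print(device_name, output_slot)  # /cpu:0 0
```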
tensorflow/tensorboard | tensorboard/plugins/debugger/debugger_plugin.py | DebuggerPlugin._tensor_proto_to_health_pill | def _tensor_proto_to_health_pill(self, tensor_event, node_name, device,
output_slot):
"""Converts an event_accumulator.TensorEvent to a HealthPillEvent.
Args:
tensor_event: The event_accumulator.TensorEvent to convert.
node_name: The name of the node (without the output slot).
device: The device.
output_slot: The integer output slot this health pill is relevant to.
Returns:
A HealthPillEvent.
"""
return self._process_health_pill_value(
wall_time=tensor_event.wall_time,
step=tensor_event.step,
device_name=device,
output_slot=output_slot,
node_name=node_name,
tensor_proto=tensor_event.tensor_proto) | python | def _tensor_proto_to_health_pill(self, tensor_event, node_name, device,
output_slot):
"""Converts an event_accumulator.TensorEvent to a HealthPillEvent.
Args:
tensor_event: The event_accumulator.TensorEvent to convert.
node_name: The name of the node (without the output slot).
device: The device.
output_slot: The integer output slot this health pill is relevant to.
Returns:
A HealthPillEvent.
"""
return self._process_health_pill_value(
wall_time=tensor_event.wall_time,
step=tensor_event.step,
device_name=device,
output_slot=output_slot,
node_name=node_name,
tensor_proto=tensor_event.tensor_proto) | [
"def",
"_tensor_proto_to_health_pill",
"(",
"self",
",",
"tensor_event",
",",
"node_name",
",",
"device",
",",
"output_slot",
")",
":",
"return",
"self",
".",
"_process_health_pill_value",
"(",
"wall_time",
"=",
"tensor_event",
".",
"wall_time",
",",
"step",
"=",
"tensor_event",
".",
"step",
",",
"device_name",
"=",
"device",
",",
"output_slot",
"=",
"output_slot",
",",
"node_name",
"=",
"node_name",
",",
"tensor_proto",
"=",
"tensor_event",
".",
"tensor_proto",
")"
] | Converts an event_accumulator.TensorEvent to a HealthPillEvent.
Args:
tensor_event: The event_accumulator.TensorEvent to convert.
node_name: The name of the node (without the output slot).
device: The device.
output_slot: The integer output slot this health pill is relevant to.
Returns:
A HealthPillEvent. | [
"Converts",
"an",
"event_accumulator",
".",
"TensorEvent",
"to",
"a",
"HealthPillEvent",
"."
] | 8e5f497b48e40f2a774f85416b8a35ac0693c35e | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin.py#L304-L323 | train |