repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
wandb/client | wandb/vendor/prompt_toolkit/key_binding/registry.py | Registry.remove_binding | def remove_binding(self, function):
"""
Remove a key binding.
This expects a function that was given to `add_binding` method as
parameter. Raises `ValueError` when the given function was not
registered before.
"""
assert callable(function)
for b in self.key_bindings:
if b.handler == function:
self.key_bindings.remove(b)
self._clear_cache()
return
# No key binding found for this function. Raise ValueError.
raise ValueError('Binding not found: %r' % (function, )) | python | def remove_binding(self, function):
"""
Remove a key binding.
This expects a function that was given to `add_binding` method as
parameter. Raises `ValueError` when the given function was not
registered before.
"""
assert callable(function)
for b in self.key_bindings:
if b.handler == function:
self.key_bindings.remove(b)
self._clear_cache()
return
# No key binding found for this function. Raise ValueError.
raise ValueError('Binding not found: %r' % (function, )) | [
"def",
"remove_binding",
"(",
"self",
",",
"function",
")",
":",
"assert",
"callable",
"(",
"function",
")",
"for",
"b",
"in",
"self",
".",
"key_bindings",
":",
"if",
"b",
".",
"handler",
"==",
"function",
":",
"self",
".",
"key_bindings",
".",
"remove",... | Remove a key binding.
This expects a function that was given to `add_binding` method as
parameter. Raises `ValueError` when the given function was not
registered before. | [
"Remove",
"a",
"key",
"binding",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/registry.py#L143-L160 | train | 207,300 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/registry.py | ConditionalRegistry._update_cache | def _update_cache(self):
" If the original registry was changed. Update our copy version. "
expected_version = (self.registry._version, self._extra_registry._version)
if self._last_version != expected_version:
registry2 = Registry()
# Copy all bindings from `self.registry`, adding our condition.
for reg in (self.registry, self._extra_registry):
for b in reg.key_bindings:
registry2.key_bindings.append(
_Binding(
keys=b.keys,
handler=b.handler,
filter=self.filter & b.filter,
eager=b.eager,
save_before=b.save_before))
self._registry2 = registry2
self._last_version = expected_version | python | def _update_cache(self):
" If the original registry was changed. Update our copy version. "
expected_version = (self.registry._version, self._extra_registry._version)
if self._last_version != expected_version:
registry2 = Registry()
# Copy all bindings from `self.registry`, adding our condition.
for reg in (self.registry, self._extra_registry):
for b in reg.key_bindings:
registry2.key_bindings.append(
_Binding(
keys=b.keys,
handler=b.handler,
filter=self.filter & b.filter,
eager=b.eager,
save_before=b.save_before))
self._registry2 = registry2
self._last_version = expected_version | [
"def",
"_update_cache",
"(",
"self",
")",
":",
"expected_version",
"=",
"(",
"self",
".",
"registry",
".",
"_version",
",",
"self",
".",
"_extra_registry",
".",
"_version",
")",
"if",
"self",
".",
"_last_version",
"!=",
"expected_version",
":",
"registry2",
... | If the original registry was changed. Update our copy version. | [
"If",
"the",
"original",
"registry",
"was",
"changed",
".",
"Update",
"our",
"copy",
"version",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/registry.py#L293-L312 | train | 207,301 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/registry.py | MergedRegistry._update_cache | def _update_cache(self):
"""
If one of the original registries was changed. Update our merged
version.
"""
expected_version = (
tuple(r._version for r in self.registries) +
(self._extra_registry._version, ))
if self._last_version != expected_version:
registry2 = Registry()
for reg in self.registries:
registry2.key_bindings.extend(reg.key_bindings)
# Copy all bindings from `self._extra_registry`.
registry2.key_bindings.extend(self._extra_registry.key_bindings)
self._registry2 = registry2
self._last_version = expected_version | python | def _update_cache(self):
"""
If one of the original registries was changed. Update our merged
version.
"""
expected_version = (
tuple(r._version for r in self.registries) +
(self._extra_registry._version, ))
if self._last_version != expected_version:
registry2 = Registry()
for reg in self.registries:
registry2.key_bindings.extend(reg.key_bindings)
# Copy all bindings from `self._extra_registry`.
registry2.key_bindings.extend(self._extra_registry.key_bindings)
self._registry2 = registry2
self._last_version = expected_version | [
"def",
"_update_cache",
"(",
"self",
")",
":",
"expected_version",
"=",
"(",
"tuple",
"(",
"r",
".",
"_version",
"for",
"r",
"in",
"self",
".",
"registries",
")",
"+",
"(",
"self",
".",
"_extra_registry",
".",
"_version",
",",
")",
")",
"if",
"self",
... | If one of the original registries was changed. Update our merged
version. | [
"If",
"one",
"of",
"the",
"original",
"registries",
"was",
"changed",
".",
"Update",
"our",
"merged",
"version",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/registry.py#L331-L350 | train | 207,302 |
wandb/client | wandb/data_types.py | nest | def nest(thing):
"""Use tensorflows nest function if available, otherwise just wrap object in an array"""
tfutil = util.get_module('tensorflow.python.util')
if tfutil:
return tfutil.nest.flatten(thing)
else:
return [thing] | python | def nest(thing):
"""Use tensorflows nest function if available, otherwise just wrap object in an array"""
tfutil = util.get_module('tensorflow.python.util')
if tfutil:
return tfutil.nest.flatten(thing)
else:
return [thing] | [
"def",
"nest",
"(",
"thing",
")",
":",
"tfutil",
"=",
"util",
".",
"get_module",
"(",
"'tensorflow.python.util'",
")",
"if",
"tfutil",
":",
"return",
"tfutil",
".",
"nest",
".",
"flatten",
"(",
"thing",
")",
"else",
":",
"return",
"[",
"thing",
"]"
] | Use tensorflows nest function if available, otherwise just wrap object in an array | [
"Use",
"tensorflows",
"nest",
"function",
"if",
"available",
"otherwise",
"just",
"wrap",
"object",
"in",
"an",
"array"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/data_types.py#L17-L23 | train | 207,303 |
wandb/client | wandb/data_types.py | val_to_json | def val_to_json(key, val, mode="summary", step=None):
"""Converts a wandb datatype to its JSON representation"""
converted = val
typename = util.get_full_typename(val)
if util.is_matplotlib_typename(typename):
# This handles plots with images in it because plotly doesn't support it
# TODO: should we handle a list of plots?
val = util.ensure_matplotlib_figure(val)
if any(len(ax.images) > 0 for ax in val.axes):
PILImage = util.get_module(
"PIL.Image", required="Logging plots with images requires pil: pip install pillow")
buf = six.BytesIO()
val.savefig(buf)
val = Image(PILImage.open(buf))
else:
converted = util.convert_plots(val)
elif util.is_plotly_typename(typename):
converted = util.convert_plots(val)
if isinstance(val, IterableMedia):
val = [val]
if isinstance(val, collections.Sequence) and len(val) > 0:
is_media = [isinstance(v, IterableMedia) for v in val]
if all(is_media):
cwd = wandb.run.dir if wandb.run else "."
if step is None:
step = "summary"
if isinstance(val[0], Image):
converted = Image.transform(val, cwd,
"{}_{}.jpg".format(key, step))
elif isinstance(val[0], Audio):
converted = Audio.transform(val, cwd, key, step)
elif isinstance(val[0], Html):
converted = Html.transform(val, cwd, key, step)
elif isinstance(val[0], Object3D):
converted = Object3D.transform(val, cwd, key, step)
elif any(is_media):
raise ValueError(
"Mixed media types in the same list aren't supported")
elif isinstance(val, Histogram):
converted = Histogram.transform(val)
elif isinstance(val, Graph):
if mode == "history":
raise ValueError("Graphs are only supported in summary")
converted = Graph.transform(val)
elif isinstance(val, Table):
converted = Table.transform(val)
return converted | python | def val_to_json(key, val, mode="summary", step=None):
"""Converts a wandb datatype to its JSON representation"""
converted = val
typename = util.get_full_typename(val)
if util.is_matplotlib_typename(typename):
# This handles plots with images in it because plotly doesn't support it
# TODO: should we handle a list of plots?
val = util.ensure_matplotlib_figure(val)
if any(len(ax.images) > 0 for ax in val.axes):
PILImage = util.get_module(
"PIL.Image", required="Logging plots with images requires pil: pip install pillow")
buf = six.BytesIO()
val.savefig(buf)
val = Image(PILImage.open(buf))
else:
converted = util.convert_plots(val)
elif util.is_plotly_typename(typename):
converted = util.convert_plots(val)
if isinstance(val, IterableMedia):
val = [val]
if isinstance(val, collections.Sequence) and len(val) > 0:
is_media = [isinstance(v, IterableMedia) for v in val]
if all(is_media):
cwd = wandb.run.dir if wandb.run else "."
if step is None:
step = "summary"
if isinstance(val[0], Image):
converted = Image.transform(val, cwd,
"{}_{}.jpg".format(key, step))
elif isinstance(val[0], Audio):
converted = Audio.transform(val, cwd, key, step)
elif isinstance(val[0], Html):
converted = Html.transform(val, cwd, key, step)
elif isinstance(val[0], Object3D):
converted = Object3D.transform(val, cwd, key, step)
elif any(is_media):
raise ValueError(
"Mixed media types in the same list aren't supported")
elif isinstance(val, Histogram):
converted = Histogram.transform(val)
elif isinstance(val, Graph):
if mode == "history":
raise ValueError("Graphs are only supported in summary")
converted = Graph.transform(val)
elif isinstance(val, Table):
converted = Table.transform(val)
return converted | [
"def",
"val_to_json",
"(",
"key",
",",
"val",
",",
"mode",
"=",
"\"summary\"",
",",
"step",
"=",
"None",
")",
":",
"converted",
"=",
"val",
"typename",
"=",
"util",
".",
"get_full_typename",
"(",
"val",
")",
"if",
"util",
".",
"is_matplotlib_typename",
"... | Converts a wandb datatype to its JSON representation | [
"Converts",
"a",
"wandb",
"datatype",
"to",
"its",
"JSON",
"representation"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/data_types.py#L26-L73 | train | 207,304 |
wandb/client | wandb/data_types.py | to_json | def to_json(payload, mode="history"):
"""Converts all keys in a potentially nested array into their JSON representation"""
for key, val in six.iteritems(payload):
if isinstance(val, dict):
payload[key] = to_json(val, mode)
else:
payload[key] = val_to_json(
key, val, mode, step=payload.get("_step"))
return payload | python | def to_json(payload, mode="history"):
"""Converts all keys in a potentially nested array into their JSON representation"""
for key, val in six.iteritems(payload):
if isinstance(val, dict):
payload[key] = to_json(val, mode)
else:
payload[key] = val_to_json(
key, val, mode, step=payload.get("_step"))
return payload | [
"def",
"to_json",
"(",
"payload",
",",
"mode",
"=",
"\"history\"",
")",
":",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"payload",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"payload",
"[",
"key",
"]",
"=",
... | Converts all keys in a potentially nested array into their JSON representation | [
"Converts",
"all",
"keys",
"in",
"a",
"potentially",
"nested",
"array",
"into",
"their",
"JSON",
"representation"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/data_types.py#L76-L84 | train | 207,305 |
wandb/client | wandb/data_types.py | Image.guess_mode | def guess_mode(self, data):
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)) | python | def guess_mode(self, data):
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)) | [
"def",
"guess_mode",
"(",
"self",
",",
"data",
")",
":",
"# TODO: do we want to support dimensions being at the beginning of the array?",
"if",
"data",
".",
"ndim",
"==",
"2",
":",
"return",
"\"L\"",
"elif",
"data",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
... | Guess what type of image the np.array is representing | [
"Guess",
"what",
"type",
"of",
"image",
"the",
"np",
".",
"array",
"is",
"representing"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/data_types.py#L712-L725 | train | 207,306 |
wandb/client | wandb/data_types.py | Image.transform | def transform(images, out_dir, fname):
"""
Combines a list of images into a single sprite returning meta information
"""
from PIL import Image as PILImage
base = os.path.join(out_dir, "media", "images")
width, height = images[0].image.size
num_images_to_log = len(images)
if num_images_to_log > Image.MAX_IMAGES:
logging.warn(
"The maximum number of images to store per step is %i." % Image.MAX_IMAGES)
num_images_to_log = Image.MAX_IMAGES
if width * num_images_to_log > Image.MAX_DIMENSION:
max_images_by_dimension = Image.MAX_DIMENSION // width
logging.warn("The maximum total width for all images in a collection is 65500, or {} images, each with a width of {} pixels. Only logging the first {} images.".format(max_images_by_dimension, width, max_images_by_dimension))
num_images_to_log = max_images_by_dimension
total_width = width * num_images_to_log
sprite = PILImage.new(
mode='RGB',
size=(total_width, height),
color=(0, 0, 0))
for i, image in enumerate(images[:num_images_to_log]):
location = width * i
sprite.paste(image.image, (location, 0))
util.mkdir_exists_ok(base)
sprite.save(os.path.join(base, fname), transparency=0)
meta = {"width": width, "height": height,
"count": num_images_to_log, "_type": "images"}
# TODO: hacky way to enable image grouping for now
grouping = images[0].grouping
if grouping:
meta["grouping"] = grouping
captions = Image.captions(images[:num_images_to_log])
if captions:
meta["captions"] = captions
return meta | python | def transform(images, out_dir, fname):
"""
Combines a list of images into a single sprite returning meta information
"""
from PIL import Image as PILImage
base = os.path.join(out_dir, "media", "images")
width, height = images[0].image.size
num_images_to_log = len(images)
if num_images_to_log > Image.MAX_IMAGES:
logging.warn(
"The maximum number of images to store per step is %i." % Image.MAX_IMAGES)
num_images_to_log = Image.MAX_IMAGES
if width * num_images_to_log > Image.MAX_DIMENSION:
max_images_by_dimension = Image.MAX_DIMENSION // width
logging.warn("The maximum total width for all images in a collection is 65500, or {} images, each with a width of {} pixels. Only logging the first {} images.".format(max_images_by_dimension, width, max_images_by_dimension))
num_images_to_log = max_images_by_dimension
total_width = width * num_images_to_log
sprite = PILImage.new(
mode='RGB',
size=(total_width, height),
color=(0, 0, 0))
for i, image in enumerate(images[:num_images_to_log]):
location = width * i
sprite.paste(image.image, (location, 0))
util.mkdir_exists_ok(base)
sprite.save(os.path.join(base, fname), transparency=0)
meta = {"width": width, "height": height,
"count": num_images_to_log, "_type": "images"}
# TODO: hacky way to enable image grouping for now
grouping = images[0].grouping
if grouping:
meta["grouping"] = grouping
captions = Image.captions(images[:num_images_to_log])
if captions:
meta["captions"] = captions
return meta | [
"def",
"transform",
"(",
"images",
",",
"out_dir",
",",
"fname",
")",
":",
"from",
"PIL",
"import",
"Image",
"as",
"PILImage",
"base",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"media\"",
",",
"\"images\"",
")",
"width",
",",
"height"... | Combines a list of images into a single sprite returning meta information | [
"Combines",
"a",
"list",
"of",
"images",
"into",
"a",
"single",
"sprite",
"returning",
"meta",
"information"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/data_types.py#L752-L790 | train | 207,307 |
wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetConnection._handle_command | def _handle_command(self, command):
"""
Handle command. This will run in a separate thread, in order not
to block the event loop.
"""
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if self.callback is not None:
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
# Reset state and draw again. (If the connection is still open --
# the application could have called TelnetConnection.close()
if not self.closed:
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor) | python | def _handle_command(self, command):
"""
Handle command. This will run in a separate thread, in order not
to block the event loop.
"""
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if self.callback is not None:
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
# Reset state and draw again. (If the connection is still open --
# the application could have called TelnetConnection.close()
if not self.closed:
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor) | [
"def",
"_handle_command",
"(",
"self",
",",
"command",
")",
":",
"logger",
".",
"info",
"(",
"'Handle command %r'",
",",
"command",
")",
"def",
"in_executor",
"(",
")",
":",
"self",
".",
"handling_command",
"=",
"True",
"try",
":",
"if",
"self",
".",
"ca... | Handle command. This will run in a separate thread, in order not
to block the event loop. | [
"Handle",
"command",
".",
"This",
"will",
"run",
"in",
"a",
"separate",
"thread",
"in",
"order",
"not",
"to",
"block",
"the",
"event",
"loop",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L211-L238 | train | 207,308 |
wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetConnection.erase_screen | def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush() | python | def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush() | [
"def",
"erase_screen",
"(",
"self",
")",
":",
"self",
".",
"vt100_output",
".",
"erase_screen",
"(",
")",
"self",
".",
"vt100_output",
".",
"cursor_goto",
"(",
"0",
",",
"0",
")",
"self",
".",
"vt100_output",
".",
"flush",
"(",
")"
] | Erase output screen. | [
"Erase",
"output",
"screen",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L240-L246 | train | 207,309 |
wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetConnection.send | def send(self, data):
"""
Send text to the client.
"""
assert isinstance(data, text_type)
# When data is send back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush() | python | def send(self, data):
"""
Send text to the client.
"""
assert isinstance(data, text_type)
# When data is send back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush() | [
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"assert",
"isinstance",
"(",
"data",
",",
"text_type",
")",
"# When data is send back to the client, we should replace the line",
"# endings. (We didn't allocate a real pseudo terminal, and the telnet",
"# connection is raw, so we ... | Send text to the client. | [
"Send",
"text",
"to",
"the",
"client",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L248-L258 | train | 207,310 |
wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetServer._process_callbacks | def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c() | python | def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c() | [
"def",
"_process_callbacks",
"(",
"self",
")",
":",
"# Flush all the pipe content.",
"os",
".",
"read",
"(",
"self",
".",
"_schedule_pipe",
"[",
"0",
"]",
",",
"1024",
")",
"# Process calls from executor.",
"calls_from_executor",
",",
"self",
".",
"_calls_from_execu... | Process callbacks from `call_from_executor` in eventloop. | [
"Process",
"callbacks",
"from",
"call_from_executor",
"in",
"eventloop",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L338-L348 | train | 207,311 |
wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetServer.run | def run(self):
"""
Run the eventloop for the telnet server.
"""
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
# Removed closed connections.
self.connections = set([c for c in self.connections if not c.closed])
# Ignore connections handling commands.
connections = set([c for c in self.connections if not c.handling_command])
# Wait for next event.
read_list = (
[listen_socket, self._schedule_pipe[0]] +
[c.conn for c in connections])
read, _, _ = select.select(read_list, [], [])
for s in read:
# When the socket itself is ready, accept a new connection.
if s == listen_socket:
self._accept(listen_socket)
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif s == self._schedule_pipe[0]:
self._process_callbacks()
# Handle incoming data on socket.
else:
self._handle_incoming_data(s)
finally:
listen_socket.close() | python | def run(self):
"""
Run the eventloop for the telnet server.
"""
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
# Removed closed connections.
self.connections = set([c for c in self.connections if not c.closed])
# Ignore connections handling commands.
connections = set([c for c in self.connections if not c.handling_command])
# Wait for next event.
read_list = (
[listen_socket, self._schedule_pipe[0]] +
[c.conn for c in connections])
read, _, _ = select.select(read_list, [], [])
for s in read:
# When the socket itself is ready, accept a new connection.
if s == listen_socket:
self._accept(listen_socket)
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif s == self._schedule_pipe[0]:
self._process_callbacks()
# Handle incoming data on socket.
else:
self._handle_incoming_data(s)
finally:
listen_socket.close() | [
"def",
"run",
"(",
"self",
")",
":",
"listen_socket",
"=",
"self",
".",
"create_socket",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"logger",
".",
"info",
"(",
"'Listening for telnet connections on %s port %r'",
",",
"self",
".",
"host",
",",
... | Run the eventloop for the telnet server. | [
"Run",
"the",
"eventloop",
"for",
"the",
"telnet",
"server",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L350-L386 | train | 207,312 |
wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetServer._accept | def _accept(self, listen_socket):
"""
Accept new incoming connection.
"""
conn, addr = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr) | python | def _accept(self, listen_socket):
"""
Accept new incoming connection.
"""
conn, addr = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr) | [
"def",
"_accept",
"(",
"self",
",",
"listen_socket",
")",
":",
"conn",
",",
"addr",
"=",
"listen_socket",
".",
"accept",
"(",
")",
"connection",
"=",
"TelnetConnection",
"(",
"conn",
",",
"addr",
",",
"self",
".",
"application",
",",
"self",
",",
"encodi... | Accept new incoming connection. | [
"Accept",
"new",
"incoming",
"connection",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L388-L396 | train | 207,313 |
wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetServer._handle_incoming_data | def _handle_incoming_data(self, conn):
"""
Handle incoming data on socket.
"""
connection = [c for c in self.connections if c.conn == conn][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection) | python | def _handle_incoming_data(self, conn):
"""
Handle incoming data on socket.
"""
connection = [c for c in self.connections if c.conn == conn][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection) | [
"def",
"_handle_incoming_data",
"(",
"self",
",",
"conn",
")",
":",
"connection",
"=",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"connections",
"if",
"c",
".",
"conn",
"==",
"conn",
"]",
"[",
"0",
"]",
"data",
"=",
"conn",
".",
"recv",
"(",
"1024",
... | Handle incoming data on socket. | [
"Handle",
"incoming",
"data",
"on",
"socket",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L398-L407 | train | 207,314 |
wandb/client | wandb/apis/internal.py | Api.execute | def execute(self, *args, **kwargs):
"""Wrapper around execute that logs in cases of failure."""
try:
return self.client.execute(*args, **kwargs)
except requests.exceptions.HTTPError as err:
res = err.response
logger.error("%s response executing GraphQL." % res.status_code)
logger.error(res.text)
self.display_gorilla_error_if_found(res)
six.reraise(*sys.exc_info()) | python | def execute(self, *args, **kwargs):
"""Wrapper around execute that logs in cases of failure."""
try:
return self.client.execute(*args, **kwargs)
except requests.exceptions.HTTPError as err:
res = err.response
logger.error("%s response executing GraphQL." % res.status_code)
logger.error(res.text)
self.display_gorilla_error_if_found(res)
six.reraise(*sys.exc_info()) | [
"def",
"execute",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"self",
".",
"client",
".",
"execute",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPErro... | Wrapper around execute that logs in cases of failure. | [
"Wrapper",
"around",
"execute",
"that",
"logs",
"in",
"cases",
"of",
"failure",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L106-L115 | train | 207,315 |
wandb/client | wandb/apis/internal.py | Api.save_pip | def save_pip(self, out_dir):
"""Saves the current working set of pip packages to requirements.txt"""
try:
import pkg_resources
installed_packages = [d for d in iter(pkg_resources.working_set)]
installed_packages_list = sorted(
["%s==%s" % (i.key, i.version) for i in installed_packages]
)
with open(os.path.join(out_dir, 'requirements.txt'), 'w') as f:
f.write("\n".join(installed_packages_list))
except Exception as e:
logger.error("Error saving pip packages") | python | def save_pip(self, out_dir):
"""Saves the current working set of pip packages to requirements.txt"""
try:
import pkg_resources
installed_packages = [d for d in iter(pkg_resources.working_set)]
installed_packages_list = sorted(
["%s==%s" % (i.key, i.version) for i in installed_packages]
)
with open(os.path.join(out_dir, 'requirements.txt'), 'w') as f:
f.write("\n".join(installed_packages_list))
except Exception as e:
logger.error("Error saving pip packages") | [
"def",
"save_pip",
"(",
"self",
",",
"out_dir",
")",
":",
"try",
":",
"import",
"pkg_resources",
"installed_packages",
"=",
"[",
"d",
"for",
"d",
"in",
"iter",
"(",
"pkg_resources",
".",
"working_set",
")",
"]",
"installed_packages_list",
"=",
"sorted",
"(",... | Saves the current working set of pip packages to requirements.txt | [
"Saves",
"the",
"current",
"working",
"set",
"of",
"pip",
"packages",
"to",
"requirements",
".",
"txt"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L136-L148 | train | 207,316 |
def save_patches(self, out_dir):
    """Save the current state of this repository to one or more patches.

    Makes one patch against HEAD and another one against the most recent
    commit that occurs in an upstream branch. This way we can be robust
    to history editing as long as the user never does "push -f" to break
    history on an upstream branch.

    Writes the first patch to <out_dir>/diff.patch and the second to
    <out_dir>/upstream_diff_<commit_id>.patch.

    Args:
        out_dir (str): Directory to write the patch files.

    Returns:
        False when git integration is disabled; otherwise None
        (failures are logged, not raised).
    """
    if not self.git.enabled:
        return False
    try:
        root = self.git.root
        # Patch 1: uncommitted local changes (working tree + index) vs HEAD.
        if self.git.dirty:
            patch_path = os.path.join(out_dir, 'diff.patch')
            # --submodule=diff inlines submodule changes; presumably gated
            # because older git versions lack it — TODO confirm min version.
            if self.git.has_submodule_diff:
                with open(patch_path, 'wb') as patch:
                    # we diff against HEAD to ensure we get changes in the index
                    subprocess.check_call(
                        ['git', 'diff', '--submodule=diff', 'HEAD'], stdout=patch, cwd=root, timeout=5)
            else:
                with open(patch_path, 'wb') as patch:
                    subprocess.check_call(
                        ['git', 'diff', 'HEAD'], stdout=patch, cwd=root, timeout=5)
        # Patch 2: local commits vs the upstream fork point, so the run can
        # be reproduced even if these commits are later rewritten locally.
        upstream_commit = self.git.get_upstream_fork_point()
        if upstream_commit and upstream_commit != self.git.repo.head.commit:
            sha = upstream_commit.hexsha
            upstream_patch_path = os.path.join(
                out_dir, 'upstream_diff_{}.patch'.format(sha))
            if self.git.has_submodule_diff:
                with open(upstream_patch_path, 'wb') as upstream_patch:
                    subprocess.check_call(
                        ['git', 'diff', '--submodule=diff', sha], stdout=upstream_patch, cwd=root, timeout=5)
            else:
                with open(upstream_patch_path, 'wb') as upstream_patch:
                    subprocess.check_call(
                        ['git', 'diff', sha], stdout=upstream_patch, cwd=root, timeout=5)
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
        # Diff generation is best-effort; a hung or failing git must not
        # abort the run.
        logger.error('Error generating diff')
"""Save the current state of this repository to one or more patches.
Makes one patch against HEAD and another one against the most recent
commit that occurs in an upstream branch. This way we can be robust
to history editing as long as the user never does "push -f" to break
history on an upstream branch.
Writes the first patch to <out_dir>/diff.patch and the second to
<out_dir>/upstream_diff_<commit_id>.patch.
Args:
out_dir (str): Directory to write the patch files.
"""
if not self.git.enabled:
return False
try:
root = self.git.root
if self.git.dirty:
patch_path = os.path.join(out_dir, 'diff.patch')
if self.git.has_submodule_diff:
with open(patch_path, 'wb') as patch:
# we diff against HEAD to ensure we get changes in the index
subprocess.check_call(
['git', 'diff', '--submodule=diff', 'HEAD'], stdout=patch, cwd=root, timeout=5)
else:
with open(patch_path, 'wb') as patch:
subprocess.check_call(
['git', 'diff', 'HEAD'], stdout=patch, cwd=root, timeout=5)
upstream_commit = self.git.get_upstream_fork_point()
if upstream_commit and upstream_commit != self.git.repo.head.commit:
sha = upstream_commit.hexsha
upstream_patch_path = os.path.join(
out_dir, 'upstream_diff_{}.patch'.format(sha))
if self.git.has_submodule_diff:
with open(upstream_patch_path, 'wb') as upstream_patch:
subprocess.check_call(
['git', 'diff', '--submodule=diff', sha], stdout=upstream_patch, cwd=root, timeout=5)
else:
with open(upstream_patch_path, 'wb') as upstream_patch:
subprocess.check_call(
['git', 'diff', sha], stdout=upstream_patch, cwd=root, timeout=5)
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
logger.error('Error generating diff') | [
"def",
"save_patches",
"(",
"self",
",",
"out_dir",
")",
":",
"if",
"not",
"self",
".",
"git",
".",
"enabled",
":",
"return",
"False",
"try",
":",
"root",
"=",
"self",
".",
"git",
".",
"root",
"if",
"self",
".",
"git",
".",
"dirty",
":",
"patch_pat... | Save the current state of this repository to one or more patches.
Makes one patch against HEAD and another one against the most recent
commit that occurs in an upstream branch. This way we can be robust
to history editing as long as the user never does "push -f" to break
history on an upstream branch.
Writes the first patch to <out_dir>/diff.patch and the second to
<out_dir>/upstream_diff_<commit_id>.patch.
Args:
out_dir (str): Directory to write the patch files. | [
"Save",
"the",
"current",
"state",
"of",
"this",
"repository",
"to",
"one",
"or",
"more",
"patches",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L150-L195 | train | 207,317 |
def list_projects(self, entity=None):
    """List projects in W&B scoped by entity.

    Args:
        entity (str, optional): The entity to scope the projects to.
            Falls back to the entity from settings when omitted.

    Returns:
        list: [{"id", "name", "description"}] dicts, one per project.
    """
    query = gql('''
        query Models($entity: String!) {
            models(first: 10, entityName: $entity) {
                edges {
                    node {
                        id
                        name
                        description
                    }
                }
            }
        }
    ''')
    variables = {'entity': entity or self.settings('entity')}
    result = self.gql(query, variable_values=variables)
    return self._flatten_edges(result['models'])
"""Lists projects in W&B scoped by entity.
Args:
entity (str, optional): The entity to scope this project to.
Returns:
[{"id","name","description"}]
"""
query = gql('''
query Models($entity: String!) {
models(first: 10, entityName: $entity) {
edges {
node {
id
name
description
}
}
}
}
''')
return self._flatten_edges(self.gql(query, variable_values={
'entity': entity or self.settings('entity')})['models']) | [
"def",
"list_projects",
"(",
"self",
",",
"entity",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Models($entity: String!) {\n models(first: 10, entityName: $entity) {\n edges {\n node {\n id\n ... | Lists projects in W&B scoped by entity.
Args:
entity (str, optional): The entity to scope this project to.
Returns:
[{"id","name","description"}] | [
"Lists",
"projects",
"in",
"W&B",
"scoped",
"by",
"entity",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L315-L338 | train | 207,318 |
def list_runs(self, project, entity=None):
    """List runs in W&B scoped by project.

    Args:
        project (str): The project to scope the runs to.
            Falls back to the project from settings when falsy.
        entity (str, optional): The entity to scope this project to.
            Falls back to the entity from settings when omitted.

    Returns:
        list: [{"id", "name", "description"}] dicts, one per run.
    """
    query = gql('''
        query Buckets($model: String!, $entity: String!) {
            model(name: $model, entityName: $entity) {
                buckets(first: 10) {
                    edges {
                        node {
                            id
                            name
                            description
                        }
                    }
                }
            }
        }
    ''')
    variables = {
        'entity': entity or self.settings('entity'),
        'model': project or self.settings('project'),
    }
    response = self.gql(query, variable_values=variables)
    return self._flatten_edges(response['model']['buckets'])
"""Lists runs in W&B scoped by project.
Args:
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
Returns:
[{"id",name","description"}]
"""
query = gql('''
query Buckets($model: String!, $entity: String!) {
model(name: $model, entityName: $entity) {
buckets(first: 10) {
edges {
node {
id
name
description
}
}
}
}
}
''')
return self._flatten_edges(self.gql(query, variable_values={
'entity': entity or self.settings('entity'),
'model': project or self.settings('project')})['model']['buckets']) | [
"def",
"list_runs",
"(",
"self",
",",
"project",
",",
"entity",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Buckets($model: String!, $entity: String!) {\n model(name: $model, entityName: $entity) {\n buckets(first: 10) {\n ... | Lists runs in W&B scoped by project.
Args:
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
Returns:
[{"id",name","description"}] | [
"Lists",
"runs",
"in",
"W&B",
"scoped",
"by",
"project",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L366-L393 | train | 207,319 |
def launch_run(self, command, project=None, entity=None, run_id=None):
    """Launch a run in the cloud.

    Args:
        command (str): The command to run
        project (str, optional): The project to scope the runs to.
            Defaults to the project from settings.
        entity (str, optional): The entity to scope this project to.
            Defaults to the entity from settings.
        run_id (str, optional): The run_id to scope to

    Returns:
        dict: The GraphQL response, whose launchRun payload contains
        {"podName", "status", "runId"}.
    """
    query = gql('''
        mutation launchRun(
            $entity: String
            $model: String
            $runId: String
            $image: String
            $command: String
            $patch: String
            $cwd: String
            $datasets: [String]
        ) {
            launchRun(input: {id: $runId, entityName: $entity, patch: $patch, modelName: $model,
                image: $image, command: $command, datasets: $datasets, cwd: $cwd}) {
                podName
                status
                runId
            }
        }
    ''')
    # Capture uncommitted local changes as a unified diff so the cloud run
    # can reproduce the working tree, not just the last commit.
    patch = BytesIO()
    if self.git.dirty:
        self.git.repo.git.execute(['git', 'diff'], output_stream=patch)
        patch.seek(0)
    # Preserve the caller's position relative to the repository root so the
    # command runs from the equivalent directory remotely.
    cwd = "."
    if self.git.enabled:
        cwd = cwd + os.getcwd().replace(self.git.repo.working_dir, "")
    # NOTE(review): $image and $datasets are declared by the mutation but
    # never supplied here, so they are sent as null — confirm intentional.
    return self.gql(query, variable_values={
        'entity': entity or self.settings('entity'),
        'model': project or self.settings('project'),
        'command': command,
        'runId': run_id,
        'patch': patch.read().decode("utf8"),
        'cwd': cwd
    })
"""Launch a run in the cloud.
Args:
command (str): The command to run
program (str): The file to run
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
run_id (str, optional): The run_id to scope to
Returns:
[{"podName","status"}]
"""
query = gql('''
mutation launchRun(
$entity: String
$model: String
$runId: String
$image: String
$command: String
$patch: String
$cwd: String
$datasets: [String]
) {
launchRun(input: {id: $runId, entityName: $entity, patch: $patch, modelName: $model,
image: $image, command: $command, datasets: $datasets, cwd: $cwd}) {
podName
status
runId
}
}
''')
patch = BytesIO()
if self.git.dirty:
self.git.repo.git.execute(['git', 'diff'], output_stream=patch)
patch.seek(0)
cwd = "."
if self.git.enabled:
cwd = cwd + os.getcwd().replace(self.git.repo.working_dir, "")
return self.gql(query, variable_values={
'entity': entity or self.settings('entity'),
'model': project or self.settings('project'),
'command': command,
'runId': run_id,
'patch': patch.read().decode("utf8"),
'cwd': cwd
}) | [
"def",
"launch_run",
"(",
"self",
",",
"command",
",",
"project",
"=",
"None",
",",
"entity",
"=",
"None",
",",
"run_id",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n mutation launchRun(\n $entity: String\n $model: String\n ... | Launch a run in the cloud.
Args:
command (str): The command to run
program (str): The file to run
project (str): The project to scope the runs to
entity (str, optional): The entity to scope this project to. Defaults to public models
run_id (str, optional): The run_id to scope to
Returns:
[{"podName","status"}] | [
"Launch",
"a",
"run",
"in",
"the",
"cloud",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L396-L442 | train | 207,320 |
def run_config(self, project, run=None, entity=None):
    """Get the relevant configs for a run.

    Args:
        project (str): The project to download, (can include bucket)
        run (str, optional): The run to download
        entity (str, optional): The entity to scope this project to.

    Returns:
        tuple: (commit, config, patch, metadata) for the run. ``metadata``
        is {} when the run has no wandb-metadata.json file.

    Raises:
        ValueError: If the run cannot be found.
    """
    query = gql('''
    query Model($name: String!, $entity: String!, $run: String!) {
        model(name: $name, entityName: $entity) {
            bucket(name: $run) {
                config
                commit
                patch
                files(names: ["wandb-metadata.json"]) {
                    edges {
                        node {
                            url
                        }
                    }
                }
            }
        }
    }
    ''')
    response = self.gql(query, variable_values={
        'name': project, 'run': run, 'entity': entity
    })
    # `is None` instead of `== None`; a missing project comes back as null.
    if response['model'] is None:
        raise ValueError("Run {}/{}/{} not found".format(entity, project, run))
    # Keep the bucket in its own name instead of clobbering the `run`
    # parameter, so the run id stays available.
    bucket = response['model']['bucket']
    commit = bucket['commit']
    patch = bucket['patch']
    config = json.loads(bucket['config'] or '{}')
    if len(bucket['files']['edges']) > 0:
        url = bucket['files']['edges'][0]['node']['url']
        res = requests.get(url)
        res.raise_for_status()
        metadata = res.json()
    else:
        metadata = {}
    return (commit, config, patch, metadata)
"""Get the relevant configs for a run
Args:
project (str): The project to download, (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to.
"""
query = gql('''
query Model($name: String!, $entity: String!, $run: String!) {
model(name: $name, entityName: $entity) {
bucket(name: $run) {
config
commit
patch
files(names: ["wandb-metadata.json"]) {
edges {
node {
url
}
}
}
}
}
}
''')
response = self.gql(query, variable_values={
'name': project, 'run': run, 'entity': entity
})
if response['model'] == None:
raise ValueError("Run {}/{}/{} not found".format(entity, project, run) )
run = response['model']['bucket']
commit = run['commit']
patch = run['patch']
config = json.loads(run['config'] or '{}')
if len(run['files']['edges']) > 0:
url = run['files']['edges'][0]['node']['url']
res = requests.get(url)
res.raise_for_status()
metadata = res.json()
else:
metadata = {}
return (commit, config, patch, metadata) | [
"def",
"run_config",
"(",
"self",
",",
"project",
",",
"run",
"=",
"None",
",",
"entity",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Model($name: String!, $entity: String!, $run: String!) {\n model(name: $name, entityName: $entity) {\n ... | Get the relevant configs for a run
Args:
project (str): The project to download, (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to. | [
"Get",
"the",
"relevant",
"configs",
"for",
"a",
"run"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L445-L488 | train | 207,321 |
def run_resume_status(self, entity, project_name, name):
    """Check if a run exists and get resume information.

    Args:
        entity (str, optional): The entity to scope this project to.
        project_name (str): The project to download, (can include bucket)
        name (str): The run to look up

    Returns:
        dict or None: The bucket (run) dict with resume counters and
        history/events tails, or None when the project or run is missing.

    Side effects:
        Persists the resolved project and entity names into settings.
    """
    query = gql('''
    query Model($project: String!, $entity: String, $name: String!) {
        model(name: $project, entityName: $entity) {
            id
            name
            entity {
                id
                name
            }

            bucket(name: $name, missingOk: true) {
                id
                name
                logLineCount
                historyLineCount
                eventsLineCount
                historyTail
                eventsTail
            }
        }
    }
    ''')
    response = self.gql(query, variable_values={
        'entity': entity, 'project': project_name, 'name': name,
    })
    # A missing project comes back as model: None; guard before the key
    # check, which previously raised TypeError on `'bucket' not in None`.
    project = response.get('model')
    if project is None or 'bucket' not in project:
        return None
    self.set_setting('project', project_name)
    if 'entity' in project:
        self.set_setting('entity', project['entity']['name'])
    return project['bucket']
"""Check if a run exists and get resume information.
Args:
entity (str, optional): The entity to scope this project to.
project_name (str): The project to download, (can include bucket)
run (str, optional): The run to download
"""
query = gql('''
query Model($project: String!, $entity: String, $name: String!) {
model(name: $project, entityName: $entity) {
id
name
entity {
id
name
}
bucket(name: $name, missingOk: true) {
id
name
logLineCount
historyLineCount
eventsLineCount
historyTail
eventsTail
}
}
}
''')
response = self.gql(query, variable_values={
'entity': entity, 'project': project_name, 'name': name,
})
if 'model' not in response or 'bucket' not in response['model']:
return None
project = response['model']
self.set_setting('project', project_name)
if 'entity' in project:
self.set_setting('entity', project['entity']['name'])
return project['bucket'] | [
"def",
"run_resume_status",
"(",
"self",
",",
"entity",
",",
"project_name",
",",
"name",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Model($project: String!, $entity: String, $name: String!) {\n model(name: $project, entityName: $entity) {\n id\... | Check if a run exists and get resume information.
Args:
entity (str, optional): The entity to scope this project to.
project_name (str): The project to download, (can include bucket)
run (str, optional): The run to download | [
"Check",
"if",
"a",
"run",
"exists",
"and",
"get",
"resume",
"information",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L491-L534 | train | 207,322 |
def upsert_run(self, id=None, name=None, project=None, host=None,
               group=None, tags=None,
               config=None, description=None, entity=None, state=None,
               repo=None, job_type=None, program_path=None, commit=None,
               sweep_name=None, summary_metrics=None, num_retries=None):
    """Create or update a run.

    Args:
        id (str, optional): The existing run to update
        name (str, optional): The name of the run to create
        project (str, optional): The name of the project
        host (str, optional): Hostname the run is executing on
        group (str, optional): Name of the group this run is a part of
        tags (list of str, optional): Tags to attach to the run
        config (dict, optional): The latest config params
        description (str, optional): A description of this project
        entity (str, optional): The entity to scope this project to.
        state (str, optional): State of the program.
        repo (str, optional): Url of the program's repository.
        job_type (str, optional): Type of job, e.g 'train'.
        program_path (str, optional): Path to the program.
        commit (str, optional): The Git SHA to associate the run with
        sweep_name (str, optional): Name of the sweep this run belongs to
        summary_metrics (str, optional): The JSON summary metrics
        num_retries (int, optional): Retry count forwarded to the GraphQL call

    Returns:
        dict: The upserted bucket (run), including its project and entity.

    Side effects:
        Persists the server-resolved project and entity names into settings.
    """
    mutation = gql('''
    mutation UpsertBucket(
        $id: String, $name: String,
        $project: String,
        $entity: String!,
        $groupName: String,
        $description: String,
        $commit: String,
        $config: JSONString,
        $host: String,
        $debug: Boolean,
        $program: String,
        $repo: String,
        $jobType: String,
        $state: String,
        $sweep: String,
        $tags: [String!],
        $summaryMetrics: JSONString,
    ) {
        upsertBucket(input: {
            id: $id,
            name: $name,
            groupName: $groupName,
            modelName: $project,
            entityName: $entity,
            description: $description,
            config: $config,
            commit: $commit,
            host: $host,
            debug: $debug,
            jobProgram: $program,
            jobRepo: $repo,
            jobType: $jobType,
            state: $state,
            sweep: $sweep,
            tags: $tags,
            summaryMetrics: $summaryMetrics,
        }) {
            bucket {
                id
                name
                description
                config
                project {
                    id
                    name
                    entity {
                        id
                        name
                    }
                }
            }
        }
    }
    ''')
    # config travels as a JSONString in the GraphQL schema.
    if config is not None:
        config = json.dumps(config)
    # Normalize falsy descriptions (e.g. "") to null rather than storing them.
    if not description:
        description = None
    kwargs = {}
    if num_retries is not None:
        kwargs['num_retries'] = num_retries
    variable_values = {
        'id': id, 'entity': entity or self.settings('entity'), 'name': name, 'project': project,
        'groupName': group, 'tags': tags,
        'description': description, 'config': config, 'commit': commit,
        'host': host, 'debug': env.is_debug(), 'repo': repo, 'program': program_path, 'jobType': job_type,
        'state': state, 'sweep': sweep_name, 'summaryMetrics': summary_metrics
    }
    response = self.gql(
        mutation, variable_values=variable_values, **kwargs)
    # Record the project/entity the server actually resolved so later calls
    # in this process default to the same scope.
    run = response['upsertBucket']['bucket']
    project = run.get('project')
    if project:
        self.set_setting('project', project['name'])
        entity = project.get('entity')
        if entity:
            self.set_setting('entity', entity['name'])
    return response['upsertBucket']['bucket']
group=None, tags=None,
config=None, description=None, entity=None, state=None,
repo=None, job_type=None, program_path=None, commit=None,
sweep_name=None, summary_metrics=None, num_retries=None):
"""Update a run
Args:
id (str, optional): The existing run to update
name (str, optional): The name of the run to create
group (str, optional): Name of the group this run is a part of
project (str, optional): The name of the project
config (dict, optional): The latest config params
description (str, optional): A description of this project
entity (str, optional): The entity to scope this project to.
repo (str, optional): Url of the program's repository.
state (str, optional): State of the program.
job_type (str, optional): Type of job, e.g 'train'.
program_path (str, optional): Path to the program.
commit (str, optional): The Git SHA to associate the run with
summary_metrics (str, optional): The JSON summary metrics
"""
mutation = gql('''
mutation UpsertBucket(
$id: String, $name: String,
$project: String,
$entity: String!,
$groupName: String,
$description: String,
$commit: String,
$config: JSONString,
$host: String,
$debug: Boolean,
$program: String,
$repo: String,
$jobType: String,
$state: String,
$sweep: String,
$tags: [String!],
$summaryMetrics: JSONString,
) {
upsertBucket(input: {
id: $id,
name: $name,
groupName: $groupName,
modelName: $project,
entityName: $entity,
description: $description,
config: $config,
commit: $commit,
host: $host,
debug: $debug,
jobProgram: $program,
jobRepo: $repo,
jobType: $jobType,
state: $state,
sweep: $sweep,
tags: $tags,
summaryMetrics: $summaryMetrics,
}) {
bucket {
id
name
description
config
project {
id
name
entity {
id
name
}
}
}
}
}
''')
if config is not None:
config = json.dumps(config)
if not description:
description = None
kwargs = {}
if num_retries is not None:
kwargs['num_retries'] = num_retries
variable_values = {
'id': id, 'entity': entity or self.settings('entity'), 'name': name, 'project': project,
'groupName': group, 'tags': tags,
'description': description, 'config': config, 'commit': commit,
'host': host, 'debug': env.is_debug(), 'repo': repo, 'program': program_path, 'jobType': job_type,
'state': state, 'sweep': sweep_name, 'summaryMetrics': summary_metrics
}
response = self.gql(
mutation, variable_values=variable_values, **kwargs)
run = response['upsertBucket']['bucket']
project = run.get('project')
if project:
self.set_setting('project', project['name'])
entity = project.get('entity')
if entity:
self.set_setting('entity', entity['name'])
return response['upsertBucket']['bucket'] | [
"def",
"upsert_run",
"(",
"self",
",",
"id",
"=",
"None",
",",
"name",
"=",
"None",
",",
"project",
"=",
"None",
",",
"host",
"=",
"None",
",",
"group",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"config",
"=",
"None",
",",
"description",
"=",
"... | Update a run
Args:
id (str, optional): The existing run to update
name (str, optional): The name of the run to create
group (str, optional): Name of the group this run is a part of
project (str, optional): The name of the project
config (dict, optional): The latest config params
description (str, optional): A description of this project
entity (str, optional): The entity to scope this project to.
repo (str, optional): Url of the program's repository.
state (str, optional): State of the program.
job_type (str, optional): Type of job, e.g 'train'.
program_path (str, optional): Path to the program.
commit (str, optional): The Git SHA to associate the run with
summary_metrics (str, optional): The JSON summary metrics | [
"Update",
"a",
"run"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L564-L669 | train | 207,323 |
def upload_urls(self, project, files, run=None, entity=None, description=None):
    """Generate temporary resumeable upload urls.

    Args:
        project (str): The project to download
        files (list or dict): The filenames to upload
        run (str, optional): The run to upload to
        entity (str, optional): The entity to scope this project to. Defaults to wandb models
        description (str, optional): Description to set on the run

    Returns:
        (bucket_id, file_info)
        bucket_id: id of bucket we uploaded to
        file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.
            {
                'weights.h5': { "url": "https://weights.url" },
                'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
            }

    Raises:
        CommError: If the run does not exist.
    """
    query = gql('''
    query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {
        model(name: $name, entityName: $entity) {
            bucket(name: $run, desc: $description) {
                id
                files(names: $files) {
                    edges {
                        node {
                            name
                            url(upload: true)
                            updatedAt
                        }
                    }
                }
            }
        }
    }
    ''')
    run_id = run or self.settings('run')
    entity = entity or self.settings('entity')
    query_result = self.gql(query, variable_values={
        'name': project, 'run': run_id,
        'entity': entity,
        'description': description,
        # `files` may be a list or dict; iterating either yields the names,
        # and list() avoids shadowing the builtin `file`.
        'files': list(files)
    })
    bucket = query_result['model']['bucket']
    if bucket:
        result = {info['name']: info for info in self._flatten_edges(bucket['files'])}
        return bucket['id'], result
    else:
        raise CommError("Run does not exist {}/{}/{}.".format(entity, project, run_id))
"""Generate temporary resumeable upload urls
Args:
project (str): The project to download
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
(bucket_id, file_info)
bucket_id: id of bucket we uploaded to
file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.
{
'weights.h5': { "url": "https://weights.url" },
'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
}
"""
query = gql('''
query Model($name: String!, $files: [String]!, $entity: String!, $run: String!, $description: String) {
model(name: $name, entityName: $entity) {
bucket(name: $run, desc: $description) {
id
files(names: $files) {
edges {
node {
name
url(upload: true)
updatedAt
}
}
}
}
}
}
''')
run_id = run or self.settings('run')
entity = entity or self.settings('entity')
query_result = self.gql(query, variable_values={
'name': project, 'run': run_id,
'entity': entity,
'description': description,
'files': [file for file in files]
})
run = query_result['model']['bucket']
if run:
result = {file['name']: file for file in self._flatten_edges(run['files'])}
return run['id'], result
else:
raise CommError("Run does not exist {}/{}/{}.".format(entity, project, run_id)) | [
"def",
"upload_urls",
"(",
"self",
",",
"project",
",",
"files",
",",
"run",
"=",
"None",
",",
"entity",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Model($name: String!, $files: [String]!, $entity: String!... | Generate temporary resumeable upload urls
Args:
project (str): The project to download
files (list or dict): The filenames to upload
run (str, optional): The run to upload to
entity (str, optional): The entity to scope this project to. Defaults to wandb models
Returns:
(bucket_id, file_info)
bucket_id: id of bucket we uploaded to
file_info: A dict of filenames and urls, also indicates if this revision already has uploaded files.
{
'weights.h5': { "url": "https://weights.url" },
'model.json': { "url": "https://model.json", "updatedAt": '2013-04-26T22:22:23.832Z', 'md5': 'mZFLkyvTelC5g8XnyQrpOw==' },
} | [
"Generate",
"temporary",
"resumeable",
"upload",
"urls"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L672-L722 | train | 207,324 |
def download_file(self, url):
    """Initiate a streaming download.

    Args:
        url (str): The url to download

    Returns:
        A tuple of the content length and the streaming response
    """
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    length = int(resp.headers.get('content-length', 0))
    return (length, resp)
"""Initiate a streaming download
Args:
url (str): The url to download
Returns:
A tuple of the content length and the streaming response
"""
response = requests.get(url, stream=True)
response.raise_for_status()
return (int(response.headers.get('content-length', 0)), response) | [
"def",
"download_file",
"(",
"self",
",",
"url",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"(",
"int",
"(",
"response",
".",
"headers",
".",
... | Initiate a streaming download
Args:
url (str): The url to download
Returns:
A tuple of the content length and the streaming response | [
"Initiate",
"a",
"streaming",
"download"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L806-L817 | train | 207,325 |
def upload_file(self, url, file, callback=None, extra_headers=None):
    """Upload a file to W&B with failure resumption.

    Args:
        url (str): The url to upload to
        file (file): An open file object for the data to upload
        callback (:obj:`func`, optional): A callback which is passed the number of
            bytes uploaded since the last time it was called, used to report progress
        extra_headers (dict, optional): Additional HTTP headers for the PUT.

    Returns:
        The requests library response object

    Raises:
        CommError: If the file is empty.
    """
    # A None default avoids the mutable-default-argument pitfall of the
    # previous `extra_headers={}`; copying keeps the caller's dict intact.
    extra_headers = dict(extra_headers) if extra_headers else {}
    response = None
    if os.stat(file.name).st_size == 0:
        raise CommError("%s is an empty file" % file.name)
    try:
        progress = Progress(file, callback=callback)
        response = requests.put(
            url, data=progress, headers=extra_headers)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        total = progress.len
        status = self._status_request(url, total)
        # TODO(adrian): there's probably even more stuff we should add here
        # like if we're offline, we should retry then too
        # 308/408/5xx indicate a resumable or transient failure worth retrying.
        if status.status_code in (308, 408, 500, 502, 503, 504):
            util.sentry_reraise(retry.TransientException(exc=e))
        else:
            util.sentry_reraise(e)
    return response
"""Uploads a file to W&B with failure resumption
Args:
url (str): The url to download
file (str): The path to the file you want to upload
callback (:obj:`func`, optional): A callback which is passed the number of
bytes uploaded since the last time it was called, used to report progress
Returns:
The requests library response object
"""
extra_headers = extra_headers.copy()
response = None
if os.stat(file.name).st_size == 0:
raise CommError("%s is an empty file" % file.name)
try:
progress = Progress(file, callback=callback)
response = requests.put(
url, data=progress, headers=extra_headers)
response.raise_for_status()
except requests.exceptions.RequestException as e:
total = progress.len
status = self._status_request(url, total)
# TODO(adrian): there's probably even more stuff we should add here
# like if we're offline, we should retry then too
if status.status_code in (308, 408, 500, 502, 503, 504):
util.sentry_reraise(retry.TransientException(exc=e))
else:
util.sentry_reraise(e)
return response | [
"def",
"upload_file",
"(",
"self",
",",
"url",
",",
"file",
",",
"callback",
"=",
"None",
",",
"extra_headers",
"=",
"{",
"}",
")",
":",
"extra_headers",
"=",
"extra_headers",
".",
"copy",
"(",
")",
"response",
"=",
"None",
"if",
"os",
".",
"stat",
"... | Uploads a file to W&B with failure resumption
Args:
url (str): The url to download
file (str): The path to the file you want to upload
callback (:obj:`func`, optional): A callback which is passed the number of
bytes uploaded since the last time it was called, used to report progress
Returns:
The requests library response object | [
"Uploads",
"a",
"file",
"to",
"W&B",
"with",
"failure",
"resumption"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L842-L873 | train | 207,326 |
def register_agent(self, host, sweep_id=None, project_name=None):
    """Register a new agent.

    Args:
        host (str): hostname
        sweep_id (str, optional): sweep id
        project_name (str, optional): model that contains sweep;
            falls back to the project from settings.

    Returns:
        dict: The created agent, containing its "id".

    Raises:
        UsageError: When the backend rejects the request with a 400.
    """
    mutation = gql('''
    mutation CreateAgent(
        $host: String!
        $projectName: String!,
        $entityName: String!,
        $sweep: String!
    ) {
        createAgent(input: {
            host: $host,
            projectName: $projectName,
            entityName: $entityName,
            sweep: $sweep,
        }) {
            agent {
                id
            }
        }
    }
    ''')
    if project_name is None:
        project_name = self.settings('project')

    def no_retry_400(e):
        # Validation failures (HTTP 400) surface immediately instead of
        # being retried; anything else is considered retryable.
        is_validation_error = (
            isinstance(e, requests.HTTPError)
            and e.response.status_code == 400
        )
        if not is_validation_error:
            return True
        body = json.loads(e.response.content)
        raise UsageError(body['errors'][0]['message'])

    variables = {
        'host': host,
        'entityName': self.settings("entity"),
        'projectName': project_name,
        'sweep': sweep_id,
    }
    response = self.gql(mutation, variable_values=variables,
                        check_retry_fn=no_retry_400)
    return response['createAgent']['agent']
"""Register a new agent
Args:
host (str): hostname
persistent (bool): long running or oneoff
sweep (str): sweep id
project_name: (str): model that contains sweep
"""
mutation = gql('''
mutation CreateAgent(
$host: String!
$projectName: String!,
$entityName: String!,
$sweep: String!
) {
createAgent(input: {
host: $host,
projectName: $projectName,
entityName: $entityName,
sweep: $sweep,
}) {
agent {
id
}
}
}
''')
if project_name is None:
project_name = self.settings('project')
# don't retry on validation errors
def no_retry_400(e):
if not isinstance(e, requests.HTTPError):
return True
if e.response.status_code != 400:
return True
body = json.loads(e.response.content)
raise UsageError(body['errors'][0]['message'])
response = self.gql(mutation, variable_values={
'host': host,
'entityName': self.settings("entity"),
'projectName': project_name,
'sweep': sweep_id}, check_retry_fn=no_retry_400)
return response['createAgent']['agent'] | [
"def",
"register_agent",
"(",
"self",
",",
"host",
",",
"sweep_id",
"=",
"None",
",",
"project_name",
"=",
"None",
")",
":",
"mutation",
"=",
"gql",
"(",
"'''\n mutation CreateAgent(\n $host: String!\n $projectName: String!,\n $entityNa... | Register a new agent
Args:
host (str): hostname
persistent (bool): long running or oneoff
sweep (str): sweep id
project_name: (str): model that contains sweep | [
"Register",
"a",
"new",
"agent"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L878-L923 | train | 207,327 |
def agent_heartbeat(self, agent_id, metrics, run_states):
    """Notify server about agent state, receive commands.

    Args:
        agent_id (str): agent_id
        metrics (dict): system metrics
        run_states (dict): run_id: state mapping
    Returns:
        List of commands to execute.
    """
    mutation = gql('''
    mutation Heartbeat(
        $id: ID!,
        $metrics: JSONString,
        $runState: JSONString
    ) {
        agentHeartbeat(input: {
            id: $id,
            metrics: $metrics,
            runState: $runState
        }) {
            agent {
                id
            }
            commands
        }
    }
    ''')
    variables = {
        'id': agent_id,
        'metrics': json.dumps(metrics),
        'runState': json.dumps(run_states),
    }
    try:
        result = self.gql(mutation, variable_values=variables)
    except Exception as e:
        # GQL raises exceptions with stringified python dictionaries :/
        message = ast.literal_eval(e.args[0])["message"]
        logger.error('Error communicating with W&B: %s', message)
        return []
    return json.loads(result['agentHeartbeat']['commands'])
def upsert_sweep(self, config):
    """Upsert a sweep object.

    Args:
        config (str): sweep config (will be converted to yaml)
    """
    mutation = gql('''
    mutation UpsertSweep(
        $config: String,
        $description: String,
        $entityName: String!,
        $projectName: String!
    ) {
        upsertSweep(input: {
            config: $config,
            description: $description,
            entityName: $entityName,
            projectName: $projectName
        }) {
            sweep {
                name
            }
        }
    }
    ''')

    # don't retry on validation errors
    # TODO(jhr): generalize error handling routines
    def no_retry_400_or_404(err):
        if not isinstance(err, requests.HTTPError):
            return True
        if err.response.status_code not in (400, 404):
            return True
        payload = json.loads(err.response.content)
        raise UsageError(payload['errors'][0]['message'])

    result = self.gql(
        mutation,
        variable_values={
            'config': yaml.dump(config),
            'description': config.get("description"),
            'entityName': self.settings("entity"),
            'projectName': self.settings("project"),
        },
        check_retry_fn=no_retry_400_or_404)
    return result['upsertSweep']['sweep']['name']
def file_current(self, fname, md5):
    """Checksum a file and compare the md5 with the known md5
    """
    # A missing file can never be current; short-circuit before hashing.
    if not os.path.isfile(fname):
        return False
    return util.md5_file(fname) == md5
def pull(self, project, run=None, entity=None):
    """Download files from W&B

    Args:
        project (str): The project to download
        run (str, optional): The run to download files from
        entity (str, optional): The entity to scope this project to. Defaults to wandb models

    Returns:
        A list of requests library response objects, one per downloaded file
    """
    project, run = self.parse_slug(project, run=run)
    url_map = self.download_urls(project, run, entity)
    responses = []
    for name in url_map:
        _, resp = self.download_write_file(url_map[name])
        if resp:
            responses.append(resp)
    return responses
def push(self, files, run=None, entity=None, project=None, description=None, force=True, progress=False):
    """Uploads multiple files to W&B

    Args:
        files (list or dict): The filenames to upload
        run (str, optional): The run to upload to
        entity (str, optional): The entity to scope this project to. Defaults to wandb models
        project (str, optional): The name of the project to upload to. Defaults to the one in settings.
        description (str, optional): The description of the changes
        force (bool, optional): Whether to prevent push if git has uncommitted changes
        progress (callable, or stream): If callable, will be called with (chunk_bytes,
            total_bytes) as argument else if True, renders a progress bar to stream.

    Returns:
        A list of requests library response objects, one per uploaded file

    Raises:
        CommError: when no project is configured.
    """
    if project is None:
        project = self.get_project()
    if project is None:
        raise CommError("No project configured.")
    if run is None:
        run = self.current_run_id

    # TODO(adrian): we use a retriable version of self.upload_file() so
    # will never retry self.upload_urls() here. Instead, maybe we should
    # make push itself retriable.
    # (run_id from upload_urls was previously bound but never used.)
    _, result = self.upload_urls(
        project, files, run, entity, description)
    responses = []
    for file_name, file_info in result.items():
        try:
            # To handle Windows paths
            # TODO: this doesn't handle absolute paths...
            normal_name = os.path.join(*file_name.split("/"))
            open_file = files[normal_name] if isinstance(
                files, dict) else open(normal_name, "rb")
        except IOError:
            print("%s does not exist" % file_name)
            continue
        try:
            if progress:
                if hasattr(progress, '__call__'):
                    responses.append(self.upload_file_retry(
                        file_info['url'], open_file, progress))
                else:
                    length = os.fstat(open_file.fileno()).st_size
                    with click.progressbar(file=progress, length=length, label='Uploading file: %s' % (file_name),
                                           fill_char=click.style('&', fg='green')) as bar:
                        responses.append(self.upload_file_retry(
                            file_info['url'], open_file, lambda bites, _: bar.update(bites)))
            else:
                responses.append(self.upload_file_retry(file_info['url'], open_file))
        finally:
            # BUG FIX: previously the handle leaked when the upload raised;
            # always close it, even on failure.
            open_file.close()
    return responses
def get_file_stream_api(self):
    """This creates a new file pusher thread. Call start to initiate the thread that talks to W&B"""
    # Reuse the cached instance when one already exists.
    if self._file_stream_api:
        return self._file_stream_api
    if self._current_run_id is None:
        raise UsageError(
            'Must have a current run to use file stream API.')
    self._file_stream_api = FileStreamApi(self, self._current_run_id)
    return self._file_stream_api
def _status_request(self, url, length):
    """Ask google how much we've uploaded"""
    # An empty-body PUT with a */total Content-Range queries resumable
    # upload progress.
    headers = {
        'Content-Length': '0',
        'Content-Range': 'bytes */%i' % length,
    }
    return requests.put(url=url, headers=headers)
def get_common_complete_suffix(document, completions):
    """
    Return the common prefix for all completions.
    """
    # A completion that rewrites text before the cursor rules out any
    # common part.
    def keeps_text_before_cursor(completion):
        tail = completion.text[:-completion.start_position]
        return document.text_before_cursor.endswith(tail)

    if not all(keeps_text_before_cursor(c) for c in completions):
        return ''

    # Otherwise, the answer is the common prefix of all inserted suffixes.
    suffixes = [c.text[-c.start_position:] for c in completions]
    return _commonprefix(suffixes)
def get_width(self, cli, ui_content):
    " Width to report to the `Window`. "
    # The margin is as wide as the rendered prompt's first line.
    tokens = self.get_prompt_tokens(cli)
    return get_cwidth(token_list_to_text(tokens))
def prompt_for_project(ctx, entity):
    """Ask the user for a project, creating one if necessary."""
    existing = ctx.invoke(projects, entity=entity, display=False)
    try:
        if len(existing) == 0:
            # No projects yet: prompt for a name and create it.
            project = click.prompt("Enter a name for your first project")
            #description = editor()
            project = api.upsert_project(project, entity=entity)["name"]
        else:
            names = [p["name"] for p in existing]
            answer = whaaaaat.prompt([{
                'type': 'list',
                'name': 'project_name',
                'message': "Which project should we use?",
                'choices': names + ["Create New"]
            }])
            project = answer['project_name'] if answer else "Create New"
            # TODO: check with the server if the project exists
            if project == "Create New":
                project = click.prompt(
                    "Enter a name for your new project", value_proc=api.format_project)
                #description = editor()
                project = api.upsert_project(project, entity=entity)["name"]
    except wandb.apis.CommError as e:
        raise ClickException(str(e))

    return project
def cli(ctx):
    """Weights & Biases.
    Run "wandb docs" for full documentation.
    """
    wandb.try_to_set_up_global_logging()
    # Invoked with no subcommand: just print the help text.
    if ctx.invoked_subcommand is not None:
        return
    click.echo(ctx.get_help())
def _load_values(self):
    """Load config.yaml from the run directory if available."""
    path = self._config_path()
    # No run directory yet -> nothing to load.
    if path is None:
        return
    if os.path.isfile(path):
        self._load_file(path)
def load_json(self, json):
    """Loads existing config from JSON"""
    for key, entry in json.items():
        # The version marker is metadata, not a config value.
        if key == "wandb_version":
            continue
        self._items[key] = entry.get('value')
        self._descriptions[key] = entry.get('desc')
def persist(self):
    """Stores the current configuration for pushing to W&B"""
    # In dryrun mode, without wandb run, we don't
    # save config on initial load, because the run directory
    # may not be created yet (because we don't know if we're
    # being used in a run context, or as an API).
    # TODO: Defer saving somehow, maybe via an events system
    path = self._config_path()
    if path is None:
        return
    with open(path, "w") as conf_file:
        conf_file.write(str(self))
def get_upstream_fork_point(self):
    """Get the most recent ancestor of HEAD that occurs on an upstream
    branch.

    First looks at the current branch's tracking branch, if applicable. If
    that doesn't work, looks at every other branch to find the most recent
    ancestor of HEAD that occurs on a tracking branch.

    Returns:
        git.Commit object or None
    """
    possible_relatives = []
    try:
        if not self.repo:
            return None
        try:
            active_branch = self.repo.active_branch
        except (TypeError, ValueError):
            logger.debug("git is in a detached head state")
            return None  # detached head
        else:
            tracking_branch = active_branch.tracking_branch()
            if tracking_branch:
                possible_relatives.append(tracking_branch.commit)

        if not possible_relatives:
            for branch in self.repo.branches:
                tracking_branch = branch.tracking_branch()
                if tracking_branch is not None:
                    possible_relatives.append(tracking_branch.commit)

        head = self.repo.head
        most_recent_ancestor = None
        for possible_relative in possible_relatives:
            # merge_base yields at most one common ancestor per pair;
            # keep the newest one (i.e. the one all others are ancestors of).
            for ancestor in self.repo.merge_base(head, possible_relative):
                if most_recent_ancestor is None:
                    most_recent_ancestor = ancestor
                elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
                    most_recent_ancestor = ancestor

        return most_recent_ancestor
    except exc.GitCommandError as e:
        logger.debug("git remote upstream fork point could not be found")
        # BUG FIX: exceptions have no `.message` attribute on Python 3, so
        # `e.message` raised AttributeError inside this handler; str(e)
        # works on both Python 2 and 3.
        logger.debug(str(e))
        return None
wandb/client | wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py | create_text_object_decorator | def create_text_object_decorator(registry):
"""
Create a decorator that can be used to register Vi text object implementations.
"""
assert isinstance(registry, BaseRegistry)
operator_given = ViWaitingForTextObjectMode()
navigation_mode = ViNavigationMode()
selection_mode = ViSelectionMode()
def text_object_decorator(*keys, **kw):
"""
Register a text object function.
Usage::
@text_object('w', filter=..., no_move_handler=False)
def handler(event):
# Return a text object for this key.
return TextObject(...)
:param no_move_handler: Disable the move handler in navigation mode.
(It's still active in selection mode.)
"""
filter = kw.pop('filter', Always())
no_move_handler = kw.pop('no_move_handler', False)
no_selection_handler = kw.pop('no_selection_handler', False)
eager = kw.pop('eager', False)
assert not kw
def decorator(text_object_func):
assert callable(text_object_func)
@registry.add_binding(*keys, filter=operator_given & filter, eager=eager)
def _(event):
# Arguments are multiplied.
vi_state = event.cli.vi_state
event._arg = (vi_state.operator_arg or 1) * (event.arg or 1)
# Call the text object handler.
text_obj = text_object_func(event)
if text_obj is not None:
assert isinstance(text_obj, TextObject)
# Call the operator function with the text object.
vi_state.operator_func(event, text_obj)
# Clear operator.
event.cli.vi_state.operator_func = None
event.cli.vi_state.operator_arg = None
# Register a move operation. (Doesn't need an operator.)
if not no_move_handler:
@registry.add_binding(*keys, filter=~operator_given & filter & navigation_mode, eager=eager)
def _(event):
" Move handler for navigation mode. "
text_object = text_object_func(event)
event.current_buffer.cursor_position += text_object.start
# Register a move selection operation.
if not no_selection_handler:
@registry.add_binding(*keys, filter=~operator_given & filter & selection_mode, eager=eager)
def _(event):
" Move handler for selection mode. "
text_object = text_object_func(event)
buff = event.current_buffer
# When the text object has both a start and end position, like 'i(' or 'iw',
# Turn this into a selection, otherwise the cursor.
if text_object.end:
# Take selection positions from text object.
start, end = text_object.operator_range(buff.document)
start += buff.cursor_position
end += buff.cursor_position
buff.selection_state.original_cursor_position = start
buff.cursor_position = end
# Take selection type from text object.
if text_object.type == TextObjectType.LINEWISE:
buff.selection_state.type = SelectionType.LINES
else:
buff.selection_state.type = SelectionType.CHARACTERS
else:
event.current_buffer.cursor_position += text_object.start
# Make it possible to chain @text_object decorators.
return text_object_func
return decorator
return text_object_decorator | python | def create_text_object_decorator(registry):
"""
Create a decorator that can be used to register Vi text object implementations.
"""
assert isinstance(registry, BaseRegistry)
operator_given = ViWaitingForTextObjectMode()
navigation_mode = ViNavigationMode()
selection_mode = ViSelectionMode()
def text_object_decorator(*keys, **kw):
"""
Register a text object function.
Usage::
@text_object('w', filter=..., no_move_handler=False)
def handler(event):
# Return a text object for this key.
return TextObject(...)
:param no_move_handler: Disable the move handler in navigation mode.
(It's still active in selection mode.)
"""
filter = kw.pop('filter', Always())
no_move_handler = kw.pop('no_move_handler', False)
no_selection_handler = kw.pop('no_selection_handler', False)
eager = kw.pop('eager', False)
assert not kw
def decorator(text_object_func):
assert callable(text_object_func)
@registry.add_binding(*keys, filter=operator_given & filter, eager=eager)
def _(event):
# Arguments are multiplied.
vi_state = event.cli.vi_state
event._arg = (vi_state.operator_arg or 1) * (event.arg or 1)
# Call the text object handler.
text_obj = text_object_func(event)
if text_obj is not None:
assert isinstance(text_obj, TextObject)
# Call the operator function with the text object.
vi_state.operator_func(event, text_obj)
# Clear operator.
event.cli.vi_state.operator_func = None
event.cli.vi_state.operator_arg = None
# Register a move operation. (Doesn't need an operator.)
if not no_move_handler:
@registry.add_binding(*keys, filter=~operator_given & filter & navigation_mode, eager=eager)
def _(event):
" Move handler for navigation mode. "
text_object = text_object_func(event)
event.current_buffer.cursor_position += text_object.start
# Register a move selection operation.
if not no_selection_handler:
@registry.add_binding(*keys, filter=~operator_given & filter & selection_mode, eager=eager)
def _(event):
" Move handler for selection mode. "
text_object = text_object_func(event)
buff = event.current_buffer
# When the text object has both a start and end position, like 'i(' or 'iw',
# Turn this into a selection, otherwise the cursor.
if text_object.end:
# Take selection positions from text object.
start, end = text_object.operator_range(buff.document)
start += buff.cursor_position
end += buff.cursor_position
buff.selection_state.original_cursor_position = start
buff.cursor_position = end
# Take selection type from text object.
if text_object.type == TextObjectType.LINEWISE:
buff.selection_state.type = SelectionType.LINES
else:
buff.selection_state.type = SelectionType.CHARACTERS
else:
event.current_buffer.cursor_position += text_object.start
# Make it possible to chain @text_object decorators.
return text_object_func
return decorator
return text_object_decorator | [
"def",
"create_text_object_decorator",
"(",
"registry",
")",
":",
"assert",
"isinstance",
"(",
"registry",
",",
"BaseRegistry",
")",
"operator_given",
"=",
"ViWaitingForTextObjectMode",
"(",
")",
"navigation_mode",
"=",
"ViNavigationMode",
"(",
")",
"selection_mode",
... | Create a decorator that can be used to register Vi text object implementations. | [
"Create",
"a",
"decorator",
"that",
"can",
"be",
"used",
"to",
"register",
"Vi",
"text",
"object",
"implementations",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py#L140-L230 | train | 207,343 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py | create_operator_decorator | def create_operator_decorator(registry):
"""
Create a decorator that can be used for registering Vi operators.
"""
assert isinstance(registry, BaseRegistry)
operator_given = ViWaitingForTextObjectMode()
navigation_mode = ViNavigationMode()
selection_mode = ViSelectionMode()
def operator_decorator(*keys, **kw):
"""
Register a Vi operator.
Usage::
@operator('d', filter=...)
def handler(cli, text_object):
# Do something with the text object here.
"""
filter = kw.pop('filter', Always())
eager = kw.pop('eager', False)
assert not kw
def decorator(operator_func):
@registry.add_binding(*keys, filter=~operator_given & filter & navigation_mode, eager=eager)
def _(event):
"""
Handle operator in navigation mode.
"""
# When this key binding is matched, only set the operator
# function in the ViState. We should execute it after a text
# object has been received.
event.cli.vi_state.operator_func = operator_func
event.cli.vi_state.operator_arg = event.arg
@registry.add_binding(*keys, filter=~operator_given & filter & selection_mode, eager=eager)
def _(event):
"""
Handle operator in selection mode.
"""
buff = event.current_buffer
selection_state = buff.selection_state
# Create text object from selection.
if selection_state.type == SelectionType.LINES:
text_obj_type = TextObjectType.LINEWISE
elif selection_state.type == SelectionType.BLOCK:
text_obj_type = TextObjectType.BLOCK
else:
text_obj_type = TextObjectType.INCLUSIVE
text_object = TextObject(
selection_state.original_cursor_position - buff.cursor_position,
type=text_obj_type)
# Execute operator.
operator_func(event, text_object)
# Quit selection mode.
buff.selection_state = None
return operator_func
return decorator
return operator_decorator | python | def create_operator_decorator(registry):
"""
Create a decorator that can be used for registering Vi operators.
"""
assert isinstance(registry, BaseRegistry)
operator_given = ViWaitingForTextObjectMode()
navigation_mode = ViNavigationMode()
selection_mode = ViSelectionMode()
def operator_decorator(*keys, **kw):
"""
Register a Vi operator.
Usage::
@operator('d', filter=...)
def handler(cli, text_object):
# Do something with the text object here.
"""
filter = kw.pop('filter', Always())
eager = kw.pop('eager', False)
assert not kw
def decorator(operator_func):
@registry.add_binding(*keys, filter=~operator_given & filter & navigation_mode, eager=eager)
def _(event):
"""
Handle operator in navigation mode.
"""
# When this key binding is matched, only set the operator
# function in the ViState. We should execute it after a text
# object has been received.
event.cli.vi_state.operator_func = operator_func
event.cli.vi_state.operator_arg = event.arg
@registry.add_binding(*keys, filter=~operator_given & filter & selection_mode, eager=eager)
def _(event):
"""
Handle operator in selection mode.
"""
buff = event.current_buffer
selection_state = buff.selection_state
# Create text object from selection.
if selection_state.type == SelectionType.LINES:
text_obj_type = TextObjectType.LINEWISE
elif selection_state.type == SelectionType.BLOCK:
text_obj_type = TextObjectType.BLOCK
else:
text_obj_type = TextObjectType.INCLUSIVE
text_object = TextObject(
selection_state.original_cursor_position - buff.cursor_position,
type=text_obj_type)
# Execute operator.
operator_func(event, text_object)
# Quit selection mode.
buff.selection_state = None
return operator_func
return decorator
return operator_decorator | [
"def",
"create_operator_decorator",
"(",
"registry",
")",
":",
"assert",
"isinstance",
"(",
"registry",
",",
"BaseRegistry",
")",
"operator_given",
"=",
"ViWaitingForTextObjectMode",
"(",
")",
"navigation_mode",
"=",
"ViNavigationMode",
"(",
")",
"selection_mode",
"="... | Create a decorator that can be used for registering Vi operators. | [
"Create",
"a",
"decorator",
"that",
"can",
"be",
"used",
"for",
"registering",
"Vi",
"operators",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py#L233-L297 | train | 207,344 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py | load_vi_open_in_editor_bindings | def load_vi_open_in_editor_bindings():
"""
Pressing 'v' in navigation mode will open the buffer in an external editor.
"""
registry = Registry()
navigation_mode = ViNavigationMode()
registry.add_binding('v', filter=navigation_mode)(
get_by_name('edit-and-execute-command'))
return registry | python | def load_vi_open_in_editor_bindings():
"""
Pressing 'v' in navigation mode will open the buffer in an external editor.
"""
registry = Registry()
navigation_mode = ViNavigationMode()
registry.add_binding('v', filter=navigation_mode)(
get_by_name('edit-and-execute-command'))
return registry | [
"def",
"load_vi_open_in_editor_bindings",
"(",
")",
":",
"registry",
"=",
"Registry",
"(",
")",
"navigation_mode",
"=",
"ViNavigationMode",
"(",
")",
"registry",
".",
"add_binding",
"(",
"'v'",
",",
"filter",
"=",
"navigation_mode",
")",
"(",
"get_by_name",
"(",... | Pressing 'v' in navigation mode will open the buffer in an external editor. | [
"Pressing",
"v",
"in",
"navigation",
"mode",
"will",
"open",
"the",
"buffer",
"in",
"an",
"external",
"editor",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py#L1711-L1720 | train | 207,345 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py | TextObject.cut | def cut(self, buffer):
"""
Turn text object into `ClipboardData` instance.
"""
from_, to = self.operator_range(buffer.document)
from_ += buffer.cursor_position
to += buffer.cursor_position
to -= 1 # SelectionState does not include the end position, `operator_range` does.
document = Document(buffer.text, to, SelectionState(
original_cursor_position=from_, type=self.selection_type))
new_document, clipboard_data = document.cut_selection()
return new_document, clipboard_data | python | def cut(self, buffer):
"""
Turn text object into `ClipboardData` instance.
"""
from_, to = self.operator_range(buffer.document)
from_ += buffer.cursor_position
to += buffer.cursor_position
to -= 1 # SelectionState does not include the end position, `operator_range` does.
document = Document(buffer.text, to, SelectionState(
original_cursor_position=from_, type=self.selection_type))
new_document, clipboard_data = document.cut_selection()
return new_document, clipboard_data | [
"def",
"cut",
"(",
"self",
",",
"buffer",
")",
":",
"from_",
",",
"to",
"=",
"self",
".",
"operator_range",
"(",
"buffer",
".",
"document",
")",
"from_",
"+=",
"buffer",
".",
"cursor_position",
"to",
"+=",
"buffer",
".",
"cursor_position",
"to",
"-=",
... | Turn text object into `ClipboardData` instance. | [
"Turn",
"text",
"object",
"into",
"ClipboardData",
"instance",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/vi.py#L123-L137 | train | 207,346 |
wandb/client | wandb/vendor/prompt_toolkit/layout/lexers.py | RegexSync.get_sync_start_position | def get_sync_start_position(self, document, lineno):
" Scan backwards, and find a possible position to start. "
pattern = self._compiled_pattern
lines = document.lines
# Scan upwards, until we find a point where we can start the syntax
# synchronisation.
for i in range(lineno, max(-1, lineno - self.MAX_BACKWARDS), -1):
match = pattern.match(lines[i])
if match:
return i, match.start()
# No synchronisation point found. If we aren't that far from the
# beginning, start at the very beginning, otherwise, just try to start
# at the current line.
if lineno < self.FROM_START_IF_NO_SYNC_POS_FOUND:
return 0, 0
else:
return lineno, 0 | python | def get_sync_start_position(self, document, lineno):
" Scan backwards, and find a possible position to start. "
pattern = self._compiled_pattern
lines = document.lines
# Scan upwards, until we find a point where we can start the syntax
# synchronisation.
for i in range(lineno, max(-1, lineno - self.MAX_BACKWARDS), -1):
match = pattern.match(lines[i])
if match:
return i, match.start()
# No synchronisation point found. If we aren't that far from the
# beginning, start at the very beginning, otherwise, just try to start
# at the current line.
if lineno < self.FROM_START_IF_NO_SYNC_POS_FOUND:
return 0, 0
else:
return lineno, 0 | [
"def",
"get_sync_start_position",
"(",
"self",
",",
"document",
",",
"lineno",
")",
":",
"pattern",
"=",
"self",
".",
"_compiled_pattern",
"lines",
"=",
"document",
".",
"lines",
"# Scan upwards, until we find a point where we can start the syntax",
"# synchronisation.",
... | Scan backwards, and find a possible position to start. | [
"Scan",
"backwards",
"and",
"find",
"a",
"possible",
"position",
"to",
"start",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/lexers.py#L106-L124 | train | 207,347 |
wandb/client | wandb/vendor/prompt_toolkit/layout/lexers.py | PygmentsLexer.from_filename | def from_filename(cls, filename, sync_from_start=True):
"""
Create a `Lexer` from a filename.
"""
# Inline imports: the Pygments dependency is optional!
from pygments.util import ClassNotFound
from pygments.lexers import get_lexer_for_filename
try:
pygments_lexer = get_lexer_for_filename(filename)
except ClassNotFound:
return SimpleLexer()
else:
return cls(pygments_lexer.__class__, sync_from_start=sync_from_start) | python | def from_filename(cls, filename, sync_from_start=True):
"""
Create a `Lexer` from a filename.
"""
# Inline imports: the Pygments dependency is optional!
from pygments.util import ClassNotFound
from pygments.lexers import get_lexer_for_filename
try:
pygments_lexer = get_lexer_for_filename(filename)
except ClassNotFound:
return SimpleLexer()
else:
return cls(pygments_lexer.__class__, sync_from_start=sync_from_start) | [
"def",
"from_filename",
"(",
"cls",
",",
"filename",
",",
"sync_from_start",
"=",
"True",
")",
":",
"# Inline imports: the Pygments dependency is optional!",
"from",
"pygments",
".",
"util",
"import",
"ClassNotFound",
"from",
"pygments",
".",
"lexers",
"import",
"get_... | Create a `Lexer` from a filename. | [
"Create",
"a",
"Lexer",
"from",
"a",
"filename",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/lexers.py#L201-L214 | train | 207,348 |
wandb/client | wandb/vendor/wcwidth/wcwidth.py | _bisearch | def _bisearch(ucs, table):
"""
Auxiliary function for binary search in interval table.
:arg int ucs: Ordinal value of unicode character.
:arg list table: List of starting and ending ranges of ordinal values,
in form of ``[(start, end), ...]``.
:rtype: int
:returns: 1 if ordinal value ucs is found within lookup table, else 0.
"""
lbound = 0
ubound = len(table) - 1
if ucs < table[0][0] or ucs > table[ubound][1]:
return 0
while ubound >= lbound:
mid = (lbound + ubound) // 2
if ucs > table[mid][1]:
lbound = mid + 1
elif ucs < table[mid][0]:
ubound = mid - 1
else:
return 1
return 0 | python | def _bisearch(ucs, table):
"""
Auxiliary function for binary search in interval table.
:arg int ucs: Ordinal value of unicode character.
:arg list table: List of starting and ending ranges of ordinal values,
in form of ``[(start, end), ...]``.
:rtype: int
:returns: 1 if ordinal value ucs is found within lookup table, else 0.
"""
lbound = 0
ubound = len(table) - 1
if ucs < table[0][0] or ucs > table[ubound][1]:
return 0
while ubound >= lbound:
mid = (lbound + ubound) // 2
if ucs > table[mid][1]:
lbound = mid + 1
elif ucs < table[mid][0]:
ubound = mid - 1
else:
return 1
return 0 | [
"def",
"_bisearch",
"(",
"ucs",
",",
"table",
")",
":",
"lbound",
"=",
"0",
"ubound",
"=",
"len",
"(",
"table",
")",
"-",
"1",
"if",
"ucs",
"<",
"table",
"[",
"0",
"]",
"[",
"0",
"]",
"or",
"ucs",
">",
"table",
"[",
"ubound",
"]",
"[",
"1",
... | Auxiliary function for binary search in interval table.
:arg int ucs: Ordinal value of unicode character.
:arg list table: List of starting and ending ranges of ordinal values,
in form of ``[(start, end), ...]``.
:rtype: int
:returns: 1 if ordinal value ucs is found within lookup table, else 0. | [
"Auxiliary",
"function",
"for",
"binary",
"search",
"in",
"interval",
"table",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/wcwidth/wcwidth.py#L77-L101 | train | 207,349 |
wandb/client | wandb/vendor/wcwidth/wcwidth.py | wcwidth | def wcwidth(wc):
r"""
Given one unicode character, return its printable length on a terminal.
The wcwidth() function returns 0 if the wc argument has no printable effect
on a terminal (such as NUL '\0'), -1 if wc is not printable, or has an
indeterminate effect on the terminal, such as a control character.
Otherwise, the number of column positions the character occupies on a
graphic terminal (1 or 2) is returned.
The following have a column width of -1:
- C0 control characters (U+001 through U+01F).
- C1 control characters and DEL (U+07F through U+0A0).
The following have a column width of 0:
- Non-spacing and enclosing combining characters (general
category code Mn or Me in the Unicode database).
- NULL (U+0000, 0).
- COMBINING GRAPHEME JOINER (U+034F).
- ZERO WIDTH SPACE (U+200B) through
RIGHT-TO-LEFT MARK (U+200F).
- LINE SEPERATOR (U+2028) and
PARAGRAPH SEPERATOR (U+2029).
- LEFT-TO-RIGHT EMBEDDING (U+202A) through
RIGHT-TO-LEFT OVERRIDE (U+202E).
- WORD JOINER (U+2060) through
INVISIBLE SEPARATOR (U+2063).
The following have a column width of 1:
- SOFT HYPHEN (U+00AD) has a column width of 1.
- All remaining characters (including all printable
ISO 8859-1 and WGL4 characters, Unicode control characters,
etc.) have a column width of 1.
The following have a column width of 2:
- Spacing characters in the East Asian Wide (W) or East Asian
Full-width (F) category as defined in Unicode Technical
Report #11 have a column width of 2.
"""
# pylint: disable=C0103
# Invalid argument name "wc"
ucs = ord(wc)
# NOTE: created by hand, there isn't anything identifiable other than
# general Cf category code to identify these, and some characters in Cf
# category code are of non-zero width.
# pylint: disable=too-many-boolean-expressions
# Too many boolean expressions in if statement (7/5)
if (ucs == 0 or
ucs == 0x034F or
0x200B <= ucs <= 0x200F or
ucs == 0x2028 or
ucs == 0x2029 or
0x202A <= ucs <= 0x202E or
0x2060 <= ucs <= 0x2063):
return 0
# C0/C1 control characters
if ucs < 32 or 0x07F <= ucs < 0x0A0:
return -1
# combining characters with zero width
if _bisearch(ucs, ZERO_WIDTH):
return 0
return 1 + _bisearch(ucs, WIDE_EASTASIAN) | python | def wcwidth(wc):
r"""
Given one unicode character, return its printable length on a terminal.
The wcwidth() function returns 0 if the wc argument has no printable effect
on a terminal (such as NUL '\0'), -1 if wc is not printable, or has an
indeterminate effect on the terminal, such as a control character.
Otherwise, the number of column positions the character occupies on a
graphic terminal (1 or 2) is returned.
The following have a column width of -1:
- C0 control characters (U+001 through U+01F).
- C1 control characters and DEL (U+07F through U+0A0).
The following have a column width of 0:
- Non-spacing and enclosing combining characters (general
category code Mn or Me in the Unicode database).
- NULL (U+0000, 0).
- COMBINING GRAPHEME JOINER (U+034F).
- ZERO WIDTH SPACE (U+200B) through
RIGHT-TO-LEFT MARK (U+200F).
- LINE SEPERATOR (U+2028) and
PARAGRAPH SEPERATOR (U+2029).
- LEFT-TO-RIGHT EMBEDDING (U+202A) through
RIGHT-TO-LEFT OVERRIDE (U+202E).
- WORD JOINER (U+2060) through
INVISIBLE SEPARATOR (U+2063).
The following have a column width of 1:
- SOFT HYPHEN (U+00AD) has a column width of 1.
- All remaining characters (including all printable
ISO 8859-1 and WGL4 characters, Unicode control characters,
etc.) have a column width of 1.
The following have a column width of 2:
- Spacing characters in the East Asian Wide (W) or East Asian
Full-width (F) category as defined in Unicode Technical
Report #11 have a column width of 2.
"""
# pylint: disable=C0103
# Invalid argument name "wc"
ucs = ord(wc)
# NOTE: created by hand, there isn't anything identifiable other than
# general Cf category code to identify these, and some characters in Cf
# category code are of non-zero width.
# pylint: disable=too-many-boolean-expressions
# Too many boolean expressions in if statement (7/5)
if (ucs == 0 or
ucs == 0x034F or
0x200B <= ucs <= 0x200F or
ucs == 0x2028 or
ucs == 0x2029 or
0x202A <= ucs <= 0x202E or
0x2060 <= ucs <= 0x2063):
return 0
# C0/C1 control characters
if ucs < 32 or 0x07F <= ucs < 0x0A0:
return -1
# combining characters with zero width
if _bisearch(ucs, ZERO_WIDTH):
return 0
return 1 + _bisearch(ucs, WIDE_EASTASIAN) | [
"def",
"wcwidth",
"(",
"wc",
")",
":",
"# pylint: disable=C0103",
"# Invalid argument name \"wc\"",
"ucs",
"=",
"ord",
"(",
"wc",
")",
"# NOTE: created by hand, there isn't anything identifiable other than",
"# general Cf category code to identify these, and some characters in ... | r"""
Given one unicode character, return its printable length on a terminal.
The wcwidth() function returns 0 if the wc argument has no printable effect
on a terminal (such as NUL '\0'), -1 if wc is not printable, or has an
indeterminate effect on the terminal, such as a control character.
Otherwise, the number of column positions the character occupies on a
graphic terminal (1 or 2) is returned.
The following have a column width of -1:
- C0 control characters (U+001 through U+01F).
- C1 control characters and DEL (U+07F through U+0A0).
The following have a column width of 0:
- Non-spacing and enclosing combining characters (general
category code Mn or Me in the Unicode database).
- NULL (U+0000, 0).
- COMBINING GRAPHEME JOINER (U+034F).
- ZERO WIDTH SPACE (U+200B) through
RIGHT-TO-LEFT MARK (U+200F).
- LINE SEPERATOR (U+2028) and
PARAGRAPH SEPERATOR (U+2029).
- LEFT-TO-RIGHT EMBEDDING (U+202A) through
RIGHT-TO-LEFT OVERRIDE (U+202E).
- WORD JOINER (U+2060) through
INVISIBLE SEPARATOR (U+2063).
The following have a column width of 1:
- SOFT HYPHEN (U+00AD) has a column width of 1.
- All remaining characters (including all printable
ISO 8859-1 and WGL4 characters, Unicode control characters,
etc.) have a column width of 1.
The following have a column width of 2:
- Spacing characters in the East Asian Wide (W) or East Asian
Full-width (F) category as defined in Unicode Technical
Report #11 have a column width of 2. | [
"r",
"Given",
"one",
"unicode",
"character",
"return",
"its",
"printable",
"length",
"on",
"a",
"terminal",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/wcwidth/wcwidth.py#L104-L182 | train | 207,350 |
wandb/client | wandb/vendor/wcwidth/wcwidth.py | wcswidth | def wcswidth(pwcs, n=None):
"""
Given a unicode string, return its printable length on a terminal.
Return the width, in cells, necessary to display the first ``n``
characters of the unicode string ``pwcs``. When ``n`` is None (default),
return the length of the entire string.
Returns ``-1`` if a non-printable character is encountered.
"""
# pylint: disable=C0103
# Invalid argument name "n"
end = len(pwcs) if n is None else n
idx = slice(0, end)
width = 0
for char in pwcs[idx]:
wcw = wcwidth(char)
if wcw < 0:
return -1
else:
width += wcw
return width | python | def wcswidth(pwcs, n=None):
"""
Given a unicode string, return its printable length on a terminal.
Return the width, in cells, necessary to display the first ``n``
characters of the unicode string ``pwcs``. When ``n`` is None (default),
return the length of the entire string.
Returns ``-1`` if a non-printable character is encountered.
"""
# pylint: disable=C0103
# Invalid argument name "n"
end = len(pwcs) if n is None else n
idx = slice(0, end)
width = 0
for char in pwcs[idx]:
wcw = wcwidth(char)
if wcw < 0:
return -1
else:
width += wcw
return width | [
"def",
"wcswidth",
"(",
"pwcs",
",",
"n",
"=",
"None",
")",
":",
"# pylint: disable=C0103",
"# Invalid argument name \"n\"",
"end",
"=",
"len",
"(",
"pwcs",
")",
"if",
"n",
"is",
"None",
"else",
"n",
"idx",
"=",
"slice",
"(",
"0",
",",
"end",
")"... | Given a unicode string, return its printable length on a terminal.
Return the width, in cells, necessary to display the first ``n``
characters of the unicode string ``pwcs``. When ``n`` is None (default),
return the length of the entire string.
Returns ``-1`` if a non-printable character is encountered. | [
"Given",
"a",
"unicode",
"string",
"return",
"its",
"printable",
"length",
"on",
"a",
"terminal",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/wcwidth/wcwidth.py#L185-L207 | train | 207,351 |
wandb/client | wandb/run_manager.py | RunManager._per_file_event_handler | def _per_file_event_handler(self):
"""Create a Watchdog file event handler that does different things for every file
"""
file_event_handler = PatternMatchingEventHandler()
file_event_handler.on_created = self._on_file_created
file_event_handler.on_modified = self._on_file_modified
file_event_handler.on_moved = self._on_file_moved
file_event_handler._patterns = [
os.path.join(self._watch_dir, os.path.normpath('*'))]
# Ignore hidden files/folders and output.log because we stream it specially
file_event_handler._ignore_patterns = [
'*/.*',
'*.tmp',
os.path.join(self._run.dir, OUTPUT_FNAME)
]
for glob in self._api.settings("ignore_globs"):
file_event_handler._ignore_patterns.append(
os.path.join(self._run.dir, glob))
return file_event_handler | python | def _per_file_event_handler(self):
"""Create a Watchdog file event handler that does different things for every file
"""
file_event_handler = PatternMatchingEventHandler()
file_event_handler.on_created = self._on_file_created
file_event_handler.on_modified = self._on_file_modified
file_event_handler.on_moved = self._on_file_moved
file_event_handler._patterns = [
os.path.join(self._watch_dir, os.path.normpath('*'))]
# Ignore hidden files/folders and output.log because we stream it specially
file_event_handler._ignore_patterns = [
'*/.*',
'*.tmp',
os.path.join(self._run.dir, OUTPUT_FNAME)
]
for glob in self._api.settings("ignore_globs"):
file_event_handler._ignore_patterns.append(
os.path.join(self._run.dir, glob))
return file_event_handler | [
"def",
"_per_file_event_handler",
"(",
"self",
")",
":",
"file_event_handler",
"=",
"PatternMatchingEventHandler",
"(",
")",
"file_event_handler",
".",
"on_created",
"=",
"self",
".",
"_on_file_created",
"file_event_handler",
".",
"on_modified",
"=",
"self",
".",
"_on... | Create a Watchdog file event handler that does different things for every file | [
"Create",
"a",
"Watchdog",
"file",
"event",
"handler",
"that",
"does",
"different",
"things",
"for",
"every",
"file"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L502-L521 | train | 207,352 |
wandb/client | wandb/run_manager.py | RunManager._get_file_event_handler | def _get_file_event_handler(self, file_path, save_name):
"""Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory)
"""
self._file_pusher.update_file(save_name, file_path) # track upload progress
if save_name not in self._file_event_handlers:
if save_name == 'wandb-history.jsonl':
self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-history.jsonl', self._api)
elif save_name == 'wandb-events.jsonl':
self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-events.jsonl', self._api)
elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:
# overwrite the tensorboard but not every reload -- just
# frequently enough to resemble realtime
self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(
file_path, save_name, self._api, self._file_pusher)
# Don't try to stream tensorboard files for now.
# elif 'tfevents' in save_name:
# # TODO: This is hard-coded, but we want to give users control
# # over streaming files (or detect them).
# self._api.get_file_stream_api().set_file_policy(save_name,
# BinaryFilePolicy())
# self._file_event_handlers[save_name] = FileEventHandlerBinaryStream(
# file_path, save_name, self._api)
# Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
# during upload, the request to Google hangs (at least, this is my working
# theory). So for now we defer uploading everything til the end of the run.
# TODO: send wandb-summary during run. One option is to copy to a temporary
# file before uploading.
elif save_name == config.FNAME:
self._file_event_handlers[save_name] = FileEventHandlerConfig(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name == 'wandb-summary.json':
# Load the summary into the syncer process for meta etc to work
self._run.summary.load()
self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())
self._file_event_handlers[save_name] = FileEventHandlerSummary(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name.startswith('media/'):
# Save media files immediately
self._file_event_handlers[save_name] = FileEventHandlerOverwrite(
file_path, save_name, self._api, self._file_pusher)
else:
Handler = FileEventHandlerOverwriteDeferred
for policy, globs in six.iteritems(self._user_file_policies):
if policy == "end":
continue
for g in globs:
if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):
if policy == "live":
Handler = FileEventHandlerThrottledOverwriteMinWait
self._file_event_handlers[save_name] = Handler(
file_path, save_name, self._api, self._file_pusher)
return self._file_event_handlers[save_name] | python | def _get_file_event_handler(self, file_path, save_name):
"""Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory)
"""
self._file_pusher.update_file(save_name, file_path) # track upload progress
if save_name not in self._file_event_handlers:
if save_name == 'wandb-history.jsonl':
self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-history.jsonl', self._api)
elif save_name == 'wandb-events.jsonl':
self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-events.jsonl', self._api)
elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:
# overwrite the tensorboard but not every reload -- just
# frequently enough to resemble realtime
self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(
file_path, save_name, self._api, self._file_pusher)
# Don't try to stream tensorboard files for now.
# elif 'tfevents' in save_name:
# # TODO: This is hard-coded, but we want to give users control
# # over streaming files (or detect them).
# self._api.get_file_stream_api().set_file_policy(save_name,
# BinaryFilePolicy())
# self._file_event_handlers[save_name] = FileEventHandlerBinaryStream(
# file_path, save_name, self._api)
# Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
# during upload, the request to Google hangs (at least, this is my working
# theory). So for now we defer uploading everything til the end of the run.
# TODO: send wandb-summary during run. One option is to copy to a temporary
# file before uploading.
elif save_name == config.FNAME:
self._file_event_handlers[save_name] = FileEventHandlerConfig(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name == 'wandb-summary.json':
# Load the summary into the syncer process for meta etc to work
self._run.summary.load()
self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())
self._file_event_handlers[save_name] = FileEventHandlerSummary(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name.startswith('media/'):
# Save media files immediately
self._file_event_handlers[save_name] = FileEventHandlerOverwrite(
file_path, save_name, self._api, self._file_pusher)
else:
Handler = FileEventHandlerOverwriteDeferred
for policy, globs in six.iteritems(self._user_file_policies):
if policy == "end":
continue
for g in globs:
if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):
if policy == "live":
Handler = FileEventHandlerThrottledOverwriteMinWait
self._file_event_handlers[save_name] = Handler(
file_path, save_name, self._api, self._file_pusher)
return self._file_event_handlers[save_name] | [
"def",
"_get_file_event_handler",
"(",
"self",
",",
"file_path",
",",
"save_name",
")",
":",
"self",
".",
"_file_pusher",
".",
"update_file",
"(",
"save_name",
",",
"file_path",
")",
"# track upload progress",
"if",
"save_name",
"not",
"in",
"self",
".",
"_file_... | Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory) | [
"Get",
"or",
"create",
"an",
"event",
"handler",
"for",
"a",
"particular",
"file",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L611-L668 | train | 207,353 |
wandb/client | wandb/run_manager.py | RunManager.mirror_stdout_stderr | def mirror_stdout_stderr(self):
"""Simple STDOUT and STDERR mirroring used by _init_jupyter"""
# TODO: Ideally we could start collecting logs without pushing
fs_api = self._api.get_file_stream_api()
io_wrap.SimpleTee(sys.stdout, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True))
io_wrap.SimpleTee(sys.stderr, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True, line_prepend='ERROR')) | python | def mirror_stdout_stderr(self):
"""Simple STDOUT and STDERR mirroring used by _init_jupyter"""
# TODO: Ideally we could start collecting logs without pushing
fs_api = self._api.get_file_stream_api()
io_wrap.SimpleTee(sys.stdout, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True))
io_wrap.SimpleTee(sys.stderr, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True, line_prepend='ERROR')) | [
"def",
"mirror_stdout_stderr",
"(",
"self",
")",
":",
"# TODO: Ideally we could start collecting logs without pushing",
"fs_api",
"=",
"self",
".",
"_api",
".",
"get_file_stream_api",
"(",
")",
"io_wrap",
".",
"SimpleTee",
"(",
"sys",
".",
"stdout",
",",
"streaming_lo... | Simple STDOUT and STDERR mirroring used by _init_jupyter | [
"Simple",
"STDOUT",
"and",
"STDERR",
"mirroring",
"used",
"by",
"_init_jupyter"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L672-L679 | train | 207,354 |
wandb/client | wandb/run_manager.py | RunManager._get_stdout_stderr_streams | def _get_stdout_stderr_streams(self):
"""Sets up STDOUT and STDERR streams. Only call this once."""
if six.PY2 or not hasattr(sys.stdout, "buffer"):
if hasattr(sys.stdout, "fileno") and sys.stdout.isatty():
try:
stdout = os.fdopen(sys.stdout.fileno(), "w+", 0)
stderr = os.fdopen(sys.stderr.fileno(), "w+", 0)
# OSError [Errno 22] Invalid argument wandb
except OSError:
stdout = sys.stdout
stderr = sys.stderr
else:
stdout = sys.stdout
stderr = sys.stderr
else: # we write binary so grab the raw I/O objects in python 3
try:
stdout = sys.stdout.buffer.raw
stderr = sys.stderr.buffer.raw
except AttributeError:
# The testing environment and potentially others may have screwed with their
# io so we fallback to raw stdout / err
stdout = sys.stdout.buffer
stderr = sys.stderr.buffer
output_log_path = os.path.join(self._run.dir, OUTPUT_FNAME)
self._output_log = WriteSerializingFile(open(output_log_path, 'wb'))
stdout_streams = [stdout, self._output_log]
stderr_streams = [stderr, self._output_log]
if self._cloud:
# Tee stdout/stderr into our TextOutputStream, which will push lines to the cloud.
fs_api = self._api.get_file_stream_api()
self._stdout_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True)
self._stderr_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, line_prepend='ERROR',
prepend_timestamp=True)
stdout_streams.append(self._stdout_stream)
stderr_streams.append(self._stderr_stream)
return stdout_streams, stderr_streams | python | def _get_stdout_stderr_streams(self):
"""Sets up STDOUT and STDERR streams. Only call this once."""
if six.PY2 or not hasattr(sys.stdout, "buffer"):
if hasattr(sys.stdout, "fileno") and sys.stdout.isatty():
try:
stdout = os.fdopen(sys.stdout.fileno(), "w+", 0)
stderr = os.fdopen(sys.stderr.fileno(), "w+", 0)
# OSError [Errno 22] Invalid argument wandb
except OSError:
stdout = sys.stdout
stderr = sys.stderr
else:
stdout = sys.stdout
stderr = sys.stderr
else: # we write binary so grab the raw I/O objects in python 3
try:
stdout = sys.stdout.buffer.raw
stderr = sys.stderr.buffer.raw
except AttributeError:
# The testing environment and potentially others may have screwed with their
# io so we fallback to raw stdout / err
stdout = sys.stdout.buffer
stderr = sys.stderr.buffer
output_log_path = os.path.join(self._run.dir, OUTPUT_FNAME)
self._output_log = WriteSerializingFile(open(output_log_path, 'wb'))
stdout_streams = [stdout, self._output_log]
stderr_streams = [stderr, self._output_log]
if self._cloud:
# Tee stdout/stderr into our TextOutputStream, which will push lines to the cloud.
fs_api = self._api.get_file_stream_api()
self._stdout_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True)
self._stderr_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, line_prepend='ERROR',
prepend_timestamp=True)
stdout_streams.append(self._stdout_stream)
stderr_streams.append(self._stderr_stream)
return stdout_streams, stderr_streams | [
"def",
"_get_stdout_stderr_streams",
"(",
"self",
")",
":",
"if",
"six",
".",
"PY2",
"or",
"not",
"hasattr",
"(",
"sys",
".",
"stdout",
",",
"\"buffer\"",
")",
":",
"if",
"hasattr",
"(",
"sys",
".",
"stdout",
",",
"\"fileno\"",
")",
"and",
"sys",
".",
... | Sets up STDOUT and STDERR streams. Only call this once. | [
"Sets",
"up",
"STDOUT",
"and",
"STDERR",
"streams",
".",
"Only",
"call",
"this",
"once",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L685-L727 | train | 207,355 |
wandb/client | wandb/run_manager.py | RunManager._close_stdout_stderr_streams | def _close_stdout_stderr_streams(self):
"""Close output-capturing stuff. This also flushes anything left in
the buffers.
"""
# we don't have tee_file's in headless mode
if self._stdout_tee.tee_file is not None:
self._stdout_tee.tee_file.close()
if self._stderr_tee.tee_file is not None:
self._stderr_tee.tee_file.close()
# TODO(adrian): we should close these even in headless mode
# but in python 2 the read thread doesn't stop on its own
# for some reason
self._stdout_tee.close_join()
self._stderr_tee.close_join()
if self._cloud:
# not set in dry run mode
self._stdout_stream.close()
self._stderr_stream.close()
self._output_log.f.close()
self._output_log = None | python | def _close_stdout_stderr_streams(self):
"""Close output-capturing stuff. This also flushes anything left in
the buffers.
"""
# we don't have tee_file's in headless mode
if self._stdout_tee.tee_file is not None:
self._stdout_tee.tee_file.close()
if self._stderr_tee.tee_file is not None:
self._stderr_tee.tee_file.close()
# TODO(adrian): we should close these even in headless mode
# but in python 2 the read thread doesn't stop on its own
# for some reason
self._stdout_tee.close_join()
self._stderr_tee.close_join()
if self._cloud:
# not set in dry run mode
self._stdout_stream.close()
self._stderr_stream.close()
self._output_log.f.close()
self._output_log = None | [
"def",
"_close_stdout_stderr_streams",
"(",
"self",
")",
":",
"# we don't have tee_file's in headless mode",
"if",
"self",
".",
"_stdout_tee",
".",
"tee_file",
"is",
"not",
"None",
":",
"self",
".",
"_stdout_tee",
".",
"tee_file",
".",
"close",
"(",
")",
"if",
"... | Close output-capturing stuff. This also flushes anything left in
the buffers. | [
"Close",
"output",
"-",
"capturing",
"stuff",
".",
"This",
"also",
"flushes",
"anything",
"left",
"in",
"the",
"buffers",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L729-L752 | train | 207,356 |
wandb/client | wandb/run_manager.py | RunManager.shutdown | def shutdown(self, exitcode=0):
"""Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor"""
logger.info("shutting down system stats and metadata service")
self._system_stats.shutdown()
self._meta.shutdown()
if self._cloud:
logger.info("stopping streaming files and file change observer")
self._stop_file_observer()
self._end_file_syncing(exitcode)
self._run.history.close() | python | def shutdown(self, exitcode=0):
"""Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor"""
logger.info("shutting down system stats and metadata service")
self._system_stats.shutdown()
self._meta.shutdown()
if self._cloud:
logger.info("stopping streaming files and file change observer")
self._stop_file_observer()
self._end_file_syncing(exitcode)
self._run.history.close() | [
"def",
"shutdown",
"(",
"self",
",",
"exitcode",
"=",
"0",
")",
":",
"logger",
".",
"info",
"(",
"\"shutting down system stats and metadata service\"",
")",
"self",
".",
"_system_stats",
".",
"shutdown",
"(",
")",
"self",
".",
"_meta",
".",
"shutdown",
"(",
... | Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor | [
"Stops",
"system",
"stats",
"streaming",
"handlers",
"and",
"uploads",
"files",
"without",
"output",
"used",
"by",
"wandb",
".",
"monitor"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L924-L935 | train | 207,357 |
wandb/client | wandb/run_manager.py | RunManager.run_user_process | def run_user_process(self, program, args, env):
"""Launch a user process, capture its output, and sync its files to the backend.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
if sys.platform == "win32":
# PTYs don't work in windows so we use pipes.
self._stdout_tee = io_wrap.Tee.pipe(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pipe(*stderr_streams)
# Seems like the following actually isn't necessary on Windows
# TODO(adrian): we may need to do the following if we use pipes instead of PTYs
# because Python on Unix doesn't like writing UTF-8 to files
# tell child python interpreters we accept utf-8
# env['PYTHONIOENCODING'] = 'UTF-8'
else:
self._stdout_tee = io_wrap.Tee.pty(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pty(*stderr_streams)
command = [program] + list(args)
runner = util.find_runner(program)
if runner:
command = runner + command
command = ' '.join(six.moves.shlex_quote(arg) for arg in command)
self._stdout_stream.write_string(command + "\n\n")
try:
self.proc = subprocess.Popen(
command,
env=env,
stdout=self._stdout_tee.tee_file,
stderr=self._stderr_tee.tee_file,
shell=True,
)
self._run.pid = self.proc.pid
except (OSError, IOError):
raise Exception('Could not find program: %s' % command)
self._sync_etc() | python | def run_user_process(self, program, args, env):
"""Launch a user process, capture its output, and sync its files to the backend.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
if sys.platform == "win32":
# PTYs don't work in windows so we use pipes.
self._stdout_tee = io_wrap.Tee.pipe(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pipe(*stderr_streams)
# Seems like the following actually isn't necessary on Windows
# TODO(adrian): we may need to do the following if we use pipes instead of PTYs
# because Python on Unix doesn't like writing UTF-8 to files
# tell child python interpreters we accept utf-8
# env['PYTHONIOENCODING'] = 'UTF-8'
else:
self._stdout_tee = io_wrap.Tee.pty(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pty(*stderr_streams)
command = [program] + list(args)
runner = util.find_runner(program)
if runner:
command = runner + command
command = ' '.join(six.moves.shlex_quote(arg) for arg in command)
self._stdout_stream.write_string(command + "\n\n")
try:
self.proc = subprocess.Popen(
command,
env=env,
stdout=self._stdout_tee.tee_file,
stderr=self._stderr_tee.tee_file,
shell=True,
)
self._run.pid = self.proc.pid
except (OSError, IOError):
raise Exception('Could not find program: %s' % command)
self._sync_etc() | [
"def",
"run_user_process",
"(",
"self",
",",
"program",
",",
"args",
",",
"env",
")",
":",
"stdout_streams",
",",
"stderr_streams",
"=",
"self",
".",
"_get_stdout_stderr_streams",
"(",
")",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
":",
"# PTYs don't wo... | Launch a user process, capture its output, and sync its files to the backend.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc. | [
"Launch",
"a",
"user",
"process",
"capture",
"its",
"output",
"and",
"sync",
"its",
"files",
"to",
"the",
"backend",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L938-L978 | train | 207,358 |
wandb/client | wandb/run_manager.py | RunManager.wrap_existing_process | def wrap_existing_process(self, pid, stdout_read_fd, stderr_read_fd, port=None):
"""Do syncing, etc. for an already-running process.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_read_file = os.fdopen(stdout_read_fd, 'rb')
stderr_read_file = os.fdopen(stderr_read_fd, 'rb')
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
self._stdout_tee = io_wrap.Tee(stdout_read_file, *stdout_streams)
self._stderr_tee = io_wrap.Tee(stderr_read_file, *stderr_streams)
self.proc = Process(pid)
self._run.pid = pid
logger.info("wrapping existing process %i" % pid)
try:
self.init_run()
except LaunchError as e:
logger.exception("catostrophic launch error")
wandb.termerror(str(e))
util.sentry_exc(e)
self._socket.launch_error()
return
if io_wrap.SIGWINCH_HANDLER is not None:
# SIGWINCH_HANDLER (maybe) gets set in self.init_run()
io_wrap.SIGWINCH_HANDLER.add_fd(stdout_read_fd)
io_wrap.SIGWINCH_HANDLER.add_fd(stderr_read_fd)
# Signal the main process that we're all hooked up
logger.info("informing user process we are ready to proceed")
self._socket.ready()
self._sync_etc(headless=True) | python | def wrap_existing_process(self, pid, stdout_read_fd, stderr_read_fd, port=None):
"""Do syncing, etc. for an already-running process.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_read_file = os.fdopen(stdout_read_fd, 'rb')
stderr_read_file = os.fdopen(stderr_read_fd, 'rb')
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
self._stdout_tee = io_wrap.Tee(stdout_read_file, *stdout_streams)
self._stderr_tee = io_wrap.Tee(stderr_read_file, *stderr_streams)
self.proc = Process(pid)
self._run.pid = pid
logger.info("wrapping existing process %i" % pid)
try:
self.init_run()
except LaunchError as e:
logger.exception("catostrophic launch error")
wandb.termerror(str(e))
util.sentry_exc(e)
self._socket.launch_error()
return
if io_wrap.SIGWINCH_HANDLER is not None:
# SIGWINCH_HANDLER (maybe) gets set in self.init_run()
io_wrap.SIGWINCH_HANDLER.add_fd(stdout_read_fd)
io_wrap.SIGWINCH_HANDLER.add_fd(stderr_read_fd)
# Signal the main process that we're all hooked up
logger.info("informing user process we are ready to proceed")
self._socket.ready()
self._sync_etc(headless=True) | [
"def",
"wrap_existing_process",
"(",
"self",
",",
"pid",
",",
"stdout_read_fd",
",",
"stderr_read_fd",
",",
"port",
"=",
"None",
")",
":",
"stdout_read_file",
"=",
"os",
".",
"fdopen",
"(",
"stdout_read_fd",
",",
"'rb'",
")",
"stderr_read_file",
"=",
"os",
"... | Do syncing, etc. for an already-running process.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc. | [
"Do",
"syncing",
"etc",
".",
"for",
"an",
"already",
"-",
"running",
"process",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L980-L1014 | train | 207,359 |
wandb/client | wandb/vendor/prompt_toolkit/layout/utils.py | token_list_len | def token_list_len(tokenlist):
"""
Return the amount of characters in this token list.
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples.
"""
ZeroWidthEscape = Token.ZeroWidthEscape
return sum(len(item[1]) for item in tokenlist if item[0] != ZeroWidthEscape) | python | def token_list_len(tokenlist):
"""
Return the amount of characters in this token list.
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples.
"""
ZeroWidthEscape = Token.ZeroWidthEscape
return sum(len(item[1]) for item in tokenlist if item[0] != ZeroWidthEscape) | [
"def",
"token_list_len",
"(",
"tokenlist",
")",
":",
"ZeroWidthEscape",
"=",
"Token",
".",
"ZeroWidthEscape",
"return",
"sum",
"(",
"len",
"(",
"item",
"[",
"1",
"]",
")",
"for",
"item",
"in",
"tokenlist",
"if",
"item",
"[",
"0",
"]",
"!=",
"ZeroWidthEsc... | Return the amount of characters in this token list.
:param tokenlist: List of (token, text) or (token, text, mouse_handler)
tuples. | [
"Return",
"the",
"amount",
"of",
"characters",
"in",
"this",
"token",
"list",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/utils.py#L16-L24 | train | 207,360 |
wandb/client | wandb/vendor/prompt_toolkit/layout/utils.py | token_list_to_text | def token_list_to_text(tokenlist):
"""
Concatenate all the text parts again.
"""
ZeroWidthEscape = Token.ZeroWidthEscape
return ''.join(item[1] for item in tokenlist if item[0] != ZeroWidthEscape) | python | def token_list_to_text(tokenlist):
"""
Concatenate all the text parts again.
"""
ZeroWidthEscape = Token.ZeroWidthEscape
return ''.join(item[1] for item in tokenlist if item[0] != ZeroWidthEscape) | [
"def",
"token_list_to_text",
"(",
"tokenlist",
")",
":",
"ZeroWidthEscape",
"=",
"Token",
".",
"ZeroWidthEscape",
"return",
"''",
".",
"join",
"(",
"item",
"[",
"1",
"]",
"for",
"item",
"in",
"tokenlist",
"if",
"item",
"[",
"0",
"]",
"!=",
"ZeroWidthEscape... | Concatenate all the text parts again. | [
"Concatenate",
"all",
"the",
"text",
"parts",
"again",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/utils.py#L39-L44 | train | 207,361 |
wandb/client | wandb/vendor/prompt_toolkit/layout/utils.py | iter_token_lines | def iter_token_lines(tokenlist):
"""
Iterator that yields tokenlists for each line.
"""
line = []
for token, c in explode_tokens(tokenlist):
line.append((token, c))
if c == '\n':
yield line
line = []
yield line | python | def iter_token_lines(tokenlist):
"""
Iterator that yields tokenlists for each line.
"""
line = []
for token, c in explode_tokens(tokenlist):
line.append((token, c))
if c == '\n':
yield line
line = []
yield line | [
"def",
"iter_token_lines",
"(",
"tokenlist",
")",
":",
"line",
"=",
"[",
"]",
"for",
"token",
",",
"c",
"in",
"explode_tokens",
"(",
"tokenlist",
")",
":",
"line",
".",
"append",
"(",
"(",
"token",
",",
"c",
")",
")",
"if",
"c",
"==",
"'\\n'",
":",... | Iterator that yields tokenlists for each line. | [
"Iterator",
"that",
"yields",
"tokenlists",
"for",
"each",
"line",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/utils.py#L47-L59 | train | 207,362 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/input_processor.py | InputProcessor._process | def _process(self):
"""
Coroutine implementing the key match algorithm. Key strokes are sent
into this generator, and it calls the appropriate handlers.
"""
buffer = self.key_buffer
retry = False
while True:
if retry:
retry = False
else:
buffer.append((yield))
# If we have some key presses, check for matches.
if buffer:
is_prefix_of_longer_match = self._is_prefix_of_longer_match(buffer)
matches = self._get_matches(buffer)
# When eager matches were found, give priority to them and also
# ignore all the longer matches.
eager_matches = [m for m in matches if m.eager(self._cli_ref())]
if eager_matches:
matches = eager_matches
is_prefix_of_longer_match = False
# Exact matches found, call handler.
if not is_prefix_of_longer_match and matches:
self._call_handler(matches[-1], key_sequence=buffer[:])
del buffer[:] # Keep reference.
# No match found.
elif not is_prefix_of_longer_match and not matches:
retry = True
found = False
# Loop over the input, try longest match first and shift.
for i in range(len(buffer), 0, -1):
matches = self._get_matches(buffer[:i])
if matches:
self._call_handler(matches[-1], key_sequence=buffer[:i])
del buffer[:i]
found = True
break
if not found:
del buffer[:1] | python | def _process(self):
"""
Coroutine implementing the key match algorithm. Key strokes are sent
into this generator, and it calls the appropriate handlers.
"""
buffer = self.key_buffer
retry = False
while True:
if retry:
retry = False
else:
buffer.append((yield))
# If we have some key presses, check for matches.
if buffer:
is_prefix_of_longer_match = self._is_prefix_of_longer_match(buffer)
matches = self._get_matches(buffer)
# When eager matches were found, give priority to them and also
# ignore all the longer matches.
eager_matches = [m for m in matches if m.eager(self._cli_ref())]
if eager_matches:
matches = eager_matches
is_prefix_of_longer_match = False
# Exact matches found, call handler.
if not is_prefix_of_longer_match and matches:
self._call_handler(matches[-1], key_sequence=buffer[:])
del buffer[:] # Keep reference.
# No match found.
elif not is_prefix_of_longer_match and not matches:
retry = True
found = False
# Loop over the input, try longest match first and shift.
for i in range(len(buffer), 0, -1):
matches = self._get_matches(buffer[:i])
if matches:
self._call_handler(matches[-1], key_sequence=buffer[:i])
del buffer[:i]
found = True
break
if not found:
del buffer[:1] | [
"def",
"_process",
"(",
"self",
")",
":",
"buffer",
"=",
"self",
".",
"key_buffer",
"retry",
"=",
"False",
"while",
"True",
":",
"if",
"retry",
":",
"retry",
"=",
"False",
"else",
":",
"buffer",
".",
"append",
"(",
"(",
"yield",
")",
")",
"# If we ha... | Coroutine implementing the key match algorithm. Key strokes are sent
into this generator, and it calls the appropriate handlers. | [
"Coroutine",
"implementing",
"the",
"key",
"match",
"algorithm",
".",
"Key",
"strokes",
"are",
"sent",
"into",
"this",
"generator",
"and",
"it",
"calls",
"the",
"appropriate",
"handlers",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/input_processor.py#L147-L194 | train | 207,363 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/input_processor.py | KeyPressEvent.arg | def arg(self):
"""
Repetition argument.
"""
if self._arg == '-':
return -1
result = int(self._arg or 1)
# Don't exceed a million.
if int(result) >= 1000000:
result = 1
return result | python | def arg(self):
"""
Repetition argument.
"""
if self._arg == '-':
return -1
result = int(self._arg or 1)
# Don't exceed a million.
if int(result) >= 1000000:
result = 1
return result | [
"def",
"arg",
"(",
"self",
")",
":",
"if",
"self",
".",
"_arg",
"==",
"'-'",
":",
"return",
"-",
"1",
"result",
"=",
"int",
"(",
"self",
".",
"_arg",
"or",
"1",
")",
"# Don't exceed a million.",
"if",
"int",
"(",
"result",
")",
">=",
"1000000",
":"... | Repetition argument. | [
"Repetition",
"argument",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/input_processor.py#L333-L346 | train | 207,364 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/input_processor.py | KeyPressEvent.append_to_arg_count | def append_to_arg_count(self, data):
"""
Add digit to the input argument.
:param data: the typed digit as string
"""
assert data in '-0123456789'
current = self._arg
if data == '-':
assert current is None or current == '-'
result = data
elif current is None:
result = data
else:
result = "%s%s" % (current, data)
self.input_processor.arg = result | python | def append_to_arg_count(self, data):
"""
Add digit to the input argument.
:param data: the typed digit as string
"""
assert data in '-0123456789'
current = self._arg
if data == '-':
assert current is None or current == '-'
result = data
elif current is None:
result = data
else:
result = "%s%s" % (current, data)
self.input_processor.arg = result | [
"def",
"append_to_arg_count",
"(",
"self",
",",
"data",
")",
":",
"assert",
"data",
"in",
"'-0123456789'",
"current",
"=",
"self",
".",
"_arg",
"if",
"data",
"==",
"'-'",
":",
"assert",
"current",
"is",
"None",
"or",
"current",
"==",
"'-'",
"result",
"="... | Add digit to the input argument.
:param data: the typed digit as string | [
"Add",
"digit",
"to",
"the",
"input",
"argument",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/input_processor.py#L355-L372 | train | 207,365 |
wandb/client | wandb/vendor/prompt_toolkit/terminal/win32_input.py | ConsoleInputReader._get_keys | def _get_keys(self, read, input_records):
"""
Generator that yields `KeyPress` objects from the input records.
"""
for i in range(read.value):
ir = input_records[i]
# Get the right EventType from the EVENT_RECORD.
# (For some reason the Windows console application 'cmder'
# [http://gooseberrycreative.com/cmder/] can return '0' for
# ir.EventType. -- Just ignore that.)
if ir.EventType in EventTypes:
ev = getattr(ir.Event, EventTypes[ir.EventType])
# Process if this is a key event. (We also have mouse, menu and
# focus events.)
if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:
for key_press in self._event_to_key_presses(ev):
yield key_press
elif type(ev) == MOUSE_EVENT_RECORD:
for key_press in self._handle_mouse(ev):
yield key_press | python | def _get_keys(self, read, input_records):
"""
Generator that yields `KeyPress` objects from the input records.
"""
for i in range(read.value):
ir = input_records[i]
# Get the right EventType from the EVENT_RECORD.
# (For some reason the Windows console application 'cmder'
# [http://gooseberrycreative.com/cmder/] can return '0' for
# ir.EventType. -- Just ignore that.)
if ir.EventType in EventTypes:
ev = getattr(ir.Event, EventTypes[ir.EventType])
# Process if this is a key event. (We also have mouse, menu and
# focus events.)
if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:
for key_press in self._event_to_key_presses(ev):
yield key_press
elif type(ev) == MOUSE_EVENT_RECORD:
for key_press in self._handle_mouse(ev):
yield key_press | [
"def",
"_get_keys",
"(",
"self",
",",
"read",
",",
"input_records",
")",
":",
"for",
"i",
"in",
"range",
"(",
"read",
".",
"value",
")",
":",
"ir",
"=",
"input_records",
"[",
"i",
"]",
"# Get the right EventType from the EVENT_RECORD.",
"# (For some reason the W... | Generator that yields `KeyPress` objects from the input records. | [
"Generator",
"that",
"yields",
"KeyPress",
"objects",
"from",
"the",
"input",
"records",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/terminal/win32_input.py#L167-L189 | train | 207,366 |
def _event_to_key_presses(self, ev):
    """
    Translate one Win32 `KEY_EVENT_RECORD` into a list of `KeyPress`
    instances.

    :param ev: a `KEY_EVENT_RECORD` for which `ev.KeyDown` is true.
    :returns: a list of zero, one or two `KeyPress` objects.  (Two when
        an Escape prefix is prepended, e.g. for left-Alt or
        Control-Enter combinations; zero when the event maps to nothing.)
    """
    assert type(ev) == KEY_EVENT_RECORD and ev.KeyDown

    result = None

    u_char = ev.uChar.UnicodeChar
    ascii_char = u_char.encode('utf-8')

    # NOTE: We don't use `ev.uChar.AsciiChar`. That appears to be latin-1
    #       encoded. See also:
    #       https://github.com/ipython/ipython/issues/10004
    #       https://github.com/jonathanslenders/python-prompt-toolkit/issues/389

    if u_char == '\x00':
        # No printable character: a special key (arrows, F-keys, ...).
        # Look it up by virtual key code; unknown codes are dropped.
        if ev.VirtualKeyCode in self.keycodes:
            result = KeyPress(self.keycodes[ev.VirtualKeyCode], '')
    else:
        if ascii_char in self.mappings:
            if self.mappings[ascii_char] == Keys.ControlJ:
                # The Enter key arrives as '\r'; store '\n' as the data so
                # it matches the unix ControlJ convention.
                u_char = '\n'
            result = KeyPress(self.mappings[ascii_char], u_char)
        else:
            # Plain printable character.
            result = KeyPress(u_char, u_char)

    # Correctly handle Control-Arrow keys.
    if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
            ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result:
        if result.key == Keys.Left:
            result.key = Keys.ControlLeft

        if result.key == Keys.Right:
            result.key = Keys.ControlRight

        if result.key == Keys.Up:
            result.key = Keys.ControlUp

        if result.key == Keys.Down:
            result.key = Keys.ControlDown

    # Turn 'Tab' into 'BackTab' when shift was pressed.
    if ev.ControlKeyState & self.SHIFT_PRESSED and result:
        if result.key == Keys.Tab:
            result.key = Keys.BackTab

    # Turn 'Space' into 'ControlSpace' when control was pressed.
    if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
            ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and result.data == ' ':
        result = KeyPress(Keys.ControlSpace, ' ')

    # Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot
    # detect this combination. But it's really practical on Windows.)
    if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
            ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and \
            result.key == Keys.ControlJ:
        return [KeyPress(Keys.Escape, ''), result]

    # Return result. If alt was pressed, prefix the result with an
    # 'Escape' key, just like unix VT100 terminals do.

    # NOTE: Only replace the left alt with escape. The right alt key often
    #       acts as altgr and is used in many non US keyboard layouts for
    #       typing some special characters, like a backslash. We don't want
    #       all backslashes to be prefixed with escape. (Esc-\ has a
    #       meaning in E-macs, for instance.)
    if result:
        meta_pressed = ev.ControlKeyState & self.LEFT_ALT_PRESSED

        if meta_pressed:
            return [KeyPress(Keys.Escape, ''), result]
        else:
            return [result]

    else:
        return []
def _handle_mouse(self, ev):
    """
    Translate a Win32 mouse event into a list of `KeyPress` objects.

    Only a press of the left mouse button is handled; any other event
    yields an empty list.
    """
    FROM_LEFT_1ST_BUTTON_PRESSED = 0x1

    # Anything but a left-button press is ignored.
    if ev.ButtonState != FROM_LEFT_1ST_BUTTON_PRESSED:
        return []

    # A single press produces both a mouse-down and a mouse-up event,
    # each encoded as "<type>;<x>;<y>" in the KeyPress data.
    x = str(ev.MousePosition.X)
    y = str(ev.MousePosition.Y)

    presses = []
    for kind in (MouseEventType.MOUSE_DOWN, MouseEventType.MOUSE_UP):
        presses.append(KeyPress(Keys.WindowsMouseEvent, ';'.join([kind, x, y])))
    return presses
def tokenize_regex(input):
    """
    Takes a string, representing a regular expression as input, and tokenizes
    it.

    :param input: string, representing a regular expression.
    :returns: List of tokens.
    :raises Exception: when some part of the input cannot be tokenized.
    """
    # Regular expression for tokenizing other regular expressions.
    #
    # BUGFIX notes (this pattern is compiled with re.VERBOSE):
    # - '#' must be escaped as '\#' inside the pattern, otherwise it starts
    #   a comment and swallows the rest of the line (the original
    #   '\(\?#[^)]*\)' alternative silently broke '(?=' tokenization too).
    # - The original '\(?:', '\(?[iLmsux]' and '\(?P=...' were missing the
    #   escape on '?', so '(?:' groups never tokenized as one token.
    # - The original '\?\?\ ' had a trailing escaped space, so a non-greedy
    #   '??' could never match.
    # - Flag and back-reference alternatives must come before the bare '\('
    #   alternative, or '(' wins first.
    p = re.compile(r'''^(
        \(\?P\<[a-zA-Z0-9_-]+\>  | # Start of named group.
        \(\?\#[^)]*\)            | # Comment
        \(\?=                    | # Start of lookahead assertion
        \(\?!                    | # Start of negative lookahead assertion
        \(\?<=                   | # If preceded by.
        \(\?<                    | # If not preceded by.
        \(\?:                    | # Start of group. (non capturing.)
        \(\?P=[a-zA-Z]+\)        | # Back reference to named group
        \(\?[iLmsux]             | # Flags.
        \(                       | # Start of group.
        \)                       | # End of group.
        \{[^{}]*\}               | # Repetition
        \*\? | \+\? | \?\?       | # Non greedy repetition.
        \* | \+ | \?             | # Repetition
        \#.*\n                   | # Comment
        \\. |

        # Character group.
        \[
            ( [^\]\\]  |  \\.)*
        \] |

        [^(){}]                  |
        .
    )''', re.VERBOSE)

    tokens = []

    while input:
        m = p.match(input)
        if m:
            token, input = input[:m.end()], input[m.end():]
            # Whitespace between tokens is not significant; drop it.
            if not token.isspace():
                tokens.append(token)
        else:
            raise Exception('Could not tokenize input regex.')

    return tokens
def parse_regex(regex_tokens):
    """
    Takes a list of tokens from the tokenizer, and returns a parse tree.

    :param regex_tokens: list of token strings, as produced by
        `tokenize_regex`.
    :returns: the root node of the parse tree (a `Sequence`, `Any`,
        `Variable`, `Repeat`, `Lookahead` or `Regex` node).
    :raises Exception: on unsupported constructs, nothing to repeat, or
        unbalanced parentheses.
    """
    # We add a closing brace because that represents the final pop of the stack.
    tokens = [')'] + regex_tokens[::-1]

    def wrap(lst):
        """ Turn list into sequence when it contains several items. """
        if len(lst) == 1:
            return lst[0]
        else:
            return Sequence(lst)

    def _parse():
        or_list = []
        result = []

        def wrapped_result():
            if or_list == []:
                return wrap(result)
            else:
                or_list.append(result)
                return Any([wrap(i) for i in or_list])

        while tokens:
            t = tokens.pop()

            if t.startswith('(?P<'):
                # Named group: parse the group body recursively.
                variable = Variable(_parse(), varname=t[4:-1])
                result.append(variable)

            elif t in ('*', '*?'):
                greedy = (t == '*')
                result[-1] = Repeat(result[-1], greedy=greedy)

            elif t in ('+', '+?'):
                greedy = (t == '+')
                result[-1] = Repeat(result[-1], min_repeat=1, greedy=greedy)

            elif t in ('?', '??'):
                if result == []:
                    raise Exception('Nothing to repeat.' + repr(tokens))
                else:
                    greedy = (t == '?')
                    result[-1] = Repeat(result[-1], min_repeat=0, max_repeat=1, greedy=greedy)

            elif t == '|':
                or_list.append(result)
                result = []

            elif t in ('(', '(?:'):
                result.append(_parse())

            elif t == '(?!':
                result.append(Lookahead(_parse(), negative=True))

            elif t == '(?=':
                result.append(Lookahead(_parse(), negative=False))

            elif t == ')':
                return wrapped_result()

            elif t.startswith('#'):
                # Regex comment token: ignore.
                pass

            elif t.startswith('{'):
                # TODO: implement!
                # BUGFIX: the original raised via '...' % t on a string with
                # no conversion specifier, which produced a TypeError
                # ("not all arguments converted") instead of this Exception.
                raise Exception('%s-style repetition not yet supported' % t)

            elif t.startswith('(?'):
                raise Exception('%r not supported' % t)

            elif t.isspace():
                pass
            else:
                result.append(Regex(t))

        raise Exception("Expecting ')' token")

    result = _parse()

    if len(tokens) != 0:
        raise Exception("Unmatched parentheses.")
    else:
        return result
def _divide_heigths(self, cli, write_position):
    """
    Return the heights for all rows.
    Or None when there is not enough space.

    :param cli: the active CommandLineInterface.
    :param write_position: describes the area available to this HSplit
        (``width``, ``height`` and ``extended_height`` are read here).
    :returns: list of ints (one height per child), ``[]`` when there are
        no children, or ``None`` when the minimum heights don't fit.
    """
    if not self.children:
        return []

    # Calculate heights.
    given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None

    def get_dimension_for_child(c, index):
        # An explicit dimension from `get_dimensions` overrides what the
        # child itself prefers.
        if given_dimensions and given_dimensions[index] is not None:
            return given_dimensions[index]
        else:
            return c.preferred_height(cli, write_position.width, write_position.extended_height)

    dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)]

    # Sum dimensions
    sum_dimensions = sum_layout_dimensions(dimensions)

    # If there is not enough space for both.
    # Don't do anything.
    if sum_dimensions.min > write_position.extended_height:
        return

    # Find optimal sizes. (Start with minimal size, increase until we cover
    # the whole height.)
    sizes = [d.min for d in dimensions]

    # Hand out extra rows one at a time, visiting children according to
    # their weights.
    child_generator = take_using_weights(
        items=list(range(len(dimensions))),
        weights=[d.weight for d in dimensions])

    i = next(child_generator)

    while sum(sizes) < min(write_position.extended_height, sum_dimensions.preferred):
        # Increase until we meet at least the 'preferred' size.
        if sizes[i] < dimensions[i].preferred:
            sizes[i] += 1
        i = next(child_generator)

    # NOTE(review): when the CLI is returning/exiting/aborting, growth
    # stops at 'preferred' instead of filling up to 'max' — presumably so
    # the final render doesn't stretch over the whole screen; confirm.
    if not any([cli.is_returning, cli.is_exiting, cli.is_aborting]):
        while sum(sizes) < min(write_position.height, sum_dimensions.max):
            # Increase until we use all the available space. (or until "max")
            if sizes[i] < dimensions[i].max:
                sizes[i] += 1
            i = next(child_generator)

    return sizes
def _divide_widths(self, cli, width):
    """
    Compute the width to allocate to every child column.

    :returns: list of ints (one width per child), ``[]`` when there are
        no children, or ``None`` when the minimum widths don't fit in
        the available space.
    """
    if not self.children:
        return []

    # Ask every child for its dimension, unless an explicit one was
    # supplied through `get_dimensions`.
    explicit = self.get_dimensions(cli) if self.get_dimensions else None

    def dimension_at(index, child):
        if explicit and explicit[index] is not None:
            return explicit[index]
        return child.preferred_width(cli, width)

    dimensions = [dimension_at(index, child)
                  for index, child in enumerate(self.children)]
    summed = sum_layout_dimensions(dimensions)

    # Bail out when even the minimum sizes don't fit.
    if summed.min > width:
        return

    # Start from the minimum widths, then hand out extra columns one at a
    # time (weighted per child): first up to 'preferred', then up to 'max'.
    sizes = [d.min for d in dimensions]

    chooser = take_using_weights(
        items=list(range(len(dimensions))),
        weights=[d.weight for d in dimensions])
    index = next(chooser)

    preferred_target = min(width, summed.preferred)
    while sum(sizes) < preferred_target:
        if sizes[index] < dimensions[index].preferred:
            sizes[index] += 1
        index = next(chooser)

    max_target = min(width, summed.max)
    while sum(sizes) < max_target:
        if sizes[index] < dimensions[index].max:
            sizes[index] += 1
        index = next(chooser)

    return sizes
def input_line_to_visible_line(self):
    """
    Return a dictionary that maps line numbers of the input buffer to
    screen rows.  When an input line wraps over several screen rows, the
    topmost row is the one recorded.
    """
    mapping = {}
    for screen_row, buffer_line in self.visible_line_to_input_line.items():
        current = mapping.get(buffer_line)
        if current is None or screen_row < current:
            mapping[buffer_line] = screen_row
    return mapping
def last_visible_line(self, before_scroll_offset=False):
    """
    Like `first_visible_line`, but return the last visible line instead.

    :param before_scroll_offset: when True, skip the rows that fall
        inside the bottom scroll offset.
    """
    index = -1
    if before_scroll_offset:
        index -= self.applied_scroll_offsets.bottom
    return self.displayed_lines[index]
def center_visible_line(self, before_scroll_offset=False,
                        after_scroll_offset=False):
    """
    Like `first_visible_line`, but return the line halfway between the
    first and the last visible line.
    """
    top = self.first_visible_line(after_scroll_offset)
    bottom = self.last_visible_line(before_scroll_offset)
    return top + (bottom - top) // 2
def _merge_dimensions(dimension, preferred=None, dont_extend=False):
    """
    Combine the `Window`'s own dimension with the preferred size reported
    by its `UIControl`, and return the `LayoutDimension` to report to the
    parent container.

    :param dimension: `LayoutDimension` given to the Window, or None.
    :param preferred: preferred size reported by the UIControl, or None.
    :param dont_extend: when True, cap the maximum at the preferred size.
    """
    dimension = dimension or LayoutDimension()

    # A preferred size given explicitly to the Window wins over whatever
    # the UIControl reported.
    if dimension.preferred_specified:
        preferred = dimension.preferred

    # Clamp the control's preferred size to the Window's min/max bounds.
    if preferred is not None:
        if dimension.max:
            preferred = min(preferred, dimension.max)
        if dimension.min:
            preferred = max(preferred, dimension.min)

    # With `dont_extend`, the preferred size also acts as the maximum.
    max_ = dimension.max
    if dont_extend and preferred is not None:
        max_ = min(dimension.max, preferred)

    return LayoutDimension(
        min=dimension.min, max=max_,
        preferred=preferred, weight=dimension.weight)
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | Window._get_ui_content | def _get_ui_content(self, cli, width, height):
"""
Create a `UIContent` instance.
"""
def get_content():
return self.content.create_content(cli, width=width, height=height)
key = (cli.render_counter, width, height)
return self._ui_content_cache.get(key, get_content) | python | def _get_ui_content(self, cli, width, height):
"""
Create a `UIContent` instance.
"""
def get_content():
return self.content.create_content(cli, width=width, height=height)
key = (cli.render_counter, width, height)
return self._ui_content_cache.get(key, get_content) | [
"def",
"_get_ui_content",
"(",
"self",
",",
"cli",
",",
"width",
",",
"height",
")",
":",
"def",
"get_content",
"(",
")",
":",
"return",
"self",
".",
"content",
".",
"create_content",
"(",
"cli",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height"... | Create a `UIContent` instance. | [
"Create",
"a",
"UIContent",
"instance",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1037-L1045 | train | 207,377 |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | Window._get_digraph_char | def _get_digraph_char(self, cli):
" Return `False`, or the Digraph symbol to be used. "
if cli.quoted_insert:
return '^'
if cli.vi_state.waiting_for_digraph:
if cli.vi_state.digraph_symbol1:
return cli.vi_state.digraph_symbol1
return '?'
return False | python | def _get_digraph_char(self, cli):
" Return `False`, or the Digraph symbol to be used. "
if cli.quoted_insert:
return '^'
if cli.vi_state.waiting_for_digraph:
if cli.vi_state.digraph_symbol1:
return cli.vi_state.digraph_symbol1
return '?'
return False | [
"def",
"_get_digraph_char",
"(",
"self",
",",
"cli",
")",
":",
"if",
"cli",
".",
"quoted_insert",
":",
"return",
"'^'",
"if",
"cli",
".",
"vi_state",
".",
"waiting_for_digraph",
":",
"if",
"cli",
".",
"vi_state",
".",
"digraph_symbol1",
":",
"return",
"cli... | Return `False`, or the Digraph symbol to be used. | [
"Return",
"False",
"or",
"the",
"Digraph",
"symbol",
"to",
"be",
"used",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1047-L1055 | train | 207,378 |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | Window._highlight_digraph | def _highlight_digraph(self, cli, new_screen):
"""
When we are in Vi digraph mode, put a question mark underneath the
cursor.
"""
digraph_char = self._get_digraph_char(cli)
if digraph_char:
cpos = new_screen.cursor_position
new_screen.data_buffer[cpos.y][cpos.x] = \
_CHAR_CACHE[digraph_char, Token.Digraph] | python | def _highlight_digraph(self, cli, new_screen):
"""
When we are in Vi digraph mode, put a question mark underneath the
cursor.
"""
digraph_char = self._get_digraph_char(cli)
if digraph_char:
cpos = new_screen.cursor_position
new_screen.data_buffer[cpos.y][cpos.x] = \
_CHAR_CACHE[digraph_char, Token.Digraph] | [
"def",
"_highlight_digraph",
"(",
"self",
",",
"cli",
",",
"new_screen",
")",
":",
"digraph_char",
"=",
"self",
".",
"_get_digraph_char",
"(",
"cli",
")",
"if",
"digraph_char",
":",
"cpos",
"=",
"new_screen",
".",
"cursor_position",
"new_screen",
".",
"data_bu... | When we are in Vi digraph mode, put a question mark underneath the
cursor. | [
"When",
"we",
"are",
"in",
"Vi",
"digraph",
"mode",
"put",
"a",
"question",
"mark",
"underneath",
"the",
"cursor",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1335-L1344 | train | 207,379 |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | Window._show_input_processor_key_buffer | def _show_input_processor_key_buffer(self, cli, new_screen):
"""
When the user is typing a key binding that consists of several keys,
display the last pressed key if the user is in insert mode and the key
is meaningful to be displayed.
E.g. Some people want to bind 'jj' to escape in Vi insert mode. But the
first 'j' needs to be displayed in order to get some feedback.
"""
key_buffer = cli.input_processor.key_buffer
if key_buffer and _in_insert_mode(cli) and not cli.is_done:
# The textual data for the given key. (Can be a VT100 escape
# sequence.)
data = key_buffer[-1].data
# Display only if this is a 1 cell width character.
if get_cwidth(data) == 1:
cpos = new_screen.cursor_position
new_screen.data_buffer[cpos.y][cpos.x] = \
_CHAR_CACHE[data, Token.PartialKeyBinding] | python | def _show_input_processor_key_buffer(self, cli, new_screen):
"""
When the user is typing a key binding that consists of several keys,
display the last pressed key if the user is in insert mode and the key
is meaningful to be displayed.
E.g. Some people want to bind 'jj' to escape in Vi insert mode. But the
first 'j' needs to be displayed in order to get some feedback.
"""
key_buffer = cli.input_processor.key_buffer
if key_buffer and _in_insert_mode(cli) and not cli.is_done:
# The textual data for the given key. (Can be a VT100 escape
# sequence.)
data = key_buffer[-1].data
# Display only if this is a 1 cell width character.
if get_cwidth(data) == 1:
cpos = new_screen.cursor_position
new_screen.data_buffer[cpos.y][cpos.x] = \
_CHAR_CACHE[data, Token.PartialKeyBinding] | [
"def",
"_show_input_processor_key_buffer",
"(",
"self",
",",
"cli",
",",
"new_screen",
")",
":",
"key_buffer",
"=",
"cli",
".",
"input_processor",
".",
"key_buffer",
"if",
"key_buffer",
"and",
"_in_insert_mode",
"(",
"cli",
")",
"and",
"not",
"cli",
".",
"is_d... | When the user is typing a key binding that consists of several keys,
display the last pressed key if the user is in insert mode and the key
is meaningful to be displayed.
E.g. Some people want to bind 'jj' to escape in Vi insert mode. But the
first 'j' needs to be displayed in order to get some feedback. | [
"When",
"the",
"user",
"is",
"typing",
"a",
"key",
"binding",
"that",
"consists",
"of",
"several",
"keys",
"display",
"the",
"last",
"pressed",
"key",
"if",
"the",
"user",
"is",
"in",
"insert",
"mode",
"and",
"the",
"key",
"is",
"meaningful",
"to",
"be",... | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1346-L1365 | train | 207,380 |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | Window._copy_margin | def _copy_margin(self, cli, lazy_screen, new_screen, write_position, move_x, width):
"""
Copy characters from the margin screen to the real screen.
"""
xpos = write_position.xpos + move_x
ypos = write_position.ypos
margin_write_position = WritePosition(xpos, ypos, width, write_position.height)
self._copy_body(cli, lazy_screen, new_screen, margin_write_position, 0, width) | python | def _copy_margin(self, cli, lazy_screen, new_screen, write_position, move_x, width):
"""
Copy characters from the margin screen to the real screen.
"""
xpos = write_position.xpos + move_x
ypos = write_position.ypos
margin_write_position = WritePosition(xpos, ypos, width, write_position.height)
self._copy_body(cli, lazy_screen, new_screen, margin_write_position, 0, width) | [
"def",
"_copy_margin",
"(",
"self",
",",
"cli",
",",
"lazy_screen",
",",
"new_screen",
",",
"write_position",
",",
"move_x",
",",
"width",
")",
":",
"xpos",
"=",
"write_position",
".",
"xpos",
"+",
"move_x",
"ypos",
"=",
"write_position",
".",
"ypos",
"mar... | Copy characters from the margin screen to the real screen. | [
"Copy",
"characters",
"from",
"the",
"margin",
"screen",
"to",
"the",
"real",
"screen",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1404-L1412 | train | 207,381 |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | Window._mouse_handler | def _mouse_handler(self, cli, mouse_event):
"""
Mouse handler. Called when the UI control doesn't handle this
particular event.
"""
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
self._scroll_down(cli)
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
self._scroll_up(cli) | python | def _mouse_handler(self, cli, mouse_event):
"""
Mouse handler. Called when the UI control doesn't handle this
particular event.
"""
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
self._scroll_down(cli)
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
self._scroll_up(cli) | [
"def",
"_mouse_handler",
"(",
"self",
",",
"cli",
",",
"mouse_event",
")",
":",
"if",
"mouse_event",
".",
"event_type",
"==",
"MouseEventType",
".",
"SCROLL_DOWN",
":",
"self",
".",
"_scroll_down",
"(",
"cli",
")",
"elif",
"mouse_event",
".",
"event_type",
"... | Mouse handler. Called when the UI control doesn't handle this
particular event. | [
"Mouse",
"handler",
".",
"Called",
"when",
"the",
"UI",
"control",
"doesn",
"t",
"handle",
"this",
"particular",
"event",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1587-L1595 | train | 207,382 |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | Window._scroll_up | def _scroll_up(self, cli):
" Scroll window up. "
info = self.render_info
if info.vertical_scroll > 0:
# TODO: not entirely correct yet in case of line wrapping and long lines.
if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom:
self.content.move_cursor_up(cli)
self.vertical_scroll -= 1 | python | def _scroll_up(self, cli):
" Scroll window up. "
info = self.render_info
if info.vertical_scroll > 0:
# TODO: not entirely correct yet in case of line wrapping and long lines.
if info.cursor_position.y >= info.window_height - 1 - info.configured_scroll_offsets.bottom:
self.content.move_cursor_up(cli)
self.vertical_scroll -= 1 | [
"def",
"_scroll_up",
"(",
"self",
",",
"cli",
")",
":",
"info",
"=",
"self",
".",
"render_info",
"if",
"info",
".",
"vertical_scroll",
">",
"0",
":",
"# TODO: not entirely correct yet in case of line wrapping and long lines.",
"if",
"info",
".",
"cursor_position",
"... | Scroll window up. | [
"Scroll",
"window",
"up",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1607-L1616 | train | 207,383 |
wandb/client | wandb/summary.py | SummarySubDict.update | def update(self, key_vals=None, overwrite=True):
"""Locked keys will be overwritten unless overwrite=False.
Otherwise, written keys will be added to the "locked" list.
"""
if not key_vals:
return
write_items = self._update(key_vals, overwrite)
self._root._root_set(self._path, write_items)
self._root._write(commit=True) | python | def update(self, key_vals=None, overwrite=True):
"""Locked keys will be overwritten unless overwrite=False.
Otherwise, written keys will be added to the "locked" list.
"""
if not key_vals:
return
write_items = self._update(key_vals, overwrite)
self._root._root_set(self._path, write_items)
self._root._write(commit=True) | [
"def",
"update",
"(",
"self",
",",
"key_vals",
"=",
"None",
",",
"overwrite",
"=",
"True",
")",
":",
"if",
"not",
"key_vals",
":",
"return",
"write_items",
"=",
"self",
".",
"_update",
"(",
"key_vals",
",",
"overwrite",
")",
"self",
".",
"_root",
".",
... | Locked keys will be overwritten unless overwrite=False.
Otherwise, written keys will be added to the "locked" list. | [
"Locked",
"keys",
"will",
"be",
"overwritten",
"unless",
"overwrite",
"=",
"False",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/summary.py#L133-L142 | train | 207,384 |
wandb/client | wandb/summary.py | Summary._encode | def _encode(self, value, path_from_root):
"""Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was.
"""
# Constructs a new `dict` tree in `json_value` that discards and/or
# encodes objects that aren't JSON serializable.
if isinstance(value, dict):
json_value = {}
for key, value in six.iteritems(value):
json_value[key] = self._encode(value, path_from_root + (key,))
return json_value
else:
path = ".".join(path_from_root)
if util.is_pandas_data_frame(value):
return util.encode_data_frame(path, value, self._run)
else:
friendly_value, converted = util.json_friendly(data_types.val_to_json(path, value))
json_value, compressed = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value))
if compressed:
self.write_h5(path_from_root, friendly_value)
return json_value
"""
if isinstance(value, dict):
json_child[key], converted = util.json_friendly(
self._encode(value, path_from_root + [key]))
else:
""" | python | def _encode(self, value, path_from_root):
"""Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was.
"""
# Constructs a new `dict` tree in `json_value` that discards and/or
# encodes objects that aren't JSON serializable.
if isinstance(value, dict):
json_value = {}
for key, value in six.iteritems(value):
json_value[key] = self._encode(value, path_from_root + (key,))
return json_value
else:
path = ".".join(path_from_root)
if util.is_pandas_data_frame(value):
return util.encode_data_frame(path, value, self._run)
else:
friendly_value, converted = util.json_friendly(data_types.val_to_json(path, value))
json_value, compressed = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value))
if compressed:
self.write_h5(path_from_root, friendly_value)
return json_value
"""
if isinstance(value, dict):
json_child[key], converted = util.json_friendly(
self._encode(value, path_from_root + [key]))
else:
""" | [
"def",
"_encode",
"(",
"self",
",",
"value",
",",
"path_from_root",
")",
":",
"# Constructs a new `dict` tree in `json_value` that discards and/or",
"# encodes objects that aren't JSON serializable.",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"json_value",
"=... | Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was. | [
"Normalize",
"compress",
"and",
"encode",
"sub",
"-",
"objects",
"for",
"backend",
"storage",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/summary.py#L271-L307 | train | 207,385 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/bindings/scroll.py | scroll_one_line_down | def scroll_one_line_down(event):
"""
scroll_offset += 1
"""
w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
b = event.cli.current_buffer
if w:
# When the cursor is at the top, move to the next line. (Otherwise, only scroll.)
if w.render_info:
info = w.render_info
if w.vertical_scroll < info.content_height - info.window_height:
if info.cursor_position.y <= info.configured_scroll_offsets.top:
b.cursor_position += b.document.get_cursor_down_position()
w.vertical_scroll += 1 | python | def scroll_one_line_down(event):
"""
scroll_offset += 1
"""
w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
b = event.cli.current_buffer
if w:
# When the cursor is at the top, move to the next line. (Otherwise, only scroll.)
if w.render_info:
info = w.render_info
if w.vertical_scroll < info.content_height - info.window_height:
if info.cursor_position.y <= info.configured_scroll_offsets.top:
b.cursor_position += b.document.get_cursor_down_position()
w.vertical_scroll += 1 | [
"def",
"scroll_one_line_down",
"(",
"event",
")",
":",
"w",
"=",
"find_window_for_buffer_name",
"(",
"event",
".",
"cli",
",",
"event",
".",
"cli",
".",
"current_buffer_name",
")",
"b",
"=",
"event",
".",
"cli",
".",
"current_buffer",
"if",
"w",
":",
"# Wh... | scroll_offset += 1 | [
"scroll_offset",
"+",
"=",
"1"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/scroll.py#L105-L121 | train | 207,386 |
wandb/client | wandb/vendor/prompt_toolkit/key_binding/bindings/scroll.py | scroll_one_line_up | def scroll_one_line_up(event):
"""
scroll_offset -= 1
"""
w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
b = event.cli.current_buffer
if w:
# When the cursor is at the bottom, move to the previous line. (Otherwise, only scroll.)
if w.render_info:
info = w.render_info
if w.vertical_scroll > 0:
first_line_height = info.get_height_for_line(info.first_visible_line())
cursor_up = info.cursor_position.y - (info.window_height - 1 - first_line_height -
info.configured_scroll_offsets.bottom)
# Move cursor up, as many steps as the height of the first line.
# TODO: not entirely correct yet, in case of line wrapping and many long lines.
for _ in range(max(0, cursor_up)):
b.cursor_position += b.document.get_cursor_up_position()
# Scroll window
w.vertical_scroll -= 1 | python | def scroll_one_line_up(event):
"""
scroll_offset -= 1
"""
w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
b = event.cli.current_buffer
if w:
# When the cursor is at the bottom, move to the previous line. (Otherwise, only scroll.)
if w.render_info:
info = w.render_info
if w.vertical_scroll > 0:
first_line_height = info.get_height_for_line(info.first_visible_line())
cursor_up = info.cursor_position.y - (info.window_height - 1 - first_line_height -
info.configured_scroll_offsets.bottom)
# Move cursor up, as many steps as the height of the first line.
# TODO: not entirely correct yet, in case of line wrapping and many long lines.
for _ in range(max(0, cursor_up)):
b.cursor_position += b.document.get_cursor_up_position()
# Scroll window
w.vertical_scroll -= 1 | [
"def",
"scroll_one_line_up",
"(",
"event",
")",
":",
"w",
"=",
"find_window_for_buffer_name",
"(",
"event",
".",
"cli",
",",
"event",
".",
"cli",
".",
"current_buffer_name",
")",
"b",
"=",
"event",
".",
"cli",
".",
"current_buffer",
"if",
"w",
":",
"# When... | scroll_offset -= 1 | [
"scroll_offset",
"-",
"=",
"1"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/scroll.py#L124-L148 | train | 207,387 |
wandb/client | wandb/vendor/prompt_toolkit/layout/menus.py | CompletionsMenuControl.create_content | def create_content(self, cli, width, height):
"""
Create a UIContent object for this control.
"""
complete_state = cli.current_buffer.complete_state
if complete_state:
completions = complete_state.current_completions
index = complete_state.complete_index # Can be None!
# Calculate width of completions menu.
menu_width = self._get_menu_width(width, complete_state)
menu_meta_width = self._get_menu_meta_width(width - menu_width, complete_state)
show_meta = self._show_meta(complete_state)
def get_line(i):
c = completions[i]
is_current_completion = (i == index)
result = self._get_menu_item_tokens(c, is_current_completion, menu_width)
if show_meta:
result += self._get_menu_item_meta_tokens(c, is_current_completion, menu_meta_width)
return result
return UIContent(get_line=get_line,
cursor_position=Point(x=0, y=index or 0),
line_count=len(completions),
default_char=Char(' ', self.token))
return UIContent() | python | def create_content(self, cli, width, height):
"""
Create a UIContent object for this control.
"""
complete_state = cli.current_buffer.complete_state
if complete_state:
completions = complete_state.current_completions
index = complete_state.complete_index # Can be None!
# Calculate width of completions menu.
menu_width = self._get_menu_width(width, complete_state)
menu_meta_width = self._get_menu_meta_width(width - menu_width, complete_state)
show_meta = self._show_meta(complete_state)
def get_line(i):
c = completions[i]
is_current_completion = (i == index)
result = self._get_menu_item_tokens(c, is_current_completion, menu_width)
if show_meta:
result += self._get_menu_item_meta_tokens(c, is_current_completion, menu_meta_width)
return result
return UIContent(get_line=get_line,
cursor_position=Point(x=0, y=index or 0),
line_count=len(completions),
default_char=Char(' ', self.token))
return UIContent() | [
"def",
"create_content",
"(",
"self",
",",
"cli",
",",
"width",
",",
"height",
")",
":",
"complete_state",
"=",
"cli",
".",
"current_buffer",
".",
"complete_state",
"if",
"complete_state",
":",
"completions",
"=",
"complete_state",
".",
"current_completions",
"i... | Create a UIContent object for this control. | [
"Create",
"a",
"UIContent",
"object",
"for",
"this",
"control",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/menus.py#L60-L88 | train | 207,388 |
wandb/client | wandb/vendor/prompt_toolkit/layout/menus.py | CompletionsMenuControl._get_menu_width | def _get_menu_width(self, max_width, complete_state):
"""
Return the width of the main column.
"""
return min(max_width, max(self.MIN_WIDTH, max(get_cwidth(c.display)
for c in complete_state.current_completions) + 2)) | python | def _get_menu_width(self, max_width, complete_state):
"""
Return the width of the main column.
"""
return min(max_width, max(self.MIN_WIDTH, max(get_cwidth(c.display)
for c in complete_state.current_completions) + 2)) | [
"def",
"_get_menu_width",
"(",
"self",
",",
"max_width",
",",
"complete_state",
")",
":",
"return",
"min",
"(",
"max_width",
",",
"max",
"(",
"self",
".",
"MIN_WIDTH",
",",
"max",
"(",
"get_cwidth",
"(",
"c",
".",
"display",
")",
"for",
"c",
"in",
"com... | Return the width of the main column. | [
"Return",
"the",
"width",
"of",
"the",
"main",
"column",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/menus.py#L96-L101 | train | 207,389 |
wandb/client | wandb/vendor/prompt_toolkit/layout/menus.py | CompletionsMenuControl._get_menu_meta_width | def _get_menu_meta_width(self, max_width, complete_state):
"""
Return the width of the meta column.
"""
if self._show_meta(complete_state):
return min(max_width, max(get_cwidth(c.display_meta)
for c in complete_state.current_completions) + 2)
else:
return 0 | python | def _get_menu_meta_width(self, max_width, complete_state):
"""
Return the width of the meta column.
"""
if self._show_meta(complete_state):
return min(max_width, max(get_cwidth(c.display_meta)
for c in complete_state.current_completions) + 2)
else:
return 0 | [
"def",
"_get_menu_meta_width",
"(",
"self",
",",
"max_width",
",",
"complete_state",
")",
":",
"if",
"self",
".",
"_show_meta",
"(",
"complete_state",
")",
":",
"return",
"min",
"(",
"max_width",
",",
"max",
"(",
"get_cwidth",
"(",
"c",
".",
"display_meta",
... | Return the width of the meta column. | [
"Return",
"the",
"width",
"of",
"the",
"meta",
"column",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/menus.py#L103-L111 | train | 207,390 |
wandb/client | wandb/vendor/prompt_toolkit/layout/menus.py | MultiColumnCompletionMenuControl.create_content | def create_content(self, cli, width, height):
"""
Create a UIContent object for this menu.
"""
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
self._render_pos_to_completion = {}
def grouper(n, iterable, fillvalue=None):
" grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx "
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def is_current_completion(completion):
" Returns True when this completion is the currently selected one. "
return complete_state.complete_index is not None and c == complete_state.current_completion
# Space required outside of the regular columns, for displaying the
# left and right arrow.
HORIZONTAL_MARGIN_REQUIRED = 3
if complete_state:
# There should be at least one column, but it cannot be wider than
# the available width.
column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width)
# However, when the columns tend to be very wide, because there are
# some very wide entries, shrink it anyway.
if column_width > self.suggested_max_column_width:
# `column_width` can still be bigger that `suggested_max_column_width`,
# but if there is place for two columns, we divide by two.
column_width //= (column_width // self.suggested_max_column_width)
visible_columns = max(1, (width - self._required_margin) // column_width)
columns_ = list(grouper(height, complete_state.current_completions))
rows_ = list(zip(*columns_))
# Make sure the current completion is always visible: update scroll offset.
selected_column = (complete_state.complete_index or 0) // height
self.scroll = min(selected_column, max(self.scroll, selected_column - visible_columns + 1))
render_left_arrow = self.scroll > 0
render_right_arrow = self.scroll < len(rows_[0]) - visible_columns
# Write completions to screen.
tokens_for_line = []
for row_index, row in enumerate(rows_):
tokens = []
middle_row = row_index == len(rows_) // 2
# Draw left arrow if we have hidden completions on the left.
if render_left_arrow:
tokens += [(Token.Scrollbar, '<' if middle_row else ' ')]
# Draw row content.
for column_index, c in enumerate(row[self.scroll:][:visible_columns]):
if c is not None:
tokens += self._get_menu_item_tokens(c, is_current_completion(c), column_width)
# Remember render position for mouse click handler.
for x in range(column_width):
self._render_pos_to_completion[(column_index * column_width + x, row_index)] = c
else:
tokens += [(self.token.Completion, ' ' * column_width)]
# Draw trailing padding. (_get_menu_item_tokens only returns padding on the left.)
tokens += [(self.token.Completion, ' ')]
# Draw right arrow if we have hidden completions on the right.
if render_right_arrow:
tokens += [(Token.Scrollbar, '>' if middle_row else ' ')]
# Newline.
tokens_for_line.append(tokens)
else:
tokens = []
self._rendered_rows = height
self._rendered_columns = visible_columns
self._total_columns = len(columns_)
self._render_left_arrow = render_left_arrow
self._render_right_arrow = render_right_arrow
self._render_width = column_width * visible_columns + render_left_arrow + render_right_arrow + 1
def get_line(i):
return tokens_for_line[i]
return UIContent(get_line=get_line, line_count=len(rows_)) | python | def create_content(self, cli, width, height):
"""
Create a UIContent object for this menu.
"""
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
self._render_pos_to_completion = {}
def grouper(n, iterable, fillvalue=None):
" grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx "
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def is_current_completion(completion):
" Returns True when this completion is the currently selected one. "
return complete_state.complete_index is not None and c == complete_state.current_completion
# Space required outside of the regular columns, for displaying the
# left and right arrow.
HORIZONTAL_MARGIN_REQUIRED = 3
if complete_state:
# There should be at least one column, but it cannot be wider than
# the available width.
column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width)
# However, when the columns tend to be very wide, because there are
# some very wide entries, shrink it anyway.
if column_width > self.suggested_max_column_width:
# `column_width` can still be bigger that `suggested_max_column_width`,
# but if there is place for two columns, we divide by two.
column_width //= (column_width // self.suggested_max_column_width)
visible_columns = max(1, (width - self._required_margin) // column_width)
columns_ = list(grouper(height, complete_state.current_completions))
rows_ = list(zip(*columns_))
# Make sure the current completion is always visible: update scroll offset.
selected_column = (complete_state.complete_index or 0) // height
self.scroll = min(selected_column, max(self.scroll, selected_column - visible_columns + 1))
render_left_arrow = self.scroll > 0
render_right_arrow = self.scroll < len(rows_[0]) - visible_columns
# Write completions to screen.
tokens_for_line = []
for row_index, row in enumerate(rows_):
tokens = []
middle_row = row_index == len(rows_) // 2
# Draw left arrow if we have hidden completions on the left.
if render_left_arrow:
tokens += [(Token.Scrollbar, '<' if middle_row else ' ')]
# Draw row content.
for column_index, c in enumerate(row[self.scroll:][:visible_columns]):
if c is not None:
tokens += self._get_menu_item_tokens(c, is_current_completion(c), column_width)
# Remember render position for mouse click handler.
for x in range(column_width):
self._render_pos_to_completion[(column_index * column_width + x, row_index)] = c
else:
tokens += [(self.token.Completion, ' ' * column_width)]
# Draw trailing padding. (_get_menu_item_tokens only returns padding on the left.)
tokens += [(self.token.Completion, ' ')]
# Draw right arrow if we have hidden completions on the right.
if render_right_arrow:
tokens += [(Token.Scrollbar, '>' if middle_row else ' ')]
# Newline.
tokens_for_line.append(tokens)
else:
tokens = []
self._rendered_rows = height
self._rendered_columns = visible_columns
self._total_columns = len(columns_)
self._render_left_arrow = render_left_arrow
self._render_right_arrow = render_right_arrow
self._render_width = column_width * visible_columns + render_left_arrow + render_right_arrow + 1
def get_line(i):
return tokens_for_line[i]
return UIContent(get_line=get_line, line_count=len(rows_)) | [
"def",
"create_content",
"(",
"self",
",",
"cli",
",",
"width",
",",
"height",
")",
":",
"complete_state",
"=",
"cli",
".",
"current_buffer",
".",
"complete_state",
"column_width",
"=",
"self",
".",
"_get_column_width",
"(",
"complete_state",
")",
"self",
".",... | Create a UIContent object for this menu. | [
"Create",
"a",
"UIContent",
"object",
"for",
"this",
"menu",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/menus.py#L273-L363 | train | 207,391 |
wandb/client | wandb/vendor/prompt_toolkit/layout/menus.py | MultiColumnCompletionMenuControl._get_column_width | def _get_column_width(self, complete_state):
"""
Return the width of each column.
"""
return max(get_cwidth(c.display) for c in complete_state.current_completions) + 1 | python | def _get_column_width(self, complete_state):
"""
Return the width of each column.
"""
return max(get_cwidth(c.display) for c in complete_state.current_completions) + 1 | [
"def",
"_get_column_width",
"(",
"self",
",",
"complete_state",
")",
":",
"return",
"max",
"(",
"get_cwidth",
"(",
"c",
".",
"display",
")",
"for",
"c",
"in",
"complete_state",
".",
"current_completions",
")",
"+",
"1"
] | Return the width of each column. | [
"Return",
"the",
"width",
"of",
"each",
"column",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/menus.py#L365-L369 | train | 207,392 |
wandb/client | wandb/vendor/prompt_toolkit/layout/menus.py | MultiColumnCompletionMenuControl.mouse_handler | def mouse_handler(self, cli, mouse_event):
"""
Handle scoll and click events.
"""
b = cli.current_buffer
def scroll_left():
b.complete_previous(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = max(0, self.scroll - 1)
def scroll_right():
b.complete_next(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = min(self._total_columns - self._rendered_columns, self.scroll + 1)
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
scroll_right()
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
scroll_left()
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
x = mouse_event.position.x
y = mouse_event.position.y
# Mouse click on left arrow.
if x == 0:
if self._render_left_arrow:
scroll_left()
# Mouse click on right arrow.
elif x == self._render_width - 1:
if self._render_right_arrow:
scroll_right()
# Mouse click on completion.
else:
completion = self._render_pos_to_completion.get((x, y))
if completion:
b.apply_completion(completion) | python | def mouse_handler(self, cli, mouse_event):
"""
Handle scoll and click events.
"""
b = cli.current_buffer
def scroll_left():
b.complete_previous(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = max(0, self.scroll - 1)
def scroll_right():
b.complete_next(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = min(self._total_columns - self._rendered_columns, self.scroll + 1)
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
scroll_right()
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
scroll_left()
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
x = mouse_event.position.x
y = mouse_event.position.y
# Mouse click on left arrow.
if x == 0:
if self._render_left_arrow:
scroll_left()
# Mouse click on right arrow.
elif x == self._render_width - 1:
if self._render_right_arrow:
scroll_right()
# Mouse click on completion.
else:
completion = self._render_pos_to_completion.get((x, y))
if completion:
b.apply_completion(completion) | [
"def",
"mouse_handler",
"(",
"self",
",",
"cli",
",",
"mouse_event",
")",
":",
"b",
"=",
"cli",
".",
"current_buffer",
"def",
"scroll_left",
"(",
")",
":",
"b",
".",
"complete_previous",
"(",
"count",
"=",
"self",
".",
"_rendered_rows",
",",
"disable_wrap_... | Handle scoll and click events. | [
"Handle",
"scoll",
"and",
"click",
"events",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/menus.py#L382-L420 | train | 207,393 |
wandb/client | wandb/vendor/prompt_toolkit/layout/menus.py | _SelectedCompletionMetaControl.preferred_width | def preferred_width(self, cli, max_available_width):
"""
Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.)
"""
if cli.current_buffer.complete_state:
state = cli.current_buffer.complete_state
return 2 + max(get_cwidth(c.display_meta) for c in state.current_completions)
else:
return 0 | python | def preferred_width(self, cli, max_available_width):
"""
Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.)
"""
if cli.current_buffer.complete_state:
state = cli.current_buffer.complete_state
return 2 + max(get_cwidth(c.display_meta) for c in state.current_completions)
else:
return 0 | [
"def",
"preferred_width",
"(",
"self",
",",
"cli",
",",
"max_available_width",
")",
":",
"if",
"cli",
".",
"current_buffer",
".",
"complete_state",
":",
"state",
"=",
"cli",
".",
"current_buffer",
".",
"complete_state",
"return",
"2",
"+",
"max",
"(",
"get_c... | Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.) | [
"Report",
"the",
"width",
"of",
"the",
"longest",
"meta",
"text",
"as",
"the",
"preferred",
"width",
"of",
"this",
"control",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/menus.py#L464-L476 | train | 207,394 |
wandb/client | wandb/vendor/prompt_toolkit/clipboard/base.py | Clipboard.set_text | def set_text(self, text): # Not abstract.
"""
Shortcut for setting plain text on clipboard.
"""
assert isinstance(text, six.string_types)
self.set_data(ClipboardData(text)) | python | def set_text(self, text): # Not abstract.
"""
Shortcut for setting plain text on clipboard.
"""
assert isinstance(text, six.string_types)
self.set_data(ClipboardData(text)) | [
"def",
"set_text",
"(",
"self",
",",
"text",
")",
":",
"# Not abstract.",
"assert",
"isinstance",
"(",
"text",
",",
"six",
".",
"string_types",
")",
"self",
".",
"set_data",
"(",
"ClipboardData",
"(",
"text",
")",
")"
] | Shortcut for setting plain text on clipboard. | [
"Shortcut",
"for",
"setting",
"plain",
"text",
"on",
"clipboard",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/clipboard/base.py#L46-L51 | train | 207,395 |
wandb/client | wandb/vendor/prompt_toolkit/eventloop/asyncio_base.py | AsyncioTimeout.reset | def reset(self):
"""
Reset the timeout. Starts a new timer.
"""
self.counter += 1
local_counter = self.counter
def timer_timeout():
if self.counter == local_counter and self.running:
self.callback()
self.loop.call_later(self.timeout, timer_timeout) | python | def reset(self):
"""
Reset the timeout. Starts a new timer.
"""
self.counter += 1
local_counter = self.counter
def timer_timeout():
if self.counter == local_counter and self.running:
self.callback()
self.loop.call_later(self.timeout, timer_timeout) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"counter",
"+=",
"1",
"local_counter",
"=",
"self",
".",
"counter",
"def",
"timer_timeout",
"(",
")",
":",
"if",
"self",
".",
"counter",
"==",
"local_counter",
"and",
"self",
".",
"running",
":",
"sel... | Reset the timeout. Starts a new timer. | [
"Reset",
"the",
"timeout",
".",
"Starts",
"a",
"new",
"timer",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/eventloop/asyncio_base.py#L29-L40 | train | 207,396 |
wandb/client | wandb/util.py | sentry_reraise | def sentry_reraise(exc):
"""Re-raise an exception after logging it to Sentry
Use this for top-level exceptions when you want the user to see the traceback.
Must be called from within an exception handler.
"""
sentry_exc(exc)
# this will messily add this "reraise" function to the stack trace
# but hopefully it's not too bad
six.reraise(type(exc), exc, sys.exc_info()[2]) | python | def sentry_reraise(exc):
"""Re-raise an exception after logging it to Sentry
Use this for top-level exceptions when you want the user to see the traceback.
Must be called from within an exception handler.
"""
sentry_exc(exc)
# this will messily add this "reraise" function to the stack trace
# but hopefully it's not too bad
six.reraise(type(exc), exc, sys.exc_info()[2]) | [
"def",
"sentry_reraise",
"(",
"exc",
")",
":",
"sentry_exc",
"(",
"exc",
")",
"# this will messily add this \"reraise\" function to the stack trace",
"# but hopefully it's not too bad",
"six",
".",
"reraise",
"(",
"type",
"(",
"exc",
")",
",",
"exc",
",",
"sys",
".",
... | Re-raise an exception after logging it to Sentry
Use this for top-level exceptions when you want the user to see the traceback.
Must be called from within an exception handler. | [
"Re",
"-",
"raise",
"an",
"exception",
"after",
"logging",
"it",
"to",
"Sentry"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/util.py#L77-L87 | train | 207,397 |
wandb/client | wandb/util.py | vendor_import | def vendor_import(name):
"""This enables us to use the vendor directory for packages we don't depend on"""
parent_dir = os.path.abspath(os.path.dirname(__file__))
vendor_dir = os.path.join(parent_dir, 'vendor')
sys.path.insert(1, vendor_dir)
return import_module(name) | python | def vendor_import(name):
"""This enables us to use the vendor directory for packages we don't depend on"""
parent_dir = os.path.abspath(os.path.dirname(__file__))
vendor_dir = os.path.join(parent_dir, 'vendor')
sys.path.insert(1, vendor_dir)
return import_module(name) | [
"def",
"vendor_import",
"(",
"name",
")",
":",
"parent_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"vendor_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"'vendo... | This enables us to use the vendor directory for packages we don't depend on | [
"This",
"enables",
"us",
"to",
"use",
"the",
"vendor",
"directory",
"for",
"packages",
"we",
"don",
"t",
"depend",
"on"
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/util.py#L90-L96 | train | 207,398 |
wandb/client | wandb/util.py | ensure_matplotlib_figure | def ensure_matplotlib_figure(obj):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.")
if not obj.gca().has_data():
raise ValueError(
"You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.")
return obj | python | def ensure_matplotlib_figure(obj):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.")
if not obj.gca().has_data():
raise ValueError(
"You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.")
return obj | [
"def",
"ensure_matplotlib_figure",
"(",
"obj",
")",
":",
"import",
"matplotlib",
"from",
"matplotlib",
".",
"figure",
"import",
"Figure",
"if",
"obj",
"==",
"matplotlib",
".",
"pyplot",
":",
"obj",
"=",
"obj",
".",
"gcf",
"(",
")",
"elif",
"not",
"isinstan... | Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted. | [
"Extract",
"the",
"current",
"figure",
"from",
"a",
"matplotlib",
"object",
"or",
"return",
"the",
"object",
"if",
"it",
"s",
"a",
"figure",
".",
"raises",
"ValueError",
"if",
"the",
"object",
"can",
"t",
"be",
"converted",
"."
] | 7d08954ed5674fee223cd85ed0d8518fe47266b2 | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/util.py#L222-L240 | train | 207,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.