repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
willkg/socorro-siggen | siggen/rules.py | CSignatureTool.normalize_frame | def normalize_frame(
self,
module=None,
function=None,
file=None,
line=None,
module_offset=None,
offset=None,
normalized=None,
**kwargs # eat any extra kwargs passed in
):
"""Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead.
"""
# If there's a cached normalized value, use that so we don't spend time
# figuring it out again
if normalized is not None:
return normalized
if function:
# If there's a filename and it ends in .rs, then normalize using
# Rust rules
if file and (parse_source_file(file) or '').endswith('.rs'):
return self.normalize_rust_function(
function=function,
line=line
)
# Otherwise normalize it with C/C++ rules
return self.normalize_cpp_function(
function=function,
line=line
)
# If there's a file and line number, use that
if file and line:
filename = file.rstrip('/\\')
if '\\' in filename:
file = filename.rsplit('\\')[-1]
else:
file = filename.rsplit('/')[-1]
return '{}#{}'.format(file, line)
# If there's an offset and no module/module_offset, use that
if not module and not module_offset and offset:
return '@{}'.format(offset)
# Return module/module_offset
return '{}@{}'.format(module or '', module_offset) | python | def normalize_frame(
self,
module=None,
function=None,
file=None,
line=None,
module_offset=None,
offset=None,
normalized=None,
**kwargs # eat any extra kwargs passed in
):
"""Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead.
"""
# If there's a cached normalized value, use that so we don't spend time
# figuring it out again
if normalized is not None:
return normalized
if function:
# If there's a filename and it ends in .rs, then normalize using
# Rust rules
if file and (parse_source_file(file) or '').endswith('.rs'):
return self.normalize_rust_function(
function=function,
line=line
)
# Otherwise normalize it with C/C++ rules
return self.normalize_cpp_function(
function=function,
line=line
)
# If there's a file and line number, use that
if file and line:
filename = file.rstrip('/\\')
if '\\' in filename:
file = filename.rsplit('\\')[-1]
else:
file = filename.rsplit('/')[-1]
return '{}#{}'.format(file, line)
# If there's an offset and no module/module_offset, use that
if not module and not module_offset and offset:
return '@{}'.format(offset)
# Return module/module_offset
return '{}@{}'.format(module or '', module_offset) | [
"def",
"normalize_frame",
"(",
"self",
",",
"module",
"=",
"None",
",",
"function",
"=",
"None",
",",
"file",
"=",
"None",
",",
"line",
"=",
"None",
",",
"module_offset",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"normalized",
"=",
"None",
",",
"... | Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead. | [
"Normalizes",
"a",
"single",
"frame"
] | db7e3233e665a458a961c48da22e93a69b1d08d6 | https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L210-L266 | train |
willkg/socorro-siggen | siggen/rules.py | CSignatureTool._do_generate | def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
"""
each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names.
"""
notes = []
debug_notes = []
# shorten source_list to the first signatureSentinel
sentinel_locations = []
for a_sentinel in self.signature_sentinels:
if type(a_sentinel) == tuple:
a_sentinel, condition_fn = a_sentinel
if not condition_fn(source_list):
continue
try:
sentinel_locations.append(source_list.index(a_sentinel))
except ValueError:
pass
if sentinel_locations:
min_index = min(sentinel_locations)
debug_notes.append(
'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
)
source_list = source_list[min_index:]
# Get all the relevant frame signatures. Note that these function signatures
# have already been normalized at this point.
new_signature_list = []
for a_signature in source_list:
# If the signature matches the irrelevant signatures regex, skip to the next frame.
if self.irrelevant_signature_re.match(a_signature):
debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
continue
# If the frame signature is a dll, remove the @xxxxx part.
if '.dll' in a_signature.lower():
a_signature = a_signature.split('@')[0]
# If this trimmed DLL signature is the same as the previous frame's, skip it.
if new_signature_list and a_signature == new_signature_list[-1]:
continue
new_signature_list.append(a_signature)
# If the signature does not match the prefix signatures regex, then it is the last
# one we add to the list.
if not self.prefix_signature_re.match(a_signature):
debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
break
debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))
# Add a special marker for hang crash reports.
if hang_type:
debug_notes.append(
'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
)
new_signature_list.insert(0, self.hang_prefixes[hang_type])
signature = delimiter.join(new_signature_list)
# Handle empty signatures to explain why we failed generating them.
if signature == '' or signature is None:
if crashed_thread is None:
notes.append(
"CSignatureTool: No signature could be created because we do not know which "
"thread crashed"
)
signature = "EMPTY: no crashing thread identified"
else:
notes.append(
"CSignatureTool: No proper signature could be created because no good data "
"for the crashing thread ({}) was found".format(crashed_thread)
)
try:
signature = source_list[0]
except IndexError:
signature = "EMPTY: no frame data available"
return signature, notes, debug_notes | python | def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
"""
each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names.
"""
notes = []
debug_notes = []
# shorten source_list to the first signatureSentinel
sentinel_locations = []
for a_sentinel in self.signature_sentinels:
if type(a_sentinel) == tuple:
a_sentinel, condition_fn = a_sentinel
if not condition_fn(source_list):
continue
try:
sentinel_locations.append(source_list.index(a_sentinel))
except ValueError:
pass
if sentinel_locations:
min_index = min(sentinel_locations)
debug_notes.append(
'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
)
source_list = source_list[min_index:]
# Get all the relevant frame signatures. Note that these function signatures
# have already been normalized at this point.
new_signature_list = []
for a_signature in source_list:
# If the signature matches the irrelevant signatures regex, skip to the next frame.
if self.irrelevant_signature_re.match(a_signature):
debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
continue
# If the frame signature is a dll, remove the @xxxxx part.
if '.dll' in a_signature.lower():
a_signature = a_signature.split('@')[0]
# If this trimmed DLL signature is the same as the previous frame's, skip it.
if new_signature_list and a_signature == new_signature_list[-1]:
continue
new_signature_list.append(a_signature)
# If the signature does not match the prefix signatures regex, then it is the last
# one we add to the list.
if not self.prefix_signature_re.match(a_signature):
debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
break
debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))
# Add a special marker for hang crash reports.
if hang_type:
debug_notes.append(
'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
)
new_signature_list.insert(0, self.hang_prefixes[hang_type])
signature = delimiter.join(new_signature_list)
# Handle empty signatures to explain why we failed generating them.
if signature == '' or signature is None:
if crashed_thread is None:
notes.append(
"CSignatureTool: No signature could be created because we do not know which "
"thread crashed"
)
signature = "EMPTY: no crashing thread identified"
else:
notes.append(
"CSignatureTool: No proper signature could be created because no good data "
"for the crashing thread ({}) was found".format(crashed_thread)
)
try:
signature = source_list[0]
except IndexError:
signature = "EMPTY: no frame data available"
return signature, notes, debug_notes | [
"def",
"_do_generate",
"(",
"self",
",",
"source_list",
",",
"hang_type",
",",
"crashed_thread",
",",
"delimiter",
"=",
"' | '",
")",
":",
"notes",
"=",
"[",
"]",
"debug_notes",
"=",
"[",
"]",
"# shorten source_list to the first signatureSentinel",
"sentinel_locatio... | each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names. | [
"each",
"element",
"of",
"signatureList",
"names",
"a",
"frame",
"in",
"the",
"crash",
"stack",
";",
"and",
"is",
":",
"-",
"a",
"prefix",
"of",
"a",
"relevant",
"frame",
":",
"Append",
"this",
"element",
"to",
"the",
"signature",
"-",
"a",
"relevant",
... | db7e3233e665a458a961c48da22e93a69b1d08d6 | https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L268-L351 | train |
jkahn/islex | islex/tokens.py | _clean_tag | def _clean_tag(t):
"""Fix up some garbage errors."""
# TODO: when score present, include info.
t = _scored_patt.sub(string=t, repl='')
if t == '_country_' or t.startswith('_country:'):
t = 'nnp_country'
elif t == 'vpb':
t = 'vb' # "carjack" is listed with vpb tag.
elif t == 'nnd':
t = 'nns' # "abbes" is listed with nnd tag.
elif t == 'nns_root:':
t = 'nns' # 'micros' is listed as nns_root.
elif t == 'root:zygote':
t = 'nn' # 'root:zygote' for zygote. :-/
elif t.startswith('root:'):
t = 'uh' # Don't know why, but these are all UH tokens.
elif t in ('abbr_united_states_marine_corps', 'abbr_orange_juice'):
t = "abbreviation"
elif t == '+abbreviation':
t = 'abbreviation'
elif t.startswith('fw_misspelling:'):
t = 'fw'
return t | python | def _clean_tag(t):
"""Fix up some garbage errors."""
# TODO: when score present, include info.
t = _scored_patt.sub(string=t, repl='')
if t == '_country_' or t.startswith('_country:'):
t = 'nnp_country'
elif t == 'vpb':
t = 'vb' # "carjack" is listed with vpb tag.
elif t == 'nnd':
t = 'nns' # "abbes" is listed with nnd tag.
elif t == 'nns_root:':
t = 'nns' # 'micros' is listed as nns_root.
elif t == 'root:zygote':
t = 'nn' # 'root:zygote' for zygote. :-/
elif t.startswith('root:'):
t = 'uh' # Don't know why, but these are all UH tokens.
elif t in ('abbr_united_states_marine_corps', 'abbr_orange_juice'):
t = "abbreviation"
elif t == '+abbreviation':
t = 'abbreviation'
elif t.startswith('fw_misspelling:'):
t = 'fw'
return t | [
"def",
"_clean_tag",
"(",
"t",
")",
":",
"# TODO: when score present, include info.",
"t",
"=",
"_scored_patt",
".",
"sub",
"(",
"string",
"=",
"t",
",",
"repl",
"=",
"''",
")",
"if",
"t",
"==",
"'_country_'",
"or",
"t",
".",
"startswith",
"(",
"'_country:... | Fix up some garbage errors. | [
"Fix",
"up",
"some",
"garbage",
"errors",
"."
] | c60fee062e9ebe34a3bef338539749463e47faf0 | https://github.com/jkahn/islex/blob/c60fee062e9ebe34a3bef338539749463e47faf0/islex/tokens.py#L91-L113 | train |
openstax/pyramid_sawing | pyramid_sawing/utils.py | local_settings | def local_settings(settings, prefix):
"""Localizes the settings for the dotted prefix.
For example, if the prefix where 'xyz'::
{'xyz.foo': 'bar', 'other': 'something'}
Would become::
{'foo': 'bar'}
Note, that non-prefixed items are left out and the prefix is dropped.
"""
prefix = "{}.".format(prefix)
new_settings = {k[len(prefix):]: v for k, v in settings.items()
if k.startswith(prefix)}
return new_settings | python | def local_settings(settings, prefix):
"""Localizes the settings for the dotted prefix.
For example, if the prefix where 'xyz'::
{'xyz.foo': 'bar', 'other': 'something'}
Would become::
{'foo': 'bar'}
Note, that non-prefixed items are left out and the prefix is dropped.
"""
prefix = "{}.".format(prefix)
new_settings = {k[len(prefix):]: v for k, v in settings.items()
if k.startswith(prefix)}
return new_settings | [
"def",
"local_settings",
"(",
"settings",
",",
"prefix",
")",
":",
"prefix",
"=",
"\"{}.\"",
".",
"format",
"(",
"prefix",
")",
"new_settings",
"=",
"{",
"k",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
":",
"v",
"for",
"k",
",",
"v",
"in",
"settings... | Localizes the settings for the dotted prefix.
For example, if the prefix where 'xyz'::
{'xyz.foo': 'bar', 'other': 'something'}
Would become::
{'foo': 'bar'}
Note, that non-prefixed items are left out and the prefix is dropped. | [
"Localizes",
"the",
"settings",
"for",
"the",
"dotted",
"prefix",
".",
"For",
"example",
"if",
"the",
"prefix",
"where",
"xyz",
"::"
] | d2ac7faf30c1517ed4621b8e62b7848e926078b9 | https://github.com/openstax/pyramid_sawing/blob/d2ac7faf30c1517ed4621b8e62b7848e926078b9/pyramid_sawing/utils.py#L13-L28 | train |
JohnDoee/thomas | thomas/humanize.py | humanize_bytes | def humanize_bytes(bytes, precision=1):
"""Return a humanized string representation of a number of bytes.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB'
"""
abbrevs = (
(1<<50, 'PB'),
(1<<40, 'TB'),
(1<<30, 'GB'),
(1<<20, 'MB'),
(1<<10, 'kB'),
(1, 'bytes')
)
if bytes == 1:
return '1 byte'
for factor, suffix in abbrevs:
if bytes >= factor:
break
return '%.*f %s' % (precision, bytes / factor, suffix) | python | def humanize_bytes(bytes, precision=1):
"""Return a humanized string representation of a number of bytes.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB'
"""
abbrevs = (
(1<<50, 'PB'),
(1<<40, 'TB'),
(1<<30, 'GB'),
(1<<20, 'MB'),
(1<<10, 'kB'),
(1, 'bytes')
)
if bytes == 1:
return '1 byte'
for factor, suffix in abbrevs:
if bytes >= factor:
break
return '%.*f %s' % (precision, bytes / factor, suffix) | [
"def",
"humanize_bytes",
"(",
"bytes",
",",
"precision",
"=",
"1",
")",
":",
"abbrevs",
"=",
"(",
"(",
"1",
"<<",
"50",
",",
"'PB'",
")",
",",
"(",
"1",
"<<",
"40",
",",
"'TB'",
")",
",",
"(",
"1",
"<<",
"30",
",",
"'GB'",
")",
",",
"(",
"1... | Return a humanized string representation of a number of bytes.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB' | [
"Return",
"a",
"humanized",
"string",
"representation",
"of",
"a",
"number",
"of",
"bytes",
"."
] | 51916dd110098b189a1c2fbcb71794fd9ec94832 | https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/humanize.py#L7-L42 | train |
BlackEarth/bl | bl/url.py | URL.parent | def parent(self):
"""return the parent URL, with params, query, and fragment in place"""
path = '/'.join(self.path.split('/')[:-1])
s = path.strip('/').split(':')
if len(s)==2 and s[1]=='':
return None
else:
return self.__class__(self, path=path) | python | def parent(self):
"""return the parent URL, with params, query, and fragment in place"""
path = '/'.join(self.path.split('/')[:-1])
s = path.strip('/').split(':')
if len(s)==2 and s[1]=='':
return None
else:
return self.__class__(self, path=path) | [
"def",
"parent",
"(",
"self",
")",
":",
"path",
"=",
"'/'",
".",
"join",
"(",
"self",
".",
"path",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
")",
"s",
"=",
"path",
".",
"strip",
"(",
"'/'",
")",
".",
"split",
"(",
"':'",
")",
... | return the parent URL, with params, query, and fragment in place | [
"return",
"the",
"parent",
"URL",
"with",
"params",
"query",
"and",
"fragment",
"in",
"place"
] | edf6f37dac718987260b90ad0e7f7fe084a7c1a3 | https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/url.py#L100-L107 | train |
BlackEarth/bl | bl/url.py | URL.join | def join(C, *args, **kwargs):
"""join a list of url elements, and include any keyword arguments, as a new URL"""
u = C('/'.join([str(arg).strip('/') for arg in args]), **kwargs)
return u | python | def join(C, *args, **kwargs):
"""join a list of url elements, and include any keyword arguments, as a new URL"""
u = C('/'.join([str(arg).strip('/') for arg in args]), **kwargs)
return u | [
"def",
"join",
"(",
"C",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"u",
"=",
"C",
"(",
"'/'",
".",
"join",
"(",
"[",
"str",
"(",
"arg",
")",
".",
"strip",
"(",
"'/'",
")",
"for",
"arg",
"in",
"args",
"]",
")",
",",
"*",
"*",
... | join a list of url elements, and include any keyword arguments, as a new URL | [
"join",
"a",
"list",
"of",
"url",
"elements",
"and",
"include",
"any",
"keyword",
"arguments",
"as",
"a",
"new",
"URL"
] | edf6f37dac718987260b90ad0e7f7fe084a7c1a3 | https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/url.py#L150-L153 | train |
teepark/junction | junction/hooks.py | select_peer | def select_peer(peer_addrs, service, routing_id, method):
'''Choose a target from the available peers for a singular message
:param peer_addrs:
the ``(host, port)``s of the peers eligible to handle the RPC, and
possibly a ``None`` entry if this hub can handle it locally
:type peer_addrs: list
:param service: the service of the message
:type service: anything hash-able
:param routing_id: the routing_id of the message
:type routing_id: int
:param method: the message method name
:type method: string
:returns: one of the provided peer_addrs
There is no reason to call this method directly, but it may be useful to
override it in a Hub subclass.
This default implementation uses ``None`` if it is available (prefer local
handling), then falls back to a random selection.
'''
if any(p is None for p in peer_addrs):
return None
return random.choice(peer_addrs) | python | def select_peer(peer_addrs, service, routing_id, method):
'''Choose a target from the available peers for a singular message
:param peer_addrs:
the ``(host, port)``s of the peers eligible to handle the RPC, and
possibly a ``None`` entry if this hub can handle it locally
:type peer_addrs: list
:param service: the service of the message
:type service: anything hash-able
:param routing_id: the routing_id of the message
:type routing_id: int
:param method: the message method name
:type method: string
:returns: one of the provided peer_addrs
There is no reason to call this method directly, but it may be useful to
override it in a Hub subclass.
This default implementation uses ``None`` if it is available (prefer local
handling), then falls back to a random selection.
'''
if any(p is None for p in peer_addrs):
return None
return random.choice(peer_addrs) | [
"def",
"select_peer",
"(",
"peer_addrs",
",",
"service",
",",
"routing_id",
",",
"method",
")",
":",
"if",
"any",
"(",
"p",
"is",
"None",
"for",
"p",
"in",
"peer_addrs",
")",
":",
"return",
"None",
"return",
"random",
".",
"choice",
"(",
"peer_addrs",
... | Choose a target from the available peers for a singular message
:param peer_addrs:
the ``(host, port)``s of the peers eligible to handle the RPC, and
possibly a ``None`` entry if this hub can handle it locally
:type peer_addrs: list
:param service: the service of the message
:type service: anything hash-able
:param routing_id: the routing_id of the message
:type routing_id: int
:param method: the message method name
:type method: string
:returns: one of the provided peer_addrs
There is no reason to call this method directly, but it may be useful to
override it in a Hub subclass.
This default implementation uses ``None`` if it is available (prefer local
handling), then falls back to a random selection. | [
"Choose",
"a",
"target",
"from",
"the",
"available",
"peers",
"for",
"a",
"singular",
"message"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/hooks.py#L8-L32 | train |
storax/jinjaapidoc | src/jinjaapidoc/__init__.py | setup | def setup(app):
"""Setup the sphinx extension
This will setup autodoc and autosummary.
Add the :class:`ext.ModDocstringDocumenter`.
Add the config values.
Connect builder-inited event to :func:`gendoc.main`.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None
"""
# Connect before autosummary
app.connect('builder-inited', gendoc.main)
app.setup_extension('sphinx.ext.autodoc')
app.setup_extension('sphinx.ext.autosummary')
app.add_autodocumenter(ext.ModDocstringDocumenter)
app.add_config_value('jinjaapi_outputdir', '', 'env')
app.add_config_value('jinjaapi_nodelete', True, 'env')
app.add_config_value('jinjaapi_srcdir', '', 'env')
app.add_config_value('jinjaapi_exclude_paths', [], 'env')
app.add_config_value('jinjaapi_force', True, 'env')
app.add_config_value('jinjaapi_followlinks', True, 'env')
app.add_config_value('jinjaapi_dryrun', False, 'env')
app.add_config_value('jinjaapi_includeprivate', True, 'env')
app.add_config_value('jinjaapi_addsummarytemplate', True, 'env')
app.add_config_value('jinjaapi_include_from_all', True, 'env')
return {'version': __version__, 'parallel_read_safe': True} | python | def setup(app):
"""Setup the sphinx extension
This will setup autodoc and autosummary.
Add the :class:`ext.ModDocstringDocumenter`.
Add the config values.
Connect builder-inited event to :func:`gendoc.main`.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None
"""
# Connect before autosummary
app.connect('builder-inited', gendoc.main)
app.setup_extension('sphinx.ext.autodoc')
app.setup_extension('sphinx.ext.autosummary')
app.add_autodocumenter(ext.ModDocstringDocumenter)
app.add_config_value('jinjaapi_outputdir', '', 'env')
app.add_config_value('jinjaapi_nodelete', True, 'env')
app.add_config_value('jinjaapi_srcdir', '', 'env')
app.add_config_value('jinjaapi_exclude_paths', [], 'env')
app.add_config_value('jinjaapi_force', True, 'env')
app.add_config_value('jinjaapi_followlinks', True, 'env')
app.add_config_value('jinjaapi_dryrun', False, 'env')
app.add_config_value('jinjaapi_includeprivate', True, 'env')
app.add_config_value('jinjaapi_addsummarytemplate', True, 'env')
app.add_config_value('jinjaapi_include_from_all', True, 'env')
return {'version': __version__, 'parallel_read_safe': True} | [
"def",
"setup",
"(",
"app",
")",
":",
"# Connect before autosummary\r",
"app",
".",
"connect",
"(",
"'builder-inited'",
",",
"gendoc",
".",
"main",
")",
"app",
".",
"setup_extension",
"(",
"'sphinx.ext.autodoc'",
")",
"app",
".",
"setup_extension",
"(",
"'sphinx... | Setup the sphinx extension
This will setup autodoc and autosummary.
Add the :class:`ext.ModDocstringDocumenter`.
Add the config values.
Connect builder-inited event to :func:`gendoc.main`.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None | [
"Setup",
"the",
"sphinx",
"extension",
"This",
"will",
"setup",
"autodoc",
"and",
"autosummary",
".",
"Add",
"the",
":",
"class",
":",
"ext",
".",
"ModDocstringDocumenter",
".",
"Add",
"the",
"config",
"values",
".",
"Connect",
"builder",
"-",
"inited",
"eve... | f1eeb6ab5bd1a96c4130306718c6423f37c76856 | https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/__init__.py#L9-L42 | train |
BlackEarth/bl | bl/progress.py | Progress.start | def start(self, key=None, **params):
"""initialize process timing for the current stack"""
self.params.update(**params)
key = key or self.stack_key
if key is not None:
self.current_times[key] = time() | python | def start(self, key=None, **params):
"""initialize process timing for the current stack"""
self.params.update(**params)
key = key or self.stack_key
if key is not None:
self.current_times[key] = time() | [
"def",
"start",
"(",
"self",
",",
"key",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"self",
".",
"params",
".",
"update",
"(",
"*",
"*",
"params",
")",
"key",
"=",
"key",
"or",
"self",
".",
"stack_key",
"if",
"key",
"is",
"not",
"None",
":... | initialize process timing for the current stack | [
"initialize",
"process",
"timing",
"for",
"the",
"current",
"stack"
] | edf6f37dac718987260b90ad0e7f7fe084a7c1a3 | https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/progress.py#L19-L24 | train |
BlackEarth/bl | bl/progress.py | Progress.report | def report(self, fraction=None):
"""report the total progress for the current stack, optionally given the local fraction completed.
fraction=None: if given, used as the fraction of the local method so far completed.
runtimes=None: if given, used as the expected runtimes for the current stack.
"""
r = Dict()
local_key = self.stack_key
if local_key is None: return {}
runtimes = self.runtimes()
for key in self.stack_keys:
if self.current_times.get(key) is None:
self.start(key=key)
runtime = runtimes.get(key) or self.runtime(key)
if key == local_key and fraction is not None:
r[key] = fraction
elif runtime is not None:
r[key] = (time() - self.current_times[key]) / runtime
return r | python | def report(self, fraction=None):
"""report the total progress for the current stack, optionally given the local fraction completed.
fraction=None: if given, used as the fraction of the local method so far completed.
runtimes=None: if given, used as the expected runtimes for the current stack.
"""
r = Dict()
local_key = self.stack_key
if local_key is None: return {}
runtimes = self.runtimes()
for key in self.stack_keys:
if self.current_times.get(key) is None:
self.start(key=key)
runtime = runtimes.get(key) or self.runtime(key)
if key == local_key and fraction is not None:
r[key] = fraction
elif runtime is not None:
r[key] = (time() - self.current_times[key]) / runtime
return r | [
"def",
"report",
"(",
"self",
",",
"fraction",
"=",
"None",
")",
":",
"r",
"=",
"Dict",
"(",
")",
"local_key",
"=",
"self",
".",
"stack_key",
"if",
"local_key",
"is",
"None",
":",
"return",
"{",
"}",
"runtimes",
"=",
"self",
".",
"runtimes",
"(",
"... | report the total progress for the current stack, optionally given the local fraction completed.
fraction=None: if given, used as the fraction of the local method so far completed.
runtimes=None: if given, used as the expected runtimes for the current stack. | [
"report",
"the",
"total",
"progress",
"for",
"the",
"current",
"stack",
"optionally",
"given",
"the",
"local",
"fraction",
"completed",
".",
"fraction",
"=",
"None",
":",
"if",
"given",
"used",
"as",
"the",
"fraction",
"of",
"the",
"local",
"method",
"so",
... | edf6f37dac718987260b90ad0e7f7fe084a7c1a3 | https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/progress.py#L34-L51 | train |
BlackEarth/bl | bl/progress.py | Progress.finish | def finish(self):
"""record the current stack process as finished"""
self.report(fraction=1.0)
key = self.stack_key
if key is not None:
if self.data.get(key) is None:
self.data[key] = []
start_time = self.current_times.get(key) or time()
self.data[key].append(Dict(runtime=time()-start_time, **self.params)) | python | def finish(self):
"""record the current stack process as finished"""
self.report(fraction=1.0)
key = self.stack_key
if key is not None:
if self.data.get(key) is None:
self.data[key] = []
start_time = self.current_times.get(key) or time()
self.data[key].append(Dict(runtime=time()-start_time, **self.params)) | [
"def",
"finish",
"(",
"self",
")",
":",
"self",
".",
"report",
"(",
"fraction",
"=",
"1.0",
")",
"key",
"=",
"self",
".",
"stack_key",
"if",
"key",
"is",
"not",
"None",
":",
"if",
"self",
".",
"data",
".",
"get",
"(",
"key",
")",
"is",
"None",
... | record the current stack process as finished | [
"record",
"the",
"current",
"stack",
"process",
"as",
"finished"
] | edf6f37dac718987260b90ad0e7f7fe084a7c1a3 | https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/progress.py#L53-L61 | train |
storax/jinjaapidoc | src/jinjaapidoc/ext.py | ModDocstringDocumenter.add_directive_header | def add_directive_header(self, sig):
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', "module")
name = self.format_name()
self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig),
'<autodoc>')
if self.options.noindex:
self.add_line(u' :noindex:', '<autodoc>')
if self.objpath:
# Be explicit about the module, this is necessary since .. class::
# etc. don't support a prepended module name
self.add_line(u' :module: %s' % self.modname, '<autodoc>') | python | def add_directive_header(self, sig):
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', "module")
name = self.format_name()
self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig),
'<autodoc>')
if self.options.noindex:
self.add_line(u' :noindex:', '<autodoc>')
if self.objpath:
# Be explicit about the module, this is necessary since .. class::
# etc. don't support a prepended module name
self.add_line(u' :module: %s' % self.modname, '<autodoc>') | [
"def",
"add_directive_header",
"(",
"self",
",",
"sig",
")",
":",
"domain",
"=",
"getattr",
"(",
"self",
",",
"'domain'",
",",
"'py'",
")",
"directive",
"=",
"getattr",
"(",
"self",
",",
"'directivetype'",
",",
"\"module\"",
")",
"name",
"=",
"self",
"."... | Add the directive header and options to the generated content. | [
"Add",
"the",
"directive",
"header",
"and",
"options",
"to",
"the",
"generated",
"content",
"."
] | f1eeb6ab5bd1a96c4130306718c6423f37c76856 | https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/ext.py#L13-L25 | train |
teepark/junction | junction/client.py | Client.connect | def connect(self):
"Initiate the connection to a proxying hub"
log.info("connecting")
# don't have the connection attempt reconnects, because when it goes
# down we are going to cycle to the next potential peer from the Client
self._peer = connection.Peer(
None, self._dispatcher, self._addrs.popleft(),
backend.Socket(), reconnect=False)
self._peer.start() | python | def connect(self):
"Initiate the connection to a proxying hub"
log.info("connecting")
# don't have the connection attempt reconnects, because when it goes
# down we are going to cycle to the next potential peer from the Client
self._peer = connection.Peer(
None, self._dispatcher, self._addrs.popleft(),
backend.Socket(), reconnect=False)
self._peer.start() | [
"def",
"connect",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"connecting\"",
")",
"# don't have the connection attempt reconnects, because when it goes",
"# down we are going to cycle to the next potential peer from the Client",
"self",
".",
"_peer",
"=",
"connection",
"... | Initiate the connection to a proxying hub | [
"Initiate",
"the",
"connection",
"to",
"a",
"proxying",
"hub"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L29-L38 | train |
teepark/junction | junction/client.py | Client.wait_connected | def wait_connected(self, timeout=None):
'''Wait for connections to be made and their handshakes to finish
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` if one or more
failed.
'''
result = self._peer.wait_connected(timeout)
if not result:
if timeout is not None:
log.warn("connect wait timed out after %.2f seconds" % timeout)
return result | python | def wait_connected(self, timeout=None):
'''Wait for connections to be made and their handshakes to finish
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` if one or more
failed.
'''
result = self._peer.wait_connected(timeout)
if not result:
if timeout is not None:
log.warn("connect wait timed out after %.2f seconds" % timeout)
return result | [
"def",
"wait_connected",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"result",
"=",
"self",
".",
"_peer",
".",
"wait_connected",
"(",
"timeout",
")",
"if",
"not",
"result",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"log",
".",
"warn",
"... | Wait for connections to be made and their handshakes to finish
:param timeout:
maximum time to wait in seconds. with None, there is no timeout.
:type timeout: float or None
:returns:
``True`` if all connections were made, ``False`` if one or more
failed. | [
"Wait",
"for",
"connections",
"to",
"be",
"made",
"and",
"their",
"handshakes",
"to",
"finish"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L40-L55 | train |
teepark/junction | junction/client.py | Client.reset | def reset(self):
"Close the current failed connection and prepare for a new one"
log.info("resetting client")
rpc_client = self._rpc_client
self._addrs.append(self._peer.addr)
self.__init__(self._addrs)
self._rpc_client = rpc_client
self._dispatcher.rpc_client = rpc_client
rpc_client._client = weakref.ref(self) | python | def reset(self):
"Close the current failed connection and prepare for a new one"
log.info("resetting client")
rpc_client = self._rpc_client
self._addrs.append(self._peer.addr)
self.__init__(self._addrs)
self._rpc_client = rpc_client
self._dispatcher.rpc_client = rpc_client
rpc_client._client = weakref.ref(self) | [
"def",
"reset",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"resetting client\"",
")",
"rpc_client",
"=",
"self",
".",
"_rpc_client",
"self",
".",
"_addrs",
".",
"append",
"(",
"self",
".",
"_peer",
".",
"addr",
")",
"self",
".",
"__init__",
"(",... | Close the current failed connection and prepare for a new one | [
"Close",
"the",
"current",
"failed",
"connection",
"and",
"prepare",
"for",
"a",
"new",
"one"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L57-L65 | train |
teepark/junction | junction/client.py | Client.shutdown | def shutdown(self):
'Close the hub connection'
log.info("shutting down")
self._peer.go_down(reconnect=False, expected=True) | python | def shutdown(self):
'Close the hub connection'
log.info("shutting down")
self._peer.go_down(reconnect=False, expected=True) | [
"def",
"shutdown",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"shutting down\"",
")",
"self",
".",
"_peer",
".",
"go_down",
"(",
"reconnect",
"=",
"False",
",",
"expected",
"=",
"True",
")"
] | Close the hub connection | [
"Close",
"the",
"hub",
"connection"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L67-L70 | train |
teepark/junction | junction/client.py | Client.publish | def publish(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send a 1-way message
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns: None. use 'rpc' methods for requests with responses.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
'''
if not self._peer.up:
raise errors.Unroutable()
self._dispatcher.send_proxied_publish(service, routing_id, method,
args or (), kwargs or {}, singular=not broadcast) | python | def publish(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send a 1-way message
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns: None. use 'rpc' methods for requests with responses.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
'''
if not self._peer.up:
raise errors.Unroutable()
self._dispatcher.send_proxied_publish(service, routing_id, method,
args or (), kwargs or {}, singular=not broadcast) | [
"def",
"publish",
"(",
"self",
",",
"service",
",",
"routing_id",
",",
"method",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"broadcast",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_peer",
".",
"up",
":",
"raise",
"errors",
"... | Send a 1-way message
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to every peer with a matching subscription
:type broadcast: bool
:returns: None. use 'rpc' methods for requests with responses.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub | [
"Send",
"a",
"1",
"-",
"way",
"message"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L72-L102 | train |
teepark/junction | junction/client.py | Client.publish_receiver_count | def publish_receiver_count(
self, service, routing_id, method, timeout=None):
'''Get the number of peers that would handle a particular publish
This method will block until a response arrives
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:param method: the method name
:type method: string
:param timeout: maximum time to wait for the response
:type timeout: int, float or None
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires
'''
if not self._peer.up:
raise errors.Unroutable()
return self._rpc_client.recipient_count(self._peer,
const.MSG_TYPE_PUBLISH, service, routing_id, method).wait(
timeout)[0] | python | def publish_receiver_count(
self, service, routing_id, method, timeout=None):
'''Get the number of peers that would handle a particular publish
This method will block until a response arrives
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:param method: the method name
:type method: string
:param timeout: maximum time to wait for the response
:type timeout: int, float or None
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires
'''
if not self._peer.up:
raise errors.Unroutable()
return self._rpc_client.recipient_count(self._peer,
const.MSG_TYPE_PUBLISH, service, routing_id, method).wait(
timeout)[0] | [
"def",
"publish_receiver_count",
"(",
"self",
",",
"service",
",",
"routing_id",
",",
"method",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_peer",
".",
"up",
":",
"raise",
"errors",
".",
"Unroutable",
"(",
")",
"return",
"self",
... | Get the number of peers that would handle a particular publish
This method will block until a response arrives
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:param method: the method name
:type method: string
:param timeout: maximum time to wait for the response
:type timeout: int, float or None
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires | [
"Get",
"the",
"number",
"of",
"peers",
"that",
"would",
"handle",
"a",
"particular",
"publish"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L104-L131 | train |
teepark/junction | junction/client.py | Client.send_rpc | def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
'''
if not self._peer.up:
raise errors.Unroutable()
return self._dispatcher.send_proxied_rpc(service, routing_id, method,
args or (), kwargs or {}, not broadcast) | python | def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
'''
if not self._peer.up:
raise errors.Unroutable()
return self._dispatcher.send_proxied_rpc(service, routing_id, method,
args or (), kwargs or {}, not broadcast) | [
"def",
"send_rpc",
"(",
"self",
",",
"service",
",",
"routing_id",
",",
"method",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"broadcast",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_peer",
".",
"up",
":",
"raise",
"errors",
... | Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub | [
"Send",
"out",
"an",
"RPC",
"request"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L133-L165 | train |
teepark/junction | junction/client.py | Client.rpc_receiver_count | def rpc_receiver_count(self, service, routing_id, method, timeout=None):
'''Get the number of peers that would handle a particular RPC
This method will block until a response arrives
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:param method: the method name
:type method: string
:returns:
the integer number of peers that would receive the described RPC
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires
'''
if not self._peer.up:
raise errors.Unroutable()
return self._rpc_client.recipient_count(self._peer,
const.MSG_TYPE_RPC_REQUEST, service, routing_id, method).wait(
timeout)[0] | python | def rpc_receiver_count(self, service, routing_id, method, timeout=None):
'''Get the number of peers that would handle a particular RPC
This method will block until a response arrives
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:param method: the method name
:type method: string
:returns:
the integer number of peers that would receive the described RPC
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires
'''
if not self._peer.up:
raise errors.Unroutable()
return self._rpc_client.recipient_count(self._peer,
const.MSG_TYPE_RPC_REQUEST, service, routing_id, method).wait(
timeout)[0] | [
"def",
"rpc_receiver_count",
"(",
"self",
",",
"service",
",",
"routing_id",
",",
"method",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_peer",
".",
"up",
":",
"raise",
"errors",
".",
"Unroutable",
"(",
")",
"return",
"self",
".",... | Get the number of peers that would handle a particular RPC
This method will block until a response arrives
:param service: the service name
:type service: anything hash-able
:param routing_id:
the id used for narrowing within the service handlers
:type routing_id: int
:param method: the method name
:type method: string
:returns:
the integer number of peers that would receive the described RPC
:raises:
- :class:`Unroutable <junction.errors.Unroutable>` if no peers are
registered to receive the message
- :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
was provided and it expires | [
"Get",
"the",
"number",
"of",
"peers",
"that",
"would",
"handle",
"a",
"particular",
"RPC"
] | 481d135d9e53acb55c72686e2eb4483432f35fa6 | https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L207-L234 | train |
berlotto/restapi-client | restapi/__init__.py | Endpoint._headers | def _headers(self, others={}):
"""Return the default headers and others as necessary"""
headers = {
'Content-Type': 'application/json'
}
for p in others.keys():
headers[p] = others[p]
return headers | python | def _headers(self, others={}):
"""Return the default headers and others as necessary"""
headers = {
'Content-Type': 'application/json'
}
for p in others.keys():
headers[p] = others[p]
return headers | [
"def",
"_headers",
"(",
"self",
",",
"others",
"=",
"{",
"}",
")",
":",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
"for",
"p",
"in",
"others",
".",
"keys",
"(",
")",
":",
"headers",
"[",
"p",
"]",
"=",
"others",
"[",
"p"... | Return the default headers and others as necessary | [
"Return",
"the",
"default",
"headers",
"and",
"others",
"as",
"necessary"
] | c35406c3fa8163fadb2e25ef5c2604d03569a6a1 | https://github.com/berlotto/restapi-client/blob/c35406c3fa8163fadb2e25ef5c2604d03569a6a1/restapi/__init__.py#L64-L72 | train |
Eyepea/tanto | monitoring_agent/configurator.py | read | def read(config_file, configspec, server_mode=False, default_section='default_settings', list_values=True):
'''
Read the config file with spec validation
'''
# configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec),
# encoding='UTF8',
# interpolation='Template',
# list_values=False,
# _inspec=True)
config = ConfigObj(config_file,
configspec=path.join(path.abspath(path.dirname(__file__)),
configspec),
list_values=list_values)
validation = config.validate(validate.Validator(), preserve_errors=True)
if validation == True:
config = dict(config)
for section in config:
if section != default_section:
if server_mode: # When it's a servers config file, retrieve the correct fqdn
config[section]['availability'] = True
if config[section]['custom_fqdn'] == None:
config[section]['custom_fqdn'] = socket.getfqdn()
for option in config[section]: # retrieve default configuration for missing values
if config[section][option] == None:
config[section][option] = config[default_section][option]
del(config[default_section])
return config
else:
raise ConfiguratorException(config_file, validation) | python | def read(config_file, configspec, server_mode=False, default_section='default_settings', list_values=True):
'''
Read the config file with spec validation
'''
# configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec),
# encoding='UTF8',
# interpolation='Template',
# list_values=False,
# _inspec=True)
config = ConfigObj(config_file,
configspec=path.join(path.abspath(path.dirname(__file__)),
configspec),
list_values=list_values)
validation = config.validate(validate.Validator(), preserve_errors=True)
if validation == True:
config = dict(config)
for section in config:
if section != default_section:
if server_mode: # When it's a servers config file, retrieve the correct fqdn
config[section]['availability'] = True
if config[section]['custom_fqdn'] == None:
config[section]['custom_fqdn'] = socket.getfqdn()
for option in config[section]: # retrieve default configuration for missing values
if config[section][option] == None:
config[section][option] = config[default_section][option]
del(config[default_section])
return config
else:
raise ConfiguratorException(config_file, validation) | [
"def",
"read",
"(",
"config_file",
",",
"configspec",
",",
"server_mode",
"=",
"False",
",",
"default_section",
"=",
"'default_settings'",
",",
"list_values",
"=",
"True",
")",
":",
"# configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec),",
... | Read the config file with spec validation | [
"Read",
"the",
"config",
"file",
"with",
"spec",
"validation"
] | ad8fd32e0fd3b7bc3dee5dabb984a2567ae46fe9 | https://github.com/Eyepea/tanto/blob/ad8fd32e0fd3b7bc3dee5dabb984a2567ae46fe9/monitoring_agent/configurator.py#L21-L52 | train |
pmacosta/pmisc | pmisc/strings.py | elapsed_time_string | def elapsed_time_string(start_time, stop_time):
r"""
Return a formatted string with the elapsed time between two time points.
The string includes years (365 days), months (30 days), days (24 hours),
hours (60 minutes), minutes (60 seconds) and seconds. If both arguments
are equal, the string returned is :code:`'None'`; otherwise, the string
returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s],
[MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s],
etc.) is omitted if the value of that part is null/zero
:param start_time: Starting time point
:type start_time: `datetime <https://docs.python.org/3/library/
datetime.html#datetime-objects>`_
:param stop_time: Ending time point
:type stop_time: `datetime`
:rtype: string
:raises: RuntimeError (Invalid time delta specification)
For example:
>>> import datetime, pmisc
>>> start_time = datetime.datetime(2014, 1, 1, 1, 10, 1)
>>> stop_time = datetime.datetime(2015, 1, 3, 1, 10, 3)
>>> pmisc.elapsed_time_string(start_time, stop_time)
'1 year, 2 days and 2 seconds'
"""
if start_time > stop_time:
raise RuntimeError("Invalid time delta specification")
delta_time = stop_time - start_time
# Python 2.6 datetime objects do not have total_seconds() method
tot_seconds = int(
(
delta_time.microseconds
+ (delta_time.seconds + delta_time.days * 24 * 3600) * 10 ** 6
)
/ 10 ** 6
)
years, remainder = divmod(tot_seconds, 365 * 24 * 60 * 60)
months, remainder = divmod(remainder, 30 * 24 * 60 * 60)
days, remainder = divmod(remainder, 24 * 60 * 60)
hours, remainder = divmod(remainder, 60 * 60)
minutes, seconds = divmod(remainder, 60)
token_iter = zip(
[years, months, days, hours, minutes, seconds],
["year", "month", "day", "hour", "minute", "second"],
)
ret_list = [
"{token} {token_name}{plural}".format(
token=num, token_name=desc, plural="s" if num > 1 else ""
)
for num, desc in token_iter
if num > 0
]
if not ret_list:
return "None"
if len(ret_list) == 1:
return ret_list[0]
if len(ret_list) == 2:
return ret_list[0] + " and " + ret_list[1]
return (", ".join(ret_list[0:-1])) + " and " + ret_list[-1] | python | def elapsed_time_string(start_time, stop_time):
r"""
Return a formatted string with the elapsed time between two time points.
The string includes years (365 days), months (30 days), days (24 hours),
hours (60 minutes), minutes (60 seconds) and seconds. If both arguments
are equal, the string returned is :code:`'None'`; otherwise, the string
returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s],
[MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s],
etc.) is omitted if the value of that part is null/zero
:param start_time: Starting time point
:type start_time: `datetime <https://docs.python.org/3/library/
datetime.html#datetime-objects>`_
:param stop_time: Ending time point
:type stop_time: `datetime`
:rtype: string
:raises: RuntimeError (Invalid time delta specification)
For example:
>>> import datetime, pmisc
>>> start_time = datetime.datetime(2014, 1, 1, 1, 10, 1)
>>> stop_time = datetime.datetime(2015, 1, 3, 1, 10, 3)
>>> pmisc.elapsed_time_string(start_time, stop_time)
'1 year, 2 days and 2 seconds'
"""
if start_time > stop_time:
raise RuntimeError("Invalid time delta specification")
delta_time = stop_time - start_time
# Python 2.6 datetime objects do not have total_seconds() method
tot_seconds = int(
(
delta_time.microseconds
+ (delta_time.seconds + delta_time.days * 24 * 3600) * 10 ** 6
)
/ 10 ** 6
)
years, remainder = divmod(tot_seconds, 365 * 24 * 60 * 60)
months, remainder = divmod(remainder, 30 * 24 * 60 * 60)
days, remainder = divmod(remainder, 24 * 60 * 60)
hours, remainder = divmod(remainder, 60 * 60)
minutes, seconds = divmod(remainder, 60)
token_iter = zip(
[years, months, days, hours, minutes, seconds],
["year", "month", "day", "hour", "minute", "second"],
)
ret_list = [
"{token} {token_name}{plural}".format(
token=num, token_name=desc, plural="s" if num > 1 else ""
)
for num, desc in token_iter
if num > 0
]
if not ret_list:
return "None"
if len(ret_list) == 1:
return ret_list[0]
if len(ret_list) == 2:
return ret_list[0] + " and " + ret_list[1]
return (", ".join(ret_list[0:-1])) + " and " + ret_list[-1] | [
"def",
"elapsed_time_string",
"(",
"start_time",
",",
"stop_time",
")",
":",
"if",
"start_time",
">",
"stop_time",
":",
"raise",
"RuntimeError",
"(",
"\"Invalid time delta specification\"",
")",
"delta_time",
"=",
"stop_time",
"-",
"start_time",
"# Python 2.6 datetime o... | r"""
Return a formatted string with the elapsed time between two time points.
The string includes years (365 days), months (30 days), days (24 hours),
hours (60 minutes), minutes (60 seconds) and seconds. If both arguments
are equal, the string returned is :code:`'None'`; otherwise, the string
returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s],
[MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s],
etc.) is omitted if the value of that part is null/zero
:param start_time: Starting time point
:type start_time: `datetime <https://docs.python.org/3/library/
datetime.html#datetime-objects>`_
:param stop_time: Ending time point
:type stop_time: `datetime`
:rtype: string
:raises: RuntimeError (Invalid time delta specification)
For example:
>>> import datetime, pmisc
>>> start_time = datetime.datetime(2014, 1, 1, 1, 10, 1)
>>> stop_time = datetime.datetime(2015, 1, 3, 1, 10, 3)
>>> pmisc.elapsed_time_string(start_time, stop_time)
'1 year, 2 days and 2 seconds' | [
"r",
"Return",
"a",
"formatted",
"string",
"with",
"the",
"elapsed",
"time",
"between",
"two",
"time",
"points",
"."
] | dd2bb32e59eee872f1ef2db2d9921a396ab9f50b | https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L95-L158 | train |
pmacosta/pmisc | pmisc/strings.py | pcolor | def pcolor(text, color, indent=0):
r"""
Return a string that once printed is colorized.
:param text: Text to colorize
:type text: string
:param color: Color to use, one of :code:`'black'`, :code:`'red'`,
:code:`'green'`, :code:`'yellow'`, :code:`'blue'`,
:code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or
:code:`'none'` (case insensitive)
:type color: string
:param indent: Number of spaces to prefix the output with
:type indent: integer
:rtype: string
:raises:
* RuntimeError (Argument \`color\` is not valid)
* RuntimeError (Argument \`indent\` is not valid)
* RuntimeError (Argument \`text\` is not valid)
* ValueError (Unknown color *[color]*)
"""
esc_dict = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"none": -1,
}
if not isinstance(text, str):
raise RuntimeError("Argument `text` is not valid")
if not isinstance(color, str):
raise RuntimeError("Argument `color` is not valid")
if not isinstance(indent, int):
raise RuntimeError("Argument `indent` is not valid")
color = color.lower()
if color not in esc_dict:
raise ValueError("Unknown color {color}".format(color=color))
if esc_dict[color] != -1:
return "\033[{color_code}m{indent}{text}\033[0m".format(
color_code=esc_dict[color], indent=" " * indent, text=text
)
return "{indent}{text}".format(indent=" " * indent, text=text) | python | def pcolor(text, color, indent=0):
r"""
Return a string that once printed is colorized.
:param text: Text to colorize
:type text: string
:param color: Color to use, one of :code:`'black'`, :code:`'red'`,
:code:`'green'`, :code:`'yellow'`, :code:`'blue'`,
:code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or
:code:`'none'` (case insensitive)
:type color: string
:param indent: Number of spaces to prefix the output with
:type indent: integer
:rtype: string
:raises:
* RuntimeError (Argument \`color\` is not valid)
* RuntimeError (Argument \`indent\` is not valid)
* RuntimeError (Argument \`text\` is not valid)
* ValueError (Unknown color *[color]*)
"""
esc_dict = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"none": -1,
}
if not isinstance(text, str):
raise RuntimeError("Argument `text` is not valid")
if not isinstance(color, str):
raise RuntimeError("Argument `color` is not valid")
if not isinstance(indent, int):
raise RuntimeError("Argument `indent` is not valid")
color = color.lower()
if color not in esc_dict:
raise ValueError("Unknown color {color}".format(color=color))
if esc_dict[color] != -1:
return "\033[{color_code}m{indent}{text}\033[0m".format(
color_code=esc_dict[color], indent=" " * indent, text=text
)
return "{indent}{text}".format(indent=" " * indent, text=text) | [
"def",
"pcolor",
"(",
"text",
",",
"color",
",",
"indent",
"=",
"0",
")",
":",
"esc_dict",
"=",
"{",
"\"black\"",
":",
"30",
",",
"\"red\"",
":",
"31",
",",
"\"green\"",
":",
"32",
",",
"\"yellow\"",
":",
"33",
",",
"\"blue\"",
":",
"34",
",",
"\... | r"""
Return a string that once printed is colorized.
:param text: Text to colorize
:type text: string
:param color: Color to use, one of :code:`'black'`, :code:`'red'`,
:code:`'green'`, :code:`'yellow'`, :code:`'blue'`,
:code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or
:code:`'none'` (case insensitive)
:type color: string
:param indent: Number of spaces to prefix the output with
:type indent: integer
:rtype: string
:raises:
* RuntimeError (Argument \`color\` is not valid)
* RuntimeError (Argument \`indent\` is not valid)
* RuntimeError (Argument \`text\` is not valid)
* ValueError (Unknown color *[color]*) | [
"r",
"Return",
"a",
"string",
"that",
"once",
"printed",
"is",
"colorized",
"."
] | dd2bb32e59eee872f1ef2db2d9921a396ab9f50b | https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L161-L212 | train |
pmacosta/pmisc | pmisc/strings.py | quote_str | def quote_str(obj):
r"""
Add extra quotes to a string.
If the argument is not a string it is returned unmodified.
:param obj: Object
:type obj: any
:rtype: Same as argument
For example:
>>> import pmisc
>>> pmisc.quote_str(5)
5
>>> pmisc.quote_str('Hello!')
'"Hello!"'
>>> pmisc.quote_str('He said "hello!"')
'\'He said "hello!"\''
"""
if not isinstance(obj, str):
return obj
return "'{obj}'".format(obj=obj) if '"' in obj else '"{obj}"'.format(obj=obj) | python | def quote_str(obj):
r"""
Add extra quotes to a string.
If the argument is not a string it is returned unmodified.
:param obj: Object
:type obj: any
:rtype: Same as argument
For example:
>>> import pmisc
>>> pmisc.quote_str(5)
5
>>> pmisc.quote_str('Hello!')
'"Hello!"'
>>> pmisc.quote_str('He said "hello!"')
'\'He said "hello!"\''
"""
if not isinstance(obj, str):
return obj
return "'{obj}'".format(obj=obj) if '"' in obj else '"{obj}"'.format(obj=obj) | [
"def",
"quote_str",
"(",
"obj",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"return",
"obj",
"return",
"\"'{obj}'\"",
".",
"format",
"(",
"obj",
"=",
"obj",
")",
"if",
"'\"'",
"in",
"obj",
"else",
"'\"{obj}\"'",
".",
"forma... | r"""
Add extra quotes to a string.
If the argument is not a string it is returned unmodified.
:param obj: Object
:type obj: any
:rtype: Same as argument
For example:
>>> import pmisc
>>> pmisc.quote_str(5)
5
>>> pmisc.quote_str('Hello!')
'"Hello!"'
>>> pmisc.quote_str('He said "hello!"')
'\'He said "hello!"\'' | [
"r",
"Add",
"extra",
"quotes",
"to",
"a",
"string",
"."
] | dd2bb32e59eee872f1ef2db2d9921a396ab9f50b | https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L215-L238 | train |
pmacosta/pmisc | pmisc/strings.py | strframe | def strframe(obj, extended=False):
"""
Return a string with a frame record pretty-formatted.
The record is typically an item in a list generated by `inspect.stack()
<https://docs.python.org/3/library/inspect.html#inspect.stack>`_).
:param obj: Frame record
:type obj: tuple
:param extended: Flag that indicates whether contents of the frame object
are printed (True) or not (False)
:type extended: boolean
:rtype: string
"""
# Stack frame -> (frame object [0], filename [1], line number of current
# line [2], function name [3], list of lines of context from source
# code [4], index of current line within list [5])
fname = normalize_windows_fname(obj[1])
ret = list()
ret.append(pcolor("Frame object ID: {0}".format(hex(id(obj[0]))), "yellow"))
ret.append("File name......: {0}".format(fname))
ret.append("Line number....: {0}".format(obj[2]))
ret.append("Function name..: {0}".format(obj[3]))
ret.append("Context........: {0}".format(obj[4]))
ret.append("Index..........: {0}".format(obj[5]))
if extended:
ret.append("f_back ID......: {0}".format(hex(id(obj[0].f_back))))
ret.append("f_builtins.....: {0}".format(obj[0].f_builtins))
ret.append("f_code.........: {0}".format(obj[0].f_code))
ret.append("f_globals......: {0}".format(obj[0].f_globals))
ret.append("f_lasti........: {0}".format(obj[0].f_lasti))
ret.append("f_lineno.......: {0}".format(obj[0].f_lineno))
ret.append("f_locals.......: {0}".format(obj[0].f_locals))
if hasattr(obj[0], "f_restricted"): # pragma: no cover
ret.append("f_restricted...: {0}".format(obj[0].f_restricted))
ret.append("f_trace........: {0}".format(obj[0].f_trace))
return "\n".join(ret) | python | def strframe(obj, extended=False):
"""
Return a string with a frame record pretty-formatted.
The record is typically an item in a list generated by `inspect.stack()
<https://docs.python.org/3/library/inspect.html#inspect.stack>`_).
:param obj: Frame record
:type obj: tuple
:param extended: Flag that indicates whether contents of the frame object
are printed (True) or not (False)
:type extended: boolean
:rtype: string
"""
# Stack frame -> (frame object [0], filename [1], line number of current
# line [2], function name [3], list of lines of context from source
# code [4], index of current line within list [5])
fname = normalize_windows_fname(obj[1])
ret = list()
ret.append(pcolor("Frame object ID: {0}".format(hex(id(obj[0]))), "yellow"))
ret.append("File name......: {0}".format(fname))
ret.append("Line number....: {0}".format(obj[2]))
ret.append("Function name..: {0}".format(obj[3]))
ret.append("Context........: {0}".format(obj[4]))
ret.append("Index..........: {0}".format(obj[5]))
if extended:
ret.append("f_back ID......: {0}".format(hex(id(obj[0].f_back))))
ret.append("f_builtins.....: {0}".format(obj[0].f_builtins))
ret.append("f_code.........: {0}".format(obj[0].f_code))
ret.append("f_globals......: {0}".format(obj[0].f_globals))
ret.append("f_lasti........: {0}".format(obj[0].f_lasti))
ret.append("f_lineno.......: {0}".format(obj[0].f_lineno))
ret.append("f_locals.......: {0}".format(obj[0].f_locals))
if hasattr(obj[0], "f_restricted"): # pragma: no cover
ret.append("f_restricted...: {0}".format(obj[0].f_restricted))
ret.append("f_trace........: {0}".format(obj[0].f_trace))
return "\n".join(ret) | [
"def",
"strframe",
"(",
"obj",
",",
"extended",
"=",
"False",
")",
":",
"# Stack frame -> (frame object [0], filename [1], line number of current",
"# line [2], function name [3], list of lines of context from source",
"# code [4], index of current line within list [5])",
"fname",
"=",
... | Return a string with a frame record pretty-formatted.
The record is typically an item in a list generated by `inspect.stack()
<https://docs.python.org/3/library/inspect.html#inspect.stack>`_).
:param obj: Frame record
:type obj: tuple
:param extended: Flag that indicates whether contents of the frame object
are printed (True) or not (False)
:type extended: boolean
:rtype: string | [
"Return",
"a",
"string",
"with",
"a",
"frame",
"record",
"pretty",
"-",
"formatted",
"."
] | dd2bb32e59eee872f1ef2db2d9921a396ab9f50b | https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L241-L279 | train |
limix/optimix | optimix/_variables.py | Variables.set | def set(self, x):
"""
Set variable values via a dictionary mapping name to value.
"""
for name, value in iter(x.items()):
if hasattr(value, "ndim"):
if self[name].value.ndim < value.ndim:
self[name].value.itemset(value.squeeze())
else:
self[name].value = value
else:
self[name].value.itemset(value) | python | def set(self, x):
"""
Set variable values via a dictionary mapping name to value.
"""
for name, value in iter(x.items()):
if hasattr(value, "ndim"):
if self[name].value.ndim < value.ndim:
self[name].value.itemset(value.squeeze())
else:
self[name].value = value
else:
self[name].value.itemset(value) | [
"def",
"set",
"(",
"self",
",",
"x",
")",
":",
"for",
"name",
",",
"value",
"in",
"iter",
"(",
"x",
".",
"items",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"\"ndim\"",
")",
":",
"if",
"self",
"[",
"name",
"]",
".",
"value",
".",... | Set variable values via a dictionary mapping name to value. | [
"Set",
"variable",
"values",
"via",
"a",
"dictionary",
"mapping",
"name",
"to",
"value",
"."
] | d7b1356df259c9f6ee0d658258fb47d0074fc416 | https://github.com/limix/optimix/blob/d7b1356df259c9f6ee0d658258fb47d0074fc416/optimix/_variables.py#L6-L17 | train |
limix/optimix | optimix/_variables.py | Variables.select | def select(self, fixed):
"""
Return a subset of variables according to ``fixed``.
"""
names = [n for n in self.names() if self[n].isfixed == fixed]
return Variables({n: self[n] for n in names}) | python | def select(self, fixed):
"""
Return a subset of variables according to ``fixed``.
"""
names = [n for n in self.names() if self[n].isfixed == fixed]
return Variables({n: self[n] for n in names}) | [
"def",
"select",
"(",
"self",
",",
"fixed",
")",
":",
"names",
"=",
"[",
"n",
"for",
"n",
"in",
"self",
".",
"names",
"(",
")",
"if",
"self",
"[",
"n",
"]",
".",
"isfixed",
"==",
"fixed",
"]",
"return",
"Variables",
"(",
"{",
"n",
":",
"self",
... | Return a subset of variables according to ``fixed``. | [
"Return",
"a",
"subset",
"of",
"variables",
"according",
"to",
"fixed",
"."
] | d7b1356df259c9f6ee0d658258fb47d0074fc416 | https://github.com/limix/optimix/blob/d7b1356df259c9f6ee0d658258fb47d0074fc416/optimix/_variables.py#L19-L24 | train |
storborg/packagetrack | packagetrack/usps.py | USPSInterface.validate | def validate(self, tracking_number):
"Return True if this is a valid USPS tracking number."
tracking_num = tracking_number[:-1].replace(' ', '')
odd_total = 0
even_total = 0
for ii, digit in enumerate(tracking_num):
if ii % 2:
odd_total += int(digit)
else:
even_total += int(digit)
total = odd_total + even_total * 3
check = ((total - (total % 10) + 10) - total) % 10
return (check == int(tracking_number[-1:])) | python | def validate(self, tracking_number):
"Return True if this is a valid USPS tracking number."
tracking_num = tracking_number[:-1].replace(' ', '')
odd_total = 0
even_total = 0
for ii, digit in enumerate(tracking_num):
if ii % 2:
odd_total += int(digit)
else:
even_total += int(digit)
total = odd_total + even_total * 3
check = ((total - (total % 10) + 10) - total) % 10
return (check == int(tracking_number[-1:])) | [
"def",
"validate",
"(",
"self",
",",
"tracking_number",
")",
":",
"tracking_num",
"=",
"tracking_number",
"[",
":",
"-",
"1",
"]",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"odd_total",
"=",
"0",
"even_total",
"=",
"0",
"for",
"ii",
",",
"digit",
"... | Return True if this is a valid USPS tracking number. | [
"Return",
"True",
"if",
"this",
"is",
"a",
"valid",
"USPS",
"tracking",
"number",
"."
] | e1e5417565b8e2a919936713e1939db5aa895e56 | https://github.com/storborg/packagetrack/blob/e1e5417565b8e2a919936713e1939db5aa895e56/packagetrack/usps.py#L11-L23 | train |
storborg/packagetrack | packagetrack/ups.py | UPSInterface.validate | def validate(self, tracking_number):
"Return True if this is a valid UPS tracking number."
tracking_num = tracking_number[2:-1]
odd_total = 0
even_total = 0
for ii, digit in enumerate(tracking_num.upper()):
try:
value = int(digit)
except ValueError:
value = int((ord(digit) - 63) % 10)
if (ii + 1) % 2:
odd_total += value
else:
even_total += value
total = odd_total + even_total * 2
check = ((total - (total % 10) + 10) - total) % 10
return (check == int(tracking_number[-1:])) | python | def validate(self, tracking_number):
"Return True if this is a valid UPS tracking number."
tracking_num = tracking_number[2:-1]
odd_total = 0
even_total = 0
for ii, digit in enumerate(tracking_num.upper()):
try:
value = int(digit)
except ValueError:
value = int((ord(digit) - 63) % 10)
if (ii + 1) % 2:
odd_total += value
else:
even_total += value
total = odd_total + even_total * 2
check = ((total - (total % 10) + 10) - total) % 10
return (check == int(tracking_number[-1:])) | [
"def",
"validate",
"(",
"self",
",",
"tracking_number",
")",
":",
"tracking_num",
"=",
"tracking_number",
"[",
"2",
":",
"-",
"1",
"]",
"odd_total",
"=",
"0",
"even_total",
"=",
"0",
"for",
"ii",
",",
"digit",
"in",
"enumerate",
"(",
"tracking_num",
".",... | Return True if this is a valid UPS tracking number. | [
"Return",
"True",
"if",
"this",
"is",
"a",
"valid",
"UPS",
"tracking",
"number",
"."
] | e1e5417565b8e2a919936713e1939db5aa895e56 | https://github.com/storborg/packagetrack/blob/e1e5417565b8e2a919936713e1939db5aa895e56/packagetrack/ups.py#L18-L36 | train |
storborg/packagetrack | packagetrack/ups.py | UPSInterface.track | def track(self, tracking_number):
"Track a UPS package by number. Returns just a delivery date."
resp = self.send_request(tracking_number)
return self.parse_response(resp) | python | def track(self, tracking_number):
"Track a UPS package by number. Returns just a delivery date."
resp = self.send_request(tracking_number)
return self.parse_response(resp) | [
"def",
"track",
"(",
"self",
",",
"tracking_number",
")",
":",
"resp",
"=",
"self",
".",
"send_request",
"(",
"tracking_number",
")",
"return",
"self",
".",
"parse_response",
"(",
"resp",
")"
] | Track a UPS package by number. Returns just a delivery date. | [
"Track",
"a",
"UPS",
"package",
"by",
"number",
".",
"Returns",
"just",
"a",
"delivery",
"date",
"."
] | e1e5417565b8e2a919936713e1939db5aa895e56 | https://github.com/storborg/packagetrack/blob/e1e5417565b8e2a919936713e1939db5aa895e56/packagetrack/ups.py#L86-L89 | train |
stephantul/reach | reach/reach.py | Reach.load | def load(pathtovector,
wordlist=(),
num_to_load=None,
truncate_embeddings=None,
unk_word=None,
sep=" "):
r"""
Read a file in word2vec .txt format.
The load function will raise a ValueError when trying to load items
which do not conform to line lengths.
Parameters
----------
pathtovector : string
The path to the vector file.
header : bool
Whether the vector file has a header of the type
(NUMBER OF ITEMS, SIZE OF VECTOR).
wordlist : iterable, optional, default ()
A list of words you want loaded from the vector file. If this is
None (default), all words will be loaded.
num_to_load : int, optional, default None
The number of items to load from the file. Because loading can take
some time, it is sometimes useful to onlyl load the first n items
from a vector file for quick inspection.
truncate_embeddings : int, optional, default None
If this value is not None, the vectors in the vector space will
be truncated to the number of dimensions indicated by this value.
unk_word : object
The object to treat as UNK in your vector space. If this is not
in your items dictionary after loading, we add it with a zero
vector.
Returns
-------
r : Reach
An initialized Reach instance.
"""
vectors, items = Reach._load(pathtovector,
wordlist,
num_to_load,
truncate_embeddings,
sep)
if unk_word is not None:
if unk_word not in set(items):
unk_vec = np.zeros((1, vectors.shape[1]))
vectors = np.concatenate([unk_vec, vectors], 0)
items = [unk_word] + items
unk_index = 0
else:
unk_index = items.index(unk_word)
else:
unk_index = None
return Reach(vectors,
items,
name=os.path.split(pathtovector)[-1],
unk_index=unk_index) | python | def load(pathtovector,
wordlist=(),
num_to_load=None,
truncate_embeddings=None,
unk_word=None,
sep=" "):
r"""
Read a file in word2vec .txt format.
The load function will raise a ValueError when trying to load items
which do not conform to line lengths.
Parameters
----------
pathtovector : string
The path to the vector file.
header : bool
Whether the vector file has a header of the type
(NUMBER OF ITEMS, SIZE OF VECTOR).
wordlist : iterable, optional, default ()
A list of words you want loaded from the vector file. If this is
None (default), all words will be loaded.
num_to_load : int, optional, default None
The number of items to load from the file. Because loading can take
some time, it is sometimes useful to onlyl load the first n items
from a vector file for quick inspection.
truncate_embeddings : int, optional, default None
If this value is not None, the vectors in the vector space will
be truncated to the number of dimensions indicated by this value.
unk_word : object
The object to treat as UNK in your vector space. If this is not
in your items dictionary after loading, we add it with a zero
vector.
Returns
-------
r : Reach
An initialized Reach instance.
"""
vectors, items = Reach._load(pathtovector,
wordlist,
num_to_load,
truncate_embeddings,
sep)
if unk_word is not None:
if unk_word not in set(items):
unk_vec = np.zeros((1, vectors.shape[1]))
vectors = np.concatenate([unk_vec, vectors], 0)
items = [unk_word] + items
unk_index = 0
else:
unk_index = items.index(unk_word)
else:
unk_index = None
return Reach(vectors,
items,
name=os.path.split(pathtovector)[-1],
unk_index=unk_index) | [
"def",
"load",
"(",
"pathtovector",
",",
"wordlist",
"=",
"(",
")",
",",
"num_to_load",
"=",
"None",
",",
"truncate_embeddings",
"=",
"None",
",",
"unk_word",
"=",
"None",
",",
"sep",
"=",
"\" \"",
")",
":",
"vectors",
",",
"items",
"=",
"Reach",
".",
... | r"""
Read a file in word2vec .txt format.
The load function will raise a ValueError when trying to load items
which do not conform to line lengths.
Parameters
----------
pathtovector : string
The path to the vector file.
header : bool
Whether the vector file has a header of the type
(NUMBER OF ITEMS, SIZE OF VECTOR).
wordlist : iterable, optional, default ()
A list of words you want loaded from the vector file. If this is
None (default), all words will be loaded.
num_to_load : int, optional, default None
The number of items to load from the file. Because loading can take
some time, it is sometimes useful to onlyl load the first n items
from a vector file for quick inspection.
truncate_embeddings : int, optional, default None
If this value is not None, the vectors in the vector space will
be truncated to the number of dimensions indicated by this value.
unk_word : object
The object to treat as UNK in your vector space. If this is not
in your items dictionary after loading, we add it with a zero
vector.
Returns
-------
r : Reach
An initialized Reach instance. | [
"r",
"Read",
"a",
"file",
"in",
"word2vec",
".",
"txt",
"format",
"."
] | e5ed0cc895d17429e797c6d7dd57bce82ff00d5d | https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L74-L133 | train |
stephantul/reach | reach/reach.py | Reach._load | def _load(pathtovector,
wordlist,
num_to_load=None,
truncate_embeddings=None,
sep=" "):
"""Load a matrix and wordlist from a .vec file."""
vectors = []
addedwords = set()
words = []
try:
wordlist = set(wordlist)
except ValueError:
wordlist = set()
logger.info("Loading {0}".format(pathtovector))
firstline = open(pathtovector).readline().strip()
try:
num, size = firstline.split(sep)
num, size = int(num), int(size)
logger.info("Vector space: {} by {}".format(num, size))
header = True
except ValueError:
size = len(firstline.split(sep)) - 1
logger.info("Vector space: {} dim, # items unknown".format(size))
word, rest = firstline.split(sep, 1)
# If the first line is correctly parseable, set header to False.
header = False
if truncate_embeddings is None or truncate_embeddings == 0:
truncate_embeddings = size
for idx, line in enumerate(open(pathtovector, encoding='utf-8')):
if header and idx == 0:
continue
word, rest = line.rstrip(" \n").split(sep, 1)
if wordlist and word not in wordlist:
continue
if word in addedwords:
raise ValueError("Duplicate: {} on line {} was in the "
"vector space twice".format(word, idx))
if len(rest.split(sep)) != size:
raise ValueError("Incorrect input at index {}, size "
"is {}, expected "
"{}".format(idx+1,
len(rest.split(sep)), size))
words.append(word)
addedwords.add(word)
vectors.append(np.fromstring(rest, sep=sep)[:truncate_embeddings])
if num_to_load is not None and len(addedwords) >= num_to_load:
break
vectors = np.array(vectors).astype(np.float32)
logger.info("Loading finished")
if wordlist:
diff = wordlist - addedwords
if diff:
logger.info("Not all items from your wordlist were in your "
"vector space: {}.".format(diff))
return vectors, words | python | def _load(pathtovector,
wordlist,
num_to_load=None,
truncate_embeddings=None,
sep=" "):
"""Load a matrix and wordlist from a .vec file."""
vectors = []
addedwords = set()
words = []
try:
wordlist = set(wordlist)
except ValueError:
wordlist = set()
logger.info("Loading {0}".format(pathtovector))
firstline = open(pathtovector).readline().strip()
try:
num, size = firstline.split(sep)
num, size = int(num), int(size)
logger.info("Vector space: {} by {}".format(num, size))
header = True
except ValueError:
size = len(firstline.split(sep)) - 1
logger.info("Vector space: {} dim, # items unknown".format(size))
word, rest = firstline.split(sep, 1)
# If the first line is correctly parseable, set header to False.
header = False
if truncate_embeddings is None or truncate_embeddings == 0:
truncate_embeddings = size
for idx, line in enumerate(open(pathtovector, encoding='utf-8')):
if header and idx == 0:
continue
word, rest = line.rstrip(" \n").split(sep, 1)
if wordlist and word not in wordlist:
continue
if word in addedwords:
raise ValueError("Duplicate: {} on line {} was in the "
"vector space twice".format(word, idx))
if len(rest.split(sep)) != size:
raise ValueError("Incorrect input at index {}, size "
"is {}, expected "
"{}".format(idx+1,
len(rest.split(sep)), size))
words.append(word)
addedwords.add(word)
vectors.append(np.fromstring(rest, sep=sep)[:truncate_embeddings])
if num_to_load is not None and len(addedwords) >= num_to_load:
break
vectors = np.array(vectors).astype(np.float32)
logger.info("Loading finished")
if wordlist:
diff = wordlist - addedwords
if diff:
logger.info("Not all items from your wordlist were in your "
"vector space: {}.".format(diff))
return vectors, words | [
"def",
"_load",
"(",
"pathtovector",
",",
"wordlist",
",",
"num_to_load",
"=",
"None",
",",
"truncate_embeddings",
"=",
"None",
",",
"sep",
"=",
"\" \"",
")",
":",
"vectors",
"=",
"[",
"]",
"addedwords",
"=",
"set",
"(",
")",
"words",
"=",
"[",
"]",
... | Load a matrix and wordlist from a .vec file. | [
"Load",
"a",
"matrix",
"and",
"wordlist",
"from",
"a",
".",
"vec",
"file",
"."
] | e5ed0cc895d17429e797c6d7dd57bce82ff00d5d | https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L136-L205 | train |
stephantul/reach | reach/reach.py | Reach.vectorize | def vectorize(self, tokens, remove_oov=False, norm=False):
"""
Vectorize a sentence by replacing all items with their vectors.
Parameters
----------
tokens : object or list of objects
The tokens to vectorize.
remove_oov : bool, optional, default False
Whether to remove OOV items. If False, OOV items are replaced by
the UNK glyph. If this is True, the returned sequence might
have a different length than the original sequence.
norm : bool, optional, default False
Whether to return the unit vectors, or the regular vectors.
Returns
-------
s : numpy array
An M * N matrix, where every item has been replaced by
its vector. OOV items are either removed, or replaced
by the value of the UNK glyph.
"""
if not tokens:
raise ValueError("You supplied an empty list.")
index = list(self.bow(tokens, remove_oov=remove_oov))
if not index:
raise ValueError("You supplied a list with only OOV tokens: {}, "
"which then got removed. Set remove_oov to False,"
" or filter your sentences to remove any in which"
" all items are OOV.")
if norm:
return np.stack([self.norm_vectors[x] for x in index])
else:
return np.stack([self.vectors[x] for x in index]) | python | def vectorize(self, tokens, remove_oov=False, norm=False):
"""
Vectorize a sentence by replacing all items with their vectors.
Parameters
----------
tokens : object or list of objects
The tokens to vectorize.
remove_oov : bool, optional, default False
Whether to remove OOV items. If False, OOV items are replaced by
the UNK glyph. If this is True, the returned sequence might
have a different length than the original sequence.
norm : bool, optional, default False
Whether to return the unit vectors, or the regular vectors.
Returns
-------
s : numpy array
An M * N matrix, where every item has been replaced by
its vector. OOV items are either removed, or replaced
by the value of the UNK glyph.
"""
if not tokens:
raise ValueError("You supplied an empty list.")
index = list(self.bow(tokens, remove_oov=remove_oov))
if not index:
raise ValueError("You supplied a list with only OOV tokens: {}, "
"which then got removed. Set remove_oov to False,"
" or filter your sentences to remove any in which"
" all items are OOV.")
if norm:
return np.stack([self.norm_vectors[x] for x in index])
else:
return np.stack([self.vectors[x] for x in index]) | [
"def",
"vectorize",
"(",
"self",
",",
"tokens",
",",
"remove_oov",
"=",
"False",
",",
"norm",
"=",
"False",
")",
":",
"if",
"not",
"tokens",
":",
"raise",
"ValueError",
"(",
"\"You supplied an empty list.\"",
")",
"index",
"=",
"list",
"(",
"self",
".",
... | Vectorize a sentence by replacing all items with their vectors.
Parameters
----------
tokens : object or list of objects
The tokens to vectorize.
remove_oov : bool, optional, default False
Whether to remove OOV items. If False, OOV items are replaced by
the UNK glyph. If this is True, the returned sequence might
have a different length than the original sequence.
norm : bool, optional, default False
Whether to return the unit vectors, or the regular vectors.
Returns
-------
s : numpy array
An M * N matrix, where every item has been replaced by
its vector. OOV items are either removed, or replaced
by the value of the UNK glyph. | [
"Vectorize",
"a",
"sentence",
"by",
"replacing",
"all",
"items",
"with",
"their",
"vectors",
"."
] | e5ed0cc895d17429e797c6d7dd57bce82ff00d5d | https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L211-L245 | train |
stephantul/reach | reach/reach.py | Reach.bow | def bow(self, tokens, remove_oov=False):
"""
Create a bow representation of a list of tokens.
Parameters
----------
tokens : list.
The list of items to change into a bag of words representation.
remove_oov : bool.
Whether to remove OOV items from the input.
If this is True, the length of the returned BOW representation
might not be the length of the original representation.
Returns
-------
bow : generator
A BOW representation of the list of items.
"""
if remove_oov:
tokens = [x for x in tokens if x in self.items]
for t in tokens:
try:
yield self.items[t]
except KeyError:
if self.unk_index is None:
raise ValueError("You supplied OOV items but didn't "
"provide the index of the replacement "
"glyph. Either set remove_oov to True, "
"or set unk_index to the index of the "
"item which replaces any OOV items.")
yield self.unk_index | python | def bow(self, tokens, remove_oov=False):
"""
Create a bow representation of a list of tokens.
Parameters
----------
tokens : list.
The list of items to change into a bag of words representation.
remove_oov : bool.
Whether to remove OOV items from the input.
If this is True, the length of the returned BOW representation
might not be the length of the original representation.
Returns
-------
bow : generator
A BOW representation of the list of items.
"""
if remove_oov:
tokens = [x for x in tokens if x in self.items]
for t in tokens:
try:
yield self.items[t]
except KeyError:
if self.unk_index is None:
raise ValueError("You supplied OOV items but didn't "
"provide the index of the replacement "
"glyph. Either set remove_oov to True, "
"or set unk_index to the index of the "
"item which replaces any OOV items.")
yield self.unk_index | [
"def",
"bow",
"(",
"self",
",",
"tokens",
",",
"remove_oov",
"=",
"False",
")",
":",
"if",
"remove_oov",
":",
"tokens",
"=",
"[",
"x",
"for",
"x",
"in",
"tokens",
"if",
"x",
"in",
"self",
".",
"items",
"]",
"for",
"t",
"in",
"tokens",
":",
"try",... | Create a bow representation of a list of tokens.
Parameters
----------
tokens : list.
The list of items to change into a bag of words representation.
remove_oov : bool.
Whether to remove OOV items from the input.
If this is True, the length of the returned BOW representation
might not be the length of the original representation.
Returns
-------
bow : generator
A BOW representation of the list of items. | [
"Create",
"a",
"bow",
"representation",
"of",
"a",
"list",
"of",
"tokens",
"."
] | e5ed0cc895d17429e797c6d7dd57bce82ff00d5d | https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L247-L279 | train |
stephantul/reach | reach/reach.py | Reach.transform | def transform(self, corpus, remove_oov=False, norm=False):
"""
Transform a corpus by repeated calls to vectorize, defined above.
Parameters
----------
corpus : A list of strings, list of list of strings.
Represents a corpus as a list of sentences, where sentences
can either be strings or lists of tokens.
remove_oov : bool, optional, default False
If True, removes OOV items from the input before vectorization.
Returns
-------
c : list
A list of numpy arrays, where each array represents the transformed
sentence in the original list. The list is guaranteed to be the
same length as the input list, but the arrays in the list may be
of different lengths, depending on whether remove_oov is True.
"""
return [self.vectorize(s, remove_oov=remove_oov, norm=norm)
for s in corpus] | python | def transform(self, corpus, remove_oov=False, norm=False):
"""
Transform a corpus by repeated calls to vectorize, defined above.
Parameters
----------
corpus : A list of strings, list of list of strings.
Represents a corpus as a list of sentences, where sentences
can either be strings or lists of tokens.
remove_oov : bool, optional, default False
If True, removes OOV items from the input before vectorization.
Returns
-------
c : list
A list of numpy arrays, where each array represents the transformed
sentence in the original list. The list is guaranteed to be the
same length as the input list, but the arrays in the list may be
of different lengths, depending on whether remove_oov is True.
"""
return [self.vectorize(s, remove_oov=remove_oov, norm=norm)
for s in corpus] | [
"def",
"transform",
"(",
"self",
",",
"corpus",
",",
"remove_oov",
"=",
"False",
",",
"norm",
"=",
"False",
")",
":",
"return",
"[",
"self",
".",
"vectorize",
"(",
"s",
",",
"remove_oov",
"=",
"remove_oov",
",",
"norm",
"=",
"norm",
")",
"for",
"s",
... | Transform a corpus by repeated calls to vectorize, defined above.
Parameters
----------
corpus : A list of strings, list of list of strings.
Represents a corpus as a list of sentences, where sentences
can either be strings or lists of tokens.
remove_oov : bool, optional, default False
If True, removes OOV items from the input before vectorization.
Returns
-------
c : list
A list of numpy arrays, where each array represents the transformed
sentence in the original list. The list is guaranteed to be the
same length as the input list, but the arrays in the list may be
of different lengths, depending on whether remove_oov is True. | [
"Transform",
"a",
"corpus",
"by",
"repeated",
"calls",
"to",
"vectorize",
"defined",
"above",
"."
] | e5ed0cc895d17429e797c6d7dd57bce82ff00d5d | https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L281-L303 | train |
stephantul/reach | reach/reach.py | Reach.most_similar | def most_similar(self,
items,
num=10,
batch_size=100,
show_progressbar=False,
return_names=True):
"""
Return the num most similar items to a given list of items.
Parameters
----------
items : list of objects or a single object.
The items to get the most similar items to.
num : int, optional, default 10
The number of most similar items to retrieve.
batch_size : int, optional, default 100.
The batch size to use. 100 is a good default option. Increasing
the batch size may increase the speed.
show_progressbar : bool, optional, default False
Whether to show a progressbar.
return_names : bool, optional, default True
Whether to return the item names, or just the distances.
Returns
-------
sim : array
For each items in the input the num most similar items are returned
in the form of (NAME, DISTANCE) tuples. If return_names is false,
the returned list just contains distances.
"""
# This line allows users to input single items.
# We used to rely on string identities, but we now also allow
# anything hashable as keys.
# Might fail if a list of passed items is also in the vocabulary.
# but I can't think of cases when this would happen, and what
# user expectations are.
try:
if items in self.items:
items = [items]
except TypeError:
pass
x = np.stack([self.norm_vectors[self.items[x]] for x in items])
result = self._batch(x,
batch_size,
num+1,
show_progressbar,
return_names)
# list call consumes the generator.
def most_similar(self,
                 items,
                 num=10,
                 batch_size=100,
                 show_progressbar=False,
                 return_names=True):
    """
    Return the num most similar items to a given list of items.

    Parameters
    ----------
    items : list of objects or a single object.
        The items to get the most similar items to.
    num : int, optional, default 10
        The number of most similar items to retrieve.
    batch_size : int, optional, default 100.
        The batch size to use. 100 is a good default option. Increasing
        the batch size may increase the speed.
    show_progressbar : bool, optional, default False
        Whether to show a progressbar.
    return_names : bool, optional, default True
        Whether to return the item names, or just the distances.

    Returns
    -------
    sim : array
        For each item in the input, the num most similar items, as
        (NAME, DISTANCE) tuples. If return_names is false, the returned
        lists just contain distances.
    """
    # Promote a single hashable, in-vocabulary item to a list of one.
    # A list argument is unhashable, so the membership test raises
    # TypeError and the input falls through unchanged.
    try:
        if items in self.items:
            items = [items]
    except TypeError:
        pass
    query = np.stack([self.norm_vectors[self.items[item]] for item in items])
    # num + 1 hits are requested; the leading hit of each result is
    # stripped below (presumably the query item itself).
    hits = self._batch(query,
                       batch_size,
                       num + 1,
                       show_progressbar,
                       return_names)
    # Building the list consumes the generator.
    return [row[1:] for row in hits]
def threshold(self,
              items,
              threshold=.5,
              batch_size=100,
              show_progressbar=False,
              return_names=True):
    """
    Return all items whose similarity is higher than threshold.

    Parameters
    ----------
    items : list of objects or a single object.
        The items to get the most similar items to.
    threshold : float, optional, default .5
        The radius within which to retrieve items.
    batch_size : int, optional, default 100.
        The batch size to use. 100 is a good default option. Increasing
        the batch size may increase the speed.
    show_progressbar : bool, optional, default False
        Whether to show a progressbar.
    return_names : bool, optional, default True
        Whether to return the item names, or just the distances.

    Returns
    -------
    sim : array
        For each item in the input, all items above the similarity
        threshold, as (NAME, DISTANCE) tuples. If return_names is false,
        the returned lists just contain distances.
    """
    # Promote a single hashable, in-vocabulary item to a list of one.
    # A list argument is unhashable, so the membership test raises
    # TypeError and the input falls through unchanged.
    try:
        if items in self.items:
            items = [items]
    except TypeError:
        pass
    query = np.stack([self.norm_vectors[self.items[item]] for item in items])
    hits = self._threshold_batch(query,
                                 batch_size,
                                 threshold,
                                 show_progressbar,
                                 return_names)
    # Building the list consumes the generator; the leading hit of each
    # result is dropped (presumably the query item itself, mirroring
    # most_similar).
    return [row[1:] for row in hits]
def nearest_neighbor(self,
                     vectors,
                     num=10,
                     batch_size=100,
                     show_progressbar=False,
                     return_names=True):
    """
    Find the nearest neighbors to some arbitrary vector.

    This function is meant to be used in composition operations. The
    most_similar function can only handle items that are in vocab, and
    looks up their vector through a dictionary. Compositions, e.g.
    "King - man + woman" are necessarily not in the vocabulary.

    Parameters
    ----------
    vectors : list of arrays or numpy array
        The vectors to find the nearest neighbors to.
    num : int, optional, default 10
        The number of most similar items to retrieve.
    batch_size : int, optional, default 100.
        The batch size to use. 100 is a good default option. Increasing
        the batch size may increase speed.
    show_progressbar : bool, optional, default False
        Whether to show a progressbar.
    return_names : bool, optional, default True
        Whether to return the item names, or just the distances.

    Returns
    -------
    sim : list of tuples.
        For each item in the input the most similar items are returned
        in the form of (NAME, DISTANCE) tuples. If return_names is set to
        false, only the distances are returned.
    """
    vectors = np.array(vectors)
    if np.ndim(vectors) == 1:
        # Promote a single vector to a batch of one.
        vectors = vectors[None, :]
    # NOTE(review): num + 1 hits are requested and, unlike most_similar,
    # none are stripped here, so callers receive num + 1 results per
    # input vector.
    result = self._batch(vectors,
                         batch_size,
                         num + 1,
                         show_progressbar,
                         return_names)
    return list(result)
def nearest_neighbor_threshold(self,
                               vectors,
                               threshold=.5,
                               batch_size=100,
                               show_progressbar=False,
                               return_names=True):
    """
    Find all neighbors above a similarity threshold to arbitrary vectors.

    This function is meant to be used in composition operations. The
    most_similar function can only handle items that are in vocab, and
    looks up their vector through a dictionary. Compositions, e.g.
    "King - man + woman" are necessarily not in the vocabulary.

    Parameters
    ----------
    vectors : list of arrays or numpy array
        The vectors to find the nearest neighbors to.
    threshold : float, optional, default .5
        The threshold within to retrieve items.
    batch_size : int, optional, default 100.
        The batch size to use. 100 is a good default option. Increasing
        the batch size may increase speed.
    show_progressbar : bool, optional, default False
        Whether to show a progressbar.
    return_names : bool, optional, default True
        Whether to return the item names, or just the distances.

    Returns
    -------
    sim : list of tuples.
        For each item in the input the items above the threshold are
        returned in the form of (NAME, DISTANCE) tuples. If return_names
        is set to false, only the distances are returned.
    """
    vectors = np.array(vectors)
    if np.ndim(vectors) == 1:
        # Promote a single vector to a batch of one.
        vectors = vectors[None, :]
    result = self._threshold_batch(vectors,
                                   batch_size,
                                   threshold,
                                   show_progressbar,
                                   return_names)
    return list(result)
def _threshold_batch(self,
                     vectors,
                     batch_size,
                     threshold,
                     show_progressbar,
                     return_names):
    """
    Batched cosine distance, filtered by a similarity threshold.

    For each input vector, yields the items whose clipped similarity is
    >= threshold, sorted by descending similarity. Note that clipping to
    [0, 1] also flattens genuinely negative cosine similarities to 0.

    Parameters
    ----------
    vectors : numpy array
        The query vectors; normalized to unit length before comparison.
    batch_size : int
        Number of query vectors processed per matrix product.
    threshold : float
        Minimum similarity a hit must reach to be yielded.
    show_progressbar : bool
        Whether to show a tqdm progressbar over the batches.
    return_names : bool
        If True, yield (NAME, DISTANCE) tuples; otherwise only distances.
    """
    vectors = self.normalize(vectors)
    # Single transpose outside the loop, makes things faster.
    reference_transposed = self.norm_vectors.T
    for i in tqdm(range(0, len(vectors), batch_size),
                  disable=not show_progressbar):
        distances = vectors[i: i+batch_size].dot(reference_transposed)
        # For safety we clip to [0, 1] to absorb floating-point drift.
        distances = np.clip(distances, a_min=.0, a_max=1.0)
        for dists in distances:
            indices = np.flatnonzero(dists >= threshold)
            sorted_indices = indices[np.argsort(-dists[indices])]
            if return_names:
                yield [(self.indices[d], dists[d])
                       for d in sorted_indices]
            else:
                yield list(dists[sorted_indices])
def _batch(self,
           vectors,
           batch_size,
           num,
           show_progressbar,
           return_names):
    """
    Batched cosine distance.

    For each input vector, yields the num most similar items, sorted by
    descending (clipped) similarity.
    """
    vectors = self.normalize(vectors)
    # Transpose the reference matrix once, outside the loop.
    reference_transposed = self.norm_vectors.T
    for start in tqdm(range(0, len(vectors), batch_size),
                      disable=not show_progressbar):
        sims = vectors[start: start + batch_size].dot(reference_transposed)
        # Clip to [0, 1] for safety against floating-point drift.
        sims = np.clip(sims, a_min=.0, a_max=1.0)
        if num == 1:
            top = np.argmax(sims, 1)[:, None]
        else:
            # argpartition gives the top candidates unordered; exact
            # ordering happens per row below.
            top = np.argpartition(-sims, kth=num, axis=1)[:, :num]
        for row, candidates in enumerate(top):
            scores = sims[row, candidates]
            if return_names:
                order = np.argsort(-scores)
                yield [(self.indices[candidates[pos]], scores[pos])
                       for pos in order]
            else:
                yield list(-1 * np.sort(-scores))
def normalize(vectors):
    """
    Normalize a matrix of row vectors to unit length.

    Contains a shortcut if there are no zero vectors in the matrix.
    If there are zero vectors, we do some indexing tricks to avoid
    dividing by 0. Zero vectors stay zero instead of becoming NaN.

    Parameters
    ----------
    vectors : np.array
        The vectors to normalize, either a single vector or a matrix.

    Returns
    -------
    vectors : np.array
        The input vectors, normalized to unit length.
    """
    # Single-vector case: return the unit vector, or zeros for a
    # zero-length input.
    if np.ndim(vectors) == 1:
        length = np.linalg.norm(vectors)
        if length == 0:
            return np.zeros_like(vectors)
        return vectors / length
    lengths = np.linalg.norm(vectors, axis=1)
    if not np.any(lengths == 0):
        # Fast path: no zero rows, divide everything at once.
        return vectors / lengths[:, None]
    # Slow path: only divide the rows with nonzero norm; zero rows are
    # left as zeros.
    out = np.zeros_like(vectors)
    nonzero_mask = lengths > 0
    out[nonzero_mask] = vectors[nonzero_mask] / lengths[nonzero_mask][:, None]
    return out
def vector_similarity(self, vector, items):
    """
    Compute the similarity between a vector and a set of items.

    The query vector is unit-normalized before the dot product, and the
    item vectors are taken from the pre-normalized matrix, so the result
    is a cosine similarity.
    """
    normed_query = self.normalize(vector)
    item_matrix = np.stack([self.norm_vectors[self.items[item]]
                            for item in items])
    return normed_query.dot(item_matrix.T)
def similarity(self, i1, i2):
    """
    Compute the similarity between two sets of items.

    Parameters
    ----------
    i1 : object
        The first set of items, or a single item.
    i2 : object
        The second set of items, or a single item.

    Returns
    -------
    sim : array of floats
        An array of similarity scores between 1 and 0.
    """
    def _listify(arg):
        # A single hashable, in-vocabulary item is wrapped in a list.
        # Unhashable arguments (e.g. lists) raise TypeError on the
        # membership test and pass through unchanged.
        try:
            if arg in self.items:
                return [arg]
        except TypeError:
            pass
        return arg

    i1 = _listify(i1)
    i2 = _listify(i2)
    first = np.stack([self.norm_vectors[self.items[item]] for item in i1])
    second = np.stack([self.norm_vectors[self.items[item]] for item in i2])
    return first.dot(second.T)
def prune(self, wordlist):
    """
    Prune the current reach instance by removing items.

    Parameters
    ----------
    wordlist : list of str
        A list of words to keep. Note that this wordlist need not include
        all words in the Reach instance. Any words which are in the
        wordlist, but not in the reach instance are ignored.

    Raises
    ------
    ValueError
        If the current unknown item is not part of the pruned vocabulary.
    """
    # Remove duplicates and unknown words; keep a list so we have a
    # fixed, indexable order for rebuilding the mappings.
    # BUGFIX: the previous version kept `wordlist` as a set and later
    # did `wordlist[self.unk_index]`, which raises TypeError (sets do
    # not support indexing) whenever unk_index is not None.
    kept = [w for w in set(wordlist) if w in self.items]
    indices = [self.items[w] for w in kept]
    if self.unk_index is not None and self.unk_index not in indices:
        raise ValueError("Your unknown item is not in your list of items. "
                         "Set it to None before pruning, or pass your "
                         "unknown item.")
    # Capture the unknown WORD before self.indices is replaced, so the
    # unk index can be remapped into the new vocabulary afterwards.
    unk_word = None
    if self.unk_index is not None:
        unk_word = self.indices[self.unk_index]
    self.vectors = self.vectors[indices]
    self.norm_vectors = self.norm_vectors[indices]
    self.items = {w: idx for idx, w in enumerate(kept)}
    self.indices = {v: k for k, v in self.items.items()}
    if unk_word is not None:
        self.unk_index = self.items[unk_word]
def save(self, path, write_header=True):
    """
    Save the current vector space in word2vec format.

    Parameters
    ----------
    path : str
        The path to save the vector file to.
    write_header : bool, optional, default True
        Whether to write a word2vec-style header as the first line of the
        file
    """
    with open(path, 'w') as outfile:
        if write_header:
            # word2vec header: "<num_rows> <num_cols>".
            num_rows, num_cols = self.vectors.shape
            outfile.write(u"{0} {1}\n".format(str(num_rows), str(num_cols)))
        # One line per item: the word followed by its vector components.
        for idx in range(len(self.items)):
            word = self.indices[idx]
            components = " ".join(str(value) for value in self.vectors[idx])
            outfile.write(u"{0} {1}\n".format(word, components))
def save_fast_format(self, filename):
    """
    Save a reach instance in a fast format.

    The reach fast format stores the words and vectors of a Reach instance
    separately in a JSON and numpy format, respectively.

    Parameters
    ----------
    filename : str
        The prefix to add to the saved filename. Note that this is not the
        real filename under which these items are stored.
        The words and unk_index are stored under "{filename}_items.json",
        and the numpy matrix is saved under "{filename}_vectors.npy".
    """
    # Items sorted by their index so position i in the list is index i.
    items, _ = zip(*sorted(self.items.items(), key=lambda x: x[1]))
    items = {"items": items,
             "unk_index": self.unk_index,
             "name": self.name}
    # BUGFIX: use context managers; the previous version passed bare
    # open() handles to json.dump/np.save and never closed them.
    with open("{}_items.json".format(filename), 'w') as items_file:
        json.dump(items, items_file)
    with open("{}_vectors.npy".format(filename), 'wb') as vectors_file:
        np.save(vectors_file, self.vectors)
def load_fast_format(filename):
    """
    Load a reach instance in fast format.

    As described above, the fast format stores the words and vectors of the
    Reach instance separately, and is drastically faster than loading from
    .txt files.

    Parameters
    ----------
    filename : str
        The filename prefix from which to load. Note that this is not a
        real filepath as such, but a shared prefix for both files.
        In order for this to work, both "{filename}_items.json" and
        "{filename}_vectors.npy" should be present.

    Returns
    -------
    reach : Reach
        A Reach instance built from the stored words and vectors.
    """
    # _load_fast returns the stored metadata plus the vector matrix.
    loaded = Reach._load_fast(filename)
    words, unk_index, name, vectors = loaded
    return Reach(vectors, words, unk_index=unk_index, name=name)
stephantul/reach | reach/reach.py | Reach._load_fast | def _load_fast(filename):
"""Sub for fast loader."""
it = json.load(open("{}_items.json".format(filename)))
words, unk_index, name = it["items"], it["unk_index"], it["name"]
vectors = np.load(open("{}_vectors.npy".format(filename), 'rb'))
return words, unk_index, name, vectors | python | def _load_fast(filename):
"""Sub for fast loader."""
it = json.load(open("{}_items.json".format(filename)))
words, unk_index, name = it["items"], it["unk_index"], it["name"]
vectors = np.load(open("{}_vectors.npy".format(filename), 'rb'))
return words, unk_index, name, vectors | [
"def",
"_load_fast",
"(",
"filename",
")",
":",
"it",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"\"{}_items.json\"",
".",
"format",
"(",
"filename",
")",
")",
")",
"words",
",",
"unk_index",
",",
"name",
"=",
"it",
"[",
"\"items\"",
"]",
",",
"it",... | Sub for fast loader. | [
"Sub",
"for",
"fast",
"loader",
"."
] | e5ed0cc895d17429e797c6d7dd57bce82ff00d5d | https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L748-L753 | train |
bjodah/finitediff | examples/sine.py | demo_usage | def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0):
"""
Plots a noisy sine curve and the fitting to it.
Also presents the error and the error in the
approximation of its first derivative (cosine curve)
Usage example for benchmarking:
$ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
Usage example for plotting:
$ python sine.py --nhead 1 --ntail 1 --plot
"""
x0, xend = 0, 5
# shaky linspace -5% to +5% noise
x_data = (np.linspace(x0, xend, n_data) +
np.random.rand(n_data)*(xend-x0)/n_data/1.5)
y_data = np.sin(x_data) * (1.0+0.1*(np.random.rand(n_data)-0.5))
x_fit = np.linspace(x0, xend, n_fit)
# Edges behave badly, work around:
x_fit[0] = x_fit[0] + (x_fit[1]-x_fit[0])/2
x_fit[-1] = x_fit[-2]+(x_fit[-1]-x_fit[-2])/2
if alt:
y_fit = np.empty(n_fit)
dydx_fit = np.empty(n_fit)
for i, xf in enumerate(x_fit):
# get index j of first data point beyond xf
j = np.where(x_data > xf)[0][0]
lower_bound = max(0, j-alt)
upper_bound = min(n_data-1, j+alt)
y_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound],
y_data[lower_bound:upper_bound], xf, 0)
dydx_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound],
y_data[lower_bound:upper_bound], xf, 1)[1]
else:
interp = interpolate_by_finite_diff(x_data, y_data, x_fit,
1, nhead, ntail)
y_fit = interp[:, 0]
dydx_fit = interp[:, 1]
if plot:
import matplotlib.pyplot as plt
plt.subplot(221)
plt.plot(x_data, y_data, 'x', label='Data points (sin)')
plt.plot(x_fit, y_fit, '-', label='Fitted curve (order=0)')
plt.plot(x_data, np.sin(x_data), '-', label='Analytic sin(x)')
plt.legend()
plt.subplot(222)
plt.plot(x_fit, y_fit-np.sin(x_fit), label='Error in order=0')
plt.legend()
plt.subplot(223)
plt.plot(x_fit, dydx_fit, '-', label='Fitted derivative (order=1)')
plt.plot(x_data, np.cos(x_data), '-', label='Analytic cos(x)')
plt.legend()
plt.subplot(224)
plt.plot(x_fit, dydx_fit-np.cos(x_fit), label='Error in order=1')
plt.legend()
plt.show() | python | def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0):
"""
Plots a noisy sine curve and the fitting to it.
Also presents the error and the error in the
approximation of its first derivative (cosine curve)
Usage example for benchmarking:
$ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
Usage example for plotting:
$ python sine.py --nhead 1 --ntail 1 --plot
"""
x0, xend = 0, 5
# shaky linspace -5% to +5% noise
x_data = (np.linspace(x0, xend, n_data) +
np.random.rand(n_data)*(xend-x0)/n_data/1.5)
y_data = np.sin(x_data) * (1.0+0.1*(np.random.rand(n_data)-0.5))
x_fit = np.linspace(x0, xend, n_fit)
# Edges behave badly, work around:
x_fit[0] = x_fit[0] + (x_fit[1]-x_fit[0])/2
x_fit[-1] = x_fit[-2]+(x_fit[-1]-x_fit[-2])/2
if alt:
y_fit = np.empty(n_fit)
dydx_fit = np.empty(n_fit)
for i, xf in enumerate(x_fit):
# get index j of first data point beyond xf
j = np.where(x_data > xf)[0][0]
lower_bound = max(0, j-alt)
upper_bound = min(n_data-1, j+alt)
y_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound],
y_data[lower_bound:upper_bound], xf, 0)
dydx_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound],
y_data[lower_bound:upper_bound], xf, 1)[1]
else:
interp = interpolate_by_finite_diff(x_data, y_data, x_fit,
1, nhead, ntail)
y_fit = interp[:, 0]
dydx_fit = interp[:, 1]
if plot:
import matplotlib.pyplot as plt
plt.subplot(221)
plt.plot(x_data, y_data, 'x', label='Data points (sin)')
plt.plot(x_fit, y_fit, '-', label='Fitted curve (order=0)')
plt.plot(x_data, np.sin(x_data), '-', label='Analytic sin(x)')
plt.legend()
plt.subplot(222)
plt.plot(x_fit, y_fit-np.sin(x_fit), label='Error in order=0')
plt.legend()
plt.subplot(223)
plt.plot(x_fit, dydx_fit, '-', label='Fitted derivative (order=1)')
plt.plot(x_data, np.cos(x_data), '-', label='Analytic cos(x)')
plt.legend()
plt.subplot(224)
plt.plot(x_fit, dydx_fit-np.cos(x_fit), label='Error in order=1')
plt.legend()
plt.show() | [
"def",
"demo_usage",
"(",
"n_data",
"=",
"50",
",",
"n_fit",
"=",
"537",
",",
"nhead",
"=",
"5",
",",
"ntail",
"=",
"5",
",",
"plot",
"=",
"False",
",",
"alt",
"=",
"0",
")",
":",
"x0",
",",
"xend",
"=",
"0",
",",
"5",
"# shaky linspace -5% to +5... | Plots a noisy sine curve and the fitting to it.
Also presents the error and the error in the
approximation of its first derivative (cosine curve)
Usage example for benchmarking:
$ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
Usage example for plotting:
$ python sine.py --nhead 1 --ntail 1 --plot | [
"Plots",
"a",
"noisy",
"sine",
"curve",
"and",
"the",
"fitting",
"to",
"it",
".",
"Also",
"presents",
"the",
"error",
"and",
"the",
"error",
"in",
"the",
"approximation",
"of",
"its",
"first",
"derivative",
"(",
"cosine",
"curve",
")"
] | c1b1c6840512d2206e2f97315d9bf1738c1ca3d3 | https://github.com/bjodah/finitediff/blob/c1b1c6840512d2206e2f97315d9bf1738c1ca3d3/examples/sine.py#L13-L83 | train |
bjodah/finitediff | examples/err.py | demo_err | def demo_err():
"""
This demo shows how the error in the estimate varies depending
on how many data points are included in the interpolation
(m parameter in this function).
"""
max_order = 7
n = 20
l = 0.25
fmt1 = '{0: <5s}\t{1: <21s}\t{2: >21s}\t{3: >21s}\t{4: >21s}'
fmt2 = '{0: <5d}\t{1:20.18f}\t{2: >21.18f}\t{3: >21.18f}\t{4: >21.18f}'
x = np.cumsum(np.random.rand(n)*l)
x = np.concatenate((x[::-1]*-1, x))
lst = []
derivs = np.zeros(n)
for order in range(max_order+1):
print('Order', order)
for m in range(1+order//2, n+1):
sub_x = x[n-m:n+m]
derivs[m-1] = derivatives_at_point_by_finite_diff(
sub_x, np.exp(sub_x), 0, order)[order]
print(fmt1.format('m', 'val', 'diff', 'analytical error',
'diff/analytical'))
for m in range(1, n):
print(fmt2.format(
(m+1)*2, derivs[m], derivs[m]-derivs[m-1],
derivs[m]-1, (derivs[m]-derivs[m-1])/(derivs[m]-1)))
lst.append((derivs[-1], abs(derivs[-1]-derivs[-2])))
print(np.array(lst)) | python | def demo_err():
"""
This demo shows how the error in the estimate varies depending
on how many data points are included in the interpolation
(m parameter in this function).
"""
max_order = 7
n = 20
l = 0.25
fmt1 = '{0: <5s}\t{1: <21s}\t{2: >21s}\t{3: >21s}\t{4: >21s}'
fmt2 = '{0: <5d}\t{1:20.18f}\t{2: >21.18f}\t{3: >21.18f}\t{4: >21.18f}'
x = np.cumsum(np.random.rand(n)*l)
x = np.concatenate((x[::-1]*-1, x))
lst = []
derivs = np.zeros(n)
for order in range(max_order+1):
print('Order', order)
for m in range(1+order//2, n+1):
sub_x = x[n-m:n+m]
derivs[m-1] = derivatives_at_point_by_finite_diff(
sub_x, np.exp(sub_x), 0, order)[order]
print(fmt1.format('m', 'val', 'diff', 'analytical error',
'diff/analytical'))
for m in range(1, n):
print(fmt2.format(
(m+1)*2, derivs[m], derivs[m]-derivs[m-1],
derivs[m]-1, (derivs[m]-derivs[m-1])/(derivs[m]-1)))
lst.append((derivs[-1], abs(derivs[-1]-derivs[-2])))
print(np.array(lst)) | [
"def",
"demo_err",
"(",
")",
":",
"max_order",
"=",
"7",
"n",
"=",
"20",
"l",
"=",
"0.25",
"fmt1",
"=",
"'{0: <5s}\\t{1: <21s}\\t{2: >21s}\\t{3: >21s}\\t{4: >21s}'",
"fmt2",
"=",
"'{0: <5d}\\t{1:20.18f}\\t{2: >21.18f}\\t{3: >21.18f}\\t{4: >21.18f}'",
"x",
"=",
"np",
"."... | This demo shows how the error in the estimate varies depending
on how many data points are included in the interpolation
(m parameter in this function). | [
"This",
"demo",
"shows",
"how",
"the",
"error",
"in",
"the",
"estimate",
"varies",
"depending",
"on",
"how",
"many",
"data",
"points",
"are",
"included",
"in",
"the",
"interpolation",
"(",
"m",
"parameter",
"in",
"this",
"function",
")",
"."
] | c1b1c6840512d2206e2f97315d9bf1738c1ca3d3 | https://github.com/bjodah/finitediff/blob/c1b1c6840512d2206e2f97315d9bf1738c1ca3d3/examples/err.py#L9-L37 | train |
LABHR/octohatrack | octohatrack/helpers.py | display_results | def display_results(repo_name, contributors, api_len):
"""
Fancy display.
"""
print("\n")
print("All Contributors:")
# Sort and consolidate on Name
seen = []
for user in sorted(contributors, key=_sort_by_name):
if user.get("name"):
key = user["name"]
else:
key = user["user_name"]
if key not in seen:
seen.append(key)
if key != user["user_name"]:
print("%s (%s)" % (user["name"], user["user_name"]))
else:
print(user["user_name"])
print("")
print("Repo: %s" % repo_name)
print("GitHub Contributors: %s" % api_len)
print("All Contributors: %s 👏" % len(seen)) | python | def display_results(repo_name, contributors, api_len):
"""
Fancy display.
"""
print("\n")
print("All Contributors:")
# Sort and consolidate on Name
seen = []
for user in sorted(contributors, key=_sort_by_name):
if user.get("name"):
key = user["name"]
else:
key = user["user_name"]
if key not in seen:
seen.append(key)
if key != user["user_name"]:
print("%s (%s)" % (user["name"], user["user_name"]))
else:
print(user["user_name"])
print("")
print("Repo: %s" % repo_name)
print("GitHub Contributors: %s" % api_len)
print("All Contributors: %s 👏" % len(seen)) | [
"def",
"display_results",
"(",
"repo_name",
",",
"contributors",
",",
"api_len",
")",
":",
"print",
"(",
"\"\\n\"",
")",
"print",
"(",
"\"All Contributors:\"",
")",
"# Sort and consolidate on Name",
"seen",
"=",
"[",
"]",
"for",
"user",
"in",
"sorted",
"(",
"c... | Fancy display. | [
"Fancy",
"display",
"."
] | bf855a0190518a3b2c45304cbbac00e22086b6da | https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/helpers.py#L13-L39 | train |
LABHR/octohatrack | octohatrack/api_helpers.py | get_json | def get_json(uri):
"""
Handle headers and json for us :3
"""
response = requests.get(API + uri, headers=HEADERS)
limit = int(response.headers.get("x-ratelimit-remaining"))
if limit == 0:
sys.stdout.write("\n")
message = "You have run out of GitHub request tokens. "
if int(response.headers.get("x-ratelimit-limit")) == 60:
message += "Set a GITHUB_TOKEN to increase your limit to 5000/hour. "
wait_seconds = int(response.headers.get("x-ratelimit-reset")) - int(time.time())
wait_minutes = math.ceil(wait_seconds / 60)
message += "Try again in ~%d minutes. " % wait_minutes
if "--wait-for-reset" in sys.argv:
progress_message(message.replace("Try ", "Trying "))
time.sleep(wait_seconds + 1)
progress_message("Resuming")
return get_json(uri)
else:
raise ValueError(message)
progress()
return response.json() | python | def get_json(uri):
"""
Handle headers and json for us :3
"""
response = requests.get(API + uri, headers=HEADERS)
limit = int(response.headers.get("x-ratelimit-remaining"))
if limit == 0:
sys.stdout.write("\n")
message = "You have run out of GitHub request tokens. "
if int(response.headers.get("x-ratelimit-limit")) == 60:
message += "Set a GITHUB_TOKEN to increase your limit to 5000/hour. "
wait_seconds = int(response.headers.get("x-ratelimit-reset")) - int(time.time())
wait_minutes = math.ceil(wait_seconds / 60)
message += "Try again in ~%d minutes. " % wait_minutes
if "--wait-for-reset" in sys.argv:
progress_message(message.replace("Try ", "Trying "))
time.sleep(wait_seconds + 1)
progress_message("Resuming")
return get_json(uri)
else:
raise ValueError(message)
progress()
return response.json() | [
"def",
"get_json",
"(",
"uri",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"API",
"+",
"uri",
",",
"headers",
"=",
"HEADERS",
")",
"limit",
"=",
"int",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"\"x-ratelimit-remaining\"",
")",
")",... | Handle headers and json for us :3 | [
"Handle",
"headers",
"and",
"json",
"for",
"us",
":",
"3"
] | bf855a0190518a3b2c45304cbbac00e22086b6da | https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/api_helpers.py#L24-L51 | train |
LABHR/octohatrack | octohatrack/api_helpers.py | api_walk | def api_walk(uri, per_page=100, key="login"):
"""
For a GitHub URI, walk all the pages until there's no more content
"""
page = 1
result = []
while True:
response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page))
if len(response) == 0:
break
else:
page += 1
for r in response:
if key == USER_LOGIN:
result.append(user_login(r))
else:
result.append(r[key])
return list(set(result)) | python | def api_walk(uri, per_page=100, key="login"):
"""
For a GitHub URI, walk all the pages until there's no more content
"""
page = 1
result = []
while True:
response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page))
if len(response) == 0:
break
else:
page += 1
for r in response:
if key == USER_LOGIN:
result.append(user_login(r))
else:
result.append(r[key])
return list(set(result)) | [
"def",
"api_walk",
"(",
"uri",
",",
"per_page",
"=",
"100",
",",
"key",
"=",
"\"login\"",
")",
":",
"page",
"=",
"1",
"result",
"=",
"[",
"]",
"while",
"True",
":",
"response",
"=",
"get_json",
"(",
"uri",
"+",
"\"?page=%d&per_page=%d\"",
"%",
"(",
"... | For a GitHub URI, walk all the pages until there's no more content | [
"For",
"a",
"GitHub",
"URI",
"walk",
"all",
"the",
"pages",
"until",
"there",
"s",
"no",
"more",
"content"
] | bf855a0190518a3b2c45304cbbac00e22086b6da | https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/api_helpers.py#L54-L73 | train |
LABHR/octohatrack | octohatrack/api_helpers.py | api_get | def api_get(uri, key=None):
"""
Simple API endpoint get, return only the keys we care about
"""
response = get_json(uri)
if response:
if type(response) == list:
r = response[0]
elif type(response) == dict:
r = response
if type(r) == dict:
# Special nested value we care about
if key == USER_LOGIN:
return user_login(r)
if key in r:
return r[key] | python | def api_get(uri, key=None):
"""
Simple API endpoint get, return only the keys we care about
"""
response = get_json(uri)
if response:
if type(response) == list:
r = response[0]
elif type(response) == dict:
r = response
if type(r) == dict:
# Special nested value we care about
if key == USER_LOGIN:
return user_login(r)
if key in r:
return r[key] | [
"def",
"api_get",
"(",
"uri",
",",
"key",
"=",
"None",
")",
":",
"response",
"=",
"get_json",
"(",
"uri",
")",
"if",
"response",
":",
"if",
"type",
"(",
"response",
")",
"==",
"list",
":",
"r",
"=",
"response",
"[",
"0",
"]",
"elif",
"type",
"(",... | Simple API endpoint get, return only the keys we care about | [
"Simple",
"API",
"endpoint",
"get",
"return",
"only",
"the",
"keys",
"we",
"care",
"about"
] | bf855a0190518a3b2c45304cbbac00e22086b6da | https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/api_helpers.py#L86-L103 | train |
LABHR/octohatrack | octohatrack_graphql.py | reducejson | def reducejson(j):
"""
Not sure if there's a better way to walk the ... interesting result
"""
authors = []
for key in j["data"]["repository"]["commitComments"]["edges"]:
authors.append(key["node"]["author"])
for key in j["data"]["repository"]["issues"]["nodes"]:
authors.append(key["author"])
for c in key["comments"]["nodes"]:
authors.append(c["author"])
for key in j["data"]["repository"]["pullRequests"]["edges"]:
authors.append(key["node"]["author"])
for c in key["node"]["comments"]["nodes"]:
authors.append(c["author"])
unique = list({v['login']:v for v in authors if v is not None}.values())
return unique | python | def reducejson(j):
"""
Not sure if there's a better way to walk the ... interesting result
"""
authors = []
for key in j["data"]["repository"]["commitComments"]["edges"]:
authors.append(key["node"]["author"])
for key in j["data"]["repository"]["issues"]["nodes"]:
authors.append(key["author"])
for c in key["comments"]["nodes"]:
authors.append(c["author"])
for key in j["data"]["repository"]["pullRequests"]["edges"]:
authors.append(key["node"]["author"])
for c in key["node"]["comments"]["nodes"]:
authors.append(c["author"])
unique = list({v['login']:v for v in authors if v is not None}.values())
return unique | [
"def",
"reducejson",
"(",
"j",
")",
":",
"authors",
"=",
"[",
"]",
"for",
"key",
"in",
"j",
"[",
"\"data\"",
"]",
"[",
"\"repository\"",
"]",
"[",
"\"commitComments\"",
"]",
"[",
"\"edges\"",
"]",
":",
"authors",
".",
"append",
"(",
"key",
"[",
"\"no... | Not sure if there's a better way to walk the ... interesting result | [
"Not",
"sure",
"if",
"there",
"s",
"a",
"better",
"way",
"to",
"walk",
"the",
"...",
"interesting",
"result"
] | bf855a0190518a3b2c45304cbbac00e22086b6da | https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack_graphql.py#L52-L73 | train |
oconnor663/duct.py | duct.py | stringify_with_dot_if_path | def stringify_with_dot_if_path(x):
'''Pathlib never renders a leading './' in front of a local path. That's an
issue because on POSIX subprocess.py (like bash) won't execute scripts in
the current directory without it. In the same vein, we also don't want
Path('echo') to match '/usr/bin/echo' from the $PATH. To work around both
issues, we explicitly join a leading dot to any relative pathlib path.'''
if isinstance(x, PurePath):
# Note that join does nothing if the path is absolute.
return os.path.join('.', str(x))
return x | python | def stringify_with_dot_if_path(x):
'''Pathlib never renders a leading './' in front of a local path. That's an
issue because on POSIX subprocess.py (like bash) won't execute scripts in
the current directory without it. In the same vein, we also don't want
Path('echo') to match '/usr/bin/echo' from the $PATH. To work around both
issues, we explicitly join a leading dot to any relative pathlib path.'''
if isinstance(x, PurePath):
# Note that join does nothing if the path is absolute.
return os.path.join('.', str(x))
return x | [
"def",
"stringify_with_dot_if_path",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"PurePath",
")",
":",
"# Note that join does nothing if the path is absolute.",
"return",
"os",
".",
"path",
".",
"join",
"(",
"'.'",
",",
"str",
"(",
"x",
")",
")",
... | Pathlib never renders a leading './' in front of a local path. That's an
issue because on POSIX subprocess.py (like bash) won't execute scripts in
the current directory without it. In the same vein, we also don't want
Path('echo') to match '/usr/bin/echo' from the $PATH. To work around both
issues, we explicitly join a leading dot to any relative pathlib path. | [
"Pathlib",
"never",
"renders",
"a",
"leading",
".",
"/",
"in",
"front",
"of",
"a",
"local",
"path",
".",
"That",
"s",
"an",
"issue",
"because",
"on",
"POSIX",
"subprocess",
".",
"py",
"(",
"like",
"bash",
")",
"won",
"t",
"execute",
"scripts",
"in",
... | f10f1e9093a2913281294bb89a6e1744aa700e73 | https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L616-L625 | train |
oconnor663/duct.py | duct.py | maybe_canonicalize_exe_path | def maybe_canonicalize_exe_path(exe_name, iocontext):
'''There's a tricky interaction between exe paths and `dir`. Exe paths can
be relative, and so we have to ask: Is an exe path interpreted relative to
the parent's cwd, or the child's? The answer is that it's platform
dependent! >.< (Windows uses the parent's cwd, but because of the
fork-chdir-exec pattern, Unix usually uses the child's.)
We want to use the parent's cwd consistently, because that saves the caller
from having to worry about whether `dir` will have side effects, and
because it's easy for the caller to use path.join if they want to. That
means that when `dir` is in use, we need to detect exe names that are
relative paths, and absolutify them. We want to do that as little as
possible though, both because canonicalization can fail, and because we
prefer to let the caller control the child's argv[0].
We never want to absolutify a name like "emacs", because that's probably a
program in the PATH rather than a local file. So we look for slashes in the
name to determine what's a filepath and what isn't. Note that anything
given as a Path will always have a slash by the time we get here, because
stringify_with_dot_if_path prepends a ./ to them when they're relative.
This leaves the case where Windows users might pass a local file like
"foo.bat" as a string, which we can't distinguish from a global program
name. However, because the Windows has the preferred "relative to parent's
cwd" behavior already, this case actually works without our help. (The
thing Windows users have to watch out for instead is local files shadowing
global program names, which I don't think we can or should prevent.)'''
has_sep = (os.path.sep in exe_name
or (os.path.altsep is not None and os.path.altsep in exe_name))
if has_sep and iocontext.dir is not None and not os.path.isabs(exe_name):
return os.path.realpath(exe_name)
else:
return exe_name | python | def maybe_canonicalize_exe_path(exe_name, iocontext):
'''There's a tricky interaction between exe paths and `dir`. Exe paths can
be relative, and so we have to ask: Is an exe path interpreted relative to
the parent's cwd, or the child's? The answer is that it's platform
dependent! >.< (Windows uses the parent's cwd, but because of the
fork-chdir-exec pattern, Unix usually uses the child's.)
We want to use the parent's cwd consistently, because that saves the caller
from having to worry about whether `dir` will have side effects, and
because it's easy for the caller to use path.join if they want to. That
means that when `dir` is in use, we need to detect exe names that are
relative paths, and absolutify them. We want to do that as little as
possible though, both because canonicalization can fail, and because we
prefer to let the caller control the child's argv[0].
We never want to absolutify a name like "emacs", because that's probably a
program in the PATH rather than a local file. So we look for slashes in the
name to determine what's a filepath and what isn't. Note that anything
given as a Path will always have a slash by the time we get here, because
stringify_with_dot_if_path prepends a ./ to them when they're relative.
This leaves the case where Windows users might pass a local file like
"foo.bat" as a string, which we can't distinguish from a global program
name. However, because the Windows has the preferred "relative to parent's
cwd" behavior already, this case actually works without our help. (The
thing Windows users have to watch out for instead is local files shadowing
global program names, which I don't think we can or should prevent.)'''
has_sep = (os.path.sep in exe_name
or (os.path.altsep is not None and os.path.altsep in exe_name))
if has_sep and iocontext.dir is not None and not os.path.isabs(exe_name):
return os.path.realpath(exe_name)
else:
return exe_name | [
"def",
"maybe_canonicalize_exe_path",
"(",
"exe_name",
",",
"iocontext",
")",
":",
"has_sep",
"=",
"(",
"os",
".",
"path",
".",
"sep",
"in",
"exe_name",
"or",
"(",
"os",
".",
"path",
".",
"altsep",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
... | There's a tricky interaction between exe paths and `dir`. Exe paths can
be relative, and so we have to ask: Is an exe path interpreted relative to
the parent's cwd, or the child's? The answer is that it's platform
dependent! >.< (Windows uses the parent's cwd, but because of the
fork-chdir-exec pattern, Unix usually uses the child's.)
We want to use the parent's cwd consistently, because that saves the caller
from having to worry about whether `dir` will have side effects, and
because it's easy for the caller to use path.join if they want to. That
means that when `dir` is in use, we need to detect exe names that are
relative paths, and absolutify them. We want to do that as little as
possible though, both because canonicalization can fail, and because we
prefer to let the caller control the child's argv[0].
We never want to absolutify a name like "emacs", because that's probably a
program in the PATH rather than a local file. So we look for slashes in the
name to determine what's a filepath and what isn't. Note that anything
given as a Path will always have a slash by the time we get here, because
stringify_with_dot_if_path prepends a ./ to them when they're relative.
This leaves the case where Windows users might pass a local file like
"foo.bat" as a string, which we can't distinguish from a global program
name. However, because the Windows has the preferred "relative to parent's
cwd" behavior already, this case actually works without our help. (The
thing Windows users have to watch out for instead is local files shadowing
global program names, which I don't think we can or should prevent.) | [
"There",
"s",
"a",
"tricky",
"interaction",
"between",
"exe",
"paths",
"and",
"dir",
".",
"Exe",
"paths",
"can",
"be",
"relative",
"and",
"so",
"we",
"have",
"to",
"ask",
":",
"Is",
"an",
"exe",
"path",
"interpreted",
"relative",
"to",
"the",
"parent",
... | f10f1e9093a2913281294bb89a6e1744aa700e73 | https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L661-L694 | train |
oconnor663/duct.py | duct.py | safe_popen | def safe_popen(*args, **kwargs):
'''This wrapper works around two major deadlock issues to do with pipes.
The first is that, before Python 3.2 on POSIX systems, os.pipe() creates
inheritable file descriptors, which leak to all child processes and prevent
reads from reaching EOF. The workaround for this is to set close_fds=True
on POSIX, which was not the default in those versions. See PEP 0446 for
many details.
The second issue arises on Windows, where we're not allowed to set
close_fds=True while also setting stdin/stdout/stderr. Descriptors from
os.pipe() on Windows have never been inheritable, so it would seem that
we're safe. However, the Windows implementation of subprocess.Popen()
creates temporary inheritable copies of its descriptors, and these can
leak. The workaround for this is to protect Popen() with a global lock. See
https://bugs.python.org/issue25565.'''
close_fds = (os.name != 'nt')
with popen_lock:
return subprocess.Popen(*args, close_fds=close_fds, **kwargs) | python | def safe_popen(*args, **kwargs):
'''This wrapper works around two major deadlock issues to do with pipes.
The first is that, before Python 3.2 on POSIX systems, os.pipe() creates
inheritable file descriptors, which leak to all child processes and prevent
reads from reaching EOF. The workaround for this is to set close_fds=True
on POSIX, which was not the default in those versions. See PEP 0446 for
many details.
The second issue arises on Windows, where we're not allowed to set
close_fds=True while also setting stdin/stdout/stderr. Descriptors from
os.pipe() on Windows have never been inheritable, so it would seem that
we're safe. However, the Windows implementation of subprocess.Popen()
creates temporary inheritable copies of its descriptors, and these can
leak. The workaround for this is to protect Popen() with a global lock. See
https://bugs.python.org/issue25565.'''
close_fds = (os.name != 'nt')
with popen_lock:
return subprocess.Popen(*args, close_fds=close_fds, **kwargs) | [
"def",
"safe_popen",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"close_fds",
"=",
"(",
"os",
".",
"name",
"!=",
"'nt'",
")",
"with",
"popen_lock",
":",
"return",
"subprocess",
".",
"Popen",
"(",
"*",
"args",
",",
"close_fds",
"=",
"close_fd... | This wrapper works around two major deadlock issues to do with pipes.
The first is that, before Python 3.2 on POSIX systems, os.pipe() creates
inheritable file descriptors, which leak to all child processes and prevent
reads from reaching EOF. The workaround for this is to set close_fds=True
on POSIX, which was not the default in those versions. See PEP 0446 for
many details.
The second issue arises on Windows, where we're not allowed to set
close_fds=True while also setting stdin/stdout/stderr. Descriptors from
os.pipe() on Windows have never been inheritable, so it would seem that
we're safe. However, the Windows implementation of subprocess.Popen()
creates temporary inheritable copies of its descriptors, and these can
leak. The workaround for this is to protect Popen() with a global lock. See
https://bugs.python.org/issue25565. | [
"This",
"wrapper",
"works",
"around",
"two",
"major",
"deadlock",
"issues",
"to",
"do",
"with",
"pipes",
".",
"The",
"first",
"is",
"that",
"before",
"Python",
"3",
".",
"2",
"on",
"POSIX",
"systems",
"os",
".",
"pipe",
"()",
"creates",
"inheritable",
"f... | f10f1e9093a2913281294bb89a6e1744aa700e73 | https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L700-L718 | train |
oconnor663/duct.py | duct.py | Expression.run | def run(self):
'''Execute the expression and return a Result, which includes the exit
status and any captured output. Raise an exception if the status is
non-zero.'''
with spawn_output_reader() as (stdout_capture, stdout_thread):
with spawn_output_reader() as (stderr_capture, stderr_thread):
context = starter_iocontext(stdout_capture, stderr_capture)
status = self._exec(context)
stdout_bytes = stdout_thread.join()
stderr_bytes = stderr_thread.join()
result = Result(status.code, stdout_bytes, stderr_bytes)
if is_checked_error(status):
raise StatusError(result, self)
return result | python | def run(self):
'''Execute the expression and return a Result, which includes the exit
status and any captured output. Raise an exception if the status is
non-zero.'''
with spawn_output_reader() as (stdout_capture, stdout_thread):
with spawn_output_reader() as (stderr_capture, stderr_thread):
context = starter_iocontext(stdout_capture, stderr_capture)
status = self._exec(context)
stdout_bytes = stdout_thread.join()
stderr_bytes = stderr_thread.join()
result = Result(status.code, stdout_bytes, stderr_bytes)
if is_checked_error(status):
raise StatusError(result, self)
return result | [
"def",
"run",
"(",
"self",
")",
":",
"with",
"spawn_output_reader",
"(",
")",
"as",
"(",
"stdout_capture",
",",
"stdout_thread",
")",
":",
"with",
"spawn_output_reader",
"(",
")",
"as",
"(",
"stderr_capture",
",",
"stderr_thread",
")",
":",
"context",
"=",
... | Execute the expression and return a Result, which includes the exit
status and any captured output. Raise an exception if the status is
non-zero. | [
"Execute",
"the",
"expression",
"and",
"return",
"a",
"Result",
"which",
"includes",
"the",
"exit",
"status",
"and",
"any",
"captured",
"output",
".",
"Raise",
"an",
"exception",
"if",
"the",
"status",
"is",
"non",
"-",
"zero",
"."
] | f10f1e9093a2913281294bb89a6e1744aa700e73 | https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L26-L39 | train |
oconnor663/duct.py | duct.py | Expression.read | def read(self):
'''Execute the expression and capture its output, similar to backticks
or $() in the shell. This is a wrapper around run() which captures
stdout, decodes it, trims it, and returns it directly.'''
result = self.stdout_capture().run()
stdout_str = decode_with_universal_newlines(result.stdout)
return stdout_str.rstrip('\n') | python | def read(self):
'''Execute the expression and capture its output, similar to backticks
or $() in the shell. This is a wrapper around run() which captures
stdout, decodes it, trims it, and returns it directly.'''
result = self.stdout_capture().run()
stdout_str = decode_with_universal_newlines(result.stdout)
return stdout_str.rstrip('\n') | [
"def",
"read",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"stdout_capture",
"(",
")",
".",
"run",
"(",
")",
"stdout_str",
"=",
"decode_with_universal_newlines",
"(",
"result",
".",
"stdout",
")",
"return",
"stdout_str",
".",
"rstrip",
"(",
"'\\n'",... | Execute the expression and capture its output, similar to backticks
or $() in the shell. This is a wrapper around run() which captures
stdout, decodes it, trims it, and returns it directly. | [
"Execute",
"the",
"expression",
"and",
"capture",
"its",
"output",
"similar",
"to",
"backticks",
"or",
"$",
"()",
"in",
"the",
"shell",
".",
"This",
"is",
"a",
"wrapper",
"around",
"run",
"()",
"which",
"captures",
"stdout",
"decodes",
"it",
"trims",
"it",... | f10f1e9093a2913281294bb89a6e1744aa700e73 | https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L41-L47 | train |
oconnor663/duct.py | duct.py | Expression.start | def start(self):
'''Equivalent to `run`, but instead of blocking the current thread,
return a WaitHandle that doesn't block until `wait` is called. This is
currently implemented with a simple background thread, though in theory
it could avoid using threads in most cases.'''
thread = ThreadWithReturn(self.run)
thread.start()
return WaitHandle(thread) | python | def start(self):
'''Equivalent to `run`, but instead of blocking the current thread,
return a WaitHandle that doesn't block until `wait` is called. This is
currently implemented with a simple background thread, though in theory
it could avoid using threads in most cases.'''
thread = ThreadWithReturn(self.run)
thread.start()
return WaitHandle(thread) | [
"def",
"start",
"(",
"self",
")",
":",
"thread",
"=",
"ThreadWithReturn",
"(",
"self",
".",
"run",
")",
"thread",
".",
"start",
"(",
")",
"return",
"WaitHandle",
"(",
"thread",
")"
] | Equivalent to `run`, but instead of blocking the current thread,
return a WaitHandle that doesn't block until `wait` is called. This is
currently implemented with a simple background thread, though in theory
it could avoid using threads in most cases. | [
"Equivalent",
"to",
"run",
"but",
"instead",
"of",
"blocking",
"the",
"current",
"thread",
"return",
"a",
"WaitHandle",
"that",
"doesn",
"t",
"block",
"until",
"wait",
"is",
"called",
".",
"This",
"is",
"currently",
"implemented",
"with",
"a",
"simple",
"bac... | f10f1e9093a2913281294bb89a6e1744aa700e73 | https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L49-L56 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager._exec | def _exec(self, cmd, url, json_data=None):
"""
execute a command at the device using the RESTful API
:param str cmd: one of the REST commands, e.g. GET or POST
:param str url: URL of the REST API the command should be applied to
:param dict json_data: json data that should be attached to the command
"""
assert(cmd in ("GET", "POST", "PUT", "DELETE"))
assert(self.dev is not None)
if json_data is None:
json_data = {}
# add device address to the URL
url = url.format(self.dev["ipv4_internal"])
# set basic authentication
auth = HTTPBasicAuth("dev", self.dev["api_key"])
# execute HTTP request
res = None
if cmd == "GET":
res = self._local_session.session.get(
url, auth=auth, verify=False
)
elif cmd == "POST":
res = self._local_session.session.post(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "PUT":
res = self._local_session.session.put(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "DELETE":
res = self._local_session.session.delete(
url, auth=auth, verify=False
)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() | python | def _exec(self, cmd, url, json_data=None):
"""
execute a command at the device using the RESTful API
:param str cmd: one of the REST commands, e.g. GET or POST
:param str url: URL of the REST API the command should be applied to
:param dict json_data: json data that should be attached to the command
"""
assert(cmd in ("GET", "POST", "PUT", "DELETE"))
assert(self.dev is not None)
if json_data is None:
json_data = {}
# add device address to the URL
url = url.format(self.dev["ipv4_internal"])
# set basic authentication
auth = HTTPBasicAuth("dev", self.dev["api_key"])
# execute HTTP request
res = None
if cmd == "GET":
res = self._local_session.session.get(
url, auth=auth, verify=False
)
elif cmd == "POST":
res = self._local_session.session.post(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "PUT":
res = self._local_session.session.put(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "DELETE":
res = self._local_session.session.delete(
url, auth=auth, verify=False
)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() | [
"def",
"_exec",
"(",
"self",
",",
"cmd",
",",
"url",
",",
"json_data",
"=",
"None",
")",
":",
"assert",
"(",
"cmd",
"in",
"(",
"\"GET\"",
",",
"\"POST\"",
",",
"\"PUT\"",
",",
"\"DELETE\"",
")",
")",
"assert",
"(",
"self",
".",
"dev",
"is",
"not",
... | execute a command at the device using the RESTful API
:param str cmd: one of the REST commands, e.g. GET or POST
:param str url: URL of the REST API the command should be applied to
:param dict json_data: json data that should be attached to the command | [
"execute",
"a",
"command",
"at",
"the",
"device",
"using",
"the",
"RESTful",
"API"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L90-L136 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.set_device | def set_device(self, dev):
"""
set the current device (that will be used for following API calls)
:param dict dev: device that should be used for the API calls
(can be obtained via get_devices function)
"""
log.debug("setting device to '{}'".format(dev))
self.dev = dev
self.set_apps_list() | python | def set_device(self, dev):
"""
set the current device (that will be used for following API calls)
:param dict dev: device that should be used for the API calls
(can be obtained via get_devices function)
"""
log.debug("setting device to '{}'".format(dev))
self.dev = dev
self.set_apps_list() | [
"def",
"set_device",
"(",
"self",
",",
"dev",
")",
":",
"log",
".",
"debug",
"(",
"\"setting device to '{}'\"",
".",
"format",
"(",
"dev",
")",
")",
"self",
".",
"dev",
"=",
"dev",
"self",
".",
"set_apps_list",
"(",
")"
] | set the current device (that will be used for following API calls)
:param dict dev: device that should be used for the API calls
(can be obtained via get_devices function) | [
"set",
"the",
"current",
"device",
"(",
"that",
"will",
"be",
"used",
"for",
"following",
"API",
"calls",
")"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L146-L155 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager._get_widget_id | def _get_widget_id(self, package_name):
"""
returns widget_id for given package_name does not care
about multiple widget ids at the moment, just picks the first
:param str package_name: package to check for
:return: id of first widget which belongs to the given package_name
:rtype: str
"""
widget_id = ""
for app in self.get_apps_list():
if app.package == package_name:
widget_id = list(app.widgets.keys())[0]
return widget_id | python | def _get_widget_id(self, package_name):
"""
returns widget_id for given package_name does not care
about multiple widget ids at the moment, just picks the first
:param str package_name: package to check for
:return: id of first widget which belongs to the given package_name
:rtype: str
"""
widget_id = ""
for app in self.get_apps_list():
if app.package == package_name:
widget_id = list(app.widgets.keys())[0]
return widget_id | [
"def",
"_get_widget_id",
"(",
"self",
",",
"package_name",
")",
":",
"widget_id",
"=",
"\"\"",
"for",
"app",
"in",
"self",
".",
"get_apps_list",
"(",
")",
":",
"if",
"app",
".",
"package",
"==",
"package_name",
":",
"widget_id",
"=",
"list",
"(",
"app",
... | returns widget_id for given package_name does not care
about multiple widget ids at the moment, just picks the first
:param str package_name: package to check for
:return: id of first widget which belongs to the given package_name
:rtype: str | [
"returns",
"widget_id",
"for",
"given",
"package_name",
"does",
"not",
"care",
"about",
"multiple",
"widget",
"ids",
"at",
"the",
"moment",
"just",
"picks",
"the",
"first"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L157-L171 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_user | def get_user(self):
"""
get the user details via the cloud
"""
log.debug("getting user information from LaMetric cloud...")
_, url = CLOUD_URLS["get_user"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() | python | def get_user(self):
"""
get the user details via the cloud
"""
log.debug("getting user information from LaMetric cloud...")
_, url = CLOUD_URLS["get_user"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() | [
"def",
"get_user",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting user information from LaMetric cloud...\"",
")",
"_",
",",
"url",
"=",
"CLOUD_URLS",
"[",
"\"get_user\"",
"]",
"res",
"=",
"self",
".",
"_cloud_session",
".",
"session",
".",
"get",... | get the user details via the cloud | [
"get",
"the",
"user",
"details",
"via",
"the",
"cloud"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L174-L185 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_devices | def get_devices(self, force_reload=False, save_devices=True):
"""
get all devices that are linked to the user, if the local device
file is not existing the devices will be obtained from the LaMetric
cloud, otherwise the local device file will be read.
:param bool force_reload: When True, devices are read again from cloud
:param bool save_devices: When True, devices obtained from the LaMetric
cloud are stored locally
"""
if (
(not os.path.exists(self._devices_filename)) or
(force_reload is True)
):
# -- load devices from LaMetric cloud --
log.debug("getting devices from LaMetric cloud...")
_, url = CLOUD_URLS["get_devices"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
# store obtained devices internally
self._devices = res.json()
if save_devices is True:
# save obtained devices to the local file
self.save_devices()
return self._devices
else:
# -- load devices from local file --
log.debug(
"getting devices from '{}'...".format(self._devices_filename)
)
return self.load_devices() | python | def get_devices(self, force_reload=False, save_devices=True):
"""
get all devices that are linked to the user, if the local device
file is not existing the devices will be obtained from the LaMetric
cloud, otherwise the local device file will be read.
:param bool force_reload: When True, devices are read again from cloud
:param bool save_devices: When True, devices obtained from the LaMetric
cloud are stored locally
"""
if (
(not os.path.exists(self._devices_filename)) or
(force_reload is True)
):
# -- load devices from LaMetric cloud --
log.debug("getting devices from LaMetric cloud...")
_, url = CLOUD_URLS["get_devices"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
# store obtained devices internally
self._devices = res.json()
if save_devices is True:
# save obtained devices to the local file
self.save_devices()
return self._devices
else:
# -- load devices from local file --
log.debug(
"getting devices from '{}'...".format(self._devices_filename)
)
return self.load_devices() | [
"def",
"get_devices",
"(",
"self",
",",
"force_reload",
"=",
"False",
",",
"save_devices",
"=",
"True",
")",
":",
"if",
"(",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_devices_filename",
")",
")",
"or",
"(",
"force_reload",
"is",... | get all devices that are linked to the user, if the local device
file is not existing the devices will be obtained from the LaMetric
cloud, otherwise the local device file will be read.
:param bool force_reload: When True, devices are read again from cloud
:param bool save_devices: When True, devices obtained from the LaMetric
cloud are stored locally | [
"get",
"all",
"devices",
"that",
"are",
"linked",
"to",
"the",
"user",
"if",
"the",
"local",
"device",
"file",
"is",
"not",
"existing",
"the",
"devices",
"will",
"be",
"obtained",
"from",
"the",
"LaMetric",
"cloud",
"otherwise",
"the",
"local",
"device",
"... | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L187-L222 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.save_devices | def save_devices(self):
"""
save devices that have been obtained from LaMetric cloud
to a local file
"""
log.debug("saving devices to ''...".format(self._devices_filename))
if self._devices != []:
with codecs.open(self._devices_filename, "wb", "utf-8") as f:
json.dump(self._devices, f) | python | def save_devices(self):
"""
save devices that have been obtained from LaMetric cloud
to a local file
"""
log.debug("saving devices to ''...".format(self._devices_filename))
if self._devices != []:
with codecs.open(self._devices_filename, "wb", "utf-8") as f:
json.dump(self._devices, f) | [
"def",
"save_devices",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"saving devices to ''...\"",
".",
"format",
"(",
"self",
".",
"_devices_filename",
")",
")",
"if",
"self",
".",
"_devices",
"!=",
"[",
"]",
":",
"with",
"codecs",
".",
"open",
"(",... | save devices that have been obtained from LaMetric cloud
to a local file | [
"save",
"devices",
"that",
"have",
"been",
"obtained",
"from",
"LaMetric",
"cloud",
"to",
"a",
"local",
"file"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L224-L232 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_endpoint_map | def get_endpoint_map(self):
"""
returns API version and endpoint map
"""
log.debug("getting end points...")
cmd, url = DEVICE_URLS["get_endpoint_map"]
return self._exec(cmd, url) | python | def get_endpoint_map(self):
"""
returns API version and endpoint map
"""
log.debug("getting end points...")
cmd, url = DEVICE_URLS["get_endpoint_map"]
return self._exec(cmd, url) | [
"def",
"get_endpoint_map",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting end points...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_endpoint_map\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url",
")"
] | returns API version and endpoint map | [
"returns",
"API",
"version",
"and",
"endpoint",
"map"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L235-L241 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.load_devices | def load_devices(self):
"""
load stored devices from the local file
"""
self._devices = []
if os.path.exists(self._devices_filename):
log.debug(
"loading devices from '{}'...".format(self._devices_filename)
)
with codecs.open(self._devices_filename, "rb", "utf-8") as f:
self._devices = json.load(f)
return self._devices | python | def load_devices(self):
"""
load stored devices from the local file
"""
self._devices = []
if os.path.exists(self._devices_filename):
log.debug(
"loading devices from '{}'...".format(self._devices_filename)
)
with codecs.open(self._devices_filename, "rb", "utf-8") as f:
self._devices = json.load(f)
return self._devices | [
"def",
"load_devices",
"(",
"self",
")",
":",
"self",
".",
"_devices",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_devices_filename",
")",
":",
"log",
".",
"debug",
"(",
"\"loading devices from '{}'...\"",
".",
"format",
"(... | load stored devices from the local file | [
"load",
"stored",
"devices",
"from",
"the",
"local",
"file"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L252-L264 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_device_state | def get_device_state(self):
"""
returns the full device state
"""
log.debug("getting device state...")
cmd, url = DEVICE_URLS["get_device_state"]
return self._exec(cmd, url) | python | def get_device_state(self):
"""
returns the full device state
"""
log.debug("getting device state...")
cmd, url = DEVICE_URLS["get_device_state"]
return self._exec(cmd, url) | [
"def",
"get_device_state",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting device state...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_device_state\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url",
")"
] | returns the full device state | [
"returns",
"the",
"full",
"device",
"state"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L266-L272 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.send_notification | def send_notification(
self, model, priority="warning", icon_type=None, lifetime=None
):
"""
sends new notification to the device
:param Model model: an instance of the Model class that should be used
:param str priority: the priority of the notification
[info, warning or critical] (default: warning)
:param str icon_type: the icon type of the notification
[none, info or alert] (default: None)
:param int lifetime: the lifetime of the notification in ms
(default: 2 min)
"""
assert(priority in ("info", "warning", "critical"))
assert(icon_type in (None, "none", "info", "alert"))
assert((lifetime is None) or (lifetime > 0))
log.debug("sending notification...")
cmd, url = DEVICE_URLS["send_notification"]
json_data = {"model": model.json(), "priority": priority}
if icon_type is not None:
json_data["icon_type"] = icon_type
if lifetime is not None:
json_data["lifetime"] = lifetime
return self._exec(cmd, url, json_data=json_data) | python | def send_notification(
self, model, priority="warning", icon_type=None, lifetime=None
):
"""
sends new notification to the device
:param Model model: an instance of the Model class that should be used
:param str priority: the priority of the notification
[info, warning or critical] (default: warning)
:param str icon_type: the icon type of the notification
[none, info or alert] (default: None)
:param int lifetime: the lifetime of the notification in ms
(default: 2 min)
"""
assert(priority in ("info", "warning", "critical"))
assert(icon_type in (None, "none", "info", "alert"))
assert((lifetime is None) or (lifetime > 0))
log.debug("sending notification...")
cmd, url = DEVICE_URLS["send_notification"]
json_data = {"model": model.json(), "priority": priority}
if icon_type is not None:
json_data["icon_type"] = icon_type
if lifetime is not None:
json_data["lifetime"] = lifetime
return self._exec(cmd, url, json_data=json_data) | [
"def",
"send_notification",
"(",
"self",
",",
"model",
",",
"priority",
"=",
"\"warning\"",
",",
"icon_type",
"=",
"None",
",",
"lifetime",
"=",
"None",
")",
":",
"assert",
"(",
"priority",
"in",
"(",
"\"info\"",
",",
"\"warning\"",
",",
"\"critical\"",
")... | sends new notification to the device
:param Model model: an instance of the Model class that should be used
:param str priority: the priority of the notification
[info, warning or critical] (default: warning)
:param str icon_type: the icon type of the notification
[none, info or alert] (default: None)
:param int lifetime: the lifetime of the notification in ms
(default: 2 min) | [
"sends",
"new",
"notification",
"to",
"the",
"device"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L274-L303 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_notifications | def get_notifications(self):
"""
returns the list of all notifications in queue
"""
log.debug("getting notifications in queue...")
cmd, url = DEVICE_URLS["get_notifications_queue"]
return self._exec(cmd, url) | python | def get_notifications(self):
"""
returns the list of all notifications in queue
"""
log.debug("getting notifications in queue...")
cmd, url = DEVICE_URLS["get_notifications_queue"]
return self._exec(cmd, url) | [
"def",
"get_notifications",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting notifications in queue...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_notifications_queue\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url",
"... | returns the list of all notifications in queue | [
"returns",
"the",
"list",
"of",
"all",
"notifications",
"in",
"queue"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L305-L311 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_current_notification | def get_current_notification(self):
"""
returns the current notification (i.e. the one that is visible)
"""
log.debug("getting visible notification...")
cmd, url = DEVICE_URLS["get_current_notification"]
return self._exec(cmd, url) | python | def get_current_notification(self):
"""
returns the current notification (i.e. the one that is visible)
"""
log.debug("getting visible notification...")
cmd, url = DEVICE_URLS["get_current_notification"]
return self._exec(cmd, url) | [
"def",
"get_current_notification",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting visible notification...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_current_notification\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url... | returns the current notification (i.e. the one that is visible) | [
"returns",
"the",
"current",
"notification",
"(",
"i",
".",
"e",
".",
"the",
"one",
"that",
"is",
"visible",
")"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L313-L319 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_notification | def get_notification(self, notification_id):
"""
returns a specific notification by given id
:param str notification_id: the ID of the notification
"""
log.debug("getting notification '{}'...".format(notification_id))
cmd, url = DEVICE_URLS["get_notification"]
return self._exec(cmd, url.replace(":id", notification_id)) | python | def get_notification(self, notification_id):
"""
returns a specific notification by given id
:param str notification_id: the ID of the notification
"""
log.debug("getting notification '{}'...".format(notification_id))
cmd, url = DEVICE_URLS["get_notification"]
return self._exec(cmd, url.replace(":id", notification_id)) | [
"def",
"get_notification",
"(",
"self",
",",
"notification_id",
")",
":",
"log",
".",
"debug",
"(",
"\"getting notification '{}'...\"",
".",
"format",
"(",
"notification_id",
")",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_notification\"",
"]",
"ret... | returns a specific notification by given id
:param str notification_id: the ID of the notification | [
"returns",
"a",
"specific",
"notification",
"by",
"given",
"id"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L321-L329 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_display | def get_display(self):
"""
returns information about the display, including
brightness, screensaver etc.
"""
log.debug("getting display information...")
cmd, url = DEVICE_URLS["get_display"]
return self._exec(cmd, url) | python | def get_display(self):
"""
returns information about the display, including
brightness, screensaver etc.
"""
log.debug("getting display information...")
cmd, url = DEVICE_URLS["get_display"]
return self._exec(cmd, url) | [
"def",
"get_display",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting display information...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_display\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url",
")"
] | returns information about the display, including
brightness, screensaver etc. | [
"returns",
"information",
"about",
"the",
"display",
"including",
"brightness",
"screensaver",
"etc",
"."
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L341-L348 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.set_display | def set_display(self, brightness=100, brightness_mode="auto"):
"""
allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto)
"""
assert(brightness_mode in ("auto", "manual"))
assert(brightness in range(101))
log.debug("setting display information...")
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"brightness_mode": brightness_mode,
"brightness": brightness
}
return self._exec(cmd, url, json_data=json_data) | python | def set_display(self, brightness=100, brightness_mode="auto"):
"""
allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto)
"""
assert(brightness_mode in ("auto", "manual"))
assert(brightness in range(101))
log.debug("setting display information...")
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"brightness_mode": brightness_mode,
"brightness": brightness
}
return self._exec(cmd, url, json_data=json_data) | [
"def",
"set_display",
"(",
"self",
",",
"brightness",
"=",
"100",
",",
"brightness_mode",
"=",
"\"auto\"",
")",
":",
"assert",
"(",
"brightness_mode",
"in",
"(",
"\"auto\"",
",",
"\"manual\"",
")",
")",
"assert",
"(",
"brightness",
"in",
"range",
"(",
"101... | allows to modify display state (change brightness)
:param int brightness: display brightness [0, 100] (default: 100)
:param str brightness_mode: the brightness mode of the display
[auto, manual] (default: auto) | [
"allows",
"to",
"modify",
"display",
"state",
"(",
"change",
"brightness",
")"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L350-L369 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.set_screensaver | def set_screensaver(
self, mode, is_mode_enabled, start_time=None, end_time=None,
is_screensaver_enabled=True
):
"""
set the display's screensaver mode
:param str mode: mode of the screensaver
[when_dark, time_based]
:param bool is_mode_enabled: specifies if mode is enabled or disabled
:param str start_time: start time, only used in time_based mode
(format: %H:%M:%S)
:param str end_time: end time, only used in time_based mode
(format: %H:%M:%S)
:param bool is_screensaver_enabled: is overall screensaver turned on
overrules mode specific settings
"""
assert(mode in ("when_dark", "time_based"))
log.debug("setting screensaver to '{}'...".format(mode))
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"screensaver": {
"enabled": is_screensaver_enabled,
"mode": mode,
"mode_params": {
"enabled": is_mode_enabled
},
}
}
if mode == "time_based":
# TODO: add time checks
assert((start_time is not None) and (end_time is not None))
json_data["screensaver"]["mode_params"]["start_time"] = start_time
json_data["screensaver"]["mode_params"]["end_time"] = end_time
return self._exec(cmd, url, json_data=json_data) | python | def set_screensaver(
self, mode, is_mode_enabled, start_time=None, end_time=None,
is_screensaver_enabled=True
):
"""
set the display's screensaver mode
:param str mode: mode of the screensaver
[when_dark, time_based]
:param bool is_mode_enabled: specifies if mode is enabled or disabled
:param str start_time: start time, only used in time_based mode
(format: %H:%M:%S)
:param str end_time: end time, only used in time_based mode
(format: %H:%M:%S)
:param bool is_screensaver_enabled: is overall screensaver turned on
overrules mode specific settings
"""
assert(mode in ("when_dark", "time_based"))
log.debug("setting screensaver to '{}'...".format(mode))
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"screensaver": {
"enabled": is_screensaver_enabled,
"mode": mode,
"mode_params": {
"enabled": is_mode_enabled
},
}
}
if mode == "time_based":
# TODO: add time checks
assert((start_time is not None) and (end_time is not None))
json_data["screensaver"]["mode_params"]["start_time"] = start_time
json_data["screensaver"]["mode_params"]["end_time"] = end_time
return self._exec(cmd, url, json_data=json_data) | [
"def",
"set_screensaver",
"(",
"self",
",",
"mode",
",",
"is_mode_enabled",
",",
"start_time",
"=",
"None",
",",
"end_time",
"=",
"None",
",",
"is_screensaver_enabled",
"=",
"True",
")",
":",
"assert",
"(",
"mode",
"in",
"(",
"\"when_dark\"",
",",
"\"time_ba... | set the display's screensaver mode
:param str mode: mode of the screensaver
[when_dark, time_based]
:param bool is_mode_enabled: specifies if mode is enabled or disabled
:param str start_time: start time, only used in time_based mode
(format: %H:%M:%S)
:param str end_time: end time, only used in time_based mode
(format: %H:%M:%S)
:param bool is_screensaver_enabled: is overall screensaver turned on
overrules mode specific settings | [
"set",
"the",
"display",
"s",
"screensaver",
"mode"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L371-L408 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_volume | def get_volume(self):
"""
returns the current volume
"""
log.debug("getting volumne...")
cmd, url = DEVICE_URLS["get_volume"]
return self._exec(cmd, url) | python | def get_volume(self):
"""
returns the current volume
"""
log.debug("getting volumne...")
cmd, url = DEVICE_URLS["get_volume"]
return self._exec(cmd, url) | [
"def",
"get_volume",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting volumne...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_volume\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url",
")"
] | returns the current volume | [
"returns",
"the",
"current",
"volume"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L410-L416 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.set_volume | def set_volume(self, volume=50):
"""
allows to change the volume
:param int volume: volume to be set for the current device
[0..100] (default: 50)
"""
assert(volume in range(101))
log.debug("setting volume...")
cmd, url = DEVICE_URLS["set_volume"]
json_data = {
"volume": volume,
}
return self._exec(cmd, url, json_data=json_data) | python | def set_volume(self, volume=50):
"""
allows to change the volume
:param int volume: volume to be set for the current device
[0..100] (default: 50)
"""
assert(volume in range(101))
log.debug("setting volume...")
cmd, url = DEVICE_URLS["set_volume"]
json_data = {
"volume": volume,
}
return self._exec(cmd, url, json_data=json_data) | [
"def",
"set_volume",
"(",
"self",
",",
"volume",
"=",
"50",
")",
":",
"assert",
"(",
"volume",
"in",
"range",
"(",
"101",
")",
")",
"log",
".",
"debug",
"(",
"\"setting volume...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"set_volume\"",
"]... | allows to change the volume
:param int volume: volume to be set for the current device
[0..100] (default: 50) | [
"allows",
"to",
"change",
"the",
"volume"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L418-L433 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_bluetooth_state | def get_bluetooth_state(self):
"""
returns the bluetooth state
"""
log.debug("getting bluetooth state...")
cmd, url = DEVICE_URLS["get_bluetooth_state"]
return self._exec(cmd, url) | python | def get_bluetooth_state(self):
"""
returns the bluetooth state
"""
log.debug("getting bluetooth state...")
cmd, url = DEVICE_URLS["get_bluetooth_state"]
return self._exec(cmd, url) | [
"def",
"get_bluetooth_state",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting bluetooth state...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_bluetooth_state\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url",
")"
] | returns the bluetooth state | [
"returns",
"the",
"bluetooth",
"state"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L435-L441 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.set_bluetooth | def set_bluetooth(self, active=None, name=None):
"""
allows to activate/deactivate bluetooth and change the name
"""
assert(active is not None or name is not None)
log.debug("setting bluetooth state...")
cmd, url = DEVICE_URLS["set_bluetooth"]
json_data = {}
if name is not None:
json_data["name"] = name
if active is not None:
json_data["active"] = active
return self._exec(cmd, url, json_data=json_data) | python | def set_bluetooth(self, active=None, name=None):
"""
allows to activate/deactivate bluetooth and change the name
"""
assert(active is not None or name is not None)
log.debug("setting bluetooth state...")
cmd, url = DEVICE_URLS["set_bluetooth"]
json_data = {}
if name is not None:
json_data["name"] = name
if active is not None:
json_data["active"] = active
return self._exec(cmd, url, json_data=json_data) | [
"def",
"set_bluetooth",
"(",
"self",
",",
"active",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"assert",
"(",
"active",
"is",
"not",
"None",
"or",
"name",
"is",
"not",
"None",
")",
"log",
".",
"debug",
"(",
"\"setting bluetooth state...\"",
")",
... | allows to activate/deactivate bluetooth and change the name | [
"allows",
"to",
"activate",
"/",
"deactivate",
"bluetooth",
"and",
"change",
"the",
"name"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L443-L458 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.get_wifi_state | def get_wifi_state(self):
"""
returns the current Wi-Fi state the device is connected to
"""
log.debug("getting wifi state...")
cmd, url = DEVICE_URLS["get_wifi_state"]
return self._exec(cmd, url) | python | def get_wifi_state(self):
"""
returns the current Wi-Fi state the device is connected to
"""
log.debug("getting wifi state...")
cmd, url = DEVICE_URLS["get_wifi_state"]
return self._exec(cmd, url) | [
"def",
"get_wifi_state",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting wifi state...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_wifi_state\"",
"]",
"return",
"self",
".",
"_exec",
"(",
"cmd",
",",
"url",
")"
] | returns the current Wi-Fi state the device is connected to | [
"returns",
"the",
"current",
"Wi",
"-",
"Fi",
"state",
"the",
"device",
"is",
"connected",
"to"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L460-L466 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.set_apps_list | def set_apps_list(self):
"""
gets installed apps and puts them into the available_apps list
"""
log.debug("getting apps and setting them in the internal app list...")
cmd, url = DEVICE_URLS["get_apps_list"]
result = self._exec(cmd, url)
self.available_apps = [
AppModel(result[app])
for app in result
] | python | def set_apps_list(self):
"""
gets installed apps and puts them into the available_apps list
"""
log.debug("getting apps and setting them in the internal app list...")
cmd, url = DEVICE_URLS["get_apps_list"]
result = self._exec(cmd, url)
self.available_apps = [
AppModel(result[app])
for app in result
] | [
"def",
"set_apps_list",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"getting apps and setting them in the internal app list...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"get_apps_list\"",
"]",
"result",
"=",
"self",
".",
"_exec",
"(",
"cmd",
... | gets installed apps and puts them into the available_apps list | [
"gets",
"installed",
"apps",
"and",
"puts",
"them",
"into",
"the",
"available_apps",
"list"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L469-L481 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.switch_to_app | def switch_to_app(self, package):
"""
activates an app that is specified by package. Selects the first
app it finds in the app list
:param package: name of package/app
:type package: str
:return: None
:rtype: None
"""
log.debug("switching to app '{}'...".format(package))
cmd, url = DEVICE_URLS["switch_to_app"]
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) | python | def switch_to_app(self, package):
"""
activates an app that is specified by package. Selects the first
app it finds in the app list
:param package: name of package/app
:type package: str
:return: None
:rtype: None
"""
log.debug("switching to app '{}'...".format(package))
cmd, url = DEVICE_URLS["switch_to_app"]
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) | [
"def",
"switch_to_app",
"(",
"self",
",",
"package",
")",
":",
"log",
".",
"debug",
"(",
"\"switching to app '{}'...\"",
".",
"format",
"(",
"package",
")",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"switch_to_app\"",
"]",
"widget_id",
"=",
"self",
... | activates an app that is specified by package. Selects the first
app it finds in the app list
:param package: name of package/app
:type package: str
:return: None
:rtype: None | [
"activates",
"an",
"app",
"that",
"is",
"specified",
"by",
"package",
".",
"Selects",
"the",
"first",
"app",
"it",
"finds",
"in",
"the",
"app",
"list"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L489-L505 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.switch_to_next_app | def switch_to_next_app(self):
"""
switches to the next app
"""
log.debug("switching to next app...")
cmd, url = DEVICE_URLS["switch_to_next_app"]
self.result = self._exec(cmd, url) | python | def switch_to_next_app(self):
"""
switches to the next app
"""
log.debug("switching to next app...")
cmd, url = DEVICE_URLS["switch_to_next_app"]
self.result = self._exec(cmd, url) | [
"def",
"switch_to_next_app",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"switching to next app...\"",
")",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"switch_to_next_app\"",
"]",
"self",
".",
"result",
"=",
"self",
".",
"_exec",
"(",
"cmd",
",",
... | switches to the next app | [
"switches",
"to",
"the",
"next",
"app"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L507-L513 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.activate_widget | def activate_widget(self, package):
"""
activate the widget of the given package
:param str package: name of the package
"""
cmd, url = DEVICE_URLS["activate_widget"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) | python | def activate_widget(self, package):
"""
activate the widget of the given package
:param str package: name of the package
"""
cmd, url = DEVICE_URLS["activate_widget"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) | [
"def",
"activate_widget",
"(",
"self",
",",
"package",
")",
":",
"cmd",
",",
"url",
"=",
"DEVICE_URLS",
"[",
"\"activate_widget\"",
"]",
"# get widget id for the package",
"widget_id",
"=",
"self",
".",
"_get_widget_id",
"(",
"package",
")",
"url",
"=",
"url",
... | activate the widget of the given package
:param str package: name of the package | [
"activate",
"the",
"widget",
"of",
"the",
"given",
"package"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L523-L535 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager._app_exec | def _app_exec(self, package, action, params=None):
"""
meta method for all interactions with apps
:param package: name of package/app
:type package: str
:param action: the action to be executed
:type action: str
:param params: optional parameters for this action
:type params: dict
:return: None
:rtype: None
"""
# get list of possible commands from app.actions
allowed_commands = []
for app in self.get_apps_list():
if app.package == package:
allowed_commands = list(app.actions.keys())
break
# check if action is in this list
assert(action in allowed_commands)
cmd, url = DEVICE_URLS["do_action"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
json_data = {"id": action}
if params is not None:
json_data["params"] = params
self.result = self._exec(cmd, url, json_data=json_data) | python | def _app_exec(self, package, action, params=None):
"""
meta method for all interactions with apps
:param package: name of package/app
:type package: str
:param action: the action to be executed
:type action: str
:param params: optional parameters for this action
:type params: dict
:return: None
:rtype: None
"""
# get list of possible commands from app.actions
allowed_commands = []
for app in self.get_apps_list():
if app.package == package:
allowed_commands = list(app.actions.keys())
break
# check if action is in this list
assert(action in allowed_commands)
cmd, url = DEVICE_URLS["do_action"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
json_data = {"id": action}
if params is not None:
json_data["params"] = params
self.result = self._exec(cmd, url, json_data=json_data) | [
"def",
"_app_exec",
"(",
"self",
",",
"package",
",",
"action",
",",
"params",
"=",
"None",
")",
":",
"# get list of possible commands from app.actions",
"allowed_commands",
"=",
"[",
"]",
"for",
"app",
"in",
"self",
".",
"get_apps_list",
"(",
")",
":",
"if",
... | meta method for all interactions with apps
:param package: name of package/app
:type package: str
:param action: the action to be executed
:type action: str
:param params: optional parameters for this action
:type params: dict
:return: None
:rtype: None | [
"meta",
"method",
"for",
"all",
"interactions",
"with",
"apps"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L537-L569 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.alarm_set | def alarm_set(self, time, wake_with_radio=False):
"""
set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound
"""
# TODO: check for correct time format
log.debug("alarm => set...")
params = {
"enabled": True,
"time": time,
"wake_with_radio": wake_with_radio
}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) | python | def alarm_set(self, time, wake_with_radio=False):
"""
set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound
"""
# TODO: check for correct time format
log.debug("alarm => set...")
params = {
"enabled": True,
"time": time,
"wake_with_radio": wake_with_radio
}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) | [
"def",
"alarm_set",
"(",
"self",
",",
"time",
",",
"wake_with_radio",
"=",
"False",
")",
":",
"# TODO: check for correct time format",
"log",
".",
"debug",
"(",
"\"alarm => set...\"",
")",
"params",
"=",
"{",
"\"enabled\"",
":",
"True",
",",
"\"time\"",
":",
"... | set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound | [
"set",
"the",
"alarm",
"clock"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L599-L614 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.alarm_disable | def alarm_disable(self):
"""
disable the alarm
"""
log.debug("alarm => disable...")
params = {"enabled": False}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) | python | def alarm_disable(self):
"""
disable the alarm
"""
log.debug("alarm => disable...")
params = {"enabled": False}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) | [
"def",
"alarm_disable",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"alarm => disable...\"",
")",
"params",
"=",
"{",
"\"enabled\"",
":",
"False",
"}",
"self",
".",
"_app_exec",
"(",
"\"com.lametric.clock\"",
",",
"\"clock.alarm\"",
",",
"params",
"=",
... | disable the alarm | [
"disable",
"the",
"alarm"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L616-L622 | train |
keans/lmnotify | lmnotify/lmnotify.py | LaMetricManager.countdown_set | def countdown_set(self, duration, start_now):
"""
set the countdown
:param str duration:
:param str start_now:
"""
log.debug("countdown => set...")
params = {'duration': duration, 'start_now': start_now}
self._app_exec(
"com.lametric.countdown", "countdown.configure", params
) | python | def countdown_set(self, duration, start_now):
"""
set the countdown
:param str duration:
:param str start_now:
"""
log.debug("countdown => set...")
params = {'duration': duration, 'start_now': start_now}
self._app_exec(
"com.lametric.countdown", "countdown.configure", params
) | [
"def",
"countdown_set",
"(",
"self",
",",
"duration",
",",
"start_now",
")",
":",
"log",
".",
"debug",
"(",
"\"countdown => set...\"",
")",
"params",
"=",
"{",
"'duration'",
":",
"duration",
",",
"'start_now'",
":",
"start_now",
"}",
"self",
".",
"_app_exec"... | set the countdown
:param str duration:
:param str start_now: | [
"set",
"the",
"countdown"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L645-L656 | train |
comtihon/catcher | catcher/steps/external.py | External.action | def action(self, includes: dict, variables: dict) -> tuple:
"""
Call external script.
:param includes: testcase's includes
:param variables: variables
:return: script's output
"""
json_args = fill_template_str(json.dumps(self.data), variables)
p = subprocess.Popen([self.module, json_args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if p.wait() == 0:
out = p.stdout.read().decode()
debug(out)
return variables, json.loads(out)
else:
out = p.stdout.read().decode()
warning(out)
raise Exception('Execution failed.') | python | def action(self, includes: dict, variables: dict) -> tuple:
"""
Call external script.
:param includes: testcase's includes
:param variables: variables
:return: script's output
"""
json_args = fill_template_str(json.dumps(self.data), variables)
p = subprocess.Popen([self.module, json_args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if p.wait() == 0:
out = p.stdout.read().decode()
debug(out)
return variables, json.loads(out)
else:
out = p.stdout.read().decode()
warning(out)
raise Exception('Execution failed.') | [
"def",
"action",
"(",
"self",
",",
"includes",
":",
"dict",
",",
"variables",
":",
"dict",
")",
"->",
"tuple",
":",
"json_args",
"=",
"fill_template_str",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"data",
")",
",",
"variables",
")",
"p",
"=",
"sub... | Call external script.
:param includes: testcase's includes
:param variables: variables
:return: script's output | [
"Call",
"external",
"script",
"."
] | 5124e69d11cb6987daca595a61a4062d2b5f5ecc | https://github.com/comtihon/catcher/blob/5124e69d11cb6987daca595a61a4062d2b5f5ecc/catcher/steps/external.py#L17-L34 | train |
keans/lmnotify | lmnotify/session.py | CloudSession.set_credentials | def set_credentials(self, client_id=None, client_secret=None):
"""
set given credentials and reset the session
"""
self._client_id = client_id
self._client_secret = client_secret
# make sure to reset session due to credential change
self._session = None | python | def set_credentials(self, client_id=None, client_secret=None):
"""
set given credentials and reset the session
"""
self._client_id = client_id
self._client_secret = client_secret
# make sure to reset session due to credential change
self._session = None | [
"def",
"set_credentials",
"(",
"self",
",",
"client_id",
"=",
"None",
",",
"client_secret",
"=",
"None",
")",
":",
"self",
".",
"_client_id",
"=",
"client_id",
"self",
".",
"_client_secret",
"=",
"client_secret",
"# make sure to reset session due to credential change"... | set given credentials and reset the session | [
"set",
"given",
"credentials",
"and",
"reset",
"the",
"session"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/session.py#L82-L90 | train |
keans/lmnotify | lmnotify/session.py | CloudSession.init_session | def init_session(self, get_token=True):
"""
init a new oauth2 session that is required to access the cloud
:param bool get_token: if True, a token will be obtained, after
the session has been created
"""
if (self._client_id is None) or (self._client_secret is None):
sys.exit(
"Please make sure to set the client id and client secret "
"via the constructor, the environment variables or the config "
"file; otherwise, the LaMetric cloud cannot be accessed. "
"Abort!"
)
self._session = OAuth2Session(
client=BackendApplicationClient(client_id=self._client_id)
)
if get_token is True:
# get oauth token
self.get_token() | python | def init_session(self, get_token=True):
"""
init a new oauth2 session that is required to access the cloud
:param bool get_token: if True, a token will be obtained, after
the session has been created
"""
if (self._client_id is None) or (self._client_secret is None):
sys.exit(
"Please make sure to set the client id and client secret "
"via the constructor, the environment variables or the config "
"file; otherwise, the LaMetric cloud cannot be accessed. "
"Abort!"
)
self._session = OAuth2Session(
client=BackendApplicationClient(client_id=self._client_id)
)
if get_token is True:
# get oauth token
self.get_token() | [
"def",
"init_session",
"(",
"self",
",",
"get_token",
"=",
"True",
")",
":",
"if",
"(",
"self",
".",
"_client_id",
"is",
"None",
")",
"or",
"(",
"self",
".",
"_client_secret",
"is",
"None",
")",
":",
"sys",
".",
"exit",
"(",
"\"Please make sure to set th... | init a new oauth2 session that is required to access the cloud
:param bool get_token: if True, a token will be obtained, after
the session has been created | [
"init",
"a",
"new",
"oauth2",
"session",
"that",
"is",
"required",
"to",
"access",
"the",
"cloud"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/session.py#L98-L119 | train |
keans/lmnotify | lmnotify/session.py | CloudSession.get_token | def get_token(self):
"""
get current oauth token
"""
self.token = self._session.fetch_token(
token_url=CLOUD_URLS["get_token"][1],
client_id=self._client_id,
client_secret=self._client_secret
) | python | def get_token(self):
"""
get current oauth token
"""
self.token = self._session.fetch_token(
token_url=CLOUD_URLS["get_token"][1],
client_id=self._client_id,
client_secret=self._client_secret
) | [
"def",
"get_token",
"(",
"self",
")",
":",
"self",
".",
"token",
"=",
"self",
".",
"_session",
".",
"fetch_token",
"(",
"token_url",
"=",
"CLOUD_URLS",
"[",
"\"get_token\"",
"]",
"[",
"1",
"]",
",",
"client_id",
"=",
"self",
".",
"_client_id",
",",
"cl... | get current oauth token | [
"get",
"current",
"oauth",
"token"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/session.py#L121-L129 | train |
comtihon/catcher | catcher/steps/external_step.py | ExternalStep.simple_input | def simple_input(self, variables):
"""
Use this method to get simple input as python object, with all
templates filled in
:param variables:
:return: python object
"""
json_args = fill_template_str(json.dumps(self.data), variables)
return try_get_objects(json_args) | python | def simple_input(self, variables):
"""
Use this method to get simple input as python object, with all
templates filled in
:param variables:
:return: python object
"""
json_args = fill_template_str(json.dumps(self.data), variables)
return try_get_objects(json_args) | [
"def",
"simple_input",
"(",
"self",
",",
"variables",
")",
":",
"json_args",
"=",
"fill_template_str",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"data",
")",
",",
"variables",
")",
"return",
"try_get_objects",
"(",
"json_args",
")"
] | Use this method to get simple input as python object, with all
templates filled in
:param variables:
:return: python object | [
"Use",
"this",
"method",
"to",
"get",
"simple",
"input",
"as",
"python",
"object",
"with",
"all",
"templates",
"filled",
"in"
] | 5124e69d11cb6987daca595a61a4062d2b5f5ecc | https://github.com/comtihon/catcher/blob/5124e69d11cb6987daca595a61a4062d2b5f5ecc/catcher/steps/external_step.py#L19-L29 | train |
keans/lmnotify | lmnotify/config.py | Config.create | def create(self):
"""
creates an empty configuration file
"""
if not self.exists():
# create new empyt config file based on template
self.config.add_section("lametric")
self.config.set("lametric", "client_id", "")
self.config.set("lametric", "client_secret", "")
# save new config
self.save()
# stop here, so user can set his config
sys.exit(
"An empty config file '{}' has been created. Please set "
"the corresponding LaMetric API credentials.".format(
self._filename
)
) | python | def create(self):
"""
creates an empty configuration file
"""
if not self.exists():
# create new empyt config file based on template
self.config.add_section("lametric")
self.config.set("lametric", "client_id", "")
self.config.set("lametric", "client_secret", "")
# save new config
self.save()
# stop here, so user can set his config
sys.exit(
"An empty config file '{}' has been created. Please set "
"the corresponding LaMetric API credentials.".format(
self._filename
)
) | [
"def",
"create",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"# create new empyt config file based on template",
"self",
".",
"config",
".",
"add_section",
"(",
"\"lametric\"",
")",
"self",
".",
"config",
".",
"set",
"(",
"\"lam... | creates an empty configuration file | [
"creates",
"an",
"empty",
"configuration",
"file"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/config.py#L61-L80 | train |
keans/lmnotify | lmnotify/config.py | Config.save | def save(self):
"""
save current config to the file
"""
with open(self._filename, "w") as f:
self.config.write(f) | python | def save(self):
"""
save current config to the file
"""
with open(self._filename, "w") as f:
self.config.write(f) | [
"def",
"save",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"_filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"self",
".",
"config",
".",
"write",
"(",
"f",
")"
] | save current config to the file | [
"save",
"current",
"config",
"to",
"the",
"file"
] | b0a5282a582e5090852dc20fea8a135ca258d0d3 | https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/config.py#L88-L93 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.