text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def isKeyboardShown(self):
'''
Whether the keyboard is displayed.
'''
self.__checkTransport()
dim = self.shell('dumpsys input_method')
if dim:
# FIXME: API >= 15 ?
return "mInputShown=true" in dim
return False | [
"def",
"isKeyboardShown",
"(",
"self",
")",
":",
"self",
".",
"__checkTransport",
"(",
")",
"dim",
"=",
"self",
".",
"shell",
"(",
"'dumpsys input_method'",
")",
"if",
"dim",
":",
"# FIXME: API >= 15 ?",
"return",
"\"mInputShown=true\"",
"in",
"dim",
"return",
"False"
] | 25.454545 | 17.090909 |
def scene_name(sequence_number, scene_id, name):
"""Create a scene.name message"""
return MessageWriter().string("scene.name").uint64(sequence_number).uint32(scene_id).string(name).get() | [
"def",
"scene_name",
"(",
"sequence_number",
",",
"scene_id",
",",
"name",
")",
":",
"return",
"MessageWriter",
"(",
")",
".",
"string",
"(",
"\"scene.name\"",
")",
".",
"uint64",
"(",
"sequence_number",
")",
".",
"uint32",
"(",
"scene_id",
")",
".",
"string",
"(",
"name",
")",
".",
"get",
"(",
")"
] | 66.666667 | 26.333333 |
def show_url(context, **kwargs):
"""Return the show feed URL with different protocol."""
if len(kwargs) != 2:
raise TemplateSyntaxError(_('"show_url" tag takes exactly two keyword arguments.'))
request = context['request']
current_site = get_current_site(request)
url = add_domain(current_site.domain, kwargs['url'])
return re.sub(r'https?:\/\/', '%s://' % kwargs['protocol'], url) | [
"def",
"show_url",
"(",
"context",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"kwargs",
")",
"!=",
"2",
":",
"raise",
"TemplateSyntaxError",
"(",
"_",
"(",
"'\"show_url\" tag takes exactly two keyword arguments.'",
")",
")",
"request",
"=",
"context",
"[",
"'request'",
"]",
"current_site",
"=",
"get_current_site",
"(",
"request",
")",
"url",
"=",
"add_domain",
"(",
"current_site",
".",
"domain",
",",
"kwargs",
"[",
"'url'",
"]",
")",
"return",
"re",
".",
"sub",
"(",
"r'https?:\\/\\/'",
",",
"'%s://'",
"%",
"kwargs",
"[",
"'protocol'",
"]",
",",
"url",
")"
] | 50.75 | 16.375 |
def sync(self):
"""
Syncs the information for this settings out to the file system.
"""
if self._customFormat:
self._customFormat.save(self.fileName())
else:
super(XSettings, self).sync() | [
"def",
"sync",
"(",
"self",
")",
":",
"if",
"self",
".",
"_customFormat",
":",
"self",
".",
"_customFormat",
".",
"save",
"(",
"self",
".",
"fileName",
"(",
")",
")",
"else",
":",
"super",
"(",
"XSettings",
",",
"self",
")",
".",
"sync",
"(",
")"
] | 31.375 | 13.125 |
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
axis : int or basestring
Corresponds to the axis that contains the Index
Returns
-------
shifted : NDFrame
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, 'freq', None)
if freq is None:
freq = getattr(index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
(freq.rule_code, orig_freq.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self) | [
"def",
"tshift",
"(",
"self",
",",
"periods",
"=",
"1",
",",
"freq",
"=",
"None",
",",
"axis",
"=",
"0",
")",
":",
"index",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"getattr",
"(",
"index",
",",
"'freq'",
",",
"None",
")",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"getattr",
"(",
"index",
",",
"'inferred_freq'",
",",
"None",
")",
"if",
"freq",
"is",
"None",
":",
"msg",
"=",
"'Freq was not given and was not set in the index'",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"periods",
"==",
"0",
":",
"return",
"self",
"if",
"isinstance",
"(",
"freq",
",",
"str",
")",
":",
"freq",
"=",
"to_offset",
"(",
"freq",
")",
"block_axis",
"=",
"self",
".",
"_get_block_manager_axis",
"(",
"axis",
")",
"if",
"isinstance",
"(",
"index",
",",
"PeriodIndex",
")",
":",
"orig_freq",
"=",
"to_offset",
"(",
"index",
".",
"freq",
")",
"if",
"freq",
"==",
"orig_freq",
":",
"new_data",
"=",
"self",
".",
"_data",
".",
"copy",
"(",
")",
"new_data",
".",
"axes",
"[",
"block_axis",
"]",
"=",
"index",
".",
"shift",
"(",
"periods",
")",
"else",
":",
"msg",
"=",
"(",
"'Given freq %s does not match PeriodIndex freq %s'",
"%",
"(",
"freq",
".",
"rule_code",
",",
"orig_freq",
".",
"rule_code",
")",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"new_data",
"=",
"self",
".",
"_data",
".",
"copy",
"(",
")",
"new_data",
".",
"axes",
"[",
"block_axis",
"]",
"=",
"index",
".",
"shift",
"(",
"periods",
",",
"freq",
")",
"return",
"self",
".",
"_constructor",
"(",
"new_data",
")",
".",
"__finalize__",
"(",
"self",
")"
] | 32.821429 | 21.25 |
def get_default_plugin(cls):
'''
Return a default plugin.
'''
from importlib import import_module
from django.conf import settings
default_plugin = getattr(settings, 'ACCESS_DEFAULT_PLUGIN', "access.plugins.DjangoAccessPlugin")
if default_plugin not in cls.default_plugins:
logger.info("Creating a default plugin: %s", default_plugin)
path = default_plugin.split('.')
plugin_path = '.'.join(path[:-1])
plugin_name = path[-1]
DefaultPlugin = getattr(import_module(plugin_path), plugin_name)
cls.default_plugins[default_plugin] = DefaultPlugin()
return cls.default_plugins[default_plugin] | [
"def",
"get_default_plugin",
"(",
"cls",
")",
":",
"from",
"importlib",
"import",
"import_module",
"from",
"django",
".",
"conf",
"import",
"settings",
"default_plugin",
"=",
"getattr",
"(",
"settings",
",",
"'ACCESS_DEFAULT_PLUGIN'",
",",
"\"access.plugins.DjangoAccessPlugin\"",
")",
"if",
"default_plugin",
"not",
"in",
"cls",
".",
"default_plugins",
":",
"logger",
".",
"info",
"(",
"\"Creating a default plugin: %s\"",
",",
"default_plugin",
")",
"path",
"=",
"default_plugin",
".",
"split",
"(",
"'.'",
")",
"plugin_path",
"=",
"'.'",
".",
"join",
"(",
"path",
"[",
":",
"-",
"1",
"]",
")",
"plugin_name",
"=",
"path",
"[",
"-",
"1",
"]",
"DefaultPlugin",
"=",
"getattr",
"(",
"import_module",
"(",
"plugin_path",
")",
",",
"plugin_name",
")",
"cls",
".",
"default_plugins",
"[",
"default_plugin",
"]",
"=",
"DefaultPlugin",
"(",
")",
"return",
"cls",
".",
"default_plugins",
"[",
"default_plugin",
"]"
] | 47.2 | 18.4 |
def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
'''
Return the domain capabilities given an emulator, architecture, machine or virtualization type.
.. versionadded:: 2019.2.0
:param emulator: return the capabilities for the given emulator binary
:param arch: return the capabilities for the given CPU architecture
:param machine: return the capabilities for the given emulated machine type
:param domain: return the capabilities for the given virtualization type.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
The list of the possible emulator, arch, machine and domain can be found in
the host capabilities output.
If none of the parameters is provided, the libvirt default one is returned.
CLI Example:
.. code-block:: bash
salt '*' virt.domain_capabilities arch='x86_64' domain='kvm'
'''
conn = __get_conn(**kwargs)
result = []
try:
caps = ElementTree.fromstring(conn.getDomainCapabilities(emulator, arch, machine, domain, 0))
result = _parse_domain_caps(caps)
finally:
conn.close()
return result | [
"def",
"domain_capabilities",
"(",
"emulator",
"=",
"None",
",",
"arch",
"=",
"None",
",",
"machine",
"=",
"None",
",",
"domain",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"__get_conn",
"(",
"*",
"*",
"kwargs",
")",
"result",
"=",
"[",
"]",
"try",
":",
"caps",
"=",
"ElementTree",
".",
"fromstring",
"(",
"conn",
".",
"getDomainCapabilities",
"(",
"emulator",
",",
"arch",
",",
"machine",
",",
"domain",
",",
"0",
")",
")",
"result",
"=",
"_parse_domain_caps",
"(",
"caps",
")",
"finally",
":",
"conn",
".",
"close",
"(",
")",
"return",
"result"
] | 36.371429 | 31.8 |
def node_dependencies(context: Context):
"""
Updates node.js dependencies
"""
args = ['--loglevel', {0: 'silent', 1: 'warn', 2: 'info'}[context.verbosity]]
if not context.use_colour:
args.append('--color false')
args.append('install')
return context.shell('npm', *args) | [
"def",
"node_dependencies",
"(",
"context",
":",
"Context",
")",
":",
"args",
"=",
"[",
"'--loglevel'",
",",
"{",
"0",
":",
"'silent'",
",",
"1",
":",
"'warn'",
",",
"2",
":",
"'info'",
"}",
"[",
"context",
".",
"verbosity",
"]",
"]",
"if",
"not",
"context",
".",
"use_colour",
":",
"args",
".",
"append",
"(",
"'--color false'",
")",
"args",
".",
"append",
"(",
"'install'",
")",
"return",
"context",
".",
"shell",
"(",
"'npm'",
",",
"*",
"args",
")"
] | 33 | 8.777778 |
def remove_lib(lib_name):
"""remove library.
:param lib_name: library name (e.g. 'PS2Keyboard')
:rtype: None
"""
targ_dlib = libraries_dir() / lib_name
log.debug('remove %s', targ_dlib)
targ_dlib.rmtree() | [
"def",
"remove_lib",
"(",
"lib_name",
")",
":",
"targ_dlib",
"=",
"libraries_dir",
"(",
")",
"/",
"lib_name",
"log",
".",
"debug",
"(",
"'remove %s'",
",",
"targ_dlib",
")",
"targ_dlib",
".",
"rmtree",
"(",
")"
] | 22.5 | 15.6 |
def getPk(self):
'''
getPk - @see ForeignLinkData.getPk
'''
if not self.pk or None in self.pk:
for i in range( len(self.pk) ):
if self.pk[i]:
continue
if self.obj[i] and self.obj[i]._id:
self.pk[i] = self.obj[i]._id
return self.pk | [
"def",
"getPk",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"pk",
"or",
"None",
"in",
"self",
".",
"pk",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"pk",
")",
")",
":",
"if",
"self",
".",
"pk",
"[",
"i",
"]",
":",
"continue",
"if",
"self",
".",
"obj",
"[",
"i",
"]",
"and",
"self",
".",
"obj",
"[",
"i",
"]",
".",
"_id",
":",
"self",
".",
"pk",
"[",
"i",
"]",
"=",
"self",
".",
"obj",
"[",
"i",
"]",
".",
"_id",
"return",
"self",
".",
"pk"
] | 19.384615 | 20.615385 |
def p_expr_assign(p):
'''expr : variable EQUALS expr
| variable EQUALS AND expr'''
if len(p) == 5:
p[0] = ast.Assignment(p[1], p[4], True, lineno=p.lineno(2))
else:
p[0] = ast.Assignment(p[1], p[3], False, lineno=p.lineno(2)) | [
"def",
"p_expr_assign",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"5",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Assignment",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"4",
"]",
",",
"True",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"2",
")",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Assignment",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"False",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"2",
")",
")"
] | 37 | 19 |
def get_breadcrumbs(url, request=None):
"""
Given a url returns a list of breadcrumbs, which are each a
tuple of (name, url).
"""
from wave.reverse import preserve_builtin_query_params
from wave.settings import api_settings
from wave.views import APIView
view_name_func = api_settings.VIEW_NAME_FUNCTION
def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen):
"""
Add tuples of (name, url) to the breadcrumbs list,
progressively chomping off parts of the url.
"""
try:
(view, unused_args, unused_kwargs) = resolve(url)
except Exception:
pass
else:
# Check if this is a REST framework view,
# and if so add it to the breadcrumbs
cls = getattr(view, 'cls', None)
if cls is not None and issubclass(cls, APIView):
# Don't list the same view twice in a row.
# Probably an optional trailing slash.
if not seen or seen[-1] != view:
suffix = getattr(view, 'suffix', None)
name = view_name_func(cls, suffix)
insert_url = preserve_builtin_query_params(prefix + url, request)
breadcrumbs_list.insert(0, (name, insert_url))
seen.append(view)
if url == '':
# All done
return breadcrumbs_list
elif url.endswith('/'):
# Drop trailing slash off the end and continue to try to
# resolve more breadcrumbs
url = url.rstrip('/')
return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)
# Drop trailing non-slash off the end and continue to try to
# resolve more breadcrumbs
url = url[:url.rfind('/') + 1]
return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)
prefix = get_script_prefix().rstrip('/')
url = url[len(prefix):]
return breadcrumbs_recursive(url, [], prefix, []) | [
"def",
"get_breadcrumbs",
"(",
"url",
",",
"request",
"=",
"None",
")",
":",
"from",
"wave",
".",
"reverse",
"import",
"preserve_builtin_query_params",
"from",
"wave",
".",
"settings",
"import",
"api_settings",
"from",
"wave",
".",
"views",
"import",
"APIView",
"view_name_func",
"=",
"api_settings",
".",
"VIEW_NAME_FUNCTION",
"def",
"breadcrumbs_recursive",
"(",
"url",
",",
"breadcrumbs_list",
",",
"prefix",
",",
"seen",
")",
":",
"\"\"\"\n Add tuples of (name, url) to the breadcrumbs list,\n progressively chomping off parts of the url.\n \"\"\"",
"try",
":",
"(",
"view",
",",
"unused_args",
",",
"unused_kwargs",
")",
"=",
"resolve",
"(",
"url",
")",
"except",
"Exception",
":",
"pass",
"else",
":",
"# Check if this is a REST framework view,",
"# and if so add it to the breadcrumbs",
"cls",
"=",
"getattr",
"(",
"view",
",",
"'cls'",
",",
"None",
")",
"if",
"cls",
"is",
"not",
"None",
"and",
"issubclass",
"(",
"cls",
",",
"APIView",
")",
":",
"# Don't list the same view twice in a row.",
"# Probably an optional trailing slash.",
"if",
"not",
"seen",
"or",
"seen",
"[",
"-",
"1",
"]",
"!=",
"view",
":",
"suffix",
"=",
"getattr",
"(",
"view",
",",
"'suffix'",
",",
"None",
")",
"name",
"=",
"view_name_func",
"(",
"cls",
",",
"suffix",
")",
"insert_url",
"=",
"preserve_builtin_query_params",
"(",
"prefix",
"+",
"url",
",",
"request",
")",
"breadcrumbs_list",
".",
"insert",
"(",
"0",
",",
"(",
"name",
",",
"insert_url",
")",
")",
"seen",
".",
"append",
"(",
"view",
")",
"if",
"url",
"==",
"''",
":",
"# All done",
"return",
"breadcrumbs_list",
"elif",
"url",
".",
"endswith",
"(",
"'/'",
")",
":",
"# Drop trailing slash off the end and continue to try to",
"# resolve more breadcrumbs",
"url",
"=",
"url",
".",
"rstrip",
"(",
"'/'",
")",
"return",
"breadcrumbs_recursive",
"(",
"url",
",",
"breadcrumbs_list",
",",
"prefix",
",",
"seen",
")",
"# Drop trailing non-slash off the end and continue to try to",
"# resolve more breadcrumbs",
"url",
"=",
"url",
"[",
":",
"url",
".",
"rfind",
"(",
"'/'",
")",
"+",
"1",
"]",
"return",
"breadcrumbs_recursive",
"(",
"url",
",",
"breadcrumbs_list",
",",
"prefix",
",",
"seen",
")",
"prefix",
"=",
"get_script_prefix",
"(",
")",
".",
"rstrip",
"(",
"'/'",
")",
"url",
"=",
"url",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"return",
"breadcrumbs_recursive",
"(",
"url",
",",
"[",
"]",
",",
"prefix",
",",
"[",
"]",
")"
] | 37.377358 | 17.830189 |
def rise(self, crd, ev='5deg'):
"""This method will give the rise/set hour-angles of a source. It
needs the position in the frame, and a time. If the latter is not
set, the current time will be used.
:param crd: a direction measure
:param ev: the elevation limit as a quantity or string
:returns: `dict` with rise and set sidereal time quantities or a 2
strings "below" or "above"
"""
if not is_measure(crd):
raise TypeError('No rise/set coordinates specified')
ps = self._getwhere()
self._fillnow()
hd = self.measure(crd, "hadec")
c = self.measure(crd, "app")
evq = dq.quantity(ev)
hdm1 = dq.quantity(hd["m1"])
psm1 = dq.quantity(ps["m1"])
ct = (dq.sin(dq.quantity(ev)) - (dq.sin(hdm1) * dq.sin(psm1))) \
/ (dq.cos(hdm1) * dq.cos(psm1))
if ct.get_value() >= 1:
return {'rise': 'below', 'set': 'below'}
if ct.get_value() <= -1:
return {'rise': 'above', 'set': 'above'}
a = dq.acos(ct)
return dict(rise=dq.quantity(c["m0"]).norm(0) - a,
set=dq.quantity(c["m0"]).norm(0) + a) | [
"def",
"rise",
"(",
"self",
",",
"crd",
",",
"ev",
"=",
"'5deg'",
")",
":",
"if",
"not",
"is_measure",
"(",
"crd",
")",
":",
"raise",
"TypeError",
"(",
"'No rise/set coordinates specified'",
")",
"ps",
"=",
"self",
".",
"_getwhere",
"(",
")",
"self",
".",
"_fillnow",
"(",
")",
"hd",
"=",
"self",
".",
"measure",
"(",
"crd",
",",
"\"hadec\"",
")",
"c",
"=",
"self",
".",
"measure",
"(",
"crd",
",",
"\"app\"",
")",
"evq",
"=",
"dq",
".",
"quantity",
"(",
"ev",
")",
"hdm1",
"=",
"dq",
".",
"quantity",
"(",
"hd",
"[",
"\"m1\"",
"]",
")",
"psm1",
"=",
"dq",
".",
"quantity",
"(",
"ps",
"[",
"\"m1\"",
"]",
")",
"ct",
"=",
"(",
"dq",
".",
"sin",
"(",
"dq",
".",
"quantity",
"(",
"ev",
")",
")",
"-",
"(",
"dq",
".",
"sin",
"(",
"hdm1",
")",
"*",
"dq",
".",
"sin",
"(",
"psm1",
")",
")",
")",
"/",
"(",
"dq",
".",
"cos",
"(",
"hdm1",
")",
"*",
"dq",
".",
"cos",
"(",
"psm1",
")",
")",
"if",
"ct",
".",
"get_value",
"(",
")",
">=",
"1",
":",
"return",
"{",
"'rise'",
":",
"'below'",
",",
"'set'",
":",
"'below'",
"}",
"if",
"ct",
".",
"get_value",
"(",
")",
"<=",
"-",
"1",
":",
"return",
"{",
"'rise'",
":",
"'above'",
",",
"'set'",
":",
"'above'",
"}",
"a",
"=",
"dq",
".",
"acos",
"(",
"ct",
")",
"return",
"dict",
"(",
"rise",
"=",
"dq",
".",
"quantity",
"(",
"c",
"[",
"\"m0\"",
"]",
")",
".",
"norm",
"(",
"0",
")",
"-",
"a",
",",
"set",
"=",
"dq",
".",
"quantity",
"(",
"c",
"[",
"\"m0\"",
"]",
")",
".",
"norm",
"(",
"0",
")",
"+",
"a",
")"
] | 41.206897 | 13.827586 |
def run(self, graph, universe=None):
"""Run the contained protocol on a seed graph.
:param pybel.BELGraph graph: The seed BEL graph
:param pybel.BELGraph universe: Allows just-in-time setting of the universe in case it wasn't set before.
Defaults to the given network.
:return: The new graph is returned if not applied in-place
:rtype: pybel.BELGraph
"""
self.universe = universe or graph.copy()
return self._run_helper(graph.copy(), self.protocol) | [
"def",
"run",
"(",
"self",
",",
"graph",
",",
"universe",
"=",
"None",
")",
":",
"self",
".",
"universe",
"=",
"universe",
"or",
"graph",
".",
"copy",
"(",
")",
"return",
"self",
".",
"_run_helper",
"(",
"graph",
".",
"copy",
"(",
")",
",",
"self",
".",
"protocol",
")"
] | 49.363636 | 20.545455 |
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
Starts with r, then goes to r.reference (if exists),
then to r.reference.reference, etc.
until we hit ``ref_limit`` or a reference loop.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain | [
"def",
"get_ref_chain",
"(",
"self",
",",
"ref_limit",
"=",
"100",
")",
":",
"head",
"=",
"self",
"ref_chain",
"=",
"set",
"(",
")",
"while",
"len",
"(",
"ref_chain",
")",
"<",
"ref_limit",
":",
"if",
"isinstance",
"(",
"head",
",",
"Dataset",
")",
":",
"ref_chain",
".",
"add",
"(",
"head",
")",
"if",
"(",
"head",
".",
"reference",
"is",
"not",
"None",
")",
"and",
"(",
"head",
".",
"reference",
"not",
"in",
"ref_chain",
")",
":",
"head",
"=",
"head",
".",
"reference",
"else",
":",
"break",
"else",
":",
"break",
"return",
"ref_chain"
] | 30.482759 | 15.827586 |
def get_raw_query(self):
"""Returns the raw query to use for current search, based on the
base query + update query
"""
query = self.base_query.copy()
search_query = self.search_query.copy()
query.update(search_query)
# Add sorting criteria
sorting = self.resolve_sorting(query)
query.update(sorting)
# Check if sort_on is an index and if is sortable. Otherwise, assume
# the sorting must be done manually
catalog = api.get_tool(self.catalog_name)
sort_on = query.get("sort_on", None)
if sort_on and not self.is_sortable_index(sort_on, catalog):
del(query["sort_on"])
return query | [
"def",
"get_raw_query",
"(",
"self",
")",
":",
"query",
"=",
"self",
".",
"base_query",
".",
"copy",
"(",
")",
"search_query",
"=",
"self",
".",
"search_query",
".",
"copy",
"(",
")",
"query",
".",
"update",
"(",
"search_query",
")",
"# Add sorting criteria",
"sorting",
"=",
"self",
".",
"resolve_sorting",
"(",
"query",
")",
"query",
".",
"update",
"(",
"sorting",
")",
"# Check if sort_on is an index and if is sortable. Otherwise, assume",
"# the sorting must be done manually",
"catalog",
"=",
"api",
".",
"get_tool",
"(",
"self",
".",
"catalog_name",
")",
"sort_on",
"=",
"query",
".",
"get",
"(",
"\"sort_on\"",
",",
"None",
")",
"if",
"sort_on",
"and",
"not",
"self",
".",
"is_sortable_index",
"(",
"sort_on",
",",
"catalog",
")",
":",
"del",
"(",
"query",
"[",
"\"sort_on\"",
"]",
")",
"return",
"query"
] | 36.631579 | 13.210526 |
def getSiblings(self, textId: str, subreference: Union[str, BaseReference]) -> Tuple[BaseReference, BaseReference]:
""" Retrieve the siblings of a textual node
:param textId: CtsTextMetadata Identifier
:type textId: str
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:return: Tuple of references
:rtype: (str, str)
"""
raise NotImplementedError() | [
"def",
"getSiblings",
"(",
"self",
",",
"textId",
":",
"str",
",",
"subreference",
":",
"Union",
"[",
"str",
",",
"BaseReference",
"]",
")",
"->",
"Tuple",
"[",
"BaseReference",
",",
"BaseReference",
"]",
":",
"raise",
"NotImplementedError",
"(",
")"
] | 39.727273 | 17.181818 |
def get_filter(self, name=constants.STRING_FILTER):
"""Get the filter on the source.
:param name: The name of the filter. This will be encoded as
an AMQP Symbol. By default this is set to b'apache.org:selector-filter:string'.
:type name: bytes
"""
try:
filter_key = c_uamqp.symbol_value(name)
return self._address.filter_set[filter_key].value
except (TypeError, KeyError):
return None | [
"def",
"get_filter",
"(",
"self",
",",
"name",
"=",
"constants",
".",
"STRING_FILTER",
")",
":",
"try",
":",
"filter_key",
"=",
"c_uamqp",
".",
"symbol_value",
"(",
"name",
")",
"return",
"self",
".",
"_address",
".",
"filter_set",
"[",
"filter_key",
"]",
".",
"value",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"return",
"None"
] | 38.916667 | 18.5 |
def clear_assessment(self):
"""Clears the assessment.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_assessment_metadata().is_read_only() or
self.get_assessment_metadata().is_required()):
raise errors.NoAccess()
self._my_map['assessmentId'] = self._assessment_default | [
"def",
"clear_assessment",
"(",
"self",
")",
":",
"# Implemented from template for osid.resource.ResourceForm.clear_avatar_template",
"if",
"(",
"self",
".",
"get_assessment_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
"or",
"self",
".",
"get_assessment_metadata",
"(",
")",
".",
"is_required",
"(",
")",
")",
":",
"raise",
"errors",
".",
"NoAccess",
"(",
")",
"self",
".",
"_my_map",
"[",
"'assessmentId'",
"]",
"=",
"self",
".",
"_assessment_default"
] | 42.615385 | 20.461538 |
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo | [
"def",
"bisect_right",
"(",
"a",
",",
"x",
",",
"lo",
"=",
"0",
",",
"hi",
"=",
"None",
")",
":",
"if",
"lo",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'lo must be non-negative'",
")",
"if",
"hi",
"is",
"None",
":",
"hi",
"=",
"len",
"(",
"a",
")",
"while",
"lo",
"<",
"hi",
":",
"mid",
"=",
"(",
"lo",
"+",
"hi",
")",
"//",
"2",
"if",
"x",
"<",
"a",
"[",
"mid",
"]",
":",
"hi",
"=",
"mid",
"else",
":",
"lo",
"=",
"mid",
"+",
"1",
"return",
"lo"
] | 31.8 | 20.6 |
def awaitAnyTermination(self, timeout=None):
"""Wait until any of the queries on the associated SQLContext has terminated since the
creation of the context, or since :func:`resetTerminated()` was called. If any query was
terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has terminated or not within the
`timeout` seconds.
If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will
either return immediately (if the query was terminated by :func:`query.stop()`),
or throw the exception immediately (if the query was terminated with exception). Use
:func:`resetTerminated()` to clear past terminations and wait for new terminations.
In the case where multiple queries have terminated since :func:`resetTermination()`
was called, if any query has terminated with exception, then :func:`awaitAnyTermination()`
will throw any of the exception. For correctly documenting exceptions across multiple
queries, users need to stop all of them after any of them terminates with exception, and
then check the `query.exception()` for each query.
throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
"""
if timeout is not None:
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
return self._jsqm.awaitAnyTermination(int(timeout * 1000))
else:
return self._jsqm.awaitAnyTermination() | [
"def",
"awaitAnyTermination",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"timeout",
",",
"(",
"int",
",",
"float",
")",
")",
"or",
"timeout",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"timeout must be a positive integer or float. Got %s\"",
"%",
"timeout",
")",
"return",
"self",
".",
"_jsqm",
".",
"awaitAnyTermination",
"(",
"int",
"(",
"timeout",
"*",
"1000",
")",
")",
"else",
":",
"return",
"self",
".",
"_jsqm",
".",
"awaitAnyTermination",
"(",
")"
] | 63.884615 | 36 |
def get(self, key, failobj=None, exact=0):
"""Returns failobj if key is not found or is ambiguous"""
if not exact:
try:
key = self.getfullkey(key)
except KeyError:
return failobj
return self.data.get(key,failobj) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"failobj",
"=",
"None",
",",
"exact",
"=",
"0",
")",
":",
"if",
"not",
"exact",
":",
"try",
":",
"key",
"=",
"self",
".",
"getfullkey",
"(",
"key",
")",
"except",
"KeyError",
":",
"return",
"failobj",
"return",
"self",
".",
"data",
".",
"get",
"(",
"key",
",",
"failobj",
")"
] | 28.5 | 15 |
def cleanup(self):
"""
Remove those pending exits if:
a) they are the return exits of non-returning SimProcedures
b) they are the return exits of non-returning syscalls
b) they are the return exits of non-returning functions
:return: None
"""
pending_exits_to_remove = defaultdict(list)
for func_addr in self._updated_functions:
if func_addr not in self._jobs:
continue
jobs = self._jobs[func_addr]
for i, pe in enumerate(jobs):
if pe.returning_source is None:
# The original call failed. This pending exit must be followed.
continue
func = self._functions.function(pe.returning_source)
if func is None:
# Why does it happen?
l.warning("An expected function at %s is not found. Please report it to Fish.",
pe.returning_source if pe.returning_source is not None else 'None')
continue
if func.returning is False:
# Oops, it's not returning
# Remove this pending exit
pending_exits_to_remove[pe.returning_source].append(i)
for func_addr, indices in pending_exits_to_remove.items():
jobs = self._jobs[func_addr]
for index in reversed(indices):
job = jobs[index]
self._deregister_job_callback(job.func_addr, job)
del jobs[index]
self._job_count -= 1
if not jobs:
del self._jobs[func_addr]
self.clear_updated_functions() | [
"def",
"cleanup",
"(",
"self",
")",
":",
"pending_exits_to_remove",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"func_addr",
"in",
"self",
".",
"_updated_functions",
":",
"if",
"func_addr",
"not",
"in",
"self",
".",
"_jobs",
":",
"continue",
"jobs",
"=",
"self",
".",
"_jobs",
"[",
"func_addr",
"]",
"for",
"i",
",",
"pe",
"in",
"enumerate",
"(",
"jobs",
")",
":",
"if",
"pe",
".",
"returning_source",
"is",
"None",
":",
"# The original call failed. This pending exit must be followed.",
"continue",
"func",
"=",
"self",
".",
"_functions",
".",
"function",
"(",
"pe",
".",
"returning_source",
")",
"if",
"func",
"is",
"None",
":",
"# Why does it happen?",
"l",
".",
"warning",
"(",
"\"An expected function at %s is not found. Please report it to Fish.\"",
",",
"pe",
".",
"returning_source",
"if",
"pe",
".",
"returning_source",
"is",
"not",
"None",
"else",
"'None'",
")",
"continue",
"if",
"func",
".",
"returning",
"is",
"False",
":",
"# Oops, it's not returning",
"# Remove this pending exit",
"pending_exits_to_remove",
"[",
"pe",
".",
"returning_source",
"]",
".",
"append",
"(",
"i",
")",
"for",
"func_addr",
",",
"indices",
"in",
"pending_exits_to_remove",
".",
"items",
"(",
")",
":",
"jobs",
"=",
"self",
".",
"_jobs",
"[",
"func_addr",
"]",
"for",
"index",
"in",
"reversed",
"(",
"indices",
")",
":",
"job",
"=",
"jobs",
"[",
"index",
"]",
"self",
".",
"_deregister_job_callback",
"(",
"job",
".",
"func_addr",
",",
"job",
")",
"del",
"jobs",
"[",
"index",
"]",
"self",
".",
"_job_count",
"-=",
"1",
"if",
"not",
"jobs",
":",
"del",
"self",
".",
"_jobs",
"[",
"func_addr",
"]",
"self",
".",
"clear_updated_functions",
"(",
")"
] | 38.363636 | 18.272727 |
def _dictify(self, doc):
"""Transforms the replies to a regular Python dict with
strings and datetimes.
Tested with BankID version 2.5 return data.
:param doc: The response as interpreted by :py:mod:`zeep`.
:returns: The response parsed to a dict.
:rtype: dict
"""
return {
k: (self._dictify(doc[k]) if hasattr(doc[k], "_xsd_type") else doc[k])
for k in doc
} | [
"def",
"_dictify",
"(",
"self",
",",
"doc",
")",
":",
"return",
"{",
"k",
":",
"(",
"self",
".",
"_dictify",
"(",
"doc",
"[",
"k",
"]",
")",
"if",
"hasattr",
"(",
"doc",
"[",
"k",
"]",
",",
"\"_xsd_type\"",
")",
"else",
"doc",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"doc",
"}"
] | 29.6 | 21.6 |
def fake_lens_path_set(lens_path, value, obj):
"""
Simulates R.set with a lens_path since we don't have lens functions
:param lens_path: Array of string paths
:param value: The value to set at the lens path
:param obj: Object containing the given path
:return: The value at the path or None
"""
segment = head(lens_path)
obj_copy = copy.copy(obj)
def set_array_index(i, v, l):
# Fill the array with None up to the given index and set the index to v
try:
l[i] = v
except IndexError:
for _ in range(i - len(l) + 1):
l.append(None)
l[i] = v
if not (length(lens_path) - 1):
# Done
new_value = value
else:
# Find the value at the path or create a {} or [] at obj[segment]
found_or_created = item_path_or(
if_else(
lambda segment: segment.isnumeric(),
always([]),
always({})
)(head(tail(lens_path))),
segment,
obj
)
# Recurse on the rest of the path
new_value = fake_lens_path_set(tail(lens_path), value, found_or_created)
# Set or replace
if segment.isnumeric():
set_array_index(int(segment), new_value, obj_copy)
else:
obj_copy[segment] = new_value
return obj_copy | [
"def",
"fake_lens_path_set",
"(",
"lens_path",
",",
"value",
",",
"obj",
")",
":",
"segment",
"=",
"head",
"(",
"lens_path",
")",
"obj_copy",
"=",
"copy",
".",
"copy",
"(",
"obj",
")",
"def",
"set_array_index",
"(",
"i",
",",
"v",
",",
"l",
")",
":",
"# Fill the array with None up to the given index and set the index to v",
"try",
":",
"l",
"[",
"i",
"]",
"=",
"v",
"except",
"IndexError",
":",
"for",
"_",
"in",
"range",
"(",
"i",
"-",
"len",
"(",
"l",
")",
"+",
"1",
")",
":",
"l",
".",
"append",
"(",
"None",
")",
"l",
"[",
"i",
"]",
"=",
"v",
"if",
"not",
"(",
"length",
"(",
"lens_path",
")",
"-",
"1",
")",
":",
"# Done",
"new_value",
"=",
"value",
"else",
":",
"# Find the value at the path or create a {} or [] at obj[segment]",
"found_or_created",
"=",
"item_path_or",
"(",
"if_else",
"(",
"lambda",
"segment",
":",
"segment",
".",
"isnumeric",
"(",
")",
",",
"always",
"(",
"[",
"]",
")",
",",
"always",
"(",
"{",
"}",
")",
")",
"(",
"head",
"(",
"tail",
"(",
"lens_path",
")",
")",
")",
",",
"segment",
",",
"obj",
")",
"# Recurse on the rest of the path",
"new_value",
"=",
"fake_lens_path_set",
"(",
"tail",
"(",
"lens_path",
")",
",",
"value",
",",
"found_or_created",
")",
"# Set or replace",
"if",
"segment",
".",
"isnumeric",
"(",
")",
":",
"set_array_index",
"(",
"int",
"(",
"segment",
")",
",",
"new_value",
",",
"obj_copy",
")",
"else",
":",
"obj_copy",
"[",
"segment",
"]",
"=",
"new_value",
"return",
"obj_copy"
] | 30.906977 | 17.186047 |
def avail_locations(call=None):
'''
Return available Packet datacenter locations.
CLI Example:
.. code-block:: bash
salt-cloud --list-locations packet-provider
salt-cloud -f avail_locations packet-provider
'''
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
vm_ = get_configured_provider()
manager = packet.Manager(auth_token=vm_['token'])
ret = {}
for facility in manager.list_facilities():
ret[facility.name] = facility.__dict__
return ret | [
"def",
"avail_locations",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudException",
"(",
"'The avail_locations function must be called with -f or --function.'",
")",
"vm_",
"=",
"get_configured_provider",
"(",
")",
"manager",
"=",
"packet",
".",
"Manager",
"(",
"auth_token",
"=",
"vm_",
"[",
"'token'",
"]",
")",
"ret",
"=",
"{",
"}",
"for",
"facility",
"in",
"manager",
".",
"list_facilities",
"(",
")",
":",
"ret",
"[",
"facility",
".",
"name",
"]",
"=",
"facility",
".",
"__dict__",
"return",
"ret"
] | 23.6 | 24.24 |
def scope(self, *args, **kwargs):
# type: (*Any, **Any) -> Scope
"""Return a single scope based on the provided name.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:return: a single :class:`models.Scope`
:raises NotFoundError: When no `Scope` is found
:raises MultipleFoundError: When more than a single `Scope` is found
"""
_scopes = self.scopes(*args, **kwargs)
if len(_scopes) == 0:
raise NotFoundError("No scope fits criteria")
if len(_scopes) != 1:
raise MultipleFoundError("Multiple scopes fit criteria")
return _scopes[0] | [
"def",
"scope",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (*Any, **Any) -> Scope",
"_scopes",
"=",
"self",
".",
"scopes",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"_scopes",
")",
"==",
"0",
":",
"raise",
"NotFoundError",
"(",
"\"No scope fits criteria\"",
")",
"if",
"len",
"(",
"_scopes",
")",
"!=",
"1",
":",
"raise",
"MultipleFoundError",
"(",
"\"Multiple scopes fit criteria\"",
")",
"return",
"_scopes",
"[",
"0",
"]"
] | 40.631579 | 22.789474 |
def libvlc_video_set_format_callbacks(mp, setup, cleanup):
'''Set decoded video chroma and dimensions. This only works in combination with
L{libvlc_video_set_callbacks}().
@param mp: the media player.
@param setup: callback to select the video format (cannot be NULL).
@param cleanup: callback to release any allocated resources (or NULL).
@version: LibVLC 2.0.0 or later.
'''
f = _Cfunctions.get('libvlc_video_set_format_callbacks', None) or \
_Cfunction('libvlc_video_set_format_callbacks', ((1,), (1,), (1,),), None,
None, MediaPlayer, VideoFormatCb, VideoCleanupCb)
return f(mp, setup, cleanup) | [
"def",
"libvlc_video_set_format_callbacks",
"(",
"mp",
",",
"setup",
",",
"cleanup",
")",
":",
"f",
"=",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_video_set_format_callbacks'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_video_set_format_callbacks'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"None",
",",
"MediaPlayer",
",",
"VideoFormatCb",
",",
"VideoCleanupCb",
")",
"return",
"f",
"(",
"mp",
",",
"setup",
",",
"cleanup",
")"
] | 54.25 | 23.75 |
def process(self):
"""
Call the function with the args/kwds and tell the ApplyResult
that its result is ready. Correctly handles the exceptions
happening during the execution of the function
"""
try:
result = self._func(*self._args, **self._kwds)
except:
self._result._set_exception()
else:
self._result._set_value(result) | [
"def",
"process",
"(",
"self",
")",
":",
"try",
":",
"result",
"=",
"self",
".",
"_func",
"(",
"*",
"self",
".",
"_args",
",",
"*",
"*",
"self",
".",
"_kwds",
")",
"except",
":",
"self",
".",
"_result",
".",
"_set_exception",
"(",
")",
"else",
":",
"self",
".",
"_result",
".",
"_set_value",
"(",
"result",
")"
] | 34.25 | 16.083333 |
def purge_old_logs(delete_before_days=7):
"""
Purges old logs from the database table
"""
delete_before_date = timezone.now() - timedelta(days=delete_before_days)
logs_deleted = Log.objects.filter(
created_on__lte=delete_before_date).delete()
return logs_deleted | [
"def",
"purge_old_logs",
"(",
"delete_before_days",
"=",
"7",
")",
":",
"delete_before_date",
"=",
"timezone",
".",
"now",
"(",
")",
"-",
"timedelta",
"(",
"days",
"=",
"delete_before_days",
")",
"logs_deleted",
"=",
"Log",
".",
"objects",
".",
"filter",
"(",
"created_on__lte",
"=",
"delete_before_date",
")",
".",
"delete",
"(",
")",
"return",
"logs_deleted"
] | 35.875 | 8.875 |
def delete(self, resource_id, **kwargs):
"""
Deletes a resource by ID.
"""
return self.client._delete(self._url(resource_id), **kwargs) | [
"def",
"delete",
"(",
"self",
",",
"resource_id",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"client",
".",
"_delete",
"(",
"self",
".",
"_url",
"(",
"resource_id",
")",
",",
"*",
"*",
"kwargs",
")"
] | 27.166667 | 12.5 |
def post_handler_err(self, function_arn, invocation_id, handler_err):
"""
Post the error message from executing the function handler for :code:`function_arn`
with specifid :code:`invocation_id`
:param function_arn: Arn of the Lambda function which has the handler error message.
:type function_arn: string
:param invocation_id: Invocation ID of the work that is being requested
:type invocation_id: string
:param handler_err: the error message caught from handler
:type handler_err: string
"""
url = self._get_work_url(function_arn)
runtime_logger.info('Posting handler error for invocation id [{}] to {}'.format(invocation_id, url))
payload = json.dumps({
"errorMessage": handler_err,
}).encode('utf-8')
request = Request(url, payload)
request.add_header(HEADER_INVOCATION_ID, invocation_id)
request.add_header(HEADER_FUNCTION_ERR_TYPE, "Handled")
request.add_header(HEADER_AUTH_TOKEN, self.auth_token)
urlopen(request)
runtime_logger.info('Posted handler error for invocation id [{}]'.format(invocation_id)) | [
"def",
"post_handler_err",
"(",
"self",
",",
"function_arn",
",",
"invocation_id",
",",
"handler_err",
")",
":",
"url",
"=",
"self",
".",
"_get_work_url",
"(",
"function_arn",
")",
"runtime_logger",
".",
"info",
"(",
"'Posting handler error for invocation id [{}] to {}'",
".",
"format",
"(",
"invocation_id",
",",
"url",
")",
")",
"payload",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"errorMessage\"",
":",
"handler_err",
",",
"}",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"request",
"=",
"Request",
"(",
"url",
",",
"payload",
")",
"request",
".",
"add_header",
"(",
"HEADER_INVOCATION_ID",
",",
"invocation_id",
")",
"request",
".",
"add_header",
"(",
"HEADER_FUNCTION_ERR_TYPE",
",",
"\"Handled\"",
")",
"request",
".",
"add_header",
"(",
"HEADER_AUTH_TOKEN",
",",
"self",
".",
"auth_token",
")",
"urlopen",
"(",
"request",
")",
"runtime_logger",
".",
"info",
"(",
"'Posted handler error for invocation id [{}]'",
".",
"format",
"(",
"invocation_id",
")",
")"
] | 37.419355 | 26.322581 |
def similarity_transformation(rot, mat):
""" R x M x R^-1 """
return np.dot(rot, np.dot(mat, np.linalg.inv(rot))) | [
"def",
"similarity_transformation",
"(",
"rot",
",",
"mat",
")",
":",
"return",
"np",
".",
"dot",
"(",
"rot",
",",
"np",
".",
"dot",
"(",
"mat",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"rot",
")",
")",
")"
] | 39.666667 | 5 |
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause | [
"def",
"iter_causes",
"(",
"self",
")",
":",
"curr",
"=",
"self",
".",
"_cause",
"while",
"curr",
"is",
"not",
"None",
":",
"yield",
"curr",
"curr",
"=",
"curr",
".",
"_cause"
] | 28.166667 | 11.5 |
def get_conductivity(self, output='eigs', doping_levels=True,
relaxation_time=1e-14):
"""
Gives the conductivity (1/Ohm*m) in either a full 3x3 tensor
form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
The 'p' links to conductivity
at p-type doping and 'n' to the conductivity at n-type
doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either
the sorted three eigenvalues of the symmetric
conductivity tensor (format='eigs') or a full tensor (3x3
array) (output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are 1/Ohm*m
"""
return BoltztrapAnalyzer._format_to_output(self._cond,
self._cond_doping, output,
doping_levels,
relaxation_time) | [
"def",
"get_conductivity",
"(",
"self",
",",
"output",
"=",
"'eigs'",
",",
"doping_levels",
"=",
"True",
",",
"relaxation_time",
"=",
"1e-14",
")",
":",
"return",
"BoltztrapAnalyzer",
".",
"_format_to_output",
"(",
"self",
".",
"_cond",
",",
"self",
".",
"_cond_doping",
",",
"output",
",",
"doping_levels",
",",
"relaxation_time",
")"
] | 50.918919 | 22.486486 |
def get_project(self) -> str:
"""Get the ihc project"""
xdoc = self.connection.soap_action('/ws/ControllerService',
'getIHCProject', "")
if xdoc:
base64data = xdoc.find(
'./SOAP-ENV:Body/ns1:getIHCProject1/ns1:data',
IHCSoapClient.ihcns).text
if not base64:
return False
compresseddata = base64.b64decode(base64data)
return zlib.decompress(compresseddata,
16+zlib.MAX_WBITS).decode('ISO-8859-1')
return False | [
"def",
"get_project",
"(",
"self",
")",
"->",
"str",
":",
"xdoc",
"=",
"self",
".",
"connection",
".",
"soap_action",
"(",
"'/ws/ControllerService'",
",",
"'getIHCProject'",
",",
"\"\"",
")",
"if",
"xdoc",
":",
"base64data",
"=",
"xdoc",
".",
"find",
"(",
"'./SOAP-ENV:Body/ns1:getIHCProject1/ns1:data'",
",",
"IHCSoapClient",
".",
"ihcns",
")",
".",
"text",
"if",
"not",
"base64",
":",
"return",
"False",
"compresseddata",
"=",
"base64",
".",
"b64decode",
"(",
"base64data",
")",
"return",
"zlib",
".",
"decompress",
"(",
"compresseddata",
",",
"16",
"+",
"zlib",
".",
"MAX_WBITS",
")",
".",
"decode",
"(",
"'ISO-8859-1'",
")",
"return",
"False"
] | 42.928571 | 15.714286 |
def __flags(self):
"""
Internal method. Turns arguments into flags.
"""
flags = []
if self._capture:
flags.append("-capture")
if self._spy:
flags.append("-spy")
if self._dbpath:
flags += ["-db-path", self._dbpath]
flags += ["-db", "boltdb"]
else:
flags += ["-db", "memory"]
if self._synthesize:
assert(self._middleware)
flags += ["-synthesize"]
if self._simulation:
flags += ["-import", self._simulation]
if self._proxyPort:
flags += ["-pp", str(self._proxyPort)]
if self._adminPort:
flags += ["-ap", str(self._adminPort)]
if self._modify:
flags += ["-modify"]
if self._verbose:
flags += ["-v"]
if self._dev:
flags += ["-dev"]
if self._metrics:
flags += ["-metrics"]
if self._auth:
flags += ["-auth"]
if self._middleware:
flags += ["-middleware", self._middleware]
if self._cert:
flags += ["-cert", self._cert]
if self._certName:
flags += ["-cert-name", self._certName]
if self._certOrg:
flags += ["-cert-org", self._certOrg]
if self._destination:
flags += ["-destination", self._destination]
if self._key:
flags += ["-key", self._key]
if self._dest:
for i in range(len(self._dest)):
flags += ["-dest", self._dest[i]]
if self._generateCACert:
flags += ["-generate-ca-cert"]
if not self._tlsVerification:
flags += ["-tls-verification", "false"]
logging.debug("flags:" + str(flags))
return flags | [
"def",
"__flags",
"(",
"self",
")",
":",
"flags",
"=",
"[",
"]",
"if",
"self",
".",
"_capture",
":",
"flags",
".",
"append",
"(",
"\"-capture\"",
")",
"if",
"self",
".",
"_spy",
":",
"flags",
".",
"append",
"(",
"\"-spy\"",
")",
"if",
"self",
".",
"_dbpath",
":",
"flags",
"+=",
"[",
"\"-db-path\"",
",",
"self",
".",
"_dbpath",
"]",
"flags",
"+=",
"[",
"\"-db\"",
",",
"\"boltdb\"",
"]",
"else",
":",
"flags",
"+=",
"[",
"\"-db\"",
",",
"\"memory\"",
"]",
"if",
"self",
".",
"_synthesize",
":",
"assert",
"(",
"self",
".",
"_middleware",
")",
"flags",
"+=",
"[",
"\"-synthesize\"",
"]",
"if",
"self",
".",
"_simulation",
":",
"flags",
"+=",
"[",
"\"-import\"",
",",
"self",
".",
"_simulation",
"]",
"if",
"self",
".",
"_proxyPort",
":",
"flags",
"+=",
"[",
"\"-pp\"",
",",
"str",
"(",
"self",
".",
"_proxyPort",
")",
"]",
"if",
"self",
".",
"_adminPort",
":",
"flags",
"+=",
"[",
"\"-ap\"",
",",
"str",
"(",
"self",
".",
"_adminPort",
")",
"]",
"if",
"self",
".",
"_modify",
":",
"flags",
"+=",
"[",
"\"-modify\"",
"]",
"if",
"self",
".",
"_verbose",
":",
"flags",
"+=",
"[",
"\"-v\"",
"]",
"if",
"self",
".",
"_dev",
":",
"flags",
"+=",
"[",
"\"-dev\"",
"]",
"if",
"self",
".",
"_metrics",
":",
"flags",
"+=",
"[",
"\"-metrics\"",
"]",
"if",
"self",
".",
"_auth",
":",
"flags",
"+=",
"[",
"\"-auth\"",
"]",
"if",
"self",
".",
"_middleware",
":",
"flags",
"+=",
"[",
"\"-middleware\"",
",",
"self",
".",
"_middleware",
"]",
"if",
"self",
".",
"_cert",
":",
"flags",
"+=",
"[",
"\"-cert\"",
",",
"self",
".",
"_cert",
"]",
"if",
"self",
".",
"_certName",
":",
"flags",
"+=",
"[",
"\"-cert-name\"",
",",
"self",
".",
"_certName",
"]",
"if",
"self",
".",
"_certOrg",
":",
"flags",
"+=",
"[",
"\"-cert-org\"",
",",
"self",
".",
"_certOrg",
"]",
"if",
"self",
".",
"_destination",
":",
"flags",
"+=",
"[",
"\"-destination\"",
",",
"self",
".",
"_destination",
"]",
"if",
"self",
".",
"_key",
":",
"flags",
"+=",
"[",
"\"-key\"",
",",
"self",
".",
"_key",
"]",
"if",
"self",
".",
"_dest",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_dest",
")",
")",
":",
"flags",
"+=",
"[",
"\"-dest\"",
",",
"self",
".",
"_dest",
"[",
"i",
"]",
"]",
"if",
"self",
".",
"_generateCACert",
":",
"flags",
"+=",
"[",
"\"-generate-ca-cert\"",
"]",
"if",
"not",
"self",
".",
"_tlsVerification",
":",
"flags",
"+=",
"[",
"\"-tls-verification\"",
",",
"\"false\"",
"]",
"logging",
".",
"debug",
"(",
"\"flags:\"",
"+",
"str",
"(",
"flags",
")",
")",
"return",
"flags"
] | 32.145455 | 11.563636 |
def add(self, num):
"""
Adds num to the current value
"""
try:
val = self.value() + num
except:
val = num
self.set(max(0, val)) | [
"def",
"add",
"(",
"self",
",",
"num",
")",
":",
"try",
":",
"val",
"=",
"self",
".",
"value",
"(",
")",
"+",
"num",
"except",
":",
"val",
"=",
"num",
"self",
".",
"set",
"(",
"max",
"(",
"0",
",",
"val",
")",
")"
] | 21.222222 | 12.333333 |
def kms_encrypt(value, key, aws_config=None):
"""Encrypt and value with KMS key.
Args:
value (str): value to encrypt
key (str): key id or alias
aws_config (optional[dict]): aws credentials
dict of arguments passed into boto3 session
example:
aws_creds = {'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'region_name': 'us-east-1'}
Returns:
str: encrypted cipher text
"""
aws_config = aws_config or {}
aws = boto3.session.Session(**aws_config)
client = aws.client('kms')
enc_res = client.encrypt(KeyId=key,
Plaintext=value)
return n(b64encode(enc_res['CiphertextBlob'])) | [
"def",
"kms_encrypt",
"(",
"value",
",",
"key",
",",
"aws_config",
"=",
"None",
")",
":",
"aws_config",
"=",
"aws_config",
"or",
"{",
"}",
"aws",
"=",
"boto3",
".",
"session",
".",
"Session",
"(",
"*",
"*",
"aws_config",
")",
"client",
"=",
"aws",
".",
"client",
"(",
"'kms'",
")",
"enc_res",
"=",
"client",
".",
"encrypt",
"(",
"KeyId",
"=",
"key",
",",
"Plaintext",
"=",
"value",
")",
"return",
"n",
"(",
"b64encode",
"(",
"enc_res",
"[",
"'CiphertextBlob'",
"]",
")",
")"
] | 35.681818 | 14.727273 |
def create_for_rectangle(self, x, y, width, height):
"""
Create a new surface that is a rectangle within this surface.
All operations drawn to this surface are then clipped and translated
onto the target surface.
Nothing drawn via this sub-surface outside of its bounds
is drawn onto the target surface,
making this a useful method for passing constrained child surfaces
to library routines that draw directly onto the parent surface,
i.e. with no further backend allocations,
double buffering or copies.
.. note::
As of cairo 1.12,
the semantics of subsurfaces have not been finalized yet
unless the rectangle is in full device units,
is contained within the extents of the target surface,
and the target or subsurface's device transforms are not changed.
:param x:
The x-origin of the sub-surface
from the top-left of the target surface (in device-space units)
:param y:
The y-origin of the sub-surface
from the top-left of the target surface (in device-space units)
:param width:
Width of the sub-surface (in device-space units)
:param height:
Height of the sub-surface (in device-space units)
:type x: float
:type y: float
:type width: float
:type height: float
:returns:
A new :class:`Surface` object.
*New in cairo 1.10.*
"""
return Surface._from_pointer(
cairo.cairo_surface_create_for_rectangle(
self._pointer, x, y, width, height),
incref=False) | [
"def",
"create_for_rectangle",
"(",
"self",
",",
"x",
",",
"y",
",",
"width",
",",
"height",
")",
":",
"return",
"Surface",
".",
"_from_pointer",
"(",
"cairo",
".",
"cairo_surface_create_for_rectangle",
"(",
"self",
".",
"_pointer",
",",
"x",
",",
"y",
",",
"width",
",",
"height",
")",
",",
"incref",
"=",
"False",
")"
] | 38.272727 | 19.863636 |
def disconnect(self, callback=None):
""" Disconnect the callback from this group. See
:func:`connect() <vispy.event.EmitterGroup.connect>` and
:func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for
more information.
"""
ret = EventEmitter.disconnect(self, callback)
if len(self._callbacks) == 0:
self._connect_emitters(False)
return ret | [
"def",
"disconnect",
"(",
"self",
",",
"callback",
"=",
"None",
")",
":",
"ret",
"=",
"EventEmitter",
".",
"disconnect",
"(",
"self",
",",
"callback",
")",
"if",
"len",
"(",
"self",
".",
"_callbacks",
")",
"==",
"0",
":",
"self",
".",
"_connect_emitters",
"(",
"False",
")",
"return",
"ret"
] | 41.8 | 11.9 |
def generate_files(path='', ext='', level=None, dirs=False, files=True, verbosity=0):
""" Recursively generate files (and thier stats) in the indicated directory
Filter by the indicated file name extension (ext)
Args:
path (str): Root/base path to search.
ext (str or list of str): File name extension(s).
Only file paths that ".endswith()" this string will be returned
level (int, optional): Depth of file tree to halt recursion at.
None = full recursion to as deep as it goes
0 = nonrecursive, just provide a list of files at the root level of the tree
1 = one level of depth deeper in the tree
typ (type): output type (default: list). if a mapping type is provided the keys will be the full paths (unique)
dirs (bool): Whether to yield dir paths along with file paths (default: False)
files (bool): Whether to yield file paths (default: True)
`dirs=True`, `files=False` is equivalent to `ls -d`
Returns:
list of dicts: dict keys are { 'path', 'name', 'bytes', 'created', 'modified', 'accessed', 'permissions' }
path (str): Full, absolute paths to file beneath the indicated directory and ending with `ext`
name (str): File name only (everythin after the last slash in the path)
size (int): File size in bytes
changed_any (datetime): Timestamp for modification of either metadata (e.g. permissions) or content
modified (datetime): File content modification timestamp from file system
accessed (datetime): File access timestamp from file system
permissions (int): File permissions bytes as a chown-style integer with a maximum of 4 digits
type (str): One of 'file', 'dir', 'symlink->file', 'symlink->dir', 'symlink->broken'
e.g.: 777 or 1755
Examples:
>>> 'util.py' in [d['name'] for d in generate_files(os.path.dirname(__file__), ext='.py', level=0)]
True
>>> next(d for d in generate_files(os.path.dirname(__file__), ext='.py')
... if d['name'] == 'util.py')['size'] > 1000
True
>>> sorted(next(generate_files()).keys())
['accessed', 'changed_any', 'dir', 'mode', 'modified', 'name', 'path', 'size', 'type']
There should be an __init__ file in the same directory as this script.
And it should be at the top of the list.
>>> sorted(d['name'] for d in generate_files(os.path.dirname(__file__), ext='.py', level=0))[0]
'__init__.py'
>>> len(list(generate_files(__file__, ext='.')))
0
>>> len(list(generate_files(__file__, ext=['invalidexttesting123', False])))
0
>>> len(list(generate_files(__file__, ext=['.py', '.pyc', 'invalidexttesting123', False]))) > 0
True
>>> sorted(generate_files(__file__))[0]['name'] == os.path.basename(__file__)
True
>>> sorted(list(generate_files())[0].keys())
['accessed', 'changed_any', 'dir', 'mode', 'modified', 'name', 'path', 'size', 'type']
>>> all(d['type'] in ('file', 'dir',
... 'symlink->file', 'symlink->dir', 'symlink->broken',
... 'mount-point->file', 'mount-point->dir',
... 'block-device', 'pipe', 'special', 'socket', 'unknown')
... for d in generate_files(level=1, files=True, dirs=True))
True
"""
path = expand_path(path or '.')
# None interpreted as '', False is interpreted as '.' (no ext will be accepted)
ext = '.' if ext is False else ext
# multiple extensions can be specified in a list or tuple
ext = ext if ext and isinstance(ext, (list, tuple)) else [ext]
# case-insensitive extensions, '.' ext means only no-extensions are accepted
ext = set(x.lower() if x else '.' if x is False else '' for x in ext)
if os.path.isfile(path):
fn = os.path.basename(path)
# only yield the stat dict if the extension is among those that match or files without any ext are desired
if not ext or any(path.lower().endswith(x) or (x == '.' and '.' not in fn) for x in ext):
yield path_status(os.path.dirname(path), os.path.basename(path), verbosity=verbosity)
else:
for dir_path, dir_names, filenames in walk_level(path, level=level):
if verbosity > 0:
print('Checking path "{}"'.format(dir_path))
if files:
for fn in filenames: # itertools.chain(filenames, dir_names)
if ext and not any((fn.lower().endswith(x) or (x == '.' and x not in fn) for x in ext)):
continue
stat = path_status(dir_path, fn, verbosity=verbosity)
if stat and stat['name'] and stat['path']:
yield stat
if dirs:
for fn in dir_names:
if ext and not any((fn.lower().endswith(x) or (x == '.' and x not in fn) for x in ext)):
continue
yield path_status(dir_path, fn, verbosity=verbosity) | [
"def",
"generate_files",
"(",
"path",
"=",
"''",
",",
"ext",
"=",
"''",
",",
"level",
"=",
"None",
",",
"dirs",
"=",
"False",
",",
"files",
"=",
"True",
",",
"verbosity",
"=",
"0",
")",
":",
"path",
"=",
"expand_path",
"(",
"path",
"or",
"'.'",
")",
"# None interpreted as '', False is interpreted as '.' (no ext will be accepted)",
"ext",
"=",
"'.'",
"if",
"ext",
"is",
"False",
"else",
"ext",
"# multiple extensions can be specified in a list or tuple",
"ext",
"=",
"ext",
"if",
"ext",
"and",
"isinstance",
"(",
"ext",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"[",
"ext",
"]",
"# case-insensitive extensions, '.' ext means only no-extensions are accepted",
"ext",
"=",
"set",
"(",
"x",
".",
"lower",
"(",
")",
"if",
"x",
"else",
"'.'",
"if",
"x",
"is",
"False",
"else",
"''",
"for",
"x",
"in",
"ext",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"# only yield the stat dict if the extension is among those that match or files without any ext are desired",
"if",
"not",
"ext",
"or",
"any",
"(",
"path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"x",
")",
"or",
"(",
"x",
"==",
"'.'",
"and",
"'.'",
"not",
"in",
"fn",
")",
"for",
"x",
"in",
"ext",
")",
":",
"yield",
"path_status",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
",",
"verbosity",
"=",
"verbosity",
")",
"else",
":",
"for",
"dir_path",
",",
"dir_names",
",",
"filenames",
"in",
"walk_level",
"(",
"path",
",",
"level",
"=",
"level",
")",
":",
"if",
"verbosity",
">",
"0",
":",
"print",
"(",
"'Checking path \"{}\"'",
".",
"format",
"(",
"dir_path",
")",
")",
"if",
"files",
":",
"for",
"fn",
"in",
"filenames",
":",
"# itertools.chain(filenames, dir_names)",
"if",
"ext",
"and",
"not",
"any",
"(",
"(",
"fn",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"x",
")",
"or",
"(",
"x",
"==",
"'.'",
"and",
"x",
"not",
"in",
"fn",
")",
"for",
"x",
"in",
"ext",
")",
")",
":",
"continue",
"stat",
"=",
"path_status",
"(",
"dir_path",
",",
"fn",
",",
"verbosity",
"=",
"verbosity",
")",
"if",
"stat",
"and",
"stat",
"[",
"'name'",
"]",
"and",
"stat",
"[",
"'path'",
"]",
":",
"yield",
"stat",
"if",
"dirs",
":",
"for",
"fn",
"in",
"dir_names",
":",
"if",
"ext",
"and",
"not",
"any",
"(",
"(",
"fn",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"x",
")",
"or",
"(",
"x",
"==",
"'.'",
"and",
"x",
"not",
"in",
"fn",
")",
"for",
"x",
"in",
"ext",
")",
")",
":",
"continue",
"yield",
"path_status",
"(",
"dir_path",
",",
"fn",
",",
"verbosity",
"=",
"verbosity",
")"
] | 56.752809 | 32.123596 |
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
"""
Args:
feature (NCHW):
num_classes(int): num_category + 1
num_convs (int): number of conv layers
norm (str or None): either None or 'GN'
Returns:
2D head feature
"""
assert norm in [None, 'GN'], norm
l = feature
with argscope(Conv2D, data_format='channels_first',
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out',
distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
for k in range(num_convs):
l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
if norm is not None:
l = GroupNorm('gn{}'.format(k), l)
l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
return l | [
"def",
"fastrcnn_Xconv1fc_head",
"(",
"feature",
",",
"num_convs",
",",
"norm",
"=",
"None",
")",
":",
"assert",
"norm",
"in",
"[",
"None",
",",
"'GN'",
"]",
",",
"norm",
"l",
"=",
"feature",
"with",
"argscope",
"(",
"Conv2D",
",",
"data_format",
"=",
"'channels_first'",
",",
"kernel_initializer",
"=",
"tf",
".",
"variance_scaling_initializer",
"(",
"scale",
"=",
"2.0",
",",
"mode",
"=",
"'fan_out'",
",",
"distribution",
"=",
"'untruncated_normal'",
"if",
"get_tf_version_tuple",
"(",
")",
">=",
"(",
"1",
",",
"12",
")",
"else",
"'normal'",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"num_convs",
")",
":",
"l",
"=",
"Conv2D",
"(",
"'conv{}'",
".",
"format",
"(",
"k",
")",
",",
"l",
",",
"cfg",
".",
"FPN",
".",
"FRCNN_CONV_HEAD_DIM",
",",
"3",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"if",
"norm",
"is",
"not",
"None",
":",
"l",
"=",
"GroupNorm",
"(",
"'gn{}'",
".",
"format",
"(",
"k",
")",
",",
"l",
")",
"l",
"=",
"FullyConnected",
"(",
"'fc'",
",",
"l",
",",
"cfg",
".",
"FPN",
".",
"FRCNN_FC_HEAD_DIM",
",",
"kernel_initializer",
"=",
"tf",
".",
"variance_scaling_initializer",
"(",
")",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"return",
"l"
] | 41.666667 | 21.333333 |
def _add_torus_extrude_lines(
self, irad, orad, lcar=None, R=numpy.eye(3), x0=numpy.array([0.0, 0.0, 0.0])
):
"""Create Gmsh code for the torus in the x-y plane under the coordinate
transformation
.. math::
\\hat{x} = R x + x_0.
:param irad: inner radius of the torus
:param orad: outer radius of the torus
"""
self.add_comment("Torus")
# Add circle
x0t = numpy.dot(R, numpy.array([0.0, orad, 0.0]))
# Get circles in y-z plane
Rc = numpy.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
c = self.add_circle(x0 + x0t, irad, lcar=lcar, R=numpy.dot(R, Rc))
rot_axis = [0.0, 0.0, 1.0]
rot_axis = numpy.dot(R, rot_axis)
point_on_rot_axis = [0.0, 0.0, 0.0]
point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0
# Form the torus by extruding the circle three times by 2/3*pi. This
# works around the inability of Gmsh to extrude by pi or more. The
# Extrude() macro returns an array; the first [0] entry in the array is
# the entity that has been extruded at the far end. This can be used
# for the following Extrude() step. The second [1] entry of the array
# is the surface that was created by the extrusion.
previous = c.line_loop.lines
angle = "2*Pi/3"
all_surfaces = []
for i in range(3):
self.add_comment("Round no. {}".format(i + 1))
for k, p in enumerate(previous):
# ts1[] = Extrude {{0,0,1}, {0,0,0}, 2*Pi/3}{Line{tc1};};
# ...
top, surf, _ = self.extrude(
p,
rotation_axis=rot_axis,
point_on_axis=point_on_rot_axis,
angle=angle,
)
all_surfaces.append(surf)
previous[k] = top
# compound_surface = CompoundSurface(all_surfaces)
surface_loop = self.add_surface_loop(all_surfaces)
vol = self.add_volume(surface_loop)
# The newline at the end is essential:
# If a GEO file doesn't end in a newline, Gmsh will report a syntax
# error.
self.add_comment("\n")
return vol | [
"def",
"_add_torus_extrude_lines",
"(",
"self",
",",
"irad",
",",
"orad",
",",
"lcar",
"=",
"None",
",",
"R",
"=",
"numpy",
".",
"eye",
"(",
"3",
")",
",",
"x0",
"=",
"numpy",
".",
"array",
"(",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
")",
")",
":",
"self",
".",
"add_comment",
"(",
"\"Torus\"",
")",
"# Add circle",
"x0t",
"=",
"numpy",
".",
"dot",
"(",
"R",
",",
"numpy",
".",
"array",
"(",
"[",
"0.0",
",",
"orad",
",",
"0.0",
"]",
")",
")",
"# Get circles in y-z plane",
"Rc",
"=",
"numpy",
".",
"array",
"(",
"[",
"[",
"0.0",
",",
"0.0",
",",
"1.0",
"]",
",",
"[",
"0.0",
",",
"1.0",
",",
"0.0",
"]",
",",
"[",
"1.0",
",",
"0.0",
",",
"0.0",
"]",
"]",
")",
"c",
"=",
"self",
".",
"add_circle",
"(",
"x0",
"+",
"x0t",
",",
"irad",
",",
"lcar",
"=",
"lcar",
",",
"R",
"=",
"numpy",
".",
"dot",
"(",
"R",
",",
"Rc",
")",
")",
"rot_axis",
"=",
"[",
"0.0",
",",
"0.0",
",",
"1.0",
"]",
"rot_axis",
"=",
"numpy",
".",
"dot",
"(",
"R",
",",
"rot_axis",
")",
"point_on_rot_axis",
"=",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
"point_on_rot_axis",
"=",
"numpy",
".",
"dot",
"(",
"R",
",",
"point_on_rot_axis",
")",
"+",
"x0",
"# Form the torus by extruding the circle three times by 2/3*pi. This",
"# works around the inability of Gmsh to extrude by pi or more. The",
"# Extrude() macro returns an array; the first [0] entry in the array is",
"# the entity that has been extruded at the far end. This can be used",
"# for the following Extrude() step. The second [1] entry of the array",
"# is the surface that was created by the extrusion.",
"previous",
"=",
"c",
".",
"line_loop",
".",
"lines",
"angle",
"=",
"\"2*Pi/3\"",
"all_surfaces",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"self",
".",
"add_comment",
"(",
"\"Round no. {}\"",
".",
"format",
"(",
"i",
"+",
"1",
")",
")",
"for",
"k",
",",
"p",
"in",
"enumerate",
"(",
"previous",
")",
":",
"# ts1[] = Extrude {{0,0,1}, {0,0,0}, 2*Pi/3}{Line{tc1};};",
"# ...",
"top",
",",
"surf",
",",
"_",
"=",
"self",
".",
"extrude",
"(",
"p",
",",
"rotation_axis",
"=",
"rot_axis",
",",
"point_on_axis",
"=",
"point_on_rot_axis",
",",
"angle",
"=",
"angle",
",",
")",
"all_surfaces",
".",
"append",
"(",
"surf",
")",
"previous",
"[",
"k",
"]",
"=",
"top",
"# compound_surface = CompoundSurface(all_surfaces)",
"surface_loop",
"=",
"self",
".",
"add_surface_loop",
"(",
"all_surfaces",
")",
"vol",
"=",
"self",
".",
"add_volume",
"(",
"surface_loop",
")",
"# The newline at the end is essential:",
"# If a GEO file doesn't end in a newline, Gmsh will report a syntax",
"# error.",
"self",
".",
"add_comment",
"(",
"\"\\n\"",
")",
"return",
"vol"
] | 38.396552 | 20.017241 |
def say(
text = None,
preference_program = "festival",
background = False,
silent = True,
filepath = None
):
"""
Say specified text to speakers or to file, as specified. Determine the
program to use based on the specified program preference and availability,
then say the text to speakers or synthesize speech of the text and save it
to file, as specified.
"""
if not text:
if not silent:
print("text not specified")
return False
# Determine the program to use based on program preference and program
# availability.
preference_order_programs = [
"festival",
"espeak",
"pico2wave",
"deep_throat.py"
]
# Remove the specified preference program from the default program
# preferences order and prioritise it.
preference_order_programs.remove(preference_program)
preference_order_programs.insert(0, preference_program)
# Determine first program that is available in the programs order of
# preference.
preference_order_programs_available =\
[program for program in preference_order_programs \
if shijian.which(program) is not None]
if not preference_order_programs_available:
if not silent:
print("text-to-speech program unavailable")
return False
program = preference_order_programs_available[0]
if program != preference_program and not silent:
print("text-to-speech preference program unavailable, using {program}".format(program = program))
if program == "festival":
if not filepath:
command = """
echo "{text}" | festival --tts
""".format(text = text)
else:
command = """
echo "{text}" | text2wave -o {filepath}
""".format(text = text, filepath = filepath)
elif program == "espeak":
if not filepath:
command = """
echo "{text}" | espeak
""".format(text = text)
else:
command = """
echo "{text}" | espeak -w {filepath}
""".format(text = text, filepath = filepath)
elif program == "pico2wave":
if not filepath:
command = """
pico2wave --wave="{filepath}" "{text}"
aplay --quiet "{filepath}"
""".format(text = text, filepath = shijian.tmp_filepath() + ".wav")
else:
command = """
pico2wave --wave="{filepath}" "{text}"
""".format(text = text, filepath = filepath)
elif program == "deep_throat.py":
if not filepath:
command = """
echo "{text}" | deep_throat.py
""".format(text = text)
else:
command = """
deep_throat.py --text="{text}" --savetowavefile --outfile="{filepath}"
""".format(text = text, filepath = filepath)
if filepath:
background = False
if background:
command = command.rstrip().rstrip("\n") + " &"
command = textwrap.dedent(command)
engage_command(command = command, background = background) | [
"def",
"say",
"(",
"text",
"=",
"None",
",",
"preference_program",
"=",
"\"festival\"",
",",
"background",
"=",
"False",
",",
"silent",
"=",
"True",
",",
"filepath",
"=",
"None",
")",
":",
"if",
"not",
"text",
":",
"if",
"not",
"silent",
":",
"print",
"(",
"\"text not specified\"",
")",
"return",
"False",
"# Determine the program to use based on program preference and program",
"# availability.",
"preference_order_programs",
"=",
"[",
"\"festival\"",
",",
"\"espeak\"",
",",
"\"pico2wave\"",
",",
"\"deep_throat.py\"",
"]",
"# Remove the specified preference program from the default program",
"# preferences order and prioritise it.",
"preference_order_programs",
".",
"remove",
"(",
"preference_program",
")",
"preference_order_programs",
".",
"insert",
"(",
"0",
",",
"preference_program",
")",
"# Determine first program that is available in the programs order of",
"# preference.",
"preference_order_programs_available",
"=",
"[",
"program",
"for",
"program",
"in",
"preference_order_programs",
"if",
"shijian",
".",
"which",
"(",
"program",
")",
"is",
"not",
"None",
"]",
"if",
"not",
"preference_order_programs_available",
":",
"if",
"not",
"silent",
":",
"print",
"(",
"\"text-to-speech program unavailable\"",
")",
"return",
"False",
"program",
"=",
"preference_order_programs_available",
"[",
"0",
"]",
"if",
"program",
"!=",
"preference_program",
"and",
"not",
"silent",
":",
"print",
"(",
"\"text-to-speech preference program unavailable, using {program}\"",
".",
"format",
"(",
"program",
"=",
"program",
")",
")",
"if",
"program",
"==",
"\"festival\"",
":",
"if",
"not",
"filepath",
":",
"command",
"=",
"\"\"\"\n echo \"{text}\" | festival --tts\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
")",
"else",
":",
"command",
"=",
"\"\"\"\n echo \"{text}\" | text2wave -o {filepath}\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
",",
"filepath",
"=",
"filepath",
")",
"elif",
"program",
"==",
"\"espeak\"",
":",
"if",
"not",
"filepath",
":",
"command",
"=",
"\"\"\"\n echo \"{text}\" | espeak\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
")",
"else",
":",
"command",
"=",
"\"\"\"\n echo \"{text}\" | espeak -w {filepath}\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
",",
"filepath",
"=",
"filepath",
")",
"elif",
"program",
"==",
"\"pico2wave\"",
":",
"if",
"not",
"filepath",
":",
"command",
"=",
"\"\"\"\n pico2wave --wave=\"{filepath}\" \"{text}\"\n aplay --quiet \"{filepath}\"\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
",",
"filepath",
"=",
"shijian",
".",
"tmp_filepath",
"(",
")",
"+",
"\".wav\"",
")",
"else",
":",
"command",
"=",
"\"\"\"\n pico2wave --wave=\"{filepath}\" \"{text}\"\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
",",
"filepath",
"=",
"filepath",
")",
"elif",
"program",
"==",
"\"deep_throat.py\"",
":",
"if",
"not",
"filepath",
":",
"command",
"=",
"\"\"\"\n echo \"{text}\" | deep_throat.py\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
")",
"else",
":",
"command",
"=",
"\"\"\"\n deep_throat.py --text=\"{text}\" --savetowavefile --outfile=\"{filepath}\"\n \"\"\"",
".",
"format",
"(",
"text",
"=",
"text",
",",
"filepath",
"=",
"filepath",
")",
"if",
"filepath",
":",
"background",
"=",
"False",
"if",
"background",
":",
"command",
"=",
"command",
".",
"rstrip",
"(",
")",
".",
"rstrip",
"(",
"\"\\n\"",
")",
"+",
"\" &\"",
"command",
"=",
"textwrap",
".",
"dedent",
"(",
"command",
")",
"engage_command",
"(",
"command",
"=",
"command",
",",
"background",
"=",
"background",
")"
] | 36.928571 | 13.952381 |
def seq():
"""
Counts up sequentially from a number based on the current time
:rtype int:
"""
current_frame = inspect.currentframe().f_back
trace_string = ""
while current_frame.f_back:
trace_string = trace_string + current_frame.f_back.f_code.co_name
current_frame = current_frame.f_back
return counter.get_from_trace(trace_string) | [
"def",
"seq",
"(",
")",
":",
"current_frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
".",
"f_back",
"trace_string",
"=",
"\"\"",
"while",
"current_frame",
".",
"f_back",
":",
"trace_string",
"=",
"trace_string",
"+",
"current_frame",
".",
"f_back",
".",
"f_code",
".",
"co_name",
"current_frame",
"=",
"current_frame",
".",
"f_back",
"return",
"counter",
".",
"get_from_trace",
"(",
"trace_string",
")"
] | 31.25 | 16.416667 |
def import_boundary_event_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN boundary event.
Boundary event inherits sequence of eventDefinitionRef from Event type.
Separate methods for each event type are required since each of them has different variants
(Message, Error, Signal etc.).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'endEvent' element.
"""
element_id = element.getAttribute(consts.Consts.id)
boundary_event_definitions = {'messageEventDefinition', 'timerEventDefinition', 'signalEventDefinition',
'conditionalEventDefinition', 'escalationEventDefinition', 'errorEventDefinition'}
BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id, process_attributes, element)
diagram_graph.node[element_id][consts.Consts.parallel_multiple] = \
element.getAttribute(consts.Consts.parallel_multiple) \
if element.hasAttribute(consts.Consts.parallel_multiple) else "false"
diagram_graph.node[element_id][consts.Consts.cancel_activity] = \
element.getAttribute(consts.Consts.cancel_activity) \
if element.hasAttribute(consts.Consts.cancel_activity) else "true"
diagram_graph.node[element_id][consts.Consts.attached_to_ref] = \
element.getAttribute(consts.Consts.attached_to_ref)
BpmnDiagramGraphImport.import_event_definition_elements(diagram_graph, element,
boundary_event_definitions) | [
"def",
"import_boundary_event_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"element",
")",
":",
"element_id",
"=",
"element",
".",
"getAttribute",
"(",
"consts",
".",
"Consts",
".",
"id",
")",
"boundary_event_definitions",
"=",
"{",
"'messageEventDefinition'",
",",
"'timerEventDefinition'",
",",
"'signalEventDefinition'",
",",
"'conditionalEventDefinition'",
",",
"'escalationEventDefinition'",
",",
"'errorEventDefinition'",
"}",
"BpmnDiagramGraphImport",
".",
"import_flow_node_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"element",
")",
"diagram_graph",
".",
"node",
"[",
"element_id",
"]",
"[",
"consts",
".",
"Consts",
".",
"parallel_multiple",
"]",
"=",
"element",
".",
"getAttribute",
"(",
"consts",
".",
"Consts",
".",
"parallel_multiple",
")",
"if",
"element",
".",
"hasAttribute",
"(",
"consts",
".",
"Consts",
".",
"parallel_multiple",
")",
"else",
"\"false\"",
"diagram_graph",
".",
"node",
"[",
"element_id",
"]",
"[",
"consts",
".",
"Consts",
".",
"cancel_activity",
"]",
"=",
"element",
".",
"getAttribute",
"(",
"consts",
".",
"Consts",
".",
"cancel_activity",
")",
"if",
"element",
".",
"hasAttribute",
"(",
"consts",
".",
"Consts",
".",
"cancel_activity",
")",
"else",
"\"true\"",
"diagram_graph",
".",
"node",
"[",
"element_id",
"]",
"[",
"consts",
".",
"Consts",
".",
"attached_to_ref",
"]",
"=",
"element",
".",
"getAttribute",
"(",
"consts",
".",
"Consts",
".",
"attached_to_ref",
")",
"BpmnDiagramGraphImport",
".",
"import_event_definition_elements",
"(",
"diagram_graph",
",",
"element",
",",
"boundary_event_definitions",
")"
] | 67.137931 | 38.172414 |
def _decode_v2(value):
"""
Decode ':' and '$' characters encoded by `_encode`.
"""
if re.search(r'(?<!\$):', value):
raise ValueError("Unescaped ':' in the encoded string")
decode_colons = value.replace('$:', ':')
if re.search(r'(?<!\$)(\$\$)*\$([^$]|\Z)', decode_colons):
raise ValueError("Unescaped '$' in encoded string")
return decode_colons.replace('$$', '$') | [
"def",
"_decode_v2",
"(",
"value",
")",
":",
"if",
"re",
".",
"search",
"(",
"r'(?<!\\$):'",
",",
"value",
")",
":",
"raise",
"ValueError",
"(",
"\"Unescaped ':' in the encoded string\"",
")",
"decode_colons",
"=",
"value",
".",
"replace",
"(",
"'$:'",
",",
"':'",
")",
"if",
"re",
".",
"search",
"(",
"r'(?<!\\$)(\\$\\$)*\\$([^$]|\\Z)'",
",",
"decode_colons",
")",
":",
"raise",
"ValueError",
"(",
"\"Unescaped '$' in encoded string\"",
")",
"return",
"decode_colons",
".",
"replace",
"(",
"'$$'",
",",
"'$'",
")"
] | 33.25 | 15.583333 |
def get_tr(self):
"""Returns the top right border of the cell"""
cell_above = CellBorders(self.cell_attributes,
*self.cell.get_above_key_rect())
return cell_above.get_r() | [
"def",
"get_tr",
"(",
"self",
")",
":",
"cell_above",
"=",
"CellBorders",
"(",
"self",
".",
"cell_attributes",
",",
"*",
"self",
".",
"cell",
".",
"get_above_key_rect",
"(",
")",
")",
"return",
"cell_above",
".",
"get_r",
"(",
")"
] | 37.166667 | 18.166667 |
def find_birthdays(request):
"""Return information on user birthdays."""
today = date.today()
custom = False
yr_inc = 0
if "birthday_month" in request.GET and "birthday_day" in request.GET:
try:
mon = int(request.GET["birthday_month"])
day = int(request.GET["birthday_day"])
yr = today.year
""" If searching a date that already happened this year, skip to the next year. """
if mon < today.month or (mon == today.month and day < today.day):
yr += 1
yr_inc = 1
real_today = today
today = date(yr, mon, day)
if today:
custom = True
else:
today = real_today
except ValueError:
pass
key = "birthdays:{}".format(today)
cached = cache.get(key)
if cached:
logger.debug("Birthdays on {} loaded " "from cache.".format(today))
logger.debug(cached)
return cached
else:
logger.debug("Loading and caching birthday info for {}".format(today))
tomorrow = today + timedelta(days=1)
try:
data = {
"custom": custom,
"today": {
"date": today,
"users": [{
"id": u.id,
"full_name": u.full_name,
"grade": {
"name": u.grade.name
},
"age": (u.age + yr_inc) if u.age is not None else -1,
"public": u.properties.attribute_is_public("show_birthday")
} if u else {} for u in User.objects.users_with_birthday(today.month, today.day)],
"inc": 0,
},
"tomorrow": {
"date": tomorrow,
"users": [{
"id": u.id,
"full_name": u.full_name,
"grade": {
"name": u.grade.name
},
"age": (u.age - 1) if u.age is not None else -1,
"public": u.properties.attribute_is_public("show_birthday")
} for u in User.objects.users_with_birthday(tomorrow.month, tomorrow.day)],
"inc": 1,
},
} # yapf: disable
except AttributeError:
return None
else:
cache.set(key, data, timeout=60 * 60 * 6)
return data | [
"def",
"find_birthdays",
"(",
"request",
")",
":",
"today",
"=",
"date",
".",
"today",
"(",
")",
"custom",
"=",
"False",
"yr_inc",
"=",
"0",
"if",
"\"birthday_month\"",
"in",
"request",
".",
"GET",
"and",
"\"birthday_day\"",
"in",
"request",
".",
"GET",
":",
"try",
":",
"mon",
"=",
"int",
"(",
"request",
".",
"GET",
"[",
"\"birthday_month\"",
"]",
")",
"day",
"=",
"int",
"(",
"request",
".",
"GET",
"[",
"\"birthday_day\"",
"]",
")",
"yr",
"=",
"today",
".",
"year",
"\"\"\" If searching a date that already happened this year, skip to the next year. \"\"\"",
"if",
"mon",
"<",
"today",
".",
"month",
"or",
"(",
"mon",
"==",
"today",
".",
"month",
"and",
"day",
"<",
"today",
".",
"day",
")",
":",
"yr",
"+=",
"1",
"yr_inc",
"=",
"1",
"real_today",
"=",
"today",
"today",
"=",
"date",
"(",
"yr",
",",
"mon",
",",
"day",
")",
"if",
"today",
":",
"custom",
"=",
"True",
"else",
":",
"today",
"=",
"real_today",
"except",
"ValueError",
":",
"pass",
"key",
"=",
"\"birthdays:{}\"",
".",
"format",
"(",
"today",
")",
"cached",
"=",
"cache",
".",
"get",
"(",
"key",
")",
"if",
"cached",
":",
"logger",
".",
"debug",
"(",
"\"Birthdays on {} loaded \"",
"\"from cache.\"",
".",
"format",
"(",
"today",
")",
")",
"logger",
".",
"debug",
"(",
"cached",
")",
"return",
"cached",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Loading and caching birthday info for {}\"",
".",
"format",
"(",
"today",
")",
")",
"tomorrow",
"=",
"today",
"+",
"timedelta",
"(",
"days",
"=",
"1",
")",
"try",
":",
"data",
"=",
"{",
"\"custom\"",
":",
"custom",
",",
"\"today\"",
":",
"{",
"\"date\"",
":",
"today",
",",
"\"users\"",
":",
"[",
"{",
"\"id\"",
":",
"u",
".",
"id",
",",
"\"full_name\"",
":",
"u",
".",
"full_name",
",",
"\"grade\"",
":",
"{",
"\"name\"",
":",
"u",
".",
"grade",
".",
"name",
"}",
",",
"\"age\"",
":",
"(",
"u",
".",
"age",
"+",
"yr_inc",
")",
"if",
"u",
".",
"age",
"is",
"not",
"None",
"else",
"-",
"1",
",",
"\"public\"",
":",
"u",
".",
"properties",
".",
"attribute_is_public",
"(",
"\"show_birthday\"",
")",
"}",
"if",
"u",
"else",
"{",
"}",
"for",
"u",
"in",
"User",
".",
"objects",
".",
"users_with_birthday",
"(",
"today",
".",
"month",
",",
"today",
".",
"day",
")",
"]",
",",
"\"inc\"",
":",
"0",
",",
"}",
",",
"\"tomorrow\"",
":",
"{",
"\"date\"",
":",
"tomorrow",
",",
"\"users\"",
":",
"[",
"{",
"\"id\"",
":",
"u",
".",
"id",
",",
"\"full_name\"",
":",
"u",
".",
"full_name",
",",
"\"grade\"",
":",
"{",
"\"name\"",
":",
"u",
".",
"grade",
".",
"name",
"}",
",",
"\"age\"",
":",
"(",
"u",
".",
"age",
"-",
"1",
")",
"if",
"u",
".",
"age",
"is",
"not",
"None",
"else",
"-",
"1",
",",
"\"public\"",
":",
"u",
".",
"properties",
".",
"attribute_is_public",
"(",
"\"show_birthday\"",
")",
"}",
"for",
"u",
"in",
"User",
".",
"objects",
".",
"users_with_birthday",
"(",
"tomorrow",
".",
"month",
",",
"tomorrow",
".",
"day",
")",
"]",
",",
"\"inc\"",
":",
"1",
",",
"}",
",",
"}",
"# yapf: disable",
"except",
"AttributeError",
":",
"return",
"None",
"else",
":",
"cache",
".",
"set",
"(",
"key",
",",
"data",
",",
"timeout",
"=",
"60",
"*",
"60",
"*",
"6",
")",
"return",
"data"
] | 36.085714 | 18.742857 |
def search(self, ngram):
'''
Parameters
----------
ngram str or unicode, string to search for
Returns
-------
pd.DataFrame, {'texts': <matching texts>, 'categories': <corresponding categories>}
'''
mask = self._document_index_mask(ngram)
return pd.DataFrame({
'text': self.get_texts()[mask],
'category': [self._category_idx_store.getval(idx)
for idx in self._y[mask]]
}) | [
"def",
"search",
"(",
"self",
",",
"ngram",
")",
":",
"mask",
"=",
"self",
".",
"_document_index_mask",
"(",
"ngram",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"{",
"'text'",
":",
"self",
".",
"get_texts",
"(",
")",
"[",
"mask",
"]",
",",
"'category'",
":",
"[",
"self",
".",
"_category_idx_store",
".",
"getval",
"(",
"idx",
")",
"for",
"idx",
"in",
"self",
".",
"_y",
"[",
"mask",
"]",
"]",
"}",
")"
] | 23.529412 | 23.882353 |
def partial_regardless(__fn, *a, **kw):
"""Wrap a note for injection of a partially applied function, or don't.
Use this instead of `partial` when binding a callable that may or may
not have annotations.
"""
return (PARTIAL_REGARDLESS, (__fn, a, tuple(kw.items()))) | [
"def",
"partial_regardless",
"(",
"__fn",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"return",
"(",
"PARTIAL_REGARDLESS",
",",
"(",
"__fn",
",",
"a",
",",
"tuple",
"(",
"kw",
".",
"items",
"(",
")",
")",
")",
")"
] | 42.857143 | 16.285714 |
def predict_mhci_binding(job, peptfile, allele, peplen, univ_options, mhci_options):
"""
Predict binding for each peptide in `peptfile` to `allele` using the IEDB mhci binding
prediction tool.
:param toil.fileStore.FileID peptfile: The input peptide fasta
:param str allele: Allele to predict binding against
:param str peplen: Length of peptides to process
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mhci_options: Options specific to mhci binding prediction
:return: fsID for file containing the predictions
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'peptfile.faa': peptfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
peptides = read_peptide_file(os.path.join(os.getcwd(), 'peptfile.faa'))
if not peptides:
return job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile())
parameters = [mhci_options['pred'],
allele,
peplen,
input_files['peptfile.faa']]
with open('/'.join([work_dir, 'predictions.tsv']), 'w') as predfile:
docker_call(tool='mhci', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=predfile, interactive=True,
tool_version=mhci_options['version'])
output_file = job.fileStore.writeGlobalFile(predfile.name)
job.fileStore.logToMaster('Ran mhci on %s:%s:%s successfully'
% (univ_options['patient'], allele, peplen))
return output_file | [
"def",
"predict_mhci_binding",
"(",
"job",
",",
"peptfile",
",",
"allele",
",",
"peplen",
",",
"univ_options",
",",
"mhci_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'peptfile.faa'",
":",
"peptfile",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"peptides",
"=",
"read_peptide_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'peptfile.faa'",
")",
")",
"if",
"not",
"peptides",
":",
"return",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"job",
".",
"fileStore",
".",
"getLocalTempFile",
"(",
")",
")",
"parameters",
"=",
"[",
"mhci_options",
"[",
"'pred'",
"]",
",",
"allele",
",",
"peplen",
",",
"input_files",
"[",
"'peptfile.faa'",
"]",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'predictions.tsv'",
"]",
")",
",",
"'w'",
")",
"as",
"predfile",
":",
"docker_call",
"(",
"tool",
"=",
"'mhci'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"predfile",
",",
"interactive",
"=",
"True",
",",
"tool_version",
"=",
"mhci_options",
"[",
"'version'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"predfile",
".",
"name",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran mhci on %s:%s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"allele",
",",
"peplen",
")",
")",
"return",
"output_file"
] | 50.40625 | 23.59375 |
def getroot(self):
"""Build XML object, return the root"""
builder = ET.TreeBuilder()
self.build(builder)
return builder.close() | [
"def",
"getroot",
"(",
"self",
")",
":",
"builder",
"=",
"ET",
".",
"TreeBuilder",
"(",
")",
"self",
".",
"build",
"(",
"builder",
")",
"return",
"builder",
".",
"close",
"(",
")"
] | 31.2 | 10.2 |
def run(command, cwd=None, shell=False, raiseOnError=False):
"""
Executes a child process and waits for it to complete
"""
returncode = subprocess.call(command, cwd=cwd, shell=shell)
if raiseOnError == True and returncode != 0:
raise Exception('child process ' + str(command) + ' failed with exit code ' + str(returncode))
return returncode | [
"def",
"run",
"(",
"command",
",",
"cwd",
"=",
"None",
",",
"shell",
"=",
"False",
",",
"raiseOnError",
"=",
"False",
")",
":",
"returncode",
"=",
"subprocess",
".",
"call",
"(",
"command",
",",
"cwd",
"=",
"cwd",
",",
"shell",
"=",
"shell",
")",
"if",
"raiseOnError",
"==",
"True",
"and",
"returncode",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"'child process '",
"+",
"str",
"(",
"command",
")",
"+",
"' failed with exit code '",
"+",
"str",
"(",
"returncode",
")",
")",
"return",
"returncode"
] | 43.5 | 17.5 |
def _get_options(ret=None):
'''
Returns options used for the MySQL connection.
'''
defaults = {'host': 'salt',
'user': 'salt',
'pass': 'salt',
'db': 'salt',
'port': 3306,
'ssl_ca': None,
'ssl_cert': None,
'ssl_key': None}
attrs = {'host': 'host',
'user': 'user',
'pass': 'pass',
'db': 'db',
'port': 'port',
'ssl_ca': 'ssl_ca',
'ssl_cert': 'ssl_cert',
'ssl_key': 'ssl_key'}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__,
defaults=defaults)
# post processing
for k, v in six.iteritems(_options):
if isinstance(v, six.string_types) and v.lower() == 'none':
# Ensure 'None' is rendered as None
_options[k] = None
if k == 'port':
# Ensure port is an int
_options[k] = int(v)
return _options | [
"def",
"_get_options",
"(",
"ret",
"=",
"None",
")",
":",
"defaults",
"=",
"{",
"'host'",
":",
"'salt'",
",",
"'user'",
":",
"'salt'",
",",
"'pass'",
":",
"'salt'",
",",
"'db'",
":",
"'salt'",
",",
"'port'",
":",
"3306",
",",
"'ssl_ca'",
":",
"None",
",",
"'ssl_cert'",
":",
"None",
",",
"'ssl_key'",
":",
"None",
"}",
"attrs",
"=",
"{",
"'host'",
":",
"'host'",
",",
"'user'",
":",
"'user'",
",",
"'pass'",
":",
"'pass'",
",",
"'db'",
":",
"'db'",
",",
"'port'",
":",
"'port'",
",",
"'ssl_ca'",
":",
"'ssl_ca'",
",",
"'ssl_cert'",
":",
"'ssl_cert'",
",",
"'ssl_key'",
":",
"'ssl_key'",
"}",
"_options",
"=",
"salt",
".",
"returners",
".",
"get_returner_options",
"(",
"__virtualname__",
",",
"ret",
",",
"attrs",
",",
"__salt__",
"=",
"__salt__",
",",
"__opts__",
"=",
"__opts__",
",",
"defaults",
"=",
"defaults",
")",
"# post processing",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"_options",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"six",
".",
"string_types",
")",
"and",
"v",
".",
"lower",
"(",
")",
"==",
"'none'",
":",
"# Ensure 'None' is rendered as None",
"_options",
"[",
"k",
"]",
"=",
"None",
"if",
"k",
"==",
"'port'",
":",
"# Ensure port is an int",
"_options",
"[",
"k",
"]",
"=",
"int",
"(",
"v",
")",
"return",
"_options"
] | 33.578947 | 16.421053 |
def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos):
'''
Searches for the next location where a valid block could reside and constructs the block
object representing that location.
'''
for row_index in range(len(table)):
if row_index < start_pos[0] or row_index > end_pos[0]:
continue
convRow = table[row_index]
used_row = used_cells[row_index]
for column_index, conv in enumerate(convRow):
if (column_index < start_pos[1] or column_index > end_pos[1] or used_row[column_index]):
continue
# Is non empty cell?
if not is_empty_cell(conv):
block_start, block_end = self._find_block_bounds(table, used_cells,
(row_index, column_index), start_pos, end_pos)
if (block_end[0] > block_start[0] and
block_end[1] > block_start[1]):
try:
return TableBlock(table, used_cells, block_start, block_end, worksheet,
flags, units, self.assume_complete_blocks, self.max_title_rows)
except InvalidBlockError:
pass
# Prevent infinite loops if something goes wrong
used_cells[row_index][column_index] = True | [
"def",
"_find_valid_block",
"(",
"self",
",",
"table",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"used_cells",
",",
"start_pos",
",",
"end_pos",
")",
":",
"for",
"row_index",
"in",
"range",
"(",
"len",
"(",
"table",
")",
")",
":",
"if",
"row_index",
"<",
"start_pos",
"[",
"0",
"]",
"or",
"row_index",
">",
"end_pos",
"[",
"0",
"]",
":",
"continue",
"convRow",
"=",
"table",
"[",
"row_index",
"]",
"used_row",
"=",
"used_cells",
"[",
"row_index",
"]",
"for",
"column_index",
",",
"conv",
"in",
"enumerate",
"(",
"convRow",
")",
":",
"if",
"(",
"column_index",
"<",
"start_pos",
"[",
"1",
"]",
"or",
"column_index",
">",
"end_pos",
"[",
"1",
"]",
"or",
"used_row",
"[",
"column_index",
"]",
")",
":",
"continue",
"# Is non empty cell?",
"if",
"not",
"is_empty_cell",
"(",
"conv",
")",
":",
"block_start",
",",
"block_end",
"=",
"self",
".",
"_find_block_bounds",
"(",
"table",
",",
"used_cells",
",",
"(",
"row_index",
",",
"column_index",
")",
",",
"start_pos",
",",
"end_pos",
")",
"if",
"(",
"block_end",
"[",
"0",
"]",
">",
"block_start",
"[",
"0",
"]",
"and",
"block_end",
"[",
"1",
"]",
">",
"block_start",
"[",
"1",
"]",
")",
":",
"try",
":",
"return",
"TableBlock",
"(",
"table",
",",
"used_cells",
",",
"block_start",
",",
"block_end",
",",
"worksheet",
",",
"flags",
",",
"units",
",",
"self",
".",
"assume_complete_blocks",
",",
"self",
".",
"max_title_rows",
")",
"except",
"InvalidBlockError",
":",
"pass",
"# Prevent infinite loops if something goes wrong",
"used_cells",
"[",
"row_index",
"]",
"[",
"column_index",
"]",
"=",
"True"
] | 55.730769 | 24.346154 |
def ReadMostRecentClientGraphSeries(self, client_label,
report_type
):
"""See db.Database."""
series_with_timestamps = self.ReadAllClientGraphSeries(
client_label, report_type)
if not series_with_timestamps:
return None
_, latest_series = list(sorted(iteritems(series_with_timestamps)))[-1]
return latest_series | [
"def",
"ReadMostRecentClientGraphSeries",
"(",
"self",
",",
"client_label",
",",
"report_type",
")",
":",
"series_with_timestamps",
"=",
"self",
".",
"ReadAllClientGraphSeries",
"(",
"client_label",
",",
"report_type",
")",
"if",
"not",
"series_with_timestamps",
":",
"return",
"None",
"_",
",",
"latest_series",
"=",
"list",
"(",
"sorted",
"(",
"iteritems",
"(",
"series_with_timestamps",
")",
")",
")",
"[",
"-",
"1",
"]",
"return",
"latest_series"
] | 41.1 | 12.9 |
def pinv(a, rcond=None, validate_args=False, name=None):
"""Compute the Moore-Penrose pseudo-inverse of a matrix.
Calculate the [generalized inverse of a matrix](
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
singular-value decomposition (SVD) and including all large singular values.
The pseudo-inverse of a matrix `A`, is defined as: "the matrix that 'solves'
[the least-squares problem] `A @ x = b`," i.e., if `x_hat` is a solution, then
`A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if
`U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then
`A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1]
This function is analogous to [`numpy.linalg.pinv`](
https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
default `rcond` is `1e-15`. Here the default is
`10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.
Args:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
pseudo-inverted.
rcond: `Tensor` of small singular value cutoffs. Singular values smaller
(in modulus) than `rcond` * largest_singular_value (again, in modulus) are
set to zero. Must broadcast against `tf.shape(a)[:-2]`.
Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: "pinv".
Returns:
a_pinv: The pseudo-inverse of input `a`. Has same shape as `a` except
rightmost two dimensions are transposed.
Raises:
TypeError: if input `a` does not have `float`-like `dtype`.
ValueError: if input `a` has fewer than 2 dimensions.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
a = tf.constant([[1., 0.4, 0.5],
[0.4, 0.2, 0.25],
[0.5, 0.25, 0.35]])
tf.matmul(tfp.math.pinv(a), a)
# ==> array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=float32)
a = tf.constant([[1., 0.4, 0.5, 1.],
[0.4, 0.2, 0.25, 2.],
[0.5, 0.25, 0.35, 3.]])
tf.matmul(tfp.math.pinv(a), a)
# ==> array([[ 0.76, 0.37, 0.21, -0.02],
[ 0.37, 0.43, -0.33, 0.02],
[ 0.21, -0.33, 0.81, 0.01],
[-0.02, 0.02, 0.01, 1. ]], dtype=float32)
```
#### References
[1]: G. Strang. "Linear Algebra and Its Applications, 2nd Ed." Academic Press,
Inc., 1980, pp. 139-142.
"""
with tf.compat.v1.name_scope(name, 'pinv', [a, rcond]):
a = tf.convert_to_tensor(value=a, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with tf.control_dependencies(assertions):
a = tf.identity(a)
dtype = a.dtype.as_numpy_dtype
if rcond is None:
def get_dim_size(dim):
if tf.compat.dimension_value(a.shape[dim]) is not None:
return tf.compat.dimension_value(a.shape[dim])
return tf.shape(input=a)[dim]
num_rows = get_dim_size(-2)
num_cols = get_dim_size(-1)
if isinstance(num_rows, int) and isinstance(num_cols, int):
max_rows_cols = float(max(num_rows, num_cols))
else:
max_rows_cols = tf.cast(tf.maximum(num_rows, num_cols), dtype)
rcond = 10. * max_rows_cols * np.finfo(dtype).eps
rcond = tf.convert_to_tensor(value=rcond, dtype=dtype, name='rcond')
# Calculate pseudo inverse via SVD.
# Note: if a is symmetric then u == v. (We might observe additional
# performance by explicitly setting `v = u` in such cases.)
[
singular_values, # Sigma
left_singular_vectors, # U
right_singular_vectors, # V
] = tf.linalg.svd(a, full_matrices=False, compute_uv=True)
# Saturate small singular values to inf. This has the effect of make
# `1. / s = 0.` while not resulting in `NaN` gradients.
cutoff = rcond * tf.reduce_max(input_tensor=singular_values, axis=-1)
singular_values = tf.where(
singular_values > cutoff[..., tf.newaxis], singular_values,
tf.fill(tf.shape(input=singular_values), np.array(np.inf, dtype)))
# Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap
# `u` and `v` here so that `tf.matmul(pinv(A), A) = tf.eye()`, i.e.,
# a matrix inverse has "transposed" semantics.
a_pinv = tf.matmul(
right_singular_vectors / singular_values[..., tf.newaxis, :],
left_singular_vectors,
adjoint_b=True)
if a.shape.ndims is not None:
a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))
return a_pinv | [
"def",
"pinv",
"(",
"a",
",",
"rcond",
"=",
"None",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'pinv'",
",",
"[",
"a",
",",
"rcond",
"]",
")",
":",
"a",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"a",
",",
"name",
"=",
"'a'",
")",
"assertions",
"=",
"_maybe_validate_matrix",
"(",
"a",
",",
"validate_args",
")",
"if",
"assertions",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"assertions",
")",
":",
"a",
"=",
"tf",
".",
"identity",
"(",
"a",
")",
"dtype",
"=",
"a",
".",
"dtype",
".",
"as_numpy_dtype",
"if",
"rcond",
"is",
"None",
":",
"def",
"get_dim_size",
"(",
"dim",
")",
":",
"if",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"a",
".",
"shape",
"[",
"dim",
"]",
")",
"is",
"not",
"None",
":",
"return",
"tf",
".",
"compat",
".",
"dimension_value",
"(",
"a",
".",
"shape",
"[",
"dim",
"]",
")",
"return",
"tf",
".",
"shape",
"(",
"input",
"=",
"a",
")",
"[",
"dim",
"]",
"num_rows",
"=",
"get_dim_size",
"(",
"-",
"2",
")",
"num_cols",
"=",
"get_dim_size",
"(",
"-",
"1",
")",
"if",
"isinstance",
"(",
"num_rows",
",",
"int",
")",
"and",
"isinstance",
"(",
"num_cols",
",",
"int",
")",
":",
"max_rows_cols",
"=",
"float",
"(",
"max",
"(",
"num_rows",
",",
"num_cols",
")",
")",
"else",
":",
"max_rows_cols",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"maximum",
"(",
"num_rows",
",",
"num_cols",
")",
",",
"dtype",
")",
"rcond",
"=",
"10.",
"*",
"max_rows_cols",
"*",
"np",
".",
"finfo",
"(",
"dtype",
")",
".",
"eps",
"rcond",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"rcond",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'rcond'",
")",
"# Calculate pseudo inverse via SVD.",
"# Note: if a is symmetric then u == v. (We might observe additional",
"# performance by explicitly setting `v = u` in such cases.)",
"[",
"singular_values",
",",
"# Sigma",
"left_singular_vectors",
",",
"# U",
"right_singular_vectors",
",",
"# V",
"]",
"=",
"tf",
".",
"linalg",
".",
"svd",
"(",
"a",
",",
"full_matrices",
"=",
"False",
",",
"compute_uv",
"=",
"True",
")",
"# Saturate small singular values to inf. This has the effect of make",
"# `1. / s = 0.` while not resulting in `NaN` gradients.",
"cutoff",
"=",
"rcond",
"*",
"tf",
".",
"reduce_max",
"(",
"input_tensor",
"=",
"singular_values",
",",
"axis",
"=",
"-",
"1",
")",
"singular_values",
"=",
"tf",
".",
"where",
"(",
"singular_values",
">",
"cutoff",
"[",
"...",
",",
"tf",
".",
"newaxis",
"]",
",",
"singular_values",
",",
"tf",
".",
"fill",
"(",
"tf",
".",
"shape",
"(",
"input",
"=",
"singular_values",
")",
",",
"np",
".",
"array",
"(",
"np",
".",
"inf",
",",
"dtype",
")",
")",
")",
"# Although `a == tf.matmul(u, s * v, transpose_b=True)` we swap",
"# `u` and `v` here so that `tf.matmul(pinv(A), A) = tf.eye()`, i.e.,",
"# a matrix inverse has \"transposed\" semantics.",
"a_pinv",
"=",
"tf",
".",
"matmul",
"(",
"right_singular_vectors",
"/",
"singular_values",
"[",
"...",
",",
"tf",
".",
"newaxis",
",",
":",
"]",
",",
"left_singular_vectors",
",",
"adjoint_b",
"=",
"True",
")",
"if",
"a",
".",
"shape",
".",
"ndims",
"is",
"not",
"None",
":",
"a_pinv",
".",
"set_shape",
"(",
"a",
".",
"shape",
"[",
":",
"-",
"2",
"]",
".",
"concatenate",
"(",
"[",
"a",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"a",
".",
"shape",
"[",
"-",
"2",
"]",
"]",
")",
")",
"return",
"a_pinv"
] | 38.861789 | 22.821138 |
def _descendants(self):
"""
Scans full list of node descendants.
:return: Generator of nodes.
"""
children = self._children
if children is not None:
for child in children.values():
yield from child._descendants
yield child | [
"def",
"_descendants",
"(",
"self",
")",
":",
"children",
"=",
"self",
".",
"_children",
"if",
"children",
"is",
"not",
"None",
":",
"for",
"child",
"in",
"children",
".",
"values",
"(",
")",
":",
"yield",
"from",
"child",
".",
"_descendants",
"yield",
"child"
] | 27.727273 | 9.181818 |
def fixup_scipy_ndimage_result(whatever_it_returned):
"""Convert a result from scipy.ndimage to a numpy array
scipy.ndimage has the annoying habit of returning a single, bare
value instead of an array if the indexes passed in are of length 1.
For instance:
scind.maximum(image, labels, [1]) returns a float
but
scind.maximum(image, labels, [1,2]) returns a list
"""
if getattr(whatever_it_returned,"__getitem__",False):
return np.array(whatever_it_returned)
else:
return np.array([whatever_it_returned]) | [
"def",
"fixup_scipy_ndimage_result",
"(",
"whatever_it_returned",
")",
":",
"if",
"getattr",
"(",
"whatever_it_returned",
",",
"\"__getitem__\"",
",",
"False",
")",
":",
"return",
"np",
".",
"array",
"(",
"whatever_it_returned",
")",
"else",
":",
"return",
"np",
".",
"array",
"(",
"[",
"whatever_it_returned",
"]",
")"
] | 39.357143 | 17.928571 |
def get_formatter_by_name(_alias, **options):
"""Lookup and instantiate a formatter by alias.
Raises ClassNotFound if not found.
"""
cls = find_formatter_class(_alias)
if cls is None:
raise ClassNotFound("no formatter found for name %r" % _alias)
return cls(**options) | [
"def",
"get_formatter_by_name",
"(",
"_alias",
",",
"*",
"*",
"options",
")",
":",
"cls",
"=",
"find_formatter_class",
"(",
"_alias",
")",
"if",
"cls",
"is",
"None",
":",
"raise",
"ClassNotFound",
"(",
"\"no formatter found for name %r\"",
"%",
"_alias",
")",
"return",
"cls",
"(",
"*",
"*",
"options",
")"
] | 32.555556 | 12.777778 |
def device_unmounted(self, device):
"""Show unmount notification for specified device object."""
if not self._mounter.is_handleable(device):
return
self._show_notification(
'device_unmounted',
_('Device unmounted'),
_('{0.ui_label} unmounted', device),
device.icon_name) | [
"def",
"device_unmounted",
"(",
"self",
",",
"device",
")",
":",
"if",
"not",
"self",
".",
"_mounter",
".",
"is_handleable",
"(",
"device",
")",
":",
"return",
"self",
".",
"_show_notification",
"(",
"'device_unmounted'",
",",
"_",
"(",
"'Device unmounted'",
")",
",",
"_",
"(",
"'{0.ui_label} unmounted'",
",",
"device",
")",
",",
"device",
".",
"icon_name",
")"
] | 38.444444 | 8.888889 |
def update_process_died_status(self):
""" Update the flag indicating whether any process exited and did not provide a result. """
# There is a result pending, the process is no longer alive, yet there is no result in the queue
# This means the decoder process has not succesfully produced metrics
queue_should_hold_result = self._results_pending and self.decoder_process is not None and not self.decoder_process.is_alive()
if queue_should_hold_result and self.decoder_metric_queue.empty():
self._any_process_died = True | [
"def",
"update_process_died_status",
"(",
"self",
")",
":",
"# There is a result pending, the process is no longer alive, yet there is no result in the queue",
"# This means the decoder process has not succesfully produced metrics",
"queue_should_hold_result",
"=",
"self",
".",
"_results_pending",
"and",
"self",
".",
"decoder_process",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"decoder_process",
".",
"is_alive",
"(",
")",
"if",
"queue_should_hold_result",
"and",
"self",
".",
"decoder_metric_queue",
".",
"empty",
"(",
")",
":",
"self",
".",
"_any_process_died",
"=",
"True"
] | 70.625 | 34 |
def repo_is_dirty(self):
"""
Return true if repo is dirty
"""
try:
subprocess.check_call(['git', 'diff-files', '--quiet'], cwd=self.repo_dir)
# Return code is 0
return False
except subprocess.CalledProcessError:
return True | [
"def",
"repo_is_dirty",
"(",
"self",
")",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'diff-files'",
",",
"'--quiet'",
"]",
",",
"cwd",
"=",
"self",
".",
"repo_dir",
")",
"# Return code is 0",
"return",
"False",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"return",
"True"
] | 30.2 | 14.2 |
def stop(self):
'''
Stop the auth proc.
'''
log.info('Stopping auth process')
self.__up = False
self.socket.close() | [
"def",
"stop",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"'Stopping auth process'",
")",
"self",
".",
"__up",
"=",
"False",
"self",
".",
"socket",
".",
"close",
"(",
")"
] | 22.428571 | 17.857143 |
def _merge_last(values, merge_after, merge_with=' '):
'''
Merge values all values after X into the last value
'''
if len(values) > merge_after:
values = values[0:(merge_after-1)] + [merge_with.join(values[(merge_after-1):])]
return values | [
"def",
"_merge_last",
"(",
"values",
",",
"merge_after",
",",
"merge_with",
"=",
"' '",
")",
":",
"if",
"len",
"(",
"values",
")",
">",
"merge_after",
":",
"values",
"=",
"values",
"[",
"0",
":",
"(",
"merge_after",
"-",
"1",
")",
"]",
"+",
"[",
"merge_with",
".",
"join",
"(",
"values",
"[",
"(",
"merge_after",
"-",
"1",
")",
":",
"]",
")",
"]",
"return",
"values"
] | 32.5 | 26.5 |
def unbuild_month(self, dt):
"""
Deletes the directory at self.get_build_path.
"""
self.year = str(dt.year)
self.month = str(dt.month)
logger.debug("Building %s-%s" % (self.year, self.month))
target_path = os.path.split(self.get_build_path())[0]
if self.fs.exists(target_path):
logger.debug("Removing {}".format(target_path))
self.fs.removetree(target_path) | [
"def",
"unbuild_month",
"(",
"self",
",",
"dt",
")",
":",
"self",
".",
"year",
"=",
"str",
"(",
"dt",
".",
"year",
")",
"self",
".",
"month",
"=",
"str",
"(",
"dt",
".",
"month",
")",
"logger",
".",
"debug",
"(",
"\"Building %s-%s\"",
"%",
"(",
"self",
".",
"year",
",",
"self",
".",
"month",
")",
")",
"target_path",
"=",
"os",
".",
"path",
".",
"split",
"(",
"self",
".",
"get_build_path",
"(",
")",
")",
"[",
"0",
"]",
"if",
"self",
".",
"fs",
".",
"exists",
"(",
"target_path",
")",
":",
"logger",
".",
"debug",
"(",
"\"Removing {}\"",
".",
"format",
"(",
"target_path",
")",
")",
"self",
".",
"fs",
".",
"removetree",
"(",
"target_path",
")"
] | 39.545455 | 9.727273 |
def cycles_engine(**kwargs):
"""engine to extract cycles"""
logging.info("cycles_engine:")
logging.info("Not ready for production")
# raise NotImplementedError
experiments = kwargs["experiments"]
farms = []
barn = "raw_dir" # Its a murder in the red barn - murder in the red barn
for experiment in experiments:
farms.append([])
if experiment.all_in_memory:
logging.debug("all in memory")
for key in experiment.cell_data_frames:
logging.debug(f"extracting cycles from {key}")
else:
logging.debug("dont have it in memory - need to lookup in the files")
for key in experiment.cell_data_frames:
logging.debug(f"looking up cellpyfile for {key}")
return farms, barn | [
"def",
"cycles_engine",
"(",
"*",
"*",
"kwargs",
")",
":",
"logging",
".",
"info",
"(",
"\"cycles_engine:\"",
")",
"logging",
".",
"info",
"(",
"\"Not ready for production\"",
")",
"# raise NotImplementedError",
"experiments",
"=",
"kwargs",
"[",
"\"experiments\"",
"]",
"farms",
"=",
"[",
"]",
"barn",
"=",
"\"raw_dir\"",
"# Its a murder in the red barn - murder in the red barn",
"for",
"experiment",
"in",
"experiments",
":",
"farms",
".",
"append",
"(",
"[",
"]",
")",
"if",
"experiment",
".",
"all_in_memory",
":",
"logging",
".",
"debug",
"(",
"\"all in memory\"",
")",
"for",
"key",
"in",
"experiment",
".",
"cell_data_frames",
":",
"logging",
".",
"debug",
"(",
"f\"extracting cycles from {key}\"",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"\"dont have it in memory - need to lookup in the files\"",
")",
"for",
"key",
"in",
"experiment",
".",
"cell_data_frames",
":",
"logging",
".",
"debug",
"(",
"f\"looking up cellpyfile for {key}\"",
")",
"return",
"farms",
",",
"barn"
] | 34 | 19.043478 |
def get_edge_attr(self, n, m, attr):
'''
API: get_edge_attr(self, n, m, attr)
Description:
Returns attribute attr of edge (n,m).
Input:
n: Source node name.
m: Sink node name.
attr: Attribute of edge.
Pre:
Graph should have this edge.
Return:
Value of edge attribute attr.
'''
if self.graph_type is DIRECTED_GRAPH:
return self.edge_attr[(n,m)][attr]
else:
try:
return self.edge_attr[(n,m)][attr]
except KeyError:
return self.edge_attr[(m,n)][attr] | [
"def",
"get_edge_attr",
"(",
"self",
",",
"n",
",",
"m",
",",
"attr",
")",
":",
"if",
"self",
".",
"graph_type",
"is",
"DIRECTED_GRAPH",
":",
"return",
"self",
".",
"edge_attr",
"[",
"(",
"n",
",",
"m",
")",
"]",
"[",
"attr",
"]",
"else",
":",
"try",
":",
"return",
"self",
".",
"edge_attr",
"[",
"(",
"n",
",",
"m",
")",
"]",
"[",
"attr",
"]",
"except",
"KeyError",
":",
"return",
"self",
".",
"edge_attr",
"[",
"(",
"m",
",",
"n",
")",
"]",
"[",
"attr",
"]"
] | 30.238095 | 13.666667 |
def restore_version(self, symbol, as_of, prune_previous_version=True):
"""
Restore the specified 'symbol' data and metadata to the state of a given version/snapshot/date.
Returns a VersionedItem object only with a metadata element.
Fast operation: Zero data/segment read/write operations.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
prune_previous_version : `bool`
Removes previous (non-snapshotted) versions from the database.
Default: True
Returns
-------
`VersionedItem`
VersionedItem named tuple containing the metadata of the written symbol's version document in the store.
"""
# TODO: This operation is tricky as it may create history branches and lead to corrupted symbols.
# To avoid this we do concat_rewrite (see Issue #579)
# Investigate how this can be optimized and maintain safety (i.e. avoid read/write with serialization
# and compression costs, but instead:
# clone segments (server-side?) / crate new (base) version / update segments' parent).
version_to_restore = self._read_metadata(symbol, as_of=as_of)
# At this point it is guaranteed that the as_of version exists and doesn't have the symbol marked as deleted.
# If we try to restore the last version, do nothing (No-Op) and return the associated VesionedItem.
if self._last_version_seqnum(symbol) == version_to_restore['version']:
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(),
version=version_to_restore['version'],
host=self._arctic_lib.arctic.mongo_host,
metadata=version_to_restore.pop('metadata', None), data=None)
# Read the existing data from as_of
item = self.read(symbol, as_of=as_of)
# Write back, creating a new base version
new_item = self.write(symbol,
data=item.data, metadata=item.metadata, prune_previous_version=prune_previous_version)
return new_item | [
"def",
"restore_version",
"(",
"self",
",",
"symbol",
",",
"as_of",
",",
"prune_previous_version",
"=",
"True",
")",
":",
"# TODO: This operation is tricky as it may create history branches and lead to corrupted symbols.",
"# To avoid this we do concat_rewrite (see Issue #579)",
"# Investigate how this can be optimized and maintain safety (i.e. avoid read/write with serialization",
"# and compression costs, but instead:",
"# clone segments (server-side?) / crate new (base) version / update segments' parent).",
"version_to_restore",
"=",
"self",
".",
"_read_metadata",
"(",
"symbol",
",",
"as_of",
"=",
"as_of",
")",
"# At this point it is guaranteed that the as_of version exists and doesn't have the symbol marked as deleted.",
"# If we try to restore the last version, do nothing (No-Op) and return the associated VesionedItem.",
"if",
"self",
".",
"_last_version_seqnum",
"(",
"symbol",
")",
"==",
"version_to_restore",
"[",
"'version'",
"]",
":",
"return",
"VersionedItem",
"(",
"symbol",
"=",
"symbol",
",",
"library",
"=",
"self",
".",
"_arctic_lib",
".",
"get_name",
"(",
")",
",",
"version",
"=",
"version_to_restore",
"[",
"'version'",
"]",
",",
"host",
"=",
"self",
".",
"_arctic_lib",
".",
"arctic",
".",
"mongo_host",
",",
"metadata",
"=",
"version_to_restore",
".",
"pop",
"(",
"'metadata'",
",",
"None",
")",
",",
"data",
"=",
"None",
")",
"# Read the existing data from as_of",
"item",
"=",
"self",
".",
"read",
"(",
"symbol",
",",
"as_of",
"=",
"as_of",
")",
"# Write back, creating a new base version",
"new_item",
"=",
"self",
".",
"write",
"(",
"symbol",
",",
"data",
"=",
"item",
".",
"data",
",",
"metadata",
"=",
"item",
".",
"metadata",
",",
"prune_previous_version",
"=",
"prune_previous_version",
")",
"return",
"new_item"
] | 53.06383 | 31.702128 |
def update_transfer_config(
self,
transfer_config,
update_mask,
authorization_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates a data transfer configuration.
All fields must be set, even if they are not updated.
Example:
>>> from google.cloud import bigquery_datatransfer_v1
>>>
>>> client = bigquery_datatransfer_v1.DataTransferServiceClient()
>>>
>>> # TODO: Initialize `transfer_config`:
>>> transfer_config = {}
>>>
>>> # TODO: Initialize `update_mask`:
>>> update_mask = {}
>>>
>>> response = client.update_transfer_config(transfer_config, update_mask)
Args:
transfer_config (Union[dict, ~google.cloud.bigquery_datatransfer_v1.types.TransferConfig]): Data transfer configuration to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigquery_datatransfer_v1.types.TransferConfig`
update_mask (Union[dict, ~google.cloud.bigquery_datatransfer_v1.types.FieldMask]): Required list of fields to be updated in this request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigquery_datatransfer_v1.types.FieldMask`
authorization_code (str): Optional OAuth2 authorization code to use with this transfer
configuration. If it is provided, the transfer configuration will be
associated with the authorizing user. In order to obtain
authorization\_code, please make a request to
https://www.gstatic.com/bigquerydatatransfer/oauthz/auth?client\_id=&scope=<data\_source\_scopes>&redirect\_uri=<redirect\_uri>
- client\_id should be OAuth client\_id of BigQuery DTS API for the
given data source returned by ListDataSources method.
- data\_source\_scopes are the scopes returned by ListDataSources
method.
- redirect\_uri is an optional parameter. If not specified, then
authorization code is posted to the opener of authorization flow
window. Otherwise it will be sent to the redirect uri. A special
value of urn:ietf:wg:oauth:2.0:oob means that authorization code
should be returned in the title bar of the browser, with the page
text prompting the user to copy the code and paste it in the
application.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigquery_datatransfer_v1.types.TransferConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_transfer_config" not in self._inner_api_calls:
self._inner_api_calls[
"update_transfer_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_transfer_config,
default_retry=self._method_configs["UpdateTransferConfig"].retry,
default_timeout=self._method_configs["UpdateTransferConfig"].timeout,
client_info=self._client_info,
)
request = datatransfer_pb2.UpdateTransferConfigRequest(
transfer_config=transfer_config,
update_mask=update_mask,
authorization_code=authorization_code,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("transfer_config.name", transfer_config.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_transfer_config"](
request, retry=retry, timeout=timeout, metadata=metadata
) | [
"def",
"update_transfer_config",
"(",
"self",
",",
"transfer_config",
",",
"update_mask",
",",
"authorization_code",
"=",
"None",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"update_transfer_config\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"update_transfer_config\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"update_transfer_config",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"UpdateTransferConfig\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"UpdateTransferConfig\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"datatransfer_pb2",
".",
"UpdateTransferConfigRequest",
"(",
"transfer_config",
"=",
"transfer_config",
",",
"update_mask",
"=",
"update_mask",
",",
"authorization_code",
"=",
"authorization_code",
",",
")",
"if",
"metadata",
"is",
"None",
":",
"metadata",
"=",
"[",
"]",
"metadata",
"=",
"list",
"(",
"metadata",
")",
"try",
":",
"routing_header",
"=",
"[",
"(",
"\"transfer_config.name\"",
",",
"transfer_config",
".",
"name",
")",
"]",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"routing_metadata",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"routing_header",
")",
"metadata",
".",
"append",
"(",
"routing_metadata",
")",
"return",
"self",
".",
"_inner_api_calls",
"[",
"\"update_transfer_config\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
] | 48.679612 | 28.718447 |
async def delete(self):
"""Delete this Bcache."""
await self._handler.delete(
system_id=self.node.system_id, id=self.id) | [
"async",
"def",
"delete",
"(",
"self",
")",
":",
"await",
"self",
".",
"_handler",
".",
"delete",
"(",
"system_id",
"=",
"self",
".",
"node",
".",
"system_id",
",",
"id",
"=",
"self",
".",
"id",
")"
] | 36.25 | 9 |
def setup_oauth(username, password, basemaps_token_uri, authcfg_id=AUTHCFG_ID, authcfg_name=AUTHCFG_NAME):
"""Setup oauth configuration to access the BCS API,
return authcfg_id on success, None on failure
"""
cfgjson = {
"accessMethod" : 0,
"apiKey" : "",
"clientId" : "",
"clientSecret" : "",
"configType" : 1,
"grantFlow" : 2,
"password" : password,
"persistToken" : False,
"redirectPort" : '7070',
"redirectUrl" : "",
"refreshTokenUrl" : "",
"requestTimeout" : '30',
"requestUrl" : "",
"scope" : "",
"state" : "",
"tokenUrl" : basemaps_token_uri,
"username" : username,
"version" : 1
}
if authcfg_id not in auth_manager().availableAuthMethodConfigs():
authConfig = QgsAuthMethodConfig('OAuth2')
authConfig.setId(authcfg_id)
authConfig.setName(authcfg_name)
authConfig.setConfig('oauth2config', json.dumps(cfgjson))
if auth_manager().storeAuthenticationConfig(authConfig):
return authcfg_id
else:
authConfig = QgsAuthMethodConfig()
auth_manager().loadAuthenticationConfig(authcfg_id, authConfig, True)
authConfig.setName(authcfg_name)
authConfig.setConfig('oauth2config', json.dumps(cfgjson))
if auth_manager().updateAuthenticationConfig(authConfig):
return authcfg_id
return None | [
"def",
"setup_oauth",
"(",
"username",
",",
"password",
",",
"basemaps_token_uri",
",",
"authcfg_id",
"=",
"AUTHCFG_ID",
",",
"authcfg_name",
"=",
"AUTHCFG_NAME",
")",
":",
"cfgjson",
"=",
"{",
"\"accessMethod\"",
":",
"0",
",",
"\"apiKey\"",
":",
"\"\"",
",",
"\"clientId\"",
":",
"\"\"",
",",
"\"clientSecret\"",
":",
"\"\"",
",",
"\"configType\"",
":",
"1",
",",
"\"grantFlow\"",
":",
"2",
",",
"\"password\"",
":",
"password",
",",
"\"persistToken\"",
":",
"False",
",",
"\"redirectPort\"",
":",
"'7070'",
",",
"\"redirectUrl\"",
":",
"\"\"",
",",
"\"refreshTokenUrl\"",
":",
"\"\"",
",",
"\"requestTimeout\"",
":",
"'30'",
",",
"\"requestUrl\"",
":",
"\"\"",
",",
"\"scope\"",
":",
"\"\"",
",",
"\"state\"",
":",
"\"\"",
",",
"\"tokenUrl\"",
":",
"basemaps_token_uri",
",",
"\"username\"",
":",
"username",
",",
"\"version\"",
":",
"1",
"}",
"if",
"authcfg_id",
"not",
"in",
"auth_manager",
"(",
")",
".",
"availableAuthMethodConfigs",
"(",
")",
":",
"authConfig",
"=",
"QgsAuthMethodConfig",
"(",
"'OAuth2'",
")",
"authConfig",
".",
"setId",
"(",
"authcfg_id",
")",
"authConfig",
".",
"setName",
"(",
"authcfg_name",
")",
"authConfig",
".",
"setConfig",
"(",
"'oauth2config'",
",",
"json",
".",
"dumps",
"(",
"cfgjson",
")",
")",
"if",
"auth_manager",
"(",
")",
".",
"storeAuthenticationConfig",
"(",
"authConfig",
")",
":",
"return",
"authcfg_id",
"else",
":",
"authConfig",
"=",
"QgsAuthMethodConfig",
"(",
")",
"auth_manager",
"(",
")",
".",
"loadAuthenticationConfig",
"(",
"authcfg_id",
",",
"authConfig",
",",
"True",
")",
"authConfig",
".",
"setName",
"(",
"authcfg_name",
")",
"authConfig",
".",
"setConfig",
"(",
"'oauth2config'",
",",
"json",
".",
"dumps",
"(",
"cfgjson",
")",
")",
"if",
"auth_manager",
"(",
")",
".",
"updateAuthenticationConfig",
"(",
"authConfig",
")",
":",
"return",
"authcfg_id",
"return",
"None"
] | 34.25 | 17.9 |
def autoinc(self):
"""
Conditionally updates the stack version in the file associated with
this config.
This handles both official releases (i.e. QA configs), and release
candidates. Assumptions about version:
- Official release versions are MAJOR.minor, where MAJOR and minor
are both non-negative integers. E.g.
2.9
2.10
2.11
3.0
3.1
3.2
etc...
- Release candidate versions are MAJOR.minor-rc.N, where MAJOR,
minor, and N are all non-negative integers.
3.5-rc.1
3.5-rc.2
"""
if not self.get('autoinc_version'):
return
oldver = self['version']
newver = bump_version_tail(oldver)
config_path = self.filepath
temp_fd, temp_name = tempfile.mkstemp(
dir=os.path.dirname(config_path),
)
with open(config_path) as old:
with os.fdopen(temp_fd, 'w') as new:
for oldline in old:
if oldline.startswith('version:'):
new.write("version: '%s'\n" % newver)
continue
new.write(oldline)
# no need to backup the old file, it's under version control anyway -
# right???
log.info('Incrementing stack version %s -> %s' % (oldver, newver))
os.rename(temp_name, config_path) | [
"def",
"autoinc",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"get",
"(",
"'autoinc_version'",
")",
":",
"return",
"oldver",
"=",
"self",
"[",
"'version'",
"]",
"newver",
"=",
"bump_version_tail",
"(",
"oldver",
")",
"config_path",
"=",
"self",
".",
"filepath",
"temp_fd",
",",
"temp_name",
"=",
"tempfile",
".",
"mkstemp",
"(",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_path",
")",
",",
")",
"with",
"open",
"(",
"config_path",
")",
"as",
"old",
":",
"with",
"os",
".",
"fdopen",
"(",
"temp_fd",
",",
"'w'",
")",
"as",
"new",
":",
"for",
"oldline",
"in",
"old",
":",
"if",
"oldline",
".",
"startswith",
"(",
"'version:'",
")",
":",
"new",
".",
"write",
"(",
"\"version: '%s'\\n\"",
"%",
"newver",
")",
"continue",
"new",
".",
"write",
"(",
"oldline",
")",
"# no need to backup the old file, it's under version control anyway -",
"# right???",
"log",
".",
"info",
"(",
"'Incrementing stack version %s -> %s'",
"%",
"(",
"oldver",
",",
"newver",
")",
")",
"os",
".",
"rename",
"(",
"temp_name",
",",
"config_path",
")"
] | 31.291667 | 20.541667 |
def delete_element(self, element):
"""Deletes an element (lazily).
:raises: ValueError -- No such element in the priority queue.
"""
if element not in self.element_finder:
raise ValueError("No such element in the priority queue.")
entry = self.element_finder[element]
entry[1] = self.INVALID | [
"def",
"delete_element",
"(",
"self",
",",
"element",
")",
":",
"if",
"element",
"not",
"in",
"self",
".",
"element_finder",
":",
"raise",
"ValueError",
"(",
"\"No such element in the priority queue.\"",
")",
"entry",
"=",
"self",
".",
"element_finder",
"[",
"element",
"]",
"entry",
"[",
"1",
"]",
"=",
"self",
".",
"INVALID"
] | 34.4 | 16.4 |
def split(txt, seps):
"""
Splits a text in a meaningful list of words based on a list of word
separators (define in pyqode.core.settings)
:param txt: Text to split
:param seps: List of words separators
:return: A **set** of words found in the document (excluding
punctuations, numbers, ...)
"""
# replace all possible separators with a default sep
default_sep = seps[0]
for sep in seps[1:]:
if sep:
txt = txt.replace(sep, default_sep)
# now we can split using the default_sep
raw_words = txt.split(default_sep)
words = set()
for word in raw_words:
# w = w.strip()
if word.replace('_', '').isalpha():
words.add(word)
return sorted(words) | [
"def",
"split",
"(",
"txt",
",",
"seps",
")",
":",
"# replace all possible separators with a default sep",
"default_sep",
"=",
"seps",
"[",
"0",
"]",
"for",
"sep",
"in",
"seps",
"[",
"1",
":",
"]",
":",
"if",
"sep",
":",
"txt",
"=",
"txt",
".",
"replace",
"(",
"sep",
",",
"default_sep",
")",
"# now we can split using the default_sep",
"raw_words",
"=",
"txt",
".",
"split",
"(",
"default_sep",
")",
"words",
"=",
"set",
"(",
")",
"for",
"word",
"in",
"raw_words",
":",
"# w = w.strip()",
"if",
"word",
".",
"replace",
"(",
"'_'",
",",
"''",
")",
".",
"isalpha",
"(",
")",
":",
"words",
".",
"add",
"(",
"word",
")",
"return",
"sorted",
"(",
"words",
")"
] | 35.434783 | 13.086957 |
def forward(self, data_batch, is_train=None):
    """Split `data_batch` according to workload and run forward on each devices.

    Parameters
    ----------
    data_batch : DataBatch
        Or could be any object implementing similar interface.  May also be
        a list of batches (one per device) -- see the isinstance check below.
    is_train : bool
        The hint for the backend, indicating whether we are during training phase.
        Default is `None`, then the value `self.for_training` will be used.

    Returns
    -------
    None
    """
    # Copy the batch's input data into the bound per-device arrays.
    _load_data(data_batch, self.data_arrays, self.data_layouts)
    if is_train is None:
        is_train = self.for_training
    # Labels are optional (e.g. absent at inference time); only load them
    # when label arrays are bound and the batch actually carries labels.
    if isinstance(data_batch, list):
        # Pre-split batches: presumably one DataBatch per device -- the
        # first batch's label presence stands in for all of them.
        if self.label_arrays is not None and data_batch is not None and data_batch[0].label:
            _load_label(data_batch, self.label_arrays, self.label_layouts)
    else:
        if self.label_arrays is not None and data_batch.label:
            _load_label(data_batch, self.label_arrays, self.label_layouts)
    # Run the forward pass on every executor (one per device).
    for exec_ in self.execs:
        exec_.forward(is_train=is_train)
"def",
"forward",
"(",
"self",
",",
"data_batch",
",",
"is_train",
"=",
"None",
")",
":",
"_load_data",
"(",
"data_batch",
",",
"self",
".",
"data_arrays",
",",
"self",
".",
"data_layouts",
")",
"if",
"is_train",
"is",
"None",
":",
"is_train",
"=",
"self",
".",
"for_training",
"if",
"isinstance",
"(",
"data_batch",
",",
"list",
")",
":",
"if",
"self",
".",
"label_arrays",
"is",
"not",
"None",
"and",
"data_batch",
"is",
"not",
"None",
"and",
"data_batch",
"[",
"0",
"]",
".",
"label",
":",
"_load_label",
"(",
"data_batch",
",",
"self",
".",
"label_arrays",
",",
"self",
".",
"label_layouts",
")",
"else",
":",
"if",
"self",
".",
"label_arrays",
"is",
"not",
"None",
"and",
"data_batch",
".",
"label",
":",
"_load_label",
"(",
"data_batch",
",",
"self",
".",
"label_arrays",
",",
"self",
".",
"label_layouts",
")",
"for",
"exec_",
"in",
"self",
".",
"execs",
":",
"exec_",
".",
"forward",
"(",
"is_train",
"=",
"is_train",
")"
] | 39.703704 | 23.444444 |
def parameters(self):
    """Return the cell parameters as ``(lengths, angles)``.

    ``lengths`` holds the norms of the three cell (column) vectors and
    ``angles`` the pairwise angles between them, in radians:
    alpha = angle(b, c), beta = angle(c, a), gamma = angle(a, b).
    """
    vec_a = self.matrix[:, 0]
    vec_b = self.matrix[:, 1]
    vec_c = self.matrix[:, 2]
    len_a = np.linalg.norm(vec_a)
    len_b = np.linalg.norm(vec_b)
    len_c = np.linalg.norm(vec_c)
    alpha = np.arccos(np.dot(vec_b, vec_c) / (len_b * len_c))
    beta = np.arccos(np.dot(vec_c, vec_a) / (len_c * len_a))
    gamma = np.arccos(np.dot(vec_a, vec_b) / (len_a * len_b))
    return (
        np.array([len_a, len_b, len_c], float),
        np.array([alpha, beta, gamma], float),
    )
"def",
"parameters",
"(",
"self",
")",
":",
"length_a",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"0",
"]",
")",
"length_b",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"1",
"]",
")",
"length_c",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"2",
"]",
")",
"alpha",
"=",
"np",
".",
"arccos",
"(",
"np",
".",
"dot",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"1",
"]",
",",
"self",
".",
"matrix",
"[",
":",
",",
"2",
"]",
")",
"/",
"(",
"length_b",
"*",
"length_c",
")",
")",
"beta",
"=",
"np",
".",
"arccos",
"(",
"np",
".",
"dot",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"2",
"]",
",",
"self",
".",
"matrix",
"[",
":",
",",
"0",
"]",
")",
"/",
"(",
"length_c",
"*",
"length_a",
")",
")",
"gamma",
"=",
"np",
".",
"arccos",
"(",
"np",
".",
"dot",
"(",
"self",
".",
"matrix",
"[",
":",
",",
"0",
"]",
",",
"self",
".",
"matrix",
"[",
":",
",",
"1",
"]",
")",
"/",
"(",
"length_a",
"*",
"length_b",
")",
")",
"return",
"(",
"np",
".",
"array",
"(",
"[",
"length_a",
",",
"length_b",
",",
"length_c",
"]",
",",
"float",
")",
",",
"np",
".",
"array",
"(",
"[",
"alpha",
",",
"beta",
",",
"gamma",
"]",
",",
"float",
")",
")"
] | 54.083333 | 25.25 |
def cc(self, cc_emails, global_substitutions=None, is_multiple=False, p=0):
    """Adds Cc objects to the Personalization object

    :param cc_emails: A Cc object, email string, (email, name) tuple, or a
        list of any of these.
    :param global_substitutions: A dict of substitutions for all recipients
    :type global_substitutions: dict
    :param is_multiple: Create a new personilization for each recipient
    :type is_multiple: bool
    :param p: p is the Personalization object or Personalization object
        index
    :type p: Personalization, integer, optional
    """
    if isinstance(cc_emails, list):
        for email in cc_emails:
            # Normalise plain strings / (email, name) tuples into Cc objects.
            if isinstance(email, str):
                email = Cc(email, None)
            if isinstance(email, tuple):
                email = Cc(email[0], email[1])
            self.add_cc(email, global_substitutions, is_multiple, p)
    else:
        if isinstance(cc_emails, str):
            cc_emails = Cc(cc_emails, None)
        if isinstance(cc_emails, tuple):
            # BUG FIX: this branch previously wrapped the tuple in ``To``;
            # a Cc recipient must be a ``Cc`` object, matching the list
            # branch above.
            cc_emails = Cc(cc_emails[0], cc_emails[1])
        self.add_cc(cc_emails, global_substitutions, is_multiple, p)
"def",
"cc",
"(",
"self",
",",
"cc_emails",
",",
"global_substitutions",
"=",
"None",
",",
"is_multiple",
"=",
"False",
",",
"p",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"cc_emails",
",",
"list",
")",
":",
"for",
"email",
"in",
"cc_emails",
":",
"if",
"isinstance",
"(",
"email",
",",
"str",
")",
":",
"email",
"=",
"Cc",
"(",
"email",
",",
"None",
")",
"if",
"isinstance",
"(",
"email",
",",
"tuple",
")",
":",
"email",
"=",
"Cc",
"(",
"email",
"[",
"0",
"]",
",",
"email",
"[",
"1",
"]",
")",
"self",
".",
"add_cc",
"(",
"email",
",",
"global_substitutions",
",",
"is_multiple",
",",
"p",
")",
"else",
":",
"if",
"isinstance",
"(",
"cc_emails",
",",
"str",
")",
":",
"cc_emails",
"=",
"Cc",
"(",
"cc_emails",
",",
"None",
")",
"if",
"isinstance",
"(",
"cc_emails",
",",
"tuple",
")",
":",
"cc_emails",
"=",
"To",
"(",
"cc_emails",
"[",
"0",
"]",
",",
"cc_emails",
"[",
"1",
"]",
")",
"self",
".",
"add_cc",
"(",
"cc_emails",
",",
"global_substitutions",
",",
"is_multiple",
",",
"p",
")"
] | 46.692308 | 14.807692 |
def _psplit(self):
    """
    Split `self` at both north and south poles.

    :return: A list of split StridedIntervals
    """
    # North-pole split first, then south-pole split each resulting piece.
    return [piece
            for half in self._nsplit()
            for piece in half._ssplit()]
"def",
"_psplit",
"(",
"self",
")",
":",
"nsplit_list",
"=",
"self",
".",
"_nsplit",
"(",
")",
"psplit_list",
"=",
"[",
"]",
"for",
"si",
"in",
"nsplit_list",
":",
"psplit_list",
".",
"extend",
"(",
"si",
".",
"_ssplit",
"(",
")",
")",
"return",
"psplit_list"
] | 21.5 | 17.785714 |
def bake_content(request):
    """Invoke the baking process - trigger post-publication.

    Expects ``ident_hash`` ("<uuid>@<version>") in the route matchdict; the
    version is required and the target must be a book (Collection).

    :raises: HTTPNotFound (unknown/invalid ident_hash), HTTPBadRequest
        (missing version, or the target is not a book)
    """
    ident_hash = request.matchdict['ident_hash']
    try:
        id, version = split_ident_hash(ident_hash)
    except IdentHashError:
        raise httpexceptions.HTTPNotFound()
    if not version:
        raise httpexceptions.HTTPBadRequest('must specify the version')
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            # Look the module up and check whether it is a Collection (book).
            cursor.execute("""\
SELECT bool(portal_type = 'Collection'), stateid, module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,))
            try:
                is_binder, stateid, module_ident = cursor.fetchone()
            except TypeError:
                # fetchone() returned None -- no matching row.
                raise httpexceptions.HTTPNotFound()
            if not is_binder:
                raise httpexceptions.HTTPBadRequest(
                    '{} is not a book'.format(ident_hash))
            if stateid == 5:
                # Already in the post-publication state: notify the
                # 'post_publication' channel listener directly.
                cursor.execute("""\
SELECT pg_notify('post_publication',
'{"module_ident": '||%s||',
 "ident_hash": "'||%s||'",
 "timestamp": "'||CURRENT_TIMESTAMP||'"}')
""", (module_ident, ident_hash))
            else:
                # Move to state 5; presumably a database trigger performs
                # the notification on this state change -- TODO confirm.
                cursor.execute("""\
UPDATE modules SET stateid = 5
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,))
"def",
"bake_content",
"(",
"request",
")",
":",
"ident_hash",
"=",
"request",
".",
"matchdict",
"[",
"'ident_hash'",
"]",
"try",
":",
"id",
",",
"version",
"=",
"split_ident_hash",
"(",
"ident_hash",
")",
"except",
"IdentHashError",
":",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"if",
"not",
"version",
":",
"raise",
"httpexceptions",
".",
"HTTPBadRequest",
"(",
"'must specify the version'",
")",
"with",
"db_connect",
"(",
")",
"as",
"db_conn",
":",
"with",
"db_conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nSELECT bool(portal_type = 'Collection'), stateid, module_ident\nFROM modules\nWHERE ident_hash(uuid, major_version, minor_version) = %s\n\"\"\"",
",",
"(",
"ident_hash",
",",
")",
")",
"try",
":",
"is_binder",
",",
"stateid",
",",
"module_ident",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"except",
"TypeError",
":",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"if",
"not",
"is_binder",
":",
"raise",
"httpexceptions",
".",
"HTTPBadRequest",
"(",
"'{} is not a book'",
".",
"format",
"(",
"ident_hash",
")",
")",
"if",
"stateid",
"==",
"5",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nSELECT pg_notify('post_publication',\n'{\"module_ident\": '||%s||',\n \"ident_hash\": \"'||%s||'\",\n \"timestamp\": \"'||CURRENT_TIMESTAMP||'\"}')\n\"\"\"",
",",
"(",
"module_ident",
",",
"ident_hash",
")",
")",
"else",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nUPDATE modules SET stateid = 5\nWHERE ident_hash(uuid, major_version, minor_version) = %s\n\"\"\"",
",",
"(",
"ident_hash",
",",
")",
")"
] | 34.105263 | 14.131579 |
def iter_used_addresses(
        adapter, # type: BaseAdapter
        seed, # type: Seed
        start, # type: int
        security_level=None, # type: Optional[int]
):
    # type: (...) -> Generator[Tuple[Address, List[TransactionHash]], None, None]
    """
    Scans the Tangle for used addresses.

    This is basically the opposite of invoking ``getNewAddresses`` with
    ``stop=None``.

    Yields ``(address, transaction_hashes)`` pairs for consecutive
    addresses generated from ``seed`` starting at index ``start``, and
    stops at the first address that has no transactions.
    """
    if security_level is None:
        security_level = AddressGenerator.DEFAULT_SECURITY_LEVEL
    ft_command = FindTransactionsCommand(adapter)
    for addy in AddressGenerator(seed, security_level).create_iterator(start):
        # Ask the node for any transactions referencing this address.
        ft_response = ft_command(addresses=[addy])
        if ft_response['hashes']:
            yield addy, ft_response['hashes']
        else:
            # First unused address found; stop scanning.
            break
        # Reset the command so that we can call it again.
        ft_command.reset()
"def",
"iter_used_addresses",
"(",
"adapter",
",",
"# type: BaseAdapter",
"seed",
",",
"# type: Seed",
"start",
",",
"# type: int",
"security_level",
"=",
"None",
",",
"# type: Optional[int]",
")",
":",
"# type: (...) -> Generator[Tuple[Address, List[TransactionHash]], None, None]",
"if",
"security_level",
"is",
"None",
":",
"security_level",
"=",
"AddressGenerator",
".",
"DEFAULT_SECURITY_LEVEL",
"ft_command",
"=",
"FindTransactionsCommand",
"(",
"adapter",
")",
"for",
"addy",
"in",
"AddressGenerator",
"(",
"seed",
",",
"security_level",
")",
".",
"create_iterator",
"(",
"start",
")",
":",
"ft_response",
"=",
"ft_command",
"(",
"addresses",
"=",
"[",
"addy",
"]",
")",
"if",
"ft_response",
"[",
"'hashes'",
"]",
":",
"yield",
"addy",
",",
"ft_response",
"[",
"'hashes'",
"]",
"else",
":",
"break",
"# Reset the command so that we can call it again.",
"ft_command",
".",
"reset",
"(",
")"
] | 30.5 | 20.428571 |
def textcontent(self, cls='current', correctionhandling=CorrectionHandling.CURRENT):
    """See :meth:`AbstractElement.textcontent`"""
    if cls == 'original':
        # Backward compatibility: the old API selected originals via cls.
        correctionhandling = CorrectionHandling.ORIGINAL
    # Search the corrected content (New/Current) first, then the Original
    # content, honouring the requested correction-handling mode.
    search_order = (
        ((CorrectionHandling.CURRENT, CorrectionHandling.EITHER), (New, Current)),
        ((CorrectionHandling.ORIGINAL, CorrectionHandling.EITHER), (Original,)),
    )
    for modes, accepted_types in search_order:
        if correctionhandling in modes:
            for child in self:
                if isinstance(child, accepted_types):
                    return child.textcontent(cls, correctionhandling)
    raise NoSuchText
"def",
"textcontent",
"(",
"self",
",",
"cls",
"=",
"'current'",
",",
"correctionhandling",
"=",
"CorrectionHandling",
".",
"CURRENT",
")",
":",
"if",
"cls",
"==",
"'original'",
":",
"correctionhandling",
"=",
"CorrectionHandling",
".",
"ORIGINAL",
"#backward compatibility",
"if",
"correctionhandling",
"in",
"(",
"CorrectionHandling",
".",
"CURRENT",
",",
"CorrectionHandling",
".",
"EITHER",
")",
":",
"for",
"e",
"in",
"self",
":",
"if",
"isinstance",
"(",
"e",
",",
"New",
")",
"or",
"isinstance",
"(",
"e",
",",
"Current",
")",
":",
"return",
"e",
".",
"textcontent",
"(",
"cls",
",",
"correctionhandling",
")",
"if",
"correctionhandling",
"in",
"(",
"CorrectionHandling",
".",
"ORIGINAL",
",",
"CorrectionHandling",
".",
"EITHER",
")",
":",
"for",
"e",
"in",
"self",
":",
"if",
"isinstance",
"(",
"e",
",",
"Original",
")",
":",
"return",
"e",
".",
"textcontent",
"(",
"cls",
",",
"correctionhandling",
")",
"raise",
"NoSuchText"
] | 60.75 | 27 |
async def can_run(self, ctx):
    """|coro|

    Checks if the command can be executed by checking all the predicates
    inside the :attr:`.checks` attribute.

    Parameters
    -----------
    ctx: :class:`.Context`
        The ctx of the command currently being invoked.

    Raises
    -------
    :class:`CommandError`
        Any command error that was raised during a check call will be propagated
        by this function.

    Returns
    --------
    :class:`bool`
        A boolean indicating if the command can be invoked.
    """
    # Temporarily point the context at this command so every check sees
    # the command actually being evaluated; restored in the finally block.
    original = ctx.command
    ctx.command = self
    try:
        # Bot-global checks run first; a failure raises rather than
        # returning False.
        if not await ctx.bot.can_run(ctx):
            raise CheckFailure('The global check functions for command {0.qualified_name} failed.'.format(self))
        # Cog-local check, only if the cog overrides cog_check.
        cog = self.cog
        if cog is not None:
            local_check = Cog._get_overridden_method(cog.cog_check)
            if local_check is not None:
                ret = await discord.utils.maybe_coroutine(local_check, ctx)
                if not ret:
                    return False
        # Finally, the command's own check predicates (sync or async).
        predicates = self.checks
        if not predicates:
            # since we have no checks, then we just return True.
            return True
        return await discord.utils.async_all(predicate(ctx) for predicate in predicates)
    finally:
        # Always restore the context's original command.
        ctx.command = original
"async",
"def",
"can_run",
"(",
"self",
",",
"ctx",
")",
":",
"original",
"=",
"ctx",
".",
"command",
"ctx",
".",
"command",
"=",
"self",
"try",
":",
"if",
"not",
"await",
"ctx",
".",
"bot",
".",
"can_run",
"(",
"ctx",
")",
":",
"raise",
"CheckFailure",
"(",
"'The global check functions for command {0.qualified_name} failed.'",
".",
"format",
"(",
"self",
")",
")",
"cog",
"=",
"self",
".",
"cog",
"if",
"cog",
"is",
"not",
"None",
":",
"local_check",
"=",
"Cog",
".",
"_get_overridden_method",
"(",
"cog",
".",
"cog_check",
")",
"if",
"local_check",
"is",
"not",
"None",
":",
"ret",
"=",
"await",
"discord",
".",
"utils",
".",
"maybe_coroutine",
"(",
"local_check",
",",
"ctx",
")",
"if",
"not",
"ret",
":",
"return",
"False",
"predicates",
"=",
"self",
".",
"checks",
"if",
"not",
"predicates",
":",
"# since we have no checks, then we just return True.",
"return",
"True",
"return",
"await",
"discord",
".",
"utils",
".",
"async_all",
"(",
"predicate",
"(",
"ctx",
")",
"for",
"predicate",
"in",
"predicates",
")",
"finally",
":",
"ctx",
".",
"command",
"=",
"original"
] | 31.304348 | 23.304348 |
def save(cls, instance, filename):
    """Serialise a TracingVariable *instance* to *filename* with pickle.

    If the full object does not fit in memory, retry after downsampling
    the stored trajectories (``H`` and ``Y``) by a factor of 20.
    """
    filename = cls.correct_file_extension(filename)
    try:
        with open(filename, 'wb') as handle:
            pickle.dump(instance, handle, protocol=pickle.HIGHEST_PROTOCOL)
    except MemoryError as err:
        print('{} occurred, will downsampled the saved file by 20.'
              .format(type(err).__name__))
        slimmed = instance.copy()
        slimmed.H = slimmed.H[::20, :, :]
        slimmed.Y = slimmed.Y[::20, :]
        with open(filename, 'wb') as handle:
            pickle.dump(slimmed, handle, protocol=pickle.HIGHEST_PROTOCOL)
"def",
"save",
"(",
"cls",
",",
"instance",
",",
"filename",
")",
":",
"filename",
"=",
"cls",
".",
"correct_file_extension",
"(",
"filename",
")",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"instance",
",",
"f",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"except",
"MemoryError",
"as",
"e",
":",
"print",
"(",
"'{} occurred, will downsampled the saved file by 20.'",
".",
"format",
"(",
"type",
"(",
"e",
")",
".",
"__name__",
")",
")",
"copy_instance",
"=",
"instance",
".",
"copy",
"(",
")",
"copy_instance",
".",
"H",
"=",
"copy_instance",
".",
"H",
"[",
":",
":",
"20",
",",
":",
",",
":",
"]",
"copy_instance",
".",
"Y",
"=",
"copy_instance",
".",
"Y",
"[",
":",
":",
"20",
",",
":",
"]",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"copy_instance",
",",
"f",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")"
] | 49.357143 | 14.285714 |
def cprint(string, fg=None, bg=None, end='\n', target=sys.stdout):
    """Write a colored string to the target handle.

    fg and bg select the foreground and background colors; the remaining
    keyword arguments mirror Python's built-in print function.  The
    terminal colors are restored to their defaults before returning.
    """
    message = string + end
    _color_manager.set_color(fg, bg)
    target.write(message)
    target.flush()  # Python 3 buffers output; flush so it appears now.
    _color_manager.set_defaults()
"def",
"cprint",
"(",
"string",
",",
"fg",
"=",
"None",
",",
"bg",
"=",
"None",
",",
"end",
"=",
"'\\n'",
",",
"target",
"=",
"sys",
".",
"stdout",
")",
":",
"_color_manager",
".",
"set_color",
"(",
"fg",
",",
"bg",
")",
"target",
".",
"write",
"(",
"string",
"+",
"end",
")",
"target",
".",
"flush",
"(",
")",
"# Needed for Python 3.x",
"_color_manager",
".",
"set_defaults",
"(",
")"
] | 38.307692 | 19.846154 |
def get_ec2_info(connection,
                 instance_id,
                 region,
                 username=None):
    """ queries EC2 for details about a particular instance_id

    :param connection: boto EC2 connection used for the queries
    :param instance_id: id of the instance to describe
    :param region: unused here; kept for interface compatibility
    :param username: unused here; kept for interface compatibility
    :return: dict of the instance's attributes, plus 'state',
        'cloud_type' and 'volume' (first attached EBS volume id, or '')
    """
    instance = connection.get_only_instances(
        filters={'instance_id': instance_id}
    )[0]
    data = instance.__dict__
    data['state'] = instance.state
    data['cloud_type'] = 'ec2'
    try:
        volume = connection.get_all_volumes(
            filters={'attachment.instance-id': instance.id}
        )[0].id
        data['volume'] = volume
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; no attached volume (or an API error)
        # still yields an empty string.
        data['volume'] = ''
    return data
"def",
"get_ec2_info",
"(",
"connection",
",",
"instance_id",
",",
"region",
",",
"username",
"=",
"None",
")",
":",
"instance",
"=",
"connection",
".",
"get_only_instances",
"(",
"filters",
"=",
"{",
"'instance_id'",
":",
"instance_id",
"}",
")",
"[",
"0",
"]",
"data",
"=",
"instance",
".",
"__dict__",
"data",
"[",
"'state'",
"]",
"=",
"instance",
".",
"state",
"data",
"[",
"'cloud_type'",
"]",
"=",
"'ec2'",
"try",
":",
"volume",
"=",
"connection",
".",
"get_all_volumes",
"(",
"filters",
"=",
"{",
"'attachment.instance-id'",
":",
"instance",
".",
"id",
"}",
")",
"[",
"0",
"]",
".",
"id",
"data",
"[",
"'volume'",
"]",
"=",
"volume",
"except",
":",
"data",
"[",
"'volume'",
"]",
"=",
"''",
"return",
"data"
] | 26.590909 | 15.818182 |
def element_exists(self, element):
    """
    Checks if given element exists.

    Usage::

        >>> plist_file_parser = PlistFileParser("standard.plist")
        >>> plist_file_parser.parse()
        True
        >>> plist_file_parser.element_exists("String A")
        True
        >>> plist_file_parser.element_exists("String Nemo")
        False

    :param element: Element to check existence.
    :type element: unicode
    :return: Element existence.
    :rtype: bool
    """
    if not self.__elements:
        return False
    # Walk every (path, key, value) triple of the parsed plist tree and
    # compare keys against the requested element.
    for _path, key, _value in foundations.walkers.dictionaries_walker(self.__elements):
        if key == element:
            LOGGER.debug("> '{0}' attribute exists.".format(element))
            return True
    LOGGER.debug("> '{0}' element doesn't exists.".format(element))
    return False
"def",
"element_exists",
"(",
"self",
",",
"element",
")",
":",
"if",
"not",
"self",
".",
"__elements",
":",
"return",
"False",
"for",
"item",
"in",
"foundations",
".",
"walkers",
".",
"dictionaries_walker",
"(",
"self",
".",
"__elements",
")",
":",
"path",
",",
"key",
",",
"value",
"=",
"item",
"if",
"key",
"==",
"element",
":",
"LOGGER",
".",
"debug",
"(",
"\"> '{0}' attribute exists.\"",
".",
"format",
"(",
"element",
")",
")",
"return",
"True",
"LOGGER",
".",
"debug",
"(",
"\"> '{0}' element doesn't exists.\"",
".",
"format",
"(",
"element",
")",
")",
"return",
"False"
] | 29.548387 | 20.516129 |
def map_fit(interface, state, label, inp):
    """
    Function counts occurrences of feature values for every row in given data chunk. For continuous features it returns
    number of values and it calculates mean and variance for every feature.
    For discrete features it counts occurrences of labels and values for every feature. It returns occurrences of pairs:
    label, feature index, feature values.

    ``state`` keys used: "delimiter", "X_indices", "X_meta" ("c" marks a
    continuous feature), "y_index", "missing_vals".

    NOTE(review): Python 2 code -- relies on ``dict.iteritems``.
    """
    import numpy as np
    combiner = {} # combiner used for joining of intermediate pairs
    out = interface.output(0) # all outputted pairs have the same output label
    for row in inp: # for every row in data chunk
        row = row.strip().split(state["delimiter"]) # split row
        if len(row) > 1: # check if row is empty
            for i, j in enumerate(state["X_indices"]): # for defined features
                if row[j] not in state["missing_vals"]: # check missing values
                    # creates a pair - label, feature index
                    pair = row[state["y_index"]] + state["delimiter"] + str(j)
                    if state["X_meta"][i] == "c": # continuous features
                        if pair in combiner:
                            # convert to float and store value
                            combiner[pair].append(np.float32(row[j]))
                        else:
                            combiner[pair] = [np.float32(row[j])]
                    else: # discrete features
                        # add feature value to pair
                        pair += state["delimiter"] + row[j]
                        # increase counts of current pair
                        combiner[pair] = combiner.get(pair, 0) + 1
            # increase label counts
            combiner[row[state["y_index"]]] = combiner.get(row[state["y_index"]], 0) + 1
    for k, v in combiner.iteritems(): # all pairs in combiner are output
        if len(k.split(state["delimiter"])) == 2: # continous features
            # number of elements, partial mean and variance
            out.add(k, (np.size(v), np.mean(v, dtype=np.float32), np.var(v, dtype=np.float32)))
        else: # discrete features and labels
            out.add(k, v)
"def",
"map_fit",
"(",
"interface",
",",
"state",
",",
"label",
",",
"inp",
")",
":",
"import",
"numpy",
"as",
"np",
"combiner",
"=",
"{",
"}",
"# combiner used for joining of intermediate pairs",
"out",
"=",
"interface",
".",
"output",
"(",
"0",
")",
"# all outputted pairs have the same output label",
"for",
"row",
"in",
"inp",
":",
"# for every row in data chunk",
"row",
"=",
"row",
".",
"strip",
"(",
")",
".",
"split",
"(",
"state",
"[",
"\"delimiter\"",
"]",
")",
"# split row",
"if",
"len",
"(",
"row",
")",
">",
"1",
":",
"# check if row is empty",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"state",
"[",
"\"X_indices\"",
"]",
")",
":",
"# for defined features",
"if",
"row",
"[",
"j",
"]",
"not",
"in",
"state",
"[",
"\"missing_vals\"",
"]",
":",
"# check missing values",
"# creates a pair - label, feature index",
"pair",
"=",
"row",
"[",
"state",
"[",
"\"y_index\"",
"]",
"]",
"+",
"state",
"[",
"\"delimiter\"",
"]",
"+",
"str",
"(",
"j",
")",
"if",
"state",
"[",
"\"X_meta\"",
"]",
"[",
"i",
"]",
"==",
"\"c\"",
":",
"# continuous features",
"if",
"pair",
"in",
"combiner",
":",
"# convert to float and store value",
"combiner",
"[",
"pair",
"]",
".",
"append",
"(",
"np",
".",
"float32",
"(",
"row",
"[",
"j",
"]",
")",
")",
"else",
":",
"combiner",
"[",
"pair",
"]",
"=",
"[",
"np",
".",
"float32",
"(",
"row",
"[",
"j",
"]",
")",
"]",
"else",
":",
"# discrete features",
"# add feature value to pair",
"pair",
"+=",
"state",
"[",
"\"delimiter\"",
"]",
"+",
"row",
"[",
"j",
"]",
"# increase counts of current pair",
"combiner",
"[",
"pair",
"]",
"=",
"combiner",
".",
"get",
"(",
"pair",
",",
"0",
")",
"+",
"1",
"# increase label counts",
"combiner",
"[",
"row",
"[",
"state",
"[",
"\"y_index\"",
"]",
"]",
"]",
"=",
"combiner",
".",
"get",
"(",
"row",
"[",
"state",
"[",
"\"y_index\"",
"]",
"]",
",",
"0",
")",
"+",
"1",
"for",
"k",
",",
"v",
"in",
"combiner",
".",
"iteritems",
"(",
")",
":",
"# all pairs in combiner are output",
"if",
"len",
"(",
"k",
".",
"split",
"(",
"state",
"[",
"\"delimiter\"",
"]",
")",
")",
"==",
"2",
":",
"# continous features",
"# number of elements, partial mean and variance",
"out",
".",
"add",
"(",
"k",
",",
"(",
"np",
".",
"size",
"(",
"v",
")",
",",
"np",
".",
"mean",
"(",
"v",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
",",
"np",
".",
"var",
"(",
"v",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
")",
")",
"else",
":",
"# discrete features and labels",
"out",
".",
"add",
"(",
"k",
",",
"v",
")"
] | 52.780488 | 26.292683 |
def _init_optional_attrs(self, optional_attrs):
    """Prepare to store data from user-desired optional fields.

    Not loading these optional fields by default saves in space and speed.
    But allow the possibility for saving these fields, if the user desires,
    Including:
      comment consider def is_class_level is_metadata_tag is_transitive
      relationship replaced_by subset synonym transitive_over xref

    :param optional_attrs: None, a single field name, or a list/set of
        field names; required field names are silently dropped.
    """
    # Written by DV Klopfenstein
    # Required attributes are always loaded. All others are optionally loaded.
    self.attrs_req = ['id', 'alt_id', 'name', 'namespace', 'is_a', 'is_obsolete']
    self.attrs_scalar = ['comment', 'defn',
                         'is_class_level', 'is_metadata_tag',
                         'is_transitive', 'transitive_over']
    self.attrs_nested = frozenset(['relationship'])

    # Allow user to specify either: 'def' or 'defn'
    # 'def' is an obo field name, but 'defn' is legal Python attribute name
    def _to_obo(attr):
        """Map the Python-safe alias 'defn' back to the obo name 'def'."""
        return attr if attr != "defn" else "def"

    if optional_attrs is None:
        optional_attrs = []
    elif isinstance(optional_attrs, str):
        optional_attrs = [_to_obo(optional_attrs)] if optional_attrs not in self.attrs_req else []
    elif isinstance(optional_attrs, (list, set)):
        # isinstance with a tuple replaces the original chained checks.
        optional_attrs = set(_to_obo(f) for f in optional_attrs if f not in self.attrs_req)
    else:
        raise Exception("optional_attrs arg MUST BE A str, list, or set.")
    self.optional_attrs = optional_attrs
"def",
"_init_optional_attrs",
"(",
"self",
",",
"optional_attrs",
")",
":",
"# Written by DV Klopfenstein",
"# Required attributes are always loaded. All others are optionally loaded.",
"self",
".",
"attrs_req",
"=",
"[",
"'id'",
",",
"'alt_id'",
",",
"'name'",
",",
"'namespace'",
",",
"'is_a'",
",",
"'is_obsolete'",
"]",
"self",
".",
"attrs_scalar",
"=",
"[",
"'comment'",
",",
"'defn'",
",",
"'is_class_level'",
",",
"'is_metadata_tag'",
",",
"'is_transitive'",
",",
"'transitive_over'",
"]",
"self",
".",
"attrs_nested",
"=",
"frozenset",
"(",
"[",
"'relationship'",
"]",
")",
"# Allow user to specify either: 'def' or 'defn'",
"# 'def' is an obo field name, but 'defn' is legal Python attribute name",
"fnc",
"=",
"lambda",
"aopt",
":",
"aopt",
"if",
"aopt",
"!=",
"\"defn\"",
"else",
"\"def\"",
"if",
"optional_attrs",
"is",
"None",
":",
"optional_attrs",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"optional_attrs",
",",
"str",
")",
":",
"optional_attrs",
"=",
"[",
"fnc",
"(",
"optional_attrs",
")",
"]",
"if",
"optional_attrs",
"not",
"in",
"self",
".",
"attrs_req",
"else",
"[",
"]",
"elif",
"isinstance",
"(",
"optional_attrs",
",",
"list",
")",
"or",
"isinstance",
"(",
"optional_attrs",
",",
"set",
")",
":",
"optional_attrs",
"=",
"set",
"(",
"[",
"fnc",
"(",
"f",
")",
"for",
"f",
"in",
"optional_attrs",
"if",
"f",
"not",
"in",
"self",
".",
"attrs_req",
"]",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"optional_attrs arg MUST BE A str, list, or set.\"",
")",
"self",
".",
"optional_attrs",
"=",
"optional_attrs"
] | 57.428571 | 24.928571 |
def make_node(clss, node, *params):
    ''' Build (or extend) a PARAMLIST node from parameter declarations
    found in a function declaration.

    Parameters:
        -node: A SymbolPARAMLIST instance or None
        -params: SymbolPARAMDECL instances (None entries are skipped)
    '''
    if node is None:
        node = clss()
    if node.token != 'PARAMLIST':
        # The "node" argument was actually a parameter declaration: wrap
        # it and the rest in a fresh PARAMLIST.
        return clss.make_node(None, node, *params)
    for param in params:
        if param is not None:
            node.appendChild(param)
    return node
"def",
"make_node",
"(",
"clss",
",",
"node",
",",
"*",
"params",
")",
":",
"if",
"node",
"is",
"None",
":",
"node",
"=",
"clss",
"(",
")",
"if",
"node",
".",
"token",
"!=",
"'PARAMLIST'",
":",
"return",
"clss",
".",
"make_node",
"(",
"None",
",",
"node",
",",
"*",
"params",
")",
"for",
"i",
"in",
"params",
":",
"if",
"i",
"is",
"not",
"None",
":",
"node",
".",
"appendChild",
"(",
"i",
")",
"return",
"node"
] | 28.166667 | 17.277778 |
def disable_gen(self, idx):
    """
    Disable a PV element for TDS.

    Sets the element's connectivity status to 0 and flags the system
    DAE for re-factorisation.

    Parameters
    ----------
    idx : element index, looked up through ``self.uid``

    Returns
    -------
    None
    """
    position = self.uid[idx]
    self.u[position] = 0
    self.system.dae.factorize = True
"def",
"disable_gen",
"(",
"self",
",",
"idx",
")",
":",
"self",
".",
"u",
"[",
"self",
".",
"uid",
"[",
"idx",
"]",
"]",
"=",
"0",
"self",
".",
"system",
".",
"dae",
".",
"factorize",
"=",
"True"
] | 16.785714 | 19.071429 |
def _ObjectFactory(obj):
    """Parse a python ``{name: schema}`` dict as an :py:class:`Object` instance.

    - A property name prepended by "+" is required
    - A property name prepended by "?" is optional
    - Any other property is required if :py:attr:`Object.REQUIRED_PROPERTIES`
      is True else it's optional

    Non-dict inputs yield None, as before.
    """
    if not isinstance(obj, dict):
        return None
    optional, required = {}, {}
    for name, schema in iteritems(obj):
        if name.startswith("+"):
            required[name[1:]] = schema
        elif name.startswith("?"):
            optional[name[1:]] = schema
        elif Object.REQUIRED_PROPERTIES:
            required[name] = schema
        else:
            optional[name] = schema
    return Object(optional, required)
"def",
"_ObjectFactory",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"optional",
",",
"required",
"=",
"{",
"}",
",",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"obj",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"\"+\"",
")",
":",
"required",
"[",
"key",
"[",
"1",
":",
"]",
"]",
"=",
"value",
"elif",
"key",
".",
"startswith",
"(",
"\"?\"",
")",
":",
"optional",
"[",
"key",
"[",
"1",
":",
"]",
"]",
"=",
"value",
"elif",
"Object",
".",
"REQUIRED_PROPERTIES",
":",
"required",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"optional",
"[",
"key",
"]",
"=",
"value",
"return",
"Object",
"(",
"optional",
",",
"required",
")"
] | 37.75 | 9.1 |
def check_dns_txt(domain, prefix, code):
    """
    Validates a domain by checking that {prefix}={code} is present in the TXT DNS record
    of the domain to check.

    Returns true if verification succeeded.
    """
    token = '{}={}'.format(prefix, code)
    try:
        for rr in dns.resolver.query(domain, 'TXT'):
            if token in rr.to_text():
                return True
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit); any DNS failure still just means
        # "not verified".
        logger.debug('', exc_info=True)
    return False
"def",
"check_dns_txt",
"(",
"domain",
",",
"prefix",
",",
"code",
")",
":",
"token",
"=",
"'{}={}'",
".",
"format",
"(",
"prefix",
",",
"code",
")",
"try",
":",
"for",
"rr",
"in",
"dns",
".",
"resolver",
".",
"query",
"(",
"domain",
",",
"'TXT'",
")",
":",
"if",
"token",
"in",
"rr",
".",
"to_text",
"(",
")",
":",
"return",
"True",
"except",
":",
"logger",
".",
"debug",
"(",
"''",
",",
"exc_info",
"=",
"True",
")",
"return",
"False"
] | 29.4 | 14.466667 |
def envs():
    '''
    Return the distinct saltenvs configured across the azurefs containers;
    a container without an explicit setting defaults to base.
    '''
    # A set comprehension deduplicates the per-container saltenv values.
    return list({container.get('saltenv', 'base')
                 for container in __opts__.get('azurefs', [])})
"def",
"envs",
"(",
")",
":",
"saltenvs",
"=",
"[",
"]",
"for",
"container",
"in",
"__opts__",
".",
"get",
"(",
"'azurefs'",
",",
"[",
"]",
")",
":",
"saltenvs",
".",
"append",
"(",
"container",
".",
"get",
"(",
"'saltenv'",
",",
"'base'",
")",
")",
"# Remove duplicates",
"return",
"list",
"(",
"set",
"(",
"saltenvs",
")",
")"
] | 28.9 | 23.7 |
def tmDiff2(tm1, tm2, verbosity = 0, relaxSegmentTests =True,
           checkLearn = True, checkStates = True):
  """
  Given two TM instances, list the difference between them and returns False
  if there is a difference. This function checks the major parameters. If this
  passes (and checkLearn is true) it checks the number of segments on each cell.
  If this passes, checks each synapse on each segment.
  When comparing C++ and Py, the segments are usually in different orders in the
  cells. tmDiff ignores segment order when comparing TM's.
  If checkLearn is True, will check learn states as well as all the segments
  If checkStates is True, will check the various state arrays

  NOTE(review): Python 2 code (print statements, xrange); not runnable on
  Python 3 as-is.
  """
  # First check basic parameters. If we fail here, don't continue
  if sameTMParams(tm1, tm2) == False:
    print "Two TM's have different parameters"
    return False
  tm1Label = "<tm_1 (%s)>" % tm1.__class__.__name__
  tm2Label = "<tm_2 (%s)>" % tm2.__class__.__name__
  result = True
  if checkStates:
    # Compare states at t first, they usually diverge before the structure of the
    # cells starts diverging
    if (tm1.infActiveState['t'] != tm2.infActiveState['t']).any():
      print 'Active states diverged', numpy.where(tm1.infActiveState['t'] != tm2.infActiveState['t'])
      result = False
    # NOTE(review): the following three checks use subtraction rather than
    # != -- equivalent for binary/numeric state arrays.
    if (tm1.infPredictedState['t'] - tm2.infPredictedState['t']).any():
      print 'Predicted states diverged', numpy.where(tm1.infPredictedState['t'] != tm2.infPredictedState['t'])
      result = False
    if checkLearn and (tm1.lrnActiveState['t'] - tm2.lrnActiveState['t']).any():
      print 'lrnActiveState[t] diverged', numpy.where(tm1.lrnActiveState['t'] != tm2.lrnActiveState['t'])
      result = False
    if checkLearn and (tm1.lrnPredictedState['t'] - tm2.lrnPredictedState['t']).any():
      print 'lrnPredictedState[t] diverged', numpy.where(tm1.lrnPredictedState['t'] != tm2.lrnPredictedState['t'])
      result = False
    if checkLearn and abs(tm1.getAvgLearnedSeqLength() - tm2.getAvgLearnedSeqLength()) > 0.01:
      print "Average learned sequence lengths differ: ",
      print tm1.getAvgLearnedSeqLength(), " vs ", tm2.getAvgLearnedSeqLength()
      result = False
  # TODO: check confidence at T (confT)
  # Now check some high level learned parameters.
  if tm1.getNumSegments() != tm2.getNumSegments():
    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
    result = False
  if tm1.getNumSynapses() != tm2.getNumSynapses():
    # NOTE(review): a synapse-count mismatch is only reported (result is
    # deliberately NOT set to False here -- see the commented line below).
    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
    if verbosity >= 3:
      print "%s: " % tm1Label,
      tm1.printCells()
      print "\n%s : " % tm2Label,
      tm2.printCells()
    #result = False
  # Check that each cell has the same number of segments and synapses
  # NOTE(review): mixes tm1.numberOfCols with tm2.cellsPerColumn -- assumes
  # identical geometry, which sameTMParams above should have guaranteed.
  for c in xrange(tm1.numberOfCols):
    for i in xrange(tm2.cellsPerColumn):
      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
        print "Num segments different in cell:",c,i,
        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
        result = False
  # If the above tests pass, then check each segment and report differences
  # Note that segments in tm1 can be in a different order than tm2. Here we
  # make sure that, for each segment in tm1, there is an identical segment
  # in tm2.
  if result == True and not relaxSegmentTests and checkLearn:
    for c in xrange(tm1.numberOfCols):
      for i in xrange(tm2.cellsPerColumn):
        nSegs = tm1.getNumSegmentsInCell(c, i)
        for segIdx in xrange(nSegs):
          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)
          # Loop through all segments in tm2seg and see if any of them match tm1seg
          res = False
          for tm2segIdx in xrange(nSegs):
            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
            if sameSegment(tm1seg, tm2seg) == True:
              res = True
              break
          if res == False:
            print "\nSegments are different for cell:",c,i
            result = False
            if verbosity >= 0:
              print "%s : " % tm1Label,
              tm1.printCell(c, i)
              print "\n%s : " % tm2Label,
              tm2.printCell(c, i)
  if result == True and (verbosity > 1):
    print "TM's match"
  return result
"def",
"tmDiff2",
"(",
"tm1",
",",
"tm2",
",",
"verbosity",
"=",
"0",
",",
"relaxSegmentTests",
"=",
"True",
",",
"checkLearn",
"=",
"True",
",",
"checkStates",
"=",
"True",
")",
":",
"# First check basic parameters. If we fail here, don't continue",
"if",
"sameTMParams",
"(",
"tm1",
",",
"tm2",
")",
"==",
"False",
":",
"print",
"\"Two TM's have different parameters\"",
"return",
"False",
"tm1Label",
"=",
"\"<tm_1 (%s)>\"",
"%",
"tm1",
".",
"__class__",
".",
"__name__",
"tm2Label",
"=",
"\"<tm_2 (%s)>\"",
"%",
"tm2",
".",
"__class__",
".",
"__name__",
"result",
"=",
"True",
"if",
"checkStates",
":",
"# Compare states at t first, they usually diverge before the structure of the",
"# cells starts diverging",
"if",
"(",
"tm1",
".",
"infActiveState",
"[",
"'t'",
"]",
"!=",
"tm2",
".",
"infActiveState",
"[",
"'t'",
"]",
")",
".",
"any",
"(",
")",
":",
"print",
"'Active states diverged'",
",",
"numpy",
".",
"where",
"(",
"tm1",
".",
"infActiveState",
"[",
"'t'",
"]",
"!=",
"tm2",
".",
"infActiveState",
"[",
"'t'",
"]",
")",
"result",
"=",
"False",
"if",
"(",
"tm1",
".",
"infPredictedState",
"[",
"'t'",
"]",
"-",
"tm2",
".",
"infPredictedState",
"[",
"'t'",
"]",
")",
".",
"any",
"(",
")",
":",
"print",
"'Predicted states diverged'",
",",
"numpy",
".",
"where",
"(",
"tm1",
".",
"infPredictedState",
"[",
"'t'",
"]",
"!=",
"tm2",
".",
"infPredictedState",
"[",
"'t'",
"]",
")",
"result",
"=",
"False",
"if",
"checkLearn",
"and",
"(",
"tm1",
".",
"lrnActiveState",
"[",
"'t'",
"]",
"-",
"tm2",
".",
"lrnActiveState",
"[",
"'t'",
"]",
")",
".",
"any",
"(",
")",
":",
"print",
"'lrnActiveState[t] diverged'",
",",
"numpy",
".",
"where",
"(",
"tm1",
".",
"lrnActiveState",
"[",
"'t'",
"]",
"!=",
"tm2",
".",
"lrnActiveState",
"[",
"'t'",
"]",
")",
"result",
"=",
"False",
"if",
"checkLearn",
"and",
"(",
"tm1",
".",
"lrnPredictedState",
"[",
"'t'",
"]",
"-",
"tm2",
".",
"lrnPredictedState",
"[",
"'t'",
"]",
")",
".",
"any",
"(",
")",
":",
"print",
"'lrnPredictedState[t] diverged'",
",",
"numpy",
".",
"where",
"(",
"tm1",
".",
"lrnPredictedState",
"[",
"'t'",
"]",
"!=",
"tm2",
".",
"lrnPredictedState",
"[",
"'t'",
"]",
")",
"result",
"=",
"False",
"if",
"checkLearn",
"and",
"abs",
"(",
"tm1",
".",
"getAvgLearnedSeqLength",
"(",
")",
"-",
"tm2",
".",
"getAvgLearnedSeqLength",
"(",
")",
")",
">",
"0.01",
":",
"print",
"\"Average learned sequence lengths differ: \"",
",",
"print",
"tm1",
".",
"getAvgLearnedSeqLength",
"(",
")",
",",
"\" vs \"",
",",
"tm2",
".",
"getAvgLearnedSeqLength",
"(",
")",
"result",
"=",
"False",
"# TODO: check confidence at T (confT)",
"# Now check some high level learned parameters.",
"if",
"tm1",
".",
"getNumSegments",
"(",
")",
"!=",
"tm2",
".",
"getNumSegments",
"(",
")",
":",
"print",
"\"Number of segments are different\"",
",",
"tm1",
".",
"getNumSegments",
"(",
")",
",",
"tm2",
".",
"getNumSegments",
"(",
")",
"result",
"=",
"False",
"if",
"tm1",
".",
"getNumSynapses",
"(",
")",
"!=",
"tm2",
".",
"getNumSynapses",
"(",
")",
":",
"print",
"\"Number of synapses are different\"",
",",
"tm1",
".",
"getNumSynapses",
"(",
")",
",",
"tm2",
".",
"getNumSynapses",
"(",
")",
"if",
"verbosity",
">=",
"3",
":",
"print",
"\"%s: \"",
"%",
"tm1Label",
",",
"tm1",
".",
"printCells",
"(",
")",
"print",
"\"\\n%s : \"",
"%",
"tm2Label",
",",
"tm2",
".",
"printCells",
"(",
")",
"#result = False",
"# Check that each cell has the same number of segments and synapses",
"for",
"c",
"in",
"xrange",
"(",
"tm1",
".",
"numberOfCols",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"tm2",
".",
"cellsPerColumn",
")",
":",
"if",
"tm1",
".",
"getNumSegmentsInCell",
"(",
"c",
",",
"i",
")",
"!=",
"tm2",
".",
"getNumSegmentsInCell",
"(",
"c",
",",
"i",
")",
":",
"print",
"\"Num segments different in cell:\"",
",",
"c",
",",
"i",
",",
"print",
"tm1",
".",
"getNumSegmentsInCell",
"(",
"c",
",",
"i",
")",
",",
"tm2",
".",
"getNumSegmentsInCell",
"(",
"c",
",",
"i",
")",
"result",
"=",
"False",
"# If the above tests pass, then check each segment and report differences",
"# Note that segments in tm1 can be in a different order than tm2. Here we",
"# make sure that, for each segment in tm1, there is an identical segment",
"# in tm2.",
"if",
"result",
"==",
"True",
"and",
"not",
"relaxSegmentTests",
"and",
"checkLearn",
":",
"for",
"c",
"in",
"xrange",
"(",
"tm1",
".",
"numberOfCols",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"tm2",
".",
"cellsPerColumn",
")",
":",
"nSegs",
"=",
"tm1",
".",
"getNumSegmentsInCell",
"(",
"c",
",",
"i",
")",
"for",
"segIdx",
"in",
"xrange",
"(",
"nSegs",
")",
":",
"tm1seg",
"=",
"tm1",
".",
"getSegmentOnCell",
"(",
"c",
",",
"i",
",",
"segIdx",
")",
"# Loop through all segments in tm2seg and see if any of them match tm1seg",
"res",
"=",
"False",
"for",
"tm2segIdx",
"in",
"xrange",
"(",
"nSegs",
")",
":",
"tm2seg",
"=",
"tm2",
".",
"getSegmentOnCell",
"(",
"c",
",",
"i",
",",
"tm2segIdx",
")",
"if",
"sameSegment",
"(",
"tm1seg",
",",
"tm2seg",
")",
"==",
"True",
":",
"res",
"=",
"True",
"break",
"if",
"res",
"==",
"False",
":",
"print",
"\"\\nSegments are different for cell:\"",
",",
"c",
",",
"i",
"result",
"=",
"False",
"if",
"verbosity",
">=",
"0",
":",
"print",
"\"%s : \"",
"%",
"tm1Label",
",",
"tm1",
".",
"printCell",
"(",
"c",
",",
"i",
")",
"print",
"\"\\n%s : \"",
"%",
"tm2Label",
",",
"tm2",
".",
"printCell",
"(",
"c",
",",
"i",
")",
"if",
"result",
"==",
"True",
"and",
"(",
"verbosity",
">",
"1",
")",
":",
"print",
"\"TM's match\"",
"return",
"result"
] | 39.858491 | 25.235849 |
def get_order_container(self, quote_id):
"""Generate an order container from a quote object.
:param quote_id: ID number of target quote
"""
quote = self.client['Billing_Order_Quote']
container = quote.getRecalculatedOrderContainer(id=quote_id)
return container | [
"def",
"get_order_container",
"(",
"self",
",",
"quote_id",
")",
":",
"quote",
"=",
"self",
".",
"client",
"[",
"'Billing_Order_Quote'",
"]",
"container",
"=",
"quote",
".",
"getRecalculatedOrderContainer",
"(",
"id",
"=",
"quote_id",
")",
"return",
"container"
] | 33.555556 | 16 |
def finalize(self):
"""Closes socket and terminates context
NO-OP if already closed.
"""
if self._context is not None:
if self._socket is not None:
self._close_socket(confused=False)
self._context.term()
self._context = None
self._poll = None | [
"def",
"finalize",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_socket",
"is",
"not",
"None",
":",
"self",
".",
"_close_socket",
"(",
"confused",
"=",
"False",
")",
"self",
".",
"_context",
".",
"term",
"(",
")",
"self",
".",
"_context",
"=",
"None",
"self",
".",
"_poll",
"=",
"None"
] | 27.416667 | 12.416667 |
def checktype(self, val, kind, **kargs):
"""Raise TypeError if val does not satisfy kind."""
if not isinstance(val, kind):
raise TypeError('Expected {}; got {}'.format(
self.str_kind(kind), self.str_valtype(val))) | [
"def",
"checktype",
"(",
"self",
",",
"val",
",",
"kind",
",",
"*",
"*",
"kargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"kind",
")",
":",
"raise",
"TypeError",
"(",
"'Expected {}; got {}'",
".",
"format",
"(",
"self",
".",
"str_kind",
"(",
"kind",
")",
",",
"self",
".",
"str_valtype",
"(",
"val",
")",
")",
")"
] | 53 | 10.4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.