text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def send_message(self, user=None, message=None, channel=None):
""" Todo """
self.logger.info("sending message to %s: %s", user, message)
cid=channel
if not cid:
for cid in self.channels:
if str(self.channels[cid]) == str(user):
channel=cid
self.logger.debug(cid)
if (channel):
self.post('channels/'+cid+'/messages',
json.dumps({'content': message,
'nonce': random_integer(-2**63, 2**63 - 1)}))
else:
logger.error("Unknown user %s",user) | [
"def",
"send_message",
"(",
"self",
",",
"user",
"=",
"None",
",",
"message",
"=",
"None",
",",
"channel",
"=",
"None",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"sending message to %s: %s\"",
",",
"user",
",",
"message",
")",
"cid",
"=",
"channel",
"if",
"not",
"cid",
":",
"for",
"cid",
"in",
"self",
".",
"channels",
":",
"if",
"str",
"(",
"self",
".",
"channels",
"[",
"cid",
"]",
")",
"==",
"str",
"(",
"user",
")",
":",
"channel",
"=",
"cid",
"self",
".",
"logger",
".",
"debug",
"(",
"cid",
")",
"if",
"(",
"channel",
")",
":",
"self",
".",
"post",
"(",
"'channels/'",
"+",
"cid",
"+",
"'/messages'",
",",
"json",
".",
"dumps",
"(",
"{",
"'content'",
":",
"message",
",",
"'nonce'",
":",
"random_integer",
"(",
"-",
"2",
"**",
"63",
",",
"2",
"**",
"63",
"-",
"1",
")",
"}",
")",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"Unknown user %s\"",
",",
"user",
")"
] | 35.733333 | 16 |
def param_query(name: str) -> PAParams:
'''Find the PAParams for a network by its long or short name. Raises
UnsupportedNetwork if no PAParams is found.
'''
for pa_params in params:
if name in (pa_params.network_name, pa_params.network_shortname,):
return pa_params
raise UnsupportedNetwork | [
"def",
"param_query",
"(",
"name",
":",
"str",
")",
"->",
"PAParams",
":",
"for",
"pa_params",
"in",
"params",
":",
"if",
"name",
"in",
"(",
"pa_params",
".",
"network_name",
",",
"pa_params",
".",
"network_shortname",
",",
")",
":",
"return",
"pa_params",
"raise",
"UnsupportedNetwork"
] | 32.3 | 22.3 |
def debug_dump(message, file_prefix="dump"):
"""
Utility while developing to dump message data to play with in the
interpreter
"""
global index
index += 1
with open("%s_%s.dump" % (file_prefix, index), 'w') as f:
f.write(message.SerializeToString())
f.close() | [
"def",
"debug_dump",
"(",
"message",
",",
"file_prefix",
"=",
"\"dump\"",
")",
":",
"global",
"index",
"index",
"+=",
"1",
"with",
"open",
"(",
"\"%s_%s.dump\"",
"%",
"(",
"file_prefix",
",",
"index",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"message",
".",
"SerializeToString",
"(",
")",
")",
"f",
".",
"close",
"(",
")"
] | 24.5 | 19.666667 |
def rest(o) -> Optional[ISeq]:
"""If o is a ISeq, return the elements after the first in o. If o is None,
returns an empty seq. Otherwise, coerces o to a seq and returns the rest."""
if o is None:
return None
if isinstance(o, ISeq):
s = o.rest
if s is None:
return lseq.EMPTY
return s
n = to_seq(o)
if n is None:
return lseq.EMPTY
return n.rest | [
"def",
"rest",
"(",
"o",
")",
"->",
"Optional",
"[",
"ISeq",
"]",
":",
"if",
"o",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"o",
",",
"ISeq",
")",
":",
"s",
"=",
"o",
".",
"rest",
"if",
"s",
"is",
"None",
":",
"return",
"lseq",
".",
"EMPTY",
"return",
"s",
"n",
"=",
"to_seq",
"(",
"o",
")",
"if",
"n",
"is",
"None",
":",
"return",
"lseq",
".",
"EMPTY",
"return",
"n",
".",
"rest"
] | 29.357143 | 16.214286 |
def p_expression_lessthan(self, p):
'expression : expression LT expression'
p[0] = LessThan(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | [
"def",
"p_expression_lessthan",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"LessThan",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
".",
"set_lineno",
"(",
"0",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | 43.25 | 7.75 |
def commitVCS(self, tag=None):
''' Commit the current working directory state (or do nothing if the
working directory is not version controlled)
'''
if not self.vcs:
return
self.vcs.commit(message='version %s' % tag, tag=tag) | [
"def",
"commitVCS",
"(",
"self",
",",
"tag",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"vcs",
":",
"return",
"self",
".",
"vcs",
".",
"commit",
"(",
"message",
"=",
"'version %s'",
"%",
"tag",
",",
"tag",
"=",
"tag",
")"
] | 39.285714 | 21.285714 |
def type_list(self, index_name):
'''
List the types available in an index
'''
request = self.session
url = 'http://%s:%s/%s/_mapping' % (self.host, self.port, index_name)
response = request.get(url)
if request.status_code == 200:
return response[index_name].keys()
else:
return response | [
"def",
"type_list",
"(",
"self",
",",
"index_name",
")",
":",
"request",
"=",
"self",
".",
"session",
"url",
"=",
"'http://%s:%s/%s/_mapping'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"index_name",
")",
"response",
"=",
"request",
".",
"get",
"(",
"url",
")",
"if",
"request",
".",
"status_code",
"==",
"200",
":",
"return",
"response",
"[",
"index_name",
"]",
".",
"keys",
"(",
")",
"else",
":",
"return",
"response"
] | 33.090909 | 15.454545 |
def process_casperjs_stdout(stdout):
"""Parse and digest capture script output.
"""
for line in stdout.splitlines():
bits = line.split(':', 1)
if len(bits) < 2:
bits = ('INFO', bits)
level, msg = bits
if level == 'FATAL':
logger.fatal(msg)
raise CaptureError(msg)
elif level == 'ERROR':
logger.error(msg)
else:
logger.info(msg) | [
"def",
"process_casperjs_stdout",
"(",
"stdout",
")",
":",
"for",
"line",
"in",
"stdout",
".",
"splitlines",
"(",
")",
":",
"bits",
"=",
"line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"len",
"(",
"bits",
")",
"<",
"2",
":",
"bits",
"=",
"(",
"'INFO'",
",",
"bits",
")",
"level",
",",
"msg",
"=",
"bits",
"if",
"level",
"==",
"'FATAL'",
":",
"logger",
".",
"fatal",
"(",
"msg",
")",
"raise",
"CaptureError",
"(",
"msg",
")",
"elif",
"level",
"==",
"'ERROR'",
":",
"logger",
".",
"error",
"(",
"msg",
")",
"else",
":",
"logger",
".",
"info",
"(",
"msg",
")"
] | 27.0625 | 11.25 |
def formfield(self, **kwargs):
"""Gets the form field associated with this field."""
defaults = {
'form_class': LocalizedTextFieldForm
}
defaults.update(kwargs)
return super().formfield(**defaults) | [
"def",
"formfield",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"defaults",
"=",
"{",
"'form_class'",
":",
"LocalizedTextFieldForm",
"}",
"defaults",
".",
"update",
"(",
"kwargs",
")",
"return",
"super",
"(",
")",
".",
"formfield",
"(",
"*",
"*",
"defaults",
")"
] | 27 | 18 |
def first_rec(ofile, Rec, file_type):
"""
opens the file ofile as a magic template file with headers as the keys to Rec
"""
keylist = []
opened = False
# sometimes Windows needs a little extra time to open a file
# or else it throws an error
while not opened:
try:
pmag_out = open(ofile, 'w')
opened = True
except IOError:
time.sleep(1)
outstring = "tab \t" + file_type + "\n"
pmag_out.write(outstring)
keystring = ""
for key in list(Rec.keys()):
keystring = keystring + '\t' + key.strip()
keylist.append(key)
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
pmag_out.close()
return keylist | [
"def",
"first_rec",
"(",
"ofile",
",",
"Rec",
",",
"file_type",
")",
":",
"keylist",
"=",
"[",
"]",
"opened",
"=",
"False",
"# sometimes Windows needs a little extra time to open a file",
"# or else it throws an error",
"while",
"not",
"opened",
":",
"try",
":",
"pmag_out",
"=",
"open",
"(",
"ofile",
",",
"'w'",
")",
"opened",
"=",
"True",
"except",
"IOError",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"outstring",
"=",
"\"tab \\t\"",
"+",
"file_type",
"+",
"\"\\n\"",
"pmag_out",
".",
"write",
"(",
"outstring",
")",
"keystring",
"=",
"\"\"",
"for",
"key",
"in",
"list",
"(",
"Rec",
".",
"keys",
"(",
")",
")",
":",
"keystring",
"=",
"keystring",
"+",
"'\\t'",
"+",
"key",
".",
"strip",
"(",
")",
"keylist",
".",
"append",
"(",
"key",
")",
"keystring",
"=",
"keystring",
"+",
"'\\n'",
"pmag_out",
".",
"write",
"(",
"keystring",
"[",
"1",
":",
"]",
")",
"pmag_out",
".",
"close",
"(",
")",
"return",
"keylist"
] | 29.541667 | 14.208333 |
def distribution_compatible(dist, supported_tags=None):
"""Is this distribution compatible with the given interpreter/platform combination?
:param supported_tags: A list of tag tuples specifying which tags are supported
by the platform in question.
:returns: True if the distribution is compatible, False if it is unrecognized or incompatible.
"""
if supported_tags is None:
supported_tags = get_supported()
package = Package.from_href(dist.location)
if not package:
return False
return package.compatible(supported_tags) | [
"def",
"distribution_compatible",
"(",
"dist",
",",
"supported_tags",
"=",
"None",
")",
":",
"if",
"supported_tags",
"is",
"None",
":",
"supported_tags",
"=",
"get_supported",
"(",
")",
"package",
"=",
"Package",
".",
"from_href",
"(",
"dist",
".",
"location",
")",
"if",
"not",
"package",
":",
"return",
"False",
"return",
"package",
".",
"compatible",
"(",
"supported_tags",
")"
] | 41.384615 | 17.692308 |
def authorize(context, action, target, do_raise=True):
"""Verify that the action is valid on the target in this context.
:param context: monasca project context
:param action: String representing the action to be checked. This
should be colon separated for clarity.
:param target: Dictionary representing the object of the action for
object creation. This should be a dictionary representing
the location of the object e.g.
``{'project_id': 'context.project_id'}``
:param do_raise: if True (the default), raises PolicyNotAuthorized,
if False returns False
:type context: object
:type action: str
:type target: dict
:type do_raise: bool
:return: returns a non-False value (not necessarily True) if authorized,
and the False if not authorized and do_raise if False
:raises oslo_policy.policy.PolicyNotAuthorized: if verification fails
"""
init()
credentials = context.to_policy_values()
try:
result = _ENFORCER.authorize(action, target, credentials,
do_raise=do_raise, action=action)
return result
except policy.PolicyNotRegistered:
LOG.exception('Policy not registered')
raise
except Exception:
LOG.debug('Policy check for %(action)s failed with credentials '
'%(credentials)s',
{'action': action, 'credentials': credentials})
raise | [
"def",
"authorize",
"(",
"context",
",",
"action",
",",
"target",
",",
"do_raise",
"=",
"True",
")",
":",
"init",
"(",
")",
"credentials",
"=",
"context",
".",
"to_policy_values",
"(",
")",
"try",
":",
"result",
"=",
"_ENFORCER",
".",
"authorize",
"(",
"action",
",",
"target",
",",
"credentials",
",",
"do_raise",
"=",
"do_raise",
",",
"action",
"=",
"action",
")",
"return",
"result",
"except",
"policy",
".",
"PolicyNotRegistered",
":",
"LOG",
".",
"exception",
"(",
"'Policy not registered'",
")",
"raise",
"except",
"Exception",
":",
"LOG",
".",
"debug",
"(",
"'Policy check for %(action)s failed with credentials '",
"'%(credentials)s'",
",",
"{",
"'action'",
":",
"action",
",",
"'credentials'",
":",
"credentials",
"}",
")",
"raise"
] | 42.8 | 20.514286 |
def export_cmd_options(self, options_override=None, standalone=False):
"""
Override!
:return:
"""
cmd_options = super(MongosServer, self).export_cmd_options(
options_override=options_override)
# Add configServers arg
cluster = self.get_validate_cluster()
cmd_options["configdb"] = cluster.get_config_db_address()
return cmd_options | [
"def",
"export_cmd_options",
"(",
"self",
",",
"options_override",
"=",
"None",
",",
"standalone",
"=",
"False",
")",
":",
"cmd_options",
"=",
"super",
"(",
"MongosServer",
",",
"self",
")",
".",
"export_cmd_options",
"(",
"options_override",
"=",
"options_override",
")",
"# Add configServers arg",
"cluster",
"=",
"self",
".",
"get_validate_cluster",
"(",
")",
"cmd_options",
"[",
"\"configdb\"",
"]",
"=",
"cluster",
".",
"get_config_db_address",
"(",
")",
"return",
"cmd_options"
] | 29.214286 | 19.928571 |
def copy_file(self, from_path, to_path):
""" Copy file. """
if not op.exists(op.dirname(to_path)):
self.make_directory(op.dirname(to_path))
shutil.copy(from_path, to_path)
logging.debug('File copied: {0}'.format(to_path)) | [
"def",
"copy_file",
"(",
"self",
",",
"from_path",
",",
"to_path",
")",
":",
"if",
"not",
"op",
".",
"exists",
"(",
"op",
".",
"dirname",
"(",
"to_path",
")",
")",
":",
"self",
".",
"make_directory",
"(",
"op",
".",
"dirname",
"(",
"to_path",
")",
")",
"shutil",
".",
"copy",
"(",
"from_path",
",",
"to_path",
")",
"logging",
".",
"debug",
"(",
"'File copied: {0}'",
".",
"format",
"(",
"to_path",
")",
")"
] | 37.142857 | 10.857143 |
def get_content_commit_date(extensions, acceptance_callback=None,
root_dir='.'):
"""Get the datetime for the most recent commit to a project that
affected certain types of content.
Parameters
----------
extensions : sequence of 'str'
Extensions of files to consider in getting the most recent commit
date. For example, ``('rst', 'svg', 'png')`` are content extensions
for a Sphinx project. **Extension comparision is case sensitive.** add
uppercase variants to match uppercase extensions.
acceptance_callback : callable
Callable function whose sole argument is a file path, and returns
`True` or `False` depending on whether the file's commit date should
be considered or not. This callback is only run on files that are
included by ``extensions``. Thus this callback is a way to exclude
specific files that would otherwise be included by their extension.
root_dir : 'str`, optional
Only content contained within this root directory is considered.
This directory must be, or be contained by, a Git repository. This is
the current working directory by default.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found.
"""
logger = logging.getLogger(__name__)
def _null_callback(_):
return True
if acceptance_callback is None:
acceptance_callback = _null_callback
# Cache the repo object for each query
root_dir = os.path.abspath(root_dir)
repo = git.repo.base.Repo(path=root_dir, search_parent_directories=True)
# Iterate over all files with all file extensions, looking for the
# newest commit datetime.
newest_datetime = None
iters = [_iter_filepaths_with_extension(ext, root_dir=root_dir)
for ext in extensions]
for content_path in itertools.chain(*iters):
content_path = os.path.abspath(os.path.join(root_dir, content_path))
if acceptance_callback(content_path):
logger.debug('Found content path %r', content_path)
try:
commit_datetime = read_git_commit_timestamp_for_file(
content_path, repo=repo)
logger.debug('Commit timestamp of %r is %s',
content_path, commit_datetime)
except IOError:
logger.warning(
'Count not get commit for %r, skipping',
content_path)
continue
if not newest_datetime or commit_datetime > newest_datetime:
# Seed initial newest_datetime
# or set a newer newest_datetime
newest_datetime = commit_datetime
logger.debug('Newest commit timestamp is %s', newest_datetime)
logger.debug('Final commit timestamp is %s', newest_datetime)
if newest_datetime is None:
raise RuntimeError('No content files found in {}'.format(root_dir))
return newest_datetime | [
"def",
"get_content_commit_date",
"(",
"extensions",
",",
"acceptance_callback",
"=",
"None",
",",
"root_dir",
"=",
"'.'",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"def",
"_null_callback",
"(",
"_",
")",
":",
"return",
"True",
"if",
"acceptance_callback",
"is",
"None",
":",
"acceptance_callback",
"=",
"_null_callback",
"# Cache the repo object for each query",
"root_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"root_dir",
")",
"repo",
"=",
"git",
".",
"repo",
".",
"base",
".",
"Repo",
"(",
"path",
"=",
"root_dir",
",",
"search_parent_directories",
"=",
"True",
")",
"# Iterate over all files with all file extensions, looking for the",
"# newest commit datetime.",
"newest_datetime",
"=",
"None",
"iters",
"=",
"[",
"_iter_filepaths_with_extension",
"(",
"ext",
",",
"root_dir",
"=",
"root_dir",
")",
"for",
"ext",
"in",
"extensions",
"]",
"for",
"content_path",
"in",
"itertools",
".",
"chain",
"(",
"*",
"iters",
")",
":",
"content_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"content_path",
")",
")",
"if",
"acceptance_callback",
"(",
"content_path",
")",
":",
"logger",
".",
"debug",
"(",
"'Found content path %r'",
",",
"content_path",
")",
"try",
":",
"commit_datetime",
"=",
"read_git_commit_timestamp_for_file",
"(",
"content_path",
",",
"repo",
"=",
"repo",
")",
"logger",
".",
"debug",
"(",
"'Commit timestamp of %r is %s'",
",",
"content_path",
",",
"commit_datetime",
")",
"except",
"IOError",
":",
"logger",
".",
"warning",
"(",
"'Count not get commit for %r, skipping'",
",",
"content_path",
")",
"continue",
"if",
"not",
"newest_datetime",
"or",
"commit_datetime",
">",
"newest_datetime",
":",
"# Seed initial newest_datetime",
"# or set a newer newest_datetime",
"newest_datetime",
"=",
"commit_datetime",
"logger",
".",
"debug",
"(",
"'Newest commit timestamp is %s'",
",",
"newest_datetime",
")",
"logger",
".",
"debug",
"(",
"'Final commit timestamp is %s'",
",",
"newest_datetime",
")",
"if",
"newest_datetime",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'No content files found in {}'",
".",
"format",
"(",
"root_dir",
")",
")",
"return",
"newest_datetime"
] | 39.576923 | 21.769231 |
def _set_meter(self, v, load=False):
"""
Setter method for meter, mapped from YANG variable /openflow_state/meter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_meter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_meter() directly.
YANG Description: Meter
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=meter.meter, is_container='container', presence=False, yang_name="meter", rest_name="meter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-meter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """meter must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=meter.meter, is_container='container', presence=False, yang_name="meter", rest_name="meter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-meter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""",
})
self.__meter = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_meter",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"meter",
".",
"meter",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"meter\"",
",",
"rest_name",
"=",
"\"meter\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'callpoint'",
":",
"u'openflow-meter'",
",",
"u'cli-suppress-show-path'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-openflow-operational'",
",",
"defining_module",
"=",
"'brocade-openflow-operational'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"meter must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=meter.meter, is_container='container', presence=False, yang_name=\"meter\", rest_name=\"meter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-meter', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)\"\"\"",
",",
"}",
")",
"self",
".",
"__meter",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 69.291667 | 33.875 |
def find_guests(names, path=None):
'''
Return a dict of hosts and named guests
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
'''
ret = {}
names = names.split(',')
for data in _list_iter(path=path):
host, stat = next(six.iteritems(data))
for state in stat:
for name in stat[state]:
if name in names:
if host in ret:
ret[host].append(name)
else:
ret[host] = [name]
return ret | [
"def",
"find_guests",
"(",
"names",
",",
"path",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"names",
"=",
"names",
".",
"split",
"(",
"','",
")",
"for",
"data",
"in",
"_list_iter",
"(",
"path",
"=",
"path",
")",
":",
"host",
",",
"stat",
"=",
"next",
"(",
"six",
".",
"iteritems",
"(",
"data",
")",
")",
"for",
"state",
"in",
"stat",
":",
"for",
"name",
"in",
"stat",
"[",
"state",
"]",
":",
"if",
"name",
"in",
"names",
":",
"if",
"host",
"in",
"ret",
":",
"ret",
"[",
"host",
"]",
".",
"append",
"(",
"name",
")",
"else",
":",
"ret",
"[",
"host",
"]",
"=",
"[",
"name",
"]",
"return",
"ret"
] | 25.913043 | 16.086957 |
def get_assignable_repository_ids(self, repository_id):
"""Gets a list of repositories including and under the given repository node in which any asset can be assigned.
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
return: (osid.id.IdList) - list of assignable repository ``Ids``
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
# This will likely be overridden by an authorization adapter
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)
repositories = lookup_session.get_repositories()
id_list = []
for repository in repositories:
id_list.append(repository.get_id())
return IdList(id_list) | [
"def",
"get_assignable_repository_ids",
"(",
"self",
",",
"repository_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids",
"# This will likely be overridden by an authorization adapter",
"mgr",
"=",
"self",
".",
"_get_provider_manager",
"(",
"'REPOSITORY'",
",",
"local",
"=",
"True",
")",
"lookup_session",
"=",
"mgr",
".",
"get_repository_lookup_session",
"(",
"proxy",
"=",
"self",
".",
"_proxy",
")",
"repositories",
"=",
"lookup_session",
".",
"get_repositories",
"(",
")",
"id_list",
"=",
"[",
"]",
"for",
"repository",
"in",
"repositories",
":",
"id_list",
".",
"append",
"(",
"repository",
".",
"get_id",
"(",
")",
")",
"return",
"IdList",
"(",
"id_list",
")"
] | 50.142857 | 19.333333 |
def _build_youtube_dl(worker, destdir, site):
'''
Builds a `youtube_dl.YoutubeDL` for brozzling `site` with `worker`.
The `YoutubeDL` instance does a few special brozzler-specific things:
- keeps track of urls fetched using a `YoutubeDLSpy`
- periodically updates `site.last_claimed` in rethinkdb
- if brozzling through warcprox and downloading segmented videos (e.g.
HLS), pushes the stitched-up video created by youtube-dl to warcprox
using a WARCPROX_WRITE_RECORD request
- some logging
Args:
worker (brozzler.BrozzlerWorker): the calling brozzler worker
destdir (str): where to save downloaded videos
site (brozzler.Site): the site we are brozzling
Returns:
a `youtube_dl.YoutubeDL` instance
'''
class _YoutubeDL(youtube_dl.YoutubeDL):
logger = logging.getLogger(__module__ + "." + __qualname__)
def urlopen(self, req):
try:
url = req.full_url
except AttributeError:
url = req
self.logger.debug('fetching %r', url)
return super().urlopen(req)
def add_default_extra_info(self, ie_result, ie, url):
# hook in some logging
super().add_default_extra_info(ie_result, ie, url)
if ie_result.get('_type') == 'playlist':
self.logger.info(
'extractor %r found playlist in %s', ie.IE_NAME, url)
if ie.IE_NAME == 'youtube:playlist':
# At this point ie_result['entries'] is an iterator that
# will fetch more metadata from youtube to list all the
# videos. We unroll that iterator here partly because
# otherwise `process_ie_result()` will clobber it, and we
# use it later to extract the watch pages as outlinks.
ie_result['entries_no_dl'] = list(ie_result['entries'])
ie_result['entries'] = []
self.logger.info(
'not downoading %s videos from this youtube '
'playlist because we expect to capture them from '
'individual watch pages',
len(ie_result['entries_no_dl']))
else:
self.logger.info(
'extractor %r found a video in %s', ie.IE_NAME, url)
def _push_stitched_up_vid_to_warcprox(self, site, info_dict, ctx):
# XXX Don't know how to get the right content-type. Youtube-dl
# doesn't supply it. Sometimes (with --hls-prefer-native)
# youtube-dl produces a stitched-up video that /usr/bin/file fails
# to identify (says "application/octet-stream"). `ffprobe` doesn't
# give us a mimetype.
if info_dict.get('ext') == 'mp4':
mimetype = 'video/mp4'
else:
try:
import magic
mimetype = magic.from_file(ctx['filename'], mime=True)
except ImportError as e:
mimetype = 'video/%s' % info_dict['ext']
self.logger.warn(
'guessing mimetype %s because %r', mimetype, e)
url = 'youtube-dl:%05d:%s' % (
info_dict.get('playlist_index') or 1,
info_dict['webpage_url'])
size = os.path.getsize(ctx['filename'])
self.logger.info(
'pushing %r video stitched-up as %s (%s bytes) to '
'warcprox at %s with url %s', info_dict['format'],
mimetype, size, worker._proxy_for(site), url)
with open(ctx['filename'], 'rb') as f:
# include content-length header to avoid chunked
# transfer, which warcprox currently rejects
extra_headers = dict(site.extra_headers())
extra_headers['content-length'] = size
request, response = worker._warcprox_write_record(
warcprox_address=worker._proxy_for(site), url=url,
warc_type='resource', content_type=mimetype, payload=f,
extra_headers=extra_headers)
# consulted by _remember_videos()
self.stitch_ups.append({
'url': url,
'response_code': response.code,
'content-type': mimetype,
'content-length': size,
})
def process_info(self, info_dict):
'''
See comment above on `_finish_frag_download()`
'''
def ffd_callback(ffd_self, ctx):
if worker._using_warcprox(site):
self._push_stitched_up_vid_to_warcprox(site, info_dict, ctx)
try:
thread_local.finish_frag_download_callback = ffd_callback
return super().process_info(info_dict)
finally:
delattr(thread_local, 'finish_frag_download_callback')
def maybe_heartbeat_site_last_claimed(*args, **kwargs):
# in case youtube-dl takes a long time, heartbeat site.last_claimed
# to prevent another brozzler-worker from claiming the site
try:
if site.rr and doublethink.utcnow() - site.last_claimed > datetime.timedelta(minutes=worker.SITE_SESSION_MINUTES):
worker.logger.debug(
'heartbeating site.last_claimed to prevent another '
'brozzler-worker claiming this site id=%r', site.id)
site.last_claimed = doublethink.utcnow()
site.save()
except:
worker.logger.debug(
'problem heartbeating site.last_claimed site id=%r',
site.id, exc_info=True)
ydl_opts = {
"outtmpl": "{}/ydl%(autonumber)s.out".format(destdir),
"retries": 1,
"nocheckcertificate": True,
"hls_prefer_native": True,
"noprogress": True,
"nopart": True,
"no_color": True,
"progress_hooks": [maybe_heartbeat_site_last_claimed],
# https://github.com/rg3/youtube-dl/blob/master/README.md#format-selection
# "best: Select the best quality format represented by a single
# file with video and audio."
"format": "best/bestvideo+bestaudio",
### we do our own logging
# "logger": logging.getLogger("youtube_dl"),
"verbose": False,
"quiet": True,
}
if worker._proxy_for(site):
ydl_opts["proxy"] = "http://{}".format(worker._proxy_for(site))
ydl = _YoutubeDL(ydl_opts)
if site.extra_headers():
ydl._opener.add_handler(ExtraHeaderAdder(site.extra_headers()))
ydl.fetch_spy = YoutubeDLSpy()
ydl.stitch_ups = []
ydl._opener.add_handler(ydl.fetch_spy)
return ydl | [
"def",
"_build_youtube_dl",
"(",
"worker",
",",
"destdir",
",",
"site",
")",
":",
"class",
"_YoutubeDL",
"(",
"youtube_dl",
".",
"YoutubeDL",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__module__",
"+",
"\".\"",
"+",
"__qualname__",
")",
"def",
"urlopen",
"(",
"self",
",",
"req",
")",
":",
"try",
":",
"url",
"=",
"req",
".",
"full_url",
"except",
"AttributeError",
":",
"url",
"=",
"req",
"self",
".",
"logger",
".",
"debug",
"(",
"'fetching %r'",
",",
"url",
")",
"return",
"super",
"(",
")",
".",
"urlopen",
"(",
"req",
")",
"def",
"add_default_extra_info",
"(",
"self",
",",
"ie_result",
",",
"ie",
",",
"url",
")",
":",
"# hook in some logging",
"super",
"(",
")",
".",
"add_default_extra_info",
"(",
"ie_result",
",",
"ie",
",",
"url",
")",
"if",
"ie_result",
".",
"get",
"(",
"'_type'",
")",
"==",
"'playlist'",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'extractor %r found playlist in %s'",
",",
"ie",
".",
"IE_NAME",
",",
"url",
")",
"if",
"ie",
".",
"IE_NAME",
"==",
"'youtube:playlist'",
":",
"# At this point ie_result['entries'] is an iterator that",
"# will fetch more metadata from youtube to list all the",
"# videos. We unroll that iterator here partly because",
"# otherwise `process_ie_result()` will clobber it, and we",
"# use it later to extract the watch pages as outlinks.",
"ie_result",
"[",
"'entries_no_dl'",
"]",
"=",
"list",
"(",
"ie_result",
"[",
"'entries'",
"]",
")",
"ie_result",
"[",
"'entries'",
"]",
"=",
"[",
"]",
"self",
".",
"logger",
".",
"info",
"(",
"'not downoading %s videos from this youtube '",
"'playlist because we expect to capture them from '",
"'individual watch pages'",
",",
"len",
"(",
"ie_result",
"[",
"'entries_no_dl'",
"]",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'extractor %r found a video in %s'",
",",
"ie",
".",
"IE_NAME",
",",
"url",
")",
"def",
"_push_stitched_up_vid_to_warcprox",
"(",
"self",
",",
"site",
",",
"info_dict",
",",
"ctx",
")",
":",
"# XXX Don't know how to get the right content-type. Youtube-dl",
"# doesn't supply it. Sometimes (with --hls-prefer-native)",
"# youtube-dl produces a stitched-up video that /usr/bin/file fails",
"# to identify (says \"application/octet-stream\"). `ffprobe` doesn't",
"# give us a mimetype.",
"if",
"info_dict",
".",
"get",
"(",
"'ext'",
")",
"==",
"'mp4'",
":",
"mimetype",
"=",
"'video/mp4'",
"else",
":",
"try",
":",
"import",
"magic",
"mimetype",
"=",
"magic",
".",
"from_file",
"(",
"ctx",
"[",
"'filename'",
"]",
",",
"mime",
"=",
"True",
")",
"except",
"ImportError",
"as",
"e",
":",
"mimetype",
"=",
"'video/%s'",
"%",
"info_dict",
"[",
"'ext'",
"]",
"self",
".",
"logger",
".",
"warn",
"(",
"'guessing mimetype %s because %r'",
",",
"mimetype",
",",
"e",
")",
"url",
"=",
"'youtube-dl:%05d:%s'",
"%",
"(",
"info_dict",
".",
"get",
"(",
"'playlist_index'",
")",
"or",
"1",
",",
"info_dict",
"[",
"'webpage_url'",
"]",
")",
"size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"ctx",
"[",
"'filename'",
"]",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'pushing %r video stitched-up as %s (%s bytes) to '",
"'warcprox at %s with url %s'",
",",
"info_dict",
"[",
"'format'",
"]",
",",
"mimetype",
",",
"size",
",",
"worker",
".",
"_proxy_for",
"(",
"site",
")",
",",
"url",
")",
"with",
"open",
"(",
"ctx",
"[",
"'filename'",
"]",
",",
"'rb'",
")",
"as",
"f",
":",
"# include content-length header to avoid chunked",
"# transfer, which warcprox currently rejects",
"extra_headers",
"=",
"dict",
"(",
"site",
".",
"extra_headers",
"(",
")",
")",
"extra_headers",
"[",
"'content-length'",
"]",
"=",
"size",
"request",
",",
"response",
"=",
"worker",
".",
"_warcprox_write_record",
"(",
"warcprox_address",
"=",
"worker",
".",
"_proxy_for",
"(",
"site",
")",
",",
"url",
"=",
"url",
",",
"warc_type",
"=",
"'resource'",
",",
"content_type",
"=",
"mimetype",
",",
"payload",
"=",
"f",
",",
"extra_headers",
"=",
"extra_headers",
")",
"# consulted by _remember_videos()",
"self",
".",
"stitch_ups",
".",
"append",
"(",
"{",
"'url'",
":",
"url",
",",
"'response_code'",
":",
"response",
".",
"code",
",",
"'content-type'",
":",
"mimetype",
",",
"'content-length'",
":",
"size",
",",
"}",
")",
"def",
"process_info",
"(",
"self",
",",
"info_dict",
")",
":",
"'''\n See comment above on `_finish_frag_download()`\n '''",
"def",
"ffd_callback",
"(",
"ffd_self",
",",
"ctx",
")",
":",
"if",
"worker",
".",
"_using_warcprox",
"(",
"site",
")",
":",
"self",
".",
"_push_stitched_up_vid_to_warcprox",
"(",
"site",
",",
"info_dict",
",",
"ctx",
")",
"try",
":",
"thread_local",
".",
"finish_frag_download_callback",
"=",
"ffd_callback",
"return",
"super",
"(",
")",
".",
"process_info",
"(",
"info_dict",
")",
"finally",
":",
"delattr",
"(",
"thread_local",
",",
"'finish_frag_download_callback'",
")",
"def",
"maybe_heartbeat_site_last_claimed",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# in case youtube-dl takes a long time, heartbeat site.last_claimed",
"# to prevent another brozzler-worker from claiming the site",
"try",
":",
"if",
"site",
".",
"rr",
"and",
"doublethink",
".",
"utcnow",
"(",
")",
"-",
"site",
".",
"last_claimed",
">",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"worker",
".",
"SITE_SESSION_MINUTES",
")",
":",
"worker",
".",
"logger",
".",
"debug",
"(",
"'heartbeating site.last_claimed to prevent another '",
"'brozzler-worker claiming this site id=%r'",
",",
"site",
".",
"id",
")",
"site",
".",
"last_claimed",
"=",
"doublethink",
".",
"utcnow",
"(",
")",
"site",
".",
"save",
"(",
")",
"except",
":",
"worker",
".",
"logger",
".",
"debug",
"(",
"'problem heartbeating site.last_claimed site id=%r'",
",",
"site",
".",
"id",
",",
"exc_info",
"=",
"True",
")",
"ydl_opts",
"=",
"{",
"\"outtmpl\"",
":",
"\"{}/ydl%(autonumber)s.out\"",
".",
"format",
"(",
"destdir",
")",
",",
"\"retries\"",
":",
"1",
",",
"\"nocheckcertificate\"",
":",
"True",
",",
"\"hls_prefer_native\"",
":",
"True",
",",
"\"noprogress\"",
":",
"True",
",",
"\"nopart\"",
":",
"True",
",",
"\"no_color\"",
":",
"True",
",",
"\"progress_hooks\"",
":",
"[",
"maybe_heartbeat_site_last_claimed",
"]",
",",
"# https://github.com/rg3/youtube-dl/blob/master/README.md#format-selection",
"# \"best: Select the best quality format represented by a single",
"# file with video and audio.\"",
"\"format\"",
":",
"\"best/bestvideo+bestaudio\"",
",",
"### we do our own logging",
"# \"logger\": logging.getLogger(\"youtube_dl\"),",
"\"verbose\"",
":",
"False",
",",
"\"quiet\"",
":",
"True",
",",
"}",
"if",
"worker",
".",
"_proxy_for",
"(",
"site",
")",
":",
"ydl_opts",
"[",
"\"proxy\"",
"]",
"=",
"\"http://{}\"",
".",
"format",
"(",
"worker",
".",
"_proxy_for",
"(",
"site",
")",
")",
"ydl",
"=",
"_YoutubeDL",
"(",
"ydl_opts",
")",
"if",
"site",
".",
"extra_headers",
"(",
")",
":",
"ydl",
".",
"_opener",
".",
"add_handler",
"(",
"ExtraHeaderAdder",
"(",
"site",
".",
"extra_headers",
"(",
")",
")",
")",
"ydl",
".",
"fetch_spy",
"=",
"YoutubeDLSpy",
"(",
")",
"ydl",
".",
"stitch_ups",
"=",
"[",
"]",
"ydl",
".",
"_opener",
".",
"add_handler",
"(",
"ydl",
".",
"fetch_spy",
")",
"return",
"ydl"
] | 44.109677 | 21.490323 |
def generateRecords(self, records):
"""Generate multiple records. Refer to definition for generateRecord"""
if self.verbosity>0: print 'Generating', len(records), 'records...'
for record in records:
self.generateRecord(record) | [
"def",
"generateRecords",
"(",
"self",
",",
"records",
")",
":",
"if",
"self",
".",
"verbosity",
">",
"0",
":",
"print",
"'Generating'",
",",
"len",
"(",
"records",
")",
",",
"'records...'",
"for",
"record",
"in",
"records",
":",
"self",
".",
"generateRecord",
"(",
"record",
")"
] | 40 | 16.166667 |
def start_adc_comparator(self, channel, high_threshold, low_threshold,
gain=1, data_rate=None, active_low=True,
traditional=True, latching=False, num_readings=1):
"""Start continuous ADC conversions on the specified channel (0-3) with
the comparator enabled. When enabled the comparator to will check if
the ADC value is within the high_threshold & low_threshold value (both
should be signed 16-bit integers) and trigger the ALERT pin. The
behavior can be controlled by the following parameters:
- active_low: Boolean that indicates if ALERT is pulled low or high
when active/triggered. Default is true, active low.
- traditional: Boolean that indicates if the comparator is in traditional
mode where it fires when the value is within the threshold,
or in window mode where it fires when the value is _outside_
the threshold range. Default is true, traditional mode.
- latching: Boolean that indicates if the alert should be held until
get_last_result() is called to read the value and clear
the alert. Default is false, non-latching.
- num_readings: The number of readings that match the comparator before
triggering the alert. Can be 1, 2, or 4. Default is 1.
Will return an initial conversion result, then call the get_last_result()
function continuously to read the most recent conversion result. Call
stop_adc() to stop conversions.
"""
assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
# Start continuous reads with comparator and set the mux value to the
# channel plus the highest bit (bit 3) set.
return self._read_comparator(channel + 0x04, gain, data_rate,
ADS1x15_CONFIG_MODE_CONTINUOUS,
high_threshold, low_threshold, active_low,
traditional, latching, num_readings) | [
"def",
"start_adc_comparator",
"(",
"self",
",",
"channel",
",",
"high_threshold",
",",
"low_threshold",
",",
"gain",
"=",
"1",
",",
"data_rate",
"=",
"None",
",",
"active_low",
"=",
"True",
",",
"traditional",
"=",
"True",
",",
"latching",
"=",
"False",
",",
"num_readings",
"=",
"1",
")",
":",
"assert",
"0",
"<=",
"channel",
"<=",
"3",
",",
"'Channel must be a value within 0-3!'",
"# Start continuous reads with comparator and set the mux value to the",
"# channel plus the highest bit (bit 3) set.",
"return",
"self",
".",
"_read_comparator",
"(",
"channel",
"+",
"0x04",
",",
"gain",
",",
"data_rate",
",",
"ADS1x15_CONFIG_MODE_CONTINUOUS",
",",
"high_threshold",
",",
"low_threshold",
",",
"active_low",
",",
"traditional",
",",
"latching",
",",
"num_readings",
")"
] | 71.8 | 31.533333 |
def compare_mim_panels(self, existing_panel, new_panel):
"""Check if the latest version of OMIM differs from the most recent in database
Return all genes that where not in the previous version.
Args:
existing_panel(dict)
new_panel(dict)
Returns:
new_genes(set(str))
"""
existing_genes = set([gene['hgnc_id'] for gene in existing_panel['genes']])
new_genes = set([gene['hgnc_id'] for gene in new_panel['genes']])
return new_genes.difference(existing_genes) | [
"def",
"compare_mim_panels",
"(",
"self",
",",
"existing_panel",
",",
"new_panel",
")",
":",
"existing_genes",
"=",
"set",
"(",
"[",
"gene",
"[",
"'hgnc_id'",
"]",
"for",
"gene",
"in",
"existing_panel",
"[",
"'genes'",
"]",
"]",
")",
"new_genes",
"=",
"set",
"(",
"[",
"gene",
"[",
"'hgnc_id'",
"]",
"for",
"gene",
"in",
"new_panel",
"[",
"'genes'",
"]",
"]",
")",
"return",
"new_genes",
".",
"difference",
"(",
"existing_genes",
")"
] | 36.466667 | 22.066667 |
def lookup(self, req, parent, name):
"""Look up a directory entry by name and get its attributes.
Valid replies:
reply_entry
reply_err
"""
self.reply_err(req, errno.ENOENT) | [
"def",
"lookup",
"(",
"self",
",",
"req",
",",
"parent",
",",
"name",
")",
":",
"self",
".",
"reply_err",
"(",
"req",
",",
"errno",
".",
"ENOENT",
")"
] | 27.75 | 12.375 |
def get_server_capabilities(self):
"""Returns the server capabilities
raises: IloError on an error from iLO.
"""
capabilities = {}
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
sushy_manager = self._get_sushy_manager(PROLIANT_MANAGER_ID)
try:
count = len(sushy_system.pci_devices.gpu_devices)
boot_mode = rf_utils.get_supported_boot_mode(
sushy_system.supported_boot_mode)
capabilities.update(
{'pci_gpu_devices': count,
'ilo_firmware_version': sushy_manager.firmware_version,
'rom_firmware_version': sushy_system.rom_version,
'server_model': sushy_system.model,
'nic_capacity': sushy_system.pci_devices.max_nic_capacity,
'boot_mode_bios': boot_mode.boot_mode_bios,
'boot_mode_uefi': boot_mode.boot_mode_uefi})
tpm_state = sushy_system.bios_settings.tpm_state
all_key_to_value_expression_tuples = [
('sriov_enabled',
sushy_system.bios_settings.sriov == sys_cons.SRIOV_ENABLED),
('cpu_vt',
sushy_system.bios_settings.cpu_vt == (
sys_cons.CPUVT_ENABLED)),
('trusted_boot',
(tpm_state == sys_cons.TPM_PRESENT_ENABLED
or tpm_state == sys_cons.TPM_PRESENT_DISABLED)),
('secure_boot', self._has_secure_boot()),
('iscsi_boot',
(sushy_system.bios_settings.iscsi_resource.
is_iscsi_boot_supported())),
('hardware_supports_raid',
len(sushy_system.smart_storage.array_controllers.
members_identities) > 0),
('has_ssd',
common_storage.has_ssd(sushy_system)),
('has_rotational',
common_storage.has_rotational(sushy_system)),
('has_nvme_ssd',
common_storage.has_nvme_ssd(sushy_system))
]
all_key_to_value_expression_tuples += (
[('logical_raid_level_' + x, True)
for x in sushy_system.smart_storage.logical_raid_levels])
all_key_to_value_expression_tuples += (
[('drive_rotational_' + str(x) + '_rpm', True)
for x in
common_storage.get_drive_rotational_speed_rpm(sushy_system)])
capabilities.update(
{key: 'true'
for (key, value) in all_key_to_value_expression_tuples
if value})
memory_data = sushy_system.memory.details()
if memory_data.has_nvdimm_n:
capabilities.update(
{'persistent_memory': (
json.dumps(memory_data.has_persistent_memory)),
'nvdimm_n': (
json.dumps(memory_data.has_nvdimm_n)),
'logical_nvdimm_n': (
json.dumps(memory_data.has_logical_nvdimm_n))})
except sushy.exceptions.SushyError as e:
msg = (self._("The Redfish controller is unable to get "
"resource or its members. Error "
"%(error)s)") % {'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
return capabilities | [
"def",
"get_server_capabilities",
"(",
"self",
")",
":",
"capabilities",
"=",
"{",
"}",
"sushy_system",
"=",
"self",
".",
"_get_sushy_system",
"(",
"PROLIANT_SYSTEM_ID",
")",
"sushy_manager",
"=",
"self",
".",
"_get_sushy_manager",
"(",
"PROLIANT_MANAGER_ID",
")",
"try",
":",
"count",
"=",
"len",
"(",
"sushy_system",
".",
"pci_devices",
".",
"gpu_devices",
")",
"boot_mode",
"=",
"rf_utils",
".",
"get_supported_boot_mode",
"(",
"sushy_system",
".",
"supported_boot_mode",
")",
"capabilities",
".",
"update",
"(",
"{",
"'pci_gpu_devices'",
":",
"count",
",",
"'ilo_firmware_version'",
":",
"sushy_manager",
".",
"firmware_version",
",",
"'rom_firmware_version'",
":",
"sushy_system",
".",
"rom_version",
",",
"'server_model'",
":",
"sushy_system",
".",
"model",
",",
"'nic_capacity'",
":",
"sushy_system",
".",
"pci_devices",
".",
"max_nic_capacity",
",",
"'boot_mode_bios'",
":",
"boot_mode",
".",
"boot_mode_bios",
",",
"'boot_mode_uefi'",
":",
"boot_mode",
".",
"boot_mode_uefi",
"}",
")",
"tpm_state",
"=",
"sushy_system",
".",
"bios_settings",
".",
"tpm_state",
"all_key_to_value_expression_tuples",
"=",
"[",
"(",
"'sriov_enabled'",
",",
"sushy_system",
".",
"bios_settings",
".",
"sriov",
"==",
"sys_cons",
".",
"SRIOV_ENABLED",
")",
",",
"(",
"'cpu_vt'",
",",
"sushy_system",
".",
"bios_settings",
".",
"cpu_vt",
"==",
"(",
"sys_cons",
".",
"CPUVT_ENABLED",
")",
")",
",",
"(",
"'trusted_boot'",
",",
"(",
"tpm_state",
"==",
"sys_cons",
".",
"TPM_PRESENT_ENABLED",
"or",
"tpm_state",
"==",
"sys_cons",
".",
"TPM_PRESENT_DISABLED",
")",
")",
",",
"(",
"'secure_boot'",
",",
"self",
".",
"_has_secure_boot",
"(",
")",
")",
",",
"(",
"'iscsi_boot'",
",",
"(",
"sushy_system",
".",
"bios_settings",
".",
"iscsi_resource",
".",
"is_iscsi_boot_supported",
"(",
")",
")",
")",
",",
"(",
"'hardware_supports_raid'",
",",
"len",
"(",
"sushy_system",
".",
"smart_storage",
".",
"array_controllers",
".",
"members_identities",
")",
">",
"0",
")",
",",
"(",
"'has_ssd'",
",",
"common_storage",
".",
"has_ssd",
"(",
"sushy_system",
")",
")",
",",
"(",
"'has_rotational'",
",",
"common_storage",
".",
"has_rotational",
"(",
"sushy_system",
")",
")",
",",
"(",
"'has_nvme_ssd'",
",",
"common_storage",
".",
"has_nvme_ssd",
"(",
"sushy_system",
")",
")",
"]",
"all_key_to_value_expression_tuples",
"+=",
"(",
"[",
"(",
"'logical_raid_level_'",
"+",
"x",
",",
"True",
")",
"for",
"x",
"in",
"sushy_system",
".",
"smart_storage",
".",
"logical_raid_levels",
"]",
")",
"all_key_to_value_expression_tuples",
"+=",
"(",
"[",
"(",
"'drive_rotational_'",
"+",
"str",
"(",
"x",
")",
"+",
"'_rpm'",
",",
"True",
")",
"for",
"x",
"in",
"common_storage",
".",
"get_drive_rotational_speed_rpm",
"(",
"sushy_system",
")",
"]",
")",
"capabilities",
".",
"update",
"(",
"{",
"key",
":",
"'true'",
"for",
"(",
"key",
",",
"value",
")",
"in",
"all_key_to_value_expression_tuples",
"if",
"value",
"}",
")",
"memory_data",
"=",
"sushy_system",
".",
"memory",
".",
"details",
"(",
")",
"if",
"memory_data",
".",
"has_nvdimm_n",
":",
"capabilities",
".",
"update",
"(",
"{",
"'persistent_memory'",
":",
"(",
"json",
".",
"dumps",
"(",
"memory_data",
".",
"has_persistent_memory",
")",
")",
",",
"'nvdimm_n'",
":",
"(",
"json",
".",
"dumps",
"(",
"memory_data",
".",
"has_nvdimm_n",
")",
")",
",",
"'logical_nvdimm_n'",
":",
"(",
"json",
".",
"dumps",
"(",
"memory_data",
".",
"has_logical_nvdimm_n",
")",
")",
"}",
")",
"except",
"sushy",
".",
"exceptions",
".",
"SushyError",
"as",
"e",
":",
"msg",
"=",
"(",
"self",
".",
"_",
"(",
"\"The Redfish controller is unable to get \"",
"\"resource or its members. Error \"",
"\"%(error)s)\"",
")",
"%",
"{",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")",
"LOG",
".",
"debug",
"(",
"msg",
")",
"raise",
"exception",
".",
"IloError",
"(",
"msg",
")",
"return",
"capabilities"
] | 42.911392 | 18.21519 |
def _download_py2(link, path, __hdr__):
"""Download a file from a link in Python 2."""
try:
req = urllib2.Request(link, headers=__hdr__)
u = urllib2.urlopen(req)
except Exception as e:
raise Exception(' Download failed with the error:\n{}'.format(e))
with open(path, 'wb') as outf:
for l in u:
outf.write(l)
u.close() | [
"def",
"_download_py2",
"(",
"link",
",",
"path",
",",
"__hdr__",
")",
":",
"try",
":",
"req",
"=",
"urllib2",
".",
"Request",
"(",
"link",
",",
"headers",
"=",
"__hdr__",
")",
"u",
"=",
"urllib2",
".",
"urlopen",
"(",
"req",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"' Download failed with the error:\\n{}'",
".",
"format",
"(",
"e",
")",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"outf",
":",
"for",
"l",
"in",
"u",
":",
"outf",
".",
"write",
"(",
"l",
")",
"u",
".",
"close",
"(",
")"
] | 30.916667 | 17.416667 |
def compute(a, b, axis):
"""
Finds optimal displacements localized along an axis
"""
delta = []
for aa, bb in zip(rollaxis(a, axis, 0), rollaxis(b, axis, 0)):
delta.append(Displacement.compute(aa, bb).delta)
return LocalDisplacement(delta, axis=axis) | [
"def",
"compute",
"(",
"a",
",",
"b",
",",
"axis",
")",
":",
"delta",
"=",
"[",
"]",
"for",
"aa",
",",
"bb",
"in",
"zip",
"(",
"rollaxis",
"(",
"a",
",",
"axis",
",",
"0",
")",
",",
"rollaxis",
"(",
"b",
",",
"axis",
",",
"0",
")",
")",
":",
"delta",
".",
"append",
"(",
"Displacement",
".",
"compute",
"(",
"aa",
",",
"bb",
")",
".",
"delta",
")",
"return",
"LocalDisplacement",
"(",
"delta",
",",
"axis",
"=",
"axis",
")"
] | 37.875 | 14.625 |
def get_symbols(self, site='Pro'):
"""
获取支持的交易对
:param site:
:return:
"""
assert site in ['Pro', 'HADAX']
params = {}
path = f'/v1{"/" if site == "Pro" else "/hadax/"}common/symbols'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_get(params, path))
return handle
return _wrapper | [
"def",
"get_symbols",
"(",
"self",
",",
"site",
"=",
"'Pro'",
")",
":",
"assert",
"site",
"in",
"[",
"'Pro'",
",",
"'HADAX'",
"]",
"params",
"=",
"{",
"}",
"path",
"=",
"f'/v1{\"/\" if site == \"Pro\" else \"/hadax/\"}common/symbols'",
"def",
"_wrapper",
"(",
"_func",
")",
":",
"@",
"wraps",
"(",
"_func",
")",
"def",
"handle",
"(",
")",
":",
"_func",
"(",
"api_key_get",
"(",
"params",
",",
"path",
")",
")",
"return",
"handle",
"return",
"_wrapper"
] | 22.888889 | 18.333333 |
def kde_plot_df(df, xlims=None, **kwargs):
"""Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xlims: dict, optional
Dictionary of xlimits - keys are column names and values are lists of
length 2.
num_xticks: int, optional
Number of xticks on each subplot.
figsize: tuple, optional
Size of figure in inches.
nrows: int, optional
Number of rows of subplots.
ncols: int, optional
Number of columns of subplots.
normalize: bool, optional
If true, kde plots are normalized to have the same area under their
curves. If False, their max value is set to 1.
legend: bool, optional
Should a legend be added?
legend_kwargs: dict, optional
Additional kwargs for legend.
Returns
-------
fig: matplotlib figure
"""
assert xlims is None or isinstance(xlims, dict)
figsize = kwargs.pop('figsize', (6.4, 1.5))
num_xticks = kwargs.pop('num_xticks', None)
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))
normalize = kwargs.pop('normalize', True)
legend = kwargs.pop('legend', False)
legend_kwargs = kwargs.pop('legend_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
for nax, col in enumerate(df):
if nrows == 1:
ax = axes[nax]
else:
ax = axes[nax // ncols, nax % ncols]
supmin = df[col].apply(np.min).min()
supmax = df[col].apply(np.max).max()
support = np.linspace(supmin - 0.1 * (supmax - supmin),
supmax + 0.1 * (supmax - supmin), 200)
handles = []
labels = []
for name, samps in df[col].iteritems():
pdf = scipy.stats.gaussian_kde(samps)(support)
if not normalize:
pdf /= pdf.max()
handles.append(ax.plot(support, pdf, label=name)[0])
labels.append(name)
ax.set_ylim(bottom=0)
ax.set_yticks([])
if xlims is not None:
try:
ax.set_xlim(xlims[col])
except KeyError:
pass
ax.set_xlabel(col)
if num_xticks is not None:
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(
nbins=num_xticks))
if legend:
fig.legend(handles, labels, **legend_kwargs)
return fig | [
"def",
"kde_plot_df",
"(",
"df",
",",
"xlims",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"xlims",
"is",
"None",
"or",
"isinstance",
"(",
"xlims",
",",
"dict",
")",
"figsize",
"=",
"kwargs",
".",
"pop",
"(",
"'figsize'",
",",
"(",
"6.4",
",",
"1.5",
")",
")",
"num_xticks",
"=",
"kwargs",
".",
"pop",
"(",
"'num_xticks'",
",",
"None",
")",
"nrows",
"=",
"kwargs",
".",
"pop",
"(",
"'nrows'",
",",
"1",
")",
"ncols",
"=",
"kwargs",
".",
"pop",
"(",
"'ncols'",
",",
"int",
"(",
"np",
".",
"ceil",
"(",
"len",
"(",
"df",
".",
"columns",
")",
"/",
"nrows",
")",
")",
")",
"normalize",
"=",
"kwargs",
".",
"pop",
"(",
"'normalize'",
",",
"True",
")",
"legend",
"=",
"kwargs",
".",
"pop",
"(",
"'legend'",
",",
"False",
")",
"legend_kwargs",
"=",
"kwargs",
".",
"pop",
"(",
"'legend_kwargs'",
",",
"{",
"}",
")",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'Unexpected **kwargs: {0}'",
".",
"format",
"(",
"kwargs",
")",
")",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"nrows",
",",
"ncols",
"=",
"ncols",
",",
"figsize",
"=",
"figsize",
")",
"for",
"nax",
",",
"col",
"in",
"enumerate",
"(",
"df",
")",
":",
"if",
"nrows",
"==",
"1",
":",
"ax",
"=",
"axes",
"[",
"nax",
"]",
"else",
":",
"ax",
"=",
"axes",
"[",
"nax",
"//",
"ncols",
",",
"nax",
"%",
"ncols",
"]",
"supmin",
"=",
"df",
"[",
"col",
"]",
".",
"apply",
"(",
"np",
".",
"min",
")",
".",
"min",
"(",
")",
"supmax",
"=",
"df",
"[",
"col",
"]",
".",
"apply",
"(",
"np",
".",
"max",
")",
".",
"max",
"(",
")",
"support",
"=",
"np",
".",
"linspace",
"(",
"supmin",
"-",
"0.1",
"*",
"(",
"supmax",
"-",
"supmin",
")",
",",
"supmax",
"+",
"0.1",
"*",
"(",
"supmax",
"-",
"supmin",
")",
",",
"200",
")",
"handles",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"for",
"name",
",",
"samps",
"in",
"df",
"[",
"col",
"]",
".",
"iteritems",
"(",
")",
":",
"pdf",
"=",
"scipy",
".",
"stats",
".",
"gaussian_kde",
"(",
"samps",
")",
"(",
"support",
")",
"if",
"not",
"normalize",
":",
"pdf",
"/=",
"pdf",
".",
"max",
"(",
")",
"handles",
".",
"append",
"(",
"ax",
".",
"plot",
"(",
"support",
",",
"pdf",
",",
"label",
"=",
"name",
")",
"[",
"0",
"]",
")",
"labels",
".",
"append",
"(",
"name",
")",
"ax",
".",
"set_ylim",
"(",
"bottom",
"=",
"0",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"if",
"xlims",
"is",
"not",
"None",
":",
"try",
":",
"ax",
".",
"set_xlim",
"(",
"xlims",
"[",
"col",
"]",
")",
"except",
"KeyError",
":",
"pass",
"ax",
".",
"set_xlabel",
"(",
"col",
")",
"if",
"num_xticks",
"is",
"not",
"None",
":",
"ax",
".",
"xaxis",
".",
"set_major_locator",
"(",
"matplotlib",
".",
"ticker",
".",
"MaxNLocator",
"(",
"nbins",
"=",
"num_xticks",
")",
")",
"if",
"legend",
":",
"fig",
".",
"legend",
"(",
"handles",
",",
"labels",
",",
"*",
"*",
"legend_kwargs",
")",
"return",
"fig"
] | 35.421053 | 15.907895 |
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax | [
"def",
"plot_sediment_memory",
"(",
"self",
",",
"ax",
"=",
"None",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"y_prior",
",",
"x_prior",
"=",
"self",
".",
"prior_sediment_memory",
"(",
")",
"ax",
".",
"plot",
"(",
"x_prior",
",",
"y_prior",
",",
"label",
"=",
"'Prior'",
")",
"y_posterior",
"=",
"self",
".",
"mcmcfit",
".",
"sediment_memory",
"density",
"=",
"scipy",
".",
"stats",
".",
"gaussian_kde",
"(",
"y_posterior",
"**",
"(",
"1",
"/",
"self",
".",
"thick",
")",
")",
"density",
".",
"covariance_factor",
"=",
"lambda",
":",
"0.25",
"density",
".",
"_compute_covariance",
"(",
")",
"ax",
".",
"plot",
"(",
"x_prior",
",",
"density",
"(",
"x_prior",
")",
",",
"label",
"=",
"'Posterior'",
")",
"mem_mean",
"=",
"self",
".",
"mcmcsetup",
".",
"mcmc_kws",
"[",
"'mem_mean'",
"]",
"mem_strength",
"=",
"self",
".",
"mcmcsetup",
".",
"mcmc_kws",
"[",
"'mem_strength'",
"]",
"annotstr_template",
"=",
"'mem_strength: {0}\\nmem_mean: {1}\\nthick: {2} cm'",
"annotstr",
"=",
"annotstr_template",
".",
"format",
"(",
"mem_strength",
",",
"mem_mean",
",",
"self",
".",
"thick",
")",
"ax",
".",
"annotate",
"(",
"annotstr",
",",
"xy",
"=",
"(",
"0.9",
",",
"0.9",
")",
",",
"xycoords",
"=",
"'axes fraction'",
",",
"horizontalalignment",
"=",
"'right'",
",",
"verticalalignment",
"=",
"'top'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Density'",
")",
"ax",
".",
"set_xlabel",
"(",
"'Memory (ratio)'",
")",
"ax",
".",
"grid",
"(",
"True",
")",
"return",
"ax"
] | 42.08 | 20.64 |
def append_instances(cls, inst1, inst2):
"""
Merges the two datasets (one-after-the-other). Throws an exception if the datasets aren't compatible.
:param inst1: the first dataset
:type inst1: Instances
:param inst2: the first dataset
:type inst2: Instances
:return: the combined dataset
:rtype: Instances
"""
msg = inst1.equal_headers(inst2)
if msg is not None:
raise Exception("Cannot appent instances: " + msg)
result = cls.copy_instances(inst1)
for i in xrange(inst2.num_instances):
result.add_instance(inst2.get_instance(i))
return result | [
"def",
"append_instances",
"(",
"cls",
",",
"inst1",
",",
"inst2",
")",
":",
"msg",
"=",
"inst1",
".",
"equal_headers",
"(",
"inst2",
")",
"if",
"msg",
"is",
"not",
"None",
":",
"raise",
"Exception",
"(",
"\"Cannot appent instances: \"",
"+",
"msg",
")",
"result",
"=",
"cls",
".",
"copy_instances",
"(",
"inst1",
")",
"for",
"i",
"in",
"xrange",
"(",
"inst2",
".",
"num_instances",
")",
":",
"result",
".",
"add_instance",
"(",
"inst2",
".",
"get_instance",
"(",
"i",
")",
")",
"return",
"result"
] | 36.777778 | 12.444444 |
def uint16_gt(a: int, b: int) -> bool:
"""
Return a > b.
"""
half_mod = 0x8000
return (((a < b) and ((b - a) > half_mod)) or
((a > b) and ((a - b) < half_mod))) | [
"def",
"uint16_gt",
"(",
"a",
":",
"int",
",",
"b",
":",
"int",
")",
"->",
"bool",
":",
"half_mod",
"=",
"0x8000",
"return",
"(",
"(",
"(",
"a",
"<",
"b",
")",
"and",
"(",
"(",
"b",
"-",
"a",
")",
">",
"half_mod",
")",
")",
"or",
"(",
"(",
"a",
">",
"b",
")",
"and",
"(",
"(",
"a",
"-",
"b",
")",
"<",
"half_mod",
")",
")",
")"
] | 26.571429 | 8.571429 |
def continent(self, code: bool = False) -> str:
"""Get a random continent name or continent code.
:param code: Return code of continent.
:return: Continent name.
"""
codes = CONTINENT_CODES if \
code else self._data['continent']
return self.random.choice(codes) | [
"def",
"continent",
"(",
"self",
",",
"code",
":",
"bool",
"=",
"False",
")",
"->",
"str",
":",
"codes",
"=",
"CONTINENT_CODES",
"if",
"code",
"else",
"self",
".",
"_data",
"[",
"'continent'",
"]",
"return",
"self",
".",
"random",
".",
"choice",
"(",
"codes",
")"
] | 31.4 | 11 |
def transpose_mat44(src_mat, transpose_mat=None):
"""Create a transpose of a matrix."""
if not transpose_mat:
transpose_mat = Matrix44()
for i in range(4):
for j in range(4):
transpose_mat.data[i][j] = src_mat.data[j][i]
return transpose_mat | [
"def",
"transpose_mat44",
"(",
"src_mat",
",",
"transpose_mat",
"=",
"None",
")",
":",
"if",
"not",
"transpose_mat",
":",
"transpose_mat",
"=",
"Matrix44",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"for",
"j",
"in",
"range",
"(",
"4",
")",
":",
"transpose_mat",
".",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"src_mat",
".",
"data",
"[",
"j",
"]",
"[",
"i",
"]",
"return",
"transpose_mat"
] | 25.272727 | 19.545455 |
def update(self, table_name, primary_key, instance):
""" replaces document identified by the primary_key or creates one if a matching document does not exist"""
assert isinstance(primary_key, dict)
assert isinstance(instance, BaseDocument)
collection = self._db[table_name]
# work with a copy of the document, as the direct type change of the _id field
# is later negated by the `BaseDocument.to_json` method
document = instance.document
if '_id' in document:
document['_id'] = ObjectId(document['_id'])
update_result = collection.replace_one(filter=primary_key, replacement=document, upsert=True)
if update_result.upserted_id:
instance['_id'] = update_result.upserted_id
return update_result.upserted_id | [
"def",
"update",
"(",
"self",
",",
"table_name",
",",
"primary_key",
",",
"instance",
")",
":",
"assert",
"isinstance",
"(",
"primary_key",
",",
"dict",
")",
"assert",
"isinstance",
"(",
"instance",
",",
"BaseDocument",
")",
"collection",
"=",
"self",
".",
"_db",
"[",
"table_name",
"]",
"# work with a copy of the document, as the direct type change of the _id field",
"# is later negated by the `BaseDocument.to_json` method",
"document",
"=",
"instance",
".",
"document",
"if",
"'_id'",
"in",
"document",
":",
"document",
"[",
"'_id'",
"]",
"=",
"ObjectId",
"(",
"document",
"[",
"'_id'",
"]",
")",
"update_result",
"=",
"collection",
".",
"replace_one",
"(",
"filter",
"=",
"primary_key",
",",
"replacement",
"=",
"document",
",",
"upsert",
"=",
"True",
")",
"if",
"update_result",
".",
"upserted_id",
":",
"instance",
"[",
"'_id'",
"]",
"=",
"update_result",
".",
"upserted_id",
"return",
"update_result",
".",
"upserted_id"
] | 50.1875 | 17.75 |
def sync_list_items(self):
"""
Access the sync_list_items
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
"""
if self._sync_list_items is None:
self._sync_list_items = SyncListItemList(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['sid'],
)
return self._sync_list_items | [
"def",
"sync_list_items",
"(",
"self",
")",
":",
"if",
"self",
".",
"_sync_list_items",
"is",
"None",
":",
"self",
".",
"_sync_list_items",
"=",
"SyncListItemList",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"list_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_sync_list_items"
] | 38 | 16.571429 |
def _get_block_data(self, mat, block):
"""Retrieve a block from a 3D or 4D volume
Parameters
----------
mat: a 3D or 4D volume
block: a tuple containing block information:
- a triple containing the lowest-coordinate voxel in the block
- a triple containing the size in voxels of the block
Returns
-------
In the case of a 3D array, a 3D subarray at the block location
In the case of a 4D array, a 4D subarray at the block location,
including the entire fourth dimension.
"""
(pt, sz) = block
if len(mat.shape) == 3:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2]].copy()
elif len(mat.shape) == 4:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2],
:].copy() | [
"def",
"_get_block_data",
"(",
"self",
",",
"mat",
",",
"block",
")",
":",
"(",
"pt",
",",
"sz",
")",
"=",
"block",
"if",
"len",
"(",
"mat",
".",
"shape",
")",
"==",
"3",
":",
"return",
"mat",
"[",
"pt",
"[",
"0",
"]",
":",
"pt",
"[",
"0",
"]",
"+",
"sz",
"[",
"0",
"]",
",",
"pt",
"[",
"1",
"]",
":",
"pt",
"[",
"1",
"]",
"+",
"sz",
"[",
"1",
"]",
",",
"pt",
"[",
"2",
"]",
":",
"pt",
"[",
"2",
"]",
"+",
"sz",
"[",
"2",
"]",
"]",
".",
"copy",
"(",
")",
"elif",
"len",
"(",
"mat",
".",
"shape",
")",
"==",
"4",
":",
"return",
"mat",
"[",
"pt",
"[",
"0",
"]",
":",
"pt",
"[",
"0",
"]",
"+",
"sz",
"[",
"0",
"]",
",",
"pt",
"[",
"1",
"]",
":",
"pt",
"[",
"1",
"]",
"+",
"sz",
"[",
"1",
"]",
",",
"pt",
"[",
"2",
"]",
":",
"pt",
"[",
"2",
"]",
"+",
"sz",
"[",
"2",
"]",
",",
":",
"]",
".",
"copy",
"(",
")"
] | 31.4 | 17.766667 |
def new(cls, func, args, mon, count=0):
"""
:returns: a new Result instance
"""
try:
with mon:
val = func(*args)
except StopIteration:
res = Result(None, mon, msg='TASK_ENDED')
except Exception:
_etype, exc, tb = sys.exc_info()
res = Result(exc, mon, ''.join(traceback.format_tb(tb)),
count=count)
else:
res = Result(val, mon, count=count)
return res | [
"def",
"new",
"(",
"cls",
",",
"func",
",",
"args",
",",
"mon",
",",
"count",
"=",
"0",
")",
":",
"try",
":",
"with",
"mon",
":",
"val",
"=",
"func",
"(",
"*",
"args",
")",
"except",
"StopIteration",
":",
"res",
"=",
"Result",
"(",
"None",
",",
"mon",
",",
"msg",
"=",
"'TASK_ENDED'",
")",
"except",
"Exception",
":",
"_etype",
",",
"exc",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"res",
"=",
"Result",
"(",
"exc",
",",
"mon",
",",
"''",
".",
"join",
"(",
"traceback",
".",
"format_tb",
"(",
"tb",
")",
")",
",",
"count",
"=",
"count",
")",
"else",
":",
"res",
"=",
"Result",
"(",
"val",
",",
"mon",
",",
"count",
"=",
"count",
")",
"return",
"res"
] | 31.25 | 11.625 |
def expected_related_units(reltype=None):
"""Get a generator for units we expect to join relation based on
goal-state.
Note that you can not use this function for the peer relation, take a look
at expected_peer_units() for that.
This function will raise KeyError if you request information for a
relation type for which juju goal-state does not have information. It will
raise NotImplementedError if used with juju versions without goal-state
support.
Example usage:
log('participant {} of {} joined relation {}'
.format(len(related_units()),
len(list(expected_related_units())),
relation_type()))
:param reltype: Relation type to list data for, default is to list data for
the realtion type we are currently executing a hook for.
:type reltype: str
:returns: iterator
:rtype: types.GeneratorType
:raises: KeyError, NotImplementedError
"""
if not has_juju_version("2.4.4"):
# goal-state existed in 2.4.0, but did not list individual units to
# join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
raise NotImplementedError("goal-state relation unit count")
reltype = reltype or relation_type()
_goal_state = goal_state()
return (key for key in _goal_state['relations'][reltype] if '/' in key) | [
"def",
"expected_related_units",
"(",
"reltype",
"=",
"None",
")",
":",
"if",
"not",
"has_juju_version",
"(",
"\"2.4.4\"",
")",
":",
"# goal-state existed in 2.4.0, but did not list individual units to",
"# join a relation in 2.4.1 through 2.4.3. (LP: #1794739)",
"raise",
"NotImplementedError",
"(",
"\"goal-state relation unit count\"",
")",
"reltype",
"=",
"reltype",
"or",
"relation_type",
"(",
")",
"_goal_state",
"=",
"goal_state",
"(",
")",
"return",
"(",
"key",
"for",
"key",
"in",
"_goal_state",
"[",
"'relations'",
"]",
"[",
"reltype",
"]",
"if",
"'/'",
"in",
"key",
")"
] | 41.625 | 20.84375 |
def WriteEventBody(self, event):
"""Writes the body of an event object to the spreadsheet.
Args:
event (EventObject): event.
"""
for field_name in self._fields:
if field_name == 'datetime':
output_value = self._FormatDateTime(event)
else:
output_value = self._dynamic_fields_helper.GetFormattedField(
event, field_name)
output_value = self._RemoveIllegalXMLCharacters(output_value)
# Auto adjust the column width based on the length of the output value.
column_index = self._fields.index(field_name)
self._column_widths.setdefault(column_index, 0)
if field_name == 'datetime':
column_width = min(
self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2)
else:
column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2)
self._column_widths[column_index] = max(
self._MIN_COLUMN_WIDTH, self._column_widths[column_index],
column_width)
self._sheet.set_column(
column_index, column_index, self._column_widths[column_index])
if (field_name == 'datetime'
and isinstance(output_value, datetime.datetime)):
self._sheet.write_datetime(
self._current_row, column_index, output_value)
else:
self._sheet.write(self._current_row, column_index, output_value)
self._current_row += 1 | [
"def",
"WriteEventBody",
"(",
"self",
",",
"event",
")",
":",
"for",
"field_name",
"in",
"self",
".",
"_fields",
":",
"if",
"field_name",
"==",
"'datetime'",
":",
"output_value",
"=",
"self",
".",
"_FormatDateTime",
"(",
"event",
")",
"else",
":",
"output_value",
"=",
"self",
".",
"_dynamic_fields_helper",
".",
"GetFormattedField",
"(",
"event",
",",
"field_name",
")",
"output_value",
"=",
"self",
".",
"_RemoveIllegalXMLCharacters",
"(",
"output_value",
")",
"# Auto adjust the column width based on the length of the output value.",
"column_index",
"=",
"self",
".",
"_fields",
".",
"index",
"(",
"field_name",
")",
"self",
".",
"_column_widths",
".",
"setdefault",
"(",
"column_index",
",",
"0",
")",
"if",
"field_name",
"==",
"'datetime'",
":",
"column_width",
"=",
"min",
"(",
"self",
".",
"_MAX_COLUMN_WIDTH",
",",
"len",
"(",
"self",
".",
"_timestamp_format",
")",
"+",
"2",
")",
"else",
":",
"column_width",
"=",
"min",
"(",
"self",
".",
"_MAX_COLUMN_WIDTH",
",",
"len",
"(",
"output_value",
")",
"+",
"2",
")",
"self",
".",
"_column_widths",
"[",
"column_index",
"]",
"=",
"max",
"(",
"self",
".",
"_MIN_COLUMN_WIDTH",
",",
"self",
".",
"_column_widths",
"[",
"column_index",
"]",
",",
"column_width",
")",
"self",
".",
"_sheet",
".",
"set_column",
"(",
"column_index",
",",
"column_index",
",",
"self",
".",
"_column_widths",
"[",
"column_index",
"]",
")",
"if",
"(",
"field_name",
"==",
"'datetime'",
"and",
"isinstance",
"(",
"output_value",
",",
"datetime",
".",
"datetime",
")",
")",
":",
"self",
".",
"_sheet",
".",
"write_datetime",
"(",
"self",
".",
"_current_row",
",",
"column_index",
",",
"output_value",
")",
"else",
":",
"self",
".",
"_sheet",
".",
"write",
"(",
"self",
".",
"_current_row",
",",
"column_index",
",",
"output_value",
")",
"self",
".",
"_current_row",
"+=",
"1"
] | 35 | 21.25641 |
def touch(fname):
"""
Mimics the `touch` command
Busy loops until the mtime has actually been changed, use for tests only
"""
orig_mtime = get_mtime(fname)
while get_mtime(fname) == orig_mtime:
pathlib.Path(fname).touch() | [
"def",
"touch",
"(",
"fname",
")",
":",
"orig_mtime",
"=",
"get_mtime",
"(",
"fname",
")",
"while",
"get_mtime",
"(",
"fname",
")",
"==",
"orig_mtime",
":",
"pathlib",
".",
"Path",
"(",
"fname",
")",
".",
"touch",
"(",
")"
] | 27.333333 | 13.555556 |
def pruned_c2cifft(invec, outvec, indices, pretransposed=False):
"""
Perform a pruned iFFT, only valid for power of 2 iffts as the
decomposition is easier to choose. This is not a strict requirement of the
functions, but it is unlikely to the optimal to use anything but power
of 2. (Alex to provide more details in write up.
Parameters
-----------
invec : array
The input vector. This should be the correlation between the data and
the template at full sample rate. Ideally this is pre-transposed, but
if not this will be transposed in this function.
outvec : array
The output of the first phase of the pruned FFT.
indices : array of ints
The indexes at which to calculate the full sample-rate SNR.
pretransposed : boolean, default=False
Used to indicate whether or not invec is pretransposed.
Returns
--------
SNRs : array
The complex SNRs at the indexes given by indices.
"""
N1, N2 = splay(invec)
if not pretransposed:
invec = fft_transpose(invec)
first_phase(invec, outvec, N1=N1, N2=N2)
out = fast_second_phase(outvec, indices, N1=N1, N2=N2)
return out | [
"def",
"pruned_c2cifft",
"(",
"invec",
",",
"outvec",
",",
"indices",
",",
"pretransposed",
"=",
"False",
")",
":",
"N1",
",",
"N2",
"=",
"splay",
"(",
"invec",
")",
"if",
"not",
"pretransposed",
":",
"invec",
"=",
"fft_transpose",
"(",
"invec",
")",
"first_phase",
"(",
"invec",
",",
"outvec",
",",
"N1",
"=",
"N1",
",",
"N2",
"=",
"N2",
")",
"out",
"=",
"fast_second_phase",
"(",
"outvec",
",",
"indices",
",",
"N1",
"=",
"N1",
",",
"N2",
"=",
"N2",
")",
"return",
"out"
] | 36.6875 | 21.875 |
def acquire(lock, blocking=True):
"""Acquire a lock, possibly in a non-blocking fashion.
Includes backwards compatibility hacks for old versions of Python, dask
and dask-distributed.
"""
if blocking:
# no arguments needed
return lock.acquire()
elif DistributedLock is not None and isinstance(lock, DistributedLock):
# distributed.Lock doesn't support the blocking argument yet:
# https://github.com/dask/distributed/pull/2412
return lock.acquire(timeout=0)
else:
# "blocking" keyword argument not supported for:
# - threading.Lock on Python 2.
# - dask.SerializableLock with dask v1.0.0 or earlier.
# - multiprocessing.Lock calls the argument "block" instead.
return lock.acquire(blocking) | [
"def",
"acquire",
"(",
"lock",
",",
"blocking",
"=",
"True",
")",
":",
"if",
"blocking",
":",
"# no arguments needed",
"return",
"lock",
".",
"acquire",
"(",
")",
"elif",
"DistributedLock",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"lock",
",",
"DistributedLock",
")",
":",
"# distributed.Lock doesn't support the blocking argument yet:",
"# https://github.com/dask/distributed/pull/2412",
"return",
"lock",
".",
"acquire",
"(",
"timeout",
"=",
"0",
")",
"else",
":",
"# \"blocking\" keyword argument not supported for:",
"# - threading.Lock on Python 2.",
"# - dask.SerializableLock with dask v1.0.0 or earlier.",
"# - multiprocessing.Lock calls the argument \"block\" instead.",
"return",
"lock",
".",
"acquire",
"(",
"blocking",
")"
] | 41.052632 | 17.105263 |
def get_tc_device(self):
"""
Return a device name that associated network communication direction.
"""
if self.direction == TrafficDirection.OUTGOING:
return self.device
if self.direction == TrafficDirection.INCOMING:
return self.ifb_device
raise ParameterError(
"unknown direction", expected=TrafficDirection.LIST, value=self.direction
) | [
"def",
"get_tc_device",
"(",
"self",
")",
":",
"if",
"self",
".",
"direction",
"==",
"TrafficDirection",
".",
"OUTGOING",
":",
"return",
"self",
".",
"device",
"if",
"self",
".",
"direction",
"==",
"TrafficDirection",
".",
"INCOMING",
":",
"return",
"self",
".",
"ifb_device",
"raise",
"ParameterError",
"(",
"\"unknown direction\"",
",",
"expected",
"=",
"TrafficDirection",
".",
"LIST",
",",
"value",
"=",
"self",
".",
"direction",
")"
] | 30 | 21.857143 |
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset) | [
"def",
"get_offset",
"(",
"self",
")",
":",
"resp",
"=",
"requests",
".",
"head",
"(",
"self",
".",
"url",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"offset",
"=",
"resp",
".",
"headers",
".",
"get",
"(",
"'upload-offset'",
")",
"if",
"offset",
"is",
"None",
":",
"msg",
"=",
"'Attempt to retrieve offset fails with status {}'",
".",
"format",
"(",
"resp",
".",
"status_code",
")",
"raise",
"TusCommunicationError",
"(",
"msg",
",",
"resp",
".",
"status_code",
",",
"resp",
".",
"content",
")",
"return",
"int",
"(",
"offset",
")"
] | 42.846154 | 21 |
def analyze_fa(fa):
"""
analyze fa (names, insertions) and convert fasta to prodigal/cmscan safe file
- find insertions (masked sequence)
- make upper case
- assign names to id number
"""
if fa.name == '<stdin>':
safe = 'temp.id'
else:
safe = '%s.id' % (fa.name)
safe = open(safe, 'w')
sequences = {} # sequences[id] = sequence
insertions = {} # insertions[id] = [[start, stop], [start, stop], ...]
count = 0
id2name = {}
names = []
for seq in parse_fasta(fa):
id = '%010d' % (count,)
name = seq[0].split('>', 1)[1]
id2name[id] = name
id2name[name] = id
names.append(name)
insertions[id] = insertions_from_masked(seq[1])
sequences[id] = seq
print('\n'.join(['>%s' % (id), seq[1].upper()]), file=safe)
count += 1
safe.close()
lookup = open('%s.id.lookup' % (fa.name), 'w')
for i in list(id2name.items()):
print('\t'.join(i), file=lookup)
lookup.close()
return safe.name, sequences, id2name, names, insertions | [
"def",
"analyze_fa",
"(",
"fa",
")",
":",
"if",
"fa",
".",
"name",
"==",
"'<stdin>'",
":",
"safe",
"=",
"'temp.id'",
"else",
":",
"safe",
"=",
"'%s.id'",
"%",
"(",
"fa",
".",
"name",
")",
"safe",
"=",
"open",
"(",
"safe",
",",
"'w'",
")",
"sequences",
"=",
"{",
"}",
"# sequences[id] = sequence",
"insertions",
"=",
"{",
"}",
"# insertions[id] = [[start, stop], [start, stop], ...]",
"count",
"=",
"0",
"id2name",
"=",
"{",
"}",
"names",
"=",
"[",
"]",
"for",
"seq",
"in",
"parse_fasta",
"(",
"fa",
")",
":",
"id",
"=",
"'%010d'",
"%",
"(",
"count",
",",
")",
"name",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
",",
"1",
")",
"[",
"1",
"]",
"id2name",
"[",
"id",
"]",
"=",
"name",
"id2name",
"[",
"name",
"]",
"=",
"id",
"names",
".",
"append",
"(",
"name",
")",
"insertions",
"[",
"id",
"]",
"=",
"insertions_from_masked",
"(",
"seq",
"[",
"1",
"]",
")",
"sequences",
"[",
"id",
"]",
"=",
"seq",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"'>%s'",
"%",
"(",
"id",
")",
",",
"seq",
"[",
"1",
"]",
".",
"upper",
"(",
")",
"]",
")",
",",
"file",
"=",
"safe",
")",
"count",
"+=",
"1",
"safe",
".",
"close",
"(",
")",
"lookup",
"=",
"open",
"(",
"'%s.id.lookup'",
"%",
"(",
"fa",
".",
"name",
")",
",",
"'w'",
")",
"for",
"i",
"in",
"list",
"(",
"id2name",
".",
"items",
"(",
")",
")",
":",
"print",
"(",
"'\\t'",
".",
"join",
"(",
"i",
")",
",",
"file",
"=",
"lookup",
")",
"lookup",
".",
"close",
"(",
")",
"return",
"safe",
".",
"name",
",",
"sequences",
",",
"id2name",
",",
"names",
",",
"insertions"
] | 31.848485 | 15.30303 |
def ruamelindex(self, strictindex):
"""
Get the ruamel equivalent of a strict parsed index.
E.g. 0 -> 0, 1 -> 2, parsed-via-slugify -> Parsed via slugify
"""
return (
self.key_association.get(strictindex, strictindex)
if self.is_mapping()
else strictindex
) | [
"def",
"ruamelindex",
"(",
"self",
",",
"strictindex",
")",
":",
"return",
"(",
"self",
".",
"key_association",
".",
"get",
"(",
"strictindex",
",",
"strictindex",
")",
"if",
"self",
".",
"is_mapping",
"(",
")",
"else",
"strictindex",
")"
] | 30.181818 | 17.272727 |
def kw_map(**kws):
"""
Decorator for renamed keyword arguments, given a keyword argument
mapping "actual_name: kwarg_rename" for each keyword parameter to
be renamed.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for actual_name, kwarg_rename in kws.items():
if kwarg_rename in kwargs:
kwargs[actual_name] = kwargs[kwarg_rename]
del kwargs[kwarg_rename]
return func(*args, **kwargs)
return wrapper
return decorator | [
"def",
"kw_map",
"(",
"*",
"*",
"kws",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"actual_name",
",",
"kwarg_rename",
"in",
"kws",
".",
"items",
"(",
")",
":",
"if",
"kwarg_rename",
"in",
"kwargs",
":",
"kwargs",
"[",
"actual_name",
"]",
"=",
"kwargs",
"[",
"kwarg_rename",
"]",
"del",
"kwargs",
"[",
"kwarg_rename",
"]",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] | 35.1875 | 13.5625 |
def _chien_search_faster(self, sigma):
'''Faster chien search, processing only useful coefficients (the ones in the messages) instead of the whole 2^8 range.
Besides the speed boost, this also allows to fix a number of issue: correctly decoding when the last ecc byte is corrupted, and accepting messages of length n > 2^8.'''
n = self.n
X = []
j = []
p = GF2int(self.generator)
# Normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols
# This also allows to accept messages where n > 2^8.
for l in _range(n):
#l = (i+self.fcr)
# These evaluations could be more efficient, but oh well
if sigma.evaluate( p**(-l) ) == 0: # If it's 0, then bingo! It's an error location
# Compute the error location polynomial X (will be directly used to compute the errors magnitudes inside the Forney algorithm)
X.append( p**l )
# Compute the coefficient position (not the error position, it's actually the reverse: we compute the degree of the term where the error is located. To get the error position, just compute n-1-j).
# This is different than the notes, I think the notes were in error
# Notes said j values were just l, when it's actually 255-l
j.append(l)
# Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial
errs_nb = len(sigma) - 1 # compute the exact number of errors/errata that this error locator should find
if len(j) != errs_nb:
# Note: decoding messages+ecc with length n > self.gf2_charac does work partially, but it's wrong, because you will get duplicated values, and then Chien Search cannot discriminate which root is correct and which is not. The duplication of values is normally prevented by the prime polynomial reduction when generating the field (see init_lut() in ff.py), but if you overflow the field, you have no guarantee anymore. We may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound because of overflow so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue with that bruteforce approach is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so there's clearly no way to decode a message of greater length than the field.
raise RSCodecError("Too many (or few) errors found by Chien Search for the errata locator polynomial!")
return X, j | [
"def",
"_chien_search_faster",
"(",
"self",
",",
"sigma",
")",
":",
"n",
"=",
"self",
".",
"n",
"X",
"=",
"[",
"]",
"j",
"=",
"[",
"]",
"p",
"=",
"GF2int",
"(",
"self",
".",
"generator",
")",
"# Normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols",
"# This also allows to accept messages where n > 2^8.",
"for",
"l",
"in",
"_range",
"(",
"n",
")",
":",
"#l = (i+self.fcr)",
"# These evaluations could be more efficient, but oh well",
"if",
"sigma",
".",
"evaluate",
"(",
"p",
"**",
"(",
"-",
"l",
")",
")",
"==",
"0",
":",
"# If it's 0, then bingo! It's an error location",
"# Compute the error location polynomial X (will be directly used to compute the errors magnitudes inside the Forney algorithm)",
"X",
".",
"append",
"(",
"p",
"**",
"l",
")",
"# Compute the coefficient position (not the error position, it's actually the reverse: we compute the degree of the term where the error is located. To get the error position, just compute n-1-j).",
"# This is different than the notes, I think the notes were in error",
"# Notes said j values were just l, when it's actually 255-l",
"j",
".",
"append",
"(",
"l",
")",
"# Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial",
"errs_nb",
"=",
"len",
"(",
"sigma",
")",
"-",
"1",
"# compute the exact number of errors/errata that this error locator should find",
"if",
"len",
"(",
"j",
")",
"!=",
"errs_nb",
":",
"# Note: decoding messages+ecc with length n > self.gf2_charac does work partially, but it's wrong, because you will get duplicated values, and then Chien Search cannot discriminate which root is correct and which is not. The duplication of values is normally prevented by the prime polynomial reduction when generating the field (see init_lut() in ff.py), but if you overflow the field, you have no guarantee anymore. We may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound because of overflow so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue with that bruteforce approach is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so there's clearly no way to decode a message of greater length than the field.",
"raise",
"RSCodecError",
"(",
"\"Too many (or few) errors found by Chien Search for the errata locator polynomial!\"",
")",
"return",
"X",
",",
"j"
] | 107.814815 | 85.518519 |
def add_node_set_configuration(self, param_name, node_to_value):
"""
Set Nodes parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param node_to_value: dictionary mapping each node a parameter value
"""
for nid, val in future.utils.iteritems(node_to_value):
self.add_node_configuration(param_name, nid, val) | [
"def",
"add_node_set_configuration",
"(",
"self",
",",
"param_name",
",",
"node_to_value",
")",
":",
"for",
"nid",
",",
"val",
"in",
"future",
".",
"utils",
".",
"iteritems",
"(",
"node_to_value",
")",
":",
"self",
".",
"add_node_configuration",
"(",
"param_name",
",",
"nid",
",",
"val",
")"
] | 43.777778 | 22 |
def post_event_unpublish(self, id, **data):
"""
POST /events/:id/unpublish/
Unpublishes an event. In order for a free event to be unpublished, it must not have any pending or completed orders,
even if the event is in the past. In order for a paid event to be unpublished, it must not have any pending or completed
orders, unless the event has been completed and paid out. Returns a boolean indicating success or failure of the
unpublish.
"""
return self.post("/events/{0}/unpublish/".format(id), data=data) | [
"def",
"post_event_unpublish",
"(",
"self",
",",
"id",
",",
"*",
"*",
"data",
")",
":",
"return",
"self",
".",
"post",
"(",
"\"/events/{0}/unpublish/\"",
".",
"format",
"(",
"id",
")",
",",
"data",
"=",
"data",
")"
] | 57 | 34.6 |
def _checkAllAdmxPolicies(policy_class,
adml_language='en-US',
return_full_policy_names=False,
hierarchical_return=False,
return_not_configured=False):
'''
rewrite of _getAllAdminTemplateSettingsFromRegPolFile where instead of
looking only at the contents of the file, we're going to loop through every
policy and look in the registry.pol file to determine if it is
enabled/disabled/not configured
'''
log.debug('POLICY CLASS == %s', policy_class)
module_policy_data = _policy_info()
policy_file_data = _read_regpol_file(module_policy_data.admx_registry_classes[policy_class]['policy_path'])
admx_policies = []
policy_vals = {}
hierarchy = {}
full_names = {}
admx_policy_definitions = _get_policy_definitions(language=adml_language)
adml_policy_resources = _get_policy_resources(language=adml_language)
if policy_file_data:
log.debug('POLICY CLASS %s has file data', policy_class)
policy_filedata_split = re.sub(
salt.utils.stringutils.to_bytes(r'\]{0}$'.format(chr(0))),
b'',
re.sub(salt.utils.stringutils.to_bytes(r'^\[{0}'.format(chr(0))),
b'',
re.sub(re.escape(module_policy_data.reg_pol_header.encode('utf-16-le')),
b'',
policy_file_data))).split(']['.encode('utf-16-le'))
for policy_item in policy_filedata_split:
policy_item_key = policy_item.split('{0};'.format(chr(0)).encode('utf-16-le'))[0].decode('utf-16-le').lower()
if policy_item_key:
for admx_item in REGKEY_XPATH(admx_policy_definitions, keyvalue=policy_item_key):
if etree.QName(admx_item).localname == 'policy':
if admx_item not in admx_policies:
admx_policies.append(admx_item)
else:
for policy_item in POLICY_ANCESTOR_XPATH(admx_item):
if policy_item not in admx_policies:
admx_policies.append(policy_item)
log.debug('%s policies to examine', len(admx_policies))
if return_not_configured:
log.debug('returning non configured policies')
not_configured_policies = ALL_CLASS_POLICY_XPATH(admx_policy_definitions, registry_class=policy_class)
for policy_item in admx_policies:
if policy_item in not_configured_policies:
not_configured_policies.remove(policy_item)
for not_configured_policy in not_configured_policies:
not_configured_policy_namespace = not_configured_policy.nsmap[not_configured_policy.prefix]
if not_configured_policy_namespace not in policy_vals:
policy_vals[not_configured_policy_namespace] = {}
policy_vals[not_configured_policy_namespace][not_configured_policy.attrib['name']] = 'Not Configured'
if return_full_policy_names:
if not_configured_policy_namespace not in full_names:
full_names[not_configured_policy_namespace] = {}
full_names[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _getFullPolicyName(
policy_item=not_configured_policy,
policy_name=not_configured_policy.attrib['name'],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
log.debug('building hierarchy for non-configured item %s',
not_configured_policy.attrib['name'])
if not_configured_policy_namespace not in hierarchy:
hierarchy[not_configured_policy_namespace] = {}
hierarchy[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _build_parent_list(
policy_definition=not_configured_policy,
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
for admx_policy in admx_policies:
this_valuename = None
this_policy_setting = 'Not Configured'
element_only_enabled_disabled = True
explicit_enable_disable_value_setting = False
if 'key' in admx_policy.attrib:
this_key = admx_policy.attrib['key']
else:
log.error('policy item %s does not have the required "key" '
'attribute', admx_policy.attrib)
break
if 'valueName' in admx_policy.attrib:
this_valuename = admx_policy.attrib['valueName']
if 'name' in admx_policy.attrib:
this_policyname = admx_policy.attrib['name']
else:
log.error('policy item %s does not have the required "name" '
'attribute', admx_policy.attrib)
break
this_policynamespace = admx_policy.nsmap[admx_policy.prefix]
if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
# some policies have a disabled list but not an enabled list
# added this to address those issues
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
this_policyname,
this_key,
this_valuename,
ENABLED_VALUE_XPATH,
policy_file_data):
this_policy_setting = 'Enabled'
log.debug('%s is enabled by detected ENABLED_VALUE_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
# some policies have a disabled list but not an enabled list
# added this to address those issues
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
this_policyname,
this_key,
this_valuename,
DISABLED_VALUE_XPATH,
policy_file_data):
this_policy_setting = 'Disabled'
log.debug('%s is disabled by detected DISABLED_VALUE_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, ENABLED_LIST_XPATH, policy_file_data):
this_policy_setting = 'Enabled'
log.debug('%s is enabled by detected ENABLED_LIST_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_file_data):
this_policy_setting = 'Disabled'
log.debug('%s is disabled by detected DISABLED_LIST_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if not explicit_enable_disable_value_setting and this_valuename:
# the policy has a key/valuename but no explicit enabled/Disabled
# Value or List
# these seem to default to a REG_DWORD 1 = "Enabled" **del. = "Disabled"
if _regexSearchRegPolData(re.escape(_buildKnownDataSearchString(this_key,
this_valuename,
'REG_DWORD',
'1')),
policy_file_data):
this_policy_setting = 'Enabled'
log.debug('%s is enabled by no explicit enable/disable list or value', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
elif _regexSearchRegPolData(re.escape(_buildKnownDataSearchString(this_key,
this_valuename,
'REG_DWORD',
None,
check_deleted=True)),
policy_file_data):
this_policy_setting = 'Disabled'
log.debug('%s is disabled by no explicit enable/disable list or value', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if ELEMENTS_XPATH(admx_policy):
if element_only_enabled_disabled or this_policy_setting == 'Enabled':
# TODO does this need to be modified based on the 'required' attribute?
required_elements = {}
configured_elements = {}
policy_disabled_elements = 0
for elements_item in ELEMENTS_XPATH(admx_policy):
for child_item in elements_item.getchildren():
this_element_name = _getFullPolicyName(
policy_item=child_item,
policy_name=child_item.attrib['id'],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
required_elements[this_element_name] = None
child_key = this_key
child_valuename = this_valuename
if 'key' in child_item.attrib:
child_key = child_item.attrib['key']
if 'valueName' in child_item.attrib:
child_valuename = child_item.attrib['valueName']
if etree.QName(child_item).localname == 'boolean':
# https://msdn.microsoft.com/en-us/library/dn605978(v=vs.85).aspx
if child_item.getchildren():
if TRUE_VALUE_XPATH(child_item) and this_element_name not in configured_elements:
if _checkValueItemParent(child_item,
this_policyname,
child_key,
child_valuename,
TRUE_VALUE_XPATH,
policy_file_data):
configured_elements[this_element_name] = True
log.debug('element %s is configured true',
child_item.attrib['id'])
if FALSE_VALUE_XPATH(child_item) and this_element_name not in configured_elements:
if _checkValueItemParent(child_item,
this_policyname,
child_key,
child_valuename,
FALSE_VALUE_XPATH,
policy_file_data):
configured_elements[this_element_name] = False
policy_disabled_elements = policy_disabled_elements + 1
log.debug('element %s is configured false',
child_item.attrib['id'])
# WARNING - no standard ADMX files use true/falseList
# so this hasn't actually been tested
if TRUE_LIST_XPATH(child_item) and this_element_name not in configured_elements:
log.debug('checking trueList')
if _checkListItem(child_item,
this_policyname,
this_key,
TRUE_LIST_XPATH,
policy_file_data):
configured_elements[this_element_name] = True
log.debug('element %s is configured true',
child_item.attrib['id'])
if FALSE_LIST_XPATH(child_item) and this_element_name not in configured_elements:
log.debug('checking falseList')
if _checkListItem(child_item,
this_policyname,
this_key,
FALSE_LIST_XPATH,
policy_file_data):
configured_elements[this_element_name] = False
policy_disabled_elements = policy_disabled_elements + 1
log.debug('element %s is configured false',
child_item.attrib['id'])
else:
if _regexSearchRegPolData(re.escape(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True)),
policy_file_data):
configured_elements[this_element_name] = False
policy_disabled_elements = policy_disabled_elements + 1
log.debug('element %s is configured false', child_item.attrib['id'])
elif _regexSearchRegPolData(re.escape(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False)),
policy_file_data):
configured_elements[this_element_name] = True
log.debug('element %s is configured true',
child_item.attrib['id'])
elif etree.QName(child_item).localname == 'decimal' \
or etree.QName(child_item).localname == 'text' \
or etree.QName(child_item).localname == 'longDecimal' \
or etree.QName(child_item).localname == 'multiText':
# https://msdn.microsoft.com/en-us/library/dn605987(v=vs.85).aspx
if _regexSearchRegPolData(re.escape(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True)),
policy_file_data):
configured_elements[this_element_name] = 'Disabled'
policy_disabled_elements = policy_disabled_elements + 1
log.debug('element %s is disabled',
child_item.attrib['id'])
elif _regexSearchRegPolData(re.escape(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False)),
policy_file_data):
configured_value = _getDataFromRegPolData(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False),
policy_file_data)
configured_elements[this_element_name] = configured_value
log.debug('element %s is enabled, value == %s',
child_item.attrib['id'],
configured_value)
elif etree.QName(child_item).localname == 'enum':
if _regexSearchRegPolData(re.escape(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True)),
policy_file_data):
log.debug('enum element %s is disabled',
child_item.attrib['id'])
configured_elements[this_element_name] = 'Disabled'
policy_disabled_elements = policy_disabled_elements + 1
else:
for enum_item in child_item.getchildren():
if _checkValueItemParent(enum_item,
child_item.attrib['id'],
child_key,
child_valuename,
VALUE_XPATH,
policy_file_data):
if VALUE_LIST_XPATH(enum_item):
log.debug('enum item has a valueList')
if _checkListItem(enum_item,
this_policyname,
child_key,
VALUE_LIST_XPATH,
policy_file_data):
log.debug('all valueList items exist in file')
configured_elements[this_element_name] = _getAdmlDisplayName(
adml_policy_resources,
enum_item.attrib['displayName'])
break
else:
configured_elements[this_element_name] = _getAdmlDisplayName(
adml_policy_resources,
enum_item.attrib['displayName'])
break
elif etree.QName(child_item).localname == 'list':
return_value_name = False
if 'explicitValue' in child_item.attrib \
and child_item.attrib['explicitValue'].lower() == 'true':
log.debug('explicitValue list, we will return value names')
return_value_name = True
if _regexSearchRegPolData(re.escape(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False)
) + salt.utils.stringutils.to_bytes(r'(?!\*\*delvals\.)'),
policy_file_data):
configured_value = _getDataFromRegPolData(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False),
policy_file_data,
return_value_name=return_value_name)
configured_elements[this_element_name] = configured_value
log.debug('element %s is enabled values: %s',
child_item.attrib['id'],
configured_value)
elif _regexSearchRegPolData(re.escape(_processValueItem(child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True)),
policy_file_data):
configured_elements[this_element_name] = "Disabled"
policy_disabled_elements = policy_disabled_elements + 1
log.debug('element %s is disabled', child_item.attrib['id'])
if element_only_enabled_disabled:
if required_elements \
and len(configured_elements) == len(required_elements):
if policy_disabled_elements == len(required_elements):
log.debug('%s is disabled by all enum elements', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = 'Disabled'
else:
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = configured_elements
log.debug('%s is enabled by enum elements', this_policyname)
else:
if this_policy_setting == 'Enabled':
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = configured_elements
if return_full_policy_names and this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]:
if this_policynamespace not in full_names:
full_names[this_policynamespace] = {}
full_names[this_policynamespace][this_policyname] = _getFullPolicyName(
policy_item=admx_policy,
policy_name=admx_policy.attrib['name'],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
if this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]:
if this_policynamespace not in hierarchy:
hierarchy[this_policynamespace] = {}
hierarchy[this_policynamespace][this_policyname] = _build_parent_list(
policy_definition=admx_policy,
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
if policy_vals and return_full_policy_names and not hierarchical_return:
unpathed_dict = {}
pathed_dict = {}
for policy_namespace in list(policy_vals):
for policy_item in list(policy_vals[policy_namespace]):
if full_names[policy_namespace][policy_item] in policy_vals[policy_namespace]:
# add this item with the path'd full name
full_path_list = hierarchy[policy_namespace][policy_item]
full_path_list.reverse()
full_path_list.append(full_names[policy_namespace][policy_item])
policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(policy_item)
pathed_dict[full_names[policy_namespace][policy_item]] = True
else:
policy_vals[policy_namespace][full_names[policy_namespace][policy_item]] = policy_vals[policy_namespace].pop(policy_item)
if policy_namespace not in unpathed_dict:
unpathed_dict[policy_namespace] = {}
unpathed_dict[policy_namespace][full_names[policy_namespace][policy_item]] = policy_item
# go back and remove any "unpathed" policies that need a full path
for path_needed in unpathed_dict[policy_namespace]:
# remove the item with the same full name and re-add it w/a path'd version
full_path_list = hierarchy[policy_namespace][unpathed_dict[policy_namespace][path_needed]]
full_path_list.reverse()
full_path_list.append(path_needed)
log.debug('full_path_list == %s', full_path_list)
policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(path_needed)
for policy_namespace in list(policy_vals):
if policy_vals[policy_namespace] == {}:
policy_vals.pop(policy_namespace)
if policy_vals and hierarchical_return:
if hierarchy:
for policy_namespace in hierarchy:
for hierarchy_item in hierarchy[policy_namespace]:
if hierarchy_item in policy_vals[policy_namespace]:
tdict = {}
first_item = True
for item in hierarchy[policy_namespace][hierarchy_item]:
newdict = {}
if first_item:
h_policy_name = hierarchy_item
if return_full_policy_names:
h_policy_name = full_names[policy_namespace][hierarchy_item]
newdict[item] = {h_policy_name: policy_vals[policy_namespace].pop(hierarchy_item)}
first_item = False
else:
newdict[item] = tdict
tdict = newdict
if tdict:
policy_vals = dictupdate.update(policy_vals, tdict)
if policy_namespace in policy_vals and policy_vals[policy_namespace] == {}:
policy_vals.pop(policy_namespace)
policy_vals = {
module_policy_data.admx_registry_classes[policy_class]['lgpo_section']: {
'Administrative Templates': policy_vals
}
}
return policy_vals | [
"def",
"_checkAllAdmxPolicies",
"(",
"policy_class",
",",
"adml_language",
"=",
"'en-US'",
",",
"return_full_policy_names",
"=",
"False",
",",
"hierarchical_return",
"=",
"False",
",",
"return_not_configured",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'POLICY CLASS == %s'",
",",
"policy_class",
")",
"module_policy_data",
"=",
"_policy_info",
"(",
")",
"policy_file_data",
"=",
"_read_regpol_file",
"(",
"module_policy_data",
".",
"admx_registry_classes",
"[",
"policy_class",
"]",
"[",
"'policy_path'",
"]",
")",
"admx_policies",
"=",
"[",
"]",
"policy_vals",
"=",
"{",
"}",
"hierarchy",
"=",
"{",
"}",
"full_names",
"=",
"{",
"}",
"admx_policy_definitions",
"=",
"_get_policy_definitions",
"(",
"language",
"=",
"adml_language",
")",
"adml_policy_resources",
"=",
"_get_policy_resources",
"(",
"language",
"=",
"adml_language",
")",
"if",
"policy_file_data",
":",
"log",
".",
"debug",
"(",
"'POLICY CLASS %s has file data'",
",",
"policy_class",
")",
"policy_filedata_split",
"=",
"re",
".",
"sub",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"r'\\]{0}$'",
".",
"format",
"(",
"chr",
"(",
"0",
")",
")",
")",
",",
"b''",
",",
"re",
".",
"sub",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"r'^\\[{0}'",
".",
"format",
"(",
"chr",
"(",
"0",
")",
")",
")",
",",
"b''",
",",
"re",
".",
"sub",
"(",
"re",
".",
"escape",
"(",
"module_policy_data",
".",
"reg_pol_header",
".",
"encode",
"(",
"'utf-16-le'",
")",
")",
",",
"b''",
",",
"policy_file_data",
")",
")",
")",
".",
"split",
"(",
"']['",
".",
"encode",
"(",
"'utf-16-le'",
")",
")",
"for",
"policy_item",
"in",
"policy_filedata_split",
":",
"policy_item_key",
"=",
"policy_item",
".",
"split",
"(",
"'{0};'",
".",
"format",
"(",
"chr",
"(",
"0",
")",
")",
".",
"encode",
"(",
"'utf-16-le'",
")",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-16-le'",
")",
".",
"lower",
"(",
")",
"if",
"policy_item_key",
":",
"for",
"admx_item",
"in",
"REGKEY_XPATH",
"(",
"admx_policy_definitions",
",",
"keyvalue",
"=",
"policy_item_key",
")",
":",
"if",
"etree",
".",
"QName",
"(",
"admx_item",
")",
".",
"localname",
"==",
"'policy'",
":",
"if",
"admx_item",
"not",
"in",
"admx_policies",
":",
"admx_policies",
".",
"append",
"(",
"admx_item",
")",
"else",
":",
"for",
"policy_item",
"in",
"POLICY_ANCESTOR_XPATH",
"(",
"admx_item",
")",
":",
"if",
"policy_item",
"not",
"in",
"admx_policies",
":",
"admx_policies",
".",
"append",
"(",
"policy_item",
")",
"log",
".",
"debug",
"(",
"'%s policies to examine'",
",",
"len",
"(",
"admx_policies",
")",
")",
"if",
"return_not_configured",
":",
"log",
".",
"debug",
"(",
"'returning non configured policies'",
")",
"not_configured_policies",
"=",
"ALL_CLASS_POLICY_XPATH",
"(",
"admx_policy_definitions",
",",
"registry_class",
"=",
"policy_class",
")",
"for",
"policy_item",
"in",
"admx_policies",
":",
"if",
"policy_item",
"in",
"not_configured_policies",
":",
"not_configured_policies",
".",
"remove",
"(",
"policy_item",
")",
"for",
"not_configured_policy",
"in",
"not_configured_policies",
":",
"not_configured_policy_namespace",
"=",
"not_configured_policy",
".",
"nsmap",
"[",
"not_configured_policy",
".",
"prefix",
"]",
"if",
"not_configured_policy_namespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"not_configured_policy_namespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"not_configured_policy_namespace",
"]",
"[",
"not_configured_policy",
".",
"attrib",
"[",
"'name'",
"]",
"]",
"=",
"'Not Configured'",
"if",
"return_full_policy_names",
":",
"if",
"not_configured_policy_namespace",
"not",
"in",
"full_names",
":",
"full_names",
"[",
"not_configured_policy_namespace",
"]",
"=",
"{",
"}",
"full_names",
"[",
"not_configured_policy_namespace",
"]",
"[",
"not_configured_policy",
".",
"attrib",
"[",
"'name'",
"]",
"]",
"=",
"_getFullPolicyName",
"(",
"policy_item",
"=",
"not_configured_policy",
",",
"policy_name",
"=",
"not_configured_policy",
".",
"attrib",
"[",
"'name'",
"]",
",",
"return_full_policy_names",
"=",
"return_full_policy_names",
",",
"adml_language",
"=",
"adml_language",
")",
"log",
".",
"debug",
"(",
"'building hierarchy for non-configured item %s'",
",",
"not_configured_policy",
".",
"attrib",
"[",
"'name'",
"]",
")",
"if",
"not_configured_policy_namespace",
"not",
"in",
"hierarchy",
":",
"hierarchy",
"[",
"not_configured_policy_namespace",
"]",
"=",
"{",
"}",
"hierarchy",
"[",
"not_configured_policy_namespace",
"]",
"[",
"not_configured_policy",
".",
"attrib",
"[",
"'name'",
"]",
"]",
"=",
"_build_parent_list",
"(",
"policy_definition",
"=",
"not_configured_policy",
",",
"return_full_policy_names",
"=",
"return_full_policy_names",
",",
"adml_language",
"=",
"adml_language",
")",
"for",
"admx_policy",
"in",
"admx_policies",
":",
"this_valuename",
"=",
"None",
"this_policy_setting",
"=",
"'Not Configured'",
"element_only_enabled_disabled",
"=",
"True",
"explicit_enable_disable_value_setting",
"=",
"False",
"if",
"'key'",
"in",
"admx_policy",
".",
"attrib",
":",
"this_key",
"=",
"admx_policy",
".",
"attrib",
"[",
"'key'",
"]",
"else",
":",
"log",
".",
"error",
"(",
"'policy item %s does not have the required \"key\" '",
"'attribute'",
",",
"admx_policy",
".",
"attrib",
")",
"break",
"if",
"'valueName'",
"in",
"admx_policy",
".",
"attrib",
":",
"this_valuename",
"=",
"admx_policy",
".",
"attrib",
"[",
"'valueName'",
"]",
"if",
"'name'",
"in",
"admx_policy",
".",
"attrib",
":",
"this_policyname",
"=",
"admx_policy",
".",
"attrib",
"[",
"'name'",
"]",
"else",
":",
"log",
".",
"error",
"(",
"'policy item %s does not have the required \"name\" '",
"'attribute'",
",",
"admx_policy",
".",
"attrib",
")",
"break",
"this_policynamespace",
"=",
"admx_policy",
".",
"nsmap",
"[",
"admx_policy",
".",
"prefix",
"]",
"if",
"ENABLED_VALUE_XPATH",
"(",
"admx_policy",
")",
"and",
"this_policy_setting",
"==",
"'Not Configured'",
":",
"# some policies have a disabled list but not an enabled list",
"# added this to address those issues",
"if",
"DISABLED_LIST_XPATH",
"(",
"admx_policy",
")",
"or",
"DISABLED_VALUE_XPATH",
"(",
"admx_policy",
")",
":",
"element_only_enabled_disabled",
"=",
"False",
"explicit_enable_disable_value_setting",
"=",
"True",
"if",
"_checkValueItemParent",
"(",
"admx_policy",
",",
"this_policyname",
",",
"this_key",
",",
"this_valuename",
",",
"ENABLED_VALUE_XPATH",
",",
"policy_file_data",
")",
":",
"this_policy_setting",
"=",
"'Enabled'",
"log",
".",
"debug",
"(",
"'%s is enabled by detected ENABLED_VALUE_XPATH'",
",",
"this_policyname",
")",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"this_policy_setting",
"if",
"DISABLED_VALUE_XPATH",
"(",
"admx_policy",
")",
"and",
"this_policy_setting",
"==",
"'Not Configured'",
":",
"# some policies have a disabled list but not an enabled list",
"# added this to address those issues",
"if",
"ENABLED_LIST_XPATH",
"(",
"admx_policy",
")",
"or",
"ENABLED_VALUE_XPATH",
"(",
"admx_policy",
")",
":",
"element_only_enabled_disabled",
"=",
"False",
"explicit_enable_disable_value_setting",
"=",
"True",
"if",
"_checkValueItemParent",
"(",
"admx_policy",
",",
"this_policyname",
",",
"this_key",
",",
"this_valuename",
",",
"DISABLED_VALUE_XPATH",
",",
"policy_file_data",
")",
":",
"this_policy_setting",
"=",
"'Disabled'",
"log",
".",
"debug",
"(",
"'%s is disabled by detected DISABLED_VALUE_XPATH'",
",",
"this_policyname",
")",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"this_policy_setting",
"if",
"ENABLED_LIST_XPATH",
"(",
"admx_policy",
")",
"and",
"this_policy_setting",
"==",
"'Not Configured'",
":",
"if",
"DISABLED_LIST_XPATH",
"(",
"admx_policy",
")",
"or",
"DISABLED_VALUE_XPATH",
"(",
"admx_policy",
")",
":",
"element_only_enabled_disabled",
"=",
"False",
"explicit_enable_disable_value_setting",
"=",
"True",
"if",
"_checkListItem",
"(",
"admx_policy",
",",
"this_policyname",
",",
"this_key",
",",
"ENABLED_LIST_XPATH",
",",
"policy_file_data",
")",
":",
"this_policy_setting",
"=",
"'Enabled'",
"log",
".",
"debug",
"(",
"'%s is enabled by detected ENABLED_LIST_XPATH'",
",",
"this_policyname",
")",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"this_policy_setting",
"if",
"DISABLED_LIST_XPATH",
"(",
"admx_policy",
")",
"and",
"this_policy_setting",
"==",
"'Not Configured'",
":",
"if",
"ENABLED_LIST_XPATH",
"(",
"admx_policy",
")",
"or",
"ENABLED_VALUE_XPATH",
"(",
"admx_policy",
")",
":",
"element_only_enabled_disabled",
"=",
"False",
"explicit_enable_disable_value_setting",
"=",
"True",
"if",
"_checkListItem",
"(",
"admx_policy",
",",
"this_policyname",
",",
"this_key",
",",
"DISABLED_LIST_XPATH",
",",
"policy_file_data",
")",
":",
"this_policy_setting",
"=",
"'Disabled'",
"log",
".",
"debug",
"(",
"'%s is disabled by detected DISABLED_LIST_XPATH'",
",",
"this_policyname",
")",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"this_policy_setting",
"if",
"not",
"explicit_enable_disable_value_setting",
"and",
"this_valuename",
":",
"# the policy has a key/valuename but no explicit enabled/Disabled",
"# Value or List",
"# these seem to default to a REG_DWORD 1 = \"Enabled\" **del. = \"Disabled\"",
"if",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_buildKnownDataSearchString",
"(",
"this_key",
",",
"this_valuename",
",",
"'REG_DWORD'",
",",
"'1'",
")",
")",
",",
"policy_file_data",
")",
":",
"this_policy_setting",
"=",
"'Enabled'",
"log",
".",
"debug",
"(",
"'%s is enabled by no explicit enable/disable list or value'",
",",
"this_policyname",
")",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"this_policy_setting",
"elif",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_buildKnownDataSearchString",
"(",
"this_key",
",",
"this_valuename",
",",
"'REG_DWORD'",
",",
"None",
",",
"check_deleted",
"=",
"True",
")",
")",
",",
"policy_file_data",
")",
":",
"this_policy_setting",
"=",
"'Disabled'",
"log",
".",
"debug",
"(",
"'%s is disabled by no explicit enable/disable list or value'",
",",
"this_policyname",
")",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"this_policy_setting",
"if",
"ELEMENTS_XPATH",
"(",
"admx_policy",
")",
":",
"if",
"element_only_enabled_disabled",
"or",
"this_policy_setting",
"==",
"'Enabled'",
":",
"# TODO does this need to be modified based on the 'required' attribute?",
"required_elements",
"=",
"{",
"}",
"configured_elements",
"=",
"{",
"}",
"policy_disabled_elements",
"=",
"0",
"for",
"elements_item",
"in",
"ELEMENTS_XPATH",
"(",
"admx_policy",
")",
":",
"for",
"child_item",
"in",
"elements_item",
".",
"getchildren",
"(",
")",
":",
"this_element_name",
"=",
"_getFullPolicyName",
"(",
"policy_item",
"=",
"child_item",
",",
"policy_name",
"=",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
",",
"return_full_policy_names",
"=",
"return_full_policy_names",
",",
"adml_language",
"=",
"adml_language",
")",
"required_elements",
"[",
"this_element_name",
"]",
"=",
"None",
"child_key",
"=",
"this_key",
"child_valuename",
"=",
"this_valuename",
"if",
"'key'",
"in",
"child_item",
".",
"attrib",
":",
"child_key",
"=",
"child_item",
".",
"attrib",
"[",
"'key'",
"]",
"if",
"'valueName'",
"in",
"child_item",
".",
"attrib",
":",
"child_valuename",
"=",
"child_item",
".",
"attrib",
"[",
"'valueName'",
"]",
"if",
"etree",
".",
"QName",
"(",
"child_item",
")",
".",
"localname",
"==",
"'boolean'",
":",
"# https://msdn.microsoft.com/en-us/library/dn605978(v=vs.85).aspx",
"if",
"child_item",
".",
"getchildren",
"(",
")",
":",
"if",
"TRUE_VALUE_XPATH",
"(",
"child_item",
")",
"and",
"this_element_name",
"not",
"in",
"configured_elements",
":",
"if",
"_checkValueItemParent",
"(",
"child_item",
",",
"this_policyname",
",",
"child_key",
",",
"child_valuename",
",",
"TRUE_VALUE_XPATH",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"True",
"log",
".",
"debug",
"(",
"'element %s is configured true'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"if",
"FALSE_VALUE_XPATH",
"(",
"child_item",
")",
"and",
"this_element_name",
"not",
"in",
"configured_elements",
":",
"if",
"_checkValueItemParent",
"(",
"child_item",
",",
"this_policyname",
",",
"child_key",
",",
"child_valuename",
",",
"FALSE_VALUE_XPATH",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"False",
"policy_disabled_elements",
"=",
"policy_disabled_elements",
"+",
"1",
"log",
".",
"debug",
"(",
"'element %s is configured false'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"# WARNING - no standard ADMX files use true/falseList",
"# so this hasn't actually been tested",
"if",
"TRUE_LIST_XPATH",
"(",
"child_item",
")",
"and",
"this_element_name",
"not",
"in",
"configured_elements",
":",
"log",
".",
"debug",
"(",
"'checking trueList'",
")",
"if",
"_checkListItem",
"(",
"child_item",
",",
"this_policyname",
",",
"this_key",
",",
"TRUE_LIST_XPATH",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"True",
"log",
".",
"debug",
"(",
"'element %s is configured true'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"if",
"FALSE_LIST_XPATH",
"(",
"child_item",
")",
"and",
"this_element_name",
"not",
"in",
"configured_elements",
":",
"log",
".",
"debug",
"(",
"'checking falseList'",
")",
"if",
"_checkListItem",
"(",
"child_item",
",",
"this_policyname",
",",
"this_key",
",",
"FALSE_LIST_XPATH",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"False",
"policy_disabled_elements",
"=",
"policy_disabled_elements",
"+",
"1",
"log",
".",
"debug",
"(",
"'element %s is configured false'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"else",
":",
"if",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"True",
")",
")",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"False",
"policy_disabled_elements",
"=",
"policy_disabled_elements",
"+",
"1",
"log",
".",
"debug",
"(",
"'element %s is configured false'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"elif",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"False",
")",
")",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"True",
"log",
".",
"debug",
"(",
"'element %s is configured true'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"elif",
"etree",
".",
"QName",
"(",
"child_item",
")",
".",
"localname",
"==",
"'decimal'",
"or",
"etree",
".",
"QName",
"(",
"child_item",
")",
".",
"localname",
"==",
"'text'",
"or",
"etree",
".",
"QName",
"(",
"child_item",
")",
".",
"localname",
"==",
"'longDecimal'",
"or",
"etree",
".",
"QName",
"(",
"child_item",
")",
".",
"localname",
"==",
"'multiText'",
":",
"# https://msdn.microsoft.com/en-us/library/dn605987(v=vs.85).aspx",
"if",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"True",
")",
")",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"'Disabled'",
"policy_disabled_elements",
"=",
"policy_disabled_elements",
"+",
"1",
"log",
".",
"debug",
"(",
"'element %s is disabled'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"elif",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"False",
")",
")",
",",
"policy_file_data",
")",
":",
"configured_value",
"=",
"_getDataFromRegPolData",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"False",
")",
",",
"policy_file_data",
")",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"configured_value",
"log",
".",
"debug",
"(",
"'element %s is enabled, value == %s'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
",",
"configured_value",
")",
"elif",
"etree",
".",
"QName",
"(",
"child_item",
")",
".",
"localname",
"==",
"'enum'",
":",
"if",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"True",
")",
")",
",",
"policy_file_data",
")",
":",
"log",
".",
"debug",
"(",
"'enum element %s is disabled'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"'Disabled'",
"policy_disabled_elements",
"=",
"policy_disabled_elements",
"+",
"1",
"else",
":",
"for",
"enum_item",
"in",
"child_item",
".",
"getchildren",
"(",
")",
":",
"if",
"_checkValueItemParent",
"(",
"enum_item",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
",",
"child_key",
",",
"child_valuename",
",",
"VALUE_XPATH",
",",
"policy_file_data",
")",
":",
"if",
"VALUE_LIST_XPATH",
"(",
"enum_item",
")",
":",
"log",
".",
"debug",
"(",
"'enum item has a valueList'",
")",
"if",
"_checkListItem",
"(",
"enum_item",
",",
"this_policyname",
",",
"child_key",
",",
"VALUE_LIST_XPATH",
",",
"policy_file_data",
")",
":",
"log",
".",
"debug",
"(",
"'all valueList items exist in file'",
")",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"_getAdmlDisplayName",
"(",
"adml_policy_resources",
",",
"enum_item",
".",
"attrib",
"[",
"'displayName'",
"]",
")",
"break",
"else",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"_getAdmlDisplayName",
"(",
"adml_policy_resources",
",",
"enum_item",
".",
"attrib",
"[",
"'displayName'",
"]",
")",
"break",
"elif",
"etree",
".",
"QName",
"(",
"child_item",
")",
".",
"localname",
"==",
"'list'",
":",
"return_value_name",
"=",
"False",
"if",
"'explicitValue'",
"in",
"child_item",
".",
"attrib",
"and",
"child_item",
".",
"attrib",
"[",
"'explicitValue'",
"]",
".",
"lower",
"(",
")",
"==",
"'true'",
":",
"log",
".",
"debug",
"(",
"'explicitValue list, we will return value names'",
")",
"return_value_name",
"=",
"True",
"if",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"False",
")",
")",
"+",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"r'(?!\\*\\*delvals\\.)'",
")",
",",
"policy_file_data",
")",
":",
"configured_value",
"=",
"_getDataFromRegPolData",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"False",
")",
",",
"policy_file_data",
",",
"return_value_name",
"=",
"return_value_name",
")",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"configured_value",
"log",
".",
"debug",
"(",
"'element %s is enabled values: %s'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
",",
"configured_value",
")",
"elif",
"_regexSearchRegPolData",
"(",
"re",
".",
"escape",
"(",
"_processValueItem",
"(",
"child_item",
",",
"child_key",
",",
"child_valuename",
",",
"admx_policy",
",",
"elements_item",
",",
"check_deleted",
"=",
"True",
")",
")",
",",
"policy_file_data",
")",
":",
"configured_elements",
"[",
"this_element_name",
"]",
"=",
"\"Disabled\"",
"policy_disabled_elements",
"=",
"policy_disabled_elements",
"+",
"1",
"log",
".",
"debug",
"(",
"'element %s is disabled'",
",",
"child_item",
".",
"attrib",
"[",
"'id'",
"]",
")",
"if",
"element_only_enabled_disabled",
":",
"if",
"required_elements",
"and",
"len",
"(",
"configured_elements",
")",
"==",
"len",
"(",
"required_elements",
")",
":",
"if",
"policy_disabled_elements",
"==",
"len",
"(",
"required_elements",
")",
":",
"log",
".",
"debug",
"(",
"'%s is disabled by all enum elements'",
",",
"this_policyname",
")",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"'Disabled'",
"else",
":",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"configured_elements",
"log",
".",
"debug",
"(",
"'%s is enabled by enum elements'",
",",
"this_policyname",
")",
"else",
":",
"if",
"this_policy_setting",
"==",
"'Enabled'",
":",
"if",
"this_policynamespace",
"not",
"in",
"policy_vals",
":",
"policy_vals",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"policy_vals",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"configured_elements",
"if",
"return_full_policy_names",
"and",
"this_policynamespace",
"in",
"policy_vals",
"and",
"this_policyname",
"in",
"policy_vals",
"[",
"this_policynamespace",
"]",
":",
"if",
"this_policynamespace",
"not",
"in",
"full_names",
":",
"full_names",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"full_names",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"_getFullPolicyName",
"(",
"policy_item",
"=",
"admx_policy",
",",
"policy_name",
"=",
"admx_policy",
".",
"attrib",
"[",
"'name'",
"]",
",",
"return_full_policy_names",
"=",
"return_full_policy_names",
",",
"adml_language",
"=",
"adml_language",
")",
"if",
"this_policynamespace",
"in",
"policy_vals",
"and",
"this_policyname",
"in",
"policy_vals",
"[",
"this_policynamespace",
"]",
":",
"if",
"this_policynamespace",
"not",
"in",
"hierarchy",
":",
"hierarchy",
"[",
"this_policynamespace",
"]",
"=",
"{",
"}",
"hierarchy",
"[",
"this_policynamespace",
"]",
"[",
"this_policyname",
"]",
"=",
"_build_parent_list",
"(",
"policy_definition",
"=",
"admx_policy",
",",
"return_full_policy_names",
"=",
"return_full_policy_names",
",",
"adml_language",
"=",
"adml_language",
")",
"if",
"policy_vals",
"and",
"return_full_policy_names",
"and",
"not",
"hierarchical_return",
":",
"unpathed_dict",
"=",
"{",
"}",
"pathed_dict",
"=",
"{",
"}",
"for",
"policy_namespace",
"in",
"list",
"(",
"policy_vals",
")",
":",
"for",
"policy_item",
"in",
"list",
"(",
"policy_vals",
"[",
"policy_namespace",
"]",
")",
":",
"if",
"full_names",
"[",
"policy_namespace",
"]",
"[",
"policy_item",
"]",
"in",
"policy_vals",
"[",
"policy_namespace",
"]",
":",
"# add this item with the path'd full name",
"full_path_list",
"=",
"hierarchy",
"[",
"policy_namespace",
"]",
"[",
"policy_item",
"]",
"full_path_list",
".",
"reverse",
"(",
")",
"full_path_list",
".",
"append",
"(",
"full_names",
"[",
"policy_namespace",
"]",
"[",
"policy_item",
"]",
")",
"policy_vals",
"[",
"'\\\\'",
".",
"join",
"(",
"full_path_list",
")",
"]",
"=",
"policy_vals",
"[",
"policy_namespace",
"]",
".",
"pop",
"(",
"policy_item",
")",
"pathed_dict",
"[",
"full_names",
"[",
"policy_namespace",
"]",
"[",
"policy_item",
"]",
"]",
"=",
"True",
"else",
":",
"policy_vals",
"[",
"policy_namespace",
"]",
"[",
"full_names",
"[",
"policy_namespace",
"]",
"[",
"policy_item",
"]",
"]",
"=",
"policy_vals",
"[",
"policy_namespace",
"]",
".",
"pop",
"(",
"policy_item",
")",
"if",
"policy_namespace",
"not",
"in",
"unpathed_dict",
":",
"unpathed_dict",
"[",
"policy_namespace",
"]",
"=",
"{",
"}",
"unpathed_dict",
"[",
"policy_namespace",
"]",
"[",
"full_names",
"[",
"policy_namespace",
"]",
"[",
"policy_item",
"]",
"]",
"=",
"policy_item",
"# go back and remove any \"unpathed\" policies that need a full path",
"for",
"path_needed",
"in",
"unpathed_dict",
"[",
"policy_namespace",
"]",
":",
"# remove the item with the same full name and re-add it w/a path'd version",
"full_path_list",
"=",
"hierarchy",
"[",
"policy_namespace",
"]",
"[",
"unpathed_dict",
"[",
"policy_namespace",
"]",
"[",
"path_needed",
"]",
"]",
"full_path_list",
".",
"reverse",
"(",
")",
"full_path_list",
".",
"append",
"(",
"path_needed",
")",
"log",
".",
"debug",
"(",
"'full_path_list == %s'",
",",
"full_path_list",
")",
"policy_vals",
"[",
"'\\\\'",
".",
"join",
"(",
"full_path_list",
")",
"]",
"=",
"policy_vals",
"[",
"policy_namespace",
"]",
".",
"pop",
"(",
"path_needed",
")",
"for",
"policy_namespace",
"in",
"list",
"(",
"policy_vals",
")",
":",
"if",
"policy_vals",
"[",
"policy_namespace",
"]",
"==",
"{",
"}",
":",
"policy_vals",
".",
"pop",
"(",
"policy_namespace",
")",
"if",
"policy_vals",
"and",
"hierarchical_return",
":",
"if",
"hierarchy",
":",
"for",
"policy_namespace",
"in",
"hierarchy",
":",
"for",
"hierarchy_item",
"in",
"hierarchy",
"[",
"policy_namespace",
"]",
":",
"if",
"hierarchy_item",
"in",
"policy_vals",
"[",
"policy_namespace",
"]",
":",
"tdict",
"=",
"{",
"}",
"first_item",
"=",
"True",
"for",
"item",
"in",
"hierarchy",
"[",
"policy_namespace",
"]",
"[",
"hierarchy_item",
"]",
":",
"newdict",
"=",
"{",
"}",
"if",
"first_item",
":",
"h_policy_name",
"=",
"hierarchy_item",
"if",
"return_full_policy_names",
":",
"h_policy_name",
"=",
"full_names",
"[",
"policy_namespace",
"]",
"[",
"hierarchy_item",
"]",
"newdict",
"[",
"item",
"]",
"=",
"{",
"h_policy_name",
":",
"policy_vals",
"[",
"policy_namespace",
"]",
".",
"pop",
"(",
"hierarchy_item",
")",
"}",
"first_item",
"=",
"False",
"else",
":",
"newdict",
"[",
"item",
"]",
"=",
"tdict",
"tdict",
"=",
"newdict",
"if",
"tdict",
":",
"policy_vals",
"=",
"dictupdate",
".",
"update",
"(",
"policy_vals",
",",
"tdict",
")",
"if",
"policy_namespace",
"in",
"policy_vals",
"and",
"policy_vals",
"[",
"policy_namespace",
"]",
"==",
"{",
"}",
":",
"policy_vals",
".",
"pop",
"(",
"policy_namespace",
")",
"policy_vals",
"=",
"{",
"module_policy_data",
".",
"admx_registry_classes",
"[",
"policy_class",
"]",
"[",
"'lgpo_section'",
"]",
":",
"{",
"'Administrative Templates'",
":",
"policy_vals",
"}",
"}",
"return",
"policy_vals"
] | 73.227074 | 36.615721 |
def get_formatted_path(self, **kwargs):
    """
    Format this endpoint's path with the supplied keyword arguments
    :return:
        The fully-formatted path
    :rtype:
        str
    """
    # Validate the supplied keywords against the endpoint's declared
    # placeholders before substituting them into the path template.
    placeholders = self.path_placeholders
    self._validate_path_placeholders(placeholders, kwargs)
    return self.path.format(**kwargs)
"def",
"get_formatted_path",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_validate_path_placeholders",
"(",
"self",
".",
"path_placeholders",
",",
"kwargs",
")",
"return",
"self",
".",
"path",
".",
"format",
"(",
"*",
"*",
"kwargs",
")"
] | 27.25 | 18.583333 |
def HernquistX(s):
    """
    Computes X function from equations (33) & (34) of Hernquist (1990)
    """
    # Guard clauses: negative arguments are invalid, s == 1 is the
    # removable singularity of both closed forms.
    if s < 0.:
        raise ValueError("s must be positive in Hernquist X function")
    if s == 1.:
        return 1.
    if s < 1.:
        root = numpy.sqrt(1 - s * s)
        return numpy.log((1 + root) / s) / root
    root = numpy.sqrt(s * s - 1)
    return numpy.arccos(1. / s) / root
"def",
"HernquistX",
"(",
"s",
")",
":",
"if",
"(",
"s",
"<",
"0.",
")",
":",
"raise",
"ValueError",
"(",
"\"s must be positive in Hernquist X function\"",
")",
"elif",
"(",
"s",
"<",
"1.",
")",
":",
"return",
"numpy",
".",
"log",
"(",
"(",
"1",
"+",
"numpy",
".",
"sqrt",
"(",
"1",
"-",
"s",
"*",
"s",
")",
")",
"/",
"s",
")",
"/",
"numpy",
".",
"sqrt",
"(",
"1",
"-",
"s",
"*",
"s",
")",
"elif",
"(",
"s",
"==",
"1.",
")",
":",
"return",
"1.",
"else",
":",
"return",
"numpy",
".",
"arccos",
"(",
"1.",
"/",
"s",
")",
"/",
"numpy",
".",
"sqrt",
"(",
"s",
"*",
"s",
"-",
"1",
")"
] | 30.166667 | 21 |
def encode_nibbles(nibbles):
    """
    The Hex Prefix function
    """
    # The flag encodes whether the nibble sequence was terminated.
    flag = HP_FLAG_2 if is_nibbles_terminated(nibbles) else HP_FLAG_0
    raw_nibbles = remove_nibbles_terminator(nibbles)
    # An odd-length payload folds the parity into the flag nibble itself;
    # an even-length payload needs a zero padding nibble after the flag.
    if len(raw_nibbles) % 2:
        prefix = (flag + 1,)
    else:
        prefix = (flag, 0)
    flagged_nibbles = tuple(itertools.chain(prefix, raw_nibbles))
    return nibbles_to_bytes(flagged_nibbles)
"def",
"encode_nibbles",
"(",
"nibbles",
")",
":",
"if",
"is_nibbles_terminated",
"(",
"nibbles",
")",
":",
"flag",
"=",
"HP_FLAG_2",
"else",
":",
"flag",
"=",
"HP_FLAG_0",
"raw_nibbles",
"=",
"remove_nibbles_terminator",
"(",
"nibbles",
")",
"is_odd",
"=",
"len",
"(",
"raw_nibbles",
")",
"%",
"2",
"if",
"is_odd",
":",
"flagged_nibbles",
"=",
"tuple",
"(",
"itertools",
".",
"chain",
"(",
"(",
"flag",
"+",
"1",
",",
")",
",",
"raw_nibbles",
",",
")",
")",
"else",
":",
"flagged_nibbles",
"=",
"tuple",
"(",
"itertools",
".",
"chain",
"(",
"(",
"flag",
",",
"0",
")",
",",
"raw_nibbles",
",",
")",
")",
"prefixed_value",
"=",
"nibbles_to_bytes",
"(",
"flagged_nibbles",
")",
"return",
"prefixed_value"
] | 20.777778 | 19.888889 |
def measures(self):
    """Return the columns whose role is MEASURE."""
    from ambry.valuetype.core import ROLE
    measure_role = ROLE.MEASURE
    return [col for col in self.columns if col.role == measure_role]
"def",
"measures",
"(",
"self",
")",
":",
"from",
"ambry",
".",
"valuetype",
".",
"core",
"import",
"ROLE",
"return",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"columns",
"if",
"c",
".",
"role",
"==",
"ROLE",
".",
"MEASURE",
"]"
] | 33.8 | 18.4 |
def download_file(url, filename):
    """Download the resource at *url* and write it to *filename*.

    The response body is streamed to disk in chunks so that large files
    are not buffered in memory in their entirety.

    :param url: URL to fetch
    :param filename: destination path on disk
    :raises IOError: if the HTTP response status is not OK
    """
    r = _get_requests_session().get(url, stream=True)
    if not r.ok:
        raise IOError("Unable to download file")
    with open(filename, "wb") as f:
        # stream=True is only effective if the body is consumed in chunks;
        # the original read r.content, which buffers the whole file.
        for chunk in r.iter_content(chunk_size=64 * 1024):
            f.write(chunk)
"def",
"download_file",
"(",
"url",
",",
"filename",
")",
":",
"r",
"=",
"_get_requests_session",
"(",
")",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"not",
"r",
".",
"ok",
":",
"raise",
"IOError",
"(",
"\"Unable to download file\"",
")",
"with",
"open",
"(",
"filename",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"r",
".",
"content",
")"
] | 33.5 | 13.875 |
def iterate(self, image, feature_extractor, feature_vector):
    """iterate(image, feature_extractor, feature_vector) -> bounding_box
    Scales the given image and extracts features from every sampled bounding box.
    For each sampled box, the pre-allocated ``feature_vector`` is filled and the
    box (rescaled back to original image coordinates) is yielded.
    **Parameters:**
    ``image`` : array_like(2D)
      The image to extract features from
    ``feature_extractor`` : :py:class:`FeatureExtractor`
      Extractor used to compute features for the sampled patches
    ``feature_vector`` : :py:class:`numpy.ndarray` (1D, uint16)
      Pre-allocated vector filled in place; must have size
      :py:attr:`FeatureExtractor.number_of_features`
    **Yields:**
    ``bounding_box`` : :py:class:`BoundingBox`
      The bounding box (in original image coordinates) the current features
      were extracted for
    """
    for scale, scaled_image_shape in self.scales(image):
        # let the extractor precompute whatever it needs at this scale
        feature_extractor.prepare(image, scale)
        for candidate_box in self.sample_scaled(scaled_image_shape):
            feature_extractor.extract_indexed(candidate_box, feature_vector)
            # map the box back from the scaled image to original coordinates
            yield candidate_box.scale(1. / scale)
"def",
"iterate",
"(",
"self",
",",
"image",
",",
"feature_extractor",
",",
"feature_vector",
")",
":",
"for",
"scale",
",",
"scaled_image_shape",
"in",
"self",
".",
"scales",
"(",
"image",
")",
":",
"# prepare the feature extractor to extract features from the given image",
"feature_extractor",
".",
"prepare",
"(",
"image",
",",
"scale",
")",
"for",
"bb",
"in",
"self",
".",
"sample_scaled",
"(",
"scaled_image_shape",
")",
":",
"# extract features for",
"feature_extractor",
".",
"extract_indexed",
"(",
"bb",
",",
"feature_vector",
")",
"yield",
"bb",
".",
"scale",
"(",
"1.",
"/",
"scale",
")"
] | 43.066667 | 29.366667 |
def air_absorption_coefficient(medium, wavelength):
    """
    Return the linear absorption coefficient of the selected medium at a
    given x-ray wavelength [mm^-1].

    :param medium: either ``'Helium'`` or ``'Air'``
    :param wavelength: x-ray wavelength; presumably in Angstroem, since the
        energy conversion constant below is in MeV*Angstroem -- TODO confirm
    :raises ValueError: for an unknown medium, or when the photon energy
        falls outside the tabulated range

    Mass attenuation coefficients are taken from NIST "Tables of X-Ray Mass
    Attenuation Coefficients and Mass Energy-Absorption Coefficients from 1
    keV to 20 MeV for Elements Z = 1 to 92 and 48 Additional Substances of
    Dosimetric Interest"
    J. H. Hubbell and S. M. Seltzer
    http://www.nist.gov/pml/data/xraycoef/index.cfm
    """
    # Each table row is (photon energy [MeV], mass attenuation coefficient
    # mu/rho [cm^2/g]); density is in g/cm^3.
    if medium == 'Helium':
        density = 1.663e-04
        mass_attenuation_coefficient = np.array([[1.00000e-03, 6.084e+01],
                                                 [1.50000e-03, 1.676e+01],
                                                 [2.00000e-03, 6.863e+00],
                                                 [3.00000e-03, 2.007e+00],
                                                 [4.00000e-03, 9.329e-01],
                                                 [5.00000e-03, 5.766e-01],
                                                 [6.00000e-03, 4.195e-01],
                                                 [8.00000e-03, 2.933e-01],
                                                 [1.00000e-02, 2.476e-01],
                                                 [1.50000e-02, 2.092e-01],
                                                 [2.00000e-02, 1.960e-01],
                                                 [3.00000e-02, 1.838e-01],
                                                 [4.00000e-02, 1.763e-01],
                                                 [5.00000e-02, 1.703e-01],
                                                 [6.00000e-02, 1.651e-01],
                                                 [8.00000e-02, 1.562e-01],
                                                 [1.00000e-01, 1.486e-01],
                                                 [1.50000e-01, 1.336e-01],
                                                 [2.00000e-01, 1.224e-01],
                                                 [3.00000e-01, 1.064e-01],
                                                 [4.00000e-01, 9.535e-02],
                                                 [5.00000e-01, 8.707e-02],
                                                 [6.00000e-01, 8.054e-02],
                                                 [8.00000e-01, 7.076e-02],
                                                 [1.00000e+00, 6.362e-02],
                                                 [1.25000e+00, 5.688e-02],
                                                 [1.50000e+00, 5.173e-02],
                                                 [2.00000e+00, 4.422e-02],
                                                 [3.00000e+00, 3.503e-02],
                                                 [4.00000e+00, 2.949e-02],
                                                 [5.00000e+00, 2.577e-02],
                                                 [6.00000e+00, 2.307e-02],
                                                 [8.00000e+00, 1.940e-02],
                                                 [1.00000e+01, 1.703e-02],
                                                 [1.50000e+01, 1.363e-02],
                                                 [2.00000e+01, 1.183e-02]])
    elif medium == 'Air':
        density = 1.205e-03
        # The duplicated energy 3.2029e-3 (with a tiny offset) models the
        # absorption-edge discontinuity so np.interp stays single-valued.
        mass_attenuation_coefficient = np.array([[1.00000e-03, 3.606e+03],
                                                 [1.50000e-03, 1.191e+03],
                                                 [2.00000e-03, 5.279e+02],
                                                 [3.00000e-03, 1.625e+02],
                                                 [3.20290e-03, 1.340e+02],
                                                 [3.202900000001e-03, 1.485e+02],
                                                 [4.00000e-03, 7.788e+01],
                                                 [5.00000e-03, 4.027e+01],
                                                 [6.00000e-03, 2.341e+01],
                                                 [8.00000e-03, 9.921e+00],
                                                 [1.00000e-02, 5.120e+00],
                                                 [1.50000e-02, 1.614e+00],
                                                 [2.00000e-02, 7.779e-01],
                                                 [3.00000e-02, 3.538e-01],
                                                 [4.00000e-02, 2.485e-01],
                                                 [5.00000e-02, 2.080e-01],
                                                 [6.00000e-02, 1.875e-01],
                                                 [8.00000e-02, 1.662e-01],
                                                 [1.00000e-01, 1.541e-01],
                                                 [1.50000e-01, 1.356e-01],
                                                 [2.00000e-01, 1.233e-01],
                                                 [3.00000e-01, 1.067e-01],
                                                 [4.00000e-01, 9.549e-02],
                                                 [5.00000e-01, 8.712e-02],
                                                 [6.00000e-01, 8.055e-02],
                                                 [8.00000e-01, 7.074e-02],
                                                 [1.00000e+00, 6.358e-02],
                                                 [1.25000e+00, 5.687e-02],
                                                 [1.50000e+00, 5.175e-02],
                                                 [2.00000e+00, 4.447e-02],
                                                 [3.00000e+00, 3.581e-02],
                                                 [4.00000e+00, 3.079e-02],
                                                 [5.00000e+00, 2.751e-02],
                                                 [6.00000e+00, 2.522e-02],
                                                 [8.00000e+00, 2.225e-02],
                                                 [1.00000e+01, 2.045e-02],
                                                 [1.50000e+01, 1.810e-02],
                                                 [2.00000e+01, 1.705e-02]])
    else:
        # ValueError is a subclass of Exception, so existing callers that
        # caught the old bare Exception still work.
        raise ValueError('Unknown medium ' + medium)
    etw = 1.23985e-2  # energy-wavelength conversion constant [Mev*Angstroem]
    photon_energy = etw / wavelength
    # The original messages claimed "using nearest value" although the code
    # raises and never falls back; the messages now describe what happens.
    if photon_energy < mass_attenuation_coefficient[0, 0]:
        raise ValueError('Wavelength is too large: photon energy below tabulated range')
    if photon_energy > mass_attenuation_coefficient[-1, 0]:
        raise ValueError('Wavelength is too small: photon energy above tabulated range')
    # 0.1 here converts from cm^-1 to mm^-1
    mu = 0.1 * density * np.interp(photon_energy,
                                   mass_attenuation_coefficient[:, 0],
                                   mass_attenuation_coefficient[:, 1])
    return mu
"def",
"air_absorption_coefficient",
"(",
"medium",
",",
"wavelength",
")",
":",
"if",
"medium",
"==",
"'Helium'",
":",
"density",
"=",
"1.663e-04",
"# the table contains photon energy [Mev] and mass attenuation coefficient",
"# mu/sigma [cm^2/g]",
"mass_attenuation_coefficient",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1.00000e-03",
",",
"6.084e+01",
"]",
",",
"[",
"1.50000e-03",
",",
"1.676e+01",
"]",
",",
"[",
"2.00000e-03",
",",
"6.863e+00",
"]",
",",
"[",
"3.00000e-03",
",",
"2.007e+00",
"]",
",",
"[",
"4.00000e-03",
",",
"9.329e-01",
"]",
",",
"[",
"5.00000e-03",
",",
"5.766e-01",
"]",
",",
"[",
"6.00000e-03",
",",
"4.195e-01",
"]",
",",
"[",
"8.00000e-03",
",",
"2.933e-01",
"]",
",",
"[",
"1.00000e-02",
",",
"2.476e-01",
"]",
",",
"[",
"1.50000e-02",
",",
"2.092e-01",
"]",
",",
"[",
"2.00000e-02",
",",
"1.960e-01",
"]",
",",
"[",
"3.00000e-02",
",",
"1.838e-01",
"]",
",",
"[",
"4.00000e-02",
",",
"1.763e-01",
"]",
",",
"[",
"5.00000e-02",
",",
"1.703e-01",
"]",
",",
"[",
"6.00000e-02",
",",
"1.651e-01",
"]",
",",
"[",
"8.00000e-02",
",",
"1.562e-01",
"]",
",",
"[",
"1.00000e-01",
",",
"1.486e-01",
"]",
",",
"[",
"1.50000e-01",
",",
"1.336e-01",
"]",
",",
"[",
"2.00000e-01",
",",
"1.224e-01",
"]",
",",
"[",
"3.00000e-01",
",",
"1.064e-01",
"]",
",",
"[",
"4.00000e-01",
",",
"9.535e-02",
"]",
",",
"[",
"5.00000e-01",
",",
"8.707e-02",
"]",
",",
"[",
"6.00000e-01",
",",
"8.054e-02",
"]",
",",
"[",
"8.00000e-01",
",",
"7.076e-02",
"]",
",",
"[",
"1.00000e+00",
",",
"6.362e-02",
"]",
",",
"[",
"1.25000e+00",
",",
"5.688e-02",
"]",
",",
"[",
"1.50000e+00",
",",
"5.173e-02",
"]",
",",
"[",
"2.00000e+00",
",",
"4.422e-02",
"]",
",",
"[",
"3.00000e+00",
",",
"3.503e-02",
"]",
",",
"[",
"4.00000e+00",
",",
"2.949e-02",
"]",
",",
"[",
"5.00000e+00",
",",
"2.577e-02",
"]",
",",
"[",
"6.00000e+00",
",",
"2.307e-02",
"]",
",",
"[",
"8.00000e+00",
",",
"1.940e-02",
"]",
",",
"[",
"1.00000e+01",
",",
"1.703e-02",
"]",
",",
"[",
"1.50000e+01",
",",
"1.363e-02",
"]",
",",
"[",
"2.00000e+01",
",",
"1.183e-02",
"]",
"]",
")",
"elif",
"medium",
"==",
"'Air'",
":",
"density",
"=",
"1.205e-03",
"mass_attenuation_coefficient",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1.00000e-03",
",",
"3.606e+03",
"]",
",",
"[",
"1.50000e-03",
",",
"1.191e+03",
"]",
",",
"[",
"2.00000e-03",
",",
"5.279e+02",
"]",
",",
"[",
"3.00000e-03",
",",
"1.625e+02",
"]",
",",
"[",
"3.20290e-03",
",",
"1.340e+02",
"]",
",",
"[",
"3.202900000001e-03",
",",
"1.485e+02",
"]",
",",
"[",
"4.00000e-03",
",",
"7.788e+01",
"]",
",",
"[",
"5.00000e-03",
",",
"4.027e+01",
"]",
",",
"[",
"6.00000e-03",
",",
"2.341e+01",
"]",
",",
"[",
"8.00000e-03",
",",
"9.921e+00",
"]",
",",
"[",
"1.00000e-02",
",",
"5.120e+00",
"]",
",",
"[",
"1.50000e-02",
",",
"1.614e+00",
"]",
",",
"[",
"2.00000e-02",
",",
"7.779e-01",
"]",
",",
"[",
"3.00000e-02",
",",
"3.538e-01",
"]",
",",
"[",
"4.00000e-02",
",",
"2.485e-01",
"]",
",",
"[",
"5.00000e-02",
",",
"2.080e-01",
"]",
",",
"[",
"6.00000e-02",
",",
"1.875e-01",
"]",
",",
"[",
"8.00000e-02",
",",
"1.662e-01",
"]",
",",
"[",
"1.00000e-01",
",",
"1.541e-01",
"]",
",",
"[",
"1.50000e-01",
",",
"1.356e-01",
"]",
",",
"[",
"2.00000e-01",
",",
"1.233e-01",
"]",
",",
"[",
"3.00000e-01",
",",
"1.067e-01",
"]",
",",
"[",
"4.00000e-01",
",",
"9.549e-02",
"]",
",",
"[",
"5.00000e-01",
",",
"8.712e-02",
"]",
",",
"[",
"6.00000e-01",
",",
"8.055e-02",
"]",
",",
"[",
"8.00000e-01",
",",
"7.074e-02",
"]",
",",
"[",
"1.00000e+00",
",",
"6.358e-02",
"]",
",",
"[",
"1.25000e+00",
",",
"5.687e-02",
"]",
",",
"[",
"1.50000e+00",
",",
"5.175e-02",
"]",
",",
"[",
"2.00000e+00",
",",
"4.447e-02",
"]",
",",
"[",
"3.00000e+00",
",",
"3.581e-02",
"]",
",",
"[",
"4.00000e+00",
",",
"3.079e-02",
"]",
",",
"[",
"5.00000e+00",
",",
"2.751e-02",
"]",
",",
"[",
"6.00000e+00",
",",
"2.522e-02",
"]",
",",
"[",
"8.00000e+00",
",",
"2.225e-02",
"]",
",",
"[",
"1.00000e+01",
",",
"2.045e-02",
"]",
",",
"[",
"1.50000e+01",
",",
"1.810e-02",
"]",
",",
"[",
"2.00000e+01",
",",
"1.705e-02",
"]",
"]",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown medium '",
"+",
"medium",
")",
"etw",
"=",
"1.23985e-2",
"# [Mev*Angstroem]",
"photon_energy",
"=",
"etw",
"/",
"wavelength",
"if",
"photon_energy",
"<",
"min",
"(",
"mass_attenuation_coefficient",
"[",
":",
",",
"0",
"]",
")",
":",
"raise",
"Exception",
"(",
"'Wavelength is too large, using nearest value'",
")",
"if",
"photon_energy",
">",
"max",
"(",
"mass_attenuation_coefficient",
"[",
":",
",",
"0",
"]",
")",
":",
"raise",
"Exception",
"(",
"'Wavelength is too small, using nearest value'",
")",
"# 0.1 here converts from cm^-1 to mm^-1",
"mu",
"=",
"0.1",
"*",
"density",
"*",
"np",
".",
"interp",
"(",
"photon_energy",
",",
"mass_attenuation_coefficient",
"[",
":",
",",
"0",
"]",
",",
"mass_attenuation_coefficient",
"[",
":",
",",
"1",
"]",
")",
"return",
"mu"
] | 61.783784 | 30.657658 |
def on(self, *args):
    """
    With no arguments, turn every LED on; with arguments, treat them as
    indexes of the LEDs to turn on. For example::
        from gpiozero import LEDBoard
        leds = LEDBoard(2, 3, 4, 5)
        leds.on(0)    # turn on the first LED (pin 2)
        leds.on(-1)   # turn on the last LED (pin 5)
        leds.on(1, 2) # turn on the middle LEDs (pins 3 and 4)
        leds.off()    # turn off all LEDs
        leds.on()     # turn on all LEDs
    Any active :meth:`blink` is stopped first.
    :param int args:
        The index(es) of the LED(s) to turn on. If no indexes are specified
        turn on all LEDs.
    """
    self._stop_blink()
    if not args:
        # no indexes given: delegate to the base class to light everything
        super(LEDBoard, self).on()
        return
    for led_index in args:
        self[led_index].on()
"def",
"on",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"_stop_blink",
"(",
")",
"if",
"args",
":",
"for",
"index",
"in",
"args",
":",
"self",
"[",
"index",
"]",
".",
"on",
"(",
")",
"else",
":",
"super",
"(",
"LEDBoard",
",",
"self",
")",
".",
"on",
"(",
")"
] | 34.148148 | 19.407407 |
def transcript_to_gpd_line(tx, transcript_name=None, gene_name=None, direction=None):
    """Get the genpred format string representation of the mapping

    :param tx: transcript-like object exposing ``_options`` (name, gene_name,
        direction) and an ``exons`` list of objects with ``chr``/``start``/``end``
    :param transcript_name: optional override for the transcript name
    :param gene_name: optional override for the gene name
    :param direction: optional override for the strand ('+' or '-')
    :type transcript_name: string
    :type gene_name: string
    :type direction: string
    :return: GPD line
    :rtype: string
    """
    tname = transcript_name if transcript_name else tx._options.name
    gname = gene_name if gene_name else tx._options.gene_name
    # 'strand' instead of the original 'dir', which shadowed the builtin
    strand = direction if direction else tx._options.direction
    # fall back to a random unique transcript name, and reuse it as the
    # gene name when no gene name is available
    if not tname:
        tname = str(uuid.uuid4())
    if not gname:
        gname = tname
    # GPD stores 0-based starts, hence the start - 1; both coordinate lists
    # carry a trailing comma
    exon_starts = ','.join(str(x.start - 1) for x in tx.exons) + ','
    exon_ends = ','.join(str(x.end) for x in tx.exons) + ','
    fields = [
        gname,
        tname,
        tx.exons[0].chr,
        strand,
        str(tx.exons[0].start - 1),
        str(tx.exons[-1].end),
        str(tx.exons[0].start - 1),
        str(tx.exons[-1].end),
        str(len(tx.exons)),
        exon_starts,
        exon_ends,
    ]
    return "\t".join(fields)
"def",
"transcript_to_gpd_line",
"(",
"tx",
",",
"transcript_name",
"=",
"None",
",",
"gene_name",
"=",
"None",
",",
"direction",
"=",
"None",
")",
":",
"tname",
"=",
"tx",
".",
"_options",
".",
"name",
"if",
"transcript_name",
":",
"tname",
"=",
"transcript_name",
"gname",
"=",
"tx",
".",
"_options",
".",
"gene_name",
"if",
"gene_name",
":",
"gname",
"=",
"gene_name",
"dir",
"=",
"tx",
".",
"_options",
".",
"direction",
"if",
"direction",
":",
"dir",
"=",
"direction",
"# check for if we just have a single name",
"if",
"not",
"tname",
":",
"tname",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"if",
"not",
"gname",
":",
"gname",
"=",
"tname",
"out",
"=",
"''",
"out",
"+=",
"gname",
"+",
"\"\\t\"",
"out",
"+=",
"tname",
"+",
"\"\\t\"",
"out",
"+=",
"tx",
".",
"exons",
"[",
"0",
"]",
".",
"chr",
"+",
"\"\\t\"",
"out",
"+=",
"dir",
"+",
"\"\\t\"",
"out",
"+=",
"str",
"(",
"tx",
".",
"exons",
"[",
"0",
"]",
".",
"start",
"-",
"1",
")",
"+",
"\"\\t\"",
"out",
"+=",
"str",
"(",
"tx",
".",
"exons",
"[",
"-",
"1",
"]",
".",
"end",
")",
"+",
"\"\\t\"",
"out",
"+=",
"str",
"(",
"tx",
".",
"exons",
"[",
"0",
"]",
".",
"start",
"-",
"1",
")",
"+",
"\"\\t\"",
"out",
"+=",
"str",
"(",
"tx",
".",
"exons",
"[",
"-",
"1",
"]",
".",
"end",
")",
"+",
"\"\\t\"",
"out",
"+=",
"str",
"(",
"len",
"(",
"tx",
".",
"exons",
")",
")",
"+",
"\"\\t\"",
"out",
"+=",
"str",
"(",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"x",
".",
"start",
"-",
"1",
")",
"for",
"x",
"in",
"tx",
".",
"exons",
"]",
")",
")",
"+",
"','",
"+",
"\"\\t\"",
"out",
"+=",
"str",
"(",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"x",
".",
"end",
")",
"for",
"x",
"in",
"tx",
".",
"exons",
"]",
")",
")",
"+",
"','",
"return",
"out"
] | 32.285714 | 13.8 |
def write_file(path, data):
    """Write *data* to *path* and return the file object.

    NOTE(review): the return statement sits inside the ``with`` block, so the
    context manager closes the handle as the return unwinds -- callers receive
    an already-closed file object.
    """
    with open(path, 'w') as out_handle:
        log.debug('setting %s contents:\n%s', path, data)
        out_handle.write(data)
        return out_handle
"def",
"write_file",
"(",
"path",
",",
"data",
")",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"log",
".",
"debug",
"(",
"'setting %s contents:\\n%s'",
",",
"path",
",",
"data",
")",
"f",
".",
"write",
"(",
"data",
")",
"return",
"f"
] | 31.166667 | 14.5 |
def read_hatlc(hatlc):
'''
This reads a consolidated HAT LC written by the functions above.
Returns a dict.
'''
lcfname = os.path.basename(hatlc)
# unzip the files first
if '.gz' in lcfname:
lcf = gzip.open(hatlc,'rb')
elif '.bz2' in lcfname:
lcf = bz2.BZ2File(hatlc, 'rb')
else:
lcf = open(hatlc,'rb')
if '.fits' in lcfname and HAVEPYFITS:
hdulist = pyfits.open(lcf)
objectinfo = hdulist[0].header
objectlc = hdulist[1].data
lccols = objectlc.columns.names
hdulist.close()
lcf.close()
lcdict = {}
for col in lccols:
lcdict[col] = np.array(objectlc[col])
lcdict['hatid'] = objectinfo['hatid']
lcdict['twomassid'] = objectinfo['2massid']
lcdict['ra'] = objectinfo['ra']
lcdict['dec'] = objectinfo['dec']
lcdict['mags'] = [objectinfo[x] for x in ('vmag','rmag','imag',
'jmag','hmag','kmag')]
lcdict['ndet'] = objectinfo['ndet']
lcdict['hatstations'] = objectinfo['hats']
lcdict['filters'] = objectinfo['filters']
lcdict['columns'] = lccols
return lcdict
elif '.fits' in lcfname and not HAVEPYFITS:
print("can't read %s since we don't have the pyfits module" % lcfname)
return
elif '.csv' in lcfname or '.hatlc' in lcfname:
lcflines = lcf.read().decode().split('\n')
lcf.close()
# now process the read-in LC
objectdata = [x for x in lcflines if x.startswith('#')]
objectlc = [x for x in lcflines if not x.startswith('#')]
objectlc = [x for x in objectlc if len(x) > 1]
if '.csv' in lcfname:
objectlc = [x.split(',') for x in objectlc]
else:
objectlc = [x.split() for x in objectlc]
# transpose split rows to get columns
objectlc = list(zip(*objectlc))
# read the header to figure out the object's info and column names
objectdata = [x.strip('#') for x in objectdata]
objectdata = [x.strip() for x in objectdata]
objectdata = [x for x in objectdata if len(x) > 0]
hatid, twomassid = objectdata[0].split(' - ')
ra, dec = objectdata[1].split(', ')
ra = float(ra.split(' = ')[-1].strip(' deg'))
dec = float(dec.split(' = ')[-1].strip(' deg'))
vmag, rmag, imag, jmag, hmag, kmag = objectdata[2].split(', ')
vmag = float(vmag.split(' = ')[-1])
rmag = float(rmag.split(' = ')[-1])
imag = float(imag.split(' = ')[-1])
jmag = float(jmag.split(' = ')[-1])
hmag = float(hmag.split(' = ')[-1])
kmag = float(kmag.split(' = ')[-1])
ndet = int(objectdata[3].split(': ')[-1])
hatstations = objectdata[4].split(': ')[-1]
filterhead_ind = objectdata.index('Filters used:')
columnhead_ind = objectdata.index('Columns:')
filters = objectdata[filterhead_ind:columnhead_ind]
columndefs = objectdata[columnhead_ind+1:]
columns = []
for line in columndefs:
colnum, colname, coldesc = line.split(' - ')
columns.append(colname)
lcdict = {}
# now write all the columns to the output dictionary
for ind, col in enumerate(columns):
# this formats everything nicely using our existing column
# definitions
lcdict[col] = np.array([TEXTLC_OUTPUT_COLUMNS[col][3](x)
for x in objectlc[ind]])
# write the object metadata to the output dictionary
lcdict['hatid'] = hatid
lcdict['twomassid'] = twomassid.replace('2MASS J','')
lcdict['ra'] = ra
lcdict['dec'] = dec
lcdict['mags'] = [vmag, rmag, imag, jmag, hmag, kmag]
lcdict['ndet'] = ndet
lcdict['hatstations'] = hatstations.split(', ')
lcdict['filters'] = filters[1:]
lcdict['cols'] = columns
return lcdict | [
"def",
"read_hatlc",
"(",
"hatlc",
")",
":",
"lcfname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"hatlc",
")",
"# unzip the files first",
"if",
"'.gz'",
"in",
"lcfname",
":",
"lcf",
"=",
"gzip",
".",
"open",
"(",
"hatlc",
",",
"'rb'",
")",
"elif",
"'.bz2'",
"in",
"lcfname",
":",
"lcf",
"=",
"bz2",
".",
"BZ2File",
"(",
"hatlc",
",",
"'rb'",
")",
"else",
":",
"lcf",
"=",
"open",
"(",
"hatlc",
",",
"'rb'",
")",
"if",
"'.fits'",
"in",
"lcfname",
"and",
"HAVEPYFITS",
":",
"hdulist",
"=",
"pyfits",
".",
"open",
"(",
"lcf",
")",
"objectinfo",
"=",
"hdulist",
"[",
"0",
"]",
".",
"header",
"objectlc",
"=",
"hdulist",
"[",
"1",
"]",
".",
"data",
"lccols",
"=",
"objectlc",
".",
"columns",
".",
"names",
"hdulist",
".",
"close",
"(",
")",
"lcf",
".",
"close",
"(",
")",
"lcdict",
"=",
"{",
"}",
"for",
"col",
"in",
"lccols",
":",
"lcdict",
"[",
"col",
"]",
"=",
"np",
".",
"array",
"(",
"objectlc",
"[",
"col",
"]",
")",
"lcdict",
"[",
"'hatid'",
"]",
"=",
"objectinfo",
"[",
"'hatid'",
"]",
"lcdict",
"[",
"'twomassid'",
"]",
"=",
"objectinfo",
"[",
"'2massid'",
"]",
"lcdict",
"[",
"'ra'",
"]",
"=",
"objectinfo",
"[",
"'ra'",
"]",
"lcdict",
"[",
"'dec'",
"]",
"=",
"objectinfo",
"[",
"'dec'",
"]",
"lcdict",
"[",
"'mags'",
"]",
"=",
"[",
"objectinfo",
"[",
"x",
"]",
"for",
"x",
"in",
"(",
"'vmag'",
",",
"'rmag'",
",",
"'imag'",
",",
"'jmag'",
",",
"'hmag'",
",",
"'kmag'",
")",
"]",
"lcdict",
"[",
"'ndet'",
"]",
"=",
"objectinfo",
"[",
"'ndet'",
"]",
"lcdict",
"[",
"'hatstations'",
"]",
"=",
"objectinfo",
"[",
"'hats'",
"]",
"lcdict",
"[",
"'filters'",
"]",
"=",
"objectinfo",
"[",
"'filters'",
"]",
"lcdict",
"[",
"'columns'",
"]",
"=",
"lccols",
"return",
"lcdict",
"elif",
"'.fits'",
"in",
"lcfname",
"and",
"not",
"HAVEPYFITS",
":",
"print",
"(",
"\"can't read %s since we don't have the pyfits module\"",
"%",
"lcfname",
")",
"return",
"elif",
"'.csv'",
"in",
"lcfname",
"or",
"'.hatlc'",
"in",
"lcfname",
":",
"lcflines",
"=",
"lcf",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"lcf",
".",
"close",
"(",
")",
"# now process the read-in LC",
"objectdata",
"=",
"[",
"x",
"for",
"x",
"in",
"lcflines",
"if",
"x",
".",
"startswith",
"(",
"'#'",
")",
"]",
"objectlc",
"=",
"[",
"x",
"for",
"x",
"in",
"lcflines",
"if",
"not",
"x",
".",
"startswith",
"(",
"'#'",
")",
"]",
"objectlc",
"=",
"[",
"x",
"for",
"x",
"in",
"objectlc",
"if",
"len",
"(",
"x",
")",
">",
"1",
"]",
"if",
"'.csv'",
"in",
"lcfname",
":",
"objectlc",
"=",
"[",
"x",
".",
"split",
"(",
"','",
")",
"for",
"x",
"in",
"objectlc",
"]",
"else",
":",
"objectlc",
"=",
"[",
"x",
".",
"split",
"(",
")",
"for",
"x",
"in",
"objectlc",
"]",
"# transpose split rows to get columns",
"objectlc",
"=",
"list",
"(",
"zip",
"(",
"*",
"objectlc",
")",
")",
"# read the header to figure out the object's info and column names",
"objectdata",
"=",
"[",
"x",
".",
"strip",
"(",
"'#'",
")",
"for",
"x",
"in",
"objectdata",
"]",
"objectdata",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"objectdata",
"]",
"objectdata",
"=",
"[",
"x",
"for",
"x",
"in",
"objectdata",
"if",
"len",
"(",
"x",
")",
">",
"0",
"]",
"hatid",
",",
"twomassid",
"=",
"objectdata",
"[",
"0",
"]",
".",
"split",
"(",
"' - '",
")",
"ra",
",",
"dec",
"=",
"objectdata",
"[",
"1",
"]",
".",
"split",
"(",
"', '",
")",
"ra",
"=",
"float",
"(",
"ra",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
"' deg'",
")",
")",
"dec",
"=",
"float",
"(",
"dec",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
"' deg'",
")",
")",
"vmag",
",",
"rmag",
",",
"imag",
",",
"jmag",
",",
"hmag",
",",
"kmag",
"=",
"objectdata",
"[",
"2",
"]",
".",
"split",
"(",
"', '",
")",
"vmag",
"=",
"float",
"(",
"vmag",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
")",
"rmag",
"=",
"float",
"(",
"rmag",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
")",
"imag",
"=",
"float",
"(",
"imag",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
")",
"jmag",
"=",
"float",
"(",
"jmag",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
")",
"hmag",
"=",
"float",
"(",
"hmag",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
")",
"kmag",
"=",
"float",
"(",
"kmag",
".",
"split",
"(",
"' = '",
")",
"[",
"-",
"1",
"]",
")",
"ndet",
"=",
"int",
"(",
"objectdata",
"[",
"3",
"]",
".",
"split",
"(",
"': '",
")",
"[",
"-",
"1",
"]",
")",
"hatstations",
"=",
"objectdata",
"[",
"4",
"]",
".",
"split",
"(",
"': '",
")",
"[",
"-",
"1",
"]",
"filterhead_ind",
"=",
"objectdata",
".",
"index",
"(",
"'Filters used:'",
")",
"columnhead_ind",
"=",
"objectdata",
".",
"index",
"(",
"'Columns:'",
")",
"filters",
"=",
"objectdata",
"[",
"filterhead_ind",
":",
"columnhead_ind",
"]",
"columndefs",
"=",
"objectdata",
"[",
"columnhead_ind",
"+",
"1",
":",
"]",
"columns",
"=",
"[",
"]",
"for",
"line",
"in",
"columndefs",
":",
"colnum",
",",
"colname",
",",
"coldesc",
"=",
"line",
".",
"split",
"(",
"' - '",
")",
"columns",
".",
"append",
"(",
"colname",
")",
"lcdict",
"=",
"{",
"}",
"# now write all the columns to the output dictionary",
"for",
"ind",
",",
"col",
"in",
"enumerate",
"(",
"columns",
")",
":",
"# this formats everything nicely using our existing column",
"# definitions",
"lcdict",
"[",
"col",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"TEXTLC_OUTPUT_COLUMNS",
"[",
"col",
"]",
"[",
"3",
"]",
"(",
"x",
")",
"for",
"x",
"in",
"objectlc",
"[",
"ind",
"]",
"]",
")",
"# write the object metadata to the output dictionary",
"lcdict",
"[",
"'hatid'",
"]",
"=",
"hatid",
"lcdict",
"[",
"'twomassid'",
"]",
"=",
"twomassid",
".",
"replace",
"(",
"'2MASS J'",
",",
"''",
")",
"lcdict",
"[",
"'ra'",
"]",
"=",
"ra",
"lcdict",
"[",
"'dec'",
"]",
"=",
"dec",
"lcdict",
"[",
"'mags'",
"]",
"=",
"[",
"vmag",
",",
"rmag",
",",
"imag",
",",
"jmag",
",",
"hmag",
",",
"kmag",
"]",
"lcdict",
"[",
"'ndet'",
"]",
"=",
"ndet",
"lcdict",
"[",
"'hatstations'",
"]",
"=",
"hatstations",
".",
"split",
"(",
"', '",
")",
"lcdict",
"[",
"'filters'",
"]",
"=",
"filters",
"[",
"1",
":",
"]",
"lcdict",
"[",
"'cols'",
"]",
"=",
"columns",
"return",
"lcdict"
] | 31.580645 | 20.419355 |
def remove_term(self, t):
"""Only removes top-level terms. Child terms can be removed at the parent. """
try:
self.terms.remove(t)
except ValueError:
pass
if t.section and t.parent_term_lc == 'root':
t.section = self.add_section(t.section)
t.section.remove_term(t, remove_from_doc=False)
if t.parent:
try:
t.parent.remove_child(t)
except ValueError:
pass | [
"def",
"remove_term",
"(",
"self",
",",
"t",
")",
":",
"try",
":",
"self",
".",
"terms",
".",
"remove",
"(",
"t",
")",
"except",
"ValueError",
":",
"pass",
"if",
"t",
".",
"section",
"and",
"t",
".",
"parent_term_lc",
"==",
"'root'",
":",
"t",
".",
"section",
"=",
"self",
".",
"add_section",
"(",
"t",
".",
"section",
")",
"t",
".",
"section",
".",
"remove_term",
"(",
"t",
",",
"remove_from_doc",
"=",
"False",
")",
"if",
"t",
".",
"parent",
":",
"try",
":",
"t",
".",
"parent",
".",
"remove_child",
"(",
"t",
")",
"except",
"ValueError",
":",
"pass"
] | 26.944444 | 20.277778 |
def within_history(rev, windowdict):
"""Return whether the windowdict has history at the revision."""
if not windowdict:
return False
begin = windowdict._past[0][0] if windowdict._past else \
windowdict._future[-1][0]
end = windowdict._future[0][0] if windowdict._future else \
windowdict._past[-1][0]
return begin <= rev <= end | [
"def",
"within_history",
"(",
"rev",
",",
"windowdict",
")",
":",
"if",
"not",
"windowdict",
":",
"return",
"False",
"begin",
"=",
"windowdict",
".",
"_past",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"windowdict",
".",
"_past",
"else",
"windowdict",
".",
"_future",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"end",
"=",
"windowdict",
".",
"_future",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"windowdict",
".",
"_future",
"else",
"windowdict",
".",
"_past",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"return",
"begin",
"<=",
"rev",
"<=",
"end"
] | 41.111111 | 11.777778 |
def _normalize(self, string):
''' Returns a sanitized string. '''
string = super(VerbixDe, self)._normalize(string)
string = string.replace('sie; Sie', 'sie')
string = string.strip()
return string | [
"def",
"_normalize",
"(",
"self",
",",
"string",
")",
":",
"string",
"=",
"super",
"(",
"VerbixDe",
",",
"self",
")",
".",
"_normalize",
"(",
"string",
")",
"string",
"=",
"string",
".",
"replace",
"(",
"'sie; Sie'",
",",
"'sie'",
")",
"string",
"=",
"string",
".",
"strip",
"(",
")",
"return",
"string"
] | 28.714286 | 15.571429 |
def _normalize(self):
""" Make the quaternion unit length.
"""
# Get length
L = self.norm()
if not L:
raise ValueError('Quaternion cannot have 0-length.')
# Correct
self.w /= L
self.x /= L
self.y /= L
self.z /= L | [
"def",
"_normalize",
"(",
"self",
")",
":",
"# Get length",
"L",
"=",
"self",
".",
"norm",
"(",
")",
"if",
"not",
"L",
":",
"raise",
"ValueError",
"(",
"'Quaternion cannot have 0-length.'",
")",
"# Correct",
"self",
".",
"w",
"/=",
"L",
"self",
".",
"x",
"/=",
"L",
"self",
".",
"y",
"/=",
"L",
"self",
".",
"z",
"/=",
"L"
] | 24.416667 | 17.5 |
def fill_kwargs(self, input_layer, kwargs):
"""Applies name_suffix and defaults to kwargs and returns the result."""
return input_layer._replace_args_with_defaults(_args=self._assign_defaults,
**kwargs) | [
"def",
"fill_kwargs",
"(",
"self",
",",
"input_layer",
",",
"kwargs",
")",
":",
"return",
"input_layer",
".",
"_replace_args_with_defaults",
"(",
"_args",
"=",
"self",
".",
"_assign_defaults",
",",
"*",
"*",
"kwargs",
")"
] | 64.5 | 15.5 |
def on_timer(self, _signum, _unused_frame):
"""Invoked by the Poll timer signal.
:param int _signum: The signal that was invoked
:param frame _unused_frame: The frame that was interrupted
"""
if self.is_shutting_down:
LOGGER.debug('Polling timer fired while shutting down')
return
if not self.polled:
self.poll()
self.polled = True
self.set_timer(5) # Wait 5 seconds for results
else:
self.polled = False
self.poll_results_check()
self.set_timer(self.poll_interval) # Wait poll interval duration
# If stats logging is enabled, log the stats
if self.log_stats_enabled:
self.log_stats()
# Increment the unresponsive children
for proc_name in self.poll_data['processes']:
self.unresponsive[proc_name] += 1
# Remove counters for processes that came back to life
for proc_name in list(self.unresponsive.keys()):
if proc_name not in self.poll_data['processes']:
del self.unresponsive[proc_name] | [
"def",
"on_timer",
"(",
"self",
",",
"_signum",
",",
"_unused_frame",
")",
":",
"if",
"self",
".",
"is_shutting_down",
":",
"LOGGER",
".",
"debug",
"(",
"'Polling timer fired while shutting down'",
")",
"return",
"if",
"not",
"self",
".",
"polled",
":",
"self",
".",
"poll",
"(",
")",
"self",
".",
"polled",
"=",
"True",
"self",
".",
"set_timer",
"(",
"5",
")",
"# Wait 5 seconds for results",
"else",
":",
"self",
".",
"polled",
"=",
"False",
"self",
".",
"poll_results_check",
"(",
")",
"self",
".",
"set_timer",
"(",
"self",
".",
"poll_interval",
")",
"# Wait poll interval duration",
"# If stats logging is enabled, log the stats",
"if",
"self",
".",
"log_stats_enabled",
":",
"self",
".",
"log_stats",
"(",
")",
"# Increment the unresponsive children",
"for",
"proc_name",
"in",
"self",
".",
"poll_data",
"[",
"'processes'",
"]",
":",
"self",
".",
"unresponsive",
"[",
"proc_name",
"]",
"+=",
"1",
"# Remove counters for processes that came back to life",
"for",
"proc_name",
"in",
"list",
"(",
"self",
".",
"unresponsive",
".",
"keys",
"(",
")",
")",
":",
"if",
"proc_name",
"not",
"in",
"self",
".",
"poll_data",
"[",
"'processes'",
"]",
":",
"del",
"self",
".",
"unresponsive",
"[",
"proc_name",
"]"
] | 37.322581 | 18.645161 |
def get_policy_config(platform,
filters=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d'):
'''
Return the configuration of the whole policy.
platform
The name of the Capirca platform.
filters
List of filters for this policy.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
at the beginning, while existing ones will preserve the position. To add the new filters
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
only_lower_merge: ``False``
Specify if it should merge only the filters and terms fields. Otherwise it will try
to merge everything at the policy level. Default: ``False``.
revision_id
Add a comment in the policy config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the policy configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
CLI Example:
.. code-block:: bash
salt '*' capirca.get_policy_config juniper pillar_key=netacl
Output Example:
.. code-block:: text
firewall {
family inet {
replace:
/*
** $Id:$
** $Date:$
** $Revision:$
**
*/
filter my-filter {
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
firewall {
family inet {
replace:
/*
** $Id:$
** $Date:$
** $Revision:$
**
*/
filter my-other-filter {
interface-specific;
term dummy-term {
from {
protocol [ tcp udp ];
}
then {
reject;
}
}
}
}
}
The policy configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
options:
- not-interface-specific
terms:
- my-term:
source_port: [1234, 1235]
action: reject
- my-other-term:
source_port:
- [5678, 5680]
protocol: tcp
action: accept
- my-other-filter:
terms:
- dummy-term:
protocol:
- tcp
- udp
action: reject
'''
if not filters:
filters = []
if merge_pillar and not only_lower_merge:
# the pillar key for the policy config is the `pillar_key` itself
policy_pillar_cfg = _get_pillar_cfg(pillar_key,
saltenv=saltenv,
pillarenv=pillarenv)
# now, let's merge everything witht the pillar data
# again, this will not remove any extra filters/terms
# but it will merge with the pillar data
# if this behaviour is not wanted, the user can set `merge_pillar` as `False`
filters = _merge_list_of_dict(filters, policy_pillar_cfg, prepend=prepend)
policy_object = _get_policy_object(platform,
filters=filters,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar)
policy_text = six.text_type(policy_object)
return _revision_tag(policy_text,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format) | [
"def",
"get_policy_config",
"(",
"platform",
",",
"filters",
"=",
"None",
",",
"prepend",
"=",
"True",
",",
"pillar_key",
"=",
"'acl'",
",",
"pillarenv",
"=",
"None",
",",
"saltenv",
"=",
"None",
",",
"merge_pillar",
"=",
"True",
",",
"only_lower_merge",
"=",
"False",
",",
"revision_id",
"=",
"None",
",",
"revision_no",
"=",
"None",
",",
"revision_date",
"=",
"True",
",",
"revision_date_format",
"=",
"'%Y/%m/%d'",
")",
":",
"if",
"not",
"filters",
":",
"filters",
"=",
"[",
"]",
"if",
"merge_pillar",
"and",
"not",
"only_lower_merge",
":",
"# the pillar key for the policy config is the `pillar_key` itself",
"policy_pillar_cfg",
"=",
"_get_pillar_cfg",
"(",
"pillar_key",
",",
"saltenv",
"=",
"saltenv",
",",
"pillarenv",
"=",
"pillarenv",
")",
"# now, let's merge everything witht the pillar data",
"# again, this will not remove any extra filters/terms",
"# but it will merge with the pillar data",
"# if this behaviour is not wanted, the user can set `merge_pillar` as `False`",
"filters",
"=",
"_merge_list_of_dict",
"(",
"filters",
",",
"policy_pillar_cfg",
",",
"prepend",
"=",
"prepend",
")",
"policy_object",
"=",
"_get_policy_object",
"(",
"platform",
",",
"filters",
"=",
"filters",
",",
"pillar_key",
"=",
"pillar_key",
",",
"pillarenv",
"=",
"pillarenv",
",",
"saltenv",
"=",
"saltenv",
",",
"merge_pillar",
"=",
"merge_pillar",
")",
"policy_text",
"=",
"six",
".",
"text_type",
"(",
"policy_object",
")",
"return",
"_revision_tag",
"(",
"policy_text",
",",
"revision_id",
"=",
"revision_id",
",",
"revision_no",
"=",
"revision_no",
",",
"revision_date",
"=",
"revision_date",
",",
"revision_date_format",
"=",
"revision_date_format",
")"
] | 34.059172 | 20.508876 |
def add(self, iterable):
""" Insert an iterable (pattern) item into the markov chain.
The order of the pattern will define more of the chain.
"""
item1 = item2 = MarkovChain.START
for item3 in iterable:
self[(item1, item2)].add_side(item3)
item1 = item2
item2 = item3
self[(item1, item2)].add_side(MarkovChain.END) | [
"def",
"add",
"(",
"self",
",",
"iterable",
")",
":",
"item1",
"=",
"item2",
"=",
"MarkovChain",
".",
"START",
"for",
"item3",
"in",
"iterable",
":",
"self",
"[",
"(",
"item1",
",",
"item2",
")",
"]",
".",
"add_side",
"(",
"item3",
")",
"item1",
"=",
"item2",
"item2",
"=",
"item3",
"self",
"[",
"(",
"item1",
",",
"item2",
")",
"]",
".",
"add_side",
"(",
"MarkovChain",
".",
"END",
")"
] | 39.3 | 10.6 |
def to_numpy_matrix(self, variable_order=None):
"""Convert a binary quadratic model to NumPy 2D array.
Args:
variable_order (list, optional):
If provided, indexes the rows/columns of the NumPy array. If `variable_order` includes
any variables not in the binary quadratic model, these are added to the NumPy array.
Returns:
:class:`numpy.ndarray`: The binary quadratic model as a NumPy 2D array. Note that the
binary quadratic model is converted to :class:`~.Vartype.BINARY` vartype.
Notes:
The matrix representation of a binary quadratic model only makes sense for binary models.
For a binary sample x, the energy of the model is given by:
.. math::
E(x) = x^T Q x
The offset is dropped when converting to a NumPy array.
Examples:
This example converts a binary quadratic model to NumPy array format while
ordering variables and adding one ('d').
>>> import dimod
>>> import numpy as np
...
>>> model = dimod.BinaryQuadraticModel({'a': 1, 'b': -1, 'c': .5},
... {('a', 'b'): .5, ('b', 'c'): 1.5},
... 1.4,
... dimod.BINARY)
>>> model.to_numpy_matrix(variable_order=['d', 'c', 'b', 'a'])
array([[ 0. , 0. , 0. , 0. ],
[ 0. , 0.5, 1.5, 0. ],
[ 0. , 0. , -1. , 0.5],
[ 0. , 0. , 0. , 1. ]])
"""
import numpy as np
if variable_order is None:
# just use the existing variable labels, assuming that they are [0, N)
num_variables = len(self)
mat = np.zeros((num_variables, num_variables), dtype=float)
try:
for v, bias in iteritems(self.binary.linear):
mat[v, v] = bias
except IndexError:
raise ValueError(("if 'variable_order' is not provided, binary quadratic model must be "
"index labeled [0, ..., N-1]"))
for (u, v), bias in iteritems(self.binary.quadratic):
if u < v:
mat[u, v] = bias
else:
mat[v, u] = bias
else:
num_variables = len(variable_order)
idx = {v: i for i, v in enumerate(variable_order)}
mat = np.zeros((num_variables, num_variables), dtype=float)
try:
for v, bias in iteritems(self.binary.linear):
mat[idx[v], idx[v]] = bias
except KeyError as e:
raise ValueError(("variable {} is missing from variable_order".format(e)))
for (u, v), bias in iteritems(self.binary.quadratic):
iu, iv = idx[u], idx[v]
if iu < iv:
mat[iu, iv] = bias
else:
mat[iv, iu] = bias
return mat | [
"def",
"to_numpy_matrix",
"(",
"self",
",",
"variable_order",
"=",
"None",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"variable_order",
"is",
"None",
":",
"# just use the existing variable labels, assuming that they are [0, N)",
"num_variables",
"=",
"len",
"(",
"self",
")",
"mat",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_variables",
",",
"num_variables",
")",
",",
"dtype",
"=",
"float",
")",
"try",
":",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"self",
".",
"binary",
".",
"linear",
")",
":",
"mat",
"[",
"v",
",",
"v",
"]",
"=",
"bias",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"(",
"\"if 'variable_order' is not provided, binary quadratic model must be \"",
"\"index labeled [0, ..., N-1]\"",
")",
")",
"for",
"(",
"u",
",",
"v",
")",
",",
"bias",
"in",
"iteritems",
"(",
"self",
".",
"binary",
".",
"quadratic",
")",
":",
"if",
"u",
"<",
"v",
":",
"mat",
"[",
"u",
",",
"v",
"]",
"=",
"bias",
"else",
":",
"mat",
"[",
"v",
",",
"u",
"]",
"=",
"bias",
"else",
":",
"num_variables",
"=",
"len",
"(",
"variable_order",
")",
"idx",
"=",
"{",
"v",
":",
"i",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"variable_order",
")",
"}",
"mat",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_variables",
",",
"num_variables",
")",
",",
"dtype",
"=",
"float",
")",
"try",
":",
"for",
"v",
",",
"bias",
"in",
"iteritems",
"(",
"self",
".",
"binary",
".",
"linear",
")",
":",
"mat",
"[",
"idx",
"[",
"v",
"]",
",",
"idx",
"[",
"v",
"]",
"]",
"=",
"bias",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"(",
"\"variable {} is missing from variable_order\"",
".",
"format",
"(",
"e",
")",
")",
")",
"for",
"(",
"u",
",",
"v",
")",
",",
"bias",
"in",
"iteritems",
"(",
"self",
".",
"binary",
".",
"quadratic",
")",
":",
"iu",
",",
"iv",
"=",
"idx",
"[",
"u",
"]",
",",
"idx",
"[",
"v",
"]",
"if",
"iu",
"<",
"iv",
":",
"mat",
"[",
"iu",
",",
"iv",
"]",
"=",
"bias",
"else",
":",
"mat",
"[",
"iv",
",",
"iu",
"]",
"=",
"bias",
"return",
"mat"
] | 38.45 | 25.3375 |
def resolve_loader(self, meta: ProgramDescription):
"""
Resolve program loader
"""
if not meta.loader:
meta.loader = 'single' if meta.path else 'separate'
for loader_cls in self._loaders:
if loader_cls.name == meta.loader:
meta.loader_cls = loader_cls
break
else:
raise ImproperlyConfigured(
(
"Program {} has no loader class registered."
"Check PROGRAM_LOADERS or PROGRAM_DIRS"
).format(meta.path)
) | [
"def",
"resolve_loader",
"(",
"self",
",",
"meta",
":",
"ProgramDescription",
")",
":",
"if",
"not",
"meta",
".",
"loader",
":",
"meta",
".",
"loader",
"=",
"'single'",
"if",
"meta",
".",
"path",
"else",
"'separate'",
"for",
"loader_cls",
"in",
"self",
".",
"_loaders",
":",
"if",
"loader_cls",
".",
"name",
"==",
"meta",
".",
"loader",
":",
"meta",
".",
"loader_cls",
"=",
"loader_cls",
"break",
"else",
":",
"raise",
"ImproperlyConfigured",
"(",
"(",
"\"Program {} has no loader class registered.\"",
"\"Check PROGRAM_LOADERS or PROGRAM_DIRS\"",
")",
".",
"format",
"(",
"meta",
".",
"path",
")",
")"
] | 32.444444 | 14 |
def member_command(self, member_id, command):
"""apply command (start/stop/restart) to member instance of replica set
Args:
member_id - member index
command - string command (start/stop/restart)
return True if operation success otherwise False
"""
server_id = self._servers.host_to_server_id(
self.member_id_to_host(member_id))
return self._servers.command(server_id, command) | [
"def",
"member_command",
"(",
"self",
",",
"member_id",
",",
"command",
")",
":",
"server_id",
"=",
"self",
".",
"_servers",
".",
"host_to_server_id",
"(",
"self",
".",
"member_id_to_host",
"(",
"member_id",
")",
")",
"return",
"self",
".",
"_servers",
".",
"command",
"(",
"server_id",
",",
"command",
")"
] | 41 | 13 |
def __merge_json_values(current, previous):
"""Merges the values between the current and previous run of the script."""
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value) | [
"def",
"__merge_json_values",
"(",
"current",
",",
"previous",
")",
":",
"for",
"value",
"in",
"current",
":",
"name",
"=",
"value",
"[",
"'name'",
"]",
"# Find the previous value",
"previous_value",
"=",
"__find_and_remove_value",
"(",
"previous",
",",
"value",
")",
"if",
"previous_value",
"is",
"not",
"None",
":",
"flags",
"=",
"value",
"[",
"'flags'",
"]",
"previous_flags",
"=",
"previous_value",
"[",
"'flags'",
"]",
"if",
"flags",
"!=",
"previous_flags",
":",
"logging",
".",
"warning",
"(",
"'Flags for %s are different. Using previous value.'",
",",
"name",
")",
"value",
"[",
"'flags'",
"]",
"=",
"previous_flags",
"else",
":",
"logging",
".",
"warning",
"(",
"'Value %s is a new value'",
",",
"name",
")",
"for",
"value",
"in",
"previous",
":",
"name",
"=",
"value",
"[",
"'name'",
"]",
"logging",
".",
"warning",
"(",
"'Value %s not present in current run. Appending value.'",
",",
"name",
")",
"current",
".",
"append",
"(",
"value",
")"
] | 32.576923 | 19.615385 |
def sample_discrete_from_log(p_log,return_lognorms=False,axis=0,dtype=np.int32):
'samples log probability array along specified axis'
lognorms = logsumexp(p_log,axis=axis)
cumvals = np.exp(p_log - np.expand_dims(lognorms,axis)).cumsum(axis)
thesize = np.array(p_log.shape)
thesize[axis] = 1
randvals = random(size=thesize) * \
np.reshape(cumvals[[slice(None) if i is not axis else -1
for i in range(p_log.ndim)]],thesize)
samples = np.sum(randvals > cumvals,axis=axis,dtype=dtype)
if return_lognorms:
return samples, lognorms
else:
return samples | [
"def",
"sample_discrete_from_log",
"(",
"p_log",
",",
"return_lognorms",
"=",
"False",
",",
"axis",
"=",
"0",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
":",
"lognorms",
"=",
"logsumexp",
"(",
"p_log",
",",
"axis",
"=",
"axis",
")",
"cumvals",
"=",
"np",
".",
"exp",
"(",
"p_log",
"-",
"np",
".",
"expand_dims",
"(",
"lognorms",
",",
"axis",
")",
")",
".",
"cumsum",
"(",
"axis",
")",
"thesize",
"=",
"np",
".",
"array",
"(",
"p_log",
".",
"shape",
")",
"thesize",
"[",
"axis",
"]",
"=",
"1",
"randvals",
"=",
"random",
"(",
"size",
"=",
"thesize",
")",
"*",
"np",
".",
"reshape",
"(",
"cumvals",
"[",
"[",
"slice",
"(",
"None",
")",
"if",
"i",
"is",
"not",
"axis",
"else",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"p_log",
".",
"ndim",
")",
"]",
"]",
",",
"thesize",
")",
"samples",
"=",
"np",
".",
"sum",
"(",
"randvals",
">",
"cumvals",
",",
"axis",
"=",
"axis",
",",
"dtype",
"=",
"dtype",
")",
"if",
"return_lognorms",
":",
"return",
"samples",
",",
"lognorms",
"else",
":",
"return",
"samples"
] | 43.785714 | 17.928571 |
def append(self, *other):
"""
Append self with other stream(s). Chaining this way has the behaviour:
``self = Stream(self, *others)``
"""
self._data = it.chain(self._data, Stream(*other)._data)
return self | [
"def",
"append",
"(",
"self",
",",
"*",
"other",
")",
":",
"self",
".",
"_data",
"=",
"it",
".",
"chain",
"(",
"self",
".",
"_data",
",",
"Stream",
"(",
"*",
"other",
")",
".",
"_data",
")",
"return",
"self"
] | 25 | 19.444444 |
def create_role_config_groups(resource_root, service_name, apigroup_list,
cluster_name="default"):
"""
Create role config groups.
@param resource_root: The root Resource object.
@param service_name: Service name.
@param apigroup_list: List of role config groups to create.
@param cluster_name: Cluster name.
@return: New ApiRoleConfigGroup object.
@since: API v3
"""
return call(resource_root.post,
_get_role_config_groups_path(cluster_name, service_name),
ApiRoleConfigGroup, True, data=apigroup_list, api_version=3) | [
"def",
"create_role_config_groups",
"(",
"resource_root",
",",
"service_name",
",",
"apigroup_list",
",",
"cluster_name",
"=",
"\"default\"",
")",
":",
"return",
"call",
"(",
"resource_root",
".",
"post",
",",
"_get_role_config_groups_path",
"(",
"cluster_name",
",",
"service_name",
")",
",",
"ApiRoleConfigGroup",
",",
"True",
",",
"data",
"=",
"apigroup_list",
",",
"api_version",
"=",
"3",
")"
] | 38.571429 | 12.571429 |
def verify(self, obj):
    """Verify that the object conforms to this verifier's schema.

    Args:
        obj (object): A python object to verify

    Returns:
        dict: A copy of ``obj`` where each value has been verified by the
        rule that matched its key.

    Raises:
        ValidationError: If there is a problem verifying the dictionary, a
            ValidationError is thrown with at least the reason key set indicating
            the reason for the lack of validation.
    """
    out_obj = {}
    if not isinstance(obj, dict):
        raise ValidationError("Invalid dictionary", reason="object is not a dictionary")
    if self._fixed_length is not None and len(obj) != self._fixed_length:
        # Bug fix: actual_length previously reported self._fixed_length
        # (the expected length) instead of the dictionary's real size.
        raise ValidationError("Dictionary did not have the correct length",
                              expected_length=self._fixed_length,
                              actual_length=len(obj))
    unmatched_keys = set(obj.keys())
    # First check and make sure that all required keys are included and verify them
    for key in self._required_keys:
        if key not in unmatched_keys:
            raise ValidationError("Required key not found in dictionary",
                                  reason="required key %s not found" % key, key=key)
        out_obj[key] = self._required_keys[key].verify(obj[key])
        unmatched_keys.remove(key)
    # Now check and see if any of the keys in the dictionary are optional and check them
    matched = set()
    for key in unmatched_keys:
        if key in self._optional_keys:
            out_obj[key] = self._optional_keys[key].verify(obj[key])
            matched.add(key)
    unmatched_keys -= matched
    # If there are additional keys, they need to match at least one of the additional key rules
    if unmatched_keys:
        if not self._additional_key_rules:
            raise ValidationError("Extra key found in dictionary that does not allow extra keys",
                                  reason="extra keys found that were not expected", keys=unmatched_keys)
        matched = set()
        for key in unmatched_keys:
            # A rule with key_match None acts as a wildcard; the first rule
            # that matches wins.
            for key_match, rule in self._additional_key_rules:
                if key_match is None or key_match.matches(key):
                    out_obj[key] = rule.verify(obj[key])
                    matched.add(key)
                    break
        unmatched_keys -= matched
        if unmatched_keys:
            raise ValidationError("Extra key found in dictionary that did not match any extra key rule",
                                  reason="extra keys found that did not match any rule", keys=unmatched_keys)
    return out_obj
"def",
"verify",
"(",
"self",
",",
"obj",
")",
":",
"out_obj",
"=",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"raise",
"ValidationError",
"(",
"\"Invalid dictionary\"",
",",
"reason",
"=",
"\"object is not a dictionary\"",
")",
"if",
"self",
".",
"_fixed_length",
"is",
"not",
"None",
"and",
"len",
"(",
"obj",
")",
"!=",
"self",
".",
"_fixed_length",
":",
"raise",
"ValidationError",
"(",
"\"Dictionary did not have the correct length\"",
",",
"expected_length",
"=",
"self",
".",
"_fixed_length",
",",
"actual_length",
"=",
"self",
".",
"_fixed_length",
")",
"unmatched_keys",
"=",
"set",
"(",
"obj",
".",
"keys",
"(",
")",
")",
"required_keys",
"=",
"set",
"(",
"self",
".",
"_required_keys",
".",
"keys",
"(",
")",
")",
"# First check and make sure that all required keys are included and verify them",
"for",
"key",
"in",
"required_keys",
":",
"if",
"key",
"not",
"in",
"unmatched_keys",
":",
"raise",
"ValidationError",
"(",
"\"Required key not found in dictionary\"",
",",
"reason",
"=",
"\"required key %s not found\"",
"%",
"key",
",",
"key",
"=",
"key",
")",
"out_obj",
"[",
"key",
"]",
"=",
"self",
".",
"_required_keys",
"[",
"key",
"]",
".",
"verify",
"(",
"obj",
"[",
"key",
"]",
")",
"unmatched_keys",
".",
"remove",
"(",
"key",
")",
"# Now check and see if any of the keys in the dictionary are optional and check them",
"to_remove",
"=",
"set",
"(",
")",
"for",
"key",
"in",
"unmatched_keys",
":",
"if",
"key",
"not",
"in",
"self",
".",
"_optional_keys",
":",
"continue",
"out_obj",
"[",
"key",
"]",
"=",
"self",
".",
"_optional_keys",
"[",
"key",
"]",
".",
"verify",
"(",
"obj",
"[",
"key",
"]",
")",
"to_remove",
".",
"add",
"(",
"key",
")",
"unmatched_keys",
"-=",
"to_remove",
"# If there are additional keys, they need to match at least one of the additional key rules",
"if",
"len",
"(",
"unmatched_keys",
")",
">",
"0",
":",
"if",
"len",
"(",
"self",
".",
"_additional_key_rules",
")",
"==",
"0",
":",
"raise",
"ValidationError",
"(",
"\"Extra key found in dictionary that does not allow extra keys\"",
",",
"reason",
"=",
"\"extra keys found that were not expected\"",
",",
"keys",
"=",
"unmatched_keys",
")",
"to_remove",
"=",
"set",
"(",
")",
"for",
"key",
"in",
"unmatched_keys",
":",
"for",
"key_match",
",",
"rule",
"in",
"self",
".",
"_additional_key_rules",
":",
"if",
"key_match",
"is",
"None",
"or",
"key_match",
".",
"matches",
"(",
"key",
")",
":",
"out_obj",
"[",
"key",
"]",
"=",
"rule",
".",
"verify",
"(",
"obj",
"[",
"key",
"]",
")",
"to_remove",
".",
"add",
"(",
"key",
")",
"break",
"unmatched_keys",
"-=",
"to_remove",
"if",
"len",
"(",
"unmatched_keys",
")",
">",
"0",
":",
"raise",
"ValidationError",
"(",
"\"Extra key found in dictionary that did not match any extra key rule\"",
",",
"reason",
"=",
"\"extra keys found that did not match any rule\"",
",",
"keys",
"=",
"unmatched_keys",
")",
"return",
"out_obj"
] | 42.046154 | 28 |
def room_reserve(self):
    """
    Create a hotel.reservation record for each record in the recordset.

    @param self: The object pointer
    @return: the last hotel.reservation record created, or False when the
        recordset is empty.
    """
    hotel_res_obj = self.env['hotel.reservation']
    # Bug fix: `rec` was previously unbound, so an empty recordset raised
    # UnboundLocalError on return. Initialise to False (Odoo convention).
    rec = False
    for res in self:
        # NOTE(review): only res.room_id is copied into a single reservation
        # line — presumably one room per record; confirm against callers.
        rec = hotel_res_obj.create({
            'partner_id': res.partner_id.id,
            'partner_invoice_id': res.partner_invoice_id.id,
            'partner_order_id': res.partner_order_id.id,
            'partner_shipping_id': res.partner_shipping_id.id,
            'checkin': res.check_in,
            'checkout': res.check_out,
            'warehouse_id': res.warehouse_id.id,
            'pricelist_id': res.pricelist_id.id,
            'adults': res.adults,
            # (0, 0, vals) creates a new line; (6, 0, ids) sets the m2m ids.
            'reservation_line': [(0, 0, {
                'reserve': [(6, 0, [res.room_id.id])],
                'name': (res.room_id and res.room_id.name or ''),
            })],
        })
    return rec
"def",
"room_reserve",
"(",
"self",
")",
":",
"hotel_res_obj",
"=",
"self",
".",
"env",
"[",
"'hotel.reservation'",
"]",
"for",
"res",
"in",
"self",
":",
"rec",
"=",
"(",
"hotel_res_obj",
".",
"create",
"(",
"{",
"'partner_id'",
":",
"res",
".",
"partner_id",
".",
"id",
",",
"'partner_invoice_id'",
":",
"res",
".",
"partner_invoice_id",
".",
"id",
",",
"'partner_order_id'",
":",
"res",
".",
"partner_order_id",
".",
"id",
",",
"'partner_shipping_id'",
":",
"res",
".",
"partner_shipping_id",
".",
"id",
",",
"'checkin'",
":",
"res",
".",
"check_in",
",",
"'checkout'",
":",
"res",
".",
"check_out",
",",
"'warehouse_id'",
":",
"res",
".",
"warehouse_id",
".",
"id",
",",
"'pricelist_id'",
":",
"res",
".",
"pricelist_id",
".",
"id",
",",
"'adults'",
":",
"res",
".",
"adults",
",",
"'reservation_line'",
":",
"[",
"(",
"0",
",",
"0",
",",
"{",
"'reserve'",
":",
"[",
"(",
"6",
",",
"0",
",",
"[",
"res",
".",
"room_id",
".",
"id",
"]",
")",
"]",
",",
"'name'",
":",
"(",
"res",
".",
"room_id",
"and",
"res",
".",
"room_id",
".",
"name",
"or",
"''",
")",
"}",
")",
"]",
"}",
")",
")",
"return",
"rec"
] | 48.185185 | 15.740741 |
def install(force=False, lazy=False):
    """
    Download the ANTLR v4 tool jar. (Raises :exception:`OSError` if jar
    is already available, unless ``lazy`` is ``True``.)
    :param bool force: Force download even if local jar already exists.
    :param bool lazy: Don't report an error if local jar already exists and
        don't try to download it either.
    """
    if exists(antlr_jar_path):
        if lazy:
            return
        if not force:
            raise OSError(errno.EEXIST, 'file already exists', antlr_jar_path)
    tool_url = config['tool_url']
    # Bug fix: Purpose.CLIENT_AUTH builds a *server-side* context (no CA
    # certs loaded, no hostname checking), which silently disables server
    # certificate validation for this client download. The default purpose
    # (SERVER_AUTH) is the correct one for connecting to an HTTPS server.
    ssl_context = ssl.create_default_context()
    with contextlib.closing(urlopen(tool_url, context=ssl_context)) as response:
        tool_jar = response.read()
    jar_dir = dirname(antlr_jar_path)
    if not isdir(jar_dir):
        makedirs(jar_dir)
    with open(antlr_jar_path, mode='wb') as tool_file:
        tool_file.write(tool_jar)
"def",
"install",
"(",
"force",
"=",
"False",
",",
"lazy",
"=",
"False",
")",
":",
"if",
"exists",
"(",
"antlr_jar_path",
")",
":",
"if",
"lazy",
":",
"return",
"if",
"not",
"force",
":",
"raise",
"OSError",
"(",
"errno",
".",
"EEXIST",
",",
"'file already exists'",
",",
"antlr_jar_path",
")",
"tool_url",
"=",
"config",
"[",
"'tool_url'",
"]",
"ssl_context",
"=",
"ssl",
".",
"create_default_context",
"(",
"purpose",
"=",
"ssl",
".",
"Purpose",
".",
"CLIENT_AUTH",
")",
"with",
"contextlib",
".",
"closing",
"(",
"urlopen",
"(",
"tool_url",
",",
"context",
"=",
"ssl_context",
")",
")",
"as",
"response",
":",
"tool_jar",
"=",
"response",
".",
"read",
"(",
")",
"if",
"not",
"isdir",
"(",
"dirname",
"(",
"antlr_jar_path",
")",
")",
":",
"makedirs",
"(",
"dirname",
"(",
"antlr_jar_path",
")",
")",
"with",
"open",
"(",
"antlr_jar_path",
",",
"mode",
"=",
"'wb'",
")",
"as",
"tool_file",
":",
"tool_file",
".",
"write",
"(",
"tool_jar",
")"
] | 35.384615 | 20.846154 |
def _field_sort_name(cls, name):
    """Get a sort key for a field name that determines the order
    fields should be written in.
    Fields names are kept unchanged, unless they are instances of
    :class:`DateItemField`, in which case `year`, `month`, and `day`
    are replaced by `date0`, `date1`, and `date2`, respectively, to
    make them appear in that order.
    """
    if isinstance(cls.__dict__[name], DateItemField):
        # Plain substring substitutions; applied in sequence like the
        # original chained re.sub calls.
        for part, ordered in (('year', 'date0'), ('month', 'date1'), ('day', 'date2')):
            name = name.replace(part, ordered)
    return name
"def",
"_field_sort_name",
"(",
"cls",
",",
"name",
")",
":",
"if",
"isinstance",
"(",
"cls",
".",
"__dict__",
"[",
"name",
"]",
",",
"DateItemField",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"'year'",
",",
"'date0'",
",",
"name",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"'month'",
",",
"'date1'",
",",
"name",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"'day'",
",",
"'date2'",
",",
"name",
")",
"return",
"name"
] | 44.357143 | 15 |
def _build_put_headers(self, robj, if_none_match=False):
        """Build the headers for a POST/PUT request.

        :param robj: the Riak object whose metadata/links/indexes are
            serialized into HTTP headers.
        :param bool if_none_match: when True, add ``If-None-Match: *`` so the
            store only succeeds if the key does not already exist.
        :return: a MultiDict of request headers.
        """
        # Construct the headers...
        if robj.charset is not None:
            content_type = ('%s; charset="%s"' %
                            (robj.content_type, robj.charset))
        else:
            content_type = robj.content_type
        headers = MultiDict({'Content-Type': content_type,
                             'X-Riak-ClientId': self._client_id})
        # Add the vclock if it exists...
        # NOTE(review): str.encode('base64') is Python-2-only; on Python 3
        # this would need base64.b64encode — confirm target interpreter.
        if robj.vclock is not None:
            headers['X-Riak-Vclock'] = robj.vclock.encode('base64')
        # Create the header from metadata
        self._add_links_for_riak_object(robj, headers)
        for key in robj.usermeta.keys():
            headers['X-Riak-Meta-%s' % key] = robj.usermeta[key]
        # Secondary indexes: repeated values for the same index are folded
        # into one comma-separated header value.
        for field, value in robj.indexes:
            key = 'X-Riak-Index-%s' % field
            if key in headers:
                headers[key] += ", " + str(value)
            else:
                headers[key] = str(value)
        if if_none_match:
            headers['If-None-Match'] = '*'
        return headers
"def",
"_build_put_headers",
"(",
"self",
",",
"robj",
",",
"if_none_match",
"=",
"False",
")",
":",
"# Construct the headers...",
"if",
"robj",
".",
"charset",
"is",
"not",
"None",
":",
"content_type",
"=",
"(",
"'%s; charset=\"%s\"'",
"%",
"(",
"robj",
".",
"content_type",
",",
"robj",
".",
"charset",
")",
")",
"else",
":",
"content_type",
"=",
"robj",
".",
"content_type",
"headers",
"=",
"MultiDict",
"(",
"{",
"'Content-Type'",
":",
"content_type",
",",
"'X-Riak-ClientId'",
":",
"self",
".",
"_client_id",
"}",
")",
"# Add the vclock if it exists...",
"if",
"robj",
".",
"vclock",
"is",
"not",
"None",
":",
"headers",
"[",
"'X-Riak-Vclock'",
"]",
"=",
"robj",
".",
"vclock",
".",
"encode",
"(",
"'base64'",
")",
"# Create the header from metadata",
"self",
".",
"_add_links_for_riak_object",
"(",
"robj",
",",
"headers",
")",
"for",
"key",
"in",
"robj",
".",
"usermeta",
".",
"keys",
"(",
")",
":",
"headers",
"[",
"'X-Riak-Meta-%s'",
"%",
"key",
"]",
"=",
"robj",
".",
"usermeta",
"[",
"key",
"]",
"for",
"field",
",",
"value",
"in",
"robj",
".",
"indexes",
":",
"key",
"=",
"'X-Riak-Index-%s'",
"%",
"field",
"if",
"key",
"in",
"headers",
":",
"headers",
"[",
"key",
"]",
"+=",
"\", \"",
"+",
"str",
"(",
"value",
")",
"else",
":",
"headers",
"[",
"key",
"]",
"=",
"str",
"(",
"value",
")",
"if",
"if_none_match",
":",
"headers",
"[",
"'If-None-Match'",
"]",
"=",
"'*'",
"return",
"headers"
] | 33 | 17.735294 |
def background_color(self):
    """Background color (the "real" value when one is present)."""
    data = self._data
    return data.real_background_color if self._has_real() else data.background_color
"def",
"background_color",
"(",
"self",
")",
":",
"if",
"self",
".",
"_has_real",
"(",
")",
":",
"return",
"self",
".",
"_data",
".",
"real_background_color",
"return",
"self",
".",
"_data",
".",
"background_color"
] | 35.8 | 7.6 |
def _get_addresses(self, address_data, retain_name=False):
"""
Takes RFC-compliant email addresses in both terse (email only)
and verbose (name + email) forms and returns a list of
email address strings
(TODO: breaking change that returns a tuple of (name, email) per string)
"""
if retain_name:
raise NotImplementedError(
"Not yet implemented, but will need client-code changes too"
)
# We trust than an email address contains an "@" after
# email.utils.getaddresses has done the hard work. If we wanted
# to we could use a regex to check for greater email validity
# NB: getaddresses expects a list, so ensure we feed it appropriately
if isinstance(address_data, str):
if "[" not in address_data:
# Definitely turn these into a list
# NB: this is pretty assumptive, but still prob OK
address_data = [address_data]
output = [x[1] for x in getaddresses(address_data) if "@" in x[1]]
return output | [
"def",
"_get_addresses",
"(",
"self",
",",
"address_data",
",",
"retain_name",
"=",
"False",
")",
":",
"if",
"retain_name",
":",
"raise",
"NotImplementedError",
"(",
"\"Not yet implemented, but will need client-code changes too\"",
")",
"# We trust than an email address contains an \"@\" after",
"# email.utils.getaddresses has done the hard work. If we wanted",
"# to we could use a regex to check for greater email validity",
"# NB: getaddresses expects a list, so ensure we feed it appropriately",
"if",
"isinstance",
"(",
"address_data",
",",
"str",
")",
":",
"if",
"\"[\"",
"not",
"in",
"address_data",
":",
"# Definitely turn these into a list",
"# NB: this is pretty assumptive, but still prob OK",
"address_data",
"=",
"[",
"address_data",
"]",
"output",
"=",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"getaddresses",
"(",
"address_data",
")",
"if",
"\"@\"",
"in",
"x",
"[",
"1",
"]",
"]",
"return",
"output"
] | 41.807692 | 22.269231 |
async def channel_cmd(self, ctx, *, channel : discord.Channel = None):
        """Ignores a specific channel from being processed.
        If no channel is specified, the current channel is ignored.
        If a channel is ignored then the bot does not process commands in that
        channel until it is unignored.
        """
        # Default to the channel the command was invoked from.
        if channel is None:
            channel = ctx.message.channel
        ignored = self.config.get('ignored', [])
        if channel.id in ignored:
            await self.bot.responses.failure(message='That channel is already ignored.')
            return
        # Persist the updated ignore list before acknowledging.
        ignored.append(channel.id)
        await self.config.put('ignored', ignored)
        await self.bot.responses.success(message='Channel <#{}> will be ignored.'.format(channel.id))
"async",
"def",
"channel_cmd",
"(",
"self",
",",
"ctx",
",",
"*",
",",
"channel",
":",
"discord",
".",
"Channel",
"=",
"None",
")",
":",
"if",
"channel",
"is",
"None",
":",
"channel",
"=",
"ctx",
".",
"message",
".",
"channel",
"ignored",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'ignored'",
",",
"[",
"]",
")",
"if",
"channel",
".",
"id",
"in",
"ignored",
":",
"await",
"self",
".",
"bot",
".",
"responses",
".",
"failure",
"(",
"message",
"=",
"'That channel is already ignored.'",
")",
"return",
"ignored",
".",
"append",
"(",
"channel",
".",
"id",
")",
"await",
"self",
".",
"config",
".",
"put",
"(",
"'ignored'",
",",
"ignored",
")",
"await",
"self",
".",
"bot",
".",
"responses",
".",
"success",
"(",
"message",
"=",
"'Channel <#{}> will be ignored.'",
".",
"format",
"(",
"channel",
".",
"id",
")",
")"
] | 40.105263 | 22.736842 |
def to_escpos(self):
    """Convert the current style to an escpos command string.

    Commands are emitted in ascending ``_order`` of each style entry; the
    byte sequence for a style is looked up via its current value
    (``self.get(style)``).
    """
    # Bug fix: the original used dict.keys().sort(cmp=...), which is
    # Python-2-only (dict.keys() is a view and cmp was removed in Python 3).
    # sorted() with a key function is equivalent and works on both.
    ordered_styles = sorted(self.cmds, key=lambda style: self.cmds[style]['_order'])
    return ''.join(self.cmds[style][self.get(style)] for style in ordered_styles)
"def",
"to_escpos",
"(",
"self",
")",
":",
"cmd",
"=",
"''",
"ordered_cmds",
"=",
"self",
".",
"cmds",
".",
"keys",
"(",
")",
"ordered_cmds",
".",
"sort",
"(",
"lambda",
"x",
",",
"y",
":",
"cmp",
"(",
"self",
".",
"cmds",
"[",
"x",
"]",
"[",
"'_order'",
"]",
",",
"self",
".",
"cmds",
"[",
"y",
"]",
"[",
"'_order'",
"]",
")",
")",
"for",
"style",
"in",
"ordered_cmds",
":",
"cmd",
"+=",
"self",
".",
"cmds",
"[",
"style",
"]",
"[",
"self",
".",
"get",
"(",
"style",
")",
"]",
"return",
"cmd"
] | 42.375 | 16.875 |
def attributes(self, filter=Filter()):
    """
    Get only the attribute content.
    @param filter: A filter to constrain the result.
    @type filter: L{Filter}
    @return: A list of tuples (attr, ancestry)
    @rtype: [(L{SchemaObject}, [L{SchemaObject},..]),..]
    """
    # Keep only attribute children that pass the filter, preserving
    # iteration order.
    return [(child, ancestry)
            for child, ancestry in self
            if child.isattr() and child in filter]
"def",
"attributes",
"(",
"self",
",",
"filter",
"=",
"Filter",
"(",
")",
")",
":",
"result",
"=",
"[",
"]",
"for",
"child",
",",
"ancestry",
"in",
"self",
":",
"if",
"child",
".",
"isattr",
"(",
")",
"and",
"child",
"in",
"filter",
":",
"result",
".",
"append",
"(",
"(",
"child",
",",
"ancestry",
")",
")",
"return",
"result"
] | 36.153846 | 9.230769 |
def connect_tcp(self, address, port):
        """Connect to tcp/ip `address`:`port`. Delegated to `_connect_tcp`.

        :param address: host name or IP address to connect to.
        :param port: TCP port number.
        """
        info('Connecting to TCP address: %s:%d', address, port)
        self._connect_tcp(address, port)
"def",
"connect_tcp",
"(",
"self",
",",
"address",
",",
"port",
")",
":",
"info",
"(",
"'Connecting to TCP address: %s:%d'",
",",
"address",
",",
"port",
")",
"self",
".",
"_connect_tcp",
"(",
"address",
",",
"port",
")"
] | 54.5 | 6.5 |
def lemmatize(ambiguous_word: str, pos: str = None, neverstem=False,
              lemmatizer=wnl, stemmer=porter) -> str:
    """
    Tries to convert a surface word into lemma, and if lemmatize word is not in
    wordnet then try and convert surface word into its stem.
    This is to handle the case where users input a surface word as an ambiguous
    word and the surface word is a not a lemma.
    """
    # When no POS is given (or it is falsy), guess the most frequent one.
    if not pos:
        pos = penn2morphy(pos_tag([ambiguous_word])[0][1], default_to_noun=True)
    lemma = lemmatizer.lemmatize(ambiguous_word, pos=pos)
    # Prefer the lemma whenever wordnet knows it.
    if wn.synsets(lemma):
        return lemma
    if neverstem:
        return ambiguous_word
    # Fall back to the stem, and finally to the surface word itself.
    stem = stemmer.stem(ambiguous_word)
    return stem if wn.synsets(stem) else ambiguous_word
"def",
"lemmatize",
"(",
"ambiguous_word",
":",
"str",
",",
"pos",
":",
"str",
"=",
"None",
",",
"neverstem",
"=",
"False",
",",
"lemmatizer",
"=",
"wnl",
",",
"stemmer",
"=",
"porter",
")",
"->",
"str",
":",
"# Try to be a little smarter and use most frequent POS.",
"pos",
"=",
"pos",
"if",
"pos",
"else",
"penn2morphy",
"(",
"pos_tag",
"(",
"[",
"ambiguous_word",
"]",
")",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"default_to_noun",
"=",
"True",
")",
"lemma",
"=",
"lemmatizer",
".",
"lemmatize",
"(",
"ambiguous_word",
",",
"pos",
"=",
"pos",
")",
"stem",
"=",
"stemmer",
".",
"stem",
"(",
"ambiguous_word",
")",
"# Ensure that ambiguous word is a lemma.",
"if",
"not",
"wn",
".",
"synsets",
"(",
"lemma",
")",
":",
"if",
"neverstem",
":",
"return",
"ambiguous_word",
"if",
"not",
"wn",
".",
"synsets",
"(",
"stem",
")",
":",
"return",
"ambiguous_word",
"else",
":",
"return",
"stem",
"else",
":",
"return",
"lemma"
] | 37.6 | 18.48 |
def expected_inheritance(variant_obj):
    """Gather the union of manual inheritance models across all genes."""
    collected = {
        model
        for gene in variant_obj.get('genes', [])
        for model in gene.get('manual_inheritance', [])
    }
    return list(collected)
"def",
"expected_inheritance",
"(",
"variant_obj",
")",
":",
"manual_models",
"=",
"set",
"(",
")",
"for",
"gene",
"in",
"variant_obj",
".",
"get",
"(",
"'genes'",
",",
"[",
"]",
")",
":",
"manual_models",
".",
"update",
"(",
"gene",
".",
"get",
"(",
"'manual_inheritance'",
",",
"[",
"]",
")",
")",
"return",
"list",
"(",
"manual_models",
")"
] | 43.333333 | 9.333333 |
def iter_search_nodes(self, **conditions):
    """
    Search nodes in an interative way. Matches are being yield as
    they are being found. This avoids to scan the full tree
    topology before returning the first matches. Useful when
    dealing with huge trees.
    """
    for node in self.traverse():
        # A node matches when every condition attribute exists and equals
        # the requested value (vacuously true for no conditions).
        if all(hasattr(node, attr) and getattr(node, attr) == value
               for attr, value in six.iteritems(conditions)):
            yield node
"def",
"iter_search_nodes",
"(",
"self",
",",
"*",
"*",
"conditions",
")",
":",
"for",
"n",
"in",
"self",
".",
"traverse",
"(",
")",
":",
"conditions_passed",
"=",
"0",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"conditions",
")",
":",
"if",
"hasattr",
"(",
"n",
",",
"key",
")",
"and",
"getattr",
"(",
"n",
",",
"key",
")",
"==",
"value",
":",
"conditions_passed",
"+=",
"1",
"if",
"conditions_passed",
"==",
"len",
"(",
"conditions",
")",
":",
"yield",
"n"
] | 42.428571 | 12.142857 |
def _create_cfgnode(self, sim_successors, call_stack, func_addr, block_id=None, depth=None, exception_info=None):
        """
        Create a context-sensitive CFGNode instance for a specific block.
        :param SimSuccessors sim_successors: The SimSuccessors object.
        :param CallStack call_stack: The call stack.
        :param int func_addr: Address of the current function.
        :param BlockID block_id: The block ID of this CFGNode.
        :param int or None depth: Depth of this CFGNode.
        :param exception_info: Exception details recorded if block lifting failed, or None.
        :return: A CFGNode instance.
        :rtype: CFGNode
        """
        sa = sim_successors.artifacts  # shorthand
        # Determine if this is a SimProcedure, and further, if this is a syscall
        syscall = None
        is_syscall = False
        if sim_successors.sort == 'SimProcedure':
            is_simprocedure = True
            if sa['is_syscall'] is True:
                is_syscall = True
                syscall = sim_successors.artifacts['procedure']
        else:
            is_simprocedure = False
        if is_simprocedure:
            # SimProcedure node: derive a display name, preferring the symbol a
            # generic "ReturnUnconstrained" stub actually resolves to.
            simproc_name = sa['name'].split('.')[-1]
            if simproc_name == "ReturnUnconstrained" and sa['resolves'] is not None:
                simproc_name = sa['resolves']
            no_ret = False
            if syscall is not None and sa['no_ret']:
                no_ret = True
            cfg_node = CFGENode(sim_successors.addr,
                                None,
                                self.model,
                                callstack_key=call_stack.stack_suffix(self.context_sensitivity_level),
                                input_state=None,
                                simprocedure_name=simproc_name,
                                syscall_name=syscall,
                                no_ret=no_ret,
                                is_syscall=is_syscall,
                                function_address=sim_successors.addr,
                                block_id=block_id,
                                depth=depth,
                                creation_failure_info=exception_info,
                                # On ARM an odd address marks a THUMB-mode block.
                                thumb=(isinstance(self.project.arch, ArchARM) and sim_successors.addr & 1),
                                )
        else:
            # Regular basic-block node: record the lifted IRSB and its size.
            cfg_node = CFGENode(sim_successors.addr,
                                sa['irsb_size'],
                                self.model,
                                callstack_key=call_stack.stack_suffix(self.context_sensitivity_level),
                                input_state=None,
                                is_syscall=is_syscall,
                                function_address=func_addr,
                                block_id=block_id,
                                depth=depth,
                                irsb=sim_successors.artifacts['irsb'],
                                creation_failure_info=exception_info,
                                thumb=(isinstance(self.project.arch, ArchARM) and sim_successors.addr & 1),
                                )
        return cfg_node
"def",
"_create_cfgnode",
"(",
"self",
",",
"sim_successors",
",",
"call_stack",
",",
"func_addr",
",",
"block_id",
"=",
"None",
",",
"depth",
"=",
"None",
",",
"exception_info",
"=",
"None",
")",
":",
"sa",
"=",
"sim_successors",
".",
"artifacts",
"# shorthand",
"# Determine if this is a SimProcedure, and further, if this is a syscall",
"syscall",
"=",
"None",
"is_syscall",
"=",
"False",
"if",
"sim_successors",
".",
"sort",
"==",
"'SimProcedure'",
":",
"is_simprocedure",
"=",
"True",
"if",
"sa",
"[",
"'is_syscall'",
"]",
"is",
"True",
":",
"is_syscall",
"=",
"True",
"syscall",
"=",
"sim_successors",
".",
"artifacts",
"[",
"'procedure'",
"]",
"else",
":",
"is_simprocedure",
"=",
"False",
"if",
"is_simprocedure",
":",
"simproc_name",
"=",
"sa",
"[",
"'name'",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"simproc_name",
"==",
"\"ReturnUnconstrained\"",
"and",
"sa",
"[",
"'resolves'",
"]",
"is",
"not",
"None",
":",
"simproc_name",
"=",
"sa",
"[",
"'resolves'",
"]",
"no_ret",
"=",
"False",
"if",
"syscall",
"is",
"not",
"None",
"and",
"sa",
"[",
"'no_ret'",
"]",
":",
"no_ret",
"=",
"True",
"cfg_node",
"=",
"CFGENode",
"(",
"sim_successors",
".",
"addr",
",",
"None",
",",
"self",
".",
"model",
",",
"callstack_key",
"=",
"call_stack",
".",
"stack_suffix",
"(",
"self",
".",
"context_sensitivity_level",
")",
",",
"input_state",
"=",
"None",
",",
"simprocedure_name",
"=",
"simproc_name",
",",
"syscall_name",
"=",
"syscall",
",",
"no_ret",
"=",
"no_ret",
",",
"is_syscall",
"=",
"is_syscall",
",",
"function_address",
"=",
"sim_successors",
".",
"addr",
",",
"block_id",
"=",
"block_id",
",",
"depth",
"=",
"depth",
",",
"creation_failure_info",
"=",
"exception_info",
",",
"thumb",
"=",
"(",
"isinstance",
"(",
"self",
".",
"project",
".",
"arch",
",",
"ArchARM",
")",
"and",
"sim_successors",
".",
"addr",
"&",
"1",
")",
",",
")",
"else",
":",
"cfg_node",
"=",
"CFGENode",
"(",
"sim_successors",
".",
"addr",
",",
"sa",
"[",
"'irsb_size'",
"]",
",",
"self",
".",
"model",
",",
"callstack_key",
"=",
"call_stack",
".",
"stack_suffix",
"(",
"self",
".",
"context_sensitivity_level",
")",
",",
"input_state",
"=",
"None",
",",
"is_syscall",
"=",
"is_syscall",
",",
"function_address",
"=",
"func_addr",
",",
"block_id",
"=",
"block_id",
",",
"depth",
"=",
"depth",
",",
"irsb",
"=",
"sim_successors",
".",
"artifacts",
"[",
"'irsb'",
"]",
",",
"creation_failure_info",
"=",
"exception_info",
",",
"thumb",
"=",
"(",
"isinstance",
"(",
"self",
".",
"project",
".",
"arch",
",",
"ArchARM",
")",
"and",
"sim_successors",
".",
"addr",
"&",
"1",
")",
",",
")",
"return",
"cfg_node"
] | 47.38806 | 23.059701 |
def _handle_input_request(self, msg):
        """Save history and add a %plot magic.

        Handles a kernel stdin (raw input) request: flushes pending IOPub
        output, then enters readline mode with a callback that records
        history and forwards the typed line to the kernel.
        """
        if self._hidden:
            raise RuntimeError('Request for raw input during hidden execution.')
        # Make sure that all output from the SUB channel has been processed
        # before entering readline mode.
        self.kernel_client.iopub_channel.flush()
        def callback(line):
            # Save history to browse it later
            if not (len(self._control.history) > 0
                    and self._control.history[-1] == line):
                # do not save pdb commands
                cmd = line.split(" ")[0]
                if "do_" + cmd not in dir(pdb.Pdb):
                    self._control.history.append(line)
            # This is the Spyder addition: add a %plot magic to display
            # plots while debugging
            if line.startswith('%plot '):
                line = line.split()[-1]
                code = "__spy_code__ = get_ipython().run_cell('%s')" % line
                self.kernel_client.input(code)
            else:
                self.kernel_client.input(line)
        if self._reading:
            self._reading = False
        # Prompt the user; `callback` fires once a full line is entered.
        self._readline(msg['content']['prompt'], callback=callback,
                       password=msg['content']['password'])
"def",
"_handle_input_request",
"(",
"self",
",",
"msg",
")",
":",
"if",
"self",
".",
"_hidden",
":",
"raise",
"RuntimeError",
"(",
"'Request for raw input during hidden execution.'",
")",
"# Make sure that all output from the SUB channel has been processed",
"# before entering readline mode.",
"self",
".",
"kernel_client",
".",
"iopub_channel",
".",
"flush",
"(",
")",
"def",
"callback",
"(",
"line",
")",
":",
"# Save history to browse it later",
"if",
"not",
"(",
"len",
"(",
"self",
".",
"_control",
".",
"history",
")",
">",
"0",
"and",
"self",
".",
"_control",
".",
"history",
"[",
"-",
"1",
"]",
"==",
"line",
")",
":",
"# do not save pdb commands",
"cmd",
"=",
"line",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
"if",
"\"do_\"",
"+",
"cmd",
"not",
"in",
"dir",
"(",
"pdb",
".",
"Pdb",
")",
":",
"self",
".",
"_control",
".",
"history",
".",
"append",
"(",
"line",
")",
"# This is the Spyder addition: add a %plot magic to display",
"# plots while debugging",
"if",
"line",
".",
"startswith",
"(",
"'%plot '",
")",
":",
"line",
"=",
"line",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
"code",
"=",
"\"__spy_code__ = get_ipython().run_cell('%s')\"",
"%",
"line",
"self",
".",
"kernel_client",
".",
"input",
"(",
"code",
")",
"else",
":",
"self",
".",
"kernel_client",
".",
"input",
"(",
"line",
")",
"if",
"self",
".",
"_reading",
":",
"self",
".",
"_reading",
"=",
"False",
"self",
".",
"_readline",
"(",
"msg",
"[",
"'content'",
"]",
"[",
"'prompt'",
"]",
",",
"callback",
"=",
"callback",
",",
"password",
"=",
"msg",
"[",
"'content'",
"]",
"[",
"'password'",
"]",
")"
] | 42.5 | 15.733333 |
def _generate_route_helper(self, namespace, route, download_to_file=False):
    """Generate a Python method that corresponds to a route.

    Emits the method declaration, its docstring, construction of the
    request argument, the actual request call, and the return statement.

    :param namespace: Namespace that the route belongs to.
    :param stone.ir.ApiRoute route: IR node for the route.
    :param bool download_to_file: Whether a special version of the route
        that downloads the response body to a file should be generated.
        This can only be used for download-style routes.
    """
    arg_data_type = route.arg_data_type
    result_data_type = route.result_data_type

    # The route 'style' attribute decides whether the request/response
    # bodies are raw binary streams rather than serialized data types.
    request_binary_body = route.attrs.get('style') == 'upload'
    response_binary_body = route.attrs.get('style') == 'download'

    if download_to_file:
        assert response_binary_body, 'download_to_file can only be set ' \
            'for download-style routes.'
        # The file-saving variant gets a '_to_file' suffix and an extra
        # 'download_path' parameter.
        self._generate_route_method_decl(namespace,
                                         route,
                                         arg_data_type,
                                         request_binary_body,
                                         method_name_suffix='_to_file',
                                         extra_args=['download_path'])
    else:
        self._generate_route_method_decl(namespace,
                                         route,
                                         arg_data_type,
                                         request_binary_body)

    # Everything below is emitted inside the generated method body.
    with self.indent():
        extra_request_args = None
        extra_return_arg = None
        footer = None
        if request_binary_body:
            extra_request_args = [('f',
                                   'bytes',
                                   'Contents to upload.')]
        elif download_to_file:
            extra_request_args = [('download_path',
                                   'str',
                                   'Path on local machine to save file.')]
        if response_binary_body and not download_to_file:
            # Caller receives the raw HTTP response and must close it.
            extra_return_arg = ':class:`requests.models.Response`'
            footer = DOCSTRING_CLOSE_RESPONSE

        if route.doc:
            func_docstring = self.process_doc(route.doc, self._docf)
        else:
            func_docstring = None

        self._generate_docstring_for_func(
            namespace,
            arg_data_type,
            result_data_type,
            route.error_data_type,
            overview=func_docstring,
            extra_request_args=extra_request_args,
            extra_return_arg=extra_return_arg,
            footer=footer,
        )

        self._maybe_generate_deprecation_warning(route)

        # Code to instantiate a class for the request data type
        if is_void_type(arg_data_type):
            self.emit('arg = None')
        elif is_struct_type(arg_data_type):
            self.generate_multiline_list(
                [f.name for f in arg_data_type.all_fields],
                before='arg = {}.{}'.format(
                    fmt_namespace(arg_data_type.namespace.name),
                    fmt_class(arg_data_type.name)),
            )
        elif not is_union_type(arg_data_type):
            # Unions are passed through as-is; anything else is a bug in
            # the IR.
            raise AssertionError('Unhandled request type %r' %
                                 arg_data_type)

        # Code to make the request
        args = [
            '{}.{}'.format(fmt_namespace(namespace.name),
                           fmt_func(route.name, version=route.version)),
            "'{}'".format(namespace.name),
            'arg']
        if request_binary_body:
            args.append('f')
        else:
            args.append('None')
        self.generate_multiline_list(args, 'r = self.request', compact=False)

        if download_to_file:
            # r is (result, http_response); body is streamed to disk.
            self.emit('self._save_body_to_file(download_path, r[1])')
            if is_void_type(result_data_type):
                self.emit('return None')
            else:
                self.emit('return r[0]')
        else:
            if is_void_type(result_data_type):
                self.emit('return None')
            else:
                self.emit('return r')

        self.emit()
"def",
"_generate_route_helper",
"(",
"self",
",",
"namespace",
",",
"route",
",",
"download_to_file",
"=",
"False",
")",
":",
"arg_data_type",
"=",
"route",
".",
"arg_data_type",
"result_data_type",
"=",
"route",
".",
"result_data_type",
"request_binary_body",
"=",
"route",
".",
"attrs",
".",
"get",
"(",
"'style'",
")",
"==",
"'upload'",
"response_binary_body",
"=",
"route",
".",
"attrs",
".",
"get",
"(",
"'style'",
")",
"==",
"'download'",
"if",
"download_to_file",
":",
"assert",
"response_binary_body",
",",
"'download_to_file can only be set '",
"'for download-style routes.'",
"self",
".",
"_generate_route_method_decl",
"(",
"namespace",
",",
"route",
",",
"arg_data_type",
",",
"request_binary_body",
",",
"method_name_suffix",
"=",
"'_to_file'",
",",
"extra_args",
"=",
"[",
"'download_path'",
"]",
")",
"else",
":",
"self",
".",
"_generate_route_method_decl",
"(",
"namespace",
",",
"route",
",",
"arg_data_type",
",",
"request_binary_body",
")",
"with",
"self",
".",
"indent",
"(",
")",
":",
"extra_request_args",
"=",
"None",
"extra_return_arg",
"=",
"None",
"footer",
"=",
"None",
"if",
"request_binary_body",
":",
"extra_request_args",
"=",
"[",
"(",
"'f'",
",",
"'bytes'",
",",
"'Contents to upload.'",
")",
"]",
"elif",
"download_to_file",
":",
"extra_request_args",
"=",
"[",
"(",
"'download_path'",
",",
"'str'",
",",
"'Path on local machine to save file.'",
")",
"]",
"if",
"response_binary_body",
"and",
"not",
"download_to_file",
":",
"extra_return_arg",
"=",
"':class:`requests.models.Response`'",
"footer",
"=",
"DOCSTRING_CLOSE_RESPONSE",
"if",
"route",
".",
"doc",
":",
"func_docstring",
"=",
"self",
".",
"process_doc",
"(",
"route",
".",
"doc",
",",
"self",
".",
"_docf",
")",
"else",
":",
"func_docstring",
"=",
"None",
"self",
".",
"_generate_docstring_for_func",
"(",
"namespace",
",",
"arg_data_type",
",",
"result_data_type",
",",
"route",
".",
"error_data_type",
",",
"overview",
"=",
"func_docstring",
",",
"extra_request_args",
"=",
"extra_request_args",
",",
"extra_return_arg",
"=",
"extra_return_arg",
",",
"footer",
"=",
"footer",
",",
")",
"self",
".",
"_maybe_generate_deprecation_warning",
"(",
"route",
")",
"# Code to instantiate a class for the request data type",
"if",
"is_void_type",
"(",
"arg_data_type",
")",
":",
"self",
".",
"emit",
"(",
"'arg = None'",
")",
"elif",
"is_struct_type",
"(",
"arg_data_type",
")",
":",
"self",
".",
"generate_multiline_list",
"(",
"[",
"f",
".",
"name",
"for",
"f",
"in",
"arg_data_type",
".",
"all_fields",
"]",
",",
"before",
"=",
"'arg = {}.{}'",
".",
"format",
"(",
"fmt_namespace",
"(",
"arg_data_type",
".",
"namespace",
".",
"name",
")",
",",
"fmt_class",
"(",
"arg_data_type",
".",
"name",
")",
")",
",",
")",
"elif",
"not",
"is_union_type",
"(",
"arg_data_type",
")",
":",
"raise",
"AssertionError",
"(",
"'Unhandled request type %r'",
"%",
"arg_data_type",
")",
"# Code to make the request",
"args",
"=",
"[",
"'{}.{}'",
".",
"format",
"(",
"fmt_namespace",
"(",
"namespace",
".",
"name",
")",
",",
"fmt_func",
"(",
"route",
".",
"name",
",",
"version",
"=",
"route",
".",
"version",
")",
")",
",",
"\"'{}'\"",
".",
"format",
"(",
"namespace",
".",
"name",
")",
",",
"'arg'",
"]",
"if",
"request_binary_body",
":",
"args",
".",
"append",
"(",
"'f'",
")",
"else",
":",
"args",
".",
"append",
"(",
"'None'",
")",
"self",
".",
"generate_multiline_list",
"(",
"args",
",",
"'r = self.request'",
",",
"compact",
"=",
"False",
")",
"if",
"download_to_file",
":",
"self",
".",
"emit",
"(",
"'self._save_body_to_file(download_path, r[1])'",
")",
"if",
"is_void_type",
"(",
"result_data_type",
")",
":",
"self",
".",
"emit",
"(",
"'return None'",
")",
"else",
":",
"self",
".",
"emit",
"(",
"'return r[0]'",
")",
"else",
":",
"if",
"is_void_type",
"(",
"result_data_type",
")",
":",
"self",
".",
"emit",
"(",
"'return None'",
")",
"else",
":",
"self",
".",
"emit",
"(",
"'return r'",
")",
"self",
".",
"emit",
"(",
")"
] | 42.5 | 18.078431 |
def all(self):
    """ Return a PartitionSearchResult for every indexed partition. """
    query = text("""
        SELECT dataset_vid, vid
        FROM partition_index;""")
    # Every indexed partition matches unconditionally, so each result
    # carries a constant score of 1.
    return [
        PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1)
        for dataset_vid, vid in self.execute(query)]
"def",
"all",
"(",
"self",
")",
":",
"partitions",
"=",
"[",
"]",
"query",
"=",
"text",
"(",
"\"\"\"\n SELECT dataset_vid, vid\n FROM partition_index;\"\"\"",
")",
"for",
"result",
"in",
"self",
".",
"execute",
"(",
"query",
")",
":",
"dataset_vid",
",",
"vid",
"=",
"result",
"partitions",
".",
"append",
"(",
"PartitionSearchResult",
"(",
"dataset_vid",
"=",
"dataset_vid",
",",
"vid",
"=",
"vid",
",",
"score",
"=",
"1",
")",
")",
"return",
"partitions"
] | 33.083333 | 16.916667 |
def install_var(instance, clear_target, clear_all):
    """Install required folders in /var

    Creates (and optionally clears) the library, backup and cache
    directories for one hfos instance, chowns each created directory to
    the ``hfos`` user, and makes sure the instance log file exists.

    :param instance: name of the hfos instance
    :param clear_target: if True, wipe existing *cache* directories first
    :param clear_all: if True, wipe *all* existing target directories first
    """
    _check_root()

    log("Checking frontend library and cache directories",
        emitter='MANAGE')

    uid = pwd.getpwnam("hfos").pw_uid
    gid = grp.getgrnam("hfos").gr_gid

    join = os.path.join

    # If these need changes, make sure they are watertight and don't remove
    # wanted stuff!
    target_paths = (
        '/var/www/challenges',  # For LetsEncrypt acme certificate challenges
        join('/var/lib/hfos', instance),
        join('/var/local/hfos', instance),
        join('/var/local/hfos', instance, 'backup'),
        join('/var/cache/hfos', instance),
        join('/var/cache/hfos', instance, 'tilecache'),
        join('/var/cache/hfos', instance, 'rastertiles'),
        join('/var/cache/hfos', instance, 'rastercache')
    )
    logfile = "/var/log/hfos-" + instance + ".log"

    for item in target_paths:
        if os.path.exists(item):
            log("Path already exists: " + item)
            if clear_all or (clear_target and 'cache' in item):
                log("Cleaning up: " + item, lvl=warn)
                shutil.rmtree(item)

        if not os.path.exists(item):
            log("Creating path: " + item)
            # makedirs (not mkdir) so a missing parent directory such as
            # /var/lib/hfos does not abort the installation.
            os.makedirs(item)
            os.chown(item, uid, gid)

    # Touch logfile to make sure it exists
    open(logfile, "a").close()
    os.chown(logfile, uid, gid)

    log("Done: Install Var")
"def",
"install_var",
"(",
"instance",
",",
"clear_target",
",",
"clear_all",
")",
":",
"_check_root",
"(",
")",
"log",
"(",
"\"Checking frontend library and cache directories\"",
",",
"emitter",
"=",
"'MANAGE'",
")",
"uid",
"=",
"pwd",
".",
"getpwnam",
"(",
"\"hfos\"",
")",
".",
"pw_uid",
"gid",
"=",
"grp",
".",
"getgrnam",
"(",
"\"hfos\"",
")",
".",
"gr_gid",
"join",
"=",
"os",
".",
"path",
".",
"join",
"# If these need changes, make sure they are watertight and don't remove",
"# wanted stuff!",
"target_paths",
"=",
"(",
"'/var/www/challenges'",
",",
"# For LetsEncrypt acme certificate challenges",
"join",
"(",
"'/var/lib/hfos'",
",",
"instance",
")",
",",
"join",
"(",
"'/var/local/hfos'",
",",
"instance",
")",
",",
"join",
"(",
"'/var/local/hfos'",
",",
"instance",
",",
"'backup'",
")",
",",
"join",
"(",
"'/var/cache/hfos'",
",",
"instance",
")",
",",
"join",
"(",
"'/var/cache/hfos'",
",",
"instance",
",",
"'tilecache'",
")",
",",
"join",
"(",
"'/var/cache/hfos'",
",",
"instance",
",",
"'rastertiles'",
")",
",",
"join",
"(",
"'/var/cache/hfos'",
",",
"instance",
",",
"'rastercache'",
")",
")",
"logfile",
"=",
"\"/var/log/hfos-\"",
"+",
"instance",
"+",
"\".log\"",
"for",
"item",
"in",
"target_paths",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"item",
")",
":",
"log",
"(",
"\"Path already exists: \"",
"+",
"item",
")",
"if",
"clear_all",
"or",
"(",
"clear_target",
"and",
"'cache'",
"in",
"item",
")",
":",
"log",
"(",
"\"Cleaning up: \"",
"+",
"item",
",",
"lvl",
"=",
"warn",
")",
"shutil",
".",
"rmtree",
"(",
"item",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"item",
")",
":",
"log",
"(",
"\"Creating path: \"",
"+",
"item",
")",
"os",
".",
"mkdir",
"(",
"item",
")",
"os",
".",
"chown",
"(",
"item",
",",
"uid",
",",
"gid",
")",
"# Touch logfile to make sure it exists",
"open",
"(",
"logfile",
",",
"\"a\"",
")",
".",
"close",
"(",
")",
"os",
".",
"chown",
"(",
"logfile",
",",
"uid",
",",
"gid",
")",
"log",
"(",
"\"Done: Install Var\"",
")"
] | 32.767442 | 17.55814 |
def add_widget(self, widget, column=0):
    """
    Attach a widget to one of this Layout's columns.

    The Layout must already be registered with a Frame. Widgets added
    dynamically after the Scene has started playing should have their
    value explicitly set before the next update.

    :param widget: The widget to attach.
    :param column: Index of the target column. Defaults to zero.
    """
    # A Layout only knows its Frame once it has been added to one; adding
    # widgets before that point cannot work.
    if self._frame is None:
        raise RuntimeError("You must add the Layout to the Frame before you can add a Widget.")

    self._columns[column].append(widget)
    widget.register_frame(self._frame)

    # Seed the widget with any persisted value the Frame already holds.
    frame_data = self._frame.data
    if widget.name in frame_data:
        widget.value = frame_data[widget.name]
"def",
"add_widget",
"(",
"self",
",",
"widget",
",",
"column",
"=",
"0",
")",
":",
"# Make sure that the Layout is fully initialised before we try to add any widgets.",
"if",
"self",
".",
"_frame",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"You must add the Layout to the Frame before you can add a Widget.\"",
")",
"# Now process the widget.",
"self",
".",
"_columns",
"[",
"column",
"]",
".",
"append",
"(",
"widget",
")",
"widget",
".",
"register_frame",
"(",
"self",
".",
"_frame",
")",
"if",
"widget",
".",
"name",
"in",
"self",
".",
"_frame",
".",
"data",
":",
"widget",
".",
"value",
"=",
"self",
".",
"_frame",
".",
"data",
"[",
"widget",
".",
"name",
"]"
] | 42.6 | 23.6 |
def run_sorters(sorter_list, recording_dict_or_list, working_folder, grouping_property=None,
                shared_binary_copy=False, engine=None, engine_kargs=None, debug=False, write_log=True):
    """
    Run several sorters on several recordings (nested loops over both).

    Note: engine='multiprocessing' uses the python multiprocessing module.
    This does not allow subprocesses inside subprocesses, so sorters that
    already use multiprocessing internally will fail.

    Parameters
    ----------
    sorter_list: list of str
        List of sorter names.
    recording_dict_or_list: dict or list
        A dict of recordings. The key will be the name of the recording.
        If a list is given then the names will be recording_0, recording_1, ...
    working_folder: a path
        The working directory.
        This must not exist before calling this function.
    grouping_property:
        The property of grouping given to sorters.
    shared_binary_copy: bool, default False
        Before running each sorter, all recordings are copied inside
        the working_folder in raw binary format (BinDatRecordingExtractor)
        and new recordings are created as BinDatRecordingExtractor.
        This avoids multiple copies of the same file inside each sorter,
        but implies one global copy of all files.
    engine: 'loop' or 'multiprocessing'
    engine_kargs: dict or None
        Kwargs specific to the launcher engine:
            * 'loop' : no kargs
            * 'multiprocessing' : {'processes' : } number of processes
    debug: bool, default False
    write_log: bool, default True

    Output
    ----------
    results : dict
        Nested dict[rec_name][sorter_name] of SortingExtractor.
    """
    # None-sentinel instead of a mutable {} default, which would be shared
    # across calls.
    if engine_kargs is None:
        engine_kargs = {}

    assert not os.path.exists(working_folder), 'working_folder already exists, please remove it'
    working_folder = Path(working_folder)

    for sorter_name in sorter_list:
        assert sorter_name in sorter_dict, '{} is not in sorter list'.format(sorter_name)

    if isinstance(recording_dict_or_list, list):
        # in case of list
        recording_dict = {'recording_{}'.format(i): rec for i, rec in enumerate(recording_dict_or_list)}
    elif isinstance(recording_dict_or_list, dict):
        recording_dict = recording_dict_or_list
    else:
        raise(ValueError('bad recording dict'))

    if shared_binary_copy:
        # Dump every recording once as raw binary + PRB probe file, then
        # rebuild the dict with extractors that read those shared copies.
        os.makedirs(working_folder / 'raw_files')
        old_rec_dict = dict(recording_dict)
        recording_dict = {}
        for rec_name, recording in old_rec_dict.items():
            if grouping_property is not None:
                recording_list = se.get_sub_extractors_by_property(recording, grouping_property)
                n_group = len(recording_list)
                assert n_group == 1, 'shared_binary_copy work only when one group'
                recording = recording_list[0]
                grouping_property = None

            raw_filename = working_folder / 'raw_files' / (rec_name + '.raw')
            prb_filename = working_folder / 'raw_files' / (rec_name + '.prb')

            n_chan = recording.get_num_channels()
            # ~16M samples per chunk, split across channels.
            chunksize = 2**24 // n_chan
            sr = recording.get_sampling_frequency()

            # save binary
            se.write_binary_dat_format(recording, raw_filename, time_axis=0, dtype='float32', chunksize=chunksize)
            # save location (with PRB format)
            se.save_probe_file(recording, prb_filename, format='spyking_circus')
            # make new recording
            new_rec = se.BinDatRecordingExtractor(raw_filename, sr, n_chan, 'float32', frames_first=True)
            se.load_probe_file(new_rec, prb_filename)
            recording_dict[rec_name] = new_rec

    # One task per (recording, sorter) pair.
    task_list = []
    for rec_name, recording in recording_dict.items():
        for sorter_name in sorter_list:
            output_folder = working_folder / 'output_folders' / rec_name / sorter_name
            task_list.append((rec_name, recording, sorter_name, output_folder, grouping_property, debug, write_log))

    if engine is None or engine == 'loop':
        # simple loop in main process
        for arg_list in task_list:
            print(arg_list)
            _run_one(arg_list)

    elif engine == 'multiprocessing':
        # use mp.Pool
        processes = engine_kargs.get('processes', None)
        pool = multiprocessing.Pool(processes)
        pool.map(_run_one, task_list)

    if write_log:
        # collect run times and write them to a csv file
        with open(working_folder / 'run_time.csv', mode='w') as f:
            for task in task_list:
                rec_name = task[0]
                sorter_name = task[2]
                output_folder = task[3]
                if os.path.exists(output_folder / 'run_log.txt'):
                    with open(output_folder / 'run_log.txt', mode='r') as logfile:
                        run_time = float(logfile.readline().replace('run_time:', ''))
                    txt = '{}\t{}\t{}\n'.format(rec_name, sorter_name, run_time)
                    f.write(txt)

    results = collect_results(working_folder)
    return results
"def",
"run_sorters",
"(",
"sorter_list",
",",
"recording_dict_or_list",
",",
"working_folder",
",",
"grouping_property",
"=",
"None",
",",
"shared_binary_copy",
"=",
"False",
",",
"engine",
"=",
"None",
",",
"engine_kargs",
"=",
"{",
"}",
",",
"debug",
"=",
"False",
",",
"write_log",
"=",
"True",
")",
":",
"assert",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"working_folder",
")",
",",
"'working_folder already exists, please remove it'",
"working_folder",
"=",
"Path",
"(",
"working_folder",
")",
"for",
"sorter_name",
"in",
"sorter_list",
":",
"assert",
"sorter_name",
"in",
"sorter_dict",
",",
"'{} is not in sorter list'",
".",
"format",
"(",
"sorter_name",
")",
"if",
"isinstance",
"(",
"recording_dict_or_list",
",",
"list",
")",
":",
"# in case of list",
"recording_dict",
"=",
"{",
"'recording_{}'",
".",
"format",
"(",
"i",
")",
":",
"rec",
"for",
"i",
",",
"rec",
"in",
"enumerate",
"(",
"recording_dict_or_list",
")",
"}",
"elif",
"isinstance",
"(",
"recording_dict_or_list",
",",
"dict",
")",
":",
"recording_dict",
"=",
"recording_dict_or_list",
"else",
":",
"raise",
"(",
"ValueError",
"(",
"'bad recording dict'",
")",
")",
"if",
"shared_binary_copy",
":",
"os",
".",
"makedirs",
"(",
"working_folder",
"/",
"'raw_files'",
")",
"old_rec_dict",
"=",
"dict",
"(",
"recording_dict",
")",
"recording_dict",
"=",
"{",
"}",
"for",
"rec_name",
",",
"recording",
"in",
"old_rec_dict",
".",
"items",
"(",
")",
":",
"if",
"grouping_property",
"is",
"not",
"None",
":",
"recording_list",
"=",
"se",
".",
"get_sub_extractors_by_property",
"(",
"recording",
",",
"grouping_property",
")",
"n_group",
"=",
"len",
"(",
"recording_list",
")",
"assert",
"n_group",
"==",
"1",
",",
"'shared_binary_copy work only when one group'",
"recording",
"=",
"recording_list",
"[",
"0",
"]",
"grouping_property",
"=",
"None",
"raw_filename",
"=",
"working_folder",
"/",
"'raw_files'",
"/",
"(",
"rec_name",
"+",
"'.raw'",
")",
"prb_filename",
"=",
"working_folder",
"/",
"'raw_files'",
"/",
"(",
"rec_name",
"+",
"'.prb'",
")",
"n_chan",
"=",
"recording",
".",
"get_num_channels",
"(",
")",
"chunksize",
"=",
"2",
"**",
"24",
"//",
"n_chan",
"sr",
"=",
"recording",
".",
"get_sampling_frequency",
"(",
")",
"# save binary",
"se",
".",
"write_binary_dat_format",
"(",
"recording",
",",
"raw_filename",
",",
"time_axis",
"=",
"0",
",",
"dtype",
"=",
"'float32'",
",",
"chunksize",
"=",
"chunksize",
")",
"# save location (with PRB format)",
"se",
".",
"save_probe_file",
"(",
"recording",
",",
"prb_filename",
",",
"format",
"=",
"'spyking_circus'",
")",
"# make new recording",
"new_rec",
"=",
"se",
".",
"BinDatRecordingExtractor",
"(",
"raw_filename",
",",
"sr",
",",
"n_chan",
",",
"'float32'",
",",
"frames_first",
"=",
"True",
")",
"se",
".",
"load_probe_file",
"(",
"new_rec",
",",
"prb_filename",
")",
"recording_dict",
"[",
"rec_name",
"]",
"=",
"new_rec",
"task_list",
"=",
"[",
"]",
"for",
"rec_name",
",",
"recording",
"in",
"recording_dict",
".",
"items",
"(",
")",
":",
"for",
"sorter_name",
"in",
"sorter_list",
":",
"output_folder",
"=",
"working_folder",
"/",
"'output_folders'",
"/",
"rec_name",
"/",
"sorter_name",
"task_list",
".",
"append",
"(",
"(",
"rec_name",
",",
"recording",
",",
"sorter_name",
",",
"output_folder",
",",
"grouping_property",
",",
"debug",
",",
"write_log",
")",
")",
"if",
"engine",
"is",
"None",
"or",
"engine",
"==",
"'loop'",
":",
"# simple loop in main process",
"for",
"arg_list",
"in",
"task_list",
":",
"print",
"(",
"arg_list",
")",
"_run_one",
"(",
"arg_list",
")",
"elif",
"engine",
"==",
"'multiprocessing'",
":",
"# use mp.Pool",
"processes",
"=",
"engine_kargs",
".",
"get",
"(",
"'processes'",
",",
"None",
")",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
")",
"pool",
".",
"map",
"(",
"_run_one",
",",
"task_list",
")",
"if",
"write_log",
":",
"# collect run time and write to cvs",
"with",
"open",
"(",
"working_folder",
"/",
"'run_time.csv'",
",",
"mode",
"=",
"'w'",
")",
"as",
"f",
":",
"for",
"task",
"in",
"task_list",
":",
"rec_name",
"=",
"task",
"[",
"0",
"]",
"sorter_name",
"=",
"task",
"[",
"2",
"]",
"output_folder",
"=",
"task",
"[",
"3",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_folder",
"/",
"'run_log.txt'",
")",
":",
"with",
"open",
"(",
"output_folder",
"/",
"'run_log.txt'",
",",
"mode",
"=",
"'r'",
")",
"as",
"logfile",
":",
"run_time",
"=",
"float",
"(",
"logfile",
".",
"readline",
"(",
")",
".",
"replace",
"(",
"'run_time:'",
",",
"''",
")",
")",
"txt",
"=",
"'{}\\t{}\\t{}\\n'",
".",
"format",
"(",
"rec_name",
",",
"sorter_name",
",",
"run_time",
")",
"f",
".",
"write",
"(",
"txt",
")",
"results",
"=",
"collect_results",
"(",
"working_folder",
")",
"return",
"results"
] | 38.062937 | 23.657343 |
def make_uniq_for_step(ctx, ukeys, step, stage, full_data, clean_missing_after_seconds, to_uniq):
    """initially just a copy from UNIQ_PULL"""
    # TODO:
    # this still seems to work ok for Storage types json/bubble,
    # for DS we need to reload the dumped step to uniqify
    if not ukeys:
        # Nothing to key on: hand the items back untouched.
        return to_uniq

    uniq_data = bubble_lod_load(ctx, step, stage)
    ctx.say('Creating uniq identifiers for [' + step + '] information', 0)
    ctx.gbc.say('uniq_data:', stuff=uniq_data, verbosity=1000)

    # TODO: make: data->keyed.items
    uniq_step_res = make_uniq(
        ctx=ctx,
        ldict=to_uniq,
        keyed=uniq_data,
        uniqstr=ukeys,
        tag=step,
        full_data=full_data,
        remove_missing_after_seconds=clean_missing_after_seconds)
    ctx.gbc.say('uniq_step_res:', stuff=uniq_step_res, verbosity=1000)

    newest = get_newest_uniq(ctx.gbc, uniq_step_res)
    # TODO: selected pulled only from slice of uniq
    # PROBLEM: slice of pull is not equal to slice of newest uniq,
    # can only select keys from newest, from slice of pulled
    # need a uid list from to_transform
    # to_transform = get_gen_slice(gbc, to_transform_newest, amount, index)
    # for now not a big problem, as with 'pump' there should be no problem
    to_uniq = newest

    # TODO: make keyed.items->data
    uniq_res_list = get_uniq_list(ctx.gbc, uniq_step_res)
    pfr = bubble_lod_dump(
        ctx=ctx,
        step=step,
        stage=stage,
        full_data=full_data,
        reset=True,
        data_gen=uniq_res_list)
    ctx.gbc.say('saved uniq ' + step + ' data res:',
                stuff=pfr, verbosity=700)
    return to_uniq
"def",
"make_uniq_for_step",
"(",
"ctx",
",",
"ukeys",
",",
"step",
",",
"stage",
",",
"full_data",
",",
"clean_missing_after_seconds",
",",
"to_uniq",
")",
":",
"# TODO:",
"# this still seems to work ok for Storage types json/bubble,",
"# for DS we need to reload de dumped step to uniqify",
"if",
"not",
"ukeys",
":",
"return",
"to_uniq",
"else",
":",
"uniq_data",
"=",
"bubble_lod_load",
"(",
"ctx",
",",
"step",
",",
"stage",
")",
"ctx",
".",
"say",
"(",
"'Creating uniq identifiers for ['",
"+",
"step",
"+",
"'] information'",
",",
"0",
")",
"ctx",
".",
"gbc",
".",
"say",
"(",
"'uniq_data:'",
",",
"stuff",
"=",
"uniq_data",
",",
"verbosity",
"=",
"1000",
")",
"# TODO:make: data->keyed.items",
"uniq_step_res",
"=",
"make_uniq",
"(",
"ctx",
"=",
"ctx",
",",
"ldict",
"=",
"to_uniq",
",",
"keyed",
"=",
"uniq_data",
",",
"uniqstr",
"=",
"ukeys",
",",
"tag",
"=",
"step",
",",
"full_data",
"=",
"full_data",
",",
"remove_missing_after_seconds",
"=",
"clean_missing_after_seconds",
")",
"ctx",
".",
"gbc",
".",
"say",
"(",
"'uniq_step_res:'",
",",
"stuff",
"=",
"uniq_step_res",
",",
"verbosity",
"=",
"1000",
")",
"to_uniq_newest",
"=",
"get_newest_uniq",
"(",
"ctx",
".",
"gbc",
",",
"uniq_step_res",
")",
"# TODO: selected pulled only from slice of uniq",
"# PROBLEM: slice of pull is not equal to slice of newest uniq,",
"# can only select keys from newest, from slice of pulled",
"# need a uid list from to_transform",
"# to_transform = get_gen_slice(gbc, to_transform_newest, amount, index)",
"# for now not a big problem, as with 'pump' there should be no problem",
"to_uniq",
"=",
"to_uniq_newest",
"# todo make keyed.items->data",
"uniq_res_list",
"=",
"get_uniq_list",
"(",
"ctx",
".",
"gbc",
",",
"uniq_step_res",
")",
"reset",
"=",
"True",
"pfr",
"=",
"bubble_lod_dump",
"(",
"ctx",
"=",
"ctx",
",",
"step",
"=",
"step",
",",
"stage",
"=",
"stage",
",",
"full_data",
"=",
"full_data",
",",
"reset",
"=",
"reset",
",",
"data_gen",
"=",
"uniq_res_list",
")",
"ctx",
".",
"gbc",
".",
"say",
"(",
"'saved uniq '",
"+",
"step",
"+",
"' data res:'",
",",
"stuff",
"=",
"pfr",
",",
"verbosity",
"=",
"700",
")",
"return",
"to_uniq"
] | 41.625 | 21.25 |
def load_corpus(*data_file_paths):
    """
    Yield the data of each specified corpus file.

    For every path, yields a ``(conversations, categories, file_path)``
    tuple read from that corpus.
    """
    for path in data_file_paths:
        corpus_data = read_corpus(path)
        conversations = list(corpus_data.get('conversations', []))
        categories = corpus_data.get('categories', [])
        yield conversations, categories, path
"def",
"load_corpus",
"(",
"*",
"data_file_paths",
")",
":",
"for",
"file_path",
"in",
"data_file_paths",
":",
"corpus",
"=",
"[",
"]",
"corpus_data",
"=",
"read_corpus",
"(",
"file_path",
")",
"conversations",
"=",
"corpus_data",
".",
"get",
"(",
"'conversations'",
",",
"[",
"]",
")",
"corpus",
".",
"extend",
"(",
"conversations",
")",
"categories",
"=",
"corpus_data",
".",
"get",
"(",
"'categories'",
",",
"[",
"]",
")",
"yield",
"corpus",
",",
"categories",
",",
"file_path"
] | 28.357143 | 15.071429 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.