text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def sh(cmd, ignore_error=False, cwd=None, shell=False, **kwargs):
"""
Execute a command with subprocess.Popen and block until output
Args:
cmd (tuple or str): same as subprocess.Popen args
Keyword Arguments:
ignore_error (bool): if False, raise an Exception if p.returncode is
not 0
cwd (str): current working directory path to run cmd with
shell (bool): subprocess.Popen ``shell`` kwarg
Returns:
str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``)
Raises:
Exception: if ignore_error is true and returncode is not zero
.. note:: this executes commands with ``shell=True``: careful with
shell-escaping.
"""
kwargs.update({
'shell': shell,
'cwd': cwd,
'stderr': subprocess.STDOUT,
'stdout': subprocess.PIPE,})
log.debug((('cmd', cmd), ('kwargs', kwargs)))
p = subprocess.Popen(cmd, universal_newlines=True, **kwargs)
p_stdout = p.communicate()[0]
if p.returncode and not ignore_error:
raise subprocess.CalledProcessError(p.returncode, cmd, p_stdout)
return p_stdout | [
"def",
"sh",
"(",
"cmd",
",",
"ignore_error",
"=",
"False",
",",
"cwd",
"=",
"None",
",",
"shell",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"'shell'",
":",
"shell",
",",
"'cwd'",
":",
"cwd",
",",
"'stderr'",
":",
"subprocess",
".",
"STDOUT",
",",
"'stdout'",
":",
"subprocess",
".",
"PIPE",
",",
"}",
")",
"log",
".",
"debug",
"(",
"(",
"(",
"'cmd'",
",",
"cmd",
")",
",",
"(",
"'kwargs'",
",",
"kwargs",
")",
")",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"universal_newlines",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"p_stdout",
"=",
"p",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"if",
"p",
".",
"returncode",
"and",
"not",
"ignore_error",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"p",
".",
"returncode",
",",
"cmd",
",",
"p_stdout",
")",
"return",
"p_stdout"
] | 32.794118 | 23.088235 |
def OnApprove(self, event):
"""File approve event handler"""
if not self.main_window.safe_mode:
return
msg = _(u"You are going to approve and trust a file that\n"
u"you have not created yourself.\n"
u"After proceeding, the file is executed.\n \n"
u"It may harm your system as any program can.\n"
u"Please check all cells thoroughly before\nproceeding.\n \n"
u"Proceed and sign this file as trusted?")
short_msg = _("Security warning")
if self.main_window.interfaces.get_warning_choice(msg, short_msg):
# Leave safe mode
self.main_window.grid.actions.leave_safe_mode()
# Display safe mode end in status bar
statustext = _("Safe mode deactivated.")
post_command_event(self.main_window, self.main_window.StatusBarMsg,
text=statustext) | [
"def",
"OnApprove",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"self",
".",
"main_window",
".",
"safe_mode",
":",
"return",
"msg",
"=",
"_",
"(",
"u\"You are going to approve and trust a file that\\n\"",
"u\"you have not created yourself.\\n\"",
"u\"After proceeding, the file is executed.\\n \\n\"",
"u\"It may harm your system as any program can.\\n\"",
"u\"Please check all cells thoroughly before\\nproceeding.\\n \\n\"",
"u\"Proceed and sign this file as trusted?\"",
")",
"short_msg",
"=",
"_",
"(",
"\"Security warning\"",
")",
"if",
"self",
".",
"main_window",
".",
"interfaces",
".",
"get_warning_choice",
"(",
"msg",
",",
"short_msg",
")",
":",
"# Leave safe mode",
"self",
".",
"main_window",
".",
"grid",
".",
"actions",
".",
"leave_safe_mode",
"(",
")",
"# Display safe mode end in status bar",
"statustext",
"=",
"_",
"(",
"\"Safe mode deactivated.\"",
")",
"post_command_event",
"(",
"self",
".",
"main_window",
",",
"self",
".",
"main_window",
".",
"StatusBarMsg",
",",
"text",
"=",
"statustext",
")"
] | 39.041667 | 22.875 |
def _check_by_re(self, url_data, content):
""" Finds urls by re.
:param url_data: object for url storing
:param content: file content
"""
for link_re in self._link_res:
for u in link_re.finditer(content):
self._save_url(url_data, content, u.group(1), u.start(1)) | [
"def",
"_check_by_re",
"(",
"self",
",",
"url_data",
",",
"content",
")",
":",
"for",
"link_re",
"in",
"self",
".",
"_link_res",
":",
"for",
"u",
"in",
"link_re",
".",
"finditer",
"(",
"content",
")",
":",
"self",
".",
"_save_url",
"(",
"url_data",
",",
"content",
",",
"u",
".",
"group",
"(",
"1",
")",
",",
"u",
".",
"start",
"(",
"1",
")",
")"
] | 35.888889 | 10.555556 |
def paste_template(self, template_name, template=None, deploy_dir=None):
" Paste template. "
LOGGER.debug("Paste template: %s" % template_name)
deploy_dir = deploy_dir or self.deploy_dir
template = template or self._get_template_path(template_name)
self.read([op.join(template, settings.CFGNAME)], extending=True)
for fname in gen_template_files(template):
curdir = op.join(deploy_dir, op.dirname(fname))
if not op.exists(curdir):
makedirs(curdir)
source = op.join(template, fname)
target = op.join(deploy_dir, fname)
copy2(source, target)
name, ext = op.splitext(fname)
if ext == '.tmpl':
t = Template.from_filename(target, namespace=self.as_dict())
with open(op.join(deploy_dir, name), 'w') as f:
f.write(t.substitute())
remove(target)
return deploy_dir | [
"def",
"paste_template",
"(",
"self",
",",
"template_name",
",",
"template",
"=",
"None",
",",
"deploy_dir",
"=",
"None",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"Paste template: %s\"",
"%",
"template_name",
")",
"deploy_dir",
"=",
"deploy_dir",
"or",
"self",
".",
"deploy_dir",
"template",
"=",
"template",
"or",
"self",
".",
"_get_template_path",
"(",
"template_name",
")",
"self",
".",
"read",
"(",
"[",
"op",
".",
"join",
"(",
"template",
",",
"settings",
".",
"CFGNAME",
")",
"]",
",",
"extending",
"=",
"True",
")",
"for",
"fname",
"in",
"gen_template_files",
"(",
"template",
")",
":",
"curdir",
"=",
"op",
".",
"join",
"(",
"deploy_dir",
",",
"op",
".",
"dirname",
"(",
"fname",
")",
")",
"if",
"not",
"op",
".",
"exists",
"(",
"curdir",
")",
":",
"makedirs",
"(",
"curdir",
")",
"source",
"=",
"op",
".",
"join",
"(",
"template",
",",
"fname",
")",
"target",
"=",
"op",
".",
"join",
"(",
"deploy_dir",
",",
"fname",
")",
"copy2",
"(",
"source",
",",
"target",
")",
"name",
",",
"ext",
"=",
"op",
".",
"splitext",
"(",
"fname",
")",
"if",
"ext",
"==",
"'.tmpl'",
":",
"t",
"=",
"Template",
".",
"from_filename",
"(",
"target",
",",
"namespace",
"=",
"self",
".",
"as_dict",
"(",
")",
")",
"with",
"open",
"(",
"op",
".",
"join",
"(",
"deploy_dir",
",",
"name",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"t",
".",
"substitute",
"(",
")",
")",
"remove",
"(",
"target",
")",
"return",
"deploy_dir"
] | 40 | 18.833333 |
def run_nohup(self, cmd, working_dir=None):
"""
:param cmd:
:param working_dir: 当前的工作目录,如果没有 home 目录,会因为一些原因导致运行失败,比如没有无法创建 nohup.out
:return:
"""
cmd = 'nohup %s &\n\n' % cmd
if working_dir is not None:
cmd = 'cd {}; {}'.format(working_dir, cmd)
self.run_expect_command(cmd) | [
"def",
"run_nohup",
"(",
"self",
",",
"cmd",
",",
"working_dir",
"=",
"None",
")",
":",
"cmd",
"=",
"'nohup %s &\\n\\n'",
"%",
"cmd",
"if",
"working_dir",
"is",
"not",
"None",
":",
"cmd",
"=",
"'cd {}; {}'",
".",
"format",
"(",
"working_dir",
",",
"cmd",
")",
"self",
".",
"run_expect_command",
"(",
"cmd",
")"
] | 31.090909 | 14.181818 |
def get_bootstrap(cls, name, ctx):
'''Returns an instance of a bootstrap with the given name.
This is the only way you should access a bootstrap class, as
it sets the bootstrap directory correctly.
'''
if name is None:
return None
if not hasattr(cls, 'bootstraps'):
cls.bootstraps = {}
if name in cls.bootstraps:
return cls.bootstraps[name]
mod = importlib.import_module('pythonforandroid.bootstraps.{}'
.format(name))
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
bootstrap = mod.bootstrap
bootstrap.bootstrap_dir = join(ctx.root_dir, 'bootstraps', name)
bootstrap.ctx = ctx
return bootstrap | [
"def",
"get_bootstrap",
"(",
"cls",
",",
"name",
",",
"ctx",
")",
":",
"if",
"name",
"is",
"None",
":",
"return",
"None",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"'bootstraps'",
")",
":",
"cls",
".",
"bootstraps",
"=",
"{",
"}",
"if",
"name",
"in",
"cls",
".",
"bootstraps",
":",
"return",
"cls",
".",
"bootstraps",
"[",
"name",
"]",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"'pythonforandroid.bootstraps.{}'",
".",
"format",
"(",
"name",
")",
")",
"if",
"len",
"(",
"logger",
".",
"handlers",
")",
">",
"1",
":",
"logger",
".",
"removeHandler",
"(",
"logger",
".",
"handlers",
"[",
"1",
"]",
")",
"bootstrap",
"=",
"mod",
".",
"bootstrap",
"bootstrap",
".",
"bootstrap_dir",
"=",
"join",
"(",
"ctx",
".",
"root_dir",
",",
"'bootstraps'",
",",
"name",
")",
"bootstrap",
".",
"ctx",
"=",
"ctx",
"return",
"bootstrap"
] | 39.4 | 15.8 |
def load_class(self, classname):
"""
Loads a class looking for it in each module registered.
:param classname: Class name you want to load.
:type classname: str
:return: Class object
:rtype: type
"""
module_list = self._get_module_list()
for module in module_list:
try:
return import_class(classname, module.__name__)
except (AttributeError, ImportError):
pass
raise ImportError("Class '{0}' could not be loaded.".format(classname)) | [
"def",
"load_class",
"(",
"self",
",",
"classname",
")",
":",
"module_list",
"=",
"self",
".",
"_get_module_list",
"(",
")",
"for",
"module",
"in",
"module_list",
":",
"try",
":",
"return",
"import_class",
"(",
"classname",
",",
"module",
".",
"__name__",
")",
"except",
"(",
"AttributeError",
",",
"ImportError",
")",
":",
"pass",
"raise",
"ImportError",
"(",
"\"Class '{0}' could not be loaded.\"",
".",
"format",
"(",
"classname",
")",
")"
] | 29.157895 | 19.684211 |
def rm_docs(self):
"""Remove converted docs."""
for filename in self.created:
if os.path.exists(filename):
os.unlink(filename) | [
"def",
"rm_docs",
"(",
"self",
")",
":",
"for",
"filename",
"in",
"self",
".",
"created",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"os",
".",
"unlink",
"(",
"filename",
")"
] | 33.2 | 6 |
def get(self, content_id, feature_names=None):
'''Retrieve a feature collection.
If a feature collection with the given id does not
exist, then ``None`` is returned.
:param str content_id: Content identifier.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
:rtype: :class:`dossier.fc.FeatureCollection` or ``None``
'''
try:
resp = self.conn.get(index=self.index, doc_type=self.type,
id=eid(content_id),
_source=self._source(feature_names))
return self.fc_from_dict(resp['_source']['fc'])
except NotFoundError:
return None
except:
raise | [
"def",
"get",
"(",
"self",
",",
"content_id",
",",
"feature_names",
"=",
"None",
")",
":",
"try",
":",
"resp",
"=",
"self",
".",
"conn",
".",
"get",
"(",
"index",
"=",
"self",
".",
"index",
",",
"doc_type",
"=",
"self",
".",
"type",
",",
"id",
"=",
"eid",
"(",
"content_id",
")",
",",
"_source",
"=",
"self",
".",
"_source",
"(",
"feature_names",
")",
")",
"return",
"self",
".",
"fc_from_dict",
"(",
"resp",
"[",
"'_source'",
"]",
"[",
"'fc'",
"]",
")",
"except",
"NotFoundError",
":",
"return",
"None",
"except",
":",
"raise"
] | 38.761905 | 19.52381 |
def build(id=None, name=None, revision=None,
temporary_build=False, timestamp_alignment=False,
no_build_dependencies=False,
keep_pod_on_failure=False,
force_rebuild=False,
rebuild_mode=common.REBUILD_MODES_DEFAULT):
"""
Trigger a BuildConfiguration by name or ID
"""
data = build_raw(id, name, revision, temporary_build, timestamp_alignment, no_build_dependencies,
keep_pod_on_failure, force_rebuild, rebuild_mode)
if data:
return utils.format_json(data) | [
"def",
"build",
"(",
"id",
"=",
"None",
",",
"name",
"=",
"None",
",",
"revision",
"=",
"None",
",",
"temporary_build",
"=",
"False",
",",
"timestamp_alignment",
"=",
"False",
",",
"no_build_dependencies",
"=",
"False",
",",
"keep_pod_on_failure",
"=",
"False",
",",
"force_rebuild",
"=",
"False",
",",
"rebuild_mode",
"=",
"common",
".",
"REBUILD_MODES_DEFAULT",
")",
":",
"data",
"=",
"build_raw",
"(",
"id",
",",
"name",
",",
"revision",
",",
"temporary_build",
",",
"timestamp_alignment",
",",
"no_build_dependencies",
",",
"keep_pod_on_failure",
",",
"force_rebuild",
",",
"rebuild_mode",
")",
"if",
"data",
":",
"return",
"utils",
".",
"format_json",
"(",
"data",
")"
] | 41.076923 | 13.230769 |
def node_predictions(self):
""" Determines which rows fall into which node """
pred = np.zeros(self.data_size)
for node in self:
if node.is_terminal:
pred[node.indices] = node.node_id
return pred | [
"def",
"node_predictions",
"(",
"self",
")",
":",
"pred",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"data_size",
")",
"for",
"node",
"in",
"self",
":",
"if",
"node",
".",
"is_terminal",
":",
"pred",
"[",
"node",
".",
"indices",
"]",
"=",
"node",
".",
"node_id",
"return",
"pred"
] | 35.571429 | 9.571429 |
def set_defaults(self, config_file):
"""Set defaults.
"""
self.defaults = Defaults(config_file)
self.python = Python()
self.setuptools = Setuptools()
self.docutils = Docutils()
self.styles = self.defaults.styles
self.browser = self.defaults.browser
self.list = False | [
"def",
"set_defaults",
"(",
"self",
",",
"config_file",
")",
":",
"self",
".",
"defaults",
"=",
"Defaults",
"(",
"config_file",
")",
"self",
".",
"python",
"=",
"Python",
"(",
")",
"self",
".",
"setuptools",
"=",
"Setuptools",
"(",
")",
"self",
".",
"docutils",
"=",
"Docutils",
"(",
")",
"self",
".",
"styles",
"=",
"self",
".",
"defaults",
".",
"styles",
"self",
".",
"browser",
"=",
"self",
".",
"defaults",
".",
"browser",
"self",
".",
"list",
"=",
"False"
] | 32.9 | 4.8 |
def print_info(self, capture):
"""Prints information about the unprocessed image.
Reads one frame from the source to determine image colors, dimensions
and data types.
Args:
capture: the source to read from.
"""
self.frame_offset += 1
ret, frame = capture.read()
if ret:
print('Capture Information')
print('\tDimensions (HxW): {}x{}'.format(*frame.shape[0:2]))
print('\tColor channels: {}'.format(frame.shape[2] if
len(frame.shape) > 2 else 1))
print('\tColor range: {}-{}'.format(np.min(frame),
np.max(frame)))
print('\tdtype: {}'.format(frame.dtype))
else:
print('No source found.') | [
"def",
"print_info",
"(",
"self",
",",
"capture",
")",
":",
"self",
".",
"frame_offset",
"+=",
"1",
"ret",
",",
"frame",
"=",
"capture",
".",
"read",
"(",
")",
"if",
"ret",
":",
"print",
"(",
"'Capture Information'",
")",
"print",
"(",
"'\\tDimensions (HxW): {}x{}'",
".",
"format",
"(",
"*",
"frame",
".",
"shape",
"[",
"0",
":",
"2",
"]",
")",
")",
"print",
"(",
"'\\tColor channels: {}'",
".",
"format",
"(",
"frame",
".",
"shape",
"[",
"2",
"]",
"if",
"len",
"(",
"frame",
".",
"shape",
")",
">",
"2",
"else",
"1",
")",
")",
"print",
"(",
"'\\tColor range: {}-{}'",
".",
"format",
"(",
"np",
".",
"min",
"(",
"frame",
")",
",",
"np",
".",
"max",
"(",
"frame",
")",
")",
")",
"print",
"(",
"'\\tdtype: {}'",
".",
"format",
"(",
"frame",
".",
"dtype",
")",
")",
"else",
":",
"print",
"(",
"'No source found.'",
")"
] | 40.142857 | 20.095238 |
def get_url(pif, dataset, version=1, site="https://citrination.com"):
"""
Construct the URL of a PIF on a site
:param pif: to construct URL for
:param dataset: the pif will belong to
:param version: of the PIF (default: 1)
:param site: for the dataset (default: https://citrination.com)
:return: the URL as a string
"""
return "{site}/datasets/{dataset}/version/{version}/pif/{uid}".format(
uid=pif.uid, version=version, dataset=dataset, site=site
) | [
"def",
"get_url",
"(",
"pif",
",",
"dataset",
",",
"version",
"=",
"1",
",",
"site",
"=",
"\"https://citrination.com\"",
")",
":",
"return",
"\"{site}/datasets/{dataset}/version/{version}/pif/{uid}\"",
".",
"format",
"(",
"uid",
"=",
"pif",
".",
"uid",
",",
"version",
"=",
"version",
",",
"dataset",
"=",
"dataset",
",",
"site",
"=",
"site",
")"
] | 40.5 | 13.833333 |
def _set_metric(self, metric_name, metric_type, value, tags=None):
"""
Set a metric
"""
if metric_type == self.GAUGE:
self.gauge(metric_name, value, tags=tags)
else:
self.log.error('Metric type "{}" unknown'.format(metric_type)) | [
"def",
"_set_metric",
"(",
"self",
",",
"metric_name",
",",
"metric_type",
",",
"value",
",",
"tags",
"=",
"None",
")",
":",
"if",
"metric_type",
"==",
"self",
".",
"GAUGE",
":",
"self",
".",
"gauge",
"(",
"metric_name",
",",
"value",
",",
"tags",
"=",
"tags",
")",
"else",
":",
"self",
".",
"log",
".",
"error",
"(",
"'Metric type \"{}\" unknown'",
".",
"format",
"(",
"metric_type",
")",
")"
] | 35.625 | 15.375 |
def ll(self, folder="", begin_from_file="", num=-1, all_grant_data=False):
"""
Get the list of files and permissions from S3.
This is similar to LL (ls -lah) in Linux: List of files with permissions.
Parameters
----------
folder : string
Path to file on S3
num: integer, optional
number of results to return, by default it returns all results.
begin_from_file : string, optional
which file to start from on S3.
This is usedful in case you are iterating over lists of files and you need to page the result by
starting listing from a certain file and fetching certain num (number) of files.
all_grant_data : Boolean, optional
More detailed file permission data will be returned.
Examples
--------
>>> from s3utils import S3utils
>>> s3utils = S3utils(
... AWS_ACCESS_KEY_ID = 'your access key',
... AWS_SECRET_ACCESS_KEY = 'your secret key',
... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose
... )
>>> import json
>>> # We use json.dumps to print the results more readable:
>>> my_folder_stuff = s3utils.ll("/test/")
>>> print(json.dumps(my_folder_stuff, indent=2))
{
"test/myfolder/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/em/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/.DS_Store": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/haha/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/haha/ff": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/photo.JPG": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
}
"""
return self.ls(folder=folder, begin_from_file=begin_from_file, num=num, get_grants=True, all_grant_data=all_grant_data) | [
"def",
"ll",
"(",
"self",
",",
"folder",
"=",
"\"\"",
",",
"begin_from_file",
"=",
"\"\"",
",",
"num",
"=",
"-",
"1",
",",
"all_grant_data",
"=",
"False",
")",
":",
"return",
"self",
".",
"ls",
"(",
"folder",
"=",
"folder",
",",
"begin_from_file",
"=",
"begin_from_file",
",",
"num",
"=",
"num",
",",
"get_grants",
"=",
"True",
",",
"all_grant_data",
"=",
"all_grant_data",
")"
] | 32.4375 | 19.208333 |
def _get_representative(self, obj):
"""Finds and returns the root of the set containing `obj`."""
if obj not in self._parents:
self._parents[obj] = obj
self._weights[obj] = 1
self._prev_next[obj] = [obj, obj]
self._min_values[obj] = obj
return obj
path = [obj]
root = self._parents[obj]
while root != path[-1]:
path.append(root)
root = self._parents[root]
# compress the path and return
for ancestor in path:
self._parents[ancestor] = root
return root | [
"def",
"_get_representative",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
"not",
"in",
"self",
".",
"_parents",
":",
"self",
".",
"_parents",
"[",
"obj",
"]",
"=",
"obj",
"self",
".",
"_weights",
"[",
"obj",
"]",
"=",
"1",
"self",
".",
"_prev_next",
"[",
"obj",
"]",
"=",
"[",
"obj",
",",
"obj",
"]",
"self",
".",
"_min_values",
"[",
"obj",
"]",
"=",
"obj",
"return",
"obj",
"path",
"=",
"[",
"obj",
"]",
"root",
"=",
"self",
".",
"_parents",
"[",
"obj",
"]",
"while",
"root",
"!=",
"path",
"[",
"-",
"1",
"]",
":",
"path",
".",
"append",
"(",
"root",
")",
"root",
"=",
"self",
".",
"_parents",
"[",
"root",
"]",
"# compress the path and return",
"for",
"ancestor",
"in",
"path",
":",
"self",
".",
"_parents",
"[",
"ancestor",
"]",
"=",
"root",
"return",
"root"
] | 29.75 | 12.4 |
def _setns(self, value):
"""
Set the default repository namespace to be used.
This method exists for compatibility. Use the :attr:`default_namespace`
property instead.
"""
if self.conn is not None:
self.conn.default_namespace = value
else:
self.__default_namespace = value | [
"def",
"_setns",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"conn",
"is",
"not",
"None",
":",
"self",
".",
"conn",
".",
"default_namespace",
"=",
"value",
"else",
":",
"self",
".",
"__default_namespace",
"=",
"value"
] | 31.181818 | 15.545455 |
def is_deposit_confirmed(
channel_state: NettingChannelState,
block_number: BlockNumber,
) -> bool:
"""True if the block which mined the deposit transaction has been
confirmed.
"""
if not channel_state.deposit_transaction_queue:
return False
return is_transaction_confirmed(
channel_state.deposit_transaction_queue[0].block_number,
block_number,
) | [
"def",
"is_deposit_confirmed",
"(",
"channel_state",
":",
"NettingChannelState",
",",
"block_number",
":",
"BlockNumber",
",",
")",
"->",
"bool",
":",
"if",
"not",
"channel_state",
".",
"deposit_transaction_queue",
":",
"return",
"False",
"return",
"is_transaction_confirmed",
"(",
"channel_state",
".",
"deposit_transaction_queue",
"[",
"0",
"]",
".",
"block_number",
",",
"block_number",
",",
")"
] | 28.5 | 16.642857 |
def isRectangular(self):
"""Check if quad is rectangular.
"""
# if any two of the 4 corners are equal return false
upper = (self.ur - self.ul).unit
if not bool(upper):
return False
right = (self.lr - self.ur).unit
if not bool(right):
return False
left = (self.ll - self.ul).unit
if not bool(left):
return False
lower = (self.lr - self.ll).unit
if not bool(lower):
return False
eps = 1e-5
# we now have 4 sides of length 1. If 3 of them have 90 deg angles,
# then it is a rectangle -- we check via scalar product == 0
return abs(sum(map(lambda x,y: x*y, upper, right))) <= eps and \
abs(sum(map(lambda x,y: x*y, upper, left))) <= eps and \
abs(sum(map(lambda x,y: x*y, left, lower))) <= eps | [
"def",
"isRectangular",
"(",
"self",
")",
":",
"# if any two of the 4 corners are equal return false",
"upper",
"=",
"(",
"self",
".",
"ur",
"-",
"self",
".",
"ul",
")",
".",
"unit",
"if",
"not",
"bool",
"(",
"upper",
")",
":",
"return",
"False",
"right",
"=",
"(",
"self",
".",
"lr",
"-",
"self",
".",
"ur",
")",
".",
"unit",
"if",
"not",
"bool",
"(",
"right",
")",
":",
"return",
"False",
"left",
"=",
"(",
"self",
".",
"ll",
"-",
"self",
".",
"ul",
")",
".",
"unit",
"if",
"not",
"bool",
"(",
"left",
")",
":",
"return",
"False",
"lower",
"=",
"(",
"self",
".",
"lr",
"-",
"self",
".",
"ll",
")",
".",
"unit",
"if",
"not",
"bool",
"(",
"lower",
")",
":",
"return",
"False",
"eps",
"=",
"1e-5",
"# we now have 4 sides of length 1. If 3 of them have 90 deg angles,",
"# then it is a rectangle -- we check via scalar product == 0",
"return",
"abs",
"(",
"sum",
"(",
"map",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"*",
"y",
",",
"upper",
",",
"right",
")",
")",
")",
"<=",
"eps",
"and",
"abs",
"(",
"sum",
"(",
"map",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"*",
"y",
",",
"upper",
",",
"left",
")",
")",
")",
"<=",
"eps",
"and",
"abs",
"(",
"sum",
"(",
"map",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"*",
"y",
",",
"left",
",",
"lower",
")",
")",
")",
"<=",
"eps"
] | 38.318182 | 13.727273 |
def get_captcha(self, id=None):
""" http://api.yandex.ru/cleanweb/doc/dg/concepts/get-captcha.xml"""
payload = {'id': id}
r = self.request('get', 'http://cleanweb-api.yandex.ru/1.0/get-captcha', params=payload)
return dict((item.tag, item.text) for item in ET.fromstring(r.content)) | [
"def",
"get_captcha",
"(",
"self",
",",
"id",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"'id'",
":",
"id",
"}",
"r",
"=",
"self",
".",
"request",
"(",
"'get'",
",",
"'http://cleanweb-api.yandex.ru/1.0/get-captcha'",
",",
"params",
"=",
"payload",
")",
"return",
"dict",
"(",
"(",
"item",
".",
"tag",
",",
"item",
".",
"text",
")",
"for",
"item",
"in",
"ET",
".",
"fromstring",
"(",
"r",
".",
"content",
")",
")"
] | 62 | 23.2 |
def print_status(self, repo_name, repo_path):
"""Print repository status."""
color = Color()
self.logger.info(color.colored(
"=> [%s] %s" % (repo_name, repo_path), "green"))
try:
repo = Repository(repo_path)
repo.status()
except RepositoryError as e:
self.logger.error(e)
pass
print("\n") | [
"def",
"print_status",
"(",
"self",
",",
"repo_name",
",",
"repo_path",
")",
":",
"color",
"=",
"Color",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"color",
".",
"colored",
"(",
"\"=> [%s] %s\"",
"%",
"(",
"repo_name",
",",
"repo_path",
")",
",",
"\"green\"",
")",
")",
"try",
":",
"repo",
"=",
"Repository",
"(",
"repo_path",
")",
"repo",
".",
"status",
"(",
")",
"except",
"RepositoryError",
"as",
"e",
":",
"self",
".",
"logger",
".",
"error",
"(",
"e",
")",
"pass",
"print",
"(",
"\"\\n\"",
")"
] | 32.083333 | 11.916667 |
def get_json_payload_magic_envelope(self, payload):
"""Encrypted JSON payload"""
private_key = self._get_user_key()
return EncryptedPayload.decrypt(payload=payload, private_key=private_key) | [
"def",
"get_json_payload_magic_envelope",
"(",
"self",
",",
"payload",
")",
":",
"private_key",
"=",
"self",
".",
"_get_user_key",
"(",
")",
"return",
"EncryptedPayload",
".",
"decrypt",
"(",
"payload",
"=",
"payload",
",",
"private_key",
"=",
"private_key",
")"
] | 52.5 | 13.5 |
def get_node_label(self, model):
"""
Defines how labels are constructed from models.
Default - uses verbose name, lines breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) | [
"def",
"get_node_label",
"(",
"self",
",",
"model",
")",
":",
"if",
"model",
".",
"is_proxy",
":",
"label",
"=",
"\"(P) %s\"",
"%",
"(",
"model",
".",
"name",
".",
"title",
"(",
")",
")",
"else",
":",
"label",
"=",
"\"%s\"",
"%",
"(",
"model",
".",
"name",
".",
"title",
"(",
")",
")",
"line",
"=",
"\"\"",
"new_label",
"=",
"[",
"]",
"for",
"w",
"in",
"label",
".",
"split",
"(",
"\" \"",
")",
":",
"if",
"len",
"(",
"line",
"+",
"w",
")",
">",
"15",
":",
"new_label",
".",
"append",
"(",
"line",
")",
"line",
"=",
"w",
"else",
":",
"line",
"+=",
"\" \"",
"line",
"+=",
"w",
"new_label",
".",
"append",
"(",
"line",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"new_label",
")"
] | 27.863636 | 14.681818 |
def _get_known_noncoding_het_snp(data_dict):
'''If ref is coding, return None. If the data dict has a known snp, and
samtools made a call, then return the string ref_name_change and the
% of reads supporting the variant type. If noncoding, but no
samtools call, then return None'''
if data_dict['gene'] == '1':
return None
if data_dict['known_var'] == '1' and data_dict['ref_ctg_effect'] == 'SNP' \
and data_dict['smtls_nts'] != '.' and ';' not in data_dict['smtls_nts']:
nucleotides = data_dict['smtls_nts'].split(',')
depths = data_dict['smtls_nts_depth'].split(',')
if len(nucleotides) != len(depths):
raise Error('Mismatch in number of inferred nucleotides from ctg_nt, smtls_nts, smtls_nts_depth columns. Cannot continue\n' + str(data_dict))
try:
var_nucleotide = data_dict['known_var_change'][-1]
depths = [int(x) for x in depths]
nuc_to_depth = dict(zip(nucleotides, depths))
total_depth = sum(depths)
var_depth = nuc_to_depth.get(var_nucleotide, 0)
percent_depth = round(100 * var_depth / total_depth, 1)
except:
return None
return data_dict['known_var_change'], percent_depth
else:
return None | [
"def",
"_get_known_noncoding_het_snp",
"(",
"data_dict",
")",
":",
"if",
"data_dict",
"[",
"'gene'",
"]",
"==",
"'1'",
":",
"return",
"None",
"if",
"data_dict",
"[",
"'known_var'",
"]",
"==",
"'1'",
"and",
"data_dict",
"[",
"'ref_ctg_effect'",
"]",
"==",
"'SNP'",
"and",
"data_dict",
"[",
"'smtls_nts'",
"]",
"!=",
"'.'",
"and",
"';'",
"not",
"in",
"data_dict",
"[",
"'smtls_nts'",
"]",
":",
"nucleotides",
"=",
"data_dict",
"[",
"'smtls_nts'",
"]",
".",
"split",
"(",
"','",
")",
"depths",
"=",
"data_dict",
"[",
"'smtls_nts_depth'",
"]",
".",
"split",
"(",
"','",
")",
"if",
"len",
"(",
"nucleotides",
")",
"!=",
"len",
"(",
"depths",
")",
":",
"raise",
"Error",
"(",
"'Mismatch in number of inferred nucleotides from ctg_nt, smtls_nts, smtls_nts_depth columns. Cannot continue\\n'",
"+",
"str",
"(",
"data_dict",
")",
")",
"try",
":",
"var_nucleotide",
"=",
"data_dict",
"[",
"'known_var_change'",
"]",
"[",
"-",
"1",
"]",
"depths",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"depths",
"]",
"nuc_to_depth",
"=",
"dict",
"(",
"zip",
"(",
"nucleotides",
",",
"depths",
")",
")",
"total_depth",
"=",
"sum",
"(",
"depths",
")",
"var_depth",
"=",
"nuc_to_depth",
".",
"get",
"(",
"var_nucleotide",
",",
"0",
")",
"percent_depth",
"=",
"round",
"(",
"100",
"*",
"var_depth",
"/",
"total_depth",
",",
"1",
")",
"except",
":",
"return",
"None",
"return",
"data_dict",
"[",
"'known_var_change'",
"]",
",",
"percent_depth",
"else",
":",
"return",
"None"
] | 47.482759 | 27 |
def loadb(chars, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False):
"""Decodes and returns UBJSON from the given bytes or bytesarray object. See
load() for available arguments."""
with BytesIO(chars) as fp:
return load(fp, no_bytes=no_bytes, object_hook=object_hook, object_pairs_hook=object_pairs_hook,
intern_object_keys=intern_object_keys) | [
"def",
"loadb",
"(",
"chars",
",",
"no_bytes",
"=",
"False",
",",
"object_hook",
"=",
"None",
",",
"object_pairs_hook",
"=",
"None",
",",
"intern_object_keys",
"=",
"False",
")",
":",
"with",
"BytesIO",
"(",
"chars",
")",
"as",
"fp",
":",
"return",
"load",
"(",
"fp",
",",
"no_bytes",
"=",
"no_bytes",
",",
"object_hook",
"=",
"object_hook",
",",
"object_pairs_hook",
"=",
"object_pairs_hook",
",",
"intern_object_keys",
"=",
"intern_object_keys",
")"
] | 69 | 25.5 |
def main():
"""
Update the ossuary Postgres db with images observed for OSSOS.
iq: Go through and check all ossuary's images for new existence of IQs/zeropoints.
comment: Go through all ossuary and
Then updates ossuary with new images that are at any stage of processing.
Constructs full image entries, including header and info in the vtags, and inserts to ossuary.
TODO: a CLUSTER after data is inserted - maybe once a week, depending how much there is
CLUSTER images; need to sqlalchemy this one
"""
parser = argparse.ArgumentParser()
parser.add_argument("-iq", "--iq", action="store_true",
help="Check existing images in ossuary that do not yet have "
"IQ/zeropoint information; update where possible.")
parser.add_argument("-comment", action="store_true",
help="Add comments on images provided by S. Gwyn to database.")
parser.add_argument("-snr", action="store_true",
help="Update existing images in ossuary for SNR info where that exists in a vtag.")
args = parser.parse_args()
images = web.field_obs.queries.ImagesQuery()
processed_images, iqs = retrieve_processed_images(images) # straight list of primary keys
commdict = parse_sgwn_comments()
if args.iq:
unmeasured_iqs = iq_unmeasured_images(images)
sys.stdout.write('%d images in ossuary; updating %d with new IQ/zeropoint information.\n' %
(len(processed_images), len(unmeasured_iqs)))
for n, image in enumerate(unmeasured_iqs): # it's in the db, so has already passed the other checks
update_values(images, image)
sys.stdout.write('%s %d/%d...ossuary updated.\n' % (image, n + 1, len(unmeasured_iqs)))
if args.snr:
unmeasured = snr_unmeasured_images(images)
sys.stdout.write('%d images in ossuary; updating %d with new SNR information.\n' %
(len(processed_images), len(unmeasured)))
for n, image in enumerate(unmeasured): # it's in the db, so has already passed the other checks
update_values(images, image, iq_zeropt=False, snr=True)
sys.stdout.write('%s %d/%d...ossuary updated.\n' % (image, n + 1, len(unmeasured)))
if args.comment:
sys.stdout.write('%d images in ossuary; updating with new comment information.\n' %
len(processed_images))
for image in commdict.keys():
if int(image) in processed_images:
update_values(images, image, iq_zeropt=False, comment=True, commdict=commdict)
sys.stdout.write('%s has comment...\n' % image)
unprocessed_images = parse_unprocessed_images(storage.list_dbimages(), processed_images)
sys.stdout.write('%d images in ossuary; updating with %d new in VOspace.\n' %
(len(processed_images), len(unprocessed_images)))
for n, image in enumerate(unprocessed_images):
sys.stdout.write('%s %d/%d ' % (image, n + 1, len(unprocessed_images)))
try:
subheader, fullheader = get_header(image)
if subheader is not None:
sys.stdout.write('Header obtained. ')
verify_ossos_image(fullheader)
header = get_iq_and_zeropoint(image, subheader)
header = get_snr(image, header)
if image in commdict.keys():
header['comment'] = commdict[image]
put_image_in_database(header, images)
sys.stdout.write('...added to ossuary...\n')
# generate_MegaCam_previews(image)
# sys.stdout.write(' .gif preview saved.\n')
else:
sys.stdout.write('Header is not available: skipping.\n')
except Exception, e:
sys.stdout.write('... %s\n' % e) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"-iq\"",
",",
"\"--iq\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Check existing images in ossuary that do not yet have \"",
"\"IQ/zeropoint information; update where possible.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-comment\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Add comments on images provided by S. Gwyn to database.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-snr\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Update existing images in ossuary for SNR info where that exists in a vtag.\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"images",
"=",
"web",
".",
"field_obs",
".",
"queries",
".",
"ImagesQuery",
"(",
")",
"processed_images",
",",
"iqs",
"=",
"retrieve_processed_images",
"(",
"images",
")",
"# straight list of primary keys",
"commdict",
"=",
"parse_sgwn_comments",
"(",
")",
"if",
"args",
".",
"iq",
":",
"unmeasured_iqs",
"=",
"iq_unmeasured_images",
"(",
"images",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%d images in ossuary; updating %d with new IQ/zeropoint information.\\n'",
"%",
"(",
"len",
"(",
"processed_images",
")",
",",
"len",
"(",
"unmeasured_iqs",
")",
")",
")",
"for",
"n",
",",
"image",
"in",
"enumerate",
"(",
"unmeasured_iqs",
")",
":",
"# it's in the db, so has already passed the other checks",
"update_values",
"(",
"images",
",",
"image",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%s %d/%d...ossuary updated.\\n'",
"%",
"(",
"image",
",",
"n",
"+",
"1",
",",
"len",
"(",
"unmeasured_iqs",
")",
")",
")",
"if",
"args",
".",
"snr",
":",
"unmeasured",
"=",
"snr_unmeasured_images",
"(",
"images",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%d images in ossuary; updating %d with new SNR information.\\n'",
"%",
"(",
"len",
"(",
"processed_images",
")",
",",
"len",
"(",
"unmeasured",
")",
")",
")",
"for",
"n",
",",
"image",
"in",
"enumerate",
"(",
"unmeasured",
")",
":",
"# it's in the db, so has already passed the other checks",
"update_values",
"(",
"images",
",",
"image",
",",
"iq_zeropt",
"=",
"False",
",",
"snr",
"=",
"True",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%s %d/%d...ossuary updated.\\n'",
"%",
"(",
"image",
",",
"n",
"+",
"1",
",",
"len",
"(",
"unmeasured",
")",
")",
")",
"if",
"args",
".",
"comment",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%d images in ossuary; updating with new comment information.\\n'",
"%",
"len",
"(",
"processed_images",
")",
")",
"for",
"image",
"in",
"commdict",
".",
"keys",
"(",
")",
":",
"if",
"int",
"(",
"image",
")",
"in",
"processed_images",
":",
"update_values",
"(",
"images",
",",
"image",
",",
"iq_zeropt",
"=",
"False",
",",
"comment",
"=",
"True",
",",
"commdict",
"=",
"commdict",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%s has comment...\\n'",
"%",
"image",
")",
"unprocessed_images",
"=",
"parse_unprocessed_images",
"(",
"storage",
".",
"list_dbimages",
"(",
")",
",",
"processed_images",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%d images in ossuary; updating with %d new in VOspace.\\n'",
"%",
"(",
"len",
"(",
"processed_images",
")",
",",
"len",
"(",
"unprocessed_images",
")",
")",
")",
"for",
"n",
",",
"image",
"in",
"enumerate",
"(",
"unprocessed_images",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%s %d/%d '",
"%",
"(",
"image",
",",
"n",
"+",
"1",
",",
"len",
"(",
"unprocessed_images",
")",
")",
")",
"try",
":",
"subheader",
",",
"fullheader",
"=",
"get_header",
"(",
"image",
")",
"if",
"subheader",
"is",
"not",
"None",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'Header obtained. '",
")",
"verify_ossos_image",
"(",
"fullheader",
")",
"header",
"=",
"get_iq_and_zeropoint",
"(",
"image",
",",
"subheader",
")",
"header",
"=",
"get_snr",
"(",
"image",
",",
"header",
")",
"if",
"image",
"in",
"commdict",
".",
"keys",
"(",
")",
":",
"header",
"[",
"'comment'",
"]",
"=",
"commdict",
"[",
"image",
"]",
"put_image_in_database",
"(",
"header",
",",
"images",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'...added to ossuary...\\n'",
")",
"# generate_MegaCam_previews(image)",
"# sys.stdout.write(' .gif preview saved.\\n')",
"else",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'Header is not available: skipping.\\n'",
")",
"except",
"Exception",
",",
"e",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'... %s\\n'",
"%",
"e",
")"
] | 52.589041 | 27.30137 |
def residual_plots(df, rep_stats=None, els=['Mg', 'Sr', 'Al', 'Mn', 'Fe', 'Cu', 'Zn', 'B']):
"""
Function for plotting Test User and LAtools data comparison.
Parameters
----------
df : pandas.DataFrame
A dataframe containing reference ('X/Ca_r'), test user
('X/Ca_t') and LAtools ('X123') data.
rep_stats : dict
Reproducibility stats of the reference data produced by
`pairwise_reproducibility`
els : list
list of elements (names only) to plot.
"""
# get corresponding analyte and ratio names
As = []
Rs = []
analytes = [c for c in df.columns if ('/' not in c)]
ratios = [c for c in df.columns if ('/' in c)]
for e in els:
if e == 'Sr':
As.append('88Sr')
elif e == 'Mg':
As.append('24Mg')
else:
As.append([a for a in analytes if e in a][0])
Rs.append([r for r in ratios if e in r][0])
fig, axs = plt.subplots(len(els), 2, figsize=(5, len(els) * 2))
for i, (e, a) in enumerate(zip(Rs, As)):
lax, hax = axs[i]
x = df.loc[:, e].values
yl = df.loc[:, a].values
c = element_colour(fmt_el(a))
u = 'mmol/mol'
# calculate residuals
rl = yl - x
# plot residuals
lax.scatter(x, rl, c=c, s=15, lw=0.5, edgecolor='k', alpha=0.5)
# plot PDFs
rl = rl[~np.isnan(rl)]
lims = np.percentile(rl, [99, 1])
lims += lims.ptp() * np.array((-1.25, 1.25))
bins = np.linspace(*lims, 100)
kdl = stats.gaussian_kde(rl, .4)
hax.fill_betweenx(bins, kdl(bins), facecolor=c, alpha=0.7, edgecolor='k', lw=0.5, label='LAtools')
hax.set_xlim([0, hax.get_xlim()[-1]])
# axis labels, annotations and limits
lax.set_ylabel(e + ' ('+ u + ')')
lax.text(.02,.02,fmt_RSS(rl), fontsize=8,
ha='left', va='bottom', transform=lax.transAxes)
xlim = np.percentile(x[~np.isnan(x)], [0, 98])
lax.set_xlim(xlim)
for ax in axs[i]:
ax.set_ylim(lims)
# zero line and 2SD precision
ax.axhline(0, c='k', ls='dashed', alpha=0.6)
if rep_stats is not None:
ax.axhspan(-rep_stats[e][0] * 2, rep_stats[e][0] * 2, color=(0,0,0,0.2), zorder=-1)
if not ax.is_first_col():
ax.set_yticklabels([])
if ax.is_last_row():
hax.set_xlabel('Density')
lax.set_xlabel('Iolite User')
if ax.is_first_row():
lax.set_title('LAtools', loc='left')
fig.tight_layout()
return fig, axs | [
"def",
"residual_plots",
"(",
"df",
",",
"rep_stats",
"=",
"None",
",",
"els",
"=",
"[",
"'Mg'",
",",
"'Sr'",
",",
"'Al'",
",",
"'Mn'",
",",
"'Fe'",
",",
"'Cu'",
",",
"'Zn'",
",",
"'B'",
"]",
")",
":",
"# get corresponding analyte and ratio names",
"As",
"=",
"[",
"]",
"Rs",
"=",
"[",
"]",
"analytes",
"=",
"[",
"c",
"for",
"c",
"in",
"df",
".",
"columns",
"if",
"(",
"'/'",
"not",
"in",
"c",
")",
"]",
"ratios",
"=",
"[",
"c",
"for",
"c",
"in",
"df",
".",
"columns",
"if",
"(",
"'/'",
"in",
"c",
")",
"]",
"for",
"e",
"in",
"els",
":",
"if",
"e",
"==",
"'Sr'",
":",
"As",
".",
"append",
"(",
"'88Sr'",
")",
"elif",
"e",
"==",
"'Mg'",
":",
"As",
".",
"append",
"(",
"'24Mg'",
")",
"else",
":",
"As",
".",
"append",
"(",
"[",
"a",
"for",
"a",
"in",
"analytes",
"if",
"e",
"in",
"a",
"]",
"[",
"0",
"]",
")",
"Rs",
".",
"append",
"(",
"[",
"r",
"for",
"r",
"in",
"ratios",
"if",
"e",
"in",
"r",
"]",
"[",
"0",
"]",
")",
"fig",
",",
"axs",
"=",
"plt",
".",
"subplots",
"(",
"len",
"(",
"els",
")",
",",
"2",
",",
"figsize",
"=",
"(",
"5",
",",
"len",
"(",
"els",
")",
"*",
"2",
")",
")",
"for",
"i",
",",
"(",
"e",
",",
"a",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"Rs",
",",
"As",
")",
")",
":",
"lax",
",",
"hax",
"=",
"axs",
"[",
"i",
"]",
"x",
"=",
"df",
".",
"loc",
"[",
":",
",",
"e",
"]",
".",
"values",
"yl",
"=",
"df",
".",
"loc",
"[",
":",
",",
"a",
"]",
".",
"values",
"c",
"=",
"element_colour",
"(",
"fmt_el",
"(",
"a",
")",
")",
"u",
"=",
"'mmol/mol'",
"# calculate residuals",
"rl",
"=",
"yl",
"-",
"x",
"# plot residuals",
"lax",
".",
"scatter",
"(",
"x",
",",
"rl",
",",
"c",
"=",
"c",
",",
"s",
"=",
"15",
",",
"lw",
"=",
"0.5",
",",
"edgecolor",
"=",
"'k'",
",",
"alpha",
"=",
"0.5",
")",
"# plot PDFs",
"rl",
"=",
"rl",
"[",
"~",
"np",
".",
"isnan",
"(",
"rl",
")",
"]",
"lims",
"=",
"np",
".",
"percentile",
"(",
"rl",
",",
"[",
"99",
",",
"1",
"]",
")",
"lims",
"+=",
"lims",
".",
"ptp",
"(",
")",
"*",
"np",
".",
"array",
"(",
"(",
"-",
"1.25",
",",
"1.25",
")",
")",
"bins",
"=",
"np",
".",
"linspace",
"(",
"*",
"lims",
",",
"100",
")",
"kdl",
"=",
"stats",
".",
"gaussian_kde",
"(",
"rl",
",",
".4",
")",
"hax",
".",
"fill_betweenx",
"(",
"bins",
",",
"kdl",
"(",
"bins",
")",
",",
"facecolor",
"=",
"c",
",",
"alpha",
"=",
"0.7",
",",
"edgecolor",
"=",
"'k'",
",",
"lw",
"=",
"0.5",
",",
"label",
"=",
"'LAtools'",
")",
"hax",
".",
"set_xlim",
"(",
"[",
"0",
",",
"hax",
".",
"get_xlim",
"(",
")",
"[",
"-",
"1",
"]",
"]",
")",
"# axis labels, annotations and limits",
"lax",
".",
"set_ylabel",
"(",
"e",
"+",
"' ('",
"+",
"u",
"+",
"')'",
")",
"lax",
".",
"text",
"(",
".02",
",",
".02",
",",
"fmt_RSS",
"(",
"rl",
")",
",",
"fontsize",
"=",
"8",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'bottom'",
",",
"transform",
"=",
"lax",
".",
"transAxes",
")",
"xlim",
"=",
"np",
".",
"percentile",
"(",
"x",
"[",
"~",
"np",
".",
"isnan",
"(",
"x",
")",
"]",
",",
"[",
"0",
",",
"98",
"]",
")",
"lax",
".",
"set_xlim",
"(",
"xlim",
")",
"for",
"ax",
"in",
"axs",
"[",
"i",
"]",
":",
"ax",
".",
"set_ylim",
"(",
"lims",
")",
"# zero line and 2SD precision",
"ax",
".",
"axhline",
"(",
"0",
",",
"c",
"=",
"'k'",
",",
"ls",
"=",
"'dashed'",
",",
"alpha",
"=",
"0.6",
")",
"if",
"rep_stats",
"is",
"not",
"None",
":",
"ax",
".",
"axhspan",
"(",
"-",
"rep_stats",
"[",
"e",
"]",
"[",
"0",
"]",
"*",
"2",
",",
"rep_stats",
"[",
"e",
"]",
"[",
"0",
"]",
"*",
"2",
",",
"color",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0.2",
")",
",",
"zorder",
"=",
"-",
"1",
")",
"if",
"not",
"ax",
".",
"is_first_col",
"(",
")",
":",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"if",
"ax",
".",
"is_last_row",
"(",
")",
":",
"hax",
".",
"set_xlabel",
"(",
"'Density'",
")",
"lax",
".",
"set_xlabel",
"(",
"'Iolite User'",
")",
"if",
"ax",
".",
"is_first_row",
"(",
")",
":",
"lax",
".",
"set_title",
"(",
"'LAtools'",
",",
"loc",
"=",
"'left'",
")",
"fig",
".",
"tight_layout",
"(",
")",
"return",
"fig",
",",
"axs"
] | 31.729412 | 18.976471 |
def has_near_match_generic_ngrams(subsequence, sequence, search_params):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
* the maximum allowed number of character substitutions
* the maximum allowed number of new characters inserted
* and the maximum allowed number of character deletions
* the total number of substitutions, insertions and deletions
"""
if not subsequence:
raise ValueError('Given subsequence is empty!')
for match in _find_near_matches_generic_ngrams(subsequence, sequence, search_params):
return True
return False | [
"def",
"has_near_match_generic_ngrams",
"(",
"subsequence",
",",
"sequence",
",",
"search_params",
")",
":",
"if",
"not",
"subsequence",
":",
"raise",
"ValueError",
"(",
"'Given subsequence is empty!'",
")",
"for",
"match",
"in",
"_find_near_matches_generic_ngrams",
"(",
"subsequence",
",",
"sequence",
",",
"search_params",
")",
":",
"return",
"True",
"return",
"False"
] | 43.117647 | 25.470588 |
def _repeat(self, index, stage, stop):
""" Repeat a stage.
:param index: Stage index.
:param stage: Stage object to repeat.
:param iterations: Number of iterations (default infinite).
:param stages: Stages back to repeat (default 1).
"""
times = None
if 'iterations' in stage.kwargs:
times = stage.kwargs['iterations'] - 1
stages_back = 1
if 'stages' in stage.kwargs:
stages_back = stage.kwargs['stages']
i = 0
while i != times:
if stop.is_set():
break
for forward in range(stages_back):
if stop.is_set():
break
stage_index = index - stages_back + forward
self._execute_stage(stage_index, self._pipe[stage_index], stop)
i += 1 | [
"def",
"_repeat",
"(",
"self",
",",
"index",
",",
"stage",
",",
"stop",
")",
":",
"times",
"=",
"None",
"if",
"'iterations'",
"in",
"stage",
".",
"kwargs",
":",
"times",
"=",
"stage",
".",
"kwargs",
"[",
"'iterations'",
"]",
"-",
"1",
"stages_back",
"=",
"1",
"if",
"'stages'",
"in",
"stage",
".",
"kwargs",
":",
"stages_back",
"=",
"stage",
".",
"kwargs",
"[",
"'stages'",
"]",
"i",
"=",
"0",
"while",
"i",
"!=",
"times",
":",
"if",
"stop",
".",
"is_set",
"(",
")",
":",
"break",
"for",
"forward",
"in",
"range",
"(",
"stages_back",
")",
":",
"if",
"stop",
".",
"is_set",
"(",
")",
":",
"break",
"stage_index",
"=",
"index",
"-",
"stages_back",
"+",
"forward",
"self",
".",
"_execute_stage",
"(",
"stage_index",
",",
"self",
".",
"_pipe",
"[",
"stage_index",
"]",
",",
"stop",
")",
"i",
"+=",
"1"
] | 35.166667 | 14 |
def is_builtin_type(tp):
"""Checks if the given type is a builtin one.
"""
return hasattr(__builtins__, tp.__name__) and tp is getattr(__builtins__, tp.__name__) | [
"def",
"is_builtin_type",
"(",
"tp",
")",
":",
"return",
"hasattr",
"(",
"__builtins__",
",",
"tp",
".",
"__name__",
")",
"and",
"tp",
"is",
"getattr",
"(",
"__builtins__",
",",
"tp",
".",
"__name__",
")"
] | 42.5 | 16.5 |
def inspect(self, w):
"""Get the latest value of the wire given, if possible."""
if isinstance(w, WireVector):
w = w.name
try:
vals = self.tracer.trace[w]
except KeyError:
pass
else:
if not vals:
raise PyrtlError('No context available. Please run a simulation step')
return vals[-1]
raise PyrtlError('CompiledSimulation does not support inspecting internal WireVectors') | [
"def",
"inspect",
"(",
"self",
",",
"w",
")",
":",
"if",
"isinstance",
"(",
"w",
",",
"WireVector",
")",
":",
"w",
"=",
"w",
".",
"name",
"try",
":",
"vals",
"=",
"self",
".",
"tracer",
".",
"trace",
"[",
"w",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"if",
"not",
"vals",
":",
"raise",
"PyrtlError",
"(",
"'No context available. Please run a simulation step'",
")",
"return",
"vals",
"[",
"-",
"1",
"]",
"raise",
"PyrtlError",
"(",
"'CompiledSimulation does not support inspecting internal WireVectors'",
")"
] | 37.076923 | 20.461538 |
def get_tops(self):
'''
Gather the top files
'''
tops = collections.defaultdict(list)
include = collections.defaultdict(list)
done = collections.defaultdict(list)
errors = []
# Gather initial top files
try:
saltenvs = set()
if self.opts['pillarenv']:
# If the specified pillarenv is not present in the available
# pillar environments, do not cache the pillar top file.
if self.opts['pillarenv'] not in self.opts['pillar_roots']:
log.debug(
'pillarenv \'%s\' not found in the configured pillar '
'environments (%s)',
self.opts['pillarenv'], ', '.join(self.opts['pillar_roots'])
)
else:
saltenvs.add(self.opts['pillarenv'])
else:
saltenvs = self._get_envs()
if self.opts.get('pillar_source_merging_strategy', None) == "none":
saltenvs &= set([self.saltenv or 'base'])
for saltenv in saltenvs:
top = self.client.cache_file(self.opts['state_top'], saltenv)
if top:
tops[saltenv].append(compile_template(
top,
self.rend,
self.opts['renderer'],
self.opts['renderer_blacklist'],
self.opts['renderer_whitelist'],
saltenv=saltenv,
_pillar_rend=True,
))
except Exception as exc:
errors.append(
('Rendering Primary Top file failed, render error:\n{0}'
.format(exc)))
log.exception('Pillar rendering failed for minion %s', self.minion_id)
# Search initial top files for includes
for saltenv, ctops in six.iteritems(tops):
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in six.iteritems(include):
pops.append(saltenv)
if not states:
continue
for sls in states:
if sls in done[saltenv]:
continue
try:
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get('dest', False),
self.rend,
self.opts['renderer'],
self.opts['renderer_blacklist'],
self.opts['renderer_whitelist'],
saltenv=saltenv,
_pillar_rend=True,
)
)
except Exception as exc:
errors.append(
('Rendering Top file {0} failed, render error'
':\n{1}').format(sls, exc))
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops, errors | [
"def",
"get_tops",
"(",
"self",
")",
":",
"tops",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"include",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"done",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"errors",
"=",
"[",
"]",
"# Gather initial top files",
"try",
":",
"saltenvs",
"=",
"set",
"(",
")",
"if",
"self",
".",
"opts",
"[",
"'pillarenv'",
"]",
":",
"# If the specified pillarenv is not present in the available",
"# pillar environments, do not cache the pillar top file.",
"if",
"self",
".",
"opts",
"[",
"'pillarenv'",
"]",
"not",
"in",
"self",
".",
"opts",
"[",
"'pillar_roots'",
"]",
":",
"log",
".",
"debug",
"(",
"'pillarenv \\'%s\\' not found in the configured pillar '",
"'environments (%s)'",
",",
"self",
".",
"opts",
"[",
"'pillarenv'",
"]",
",",
"', '",
".",
"join",
"(",
"self",
".",
"opts",
"[",
"'pillar_roots'",
"]",
")",
")",
"else",
":",
"saltenvs",
".",
"add",
"(",
"self",
".",
"opts",
"[",
"'pillarenv'",
"]",
")",
"else",
":",
"saltenvs",
"=",
"self",
".",
"_get_envs",
"(",
")",
"if",
"self",
".",
"opts",
".",
"get",
"(",
"'pillar_source_merging_strategy'",
",",
"None",
")",
"==",
"\"none\"",
":",
"saltenvs",
"&=",
"set",
"(",
"[",
"self",
".",
"saltenv",
"or",
"'base'",
"]",
")",
"for",
"saltenv",
"in",
"saltenvs",
":",
"top",
"=",
"self",
".",
"client",
".",
"cache_file",
"(",
"self",
".",
"opts",
"[",
"'state_top'",
"]",
",",
"saltenv",
")",
"if",
"top",
":",
"tops",
"[",
"saltenv",
"]",
".",
"append",
"(",
"compile_template",
"(",
"top",
",",
"self",
".",
"rend",
",",
"self",
".",
"opts",
"[",
"'renderer'",
"]",
",",
"self",
".",
"opts",
"[",
"'renderer_blacklist'",
"]",
",",
"self",
".",
"opts",
"[",
"'renderer_whitelist'",
"]",
",",
"saltenv",
"=",
"saltenv",
",",
"_pillar_rend",
"=",
"True",
",",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"errors",
".",
"append",
"(",
"(",
"'Rendering Primary Top file failed, render error:\\n{0}'",
".",
"format",
"(",
"exc",
")",
")",
")",
"log",
".",
"exception",
"(",
"'Pillar rendering failed for minion %s'",
",",
"self",
".",
"minion_id",
")",
"# Search initial top files for includes",
"for",
"saltenv",
",",
"ctops",
"in",
"six",
".",
"iteritems",
"(",
"tops",
")",
":",
"for",
"ctop",
"in",
"ctops",
":",
"if",
"'include'",
"not",
"in",
"ctop",
":",
"continue",
"for",
"sls",
"in",
"ctop",
"[",
"'include'",
"]",
":",
"include",
"[",
"saltenv",
"]",
".",
"append",
"(",
"sls",
")",
"ctop",
".",
"pop",
"(",
"'include'",
")",
"# Go through the includes and pull out the extra tops and add them",
"while",
"include",
":",
"pops",
"=",
"[",
"]",
"for",
"saltenv",
",",
"states",
"in",
"six",
".",
"iteritems",
"(",
"include",
")",
":",
"pops",
".",
"append",
"(",
"saltenv",
")",
"if",
"not",
"states",
":",
"continue",
"for",
"sls",
"in",
"states",
":",
"if",
"sls",
"in",
"done",
"[",
"saltenv",
"]",
":",
"continue",
"try",
":",
"tops",
"[",
"saltenv",
"]",
".",
"append",
"(",
"compile_template",
"(",
"self",
".",
"client",
".",
"get_state",
"(",
"sls",
",",
"saltenv",
")",
".",
"get",
"(",
"'dest'",
",",
"False",
")",
",",
"self",
".",
"rend",
",",
"self",
".",
"opts",
"[",
"'renderer'",
"]",
",",
"self",
".",
"opts",
"[",
"'renderer_blacklist'",
"]",
",",
"self",
".",
"opts",
"[",
"'renderer_whitelist'",
"]",
",",
"saltenv",
"=",
"saltenv",
",",
"_pillar_rend",
"=",
"True",
",",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"errors",
".",
"append",
"(",
"(",
"'Rendering Top file {0} failed, render error'",
"':\\n{1}'",
")",
".",
"format",
"(",
"sls",
",",
"exc",
")",
")",
"done",
"[",
"saltenv",
"]",
".",
"append",
"(",
"sls",
")",
"for",
"saltenv",
"in",
"pops",
":",
"if",
"saltenv",
"in",
"include",
":",
"include",
".",
"pop",
"(",
"saltenv",
")",
"return",
"tops",
",",
"errors"
] | 42.215909 | 15.556818 |
def format_invoke_command(self, string):
"""
Formats correctly the string output from the invoke() method,
replacing line breaks and tabs when necessary.
"""
string = string.replace('\\n', '\n')
formated_response = ''
for line in string.splitlines():
if line.startswith('REPORT'):
line = line.replace('\t', '\n')
if line.startswith('[DEBUG]'):
line = line.replace('\t', ' ')
formated_response += line + '\n'
formated_response = formated_response.replace('\n\n', '\n')
return formated_response | [
"def",
"format_invoke_command",
"(",
"self",
",",
"string",
")",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"'\\\\n'",
",",
"'\\n'",
")",
"formated_response",
"=",
"''",
"for",
"line",
"in",
"string",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'REPORT'",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\t'",
",",
"'\\n'",
")",
"if",
"line",
".",
"startswith",
"(",
"'[DEBUG]'",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\t'",
",",
"' '",
")",
"formated_response",
"+=",
"line",
"+",
"'\\n'",
"formated_response",
"=",
"formated_response",
".",
"replace",
"(",
"'\\n\\n'",
",",
"'\\n'",
")",
"return",
"formated_response"
] | 34.333333 | 12.888889 |
def update_saved_search(self, id, **kwargs): # noqa: E501
"""Update a specific saved search # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_saved_search(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param SavedSearch body: Example Body: <pre>{ \"query\": { \"foo\": \"{\\\"searchTerms\\\":[{\\\"type\\\":\\\"freetext\\\",\\\"value\\\":\\\"foo\\\"}]}\" }, \"entityType\": \"DASHBOARD\" }</pre>
:return: ResponseContainerSavedSearch
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_saved_search_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_saved_search_with_http_info(id, **kwargs) # noqa: E501
return data | [
"def",
"update_saved_search",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"update_saved_search_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"update_saved_search_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 49.590909 | 25.863636 |
def ParseLines(self, input_lines):
"""Parses list of lines.
Args:
input_lines: A list of strings of input to parse (no newlines on the
strings).
Raises:
PDDMError if there are any issues.
"""
current_macro = None
for line in input_lines:
if line.startswith('PDDM-'):
directive = line.split(' ', 1)[0]
if directive == 'PDDM-DEFINE':
name, args = self._ParseDefineLine(line)
if self._macros.get(name):
raise PDDMError('Attempt to redefine macro: "%s"' % line)
current_macro = self.MacroDefinition(name, args)
self._macros[name] = current_macro
continue
if directive == 'PDDM-DEFINE-END':
if not current_macro:
raise PDDMError('Got DEFINE-END directive without an active macro:'
' "%s"' % line)
current_macro = None
continue
raise PDDMError('Hit a line with an unknown directive: "%s"' % line)
if current_macro:
current_macro.AppendLine(line)
continue
# Allow blank lines between macro definitions.
if line.strip() == '':
continue
raise PDDMError('Hit a line that wasn\'t a directive and no open macro'
' definition: "%s"' % line) | [
"def",
"ParseLines",
"(",
"self",
",",
"input_lines",
")",
":",
"current_macro",
"=",
"None",
"for",
"line",
"in",
"input_lines",
":",
"if",
"line",
".",
"startswith",
"(",
"'PDDM-'",
")",
":",
"directive",
"=",
"line",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"0",
"]",
"if",
"directive",
"==",
"'PDDM-DEFINE'",
":",
"name",
",",
"args",
"=",
"self",
".",
"_ParseDefineLine",
"(",
"line",
")",
"if",
"self",
".",
"_macros",
".",
"get",
"(",
"name",
")",
":",
"raise",
"PDDMError",
"(",
"'Attempt to redefine macro: \"%s\"'",
"%",
"line",
")",
"current_macro",
"=",
"self",
".",
"MacroDefinition",
"(",
"name",
",",
"args",
")",
"self",
".",
"_macros",
"[",
"name",
"]",
"=",
"current_macro",
"continue",
"if",
"directive",
"==",
"'PDDM-DEFINE-END'",
":",
"if",
"not",
"current_macro",
":",
"raise",
"PDDMError",
"(",
"'Got DEFINE-END directive without an active macro:'",
"' \"%s\"'",
"%",
"line",
")",
"current_macro",
"=",
"None",
"continue",
"raise",
"PDDMError",
"(",
"'Hit a line with an unknown directive: \"%s\"'",
"%",
"line",
")",
"if",
"current_macro",
":",
"current_macro",
".",
"AppendLine",
"(",
"line",
")",
"continue",
"# Allow blank lines between macro definitions.",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"''",
":",
"continue",
"raise",
"PDDMError",
"(",
"'Hit a line that wasn\\'t a directive and no open macro'",
"' definition: \"%s\"'",
"%",
"line",
")"
] | 33.051282 | 17.794872 |
def init_session(db_url=None, echo=False, engine=None, settings=None):
"""
A SQLAlchemy Session requires that an engine be initialized if one isn't
provided.
"""
if engine is None:
engine = init_engine(db_url=db_url, echo=echo, settings=settings)
return sessionmaker(bind=engine) | [
"def",
"init_session",
"(",
"db_url",
"=",
"None",
",",
"echo",
"=",
"False",
",",
"engine",
"=",
"None",
",",
"settings",
"=",
"None",
")",
":",
"if",
"engine",
"is",
"None",
":",
"engine",
"=",
"init_engine",
"(",
"db_url",
"=",
"db_url",
",",
"echo",
"=",
"echo",
",",
"settings",
"=",
"settings",
")",
"return",
"sessionmaker",
"(",
"bind",
"=",
"engine",
")"
] | 38 | 18.5 |
def _srvc_make_overview_tables(self, tables_to_make, traj=None):
"""Creates the overview tables in overview group"""
for table_name in tables_to_make:
# Prepare the tables desciptions, depending on which overview table we create
# we need different columns
paramdescriptiondict = {}
expectedrows = 0
# Every overview table has a name and location column
paramdescriptiondict['location'] = pt.StringCol(
pypetconstants.HDF5_STRCOL_MAX_LOCATION_LENGTH,
pos=0)
paramdescriptiondict['name'] = pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_NAME_LENGTH,
pos=1)
paramdescriptiondict['comment'] = pt.StringCol(
pypetconstants.HDF5_STRCOL_MAX_COMMENT_LENGTH)
paramdescriptiondict['value'] = pt.StringCol(
pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH, pos=2)
if table_name == 'config_overview':
if traj is not None:
expectedrows = len(traj._config)
if table_name == 'parameters_overview':
if traj is not None:
expectedrows = len(traj._parameters)
if table_name == 'explored_parameters_overview':
paramdescriptiondict['range'] = pt.StringCol(
pypetconstants.HDF5_STRCOL_MAX_RANGE_LENGTH)
paramdescriptiondict['length'] = pt.IntCol()
if traj is not None:
expectedrows = len(traj._explored_parameters)
if table_name.endswith('summary'):
paramdescriptiondict['hexdigest'] = pt.StringCol(64, pos=10)
# Check if the user provided an estimate of the amount of results per run
# This can help to speed up storing
if table_name == 'derived_parameters_overview':
expectedrows = self._derived_parameters_per_run
if traj is not None:
expectedrows *= len(traj)
expectedrows += len(traj._derived_parameters)
if table_name == 'results_overview':
expectedrows = self._results_per_run
if traj is not None:
expectedrows *= len(traj)
expectedrows += len(traj._results)
if expectedrows > 0:
paramtable = self._all_get_or_create_table(where=self._overview_group,
tablename=table_name,
description=paramdescriptiondict,
expectedrows=expectedrows)
else:
paramtable = self._all_get_or_create_table(where=self._overview_group,
tablename=table_name,
description=paramdescriptiondict)
paramtable.flush() | [
"def",
"_srvc_make_overview_tables",
"(",
"self",
",",
"tables_to_make",
",",
"traj",
"=",
"None",
")",
":",
"for",
"table_name",
"in",
"tables_to_make",
":",
"# Prepare the tables desciptions, depending on which overview table we create",
"# we need different columns",
"paramdescriptiondict",
"=",
"{",
"}",
"expectedrows",
"=",
"0",
"# Every overview table has a name and location column",
"paramdescriptiondict",
"[",
"'location'",
"]",
"=",
"pt",
".",
"StringCol",
"(",
"pypetconstants",
".",
"HDF5_STRCOL_MAX_LOCATION_LENGTH",
",",
"pos",
"=",
"0",
")",
"paramdescriptiondict",
"[",
"'name'",
"]",
"=",
"pt",
".",
"StringCol",
"(",
"pypetconstants",
".",
"HDF5_STRCOL_MAX_NAME_LENGTH",
",",
"pos",
"=",
"1",
")",
"paramdescriptiondict",
"[",
"'comment'",
"]",
"=",
"pt",
".",
"StringCol",
"(",
"pypetconstants",
".",
"HDF5_STRCOL_MAX_COMMENT_LENGTH",
")",
"paramdescriptiondict",
"[",
"'value'",
"]",
"=",
"pt",
".",
"StringCol",
"(",
"pypetconstants",
".",
"HDF5_STRCOL_MAX_VALUE_LENGTH",
",",
"pos",
"=",
"2",
")",
"if",
"table_name",
"==",
"'config_overview'",
":",
"if",
"traj",
"is",
"not",
"None",
":",
"expectedrows",
"=",
"len",
"(",
"traj",
".",
"_config",
")",
"if",
"table_name",
"==",
"'parameters_overview'",
":",
"if",
"traj",
"is",
"not",
"None",
":",
"expectedrows",
"=",
"len",
"(",
"traj",
".",
"_parameters",
")",
"if",
"table_name",
"==",
"'explored_parameters_overview'",
":",
"paramdescriptiondict",
"[",
"'range'",
"]",
"=",
"pt",
".",
"StringCol",
"(",
"pypetconstants",
".",
"HDF5_STRCOL_MAX_RANGE_LENGTH",
")",
"paramdescriptiondict",
"[",
"'length'",
"]",
"=",
"pt",
".",
"IntCol",
"(",
")",
"if",
"traj",
"is",
"not",
"None",
":",
"expectedrows",
"=",
"len",
"(",
"traj",
".",
"_explored_parameters",
")",
"if",
"table_name",
".",
"endswith",
"(",
"'summary'",
")",
":",
"paramdescriptiondict",
"[",
"'hexdigest'",
"]",
"=",
"pt",
".",
"StringCol",
"(",
"64",
",",
"pos",
"=",
"10",
")",
"# Check if the user provided an estimate of the amount of results per run",
"# This can help to speed up storing",
"if",
"table_name",
"==",
"'derived_parameters_overview'",
":",
"expectedrows",
"=",
"self",
".",
"_derived_parameters_per_run",
"if",
"traj",
"is",
"not",
"None",
":",
"expectedrows",
"*=",
"len",
"(",
"traj",
")",
"expectedrows",
"+=",
"len",
"(",
"traj",
".",
"_derived_parameters",
")",
"if",
"table_name",
"==",
"'results_overview'",
":",
"expectedrows",
"=",
"self",
".",
"_results_per_run",
"if",
"traj",
"is",
"not",
"None",
":",
"expectedrows",
"*=",
"len",
"(",
"traj",
")",
"expectedrows",
"+=",
"len",
"(",
"traj",
".",
"_results",
")",
"if",
"expectedrows",
">",
"0",
":",
"paramtable",
"=",
"self",
".",
"_all_get_or_create_table",
"(",
"where",
"=",
"self",
".",
"_overview_group",
",",
"tablename",
"=",
"table_name",
",",
"description",
"=",
"paramdescriptiondict",
",",
"expectedrows",
"=",
"expectedrows",
")",
"else",
":",
"paramtable",
"=",
"self",
".",
"_all_get_or_create_table",
"(",
"where",
"=",
"self",
".",
"_overview_group",
",",
"tablename",
"=",
"table_name",
",",
"description",
"=",
"paramdescriptiondict",
")",
"paramtable",
".",
"flush",
"(",
")"
] | 45.402985 | 24.671642 |
def network_expansion_diff (networkA, networkB, filename=None, boundaries=[]):
"""Plot relative network expansion derivation of AC- and DC-lines.
Parameters
----------
networkA: PyPSA network container
Holds topology of grid including results from powerflow analysis
networkB: PyPSA network container
Holds topology of grid including results from powerflow analysis
filename: str or None
Save figure in this direction
boundaries: array
Set boundaries of heatmap axis
"""
cmap = plt.cm.jet
array_line = [['Line'] * len(networkA.lines), networkA.lines.index]
extension_lines = pd.Series(100 *\
((networkA.lines.s_nom_opt - \
networkB.lines.s_nom_opt)/\
networkA.lines.s_nom_opt ).values,\
index=array_line)
array_link = [['Link'] * len(networkA.links), networkA.links.index]
extension_links = pd.Series(100 *
((networkA.links.p_nom_opt -\
networkB.links.p_nom_opt)/\
networkA.links.p_nom_opt).values,\
index=array_link)
extension = extension_lines.append(extension_links)
ll = networkA.plot(
line_colors=extension,
line_cmap=cmap,
bus_sizes=0,
title="Derivation of AC- and DC-line extension",
line_widths=2)
if not boundaries:
v = np.linspace(min(extension), max(extension), 101)
boundaries = [min(extension).round(0), max(extension).round(0)]
else:
v = np.linspace(boundaries[0], boundaries[1], 101)
if not extension_links.empty:
cb_Link = plt.colorbar(ll[2], boundaries=v,
ticks=v[0:101:10])
cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1])
cb_Link.remove()
cb = plt.colorbar(ll[1], boundaries=v,
ticks=v[0:101:10], fraction=0.046, pad=0.04)
cb.set_clim(vmin=boundaries[0], vmax=boundaries[1])
cb.set_label('line extension derivation in %')
if filename is None:
plt.show()
else:
plt.savefig(filename)
plt.close() | [
"def",
"network_expansion_diff",
"(",
"networkA",
",",
"networkB",
",",
"filename",
"=",
"None",
",",
"boundaries",
"=",
"[",
"]",
")",
":",
"cmap",
"=",
"plt",
".",
"cm",
".",
"jet",
"array_line",
"=",
"[",
"[",
"'Line'",
"]",
"*",
"len",
"(",
"networkA",
".",
"lines",
")",
",",
"networkA",
".",
"lines",
".",
"index",
"]",
"extension_lines",
"=",
"pd",
".",
"Series",
"(",
"100",
"*",
"(",
"(",
"networkA",
".",
"lines",
".",
"s_nom_opt",
"-",
"networkB",
".",
"lines",
".",
"s_nom_opt",
")",
"/",
"networkA",
".",
"lines",
".",
"s_nom_opt",
")",
".",
"values",
",",
"index",
"=",
"array_line",
")",
"array_link",
"=",
"[",
"[",
"'Link'",
"]",
"*",
"len",
"(",
"networkA",
".",
"links",
")",
",",
"networkA",
".",
"links",
".",
"index",
"]",
"extension_links",
"=",
"pd",
".",
"Series",
"(",
"100",
"*",
"(",
"(",
"networkA",
".",
"links",
".",
"p_nom_opt",
"-",
"networkB",
".",
"links",
".",
"p_nom_opt",
")",
"/",
"networkA",
".",
"links",
".",
"p_nom_opt",
")",
".",
"values",
",",
"index",
"=",
"array_link",
")",
"extension",
"=",
"extension_lines",
".",
"append",
"(",
"extension_links",
")",
"ll",
"=",
"networkA",
".",
"plot",
"(",
"line_colors",
"=",
"extension",
",",
"line_cmap",
"=",
"cmap",
",",
"bus_sizes",
"=",
"0",
",",
"title",
"=",
"\"Derivation of AC- and DC-line extension\"",
",",
"line_widths",
"=",
"2",
")",
"if",
"not",
"boundaries",
":",
"v",
"=",
"np",
".",
"linspace",
"(",
"min",
"(",
"extension",
")",
",",
"max",
"(",
"extension",
")",
",",
"101",
")",
"boundaries",
"=",
"[",
"min",
"(",
"extension",
")",
".",
"round",
"(",
"0",
")",
",",
"max",
"(",
"extension",
")",
".",
"round",
"(",
"0",
")",
"]",
"else",
":",
"v",
"=",
"np",
".",
"linspace",
"(",
"boundaries",
"[",
"0",
"]",
",",
"boundaries",
"[",
"1",
"]",
",",
"101",
")",
"if",
"not",
"extension_links",
".",
"empty",
":",
"cb_Link",
"=",
"plt",
".",
"colorbar",
"(",
"ll",
"[",
"2",
"]",
",",
"boundaries",
"=",
"v",
",",
"ticks",
"=",
"v",
"[",
"0",
":",
"101",
":",
"10",
"]",
")",
"cb_Link",
".",
"set_clim",
"(",
"vmin",
"=",
"boundaries",
"[",
"0",
"]",
",",
"vmax",
"=",
"boundaries",
"[",
"1",
"]",
")",
"cb_Link",
".",
"remove",
"(",
")",
"cb",
"=",
"plt",
".",
"colorbar",
"(",
"ll",
"[",
"1",
"]",
",",
"boundaries",
"=",
"v",
",",
"ticks",
"=",
"v",
"[",
"0",
":",
"101",
":",
"10",
"]",
",",
"fraction",
"=",
"0.046",
",",
"pad",
"=",
"0.04",
")",
"cb",
".",
"set_clim",
"(",
"vmin",
"=",
"boundaries",
"[",
"0",
"]",
",",
"vmax",
"=",
"boundaries",
"[",
"1",
"]",
")",
"cb",
".",
"set_label",
"(",
"'line extension derivation in %'",
")",
"if",
"filename",
"is",
"None",
":",
"plt",
".",
"show",
"(",
")",
"else",
":",
"plt",
".",
"savefig",
"(",
"filename",
")",
"plt",
".",
"close",
"(",
")"
] | 33.411765 | 21.955882 |
def p_iteration_statement_4(self, p):
"""
iteration_statement \
: FOR LPAREN left_hand_side_expr IN expr RPAREN statement
"""
p[0] = ast.ForIn(item=p[3], iterable=p[5], statement=p[7]) | [
"def",
"p_iteration_statement_4",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"ForIn",
"(",
"item",
"=",
"p",
"[",
"3",
"]",
",",
"iterable",
"=",
"p",
"[",
"5",
"]",
",",
"statement",
"=",
"p",
"[",
"7",
"]",
")"
] | 37.166667 | 11.5 |
def set_input_by_xpath(self, xpath, value):
"""
Set the value of form element by xpath
:param xpath: xpath path
:param value: value which should be set to element
"""
elem = self.select(xpath).node()
if self._lxml_form is None:
# Explicitly set the default form
# which contains found element
parent = elem
while True:
parent = parent.getparent() # pylint: disable=no-member
if parent.tag == 'form':
self._lxml_form = parent
break
# pylint: disable=no-member
return self.set_input(elem.get('name'), value) | [
"def",
"set_input_by_xpath",
"(",
"self",
",",
"xpath",
",",
"value",
")",
":",
"elem",
"=",
"self",
".",
"select",
"(",
"xpath",
")",
".",
"node",
"(",
")",
"if",
"self",
".",
"_lxml_form",
"is",
"None",
":",
"# Explicitly set the default form",
"# which contains found element",
"parent",
"=",
"elem",
"while",
"True",
":",
"parent",
"=",
"parent",
".",
"getparent",
"(",
")",
"# pylint: disable=no-member",
"if",
"parent",
".",
"tag",
"==",
"'form'",
":",
"self",
".",
"_lxml_form",
"=",
"parent",
"break",
"# pylint: disable=no-member",
"return",
"self",
".",
"set_input",
"(",
"elem",
".",
"get",
"(",
"'name'",
")",
",",
"value",
")"
] | 30.909091 | 14 |
def into_view(self):
"""Converts the index into a view"""
try:
return View._from_ptr(rustcall(
_lib.lsm_index_into_view,
self._get_ptr()))
finally:
self._ptr = None | [
"def",
"into_view",
"(",
"self",
")",
":",
"try",
":",
"return",
"View",
".",
"_from_ptr",
"(",
"rustcall",
"(",
"_lib",
".",
"lsm_index_into_view",
",",
"self",
".",
"_get_ptr",
"(",
")",
")",
")",
"finally",
":",
"self",
".",
"_ptr",
"=",
"None"
] | 29.625 | 11.875 |
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a unyt_array to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the
arrays are datasets at the top level by default.
Examples
--------
>>> from unyt import cm
>>> a = [1,2,3]*cm
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo) # doctest: +SKIP
"""
from unyt._on_demand_imports import _h5py as h5py
import pickle
if info is None:
info = {}
info["units"] = str(self.units)
info["unit_registry"] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = "array_data"
f = h5py.File(filename)
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close() | [
"def",
"write_hdf5",
"(",
"self",
",",
"filename",
",",
"dataset_name",
"=",
"None",
",",
"info",
"=",
"None",
",",
"group_name",
"=",
"None",
")",
":",
"from",
"unyt",
".",
"_on_demand_imports",
"import",
"_h5py",
"as",
"h5py",
"import",
"pickle",
"if",
"info",
"is",
"None",
":",
"info",
"=",
"{",
"}",
"info",
"[",
"\"units\"",
"]",
"=",
"str",
"(",
"self",
".",
"units",
")",
"info",
"[",
"\"unit_registry\"",
"]",
"=",
"np",
".",
"void",
"(",
"pickle",
".",
"dumps",
"(",
"self",
".",
"units",
".",
"registry",
".",
"lut",
")",
")",
"if",
"dataset_name",
"is",
"None",
":",
"dataset_name",
"=",
"\"array_data\"",
"f",
"=",
"h5py",
".",
"File",
"(",
"filename",
")",
"if",
"group_name",
"is",
"not",
"None",
":",
"if",
"group_name",
"in",
"f",
":",
"g",
"=",
"f",
"[",
"group_name",
"]",
"else",
":",
"g",
"=",
"f",
".",
"create_group",
"(",
"group_name",
")",
"else",
":",
"g",
"=",
"f",
"if",
"dataset_name",
"in",
"g",
".",
"keys",
"(",
")",
":",
"d",
"=",
"g",
"[",
"dataset_name",
"]",
"# Overwrite without deleting if we can get away with it.",
"if",
"d",
".",
"shape",
"==",
"self",
".",
"shape",
"and",
"d",
".",
"dtype",
"==",
"self",
".",
"dtype",
":",
"d",
"[",
"...",
"]",
"=",
"self",
"for",
"k",
"in",
"d",
".",
"attrs",
".",
"keys",
"(",
")",
":",
"del",
"d",
".",
"attrs",
"[",
"k",
"]",
"else",
":",
"del",
"f",
"[",
"dataset_name",
"]",
"d",
"=",
"g",
".",
"create_dataset",
"(",
"dataset_name",
",",
"data",
"=",
"self",
")",
"else",
":",
"d",
"=",
"g",
".",
"create_dataset",
"(",
"dataset_name",
",",
"data",
"=",
"self",
")",
"for",
"k",
",",
"v",
"in",
"info",
".",
"items",
"(",
")",
":",
"d",
".",
"attrs",
"[",
"k",
"]",
"=",
"v",
"f",
".",
"close",
"(",
")"
] | 31.746032 | 20.15873 |
def polygonVertices(x, y, radius, sides, rotationDegrees=0, stretchHorizontal=1.0, stretchVertical=1.0):
"""
Returns a generator that produces the (x, y) points of the vertices of a regular polygon.
`x` and `y` mark the center of the polygon, `radius` indicates the size,
`sides` specifies what kind of polygon it is.
Odd-sided polygons have a pointed corner at the top and flat horizontal
side at the bottom. The `rotationDegrees` argument will rotate the polygon
counterclockwise.
The polygon can be stretched by passing `stretchHorizontal` or `stretchVertical`
arguments. Passing `2.0` for `stretchHorizontal`, for example, will double with
width of the polygon.
If `filled` is set to `True`, the generator will also produce the interior
(x, y) points.
(Note: The `thickness` parameter is not yet implemented.)
>>> list(polygonVertices(10, 10, 8, 5))
[(10, 2.0), (3, 8.0), (6, 16.0), (14, 16.0), (17, 8.0)]
>>> drawPoints(polygonVertices(10, 10, 8, 5))
,,,,,,,O,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
O,,,,,,,,,,,,,O
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,O,,,,,,,O,,,
>>> drawPoints(polygonVertices(10, 10, 8, 5, rotationDegrees=20))
,,,,,O,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
O,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,O,,,,,,,,
"""
# TODO - validate x, y, radius, sides
# Setting the start point like this guarantees a flat side will be on the "bottom" of the polygon.
if sides % 2 == 1:
angleOfStartPointDegrees = 90 + rotationDegrees
else:
angleOfStartPointDegrees = 90 + rotationDegrees - (180 / sides)
for sideNum in range(sides):
angleOfPointRadians = math.radians(angleOfStartPointDegrees + (360 / sides * sideNum))
yield ( int(math.cos(angleOfPointRadians) * radius * stretchHorizontal) + x,
-(int(math.sin(angleOfPointRadians) * radius) * stretchVertical) + y) | [
"def",
"polygonVertices",
"(",
"x",
",",
"y",
",",
"radius",
",",
"sides",
",",
"rotationDegrees",
"=",
"0",
",",
"stretchHorizontal",
"=",
"1.0",
",",
"stretchVertical",
"=",
"1.0",
")",
":",
"# TODO - validate x, y, radius, sides",
"# Setting the start point like this guarantees a flat side will be on the \"bottom\" of the polygon.",
"if",
"sides",
"%",
"2",
"==",
"1",
":",
"angleOfStartPointDegrees",
"=",
"90",
"+",
"rotationDegrees",
"else",
":",
"angleOfStartPointDegrees",
"=",
"90",
"+",
"rotationDegrees",
"-",
"(",
"180",
"/",
"sides",
")",
"for",
"sideNum",
"in",
"range",
"(",
"sides",
")",
":",
"angleOfPointRadians",
"=",
"math",
".",
"radians",
"(",
"angleOfStartPointDegrees",
"+",
"(",
"360",
"/",
"sides",
"*",
"sideNum",
")",
")",
"yield",
"(",
"int",
"(",
"math",
".",
"cos",
"(",
"angleOfPointRadians",
")",
"*",
"radius",
"*",
"stretchHorizontal",
")",
"+",
"x",
",",
"-",
"(",
"int",
"(",
"math",
".",
"sin",
"(",
"angleOfPointRadians",
")",
"*",
"radius",
")",
"*",
"stretchVertical",
")",
"+",
"y",
")"
] | 32.823529 | 26.676471 |
def paintEvent(self, event):
"""Qt Override.
Include a validation icon to the left of the line edit.
"""
super(IconLineEdit, self).paintEvent(event)
painter = QPainter(self)
rect = self.geometry()
space = int((rect.height())/6)
h = rect.height() - space
w = rect.width() - h
if self._icon_visible:
if self._status and self._status_set:
pixmap = self._set_icon.pixmap(h, h)
elif self._status:
pixmap = self._valid_icon.pixmap(h, h)
else:
pixmap = self._invalid_icon.pixmap(h, h)
painter.drawPixmap(w, space, pixmap)
application_style = QApplication.style().objectName()
if self._application_style != application_style:
self._application_style = application_style
self._refresh()
# Small hack to gurantee correct padding on Spyder start
if self._paint_count < 5:
self._paint_count += 1
self._refresh() | [
"def",
"paintEvent",
"(",
"self",
",",
"event",
")",
":",
"super",
"(",
"IconLineEdit",
",",
"self",
")",
".",
"paintEvent",
"(",
"event",
")",
"painter",
"=",
"QPainter",
"(",
"self",
")",
"rect",
"=",
"self",
".",
"geometry",
"(",
")",
"space",
"=",
"int",
"(",
"(",
"rect",
".",
"height",
"(",
")",
")",
"/",
"6",
")",
"h",
"=",
"rect",
".",
"height",
"(",
")",
"-",
"space",
"w",
"=",
"rect",
".",
"width",
"(",
")",
"-",
"h",
"if",
"self",
".",
"_icon_visible",
":",
"if",
"self",
".",
"_status",
"and",
"self",
".",
"_status_set",
":",
"pixmap",
"=",
"self",
".",
"_set_icon",
".",
"pixmap",
"(",
"h",
",",
"h",
")",
"elif",
"self",
".",
"_status",
":",
"pixmap",
"=",
"self",
".",
"_valid_icon",
".",
"pixmap",
"(",
"h",
",",
"h",
")",
"else",
":",
"pixmap",
"=",
"self",
".",
"_invalid_icon",
".",
"pixmap",
"(",
"h",
",",
"h",
")",
"painter",
".",
"drawPixmap",
"(",
"w",
",",
"space",
",",
"pixmap",
")",
"application_style",
"=",
"QApplication",
".",
"style",
"(",
")",
".",
"objectName",
"(",
")",
"if",
"self",
".",
"_application_style",
"!=",
"application_style",
":",
"self",
".",
"_application_style",
"=",
"application_style",
"self",
".",
"_refresh",
"(",
")",
"# Small hack to gurantee correct padding on Spyder start",
"if",
"self",
".",
"_paint_count",
"<",
"5",
":",
"self",
".",
"_paint_count",
"+=",
"1",
"self",
".",
"_refresh",
"(",
")"
] | 32.1875 | 16.9375 |
def ramp_array(rampdata, ti, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, nsig=None, normalize=False):
"""Loop over the first axis applying ramp processing.
*rampdata* is assumed to be a 3D numpy.ndarray containing the
result of a nIR observation in folow-up-the-ramp mode.
The shape of the array must be of the form N_s x M x N, with N_s being
the number of samples.
:param fowlerdata: Convertible to a 3D numpy.ndarray
:param ti: Integration time.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of signal, variance of the signal, numper of pixels used
and badpixel mask.
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if ti <= 0:
raise ValueError("invalid parameter, ti <= 0.0")
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
rampdata = numpy.asarray(rampdata)
if rampdata.ndim != 3:
raise ValueError('rampdata must be 3D')
# change byteorder
ndtype = rampdata.dtype.newbyteorder('=')
rampdata = numpy.asarray(rampdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(rampdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (rampdata.shape[1], rampdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
msg = 'shape of badpixels is not compatible with shape of rampdata'
raise ValueError(msg)
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_ramp_intl(
rampdata, ti, gain, ron, badpixels,
saturation, blank, result, var, npix, mask
)
return result, var, npix, mask | [
"def",
"ramp_array",
"(",
"rampdata",
",",
"ti",
",",
"gain",
"=",
"1.0",
",",
"ron",
"=",
"1.0",
",",
"badpixels",
"=",
"None",
",",
"dtype",
"=",
"'float64'",
",",
"saturation",
"=",
"65631",
",",
"blank",
"=",
"0",
",",
"nsig",
"=",
"None",
",",
"normalize",
"=",
"False",
")",
":",
"import",
"numina",
".",
"array",
".",
"_nirproc",
"as",
"_nirproc",
"if",
"ti",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ti <= 0.0\"",
")",
"if",
"gain",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, gain <= 0.0\"",
")",
"if",
"ron",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ron < 0.0\"",
")",
"if",
"saturation",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, saturation <= 0\"",
")",
"rampdata",
"=",
"numpy",
".",
"asarray",
"(",
"rampdata",
")",
"if",
"rampdata",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'rampdata must be 3D'",
")",
"# change byteorder",
"ndtype",
"=",
"rampdata",
".",
"dtype",
".",
"newbyteorder",
"(",
"'='",
")",
"rampdata",
"=",
"numpy",
".",
"asarray",
"(",
"rampdata",
",",
"dtype",
"=",
"ndtype",
")",
"# type of the output",
"fdtype",
"=",
"numpy",
".",
"result_type",
"(",
"rampdata",
".",
"dtype",
",",
"dtype",
")",
"# Type of the mask",
"mdtype",
"=",
"numpy",
".",
"dtype",
"(",
"'uint8'",
")",
"fshape",
"=",
"(",
"rampdata",
".",
"shape",
"[",
"1",
"]",
",",
"rampdata",
".",
"shape",
"[",
"2",
"]",
")",
"if",
"badpixels",
"is",
"None",
":",
"badpixels",
"=",
"numpy",
".",
"zeros",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"else",
":",
"if",
"badpixels",
".",
"shape",
"!=",
"fshape",
":",
"msg",
"=",
"'shape of badpixels is not compatible with shape of rampdata'",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"badpixels",
".",
"dtype",
"!=",
"mdtype",
":",
"raise",
"ValueError",
"(",
"'dtype of badpixels must be uint8'",
")",
"result",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"fdtype",
")",
"var",
"=",
"numpy",
".",
"empty_like",
"(",
"result",
")",
"npix",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"mask",
"=",
"badpixels",
".",
"copy",
"(",
")",
"_nirproc",
".",
"_process_ramp_intl",
"(",
"rampdata",
",",
"ti",
",",
"gain",
",",
"ron",
",",
"badpixels",
",",
"saturation",
",",
"blank",
",",
"result",
",",
"var",
",",
"npix",
",",
"mask",
")",
"return",
"result",
",",
"var",
",",
"npix",
",",
"mask"
] | 35.25 | 18.808824 |
def acquisition_function_withGradients(self, x):
"""
Returns the acquisition function and its its gradient at x.
"""
aqu_x = self.acquisition_function(x)
aqu_x_grad = self.d_acquisition_function(x)
return aqu_x, aqu_x_grad | [
"def",
"acquisition_function_withGradients",
"(",
"self",
",",
"x",
")",
":",
"aqu_x",
"=",
"self",
".",
"acquisition_function",
"(",
"x",
")",
"aqu_x_grad",
"=",
"self",
".",
"d_acquisition_function",
"(",
"x",
")",
"return",
"aqu_x",
",",
"aqu_x_grad"
] | 38.428571 | 9 |
def mark_dirty(self):
"""Invalidates memoized fingerprints for this payload.
Exposed for testing.
:API: public
"""
self._fingerprint_memo_map = {}
for field in self._fields.values():
field.mark_dirty() | [
"def",
"mark_dirty",
"(",
"self",
")",
":",
"self",
".",
"_fingerprint_memo_map",
"=",
"{",
"}",
"for",
"field",
"in",
"self",
".",
"_fields",
".",
"values",
"(",
")",
":",
"field",
".",
"mark_dirty",
"(",
")"
] | 22.4 | 16.1 |
def compute_similarities(hdf5_file, data, N_processes):
"""Compute a matrix of pairwise L2 Euclidean distances among samples from 'data'.
This computation is to be done in parallel by 'N_processes' distinct processes.
Those processes (which are instances of the class 'Similarities_worker')
are prevented from simultaneously accessing the HDF5 data structure
at 'hdf5_file' through the use of a multiprocessing.Lock object.
"""
slice_queue = multiprocessing.JoinableQueue()
pid_list = []
for i in range(N_processes):
worker = Similarities_worker(hdf5_file, '/aff_prop_group/similarities',
data, slice_queue)
worker.daemon = True
worker.start()
pid_list.append(worker.pid)
for rows_slice in chunk_generator(data.shape[0], 2 * N_processes):
slice_queue.put(rows_slice)
slice_queue.join()
slice_queue.close()
terminate_processes(pid_list)
gc.collect() | [
"def",
"compute_similarities",
"(",
"hdf5_file",
",",
"data",
",",
"N_processes",
")",
":",
"slice_queue",
"=",
"multiprocessing",
".",
"JoinableQueue",
"(",
")",
"pid_list",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"N_processes",
")",
":",
"worker",
"=",
"Similarities_worker",
"(",
"hdf5_file",
",",
"'/aff_prop_group/similarities'",
",",
"data",
",",
"slice_queue",
")",
"worker",
".",
"daemon",
"=",
"True",
"worker",
".",
"start",
"(",
")",
"pid_list",
".",
"append",
"(",
"worker",
".",
"pid",
")",
"for",
"rows_slice",
"in",
"chunk_generator",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"2",
"*",
"N_processes",
")",
":",
"slice_queue",
".",
"put",
"(",
"rows_slice",
")",
"slice_queue",
".",
"join",
"(",
")",
"slice_queue",
".",
"close",
"(",
")",
"terminate_processes",
"(",
"pid_list",
")",
"gc",
".",
"collect",
"(",
")"
] | 38.615385 | 22.230769 |
def sub_channel(self):
"""Get the SUB socket channel object."""
if self._sub_channel is None:
self._sub_channel = self.sub_channel_class(self.context,
self.session,
(self.ip, self.iopub_port))
return self._sub_channel | [
"def",
"sub_channel",
"(",
"self",
")",
":",
"if",
"self",
".",
"_sub_channel",
"is",
"None",
":",
"self",
".",
"_sub_channel",
"=",
"self",
".",
"sub_channel_class",
"(",
"self",
".",
"context",
",",
"self",
".",
"session",
",",
"(",
"self",
".",
"ip",
",",
"self",
".",
"iopub_port",
")",
")",
"return",
"self",
".",
"_sub_channel"
] | 51 | 18.142857 |
def simplify_other(major, minor, dist):
"""
Simplify the point featurecollection of poi with another point features accoording by distance.
Attention: point featurecollection only
Keyword arguments:
major -- major geojson
minor -- minor geojson
dist -- distance
return a geojson featurecollection with two parts of featurecollection
"""
result = deepcopy(major)
if major['type'] == 'FeatureCollection' and minor['type'] == 'FeatureCollection':
arc = dist/6371000*180/math.pi*2
for minorfeature in minor['features']:
minorgeom = minorfeature['geometry']
minorlng = minorgeom['coordinates'][0]
minorlat = minorgeom['coordinates'][1]
is_accept = True
for mainfeature in major['features']:
maingeom = mainfeature['geometry']
mainlng = maingeom['coordinates'][0]
mainlat = maingeom['coordinates'][1]
if abs(minorlat-mainlat) <= arc and abs(minorlng-mainlng) <= arc:
distance = point_distance(maingeom, minorgeom)
if distance < dist:
is_accept = False
break
if is_accept:
result["features"].append(minorfeature)
return result | [
"def",
"simplify_other",
"(",
"major",
",",
"minor",
",",
"dist",
")",
":",
"result",
"=",
"deepcopy",
"(",
"major",
")",
"if",
"major",
"[",
"'type'",
"]",
"==",
"'FeatureCollection'",
"and",
"minor",
"[",
"'type'",
"]",
"==",
"'FeatureCollection'",
":",
"arc",
"=",
"dist",
"/",
"6371000",
"*",
"180",
"/",
"math",
".",
"pi",
"*",
"2",
"for",
"minorfeature",
"in",
"minor",
"[",
"'features'",
"]",
":",
"minorgeom",
"=",
"minorfeature",
"[",
"'geometry'",
"]",
"minorlng",
"=",
"minorgeom",
"[",
"'coordinates'",
"]",
"[",
"0",
"]",
"minorlat",
"=",
"minorgeom",
"[",
"'coordinates'",
"]",
"[",
"1",
"]",
"is_accept",
"=",
"True",
"for",
"mainfeature",
"in",
"major",
"[",
"'features'",
"]",
":",
"maingeom",
"=",
"mainfeature",
"[",
"'geometry'",
"]",
"mainlng",
"=",
"maingeom",
"[",
"'coordinates'",
"]",
"[",
"0",
"]",
"mainlat",
"=",
"maingeom",
"[",
"'coordinates'",
"]",
"[",
"1",
"]",
"if",
"abs",
"(",
"minorlat",
"-",
"mainlat",
")",
"<=",
"arc",
"and",
"abs",
"(",
"minorlng",
"-",
"mainlng",
")",
"<=",
"arc",
":",
"distance",
"=",
"point_distance",
"(",
"maingeom",
",",
"minorgeom",
")",
"if",
"distance",
"<",
"dist",
":",
"is_accept",
"=",
"False",
"break",
"if",
"is_accept",
":",
"result",
"[",
"\"features\"",
"]",
".",
"append",
"(",
"minorfeature",
")",
"return",
"result"
] | 38.676471 | 17.088235 |
def build_y(self):
"""Build transmission line admittance matrix into self.Y"""
if not self.n:
return
self.y1 = mul(self.u, self.g1 + self.b1 * 1j)
self.y2 = mul(self.u, self.g2 + self.b2 * 1j)
self.y12 = div(self.u, self.r + self.x * 1j)
self.m = polar(self.tap, self.phi * deg2rad)
self.m2 = abs(self.m)**2
self.mconj = conj(self.m)
# build self and mutual admittances into Y
self.Y = spmatrix(
div(self.y12 + self.y1, self.m2), self.a1, self.a1,
(self.nb, self.nb), 'z')
self.Y -= spmatrix(
div(self.y12, self.mconj), self.a1, self.a2, (self.nb, self.nb),
'z')
self.Y -= spmatrix(
div(self.y12, self.m), self.a2, self.a1, (self.nb, self.nb), 'z')
self.Y += spmatrix(self.y12 + self.y2, self.a2, self.a2,
(self.nb, self.nb), 'z') | [
"def",
"build_y",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"n",
":",
"return",
"self",
".",
"y1",
"=",
"mul",
"(",
"self",
".",
"u",
",",
"self",
".",
"g1",
"+",
"self",
".",
"b1",
"*",
"1j",
")",
"self",
".",
"y2",
"=",
"mul",
"(",
"self",
".",
"u",
",",
"self",
".",
"g2",
"+",
"self",
".",
"b2",
"*",
"1j",
")",
"self",
".",
"y12",
"=",
"div",
"(",
"self",
".",
"u",
",",
"self",
".",
"r",
"+",
"self",
".",
"x",
"*",
"1j",
")",
"self",
".",
"m",
"=",
"polar",
"(",
"self",
".",
"tap",
",",
"self",
".",
"phi",
"*",
"deg2rad",
")",
"self",
".",
"m2",
"=",
"abs",
"(",
"self",
".",
"m",
")",
"**",
"2",
"self",
".",
"mconj",
"=",
"conj",
"(",
"self",
".",
"m",
")",
"# build self and mutual admittances into Y",
"self",
".",
"Y",
"=",
"spmatrix",
"(",
"div",
"(",
"self",
".",
"y12",
"+",
"self",
".",
"y1",
",",
"self",
".",
"m2",
")",
",",
"self",
".",
"a1",
",",
"self",
".",
"a1",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")",
"self",
".",
"Y",
"-=",
"spmatrix",
"(",
"div",
"(",
"self",
".",
"y12",
",",
"self",
".",
"mconj",
")",
",",
"self",
".",
"a1",
",",
"self",
".",
"a2",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")",
"self",
".",
"Y",
"-=",
"spmatrix",
"(",
"div",
"(",
"self",
".",
"y12",
",",
"self",
".",
"m",
")",
",",
"self",
".",
"a2",
",",
"self",
".",
"a1",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")",
"self",
".",
"Y",
"+=",
"spmatrix",
"(",
"self",
".",
"y12",
"+",
"self",
".",
"y2",
",",
"self",
".",
"a2",
",",
"self",
".",
"a2",
",",
"(",
"self",
".",
"nb",
",",
"self",
".",
"nb",
")",
",",
"'z'",
")"
] | 41.5 | 17.090909 |
def batchnorm_2d(nf:int, norm_type:NormType=NormType.Batch):
    "A `nn.BatchNorm2d` layer over `nf` features, weight-initialized per `norm_type`."
    layer = nn.BatchNorm2d(nf)
    # `BatchZero` starts the affine scale at zero (residual-branch trick);
    # every other norm type starts it at one.
    initial_weight = 0. if norm_type == NormType.BatchZero else 1.
    with torch.no_grad():
        layer.weight.fill_(initial_weight)
        layer.bias.fill_(1e-3)
    return layer
"def",
"batchnorm_2d",
"(",
"nf",
":",
"int",
",",
"norm_type",
":",
"NormType",
"=",
"NormType",
".",
"Batch",
")",
":",
"bn",
"=",
"nn",
".",
"BatchNorm2d",
"(",
"nf",
")",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"bn",
".",
"bias",
".",
"fill_",
"(",
"1e-3",
")",
"bn",
".",
"weight",
".",
"fill_",
"(",
"0.",
"if",
"norm_type",
"==",
"NormType",
".",
"BatchZero",
"else",
"1.",
")",
"return",
"bn"
] | 43.142857 | 22.571429 |
def formatTime (self, record, datefmt=None):
    """Return the creation time of `record` as formatted text.

    NOTE: The `datefmt` parameter and `self.converter` (the time
    conversion method) are ignored. By convention, BSD Syslog
    Protocol messages use local time while (RFC 5424) Syslog
    Protocol messages use UTC.
    """
    if not self.bsd:
        # Syslog Protocol: UTC timestamp, format taken from the class.
        utc_time = datetime.datetime.utcfromtimestamp(record.created)
        return utc_time.strftime(self.SYS_DATEFMT)
    # BSD flavor: local time, and a zero-padded day-of-month is
    # replaced by a space-padded one ("Oct 05" -> "Oct  5").
    stamp = datetime.datetime.fromtimestamp(record.created).strftime(
        self.BSD_DATEFMT)
    if stamp[4] == '0':
        stamp = stamp[0:4] + ' ' + stamp[5:]
    return stamp
"def",
"formatTime",
"(",
"self",
",",
"record",
",",
"datefmt",
"=",
"None",
")",
":",
"if",
"self",
".",
"bsd",
":",
"lt_ts",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"record",
".",
"created",
")",
"ts",
"=",
"lt_ts",
".",
"strftime",
"(",
"self",
".",
"BSD_DATEFMT",
")",
"if",
"ts",
"[",
"4",
"]",
"==",
"'0'",
":",
"ts",
"=",
"ts",
"[",
"0",
":",
"4",
"]",
"+",
"' '",
"+",
"ts",
"[",
"5",
":",
"]",
"else",
":",
"utc_ts",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"record",
".",
"created",
")",
"ts",
"=",
"utc_ts",
".",
"strftime",
"(",
"self",
".",
"SYS_DATEFMT",
")",
"return",
"ts"
] | 42.470588 | 18.058824 |
def _convertEntities(self, match):
    """Used in a call to re.sub to replace HTML, XML, and numeric
    entities with the appropriate Unicode characters. If HTML
    entities are being converted, any unrecognized entities are
    escaped.

    `match` is a regex match whose group(1) is the entity name
    (without the leading '&' or trailing ';').
    """
    x = match.group(1)
    if self.convertHTMLEntities and x in name2codepoint:
        # Known HTML named entity -> the corresponding character.
        return unichr(name2codepoint[x])
    elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
        if self.convertXMLEntities:
            return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
        else:
            # XML entity left untouched.
            return u'&%s;' % x
    elif len(x) > 0 and x[0] == '#':
        # Handle numeric entities (&#NNN; decimal, &#xNNN; hex).
        if len(x) > 1 and x[1] == 'x':
            return unichr(int(x[2:], 16))
        else:
            return unichr(int(x[1:]))
    elif self.escapeUnrecognizedEntities:
        # BUG FIX: escape the leading ampersand so the unrecognized
        # entity survives literally as "&amp;foo;".  The original
        # returned the same u'&%s;' as the branch below, making the
        # "escape" flag a no-op.
        return u'&amp;%s;' % x
    else:
        # Leave the unrecognized entity as-is.
        return u'&%s;' % x
"def",
"_convertEntities",
"(",
"self",
",",
"match",
")",
":",
"x",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"self",
".",
"convertHTMLEntities",
"and",
"x",
"in",
"name2codepoint",
":",
"return",
"unichr",
"(",
"name2codepoint",
"[",
"x",
"]",
")",
"elif",
"x",
"in",
"self",
".",
"XML_ENTITIES_TO_SPECIAL_CHARS",
":",
"if",
"self",
".",
"convertXMLEntities",
":",
"return",
"self",
".",
"XML_ENTITIES_TO_SPECIAL_CHARS",
"[",
"x",
"]",
"else",
":",
"return",
"u'&%s;'",
"%",
"x",
"elif",
"len",
"(",
"x",
")",
">",
"0",
"and",
"x",
"[",
"0",
"]",
"==",
"'#'",
":",
"# Handle numeric entities",
"if",
"len",
"(",
"x",
")",
">",
"1",
"and",
"x",
"[",
"1",
"]",
"==",
"'x'",
":",
"return",
"unichr",
"(",
"int",
"(",
"x",
"[",
"2",
":",
"]",
",",
"16",
")",
")",
"else",
":",
"return",
"unichr",
"(",
"int",
"(",
"x",
"[",
"1",
":",
"]",
")",
")",
"elif",
"self",
".",
"escapeUnrecognizedEntities",
":",
"return",
"u'&%s;'",
"%",
"x",
"else",
":",
"return",
"u'&%s;'",
"%",
"x"
] | 38.791667 | 11.708333 |
def ready(self):
    """Perform application initialization."""
    # Initialize the type extension composer.
    from . composer import composer
    composer.discover_extensions()
    if sys.argv[1:2] == ['migrate']:
        # Skip signal and index registration while migrating: model
        # instances used during migrations are stripped-down (content
        # types etc. don't work correctly) and signals are not
        # versioned, so they are only guaranteed to work with the
        # latest version of the models.
        return
    # Connect all signals (import has side effects).
    from . import signals  # pylint: disable=unused-import
    # Register ES indices.
    from . builder import index_builder
"def",
"ready",
"(",
"self",
")",
":",
"# Initialize the type extension composer.",
"from",
".",
"composer",
"import",
"composer",
"composer",
".",
"discover_extensions",
"(",
")",
"is_migrating",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"2",
"]",
"==",
"[",
"'migrate'",
"]",
"if",
"is_migrating",
":",
"# Do not register signals and ES indices when:",
"# * migrating - model instances used during migrations do",
"# not contain the full functionality of models and things",
"# like content types don't work correctly and signals are",
"# not versioned so they are guaranteed to work only with",
"# the last version of the model",
"return",
"# Connect all signals",
"from",
".",
"import",
"signals",
"# pylint: disable=unused-import",
"# Register ES indices",
"from",
".",
"builder",
"import",
"index_builder"
] | 39.571429 | 18.857143 |
def _setup_file_hierarchy_limit(
self, files_count_limit, files_size_limit, temp_dir, cgroups, pid_to_kill):
"""Start thread that enforces any file-hiearchy limits."""
if files_count_limit is not None or files_size_limit is not None:
file_hierarchy_limit_thread = FileHierarchyLimitThread(
self._get_result_files_base(temp_dir),
files_count_limit=files_count_limit,
files_size_limit=files_size_limit,
cgroups=cgroups,
pid_to_kill=pid_to_kill,
callbackFn=self._set_termination_reason,
kill_process_fn=self._kill_process)
file_hierarchy_limit_thread.start()
return file_hierarchy_limit_thread
return None | [
"def",
"_setup_file_hierarchy_limit",
"(",
"self",
",",
"files_count_limit",
",",
"files_size_limit",
",",
"temp_dir",
",",
"cgroups",
",",
"pid_to_kill",
")",
":",
"if",
"files_count_limit",
"is",
"not",
"None",
"or",
"files_size_limit",
"is",
"not",
"None",
":",
"file_hierarchy_limit_thread",
"=",
"FileHierarchyLimitThread",
"(",
"self",
".",
"_get_result_files_base",
"(",
"temp_dir",
")",
",",
"files_count_limit",
"=",
"files_count_limit",
",",
"files_size_limit",
"=",
"files_size_limit",
",",
"cgroups",
"=",
"cgroups",
",",
"pid_to_kill",
"=",
"pid_to_kill",
",",
"callbackFn",
"=",
"self",
".",
"_set_termination_reason",
",",
"kill_process_fn",
"=",
"self",
".",
"_kill_process",
")",
"file_hierarchy_limit_thread",
".",
"start",
"(",
")",
"return",
"file_hierarchy_limit_thread",
"return",
"None"
] | 51.466667 | 14.666667 |
def format_raw_script(raw_script):
    """Creates single script from a list of script parts.
    :type raw_script: [basestring]
    :rtype: basestring
    """
    if six.PY2:
        # Python 2 argv entries are bytes; decode before joining.
        parts = (arg.decode('utf-8') for arg in raw_script)
    else:
        parts = raw_script
    return ' '.join(parts).strip()
"def",
"format_raw_script",
"(",
"raw_script",
")",
":",
"if",
"six",
".",
"PY2",
":",
"script",
"=",
"' '",
".",
"join",
"(",
"arg",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"arg",
"in",
"raw_script",
")",
"else",
":",
"script",
"=",
"' '",
".",
"join",
"(",
"raw_script",
")",
"return",
"script",
".",
"strip",
"(",
")"
] | 23.692308 | 19.384615 |
def _set_bridge_domain(self, v, load=False):
    """
    Setter method for bridge_domain, mapped from YANG variable /bridge_domain (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bridge_domain is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bridge_domain() directly.
    """
    # Generated by pyangbind: coerce, validate, then store the value.
    if hasattr(v, "_utype"):
      # Unwrap a typed wrapper into its underlying value first.
      v = v._utype(v)
    try:
      # Re-wrap the value in the generated YANG list type; this raises
      # if `v` is not compatible with the schema.
      t = YANGDynClass(v,base=YANGListType("bridge_domain_id bridge_domain_type",bridge_domain.bridge_domain, yang_name="bridge-domain", rest_name="bridge-domain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='bridge-domain-id bridge-domain-type', extensions={u'tailf-common': {u'info': u'bridge-domain name for Node Specific configuration', u'cli-no-key-completion': None, u'sort-priority': u'106', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'BridgeDomainBasicCallpoint', u'cli-mode-name': u'config-bridge-domain-$(bridge-domain-id)'}}), is_container='list', yang_name="bridge-domain", rest_name="bridge-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'bridge-domain name for Node Specific configuration', u'cli-no-key-completion': None, u'sort-priority': u'106', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'BridgeDomainBasicCallpoint', u'cli-mode-name': u'config-bridge-domain-$(bridge-domain-id)'}}, namespace='urn:brocade.com:mgmt:brocade-bridge-domain', defining_module='brocade-bridge-domain', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      # Surface a schema-aware error describing the expected type.
      raise ValueError({
        'error-string': """bridge_domain must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("bridge_domain_id bridge_domain_type",bridge_domain.bridge_domain, yang_name="bridge-domain", rest_name="bridge-domain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='bridge-domain-id bridge-domain-type', extensions={u'tailf-common': {u'info': u'bridge-domain name for Node Specific configuration', u'cli-no-key-completion': None, u'sort-priority': u'106', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'BridgeDomainBasicCallpoint', u'cli-mode-name': u'config-bridge-domain-$(bridge-domain-id)'}}), is_container='list', yang_name="bridge-domain", rest_name="bridge-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'bridge-domain name for Node Specific configuration', u'cli-no-key-completion': None, u'sort-priority': u'106', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'BridgeDomainBasicCallpoint', u'cli-mode-name': u'config-bridge-domain-$(bridge-domain-id)'}}, namespace='urn:brocade.com:mgmt:brocade-bridge-domain', defining_module='brocade-bridge-domain', yang_type='list', is_config=True)""",
        })
    self.__bridge_domain = t
    if hasattr(self, '_set'):
      # Notify the framework that this leaf was explicitly set.
      self._set()
"def",
"_set_bridge_domain",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"bridge_domain_id bridge_domain_type\"",
",",
"bridge_domain",
".",
"bridge_domain",
",",
"yang_name",
"=",
"\"bridge-domain\"",
",",
"rest_name",
"=",
"\"bridge-domain\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'bridge-domain-id bridge-domain-type'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'bridge-domain name for Node Specific configuration'",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'sort-priority'",
":",
"u'106'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-no-match-completion'",
":",
"None",
",",
"u'cli-full-command'",
":",
"None",
",",
"u'callpoint'",
":",
"u'BridgeDomainBasicCallpoint'",
",",
"u'cli-mode-name'",
":",
"u'config-bridge-domain-$(bridge-domain-id)'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"bridge-domain\"",
",",
"rest_name",
"=",
"\"bridge-domain\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'bridge-domain name for Node Specific configuration'",
",",
"u'cli-no-key-completion'",
":",
"None",
",",
"u'sort-priority'",
":",
"u'106'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-no-match-completion'",
":",
"None",
",",
"u'cli-full-command'",
":",
"None",
",",
"u'callpoint'",
":",
"u'BridgeDomainBasicCallpoint'",
",",
"u'cli-mode-name'",
":",
"u'config-bridge-domain-$(bridge-domain-id)'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-bridge-domain'",
",",
"defining_module",
"=",
"'brocade-bridge-domain'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"bridge_domain must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"bridge_domain_id bridge_domain_type\",bridge_domain.bridge_domain, yang_name=\"bridge-domain\", rest_name=\"bridge-domain\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='bridge-domain-id bridge-domain-type', extensions={u'tailf-common': {u'info': u'bridge-domain name for Node Specific configuration', u'cli-no-key-completion': None, u'sort-priority': u'106', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'BridgeDomainBasicCallpoint', u'cli-mode-name': u'config-bridge-domain-$(bridge-domain-id)'}}), is_container='list', yang_name=\"bridge-domain\", rest_name=\"bridge-domain\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'bridge-domain name for Node Specific configuration', u'cli-no-key-completion': None, u'sort-priority': u'106', u'cli-suppress-list-no': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'BridgeDomainBasicCallpoint', u'cli-mode-name': u'config-bridge-domain-$(bridge-domain-id)'}}, namespace='urn:brocade.com:mgmt:brocade-bridge-domain', defining_module='brocade-bridge-domain', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__bridge_domain",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 152 | 73.181818 |
def init_app(self, app):
    """
    Initialize a Flask application for use with this prometheus
    reporter setup.

    Intended for the Flask "app factory" pattern; see
    http://flask.pocoo.org/docs/1.0/patterns/appfactories/
    Note that you need `PrometheusMetrics(app=None, ...)` for this
    mode, otherwise initialization happens automatically.

    :param app: the Flask application
    """
    if self.path:
        # Expose the metrics endpoint on the configured path.
        self.register_endpoint(self.path, app)
    if not self._export_defaults:
        return
    # Register the default request metrics for this app.
    self.export_defaults(
        self.buckets, self.group_by,
        self._defaults_prefix, app
    )
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"if",
"self",
".",
"path",
":",
"self",
".",
"register_endpoint",
"(",
"self",
".",
"path",
",",
"app",
")",
"if",
"self",
".",
"_export_defaults",
":",
"self",
".",
"export_defaults",
"(",
"self",
".",
"buckets",
",",
"self",
".",
"group_by",
",",
"self",
".",
"_defaults_prefix",
",",
"app",
")"
] | 32.454545 | 20.181818 |
def parse_forensic_report(feedback_report, sample, msg_date,
                          nameservers=None, dns_timeout=2.0,
                          strip_attachment_payloads=False,
                          parallel=False):
    """
    Converts a DMARC forensic report and sample to a ``OrderedDict``
    Args:
        feedback_report (str): A message's feedback report as a string
        sample (str): The RFC 822 headers or RFC 822 message sample
        msg_date (str): The message's date header
        nameservers (list): A list of one or more nameservers to use
        (Cloudflare's public DNS resolvers by default)
        dns_timeout (float): Sets the DNS timeout in seconds
        strip_attachment_payloads (bool): Remove attachment payloads from
        forensic report results
        parallel (bool): Parallel processing
    Returns:
        OrderedDict: A parsed report and sample
    Raises:
        InvalidForensicReport: if a required field is missing or any
        other error occurs while parsing the report or sample
    """
    delivery_results = ["delivered", "spam", "policy", "reject", "other"]
    try:
        parsed_report = OrderedDict()
        # Feedback reports are "Field-Name: value" lines; normalize field
        # names to snake_case keys.
        report_values = feedback_report_regex.findall(feedback_report)
        for report_value in report_values:
            key = report_value[0].lower().replace("-", "_")
            parsed_report[key] = report_value[1]
        # Fall back to the sample's Date header when the report itself
        # carries no Arrival-Date.
        if "arrival_date" not in parsed_report:
            if msg_date is None:
                raise InvalidForensicReport(
                    "Forensic sample is not a valid email")
            parsed_report["arrival_date"] = msg_date.isoformat()
        # Default any missing optional header fields.
        if "version" not in parsed_report:
            parsed_report["version"] = 1
        if "user_agent" not in parsed_report:
            parsed_report["user_agent"] = None
        if "delivery_result" not in parsed_report:
            parsed_report["delivery_result"] = None
        else:
            # Normalize free-form delivery results to a known value.
            for delivery_result in delivery_results:
                if delivery_result in parsed_report["delivery_result"].lower():
                    parsed_report["delivery_result"] = delivery_result
                    break
        if parsed_report["delivery_result"] not in delivery_results:
            parsed_report["delivery_result"] = "other"
        # Keep the original arrival date and add a normalized UTC copy.
        arrival_utc = human_timestamp_to_datetime(
            parsed_report["arrival_date"], to_utc=True)
        arrival_utc = arrival_utc.strftime("%Y-%m-%d %H:%M:%S")
        parsed_report["arrival_date_utc"] = arrival_utc
        # Replace the raw source IP with enriched info (reverse DNS etc.);
        # a missing Source-IP raises KeyError -> InvalidForensicReport.
        ip_address = parsed_report["source_ip"]
        parsed_report_source = get_ip_address_info(ip_address,
                                                   nameservers=nameservers,
                                                   timeout=dns_timeout,
                                                   parallel=parallel)
        parsed_report["source"] = parsed_report_source
        del parsed_report["source_ip"]
        # Convert the Identity-Alignment field into a list of mechanisms.
        if "identity_alignment" not in parsed_report:
            parsed_report["authentication_mechanisms"] = []
        elif parsed_report["identity_alignment"] == "none":
            parsed_report["authentication_mechanisms"] = []
            del parsed_report["identity_alignment"]
        else:
            auth_mechanisms = parsed_report["identity_alignment"]
            auth_mechanisms = auth_mechanisms.split(",")
            parsed_report["authentication_mechanisms"] = auth_mechanisms
            del parsed_report["identity_alignment"]
        # Auth-Failure is also comma-separated; default to "dmarc".
        if "auth_failure" not in parsed_report:
            parsed_report["auth_failure"] = "dmarc"
        auth_failure = parsed_report["auth_failure"].split(",")
        parsed_report["auth_failure"] = auth_failure
        optional_fields = ["original_envelope_id", "dkim_domain",
                           "original_mail_from", "original_rcpt_to"]
        for optional_field in optional_fields:
            if optional_field not in parsed_report:
                parsed_report[optional_field] = None
        # Parse the RFC 822 sample and derive report fields from it.
        parsed_sample = parse_email(
            sample,
            strip_attachment_payloads=strip_attachment_payloads)
        if "reported_domain" not in parsed_report:
            parsed_report["reported_domain"] = parsed_sample["from"]["domain"]
        # A sample with no body and no attachments is headers-only; drop
        # defect info in that case since an absent body is expected.
        sample_headers_only = False
        number_of_attachments = len(parsed_sample["attachments"])
        if number_of_attachments < 1 and parsed_sample["body"] is None:
            sample_headers_only = True
        if sample_headers_only and parsed_sample["has_defects"]:
            del parsed_sample["defects"]
            del parsed_sample["defects_categories"]
            del parsed_sample["has_defects"]
        parsed_report["sample_headers_only"] = sample_headers_only
        parsed_report["sample"] = sample
        parsed_report["parsed_sample"] = parsed_sample
        return parsed_report
    except KeyError as error:
        # A required field was absent from the report.
        raise InvalidForensicReport("Missing value: {0}".format(
            error.__str__()))
    except Exception as error:
        # Wrap anything else so callers only need to catch one type.
        raise InvalidForensicReport(
            "Unexpected error: {0}".format(error.__str__()))
"def",
"parse_forensic_report",
"(",
"feedback_report",
",",
"sample",
",",
"msg_date",
",",
"nameservers",
"=",
"None",
",",
"dns_timeout",
"=",
"2.0",
",",
"strip_attachment_payloads",
"=",
"False",
",",
"parallel",
"=",
"False",
")",
":",
"delivery_results",
"=",
"[",
"\"delivered\"",
",",
"\"spam\"",
",",
"\"policy\"",
",",
"\"reject\"",
",",
"\"other\"",
"]",
"try",
":",
"parsed_report",
"=",
"OrderedDict",
"(",
")",
"report_values",
"=",
"feedback_report_regex",
".",
"findall",
"(",
"feedback_report",
")",
"for",
"report_value",
"in",
"report_values",
":",
"key",
"=",
"report_value",
"[",
"0",
"]",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
"parsed_report",
"[",
"key",
"]",
"=",
"report_value",
"[",
"1",
"]",
"if",
"\"arrival_date\"",
"not",
"in",
"parsed_report",
":",
"if",
"msg_date",
"is",
"None",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Forensic sample is not a valid email\"",
")",
"parsed_report",
"[",
"\"arrival_date\"",
"]",
"=",
"msg_date",
".",
"isoformat",
"(",
")",
"if",
"\"version\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"version\"",
"]",
"=",
"1",
"if",
"\"user_agent\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"user_agent\"",
"]",
"=",
"None",
"if",
"\"delivery_result\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"None",
"else",
":",
"for",
"delivery_result",
"in",
"delivery_results",
":",
"if",
"delivery_result",
"in",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
".",
"lower",
"(",
")",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"delivery_result",
"break",
"if",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"not",
"in",
"delivery_results",
":",
"parsed_report",
"[",
"\"delivery_result\"",
"]",
"=",
"\"other\"",
"arrival_utc",
"=",
"human_timestamp_to_datetime",
"(",
"parsed_report",
"[",
"\"arrival_date\"",
"]",
",",
"to_utc",
"=",
"True",
")",
"arrival_utc",
"=",
"arrival_utc",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"parsed_report",
"[",
"\"arrival_date_utc\"",
"]",
"=",
"arrival_utc",
"ip_address",
"=",
"parsed_report",
"[",
"\"source_ip\"",
"]",
"parsed_report_source",
"=",
"get_ip_address_info",
"(",
"ip_address",
",",
"nameservers",
"=",
"nameservers",
",",
"timeout",
"=",
"dns_timeout",
",",
"parallel",
"=",
"parallel",
")",
"parsed_report",
"[",
"\"source\"",
"]",
"=",
"parsed_report_source",
"del",
"parsed_report",
"[",
"\"source_ip\"",
"]",
"if",
"\"identity_alignment\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"[",
"]",
"elif",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"==",
"\"none\"",
":",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"[",
"]",
"del",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"else",
":",
"auth_mechanisms",
"=",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"auth_mechanisms",
"=",
"auth_mechanisms",
".",
"split",
"(",
"\",\"",
")",
"parsed_report",
"[",
"\"authentication_mechanisms\"",
"]",
"=",
"auth_mechanisms",
"del",
"parsed_report",
"[",
"\"identity_alignment\"",
"]",
"if",
"\"auth_failure\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
"=",
"\"dmarc\"",
"auth_failure",
"=",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
".",
"split",
"(",
"\",\"",
")",
"parsed_report",
"[",
"\"auth_failure\"",
"]",
"=",
"auth_failure",
"optional_fields",
"=",
"[",
"\"original_envelope_id\"",
",",
"\"dkim_domain\"",
",",
"\"original_mail_from\"",
",",
"\"original_rcpt_to\"",
"]",
"for",
"optional_field",
"in",
"optional_fields",
":",
"if",
"optional_field",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"optional_field",
"]",
"=",
"None",
"parsed_sample",
"=",
"parse_email",
"(",
"sample",
",",
"strip_attachment_payloads",
"=",
"strip_attachment_payloads",
")",
"if",
"\"reported_domain\"",
"not",
"in",
"parsed_report",
":",
"parsed_report",
"[",
"\"reported_domain\"",
"]",
"=",
"parsed_sample",
"[",
"\"from\"",
"]",
"[",
"\"domain\"",
"]",
"sample_headers_only",
"=",
"False",
"number_of_attachments",
"=",
"len",
"(",
"parsed_sample",
"[",
"\"attachments\"",
"]",
")",
"if",
"number_of_attachments",
"<",
"1",
"and",
"parsed_sample",
"[",
"\"body\"",
"]",
"is",
"None",
":",
"sample_headers_only",
"=",
"True",
"if",
"sample_headers_only",
"and",
"parsed_sample",
"[",
"\"has_defects\"",
"]",
":",
"del",
"parsed_sample",
"[",
"\"defects\"",
"]",
"del",
"parsed_sample",
"[",
"\"defects_categories\"",
"]",
"del",
"parsed_sample",
"[",
"\"has_defects\"",
"]",
"parsed_report",
"[",
"\"sample_headers_only\"",
"]",
"=",
"sample_headers_only",
"parsed_report",
"[",
"\"sample\"",
"]",
"=",
"sample",
"parsed_report",
"[",
"\"parsed_sample\"",
"]",
"=",
"parsed_sample",
"return",
"parsed_report",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Missing value: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")",
"except",
"Exception",
"as",
"error",
":",
"raise",
"InvalidForensicReport",
"(",
"\"Unexpected error: {0}\"",
".",
"format",
"(",
"error",
".",
"__str__",
"(",
")",
")",
")"
] | 42.321739 | 19.973913 |
def update_case(case_obj, existing_case):
    """Update an existing case with data from a new case object.

    Copies VCF paths, variant counts and individuals for each variant
    type (SNV and SV) that the new case provides.

    Args:
        case_obj(models.Case)
        existing_case(models.Case)

    Returns:
        updated_case(models.Case): Updated existing case

    Raises:
        CaseError: if the existing case already has a VCF of a type the
        new case also provides.
    """
    # (vcf key, variant-count key, individuals key, parsed-inds key, type label)
    variant_specs = [
        ('vcf_path', 'nr_variants', 'individuals', '_inds', 'snv'),
        ('vcf_sv_path', 'nr_sv_variants', 'sv_individuals', '_sv_inds', 'sv'),
    ]
    updated_case = deepcopy(existing_case)
    for vcf_key, nr_key, inds_key, parsed_inds_key, variant_type in variant_specs:
        if not case_obj.get(vcf_key):
            continue
        if updated_case.get(vcf_key):
            # Replacing an existing VCF is not supported.
            LOG.warning("VCF of type %s already exists in case", variant_type)
            raise CaseError("Can not replace VCF in existing case")
        for key in (vcf_key, nr_key, inds_key, parsed_inds_key):
            updated_case[key] = case_obj[key]
    return updated_case
"def",
"update_case",
"(",
"case_obj",
",",
"existing_case",
")",
":",
"variant_nrs",
"=",
"[",
"'nr_variants'",
",",
"'nr_sv_variants'",
"]",
"individuals",
"=",
"[",
"(",
"'individuals'",
",",
"'_inds'",
")",
",",
"(",
"'sv_individuals'",
",",
"'_sv_inds'",
")",
"]",
"updated_case",
"=",
"deepcopy",
"(",
"existing_case",
")",
"for",
"i",
",",
"file_name",
"in",
"enumerate",
"(",
"[",
"'vcf_path'",
",",
"'vcf_sv_path'",
"]",
")",
":",
"variant_type",
"=",
"'snv'",
"if",
"file_name",
"==",
"'vcf_sv_path'",
":",
"variant_type",
"=",
"'sv'",
"if",
"case_obj",
".",
"get",
"(",
"file_name",
")",
":",
"if",
"updated_case",
".",
"get",
"(",
"file_name",
")",
":",
"LOG",
".",
"warning",
"(",
"\"VCF of type %s already exists in case\"",
",",
"variant_type",
")",
"raise",
"CaseError",
"(",
"\"Can not replace VCF in existing case\"",
")",
"else",
":",
"updated_case",
"[",
"file_name",
"]",
"=",
"case_obj",
"[",
"file_name",
"]",
"updated_case",
"[",
"variant_nrs",
"[",
"i",
"]",
"]",
"=",
"case_obj",
"[",
"variant_nrs",
"[",
"i",
"]",
"]",
"updated_case",
"[",
"individuals",
"[",
"i",
"]",
"[",
"0",
"]",
"]",
"=",
"case_obj",
"[",
"individuals",
"[",
"i",
"]",
"[",
"0",
"]",
"]",
"updated_case",
"[",
"individuals",
"[",
"i",
"]",
"[",
"1",
"]",
"]",
"=",
"case_obj",
"[",
"individuals",
"[",
"i",
"]",
"[",
"1",
"]",
"]",
"return",
"updated_case"
] | 36.65625 | 20.75 |
def get_qrcode_url(self, ticket, data=None):
    """
    Build the QR-code URL for a device ticket.

    See https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4

    :param ticket: the QR-code ticket
    :param data: optional extra data, appended base64-encoded as the
        URL fragment
    :return: the QR-code URL
    """
    url = 'https://we.qq.com/d/{ticket}'.format(ticket=ticket)
    if not data:
        return url
    if isinstance(data, (dict, tuple, list)):
        # Structured data is urlencoded before base64 encoding.
        data = urllib.urlencode(data)
    encoded = to_text(base64.b64encode(to_binary(data)))
    return '{base}#{data}'.format(base=url, data=encoded)
"def",
"get_qrcode_url",
"(",
"self",
",",
"ticket",
",",
"data",
"=",
"None",
")",
":",
"url",
"=",
"'https://we.qq.com/d/{ticket}'",
".",
"format",
"(",
"ticket",
"=",
"ticket",
")",
"if",
"data",
":",
"if",
"isinstance",
"(",
"data",
",",
"(",
"dict",
",",
"tuple",
",",
"list",
")",
")",
":",
"data",
"=",
"urllib",
".",
"urlencode",
"(",
"data",
")",
"data",
"=",
"to_text",
"(",
"base64",
".",
"b64encode",
"(",
"to_binary",
"(",
"data",
")",
")",
")",
"url",
"=",
"'{base}#{data}'",
".",
"format",
"(",
"base",
"=",
"url",
",",
"data",
"=",
"data",
")",
"return",
"url"
] | 33.411765 | 16.588235 |
def write_virtual_memory(self, cpu_id, address, size, bytes_p):
    """Writes guest virtual memory; access handles (MMIO++) are ignored.
    This feature is not implemented in the 4.0.0 release but may show up
    in a dot release.

    in cpu_id of type int
        The identifier of the Virtual CPU.
    in address of type int
        The guest virtual address.
    in size of type int
        The number of bytes to write.
    in bytes_p of type str
        The bytes to write.
    """
    # Validate the integer arguments with identical error messages.
    for arg_name, arg_value in (('cpu_id', cpu_id),
                                ('address', address),
                                ('size', size)):
        if not isinstance(arg_value, baseinteger):
            raise TypeError(
                "%s can only be an instance of type baseinteger" % arg_name)
    if not isinstance(bytes_p, list):
        raise TypeError("bytes_p can only be an instance of type list")
    # Spot-check only the first ten entries, as the generated bindings do.
    for entry in bytes_p[:10]:
        if not isinstance(entry, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    self._call("writeVirtualMemory",
               in_p=[cpu_id, address, size, bytes_p])
"def",
"write_virtual_memory",
"(",
"self",
",",
"cpu_id",
",",
"address",
",",
"size",
",",
"bytes_p",
")",
":",
"if",
"not",
"isinstance",
"(",
"cpu_id",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"cpu_id can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"address",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"address can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"size",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"size can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"bytes_p",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"bytes_p can only be an instance of type list\"",
")",
"for",
"a",
"in",
"bytes_p",
"[",
":",
"10",
"]",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"array can only contain objects of type basestring\"",
")",
"self",
".",
"_call",
"(",
"\"writeVirtualMemory\"",
",",
"in_p",
"=",
"[",
"cpu_id",
",",
"address",
",",
"size",
",",
"bytes_p",
"]",
")"
] | 39.69697 | 18.878788 |
def sync_model(self, comment='', compact_central=False,
               release_borrowed=True, release_workset=True,
               save_local=False):
    """Append a sync model entry to the journal.

    This instructs Revit to sync the currently open workshared model.

    Args:
        comment (str): comment to be provided for the sync step
        compact_central (bool): if True compacts the central file
        release_borrowed (bool): if True releases the borrowed elements
        release_workset (bool): if True releases the borrowed worksets
        save_local (bool): if True saves the local file as well
    """
    self._add_entry(templates.FILE_SYNC_START)
    # Optional sync steps, emitted in the order Revit expects them.
    # The template attribute is looked up lazily, only when enabled.
    optional_steps = (
        (compact_central, 'FILE_SYNC_COMPACT'),
        (release_borrowed, 'FILE_SYNC_RELEASE_BORROWED'),
        (release_workset, 'FILE_SYNC_RELEASE_USERWORKSETS'),
        (save_local, 'FILE_SYNC_RELEASE_SAVELOCAL'),
    )
    for enabled, template_name in optional_steps:
        if enabled:
            self._add_entry(getattr(templates, template_name))
    self._add_entry(templates.FILE_SYNC_COMMENT_OK
                    .format(sync_comment=comment))
"def",
"sync_model",
"(",
"self",
",",
"comment",
"=",
"''",
",",
"compact_central",
"=",
"False",
",",
"release_borrowed",
"=",
"True",
",",
"release_workset",
"=",
"True",
",",
"save_local",
"=",
"False",
")",
":",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_SYNC_START",
")",
"if",
"compact_central",
":",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_SYNC_COMPACT",
")",
"if",
"release_borrowed",
":",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_SYNC_RELEASE_BORROWED",
")",
"if",
"release_workset",
":",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_SYNC_RELEASE_USERWORKSETS",
")",
"if",
"save_local",
":",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_SYNC_RELEASE_SAVELOCAL",
")",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_SYNC_COMMENT_OK",
".",
"format",
"(",
"sync_comment",
"=",
"comment",
")",
")"
] | 43.814815 | 22.666667 |
def alter_field(self, model, old_field, new_field, strict=False):
"""Ran when the configuration on a field changed."""
is_old_field_hstore = isinstance(old_field, HStoreField)
is_new_field_hstore = isinstance(new_field, HStoreField)
if not is_old_field_hstore and not is_new_field_hstore:
return
old_uniqueness = getattr(old_field, 'uniqueness', []) or []
new_uniqueness = getattr(new_field, 'uniqueness', []) or []
# handle field renames before moving on
if str(old_field.column) != str(new_field.column):
for keys in self._iterate_uniqueness_keys(old_field):
self._rename_hstore_unique(
model._meta.db_table,
model._meta.db_table,
old_field,
new_field,
keys
)
# drop the indexes for keys that have been removed
for keys in old_uniqueness:
if keys not in new_uniqueness:
self._drop_hstore_unique(
model,
old_field,
self._compose_keys(keys)
)
# create new indexes for keys that have been added
for keys in new_uniqueness:
if keys not in old_uniqueness:
self._create_hstore_unique(
model,
new_field,
self._compose_keys(keys)
) | [
"def",
"alter_field",
"(",
"self",
",",
"model",
",",
"old_field",
",",
"new_field",
",",
"strict",
"=",
"False",
")",
":",
"is_old_field_hstore",
"=",
"isinstance",
"(",
"old_field",
",",
"HStoreField",
")",
"is_new_field_hstore",
"=",
"isinstance",
"(",
"new_field",
",",
"HStoreField",
")",
"if",
"not",
"is_old_field_hstore",
"and",
"not",
"is_new_field_hstore",
":",
"return",
"old_uniqueness",
"=",
"getattr",
"(",
"old_field",
",",
"'uniqueness'",
",",
"[",
"]",
")",
"or",
"[",
"]",
"new_uniqueness",
"=",
"getattr",
"(",
"new_field",
",",
"'uniqueness'",
",",
"[",
"]",
")",
"or",
"[",
"]",
"# handle field renames before moving on",
"if",
"str",
"(",
"old_field",
".",
"column",
")",
"!=",
"str",
"(",
"new_field",
".",
"column",
")",
":",
"for",
"keys",
"in",
"self",
".",
"_iterate_uniqueness_keys",
"(",
"old_field",
")",
":",
"self",
".",
"_rename_hstore_unique",
"(",
"model",
".",
"_meta",
".",
"db_table",
",",
"model",
".",
"_meta",
".",
"db_table",
",",
"old_field",
",",
"new_field",
",",
"keys",
")",
"# drop the indexes for keys that have been removed",
"for",
"keys",
"in",
"old_uniqueness",
":",
"if",
"keys",
"not",
"in",
"new_uniqueness",
":",
"self",
".",
"_drop_hstore_unique",
"(",
"model",
",",
"old_field",
",",
"self",
".",
"_compose_keys",
"(",
"keys",
")",
")",
"# create new indexes for keys that have been added",
"for",
"keys",
"in",
"new_uniqueness",
":",
"if",
"keys",
"not",
"in",
"old_uniqueness",
":",
"self",
".",
"_create_hstore_unique",
"(",
"model",
",",
"new_field",
",",
"self",
".",
"_compose_keys",
"(",
"keys",
")",
")"
] | 36.3 | 17.05 |
def wrap(self, LayoutClass, *args, **kwargs):
"""
Wraps every layout object pointed in `self.slice` under a `LayoutClass` instance with
`args` and `kwargs` passed.
"""
def wrap_object(layout_object, j):
layout_object.fields[j] = self.wrapped_object(
LayoutClass, layout_object.fields[j], *args, **kwargs
)
self.pre_map(wrap_object) | [
"def",
"wrap",
"(",
"self",
",",
"LayoutClass",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrap_object",
"(",
"layout_object",
",",
"j",
")",
":",
"layout_object",
".",
"fields",
"[",
"j",
"]",
"=",
"self",
".",
"wrapped_object",
"(",
"LayoutClass",
",",
"layout_object",
".",
"fields",
"[",
"j",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"pre_map",
"(",
"wrap_object",
")"
] | 37.272727 | 16.909091 |
def vxvyvz_to_vrpmllpmbb(vx,vy,vz,l,b,d,XYZ=False,degree=False):
"""
NAME:
vxvyvz_to_vrpmllpmbb
PURPOSE:
Transform velocities in the rectangular Galactic coordinate frame to the spherical Galactic coordinate frame (can take vector inputs)
INPUT:
vx - velocity towards the Galactic Center (km/s)
vy - velocity in the direction of Galactic rotation (km/s)
vz - velocity towards the North Galactic Pole (km/s)
l - Galactic longitude
b - Galactic lattitude
d - distance (kpc)
XYZ - (bool) If True, then l,b,d is actually X,Y,Z (rectangular Galactic coordinates)
degree - (bool) if True, l and b are in degrees
OUTPUT:
(vr,pmll x cos(b),pmbb) in (km/s,mas/yr,mas/yr); pmll = mu_l * cos(b)
For vector inputs [:,3]
HISTORY:
2009-10-24 - Written - Bovy (NYU)
2014-06-14 - Re-written w/ numpy functions for speed and w/ decorators for beauty - Bovy (IAS)
"""
#Whether to use degrees and scalar input is handled by decorators
if XYZ: #undo the incorrect conversion that the decorator did
if degree:
l*= 180./nu.pi
b*= 180./nu.pi
lbd= XYZ_to_lbd(l,b,d,degree=False)
l= lbd[:,0]
b= lbd[:,1]
d= lbd[:,2]
R=nu.zeros((3,3,len(l)))
R[0,0]= nu.cos(l)*nu.cos(b)
R[0,1]= -nu.sin(l)
R[0,2]= -nu.cos(l)*nu.sin(b)
R[1,0]= nu.sin(l)*nu.cos(b)
R[1,1]= nu.cos(l)
R[1,2]= -nu.sin(l)*nu.sin(b)
R[2,0]= nu.sin(b)
R[2,2]= nu.cos(b)
invxyz= nu.array([[vx,vx,vx],
[vy,vy,vy],
[vz,vz,vz]])
vrvlvb= (R.T*invxyz.T).sum(-1)
vrvlvb[:,1]/= d*_K
vrvlvb[:,2]/= d*_K
return vrvlvb | [
"def",
"vxvyvz_to_vrpmllpmbb",
"(",
"vx",
",",
"vy",
",",
"vz",
",",
"l",
",",
"b",
",",
"d",
",",
"XYZ",
"=",
"False",
",",
"degree",
"=",
"False",
")",
":",
"#Whether to use degrees and scalar input is handled by decorators",
"if",
"XYZ",
":",
"#undo the incorrect conversion that the decorator did",
"if",
"degree",
":",
"l",
"*=",
"180.",
"/",
"nu",
".",
"pi",
"b",
"*=",
"180.",
"/",
"nu",
".",
"pi",
"lbd",
"=",
"XYZ_to_lbd",
"(",
"l",
",",
"b",
",",
"d",
",",
"degree",
"=",
"False",
")",
"l",
"=",
"lbd",
"[",
":",
",",
"0",
"]",
"b",
"=",
"lbd",
"[",
":",
",",
"1",
"]",
"d",
"=",
"lbd",
"[",
":",
",",
"2",
"]",
"R",
"=",
"nu",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
",",
"len",
"(",
"l",
")",
")",
")",
"R",
"[",
"0",
",",
"0",
"]",
"=",
"nu",
".",
"cos",
"(",
"l",
")",
"*",
"nu",
".",
"cos",
"(",
"b",
")",
"R",
"[",
"0",
",",
"1",
"]",
"=",
"-",
"nu",
".",
"sin",
"(",
"l",
")",
"R",
"[",
"0",
",",
"2",
"]",
"=",
"-",
"nu",
".",
"cos",
"(",
"l",
")",
"*",
"nu",
".",
"sin",
"(",
"b",
")",
"R",
"[",
"1",
",",
"0",
"]",
"=",
"nu",
".",
"sin",
"(",
"l",
")",
"*",
"nu",
".",
"cos",
"(",
"b",
")",
"R",
"[",
"1",
",",
"1",
"]",
"=",
"nu",
".",
"cos",
"(",
"l",
")",
"R",
"[",
"1",
",",
"2",
"]",
"=",
"-",
"nu",
".",
"sin",
"(",
"l",
")",
"*",
"nu",
".",
"sin",
"(",
"b",
")",
"R",
"[",
"2",
",",
"0",
"]",
"=",
"nu",
".",
"sin",
"(",
"b",
")",
"R",
"[",
"2",
",",
"2",
"]",
"=",
"nu",
".",
"cos",
"(",
"b",
")",
"invxyz",
"=",
"nu",
".",
"array",
"(",
"[",
"[",
"vx",
",",
"vx",
",",
"vx",
"]",
",",
"[",
"vy",
",",
"vy",
",",
"vy",
"]",
",",
"[",
"vz",
",",
"vz",
",",
"vz",
"]",
"]",
")",
"vrvlvb",
"=",
"(",
"R",
".",
"T",
"*",
"invxyz",
".",
"T",
")",
".",
"sum",
"(",
"-",
"1",
")",
"vrvlvb",
"[",
":",
",",
"1",
"]",
"/=",
"d",
"*",
"_K",
"vrvlvb",
"[",
":",
",",
"2",
"]",
"/=",
"d",
"*",
"_K",
"return",
"vrvlvb"
] | 25.606061 | 25.606061 |
def scan_dir_for_template_files(search_dir):
"""
Return a map of "likely service/template name" to "template file".
This includes all the template files in fixtures and in services.
"""
template_files = {}
cf_dir = os.path.join(search_dir, 'cloudformation')
for type in os.listdir(cf_dir):
template_dir = os.path.join(cf_dir, type, 'templates')
for x in os.listdir(template_dir):
name = os.path.splitext(x)[0]
template_files[name] = os.path.join(template_dir, x)
return template_files | [
"def",
"scan_dir_for_template_files",
"(",
"search_dir",
")",
":",
"template_files",
"=",
"{",
"}",
"cf_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"search_dir",
",",
"'cloudformation'",
")",
"for",
"type",
"in",
"os",
".",
"listdir",
"(",
"cf_dir",
")",
":",
"template_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cf_dir",
",",
"type",
",",
"'templates'",
")",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"template_dir",
")",
":",
"name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"x",
")",
"[",
"0",
"]",
"template_files",
"[",
"name",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"template_dir",
",",
"x",
")",
"return",
"template_files"
] | 41.846154 | 12.615385 |
def save(self, filename):
"""Save metadata to XML file"""
with io.open(filename,'w',encoding='utf-8') as f:
f.write(self.xml()) | [
"def",
"save",
"(",
"self",
",",
"filename",
")",
":",
"with",
"io",
".",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"xml",
"(",
")",
")"
] | 38 | 10.25 |
def filename_for_track(track):
"""
:return: A safe filename for the given track
"""
artist = track.user['permalink']
title = track.title
return '{}-{}.mp3'.format(artist, title).lower().replace(' ', '_').replace('/', '_') | [
"def",
"filename_for_track",
"(",
"track",
")",
":",
"artist",
"=",
"track",
".",
"user",
"[",
"'permalink'",
"]",
"title",
"=",
"track",
".",
"title",
"return",
"'{}-{}.mp3'",
".",
"format",
"(",
"artist",
",",
"title",
")",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
".",
"replace",
"(",
"'/'",
",",
"'_'",
")"
] | 29.875 | 15.875 |
def execute_cmdline_scenarios(scenario_name, args, command_args):
"""
Execute scenario sequences based on parsed command-line arguments.
This is useful for subcommands that run scenario sequences, which
excludes subcommands such as ``list``, ``login``, and ``matrix``.
``args`` and ``command_args`` are combined using :func:`get_configs`
to generate the scenario(s) configuration.
:param scenario_name: Name of scenario to run, or ``None`` to run all.
:param args: ``args`` dict from ``click`` command context
:param command_args: dict of command argumentss, including the target
subcommand to execute
:returns: None
"""
scenarios = molecule.scenarios.Scenarios(
get_configs(args, command_args), scenario_name)
scenarios.print_matrix()
for scenario in scenarios:
try:
execute_scenario(scenario)
except SystemExit:
# if the command has a 'destroy' arg, like test does,
# handle that behavior here.
if command_args.get('destroy') == 'always':
msg = ('An error occurred during the {} sequence action: '
"'{}'. Cleaning up.").format(scenario.config.subcommand,
scenario.config.action)
LOG.warn(msg)
execute_subcommand(scenario.config, 'cleanup')
execute_subcommand(scenario.config, 'destroy')
# always prune ephemeral dir if destroying on failure
scenario.prune()
util.sysexit()
else:
raise | [
"def",
"execute_cmdline_scenarios",
"(",
"scenario_name",
",",
"args",
",",
"command_args",
")",
":",
"scenarios",
"=",
"molecule",
".",
"scenarios",
".",
"Scenarios",
"(",
"get_configs",
"(",
"args",
",",
"command_args",
")",
",",
"scenario_name",
")",
"scenarios",
".",
"print_matrix",
"(",
")",
"for",
"scenario",
"in",
"scenarios",
":",
"try",
":",
"execute_scenario",
"(",
"scenario",
")",
"except",
"SystemExit",
":",
"# if the command has a 'destroy' arg, like test does,",
"# handle that behavior here.",
"if",
"command_args",
".",
"get",
"(",
"'destroy'",
")",
"==",
"'always'",
":",
"msg",
"=",
"(",
"'An error occurred during the {} sequence action: '",
"\"'{}'. Cleaning up.\"",
")",
".",
"format",
"(",
"scenario",
".",
"config",
".",
"subcommand",
",",
"scenario",
".",
"config",
".",
"action",
")",
"LOG",
".",
"warn",
"(",
"msg",
")",
"execute_subcommand",
"(",
"scenario",
".",
"config",
",",
"'cleanup'",
")",
"execute_subcommand",
"(",
"scenario",
".",
"config",
",",
"'destroy'",
")",
"# always prune ephemeral dir if destroying on failure",
"scenario",
".",
"prune",
"(",
")",
"util",
".",
"sysexit",
"(",
")",
"else",
":",
"raise"
] | 42.657895 | 21.184211 |
def _windows_wwns():
'''
Return Fibre Channel port WWNs from a Windows host.
'''
ps_cmd = r'Get-WmiObject -ErrorAction Stop ' \
r'-class MSFC_FibrePortHBAAttributes ' \
r'-namespace "root\WMI" | ' \
r'Select -Expandproperty Attributes | ' \
r'%{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
ret = []
cmd_ret = salt.modules.cmdmod.powershell(ps_cmd)
for line in cmd_ret:
ret.append(line.rstrip())
return ret | [
"def",
"_windows_wwns",
"(",
")",
":",
"ps_cmd",
"=",
"r'Get-WmiObject -ErrorAction Stop '",
"r'-class MSFC_FibrePortHBAAttributes '",
"r'-namespace \"root\\WMI\" | '",
"r'Select -Expandproperty Attributes | '",
"r'%{($_.PortWWN | % {\"{0:x2}\" -f $_}) -join \"\"}'",
"ret",
"=",
"[",
"]",
"cmd_ret",
"=",
"salt",
".",
"modules",
".",
"cmdmod",
".",
"powershell",
"(",
"ps_cmd",
")",
"for",
"line",
"in",
"cmd_ret",
":",
"ret",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"return",
"ret"
] | 34.571429 | 17.857143 |
def delete_dataset(self, project_id, dataset_id):
"""
Delete a dataset of Big query in your project.
:param project_id: The name of the project where we have the dataset .
:type project_id: str
:param dataset_id: The dataset to be delete.
:type dataset_id: str
:return:
"""
project_id = project_id if project_id is not None else self.project_id
self.log.info('Deleting from project: %s Dataset:%s',
project_id, dataset_id)
try:
self.service.datasets().delete(
projectId=project_id,
datasetId=dataset_id).execute(num_retries=self.num_retries)
self.log.info('Dataset deleted successfully: In project %s '
'Dataset %s', project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
) | [
"def",
"delete_dataset",
"(",
"self",
",",
"project_id",
",",
"dataset_id",
")",
":",
"project_id",
"=",
"project_id",
"if",
"project_id",
"is",
"not",
"None",
"else",
"self",
".",
"project_id",
"self",
".",
"log",
".",
"info",
"(",
"'Deleting from project: %s Dataset:%s'",
",",
"project_id",
",",
"dataset_id",
")",
"try",
":",
"self",
".",
"service",
".",
"datasets",
"(",
")",
".",
"delete",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Dataset deleted successfully: In project %s '",
"'Dataset %s'",
",",
"project_id",
",",
"dataset_id",
")",
"except",
"HttpError",
"as",
"err",
":",
"raise",
"AirflowException",
"(",
"'BigQuery job failed. Error was: {}'",
".",
"format",
"(",
"err",
".",
"content",
")",
")"
] | 40.333333 | 19.166667 |
def data_csv(request, measurement_list):
"""This view generates a csv output of all data for a strain.
For this function to work, you have to provide the filtered set of measurements."""
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=data.csv'
writer = csv.writer(response)
writer.writerow(["Animal", "Genotype", "Gender","Assay", "Value","Strain", "Background","Age", "Cage", "Feeding", "Treatment"])
for measurement in measurement_list:
writer.writerow([
measurement.animal,
measurement.animal.Genotype,
measurement.animal.Gender,
measurement.assay,
measurement.values.split(',')[0],
measurement.animal.Strain,
measurement.animal.Background,
measurement.age(),
measurement.animal.Cage,
measurement.experiment.feeding_state,
measurement.animal.treatment_set.all(),
])
return response | [
"def",
"data_csv",
"(",
"request",
",",
"measurement_list",
")",
":",
"response",
"=",
"HttpResponse",
"(",
"content_type",
"=",
"'text/csv'",
")",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"'attachment; filename=data.csv'",
"writer",
"=",
"csv",
".",
"writer",
"(",
"response",
")",
"writer",
".",
"writerow",
"(",
"[",
"\"Animal\"",
",",
"\"Genotype\"",
",",
"\"Gender\"",
",",
"\"Assay\"",
",",
"\"Value\"",
",",
"\"Strain\"",
",",
"\"Background\"",
",",
"\"Age\"",
",",
"\"Cage\"",
",",
"\"Feeding\"",
",",
"\"Treatment\"",
"]",
")",
"for",
"measurement",
"in",
"measurement_list",
":",
"writer",
".",
"writerow",
"(",
"[",
"measurement",
".",
"animal",
",",
"measurement",
".",
"animal",
".",
"Genotype",
",",
"measurement",
".",
"animal",
".",
"Gender",
",",
"measurement",
".",
"assay",
",",
"measurement",
".",
"values",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
",",
"measurement",
".",
"animal",
".",
"Strain",
",",
"measurement",
".",
"animal",
".",
"Background",
",",
"measurement",
".",
"age",
"(",
")",
",",
"measurement",
".",
"animal",
".",
"Cage",
",",
"measurement",
".",
"experiment",
".",
"feeding_state",
",",
"measurement",
".",
"animal",
".",
"treatment_set",
".",
"all",
"(",
")",
",",
"]",
")",
"return",
"response"
] | 42.041667 | 14.208333 |
def merge_tasks(core_collections, sandbox_collections, id_prefix, new_tasks, batch_size=100, wipe=False):
"""Merge core and sandbox collections into a temporary collection in the sandbox.
:param core_collections: Core collection info
:type core_collections: Collections
:param sandbox_collections: Sandbox collection info
:type sandbox_collections: Collections
"""
merged = copy.copy(sandbox_collections)
# create/clear target collection
target = merged.database[new_tasks]
if wipe:
_log.debug("merge_tasks.wipe.begin")
target.remove()
merged.database['counter'].remove()
_log.debug("merge_tasks.wipe.end")
# perform the merge
batch = []
for doc in core_collections.tasks.find():
batch.append(doc)
if len(batch) == batch_size:
target.insert(batch)
batch = []
if batch:
target.insert(batch)
batch = []
for doc in sandbox_collections.tasks.find():
doc['task_id'] = id_prefix + '-' + str(doc['task_id'])
batch.append(doc)
if len(batch) == batch_size:
target.insert(batch)
batch = []
if batch:
target.insert(batch) | [
"def",
"merge_tasks",
"(",
"core_collections",
",",
"sandbox_collections",
",",
"id_prefix",
",",
"new_tasks",
",",
"batch_size",
"=",
"100",
",",
"wipe",
"=",
"False",
")",
":",
"merged",
"=",
"copy",
".",
"copy",
"(",
"sandbox_collections",
")",
"# create/clear target collection",
"target",
"=",
"merged",
".",
"database",
"[",
"new_tasks",
"]",
"if",
"wipe",
":",
"_log",
".",
"debug",
"(",
"\"merge_tasks.wipe.begin\"",
")",
"target",
".",
"remove",
"(",
")",
"merged",
".",
"database",
"[",
"'counter'",
"]",
".",
"remove",
"(",
")",
"_log",
".",
"debug",
"(",
"\"merge_tasks.wipe.end\"",
")",
"# perform the merge",
"batch",
"=",
"[",
"]",
"for",
"doc",
"in",
"core_collections",
".",
"tasks",
".",
"find",
"(",
")",
":",
"batch",
".",
"append",
"(",
"doc",
")",
"if",
"len",
"(",
"batch",
")",
"==",
"batch_size",
":",
"target",
".",
"insert",
"(",
"batch",
")",
"batch",
"=",
"[",
"]",
"if",
"batch",
":",
"target",
".",
"insert",
"(",
"batch",
")",
"batch",
"=",
"[",
"]",
"for",
"doc",
"in",
"sandbox_collections",
".",
"tasks",
".",
"find",
"(",
")",
":",
"doc",
"[",
"'task_id'",
"]",
"=",
"id_prefix",
"+",
"'-'",
"+",
"str",
"(",
"doc",
"[",
"'task_id'",
"]",
")",
"batch",
".",
"append",
"(",
"doc",
")",
"if",
"len",
"(",
"batch",
")",
"==",
"batch_size",
":",
"target",
".",
"insert",
"(",
"batch",
")",
"batch",
"=",
"[",
"]",
"if",
"batch",
":",
"target",
".",
"insert",
"(",
"batch",
")"
] | 34.764706 | 13.705882 |
def validate_float(cls, value):
"""
Note that int values are accepted.
"""
if not isinstance(value, (int, float)):
raise TypeError(
"value must be a number, got %s" % type(value)
) | [
"def",
"validate_float",
"(",
"cls",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"value must be a number, got %s\"",
"%",
"type",
"(",
"value",
")",
")"
] | 30.625 | 9.875 |
def maximum_vline_bundle(self, x0, y0, y1):
"""Compute a maximum set of vertical lines in the unit cells ``(x0,y)``
for :math:`y0 \leq y \leq y1`.
INPUTS:
y0,x0,x1: int
OUTPUT:
list of lists of qubits
"""
y_range = range(y1, y0 - 1, -1) if y0 < y1 else range(y1, y0 + 1)
vlines = [[(x0, y, 1, k) for y in y_range] for k in range(self.L)]
return list(filter(self._contains_line, vlines)) | [
"def",
"maximum_vline_bundle",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"y1",
")",
":",
"y_range",
"=",
"range",
"(",
"y1",
",",
"y0",
"-",
"1",
",",
"-",
"1",
")",
"if",
"y0",
"<",
"y1",
"else",
"range",
"(",
"y1",
",",
"y0",
"+",
"1",
")",
"vlines",
"=",
"[",
"[",
"(",
"x0",
",",
"y",
",",
"1",
",",
"k",
")",
"for",
"y",
"in",
"y_range",
"]",
"for",
"k",
"in",
"range",
"(",
"self",
".",
"L",
")",
"]",
"return",
"list",
"(",
"filter",
"(",
"self",
".",
"_contains_line",
",",
"vlines",
")",
")"
] | 33.142857 | 19.857143 |
def p_expr_LT_expr(p):
""" expr : expr LT expr
"""
p[0] = make_binary(p.lineno(2), 'LT', p[1], p[3], lambda x, y: x < y) | [
"def",
"p_expr_LT_expr",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"make_binary",
"(",
"p",
".",
"lineno",
"(",
"2",
")",
",",
"'LT'",
",",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"lambda",
"x",
",",
"y",
":",
"x",
"<",
"y",
")"
] | 32.25 | 12.75 |
def write_tokens_to_file(self):
''' Write api tokens to a file '''
config = dict()
config['API_KEY'] = self.api_key
config['ACCESS_TOKEN'] = self.access_token
config['REFRESH_TOKEN'] = self.refresh_token
config['AUTHORIZATION_CODE'] = self.authorization_code
if self.file_based_config:
config_from_file(self.config_filename, config)
else:
self.config = config | [
"def",
"write_tokens_to_file",
"(",
"self",
")",
":",
"config",
"=",
"dict",
"(",
")",
"config",
"[",
"'API_KEY'",
"]",
"=",
"self",
".",
"api_key",
"config",
"[",
"'ACCESS_TOKEN'",
"]",
"=",
"self",
".",
"access_token",
"config",
"[",
"'REFRESH_TOKEN'",
"]",
"=",
"self",
".",
"refresh_token",
"config",
"[",
"'AUTHORIZATION_CODE'",
"]",
"=",
"self",
".",
"authorization_code",
"if",
"self",
".",
"file_based_config",
":",
"config_from_file",
"(",
"self",
".",
"config_filename",
",",
"config",
")",
"else",
":",
"self",
".",
"config",
"=",
"config"
] | 39.727273 | 11.909091 |
def clear_adb_log(self):
"""Clears cached adb content."""
try:
self._ad.adb.logcat('-c')
except adb.AdbError as e:
# On Android O, the clear command fails due to a known bug.
# Catching this so we don't crash from this Android issue.
if b'failed to clear' in e.stderr:
self._ad.log.warning(
'Encountered known Android error to clear logcat.')
else:
raise | [
"def",
"clear_adb_log",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_ad",
".",
"adb",
".",
"logcat",
"(",
"'-c'",
")",
"except",
"adb",
".",
"AdbError",
"as",
"e",
":",
"# On Android O, the clear command fails due to a known bug.",
"# Catching this so we don't crash from this Android issue.",
"if",
"b'failed to clear'",
"in",
"e",
".",
"stderr",
":",
"self",
".",
"_ad",
".",
"log",
".",
"warning",
"(",
"'Encountered known Android error to clear logcat.'",
")",
"else",
":",
"raise"
] | 39.916667 | 16.416667 |
def remove(path):
"""Wrapper that switches between os.remove and shutil.rmtree depending on
whether the provided path is a file or directory.
"""
if not os.path.exists(path):
return
if os.path.isdir(path):
return shutil.rmtree(path)
if os.path.isfile(path):
return os.remove(path) | [
"def",
"remove",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"return",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"os",
".",
"remove",
"(",
"path",
")"
] | 24.538462 | 17.769231 |
def get_Mapping_key_value(mp):
"""Retrieves the key and value types from a PEP 484 mapping or subclass of such.
mp must be a (subclass of) typing.Mapping.
"""
try:
res = _select_Generic_superclass_parameters(mp, typing.Mapping)
except TypeError:
res = None
if res is None:
raise TypeError("Has no key/value types: "+type_str(mp))
else:
return tuple(res) | [
"def",
"get_Mapping_key_value",
"(",
"mp",
")",
":",
"try",
":",
"res",
"=",
"_select_Generic_superclass_parameters",
"(",
"mp",
",",
"typing",
".",
"Mapping",
")",
"except",
"TypeError",
":",
"res",
"=",
"None",
"if",
"res",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"Has no key/value types: \"",
"+",
"type_str",
"(",
"mp",
")",
")",
"else",
":",
"return",
"tuple",
"(",
"res",
")"
] | 33.5 | 17.583333 |
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) | [
"def",
"_build_filter",
"(",
"*",
"patterns",
")",
":",
"return",
"lambda",
"name",
":",
"any",
"(",
"fnmatchcase",
"(",
"name",
",",
"pat",
"=",
"pat",
")",
"for",
"pat",
"in",
"patterns",
")"
] | 43.666667 | 17 |
def from_python_file(
cls, python_file, lambdas_path, json_filename: str, stem: str
):
"""Builds GrFN object from Python file."""
with open(python_file, "r") as f:
pySrc = f.read()
return cls.from_python_src(pySrc, lambdas_path, json_filename, stem) | [
"def",
"from_python_file",
"(",
"cls",
",",
"python_file",
",",
"lambdas_path",
",",
"json_filename",
":",
"str",
",",
"stem",
":",
"str",
")",
":",
"with",
"open",
"(",
"python_file",
",",
"\"r\"",
")",
"as",
"f",
":",
"pySrc",
"=",
"f",
".",
"read",
"(",
")",
"return",
"cls",
".",
"from_python_src",
"(",
"pySrc",
",",
"lambdas_path",
",",
"json_filename",
",",
"stem",
")"
] | 41.571429 | 18.714286 |
def get_version(cls, settings: Dict[str, Any], path: str) -> Optional[str]:
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings["static_path"], path)
return cls._get_cached_version(abs_path) | [
"def",
"get_version",
"(",
"cls",
",",
"settings",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"path",
":",
"str",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"abs_path",
"=",
"cls",
".",
"get_absolute_path",
"(",
"settings",
"[",
"\"static_path\"",
"]",
",",
"path",
")",
"return",
"cls",
".",
"_get_cached_version",
"(",
"abs_path",
")"
] | 49.733333 | 23.466667 |
def is_playing_shared_game(self, steamID, appid_playing, format=None):
"""Returns valid lender SteamID if game currently played is borrowed.
steamID: The users ID
appid_playing: The game player is currently playing
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid_playing' : appid_playing}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'IsPlayingSharedGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | [
"def",
"is_playing_shared_game",
"(",
"self",
",",
"steamID",
",",
"appid_playing",
",",
"format",
"=",
"None",
")",
":",
"parameters",
"=",
"{",
"'steamid'",
":",
"steamID",
",",
"'appid_playing'",
":",
"appid_playing",
"}",
"if",
"format",
"is",
"not",
"None",
":",
"parameters",
"[",
"'format'",
"]",
"=",
"format",
"url",
"=",
"self",
".",
"create_request_url",
"(",
"self",
".",
"interface",
",",
"'IsPlayingSharedGame'",
",",
"1",
",",
"parameters",
")",
"data",
"=",
"self",
".",
"retrieve_request",
"(",
"url",
")",
"return",
"self",
".",
"return_data",
"(",
"data",
",",
"format",
"=",
"format",
")"
] | 43.8 | 19 |
def AddRow(self, *args):
''' Parms are a variable number of Elements '''
NumRows = len(self.Rows) # number of existing rows is our row number
CurrentRowNumber = NumRows # this row's number
CurrentRow = [] # start with a blank row and build up
# ------------------------- Add the elements to a row ------------------------- #
for i, element in enumerate(args): # Loop through list of elements and add them to the row
element.Position = (CurrentRowNumber, i)
element.ParentContainer = self
CurrentRow.append(element)
# ------------------------- Append the row to list of Rows ------------------------- #
self.Rows.append(CurrentRow) | [
"def",
"AddRow",
"(",
"self",
",",
"*",
"args",
")",
":",
"NumRows",
"=",
"len",
"(",
"self",
".",
"Rows",
")",
"# number of existing rows is our row number",
"CurrentRowNumber",
"=",
"NumRows",
"# this row's number",
"CurrentRow",
"=",
"[",
"]",
"# start with a blank row and build up",
"# ------------------------- Add the elements to a row ------------------------- #",
"for",
"i",
",",
"element",
"in",
"enumerate",
"(",
"args",
")",
":",
"# Loop through list of elements and add them to the row",
"element",
".",
"Position",
"=",
"(",
"CurrentRowNumber",
",",
"i",
")",
"element",
".",
"ParentContainer",
"=",
"self",
"CurrentRow",
".",
"append",
"(",
"element",
")",
"# ------------------------- Append the row to list of Rows ------------------------- #",
"self",
".",
"Rows",
".",
"append",
"(",
"CurrentRow",
")"
] | 60.583333 | 24.25 |
def DoesNotContain(self, value):
"""Sets the type of the WHERE clause as "does not contain".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
"""
self._awql = self._CreateSingleValueCondition(value, 'DOES_NOT_CONTAIN')
return self._query_builder | [
"def",
"DoesNotContain",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_awql",
"=",
"self",
".",
"_CreateSingleValueCondition",
"(",
"value",
",",
"'DOES_NOT_CONTAIN'",
")",
"return",
"self",
".",
"_query_builder"
] | 31.181818 | 20.636364 |
def is_list_of_list(item):
"""
check whether the item is list (tuple)
and consist of list (tuple) elements
"""
if (
type(item) in (list, tuple)
and len(item)
and isinstance(item[0], (list, tuple))
):
return True
return False | [
"def",
"is_list_of_list",
"(",
"item",
")",
":",
"if",
"(",
"type",
"(",
"item",
")",
"in",
"(",
"list",
",",
"tuple",
")",
"and",
"len",
"(",
"item",
")",
"and",
"isinstance",
"(",
"item",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
")",
":",
"return",
"True",
"return",
"False"
] | 22.75 | 13.083333 |
def expand_user(path):
"""Expand '~'-style usernames in strings.
This is similar to :func:`os.path.expanduser`, but it computes and returns
extra information that will be useful if the input was being used in
computing completions, and you wish to return the completions with the
original '~' instead of its expanded value.
Parameters
----------
path : str
String to be expanded. If no ~ is present, the output is the same as the
input.
Returns
-------
newpath : str
Result of ~ expansion in the input path.
tilde_expand : bool
Whether any expansion was performed or not.
tilde_val : str
The value that ~ was replaced with.
"""
# Default values
tilde_expand = False
tilde_val = ''
newpath = path
if path.startswith('~'):
tilde_expand = True
rest = len(path)-1
newpath = os.path.expanduser(path)
if rest:
tilde_val = newpath[:-rest]
else:
tilde_val = newpath
return newpath, tilde_expand, tilde_val | [
"def",
"expand_user",
"(",
"path",
")",
":",
"# Default values",
"tilde_expand",
"=",
"False",
"tilde_val",
"=",
"''",
"newpath",
"=",
"path",
"if",
"path",
".",
"startswith",
"(",
"'~'",
")",
":",
"tilde_expand",
"=",
"True",
"rest",
"=",
"len",
"(",
"path",
")",
"-",
"1",
"newpath",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"if",
"rest",
":",
"tilde_val",
"=",
"newpath",
"[",
":",
"-",
"rest",
"]",
"else",
":",
"tilde_val",
"=",
"newpath",
"return",
"newpath",
",",
"tilde_expand",
",",
"tilde_val"
] | 27.368421 | 20.894737 |
def chunked_qs(qs, chunk_size=10000, fields=None):
    """Generator yielding successive chunks of ``qs``, ``chunk_size`` rows
    at a time, using keyset pagination on ``id``.

    Usage:
        >>> qs = FailureLine.objects.filter(action='test_result')
        >>> for qs in chunked_qs(qs, chunk_size=10000, fields=['id', 'message']):
        ...     for line in qs:
        ...         print(line.message)

    Note: While Django 2.0 provides chunking via QuerySet.iterator(), we
    can't make use of this while using MySQL which doesn't support
    streaming results.

    [1]: https://docs.djangoproject.com/en/2.0/ref/models/querysets/#iterator
    """
    last_seen_id = 0
    while True:
        page = qs.filter(id__gt=last_seen_id).order_by('id')
        if fields is not None:
            page = page.only(*fields)
        # Materialize the slice so the QuerySet executes here and the last
        # row's id is available; .last() would ignore the slicing we do.
        rows = list(page[:chunk_size])
        if not rows:
            break
        yield rows
        # Resume after the largest id emitted so far.
        last_seen_id = rows[-1].id
"def",
"chunked_qs",
"(",
"qs",
",",
"chunk_size",
"=",
"10000",
",",
"fields",
"=",
"None",
")",
":",
"min_id",
"=",
"0",
"while",
"True",
":",
"chunk",
"=",
"qs",
".",
"filter",
"(",
"id__gt",
"=",
"min_id",
")",
".",
"order_by",
"(",
"'id'",
")",
"if",
"fields",
"is",
"not",
"None",
":",
"chunk",
"=",
"chunk",
".",
"only",
"(",
"*",
"fields",
")",
"# Cast to a list to execute the QuerySet here and allow us to get the",
"# last ID when updating min_id. We can't use .last() later as it",
"# ignores the slicing we do.",
"rows",
"=",
"list",
"(",
"chunk",
"[",
":",
"chunk_size",
"]",
")",
"total",
"=",
"len",
"(",
"rows",
")",
"if",
"total",
"<",
"1",
":",
"break",
"yield",
"rows",
"# update the minimum ID for next iteration",
"min_id",
"=",
"rows",
"[",
"-",
"1",
"]",
".",
"id"
] | 29.282051 | 25.128205 |
def p_params(self, p):
    'params : params_begin param_end'
    # NOTE: the docstring above is the PLY/yacc grammar production for this
    # rule -- it is read by the parser generator and must stay exactly as
    # written; it is not ordinary documentation.
    # Reduce: extend the accumulated params tuple with the new param.
    p[0] = p[1] + (p[2],)
    # Propagate the line number of the first symbol onto the result symbol.
    p.set_lineno(0, p.lineno(1))
"def",
"p_params",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"+",
"(",
"p",
"[",
"2",
"]",
",",
")",
"p",
".",
"set_lineno",
"(",
"0",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | 32 | 8.5 |
def _GetEventData(
    self, parser_mediator, record_index, evt_record, recovered=False):
    """Retrieves event data from the Windows EventLog (EVT) record.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      record_index (int): event record index.
      evt_record (pyevt.record): event record.
      recovered (Optional[bool]): True if the record was recovered.

    Returns:
      WinEvtRecordEventData: event data.
    """
    event_data = WinEvtRecordEventData()
    try:
        event_data.record_number = evt_record.identifier
    except OverflowError as error:
        parser_mediator.ProduceExtractionWarning((
            'unable to read record identifier from event record: {0:d} '
            'with error: {1!s}').format(record_index, error))
    try:
        identifier = evt_record.event_identifier
    except OverflowError as error:
        parser_mediator.ProduceExtractionWarning((
            'unable to read event identifier from event record: {0:d} '
            'with error: {1!s}').format(record_index, error))
        identifier = None
    event_data.offset = evt_record.offset
    event_data.recovered = recovered
    # We want the event identifier to match the behavior of that of the EVTX
    # event records.
    if identifier is not None:
        event_data.event_identifier = identifier & 0xffff
        event_data.facility = (identifier >> 16) & 0x0fff
        event_data.severity = identifier >> 30
        event_data.message_identifier = identifier
    # Copy the remaining same-named attributes straight off the record.
    for attribute_name in ('event_type', 'event_category', 'source_name'):
        setattr(event_data, attribute_name, getattr(evt_record, attribute_name))
    # Computer name is the value stored in the event record and does not
    # necessarily corresponds with the actual hostname.
    event_data.computer_name = evt_record.computer_name
    event_data.user_sid = evt_record.user_security_identifier
    event_data.strings = list(evt_record.strings)
    return event_data
"def",
"_GetEventData",
"(",
"self",
",",
"parser_mediator",
",",
"record_index",
",",
"evt_record",
",",
"recovered",
"=",
"False",
")",
":",
"event_data",
"=",
"WinEvtRecordEventData",
"(",
")",
"try",
":",
"event_data",
".",
"record_number",
"=",
"evt_record",
".",
"identifier",
"except",
"OverflowError",
"as",
"exception",
":",
"parser_mediator",
".",
"ProduceExtractionWarning",
"(",
"(",
"'unable to read record identifier from event record: {0:d} '",
"'with error: {1!s}'",
")",
".",
"format",
"(",
"record_index",
",",
"exception",
")",
")",
"try",
":",
"event_identifier",
"=",
"evt_record",
".",
"event_identifier",
"except",
"OverflowError",
"as",
"exception",
":",
"parser_mediator",
".",
"ProduceExtractionWarning",
"(",
"(",
"'unable to read event identifier from event record: {0:d} '",
"'with error: {1!s}'",
")",
".",
"format",
"(",
"record_index",
",",
"exception",
")",
")",
"event_identifier",
"=",
"None",
"event_data",
".",
"offset",
"=",
"evt_record",
".",
"offset",
"event_data",
".",
"recovered",
"=",
"recovered",
"# We want the event identifier to match the behavior of that of the EVTX",
"# event records.",
"if",
"event_identifier",
"is",
"not",
"None",
":",
"event_data",
".",
"event_identifier",
"=",
"event_identifier",
"&",
"0xffff",
"event_data",
".",
"facility",
"=",
"(",
"event_identifier",
">>",
"16",
")",
"&",
"0x0fff",
"event_data",
".",
"severity",
"=",
"event_identifier",
">>",
"30",
"event_data",
".",
"message_identifier",
"=",
"event_identifier",
"event_data",
".",
"event_type",
"=",
"evt_record",
".",
"event_type",
"event_data",
".",
"event_category",
"=",
"evt_record",
".",
"event_category",
"event_data",
".",
"source_name",
"=",
"evt_record",
".",
"source_name",
"# Computer name is the value stored in the event record and does not",
"# necessarily corresponds with the actual hostname.",
"event_data",
".",
"computer_name",
"=",
"evt_record",
".",
"computer_name",
"event_data",
".",
"user_sid",
"=",
"evt_record",
".",
"user_security_identifier",
"event_data",
".",
"strings",
"=",
"list",
"(",
"evt_record",
".",
"strings",
")",
"return",
"event_data"
] | 37.290909 | 20.927273 |
def run(self, model, tol=0.001, max_iters=999, verbose=True):
    """
    Runs EM iterations
    :param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)
    :param tol: Tolerance for termination
    :param max_iters: Maximum number of iterations until termination
    :param verbose: Display information on how EM is running
    :return: Nothing (as it performs in-place operations)
    """
    # Python 2 module (uses print statements).
    # Raise on numpy floating-point errors, but treat underflow as benign
    # since expression values can legitimately become tiny.
    # NOTE(review): orig_err_states is captured but never restored before
    # returning, so np.seterr settings leak to the caller -- confirm intent.
    orig_err_states = np.seterr(all='raise')
    np.seterr(under='ignore')
    if verbose:
        print
        print "Iter No Time (hh:mm:ss) Total change (TPM) "
        print "------- --------------- ----------------------"
    num_iters = 0
    # Seed the error above any plausible tolerance so the loop runs at
    # least once; expressions are normalized to TPM (sum to 1,000,000).
    err_sum = 1000000.0
    time0 = time.time()
    target_err = 1000000.0 * tol
    while err_sum > target_err and num_iters < max_iters:
        # Snapshot the TPM-normalized isoform expression before the update.
        prev_isoform_expression = self.get_allelic_expression().sum(axis=0)
        prev_isoform_expression *= (1000000.0 / prev_isoform_expression.sum())
        # One EM update under the requested normalization model (in place).
        self.update_allelic_expression(model=model)
        curr_isoform_expression = self.get_allelic_expression().sum(axis=0)
        curr_isoform_expression *= (1000000.0 / curr_isoform_expression.sum())
        # Total absolute change in TPM units drives the convergence test.
        err = np.abs(curr_isoform_expression - prev_isoform_expression)
        err_sum = err.sum()
        num_iters += 1
        if verbose:
            time1 = time.time()
            delmin, s = divmod(int(time1 - time0), 60)
            h, m = divmod(delmin, 60)
            print " %5d %4d:%02d:%02d %9.1f / 1000000" % (num_iters, h, m, s, err_sum)
"def",
"run",
"(",
"self",
",",
"model",
",",
"tol",
"=",
"0.001",
",",
"max_iters",
"=",
"999",
",",
"verbose",
"=",
"True",
")",
":",
"orig_err_states",
"=",
"np",
".",
"seterr",
"(",
"all",
"=",
"'raise'",
")",
"np",
".",
"seterr",
"(",
"under",
"=",
"'ignore'",
")",
"if",
"verbose",
":",
"print",
"print",
"\"Iter No Time (hh:mm:ss) Total change (TPM) \"",
"print",
"\"------- --------------- ----------------------\"",
"num_iters",
"=",
"0",
"err_sum",
"=",
"1000000.0",
"time0",
"=",
"time",
".",
"time",
"(",
")",
"target_err",
"=",
"1000000.0",
"*",
"tol",
"while",
"err_sum",
">",
"target_err",
"and",
"num_iters",
"<",
"max_iters",
":",
"prev_isoform_expression",
"=",
"self",
".",
"get_allelic_expression",
"(",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"prev_isoform_expression",
"*=",
"(",
"1000000.0",
"/",
"prev_isoform_expression",
".",
"sum",
"(",
")",
")",
"self",
".",
"update_allelic_expression",
"(",
"model",
"=",
"model",
")",
"curr_isoform_expression",
"=",
"self",
".",
"get_allelic_expression",
"(",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"curr_isoform_expression",
"*=",
"(",
"1000000.0",
"/",
"curr_isoform_expression",
".",
"sum",
"(",
")",
")",
"err",
"=",
"np",
".",
"abs",
"(",
"curr_isoform_expression",
"-",
"prev_isoform_expression",
")",
"err_sum",
"=",
"err",
".",
"sum",
"(",
")",
"num_iters",
"+=",
"1",
"if",
"verbose",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"delmin",
",",
"s",
"=",
"divmod",
"(",
"int",
"(",
"time1",
"-",
"time0",
")",
",",
"60",
")",
"h",
",",
"m",
"=",
"divmod",
"(",
"delmin",
",",
"60",
")",
"print",
"\" %5d %4d:%02d:%02d %9.1f / 1000000\"",
"%",
"(",
"num_iters",
",",
"h",
",",
"m",
",",
"s",
",",
"err_sum",
")"
] | 49.529412 | 22.941176 |
def lookup(self, vtype, vname, target_id=None):
    """Return value of vname from the variable store vtype.

    Valid vtypes are 'strings', 'counters', and 'pending'. If the value
    is not found in the current step's store, earlier steps will be
    checked. If not found anywhere, '', 0, or (None, None) is returned.

    Fix: the previous implementation returned the null value as soon as
    the FIRST step lacked vname, so earlier steps were never consulted;
    the miss-return now happens only after all steps are exhausted.
    """
    nullvals = {'strings': '', 'counters': 0, 'pending': (None, None)}
    nullval = nullvals[vtype]
    vstyle = None
    if vtype == 'counters':
        # Counter names arrive as a sequence, optionally carrying a
        # display style: ('name',) or ('name', style).
        if len(vname) > 1:
            vname, vstyle = vname
        else:
            vname = vname[0]
    if target_id is not None:
        try:
            state = self.state[vtype][target_id]
            steps = self.state[vtype][target_id].keys()
        except KeyError:
            log(WARN, u'Bad ID target lookup {}'.format(
                target_id).encode('utf-8'))
            return nullval
    else:
        state = self.state
        steps = self.state['scope']
    # Walk every candidate step; only fall back to the null value when
    # vname is absent from all of them.
    for step in steps:
        if vname in state[step][vtype]:
            if vtype == 'pending':
                return (state[step][vtype][vname], step)
            val = state[step][vtype][vname]
            if vstyle is not None:
                return self.counter_style(val, vstyle)
            return val
    return nullval
"def",
"lookup",
"(",
"self",
",",
"vtype",
",",
"vname",
",",
"target_id",
"=",
"None",
")",
":",
"nullvals",
"=",
"{",
"'strings'",
":",
"''",
",",
"'counters'",
":",
"0",
",",
"'pending'",
":",
"(",
"None",
",",
"None",
")",
"}",
"nullval",
"=",
"nullvals",
"[",
"vtype",
"]",
"vstyle",
"=",
"None",
"if",
"vtype",
"==",
"'counters'",
":",
"if",
"len",
"(",
"vname",
")",
">",
"1",
":",
"vname",
",",
"vstyle",
"=",
"vname",
"else",
":",
"vname",
"=",
"vname",
"[",
"0",
"]",
"if",
"target_id",
"is",
"not",
"None",
":",
"try",
":",
"state",
"=",
"self",
".",
"state",
"[",
"vtype",
"]",
"[",
"target_id",
"]",
"steps",
"=",
"self",
".",
"state",
"[",
"vtype",
"]",
"[",
"target_id",
"]",
".",
"keys",
"(",
")",
"except",
"KeyError",
":",
"log",
"(",
"WARN",
",",
"u'Bad ID target lookup {}'",
".",
"format",
"(",
"target_id",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"nullval",
"else",
":",
"state",
"=",
"self",
".",
"state",
"steps",
"=",
"self",
".",
"state",
"[",
"'scope'",
"]",
"for",
"step",
"in",
"steps",
":",
"if",
"vname",
"in",
"state",
"[",
"step",
"]",
"[",
"vtype",
"]",
":",
"if",
"vtype",
"==",
"'pending'",
":",
"return",
"(",
"state",
"[",
"step",
"]",
"[",
"vtype",
"]",
"[",
"vname",
"]",
",",
"step",
")",
"else",
":",
"val",
"=",
"state",
"[",
"step",
"]",
"[",
"vtype",
"]",
"[",
"vname",
"]",
"if",
"vstyle",
"is",
"not",
"None",
":",
"return",
"self",
".",
"counter_style",
"(",
"val",
",",
"vstyle",
")",
"return",
"val",
"else",
":",
"return",
"nullval"
] | 34.731707 | 17.121951 |
def convertVariable(self, key, varName, varValue):
    """Puts the function in the globals() of the main module.

    Only acts on FunctionEncapsulation values; anything else is ignored.
    """
    if not isinstance(varValue, encapsulation.FunctionEncapsulation):
        return
    function = varValue.getFunction()
    main_module = sys.modules["__main__"]
    # Rebind the function into __main__ so its global scope matches the
    # current interpreter's main module.
    function.__name__ = varName
    function.__globals__.update(main_module.__dict__)
    setattr(main_module, varName, function)
    shared.elements[key].update({
        varName: function,
    })
"def",
"convertVariable",
"(",
"self",
",",
"key",
",",
"varName",
",",
"varValue",
")",
":",
"if",
"isinstance",
"(",
"varValue",
",",
"encapsulation",
".",
"FunctionEncapsulation",
")",
":",
"result",
"=",
"varValue",
".",
"getFunction",
"(",
")",
"# Update the global scope of the function to match the current module",
"mainModule",
"=",
"sys",
".",
"modules",
"[",
"\"__main__\"",
"]",
"result",
".",
"__name__",
"=",
"varName",
"result",
".",
"__globals__",
".",
"update",
"(",
"mainModule",
".",
"__dict__",
")",
"setattr",
"(",
"mainModule",
",",
"varName",
",",
"result",
")",
"shared",
".",
"elements",
"[",
"key",
"]",
".",
"update",
"(",
"{",
"varName",
":",
"result",
",",
"}",
")"
] | 45.307692 | 15 |
def _is_better_match(x, y, matched_a, matched_b, attributes_dict_a, attributes_dict_b):
    """Decide whether pairing (x, y) improves on both elements' current matches.

    :param x: The first element of a possible match.
    :param y: The second element of a possible match.
    :param matched_a: The current matches for the first set.
    :param matched_b: The current matches for the second set.
    :param attributes_dict_a: The attributes for each element in the first set.
    :param attributes_dict_b: The attributes for each element in the second set.
    :returns: True/False
    """
    attrs_x = attributes_dict_a[x]
    attrs_y = attributes_dict_b[y]
    # The candidate must beat x's existing match (if any)...
    if x in matched_a:
        rival_attrs = attributes_dict_b[matched_a[x]]
        if _euclidean_dist(attrs_x, attrs_y) >= _euclidean_dist(attrs_x, rival_attrs):
            return False
    # ...and y's existing match (if any), strictly in both cases.
    if y in matched_b:
        rival_attrs = attributes_dict_a[matched_b[y]]
        if _euclidean_dist(attrs_x, attrs_y) >= _euclidean_dist(attrs_y, rival_attrs):
            return False
    return True
"def",
"_is_better_match",
"(",
"x",
",",
"y",
",",
"matched_a",
",",
"matched_b",
",",
"attributes_dict_a",
",",
"attributes_dict_b",
")",
":",
"attributes_x",
"=",
"attributes_dict_a",
"[",
"x",
"]",
"attributes_y",
"=",
"attributes_dict_b",
"[",
"y",
"]",
"if",
"x",
"in",
"matched_a",
":",
"attributes_match",
"=",
"attributes_dict_b",
"[",
"matched_a",
"[",
"x",
"]",
"]",
"if",
"_euclidean_dist",
"(",
"attributes_x",
",",
"attributes_y",
")",
">=",
"_euclidean_dist",
"(",
"attributes_x",
",",
"attributes_match",
")",
":",
"return",
"False",
"if",
"y",
"in",
"matched_b",
":",
"attributes_match",
"=",
"attributes_dict_a",
"[",
"matched_b",
"[",
"y",
"]",
"]",
"if",
"_euclidean_dist",
"(",
"attributes_x",
",",
"attributes_y",
")",
">=",
"_euclidean_dist",
"(",
"attributes_y",
",",
"attributes_match",
")",
":",
"return",
"False",
"return",
"True"
] | 52.428571 | 24.619048 |
def parse_mailto(mailto_str):
    """Interpret a mailto-string.

    :param mailto_str: the string to interpret. Must conform to :rfc:2368.
    :type mailto_str: str
    :return: the header fields and the body found in the mailto link as a
        tuple of length two; ``(None, None)`` when the string is not a
        mailto link.
    :rtype: tuple(dict(str->list(str)), str)
    """
    if not mailto_str.startswith('mailto:'):
        return (None, None)
    import urllib.parse
    unquote = urllib.parse.unquote
    # Split "addr?k=v&k=v" into the recipient part and the query string.
    recipient, _, query = mailto_str[7:].partition('?')
    headers = {}
    body = u''
    recipient = unquote(recipient)
    if recipient:
        headers['To'] = [recipient]
    for param in query.split('&'):
        name, _, raw_value = param.partition('=')
        name = name.capitalize()
        if name == 'Body':
            # The body travels separately from the header dict.
            body = unquote(raw_value)
        elif raw_value:
            headers[name] = [unquote(raw_value)]
    return (headers, body)
"def",
"parse_mailto",
"(",
"mailto_str",
")",
":",
"if",
"mailto_str",
".",
"startswith",
"(",
"'mailto:'",
")",
":",
"import",
"urllib",
".",
"parse",
"to_str",
",",
"parms_str",
"=",
"mailto_str",
"[",
"7",
":",
"]",
".",
"partition",
"(",
"'?'",
")",
"[",
":",
":",
"2",
"]",
"headers",
"=",
"{",
"}",
"body",
"=",
"u''",
"to",
"=",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"to_str",
")",
"if",
"to",
":",
"headers",
"[",
"'To'",
"]",
"=",
"[",
"to",
"]",
"for",
"s",
"in",
"parms_str",
".",
"split",
"(",
"'&'",
")",
":",
"key",
",",
"value",
"=",
"s",
".",
"partition",
"(",
"'='",
")",
"[",
":",
":",
"2",
"]",
"key",
"=",
"key",
".",
"capitalize",
"(",
")",
"if",
"key",
"==",
"'Body'",
":",
"body",
"=",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"value",
")",
"elif",
"value",
":",
"headers",
"[",
"key",
"]",
"=",
"[",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"value",
")",
"]",
"return",
"(",
"headers",
",",
"body",
")",
"else",
":",
"return",
"(",
"None",
",",
"None",
")"
] | 30.433333 | 16.433333 |
def _check_quotas(self, time_ms):
"""
Check if we have violated our quota for any metric that
has a configured quota
"""
for metric in self._metrics:
if metric.config and metric.config.quota:
value = metric.value(time_ms)
if not metric.config.quota.is_acceptable(value):
raise QuotaViolationError("'%s' violated quota. Actual: "
"%d, Threshold: %d" %
(metric.metric_name,
value,
metric.config.quota.bound)) | [
"def",
"_check_quotas",
"(",
"self",
",",
"time_ms",
")",
":",
"for",
"metric",
"in",
"self",
".",
"_metrics",
":",
"if",
"metric",
".",
"config",
"and",
"metric",
".",
"config",
".",
"quota",
":",
"value",
"=",
"metric",
".",
"value",
"(",
"time_ms",
")",
"if",
"not",
"metric",
".",
"config",
".",
"quota",
".",
"is_acceptable",
"(",
"value",
")",
":",
"raise",
"QuotaViolationError",
"(",
"\"'%s' violated quota. Actual: \"",
"\"%d, Threshold: %d\"",
"%",
"(",
"metric",
".",
"metric_name",
",",
"value",
",",
"metric",
".",
"config",
".",
"quota",
".",
"bound",
")",
")"
] | 48.785714 | 15.928571 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.