text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def shut_down_instance(self, instances=None):
"""Shut down a list of instances, if provided.
If no instance is provided, the last instance started up will be shut down.
"""
if instances and len(self.instances) > 0:
print(instances)
try:
print([i.id for i in instances])
except Exception as e:
print(e)
term = self.client.terminate_instances(InstanceIds=instances)
logger.info("Shut down {} instances (ids:{}".format(len(instances), str(instances)))
elif len(self.instances) > 0:
instance = self.instances.pop()
term = self.client.terminate_instances(InstanceIds=[instance])
logger.info("Shut down 1 instance (id:{})".format(instance))
else:
logger.warn("No Instances to shut down.\n")
return -1
self.get_instance_state()
return term | [
"def",
"shut_down_instance",
"(",
"self",
",",
"instances",
"=",
"None",
")",
":",
"if",
"instances",
"and",
"len",
"(",
"self",
".",
"instances",
")",
">",
"0",
":",
"print",
"(",
"instances",
")",
"try",
":",
"print",
"(",
"[",
"i",
".",
"id",
"for",
"i",
"in",
"instances",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"term",
"=",
"self",
".",
"client",
".",
"terminate_instances",
"(",
"InstanceIds",
"=",
"instances",
")",
"logger",
".",
"info",
"(",
"\"Shut down {} instances (ids:{}\"",
".",
"format",
"(",
"len",
"(",
"instances",
")",
",",
"str",
"(",
"instances",
")",
")",
")",
"elif",
"len",
"(",
"self",
".",
"instances",
")",
">",
"0",
":",
"instance",
"=",
"self",
".",
"instances",
".",
"pop",
"(",
")",
"term",
"=",
"self",
".",
"client",
".",
"terminate_instances",
"(",
"InstanceIds",
"=",
"[",
"instance",
"]",
")",
"logger",
".",
"info",
"(",
"\"Shut down 1 instance (id:{})\"",
".",
"format",
"(",
"instance",
")",
")",
"else",
":",
"logger",
".",
"warn",
"(",
"\"No Instances to shut down.\\n\"",
")",
"return",
"-",
"1",
"self",
".",
"get_instance_state",
"(",
")",
"return",
"term"
] | 40.347826 | 19.695652 |
def parse_stats_file(self, file_name):
""" Read and parse given file_name, return config as a dictionary """
stats = {}
try:
with open(file_name, "r") as fhandle:
fbuffer = []
save_buffer = False
for line in fhandle:
line = line.rstrip("\n")
line = self._trim(line)
if line == "" or line.startswith("#"):
continue
elif line.endswith("{"):
save_buffer = True
fbuffer.append(line)
continue
elif line.endswith("}"):
tmp_dict = self._parse_config_buffer(fbuffer)
fbuffer = None
fbuffer = list()
if len(tmp_dict) < 1:
continue
if tmp_dict["_type"] == "info":
stats["info"] = tmp_dict
elif tmp_dict["_type"] == "programstatus":
stats["programstatus"] = tmp_dict
else:
entity_type = tmp_dict["_type"]
if entity_type not in stats.keys():
stats[entity_type] = []
stats[entity_type].append(tmp_dict)
continue
elif save_buffer is True:
fbuffer.append(line)
except Exception as exception:
self.log.info("Caught exception: %s", exception)
return stats | [
"def",
"parse_stats_file",
"(",
"self",
",",
"file_name",
")",
":",
"stats",
"=",
"{",
"}",
"try",
":",
"with",
"open",
"(",
"file_name",
",",
"\"r\"",
")",
"as",
"fhandle",
":",
"fbuffer",
"=",
"[",
"]",
"save_buffer",
"=",
"False",
"for",
"line",
"in",
"fhandle",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
"\"\\n\"",
")",
"line",
"=",
"self",
".",
"_trim",
"(",
"line",
")",
"if",
"line",
"==",
"\"\"",
"or",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"continue",
"elif",
"line",
".",
"endswith",
"(",
"\"{\"",
")",
":",
"save_buffer",
"=",
"True",
"fbuffer",
".",
"append",
"(",
"line",
")",
"continue",
"elif",
"line",
".",
"endswith",
"(",
"\"}\"",
")",
":",
"tmp_dict",
"=",
"self",
".",
"_parse_config_buffer",
"(",
"fbuffer",
")",
"fbuffer",
"=",
"None",
"fbuffer",
"=",
"list",
"(",
")",
"if",
"len",
"(",
"tmp_dict",
")",
"<",
"1",
":",
"continue",
"if",
"tmp_dict",
"[",
"\"_type\"",
"]",
"==",
"\"info\"",
":",
"stats",
"[",
"\"info\"",
"]",
"=",
"tmp_dict",
"elif",
"tmp_dict",
"[",
"\"_type\"",
"]",
"==",
"\"programstatus\"",
":",
"stats",
"[",
"\"programstatus\"",
"]",
"=",
"tmp_dict",
"else",
":",
"entity_type",
"=",
"tmp_dict",
"[",
"\"_type\"",
"]",
"if",
"entity_type",
"not",
"in",
"stats",
".",
"keys",
"(",
")",
":",
"stats",
"[",
"entity_type",
"]",
"=",
"[",
"]",
"stats",
"[",
"entity_type",
"]",
".",
"append",
"(",
"tmp_dict",
")",
"continue",
"elif",
"save_buffer",
"is",
"True",
":",
"fbuffer",
".",
"append",
"(",
"line",
")",
"except",
"Exception",
"as",
"exception",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Caught exception: %s\"",
",",
"exception",
")",
"return",
"stats"
] | 39.190476 | 14.309524 |
def _renameClasses(classes, prefix):
"""
Replace class IDs with nice strings.
"""
renameMap = {}
for classID, glyphList in classes.items():
if len(glyphList) == 0:
groupName = "%s_empty_lu.%d_st.%d_cl.%d" % (prefix, classID[0], classID[1], classID[2])
elif len(glyphList) == 1:
groupName = list(glyphList)[0]
else:
glyphList = list(sorted(glyphList))
groupName = prefix + glyphList[0]
renameMap[classID] = groupName
return renameMap | [
"def",
"_renameClasses",
"(",
"classes",
",",
"prefix",
")",
":",
"renameMap",
"=",
"{",
"}",
"for",
"classID",
",",
"glyphList",
"in",
"classes",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"glyphList",
")",
"==",
"0",
":",
"groupName",
"=",
"\"%s_empty_lu.%d_st.%d_cl.%d\"",
"%",
"(",
"prefix",
",",
"classID",
"[",
"0",
"]",
",",
"classID",
"[",
"1",
"]",
",",
"classID",
"[",
"2",
"]",
")",
"elif",
"len",
"(",
"glyphList",
")",
"==",
"1",
":",
"groupName",
"=",
"list",
"(",
"glyphList",
")",
"[",
"0",
"]",
"else",
":",
"glyphList",
"=",
"list",
"(",
"sorted",
"(",
"glyphList",
")",
")",
"groupName",
"=",
"prefix",
"+",
"glyphList",
"[",
"0",
"]",
"renameMap",
"[",
"classID",
"]",
"=",
"groupName",
"return",
"renameMap"
] | 34.8 | 11.333333 |
def _get_more(collection_name, num_to_return, cursor_id):
"""Get an OP_GET_MORE message."""
return b"".join([
_ZERO_32,
_make_c_string(collection_name),
_pack_int(num_to_return),
_pack_long_long(cursor_id)]) | [
"def",
"_get_more",
"(",
"collection_name",
",",
"num_to_return",
",",
"cursor_id",
")",
":",
"return",
"b\"\"",
".",
"join",
"(",
"[",
"_ZERO_32",
",",
"_make_c_string",
"(",
"collection_name",
")",
",",
"_pack_int",
"(",
"num_to_return",
")",
",",
"_pack_long_long",
"(",
"cursor_id",
")",
"]",
")"
] | 34.428571 | 10 |
def update(self, title, rulegroups):
"""Updates this segment."""
body = {
"Title": title,
"RuleGroups": rulegroups}
response = self._put("/segments/%s.json" %
self.segment_id, json.dumps(body)) | [
"def",
"update",
"(",
"self",
",",
"title",
",",
"rulegroups",
")",
":",
"body",
"=",
"{",
"\"Title\"",
":",
"title",
",",
"\"RuleGroups\"",
":",
"rulegroups",
"}",
"response",
"=",
"self",
".",
"_put",
"(",
"\"/segments/%s.json\"",
"%",
"self",
".",
"segment_id",
",",
"json",
".",
"dumps",
"(",
"body",
")",
")"
] | 37.714286 | 11 |
def reparentDirectories(self):
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Adds
subdirectories as children to the relevant directory ExhaleNode. If a node in
``self.dirs`` is added as a child to a different directory node, it is removed
from the ``self.dirs`` list.
'''
dir_parts = []
dir_ranks = []
for d in self.dirs:
parts = d.name.split(os.sep)
for p in parts:
if p not in dir_parts:
dir_parts.append(p)
dir_ranks.append((len(parts), d))
traversal = sorted(dir_ranks)
removals = []
for rank, directory in reversed(traversal):
# rank one means top level directory
if rank < 2:
break
# otherwise, this is nested
for p_rank, p_directory in reversed(traversal):
if p_rank == rank - 1:
if p_directory.name == os.path.dirname(directory.name):
p_directory.children.append(directory)
directory.parent = p_directory
if directory not in removals:
removals.append(directory)
break
for rm in removals:
self.dirs.remove(rm) | [
"def",
"reparentDirectories",
"(",
"self",
")",
":",
"dir_parts",
"=",
"[",
"]",
"dir_ranks",
"=",
"[",
"]",
"for",
"d",
"in",
"self",
".",
"dirs",
":",
"parts",
"=",
"d",
".",
"name",
".",
"split",
"(",
"os",
".",
"sep",
")",
"for",
"p",
"in",
"parts",
":",
"if",
"p",
"not",
"in",
"dir_parts",
":",
"dir_parts",
".",
"append",
"(",
"p",
")",
"dir_ranks",
".",
"append",
"(",
"(",
"len",
"(",
"parts",
")",
",",
"d",
")",
")",
"traversal",
"=",
"sorted",
"(",
"dir_ranks",
")",
"removals",
"=",
"[",
"]",
"for",
"rank",
",",
"directory",
"in",
"reversed",
"(",
"traversal",
")",
":",
"# rank one means top level directory",
"if",
"rank",
"<",
"2",
":",
"break",
"# otherwise, this is nested",
"for",
"p_rank",
",",
"p_directory",
"in",
"reversed",
"(",
"traversal",
")",
":",
"if",
"p_rank",
"==",
"rank",
"-",
"1",
":",
"if",
"p_directory",
".",
"name",
"==",
"os",
".",
"path",
".",
"dirname",
"(",
"directory",
".",
"name",
")",
":",
"p_directory",
".",
"children",
".",
"append",
"(",
"directory",
")",
"directory",
".",
"parent",
"=",
"p_directory",
"if",
"directory",
"not",
"in",
"removals",
":",
"removals",
".",
"append",
"(",
"directory",
")",
"break",
"for",
"rm",
"in",
"removals",
":",
"self",
".",
"dirs",
".",
"remove",
"(",
"rm",
")"
] | 38.852941 | 17.029412 |
def getPortNumberList(self):
"""
Get the port number of each hub toward device.
"""
port_list = (c_uint8 * PATH_MAX_DEPTH)()
result = libusb1.libusb_get_port_numbers(
self.device_p, port_list, len(port_list))
mayRaiseUSBError(result)
return list(port_list[:result]) | [
"def",
"getPortNumberList",
"(",
"self",
")",
":",
"port_list",
"=",
"(",
"c_uint8",
"*",
"PATH_MAX_DEPTH",
")",
"(",
")",
"result",
"=",
"libusb1",
".",
"libusb_get_port_numbers",
"(",
"self",
".",
"device_p",
",",
"port_list",
",",
"len",
"(",
"port_list",
")",
")",
"mayRaiseUSBError",
"(",
"result",
")",
"return",
"list",
"(",
"port_list",
"[",
":",
"result",
"]",
")"
] | 36.111111 | 7.222222 |
def linkify_es_by_h(self, hosts):
"""Add each escalation object into host.escalation attribute
:param hosts: host list, used to look for a specific host
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for escal in self:
# If no host, no hope of having a service
if (not hasattr(escal, 'host_name') or escal.host_name.strip() == '' or
(hasattr(escal, 'service_description')
and escal.service_description.strip() != '')):
continue
# I must be NOT a escalation on for service
for hname in strip_and_uniq(escal.host_name.split(',')):
host = hosts.find_by_name(hname)
if host is not None:
host.escalations.append(escal.uuid) | [
"def",
"linkify_es_by_h",
"(",
"self",
",",
"hosts",
")",
":",
"for",
"escal",
"in",
"self",
":",
"# If no host, no hope of having a service",
"if",
"(",
"not",
"hasattr",
"(",
"escal",
",",
"'host_name'",
")",
"or",
"escal",
".",
"host_name",
".",
"strip",
"(",
")",
"==",
"''",
"or",
"(",
"hasattr",
"(",
"escal",
",",
"'service_description'",
")",
"and",
"escal",
".",
"service_description",
".",
"strip",
"(",
")",
"!=",
"''",
")",
")",
":",
"continue",
"# I must be NOT a escalation on for service",
"for",
"hname",
"in",
"strip_and_uniq",
"(",
"escal",
".",
"host_name",
".",
"split",
"(",
"','",
")",
")",
":",
"host",
"=",
"hosts",
".",
"find_by_name",
"(",
"hname",
")",
"if",
"host",
"is",
"not",
"None",
":",
"host",
".",
"escalations",
".",
"append",
"(",
"escal",
".",
"uuid",
")"
] | 45.444444 | 16.611111 |
def postpro_standardize(data, report=None):
"""
Standardizes everything in data (along axis -1).
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# First make dim 1 = time.
data = np.transpose(data, [2, 0, 1])
standardized_data = (data - data.mean(axis=0)) / data.std(axis=0)
standardized_data = np.transpose(standardized_data, [1, 2, 0])
report['standardize'] = {}
report['standardize']['performed'] = 'yes'
report['standardize']['method'] = 'Z-score'
# The above makes self connections to nan, set to 1.
data = set_diagonal(data, 1)
return standardized_data, report | [
"def",
"postpro_standardize",
"(",
"data",
",",
"report",
"=",
"None",
")",
":",
"if",
"not",
"report",
":",
"report",
"=",
"{",
"}",
"# First make dim 1 = time.",
"data",
"=",
"np",
".",
"transpose",
"(",
"data",
",",
"[",
"2",
",",
"0",
",",
"1",
"]",
")",
"standardized_data",
"=",
"(",
"data",
"-",
"data",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
")",
"/",
"data",
".",
"std",
"(",
"axis",
"=",
"0",
")",
"standardized_data",
"=",
"np",
".",
"transpose",
"(",
"standardized_data",
",",
"[",
"1",
",",
"2",
",",
"0",
"]",
")",
"report",
"[",
"'standardize'",
"]",
"=",
"{",
"}",
"report",
"[",
"'standardize'",
"]",
"[",
"'performed'",
"]",
"=",
"'yes'",
"report",
"[",
"'standardize'",
"]",
"[",
"'method'",
"]",
"=",
"'Z-score'",
"# The above makes self connections to nan, set to 1.",
"data",
"=",
"set_diagonal",
"(",
"data",
",",
"1",
")",
"return",
"standardized_data",
",",
"report"
] | 36.666667 | 13.111111 |
def _PrintProcessingTime(self, processing_status):
"""Prints the processing time.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
processing_time = '00:00:00'
else:
processing_time = time.time() - processing_status.start_time
time_struct = time.gmtime(processing_time)
processing_time = time.strftime('%H:%M:%S', time_struct)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time)) | [
"def",
"_PrintProcessingTime",
"(",
"self",
",",
"processing_status",
")",
":",
"if",
"not",
"processing_status",
":",
"processing_time",
"=",
"'00:00:00'",
"else",
":",
"processing_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"processing_status",
".",
"start_time",
"time_struct",
"=",
"time",
".",
"gmtime",
"(",
"processing_time",
")",
"processing_time",
"=",
"time",
".",
"strftime",
"(",
"'%H:%M:%S'",
",",
"time_struct",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"'Processing time\\t\\t: {0:s}\\n'",
".",
"format",
"(",
"processing_time",
")",
")"
] | 33.533333 | 18.666667 |
def set_date_bounds(self, date):
'''
Pass in the date used in the original query.
:param date: Date (date range) that was queried:
date -> 'd', '~d', 'd~', 'd~d'
d -> '%Y-%m-%d %H:%M:%S,%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d'
'''
if date is not None:
split = date.split('~')
if len(split) == 1:
self._lbound = ts2dt(date)
self._rbound = ts2dt(date)
elif len(split) == 2:
if split[0] != '':
self._lbound = ts2dt(split[0])
if split[1] != '':
self._rbound = ts2dt(split[1])
else:
raise Exception('Date %s is not in the correct format' % date) | [
"def",
"set_date_bounds",
"(",
"self",
",",
"date",
")",
":",
"if",
"date",
"is",
"not",
"None",
":",
"split",
"=",
"date",
".",
"split",
"(",
"'~'",
")",
"if",
"len",
"(",
"split",
")",
"==",
"1",
":",
"self",
".",
"_lbound",
"=",
"ts2dt",
"(",
"date",
")",
"self",
".",
"_rbound",
"=",
"ts2dt",
"(",
"date",
")",
"elif",
"len",
"(",
"split",
")",
"==",
"2",
":",
"if",
"split",
"[",
"0",
"]",
"!=",
"''",
":",
"self",
".",
"_lbound",
"=",
"ts2dt",
"(",
"split",
"[",
"0",
"]",
")",
"if",
"split",
"[",
"1",
"]",
"!=",
"''",
":",
"self",
".",
"_rbound",
"=",
"ts2dt",
"(",
"split",
"[",
"1",
"]",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Date %s is not in the correct format'",
"%",
"date",
")"
] | 37.5 | 14.9 |
def create_pipfile(self, python=None):
"""Creates the Pipfile, filled with juicy defaults."""
from .vendor.pip_shims.shims import (
ConfigOptionParser, make_option_group, index_group
)
config_parser = ConfigOptionParser(name=self.name)
config_parser.add_option_group(make_option_group(index_group, config_parser))
install = config_parser.option_groups[0]
indexes = (
" ".join(install.get_option("--extra-index-url").default)
.lstrip("\n")
.split("\n")
)
sources = [DEFAULT_SOURCE,]
for i, index in enumerate(indexes):
if not index:
continue
source_name = "pip_index_{}".format(i)
verify_ssl = index.startswith("https")
sources.append(
{u"url": index, u"verify_ssl": verify_ssl, u"name": source_name}
)
data = {
u"source": sources,
# Default packages.
u"packages": {},
u"dev-packages": {},
}
# Default requires.
required_python = python
if not python:
if self.virtualenv_location:
required_python = self.which("python", self.virtualenv_location)
else:
required_python = self.which("python")
version = python_version(required_python) or PIPENV_DEFAULT_PYTHON_VERSION
if version and len(version) >= 3:
data[u"requires"] = {"python_version": version[: len("2.7")]}
self.write_toml(data) | [
"def",
"create_pipfile",
"(",
"self",
",",
"python",
"=",
"None",
")",
":",
"from",
".",
"vendor",
".",
"pip_shims",
".",
"shims",
"import",
"(",
"ConfigOptionParser",
",",
"make_option_group",
",",
"index_group",
")",
"config_parser",
"=",
"ConfigOptionParser",
"(",
"name",
"=",
"self",
".",
"name",
")",
"config_parser",
".",
"add_option_group",
"(",
"make_option_group",
"(",
"index_group",
",",
"config_parser",
")",
")",
"install",
"=",
"config_parser",
".",
"option_groups",
"[",
"0",
"]",
"indexes",
"=",
"(",
"\" \"",
".",
"join",
"(",
"install",
".",
"get_option",
"(",
"\"--extra-index-url\"",
")",
".",
"default",
")",
".",
"lstrip",
"(",
"\"\\n\"",
")",
".",
"split",
"(",
"\"\\n\"",
")",
")",
"sources",
"=",
"[",
"DEFAULT_SOURCE",
",",
"]",
"for",
"i",
",",
"index",
"in",
"enumerate",
"(",
"indexes",
")",
":",
"if",
"not",
"index",
":",
"continue",
"source_name",
"=",
"\"pip_index_{}\"",
".",
"format",
"(",
"i",
")",
"verify_ssl",
"=",
"index",
".",
"startswith",
"(",
"\"https\"",
")",
"sources",
".",
"append",
"(",
"{",
"u\"url\"",
":",
"index",
",",
"u\"verify_ssl\"",
":",
"verify_ssl",
",",
"u\"name\"",
":",
"source_name",
"}",
")",
"data",
"=",
"{",
"u\"source\"",
":",
"sources",
",",
"# Default packages.",
"u\"packages\"",
":",
"{",
"}",
",",
"u\"dev-packages\"",
":",
"{",
"}",
",",
"}",
"# Default requires.",
"required_python",
"=",
"python",
"if",
"not",
"python",
":",
"if",
"self",
".",
"virtualenv_location",
":",
"required_python",
"=",
"self",
".",
"which",
"(",
"\"python\"",
",",
"self",
".",
"virtualenv_location",
")",
"else",
":",
"required_python",
"=",
"self",
".",
"which",
"(",
"\"python\"",
")",
"version",
"=",
"python_version",
"(",
"required_python",
")",
"or",
"PIPENV_DEFAULT_PYTHON_VERSION",
"if",
"version",
"and",
"len",
"(",
"version",
")",
">=",
"3",
":",
"data",
"[",
"u\"requires\"",
"]",
"=",
"{",
"\"python_version\"",
":",
"version",
"[",
":",
"len",
"(",
"\"2.7\"",
")",
"]",
"}",
"self",
".",
"write_toml",
"(",
"data",
")"
] | 36.761905 | 19 |
def has_methods(*method_names):
"""Return a test function that, when given an object (class or an
instance), returns ``True`` if that object has all of the (regular) methods
in ``method_names``. Note: this is testing for regular methods only and the
test function will correctly return ``False`` if an instance has one of the
specified methods as a classmethod or a staticmethod. However, it will
incorrectly return ``True`` (false positives) for classmethods and
staticmethods on a *class*.
"""
def test(obj):
for method_name in method_names:
try:
method = getattr(obj, method_name)
except AttributeError:
return False
else:
if not callable(method):
return False
if not isinstance(obj, type):
try:
# An instance method is a method type with a __self__
# attribute that references the instance.
if method.__self__ is not obj:
return False
except AttributeError:
return False
return True
return test | [
"def",
"has_methods",
"(",
"*",
"method_names",
")",
":",
"def",
"test",
"(",
"obj",
")",
":",
"for",
"method_name",
"in",
"method_names",
":",
"try",
":",
"method",
"=",
"getattr",
"(",
"obj",
",",
"method_name",
")",
"except",
"AttributeError",
":",
"return",
"False",
"else",
":",
"if",
"not",
"callable",
"(",
"method",
")",
":",
"return",
"False",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"type",
")",
":",
"try",
":",
"# An instance method is a method type with a __self__",
"# attribute that references the instance.",
"if",
"method",
".",
"__self__",
"is",
"not",
"obj",
":",
"return",
"False",
"except",
"AttributeError",
":",
"return",
"False",
"return",
"True",
"return",
"test"
] | 40.366667 | 17.766667 |
def update_token(self, token, secret):
"""Update token with new values.
:param token: The token value.
:param secret: The secret key.
"""
if self.access_token != token or self.secret != secret:
with db.session.begin_nested():
self.access_token = token
self.secret = secret
db.session.add(self) | [
"def",
"update_token",
"(",
"self",
",",
"token",
",",
"secret",
")",
":",
"if",
"self",
".",
"access_token",
"!=",
"token",
"or",
"self",
".",
"secret",
"!=",
"secret",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"self",
".",
"access_token",
"=",
"token",
"self",
".",
"secret",
"=",
"secret",
"db",
".",
"session",
".",
"add",
"(",
"self",
")"
] | 34.909091 | 7.363636 |
def whiten(self, segment_duration, max_filter_duration, trunc_method='hann',
remove_corrupted=True, low_frequency_cutoff=None,
return_psd=False, **kwds):
""" Return a whitened time series
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
max_filter_duration : int
Maximum length of the time-domain filter in seconds.
trunc_method : {None, 'hann'}
Function used for truncating the time-domain filter.
None produces a hard truncation at `max_filter_len`.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the whitening
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
low_frequency_cutoff : {None, float}
Low frequency cutoff to pass to the inverse spectrum truncation.
This should be matched to a known low frequency cutoff of the
data if there is one.
return_psd : {False, Boolean}
Return the estimated and conditioned PSD that was used to whiten
the data.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
whitened_data : TimeSeries
The whitened time series
"""
from pycbc.psd import inverse_spectrum_truncation, interpolate
# Estimate the noise spectrum
psd = self.psd(segment_duration, **kwds)
psd = interpolate(psd, self.delta_f)
max_filter_len = int(max_filter_duration * self.sample_rate)
# Interpolate and smooth to the desired corruption length
psd = inverse_spectrum_truncation(psd,
max_filter_len=max_filter_len,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method=trunc_method)
# Whiten the data by the asd
white = (self.to_frequencyseries() / psd**0.5).to_timeseries()
if remove_corrupted:
white = white[int(max_filter_len/2):int(len(self)-max_filter_len/2)]
if return_psd:
return white, psd
return white | [
"def",
"whiten",
"(",
"self",
",",
"segment_duration",
",",
"max_filter_duration",
",",
"trunc_method",
"=",
"'hann'",
",",
"remove_corrupted",
"=",
"True",
",",
"low_frequency_cutoff",
"=",
"None",
",",
"return_psd",
"=",
"False",
",",
"*",
"*",
"kwds",
")",
":",
"from",
"pycbc",
".",
"psd",
"import",
"inverse_spectrum_truncation",
",",
"interpolate",
"# Estimate the noise spectrum",
"psd",
"=",
"self",
".",
"psd",
"(",
"segment_duration",
",",
"*",
"*",
"kwds",
")",
"psd",
"=",
"interpolate",
"(",
"psd",
",",
"self",
".",
"delta_f",
")",
"max_filter_len",
"=",
"int",
"(",
"max_filter_duration",
"*",
"self",
".",
"sample_rate",
")",
"# Interpolate and smooth to the desired corruption length",
"psd",
"=",
"inverse_spectrum_truncation",
"(",
"psd",
",",
"max_filter_len",
"=",
"max_filter_len",
",",
"low_frequency_cutoff",
"=",
"low_frequency_cutoff",
",",
"trunc_method",
"=",
"trunc_method",
")",
"# Whiten the data by the asd",
"white",
"=",
"(",
"self",
".",
"to_frequencyseries",
"(",
")",
"/",
"psd",
"**",
"0.5",
")",
".",
"to_timeseries",
"(",
")",
"if",
"remove_corrupted",
":",
"white",
"=",
"white",
"[",
"int",
"(",
"max_filter_len",
"/",
"2",
")",
":",
"int",
"(",
"len",
"(",
"self",
")",
"-",
"max_filter_len",
"/",
"2",
")",
"]",
"if",
"return_psd",
":",
"return",
"white",
",",
"psd",
"return",
"white"
] | 41.618182 | 20.927273 |
def _create_transmissions(self, content_metadata_item_map):
"""
Create ContentMetadataItemTransmision models for the given content metadata items.
"""
# pylint: disable=invalid-name
ContentMetadataItemTransmission = apps.get_model(
'integrated_channel',
'ContentMetadataItemTransmission'
)
transmissions = []
for content_id, channel_metadata in content_metadata_item_map.items():
transmissions.append(
ContentMetadataItemTransmission(
enterprise_customer=self.enterprise_configuration.enterprise_customer,
integrated_channel_code=self.enterprise_configuration.channel_code(),
content_id=content_id,
channel_metadata=channel_metadata
)
)
ContentMetadataItemTransmission.objects.bulk_create(transmissions) | [
"def",
"_create_transmissions",
"(",
"self",
",",
"content_metadata_item_map",
")",
":",
"# pylint: disable=invalid-name",
"ContentMetadataItemTransmission",
"=",
"apps",
".",
"get_model",
"(",
"'integrated_channel'",
",",
"'ContentMetadataItemTransmission'",
")",
"transmissions",
"=",
"[",
"]",
"for",
"content_id",
",",
"channel_metadata",
"in",
"content_metadata_item_map",
".",
"items",
"(",
")",
":",
"transmissions",
".",
"append",
"(",
"ContentMetadataItemTransmission",
"(",
"enterprise_customer",
"=",
"self",
".",
"enterprise_configuration",
".",
"enterprise_customer",
",",
"integrated_channel_code",
"=",
"self",
".",
"enterprise_configuration",
".",
"channel_code",
"(",
")",
",",
"content_id",
"=",
"content_id",
",",
"channel_metadata",
"=",
"channel_metadata",
")",
")",
"ContentMetadataItemTransmission",
".",
"objects",
".",
"bulk_create",
"(",
"transmissions",
")"
] | 45.8 | 19.8 |
def apply_getters(self, task):
"""
This function is called when we specify the task dependencies with the syntax:
deps={node: "@property"}
In this case the task has to the get `property` from `node` before starting the calculation.
At present, the following properties are supported:
- @structure
"""
if not self.getters: return
for getter in self.getters:
if getter == "@structure":
task.history.info("Getting structure from %s" % self.node)
new_structure = self.node.get_final_structure()
task._change_structure(new_structure)
else:
raise ValueError("Wrong getter %s" % getter) | [
"def",
"apply_getters",
"(",
"self",
",",
"task",
")",
":",
"if",
"not",
"self",
".",
"getters",
":",
"return",
"for",
"getter",
"in",
"self",
".",
"getters",
":",
"if",
"getter",
"==",
"\"@structure\"",
":",
"task",
".",
"history",
".",
"info",
"(",
"\"Getting structure from %s\"",
"%",
"self",
".",
"node",
")",
"new_structure",
"=",
"self",
".",
"node",
".",
"get_final_structure",
"(",
")",
"task",
".",
"_change_structure",
"(",
"new_structure",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Wrong getter %s\"",
"%",
"getter",
")"
] | 34.857143 | 22.857143 |
def quotes_by_instrument_urls(cls, client, urls):
"""
fetch and return results
"""
instruments = ",".join(urls)
params = {"instruments": instruments}
url = "https://api.robinhood.com/marketdata/quotes/"
data = client.get(url, params=params)
results = data["results"]
while "next" in data and data["next"]:
data = client.get(data["next"])
results.extend(data["results"])
return results | [
"def",
"quotes_by_instrument_urls",
"(",
"cls",
",",
"client",
",",
"urls",
")",
":",
"instruments",
"=",
"\",\"",
".",
"join",
"(",
"urls",
")",
"params",
"=",
"{",
"\"instruments\"",
":",
"instruments",
"}",
"url",
"=",
"\"https://api.robinhood.com/marketdata/quotes/\"",
"data",
"=",
"client",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
")",
"results",
"=",
"data",
"[",
"\"results\"",
"]",
"while",
"\"next\"",
"in",
"data",
"and",
"data",
"[",
"\"next\"",
"]",
":",
"data",
"=",
"client",
".",
"get",
"(",
"data",
"[",
"\"next\"",
"]",
")",
"results",
".",
"extend",
"(",
"data",
"[",
"\"results\"",
"]",
")",
"return",
"results"
] | 36.615385 | 6.769231 |
def get_vulnerability_functions_05(node, fname):
"""
:param node:
a vulnerabilityModel node
:param fname:
path of the vulnerability filter
:returns:
a dictionary imt, vf_id -> vulnerability function
"""
# NB: the IMTs can be duplicated and with different levels, each
# vulnerability function in a set will get its own levels
vf_ids = set()
vmodel = scientific.VulnerabilityModel(**node.attrib)
# imt, vf_id -> vulnerability function
for vfun in node.getnodes('vulnerabilityFunction'):
with context(fname, vfun):
imt = vfun.imls['imt']
imls = numpy.array(~vfun.imls)
vf_id = vfun['id']
if vf_id in vf_ids:
raise InvalidFile(
'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
(vf_id, fname, vfun.lineno))
vf_ids.add(vf_id)
num_probs = None
if vfun['dist'] == 'PM':
loss_ratios, probs = [], []
for probabilities in vfun[1:]:
loss_ratios.append(probabilities['lr'])
probs.append(valid.probabilities(~probabilities))
if num_probs is None:
num_probs = len(probs[-1])
elif len(probs[-1]) != num_probs:
raise ValueError(
'Wrong number of probabilities (expected %d, '
'got %d) in %s, line %d' %
(num_probs, len(probs[-1]), fname,
probabilities.lineno))
all_probs = numpy.array(probs)
assert all_probs.shape == (len(loss_ratios), len(imls)), (
len(loss_ratios), len(imls))
vmodel[imt, vf_id] = (
scientific.VulnerabilityFunctionWithPMF(
vf_id, imt, imls, numpy.array(loss_ratios),
all_probs))
# the seed will be set by readinput.get_risk_model
else:
with context(fname, vfun):
loss_ratios = ~vfun.meanLRs
coefficients = ~vfun.covLRs
if len(loss_ratios) != len(imls):
raise InvalidFile(
'There are %d loss ratios, but %d imls: %s, line %d' %
(len(loss_ratios), len(imls), fname,
vfun.meanLRs.lineno))
if len(coefficients) != len(imls):
raise InvalidFile(
'There are %d coefficients, but %d imls: %s, '
'line %d' % (len(coefficients), len(imls), fname,
vfun.covLRs.lineno))
with context(fname, vfun):
vmodel[imt, vf_id] = scientific.VulnerabilityFunction(
vf_id, imt, imls, loss_ratios, coefficients,
vfun['dist'])
return vmodel | [
"def",
"get_vulnerability_functions_05",
"(",
"node",
",",
"fname",
")",
":",
"# NB: the IMTs can be duplicated and with different levels, each",
"# vulnerability function in a set will get its own levels",
"vf_ids",
"=",
"set",
"(",
")",
"vmodel",
"=",
"scientific",
".",
"VulnerabilityModel",
"(",
"*",
"*",
"node",
".",
"attrib",
")",
"# imt, vf_id -> vulnerability function",
"for",
"vfun",
"in",
"node",
".",
"getnodes",
"(",
"'vulnerabilityFunction'",
")",
":",
"with",
"context",
"(",
"fname",
",",
"vfun",
")",
":",
"imt",
"=",
"vfun",
".",
"imls",
"[",
"'imt'",
"]",
"imls",
"=",
"numpy",
".",
"array",
"(",
"~",
"vfun",
".",
"imls",
")",
"vf_id",
"=",
"vfun",
"[",
"'id'",
"]",
"if",
"vf_id",
"in",
"vf_ids",
":",
"raise",
"InvalidFile",
"(",
"'Duplicated vulnerabilityFunctionID: %s: %s, line %d'",
"%",
"(",
"vf_id",
",",
"fname",
",",
"vfun",
".",
"lineno",
")",
")",
"vf_ids",
".",
"add",
"(",
"vf_id",
")",
"num_probs",
"=",
"None",
"if",
"vfun",
"[",
"'dist'",
"]",
"==",
"'PM'",
":",
"loss_ratios",
",",
"probs",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"probabilities",
"in",
"vfun",
"[",
"1",
":",
"]",
":",
"loss_ratios",
".",
"append",
"(",
"probabilities",
"[",
"'lr'",
"]",
")",
"probs",
".",
"append",
"(",
"valid",
".",
"probabilities",
"(",
"~",
"probabilities",
")",
")",
"if",
"num_probs",
"is",
"None",
":",
"num_probs",
"=",
"len",
"(",
"probs",
"[",
"-",
"1",
"]",
")",
"elif",
"len",
"(",
"probs",
"[",
"-",
"1",
"]",
")",
"!=",
"num_probs",
":",
"raise",
"ValueError",
"(",
"'Wrong number of probabilities (expected %d, '",
"'got %d) in %s, line %d'",
"%",
"(",
"num_probs",
",",
"len",
"(",
"probs",
"[",
"-",
"1",
"]",
")",
",",
"fname",
",",
"probabilities",
".",
"lineno",
")",
")",
"all_probs",
"=",
"numpy",
".",
"array",
"(",
"probs",
")",
"assert",
"all_probs",
".",
"shape",
"==",
"(",
"len",
"(",
"loss_ratios",
")",
",",
"len",
"(",
"imls",
")",
")",
",",
"(",
"len",
"(",
"loss_ratios",
")",
",",
"len",
"(",
"imls",
")",
")",
"vmodel",
"[",
"imt",
",",
"vf_id",
"]",
"=",
"(",
"scientific",
".",
"VulnerabilityFunctionWithPMF",
"(",
"vf_id",
",",
"imt",
",",
"imls",
",",
"numpy",
".",
"array",
"(",
"loss_ratios",
")",
",",
"all_probs",
")",
")",
"# the seed will be set by readinput.get_risk_model",
"else",
":",
"with",
"context",
"(",
"fname",
",",
"vfun",
")",
":",
"loss_ratios",
"=",
"~",
"vfun",
".",
"meanLRs",
"coefficients",
"=",
"~",
"vfun",
".",
"covLRs",
"if",
"len",
"(",
"loss_ratios",
")",
"!=",
"len",
"(",
"imls",
")",
":",
"raise",
"InvalidFile",
"(",
"'There are %d loss ratios, but %d imls: %s, line %d'",
"%",
"(",
"len",
"(",
"loss_ratios",
")",
",",
"len",
"(",
"imls",
")",
",",
"fname",
",",
"vfun",
".",
"meanLRs",
".",
"lineno",
")",
")",
"if",
"len",
"(",
"coefficients",
")",
"!=",
"len",
"(",
"imls",
")",
":",
"raise",
"InvalidFile",
"(",
"'There are %d coefficients, but %d imls: %s, '",
"'line %d'",
"%",
"(",
"len",
"(",
"coefficients",
")",
",",
"len",
"(",
"imls",
")",
",",
"fname",
",",
"vfun",
".",
"covLRs",
".",
"lineno",
")",
")",
"with",
"context",
"(",
"fname",
",",
"vfun",
")",
":",
"vmodel",
"[",
"imt",
",",
"vf_id",
"]",
"=",
"scientific",
".",
"VulnerabilityFunction",
"(",
"vf_id",
",",
"imt",
",",
"imls",
",",
"loss_ratios",
",",
"coefficients",
",",
"vfun",
"[",
"'dist'",
"]",
")",
"return",
"vmodel"
] | 43.184615 | 12.876923 |
def _layout(self, node):
"""ETE calls this function to style each node before rendering.
- ETE terms:
- A Style is a specification for how to render the node itself
- A Face defines extra information that is rendered outside of the node
- Face objects are used here to provide more control on how to draw the nodes.
"""
def set_edge_style():
"""Set the style for edges and make the node invisible."""
node_style = ete3.NodeStyle()
node_style["vt_line_color"] = EDGE_COLOR
node_style["hz_line_color"] = EDGE_COLOR
node_style["vt_line_width"] = EDGE_WIDTH
node_style["hz_line_width"] = EDGE_WIDTH
node_style["size"] = 0
node.set_style(node_style)
def style_subject_node(color="Black"):
"""Specify the appearance of Subject nodes."""
face = ete3.TextFace(node.name, fsize=SUBJECT_NODE_FONT_SIZE, fgcolor=color)
set_face_margin(face)
node.add_face(face, column=0, position="branch-right")
def style_type_node(color="Black"):
"""Specify the appearance of Type nodes."""
face = ete3.CircleFace(
radius=TYPE_NODE_RADIUS,
color=TYPE_NODE_COLOR_DICT.get(node.name, "White"),
style="circle",
label={
"text": node.name,
"color": color,
"fontsize": (
TYPE_NODE_FONT_SIZE_FILE
if self._render_type == "file"
else TYPE_NODE_FONT_SIZE_BROWSE
),
},
)
set_face_margin(face)
node.add_face(face, column=0, position="branch-right")
def set_face_margin(face):
"""Add margins to Face object.
- Add space between inner_border and border on TextFace.
- Add space outside bounding area of CircleFace.
"""
face.margin_left = 5
face.margin_right = 5
# face.margin_top = 5
# face.margin_bottom = 5
set_edge_style()
if hasattr(node, SUBJECT_NODE_TAG):
style_subject_node()
elif hasattr(node, TYPE_NODE_TAG):
style_type_node()
else:
raise AssertionError("Unknown node type") | [
"def",
"_layout",
"(",
"self",
",",
"node",
")",
":",
"def",
"set_edge_style",
"(",
")",
":",
"\"\"\"Set the style for edges and make the node invisible.\"\"\"",
"node_style",
"=",
"ete3",
".",
"NodeStyle",
"(",
")",
"node_style",
"[",
"\"vt_line_color\"",
"]",
"=",
"EDGE_COLOR",
"node_style",
"[",
"\"hz_line_color\"",
"]",
"=",
"EDGE_COLOR",
"node_style",
"[",
"\"vt_line_width\"",
"]",
"=",
"EDGE_WIDTH",
"node_style",
"[",
"\"hz_line_width\"",
"]",
"=",
"EDGE_WIDTH",
"node_style",
"[",
"\"size\"",
"]",
"=",
"0",
"node",
".",
"set_style",
"(",
"node_style",
")",
"def",
"style_subject_node",
"(",
"color",
"=",
"\"Black\"",
")",
":",
"\"\"\"Specify the appearance of Subject nodes.\"\"\"",
"face",
"=",
"ete3",
".",
"TextFace",
"(",
"node",
".",
"name",
",",
"fsize",
"=",
"SUBJECT_NODE_FONT_SIZE",
",",
"fgcolor",
"=",
"color",
")",
"set_face_margin",
"(",
"face",
")",
"node",
".",
"add_face",
"(",
"face",
",",
"column",
"=",
"0",
",",
"position",
"=",
"\"branch-right\"",
")",
"def",
"style_type_node",
"(",
"color",
"=",
"\"Black\"",
")",
":",
"\"\"\"Specify the appearance of Type nodes.\"\"\"",
"face",
"=",
"ete3",
".",
"CircleFace",
"(",
"radius",
"=",
"TYPE_NODE_RADIUS",
",",
"color",
"=",
"TYPE_NODE_COLOR_DICT",
".",
"get",
"(",
"node",
".",
"name",
",",
"\"White\"",
")",
",",
"style",
"=",
"\"circle\"",
",",
"label",
"=",
"{",
"\"text\"",
":",
"node",
".",
"name",
",",
"\"color\"",
":",
"color",
",",
"\"fontsize\"",
":",
"(",
"TYPE_NODE_FONT_SIZE_FILE",
"if",
"self",
".",
"_render_type",
"==",
"\"file\"",
"else",
"TYPE_NODE_FONT_SIZE_BROWSE",
")",
",",
"}",
",",
")",
"set_face_margin",
"(",
"face",
")",
"node",
".",
"add_face",
"(",
"face",
",",
"column",
"=",
"0",
",",
"position",
"=",
"\"branch-right\"",
")",
"def",
"set_face_margin",
"(",
"face",
")",
":",
"\"\"\"Add margins to Face object.\n\n - Add space between inner_border and border on TextFace.\n - Add space outside bounding area of CircleFace.\n\n \"\"\"",
"face",
".",
"margin_left",
"=",
"5",
"face",
".",
"margin_right",
"=",
"5",
"# face.margin_top = 5",
"# face.margin_bottom = 5",
"set_edge_style",
"(",
")",
"if",
"hasattr",
"(",
"node",
",",
"SUBJECT_NODE_TAG",
")",
":",
"style_subject_node",
"(",
")",
"elif",
"hasattr",
"(",
"node",
",",
"TYPE_NODE_TAG",
")",
":",
"style_type_node",
"(",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"\"Unknown node type\"",
")"
] | 36.461538 | 16.584615 |
def fit(self, X, y, sample_weight=None):
"""
Fit a binary classifier with sample weights to data.
Note
----
Examples at each sample are accepted with probability = weight/Z,
where Z = max(weight) + extra_rej_const.
Larger values for extra_rej_const ensure that no example gets selected in
every single sample, but results in smaller sample sizes as more examples are rejected.
Parameters
----------
X : array (n_samples, n_features)
Data on which to fit the model.
y : array (n_samples,) or (n_samples, 1)
Class of each observation.
sample_weight : array (n_samples,) or (n_samples, 1)
Weights indicating how important is each observation in the loss function.
"""
assert self.extra_rej_const >= 0
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
else:
if isinstance(sample_weight, list):
sample_weight = np.array(sample_weight)
if len(sample_weight.shape):
sample_weight = sample_weight.reshape(-1)
assert sample_weight.shape[0] == X.shape[0]
assert sample_weight.min() > 0
Z = sample_weight.max() + self.extra_rej_const
sample_weight = sample_weight / Z # sample weight is now acceptance prob
self.classifiers = [deepcopy(self.base_classifier) for c in range(self.n_samples)]
### Note: don't parallelize random number generation, as it's not always thread-safe
take_all = np.random.random(size = (self.n_samples, X.shape[0]))
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._fit)(c, take_all, X, y, sample_weight) for c in range(self.n_samples))
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"sample_weight",
"=",
"None",
")",
":",
"assert",
"self",
".",
"extra_rej_const",
">=",
"0",
"if",
"sample_weight",
"is",
"None",
":",
"sample_weight",
"=",
"np",
".",
"ones",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
")",
"else",
":",
"if",
"isinstance",
"(",
"sample_weight",
",",
"list",
")",
":",
"sample_weight",
"=",
"np",
".",
"array",
"(",
"sample_weight",
")",
"if",
"len",
"(",
"sample_weight",
".",
"shape",
")",
":",
"sample_weight",
"=",
"sample_weight",
".",
"reshape",
"(",
"-",
"1",
")",
"assert",
"sample_weight",
".",
"shape",
"[",
"0",
"]",
"==",
"X",
".",
"shape",
"[",
"0",
"]",
"assert",
"sample_weight",
".",
"min",
"(",
")",
">",
"0",
"Z",
"=",
"sample_weight",
".",
"max",
"(",
")",
"+",
"self",
".",
"extra_rej_const",
"sample_weight",
"=",
"sample_weight",
"/",
"Z",
"# sample weight is now acceptance prob",
"self",
".",
"classifiers",
"=",
"[",
"deepcopy",
"(",
"self",
".",
"base_classifier",
")",
"for",
"c",
"in",
"range",
"(",
"self",
".",
"n_samples",
")",
"]",
"### Note: don't parallelize random number generation, as it's not always thread-safe",
"take_all",
"=",
"np",
".",
"random",
".",
"random",
"(",
"size",
"=",
"(",
"self",
".",
"n_samples",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
")",
"Parallel",
"(",
"n_jobs",
"=",
"self",
".",
"njobs",
",",
"verbose",
"=",
"0",
",",
"require",
"=",
"\"sharedmem\"",
")",
"(",
"delayed",
"(",
"self",
".",
"_fit",
")",
"(",
"c",
",",
"take_all",
",",
"X",
",",
"y",
",",
"sample_weight",
")",
"for",
"c",
"in",
"range",
"(",
"self",
".",
"n_samples",
")",
")",
"return",
"self"
] | 47.342105 | 22.289474 |
def segments_from_numpy(segments):
"""reverses segments_to_numpy"""
segments = segments if SEGMENTS_DIRECTION == 0 else segments.tranpose()
segments = [map(int, s) for s in segments]
return segments | [
"def",
"segments_from_numpy",
"(",
"segments",
")",
":",
"segments",
"=",
"segments",
"if",
"SEGMENTS_DIRECTION",
"==",
"0",
"else",
"segments",
".",
"tranpose",
"(",
")",
"segments",
"=",
"[",
"map",
"(",
"int",
",",
"s",
")",
"for",
"s",
"in",
"segments",
"]",
"return",
"segments"
] | 42 | 13.6 |
def items(self):
"""Get all the message's header fields and values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [(k, self.policy.header_fetch_parse(k, v))
for k, v in self._headers] | [
"def",
"items",
"(",
"self",
")",
":",
"return",
"[",
"(",
"k",
",",
"self",
".",
"policy",
".",
"header_fetch_parse",
"(",
"k",
",",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_headers",
"]"
] | 41.8 | 21.1 |
def feed_packets(self, binary_packets, linktype=LinkTypes.ETHERNET):
"""
Gets a list of binary packets, parses them using tshark and returns their parsed values.
Keeps the packets in the internal packet list as well.
By default, assumes the packets are ethernet packets. For another link type, supply the linktype argument (most
can be found in the class LinkTypes)
"""
self._current_linktype = linktype
parsed_packets = self.parse_packets(binary_packets)
self._packets.extend(parsed_packets)
self.close()
return parsed_packets | [
"def",
"feed_packets",
"(",
"self",
",",
"binary_packets",
",",
"linktype",
"=",
"LinkTypes",
".",
"ETHERNET",
")",
":",
"self",
".",
"_current_linktype",
"=",
"linktype",
"parsed_packets",
"=",
"self",
".",
"parse_packets",
"(",
"binary_packets",
")",
"self",
".",
"_packets",
".",
"extend",
"(",
"parsed_packets",
")",
"self",
".",
"close",
"(",
")",
"return",
"parsed_packets"
] | 46.461538 | 21.846154 |
def setSystemVariable(self, remote, name, value):
"""Set a system variable on CCU / Homegear"""
if self.remotes[remote]['username'] and self.remotes[remote]['password']:
LOG.debug(
"ServerThread.setSystemVariable: Setting System variable via JSON-RPC")
session = self.jsonRpcLogin(remote)
if not session:
return
try:
params = {"_session_id_": session,
"name": name, "value": value}
if value is True or value is False:
params['value'] = int(value)
response = self._rpcfunctions.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "SysVar.setBool", params)
else:
response = self._rpcfunctions.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "SysVar.setFloat", params)
if response['error'] is None and response['result']:
res = response['result']
LOG.debug(
"ServerThread.setSystemVariable: Result while setting variable: %s" % str(res))
else:
if response['error']:
LOG.debug("ServerThread.setSystemVariable: Error while setting variable: %s" % str(
response['error']))
self.jsonRpcLogout(remote, session)
except Exception as err:
self.jsonRpcLogout(remote, session)
LOG.warning(
"ServerThread.setSystemVariable: Exception: %s" % str(err))
else:
try:
return self.proxies["%s-%s" % (self._interface_id, remote)].setSystemVariable(name, value)
except Exception as err:
LOG.debug(
"ServerThread.setSystemVariable: Exception: %s" % str(err)) | [
"def",
"setSystemVariable",
"(",
"self",
",",
"remote",
",",
"name",
",",
"value",
")",
":",
"if",
"self",
".",
"remotes",
"[",
"remote",
"]",
"[",
"'username'",
"]",
"and",
"self",
".",
"remotes",
"[",
"remote",
"]",
"[",
"'password'",
"]",
":",
"LOG",
".",
"debug",
"(",
"\"ServerThread.setSystemVariable: Setting System variable via JSON-RPC\"",
")",
"session",
"=",
"self",
".",
"jsonRpcLogin",
"(",
"remote",
")",
"if",
"not",
"session",
":",
"return",
"try",
":",
"params",
"=",
"{",
"\"_session_id_\"",
":",
"session",
",",
"\"name\"",
":",
"name",
",",
"\"value\"",
":",
"value",
"}",
"if",
"value",
"is",
"True",
"or",
"value",
"is",
"False",
":",
"params",
"[",
"'value'",
"]",
"=",
"int",
"(",
"value",
")",
"response",
"=",
"self",
".",
"_rpcfunctions",
".",
"jsonRpcPost",
"(",
"self",
".",
"remotes",
"[",
"remote",
"]",
"[",
"'ip'",
"]",
",",
"self",
".",
"remotes",
"[",
"remote",
"]",
".",
"get",
"(",
"'jsonport'",
",",
"DEFAULT_JSONPORT",
")",
",",
"\"SysVar.setBool\"",
",",
"params",
")",
"else",
":",
"response",
"=",
"self",
".",
"_rpcfunctions",
".",
"jsonRpcPost",
"(",
"self",
".",
"remotes",
"[",
"remote",
"]",
"[",
"'ip'",
"]",
",",
"self",
".",
"remotes",
"[",
"remote",
"]",
".",
"get",
"(",
"'jsonport'",
",",
"DEFAULT_JSONPORT",
")",
",",
"\"SysVar.setFloat\"",
",",
"params",
")",
"if",
"response",
"[",
"'error'",
"]",
"is",
"None",
"and",
"response",
"[",
"'result'",
"]",
":",
"res",
"=",
"response",
"[",
"'result'",
"]",
"LOG",
".",
"debug",
"(",
"\"ServerThread.setSystemVariable: Result while setting variable: %s\"",
"%",
"str",
"(",
"res",
")",
")",
"else",
":",
"if",
"response",
"[",
"'error'",
"]",
":",
"LOG",
".",
"debug",
"(",
"\"ServerThread.setSystemVariable: Error while setting variable: %s\"",
"%",
"str",
"(",
"response",
"[",
"'error'",
"]",
")",
")",
"self",
".",
"jsonRpcLogout",
"(",
"remote",
",",
"session",
")",
"except",
"Exception",
"as",
"err",
":",
"self",
".",
"jsonRpcLogout",
"(",
"remote",
",",
"session",
")",
"LOG",
".",
"warning",
"(",
"\"ServerThread.setSystemVariable: Exception: %s\"",
"%",
"str",
"(",
"err",
")",
")",
"else",
":",
"try",
":",
"return",
"self",
".",
"proxies",
"[",
"\"%s-%s\"",
"%",
"(",
"self",
".",
"_interface_id",
",",
"remote",
")",
"]",
".",
"setSystemVariable",
"(",
"name",
",",
"value",
")",
"except",
"Exception",
"as",
"err",
":",
"LOG",
".",
"debug",
"(",
"\"ServerThread.setSystemVariable: Exception: %s\"",
"%",
"str",
"(",
"err",
")",
")"
] | 52.684211 | 25.289474 |
def upload(cls, file_obj, store=None):
"""Uploads a file and returns ``File`` instance.
Args:
- file_obj: file object to upload to
- store (Optional[bool]): Should the file be automatically stored
upon upload. Defaults to None.
- False - do not store file
- True - store file (can result in error if autostore
is disabled for project)
- None - use project settings
Returns:
``File`` instance
"""
if store is None:
store = 'auto'
elif store:
store = '1'
else:
store = '0'
data = {
'UPLOADCARE_STORE': store,
}
files = uploading_request('POST', 'base/', data=data,
files={'file': file_obj})
file_ = cls(files['file'])
return file_ | [
"def",
"upload",
"(",
"cls",
",",
"file_obj",
",",
"store",
"=",
"None",
")",
":",
"if",
"store",
"is",
"None",
":",
"store",
"=",
"'auto'",
"elif",
"store",
":",
"store",
"=",
"'1'",
"else",
":",
"store",
"=",
"'0'",
"data",
"=",
"{",
"'UPLOADCARE_STORE'",
":",
"store",
",",
"}",
"files",
"=",
"uploading_request",
"(",
"'POST'",
",",
"'base/'",
",",
"data",
"=",
"data",
",",
"files",
"=",
"{",
"'file'",
":",
"file_obj",
"}",
")",
"file_",
"=",
"cls",
"(",
"files",
"[",
"'file'",
"]",
")",
"return",
"file_"
] | 29.419355 | 19.387097 |
def runSwarm(self, workingDirPath):
"""
Runs a swarm with data within a working directory. This assumes that the
user has already run prepareSwarm().
:param workingDirPath: absolute or relative path to working directory
"""
if not os.path.exists(workingDirPath):
raise Exception("Working directory %s does not exist!" % workingDirPath)
banner("RUNNING SWARM")
self._modelParams = swarm(workingDirPath) | [
"def",
"runSwarm",
"(",
"self",
",",
"workingDirPath",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"workingDirPath",
")",
":",
"raise",
"Exception",
"(",
"\"Working directory %s does not exist!\"",
"%",
"workingDirPath",
")",
"banner",
"(",
"\"RUNNING SWARM\"",
")",
"self",
".",
"_modelParams",
"=",
"swarm",
"(",
"workingDirPath",
")"
] | 43.1 | 13.3 |
def setDateTimeStart(self, dtime):
"""
Sets the starting date time for this gantt chart.
:param dtime | <QDateTime>
"""
self._dateStart = dtime.date()
self._timeStart = dtime.time()
self._allDay = False | [
"def",
"setDateTimeStart",
"(",
"self",
",",
"dtime",
")",
":",
"self",
".",
"_dateStart",
"=",
"dtime",
".",
"date",
"(",
")",
"self",
".",
"_timeStart",
"=",
"dtime",
".",
"time",
"(",
")",
"self",
".",
"_allDay",
"=",
"False"
] | 30.222222 | 7.555556 |
def from_connections(cls, caption, connections):
"""Create a new Data Source give a list of Connections."""
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root) | [
"def",
"from_connections",
"(",
"cls",
",",
"caption",
",",
"connections",
")",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"'datasource'",
",",
"caption",
"=",
"caption",
",",
"version",
"=",
"'10.0'",
",",
"inline",
"=",
"'true'",
")",
"outer_connection",
"=",
"ET",
".",
"SubElement",
"(",
"root",
",",
"'connection'",
")",
"outer_connection",
".",
"set",
"(",
"'class'",
",",
"'federated'",
")",
"named_conns",
"=",
"ET",
".",
"SubElement",
"(",
"outer_connection",
",",
"'named-connections'",
")",
"for",
"conn",
"in",
"connections",
":",
"nc",
"=",
"ET",
".",
"SubElement",
"(",
"named_conns",
",",
"'named-connection'",
",",
"name",
"=",
"_make_unique_name",
"(",
"conn",
".",
"dbclass",
")",
",",
"caption",
"=",
"conn",
".",
"server",
")",
"nc",
".",
"append",
"(",
"conn",
".",
"_connectionXML",
")",
"return",
"cls",
"(",
"root",
")"
] | 49.642857 | 16.928571 |
def _gcs_get_key_names(bucket, pattern):
""" Get names of all Google Cloud Storage keys in a specified bucket that match a pattern. """
return [obj.metadata.name for obj in _gcs_get_keys(bucket, pattern)] | [
"def",
"_gcs_get_key_names",
"(",
"bucket",
",",
"pattern",
")",
":",
"return",
"[",
"obj",
".",
"metadata",
".",
"name",
"for",
"obj",
"in",
"_gcs_get_keys",
"(",
"bucket",
",",
"pattern",
")",
"]"
] | 68.666667 | 10 |
def getLogicalLines(fp, allowQP=True, findBegin=False):
"""
Iterate through a stream, yielding one logical line at a time.
Because many applications still use vCard 2.1, we have to deal with the
quoted-printable encoding for long lines, as well as the vCard 3.0 and
vCalendar line folding technique, a whitespace character at the start
of the line.
Quoted-printable data will be decoded in the Behavior decoding phase.
# We're leaving this test in for awhile, because the unittest was ugly and dumb.
>>> from six import StringIO
>>> f=StringIO(testLines)
>>> for n, l in enumerate(getLogicalLines(f)):
... print("Line %s: %s" % (n, l[0]))
...
Line 0: Line 0 text, Line 0 continued.
Line 1: Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2: Line 2 is a new line, it does not start with whitespace.
"""
if not allowQP:
val = fp.read(-1)
#Shouldn't need this anymore...
"""
if len(val) > 0:
if not findBegin:
val = val.decode('utf-8')
else:
for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':
try:
val = val.decode(encoding)
if begin_re.search(val) is not None:
break
except UnicodeDecodeError:
pass
else:
raise ParseError('Could not find BEGIN when trying to determine encoding')
"""
# strip off any UTF8 BOMs which Python's UTF8 decoder leaves
#val = val.lstrip( unicode( codecs.BOM_UTF8, "utf8" ) )
lineNumber = 1
for match in logical_lines_re.finditer(val):
line, n = wrap_re.subn('', match.group())
if line != '':
yield line, lineNumber
lineNumber += n
else:
quotedPrintable = False
newbuffer = six.StringIO
logicalLine = newbuffer()
lineNumber = 0
lineStartNumber = 0
while True:
line = fp.readline()
if line == '':
break
else:
line = line.rstrip(CRLF)
lineNumber += 1
if line.rstrip() == '':
if logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber
lineStartNumber = lineNumber
logicalLine = newbuffer()
quotedPrintable = False
continue
if quotedPrintable and allowQP:
logicalLine.write('\n')
logicalLine.write(line)
quotedPrintable = False
elif line[0] in SPACEORTAB:
logicalLine.write(line[1:])
elif logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber
lineStartNumber = lineNumber
logicalLine = newbuffer()
logicalLine.write(line)
else:
logicalLine = newbuffer()
logicalLine.write(line)
# vCard 2.1 allows parameters to be encoded without a parameter name.
# False positives are unlikely, but possible.
val = logicalLine.getvalue()
if val[-1]=='=' and val.lower().find('quoted-printable') >= 0:
quotedPrintable=True
if logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber | [
"def",
"getLogicalLines",
"(",
"fp",
",",
"allowQP",
"=",
"True",
",",
"findBegin",
"=",
"False",
")",
":",
"if",
"not",
"allowQP",
":",
"val",
"=",
"fp",
".",
"read",
"(",
"-",
"1",
")",
"#Shouldn't need this anymore...",
"\"\"\"\n if len(val) > 0:\n if not findBegin:\n val = val.decode('utf-8')\n else:\n for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':\n try:\n val = val.decode(encoding)\n if begin_re.search(val) is not None:\n break\n except UnicodeDecodeError:\n pass\n else:\n raise ParseError('Could not find BEGIN when trying to determine encoding')\n \"\"\"",
"# strip off any UTF8 BOMs which Python's UTF8 decoder leaves",
"#val = val.lstrip( unicode( codecs.BOM_UTF8, \"utf8\" ) )",
"lineNumber",
"=",
"1",
"for",
"match",
"in",
"logical_lines_re",
".",
"finditer",
"(",
"val",
")",
":",
"line",
",",
"n",
"=",
"wrap_re",
".",
"subn",
"(",
"''",
",",
"match",
".",
"group",
"(",
")",
")",
"if",
"line",
"!=",
"''",
":",
"yield",
"line",
",",
"lineNumber",
"lineNumber",
"+=",
"n",
"else",
":",
"quotedPrintable",
"=",
"False",
"newbuffer",
"=",
"six",
".",
"StringIO",
"logicalLine",
"=",
"newbuffer",
"(",
")",
"lineNumber",
"=",
"0",
"lineStartNumber",
"=",
"0",
"while",
"True",
":",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
"if",
"line",
"==",
"''",
":",
"break",
"else",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
"CRLF",
")",
"lineNumber",
"+=",
"1",
"if",
"line",
".",
"rstrip",
"(",
")",
"==",
"''",
":",
"if",
"logicalLine",
".",
"tell",
"(",
")",
">",
"0",
":",
"yield",
"logicalLine",
".",
"getvalue",
"(",
")",
",",
"lineStartNumber",
"lineStartNumber",
"=",
"lineNumber",
"logicalLine",
"=",
"newbuffer",
"(",
")",
"quotedPrintable",
"=",
"False",
"continue",
"if",
"quotedPrintable",
"and",
"allowQP",
":",
"logicalLine",
".",
"write",
"(",
"'\\n'",
")",
"logicalLine",
".",
"write",
"(",
"line",
")",
"quotedPrintable",
"=",
"False",
"elif",
"line",
"[",
"0",
"]",
"in",
"SPACEORTAB",
":",
"logicalLine",
".",
"write",
"(",
"line",
"[",
"1",
":",
"]",
")",
"elif",
"logicalLine",
".",
"tell",
"(",
")",
">",
"0",
":",
"yield",
"logicalLine",
".",
"getvalue",
"(",
")",
",",
"lineStartNumber",
"lineStartNumber",
"=",
"lineNumber",
"logicalLine",
"=",
"newbuffer",
"(",
")",
"logicalLine",
".",
"write",
"(",
"line",
")",
"else",
":",
"logicalLine",
"=",
"newbuffer",
"(",
")",
"logicalLine",
".",
"write",
"(",
"line",
")",
"# vCard 2.1 allows parameters to be encoded without a parameter name.",
"# False positives are unlikely, but possible.",
"val",
"=",
"logicalLine",
".",
"getvalue",
"(",
")",
"if",
"val",
"[",
"-",
"1",
"]",
"==",
"'='",
"and",
"val",
".",
"lower",
"(",
")",
".",
"find",
"(",
"'quoted-printable'",
")",
">=",
"0",
":",
"quotedPrintable",
"=",
"True",
"if",
"logicalLine",
".",
"tell",
"(",
")",
">",
"0",
":",
"yield",
"logicalLine",
".",
"getvalue",
"(",
")",
",",
"lineStartNumber"
] | 35.659794 | 16.793814 |
def get_divisions(self):
"""
Get the "current" division and return a dictionary of divisions
so the user can select the right one.
"""
ret = self.rest(GET('v1/current/Me?$select=CurrentDivision'))
current_division = ret[0]['CurrentDivision']
assert isinstance(current_division, int)
urlbase = 'v1/%d/' % (current_division,)
resource = urljoin(urlbase, 'hrm/Divisions?$select=Code,Description')
ret = self.rest(GET(resource))
choices = dict((i['Code'], i['Description']) for i in ret)
return choices, current_division | [
"def",
"get_divisions",
"(",
"self",
")",
":",
"ret",
"=",
"self",
".",
"rest",
"(",
"GET",
"(",
"'v1/current/Me?$select=CurrentDivision'",
")",
")",
"current_division",
"=",
"ret",
"[",
"0",
"]",
"[",
"'CurrentDivision'",
"]",
"assert",
"isinstance",
"(",
"current_division",
",",
"int",
")",
"urlbase",
"=",
"'v1/%d/'",
"%",
"(",
"current_division",
",",
")",
"resource",
"=",
"urljoin",
"(",
"urlbase",
",",
"'hrm/Divisions?$select=Code,Description'",
")",
"ret",
"=",
"self",
".",
"rest",
"(",
"GET",
"(",
"resource",
")",
")",
"choices",
"=",
"dict",
"(",
"(",
"i",
"[",
"'Code'",
"]",
",",
"i",
"[",
"'Description'",
"]",
")",
"for",
"i",
"in",
"ret",
")",
"return",
"choices",
",",
"current_division"
] | 40 | 16.933333 |
def _parse_response(self, respond):
"""parse text of response for HTTP errors
This parses the text of the response to decide whether to
retry request or raise exception. At the moment this only
detects an exception condition.
Args:
respond (Response): requests.Response object
Returns:
bool: False if the request should be retried, True
if not.
Raises:
RegisterSizeError
"""
# convert error messages into exceptions
mobj = self._max_qubit_error_re.match(respond.text)
if mobj:
raise RegisterSizeError(
'device register size must be <= {}'.format(mobj.group(1)))
return True | [
"def",
"_parse_response",
"(",
"self",
",",
"respond",
")",
":",
"# convert error messages into exceptions",
"mobj",
"=",
"self",
".",
"_max_qubit_error_re",
".",
"match",
"(",
"respond",
".",
"text",
")",
"if",
"mobj",
":",
"raise",
"RegisterSizeError",
"(",
"'device register size must be <= {}'",
".",
"format",
"(",
"mobj",
".",
"group",
"(",
"1",
")",
")",
")",
"return",
"True"
] | 31.782609 | 20.391304 |
def clone(src, dst_path, skip_globals, skip_dimensions, skip_variables):
"""
Mostly ripped from nc3tonc4 in netCDF4-python.
Added ability to skip dimension and variables.
Removed all of the unpacking logic for shorts.
"""
if os.path.exists(dst_path):
os.unlink(dst_path)
dst = netCDF4.Dataset(dst_path, 'w')
# Global attributes
for attname in src.ncattrs():
if attname not in skip_globals:
setattr(dst, attname, getattr(src, attname))
# Dimensions
unlimdim = None
unlimdimname = False
for dimname, dim in src.dimensions.items():
# Skip what we need to
if dimname in skip_dimensions:
continue
if dim.isunlimited():
unlimdim = dim
unlimdimname = dimname
dst.createDimension(dimname, None)
else:
dst.createDimension(dimname, len(dim))
# Variables
for varname, ncvar in src.variables.items():
# Skip what we need to
if varname in skip_variables:
continue
hasunlimdim = False
if unlimdimname and unlimdimname in ncvar.dimensions:
hasunlimdim = True
filler = None
if hasattr(ncvar, '_FillValue'):
filler = ncvar._FillValue
if ncvar.chunking == "contiguous":
var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler)
else:
var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler, chunksizes=ncvar.chunking())
# Attributes
for attname in ncvar.ncattrs():
if attname == '_FillValue':
continue
else:
setattr(var, attname, getattr(ncvar, attname))
# Data
nchunk = 1000
if hasunlimdim:
if nchunk:
start = 0
stop = len(unlimdim)
step = nchunk
if step < 1:
step = 1
for n in range(start, stop, step):
nmax = n + nchunk
if nmax > len(unlimdim):
nmax = len(unlimdim)
idata = ncvar[n:nmax]
var[n:nmax] = idata
else:
idata = ncvar[:]
var[0:len(unlimdim)] = idata
else:
idata = ncvar[:]
var[:] = idata
dst.sync()
src.close()
dst.close() | [
"def",
"clone",
"(",
"src",
",",
"dst_path",
",",
"skip_globals",
",",
"skip_dimensions",
",",
"skip_variables",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dst_path",
")",
":",
"os",
".",
"unlink",
"(",
"dst_path",
")",
"dst",
"=",
"netCDF4",
".",
"Dataset",
"(",
"dst_path",
",",
"'w'",
")",
"# Global attributes",
"for",
"attname",
"in",
"src",
".",
"ncattrs",
"(",
")",
":",
"if",
"attname",
"not",
"in",
"skip_globals",
":",
"setattr",
"(",
"dst",
",",
"attname",
",",
"getattr",
"(",
"src",
",",
"attname",
")",
")",
"# Dimensions",
"unlimdim",
"=",
"None",
"unlimdimname",
"=",
"False",
"for",
"dimname",
",",
"dim",
"in",
"src",
".",
"dimensions",
".",
"items",
"(",
")",
":",
"# Skip what we need to",
"if",
"dimname",
"in",
"skip_dimensions",
":",
"continue",
"if",
"dim",
".",
"isunlimited",
"(",
")",
":",
"unlimdim",
"=",
"dim",
"unlimdimname",
"=",
"dimname",
"dst",
".",
"createDimension",
"(",
"dimname",
",",
"None",
")",
"else",
":",
"dst",
".",
"createDimension",
"(",
"dimname",
",",
"len",
"(",
"dim",
")",
")",
"# Variables",
"for",
"varname",
",",
"ncvar",
"in",
"src",
".",
"variables",
".",
"items",
"(",
")",
":",
"# Skip what we need to",
"if",
"varname",
"in",
"skip_variables",
":",
"continue",
"hasunlimdim",
"=",
"False",
"if",
"unlimdimname",
"and",
"unlimdimname",
"in",
"ncvar",
".",
"dimensions",
":",
"hasunlimdim",
"=",
"True",
"filler",
"=",
"None",
"if",
"hasattr",
"(",
"ncvar",
",",
"'_FillValue'",
")",
":",
"filler",
"=",
"ncvar",
".",
"_FillValue",
"if",
"ncvar",
".",
"chunking",
"==",
"\"contiguous\"",
":",
"var",
"=",
"dst",
".",
"createVariable",
"(",
"varname",
",",
"ncvar",
".",
"dtype",
",",
"ncvar",
".",
"dimensions",
",",
"fill_value",
"=",
"filler",
")",
"else",
":",
"var",
"=",
"dst",
".",
"createVariable",
"(",
"varname",
",",
"ncvar",
".",
"dtype",
",",
"ncvar",
".",
"dimensions",
",",
"fill_value",
"=",
"filler",
",",
"chunksizes",
"=",
"ncvar",
".",
"chunking",
"(",
")",
")",
"# Attributes",
"for",
"attname",
"in",
"ncvar",
".",
"ncattrs",
"(",
")",
":",
"if",
"attname",
"==",
"'_FillValue'",
":",
"continue",
"else",
":",
"setattr",
"(",
"var",
",",
"attname",
",",
"getattr",
"(",
"ncvar",
",",
"attname",
")",
")",
"# Data",
"nchunk",
"=",
"1000",
"if",
"hasunlimdim",
":",
"if",
"nchunk",
":",
"start",
"=",
"0",
"stop",
"=",
"len",
"(",
"unlimdim",
")",
"step",
"=",
"nchunk",
"if",
"step",
"<",
"1",
":",
"step",
"=",
"1",
"for",
"n",
"in",
"range",
"(",
"start",
",",
"stop",
",",
"step",
")",
":",
"nmax",
"=",
"n",
"+",
"nchunk",
"if",
"nmax",
">",
"len",
"(",
"unlimdim",
")",
":",
"nmax",
"=",
"len",
"(",
"unlimdim",
")",
"idata",
"=",
"ncvar",
"[",
"n",
":",
"nmax",
"]",
"var",
"[",
"n",
":",
"nmax",
"]",
"=",
"idata",
"else",
":",
"idata",
"=",
"ncvar",
"[",
":",
"]",
"var",
"[",
"0",
":",
"len",
"(",
"unlimdim",
")",
"]",
"=",
"idata",
"else",
":",
"idata",
"=",
"ncvar",
"[",
":",
"]",
"var",
"[",
":",
"]",
"=",
"idata",
"dst",
".",
"sync",
"(",
")",
"src",
".",
"close",
"(",
")",
"dst",
".",
"close",
"(",
")"
] | 28.564706 | 18.376471 |
def _init_client():
'''Setup client and init datastore.
'''
global client, path_prefix
if client is not None:
return
etcd_kwargs = {
'host': __opts__.get('etcd.host', '127.0.0.1'),
'port': __opts__.get('etcd.port', 2379),
'protocol': __opts__.get('etcd.protocol', 'http'),
'allow_reconnect': __opts__.get('etcd.allow_reconnect', True),
'allow_redirect': __opts__.get('etcd.allow_redirect', False),
'srv_domain': __opts__.get('etcd.srv_domain', None),
'read_timeout': __opts__.get('etcd.read_timeout', 60),
'username': __opts__.get('etcd.username', None),
'password': __opts__.get('etcd.password', None),
'cert': __opts__.get('etcd.cert', None),
'ca_cert': __opts__.get('etcd.ca_cert', None),
}
path_prefix = __opts__.get('etcd.path_prefix', _DEFAULT_PATH_PREFIX)
if path_prefix != "":
path_prefix = '/{0}'.format(path_prefix.strip('/'))
log.info("etcd: Setting up client with params: %r", etcd_kwargs)
client = etcd.Client(**etcd_kwargs)
try:
client.read(path_prefix)
except etcd.EtcdKeyNotFound:
log.info("etcd: Creating dir %r", path_prefix)
client.write(path_prefix, None, dir=True) | [
"def",
"_init_client",
"(",
")",
":",
"global",
"client",
",",
"path_prefix",
"if",
"client",
"is",
"not",
"None",
":",
"return",
"etcd_kwargs",
"=",
"{",
"'host'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.host'",
",",
"'127.0.0.1'",
")",
",",
"'port'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.port'",
",",
"2379",
")",
",",
"'protocol'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.protocol'",
",",
"'http'",
")",
",",
"'allow_reconnect'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.allow_reconnect'",
",",
"True",
")",
",",
"'allow_redirect'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.allow_redirect'",
",",
"False",
")",
",",
"'srv_domain'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.srv_domain'",
",",
"None",
")",
",",
"'read_timeout'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.read_timeout'",
",",
"60",
")",
",",
"'username'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.username'",
",",
"None",
")",
",",
"'password'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.password'",
",",
"None",
")",
",",
"'cert'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.cert'",
",",
"None",
")",
",",
"'ca_cert'",
":",
"__opts__",
".",
"get",
"(",
"'etcd.ca_cert'",
",",
"None",
")",
",",
"}",
"path_prefix",
"=",
"__opts__",
".",
"get",
"(",
"'etcd.path_prefix'",
",",
"_DEFAULT_PATH_PREFIX",
")",
"if",
"path_prefix",
"!=",
"\"\"",
":",
"path_prefix",
"=",
"'/{0}'",
".",
"format",
"(",
"path_prefix",
".",
"strip",
"(",
"'/'",
")",
")",
"log",
".",
"info",
"(",
"\"etcd: Setting up client with params: %r\"",
",",
"etcd_kwargs",
")",
"client",
"=",
"etcd",
".",
"Client",
"(",
"*",
"*",
"etcd_kwargs",
")",
"try",
":",
"client",
".",
"read",
"(",
"path_prefix",
")",
"except",
"etcd",
".",
"EtcdKeyNotFound",
":",
"log",
".",
"info",
"(",
"\"etcd: Creating dir %r\"",
",",
"path_prefix",
")",
"client",
".",
"write",
"(",
"path_prefix",
",",
"None",
",",
"dir",
"=",
"True",
")"
] | 42.566667 | 20.233333 |
def dir(self, path='/', slash=True, bus=False, timeout=0):
    """Return the list of entity names found at *path*.

    :param path: owserver path to list (defaults to the root).
    :param slash: if true, request trailing slashes on directory entries.
    :param bus: if true, include bus entries in the reply.
    :param timeout: per-call timeout forwarded to sendmess.
    :raises OwnetError: if the server reports a negative return code.
    """
    # Choose the protocol message according to the slash option.
    message = MSG_DIRALLSLASH if slash else MSG_DIRALL
    # Toggle the bus-return flag on top of the connection defaults.
    if bus:
        req_flags = self.flags | FLG_BUS_RET
    else:
        req_flags = self.flags & ~FLG_BUS_RET
    ret, data = self.sendmess(message, str2bytez(path), req_flags,
                              timeout=timeout)
    if ret < 0:
        raise OwnetError(-ret, self.errmess[-ret], path)
    # An empty payload means an empty directory.
    return bytes2str(data).split(',') if data else []
"def",
"dir",
"(",
"self",
",",
"path",
"=",
"'/'",
",",
"slash",
"=",
"True",
",",
"bus",
"=",
"False",
",",
"timeout",
"=",
"0",
")",
":",
"if",
"slash",
":",
"msg",
"=",
"MSG_DIRALLSLASH",
"else",
":",
"msg",
"=",
"MSG_DIRALL",
"if",
"bus",
":",
"flags",
"=",
"self",
".",
"flags",
"|",
"FLG_BUS_RET",
"else",
":",
"flags",
"=",
"self",
".",
"flags",
"&",
"~",
"FLG_BUS_RET",
"ret",
",",
"data",
"=",
"self",
".",
"sendmess",
"(",
"msg",
",",
"str2bytez",
"(",
"path",
")",
",",
"flags",
",",
"timeout",
"=",
"timeout",
")",
"if",
"ret",
"<",
"0",
":",
"raise",
"OwnetError",
"(",
"-",
"ret",
",",
"self",
".",
"errmess",
"[",
"-",
"ret",
"]",
",",
"path",
")",
"if",
"data",
":",
"return",
"bytes2str",
"(",
"data",
")",
".",
"split",
"(",
"','",
")",
"else",
":",
"return",
"[",
"]"
] | 29.157895 | 20.157895 |
def run_analysis(named_analysis, prepared_analyses=None, log_dir=default_log_dir):
    """
    Run only the analysis whose name matches *named_analysis*.

    Otherwise just like run_analyses: analyses are prepared on demand and
    executed against a fresh, empty state collection.

    :param named_analysis: name of the analysis to run
    :param prepared_analyses: optional pre-built analyses; built via
        prepare_analyses() when not supplied
    :param log_dir: directory to write analysis logs into
    :return: the resulting StateCollection
    """
    # Identity comparison with None (PEP 8); '== None' can misbehave for
    # objects overriding __eq__.
    if prepared_analyses is None:
        prepared_analyses = prepare_analyses()
    state_collection = funtool.state_collection.StateCollection([], {})
    for analysis in prepared_analyses:
        if analysis.name == named_analysis:
            state_collection = funtool.analysis.run_analysis(
                analysis, state_collection, log_dir)
    return state_collection
"def",
"run_analysis",
"(",
"named_analysis",
",",
"prepared_analyses",
"=",
"None",
",",
"log_dir",
"=",
"default_log_dir",
")",
":",
"if",
"prepared_analyses",
"==",
"None",
":",
"prepared_analyses",
"=",
"prepare_analyses",
"(",
")",
"state_collection",
"=",
"funtool",
".",
"state_collection",
".",
"StateCollection",
"(",
"[",
"]",
",",
"{",
"}",
")",
"for",
"analysis",
"in",
"prepared_analyses",
":",
"if",
"analysis",
".",
"name",
"==",
"named_analysis",
":",
"state_collection",
"=",
"funtool",
".",
"analysis",
".",
"run_analysis",
"(",
"analysis",
",",
"state_collection",
",",
"log_dir",
")",
"return",
"state_collection"
] | 46.818182 | 16.818182 |
def _prefix_from_prefix_string(self, prefixlen_str):
    """Turn a prefix length string into an integer.

    Args:
        prefixlen_str: A decimal string containing the prefix length.

    Returns:
        The prefix length as an integer.

    Raises:
        NetmaskValueError: If the input is malformed or out of range.
    """
    try:
        # The digit-set check rejects signs, whitespace and anything else
        # that int() would otherwise tolerate; int('') also lands in the
        # except branch below.
        if _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
            prefixlen = int(prefixlen_str)
            if 0 <= prefixlen <= self._max_prefixlen:
                return prefixlen
        raise ValueError
    except ValueError:
        raise NetmaskValueError('%s is not a valid prefix length' %
                                prefixlen_str)
"def",
"_prefix_from_prefix_string",
"(",
"self",
",",
"prefixlen_str",
")",
":",
"try",
":",
"if",
"not",
"_BaseV4",
".",
"_DECIMAL_DIGITS",
".",
"issuperset",
"(",
"prefixlen_str",
")",
":",
"raise",
"ValueError",
"prefixlen",
"=",
"int",
"(",
"prefixlen_str",
")",
"if",
"not",
"(",
"0",
"<=",
"prefixlen",
"<=",
"self",
".",
"_max_prefixlen",
")",
":",
"raise",
"ValueError",
"except",
"ValueError",
":",
"raise",
"NetmaskValueError",
"(",
"'%s is not a valid prefix length'",
"%",
"prefixlen_str",
")",
"return",
"prefixlen"
] | 33.391304 | 21.043478 |
def GetPathFromLink(resource_link, resource_type=''):
    """Gets path from resource link with optional resource type

    :param str resource_link:
    :param str resource_type:

    :return:
        Path from resource link with resource type appended (if provided).
    :rtype: str
    """
    resource_link = TrimBeginningAndEndingSlashes(resource_link)
    if IsNameBased(resource_link):
        # Name-based links may contain characters unsafe in a URL path;
        # percent-encode them, keeping '/' unescaped (the default safe
        # character for path quoting).
        resource_link = urllib_quote(resource_link)
    # Wrap the result in slashes for both name-based and rid-based links.
    if resource_type:
        return '/{}/{}/'.format(resource_link, resource_type)
    return '/{}/'.format(resource_link)
"def",
"GetPathFromLink",
"(",
"resource_link",
",",
"resource_type",
"=",
"''",
")",
":",
"resource_link",
"=",
"TrimBeginningAndEndingSlashes",
"(",
"resource_link",
")",
"if",
"IsNameBased",
"(",
"resource_link",
")",
":",
"# Replace special characters in string using the %xx escape. For example, space(' ') would be replaced by %20",
"# This function is intended for quoting the path section of the URL and excludes '/' to be quoted as that's the default safe char",
"resource_link",
"=",
"urllib_quote",
"(",
"resource_link",
")",
"# Padding leading and trailing slashes to the path returned both for name based and resource id based links",
"if",
"resource_type",
":",
"return",
"'/'",
"+",
"resource_link",
"+",
"'/'",
"+",
"resource_type",
"+",
"'/'",
"else",
":",
"return",
"'/'",
"+",
"resource_link",
"+",
"'/'"
] | 42.909091 | 28.363636 |
def iteritems(self):
    """Iterate over all header lines, including duplicate ones."""
    for key in self:
        # The stored entry keeps the canonical key at index 0 followed by
        # one element per header line value.
        stored = _dict_getitem(self, key)
        canonical = stored[0]
        for value in stored[1:]:
            yield canonical, value
"def",
"iteritems",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
":",
"vals",
"=",
"_dict_getitem",
"(",
"self",
",",
"key",
")",
"for",
"val",
"in",
"vals",
"[",
"1",
":",
"]",
":",
"yield",
"vals",
"[",
"0",
"]",
",",
"val"
] | 37.166667 | 8.833333 |
def merge(old_df, new_df, return_index=False):
    """
    Merge two dataframes of buildings. The old dataframe is usually the
    buildings dataset and the new dataframe is a modified (by the user)
    version of what is returned by the pick method.

    Parameters
    ----------
    old_df : dataframe
        Current set of buildings
    new_df : dataframe
        New buildings to add, usually comes from this module
    return_index : bool
        If True, also return the new (shifted) index of new_df

    Returns
    -------
    df : dataframe
        Combined DataFrame of buildings with a non-overlapping index
    index : pd.Index
        Only when return_index is True: the index assigned to new_df
    """
    # Shift the new rows so their index starts right after the old maximum,
    # guaranteeing uniqueness across the concatenation.
    start = np.max(old_df.index.values) + 1
    shifted = new_df.reset_index(drop=True)
    shifted.index = shifted.index + start
    combined = pd.concat([old_df, shifted], verify_integrity=True)
    combined.index.name = 'building_id'
    if return_index:
        return combined, shifted.index
    return combined
"def",
"merge",
"(",
"old_df",
",",
"new_df",
",",
"return_index",
"=",
"False",
")",
":",
"maxind",
"=",
"np",
".",
"max",
"(",
"old_df",
".",
"index",
".",
"values",
")",
"new_df",
"=",
"new_df",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"new_df",
".",
"index",
"=",
"new_df",
".",
"index",
"+",
"maxind",
"+",
"1",
"concat_df",
"=",
"pd",
".",
"concat",
"(",
"[",
"old_df",
",",
"new_df",
"]",
",",
"verify_integrity",
"=",
"True",
")",
"concat_df",
".",
"index",
".",
"name",
"=",
"'building_id'",
"if",
"return_index",
":",
"return",
"concat_df",
",",
"new_df",
".",
"index",
"return",
"concat_df"
] | 37.166667 | 20.055556 |
def getServiceModuleName(self):
    '''Return the service module name derived from the WSDL.

    The optional server_module_suffix is appended when configured.

    :raises WsdlGeneratorError: if no base name can be determined.
    '''
    name = GetModuleBaseNameFromWSDL(self.wsdl)
    if not name:
        # Call-style raise works on both Python 2 and 3; the original
        # "raise Cls, msg" comma form is a SyntaxError on Python 3.
        raise WsdlGeneratorError('could not determine a service name')
    if self.server_module_suffix is None:
        return name
    return '%s%s' % (name, self.server_module_suffix)
"def",
"getServiceModuleName",
"(",
"self",
")",
":",
"name",
"=",
"GetModuleBaseNameFromWSDL",
"(",
"self",
".",
"wsdl",
")",
"if",
"not",
"name",
":",
"raise",
"WsdlGeneratorError",
",",
"'could not determine a service name'",
"if",
"self",
".",
"server_module_suffix",
"is",
"None",
":",
"return",
"name",
"return",
"'%s%s'",
"%",
"(",
"name",
",",
"self",
".",
"server_module_suffix",
")"
] | 34.9 | 18.3 |
def _load_extensions(self):
    """
    Load all extension files into the namespace pykwalify.ext
    """
    log.debug(u"loading all extensions : %s", self.extensions)
    self.loaded_extensions = []
    for ext_path in self.extensions:
        # Normalise to an absolute path so the existence check and any
        # error message are unambiguous.
        full_path = ext_path if os.path.isabs(ext_path) else os.path.abspath(ext_path)
        if not os.path.exists(full_path):
            raise CoreError(u"Extension file: {0} not found on disk".format(full_path))
        self.loaded_extensions.append(imp.load_source("", full_path))
    log.debug(self.loaded_extensions)
    log.debug([dir(m) for m in self.loaded_extensions])
"def",
"_load_extensions",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"u\"loading all extensions : %s\"",
",",
"self",
".",
"extensions",
")",
"self",
".",
"loaded_extensions",
"=",
"[",
"]",
"for",
"f",
"in",
"self",
".",
"extensions",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"f",
")",
":",
"f",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"f",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"f",
")",
":",
"raise",
"CoreError",
"(",
"u\"Extension file: {0} not found on disk\"",
".",
"format",
"(",
"f",
")",
")",
"self",
".",
"loaded_extensions",
".",
"append",
"(",
"imp",
".",
"load_source",
"(",
"\"\"",
",",
"f",
")",
")",
"log",
".",
"debug",
"(",
"self",
".",
"loaded_extensions",
")",
"log",
".",
"debug",
"(",
"[",
"dir",
"(",
"m",
")",
"for",
"m",
"in",
"self",
".",
"loaded_extensions",
"]",
")"
] | 31.947368 | 19.631579 |
def run(self, args=None):
    """
    Run the main command, or a sub-command selected by the parsed args.

    When *args* is omitted, command-line arguments are parsed from
    sys.argv.  Exceptions are logged and converted to exit code 2 unless
    raise_exceptions is set, in which case they propagate.
    """
    if not args:
        args = self.parse(sys.argv[1:])
    if getattr(args, 'verbose', False):
        self.logger.setLevel(logging.DEBUG)
    try:
        # Sub-commands attach their handler as args.run; otherwise fall
        # back to the top-level entry point.
        if hasattr(args, 'run'):
            args.run(self, args)
        else:
            self.__main__(args)  # pylint: disable-msg=E1101
    except Exception as err:  # pylint: disable-msg=W0703
        import traceback
        self.logger.debug(traceback.format_exc())
        self.logger.error(str(err))
        if self.raise_exceptions:
            raise
        sys.exit(2)
"def",
"run",
"(",
"self",
",",
"args",
"=",
"None",
")",
":",
"if",
"not",
"args",
":",
"args",
"=",
"self",
".",
"parse",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"if",
"getattr",
"(",
"args",
",",
"'verbose'",
",",
"False",
")",
":",
"self",
".",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"try",
":",
"if",
"hasattr",
"(",
"args",
",",
"'run'",
")",
":",
"args",
".",
"run",
"(",
"self",
",",
"args",
")",
"else",
":",
"self",
".",
"__main__",
"(",
"args",
")",
"# pylint: disable-msg=E1101",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable-msg=W0703",
"import",
"traceback",
"self",
".",
"logger",
".",
"debug",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"self",
".",
"logger",
".",
"error",
"(",
"str",
"(",
"e",
")",
")",
"if",
"self",
".",
"raise_exceptions",
":",
"raise",
"sys",
".",
"exit",
"(",
"2",
")"
] | 29.869565 | 15.695652 |
def doesIntersect(self, other):
    '''
    :param: other - Circle class

    Returns True iff:
    self.center.distance(other.center) <= self.radius+other.radius
    '''
    other_cls = type(other)
    if issubclass(other_cls, Ellipse):
        # Two circles intersect exactly when the gap between centers does
        # not exceed the sum of their radii.
        return self.center.distance(other.center) <= self.radius + other.radius
    if issubclass(other_cls, Line):
        raise NotImplementedError('doesIntersect,other is Line class')
    raise TypeError("unknown type '{t}'".format(t=other_cls))
"def",
"doesIntersect",
"(",
"self",
",",
"other",
")",
":",
"otherType",
"=",
"type",
"(",
"other",
")",
"if",
"issubclass",
"(",
"otherType",
",",
"Ellipse",
")",
":",
"distance",
"=",
"self",
".",
"center",
".",
"distance",
"(",
"other",
".",
"center",
")",
"radiisum",
"=",
"self",
".",
"radius",
"+",
"other",
".",
"radius",
"return",
"distance",
"<=",
"radiisum",
"if",
"issubclass",
"(",
"otherType",
",",
"Line",
")",
":",
"raise",
"NotImplementedError",
"(",
"'doesIntersect,other is Line class'",
")",
"raise",
"TypeError",
"(",
"\"unknown type '{t}'\"",
".",
"format",
"(",
"t",
"=",
"otherType",
")",
")"
] | 30.631579 | 21.894737 |
def MethodCalled(self, mock_method):
    """Remove a method call from the group.

    If the method is not in the set, an UnexpectedMethodCallError will be
    raised.

    Args:
      mock_method: a mock method that should be equal to a method in the group.

    Returns:
      The mock method from the group

    Raises:
      UnexpectedMethodCallError if the mock_method was not in the group.
    """
    # Check to see if this method exists, and if so add it to the set of
    # called methods.
    for method in self._methods:
      if method == mock_method:
        self._methods_called.add(mock_method)
        # Always put this group back on top of the queue, because we don't know
        # when we are done.
        mock_method._call_queue.appendleft(self)
        return self, method
    # Falling out of the loop means the call matched nothing in this group.
    if self.IsSatisfied():
      # Every required method has been seen, so this call is allowed to
      # fall through to the next expectation in the call queue.
      next_method = mock_method._PopNextMethod();
      return next_method, None
    else:
      raise UnexpectedMethodCallError(mock_method, self)
"def",
"MethodCalled",
"(",
"self",
",",
"mock_method",
")",
":",
"# Check to see if this method exists, and if so add it to the set of",
"# called methods.",
"for",
"method",
"in",
"self",
".",
"_methods",
":",
"if",
"method",
"==",
"mock_method",
":",
"self",
".",
"_methods_called",
".",
"add",
"(",
"mock_method",
")",
"# Always put this group back on top of the queue, because we don't know",
"# when we are done.",
"mock_method",
".",
"_call_queue",
".",
"appendleft",
"(",
"self",
")",
"return",
"self",
",",
"method",
"if",
"self",
".",
"IsSatisfied",
"(",
")",
":",
"next_method",
"=",
"mock_method",
".",
"_PopNextMethod",
"(",
")",
"return",
"next_method",
",",
"None",
"else",
":",
"raise",
"UnexpectedMethodCallError",
"(",
"mock_method",
",",
"self",
")"
] | 29.40625 | 22.96875 |
def get_url_rev_options(self, url):
    # type: (str) -> Tuple[str, RevOptions]
    """
    Return the URL and RevOptions object to use in obtain() and in
    some cases export(), as a tuple (url, rev_options).
    """
    # Split the incoming URL into its address, revision and credentials.
    stripped_url, rev, (username, password) = self.get_url_rev_and_auth(url)
    auth_args = self.make_rev_args(username, password)
    rev_options = self.make_rev_options(rev, extra_args=auth_args)
    return stripped_url, rev_options
"def",
"get_url_rev_options",
"(",
"self",
",",
"url",
")",
":",
"# type: (str) -> Tuple[str, RevOptions]",
"url",
",",
"rev",
",",
"user_pass",
"=",
"self",
".",
"get_url_rev_and_auth",
"(",
"url",
")",
"username",
",",
"password",
"=",
"user_pass",
"extra_args",
"=",
"self",
".",
"make_rev_args",
"(",
"username",
",",
"password",
")",
"rev_options",
"=",
"self",
".",
"make_rev_options",
"(",
"rev",
",",
"extra_args",
"=",
"extra_args",
")",
"return",
"url",
",",
"rev_options"
] | 41 | 15.166667 |
def db_to_df(db_file, slabs=None, facet=None):
    """Transforms database to data frame.

    Parameters
    ----------
    db_file : Path to database
    slabs : Which metals (slabs) to select.
    facet : Which facets to select.

    Returns
    -------
    df : Data frame.
    """
    systems = []
    data = []
    if slabs:
        for slab in slabs:
            rows = select_data(db_file, slab=slab, facet=facet)
            data.append(rows)
            # Each row starts with the system name; record the unique one.
            subsystem = [row[0] for row in rows]
            systems.append(list(set(subsystem))[0])
    else:
        data.append(select_data(db_file))
    # Accumulate one flat column per field across all selected chunks.
    # (Distinct local names avoid shadowing the 'facet' parameter.)
    sys_col, facet_col = [], []
    reactant_col, product_col, energy_col = [], [], []
    for chunk in data:
        for reaction in chunk:
            sys_col.append(str(reaction[0]))
            facet_col.append(str(reaction[1]))
            # Reactants/products are stored as stringified dicts; only the
            # molecule names (keys) are kept.
            reactant_col.append(list(ast.literal_eval(reaction[2]).keys()))
            product_col.append(list(ast.literal_eval(reaction[3]).keys()))
            energy_col.append(float(reaction[4]))
    df = pd.DataFrame()
    df[0] = sys_col
    df[1] = facet_col
    df[2] = reactant_col
    df[4] = product_col
    df[5] = energy_col
    df.columns = ['system', 'facet', 'reactants', 'products', 'reaction_energy']
    df['labels'] = auto_labels(df)
    df = df.sort_values(by=['facet', 'system'])
    df = df.reset_index(drop=True)
    return (df)
"def",
"db_to_df",
"(",
"db_file",
",",
"slabs",
"=",
"None",
",",
"facet",
"=",
"None",
")",
":",
"systems",
"=",
"[",
"]",
"data",
"=",
"[",
"]",
"if",
"slabs",
":",
"for",
"slab",
"in",
"slabs",
":",
"data_tmp",
"=",
"select_data",
"(",
"db_file",
",",
"slab",
"=",
"slab",
",",
"facet",
"=",
"facet",
")",
"data",
".",
"append",
"(",
"data_tmp",
")",
"subsystem",
"=",
"[",
"tup",
"[",
"0",
"]",
"for",
"i",
",",
"tup",
"in",
"enumerate",
"(",
"data_tmp",
")",
"]",
"systems",
".",
"append",
"(",
"list",
"(",
"set",
"(",
"subsystem",
")",
")",
"[",
"0",
"]",
")",
"else",
":",
"data_tmp",
"=",
"select_data",
"(",
"db_file",
")",
"data",
".",
"append",
"(",
"data_tmp",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"system",
",",
"facet",
",",
"reactants",
",",
"products",
",",
"reaction_energy",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"entry",
"in",
"data",
":",
"for",
"reaction",
"in",
"entry",
":",
"system",
".",
"append",
"(",
"str",
"(",
"reaction",
"[",
"0",
"]",
")",
")",
"facet",
".",
"append",
"(",
"str",
"(",
"reaction",
"[",
"1",
"]",
")",
")",
"reactants_i",
"=",
"[",
"molecule",
"for",
"molecule",
"in",
"ast",
".",
"literal_eval",
"(",
"reaction",
"[",
"2",
"]",
")",
".",
"keys",
"(",
")",
"]",
"reactants",
".",
"append",
"(",
"reactants_i",
")",
"products_i",
"=",
"[",
"molecule",
"for",
"molecule",
"in",
"ast",
".",
"literal_eval",
"(",
"reaction",
"[",
"3",
"]",
")",
".",
"keys",
"(",
")",
"]",
"products",
".",
"append",
"(",
"products_i",
")",
"reaction_energy",
".",
"append",
"(",
"float",
"(",
"reaction",
"[",
"4",
"]",
")",
")",
"df",
"[",
"0",
"]",
"=",
"system",
"df",
"[",
"1",
"]",
"=",
"facet",
"df",
"[",
"2",
"]",
"=",
"reactants",
"df",
"[",
"4",
"]",
"=",
"products",
"df",
"[",
"5",
"]",
"=",
"reaction_energy",
"df",
".",
"columns",
"=",
"[",
"'system'",
",",
"'facet'",
",",
"'reactants'",
",",
"'products'",
",",
"'reaction_energy'",
"]",
"labs",
"=",
"auto_labels",
"(",
"df",
")",
"df",
"[",
"'labels'",
"]",
"=",
"labs",
"df",
"=",
"df",
".",
"sort_values",
"(",
"by",
"=",
"[",
"'facet'",
",",
"'system'",
"]",
")",
"df",
"=",
"df",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"return",
"(",
"df",
")"
] | 30.77551 | 19.714286 |
def agent_run(args):
    """A version of `wandb run` that the agent uses to run things.

    :param args: dict with at least 'program' (script to run) and 'args'
        (its argument list).
    """
    # Build the run from env vars / defaults and register it with the API.
    run = wandb.wandb_run.Run.from_environment_or_defaults()
    run.enable_logging()
    api = wandb.apis.InternalApi()
    api.set_current_run_id(run.id)
    # TODO: better failure handling
    root = api.git.root
    # handle non-git directories
    if not root:
        root = os.path.abspath(os.getcwd())
        host = socket.gethostname()
        # NOTE(review): remote_url is computed but never used below —
        # looks like dead code or a dropped run.save() argument; confirm.
        remote_url = 'file://{}{}'.format(host, root)
    run.save(program=args['program'], api=api)
    # Capture the current environment and attach it to the run so the
    # child process inherits it.
    env = dict(os.environ)
    run.set_environment(env)
    try:
        rm = wandb.run_manager.RunManager(api, run)
    except wandb.run_manager.Error:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        wandb.termerror('An Exception was raised during setup, see %s for full traceback.' %
                        util.get_log_file_path())
        # NOTE(review): passes the exception object, not str(exc_value) —
        # assumes termerror stringifies its argument; verify.
        wandb.termerror(exc_value)
        if 'permission' in str(exc_value):
            wandb.termerror(
                'Are you sure you provided the correct API key to "wandb login"?')
        lines = traceback.format_exception(
            exc_type, exc_value, exc_traceback)
        logging.error('\n'.join(lines))
    else:
        # Setup succeeded: hand control to the run manager.
        rm.run_user_process(args['program'], args['args'], env)
"def",
"agent_run",
"(",
"args",
")",
":",
"run",
"=",
"wandb",
".",
"wandb_run",
".",
"Run",
".",
"from_environment_or_defaults",
"(",
")",
"run",
".",
"enable_logging",
"(",
")",
"api",
"=",
"wandb",
".",
"apis",
".",
"InternalApi",
"(",
")",
"api",
".",
"set_current_run_id",
"(",
"run",
".",
"id",
")",
"# TODO: better failure handling",
"root",
"=",
"api",
".",
"git",
".",
"root",
"# handle non-git directories",
"if",
"not",
"root",
":",
"root",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
"host",
"=",
"socket",
".",
"gethostname",
"(",
")",
"remote_url",
"=",
"'file://{}{}'",
".",
"format",
"(",
"host",
",",
"root",
")",
"run",
".",
"save",
"(",
"program",
"=",
"args",
"[",
"'program'",
"]",
",",
"api",
"=",
"api",
")",
"env",
"=",
"dict",
"(",
"os",
".",
"environ",
")",
"run",
".",
"set_environment",
"(",
"env",
")",
"try",
":",
"rm",
"=",
"wandb",
".",
"run_manager",
".",
"RunManager",
"(",
"api",
",",
"run",
")",
"except",
"wandb",
".",
"run_manager",
".",
"Error",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"wandb",
".",
"termerror",
"(",
"'An Exception was raised during setup, see %s for full traceback.'",
"%",
"util",
".",
"get_log_file_path",
"(",
")",
")",
"wandb",
".",
"termerror",
"(",
"exc_value",
")",
"if",
"'permission'",
"in",
"str",
"(",
"exc_value",
")",
":",
"wandb",
".",
"termerror",
"(",
"'Are you sure you provided the correct API key to \"wandb login\"?'",
")",
"lines",
"=",
"traceback",
".",
"format_exception",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
"logging",
".",
"error",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")",
"else",
":",
"rm",
".",
"run_user_process",
"(",
"args",
"[",
"'program'",
"]",
",",
"args",
"[",
"'args'",
"]",
",",
"env",
")"
] | 35.083333 | 16.388889 |
def com_google_fonts_check_canonical_filename(font):
  """Checking file is named canonically.

  A font's filename must be composed in the following manner:
  <familyname>-<stylename>.ttf

  e.g. Nunito-Regular.ttf,
       Oswald-BoldItalic.ttf

  Variable fonts must use the "-VF" suffix:

  e.g. Roboto-VF.ttf,
       Barlow-VF.ttf,
       Example-Roman-VF.ttf,
       Familyname-Italic-VF.ttf

  Yields (status, message) tuples per the fontbakery check protocol.
  """
  # Local imports keep heavy dependencies off the module import path.
  from fontTools.ttLib import TTFont
  from fontbakery.profiles.shared_conditions import is_variable_font
  from fontbakery.constants import (STATIC_STYLE_NAMES,
                                    VARFONT_SUFFIXES)
  # canonical_stylename()/suffix() are project helpers; presumably they
  # parse the style portion of the filename — confirm in shared code.
  if canonical_stylename(font):
    yield PASS, f"{font} is named canonically."
  else:
    if os.path.exists(font) and is_variable_font(TTFont(font)):
      if suffix(font) in STATIC_STYLE_NAMES:
        yield FAIL, (f'This is a variable font, but it is using'
                     ' a naming scheme typical of a static font.')
      # Variable fonts always get this rename hint, regardless of suffix.
      yield FAIL, ('Please change the font filename to use one'
                   ' of the following valid suffixes for variable fonts:'
                   f' {", ".join(VARFONT_SUFFIXES)}')
    else:
      style_names = '", "'.join(STATIC_STYLE_NAMES)
      yield FAIL, (f'Style name used in "{font}" is not canonical.'
                   ' You should rebuild the font using'
                   ' any of the following'
                   f' style names: "{style_names}".')
"def",
"com_google_fonts_check_canonical_filename",
"(",
"font",
")",
":",
"from",
"fontTools",
".",
"ttLib",
"import",
"TTFont",
"from",
"fontbakery",
".",
"profiles",
".",
"shared_conditions",
"import",
"is_variable_font",
"from",
"fontbakery",
".",
"constants",
"import",
"(",
"STATIC_STYLE_NAMES",
",",
"VARFONT_SUFFIXES",
")",
"if",
"canonical_stylename",
"(",
"font",
")",
":",
"yield",
"PASS",
",",
"f\"{font} is named canonically.\"",
"else",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"font",
")",
"and",
"is_variable_font",
"(",
"TTFont",
"(",
"font",
")",
")",
":",
"if",
"suffix",
"(",
"font",
")",
"in",
"STATIC_STYLE_NAMES",
":",
"yield",
"FAIL",
",",
"(",
"f'This is a variable font, but it is using'",
"' a naming scheme typical of a static font.'",
")",
"yield",
"FAIL",
",",
"(",
"'Please change the font filename to use one'",
"' of the following valid suffixes for variable fonts:'",
"f' {\", \".join(VARFONT_SUFFIXES)}'",
")",
"else",
":",
"style_names",
"=",
"'\", \"'",
".",
"join",
"(",
"STATIC_STYLE_NAMES",
")",
"yield",
"FAIL",
",",
"(",
"f'Style name used in \"{font}\" is not canonical.'",
"' You should rebuild the font using'",
"' any of the following'",
"f' style names: \"{style_names}\".'",
")"
] | 38.555556 | 17.972222 |
def message(self, message, thread_id=None):
    """Message the user associated with this profile.

    :param message: The message to send to this user.
    :param thread_id: The id of the thread to respond to, if any.
    """
    messager = helpers.Messager(self._session)
    result = messager.send(self.username, message, self.authcode, thread_id)
    # Refresh local profile state without forcing a remote reload.
    self.refresh(reload=False)
    return result
"def",
"message",
"(",
"self",
",",
"message",
",",
"thread_id",
"=",
"None",
")",
":",
"return_value",
"=",
"helpers",
".",
"Messager",
"(",
"self",
".",
"_session",
")",
".",
"send",
"(",
"self",
".",
"username",
",",
"message",
",",
"self",
".",
"authcode",
",",
"thread_id",
")",
"self",
".",
"refresh",
"(",
"reload",
"=",
"False",
")",
"return",
"return_value"
] | 38.818182 | 16.272727 |
def get_methods(self):
    """
    Return all method objects

    :rtype: a list of :class:`EncodedMethod` objects
    """
    # Lazily flatten every class's methods into one memoized list.
    if self.__cache_all_methods is None:
        collected = []
        for klass in self.get_classes():
            collected.extend(klass.get_methods())
        self.__cache_all_methods = collected
    return self.__cache_all_methods
"def",
"get_methods",
"(",
"self",
")",
":",
"if",
"self",
".",
"__cache_all_methods",
"is",
"None",
":",
"self",
".",
"__cache_all_methods",
"=",
"[",
"]",
"for",
"i",
"in",
"self",
".",
"get_classes",
"(",
")",
":",
"for",
"j",
"in",
"i",
".",
"get_methods",
"(",
")",
":",
"self",
".",
"__cache_all_methods",
".",
"append",
"(",
"j",
")",
"return",
"self",
".",
"__cache_all_methods"
] | 32.666667 | 8.5 |
def search(query, data, replacements=None):
    """Yield objects from 'data' that match the 'query'."""
    # Compile once; each entry is then evaluated against the compiled query.
    # (Kept as a generator function so evaluation stays lazy.)
    compiled = q.Query(query, params=replacements)
    for entry in data:
        if solve.solve(compiled, entry).value:
            yield entry
"def",
"search",
"(",
"query",
",",
"data",
",",
"replacements",
"=",
"None",
")",
":",
"query",
"=",
"q",
".",
"Query",
"(",
"query",
",",
"params",
"=",
"replacements",
")",
"for",
"entry",
"in",
"data",
":",
"if",
"solve",
".",
"solve",
"(",
"query",
",",
"entry",
")",
".",
"value",
":",
"yield",
"entry"
] | 39.5 | 8 |
def convert_json_str_to_dict(i):
    """
    Input:  {
              str                      - string (use ' instead of ", i.e. {'a':'b'}
                                         to avoid issues in CMD in Windows and Linux!)

              (skip_quote_replacement) - if 'yes', do not make above replacement
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0

              dict         - dict from json file
            }
    """
    s = i['str']

    if i.get('skip_quote_replacement', '') != 'yes':
        # Escape embedded double quotes, then promote single quotes to the
        # double quotes JSON requires.
        s = s.replace('"', '\\"')
        s = s.replace('\'', '"')

    try:
        # The 'encoding' keyword was ignored since Python 3.1 and removed
        # in Python 3.9, so it must not be passed to json.loads.
        d = json.loads(s)
    except Exception as e:
        return {'return': 1, 'error': 'problem converting text to json (' + format(e) + ')'}

    return {'return': 0, 'dict': d}
"def",
"convert_json_str_to_dict",
"(",
"i",
")",
":",
"s",
"=",
"i",
"[",
"'str'",
"]",
"if",
"i",
".",
"get",
"(",
"'skip_quote_replacement'",
",",
"''",
")",
"!=",
"'yes'",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\''",
",",
"'\"'",
")",
"try",
":",
"d",
"=",
"json",
".",
"loads",
"(",
"s",
",",
"encoding",
"=",
"'utf8'",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'problem converting text to json ('",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"return",
"{",
"'return'",
":",
"0",
",",
"'dict'",
":",
"d",
"}"
] | 29.266667 | 24.533333 |
def get_contract(self, contract_name: str) -> Dict:
    """Return the compiled artifact (ABI, BIN) for *contract_name*."""
    # Lookups are only meaningful once compilation has populated the map.
    assert self.contracts, 'ContractManager should have contracts compiled'
    compiled = self.contracts
    return compiled[contract_name]
"def",
"get_contract",
"(",
"self",
",",
"contract_name",
":",
"str",
")",
"->",
"Dict",
":",
"assert",
"self",
".",
"contracts",
",",
"'ContractManager should have contracts compiled'",
"return",
"self",
".",
"contracts",
"[",
"contract_name",
"]"
] | 57 | 13.5 |
def extract_tiff_thumbnail(self, thumb_ifd):
    """
    Extract uncompressed TIFF thumbnail.

    Take advantage of the pre-existing layout in the thumbnail IFD as
    much as possible.  The reconstructed TIFF is stored under the
    'TIFFThumbnail' tag.

    :param thumb_ifd: offset of the thumbnail IFD within the EXIF block.

    NOTE(review): the thumbnail is assembled as a str, so this is
    Python 2 code — on Python 3, file.read() returns bytes and the str
    concatenations below would fail; confirm target interpreter.
    """
    thumb = self.tags.get('Thumbnail Compression')
    if not thumb or thumb.printable != 'Uncompressed TIFF':
        return
    entries = self.s2n(thumb_ifd, 2)
    # this is header plus offset to IFD ...
    if self.endian == 'M':
        tiff = 'MM\x00*\x00\x00\x00\x08'
    else:
        tiff = 'II*\x00\x08\x00\x00\x00'
    # ... plus thumbnail IFD data plus a null "next IFD" pointer
    self.file.seek(self.offset + thumb_ifd)
    tiff += self.file.read(entries * 12 + 2) + '\x00\x00\x00\x00'
    # fix up large value offset pointers into data area
    for i in range(entries):
        # Each IFD entry is 12 bytes: tag(2), type(2), count(4), value/offset(4).
        entry = thumb_ifd + 2 + 12 * i
        tag = self.s2n(entry, 2)
        field_type = self.s2n(entry + 2, 2)
        type_length = FIELD_TYPES[field_type][0]
        count = self.s2n(entry + 4, 4)
        old_offset = self.s2n(entry + 8, 4)
        # start of the 4-byte pointer area in entry
        ptr = i * 12 + 18
        # remember strip offsets location
        # (tag 0x0111 is StripOffsets; 0x0117 StripByteCounts is read below)
        if tag == 0x0111:
            strip_off = ptr
            strip_len = count * type_length
        # is it in the data area?
        if count * type_length > 4:
            # update offset pointer (nasty "strings are immutable" crap)
            # should be able to say "tiff[ptr:ptr+4]=newoff"
            newoff = len(tiff)
            tiff = tiff[:ptr] + self.n2s(newoff, 4) + tiff[ptr + 4:]
            # remember strip offsets location
            if tag == 0x0111:
                strip_off = newoff
                strip_len = 4
            # get original data and store it
            self.file.seek(self.offset + old_offset)
            tiff += self.file.read(count * type_length)
    # add pixel strips and update strip offset info
    # NOTE(review): strip_off/strip_len are only bound if a 0x0111 entry
    # was seen above — assumes every thumbnail IFD has StripOffsets.
    old_offsets = self.tags['Thumbnail StripOffsets'].values
    old_counts = self.tags['Thumbnail StripByteCounts'].values
    for i in range(len(old_offsets)):
        # update offset pointer (more nasty "strings are immutable" crap)
        offset = self.n2s(len(tiff), strip_len)
        tiff = tiff[:strip_off] + offset + tiff[strip_off + strip_len:]
        strip_off += strip_len
        # add pixel strip to end
        self.file.seek(self.offset + old_offsets[i])
        tiff += self.file.read(old_counts[i])
    self.tags['TIFFThumbnail'] = tiff
"def",
"extract_tiff_thumbnail",
"(",
"self",
",",
"thumb_ifd",
")",
":",
"thumb",
"=",
"self",
".",
"tags",
".",
"get",
"(",
"'Thumbnail Compression'",
")",
"if",
"not",
"thumb",
"or",
"thumb",
".",
"printable",
"!=",
"'Uncompressed TIFF'",
":",
"return",
"entries",
"=",
"self",
".",
"s2n",
"(",
"thumb_ifd",
",",
"2",
")",
"# this is header plus offset to IFD ...",
"if",
"self",
".",
"endian",
"==",
"'M'",
":",
"tiff",
"=",
"'MM\\x00*\\x00\\x00\\x00\\x08'",
"else",
":",
"tiff",
"=",
"'II*\\x00\\x08\\x00\\x00\\x00'",
"# ... plus thumbnail IFD data plus a null \"next IFD\" pointer",
"self",
".",
"file",
".",
"seek",
"(",
"self",
".",
"offset",
"+",
"thumb_ifd",
")",
"tiff",
"+=",
"self",
".",
"file",
".",
"read",
"(",
"entries",
"*",
"12",
"+",
"2",
")",
"+",
"'\\x00\\x00\\x00\\x00'",
"# fix up large value offset pointers into data area",
"for",
"i",
"in",
"range",
"(",
"entries",
")",
":",
"entry",
"=",
"thumb_ifd",
"+",
"2",
"+",
"12",
"*",
"i",
"tag",
"=",
"self",
".",
"s2n",
"(",
"entry",
",",
"2",
")",
"field_type",
"=",
"self",
".",
"s2n",
"(",
"entry",
"+",
"2",
",",
"2",
")",
"type_length",
"=",
"FIELD_TYPES",
"[",
"field_type",
"]",
"[",
"0",
"]",
"count",
"=",
"self",
".",
"s2n",
"(",
"entry",
"+",
"4",
",",
"4",
")",
"old_offset",
"=",
"self",
".",
"s2n",
"(",
"entry",
"+",
"8",
",",
"4",
")",
"# start of the 4-byte pointer area in entry",
"ptr",
"=",
"i",
"*",
"12",
"+",
"18",
"# remember strip offsets location",
"if",
"tag",
"==",
"0x0111",
":",
"strip_off",
"=",
"ptr",
"strip_len",
"=",
"count",
"*",
"type_length",
"# is it in the data area?",
"if",
"count",
"*",
"type_length",
">",
"4",
":",
"# update offset pointer (nasty \"strings are immutable\" crap)",
"# should be able to say \"tiff[ptr:ptr+4]=newoff\"",
"newoff",
"=",
"len",
"(",
"tiff",
")",
"tiff",
"=",
"tiff",
"[",
":",
"ptr",
"]",
"+",
"self",
".",
"n2s",
"(",
"newoff",
",",
"4",
")",
"+",
"tiff",
"[",
"ptr",
"+",
"4",
":",
"]",
"# remember strip offsets location",
"if",
"tag",
"==",
"0x0111",
":",
"strip_off",
"=",
"newoff",
"strip_len",
"=",
"4",
"# get original data and store it",
"self",
".",
"file",
".",
"seek",
"(",
"self",
".",
"offset",
"+",
"old_offset",
")",
"tiff",
"+=",
"self",
".",
"file",
".",
"read",
"(",
"count",
"*",
"type_length",
")",
"# add pixel strips and update strip offset info",
"old_offsets",
"=",
"self",
".",
"tags",
"[",
"'Thumbnail StripOffsets'",
"]",
".",
"values",
"old_counts",
"=",
"self",
".",
"tags",
"[",
"'Thumbnail StripByteCounts'",
"]",
".",
"values",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"old_offsets",
")",
")",
":",
"# update offset pointer (more nasty \"strings are immutable\" crap)",
"offset",
"=",
"self",
".",
"n2s",
"(",
"len",
"(",
"tiff",
")",
",",
"strip_len",
")",
"tiff",
"=",
"tiff",
"[",
":",
"strip_off",
"]",
"+",
"offset",
"+",
"tiff",
"[",
"strip_off",
"+",
"strip_len",
":",
"]",
"strip_off",
"+=",
"strip_len",
"# add pixel strip to end",
"self",
".",
"file",
".",
"seek",
"(",
"self",
".",
"offset",
"+",
"old_offsets",
"[",
"i",
"]",
")",
"tiff",
"+=",
"self",
".",
"file",
".",
"read",
"(",
"old_counts",
"[",
"i",
"]",
")",
"self",
".",
"tags",
"[",
"'TIFFThumbnail'",
"]",
"=",
"tiff"
] | 42.387097 | 14.645161 |
def vertical_padding(self, padding=None):
"""Returns or sets (if a value is provided) the chart's vertical
padding. This determines how much space will be above and below the
display area, as a proportion of overall height, and should be a value
between 0 and 0.5
:param float padding: If given, the chart's vertical_padding\
will be set to this.
:raises ValueError: if a value outside of 0 < n < 0.5 is given.
:rtype: float"""
if padding is None:
return self._vertical_padding
else:
if not isinstance(padding, float):
raise TypeError("padding must be float, not '%s'" % str(padding))
if not 0 < padding < 0.5:
raise ValueError(
"padding must be between 0 and 0.5 (not inclusive), not '%s'" % str(padding)
)
self._vertical_padding = padding | [
"def",
"vertical_padding",
"(",
"self",
",",
"padding",
"=",
"None",
")",
":",
"if",
"padding",
"is",
"None",
":",
"return",
"self",
".",
"_vertical_padding",
"else",
":",
"if",
"not",
"isinstance",
"(",
"padding",
",",
"float",
")",
":",
"raise",
"TypeError",
"(",
"\"padding must be float, not '%s'\"",
"%",
"str",
"(",
"padding",
")",
")",
"if",
"not",
"0",
"<",
"padding",
"<",
"0.5",
":",
"raise",
"ValueError",
"(",
"\"padding must be between 0 and 0.5 (not inclusive), not '%s'\"",
"%",
"str",
"(",
"padding",
")",
")",
"self",
".",
"_vertical_padding",
"=",
"padding"
] | 43.571429 | 19.952381 |
def bytes2iec(size, compact=False):
""" Convert a size value in bytes to its equivalent in IEC notation.
See `<http://physics.nist.gov/cuu/Units/binary.html>`_.
Parameters:
size (int): Number of bytes.
compact (bool): If ``True``, the result contains no spaces.
Return:
String representation of ``size``.
Raises:
ValueError: Negative or out of bounds value for ``size``.
"""
postfn = lambda text: text.replace(' ', '') if compact else text
if size < 0:
raise ValueError("Negative byte size value {}".format(size))
if size < 1024:
return postfn('{:4d} bytes'.format(size))
scaled = size
for iec_unit in IEC_UNITS[1:]:
scaled /= 1024.0
if scaled < 1024:
return postfn('{:6.1f} {}'.format(scaled, iec_unit))
raise ValueError("Byte size value {} out of bounds".format(size)) | [
"def",
"bytes2iec",
"(",
"size",
",",
"compact",
"=",
"False",
")",
":",
"postfn",
"=",
"lambda",
"text",
":",
"text",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"if",
"compact",
"else",
"text",
"if",
"size",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Negative byte size value {}\"",
".",
"format",
"(",
"size",
")",
")",
"if",
"size",
"<",
"1024",
":",
"return",
"postfn",
"(",
"'{:4d} bytes'",
".",
"format",
"(",
"size",
")",
")",
"scaled",
"=",
"size",
"for",
"iec_unit",
"in",
"IEC_UNITS",
"[",
"1",
":",
"]",
":",
"scaled",
"/=",
"1024.0",
"if",
"scaled",
"<",
"1024",
":",
"return",
"postfn",
"(",
"'{:6.1f} {}'",
".",
"format",
"(",
"scaled",
",",
"iec_unit",
")",
")",
"raise",
"ValueError",
"(",
"\"Byte size value {} out of bounds\"",
".",
"format",
"(",
"size",
")",
")"
] | 32.321429 | 22.428571 |
def example_add_line_to_file():
""" Different methods to append a given line to the file, all work the same. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.add('foo')
my_file.append('bar')
# Add a new line to my_file that contains the word 'lol' and print True|False if my_file was changed.
print(my_file + 'lol')
# Add line even if it already exists in the file.
my_file.unique = False
my_file.add('foo') | [
"def",
"example_add_line_to_file",
"(",
")",
":",
"my_file",
"=",
"FileAsObj",
"(",
"'/tmp/example_file.txt'",
")",
"my_file",
".",
"add",
"(",
"'foo'",
")",
"my_file",
".",
"append",
"(",
"'bar'",
")",
"# Add a new line to my_file that contains the word 'lol' and print True|False if my_file was changed.",
"print",
"(",
"my_file",
"+",
"'lol'",
")",
"# Add line even if it already exists in the file.",
"my_file",
".",
"unique",
"=",
"False",
"my_file",
".",
"add",
"(",
"'foo'",
")"
] | 44.2 | 17.4 |
def add_actions(target, actions, insert_before=None):
"""Add actions to a QMenu or a QToolBar."""
previous_action = None
target_actions = list(target.actions())
if target_actions:
previous_action = target_actions[-1]
if previous_action.isSeparator():
previous_action = None
for action in actions:
if (action is None) and (previous_action is not None):
if insert_before is None:
target.addSeparator()
else:
target.insertSeparator(insert_before)
elif isinstance(action, QMenu):
if insert_before is None:
target.addMenu(action)
else:
target.insertMenu(insert_before, action)
elif isinstance(action, QAction):
if isinstance(action, SpyderAction):
if isinstance(target, QMenu) or not isinstance(target, QToolBar):
try:
action = action.no_icon_action
except RuntimeError:
continue
if insert_before is None:
# This is needed in order to ignore adding an action whose
# wrapped C/C++ object has been deleted. See issue 5074
try:
target.addAction(action)
except RuntimeError:
continue
else:
target.insertAction(insert_before, action)
previous_action = action | [
"def",
"add_actions",
"(",
"target",
",",
"actions",
",",
"insert_before",
"=",
"None",
")",
":",
"previous_action",
"=",
"None",
"target_actions",
"=",
"list",
"(",
"target",
".",
"actions",
"(",
")",
")",
"if",
"target_actions",
":",
"previous_action",
"=",
"target_actions",
"[",
"-",
"1",
"]",
"if",
"previous_action",
".",
"isSeparator",
"(",
")",
":",
"previous_action",
"=",
"None",
"for",
"action",
"in",
"actions",
":",
"if",
"(",
"action",
"is",
"None",
")",
"and",
"(",
"previous_action",
"is",
"not",
"None",
")",
":",
"if",
"insert_before",
"is",
"None",
":",
"target",
".",
"addSeparator",
"(",
")",
"else",
":",
"target",
".",
"insertSeparator",
"(",
"insert_before",
")",
"elif",
"isinstance",
"(",
"action",
",",
"QMenu",
")",
":",
"if",
"insert_before",
"is",
"None",
":",
"target",
".",
"addMenu",
"(",
"action",
")",
"else",
":",
"target",
".",
"insertMenu",
"(",
"insert_before",
",",
"action",
")",
"elif",
"isinstance",
"(",
"action",
",",
"QAction",
")",
":",
"if",
"isinstance",
"(",
"action",
",",
"SpyderAction",
")",
":",
"if",
"isinstance",
"(",
"target",
",",
"QMenu",
")",
"or",
"not",
"isinstance",
"(",
"target",
",",
"QToolBar",
")",
":",
"try",
":",
"action",
"=",
"action",
".",
"no_icon_action",
"except",
"RuntimeError",
":",
"continue",
"if",
"insert_before",
"is",
"None",
":",
"# This is needed in order to ignore adding an action whose\r",
"# wrapped C/C++ object has been deleted. See issue 5074\r",
"try",
":",
"target",
".",
"addAction",
"(",
"action",
")",
"except",
"RuntimeError",
":",
"continue",
"else",
":",
"target",
".",
"insertAction",
"(",
"insert_before",
",",
"action",
")",
"previous_action",
"=",
"action"
] | 41.694444 | 11.805556 |
def jwt_proccessor():
"""Context processor for jwt."""
def jwt():
"""Context processor function to generate jwt."""
token = current_accounts.jwt_creation_factory()
return Markup(
render_template(
current_app.config['ACCOUNTS_JWT_DOM_TOKEN_TEMPLATE'],
token=token
)
)
def jwt_token():
"""Context processor function to generate jwt."""
return current_accounts.jwt_creation_factory()
return {
'jwt': jwt,
'jwt_token': jwt_token,
} | [
"def",
"jwt_proccessor",
"(",
")",
":",
"def",
"jwt",
"(",
")",
":",
"\"\"\"Context processor function to generate jwt.\"\"\"",
"token",
"=",
"current_accounts",
".",
"jwt_creation_factory",
"(",
")",
"return",
"Markup",
"(",
"render_template",
"(",
"current_app",
".",
"config",
"[",
"'ACCOUNTS_JWT_DOM_TOKEN_TEMPLATE'",
"]",
",",
"token",
"=",
"token",
")",
")",
"def",
"jwt_token",
"(",
")",
":",
"\"\"\"Context processor function to generate jwt.\"\"\"",
"return",
"current_accounts",
".",
"jwt_creation_factory",
"(",
")",
"return",
"{",
"'jwt'",
":",
"jwt",
",",
"'jwt_token'",
":",
"jwt_token",
",",
"}"
] | 27.5 | 19.9 |
def dispatch(self, message):
"""
dispatch
"""
handlers = []
for handler in self.handlers:
if handler["method"] != message.method:
continue
handlers.append(handler)
return handlers | [
"def",
"dispatch",
"(",
"self",
",",
"message",
")",
":",
"handlers",
"=",
"[",
"]",
"for",
"handler",
"in",
"self",
".",
"handlers",
":",
"if",
"handler",
"[",
"\"method\"",
"]",
"!=",
"message",
".",
"method",
":",
"continue",
"handlers",
".",
"append",
"(",
"handler",
")",
"return",
"handlers"
] | 23.454545 | 13.272727 |
def show(cls, msg=None):
"""
Show the log interface on the page.
"""
if msg:
cls.add(msg)
cls.overlay.show()
cls.overlay.el.bind("click", lambda x: cls.hide())
cls.el.style.display = "block"
cls.bind() | [
"def",
"show",
"(",
"cls",
",",
"msg",
"=",
"None",
")",
":",
"if",
"msg",
":",
"cls",
".",
"add",
"(",
"msg",
")",
"cls",
".",
"overlay",
".",
"show",
"(",
")",
"cls",
".",
"overlay",
".",
"el",
".",
"bind",
"(",
"\"click\"",
",",
"lambda",
"x",
":",
"cls",
".",
"hide",
"(",
")",
")",
"cls",
".",
"el",
".",
"style",
".",
"display",
"=",
"\"block\"",
"cls",
".",
"bind",
"(",
")"
] | 22.333333 | 16.333333 |
def add_arg(self, arg):
""" Add an argument
"""
if not isinstance(arg, File):
arg = str(arg)
self._args += [arg] | [
"def",
"add_arg",
"(",
"self",
",",
"arg",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"File",
")",
":",
"arg",
"=",
"str",
"(",
"arg",
")",
"self",
".",
"_args",
"+=",
"[",
"arg",
"]"
] | 21.571429 | 12.428571 |
def get_objects(self, subject=None, predicate=None):
"""Returns a generator of objects that correspond to the
specified subjects and predicates."""
for triple in self.triples:
# Filter out non-matches
if ((subject and triple['subject'] != subject) or
(predicate and triple['predicate'] != predicate)):
continue
yield triple['object'] | [
"def",
"get_objects",
"(",
"self",
",",
"subject",
"=",
"None",
",",
"predicate",
"=",
"None",
")",
":",
"for",
"triple",
"in",
"self",
".",
"triples",
":",
"# Filter out non-matches",
"if",
"(",
"(",
"subject",
"and",
"triple",
"[",
"'subject'",
"]",
"!=",
"subject",
")",
"or",
"(",
"predicate",
"and",
"triple",
"[",
"'predicate'",
"]",
"!=",
"predicate",
")",
")",
":",
"continue",
"yield",
"triple",
"[",
"'object'",
"]"
] | 34.75 | 17.5 |
def calc_asymptotic_covariance(hessian, fisher_info_matrix):
"""
Parameters
----------
hessian : 2D ndarray.
It should have shape `(num_vars, num_vars)`. It is the matrix of second
derivatives of the total loss across the dataset, with respect to each
pair of coefficients being estimated.
fisher_info_matrix : 2D ndarray.
It should have a shape of `(num_vars, num_vars)`. It is the
approximation of the negative of the expected hessian formed by taking
the outer product of (each observation's gradient of the loss function)
with itself, and then summing across all observations.
Returns
-------
huber_white_matrix : 2D ndarray.
Will have shape `(num_vars, num_vars)`. The entries in the returned
matrix are calculated by the following formula:
`hess_inverse * fisher_info_matrix * hess_inverse`.
"""
# Calculate the inverse of the hessian
hess_inv = scipy.linalg.inv(hessian)
return np.dot(hess_inv, np.dot(fisher_info_matrix, hess_inv)) | [
"def",
"calc_asymptotic_covariance",
"(",
"hessian",
",",
"fisher_info_matrix",
")",
":",
"# Calculate the inverse of the hessian",
"hess_inv",
"=",
"scipy",
".",
"linalg",
".",
"inv",
"(",
"hessian",
")",
"return",
"np",
".",
"dot",
"(",
"hess_inv",
",",
"np",
".",
"dot",
"(",
"fisher_info_matrix",
",",
"hess_inv",
")",
")"
] | 41.84 | 21.52 |
async def _readline(self, timeout: NumType = None):
"""
Wraps reader.readuntil() with error handling.
"""
if self._stream_reader is None or self._stream_writer is None:
raise SMTPServerDisconnected("Client not connected")
read_task = asyncio.Task(
self._stream_reader.readuntil(separator=b"\n"), loop=self._loop
)
try:
line = await asyncio.wait_for(
read_task, timeout, loop=self._loop
) # type: bytes
except asyncio.LimitOverrunError:
raise SMTPResponseException(
SMTPStatus.unrecognized_command, "Line too long."
)
except asyncio.TimeoutError as exc:
raise SMTPTimeoutError(str(exc))
except asyncio.IncompleteReadError as exc:
if exc.partial == b"":
# if we got only an EOF, raise SMTPServerDisconnected
raise SMTPServerDisconnected("Unexpected EOF received")
else:
# otherwise, close our connection but try to parse the
# response anyways
self._stream_writer.close()
line = exc.partial
return line | [
"async",
"def",
"_readline",
"(",
"self",
",",
"timeout",
":",
"NumType",
"=",
"None",
")",
":",
"if",
"self",
".",
"_stream_reader",
"is",
"None",
"or",
"self",
".",
"_stream_writer",
"is",
"None",
":",
"raise",
"SMTPServerDisconnected",
"(",
"\"Client not connected\"",
")",
"read_task",
"=",
"asyncio",
".",
"Task",
"(",
"self",
".",
"_stream_reader",
".",
"readuntil",
"(",
"separator",
"=",
"b\"\\n\"",
")",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"try",
":",
"line",
"=",
"await",
"asyncio",
".",
"wait_for",
"(",
"read_task",
",",
"timeout",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"# type: bytes",
"except",
"asyncio",
".",
"LimitOverrunError",
":",
"raise",
"SMTPResponseException",
"(",
"SMTPStatus",
".",
"unrecognized_command",
",",
"\"Line too long.\"",
")",
"except",
"asyncio",
".",
"TimeoutError",
"as",
"exc",
":",
"raise",
"SMTPTimeoutError",
"(",
"str",
"(",
"exc",
")",
")",
"except",
"asyncio",
".",
"IncompleteReadError",
"as",
"exc",
":",
"if",
"exc",
".",
"partial",
"==",
"b\"\"",
":",
"# if we got only an EOF, raise SMTPServerDisconnected",
"raise",
"SMTPServerDisconnected",
"(",
"\"Unexpected EOF received\"",
")",
"else",
":",
"# otherwise, close our connection but try to parse the",
"# response anyways",
"self",
".",
"_stream_writer",
".",
"close",
"(",
")",
"line",
"=",
"exc",
".",
"partial",
"return",
"line"
] | 38.612903 | 16.419355 |
def generate_user(self):
'''generate a new user in the database, still session based so we
create a new identifier. This function is called from the users new
entrypoint, and it assumes we want a user generated with a token.
'''
token = str(uuid.uuid4())
return self.generate_subid(token=token, return_user=True) | [
"def",
"generate_user",
"(",
"self",
")",
":",
"token",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"return",
"self",
".",
"generate_subid",
"(",
"token",
"=",
"token",
",",
"return_user",
"=",
"True",
")"
] | 48.142857 | 25.285714 |
def import_class(class_path):
"""
Returns class from the given path.
For example, in order to get class located at
``mypackage.subpackage.MyClass``:
try:
hgrepo = import_class('mypackage.subpackage.MyClass')
except ImportError:
# hadle error
"""
splitted = class_path.split('.')
mod_path = '.'.join(splitted[:-1])
class_name = splitted[-1]
# import may throw ImportError
class_mod = __import__(mod_path, {}, {}, [class_name])
try:
cls = getattr(class_mod, class_name)
except AttributeError:
raise ImportError("Couldn't import %r" % class_path)
return cls | [
"def",
"import_class",
"(",
"class_path",
")",
":",
"splitted",
"=",
"class_path",
".",
"split",
"(",
"'.'",
")",
"mod_path",
"=",
"'.'",
".",
"join",
"(",
"splitted",
"[",
":",
"-",
"1",
"]",
")",
"class_name",
"=",
"splitted",
"[",
"-",
"1",
"]",
"# import may throw ImportError",
"class_mod",
"=",
"__import__",
"(",
"mod_path",
",",
"{",
"}",
",",
"{",
"}",
",",
"[",
"class_name",
"]",
")",
"try",
":",
"cls",
"=",
"getattr",
"(",
"class_mod",
",",
"class_name",
")",
"except",
"AttributeError",
":",
"raise",
"ImportError",
"(",
"\"Couldn't import %r\"",
"%",
"class_path",
")",
"return",
"cls"
] | 29.227273 | 14.681818 |
def _compute_threshold(x):
"""
ref: https://github.com/XJTUWYD/TWN
Computing the threshold.
"""
x_sum = tf.reduce_sum(tf.abs(x), reduction_indices=None, keepdims=False, name=None)
threshold = tf.div(x_sum, tf.cast(tf.size(x), tf.float32), name=None)
threshold = tf.multiply(0.7, threshold, name=None)
return threshold | [
"def",
"_compute_threshold",
"(",
"x",
")",
":",
"x_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"x",
")",
",",
"reduction_indices",
"=",
"None",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
"threshold",
"=",
"tf",
".",
"div",
"(",
"x_sum",
",",
"tf",
".",
"cast",
"(",
"tf",
".",
"size",
"(",
"x",
")",
",",
"tf",
".",
"float32",
")",
",",
"name",
"=",
"None",
")",
"threshold",
"=",
"tf",
".",
"multiply",
"(",
"0.7",
",",
"threshold",
",",
"name",
"=",
"None",
")",
"return",
"threshold"
] | 37.888889 | 15.666667 |
def main():
"""
Wrapper for OGR
"""
parser = argparse.ArgumentParser(
description='Command line interface to python-ontobio.golr library'
"""
Provides command line interface onto the ontobio.golr python library, a high level
abstraction layer over Monarch and GO solr indices.
""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-r', '--resource', type=str, required=False,
help='Name of ontology')
parser.add_argument('-d', '--display', type=str, default='o', required=False,
help='What to display: some combination of o, s, r. o=object ancestors, s=subject ancestors. If r present, draws s<->o relations ')
parser.add_argument('-o', '--outfile', type=str, required=False,
help='Path to output file')
parser.add_argument('-t', '--to', type=str, required=False,
help='Output to (tree, dot, ...)')
parser.add_argument('-C', '--category', type=str, required=False,
help='Category')
parser.add_argument('-c', '--container_properties', nargs='*', type=str, required=False,
help='Properties to nest in graph')
parser.add_argument('-s', '--species', type=str, required=False,
help='NCBITaxon ID')
parser.add_argument('-e', '--evidence', type=str, required=False,
help='ECO ID')
parser.add_argument('-G', '--graph', type=str, default='', required=False,
help='Graph type. m=minimal')
parser.add_argument('-S', '--slim', nargs='*', type=str, required=False,
help='Slim IDs')
parser.add_argument('-M', '--mapids', type=str, required=False,
help='Map identifiers to this ID space, e.g. ENSEMBL')
parser.add_argument('-p', '--properties', nargs='*', type=str, required=False,
help='Properties')
parser.add_argument('-v', '--verbosity', default=0, action='count',
help='Increase output verbosity')
parser.add_argument('ids',nargs='*')
# ontology
args = parser.parse_args()
if args.verbosity >= 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
logging.info("Welcome!")
ont = None
g = None
handle = args.resource
if handle is not None:
logging.info("Handle: {}".format(handle))
factory = OntologyFactory()
logging.info("Factory: {}".format(factory))
ont = factory.create(handle)
logging.info("Created ont: {}".format(ont))
g = ont.get_filtered_graph(relations=args.properties)
w = GraphRenderer.create(args.to)
nodes = set()
display = args.display
# query all IDs, gathering associations
assocs = []
for id in args.ids:
this_assocs, facets = search_golr_wrap(id,
args.category,
subject_taxon=args.species,
rows=1000,
slim=args.slim,
evidence=args.evidence,
map_identifiers=args.mapids)
assocs += this_assocs
logging.info("Num assocs: {}".format(len(assocs)))
for a in assocs:
print("{}\t{}\t{}\t{}".format(a['subject'],
a['subject_label'],
a['relation'],
";".join(a['objects'])))
if ont is not None:
# gather all ontology classes used
for a in assocs:
objs = a['objects']
if display.find('r') > -1:
pass
if display.find('o') > -1:
for obj in objs:
nodes.add(obj)
if ont is not None:
nodes.update(ont.ancestors(obj))
if display.find('s') > -1:
sub = a['subject']
nodes.add(sub)
if ont is not None:
nodes.update(ont.ancestors(sub))
# create a subgraph
subg = g.subgraph(nodes)
# optionally add edges between subj and obj nodes
if display.find('r') > -1:
for a in assocs:
rel = a['relation']
sub = a['subject']
objs = a['objects']
if rel is None:
rel = 'rdfs:seeAlso'
for obj in objs:
logging.info("Adding assoc rel {} {} {}".format(sub,obj,rel))
subg.add_edge(obj,sub,pred=rel)
# display tree/graph
show_graph(subg, nodes, objs, args) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Command line interface to python-ontobio.golr library'",
"\"\"\"\n\n Provides command line interface onto the ontobio.golr python library, a high level\n abstraction layer over Monarch and GO solr indices.\n \"\"\"",
",",
"formatter_class",
"=",
"argparse",
".",
"RawTextHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--resource'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Name of ontology'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--display'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'o'",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'What to display: some combination of o, s, r. o=object ancestors, s=subject ancestors. If r present, draws s<->o relations '",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--outfile'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Path to output file'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--to'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Output to (tree, dot, ...)'",
")",
"parser",
".",
"add_argument",
"(",
"'-C'",
",",
"'--category'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Category'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--container_properties'",
",",
"nargs",
"=",
"'*'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Properties to nest in graph'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--species'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'NCBITaxon ID'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--evidence'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'ECO ID'",
")",
"parser",
".",
"add_argument",
"(",
"'-G'",
",",
"'--graph'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Graph type. m=minimal'",
")",
"parser",
".",
"add_argument",
"(",
"'-S'",
",",
"'--slim'",
",",
"nargs",
"=",
"'*'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Slim IDs'",
")",
"parser",
".",
"add_argument",
"(",
"'-M'",
",",
"'--mapids'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Map identifiers to this ID space, e.g. ENSEMBL'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--properties'",
",",
"nargs",
"=",
"'*'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Properties'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbosity'",
",",
"default",
"=",
"0",
",",
"action",
"=",
"'count'",
",",
"help",
"=",
"'Increase output verbosity'",
")",
"parser",
".",
"add_argument",
"(",
"'ids'",
",",
"nargs",
"=",
"'*'",
")",
"# ontology",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"verbosity",
">=",
"2",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"elif",
"args",
".",
"verbosity",
"==",
"1",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"WARNING",
")",
"logging",
".",
"info",
"(",
"\"Welcome!\"",
")",
"ont",
"=",
"None",
"g",
"=",
"None",
"handle",
"=",
"args",
".",
"resource",
"if",
"handle",
"is",
"not",
"None",
":",
"logging",
".",
"info",
"(",
"\"Handle: {}\"",
".",
"format",
"(",
"handle",
")",
")",
"factory",
"=",
"OntologyFactory",
"(",
")",
"logging",
".",
"info",
"(",
"\"Factory: {}\"",
".",
"format",
"(",
"factory",
")",
")",
"ont",
"=",
"factory",
".",
"create",
"(",
"handle",
")",
"logging",
".",
"info",
"(",
"\"Created ont: {}\"",
".",
"format",
"(",
"ont",
")",
")",
"g",
"=",
"ont",
".",
"get_filtered_graph",
"(",
"relations",
"=",
"args",
".",
"properties",
")",
"w",
"=",
"GraphRenderer",
".",
"create",
"(",
"args",
".",
"to",
")",
"nodes",
"=",
"set",
"(",
")",
"display",
"=",
"args",
".",
"display",
"# query all IDs, gathering associations",
"assocs",
"=",
"[",
"]",
"for",
"id",
"in",
"args",
".",
"ids",
":",
"this_assocs",
",",
"facets",
"=",
"search_golr_wrap",
"(",
"id",
",",
"args",
".",
"category",
",",
"subject_taxon",
"=",
"args",
".",
"species",
",",
"rows",
"=",
"1000",
",",
"slim",
"=",
"args",
".",
"slim",
",",
"evidence",
"=",
"args",
".",
"evidence",
",",
"map_identifiers",
"=",
"args",
".",
"mapids",
")",
"assocs",
"+=",
"this_assocs",
"logging",
".",
"info",
"(",
"\"Num assocs: {}\"",
".",
"format",
"(",
"len",
"(",
"assocs",
")",
")",
")",
"for",
"a",
"in",
"assocs",
":",
"print",
"(",
"\"{}\\t{}\\t{}\\t{}\"",
".",
"format",
"(",
"a",
"[",
"'subject'",
"]",
",",
"a",
"[",
"'subject_label'",
"]",
",",
"a",
"[",
"'relation'",
"]",
",",
"\";\"",
".",
"join",
"(",
"a",
"[",
"'objects'",
"]",
")",
")",
")",
"if",
"ont",
"is",
"not",
"None",
":",
"# gather all ontology classes used",
"for",
"a",
"in",
"assocs",
":",
"objs",
"=",
"a",
"[",
"'objects'",
"]",
"if",
"display",
".",
"find",
"(",
"'r'",
")",
">",
"-",
"1",
":",
"pass",
"if",
"display",
".",
"find",
"(",
"'o'",
")",
">",
"-",
"1",
":",
"for",
"obj",
"in",
"objs",
":",
"nodes",
".",
"add",
"(",
"obj",
")",
"if",
"ont",
"is",
"not",
"None",
":",
"nodes",
".",
"update",
"(",
"ont",
".",
"ancestors",
"(",
"obj",
")",
")",
"if",
"display",
".",
"find",
"(",
"'s'",
")",
">",
"-",
"1",
":",
"sub",
"=",
"a",
"[",
"'subject'",
"]",
"nodes",
".",
"add",
"(",
"sub",
")",
"if",
"ont",
"is",
"not",
"None",
":",
"nodes",
".",
"update",
"(",
"ont",
".",
"ancestors",
"(",
"sub",
")",
")",
"# create a subgraph",
"subg",
"=",
"g",
".",
"subgraph",
"(",
"nodes",
")",
"# optionally add edges between subj and obj nodes",
"if",
"display",
".",
"find",
"(",
"'r'",
")",
">",
"-",
"1",
":",
"for",
"a",
"in",
"assocs",
":",
"rel",
"=",
"a",
"[",
"'relation'",
"]",
"sub",
"=",
"a",
"[",
"'subject'",
"]",
"objs",
"=",
"a",
"[",
"'objects'",
"]",
"if",
"rel",
"is",
"None",
":",
"rel",
"=",
"'rdfs:seeAlso'",
"for",
"obj",
"in",
"objs",
":",
"logging",
".",
"info",
"(",
"\"Adding assoc rel {} {} {}\"",
".",
"format",
"(",
"sub",
",",
"obj",
",",
"rel",
")",
")",
"subg",
".",
"add_edge",
"(",
"obj",
",",
"sub",
",",
"pred",
"=",
"rel",
")",
"# display tree/graph",
"show_graph",
"(",
"subg",
",",
"nodes",
",",
"objs",
",",
"args",
")"
] | 37.430769 | 20.253846 |
def info(args):
" Show information about site. "
site = find_site(args.PATH)
print_header("%s -- install information" % site.get_name())
LOGGER.debug(site.get_info(full=True))
return True | [
"def",
"info",
"(",
"args",
")",
":",
"site",
"=",
"find_site",
"(",
"args",
".",
"PATH",
")",
"print_header",
"(",
"\"%s -- install information\"",
"%",
"site",
".",
"get_name",
"(",
")",
")",
"LOGGER",
".",
"debug",
"(",
"site",
".",
"get_info",
"(",
"full",
"=",
"True",
")",
")",
"return",
"True"
] | 28.857143 | 18.285714 |
def AddKeywordsForName(self, name, keywords):
"""Associates keywords with name.
Records that keywords are associated with name.
Args:
name: A name which should be associated with some keywords.
keywords: A collection of keywords to associate with name.
"""
data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords) | [
"def",
"AddKeywordsForName",
"(",
"self",
",",
"name",
",",
"keywords",
")",
":",
"data_store",
".",
"DB",
".",
"IndexAddKeywordsForName",
"(",
"self",
".",
"urn",
",",
"name",
",",
"keywords",
")"
] | 34.5 | 20.3 |
def default(python=None, runas=None):
'''
Returns or sets the currently defined default python.
python=None
The version to set as the default. Should match one of the versions
listed by :mod:`pyenv.versions <salt.modules.pyenv.versions>`. Leave
blank to return the current default.
CLI Example:
.. code-block:: bash
salt '*' pyenv.default
salt '*' pyenv.default 2.0.0-p0
'''
if python:
_pyenv_exec('global', python, runas=runas)
return True
else:
ret = _pyenv_exec('global', runas=runas)
return '' if ret is False else ret.strip() | [
"def",
"default",
"(",
"python",
"=",
"None",
",",
"runas",
"=",
"None",
")",
":",
"if",
"python",
":",
"_pyenv_exec",
"(",
"'global'",
",",
"python",
",",
"runas",
"=",
"runas",
")",
"return",
"True",
"else",
":",
"ret",
"=",
"_pyenv_exec",
"(",
"'global'",
",",
"runas",
"=",
"runas",
")",
"return",
"''",
"if",
"ret",
"is",
"False",
"else",
"ret",
".",
"strip",
"(",
")"
] | 28.045455 | 22.863636 |
def transition(trname='', field='', check=None, before=None, after=None):
"""Decorator to declare a function as a transition implementation."""
if is_callable(trname):
raise ValueError(
"The @transition decorator should be called as "
"@transition(['transition_name'], **kwargs)")
if check or before or after:
warnings.warn(
"The use of check=, before= and after= in @transition decorators is "
"deprecated in favor of @transition_check, @before_transition and "
"@after_transition decorators.",
DeprecationWarning,
stacklevel=2)
return TransitionWrapper(trname, field=field, check=check, before=before, after=after) | [
"def",
"transition",
"(",
"trname",
"=",
"''",
",",
"field",
"=",
"''",
",",
"check",
"=",
"None",
",",
"before",
"=",
"None",
",",
"after",
"=",
"None",
")",
":",
"if",
"is_callable",
"(",
"trname",
")",
":",
"raise",
"ValueError",
"(",
"\"The @transition decorator should be called as \"",
"\"@transition(['transition_name'], **kwargs)\"",
")",
"if",
"check",
"or",
"before",
"or",
"after",
":",
"warnings",
".",
"warn",
"(",
"\"The use of check=, before= and after= in @transition decorators is \"",
"\"deprecated in favor of @transition_check, @before_transition and \"",
"\"@after_transition decorators.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"TransitionWrapper",
"(",
"trname",
",",
"field",
"=",
"field",
",",
"check",
"=",
"check",
",",
"before",
"=",
"before",
",",
"after",
"=",
"after",
")"
] | 51.357143 | 20.142857 |
def create_instance(self, name, moduleName, settings):
""" Creates an instance of <moduleName> at <name> with
<settings>. """
if name in self.insts:
raise ValueError("There's already an instance named %s" %
name)
if moduleName not in self.modules:
raise ValueError("There's no module %s" % moduleName)
md = self.modules[moduleName]
deps = dict()
for k, v in six.iteritems(md.deps):
if k not in settings:
settings[k] = self._get_or_create_a(v.type)
if settings[k] is None:
if not v.allow_null:
raise ValueError("`null' not allowed for %s" % k)
elif settings[k] not in self.insts:
raise ValueError("No such instance %s" % settings[k])
else:
settings[k] = self.insts[settings[k]].object
deps[k] = settings[k]
for k, v in six.iteritems(md.vsettings):
if k not in settings:
settings[k] = v.default
if v.default is None:
self.l.warn('%s:%s not set' % (name, k))
self.l.info('create_instance %-15s %s' % (name, md.implementedBy))
cl = get_by_path(md.implementedBy)
il = logging.getLogger(name)
obj = cl(settings, il)
self.register_instance(name, moduleName, obj, settings, deps)
return obj | [
"def",
"create_instance",
"(",
"self",
",",
"name",
",",
"moduleName",
",",
"settings",
")",
":",
"if",
"name",
"in",
"self",
".",
"insts",
":",
"raise",
"ValueError",
"(",
"\"There's already an instance named %s\"",
"%",
"name",
")",
"if",
"moduleName",
"not",
"in",
"self",
".",
"modules",
":",
"raise",
"ValueError",
"(",
"\"There's no module %s\"",
"%",
"moduleName",
")",
"md",
"=",
"self",
".",
"modules",
"[",
"moduleName",
"]",
"deps",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"md",
".",
"deps",
")",
":",
"if",
"k",
"not",
"in",
"settings",
":",
"settings",
"[",
"k",
"]",
"=",
"self",
".",
"_get_or_create_a",
"(",
"v",
".",
"type",
")",
"if",
"settings",
"[",
"k",
"]",
"is",
"None",
":",
"if",
"not",
"v",
".",
"allow_null",
":",
"raise",
"ValueError",
"(",
"\"`null' not allowed for %s\"",
"%",
"k",
")",
"elif",
"settings",
"[",
"k",
"]",
"not",
"in",
"self",
".",
"insts",
":",
"raise",
"ValueError",
"(",
"\"No such instance %s\"",
"%",
"settings",
"[",
"k",
"]",
")",
"else",
":",
"settings",
"[",
"k",
"]",
"=",
"self",
".",
"insts",
"[",
"settings",
"[",
"k",
"]",
"]",
".",
"object",
"deps",
"[",
"k",
"]",
"=",
"settings",
"[",
"k",
"]",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"md",
".",
"vsettings",
")",
":",
"if",
"k",
"not",
"in",
"settings",
":",
"settings",
"[",
"k",
"]",
"=",
"v",
".",
"default",
"if",
"v",
".",
"default",
"is",
"None",
":",
"self",
".",
"l",
".",
"warn",
"(",
"'%s:%s not set'",
"%",
"(",
"name",
",",
"k",
")",
")",
"self",
".",
"l",
".",
"info",
"(",
"'create_instance %-15s %s'",
"%",
"(",
"name",
",",
"md",
".",
"implementedBy",
")",
")",
"cl",
"=",
"get_by_path",
"(",
"md",
".",
"implementedBy",
")",
"il",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"obj",
"=",
"cl",
"(",
"settings",
",",
"il",
")",
"self",
".",
"register_instance",
"(",
"name",
",",
"moduleName",
",",
"obj",
",",
"settings",
",",
"deps",
")",
"return",
"obj"
] | 44.75 | 12.40625 |
def random_sample(self, k=1):
    """
    Pick *k* distinct members of the Set at random, without removing them.

    Mirrors :func:`random.sample` from the standard library.

    :param k: Size of the sample, defaults to 1.
    :rtype: :class:`list`
    """
    if k == 0:
        # Nothing requested -- skip the round trip to Redis entirely.
        raw = []
    elif k == 1:
        # Single-element form behaves the same on every Redis version.
        raw = [self.redis.srandmember(self.key)]
    else:
        # Multi-element form needs Redis >= 2.6; sampled server-side.
        raw = self.redis.srandmember(self.key, k)
    return [self._unpickle(member) for member in raw]
"def",
"random_sample",
"(",
"self",
",",
"k",
"=",
"1",
")",
":",
"# k == 0: no work to do",
"if",
"k",
"==",
"0",
":",
"results",
"=",
"[",
"]",
"# k == 1: same behavior on all versions of Redis",
"elif",
"k",
"==",
"1",
":",
"results",
"=",
"[",
"self",
".",
"redis",
".",
"srandmember",
"(",
"self",
".",
"key",
")",
"]",
"# k != 1, Redis version >= 2.6: compute in Redis",
"else",
":",
"results",
"=",
"self",
".",
"redis",
".",
"srandmember",
"(",
"self",
".",
"key",
",",
"k",
")",
"return",
"[",
"self",
".",
"_unpickle",
"(",
"x",
")",
"for",
"x",
"in",
"results",
"]"
] | 32.904762 | 19.095238 |
def from_dict(template_name, template_values_dict):
    """
    Build a :class:`Template` instance from a parsed template dictionary.

    :param string template_name: Name of the template
    :param dict template_values_dict: Template payload; must already have
        passed JSON Schema validation.
    :return Template: Instance of this class containing the values provided
        in this dictionary
    """
    params = template_values_dict.get("Parameters", {})
    body = template_values_dict.get("Definition", {})
    return Template(template_name, params, body)
"def",
"from_dict",
"(",
"template_name",
",",
"template_values_dict",
")",
":",
"parameters",
"=",
"template_values_dict",
".",
"get",
"(",
"\"Parameters\"",
",",
"{",
"}",
")",
"definition",
"=",
"template_values_dict",
".",
"get",
"(",
"\"Definition\"",
",",
"{",
"}",
")",
"return",
"Template",
"(",
"template_name",
",",
"parameters",
",",
"definition",
")"
] | 45.357143 | 26.785714 |
def build_stats(counts):
    """Assemble a stats dictionary from a reference-extraction counts structure."""
    stats = {
        'status': 0,
        'reportnum': counts['reportnum'],
        'title': counts['title'],
        'author': counts['auth_group'],
        'url': counts['url'],
        'doi': counts['doi'],
        'misc': counts['misc'],
    }
    # Legacy hyphen-separated summary string, kept for backwards compatibility.
    stats["old_stats_str"] = (
        "%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s"
        % stats
    )
    stats["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # NOTE(review): `version` is a module-level global defined elsewhere in the file.
    stats["version"] = version
    return stats
return stats | [
"def",
"build_stats",
"(",
"counts",
")",
":",
"stats",
"=",
"{",
"'status'",
":",
"0",
",",
"'reportnum'",
":",
"counts",
"[",
"'reportnum'",
"]",
",",
"'title'",
":",
"counts",
"[",
"'title'",
"]",
",",
"'author'",
":",
"counts",
"[",
"'auth_group'",
"]",
",",
"'url'",
":",
"counts",
"[",
"'url'",
"]",
",",
"'doi'",
":",
"counts",
"[",
"'doi'",
"]",
",",
"'misc'",
":",
"counts",
"[",
"'misc'",
"]",
",",
"}",
"stats_str",
"=",
"\"%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s\"",
"%",
"stats",
"stats",
"[",
"\"old_stats_str\"",
"]",
"=",
"stats_str",
"stats",
"[",
"\"date\"",
"]",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"stats",
"[",
"\"version\"",
"]",
"=",
"version",
"return",
"stats"
] | 35.3125 | 15.875 |
def tidy_eggs_list(eggs_list):
    """Tidy the given eggs list.

    Strips surrounding whitespace, removes single quotes and commas (left
    over from repr-style listings), and drops entries that are the bare
    ``site-packages`` directory rather than an egg.

    :param eggs_list: iterable of raw egg path strings
    :return: list of cleaned path strings
    """
    tidied = []
    for raw in eggs_list:
        # .strip() replaces the redundant .lstrip().rstrip() chain.
        cleaned = raw.strip().replace("'", "").replace(",", "")
        if cleaned.endswith("site-packages"):
            # The interpreter's site-packages dir itself is not an egg.
            continue
        tidied.append(cleaned)
    return tidied
"def",
"tidy_eggs_list",
"(",
"eggs_list",
")",
":",
"tmp",
"=",
"[",
"]",
"for",
"line",
"in",
"eggs_list",
":",
"line",
"=",
"line",
".",
"lstrip",
"(",
")",
".",
"rstrip",
"(",
")",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
"line",
"=",
"line",
".",
"replace",
"(",
"','",
",",
"''",
")",
"if",
"line",
".",
"endswith",
"(",
"'site-packages'",
")",
":",
"continue",
"tmp",
".",
"append",
"(",
"line",
")",
"return",
"tmp"
] | 26.333333 | 10.5 |
def load_from_file(cls, file_path):
    """Load the meta data given a file_path, or build empty meta data.

    :param cls: class to instantiate; called as ``cls(initial=data)``
    :param file_path: path to a JSON metadata file
    :return: instance of *cls*; ``initial`` is ``None`` when the file
        does not exist.
    """
    data = None
    if os.path.exists(file_path):
        # ``with`` guarantees the handle is closed; the original leaked it.
        with open(file_path) as metadata_file:
            data = json.loads(metadata_file.read())
    return cls(initial=data)
"def",
"load_from_file",
"(",
"cls",
",",
"file_path",
")",
":",
"data",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"metadata_file",
"=",
"open",
"(",
"file_path",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"metadata_file",
".",
"read",
"(",
")",
")",
"return",
"cls",
"(",
"initial",
"=",
"data",
")"
] | 40.857143 | 7.285714 |
def _merge_a_into_b(self, a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    :param a: overriding config (EasyDict); non-EasyDict input is ignored
    :param b: base config (EasyDict), modified in place
    :return: b with a merged in, or None when a is not an EasyDict
    """
    from easydict import EasyDict as edict
    if type(a) is not edict:
        return
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        # the types must match, too
        old_type = type(b[k])
        if old_type is not type(v):
            if isinstance(b[k], np.ndarray):
                # Allow list -> ndarray coercion, preserving b's dtype.
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))
        # recursively merge dicts
        if type(v) is edict:
            try:
                self._merge_a_into_b(a[k], b[k])
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit are no longer intercepted here.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
    return b
"def",
"_merge_a_into_b",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"from",
"easydict",
"import",
"EasyDict",
"as",
"edict",
"if",
"type",
"(",
"a",
")",
"is",
"not",
"edict",
":",
"return",
"for",
"k",
",",
"v",
"in",
"a",
".",
"items",
"(",
")",
":",
"# a must specify keys that are in b",
"if",
"k",
"not",
"in",
"b",
":",
"raise",
"KeyError",
"(",
"'{} is not a valid config key'",
".",
"format",
"(",
"k",
")",
")",
"# the types must match, too",
"old_type",
"=",
"type",
"(",
"b",
"[",
"k",
"]",
")",
"if",
"old_type",
"is",
"not",
"type",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"b",
"[",
"k",
"]",
",",
"np",
".",
"ndarray",
")",
":",
"v",
"=",
"np",
".",
"array",
"(",
"v",
",",
"dtype",
"=",
"b",
"[",
"k",
"]",
".",
"dtype",
")",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'Type mismatch ({} vs. {}) '",
"'for config key: {}'",
")",
".",
"format",
"(",
"type",
"(",
"b",
"[",
"k",
"]",
")",
",",
"type",
"(",
"v",
")",
",",
"k",
")",
")",
"# recursively merge dicts",
"if",
"type",
"(",
"v",
")",
"is",
"edict",
":",
"try",
":",
"self",
".",
"_merge_a_into_b",
"(",
"a",
"[",
"k",
"]",
",",
"b",
"[",
"k",
"]",
")",
"except",
":",
"print",
"(",
"'Error under config key: {}'",
".",
"format",
"(",
"k",
")",
")",
"raise",
"else",
":",
"b",
"[",
"k",
"]",
"=",
"v",
"return",
"b"
] | 36.878788 | 17 |
def _printAvailableCheckpoints(experimentDir):
  """List available checkpoints for the specified experiment.

  Prints the checkpoint labels found under the experiment's checkpoint
  parent directory, followed by usage hints for loading one.
  (Python 2 syntax: uses ``print`` statements.)

  :param experimentDir: path of the experiment directory
  """
  checkpointParentDir = getCheckpointParentDir(experimentDir)
  # No checkpoint parent directory at all -> nothing to list.
  if not os.path.exists(checkpointParentDir):
    print "No available checkpoints."
    return
  # Keep only subdirectories that pass the checkpoint-dir test.
  checkpointDirs = [x for x in os.listdir(checkpointParentDir)
                    if _isCheckpointDir(os.path.join(checkpointParentDir, x))]
  if not checkpointDirs:
    print "No available checkpoints."
    return
  print "Available checkpoints:"
  checkpointList = [_checkpointLabelFromCheckpointDir(x)
                    for x in checkpointDirs]
  # Sorted for stable, readable output.
  for checkpoint in sorted(checkpointList):
    print "\t", checkpoint
  print
  print "To start from a checkpoint:"
  print "  python run_opf_experiment.py experiment --load <CHECKPOINT>"
  print "For example, to start from the checkpoint \"MyCheckpoint\":"
  print "  python run_opf_experiment.py experiment --load MyCheckpoint"
"def",
"_printAvailableCheckpoints",
"(",
"experimentDir",
")",
":",
"checkpointParentDir",
"=",
"getCheckpointParentDir",
"(",
"experimentDir",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"checkpointParentDir",
")",
":",
"print",
"\"No available checkpoints.\"",
"return",
"checkpointDirs",
"=",
"[",
"x",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"checkpointParentDir",
")",
"if",
"_isCheckpointDir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"checkpointParentDir",
",",
"x",
")",
")",
"]",
"if",
"not",
"checkpointDirs",
":",
"print",
"\"No available checkpoints.\"",
"return",
"print",
"\"Available checkpoints:\"",
"checkpointList",
"=",
"[",
"_checkpointLabelFromCheckpointDir",
"(",
"x",
")",
"for",
"x",
"in",
"checkpointDirs",
"]",
"for",
"checkpoint",
"in",
"sorted",
"(",
"checkpointList",
")",
":",
"print",
"\"\\t\"",
",",
"checkpoint",
"print",
"print",
"\"To start from a checkpoint:\"",
"print",
"\" python run_opf_experiment.py experiment --load <CHECKPOINT>\"",
"print",
"\"For example, to start from the checkpoint \\\"MyCheckpoint\\\":\"",
"print",
"\" python run_opf_experiment.py experiment --load MyCheckpoint\""
] | 35.769231 | 21 |
def moves_from_last_n_games(self, n, moves, shuffle,
                            column_family, column):
    """Randomly sample moves from the last *n* games in this GameQueue.

    Args:
        n: how many games, counted back from the latest, to draw from.
        moves: number of moves to be sampled from those games.
        shuffle: if True, shuffle the selected moves.
        column_family: name of the column family containing move examples.
        column: name of the column containing move examples.

    Returns:
        a dataset containing the selected moves.

    Raises:
        ValueError: if the table does not yet contain any games.
    """
    self.wait_for_fresh_games()
    newest = self.latest_game_number
    utils.dbg('Latest game in %s: %s' % (self.btspec.table, newest))
    if newest == 0:
        raise ValueError('Cannot find a latest game in the table')
    first = int(max(0, newest - n))
    return self.moves_from_games(first, newest, moves, shuffle,
                                 column_family, column)
"def",
"moves_from_last_n_games",
"(",
"self",
",",
"n",
",",
"moves",
",",
"shuffle",
",",
"column_family",
",",
"column",
")",
":",
"self",
".",
"wait_for_fresh_games",
"(",
")",
"latest_game",
"=",
"self",
".",
"latest_game_number",
"utils",
".",
"dbg",
"(",
"'Latest game in %s: %s'",
"%",
"(",
"self",
".",
"btspec",
".",
"table",
",",
"latest_game",
")",
")",
"if",
"latest_game",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Cannot find a latest game in the table'",
")",
"start",
"=",
"int",
"(",
"max",
"(",
"0",
",",
"latest_game",
"-",
"n",
")",
")",
"ds",
"=",
"self",
".",
"moves_from_games",
"(",
"start",
",",
"latest_game",
",",
"moves",
",",
"shuffle",
",",
"column_family",
",",
"column",
")",
"return",
"ds"
] | 43.375 | 20.791667 |
def logsumexp(x):
    """Numerically stable log(sum(exp(x))); equivalent to the scipy.misc version."""
    shift = np.max(x)
    # Subtracting the max keeps exp() from overflowing for large inputs.
    return shift + np.log(np.exp(x - shift).sum())
"def",
"logsumexp",
"(",
"x",
")",
":",
"max_x",
"=",
"np",
".",
"max",
"(",
"x",
")",
"return",
"max_x",
"+",
"np",
".",
"log",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"exp",
"(",
"x",
"-",
"max_x",
")",
")",
")"
] | 40.75 | 13.5 |
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
    """
    GET with paging (for large payloads).
    :param page_size: how many objects per page (must be >= 1)
    :param endpoint: DHIS2 API endpoint
    :param params: HTTP parameters (dict), defaults to None
    :param merge: If true, return a list containing all pages instead of one page. Defaults to False.
    :return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
    :raises ClientException: for an invalid page_size or manual 'paging' in params
    """
    try:
        # Accept ints and numeric strings; anything else (or < 1) is rejected.
        if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
            raise ValueError
    except ValueError:
        # Also reached when int(page_size) fails on a non-numeric string.
        # Message fixed: the check above accepts page_size == 1.
        raise ClientException("page_size must be >= 1")
    params = {} if not params else params
    if 'paging' in params:
        raise ClientException("Can't set paging manually in `params` when using `get_paged`")
    params['pageSize'] = page_size
    params['page'] = 1
    params['totalPages'] = True
    collection = endpoint.split('/')[0]  # only use e.g. events when submitting events/query as endpoint

    def page_generator():
        """Yield successive response pages until pager.pageCount is reached."""
        page = self.get(endpoint=endpoint, file_type='json', params=params).json()
        page_count = page['pager']['pageCount']
        yield page
        while page['pager']['page'] < page_count:
            params['page'] += 1
            page = self.get(endpoint=endpoint, file_type='json', params=params).json()
            yield page

    if not merge:
        return page_generator()
    else:
        # Merge: concatenate the collection arrays from every page into one dict.
        data = []
        for p in page_generator():
            data.append(p[collection])
        return {collection: list(chain.from_iterable(data))}
"def",
"get_paged",
"(",
"self",
",",
"endpoint",
",",
"params",
"=",
"None",
",",
"page_size",
"=",
"50",
",",
"merge",
"=",
"False",
")",
":",
"try",
":",
"if",
"not",
"isinstance",
"(",
"page_size",
",",
"(",
"string_types",
",",
"int",
")",
")",
"or",
"int",
"(",
"page_size",
")",
"<",
"1",
":",
"raise",
"ValueError",
"except",
"ValueError",
":",
"raise",
"ClientException",
"(",
"\"page_size must be > 1\"",
")",
"params",
"=",
"{",
"}",
"if",
"not",
"params",
"else",
"params",
"if",
"'paging'",
"in",
"params",
":",
"raise",
"ClientException",
"(",
"\"Can't set paging manually in `params` when using `get_paged`\"",
")",
"params",
"[",
"'pageSize'",
"]",
"=",
"page_size",
"params",
"[",
"'page'",
"]",
"=",
"1",
"params",
"[",
"'totalPages'",
"]",
"=",
"True",
"collection",
"=",
"endpoint",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"# only use e.g. events when submitting events/query as endpoint",
"def",
"page_generator",
"(",
")",
":",
"\"\"\"Yield pages\"\"\"",
"page",
"=",
"self",
".",
"get",
"(",
"endpoint",
"=",
"endpoint",
",",
"file_type",
"=",
"'json'",
",",
"params",
"=",
"params",
")",
".",
"json",
"(",
")",
"page_count",
"=",
"page",
"[",
"'pager'",
"]",
"[",
"'pageCount'",
"]",
"yield",
"page",
"while",
"page",
"[",
"'pager'",
"]",
"[",
"'page'",
"]",
"<",
"page_count",
":",
"params",
"[",
"'page'",
"]",
"+=",
"1",
"page",
"=",
"self",
".",
"get",
"(",
"endpoint",
"=",
"endpoint",
",",
"file_type",
"=",
"'json'",
",",
"params",
"=",
"params",
")",
".",
"json",
"(",
")",
"yield",
"page",
"if",
"not",
"merge",
":",
"return",
"page_generator",
"(",
")",
"else",
":",
"data",
"=",
"[",
"]",
"for",
"p",
"in",
"page_generator",
"(",
")",
":",
"data",
".",
"append",
"(",
"p",
"[",
"collection",
"]",
")",
"return",
"{",
"collection",
":",
"list",
"(",
"chain",
".",
"from_iterable",
"(",
"data",
")",
")",
"}"
] | 41.380952 | 22.119048 |
def _reset_changes(self):
"""Stores current values for comparison later"""
self._original = {}
if self.last_updated is not None:
self._original['last_updated'] = self.last_updated | [
"def",
"_reset_changes",
"(",
"self",
")",
":",
"self",
".",
"_original",
"=",
"{",
"}",
"if",
"self",
".",
"last_updated",
"is",
"not",
"None",
":",
"self",
".",
"_original",
"[",
"'last_updated'",
"]",
"=",
"self",
".",
"last_updated"
] | 42.2 | 10.2 |
def add_vtarg_and_adv(seg, gamma, lam):
    """
    Compute TD(lambda) value targets and GAE(lambda) advantages in place.

    Adds seg["adv"] (advantages) and seg["tdlamret"] (value targets).
    """
    # Trailing 0 is only read for the final step; already zeroed upstream
    # when the last transition was terminal (new == 1).
    terminal_flags = np.append(seg["new"], 0)
    values = np.append(seg["vpred"], seg["nextvpred"])
    rewards = seg["rew"]
    horizon = len(rewards)
    advantages = np.empty(horizon, 'float32')
    running_gae = 0
    # Backward recursion: accumulate discounted TD errors.
    for step in range(horizon - 1, -1, -1):
        mask = 1 - terminal_flags[step + 1]
        td_error = rewards[step] + gamma * values[step + 1] * mask - values[step]
        running_gae = td_error + gamma * lam * mask * running_gae
        advantages[step] = running_gae
    seg["adv"] = advantages
    seg["tdlamret"] = advantages + seg["vpred"]
"def",
"add_vtarg_and_adv",
"(",
"seg",
",",
"gamma",
",",
"lam",
")",
":",
"new",
"=",
"np",
".",
"append",
"(",
"seg",
"[",
"\"new\"",
"]",
",",
"0",
")",
"# last element is only used for last vtarg, but we already zeroed it if last new = 1",
"vpred",
"=",
"np",
".",
"append",
"(",
"seg",
"[",
"\"vpred\"",
"]",
",",
"seg",
"[",
"\"nextvpred\"",
"]",
")",
"T",
"=",
"len",
"(",
"seg",
"[",
"\"rew\"",
"]",
")",
"seg",
"[",
"\"adv\"",
"]",
"=",
"gaelam",
"=",
"np",
".",
"empty",
"(",
"T",
",",
"'float32'",
")",
"rew",
"=",
"seg",
"[",
"\"rew\"",
"]",
"lastgaelam",
"=",
"0",
"for",
"t",
"in",
"reversed",
"(",
"range",
"(",
"T",
")",
")",
":",
"nonterminal",
"=",
"1",
"-",
"new",
"[",
"t",
"+",
"1",
"]",
"delta",
"=",
"rew",
"[",
"t",
"]",
"+",
"gamma",
"*",
"vpred",
"[",
"t",
"+",
"1",
"]",
"*",
"nonterminal",
"-",
"vpred",
"[",
"t",
"]",
"gaelam",
"[",
"t",
"]",
"=",
"lastgaelam",
"=",
"delta",
"+",
"gamma",
"*",
"lam",
"*",
"nonterminal",
"*",
"lastgaelam",
"seg",
"[",
"\"tdlamret\"",
"]",
"=",
"seg",
"[",
"\"adv\"",
"]",
"+",
"seg",
"[",
"\"vpred\"",
"]"
] | 45 | 19.533333 |
def create(self, name, **request_parameters):
    """Create a team.

    The authenticated user is automatically added as a member of the team.

    Args:
        name(basestring): A user-friendly name for the team.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        Team: A Team object with the details of the created team.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    check_type(name, basestring, may_be_none=False)
    payload = dict_from_items_with_values(
        request_parameters,
        name=name,
    )
    # POST the new team, then wrap the response JSON in a Team object.
    response_json = self._session.post(API_ENDPOINT, json=payload)
    return self._object_factory(OBJECT_TYPE, response_json)
"def",
"create",
"(",
"self",
",",
"name",
",",
"*",
"*",
"request_parameters",
")",
":",
"check_type",
"(",
"name",
",",
"basestring",
",",
"may_be_none",
"=",
"False",
")",
"post_data",
"=",
"dict_from_items_with_values",
"(",
"request_parameters",
",",
"name",
"=",
"name",
",",
")",
"# API request",
"json_data",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"API_ENDPOINT",
",",
"json",
"=",
"post_data",
")",
"# Return a team object created from the response JSON data",
"return",
"self",
".",
"_object_factory",
"(",
"OBJECT_TYPE",
",",
"json_data",
")"
] | 32.8 | 25.8 |
def scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection, legsStr):
    """Forward one market-scanner result row to the SWIG-generated EWrapper.

    Mirrors the C++ signature:
    scannerData(EWrapper self, int reqId, int rank,
    ContractDetails contractDetails, IBString const & distance,
    IBString const & benchmark, IBString const & projection,
    IBString const & legsStr)
    """
    return _swigibpy.EWrapper_scannerData(
        self, reqId, rank, contractDetails,
        distance, benchmark, projection, legsStr)
"def",
"scannerData",
"(",
"self",
",",
"reqId",
",",
"rank",
",",
"contractDetails",
",",
"distance",
",",
"benchmark",
",",
"projection",
",",
"legsStr",
")",
":",
"return",
"_swigibpy",
".",
"EWrapper_scannerData",
"(",
"self",
",",
"reqId",
",",
"rank",
",",
"contractDetails",
",",
"distance",
",",
"benchmark",
",",
"projection",
",",
"legsStr",
")"
] | 140.333333 | 45.666667 |
def items_get(self, session, **kwargs):
    '''taobao.fenxiao.distributor.items.get - query product download records.

    Lets a supplier query which products its distributors have downloaded.

    :param session: TOP API session key
    :param kwargs: optional filters (distributor_id, start_modified,
        end_modified, page_no, page_size, product_id)
    :return: list of matching FenxiaoItemRecord objects
    '''
    request = TOPRequest('taobao.fenxiao.distributor.items.get')
    for k, v in kwargs.items():
        # Known filter keys always pass through; unknown keys are sent
        # only when their value is not None (original semantics preserved).
        if k not in ('distributor_id', 'start_modified', 'end_modified', 'page_no', 'page_size', 'product_id') and v is None: continue
        request[k] = v
    self.create(self.execute(request, session), fields=['total_results','records'], models={'records':FenxiaoItemRecord})
    return self.records
"def",
"items_get",
"(",
"self",
",",
"session",
",",
"*",
"*",
"kwargs",
")",
":",
"request",
"=",
"TOPRequest",
"(",
"'taobao.fenxiao.distributor.items.get'",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"(",
"'distributor_id'",
",",
"'start_modified'",
",",
"'end_modified'",
",",
"'page_no'",
",",
"'page_size'",
",",
"'product_id'",
")",
"and",
"v",
"==",
"None",
":",
"continue",
"request",
"[",
"k",
"]",
"=",
"v",
"self",
".",
"create",
"(",
"self",
".",
"execute",
"(",
"request",
",",
"session",
")",
",",
"fields",
"=",
"[",
"'total_results'",
",",
"'records'",
"]",
",",
"models",
"=",
"{",
"'records'",
":",
"FenxiaoItemRecord",
"}",
")",
"return",
"self",
".",
"records"
] | 55 | 30 |
def create_unique_id(src, ase):
    # type: (blobxfer.models.upload.LocalPath,
    #        blobxfer.models.azure.StorageEntity) -> str
    """Build a unique id for a (local path, storage entity) pair.

    :param blobxfer.models.upload.LocalPath src: local path
    :param blobxfer.models.azure.StorageEntity ase: azure storage entity
    :rtype: str
    :return: semicolon-joined unique id for the pair
    """
    parts = (
        str(src.absolute_path),
        ase._client.primary_endpoint,
        ase.path,
    )
    return ';'.join(parts)
"def",
"create_unique_id",
"(",
"src",
",",
"ase",
")",
":",
"# type: (blobxfer.models.upload.LocalPath,",
"# blobxfer.models.azure.StorageEntity) -> str",
"return",
"';'",
".",
"join",
"(",
"(",
"str",
"(",
"src",
".",
"absolute_path",
")",
",",
"ase",
".",
"_client",
".",
"primary_endpoint",
",",
"ase",
".",
"path",
")",
")"
] | 43.25 | 17.25 |
def unpickle(filepath):
    """Decompress a blosc-compressed pickle file and return the object.

    The file is consumed in blosc.MAX_BUFFERSIZE chunks; each chunk is
    presumably a complete blosc frame as written by the companion
    pickling routine -- verify against the writer if in doubt.
    """
    decompressed_parts = []
    with open(filepath, 'rb') as fh:
        chunk = fh.read(blosc.MAX_BUFFERSIZE)
        while chunk:
            decompressed_parts.append(blosc.decompress(chunk))
            chunk = fh.read(blosc.MAX_BUFFERSIZE)
    return pkl.loads(b"".join(decompressed_parts))
"def",
"unpickle",
"(",
"filepath",
")",
":",
"arr",
"=",
"[",
"]",
"with",
"open",
"(",
"filepath",
",",
"'rb'",
")",
"as",
"f",
":",
"carr",
"=",
"f",
".",
"read",
"(",
"blosc",
".",
"MAX_BUFFERSIZE",
")",
"while",
"len",
"(",
"carr",
")",
">",
"0",
":",
"arr",
".",
"append",
"(",
"blosc",
".",
"decompress",
"(",
"carr",
")",
")",
"carr",
"=",
"f",
".",
"read",
"(",
"blosc",
".",
"MAX_BUFFERSIZE",
")",
"return",
"pkl",
".",
"loads",
"(",
"b\"\"",
".",
"join",
"(",
"arr",
")",
")"
] | 33.666667 | 9.222222 |
def growth_curve(self, method='best', **method_options):
    """
    Return growth curve estimate using best available methodology depending on what catchment attributes are
    available.
    ====================== ====================== ==================================================================
    `method`               `method_options`       notes
    ====================== ====================== ==================================================================
    `enhanced_single_site` `distr='glo'`          Preferred method for gauged catchments (i.e. with
                           `as_rural=False`       `Catchment.amax_record`).
    `single_site`          `distr='glo'`          Alternative method for gauged catchments. Uses AMAX data from
                                                  subject station only.
    `pooling_group`        `distr='glo'`          Only possible method for ungauged catchments.
                           `as_rural=False`
    ====================== ====================== ==================================================================
    :param method: methodology to use to estimate the growth curve. Default: automatically choose best method.
    :type method: str
    :param method_options: any optional parameters for the growth curve method function
    :type method_options: kwargs
    :return: Inverse cumulative distribution function, callable class with one parameter `aep` (annual exceedance
             probability)
    :type: :class:`.GrowthCurve`
    :raises AttributeError: if the requested `method` does not exist
    """
    if method == 'best':
        if self.catchment.amax_records:
            # Gauged catchment, use enhanced single site
            self.results_log['method'] = 'enhanced_single_site'
            return self._growth_curve_enhanced_single_site()
        else:
            # Ungauged catchment, standard pooling group
            self.results_log['method'] = 'pooling_group'
            return self._growth_curve_pooling_group()
    else:
        # Resolve the requested method *before* logging so a typo does not
        # pollute the results log, and so AttributeErrors raised inside the
        # method itself are not misreported as "method does not exist".
        try:
            curve_method = getattr(self, '_growth_curve_' + method)
        except AttributeError:
            raise AttributeError("Method `{}` to estimate the growth curve does not exist.".format(method))
        # Bug fix: record the actual method name, not the literal 'method'.
        self.results_log['method'] = method
        return curve_method(**method_options)
"def",
"growth_curve",
"(",
"self",
",",
"method",
"=",
"'best'",
",",
"*",
"*",
"method_options",
")",
":",
"if",
"method",
"==",
"'best'",
":",
"if",
"self",
".",
"catchment",
".",
"amax_records",
":",
"# Gauged catchment, use enhanced single site",
"self",
".",
"results_log",
"[",
"'method'",
"]",
"=",
"'enhanced_single_site'",
"return",
"self",
".",
"_growth_curve_enhanced_single_site",
"(",
")",
"else",
":",
"# Ungauged catchment, standard pooling group",
"self",
".",
"results_log",
"[",
"'method'",
"]",
"=",
"'pooling_group'",
"return",
"self",
".",
"_growth_curve_pooling_group",
"(",
")",
"else",
":",
"try",
":",
"self",
".",
"results_log",
"[",
"'method'",
"]",
"=",
"'method'",
"return",
"getattr",
"(",
"self",
",",
"'_growth_curve_'",
"+",
"method",
")",
"(",
"*",
"*",
"method_options",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"Method `{}` to estimate the growth curve does not exist.\"",
".",
"format",
"(",
"method",
")",
")"
] | 61.631579 | 34 |
def neighbors(point):
    """Yield the 8 (possibly out of bounds) neighbors of a point.

    Rewritten without Python-2-only tuple parameter unpacking
    (``def neighbors((x, y))``) so the function parses on both
    Python 2 and Python 3; callers still pass a single ``(x, y)`` pair.

    :param point: ``(x, y)`` coordinate pair
    :return: generator of the 8 surrounding coordinate pairs
    """
    x, y = point
    yield x + 1, y
    yield x - 1, y
    yield x, y + 1
    yield x, y - 1
    yield x + 1, y + 1
    yield x + 1, y - 1
    yield x - 1, y + 1
    yield x - 1, y - 1
"def",
"neighbors",
"(",
"(",
"x",
",",
"y",
")",
")",
":",
"yield",
"x",
"+",
"1",
",",
"y",
"yield",
"x",
"-",
"1",
",",
"y",
"yield",
"x",
",",
"y",
"+",
"1",
"yield",
"x",
",",
"y",
"-",
"1",
"yield",
"x",
"+",
"1",
",",
"y",
"+",
"1",
"yield",
"x",
"+",
"1",
",",
"y",
"-",
"1",
"yield",
"x",
"-",
"1",
",",
"y",
"+",
"1",
"yield",
"x",
"-",
"1",
",",
"y",
"-",
"1"
] | 24.9 | 17.8 |
def _get_url(self, url):
"""Build provider's url. Join with base_url part if needed."""
if self.base_url and not url.startswith(('http://', 'https://')):
return urljoin(self.base_url, url)
return url | [
"def",
"_get_url",
"(",
"self",
",",
"url",
")",
":",
"if",
"self",
".",
"base_url",
"and",
"not",
"url",
".",
"startswith",
"(",
"(",
"'http://'",
",",
"'https://'",
")",
")",
":",
"return",
"urljoin",
"(",
"self",
".",
"base_url",
",",
"url",
")",
"return",
"url"
] | 46.2 | 15.4 |
def do_updatereplication(self, line):
    """updatereplication <identifier> [identifier ...] Update the Replication Policy
    on one or more existing Science Data Objects."""
    # Docstring above is the cmd-shell help text; kept verbatim.
    pids = self._split_args(line, 1, -1)
    self._command_processor.update_replication_policy(pids)
    message = "Added replication policy update operation for identifiers {} to write queue"
    self._print_info_if_verbose(message.format(", ".join(pids)))
"def",
"do_updatereplication",
"(",
"self",
",",
"line",
")",
":",
"pids",
"=",
"self",
".",
"_split_args",
"(",
"line",
",",
"1",
",",
"-",
"1",
")",
"self",
".",
"_command_processor",
".",
"update_replication_policy",
"(",
"pids",
")",
"self",
".",
"_print_info_if_verbose",
"(",
"\"Added replication policy update operation for identifiers {} to write queue\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"pids",
")",
")",
")"
] | 47.4 | 15.8 |
def register(cls, subclass):
    """Register a virtual subclass of an ABC.

    After registration, ``issubclass(subclass, cls)`` returns True even
    though *subclass* does not inherit from *cls*.
    """
    if not isinstance(cls, type):
        raise TypeError("Can only register classes")
    if issubclass(subclass, cls):
        return  # Already a subclass
    # Subtle: test for cycles *after* testing for "already a subclass";
    # this means we allow X.register(X) and interpret it as a no-op.
    if issubclass(cls, subclass):
        # This would create a cycle, which is bad for the algorithm below
        raise RuntimeError("Refusing to create an inheritance cycle")
    cls._abc_registry.add(subclass)
    # Bump the global counter so cached subclass-check results are discarded.
    ABCMeta._abc_invalidation_counter += 1
"def",
"register",
"(",
"cls",
",",
"subclass",
")",
":",
"if",
"not",
"isinstance",
"(",
"cls",
",",
"type",
")",
":",
"raise",
"TypeError",
"(",
"\"Can only register classes\"",
")",
"if",
"issubclass",
"(",
"subclass",
",",
"cls",
")",
":",
"return",
"# Already a subclass",
"# Subtle: test for cycles *after* testing for \"already a subclass\";",
"# this means we allow X.register(X) and interpret it as a no-op.",
"if",
"issubclass",
"(",
"cls",
",",
"subclass",
")",
":",
"# This would create a cycle, which is bad for the algorithm below",
"raise",
"RuntimeError",
"(",
"\"Refusing to create an inheritance cycle\"",
")",
"cls",
".",
"_abc_registry",
".",
"add",
"(",
"subclass",
")",
"ABCMeta",
".",
"_abc_invalidation_counter",
"+=",
"1"
] | 51.461538 | 13.923077 |
def get_team_id(team_name):
    """ Returns the team ID associated with the team name that is passed in.

    Parameters
    ----------
    team_name : str
        The team name whose ID we want. NOTE: Only pass in the team name
        (e.g. "Lakers"), not the city, or city and team name, or the team
        abbreviation.

    Returns
    -------
    team_id : int
        The team ID associated with the team name.

    Raises
    ------
    ValueError
        If no team matches the given name.
    """
    teams = get_all_team_ids()
    matches = teams[teams.TEAM_NAME == team_name]
    if len(matches) == 0:
        raise ValueError(
            "Invalid team name or there is no team with that name.")
    return matches.TEAM_ID.iloc[0]
"def",
"get_team_id",
"(",
"team_name",
")",
":",
"df",
"=",
"get_all_team_ids",
"(",
")",
"df",
"=",
"df",
"[",
"df",
".",
"TEAM_NAME",
"==",
"team_name",
"]",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"er",
"=",
"\"Invalid team name or there is no team with that name.\"",
"raise",
"ValueError",
"(",
"er",
")",
"team_id",
"=",
"df",
".",
"TEAM_ID",
".",
"iloc",
"[",
"0",
"]",
"return",
"team_id"
] | 28 | 21.173913 |
    def run_processes(self,
                      procdetails: List[ProcessDetails],
                      subproc_run_timeout_sec: float = 1,
                      stop_event_timeout_ms: int = 1000,
                      kill_timeout_sec: float = 5) -> None:
        """
        Run multiple child processes and babysit them until the Windows
        service stop event fires or any child exits with a non-zero code.

        Args:
            procdetails: list of :class:`ProcessDetails` objects (q.v.)
            subproc_run_timeout_sec: time (in seconds) to wait for each process
                when polling child processes to see how they're getting on
                (default ``1``)
            stop_event_timeout_ms: time to wait (in ms) while checking the
                Windows stop event for this service (default ``1000``)
            kill_timeout_sec: how long (in seconds) will we wait for the
                subprocesses to end peacefully, before we try to kill them?
        .. todo::
            cardinal_pythonlib.winservice.WindowsService: NOT YET IMPLEMENTED:
            Windows service autorestart
        """
        # Make sure children are stopped even on abnormal interpreter exit.
        # https://stackoverflow.com/questions/16333054
        def cleanup():
            self.debug("atexit function called: cleaning up")
            for pmgr_ in self.process_managers:
                pmgr_.stop()
        atexit.register(cleanup)
        # Set up process info: one ProcessManager per ProcessDetails.
        self.process_managers = []  # type: List[ProcessManager]
        n = len(procdetails)
        for i, details in enumerate(procdetails):
            pmgr = ProcessManager(details, i + 1, n,
                                  kill_timeout_sec=kill_timeout_sec,
                                  debugging=self.debugging)
            self.process_managers.append(pmgr)
        # Start processes
        for pmgr in self.process_managers:
            pmgr.start()
        self.info("All started")
        # Run processes: alternate between waiting on the service stop event
        # and polling each child for completion.
        something_running = True
        stop_requested = False
        subproc_failed = False
        while something_running and not stop_requested and not subproc_failed:
            # Block up to stop_event_timeout_ms on the service's stop event.
            if (win32event.WaitForSingleObject(
                    self.h_stop_event,
                    stop_event_timeout_ms) == win32event.WAIT_OBJECT_0):
                stop_requested = True
                self.info("Stop requested; stopping")
            else:
                something_running = False
                for pmgr in self.process_managers:
                    if subproc_failed:
                        break
                    try:
                        # Non-zero exit code from any child aborts the loop.
                        retcode = pmgr.wait(timeout_s=subproc_run_timeout_sec)
                        if retcode != 0:
                            subproc_failed = True
                    except subprocess.TimeoutExpired:
                        # Timeout means this child is still running.
                        something_running = True
        # Kill any outstanding processes
        #
        # (a) Slow way
        # for pmgr in self.process_managers:
        #     pmgr.stop()
        #
        # (b) Faster (slightly more parallel) way
        # for pmgr in self.process_managers:
        #     pmgr.terminate()
        # for pmgr in self.process_managers:
        #     pmgr.stop_having_terminated()
        #
        # ... No, it's bad if we leave things orphaned.
        # Let's go for slow, clean code.
        for pmgr in self.process_managers:
            pmgr.stop()
        self.info("All stopped")
"def",
"run_processes",
"(",
"self",
",",
"procdetails",
":",
"List",
"[",
"ProcessDetails",
"]",
",",
"subproc_run_timeout_sec",
":",
"float",
"=",
"1",
",",
"stop_event_timeout_ms",
":",
"int",
"=",
"1000",
",",
"kill_timeout_sec",
":",
"float",
"=",
"5",
")",
"->",
"None",
":",
"# https://stackoverflow.com/questions/16333054",
"def",
"cleanup",
"(",
")",
":",
"self",
".",
"debug",
"(",
"\"atexit function called: cleaning up\"",
")",
"for",
"pmgr_",
"in",
"self",
".",
"process_managers",
":",
"pmgr_",
".",
"stop",
"(",
")",
"atexit",
".",
"register",
"(",
"cleanup",
")",
"# Set up process info",
"self",
".",
"process_managers",
"=",
"[",
"]",
"# type: List[ProcessManager]",
"n",
"=",
"len",
"(",
"procdetails",
")",
"for",
"i",
",",
"details",
"in",
"enumerate",
"(",
"procdetails",
")",
":",
"pmgr",
"=",
"ProcessManager",
"(",
"details",
",",
"i",
"+",
"1",
",",
"n",
",",
"kill_timeout_sec",
"=",
"kill_timeout_sec",
",",
"debugging",
"=",
"self",
".",
"debugging",
")",
"self",
".",
"process_managers",
".",
"append",
"(",
"pmgr",
")",
"# Start processes",
"for",
"pmgr",
"in",
"self",
".",
"process_managers",
":",
"pmgr",
".",
"start",
"(",
")",
"self",
".",
"info",
"(",
"\"All started\"",
")",
"# Run processes",
"something_running",
"=",
"True",
"stop_requested",
"=",
"False",
"subproc_failed",
"=",
"False",
"while",
"something_running",
"and",
"not",
"stop_requested",
"and",
"not",
"subproc_failed",
":",
"if",
"(",
"win32event",
".",
"WaitForSingleObject",
"(",
"self",
".",
"h_stop_event",
",",
"stop_event_timeout_ms",
")",
"==",
"win32event",
".",
"WAIT_OBJECT_0",
")",
":",
"stop_requested",
"=",
"True",
"self",
".",
"info",
"(",
"\"Stop requested; stopping\"",
")",
"else",
":",
"something_running",
"=",
"False",
"for",
"pmgr",
"in",
"self",
".",
"process_managers",
":",
"if",
"subproc_failed",
":",
"break",
"try",
":",
"retcode",
"=",
"pmgr",
".",
"wait",
"(",
"timeout_s",
"=",
"subproc_run_timeout_sec",
")",
"if",
"retcode",
"!=",
"0",
":",
"subproc_failed",
"=",
"True",
"except",
"subprocess",
".",
"TimeoutExpired",
":",
"something_running",
"=",
"True",
"# Kill any outstanding processes",
"#",
"# (a) Slow way",
"# for pmgr in self.process_managers:",
"# pmgr.stop()",
"#",
"# (b) Faster (slightly more parallel) way",
"# for pmgr in self.process_managers:",
"# pmgr.terminate()",
"# for pmgr in self.process_managers:",
"# pmgr.stop_having_terminated()",
"#",
"# ... No, it's bad if we leave things orphaned.",
"# Let's go for slow, clean code.",
"for",
"pmgr",
"in",
"self",
".",
"process_managers",
":",
"pmgr",
".",
"stop",
"(",
")",
"self",
".",
"info",
"(",
"\"All stopped\"",
")"
] | 37.581395 | 17.860465 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.