text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def entry_set(self, predicate=None):
"""
Returns a list clone of the mappings contained in this map.
**Warning:
The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa.**
:param predicate: (Predicate), predicate for the map to filter entries (optional).
:return: (Sequence), the list of key-value tuples in the map.
.. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates.
"""
if predicate:
predicate_data = self._to_data(predicate)
return self._encode_invoke(map_entries_with_predicate_codec, predicate=predicate_data)
else:
return self._encode_invoke(map_entry_set_codec) | [
"def",
"entry_set",
"(",
"self",
",",
"predicate",
"=",
"None",
")",
":",
"if",
"predicate",
":",
"predicate_data",
"=",
"self",
".",
"_to_data",
"(",
"predicate",
")",
"return",
"self",
".",
"_encode_invoke",
"(",
"map_entries_with_predicate_codec",
",",
"predicate",
"=",
"predicate_data",
")",
"else",
":",
"return",
"self",
".",
"_encode_invoke",
"(",
"map_entry_set_codec",
")"
] | 45 | 31 |
def _set_fcoe_fip_advertisement(self, v, load=False):
"""
Setter method for fcoe_fip_advertisement, mapped from YANG variable /fcoe/fcoe_fabric_map/fcoe_fip_advertisement (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe_fip_advertisement is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe_fip_advertisement() directly.
YANG Description: This provides the grouping of all FIP configuration
elements.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fcoe_fip_advertisement.fcoe_fip_advertisement, is_container='container', presence=False, yang_name="fcoe-fip-advertisement", rest_name="advertisement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the FIP Advertisement interval', u'alt-name': u'advertisement', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe_fip_advertisement must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fcoe_fip_advertisement.fcoe_fip_advertisement, is_container='container', presence=False, yang_name="fcoe-fip-advertisement", rest_name="advertisement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the FIP Advertisement interval', u'alt-name': u'advertisement', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""",
})
self.__fcoe_fip_advertisement = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_fcoe_fip_advertisement",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"fcoe_fip_advertisement",
".",
"fcoe_fip_advertisement",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"fcoe-fip-advertisement\"",
",",
"rest_name",
"=",
"\"advertisement\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Configure the FIP Advertisement interval'",
",",
"u'alt-name'",
":",
"u'advertisement'",
",",
"u'cli-incomplete-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-fcoe'",
",",
"defining_module",
"=",
"'brocade-fcoe'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"fcoe_fip_advertisement must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=fcoe_fip_advertisement.fcoe_fip_advertisement, is_container='container', presence=False, yang_name=\"fcoe-fip-advertisement\", rest_name=\"advertisement\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the FIP Advertisement interval', u'alt-name': u'advertisement', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__fcoe_fip_advertisement",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 79.56 | 40.12 |
def cast_problem(problem):
"""
Casts problem object with known interface as OptProblem.
Parameters
----------
problem : Object
"""
# Optproblem
if isinstance(problem,OptProblem):
return problem
# Other
else:
# Type Base
if (not hasattr(problem,'G') or
(problem.G.shape[0] == problem.G.shape[1] and
problem.G.shape[0] == problem.G.nnz and
np.all(problem.G.row == problem.G.col) and
np.all(problem.G.data == 1.))):
return create_problem_from_type_base(problem)
# Type A
else:
return create_problem_from_type_A(problem) | [
"def",
"cast_problem",
"(",
"problem",
")",
":",
"# Optproblem",
"if",
"isinstance",
"(",
"problem",
",",
"OptProblem",
")",
":",
"return",
"problem",
"# Other",
"else",
":",
"# Type Base",
"if",
"(",
"not",
"hasattr",
"(",
"problem",
",",
"'G'",
")",
"or",
"(",
"problem",
".",
"G",
".",
"shape",
"[",
"0",
"]",
"==",
"problem",
".",
"G",
".",
"shape",
"[",
"1",
"]",
"and",
"problem",
".",
"G",
".",
"shape",
"[",
"0",
"]",
"==",
"problem",
".",
"G",
".",
"nnz",
"and",
"np",
".",
"all",
"(",
"problem",
".",
"G",
".",
"row",
"==",
"problem",
".",
"G",
".",
"col",
")",
"and",
"np",
".",
"all",
"(",
"problem",
".",
"G",
".",
"data",
"==",
"1.",
")",
")",
")",
":",
"return",
"create_problem_from_type_base",
"(",
"problem",
")",
"# Type A",
"else",
":",
"return",
"create_problem_from_type_A",
"(",
"problem",
")"
] | 24.740741 | 20.222222 |
def on_view_change_start(self):
"""
Notifies node about the fact that view changed to let it
prepare for election
"""
self.view_changer.start_view_change_ts = self.utc_epoch()
for replica in self.replicas.values():
replica.on_view_change_start()
logger.info("{} resetting monitor stats at view change start".format(self))
self.monitor.reset()
self.processStashedMsgsForView(self.viewNo)
self.backup_instance_faulty_processor.restore_replicas()
self.drop_primaries()
pop_keys(self.msgsForFutureViews, lambda x: x <= self.viewNo)
self.logNodeInfo()
# Keep on doing catchup until >(n-f) nodes LedgerStatus same on have a
# prepared certificate the first PRE-PREPARE of the new view
logger.info('{}{} changed to view {}, will start catchup now'.
format(VIEW_CHANGE_PREFIX, self, self.viewNo))
self._cancel(self._check_view_change_completed)
self._schedule(action=self._check_view_change_completed,
seconds=self._view_change_timeout)
# Set to 0 even when set to 0 in `on_view_change_complete` since
# catchup might be started due to several reasons.
self.catchup_rounds_without_txns = 0
self.last_sent_pp_store_helper.erase_last_sent_pp_seq_no() | [
"def",
"on_view_change_start",
"(",
"self",
")",
":",
"self",
".",
"view_changer",
".",
"start_view_change_ts",
"=",
"self",
".",
"utc_epoch",
"(",
")",
"for",
"replica",
"in",
"self",
".",
"replicas",
".",
"values",
"(",
")",
":",
"replica",
".",
"on_view_change_start",
"(",
")",
"logger",
".",
"info",
"(",
"\"{} resetting monitor stats at view change start\"",
".",
"format",
"(",
"self",
")",
")",
"self",
".",
"monitor",
".",
"reset",
"(",
")",
"self",
".",
"processStashedMsgsForView",
"(",
"self",
".",
"viewNo",
")",
"self",
".",
"backup_instance_faulty_processor",
".",
"restore_replicas",
"(",
")",
"self",
".",
"drop_primaries",
"(",
")",
"pop_keys",
"(",
"self",
".",
"msgsForFutureViews",
",",
"lambda",
"x",
":",
"x",
"<=",
"self",
".",
"viewNo",
")",
"self",
".",
"logNodeInfo",
"(",
")",
"# Keep on doing catchup until >(n-f) nodes LedgerStatus same on have a",
"# prepared certificate the first PRE-PREPARE of the new view",
"logger",
".",
"info",
"(",
"'{}{} changed to view {}, will start catchup now'",
".",
"format",
"(",
"VIEW_CHANGE_PREFIX",
",",
"self",
",",
"self",
".",
"viewNo",
")",
")",
"self",
".",
"_cancel",
"(",
"self",
".",
"_check_view_change_completed",
")",
"self",
".",
"_schedule",
"(",
"action",
"=",
"self",
".",
"_check_view_change_completed",
",",
"seconds",
"=",
"self",
".",
"_view_change_timeout",
")",
"# Set to 0 even when set to 0 in `on_view_change_complete` since",
"# catchup might be started due to several reasons.",
"self",
".",
"catchup_rounds_without_txns",
"=",
"0",
"self",
".",
"last_sent_pp_store_helper",
".",
"erase_last_sent_pp_seq_no",
"(",
")"
] | 43.419355 | 21.935484 |
def kudos(self):
"""
:class:`list` of :class:`stravalib.model.ActivityKudos` objects for this activity.
"""
if self._kudos is None:
self.assert_bind_client()
self._kudos = self.bind_client.get_activity_kudos(self.id)
return self._kudos | [
"def",
"kudos",
"(",
"self",
")",
":",
"if",
"self",
".",
"_kudos",
"is",
"None",
":",
"self",
".",
"assert_bind_client",
"(",
")",
"self",
".",
"_kudos",
"=",
"self",
".",
"bind_client",
".",
"get_activity_kudos",
"(",
"self",
".",
"id",
")",
"return",
"self",
".",
"_kudos"
] | 36.5 | 16.25 |
def prepare(host, default_protocol='telnet', **kwargs):
"""
Creates an instance of the protocol by either parsing the given
URL-formatted hostname using :class:`Exscript.util.url`, or according to
the options of the given :class:`Exscript.Host`.
:type host: str or Host
:param host: A URL-formatted hostname or a :class:`Exscript.Host` instance.
:type default_protocol: str
:param default_protocol: Protocol that is used if the URL specifies none.
:type kwargs: dict
:param kwargs: Passed to the protocol constructor.
:rtype: Protocol
:return: An instance of the protocol.
"""
host = to_host(host, default_protocol=default_protocol)
protocol = host.get_protocol()
conn = create_protocol(protocol, **kwargs)
if protocol == 'pseudo':
filename = host.get_address()
conn.device.add_commands_from_file(filename)
return conn | [
"def",
"prepare",
"(",
"host",
",",
"default_protocol",
"=",
"'telnet'",
",",
"*",
"*",
"kwargs",
")",
":",
"host",
"=",
"to_host",
"(",
"host",
",",
"default_protocol",
"=",
"default_protocol",
")",
"protocol",
"=",
"host",
".",
"get_protocol",
"(",
")",
"conn",
"=",
"create_protocol",
"(",
"protocol",
",",
"*",
"*",
"kwargs",
")",
"if",
"protocol",
"==",
"'pseudo'",
":",
"filename",
"=",
"host",
".",
"get_address",
"(",
")",
"conn",
".",
"device",
".",
"add_commands_from_file",
"(",
"filename",
")",
"return",
"conn"
] | 40.454545 | 16.363636 |
def Read(self):
"""See base class."""
buf = ctypes.create_string_buffer(self.desc.internal_max_in_report_len)
num_read = wintypes.DWORD()
ret = kernel32.ReadFile(
self.dev, buf, len(buf), ctypes.byref(num_read), None)
if num_read.value != self.desc.internal_max_in_report_len:
raise errors.HidError("Failed to read full length report from device.")
if not ret:
raise ctypes.WinError()
# Convert the string buffer to a list of numbers. Throw away the first
# byte, which is the report id (which we don't care about).
return list(bytearray(buf[1:])) | [
"def",
"Read",
"(",
"self",
")",
":",
"buf",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"self",
".",
"desc",
".",
"internal_max_in_report_len",
")",
"num_read",
"=",
"wintypes",
".",
"DWORD",
"(",
")",
"ret",
"=",
"kernel32",
".",
"ReadFile",
"(",
"self",
".",
"dev",
",",
"buf",
",",
"len",
"(",
"buf",
")",
",",
"ctypes",
".",
"byref",
"(",
"num_read",
")",
",",
"None",
")",
"if",
"num_read",
".",
"value",
"!=",
"self",
".",
"desc",
".",
"internal_max_in_report_len",
":",
"raise",
"errors",
".",
"HidError",
"(",
"\"Failed to read full length report from device.\"",
")",
"if",
"not",
"ret",
":",
"raise",
"ctypes",
".",
"WinError",
"(",
")",
"# Convert the string buffer to a list of numbers. Throw away the first",
"# byte, which is the report id (which we don't care about).",
"return",
"list",
"(",
"bytearray",
"(",
"buf",
"[",
"1",
":",
"]",
")",
")"
] | 37 | 23.8125 |
def make_citation_dict(td):
"""
Update a citation dictionary by editing the Author field
:param td: A BixTex format citation dict or a term
:return:
"""
from datetime import datetime
if isinstance(td, dict):
d = td
name = d['name_link']
else:
d = td.as_dict()
d['_term'] = td
try:
d['name_link'] = td.name
except AttributeError:
d['name_link'] = td['name_link'].value
if 'author' in d and isinstance(d['author'], str):
authors = []
for e in d['author'].split(';'):
author_d = HumanName(e).as_dict(include_empty=False)
if 'suffix' in author_d:
author_d['lineage'] = author_d['suffix']
del author_d['suffix']
authors.append(author_d)
d['author'] = authors
if not 'type' in d:
if '_term' in d:
t = d['_term']
if t.term_is('Root.Reference') or t.term_is('Root.Resource'):
d['type'] = 'dataset'
elif t.term_is('Root.Citation'):
d['type'] = 'article'
else:
d['type'] = 'article'
if d['type'] == 'dataset':
if not 'editor' in d:
d['editor'] = [HumanName('Missing Editor').as_dict(include_empty=False)]
if not 'accessdate' in d:
d['accessdate'] = datetime.now().strftime('%Y-%m-%d')
if not 'author' in d:
d['author'] = [HumanName('Missing Author').as_dict(include_empty=False)]
if not 'title' in d:
d['title'] = d.get('description', '<Missing Title>')
if not 'journal' in d:
d['journal'] = '<Missing Journal>'
if not 'year' in d:
d['year'] = '<Missing Year>'
if '_term' in d:
del d['_term']
return d | [
"def",
"make_citation_dict",
"(",
"td",
")",
":",
"from",
"datetime",
"import",
"datetime",
"if",
"isinstance",
"(",
"td",
",",
"dict",
")",
":",
"d",
"=",
"td",
"name",
"=",
"d",
"[",
"'name_link'",
"]",
"else",
":",
"d",
"=",
"td",
".",
"as_dict",
"(",
")",
"d",
"[",
"'_term'",
"]",
"=",
"td",
"try",
":",
"d",
"[",
"'name_link'",
"]",
"=",
"td",
".",
"name",
"except",
"AttributeError",
":",
"d",
"[",
"'name_link'",
"]",
"=",
"td",
"[",
"'name_link'",
"]",
".",
"value",
"if",
"'author'",
"in",
"d",
"and",
"isinstance",
"(",
"d",
"[",
"'author'",
"]",
",",
"str",
")",
":",
"authors",
"=",
"[",
"]",
"for",
"e",
"in",
"d",
"[",
"'author'",
"]",
".",
"split",
"(",
"';'",
")",
":",
"author_d",
"=",
"HumanName",
"(",
"e",
")",
".",
"as_dict",
"(",
"include_empty",
"=",
"False",
")",
"if",
"'suffix'",
"in",
"author_d",
":",
"author_d",
"[",
"'lineage'",
"]",
"=",
"author_d",
"[",
"'suffix'",
"]",
"del",
"author_d",
"[",
"'suffix'",
"]",
"authors",
".",
"append",
"(",
"author_d",
")",
"d",
"[",
"'author'",
"]",
"=",
"authors",
"if",
"not",
"'type'",
"in",
"d",
":",
"if",
"'_term'",
"in",
"d",
":",
"t",
"=",
"d",
"[",
"'_term'",
"]",
"if",
"t",
".",
"term_is",
"(",
"'Root.Reference'",
")",
"or",
"t",
".",
"term_is",
"(",
"'Root.Resource'",
")",
":",
"d",
"[",
"'type'",
"]",
"=",
"'dataset'",
"elif",
"t",
".",
"term_is",
"(",
"'Root.Citation'",
")",
":",
"d",
"[",
"'type'",
"]",
"=",
"'article'",
"else",
":",
"d",
"[",
"'type'",
"]",
"=",
"'article'",
"if",
"d",
"[",
"'type'",
"]",
"==",
"'dataset'",
":",
"if",
"not",
"'editor'",
"in",
"d",
":",
"d",
"[",
"'editor'",
"]",
"=",
"[",
"HumanName",
"(",
"'Missing Editor'",
")",
".",
"as_dict",
"(",
"include_empty",
"=",
"False",
")",
"]",
"if",
"not",
"'accessdate'",
"in",
"d",
":",
"d",
"[",
"'accessdate'",
"]",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d'",
")",
"if",
"not",
"'author'",
"in",
"d",
":",
"d",
"[",
"'author'",
"]",
"=",
"[",
"HumanName",
"(",
"'Missing Author'",
")",
".",
"as_dict",
"(",
"include_empty",
"=",
"False",
")",
"]",
"if",
"not",
"'title'",
"in",
"d",
":",
"d",
"[",
"'title'",
"]",
"=",
"d",
".",
"get",
"(",
"'description'",
",",
"'<Missing Title>'",
")",
"if",
"not",
"'journal'",
"in",
"d",
":",
"d",
"[",
"'journal'",
"]",
"=",
"'<Missing Journal>'",
"if",
"not",
"'year'",
"in",
"d",
":",
"d",
"[",
"'year'",
"]",
"=",
"'<Missing Year>'",
"if",
"'_term'",
"in",
"d",
":",
"del",
"d",
"[",
"'_term'",
"]",
"return",
"d"
] | 25.463768 | 21.289855 |
def create_project(self,
project_name,
dataset_name,
hostname,
is_public,
s3backend=0,
kvserver='localhost',
kvengine='MySQL',
mdengine='MySQL',
description=''):
"""
Creates a project with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
hostname (str): Hostname
s3backend (str): S3 region to save the data in
is_public (int): 1 is public. 0 is not public.
kvserver (str): Server to store key value pairs in
kvengine (str): Database to store key value pairs in
mdengine (str): ???
description (str): Description for your project
Returns:
bool: True if project created, false if not created.
"""
url = self.url() + "/resource/dataset/{}".format(
dataset_name) + "/project/{}/".format(project_name)
json = {
"project_name": project_name,
"host": hostname,
"s3backend": s3backend,
"public": is_public,
"kvserver": kvserver,
"kvengine": kvengine,
"mdengine": mdengine,
"project_description": description
}
req = self.remote_utils.post_url(url, json=json)
if req.status_code is not 201:
raise RemoteDataUploadError('Could not upload {}'.format(req))
if req.content == "" or req.content == b'':
return True
else:
return False | [
"def",
"create_project",
"(",
"self",
",",
"project_name",
",",
"dataset_name",
",",
"hostname",
",",
"is_public",
",",
"s3backend",
"=",
"0",
",",
"kvserver",
"=",
"'localhost'",
",",
"kvengine",
"=",
"'MySQL'",
",",
"mdengine",
"=",
"'MySQL'",
",",
"description",
"=",
"''",
")",
":",
"url",
"=",
"self",
".",
"url",
"(",
")",
"+",
"\"/resource/dataset/{}\"",
".",
"format",
"(",
"dataset_name",
")",
"+",
"\"/project/{}/\"",
".",
"format",
"(",
"project_name",
")",
"json",
"=",
"{",
"\"project_name\"",
":",
"project_name",
",",
"\"host\"",
":",
"hostname",
",",
"\"s3backend\"",
":",
"s3backend",
",",
"\"public\"",
":",
"is_public",
",",
"\"kvserver\"",
":",
"kvserver",
",",
"\"kvengine\"",
":",
"kvengine",
",",
"\"mdengine\"",
":",
"mdengine",
",",
"\"project_description\"",
":",
"description",
"}",
"req",
"=",
"self",
".",
"remote_utils",
".",
"post_url",
"(",
"url",
",",
"json",
"=",
"json",
")",
"if",
"req",
".",
"status_code",
"is",
"not",
"201",
":",
"raise",
"RemoteDataUploadError",
"(",
"'Could not upload {}'",
".",
"format",
"(",
"req",
")",
")",
"if",
"req",
".",
"content",
"==",
"\"\"",
"or",
"req",
".",
"content",
"==",
"b''",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | 34.26 | 15.66 |
def get_cache_key(path):
"""
Create a cache key by concatenating the prefix with a hash of the path.
"""
# Python 2/3 support for path hashing
try:
path_hash = hashlib.md5(path).hexdigest()
except TypeError:
path_hash = hashlib.md5(path.encode('utf-8')).hexdigest()
return settings.cache_key_prefix + path_hash | [
"def",
"get_cache_key",
"(",
"path",
")",
":",
"# Python 2/3 support for path hashing",
"try",
":",
"path_hash",
"=",
"hashlib",
".",
"md5",
"(",
"path",
")",
".",
"hexdigest",
"(",
")",
"except",
"TypeError",
":",
"path_hash",
"=",
"hashlib",
".",
"md5",
"(",
"path",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"settings",
".",
"cache_key_prefix",
"+",
"path_hash"
] | 34.5 | 14.5 |
def traceroute_batch(input_list, results={}, method="udp", cmd_arguments=None,
delay_time=0.1, max_threads=100):
"""
This is a parallel version of the traceroute primitive.
:param input_list: the input is a list of domain names
:param method: the packet type used for traceroute, UDP by default
:param cmd_arguments: the list of arguments that need to be passed
to traceroute.
:param delay_time: delay before starting each thread
:param max_threads: maximum number of concurrent threads
:return:
"""
threads = []
thread_error = False
thread_wait_timeout = 200
ind = 1
total_item_count = len(input_list)
for domain in input_list:
wait_time = 0
while threading.active_count() > max_threads:
time.sleep(1)
wait_time += 1
if wait_time > thread_wait_timeout:
thread_error = True
break
if thread_error:
results["error"] = "Threads took too long to finish."
break
# add just a little bit of delay before starting the thread
# to avoid overwhelming the connection.
time.sleep(delay_time)
log_prefix = "%d/%d: " % (ind, total_item_count)
thread = threading.Thread(target=traceroute,
args=(domain, method, cmd_arguments,
results, log_prefix))
ind += 1
thread.setDaemon(1)
thread_open_success = False
retries = 0
while not thread_open_success and retries < MAX_THREAD_START_RETRY:
try:
thread.start()
threads.append(thread)
thread_open_success = True
except:
retries += 1
time.sleep(THREAD_START_DELAY)
logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, domain, retries, MAX_THREAD_START_RETRY))
if retries == MAX_THREAD_START_RETRY:
logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, domain, retries))
for thread in threads:
thread.join(thread_wait_timeout)
return results | [
"def",
"traceroute_batch",
"(",
"input_list",
",",
"results",
"=",
"{",
"}",
",",
"method",
"=",
"\"udp\"",
",",
"cmd_arguments",
"=",
"None",
",",
"delay_time",
"=",
"0.1",
",",
"max_threads",
"=",
"100",
")",
":",
"threads",
"=",
"[",
"]",
"thread_error",
"=",
"False",
"thread_wait_timeout",
"=",
"200",
"ind",
"=",
"1",
"total_item_count",
"=",
"len",
"(",
"input_list",
")",
"for",
"domain",
"in",
"input_list",
":",
"wait_time",
"=",
"0",
"while",
"threading",
".",
"active_count",
"(",
")",
">",
"max_threads",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"wait_time",
"+=",
"1",
"if",
"wait_time",
">",
"thread_wait_timeout",
":",
"thread_error",
"=",
"True",
"break",
"if",
"thread_error",
":",
"results",
"[",
"\"error\"",
"]",
"=",
"\"Threads took too long to finish.\"",
"break",
"# add just a little bit of delay before starting the thread",
"# to avoid overwhelming the connection.",
"time",
".",
"sleep",
"(",
"delay_time",
")",
"log_prefix",
"=",
"\"%d/%d: \"",
"%",
"(",
"ind",
",",
"total_item_count",
")",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"traceroute",
",",
"args",
"=",
"(",
"domain",
",",
"method",
",",
"cmd_arguments",
",",
"results",
",",
"log_prefix",
")",
")",
"ind",
"+=",
"1",
"thread",
".",
"setDaemon",
"(",
"1",
")",
"thread_open_success",
"=",
"False",
"retries",
"=",
"0",
"while",
"not",
"thread_open_success",
"and",
"retries",
"<",
"MAX_THREAD_START_RETRY",
":",
"try",
":",
"thread",
".",
"start",
"(",
")",
"threads",
".",
"append",
"(",
"thread",
")",
"thread_open_success",
"=",
"True",
"except",
":",
"retries",
"+=",
"1",
"time",
".",
"sleep",
"(",
"THREAD_START_DELAY",
")",
"logging",
".",
"error",
"(",
"\"%sThread start failed for %s, retrying... (%d/%d)\"",
"%",
"(",
"log_prefix",
",",
"domain",
",",
"retries",
",",
"MAX_THREAD_START_RETRY",
")",
")",
"if",
"retries",
"==",
"MAX_THREAD_START_RETRY",
":",
"logging",
".",
"error",
"(",
"\"%sCan't start a new thread for %s after %d retries.\"",
"%",
"(",
"log_prefix",
",",
"domain",
",",
"retries",
")",
")",
"for",
"thread",
"in",
"threads",
":",
"thread",
".",
"join",
"(",
"thread_wait_timeout",
")",
"return",
"results"
] | 36.716667 | 20.883333 |
def text_dict_write(fpath, dict_):
"""
Very naive, but readable way of storing a dictionary on disk
FIXME: This broke on RoseMary's big dataset. Not sure why. It gave bad
syntax. And the SyntaxError did not seem to be excepted.
"""
#dict_ = text_dict_read(fpath)
#dict_[key] = val
dict_text2 = util_str.repr4(dict_, strvals=False)
if VERBOSE:
print('[cache] ' + str(dict_text2))
util_io.write_to(fpath, dict_text2) | [
"def",
"text_dict_write",
"(",
"fpath",
",",
"dict_",
")",
":",
"#dict_ = text_dict_read(fpath)",
"#dict_[key] = val",
"dict_text2",
"=",
"util_str",
".",
"repr4",
"(",
"dict_",
",",
"strvals",
"=",
"False",
")",
"if",
"VERBOSE",
":",
"print",
"(",
"'[cache] '",
"+",
"str",
"(",
"dict_text2",
")",
")",
"util_io",
".",
"write_to",
"(",
"fpath",
",",
"dict_text2",
")"
] | 37.583333 | 12.583333 |
def write_edges(
edges: Mapping[str, Any],
filename: str,
jsonlines: bool = False,
gzipflag: bool = False,
yaml: bool = False,
):
"""Write edges to file
Args:
edges (Mapping[str, Any]): in edges JSON Schema format
filename (str): filename to write
jsonlines (bool): output in JSONLines format?
gzipflag (bool): create gzipped file?
yaml (bool): create yaml file?
"""
pass | [
"def",
"write_edges",
"(",
"edges",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"filename",
":",
"str",
",",
"jsonlines",
":",
"bool",
"=",
"False",
",",
"gzipflag",
":",
"bool",
"=",
"False",
",",
"yaml",
":",
"bool",
"=",
"False",
",",
")",
":",
"pass"
] | 25.411765 | 16.647059 |
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data["properties"]
qos_settings = properties.get("qosSettings", {})
properties["qosSettings"] = VirtualSwtichQosSettings.from_raw_data(
raw_data=qos_settings)
return super(VirtualSwitchManager, cls).process_raw_data(raw_data) | [
"def",
"process_raw_data",
"(",
"cls",
",",
"raw_data",
")",
":",
"properties",
"=",
"raw_data",
"[",
"\"properties\"",
"]",
"qos_settings",
"=",
"properties",
".",
"get",
"(",
"\"qosSettings\"",
",",
"{",
"}",
")",
"properties",
"[",
"\"qosSettings\"",
"]",
"=",
"VirtualSwtichQosSettings",
".",
"from_raw_data",
"(",
"raw_data",
"=",
"qos_settings",
")",
"return",
"super",
"(",
"VirtualSwitchManager",
",",
"cls",
")",
".",
"process_raw_data",
"(",
"raw_data",
")"
] | 53.428571 | 14 |
def _recv_callback(self, msg):
"""
Method is called when there is a message coming from a Mongrel2 server.
This message should be a valid Request String.
"""
m2req = MongrelRequest.parse(msg[0])
MongrelConnection(m2req, self._sending_stream, self.request_callback,
no_keep_alive=self.no_keep_alive, xheaders=self.xheaders) | [
"def",
"_recv_callback",
"(",
"self",
",",
"msg",
")",
":",
"m2req",
"=",
"MongrelRequest",
".",
"parse",
"(",
"msg",
"[",
"0",
"]",
")",
"MongrelConnection",
"(",
"m2req",
",",
"self",
".",
"_sending_stream",
",",
"self",
".",
"request_callback",
",",
"no_keep_alive",
"=",
"self",
".",
"no_keep_alive",
",",
"xheaders",
"=",
"self",
".",
"xheaders",
")"
] | 46.875 | 16.625 |
def _start_ubridge_capture(self, adapter_number, output_file):
"""
Start a packet capture in uBridge.
:param adapter_number: adapter number
:param output_file: PCAP destination file for the capture
"""
adapter = "bridge{}".format(adapter_number)
if not self.ubridge:
raise DockerError("Cannot start the packet capture: uBridge is not running")
yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=adapter, output_file=output_file)) | [
"def",
"_start_ubridge_capture",
"(",
"self",
",",
"adapter_number",
",",
"output_file",
")",
":",
"adapter",
"=",
"\"bridge{}\"",
".",
"format",
"(",
"adapter_number",
")",
"if",
"not",
"self",
".",
"ubridge",
":",
"raise",
"DockerError",
"(",
"\"Cannot start the packet capture: uBridge is not running\"",
")",
"yield",
"from",
"self",
".",
"_ubridge_send",
"(",
"'bridge start_capture {name} \"{output_file}\"'",
".",
"format",
"(",
"name",
"=",
"adapter",
",",
"output_file",
"=",
"output_file",
")",
")"
] | 44.416667 | 24.583333 |
def limits(self, value, square=False):
"""TODO: doc + server side implementation"""
if isinstance(value, six.string_types):
import re
match = re.match(r"(\d*)(\D*)", value)
if match is None:
raise ValueError("do not understand limit specifier %r, examples are 90%, 3sigma")
else:
value, type = match.groups()
import ast
value = ast.literal_eval(value)
type = type.strip()
if type in ["s", "sigma"]:
return self.limits_sigma(value)
elif type in ["ss", "sigmasquare"]:
return self.limits_sigma(value, square=True)
elif type in ["%", "percent"]:
return self.limits_percentage(value)
elif type in ["%s", "%square", "percentsquare"]:
return self.limits_percentage(value, square=True)
if value is None:
return self.limits_percentage(square=square)
else:
return value | [
"def",
"limits",
"(",
"self",
",",
"value",
",",
"square",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"import",
"re",
"match",
"=",
"re",
".",
"match",
"(",
"r\"(\\d*)(\\D*)\"",
",",
"value",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"do not understand limit specifier %r, examples are 90%, 3sigma\"",
")",
"else",
":",
"value",
",",
"type",
"=",
"match",
".",
"groups",
"(",
")",
"import",
"ast",
"value",
"=",
"ast",
".",
"literal_eval",
"(",
"value",
")",
"type",
"=",
"type",
".",
"strip",
"(",
")",
"if",
"type",
"in",
"[",
"\"s\"",
",",
"\"sigma\"",
"]",
":",
"return",
"self",
".",
"limits_sigma",
"(",
"value",
")",
"elif",
"type",
"in",
"[",
"\"ss\"",
",",
"\"sigmasquare\"",
"]",
":",
"return",
"self",
".",
"limits_sigma",
"(",
"value",
",",
"square",
"=",
"True",
")",
"elif",
"type",
"in",
"[",
"\"%\"",
",",
"\"percent\"",
"]",
":",
"return",
"self",
".",
"limits_percentage",
"(",
"value",
")",
"elif",
"type",
"in",
"[",
"\"%s\"",
",",
"\"%square\"",
",",
"\"percentsquare\"",
"]",
":",
"return",
"self",
".",
"limits_percentage",
"(",
"value",
",",
"square",
"=",
"True",
")",
"if",
"value",
"is",
"None",
":",
"return",
"self",
".",
"limits_percentage",
"(",
"square",
"=",
"square",
")",
"else",
":",
"return",
"value"
] | 44.375 | 14.875 |
def dump_jwks(kbl, target, private=False):
"""
Write a JWK to a file. Will ignore symmetric keys !!
:param kbl: List of KeyBundles
:param target: Name of the file to which everything should be written
:param private: Should also the private parts be exported
"""
keys = []
for kb in kbl:
keys.extend([k.serialize(private) for k in kb.keys() if
k.kty != 'oct' and not k.inactive_since])
res = {"keys": keys}
try:
f = open(target, 'w')
except IOError:
(head, tail) = os.path.split(target)
os.makedirs(head)
f = open(target, 'w')
_txt = json.dumps(res)
f.write(_txt)
f.close() | [
"def",
"dump_jwks",
"(",
"kbl",
",",
"target",
",",
"private",
"=",
"False",
")",
":",
"keys",
"=",
"[",
"]",
"for",
"kb",
"in",
"kbl",
":",
"keys",
".",
"extend",
"(",
"[",
"k",
".",
"serialize",
"(",
"private",
")",
"for",
"k",
"in",
"kb",
".",
"keys",
"(",
")",
"if",
"k",
".",
"kty",
"!=",
"'oct'",
"and",
"not",
"k",
".",
"inactive_since",
"]",
")",
"res",
"=",
"{",
"\"keys\"",
":",
"keys",
"}",
"try",
":",
"f",
"=",
"open",
"(",
"target",
",",
"'w'",
")",
"except",
"IOError",
":",
"(",
"head",
",",
"tail",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"target",
")",
"os",
".",
"makedirs",
"(",
"head",
")",
"f",
"=",
"open",
"(",
"target",
",",
"'w'",
")",
"_txt",
"=",
"json",
".",
"dumps",
"(",
"res",
")",
"f",
".",
"write",
"(",
"_txt",
")",
"f",
".",
"close",
"(",
")"
] | 26.8 | 20.24 |
def list(self, argv):
"""List all current search jobs if no jobs specified, otherwise
list the properties of the specified jobs."""
def read(job):
for key in sorted(job.content.keys()):
# Ignore some fields that make the output hard to read and
# that are available via other commands.
if key in ["performance"]: continue
print("%s: %s" % (key, job.content[key]))
if len(argv) == 0:
index = 0
for job in self.service.jobs:
print("@%d : %s" % (index, job.sid))
index += 1
return
self.foreach(argv, read) | [
"def",
"list",
"(",
"self",
",",
"argv",
")",
":",
"def",
"read",
"(",
"job",
")",
":",
"for",
"key",
"in",
"sorted",
"(",
"job",
".",
"content",
".",
"keys",
"(",
")",
")",
":",
"# Ignore some fields that make the output hard to read and",
"# that are available via other commands.",
"if",
"key",
"in",
"[",
"\"performance\"",
"]",
":",
"continue",
"print",
"(",
"\"%s: %s\"",
"%",
"(",
"key",
",",
"job",
".",
"content",
"[",
"key",
"]",
")",
")",
"if",
"len",
"(",
"argv",
")",
"==",
"0",
":",
"index",
"=",
"0",
"for",
"job",
"in",
"self",
".",
"service",
".",
"jobs",
":",
"print",
"(",
"\"@%d : %s\"",
"%",
"(",
"index",
",",
"job",
".",
"sid",
")",
")",
"index",
"+=",
"1",
"return",
"self",
".",
"foreach",
"(",
"argv",
",",
"read",
")"
] | 35.473684 | 17.631579 |
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step) | [
"def",
"slice_slice",
"(",
"old_slice",
",",
"applied_slice",
",",
"size",
")",
":",
"step",
"=",
"(",
"old_slice",
".",
"step",
"or",
"1",
")",
"*",
"(",
"applied_slice",
".",
"step",
"or",
"1",
")",
"# For now, use the hack of turning old_slice into an ndarray to reconstruct",
"# the slice start and stop. This is not entirely ideal, but it is still",
"# definitely better than leaving the indexer as an array.",
"items",
"=",
"_expand_slice",
"(",
"old_slice",
",",
"size",
")",
"[",
"applied_slice",
"]",
"if",
"len",
"(",
"items",
")",
">",
"0",
":",
"start",
"=",
"items",
"[",
"0",
"]",
"stop",
"=",
"items",
"[",
"-",
"1",
"]",
"+",
"int",
"(",
"np",
".",
"sign",
"(",
"step",
")",
")",
"if",
"stop",
"<",
"0",
":",
"stop",
"=",
"None",
"else",
":",
"start",
"=",
"0",
"stop",
"=",
"0",
"return",
"slice",
"(",
"start",
",",
"stop",
",",
"step",
")"
] | 39 | 19.4 |
def is_url(text):
""" Check if the given text looks like a URL. """
if text is None:
return False
text = text.lower()
return text.startswith('http://') or text.startswith('https://') or \
text.startswith('urn:') or text.startswith('file://') | [
"def",
"is_url",
"(",
"text",
")",
":",
"if",
"text",
"is",
"None",
":",
"return",
"False",
"text",
"=",
"text",
".",
"lower",
"(",
")",
"return",
"text",
".",
"startswith",
"(",
"'http://'",
")",
"or",
"text",
".",
"startswith",
"(",
"'https://'",
")",
"or",
"text",
".",
"startswith",
"(",
"'urn:'",
")",
"or",
"text",
".",
"startswith",
"(",
"'file://'",
")"
] | 38.142857 | 19.142857 |
def send_message(frm=None, to=None, text=None):
"""Shortcut to send a sms using libnexmo api.
:param frm: The originator of the message
:param to: The message's recipient
:param text: The text message body
Example usage:
>>> send_message(to='+33123456789', text='My sms message body')
"""
assert to is not None
assert text is not None
if frm is None:
frm = settings.NEXMO_DEFAULT_FROM
client = nexmo.Client(key=settings.NEXMO_API_KEY, secret=settings.NEXMO_API_SECRET)
response = client.send_message({
'from': frm,
'to': to,
'text': text
})
return response | [
"def",
"send_message",
"(",
"frm",
"=",
"None",
",",
"to",
"=",
"None",
",",
"text",
"=",
"None",
")",
":",
"assert",
"to",
"is",
"not",
"None",
"assert",
"text",
"is",
"not",
"None",
"if",
"frm",
"is",
"None",
":",
"frm",
"=",
"settings",
".",
"NEXMO_DEFAULT_FROM",
"client",
"=",
"nexmo",
".",
"Client",
"(",
"key",
"=",
"settings",
".",
"NEXMO_API_KEY",
",",
"secret",
"=",
"settings",
".",
"NEXMO_API_SECRET",
")",
"response",
"=",
"client",
".",
"send_message",
"(",
"{",
"'from'",
":",
"frm",
",",
"'to'",
":",
"to",
",",
"'text'",
":",
"text",
"}",
")",
"return",
"response"
] | 25.04 | 20.96 |
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES | [
"def",
"get_confidence",
"(",
"self",
")",
":",
"# if we didn't receive any character in our consideration range,",
"# return negative answer",
"if",
"self",
".",
"_mTotalChars",
"<=",
"0",
"or",
"self",
".",
"_mFreqChars",
"<=",
"MINIMUM_DATA_THRESHOLD",
":",
"return",
"SURE_NO",
"if",
"self",
".",
"_mTotalChars",
"!=",
"self",
".",
"_mFreqChars",
":",
"r",
"=",
"(",
"self",
".",
"_mFreqChars",
"/",
"(",
"(",
"self",
".",
"_mTotalChars",
"-",
"self",
".",
"_mFreqChars",
")",
"*",
"self",
".",
"_mTypicalDistributionRatio",
")",
")",
"if",
"r",
"<",
"SURE_YES",
":",
"return",
"r",
"# normalize confidence (we don't want to be 100% sure)",
"return",
"SURE_YES"
] | 40.133333 | 20.8 |
def to_safe_str(s):
"""
converts some (tr) non-ascii chars to ascii counterparts,
then return the result as lowercase
"""
# TODO: This is insufficient as it doesn't do anything for other non-ascii chars
return re.sub(r'[^0-9a-zA-Z]+', '_', s.strip().replace(u'ğ', 'g').replace(u'ö', 'o').replace(
u'ç', 'c').replace(u'Ç','c').replace(u'Ö', u'O').replace(u'Ş', 's').replace(
u'Ü', 'u').replace(u'ı', 'i').replace(u'İ','i').replace(u'Ğ', 'g').replace(
u'ö', 'o').replace(u'ş', 's').replace(u'ü', 'u').lower(), re.UNICODE) | [
"def",
"to_safe_str",
"(",
"s",
")",
":",
"# TODO: This is insufficient as it doesn't do anything for other non-ascii chars",
"return",
"re",
".",
"sub",
"(",
"r'[^0-9a-zA-Z]+'",
",",
"'_'",
",",
"s",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"u'ğ',",
" ",
"g')",
".",
"r",
"eplace(",
"u",
"'ö', ",
"'",
"').",
"r",
"e",
"place(",
"",
"u'ç',",
" ",
"c')",
".",
"r",
"eplace(",
"u",
"'Ç','",
"c",
"').",
"r",
"e",
"place(u",
"'",
"Ö', u",
"'",
"').r",
"e",
"p",
"lace(u'",
"Ş",
"', 's",
"'",
".re",
"p",
"l",
"ace(",
"",
"u'Ü',",
" ",
"u')",
".",
"r",
"eplace(",
"u",
"'ı', ",
"'",
"').",
"r",
"e",
"place(u",
"'",
"İ','i",
"'",
").r",
"e",
"p",
"lace(u'",
"Ğ",
"', 'g",
"'",
".re",
"p",
"l",
"ace(",
"",
"u'ö',",
" ",
"o')",
".",
"r",
"eplace(",
"u",
"'ş', ",
"'",
"').",
"r",
"e",
"place(u",
"'",
"ü', '",
"u",
").l",
"o",
"w",
"er(),",
" ",
"r",
"e",
"UN",
"I",
"CODE)",
""
] | 55.8 | 26.8 |
def _build_loop(self, lexer):
"""Build saveframe loop.
:param lexer: instance of lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Fields and values of the loop.
:rtype: :py:class:`tuple`
"""
fields = []
values = []
token = next(lexer)
while token[0] == u"_":
fields.append(token[1:])
token = next(lexer)
while token != u"stop_":
values.append(token)
token = next(lexer)
assert float(len(values) / len(fields)).is_integer(), \
"Error in loop construction: number of fields must be equal to number of values."
values = [OrderedDict(zip(fields, values[i:i + len(fields)])) for i in range(0, len(values), len(fields))]
return fields, values | [
"def",
"_build_loop",
"(",
"self",
",",
"lexer",
")",
":",
"fields",
"=",
"[",
"]",
"values",
"=",
"[",
"]",
"token",
"=",
"next",
"(",
"lexer",
")",
"while",
"token",
"[",
"0",
"]",
"==",
"u\"_\"",
":",
"fields",
".",
"append",
"(",
"token",
"[",
"1",
":",
"]",
")",
"token",
"=",
"next",
"(",
"lexer",
")",
"while",
"token",
"!=",
"u\"stop_\"",
":",
"values",
".",
"append",
"(",
"token",
")",
"token",
"=",
"next",
"(",
"lexer",
")",
"assert",
"float",
"(",
"len",
"(",
"values",
")",
"/",
"len",
"(",
"fields",
")",
")",
".",
"is_integer",
"(",
")",
",",
"\"Error in loop construction: number of fields must be equal to number of values.\"",
"values",
"=",
"[",
"OrderedDict",
"(",
"zip",
"(",
"fields",
",",
"values",
"[",
"i",
":",
"i",
"+",
"len",
"(",
"fields",
")",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"values",
")",
",",
"len",
"(",
"fields",
")",
")",
"]",
"return",
"fields",
",",
"values"
] | 32.64 | 20.6 |
def _add_disease_association_attributes(self, att_ind_start, att_mappings):
"""Add disease association information to the attribute mapping dictionary.
:param int att_ind_start: Start index for enumerating the attributes.
:param dict att_mappings: Dictionary of mappings between vertices and enumerated attributes.
"""
disease_mappings = self.get_disease_mappings(att_ind_start)
for vertex in self.graph.vs:
assoc_diseases = vertex["associated_diseases"]
if assoc_diseases is not None:
assoc_disease_ids = [disease_mappings[disease] for disease in assoc_diseases]
att_mappings[vertex.index].extend(assoc_disease_ids) | [
"def",
"_add_disease_association_attributes",
"(",
"self",
",",
"att_ind_start",
",",
"att_mappings",
")",
":",
"disease_mappings",
"=",
"self",
".",
"get_disease_mappings",
"(",
"att_ind_start",
")",
"for",
"vertex",
"in",
"self",
".",
"graph",
".",
"vs",
":",
"assoc_diseases",
"=",
"vertex",
"[",
"\"associated_diseases\"",
"]",
"if",
"assoc_diseases",
"is",
"not",
"None",
":",
"assoc_disease_ids",
"=",
"[",
"disease_mappings",
"[",
"disease",
"]",
"for",
"disease",
"in",
"assoc_diseases",
"]",
"att_mappings",
"[",
"vertex",
".",
"index",
"]",
".",
"extend",
"(",
"assoc_disease_ids",
")"
] | 59.166667 | 25.333333 |
def onSelect_specimen(self, event):
"""
update figures and text when a new specimen is selected
"""
self.selected_meas = []
self.select_specimen(str(self.specimens_box.GetValue()))
if self.ie_open:
self.ie.change_selected(self.current_fit)
self.update_selection() | [
"def",
"onSelect_specimen",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"selected_meas",
"=",
"[",
"]",
"self",
".",
"select_specimen",
"(",
"str",
"(",
"self",
".",
"specimens_box",
".",
"GetValue",
"(",
")",
")",
")",
"if",
"self",
".",
"ie_open",
":",
"self",
".",
"ie",
".",
"change_selected",
"(",
"self",
".",
"current_fit",
")",
"self",
".",
"update_selection",
"(",
")"
] | 35.888889 | 11 |
def handle(client_message, handle_event_member=None, handle_event_member_list=None, handle_event_member_attribute_change=None, to_object=None):
""" Event handler """
message_type = client_message.get_message_type()
if message_type == EVENT_MEMBER and handle_event_member is not None:
member = MemberCodec.decode(client_message, to_object)
event_type = client_message.read_int()
handle_event_member(member=member, event_type=event_type)
if message_type == EVENT_MEMBERLIST and handle_event_member_list is not None:
members_size = client_message.read_int()
members = []
for _ in range(0, members_size):
members_item = MemberCodec.decode(client_message, to_object)
members.append(members_item)
handle_event_member_list(members=members)
if message_type == EVENT_MEMBERATTRIBUTECHANGE and handle_event_member_attribute_change is not None:
uuid = client_message.read_str()
key = client_message.read_str()
operation_type = client_message.read_int()
value = None
if not client_message.read_bool():
value = client_message.read_str()
handle_event_member_attribute_change(uuid=uuid, key=key, operation_type=operation_type, value=value) | [
"def",
"handle",
"(",
"client_message",
",",
"handle_event_member",
"=",
"None",
",",
"handle_event_member_list",
"=",
"None",
",",
"handle_event_member_attribute_change",
"=",
"None",
",",
"to_object",
"=",
"None",
")",
":",
"message_type",
"=",
"client_message",
".",
"get_message_type",
"(",
")",
"if",
"message_type",
"==",
"EVENT_MEMBER",
"and",
"handle_event_member",
"is",
"not",
"None",
":",
"member",
"=",
"MemberCodec",
".",
"decode",
"(",
"client_message",
",",
"to_object",
")",
"event_type",
"=",
"client_message",
".",
"read_int",
"(",
")",
"handle_event_member",
"(",
"member",
"=",
"member",
",",
"event_type",
"=",
"event_type",
")",
"if",
"message_type",
"==",
"EVENT_MEMBERLIST",
"and",
"handle_event_member_list",
"is",
"not",
"None",
":",
"members_size",
"=",
"client_message",
".",
"read_int",
"(",
")",
"members",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"members_size",
")",
":",
"members_item",
"=",
"MemberCodec",
".",
"decode",
"(",
"client_message",
",",
"to_object",
")",
"members",
".",
"append",
"(",
"members_item",
")",
"handle_event_member_list",
"(",
"members",
"=",
"members",
")",
"if",
"message_type",
"==",
"EVENT_MEMBERATTRIBUTECHANGE",
"and",
"handle_event_member_attribute_change",
"is",
"not",
"None",
":",
"uuid",
"=",
"client_message",
".",
"read_str",
"(",
")",
"key",
"=",
"client_message",
".",
"read_str",
"(",
")",
"operation_type",
"=",
"client_message",
".",
"read_int",
"(",
")",
"value",
"=",
"None",
"if",
"not",
"client_message",
".",
"read_bool",
"(",
")",
":",
"value",
"=",
"client_message",
".",
"read_str",
"(",
")",
"handle_event_member_attribute_change",
"(",
"uuid",
"=",
"uuid",
",",
"key",
"=",
"key",
",",
"operation_type",
"=",
"operation_type",
",",
"value",
"=",
"value",
")"
] | 57.409091 | 21.818182 |
def model(self,
voltages=True,
sensitivities=False,
potentials=False,
output_directory=None,
silent=False,
):
"""Forward model the tomodir and read in the results
"""
self._check_state()
if self.can_model:
if output_directory is not None:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
tempdir = output_directory
self._model(voltages, sensitivities, potentials, tempdir)
else:
raise IOError(
'output directory already exists: {0}'.format(
output_directory
)
)
else:
with tempfile.TemporaryDirectory(dir=self.tempdir) as tempdir:
self._model(
voltages, sensitivities, potentials, tempdir,
silent=silent
)
return 1
else:
print('Sorry, not all required information to model are present')
print('Check:')
print('1) configurations present: self.configs.configs')
print('2) is a model present')
return None | [
"def",
"model",
"(",
"self",
",",
"voltages",
"=",
"True",
",",
"sensitivities",
"=",
"False",
",",
"potentials",
"=",
"False",
",",
"output_directory",
"=",
"None",
",",
"silent",
"=",
"False",
",",
")",
":",
"self",
".",
"_check_state",
"(",
")",
"if",
"self",
".",
"can_model",
":",
"if",
"output_directory",
"is",
"not",
"None",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_directory",
")",
":",
"os",
".",
"makedirs",
"(",
"output_directory",
")",
"tempdir",
"=",
"output_directory",
"self",
".",
"_model",
"(",
"voltages",
",",
"sensitivities",
",",
"potentials",
",",
"tempdir",
")",
"else",
":",
"raise",
"IOError",
"(",
"'output directory already exists: {0}'",
".",
"format",
"(",
"output_directory",
")",
")",
"else",
":",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
"dir",
"=",
"self",
".",
"tempdir",
")",
"as",
"tempdir",
":",
"self",
".",
"_model",
"(",
"voltages",
",",
"sensitivities",
",",
"potentials",
",",
"tempdir",
",",
"silent",
"=",
"silent",
")",
"return",
"1",
"else",
":",
"print",
"(",
"'Sorry, not all required information to model are present'",
")",
"print",
"(",
"'Check:'",
")",
"print",
"(",
"'1) configurations present: self.configs.configs'",
")",
"print",
"(",
"'2) is a model present'",
")",
"return",
"None"
] | 36.694444 | 16.333333 |
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_QCRITICAL):
logger.info("Will try to fix task %s" % str(task))
try:
print(task.fix_queue_critical())
count += 1
except FixQueueCriticalError:
logger.info("Not able to fix task %s" % task)
return count | [
"def",
"fix_queue_critical",
"(",
"self",
")",
":",
"count",
"=",
"0",
"for",
"task",
"in",
"self",
".",
"iflat_tasks",
"(",
"status",
"=",
"self",
".",
"S_QCRITICAL",
")",
":",
"logger",
".",
"info",
"(",
"\"Will try to fix task %s\"",
"%",
"str",
"(",
"task",
")",
")",
"try",
":",
"print",
"(",
"task",
".",
"fix_queue_critical",
"(",
")",
")",
"count",
"+=",
"1",
"except",
"FixQueueCriticalError",
":",
"logger",
".",
"info",
"(",
"\"Not able to fix task %s\"",
"%",
"task",
")",
"return",
"count"
] | 34.8125 | 19.9375 |
def get_bgcolor(self, index):
"""Background color depending on value"""
value = self.get_value(index)
if index.column() < 3:
color = ReadOnlyCollectionsModel.get_bgcolor(self, index)
else:
if self.remote:
color_name = value['color']
else:
color_name = get_color_name(value)
color = QColor(color_name)
color.setAlphaF(.2)
return color | [
"def",
"get_bgcolor",
"(",
"self",
",",
"index",
")",
":",
"value",
"=",
"self",
".",
"get_value",
"(",
"index",
")",
"if",
"index",
".",
"column",
"(",
")",
"<",
"3",
":",
"color",
"=",
"ReadOnlyCollectionsModel",
".",
"get_bgcolor",
"(",
"self",
",",
"index",
")",
"else",
":",
"if",
"self",
".",
"remote",
":",
"color_name",
"=",
"value",
"[",
"'color'",
"]",
"else",
":",
"color_name",
"=",
"get_color_name",
"(",
"value",
")",
"color",
"=",
"QColor",
"(",
"color_name",
")",
"color",
".",
"setAlphaF",
"(",
".2",
")",
"return",
"color"
] | 35.769231 | 11.923077 |
def set_scenario_status(scenario_id, status, **kwargs):
"""
Set the status of a scenario.
"""
user_id = kwargs.get('user_id')
_check_can_edit_scenario(scenario_id, kwargs['user_id'])
scenario_i = _get_scenario(scenario_id, user_id)
scenario_i.status = status
db.DBSession.flush()
return 'OK' | [
"def",
"set_scenario_status",
"(",
"scenario_id",
",",
"status",
",",
"*",
"*",
"kwargs",
")",
":",
"user_id",
"=",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
"_check_can_edit_scenario",
"(",
"scenario_id",
",",
"kwargs",
"[",
"'user_id'",
"]",
")",
"scenario_i",
"=",
"_get_scenario",
"(",
"scenario_id",
",",
"user_id",
")",
"scenario_i",
".",
"status",
"=",
"status",
"db",
".",
"DBSession",
".",
"flush",
"(",
")",
"return",
"'OK'"
] | 23 | 19 |
def remove_bookmark(self, time):
"""User removes bookmark.
Parameters
----------
time : tuple of float
start and end of the new bookmark, in s
"""
self.annot.remove_bookmark(time=time)
self.update_annotations() | [
"def",
"remove_bookmark",
"(",
"self",
",",
"time",
")",
":",
"self",
".",
"annot",
".",
"remove_bookmark",
"(",
"time",
"=",
"time",
")",
"self",
".",
"update_annotations",
"(",
")"
] | 27 | 12.6 |
def get_params(self, pnames=None):
""" Return a list of Parameter objects
Parameters
----------
pname : list or None
If a list get the Parameter objects with those names
If none, get all the Parameter objects
Returns
-------
params : list
list of Parameters
"""
l = []
if pnames is None:
pnames = self.params.keys()
for pname in pnames:
p = self.params[pname]
if isinstance(p, Parameter):
l.append(p)
return l | [
"def",
"get_params",
"(",
"self",
",",
"pnames",
"=",
"None",
")",
":",
"l",
"=",
"[",
"]",
"if",
"pnames",
"is",
"None",
":",
"pnames",
"=",
"self",
".",
"params",
".",
"keys",
"(",
")",
"for",
"pname",
"in",
"pnames",
":",
"p",
"=",
"self",
".",
"params",
"[",
"pname",
"]",
"if",
"isinstance",
"(",
"p",
",",
"Parameter",
")",
":",
"l",
".",
"append",
"(",
"p",
")",
"return",
"l"
] | 22 | 19.576923 |
def scale2x(self, surface):
"""
Scales using the AdvanceMAME Scale2X algorithm which does a
'jaggie-less' scale of bitmap graphics.
"""
assert(self._scale == 2)
return self._pygame.transform.scale2x(surface) | [
"def",
"scale2x",
"(",
"self",
",",
"surface",
")",
":",
"assert",
"(",
"self",
".",
"_scale",
"==",
"2",
")",
"return",
"self",
".",
"_pygame",
".",
"transform",
".",
"scale2x",
"(",
"surface",
")"
] | 35.571429 | 9.857143 |
def handle_user(self, params):
"""
Handle the USER command which identifies the user to the server.
"""
params = params.split(' ', 3)
if len(params) != 4:
raise IRCError.from_name(
'needmoreparams',
'USER :Not enough parameters')
user, mode, unused, realname = params
self.user = user
self.mode = mode
self.realname = realname
return '' | [
"def",
"handle_user",
"(",
"self",
",",
"params",
")",
":",
"params",
"=",
"params",
".",
"split",
"(",
"' '",
",",
"3",
")",
"if",
"len",
"(",
"params",
")",
"!=",
"4",
":",
"raise",
"IRCError",
".",
"from_name",
"(",
"'needmoreparams'",
",",
"'USER :Not enough parameters'",
")",
"user",
",",
"mode",
",",
"unused",
",",
"realname",
"=",
"params",
"self",
".",
"user",
"=",
"user",
"self",
".",
"mode",
"=",
"mode",
"self",
".",
"realname",
"=",
"realname",
"return",
"''"
] | 27.9375 | 13.8125 |
def _get_parser(self, env):
""" Creates base argument parser.
`env`
Runtime ``Environment`` instance.
* Raises ``HelpBanner`` exception when certain conditions apply.
Returns ``FocusArgumentParser`` object.
"""
version_str = 'focus version ' + __version__
usage_str = 'focus [-h] [-v] [--no-color] <command> [<args>]'
# setup parser
parser = FocusArgParser(description=("Command-line productivity tool "
"for improved task workflows."),
epilog=("See 'focus help <command>' for more "
"information on a specific command."),
usage=usage_str)
parser.add_argument('-v', '--version', action='version',
version=version_str)
parser.add_argument('--no-color', action='store_true',
help='disables colors')
# fetch command plugins
commands = []
active = env.task.active
command_hooks = registration.get_registered(command_hooks=True,
task_active=active)
# extract command name and docstrings as help text
for plugin in command_hooks:
help_text = (plugin.__doc__ or '').strip().rstrip('.').lower()
commands.append((plugin.command, help_text))
commands.sort(key=lambda x: x[0]) # command ordered
# install subparsers
subparsers = parser.add_subparsers(title='available commands')
# install 'help' subparser
help_parser = subparsers.add_parser('help', add_help=False)
help_parser.set_defaults(func=self._handle_help)
# install 'version' subparser
version_parser = subparsers.add_parser('version', add_help=False)
def _print_version(env, args):
env.io.write(version_str)
return True
version_parser.set_defaults(func=_print_version)
# install command subparsers based on registered command plugins.
# this allows for focus commands (e.g. focus on [...])
for command, help_ in commands:
cmd_parser = subparsers.add_parser(command, help=help_,
add_help=False)
# use wrapper to bind command value and passthru to _handle_command
# when executed later
def _run(command):
def _wrapper(env, args):
return self._handle_command(command, env, args)
return _wrapper
cmd_parser.set_defaults(func=_run(command))
return parser | [
"def",
"_get_parser",
"(",
"self",
",",
"env",
")",
":",
"version_str",
"=",
"'focus version '",
"+",
"__version__",
"usage_str",
"=",
"'focus [-h] [-v] [--no-color] <command> [<args>]'",
"# setup parser",
"parser",
"=",
"FocusArgParser",
"(",
"description",
"=",
"(",
"\"Command-line productivity tool \"",
"\"for improved task workflows.\"",
")",
",",
"epilog",
"=",
"(",
"\"See 'focus help <command>' for more \"",
"\"information on a specific command.\"",
")",
",",
"usage",
"=",
"usage_str",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"version_str",
")",
"parser",
".",
"add_argument",
"(",
"'--no-color'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'disables colors'",
")",
"# fetch command plugins",
"commands",
"=",
"[",
"]",
"active",
"=",
"env",
".",
"task",
".",
"active",
"command_hooks",
"=",
"registration",
".",
"get_registered",
"(",
"command_hooks",
"=",
"True",
",",
"task_active",
"=",
"active",
")",
"# extract command name and docstrings as help text",
"for",
"plugin",
"in",
"command_hooks",
":",
"help_text",
"=",
"(",
"plugin",
".",
"__doc__",
"or",
"''",
")",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
"'.'",
")",
".",
"lower",
"(",
")",
"commands",
".",
"append",
"(",
"(",
"plugin",
".",
"command",
",",
"help_text",
")",
")",
"commands",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"# command ordered",
"# install subparsers",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"'available commands'",
")",
"# install 'help' subparser",
"help_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'help'",
",",
"add_help",
"=",
"False",
")",
"help_parser",
".",
"set_defaults",
"(",
"func",
"=",
"self",
".",
"_handle_help",
")",
"# install 'version' subparser",
"version_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'version'",
",",
"add_help",
"=",
"False",
")",
"def",
"_print_version",
"(",
"env",
",",
"args",
")",
":",
"env",
".",
"io",
".",
"write",
"(",
"version_str",
")",
"return",
"True",
"version_parser",
".",
"set_defaults",
"(",
"func",
"=",
"_print_version",
")",
"# install command subparsers based on registered command plugins.",
"# this allows for focus commands (e.g. focus on [...])",
"for",
"command",
",",
"help_",
"in",
"commands",
":",
"cmd_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"command",
",",
"help",
"=",
"help_",
",",
"add_help",
"=",
"False",
")",
"# use wrapper to bind command value and passthru to _handle_command",
"# when executed later",
"def",
"_run",
"(",
"command",
")",
":",
"def",
"_wrapper",
"(",
"env",
",",
"args",
")",
":",
"return",
"self",
".",
"_handle_command",
"(",
"command",
",",
"env",
",",
"args",
")",
"return",
"_wrapper",
"cmd_parser",
".",
"set_defaults",
"(",
"func",
"=",
"_run",
"(",
"command",
")",
")",
"return",
"parser"
] | 39 | 23.202899 |
def correlation_matrix(nums_with_uncert):
"""
Calculate the correlation matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
corr_matrix : 2d-array-like
A nested list containing covariance values
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> correlation_matrix([x,y,z])
[[ 0.99969486 0.00254001 0.4489385 ]
[ 0.00254001 0.99982321 0.89458702]
[ 0.4489385 0.89458702 1. ]]
"""
ufuncs = list(map(to_uncertain_func, nums_with_uncert))
data = np.vstack([ufunc._mcpts for ufunc in ufuncs])
return np.corrcoef(data.T, rowvar=0) | [
"def",
"correlation_matrix",
"(",
"nums_with_uncert",
")",
":",
"ufuncs",
"=",
"list",
"(",
"map",
"(",
"to_uncertain_func",
",",
"nums_with_uncert",
")",
")",
"data",
"=",
"np",
".",
"vstack",
"(",
"[",
"ufunc",
".",
"_mcpts",
"for",
"ufunc",
"in",
"ufuncs",
"]",
")",
"return",
"np",
".",
"corrcoef",
"(",
"data",
".",
"T",
",",
"rowvar",
"=",
"0",
")"
] | 27.533333 | 18.533333 |
def enabled(name='allprofiles'):
'''
Enable all the firewall profiles (Windows only)
Args:
profile (Optional[str]): The name of the profile to enable. Default is
``allprofiles``. Valid options are:
- allprofiles
- domainprofile
- privateprofile
- publicprofile
Example:
.. code-block:: yaml
# To enable the domain profile
enable_domain:
win_firewall.enabled:
- name: domainprofile
# To enable all profiles
enable_all:
win_firewall.enabled:
- name: allprofiles
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
profile_map = {'domainprofile': 'Domain',
'privateprofile': 'Private',
'publicprofile': 'Public',
'allprofiles': 'All'}
# Make sure the profile name is valid
if name not in profile_map:
raise SaltInvocationError('Invalid profile name: {0}'.format(name))
current_config = __salt__['firewall.get_config']()
if name != 'allprofiles' and profile_map[name] not in current_config:
ret['result'] = False
ret['comment'] = 'Profile {0} does not exist in firewall.get_config' \
''.format(name)
return ret
for key in current_config:
if not current_config[key]:
if name == 'allprofiles' or key == profile_map[name]:
ret['changes'][key] = 'enabled'
if __opts__['test']:
ret['result'] = not ret['changes'] or None
ret['comment'] = ret['changes']
ret['changes'] = {}
return ret
# Enable it
if ret['changes']:
try:
ret['result'] = __salt__['firewall.enable'](name)
except CommandExecutionError:
ret['comment'] = 'Firewall Profile {0} could not be enabled' \
''.format(profile_map[name])
else:
if name == 'allprofiles':
msg = 'All the firewall profiles are enabled'
else:
msg = 'Firewall profile {0} is enabled'.format(name)
ret['comment'] = msg
return ret | [
"def",
"enabled",
"(",
"name",
"=",
"'allprofiles'",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"''",
"}",
"profile_map",
"=",
"{",
"'domainprofile'",
":",
"'Domain'",
",",
"'privateprofile'",
":",
"'Private'",
",",
"'publicprofile'",
":",
"'Public'",
",",
"'allprofiles'",
":",
"'All'",
"}",
"# Make sure the profile name is valid",
"if",
"name",
"not",
"in",
"profile_map",
":",
"raise",
"SaltInvocationError",
"(",
"'Invalid profile name: {0}'",
".",
"format",
"(",
"name",
")",
")",
"current_config",
"=",
"__salt__",
"[",
"'firewall.get_config'",
"]",
"(",
")",
"if",
"name",
"!=",
"'allprofiles'",
"and",
"profile_map",
"[",
"name",
"]",
"not",
"in",
"current_config",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Profile {0} does not exist in firewall.get_config'",
"''",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"for",
"key",
"in",
"current_config",
":",
"if",
"not",
"current_config",
"[",
"key",
"]",
":",
"if",
"name",
"==",
"'allprofiles'",
"or",
"key",
"==",
"profile_map",
"[",
"name",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"key",
"]",
"=",
"'enabled'",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"not",
"ret",
"[",
"'changes'",
"]",
"or",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"ret",
"[",
"'changes'",
"]",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"}",
"return",
"ret",
"# Enable it",
"if",
"ret",
"[",
"'changes'",
"]",
":",
"try",
":",
"ret",
"[",
"'result'",
"]",
"=",
"__salt__",
"[",
"'firewall.enable'",
"]",
"(",
"name",
")",
"except",
"CommandExecutionError",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Firewall Profile {0} could not be enabled'",
"''",
".",
"format",
"(",
"profile_map",
"[",
"name",
"]",
")",
"else",
":",
"if",
"name",
"==",
"'allprofiles'",
":",
"msg",
"=",
"'All the firewall profiles are enabled'",
"else",
":",
"msg",
"=",
"'Firewall profile {0} is enabled'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"return",
"ret"
] | 29.162162 | 20.27027 |
def parse_directives(lexer: Lexer, is_const: bool) -> List[DirectiveNode]:
"""Directives[Const]: Directive[?Const]+"""
directives: List[DirectiveNode] = []
append = directives.append
while peek(lexer, TokenKind.AT):
append(parse_directive(lexer, is_const))
return directives | [
"def",
"parse_directives",
"(",
"lexer",
":",
"Lexer",
",",
"is_const",
":",
"bool",
")",
"->",
"List",
"[",
"DirectiveNode",
"]",
":",
"directives",
":",
"List",
"[",
"DirectiveNode",
"]",
"=",
"[",
"]",
"append",
"=",
"directives",
".",
"append",
"while",
"peek",
"(",
"lexer",
",",
"TokenKind",
".",
"AT",
")",
":",
"append",
"(",
"parse_directive",
"(",
"lexer",
",",
"is_const",
")",
")",
"return",
"directives"
] | 42.285714 | 10.714286 |
def rows_from_csv(filename, predicate=None, encoding='utf-8'):
"""\
Returns an iterator over all rows in the provided CSV `filename`.
`filename`
Absolute path to a file to read the cables from.
The file must be a CSV file with the following columns:
<identifier>, <creation-date>, <reference-id>, <origin>, <classification-level>, <references-to-other-cables>, <header>, <body>
The delimiter must be a comma (``,``) and the content must be enclosed in double quotes (``"``).
`predicate`
A predicate that is invoked for each cable reference identifier.
If the predicate evaluates to ``False`` the cable is ignored.
By default, all cables are used.
I.e. ``cables_from_csv('cables.csv', lambda r: r.startswith('09'))``
would return cables where the reference identifier starts with ``09``.
`encoding`
The file encoding (``UTF-8`` by default).
"""
pred = predicate or bool
with open(filename, 'rb') as f:
for row in _UnicodeReader(f, encoding=encoding, delimiter=',', quotechar='"', escapechar='\\'):
ident, created, reference_id, origin, classification, references, header, body = row
if row and pred(reference_id):
yield ident, created, reference_id, origin, classification, references, header, body | [
"def",
"rows_from_csv",
"(",
"filename",
",",
"predicate",
"=",
"None",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"pred",
"=",
"predicate",
"or",
"bool",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"for",
"row",
"in",
"_UnicodeReader",
"(",
"f",
",",
"encoding",
"=",
"encoding",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
",",
"escapechar",
"=",
"'\\\\'",
")",
":",
"ident",
",",
"created",
",",
"reference_id",
",",
"origin",
",",
"classification",
",",
"references",
",",
"header",
",",
"body",
"=",
"row",
"if",
"row",
"and",
"pred",
"(",
"reference_id",
")",
":",
"yield",
"ident",
",",
"created",
",",
"reference_id",
",",
"origin",
",",
"classification",
",",
"references",
",",
"header",
",",
"body"
] | 55.625 | 29.5 |
def consume_asset(event, agreement_id, did, service_agreement, consumer_account, consume_callback):
"""
Consumption of an asset after get the event call.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param consumer_account: Account instance of the consumer
:param consume_callback:
"""
logger.debug(f"consuming asset after event {event}.")
if consume_callback:
config = ConfigProvider.get_config()
secret_store = SecretStoreProvider.get_secret_store(
config.secret_store_url, config.parity_url, consumer_account
)
brizo = BrizoProvider.get_brizo()
consume_callback(
agreement_id,
service_agreement.service_definition_id,
DIDResolver(Keeper.get_instance().did_registry).resolve(did),
consumer_account,
ConfigProvider.get_config().downloads_path,
brizo,
secret_store
) | [
"def",
"consume_asset",
"(",
"event",
",",
"agreement_id",
",",
"did",
",",
"service_agreement",
",",
"consumer_account",
",",
"consume_callback",
")",
":",
"logger",
".",
"debug",
"(",
"f\"consuming asset after event {event}.\"",
")",
"if",
"consume_callback",
":",
"config",
"=",
"ConfigProvider",
".",
"get_config",
"(",
")",
"secret_store",
"=",
"SecretStoreProvider",
".",
"get_secret_store",
"(",
"config",
".",
"secret_store_url",
",",
"config",
".",
"parity_url",
",",
"consumer_account",
")",
"brizo",
"=",
"BrizoProvider",
".",
"get_brizo",
"(",
")",
"consume_callback",
"(",
"agreement_id",
",",
"service_agreement",
".",
"service_definition_id",
",",
"DIDResolver",
"(",
"Keeper",
".",
"get_instance",
"(",
")",
".",
"did_registry",
")",
".",
"resolve",
"(",
"did",
")",
",",
"consumer_account",
",",
"ConfigProvider",
".",
"get_config",
"(",
")",
".",
"downloads_path",
",",
"brizo",
",",
"secret_store",
")"
] | 37.714286 | 19 |
def _store_compressed_sequence_to_node(self, node):
"""
make a compressed representation of a pair of sequences only counting
the number of times a particular pair of states (e.g. (A,T)) is observed
the the aligned sequences of parent and child.
Parameters
-----------
node : PhyloTree.Clade
Tree node. **Note** because the method operates
on the sequences on both sides of a branch, sequence reconstruction
must be performed prior to calling this method.
"""
seq_pairs, multiplicity = self.gtr.compress_sequence_pair(node.up.cseq,
node.cseq,
pattern_multiplicity = self.multiplicity,
ignore_gaps = self.ignore_gaps)
node.compressed_sequence = {'pair':seq_pairs, 'multiplicity':multiplicity} | [
"def",
"_store_compressed_sequence_to_node",
"(",
"self",
",",
"node",
")",
":",
"seq_pairs",
",",
"multiplicity",
"=",
"self",
".",
"gtr",
".",
"compress_sequence_pair",
"(",
"node",
".",
"up",
".",
"cseq",
",",
"node",
".",
"cseq",
",",
"pattern_multiplicity",
"=",
"self",
".",
"multiplicity",
",",
"ignore_gaps",
"=",
"self",
".",
"ignore_gaps",
")",
"node",
".",
"compressed_sequence",
"=",
"{",
"'pair'",
":",
"seq_pairs",
",",
"'multiplicity'",
":",
"multiplicity",
"}"
] | 46.5 | 26.6 |
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
query = query.replace(',', ' ')
vals = query.split()
coords = [float(v) for v in vals]
return tuple(coords[:2]) | [
"def",
"coords_from_query",
"(",
"query",
")",
":",
"try",
":",
"coords",
"=",
"json",
".",
"loads",
"(",
"query",
")",
"except",
"ValueError",
":",
"query",
"=",
"query",
".",
"replace",
"(",
"','",
",",
"' '",
")",
"vals",
"=",
"query",
".",
"split",
"(",
")",
"coords",
"=",
"[",
"float",
"(",
"v",
")",
"for",
"v",
"in",
"vals",
"]",
"return",
"tuple",
"(",
"coords",
"[",
":",
"2",
"]",
")"
] | 33.333333 | 10.333333 |
def _remove_hidden_parts(projected_surface):
"""Removes parts of a projected surface that are not visible.
Args:
projected_surface (surface): the surface to use
Returns:
surface: A projected surface.
"""
surface = np.copy(projected_surface)
surface[~_make_occlusion_mask(projected_surface)] = np.nan
return surface | [
"def",
"_remove_hidden_parts",
"(",
"projected_surface",
")",
":",
"surface",
"=",
"np",
".",
"copy",
"(",
"projected_surface",
")",
"surface",
"[",
"~",
"_make_occlusion_mask",
"(",
"projected_surface",
")",
"]",
"=",
"np",
".",
"nan",
"return",
"surface"
] | 29.083333 | 17.083333 |
def main():
'''Calculate the depth of a liquid in centimeters using a HCSR04 sensor
and a Raspberry Pi'''
trig_pin = 17
echo_pin = 27
# Default values
# unit = 'metric'
# temperature = 20
# round_to = 1
hole_depth = 80 # centimeters
# Create a distance reading with the hcsr04 sensor module
value = sensor.Measurement(trig_pin,
echo_pin
)
raw_measurement = value.raw_distance()
# To overide default values you can pass the following to value
# value = sensor.Measurement(trig_pin,
# echo_pin,
# temperature=10,
# round_to=2
# )
# Calculate the liquid depth, in centimeters, of a hole filled
# with liquid
liquid_depth = value.depth_metric(raw_measurement, hole_depth)
print("Depth = {} centimeters".format(liquid_depth)) | [
"def",
"main",
"(",
")",
":",
"trig_pin",
"=",
"17",
"echo_pin",
"=",
"27",
"# Default values",
"# unit = 'metric'",
"# temperature = 20",
"# round_to = 1",
"hole_depth",
"=",
"80",
"# centimeters",
"# Create a distance reading with the hcsr04 sensor module",
"value",
"=",
"sensor",
".",
"Measurement",
"(",
"trig_pin",
",",
"echo_pin",
")",
"raw_measurement",
"=",
"value",
".",
"raw_distance",
"(",
")",
"# To overide default values you can pass the following to value",
"# value = sensor.Measurement(trig_pin,",
"# echo_pin,",
"# temperature=10,",
"# round_to=2",
"# )",
"# Calculate the liquid depth, in centimeters, of a hole filled",
"# with liquid",
"liquid_depth",
"=",
"value",
".",
"depth_metric",
"(",
"raw_measurement",
",",
"hole_depth",
")",
"print",
"(",
"\"Depth = {} centimeters\"",
".",
"format",
"(",
"liquid_depth",
")",
")"
] | 29.030303 | 21.151515 |
def flag(name=None):
"""
Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'.
:param name: name for the field
:return: grammar for the flag field
"""
if name is None:
name = 'Flag Field'
# Basic field
field = pp.Regex('[YNU]')
# Name
field.setName(name)
field.leaveWhitespace()
return field | [
"def",
"flag",
"(",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"'Flag Field'",
"# Basic field",
"field",
"=",
"pp",
".",
"Regex",
"(",
"'[YNU]'",
")",
"# Name",
"field",
".",
"setName",
"(",
"name",
")",
"field",
".",
"leaveWhitespace",
"(",
")",
"return",
"field"
] | 17.7 | 22.7 |
def clear(self, username, project):
"""Clear the cache for given project."""
method = 'DELETE'
url = ('/project/{username}/{project}/build-cache?'
'circle-token={token}'.format(username=username,
project=project,
token=self.client.api_token))
json_data = self.client.request(method, url)
return json_data | [
"def",
"clear",
"(",
"self",
",",
"username",
",",
"project",
")",
":",
"method",
"=",
"'DELETE'",
"url",
"=",
"(",
"'/project/{username}/{project}/build-cache?'",
"'circle-token={token}'",
".",
"format",
"(",
"username",
"=",
"username",
",",
"project",
"=",
"project",
",",
"token",
"=",
"self",
".",
"client",
".",
"api_token",
")",
")",
"json_data",
"=",
"self",
".",
"client",
".",
"request",
"(",
"method",
",",
"url",
")",
"return",
"json_data"
] | 49 | 16.111111 |
def get(self, community_id):
"""Get the details of the specified community.
.. http:get:: /communities/(string:id)
Returns a JSON dictionary with the details of the specified
community.
**Request**:
.. sourcecode:: http
GET /communities/communities/comm1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Host: localhost:5000
:reqheader Content-Type: application/json
:query string id: ID of an specific community to get more
information.
**Response**:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Length: 334
Content-Type: application/json
{
"id_user": 1,
"description": "",
"title": "",
"created": "2016-04-05T14:56:37.051462",
"id": "comm1",
"page": "",
"curation_policy": ""
}
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 404: page not found
"""
community = Community.get(community_id)
if not community:
abort(404)
etag = community.version_id
self.check_etag(etag)
response = self.make_response(
community, links_item_factory=default_links_item_factory)
response.set_etag(etag)
return response | [
"def",
"get",
"(",
"self",
",",
"community_id",
")",
":",
"community",
"=",
"Community",
".",
"get",
"(",
"community_id",
")",
"if",
"not",
"community",
":",
"abort",
"(",
"404",
")",
"etag",
"=",
"community",
".",
"version_id",
"self",
".",
"check_etag",
"(",
"etag",
")",
"response",
"=",
"self",
".",
"make_response",
"(",
"community",
",",
"links_item_factory",
"=",
"default_links_item_factory",
")",
"response",
".",
"set_etag",
"(",
"etag",
")",
"return",
"response"
] | 36.209302 | 12.046512 |
def get_schema_input_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_schema = ET.Element("get_schema")
config = get_schema
input = ET.SubElement(get_schema, "input")
version = ET.SubElement(input, "version")
version.text = kwargs.pop('version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_schema_input_version",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_schema",
"=",
"ET",
".",
"Element",
"(",
"\"get_schema\"",
")",
"config",
"=",
"get_schema",
"input",
"=",
"ET",
".",
"SubElement",
"(",
"get_schema",
",",
"\"input\"",
")",
"version",
"=",
"ET",
".",
"SubElement",
"(",
"input",
",",
"\"version\"",
")",
"version",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'version'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 35.5 | 9.583333 |
def get_all(symbol):
"""
Get all available quote data for the given ticker symbol.
Returns a dictionary.
"""
ids = \
'ydb2r1b3qpoc1d1cd2c6t1k2p2c8m5c3m6gm7hm8k1m3lm4l1t8w1g1w4g3p' \
'1g4mg5m2g6kvjj1j5j3k4f6j6nk5n4ws1xj2va5b6k3t7a2t615l2el3e7v1' \
'e8v7e9s6b4j4p5p6rr2r5r6r7s7'
values = _request(symbol, ids).split(',')
return dict(
dividend_yield=values[0],
dividend_per_share=values[1],
ask_realtime=values[2],
dividend_pay_date=values[3],
bid_realtime=values[4],
ex_dividend_date=values[5],
previous_close=values[6],
today_open=values[7],
change=values[8],
last_trade_date=values[9],
change_percent_change=values[10],
trade_date=values[11],
change_realtime=values[12],
last_trade_time=values[13],
change_percent_realtime=values[14],
change_percent=values[15],
after_hours_change_realtime=values[16],
change_200_sma=values[17],
todays_low=values[18],
change_50_sma=values[19],
todays_high=values[20],
percent_change_50_sma=values[21],
last_trade_realtime_time=values[22],
fifty_sma=values[23],
last_trade_time_plus=values[24],
twohundred_sma=values[25],
last_trade_price=values[26],
one_year_target=values[27],
todays_value_change=values[28],
holdings_gain_percent=values[29],
todays_value_change_realtime=values[30],
annualized_gain=values[31],
price_paid=values[32],
holdings_gain=values[33],
todays_range=values[34],
holdings_gain_percent_realtime=values[35],
todays_range_realtime=values[36],
holdings_gain_realtime=values[37],
fiftytwo_week_high=values[38],
more_info=values[39],
fiftytwo_week_low=values[40],
market_cap=values[41],
change_from_52_week_low=values[42],
market_cap_realtime=values[43],
change_from_52_week_high=values[44],
float_shares=values[45],
percent_change_from_52_week_low=values[46],
company_name=values[47],
percent_change_from_52_week_high=values[48],
notes=values[49],
fiftytwo_week_range=values[50],
shares_owned=values[51],
stock_exchange=values[52],
shares_outstanding=values[53],
volume=values[54],
ask_size=values[55],
bid_size=values[56],
last_trade_size=values[57],
ticker_trend=values[58],
average_daily_volume=values[59],
trade_links=values[60],
order_book_realtime=values[61],
high_limit=values[62],
eps=values[63],
low_limit=values[64],
eps_estimate_current_year=values[65],
holdings_value=values[66],
eps_estimate_next_year=values[67],
holdings_value_realtime=values[68],
eps_estimate_next_quarter=values[69],
revenue=values[70],
book_value=values[71],
ebitda=values[72],
price_sales=values[73],
price_book=values[74],
pe=values[75],
pe_realtime=values[76],
peg=values[77],
price_eps_estimate_current_year=values[78],
price_eps_estimate_next_year=values[79],
short_ratio=values[80],
) | [
"def",
"get_all",
"(",
"symbol",
")",
":",
"ids",
"=",
"'ydb2r1b3qpoc1d1cd2c6t1k2p2c8m5c3m6gm7hm8k1m3lm4l1t8w1g1w4g3p'",
"'1g4mg5m2g6kvjj1j5j3k4f6j6nk5n4ws1xj2va5b6k3t7a2t615l2el3e7v1'",
"'e8v7e9s6b4j4p5p6rr2r5r6r7s7'",
"values",
"=",
"_request",
"(",
"symbol",
",",
"ids",
")",
".",
"split",
"(",
"','",
")",
"return",
"dict",
"(",
"dividend_yield",
"=",
"values",
"[",
"0",
"]",
",",
"dividend_per_share",
"=",
"values",
"[",
"1",
"]",
",",
"ask_realtime",
"=",
"values",
"[",
"2",
"]",
",",
"dividend_pay_date",
"=",
"values",
"[",
"3",
"]",
",",
"bid_realtime",
"=",
"values",
"[",
"4",
"]",
",",
"ex_dividend_date",
"=",
"values",
"[",
"5",
"]",
",",
"previous_close",
"=",
"values",
"[",
"6",
"]",
",",
"today_open",
"=",
"values",
"[",
"7",
"]",
",",
"change",
"=",
"values",
"[",
"8",
"]",
",",
"last_trade_date",
"=",
"values",
"[",
"9",
"]",
",",
"change_percent_change",
"=",
"values",
"[",
"10",
"]",
",",
"trade_date",
"=",
"values",
"[",
"11",
"]",
",",
"change_realtime",
"=",
"values",
"[",
"12",
"]",
",",
"last_trade_time",
"=",
"values",
"[",
"13",
"]",
",",
"change_percent_realtime",
"=",
"values",
"[",
"14",
"]",
",",
"change_percent",
"=",
"values",
"[",
"15",
"]",
",",
"after_hours_change_realtime",
"=",
"values",
"[",
"16",
"]",
",",
"change_200_sma",
"=",
"values",
"[",
"17",
"]",
",",
"todays_low",
"=",
"values",
"[",
"18",
"]",
",",
"change_50_sma",
"=",
"values",
"[",
"19",
"]",
",",
"todays_high",
"=",
"values",
"[",
"20",
"]",
",",
"percent_change_50_sma",
"=",
"values",
"[",
"21",
"]",
",",
"last_trade_realtime_time",
"=",
"values",
"[",
"22",
"]",
",",
"fifty_sma",
"=",
"values",
"[",
"23",
"]",
",",
"last_trade_time_plus",
"=",
"values",
"[",
"24",
"]",
",",
"twohundred_sma",
"=",
"values",
"[",
"25",
"]",
",",
"last_trade_price",
"=",
"values",
"[",
"26",
"]",
",",
"one_year_target",
"=",
"values",
"[",
"27",
"]",
",",
"todays_value_change",
"=",
"values",
"[",
"28",
"]",
",",
"holdings_gain_percent",
"=",
"values",
"[",
"29",
"]",
",",
"todays_value_change_realtime",
"=",
"values",
"[",
"30",
"]",
",",
"annualized_gain",
"=",
"values",
"[",
"31",
"]",
",",
"price_paid",
"=",
"values",
"[",
"32",
"]",
",",
"holdings_gain",
"=",
"values",
"[",
"33",
"]",
",",
"todays_range",
"=",
"values",
"[",
"34",
"]",
",",
"holdings_gain_percent_realtime",
"=",
"values",
"[",
"35",
"]",
",",
"todays_range_realtime",
"=",
"values",
"[",
"36",
"]",
",",
"holdings_gain_realtime",
"=",
"values",
"[",
"37",
"]",
",",
"fiftytwo_week_high",
"=",
"values",
"[",
"38",
"]",
",",
"more_info",
"=",
"values",
"[",
"39",
"]",
",",
"fiftytwo_week_low",
"=",
"values",
"[",
"40",
"]",
",",
"market_cap",
"=",
"values",
"[",
"41",
"]",
",",
"change_from_52_week_low",
"=",
"values",
"[",
"42",
"]",
",",
"market_cap_realtime",
"=",
"values",
"[",
"43",
"]",
",",
"change_from_52_week_high",
"=",
"values",
"[",
"44",
"]",
",",
"float_shares",
"=",
"values",
"[",
"45",
"]",
",",
"percent_change_from_52_week_low",
"=",
"values",
"[",
"46",
"]",
",",
"company_name",
"=",
"values",
"[",
"47",
"]",
",",
"percent_change_from_52_week_high",
"=",
"values",
"[",
"48",
"]",
",",
"notes",
"=",
"values",
"[",
"49",
"]",
",",
"fiftytwo_week_range",
"=",
"values",
"[",
"50",
"]",
",",
"shares_owned",
"=",
"values",
"[",
"51",
"]",
",",
"stock_exchange",
"=",
"values",
"[",
"52",
"]",
",",
"shares_outstanding",
"=",
"values",
"[",
"53",
"]",
",",
"volume",
"=",
"values",
"[",
"54",
"]",
",",
"ask_size",
"=",
"values",
"[",
"55",
"]",
",",
"bid_size",
"=",
"values",
"[",
"56",
"]",
",",
"last_trade_size",
"=",
"values",
"[",
"57",
"]",
",",
"ticker_trend",
"=",
"values",
"[",
"58",
"]",
",",
"average_daily_volume",
"=",
"values",
"[",
"59",
"]",
",",
"trade_links",
"=",
"values",
"[",
"60",
"]",
",",
"order_book_realtime",
"=",
"values",
"[",
"61",
"]",
",",
"high_limit",
"=",
"values",
"[",
"62",
"]",
",",
"eps",
"=",
"values",
"[",
"63",
"]",
",",
"low_limit",
"=",
"values",
"[",
"64",
"]",
",",
"eps_estimate_current_year",
"=",
"values",
"[",
"65",
"]",
",",
"holdings_value",
"=",
"values",
"[",
"66",
"]",
",",
"eps_estimate_next_year",
"=",
"values",
"[",
"67",
"]",
",",
"holdings_value_realtime",
"=",
"values",
"[",
"68",
"]",
",",
"eps_estimate_next_quarter",
"=",
"values",
"[",
"69",
"]",
",",
"revenue",
"=",
"values",
"[",
"70",
"]",
",",
"book_value",
"=",
"values",
"[",
"71",
"]",
",",
"ebitda",
"=",
"values",
"[",
"72",
"]",
",",
"price_sales",
"=",
"values",
"[",
"73",
"]",
",",
"price_book",
"=",
"values",
"[",
"74",
"]",
",",
"pe",
"=",
"values",
"[",
"75",
"]",
",",
"pe_realtime",
"=",
"values",
"[",
"76",
"]",
",",
"peg",
"=",
"values",
"[",
"77",
"]",
",",
"price_eps_estimate_current_year",
"=",
"values",
"[",
"78",
"]",
",",
"price_eps_estimate_next_year",
"=",
"values",
"[",
"79",
"]",
",",
"short_ratio",
"=",
"values",
"[",
"80",
"]",
",",
")"
] | 34.468085 | 8.914894 |
def is_closed(self) -> Optional[bool]:
"""For Magnet Sensor; True if Closed, False if Open."""
if self._device_type is not None and self._device_type == DeviceType.DoorMagnet:
return bool(self._current_status & 0x01)
return None | [
"def",
"is_closed",
"(",
"self",
")",
"->",
"Optional",
"[",
"bool",
"]",
":",
"if",
"self",
".",
"_device_type",
"is",
"not",
"None",
"and",
"self",
".",
"_device_type",
"==",
"DeviceType",
".",
"DoorMagnet",
":",
"return",
"bool",
"(",
"self",
".",
"_current_status",
"&",
"0x01",
")",
"return",
"None"
] | 52 | 16.6 |
def _create_skeleton_3(pc, l, num_section):
"""
Bottom level: {"measurement": [], "model": [{summary, distributions, ensemble}]}
Fill in measurement and model tables with N number of EMPTY meas, summary, ensemble, and distributions.
:param str pc: Paleo or Chron "mode"
:param list l:
:param dict num_section:
:return dict:
"""
logger_excel.info("enter create_skeleton_inner_2")
# Table Template: Model
template_model = {"summaryTable": {}, "ensembleTable": {}, "distributionTable": []}
# Build string appropriate for paleo/chron mode
pc_meas = "{}MeasurementTable".format(pc)
pc_mod = "{}Model".format(pc)
# Loop for each table count
for idx1, table in num_section.items():
try:
# Create N number of empty measurement lists
l[idx1 - 1][pc_meas] = [None] * num_section[idx1]["ct_meas"]
# Create N number of empty model table lists
l[idx1 - 1][pc_mod] = [copy.deepcopy(template_model)] * num_section[idx1]["ct_model"]
# Create N number of empty model tables at list index
#
for idx2, nums in table["ct_in_model"].items():
dists = []
try:
# Create N number of empty distributions at list index
[dists.append({}) for i in range(0, nums["ct_dist"])]
except IndexError as e:
logger_excel.debug("excel: create_metadata_skeleton: paleo tables messed up, {}".format(e))
# Model template complete, insert it at list index
l[idx1 - 1][pc_mod][idx2-1] = {"summaryTable": {}, "ensembleTable": {}, "distributionTable": dists}
except IndexError as e:
logger_excel.warn("create_skeleton_inner_tables: IndexError: {}".format(e))
except KeyError as e:
logger_excel.warn("create_skeleton_inner_tables: KeyError: {}".format(e))
return l | [
"def",
"_create_skeleton_3",
"(",
"pc",
",",
"l",
",",
"num_section",
")",
":",
"logger_excel",
".",
"info",
"(",
"\"enter create_skeleton_inner_2\"",
")",
"# Table Template: Model",
"template_model",
"=",
"{",
"\"summaryTable\"",
":",
"{",
"}",
",",
"\"ensembleTable\"",
":",
"{",
"}",
",",
"\"distributionTable\"",
":",
"[",
"]",
"}",
"# Build string appropriate for paleo/chron mode",
"pc_meas",
"=",
"\"{}MeasurementTable\"",
".",
"format",
"(",
"pc",
")",
"pc_mod",
"=",
"\"{}Model\"",
".",
"format",
"(",
"pc",
")",
"# Loop for each table count",
"for",
"idx1",
",",
"table",
"in",
"num_section",
".",
"items",
"(",
")",
":",
"try",
":",
"# Create N number of empty measurement lists",
"l",
"[",
"idx1",
"-",
"1",
"]",
"[",
"pc_meas",
"]",
"=",
"[",
"None",
"]",
"*",
"num_section",
"[",
"idx1",
"]",
"[",
"\"ct_meas\"",
"]",
"# Create N number of empty model table lists",
"l",
"[",
"idx1",
"-",
"1",
"]",
"[",
"pc_mod",
"]",
"=",
"[",
"copy",
".",
"deepcopy",
"(",
"template_model",
")",
"]",
"*",
"num_section",
"[",
"idx1",
"]",
"[",
"\"ct_model\"",
"]",
"# Create N number of empty model tables at list index",
"#",
"for",
"idx2",
",",
"nums",
"in",
"table",
"[",
"\"ct_in_model\"",
"]",
".",
"items",
"(",
")",
":",
"dists",
"=",
"[",
"]",
"try",
":",
"# Create N number of empty distributions at list index",
"[",
"dists",
".",
"append",
"(",
"{",
"}",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"nums",
"[",
"\"ct_dist\"",
"]",
")",
"]",
"except",
"IndexError",
"as",
"e",
":",
"logger_excel",
".",
"debug",
"(",
"\"excel: create_metadata_skeleton: paleo tables messed up, {}\"",
".",
"format",
"(",
"e",
")",
")",
"# Model template complete, insert it at list index",
"l",
"[",
"idx1",
"-",
"1",
"]",
"[",
"pc_mod",
"]",
"[",
"idx2",
"-",
"1",
"]",
"=",
"{",
"\"summaryTable\"",
":",
"{",
"}",
",",
"\"ensembleTable\"",
":",
"{",
"}",
",",
"\"distributionTable\"",
":",
"dists",
"}",
"except",
"IndexError",
"as",
"e",
":",
"logger_excel",
".",
"warn",
"(",
"\"create_skeleton_inner_tables: IndexError: {}\"",
".",
"format",
"(",
"e",
")",
")",
"except",
"KeyError",
"as",
"e",
":",
"logger_excel",
".",
"warn",
"(",
"\"create_skeleton_inner_tables: KeyError: {}\"",
".",
"format",
"(",
"e",
")",
")",
"return",
"l"
] | 43.636364 | 26.227273 |
def emg_tkeo(emg):
"""
Calculates the Teager–Kaiser Energy operator.
Parameters
----------
emg : array
raw EMG signal.
Returns
-------
tkeo : 1D array_like
signal processed by the Teager–Kaiser Energy operator.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
"""
emg = np.asarray(emg)
tkeo = np.copy(emg)
# Teager–Kaiser Energy operator
tkeo[1:-1] = emg[1:-1]*emg[1:-1] - emg[:-2]*emg[2:]
# correct the data in the extremities
tkeo[0], tkeo[-1] = tkeo[1], tkeo[-2]
return(tkeo) | [
"def",
"emg_tkeo",
"(",
"emg",
")",
":",
"emg",
"=",
"np",
".",
"asarray",
"(",
"emg",
")",
"tkeo",
"=",
"np",
".",
"copy",
"(",
"emg",
")",
"# Teager–Kaiser Energy operator",
"tkeo",
"[",
"1",
":",
"-",
"1",
"]",
"=",
"emg",
"[",
"1",
":",
"-",
"1",
"]",
"*",
"emg",
"[",
"1",
":",
"-",
"1",
"]",
"-",
"emg",
"[",
":",
"-",
"2",
"]",
"*",
"emg",
"[",
"2",
":",
"]",
"# correct the data in the extremities",
"tkeo",
"[",
"0",
"]",
",",
"tkeo",
"[",
"-",
"1",
"]",
"=",
"tkeo",
"[",
"1",
"]",
",",
"tkeo",
"[",
"-",
"2",
"]",
"return",
"(",
"tkeo",
")"
] | 17.375 | 25.625 |
def OnActivateCard(self, card):
"""Called when a card is activated by double-clicking
on the card or reader tree control or toolbar.
In this sample, we just connect to the card on the first activation."""
SimpleSCardAppEventObserver.OnActivateCard(self, card)
self.feedbacktext.SetLabel('Activated card: ' + repr(card))
self.transmitbutton.Enable() | [
"def",
"OnActivateCard",
"(",
"self",
",",
"card",
")",
":",
"SimpleSCardAppEventObserver",
".",
"OnActivateCard",
"(",
"self",
",",
"card",
")",
"self",
".",
"feedbacktext",
".",
"SetLabel",
"(",
"'Activated card: '",
"+",
"repr",
"(",
"card",
")",
")",
"self",
".",
"transmitbutton",
".",
"Enable",
"(",
")"
] | 55.714286 | 10.857143 |
def predict_topk(self, dataset, output_type="probability", k=3, batch_size=64):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
Images to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+----+-------+-------------------+
| id | class | probability |
+----+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| .. | ... | ... |
+----+-------+-------------------+
[35688 rows x 3 columns]
"""
if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
dataset, _ = self._canonize_input(dataset)
extracted_features = self._extract_features(dataset)
return self.classifier.predict_topk(extracted_features, output_type = output_type, k = k) | [
"def",
"predict_topk",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"\"probability\"",
",",
"k",
"=",
"3",
",",
"batch_size",
"=",
"64",
")",
":",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"(",
"_tc",
".",
"SFrame",
",",
"_tc",
".",
"SArray",
",",
"_tc",
".",
"Image",
")",
")",
":",
"raise",
"TypeError",
"(",
"'dataset must be either an SFrame, SArray or turicreate.Image'",
")",
"if",
"(",
"batch_size",
"<",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"'batch_size' must be greater than or equal to 1\"",
")",
"dataset",
",",
"_",
"=",
"self",
".",
"_canonize_input",
"(",
"dataset",
")",
"extracted_features",
"=",
"self",
".",
"_extract_features",
"(",
"dataset",
")",
"return",
"self",
".",
"classifier",
".",
"predict_topk",
"(",
"extracted_features",
",",
"output_type",
"=",
"output_type",
",",
"k",
"=",
"k",
")"
] | 40.875 | 21.75 |
def backup(host=None, core_name=None, append_core_to_path=False):
'''
Tell solr make a backup. This method can be mis-leading since it uses the
backup API. If an error happens during the backup you are not notified.
The status: 'OK' in the response simply means that solr received the
request successfully.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
append_core_to_path : boolean (False)
If True add the name of the core to the backup path. Assumes that
minion backup path is not None.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.backup music
'''
path = __opts__['solr.backup_path']
num_backups = __opts__['solr.num_backups']
if path is not None:
if not path.endswith(os.path.sep):
path += os.path.sep
ret = _get_return_dict()
if _get_none_or_value(core_name) is None and _check_for_cores():
success = True
for name in __opts__['solr.cores']:
params = []
if path is not None:
path = path + name if append_core_to_path else path
params.append("&location={0}".format(path + name))
params.append("&numberToKeep={0}".format(num_backups))
resp = _replication_request('backup', host=host, core_name=name,
params=params)
if not resp['success']:
success = False
data = {name: {'data': resp['data']}}
ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return ret
else:
if core_name is not None and path is not None:
if append_core_to_path:
path += core_name
if path is not None:
params = ["location={0}".format(path)]
params.append("&numberToKeep={0}".format(num_backups))
resp = _replication_request('backup', host=host, core_name=core_name,
params=params)
return resp | [
"def",
"backup",
"(",
"host",
"=",
"None",
",",
"core_name",
"=",
"None",
",",
"append_core_to_path",
"=",
"False",
")",
":",
"path",
"=",
"__opts__",
"[",
"'solr.backup_path'",
"]",
"num_backups",
"=",
"__opts__",
"[",
"'solr.num_backups'",
"]",
"if",
"path",
"is",
"not",
"None",
":",
"if",
"not",
"path",
".",
"endswith",
"(",
"os",
".",
"path",
".",
"sep",
")",
":",
"path",
"+=",
"os",
".",
"path",
".",
"sep",
"ret",
"=",
"_get_return_dict",
"(",
")",
"if",
"_get_none_or_value",
"(",
"core_name",
")",
"is",
"None",
"and",
"_check_for_cores",
"(",
")",
":",
"success",
"=",
"True",
"for",
"name",
"in",
"__opts__",
"[",
"'solr.cores'",
"]",
":",
"params",
"=",
"[",
"]",
"if",
"path",
"is",
"not",
"None",
":",
"path",
"=",
"path",
"+",
"name",
"if",
"append_core_to_path",
"else",
"path",
"params",
".",
"append",
"(",
"\"&location={0}\"",
".",
"format",
"(",
"path",
"+",
"name",
")",
")",
"params",
".",
"append",
"(",
"\"&numberToKeep={0}\"",
".",
"format",
"(",
"num_backups",
")",
")",
"resp",
"=",
"_replication_request",
"(",
"'backup'",
",",
"host",
"=",
"host",
",",
"core_name",
"=",
"name",
",",
"params",
"=",
"params",
")",
"if",
"not",
"resp",
"[",
"'success'",
"]",
":",
"success",
"=",
"False",
"data",
"=",
"{",
"name",
":",
"{",
"'data'",
":",
"resp",
"[",
"'data'",
"]",
"}",
"}",
"ret",
"=",
"_update_return_dict",
"(",
"ret",
",",
"success",
",",
"data",
",",
"resp",
"[",
"'errors'",
"]",
",",
"resp",
"[",
"'warnings'",
"]",
")",
"return",
"ret",
"else",
":",
"if",
"core_name",
"is",
"not",
"None",
"and",
"path",
"is",
"not",
"None",
":",
"if",
"append_core_to_path",
":",
"path",
"+=",
"core_name",
"if",
"path",
"is",
"not",
"None",
":",
"params",
"=",
"[",
"\"location={0}\"",
".",
"format",
"(",
"path",
")",
"]",
"params",
".",
"append",
"(",
"\"&numberToKeep={0}\"",
".",
"format",
"(",
"num_backups",
")",
")",
"resp",
"=",
"_replication_request",
"(",
"'backup'",
",",
"host",
"=",
"host",
",",
"core_name",
"=",
"core_name",
",",
"params",
"=",
"params",
")",
"return",
"resp"
] | 39.084746 | 20.983051 |
def plot_zed(ZED, datablock, angle, s, units):
    """
    function to make equal area plot and zijderveld plot
    Parameters
    _________
    ZED : dictionary with keys for plots
        eqarea : figure number for equal area projection
        zijd : figure number for zijderveld plot
        demag : figure number for magnetization against demag step
    datablock : nested list of [step, dec, inc, M (Am2), quality]
        step : units assumed in SI
        M : units assumed Am2
        quality : [g,b], good or bad measurement; if bad will be marked as such
    angle : angle for X axis in horizontal plane, if 0, x will be 0 declination
    s : specimen name
    units :  SI units ['K','T','U'] for kelvin, tesla or undefined
    Returns
    _______
    ZED : the input dictionary; 'demag' and 'zijd' keys are removed when
        there are no good measurements to plot
    Effects
    _______
    calls plotting functions for equal area, zijderveld and demag figures
    """
    # Clear every target figure first; stamp the version number unless
    # running in server mode.
    for fignum in list(ZED.keys()):
        fig = plt.figure(num=ZED[fignum])
        plt.clf()
        if not isServer:
            plt.figtext(.02, .01, version_num)
    # Partition the (dec, inc) pairs by measurement quality; rows missing
    # dec or inc are skipped with a warning.
    DIbad, DIgood = [], []
    for rec in datablock:
        if cb.is_null(rec[1],zero_as_null=False):
            print('-W- You are missing a declination for specimen', s, ', skipping this row')
            continue
        if cb.is_null(rec[2],zero_as_null=False):
            print('-W- You are missing an inclination for specimen', s, ', skipping this row')
            continue
        if rec[5] == 'b':
            DIbad.append((rec[1], rec[2]))
        else:
            DIgood.append((rec[1], rec[2]))
    # Marker symbols used for 'bad' measurements on the equal-area plot.
    badsym = {'lower': ['+', 'g'], 'upper': ['x', 'c']}
    if len(DIgood) > 0:
        plot_eq(ZED['eqarea'], DIgood, s)
        if len(DIbad) > 0:
            plot_di_sym(ZED['eqarea'], DIbad, badsym)
    elif len(DIbad) > 0:
        # Only bad points exist: plot them as the primary data set.
        plot_eq_sym(ZED['eqarea'], DIbad, s, badsym)
    # Build the two end points of the line marking the Zijderveld
    # horizontal (X) axis on the equal-area net.
    AngleX, AngleY = [], []
    XY = pmag.dimap(angle, 90.)
    AngleX.append(XY[0])
    AngleY.append(XY[1])
    XY = pmag.dimap(angle, 0.)
    AngleX.append(XY[0])
    AngleY.append(XY[1])
    plt.figure(num=ZED['eqarea'])
    # Draw a line for Zijderveld horizontal axis
    plt.plot(AngleX, AngleY, 'r-')
    if AngleX[-1] == 0:
        # Nudge away from zero so the label-offset division below cannot
        # divide by zero.
        AngleX[-1] = 0.01
    plt.text(AngleX[-1] + (old_div(AngleX[-1], abs(AngleX[-1]))) * .1,
             AngleY[-1] + (old_div(AngleY[-1], abs(AngleY[-1]))) * .1, 'X')
    norm = 1
    #if units=="U": norm=0
    # if there are NO good points, don't try to plot
    if DIgood:
        plot_mag(ZED['demag'], datablock, s, 1, units, norm)
        plot_zij(ZED['zijd'], datablock, angle, s, norm)
    else:
        # Nothing plottable: drop the now-unused figure keys.
        ZED.pop('demag')
        ZED.pop('zijd')
    return ZED
"def",
"plot_zed",
"(",
"ZED",
",",
"datablock",
",",
"angle",
",",
"s",
",",
"units",
")",
":",
"for",
"fignum",
"in",
"list",
"(",
"ZED",
".",
"keys",
"(",
")",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"num",
"=",
"ZED",
"[",
"fignum",
"]",
")",
"plt",
".",
"clf",
"(",
")",
"if",
"not",
"isServer",
":",
"plt",
".",
"figtext",
"(",
".02",
",",
".01",
",",
"version_num",
")",
"DIbad",
",",
"DIgood",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"rec",
"in",
"datablock",
":",
"if",
"cb",
".",
"is_null",
"(",
"rec",
"[",
"1",
"]",
",",
"zero_as_null",
"=",
"False",
")",
":",
"print",
"(",
"'-W- You are missing a declination for specimen'",
",",
"s",
",",
"', skipping this row'",
")",
"continue",
"if",
"cb",
".",
"is_null",
"(",
"rec",
"[",
"2",
"]",
",",
"zero_as_null",
"=",
"False",
")",
":",
"print",
"(",
"'-W- You are missing an inclination for specimen'",
",",
"s",
",",
"', skipping this row'",
")",
"continue",
"if",
"rec",
"[",
"5",
"]",
"==",
"'b'",
":",
"DIbad",
".",
"append",
"(",
"(",
"rec",
"[",
"1",
"]",
",",
"rec",
"[",
"2",
"]",
")",
")",
"else",
":",
"DIgood",
".",
"append",
"(",
"(",
"rec",
"[",
"1",
"]",
",",
"rec",
"[",
"2",
"]",
")",
")",
"badsym",
"=",
"{",
"'lower'",
":",
"[",
"'+'",
",",
"'g'",
"]",
",",
"'upper'",
":",
"[",
"'x'",
",",
"'c'",
"]",
"}",
"if",
"len",
"(",
"DIgood",
")",
">",
"0",
":",
"plot_eq",
"(",
"ZED",
"[",
"'eqarea'",
"]",
",",
"DIgood",
",",
"s",
")",
"if",
"len",
"(",
"DIbad",
")",
">",
"0",
":",
"plot_di_sym",
"(",
"ZED",
"[",
"'eqarea'",
"]",
",",
"DIbad",
",",
"badsym",
")",
"elif",
"len",
"(",
"DIbad",
")",
">",
"0",
":",
"plot_eq_sym",
"(",
"ZED",
"[",
"'eqarea'",
"]",
",",
"DIbad",
",",
"s",
",",
"badsym",
")",
"AngleX",
",",
"AngleY",
"=",
"[",
"]",
",",
"[",
"]",
"XY",
"=",
"pmag",
".",
"dimap",
"(",
"angle",
",",
"90.",
")",
"AngleX",
".",
"append",
"(",
"XY",
"[",
"0",
"]",
")",
"AngleY",
".",
"append",
"(",
"XY",
"[",
"1",
"]",
")",
"XY",
"=",
"pmag",
".",
"dimap",
"(",
"angle",
",",
"0.",
")",
"AngleX",
".",
"append",
"(",
"XY",
"[",
"0",
"]",
")",
"AngleY",
".",
"append",
"(",
"XY",
"[",
"1",
"]",
")",
"plt",
".",
"figure",
"(",
"num",
"=",
"ZED",
"[",
"'eqarea'",
"]",
")",
"# Draw a line for Zijderveld horizontal axis",
"plt",
".",
"plot",
"(",
"AngleX",
",",
"AngleY",
",",
"'r-'",
")",
"if",
"AngleX",
"[",
"-",
"1",
"]",
"==",
"0",
":",
"AngleX",
"[",
"-",
"1",
"]",
"=",
"0.01",
"plt",
".",
"text",
"(",
"AngleX",
"[",
"-",
"1",
"]",
"+",
"(",
"old_div",
"(",
"AngleX",
"[",
"-",
"1",
"]",
",",
"abs",
"(",
"AngleX",
"[",
"-",
"1",
"]",
")",
")",
")",
"*",
".1",
",",
"AngleY",
"[",
"-",
"1",
"]",
"+",
"(",
"old_div",
"(",
"AngleY",
"[",
"-",
"1",
"]",
",",
"abs",
"(",
"AngleY",
"[",
"-",
"1",
"]",
")",
")",
")",
"*",
".1",
",",
"'X'",
")",
"norm",
"=",
"1",
"#if units==\"U\": norm=0",
"# if there are NO good points, don't try to plot",
"if",
"DIgood",
":",
"plot_mag",
"(",
"ZED",
"[",
"'demag'",
"]",
",",
"datablock",
",",
"s",
",",
"1",
",",
"units",
",",
"norm",
")",
"plot_zij",
"(",
"ZED",
"[",
"'zijd'",
"]",
",",
"datablock",
",",
"angle",
",",
"s",
",",
"norm",
")",
"else",
":",
"ZED",
".",
"pop",
"(",
"'demag'",
")",
"ZED",
".",
"pop",
"(",
"'zijd'",
")",
"return",
"ZED"
] | 35.957746 | 18.43662 |
def _addSexSpecificity(self, subject_id, sex):
    """
    Attach a sex-specificity qualifier triple to a subject node.

    In our modeling this qualifies an association (for example a
    genotype-to-phenotype association) as being specific to one sex,
    as done for MGI and IMPC data. The caller is expected to supply
    the ontology term for sex (e.g. from PATO).

    Note: this class is probably not the right home for this method;
    it lives here until a better one is found.

    :param subject_id: identifier of the node to qualify
    :param sex: ontology term identifier for the sex
    :return: None
    """
    predicate = self.globaltt['has_sex_specificty']
    self.graph.addTriple(subject_id, predicate, sex)
"def",
"_addSexSpecificity",
"(",
"self",
",",
"subject_id",
",",
"sex",
")",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"subject_id",
",",
"self",
".",
"globaltt",
"[",
"'has_sex_specificty'",
"]",
",",
"sex",
")"
] | 36.333333 | 21.111111 |
def execute(self, *args, **kwargs):
    """
    Parse command-line arguments and run the tool.

    This is shorthand for:
        args = self.parser.parse_args(*args, **kwargs)
        self.process(args)

    :param args: positional arguments forwarded to ``parser.parse_args``
    :param kwargs: keyword arguments forwarded to ``parser.parse_args``
    """
    args = self.parser.parse_args(*args, **kwargs)
    self.process(args)
"def",
"execute",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"process",
"(",
"args",
")"
] | 31.2 | 12 |
def array(source_array, ctx=None, dtype=None):
    """Creates an array from any object exposing the array interface.
    Parameters
    ----------
    source_array : array_like
        An object exposing the array interface, an object whose `__array__`
        method returns an array, or any (nested) sequence.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array. The default dtype is ``source_array.dtype``
        if `source_array` is an `NDArray`, `float32` otherwise.
    Returns
    -------
    NDArray
        An `NDArray` with the same contents as the `source_array`.
    Raises
    ------
    TypeError
        If `source_array` cannot be converted to a numpy array.
    """
    if isinstance(source_array, NDArray):
        dtype = source_array.dtype if dtype is None else dtype
    else:
        dtype = mx_real_t if dtype is None else dtype
        if not isinstance(source_array, np.ndarray):
            try:
                source_array = np.array(source_array, dtype=dtype)
            except Exception as err:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit propagate; chain the cause for easier debugging.
                raise TypeError('source_array must be array like object') from err
    arr = empty(source_array.shape, ctx, dtype)
    arr[:] = source_array
    return arr
"def",
"array",
"(",
"source_array",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"source_array",
",",
"NDArray",
")",
":",
"dtype",
"=",
"source_array",
".",
"dtype",
"if",
"dtype",
"is",
"None",
"else",
"dtype",
"else",
":",
"dtype",
"=",
"mx_real_t",
"if",
"dtype",
"is",
"None",
"else",
"dtype",
"if",
"not",
"isinstance",
"(",
"source_array",
",",
"np",
".",
"ndarray",
")",
":",
"try",
":",
"source_array",
"=",
"np",
".",
"array",
"(",
"source_array",
",",
"dtype",
"=",
"dtype",
")",
"except",
":",
"raise",
"TypeError",
"(",
"'source_array must be array like object'",
")",
"arr",
"=",
"empty",
"(",
"source_array",
".",
"shape",
",",
"ctx",
",",
"dtype",
")",
"arr",
"[",
":",
"]",
"=",
"source_array",
"return",
"arr"
] | 37.677419 | 21.032258 |
def great_circle_dist(lat1, lon1, lat2, lon2):
    """
    Great-circle (Haversine) distance in meters between two points.

    Parameters
    ----------
    lat1, lon1, lat2, lon2 : float
        Latitude and longitude in degrees.

    Returns
    -------
    dist : float
        Distance in meters.
    """
    earth_radius = 6372795  # meters
    phi1, lam1, phi2, lam2 = (math.radians(v)
                              for v in (lat1, lon1, lat2, lon2))
    # Haversine formula, see:
    # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula
    sin_half_dlat = math.sin((phi2 - phi1) / 2)
    sin_half_dlon = math.sin((lam2 - lam1) / 2)
    haversine = (sin_half_dlat * sin_half_dlat
                 + math.cos(phi1) * math.cos(phi2)
                 * sin_half_dlon * sin_half_dlon)
    return 2 * earth_radius * math.asin(math.sqrt(haversine))
"def",
"great_circle_dist",
"(",
"lat1",
",",
"lon1",
",",
"lat2",
",",
"lon2",
")",
":",
"radius",
"=",
"6372795",
"# meters",
"lat1",
"=",
"math",
".",
"radians",
"(",
"lat1",
")",
"lon1",
"=",
"math",
".",
"radians",
"(",
"lon1",
")",
"lat2",
"=",
"math",
".",
"radians",
"(",
"lat2",
")",
"lon2",
"=",
"math",
".",
"radians",
"(",
"lon2",
")",
"dlat",
"=",
"lat2",
"-",
"lat1",
"dlon",
"=",
"lon2",
"-",
"lon1",
"# formula from:",
"# http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula",
"a",
"=",
"math",
".",
"pow",
"(",
"math",
".",
"sin",
"(",
"dlat",
"/",
"2",
")",
",",
"2",
")",
"b",
"=",
"math",
".",
"cos",
"(",
"lat1",
")",
"*",
"math",
".",
"cos",
"(",
"lat2",
")",
"*",
"math",
".",
"pow",
"(",
"math",
".",
"sin",
"(",
"dlon",
"/",
"2",
")",
",",
"2",
")",
"d",
"=",
"2",
"*",
"radius",
"*",
"math",
".",
"asin",
"(",
"math",
".",
"sqrt",
"(",
"a",
"+",
"b",
")",
")",
"return",
"d"
] | 23.424242 | 20.757576 |
def analytical(src, rec, res, freqtime, solution='fs', signal=None, ab=11,
               aniso=None, epermH=None, epermV=None, mpermH=None, mpermV=None,
               verb=2):
    r"""Return the analytical full- or half-space solution.
    Calculate the electromagnetic frequency- or time-domain field due to
    infinitesimal small electric or magnetic dipole source(s), measured by
    infinitesimal small electric or magnetic dipole receiver(s); sources and
    receivers are directed along the principal directions x, y, or z, and all
    sources are at the same depth, as well as all receivers are at the same
    depth.
    In the case of a halfspace the air-interface is located at z = 0 m.
    You can call the functions ``fullspace`` and ``halfspace`` in ``kernel.py``
    directly. This interface is just to provide a consistent interface with the
    same input parameters as for instance for ``dipole``.
    This function yields the same result if ``solution='fs'`` as ``dipole``, if
    the model is a fullspace.
    Included are:
    - Full fullspace solution (``solution='fs'``) for ee-, me-, em-,
      mm-fields, only frequency domain, [HuTS15]_.
    - Diffusive fullspace solution (``solution='dfs'``) for ee-fields,
      [SlHM10]_.
    - Diffusive halfspace solution (``solution='dhs'``) for ee-fields,
      [SlHM10]_.
    - Diffusive direct- and reflected field and airwave
      (``solution='dsplit'``) for ee-fields, [SlHM10]_.
    - Diffusive direct- and reflected field and airwave
      (``solution='dtetm'``) for ee-fields, split into TE and TM mode
      [SlHM10]_.
    Parameters
    ----------
    src, rec : list of floats or arrays
        Source and receiver coordinates (m): [x, y, z].
        The x- and y-coordinates can be arrays, z is a single value.
        The x- and y-coordinates must have the same dimension.
    res : float
        Horizontal resistivity rho_h (Ohm.m).
        Alternatively, res can be a dictionary. See the main manual of empymod
        too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and
        zetaV, which can be used to, for instance, use the Cole-Cole model for
        IP.
    freqtime : array_like
        Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0).
    solution : str, optional
        Defines which solution is returned:
        - 'fs' : Full fullspace solution (ee-, me-, em-, mm-fields); f-domain.
        - 'dfs' : Diffusive fullspace solution (ee-fields only).
        - 'dhs' : Diffusive halfspace solution (ee-fields only).
        - 'dsplit' : Diffusive direct- and reflected field and airwave
          (ee-fields only).
        - 'dtetm' : as dsplit, but direct fielt TE, TM; reflected field TE, TM,
          and airwave (ee-fields only).
    signal : {None, 0, 1, -1}, optional
        Source signal, default is None:
        - None: Frequency-domain response
        - -1 : Switch-off time-domain response
        - 0 : Impulse time-domain response
        - +1 : Switch-on time-domain response
    ab : int, optional
        Source-receiver configuration, defaults to 11.
        +---------------+-------+------+------+------+------+------+------+
        |                       | electric  source   | magnetic source    |
        +===============+=======+======+======+======+======+======+======+
        |                       | **x**| **y**| **z**| **x**| **y**| **z**|
        +---------------+-------+------+------+------+------+------+------+
        |               | **x** |  11  |  12  |  13  |  14  |  15  |  16  |
        + **electric**  +-------+------+------+------+------+------+------+
        |               | **y** |  21  |  22  |  23  |  24  |  25  |  26  |
        + **receiver**  +-------+------+------+------+------+------+------+
        |               | **z** |  31  |  32  |  33  |  34  |  35  |  36  |
        +---------------+-------+------+------+------+------+------+------+
        |               | **x** |  41  |  42  |  43  |  44  |  45  |  46  |
        + **magnetic**  +-------+------+------+------+------+------+------+
        |               | **y** |  51  |  52  |  53  |  54  |  55  |  56  |
        + **receiver**  +-------+------+------+------+------+------+------+
        |               | **z** |  61  |  62  |  63  |  64  |  65  |  66  |
        +---------------+-------+------+------+------+------+------+------+
    aniso : float, optional
        Anisotropy lambda = sqrt(rho_v/rho_h) (-); defaults to one.
    epermH, epermV : float, optional
        Relative horizontal/vertical electric permittivity epsilon_h/epsilon_v
        (-); default is one. Ignored for the diffusive solution.
    mpermH, mpermV : float, optional
        Relative horizontal/vertical magnetic permeability mu_h/mu_v (-);
        default is one. Ignored for the diffusive solution.
    verb : {0, 1, 2, 3, 4}, optional
        Level of verbosity, default is 2:
        - 0: Print nothing.
        - 1: Print warnings.
        - 2: Print additional runtime
        - 3: Print additional start/stop, condensed parameter information.
        - 4: Print additional full parameter information
    Returns
    -------
    EM : ndarray, (nfreq, nrec, nsrc)
        Frequency- or time-domain EM field (depending on ``signal``):
        - If rec is electric, returns E [V/m].
        - If rec is magnetic, returns B [T] (not H [A/m]!).
        However, source and receiver are normalised. So for instance in the
        electric case the source strength is 1 A and its length is 1 m. So the
        electric field could also be written as [V/(A.m2)].
        The shape of EM is (nfreq, nrec, nsrc). However, single dimensions
        are removed.
        If ``solution='dsplit'``, three ndarrays are returned: direct, reflect,
        air.
        If ``solution='dtetm'``, five ndarrays are returned: direct_TE,
        direct_TM, reflect_TE, reflect_TM, air.
    Examples
    --------
    >>> import numpy as np
    >>> from empymod import analytical
    >>> src = [0, 0, 0]
    >>> rec = [np.arange(1, 11)*500, np.zeros(10), 200]
    >>> res = 50
    >>> EMfield = analytical(src, rec, res, freqtime=1, verb=0)
    >>> print(EMfield)
    [  4.03091405e-08 -9.69163818e-10j   6.97630362e-09 -4.88342150e-10j
       2.15205979e-09 -2.97489809e-10j   8.90394459e-10 -1.99313433e-10j
       4.32915802e-10 -1.40741644e-10j   2.31674165e-10 -1.02579391e-10j
       1.31469130e-10 -7.62770461e-11j   7.72342470e-11 -5.74534125e-11j
       4.61480481e-11 -4.36275540e-11j   2.76174038e-11 -3.32860932e-11j]
    """
    # === 1.  LET'S START ============
    t0 = printstartfinish(verb)
    # === 2.  CHECK INPUT ============
    # Check times or frequencies
    if signal is not None:
        freqtime = check_time_only(freqtime, signal, verb)
    # Check layer parameters
    model = check_model([], res, aniso, epermH, epermV, mpermH, mpermV, True,
                        verb)
    depth, res, aniso, epermH, epermV, mpermH, mpermV, _ = model
    # Check frequency => get etaH, etaV, zetaH, and zetaV
    frequency = check_frequency(freqtime, res, aniso, epermH, epermV, mpermH,
                                mpermV, verb)
    freqtime, etaH, etaV, zetaH, zetaV = frequency
    # Update etaH/etaV and zetaH/zetaV according to user-provided model
    if isinstance(res, dict) and 'func_eta' in res:
        etaH, etaV = res['func_eta'](res, locals())
    if isinstance(res, dict) and 'func_zeta' in res:
        zetaH, zetaV = res['func_zeta'](res, locals())
    # Check src-rec configuration
    # => Get flags if src or rec or both are magnetic (msrc, mrec)
    ab_calc, msrc, mrec = check_ab(ab, verb)
    # Check src and rec
    src, nsrc = check_dipole(src, 'src', verb)
    rec, nrec = check_dipole(rec, 'rec', verb)
    # Get offsets and angles (off, angle)
    off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
    # Get layer number in which src and rec reside (lsrc/lrec)
    _, zsrc = get_layer_nr(src, depth)
    _, zrec = get_layer_nr(rec, depth)
    # Check possibilities
    check_solution(solution, signal, ab, msrc, mrec)
    # === 3. EM-FIELD CALCULATION ============
    # All diffusive solutions (dfs/dhs/dsplit/dtetm) start with 'd' and are
    # handled by kernel.halfspace; the full fullspace goes to kernel.fullspace.
    if solution[0] == 'd':
        EM = kernel.halfspace(off, angle, zsrc, zrec, etaH, etaV,
                              freqtime[:, None], ab_calc, signal, solution)
    else:
        if ab_calc not in [36, ]:
            EM = kernel.fullspace(off, angle, zsrc, zrec, etaH, etaV, zetaH,
                                  zetaV, ab_calc, msrc, mrec)
        else:
            # If <ab> = 36 (or 63), field is zero
            # In `bipole` and in `dipole`, this is taken care of in `fem`. Here
            # we have to take care of it separately
            EM = np.zeros((freqtime.size*nrec*nsrc), dtype=complex)
    # Squeeze
    # Reshape the flat kernel output to (nfreq, nrec, nsrc) and drop
    # singleton dimensions; dsplit/dtetm return tuples of 3/5 arrays.
    if solution[1:] == 'split':
        EM = (np.squeeze(EM[0].reshape((-1, nrec, nsrc), order='F')),
              np.squeeze(EM[1].reshape((-1, nrec, nsrc), order='F')),
              np.squeeze(EM[2].reshape((-1, nrec, nsrc), order='F')))
    elif solution[1:] == 'tetm':
        EM = (np.squeeze(EM[0].reshape((-1, nrec, nsrc), order='F')),
              np.squeeze(EM[1].reshape((-1, nrec, nsrc), order='F')),
              np.squeeze(EM[2].reshape((-1, nrec, nsrc), order='F')),
              np.squeeze(EM[3].reshape((-1, nrec, nsrc), order='F')),
              np.squeeze(EM[4].reshape((-1, nrec, nsrc), order='F')))
    else:
        EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))
    # === 4.  FINISHED ============
    printstartfinish(verb, t0)
    return EM
"def",
"analytical",
"(",
"src",
",",
"rec",
",",
"res",
",",
"freqtime",
",",
"solution",
"=",
"'fs'",
",",
"signal",
"=",
"None",
",",
"ab",
"=",
"11",
",",
"aniso",
"=",
"None",
",",
"epermH",
"=",
"None",
",",
"epermV",
"=",
"None",
",",
"mpermH",
"=",
"None",
",",
"mpermV",
"=",
"None",
",",
"verb",
"=",
"2",
")",
":",
"# === 1. LET'S START ============",
"t0",
"=",
"printstartfinish",
"(",
"verb",
")",
"# === 2. CHECK INPUT ============",
"# Check times or frequencies",
"if",
"signal",
"is",
"not",
"None",
":",
"freqtime",
"=",
"check_time_only",
"(",
"freqtime",
",",
"signal",
",",
"verb",
")",
"# Check layer parameters",
"model",
"=",
"check_model",
"(",
"[",
"]",
",",
"res",
",",
"aniso",
",",
"epermH",
",",
"epermV",
",",
"mpermH",
",",
"mpermV",
",",
"True",
",",
"verb",
")",
"depth",
",",
"res",
",",
"aniso",
",",
"epermH",
",",
"epermV",
",",
"mpermH",
",",
"mpermV",
",",
"_",
"=",
"model",
"# Check frequency => get etaH, etaV, zetaH, and zetaV",
"frequency",
"=",
"check_frequency",
"(",
"freqtime",
",",
"res",
",",
"aniso",
",",
"epermH",
",",
"epermV",
",",
"mpermH",
",",
"mpermV",
",",
"verb",
")",
"freqtime",
",",
"etaH",
",",
"etaV",
",",
"zetaH",
",",
"zetaV",
"=",
"frequency",
"# Update etaH/etaV and zetaH/zetaV according to user-provided model",
"if",
"isinstance",
"(",
"res",
",",
"dict",
")",
"and",
"'func_eta'",
"in",
"res",
":",
"etaH",
",",
"etaV",
"=",
"res",
"[",
"'func_eta'",
"]",
"(",
"res",
",",
"locals",
"(",
")",
")",
"if",
"isinstance",
"(",
"res",
",",
"dict",
")",
"and",
"'func_zeta'",
"in",
"res",
":",
"zetaH",
",",
"zetaV",
"=",
"res",
"[",
"'func_zeta'",
"]",
"(",
"res",
",",
"locals",
"(",
")",
")",
"# Check src-rec configuration",
"# => Get flags if src or rec or both are magnetic (msrc, mrec)",
"ab_calc",
",",
"msrc",
",",
"mrec",
"=",
"check_ab",
"(",
"ab",
",",
"verb",
")",
"# Check src and rec",
"src",
",",
"nsrc",
"=",
"check_dipole",
"(",
"src",
",",
"'src'",
",",
"verb",
")",
"rec",
",",
"nrec",
"=",
"check_dipole",
"(",
"rec",
",",
"'rec'",
",",
"verb",
")",
"# Get offsets and angles (off, angle)",
"off",
",",
"angle",
"=",
"get_off_ang",
"(",
"src",
",",
"rec",
",",
"nsrc",
",",
"nrec",
",",
"verb",
")",
"# Get layer number in which src and rec reside (lsrc/lrec)",
"_",
",",
"zsrc",
"=",
"get_layer_nr",
"(",
"src",
",",
"depth",
")",
"_",
",",
"zrec",
"=",
"get_layer_nr",
"(",
"rec",
",",
"depth",
")",
"# Check possibilities",
"check_solution",
"(",
"solution",
",",
"signal",
",",
"ab",
",",
"msrc",
",",
"mrec",
")",
"# === 3. EM-FIELD CALCULATION ============",
"if",
"solution",
"[",
"0",
"]",
"==",
"'d'",
":",
"EM",
"=",
"kernel",
".",
"halfspace",
"(",
"off",
",",
"angle",
",",
"zsrc",
",",
"zrec",
",",
"etaH",
",",
"etaV",
",",
"freqtime",
"[",
":",
",",
"None",
"]",
",",
"ab_calc",
",",
"signal",
",",
"solution",
")",
"else",
":",
"if",
"ab_calc",
"not",
"in",
"[",
"36",
",",
"]",
":",
"EM",
"=",
"kernel",
".",
"fullspace",
"(",
"off",
",",
"angle",
",",
"zsrc",
",",
"zrec",
",",
"etaH",
",",
"etaV",
",",
"zetaH",
",",
"zetaV",
",",
"ab_calc",
",",
"msrc",
",",
"mrec",
")",
"else",
":",
"# If <ab> = 36 (or 63), field is zero",
"# In `bipole` and in `dipole`, this is taken care of in `fem`. Here",
"# we have to take care of it separately",
"EM",
"=",
"np",
".",
"zeros",
"(",
"(",
"freqtime",
".",
"size",
"*",
"nrec",
"*",
"nsrc",
")",
",",
"dtype",
"=",
"complex",
")",
"# Squeeze",
"if",
"solution",
"[",
"1",
":",
"]",
"==",
"'split'",
":",
"EM",
"=",
"(",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"0",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
",",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"1",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
",",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"2",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
")",
"elif",
"solution",
"[",
"1",
":",
"]",
"==",
"'tetm'",
":",
"EM",
"=",
"(",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"0",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
",",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"1",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
",",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"2",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
",",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"3",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
",",
"np",
".",
"squeeze",
"(",
"EM",
"[",
"4",
"]",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
")",
"else",
":",
"EM",
"=",
"np",
".",
"squeeze",
"(",
"EM",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"nrec",
",",
"nsrc",
")",
",",
"order",
"=",
"'F'",
")",
")",
"# === 4. FINISHED ============",
"printstartfinish",
"(",
"verb",
",",
"t0",
")",
"return",
"EM"
] | 42.617117 | 25.040541 |
def on_close_shortcut(self, *args):
    """Close the currently selected state machine (shortcut handler)."""
    selected_sm_m = self.model.get_selected_state_machine_model()
    if selected_sm_m is not None:
        self.on_close_clicked(None, selected_sm_m, None, force=False)
"def",
"on_close_shortcut",
"(",
"self",
",",
"*",
"args",
")",
":",
"state_machine_m",
"=",
"self",
".",
"model",
".",
"get_selected_state_machine_model",
"(",
")",
"if",
"state_machine_m",
"is",
"None",
":",
"return",
"self",
".",
"on_close_clicked",
"(",
"None",
",",
"state_machine_m",
",",
"None",
",",
"force",
"=",
"False",
")"
] | 49.333333 | 15.666667 |
def update_picks(self, games=None, points=None):
    '''
    Update this pick set's game picks and/or points tiebreaker.

    :param games: optional dict of {game.id: winner_id} for all picked games
        to update; picks whose game has already started are left unchanged.
    :param points: optional points value; ``0`` is a valid value.
    '''
    if games:
        game_dict = {g.id: g for g in self.gameset.games.filter(id__in=games)}
        game_picks = {pick.game.id: pick for pick in self.gamepicks.filter(game__id__in=games)}
        for key, winner in games.items():
            game = game_dict[key]
            if not game.has_started:
                pick = game_picks[key]
                pick.winner_id = winner
                pick.save()
    if points is not None:
        self.points = points
        self.save()
    # Use ``points is not None`` (not truthiness) so that a points value of
    # 0 still fires the updated signal, consistent with the save branch above.
    if games or points is not None:
        self.updated_signal.send(sender=self.__class__, pickset=self, auto_pick=False)
"def",
"update_picks",
"(",
"self",
",",
"games",
"=",
"None",
",",
"points",
"=",
"None",
")",
":",
"if",
"games",
":",
"game_dict",
"=",
"{",
"g",
".",
"id",
":",
"g",
"for",
"g",
"in",
"self",
".",
"gameset",
".",
"games",
".",
"filter",
"(",
"id__in",
"=",
"games",
")",
"}",
"game_picks",
"=",
"{",
"pick",
".",
"game",
".",
"id",
":",
"pick",
"for",
"pick",
"in",
"self",
".",
"gamepicks",
".",
"filter",
"(",
"game__id__in",
"=",
"games",
")",
"}",
"for",
"key",
",",
"winner",
"in",
"games",
".",
"items",
"(",
")",
":",
"game",
"=",
"game_dict",
"[",
"key",
"]",
"if",
"not",
"game",
".",
"has_started",
":",
"pick",
"=",
"game_picks",
"[",
"key",
"]",
"pick",
".",
"winner_id",
"=",
"winner",
"pick",
".",
"save",
"(",
")",
"if",
"points",
"is",
"not",
"None",
":",
"self",
".",
"points",
"=",
"points",
"self",
".",
"save",
"(",
")",
"if",
"games",
"or",
"points",
":",
"self",
".",
"updated_signal",
".",
"send",
"(",
"sender",
"=",
"self",
".",
"__class__",
",",
"pickset",
"=",
"self",
",",
"auto_pick",
"=",
"False",
")"
] | 39.4 | 21.5 |
def find_common(self, l):
    """
    Return the longest common prefix of the items in ``l``,
    e.g. 'ab' for ['abcd', 'abce', 'abf'].

    Requires an ordered list. Returns None when the list is empty or
    the items share no common prefix.
    """
    # Guard: the original crashed with IndexError on an empty list.
    if not l:
        return None
    if len(l) == 1:
        return l[0]
    prefix = l[0]
    for item in l[1:]:
        for i, (x, y) in enumerate(zip(prefix, item)):
            if x != y:
                # Slicing already yields a str; the former
                # ``"".join(prefix[:i])`` was redundant.
                prefix = prefix[:i]
                break
        if not prefix:
            return None
    return prefix
"def",
"find_common",
"(",
"self",
",",
"l",
")",
":",
"if",
"len",
"(",
"l",
")",
"==",
"1",
":",
"return",
"l",
"[",
"0",
"]",
"init",
"=",
"l",
"[",
"0",
"]",
"for",
"item",
"in",
"l",
"[",
"1",
":",
"]",
":",
"for",
"i",
",",
"(",
"x",
",",
"y",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"init",
",",
"item",
")",
")",
":",
"if",
"x",
"!=",
"y",
":",
"init",
"=",
"\"\"",
".",
"join",
"(",
"init",
"[",
":",
"i",
"]",
")",
"break",
"if",
"not",
"init",
":",
"return",
"None",
"return",
"init"
] | 26 | 17.111111 |
def remove_constraint(self, name):
    """Delete the named constraint from the problem and refresh indices."""
    row = self._get_constraint_index(name)
    # Drop the constraint's row from the coefficient matrix.
    self._A = np.delete(self.A, row, 0)
    # Drop the matching entry from the bounds vector.
    self.upper_bounds = np.delete(self.upper_bounds, row)
    # Forget the constraint object itself, then refresh bookkeeping.
    del self._constraints[name]
    self._update_constraint_indices()
    self._reset_solution()
"def",
"remove_constraint",
"(",
"self",
",",
"name",
")",
":",
"index",
"=",
"self",
".",
"_get_constraint_index",
"(",
"name",
")",
"# Remove from matrix",
"self",
".",
"_A",
"=",
"np",
".",
"delete",
"(",
"self",
".",
"A",
",",
"index",
",",
"0",
")",
"# Remove from upper_bounds",
"self",
".",
"upper_bounds",
"=",
"np",
".",
"delete",
"(",
"self",
".",
"upper_bounds",
",",
"index",
")",
"# Remove from constraint list",
"del",
"self",
".",
"_constraints",
"[",
"name",
"]",
"self",
".",
"_update_constraint_indices",
"(",
")",
"self",
".",
"_reset_solution",
"(",
")"
] | 40.454545 | 7.181818 |
def child_task(self):
    '''child process - this holds all the GUI elements'''
    # Close file descriptors inherited from the parent process.
    mp_util.child_close_fds()
    # GUI imports are deliberately done here, not at module level, so the
    # parent process never loads the wx/matplotlib stack.
    import matplotlib
    import wx_processguard
    from wx_loader import wx
    from live_graph_ui import GraphFrame
    # Select the wx backend before any plotting happens.
    matplotlib.use('WXAgg')
    app = wx.App(False)
    app.frame = GraphFrame(state=self)
    app.frame.Show()
    # Blocks until the GUI is closed.
    app.MainLoop()
"def",
"child_task",
"(",
"self",
")",
":",
"mp_util",
".",
"child_close_fds",
"(",
")",
"import",
"matplotlib",
"import",
"wx_processguard",
"from",
"wx_loader",
"import",
"wx",
"from",
"live_graph_ui",
"import",
"GraphFrame",
"matplotlib",
".",
"use",
"(",
"'WXAgg'",
")",
"app",
"=",
"wx",
".",
"App",
"(",
"False",
")",
"app",
".",
"frame",
"=",
"GraphFrame",
"(",
"state",
"=",
"self",
")",
"app",
".",
"frame",
".",
"Show",
"(",
")",
"app",
".",
"MainLoop",
"(",
")"
] | 28.571429 | 15.285714 |
def mode_date(self, rows: List[Row], column: DateColumn) -> Date:
    """
    Takes a list of rows and a column and returns the most frequent value under
    that column in those rows.

    Returns an invalid ``Date(-1, -1, -1)`` when there are no values, and
    raises ``ExecutionError`` if the most frequent value is not a Date.
    """
    most_frequent_list = self._get_most_frequent_values(rows, column)
    if not most_frequent_list:
        return Date(-1, -1, -1)
    most_frequent_value = most_frequent_list[0]
    if not isinstance(most_frequent_value, Date):
        # Fixed typo in the error message ("valus" -> "values").
        raise ExecutionError(f"Invalid values for mode_date: {most_frequent_value}")
    return most_frequent_value
"def",
"mode_date",
"(",
"self",
",",
"rows",
":",
"List",
"[",
"Row",
"]",
",",
"column",
":",
"DateColumn",
")",
"->",
"Date",
":",
"most_frequent_list",
"=",
"self",
".",
"_get_most_frequent_values",
"(",
"rows",
",",
"column",
")",
"if",
"not",
"most_frequent_list",
":",
"return",
"Date",
"(",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"most_frequent_value",
"=",
"most_frequent_list",
"[",
"0",
"]",
"if",
"not",
"isinstance",
"(",
"most_frequent_value",
",",
"Date",
")",
":",
"raise",
"ExecutionError",
"(",
"f\"Invalid valus for mode_date: {most_frequent_value}\"",
")",
"return",
"most_frequent_value"
] | 47.583333 | 16.25 |
def run_jar(jar_name, more_args=None, properties=None, hadoop_conf_dir=None,
            keep_streams=True):
    """
    Run a jar on Hadoop (``hadoop jar`` command).

    All arguments are passed to :func:`run_cmd` (``args = [jar_name] +
    more_args``) .

    Raises ``ValueError`` if ``jar_name`` is not a readable file.
    """
    # Guard clause: fail fast on an unreadable jar.
    if not hu.is_readable(jar_name):
        raise ValueError("Can't read jar file %s" % jar_name)
    args = [jar_name]
    if more_args is not None:
        args.extend(more_args)
    return run_cmd(
        "jar", args, properties, hadoop_conf_dir=hadoop_conf_dir,
        keep_streams=keep_streams
    )
"def",
"run_jar",
"(",
"jar_name",
",",
"more_args",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"hadoop_conf_dir",
"=",
"None",
",",
"keep_streams",
"=",
"True",
")",
":",
"if",
"hu",
".",
"is_readable",
"(",
"jar_name",
")",
":",
"args",
"=",
"[",
"jar_name",
"]",
"if",
"more_args",
"is",
"not",
"None",
":",
"args",
".",
"extend",
"(",
"more_args",
")",
"return",
"run_cmd",
"(",
"\"jar\"",
",",
"args",
",",
"properties",
",",
"hadoop_conf_dir",
"=",
"hadoop_conf_dir",
",",
"keep_streams",
"=",
"keep_streams",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Can't read jar file %s\"",
"%",
"jar_name",
")"
] | 32.777778 | 17.444444 |
def _retrieve_certificate(self, access_token, timeout=3):
    """
    Generates a new private key and certificate request, submits the request to be
    signed by the SLCS CA and returns the certificate.

    :param access_token: OAuth2 bearer token used to authorize the request
    :param timeout: request timeout in seconds (default 3)
    :return: True on success
    :raises Exception: if the certificate service responds with an error
    """
    logger.debug("Retrieve certificate with token.")
    # Generate a new key pair
    key_pair = crypto.PKey()
    key_pair.generate_key(crypto.TYPE_RSA, 2048)
    private_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, key_pair).decode("utf-8")
    # Generate a certificate request using that key-pair
    cert_request = crypto.X509Req()
    # Attach the public key to the request
    cert_request.set_pubkey(key_pair)
    # Self-sign the request with the private key.
    # NOTE(review): 'md5' is a cryptographically weak digest; consider
    # 'sha256' if the CA accepts it.
    cert_request.sign(key_pair, 'md5')
    der_cert_req = crypto.dump_certificate_request(crypto.FILETYPE_ASN1, cert_request)
    encoded_cert_req = base64.b64encode(der_cert_req)
    # Build the OAuth session object
    token = {'access_token': access_token, 'token_type': 'Bearer'}
    client = OAuth2Session(token=token)
    # NOTE(review): verify=False disables TLS certificate verification for
    # this request, exposing it to man-in-the-middle attacks — confirm this
    # is intentional for the SLCS endpoint.
    response = client.post(
        self.certificate_url,
        data={'certificate_request': encoded_cert_req},
        verify=False,
        timeout=timeout,
    )
    if response.ok:
        # The service returns the signed certificate; store it together
        # with the private key as the ESGF credentials file.
        content = "{} {}".format(response.text, private_key)
        with open(self.esgf_credentials, 'w') as fh:
            fh.write(content)
        logger.debug('Fetched certificate successfully.')
    else:
        msg = "Could not get certificate: {} {}".format(response.status_code, response.reason)
        raise Exception(msg)
    return True
"def",
"_retrieve_certificate",
"(",
"self",
",",
"access_token",
",",
"timeout",
"=",
"3",
")",
":",
"logger",
".",
"debug",
"(",
"\"Retrieve certificate with token.\"",
")",
"# Generate a new key pair",
"key_pair",
"=",
"crypto",
".",
"PKey",
"(",
")",
"key_pair",
".",
"generate_key",
"(",
"crypto",
".",
"TYPE_RSA",
",",
"2048",
")",
"private_key",
"=",
"crypto",
".",
"dump_privatekey",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"key_pair",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# Generate a certificate request using that key-pair",
"cert_request",
"=",
"crypto",
".",
"X509Req",
"(",
")",
"# Create public key object",
"cert_request",
".",
"set_pubkey",
"(",
"key_pair",
")",
"# Add the public key to the request",
"cert_request",
".",
"sign",
"(",
"key_pair",
",",
"'md5'",
")",
"der_cert_req",
"=",
"crypto",
".",
"dump_certificate_request",
"(",
"crypto",
".",
"FILETYPE_ASN1",
",",
"cert_request",
")",
"encoded_cert_req",
"=",
"base64",
".",
"b64encode",
"(",
"der_cert_req",
")",
"# Build the OAuth session object",
"token",
"=",
"{",
"'access_token'",
":",
"access_token",
",",
"'token_type'",
":",
"'Bearer'",
"}",
"client",
"=",
"OAuth2Session",
"(",
"token",
"=",
"token",
")",
"response",
"=",
"client",
".",
"post",
"(",
"self",
".",
"certificate_url",
",",
"data",
"=",
"{",
"'certificate_request'",
":",
"encoded_cert_req",
"}",
",",
"verify",
"=",
"False",
",",
"timeout",
"=",
"timeout",
",",
")",
"if",
"response",
".",
"ok",
":",
"content",
"=",
"\"{} {}\"",
".",
"format",
"(",
"response",
".",
"text",
",",
"private_key",
")",
"with",
"open",
"(",
"self",
".",
"esgf_credentials",
",",
"'w'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"content",
")",
"logger",
".",
"debug",
"(",
"'Fetched certificate successfully.'",
")",
"else",
":",
"msg",
"=",
"\"Could not get certificate: {} {}\"",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
")",
"raise",
"Exception",
"(",
"msg",
")",
"return",
"True"
] | 37.045455 | 20.909091 |
def itemgetter(k, ellipsis=False, key=None):
"""
Looks up ``k`` as an index of the column's value.
If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to
indicate truncation. Alternatively, ``ellipsis`` can be set to ``True`` to use a default
``'...'``.
If a ``key`` is given, it may be a function which maps the target value to something else
before the item lookup takes place.
Examples::
# Choose an item from a list source.
winner = columns.TextColumn("Winner", sources=['get_rankings'],
processor=itemgetter(0))
# Take instance.description[:30] and append "..." to the end if truncation occurs.
description = columns.TextColumn("Description", sources=['description'],
processor=itemgetter(slice(None, 30), ellipsis=True))
"""
def helper(instance, *args, **kwargs):
default_value = kwargs.get('default_value')
if default_value is None:
default_value = instance
value = default_value[k]
if ellipsis and isinstance(k, slice) and isinstance(value, six.string_types) and \
len(default_value) > len(value):
if ellipsis is True:
value += "..."
else:
value += ellipsis
return value
if key:
helper = keyed_helper(helper)(key=key)
return helper | [
"def",
"itemgetter",
"(",
"k",
",",
"ellipsis",
"=",
"False",
",",
"key",
"=",
"None",
")",
":",
"def",
"helper",
"(",
"instance",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"default_value",
"=",
"kwargs",
".",
"get",
"(",
"'default_value'",
")",
"if",
"default_value",
"is",
"None",
":",
"default_value",
"=",
"instance",
"value",
"=",
"default_value",
"[",
"k",
"]",
"if",
"ellipsis",
"and",
"isinstance",
"(",
"k",
",",
"slice",
")",
"and",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
"and",
"len",
"(",
"default_value",
")",
">",
"len",
"(",
"value",
")",
":",
"if",
"ellipsis",
"is",
"True",
":",
"value",
"+=",
"\"...\"",
"else",
":",
"value",
"+=",
"ellipsis",
"return",
"value",
"if",
"key",
":",
"helper",
"=",
"keyed_helper",
"(",
"helper",
")",
"(",
"key",
"=",
"key",
")",
"return",
"helper"
] | 37.736842 | 24.315789 |
def _ParseRecordLogline(self, parser_mediator, structure):
"""Parses a logline record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
try:
datetime_iso8601 = self._GetISO8601String(structure.date_time)
date_time.CopyFromStringISO8601(datetime_iso8601)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
event_data = GoogleDriveSyncLogEventData()
event_data.log_level = structure.log_level
event_data.pid = structure.pid
event_data.thread = structure.thread
event_data.source_code = structure.source_code
# Replace newlines with spaces in structure.message to preserve output.
event_data.message = structure.message.replace('\n', ' ')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data) | [
"def",
"_ParseRecordLogline",
"(",
"self",
",",
"parser_mediator",
",",
"structure",
")",
":",
"date_time",
"=",
"dfdatetime_time_elements",
".",
"TimeElementsInMilliseconds",
"(",
")",
"try",
":",
"datetime_iso8601",
"=",
"self",
".",
"_GetISO8601String",
"(",
"structure",
".",
"date_time",
")",
"date_time",
".",
"CopyFromStringISO8601",
"(",
"datetime_iso8601",
")",
"except",
"ValueError",
":",
"parser_mediator",
".",
"ProduceExtractionWarning",
"(",
"'invalid date time value: {0!s}'",
".",
"format",
"(",
"structure",
".",
"date_time",
")",
")",
"return",
"event_data",
"=",
"GoogleDriveSyncLogEventData",
"(",
")",
"event_data",
".",
"log_level",
"=",
"structure",
".",
"log_level",
"event_data",
".",
"pid",
"=",
"structure",
".",
"pid",
"event_data",
".",
"thread",
"=",
"structure",
".",
"thread",
"event_data",
".",
"source_code",
"=",
"structure",
".",
"source_code",
"# Replace newlines with spaces in structure.message to preserve output.",
"event_data",
".",
"message",
"=",
"structure",
".",
"message",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_ADDED",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")"
] | 40.064516 | 21.290323 |
def get_response_code(url, timeout=10):
'''
Visit the URL and return the HTTP response code in 'int'
'''
try:
req = urllib2.urlopen(url, timeout=timeout)
except HTTPError, e:
return e.getcode()
except Exception, _:
fail("Couldn't reach the URL '%s'" % url)
else:
return req.getcode() | [
"def",
"get_response_code",
"(",
"url",
",",
"timeout",
"=",
"10",
")",
":",
"try",
":",
"req",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
"except",
"HTTPError",
",",
"e",
":",
"return",
"e",
".",
"getcode",
"(",
")",
"except",
"Exception",
",",
"_",
":",
"fail",
"(",
"\"Couldn't reach the URL '%s'\"",
"%",
"url",
")",
"else",
":",
"return",
"req",
".",
"getcode",
"(",
")"
] | 28 | 18.666667 |
def is_question_answered(self, question_id):
"""has the question matching item_id been answered and not skipped"""
question_map = self._get_question_map(question_id) # will raise NotFound()
if 'missingResponse' in question_map['responses'][0]:
return False
else:
return True | [
"def",
"is_question_answered",
"(",
"self",
",",
"question_id",
")",
":",
"question_map",
"=",
"self",
".",
"_get_question_map",
"(",
"question_id",
")",
"# will raise NotFound()",
"if",
"'missingResponse'",
"in",
"question_map",
"[",
"'responses'",
"]",
"[",
"0",
"]",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | 46.428571 | 18.285714 |
def _run_program(self, bin, fastafile, params=None):
"""
Run Trawler and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
params = self._parse_params(params)
tmp = NamedTemporaryFile(mode="w", dir=self.tmpdir, delete=False)
shutil.copy(fastafile, tmp.name)
fastafile = tmp.name
current_path = os.getcwd()
os.chdir(self.dir())
motifs = []
stdout = ""
stderr = ""
for wildcard in [0,1,2]:
cmd = "%s -sample %s -background %s -directory %s -strand %s -wildcard %s" % (
bin,
fastafile,
params["background"],
self.tmpdir,
params["strand"],
wildcard,
)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out,err = p.communicate()
stdout += out.decode()
stderr += err.decode()
os.chdir(current_path)
pwmfiles = glob.glob("{}/tmp*/result/*pwm".format(self.tmpdir))
if len(pwmfiles) > 0:
out_file = pwmfiles[0]
stdout += "\nOutfile: {}".format(out_file)
my_motifs = []
if os.path.exists(out_file):
my_motifs = read_motifs(out_file, fmt="pwm")
for m in motifs:
m.id = "{}_{}".format(self.name, m.id)
stdout += "\nTrawler: {} motifs".format(len(motifs))
# remove temporary files
if os.path.exists(tmp.name):
os.unlink(tmp.name)
for motif in my_motifs:
motif.id = "{}_{}_{}".format(self.name, wildcard, motif.id)
motifs += my_motifs
else:
stderr += "\nNo outfile found"
return motifs, stdout, stderr | [
"def",
"_run_program",
"(",
"self",
",",
"bin",
",",
"fastafile",
",",
"params",
"=",
"None",
")",
":",
"params",
"=",
"self",
".",
"_parse_params",
"(",
"params",
")",
"tmp",
"=",
"NamedTemporaryFile",
"(",
"mode",
"=",
"\"w\"",
",",
"dir",
"=",
"self",
".",
"tmpdir",
",",
"delete",
"=",
"False",
")",
"shutil",
".",
"copy",
"(",
"fastafile",
",",
"tmp",
".",
"name",
")",
"fastafile",
"=",
"tmp",
".",
"name",
"current_path",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"self",
".",
"dir",
"(",
")",
")",
"motifs",
"=",
"[",
"]",
"stdout",
"=",
"\"\"",
"stderr",
"=",
"\"\"",
"for",
"wildcard",
"in",
"[",
"0",
",",
"1",
",",
"2",
"]",
":",
"cmd",
"=",
"\"%s -sample %s -background %s -directory %s -strand %s -wildcard %s\"",
"%",
"(",
"bin",
",",
"fastafile",
",",
"params",
"[",
"\"background\"",
"]",
",",
"self",
".",
"tmpdir",
",",
"params",
"[",
"\"strand\"",
"]",
",",
"wildcard",
",",
")",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"stdout",
"+=",
"out",
".",
"decode",
"(",
")",
"stderr",
"+=",
"err",
".",
"decode",
"(",
")",
"os",
".",
"chdir",
"(",
"current_path",
")",
"pwmfiles",
"=",
"glob",
".",
"glob",
"(",
"\"{}/tmp*/result/*pwm\"",
".",
"format",
"(",
"self",
".",
"tmpdir",
")",
")",
"if",
"len",
"(",
"pwmfiles",
")",
">",
"0",
":",
"out_file",
"=",
"pwmfiles",
"[",
"0",
"]",
"stdout",
"+=",
"\"\\nOutfile: {}\"",
".",
"format",
"(",
"out_file",
")",
"my_motifs",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"out_file",
")",
":",
"my_motifs",
"=",
"read_motifs",
"(",
"out_file",
",",
"fmt",
"=",
"\"pwm\"",
")",
"for",
"m",
"in",
"motifs",
":",
"m",
".",
"id",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"m",
".",
"id",
")",
"stdout",
"+=",
"\"\\nTrawler: {} motifs\"",
".",
"format",
"(",
"len",
"(",
"motifs",
")",
")",
"# remove temporary files",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"tmp",
".",
"name",
")",
":",
"os",
".",
"unlink",
"(",
"tmp",
".",
"name",
")",
"for",
"motif",
"in",
"my_motifs",
":",
"motif",
".",
"id",
"=",
"\"{}_{}_{}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"wildcard",
",",
"motif",
".",
"id",
")",
"motifs",
"+=",
"my_motifs",
"else",
":",
"stderr",
"+=",
"\"\\nNo outfile found\"",
"return",
"motifs",
",",
"stdout",
",",
"stderr"
] | 31.518987 | 17.037975 |
def add_field(self, name: str, value: str, inline: bool = True) -> None:
"""
Adds an embed field.
Parameters
----------
name: str
Name attribute of the embed field.
value: str
Value attribute of the embed field.
inline: bool
Defaults to :class:`True`.
Whether or not the embed should be inline.
"""
field = {
'name': name,
'value': value,
'inline': inline
}
self.fields.append(field) | [
"def",
"add_field",
"(",
"self",
",",
"name",
":",
"str",
",",
"value",
":",
"str",
",",
"inline",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"field",
"=",
"{",
"'name'",
":",
"name",
",",
"'value'",
":",
"value",
",",
"'inline'",
":",
"inline",
"}",
"self",
".",
"fields",
".",
"append",
"(",
"field",
")"
] | 23.347826 | 19.26087 |
def verify_item_signature(signature_attribute, encrypted_item, verification_key, crypto_config):
# type: (dynamodb_types.BINARY_ATTRIBUTE, dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> None
"""Verify the item signature.
:param dict signature_attribute: Item signature DynamoDB attribute value
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey verification_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
"""
signature = signature_attribute[Tag.BINARY.dynamodb_tag]
verification_key.verify(
algorithm=verification_key.algorithm,
signature=signature,
data=_string_to_sign(
item=encrypted_item,
table_name=crypto_config.encryption_context.table_name,
attribute_actions=crypto_config.attribute_actions,
),
) | [
"def",
"verify_item_signature",
"(",
"signature_attribute",
",",
"encrypted_item",
",",
"verification_key",
",",
"crypto_config",
")",
":",
"# type: (dynamodb_types.BINARY_ATTRIBUTE, dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> None",
"signature",
"=",
"signature_attribute",
"[",
"Tag",
".",
"BINARY",
".",
"dynamodb_tag",
"]",
"verification_key",
".",
"verify",
"(",
"algorithm",
"=",
"verification_key",
".",
"algorithm",
",",
"signature",
"=",
"signature",
",",
"data",
"=",
"_string_to_sign",
"(",
"item",
"=",
"encrypted_item",
",",
"table_name",
"=",
"crypto_config",
".",
"encryption_context",
".",
"table_name",
",",
"attribute_actions",
"=",
"crypto_config",
".",
"attribute_actions",
",",
")",
",",
")"
] | 46.789474 | 24.473684 |
def getComponentExceptionSummary(self, tmaster, component_name, instances=[], callback=None):
"""
Get the summary of exceptions for component_name and list of instances.
Empty instance list will fetch all exceptions.
"""
if not tmaster or not tmaster.host or not tmaster.stats_port:
return
exception_request = tmaster_pb2.ExceptionLogRequest()
exception_request.component_name = component_name
if len(instances) > 0:
exception_request.instances.extend(instances)
request_str = exception_request.SerializeToString()
port = str(tmaster.stats_port)
host = tmaster.host
url = "http://{0}:{1}/exceptionsummary".format(host, port)
Log.debug("Creating request object.")
request = tornado.httpclient.HTTPRequest(url,
body=request_str,
method='POST',
request_timeout=5)
Log.debug('Making HTTP call to fetch exceptionsummary url: %s', url)
try:
client = tornado.httpclient.AsyncHTTPClient()
result = yield client.fetch(request)
Log.debug("HTTP call complete.")
except tornado.httpclient.HTTPError as e:
raise Exception(str(e))
# Check the response code - error if it is in 400s or 500s
responseCode = result.code
if responseCode >= 400:
message = "Error in getting exceptions from Tmaster, code: " + responseCode
Log.error(message)
raise tornado.gen.Return({
"message": message
})
# Parse the response from tmaster.
exception_response = tmaster_pb2.ExceptionLogResponse()
exception_response.ParseFromString(result.body)
if exception_response.status.status == common_pb2.NOTOK:
if exception_response.status.HasField("message"):
raise tornado.gen.Return({
"message": exception_response.status.message
})
# Send response
ret = []
for exception_log in exception_response.exceptions:
ret.append({'class_name': exception_log.stacktrace,
'lasttime': exception_log.lasttime,
'firsttime': exception_log.firsttime,
'count': str(exception_log.count)})
raise tornado.gen.Return(ret) | [
"def",
"getComponentExceptionSummary",
"(",
"self",
",",
"tmaster",
",",
"component_name",
",",
"instances",
"=",
"[",
"]",
",",
"callback",
"=",
"None",
")",
":",
"if",
"not",
"tmaster",
"or",
"not",
"tmaster",
".",
"host",
"or",
"not",
"tmaster",
".",
"stats_port",
":",
"return",
"exception_request",
"=",
"tmaster_pb2",
".",
"ExceptionLogRequest",
"(",
")",
"exception_request",
".",
"component_name",
"=",
"component_name",
"if",
"len",
"(",
"instances",
")",
">",
"0",
":",
"exception_request",
".",
"instances",
".",
"extend",
"(",
"instances",
")",
"request_str",
"=",
"exception_request",
".",
"SerializeToString",
"(",
")",
"port",
"=",
"str",
"(",
"tmaster",
".",
"stats_port",
")",
"host",
"=",
"tmaster",
".",
"host",
"url",
"=",
"\"http://{0}:{1}/exceptionsummary\"",
".",
"format",
"(",
"host",
",",
"port",
")",
"Log",
".",
"debug",
"(",
"\"Creating request object.\"",
")",
"request",
"=",
"tornado",
".",
"httpclient",
".",
"HTTPRequest",
"(",
"url",
",",
"body",
"=",
"request_str",
",",
"method",
"=",
"'POST'",
",",
"request_timeout",
"=",
"5",
")",
"Log",
".",
"debug",
"(",
"'Making HTTP call to fetch exceptionsummary url: %s'",
",",
"url",
")",
"try",
":",
"client",
"=",
"tornado",
".",
"httpclient",
".",
"AsyncHTTPClient",
"(",
")",
"result",
"=",
"yield",
"client",
".",
"fetch",
"(",
"request",
")",
"Log",
".",
"debug",
"(",
"\"HTTP call complete.\"",
")",
"except",
"tornado",
".",
"httpclient",
".",
"HTTPError",
"as",
"e",
":",
"raise",
"Exception",
"(",
"str",
"(",
"e",
")",
")",
"# Check the response code - error if it is in 400s or 500s",
"responseCode",
"=",
"result",
".",
"code",
"if",
"responseCode",
">=",
"400",
":",
"message",
"=",
"\"Error in getting exceptions from Tmaster, code: \"",
"+",
"responseCode",
"Log",
".",
"error",
"(",
"message",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"{",
"\"message\"",
":",
"message",
"}",
")",
"# Parse the response from tmaster.",
"exception_response",
"=",
"tmaster_pb2",
".",
"ExceptionLogResponse",
"(",
")",
"exception_response",
".",
"ParseFromString",
"(",
"result",
".",
"body",
")",
"if",
"exception_response",
".",
"status",
".",
"status",
"==",
"common_pb2",
".",
"NOTOK",
":",
"if",
"exception_response",
".",
"status",
".",
"HasField",
"(",
"\"message\"",
")",
":",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"{",
"\"message\"",
":",
"exception_response",
".",
"status",
".",
"message",
"}",
")",
"# Send response",
"ret",
"=",
"[",
"]",
"for",
"exception_log",
"in",
"exception_response",
".",
"exceptions",
":",
"ret",
".",
"append",
"(",
"{",
"'class_name'",
":",
"exception_log",
".",
"stacktrace",
",",
"'lasttime'",
":",
"exception_log",
".",
"lasttime",
",",
"'firsttime'",
":",
"exception_log",
".",
"firsttime",
",",
"'count'",
":",
"str",
"(",
"exception_log",
".",
"count",
")",
"}",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"ret",
")"
] | 40.381818 | 18.127273 |
def colorize(self, col, row, value=None):
'Returns curses attribute for the given col/row/value'
# colorstack = tuple(c.coloropt for c in self.getColorizers() if wrapply(c.func, self, col, row, value))
colorstack = []
for colorizer in self.getColorizers():
try:
r = colorizer.func(self, col, row, value)
if r:
colorstack.append(colorizer.coloropt if colorizer.coloropt else r)
except Exception as e:
exceptionCaught(e)
return colors.resolve_colors(tuple(colorstack)) | [
"def",
"colorize",
"(",
"self",
",",
"col",
",",
"row",
",",
"value",
"=",
"None",
")",
":",
"# colorstack = tuple(c.coloropt for c in self.getColorizers() if wrapply(c.func, self, col, row, value))",
"colorstack",
"=",
"[",
"]",
"for",
"colorizer",
"in",
"self",
".",
"getColorizers",
"(",
")",
":",
"try",
":",
"r",
"=",
"colorizer",
".",
"func",
"(",
"self",
",",
"col",
",",
"row",
",",
"value",
")",
"if",
"r",
":",
"colorstack",
".",
"append",
"(",
"colorizer",
".",
"coloropt",
"if",
"colorizer",
".",
"coloropt",
"else",
"r",
")",
"except",
"Exception",
"as",
"e",
":",
"exceptionCaught",
"(",
"e",
")",
"return",
"colors",
".",
"resolve_colors",
"(",
"tuple",
"(",
"colorstack",
")",
")"
] | 39.066667 | 24.666667 |
def to_mef(data, channels, sc_list, sc_channels = None):
"""
Transform flow cytometry data using a standard curve function.
This function accepts a list of standard curves (`sc_list`) and a list
of channels to which those standard curves should be applied
(`sc_channels`). `to_mef` automatically checks whether a standard curve
is available for each channel specified in `channels`, and throws an
error otherwise.
This function is intended to be reduced to the following signature::
to_mef_reduced(data, channels)
by using ``functools.partial`` once a list of standard curves and their
respective channels is available.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int, str, list of int, list of str
Channels on which to perform the transformation. If `channels` is
None, perform transformation in all channels specified on
`sc_channels`.
sc_list : list of functions
Functions implementing the standard curves for each channel in
`sc_channels`.
sc_channels : list of int or list of str, optional
List of channels corresponding to each function in `sc_list`. If
None, use all channels in `data`.
Returns
-------
FCSData or numpy array
NxD transformed flow cytometry data.
Raises
------
ValueError
If any channel specified in `channels` is not in `sc_channels`.
"""
# Default sc_channels
if sc_channels is None:
if data.ndim == 1:
sc_channels = range(data.shape[0])
else:
sc_channels = range(data.shape[1])
# Check that sc_channels and sc_list have the same length
if len(sc_channels) != len(sc_list):
raise ValueError("sc_channels and sc_list should have the same length")
# Convert sc_channels to indices
if hasattr(data, '_name_to_index'):
sc_channels = data._name_to_index(sc_channels)
# Default channels
if channels is None:
channels = sc_channels
# Convert channels to iterable
if not (hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types)):
channels = [channels]
# Convert channels to index
if hasattr(data, '_name_to_index'):
channels_ind = data._name_to_index(channels)
else:
channels_ind = channels
# Check if every channel is in sc_channels
for chi, chs in zip(channels_ind, channels):
if chi not in sc_channels:
raise ValueError("no standard curve for channel {}".format(chs))
# Copy data array
data_t = data.copy().astype(np.float64)
# Iterate over channels
for chi, sc in zip(sc_channels, sc_list):
if chi not in channels_ind:
continue
# Apply transformation
data_t[:,chi] = sc(data_t[:,chi])
# Apply transformation to range
if hasattr(data_t, '_range') and data_t._range[chi] is not None:
data_t._range[chi] = [sc(data_t._range[chi][0]),
sc(data_t._range[chi][1])]
return data_t | [
"def",
"to_mef",
"(",
"data",
",",
"channels",
",",
"sc_list",
",",
"sc_channels",
"=",
"None",
")",
":",
"# Default sc_channels",
"if",
"sc_channels",
"is",
"None",
":",
"if",
"data",
".",
"ndim",
"==",
"1",
":",
"sc_channels",
"=",
"range",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"else",
":",
"sc_channels",
"=",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
"# Check that sc_channels and sc_list have the same length",
"if",
"len",
"(",
"sc_channels",
")",
"!=",
"len",
"(",
"sc_list",
")",
":",
"raise",
"ValueError",
"(",
"\"sc_channels and sc_list should have the same length\"",
")",
"# Convert sc_channels to indices",
"if",
"hasattr",
"(",
"data",
",",
"'_name_to_index'",
")",
":",
"sc_channels",
"=",
"data",
".",
"_name_to_index",
"(",
"sc_channels",
")",
"# Default channels",
"if",
"channels",
"is",
"None",
":",
"channels",
"=",
"sc_channels",
"# Convert channels to iterable",
"if",
"not",
"(",
"hasattr",
"(",
"channels",
",",
"'__iter__'",
")",
"and",
"not",
"isinstance",
"(",
"channels",
",",
"six",
".",
"string_types",
")",
")",
":",
"channels",
"=",
"[",
"channels",
"]",
"# Convert channels to index",
"if",
"hasattr",
"(",
"data",
",",
"'_name_to_index'",
")",
":",
"channels_ind",
"=",
"data",
".",
"_name_to_index",
"(",
"channels",
")",
"else",
":",
"channels_ind",
"=",
"channels",
"# Check if every channel is in sc_channels",
"for",
"chi",
",",
"chs",
"in",
"zip",
"(",
"channels_ind",
",",
"channels",
")",
":",
"if",
"chi",
"not",
"in",
"sc_channels",
":",
"raise",
"ValueError",
"(",
"\"no standard curve for channel {}\"",
".",
"format",
"(",
"chs",
")",
")",
"# Copy data array",
"data_t",
"=",
"data",
".",
"copy",
"(",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# Iterate over channels",
"for",
"chi",
",",
"sc",
"in",
"zip",
"(",
"sc_channels",
",",
"sc_list",
")",
":",
"if",
"chi",
"not",
"in",
"channels_ind",
":",
"continue",
"# Apply transformation",
"data_t",
"[",
":",
",",
"chi",
"]",
"=",
"sc",
"(",
"data_t",
"[",
":",
",",
"chi",
"]",
")",
"# Apply transformation to range",
"if",
"hasattr",
"(",
"data_t",
",",
"'_range'",
")",
"and",
"data_t",
".",
"_range",
"[",
"chi",
"]",
"is",
"not",
"None",
":",
"data_t",
".",
"_range",
"[",
"chi",
"]",
"=",
"[",
"sc",
"(",
"data_t",
".",
"_range",
"[",
"chi",
"]",
"[",
"0",
"]",
")",
",",
"sc",
"(",
"data_t",
".",
"_range",
"[",
"chi",
"]",
"[",
"1",
"]",
")",
"]",
"return",
"data_t"
] | 35.393258 | 19.707865 |
def _remove(self, telegram):
"""
Remove telegram from buffer and incomplete data preceding it. This
is easier than validating the data before adding it to the buffer.
:param str telegram:
:return:
"""
# Remove data leading up to the telegram and the telegram itself.
index = self._buffer.index(telegram) + len(telegram)
self._buffer = self._buffer[index:] | [
"def",
"_remove",
"(",
"self",
",",
"telegram",
")",
":",
"# Remove data leading up to the telegram and the telegram itself.",
"index",
"=",
"self",
".",
"_buffer",
".",
"index",
"(",
"telegram",
")",
"+",
"len",
"(",
"telegram",
")",
"self",
".",
"_buffer",
"=",
"self",
".",
"_buffer",
"[",
"index",
":",
"]"
] | 38 | 19.272727 |
def inline(self) -> str:
"""
Return inline string format of the instance
:return:
"""
return "{0}:{1}".format(self.index, ' '.join([str(p) for p in self.parameters])) | [
"def",
"inline",
"(",
"self",
")",
"->",
"str",
":",
"return",
"\"{0}:{1}\"",
".",
"format",
"(",
"self",
".",
"index",
",",
"' '",
".",
"join",
"(",
"[",
"str",
"(",
"p",
")",
"for",
"p",
"in",
"self",
".",
"parameters",
"]",
")",
")"
] | 28.714286 | 19.857143 |
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise | [
"def",
"dump_config",
"(",
"self",
")",
":",
"yaml_content",
"=",
"self",
".",
"get_merged_config",
"(",
")",
"print",
"(",
"'YAML Configuration\\n%s\\n'",
"%",
"yaml_content",
".",
"read",
"(",
")",
")",
"try",
":",
"self",
".",
"load",
"(",
")",
"print",
"(",
"'Python Configuration\\n%s\\n'",
"%",
"pretty",
"(",
"self",
".",
"yamldocs",
")",
")",
"except",
"ConfigError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'config parse error. try running with --logfile=/dev/tty\\n'",
")",
"raise"
] | 40.636364 | 18.818182 |
def to_special_value(self, value):
"""Checks if value is a special SPDX value such as
NONE, NOASSERTION or UNKNOWN if so returns proper model.
else returns value"""
if value == self.spdx_namespace.none:
return utils.SPDXNone()
elif value == self.spdx_namespace.noassertion:
return utils.NoAssert()
elif value == self.spdx_namespace.unknown:
return utils.UnKnown()
else:
return value | [
"def",
"to_special_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"self",
".",
"spdx_namespace",
".",
"none",
":",
"return",
"utils",
".",
"SPDXNone",
"(",
")",
"elif",
"value",
"==",
"self",
".",
"spdx_namespace",
".",
"noassertion",
":",
"return",
"utils",
".",
"NoAssert",
"(",
")",
"elif",
"value",
"==",
"self",
".",
"spdx_namespace",
".",
"unknown",
":",
"return",
"utils",
".",
"UnKnown",
"(",
")",
"else",
":",
"return",
"value"
] | 39.583333 | 9.833333 |
def make_ccys(db):
'''
Create the currency dictionary
'''
dfr = 4
dollar = r'\u0024'
peso = r'\u20b1'
kr = r'kr'
insert = db.insert
# G10 & SCANDI
insert('EUR', '978', 'EU', 1,
'Euro', dfr, 'EU', '30/360', 'ACT/360',
future='FE', symbol=r'\u20ac', html='€')
insert('GBP', '826', 'BP', 2,
'British Pound', dfr, 'GB', 'ACT/365', 'ACT/365',
symbol=r'\u00a3', html='£')
insert('AUD', '036', 'AD', 3,
'Australian Dollar', dfr, 'AU', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('NZD', '554', 'ND', 4,
'New-Zealand Dollar', dfr, 'NZ', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('USD', '840', 'UD', 5,
'US Dollar', 0, 'US', '30/360', 'ACT/360',
future='ED', symbol=dollar, html='$')
insert('CAD', '124', 'CD', 6,
'Canadian Dollar', dfr, 'CA', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('CHF', '756', 'SF', 7,
'Swiss Franc', dfr, 'CH', '30/360', 'ACT/360',
symbol=r'Fr', html='₣')
insert('NOK', '578', 'NK', 8,
'Norwegian Krona', dfr, 'NO', '30/360', 'ACT/360',
symbol=kr, html=kr)
insert('SEK', '752', 'SK', 9,
'Swedish Krona', dfr, 'SE', '30/360', 'ACT/360',
symbol=kr, html=kr)
insert('DKK', '208', 'DK', 10,
'Danish Krona', dfr, 'DK', '30/360', 'ACT/360',
symbol=kr, html=kr)
insert('JPY', '392', 'JY', 10000,
'Japanese Yen', 2, 'JP', 'ACT/365', 'ACT/360',
symbol=r'\u00a5', html='¥')
# ASIA
insert('CNY', '156', 'CY', 680,
'Chinese Renminbi', dfr, 'CN', 'ACT/365', 'ACT/365',
symbol=r'\u00a5', html='¥')
insert('KRW', '410', 'KW', 110000,
'South Korean won', 2, 'KR', 'ACT/365', 'ACT/365',
symbol=r'\u20a9', html='₩')
insert('SGD', '702', 'SD', 15,
'Singapore Dollar', dfr, 'SG', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('IDR', '360', 'IH', 970000,
'Indonesian Rupiah', 0, 'ID', 'ACT/360', 'ACT/360',
symbol=r'Rp', html='Rp')
insert('THB', '764', 'TB', 3300,
'Thai Baht', 2, 'TH', 'ACT/365', 'ACT/365',
symbol=r'\u0e3f', html='฿')
insert('TWD', '901', 'TD', 18,
'Taiwan Dollar', dfr, 'TW', 'ACT/365', 'ACT/365',
symbol=dollar, html='$')
insert('HKD', '344', 'HD', 19,
'Hong Kong Dollar', dfr, 'HK', 'ACT/365', 'ACT/365',
symbol=r'\u5713', html='HK$')
insert('PHP', '608', 'PP', 4770,
'Philippines Peso', dfr, 'PH', 'ACT/360', 'ACT/360',
symbol=peso, html='₱')
insert('INR', '356', 'IR', 4500,
'Indian Rupee', dfr, 'IN', 'ACT/365', 'ACT/365',
symbol=r'\u20a8', html='₨')
insert('MYR', '458', 'MR', 345,
'Malaysian Ringgit', dfr, 'MY', 'ACT/365', 'ACT/365')
insert('VND', '704', 'VD', 1700000,
'Vietnamese Dong', 0, 'VN', 'ACT/365', 'ACT/365',
symbol=r'\u20ab', html='₫')
# LATIN AMERICA
insert('BRL', '986', 'BC', 200,
'Brazilian Real', dfr, 'BR', 'BUS/252', 'BUS/252',
symbol=r'R$')
insert('PEN', '604', 'PS', 220,
'Peruvian New Sol', dfr, 'PE', 'ACT/360', 'ACT/360',
symbol=r'S/.')
insert('ARS', '032', 'AP', 301,
'Argentine Peso', dfr, 'AR', '30/360', 'ACT/360',
symbol=dollar, html='$')
insert('MXN', '484', 'MP', 1330,
'Mexican Peso', dfr, 'MX', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
insert('CLP', '152', 'CH', 54500,
'Chilean Peso', 2, 'CL', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
insert('COP', '170', 'CL', 190000,
'Colombian Peso', 2, 'CO', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
# TODO: Check towletters code and position
insert('JMD', '388', 'JD', 410,
'Jamaican Dollar', dfr, 'JM', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
# TODO: Check towletters code and position
insert('TTD', '780', 'TT', 410,
'Trinidad and Tobago Dollar', dfr, 'TT', 'ACT/360', 'ACT/360',
symbol=dollar, html='$')
# TODO: Check towletters code and position
insert('BMD', '060', 'BD', 410,
'Bermudian Dollar', dfr, 'BM',
symbol=dollar, html='$')
# EASTERN EUROPE
insert('CZK', '203', 'CK', 28,
'Czech Koruna', dfr, 'CZ', 'ACT/360', 'ACT/360',
symbol=r'\u004b\u010d')
insert('PLN', '985', 'PZ', 29,
'Polish Zloty', dfr, 'PL', 'ACT/ACT', 'ACT/365',
symbol=r'\u0050\u0142')
insert('TRY', '949', 'TY', 30,
'Turkish Lira', dfr, 'TR', 'ACT/360', 'ACT/360',
symbol=r'\u0054\u004c')
insert('HUF', '348', 'HF', 32,
'Hungarian Forint', dfr, 'HU', 'ACT/365', 'ACT/360',
symbol=r'Ft', html='Ft')
insert('RON', '946', 'RN', 34,
'Romanian Leu', dfr, 'RO', 'ACT/360', 'ACT/360')
insert('RUB', '643', 'RR', 36,
'Russian Ruble', dfr, 'RU', 'ACT/ACT', 'ACT/ACT',
symbol=r'\u0440\u0443\u0431')
# TODO: Check towletters code and position
insert('HRK', '191', 'HK', 410,
'Croatian kuna', dfr, 'HR',
symbol=r'kn')
# TODO: Check towletters code and position
insert('KZT', '398', 'KT', 410,
'Tenge', dfr, 'KZ',
symbol=r'\u20b8', html='₸')
# TODO: Check towletters code and position
insert('BGN', '975', 'BN', 410,
'Bulgarian Lev', dfr, 'BG',
symbol=r'\u043b\u0432.', html='лв')
# MIDDLE EAST & AFRICA
insert('ILS', '376', 'IS', 410,
'Israeli Shekel', dfr, 'IL', 'ACT/365', 'ACT/365',
symbol=r'\u20aa', html='₪')
# TODO: Check towletters code and position
insert('AED', '784', 'AE', 410,
'United Arab Emirates Dirham', dfr, 'AE')
# TODO: Check towletters code and position
insert('QAR', '634', 'QA', 410,
'Qatari Riyal', dfr, 'QA',
symbol=r'\ufdfc', html='﷼')
# TODO: Check towletters code and position
insert('SAR', '682', 'SR', 410,
'Saudi Riyal', dfr, 'SA',
symbol=r'\ufdfc', html='﷼')
insert('EGP', '818', 'EP', 550,
'Egyptian Pound', dfr, 'EG',
symbol=r'\u00a3', html='£')
insert('ZAR', '710', 'SA', 750,
'South African Rand', dfr, 'ZA', 'ACT/365', 'ACT/365',
symbol=r'R', html='R')
# BITCOIN
insert('XBT', '000', 'BT', -1,
'Bitcoin', 8, 'WW',
symbol=r'\u0e3f', html='฿') | [
"def",
"make_ccys",
"(",
"db",
")",
":",
"dfr",
"=",
"4",
"dollar",
"=",
"r'\\u0024'",
"peso",
"=",
"r'\\u20b1'",
"kr",
"=",
"r'kr'",
"insert",
"=",
"db",
".",
"insert",
"# G10 & SCANDI",
"insert",
"(",
"'EUR'",
",",
"'978'",
",",
"'EU'",
",",
"1",
",",
"'Euro'",
",",
"dfr",
",",
"'EU'",
",",
"'30/360'",
",",
"'ACT/360'",
",",
"future",
"=",
"'FE'",
",",
"symbol",
"=",
"r'\\u20ac'",
",",
"html",
"=",
"'€'",
")",
"insert",
"(",
"'GBP'",
",",
"'826'",
",",
"'BP'",
",",
"2",
",",
"'British Pound'",
",",
"dfr",
",",
"'GB'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u00a3'",
",",
"html",
"=",
"'£'",
")",
"insert",
"(",
"'AUD'",
",",
"'036'",
",",
"'AD'",
",",
"3",
",",
"'Australian Dollar'",
",",
"dfr",
",",
"'AU'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'NZD'",
",",
"'554'",
",",
"'ND'",
",",
"4",
",",
"'New-Zealand Dollar'",
",",
"dfr",
",",
"'NZ'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'USD'",
",",
"'840'",
",",
"'UD'",
",",
"5",
",",
"'US Dollar'",
",",
"0",
",",
"'US'",
",",
"'30/360'",
",",
"'ACT/360'",
",",
"future",
"=",
"'ED'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'CAD'",
",",
"'124'",
",",
"'CD'",
",",
"6",
",",
"'Canadian Dollar'",
",",
"dfr",
",",
"'CA'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'CHF'",
",",
"'756'",
",",
"'SF'",
",",
"7",
",",
"'Swiss Franc'",
",",
"dfr",
",",
"'CH'",
",",
"'30/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"r'Fr'",
",",
"html",
"=",
"'₣'",
")",
"insert",
"(",
"'NOK'",
",",
"'578'",
",",
"'NK'",
",",
"8",
",",
"'Norwegian Krona'",
",",
"dfr",
",",
"'NO'",
",",
"'30/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"kr",
",",
"html",
"=",
"kr",
")",
"insert",
"(",
"'SEK'",
",",
"'752'",
",",
"'SK'",
",",
"9",
",",
"'Swedish Krona'",
",",
"dfr",
",",
"'SE'",
",",
"'30/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"kr",
",",
"html",
"=",
"kr",
")",
"insert",
"(",
"'DKK'",
",",
"'208'",
",",
"'DK'",
",",
"10",
",",
"'Danish Krona'",
",",
"dfr",
",",
"'DK'",
",",
"'30/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"kr",
",",
"html",
"=",
"kr",
")",
"insert",
"(",
"'JPY'",
",",
"'392'",
",",
"'JY'",
",",
"10000",
",",
"'Japanese Yen'",
",",
"2",
",",
"'JP'",
",",
"'ACT/365'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"r'\\u00a5'",
",",
"html",
"=",
"'¥'",
")",
"# ASIA",
"insert",
"(",
"'CNY'",
",",
"'156'",
",",
"'CY'",
",",
"680",
",",
"'Chinese Renminbi'",
",",
"dfr",
",",
"'CN'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u00a5'",
",",
"html",
"=",
"'¥'",
")",
"insert",
"(",
"'KRW'",
",",
"'410'",
",",
"'KW'",
",",
"110000",
",",
"'South Korean won'",
",",
"2",
",",
"'KR'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u20a9'",
",",
"html",
"=",
"'₩'",
")",
"insert",
"(",
"'SGD'",
",",
"'702'",
",",
"'SD'",
",",
"15",
",",
"'Singapore Dollar'",
",",
"dfr",
",",
"'SG'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'IDR'",
",",
"'360'",
",",
"'IH'",
",",
"970000",
",",
"'Indonesian Rupiah'",
",",
"0",
",",
"'ID'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"r'Rp'",
",",
"html",
"=",
"'Rp'",
")",
"insert",
"(",
"'THB'",
",",
"'764'",
",",
"'TB'",
",",
"3300",
",",
"'Thai Baht'",
",",
"2",
",",
"'TH'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u0e3f'",
",",
"html",
"=",
"'฿'",
")",
"insert",
"(",
"'TWD'",
",",
"'901'",
",",
"'TD'",
",",
"18",
",",
"'Taiwan Dollar'",
",",
"dfr",
",",
"'TW'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'HKD'",
",",
"'344'",
",",
"'HD'",
",",
"19",
",",
"'Hong Kong Dollar'",
",",
"dfr",
",",
"'HK'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u5713'",
",",
"html",
"=",
"'HK$'",
")",
"insert",
"(",
"'PHP'",
",",
"'608'",
",",
"'PP'",
",",
"4770",
",",
"'Philippines Peso'",
",",
"dfr",
",",
"'PH'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"peso",
",",
"html",
"=",
"'₱'",
")",
"insert",
"(",
"'INR'",
",",
"'356'",
",",
"'IR'",
",",
"4500",
",",
"'Indian Rupee'",
",",
"dfr",
",",
"'IN'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u20a8'",
",",
"html",
"=",
"'₨'",
")",
"insert",
"(",
"'MYR'",
",",
"'458'",
",",
"'MR'",
",",
"345",
",",
"'Malaysian Ringgit'",
",",
"dfr",
",",
"'MY'",
",",
"'ACT/365'",
",",
"'ACT/365'",
")",
"insert",
"(",
"'VND'",
",",
"'704'",
",",
"'VD'",
",",
"1700000",
",",
"'Vietnamese Dong'",
",",
"0",
",",
"'VN'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u20ab'",
",",
"html",
"=",
"'₫'",
")",
"# LATIN AMERICA",
"insert",
"(",
"'BRL'",
",",
"'986'",
",",
"'BC'",
",",
"200",
",",
"'Brazilian Real'",
",",
"dfr",
",",
"'BR'",
",",
"'BUS/252'",
",",
"'BUS/252'",
",",
"symbol",
"=",
"r'R$'",
")",
"insert",
"(",
"'PEN'",
",",
"'604'",
",",
"'PS'",
",",
"220",
",",
"'Peruvian New Sol'",
",",
"dfr",
",",
"'PE'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"r'S/.'",
")",
"insert",
"(",
"'ARS'",
",",
"'032'",
",",
"'AP'",
",",
"301",
",",
"'Argentine Peso'",
",",
"dfr",
",",
"'AR'",
",",
"'30/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'MXN'",
",",
"'484'",
",",
"'MP'",
",",
"1330",
",",
"'Mexican Peso'",
",",
"dfr",
",",
"'MX'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'CLP'",
",",
"'152'",
",",
"'CH'",
",",
"54500",
",",
"'Chilean Peso'",
",",
"2",
",",
"'CL'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"insert",
"(",
"'COP'",
",",
"'170'",
",",
"'CL'",
",",
"190000",
",",
"'Colombian Peso'",
",",
"2",
",",
"'CO'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'JMD'",
",",
"'388'",
",",
"'JD'",
",",
"410",
",",
"'Jamaican Dollar'",
",",
"dfr",
",",
"'JM'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'TTD'",
",",
"'780'",
",",
"'TT'",
",",
"410",
",",
"'Trinidad and Tobago Dollar'",
",",
"dfr",
",",
"'TT'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'BMD'",
",",
"'060'",
",",
"'BD'",
",",
"410",
",",
"'Bermudian Dollar'",
",",
"dfr",
",",
"'BM'",
",",
"symbol",
"=",
"dollar",
",",
"html",
"=",
"'$'",
")",
"# EASTERN EUROPE",
"insert",
"(",
"'CZK'",
",",
"'203'",
",",
"'CK'",
",",
"28",
",",
"'Czech Koruna'",
",",
"dfr",
",",
"'CZ'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"r'\\u004b\\u010d'",
")",
"insert",
"(",
"'PLN'",
",",
"'985'",
",",
"'PZ'",
",",
"29",
",",
"'Polish Zloty'",
",",
"dfr",
",",
"'PL'",
",",
"'ACT/ACT'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u0050\\u0142'",
")",
"insert",
"(",
"'TRY'",
",",
"'949'",
",",
"'TY'",
",",
"30",
",",
"'Turkish Lira'",
",",
"dfr",
",",
"'TR'",
",",
"'ACT/360'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"r'\\u0054\\u004c'",
")",
"insert",
"(",
"'HUF'",
",",
"'348'",
",",
"'HF'",
",",
"32",
",",
"'Hungarian Forint'",
",",
"dfr",
",",
"'HU'",
",",
"'ACT/365'",
",",
"'ACT/360'",
",",
"symbol",
"=",
"r'Ft'",
",",
"html",
"=",
"'Ft'",
")",
"insert",
"(",
"'RON'",
",",
"'946'",
",",
"'RN'",
",",
"34",
",",
"'Romanian Leu'",
",",
"dfr",
",",
"'RO'",
",",
"'ACT/360'",
",",
"'ACT/360'",
")",
"insert",
"(",
"'RUB'",
",",
"'643'",
",",
"'RR'",
",",
"36",
",",
"'Russian Ruble'",
",",
"dfr",
",",
"'RU'",
",",
"'ACT/ACT'",
",",
"'ACT/ACT'",
",",
"symbol",
"=",
"r'\\u0440\\u0443\\u0431'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'HRK'",
",",
"'191'",
",",
"'HK'",
",",
"410",
",",
"'Croatian kuna'",
",",
"dfr",
",",
"'HR'",
",",
"symbol",
"=",
"r'kn'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'KZT'",
",",
"'398'",
",",
"'KT'",
",",
"410",
",",
"'Tenge'",
",",
"dfr",
",",
"'KZ'",
",",
"symbol",
"=",
"r'\\u20b8'",
",",
"html",
"=",
"'₸'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'BGN'",
",",
"'975'",
",",
"'BN'",
",",
"410",
",",
"'Bulgarian Lev'",
",",
"dfr",
",",
"'BG'",
",",
"symbol",
"=",
"r'\\u043b\\u0432.'",
",",
"html",
"=",
"'лв'",
")",
"# MIDDLE EAST & AFRICA",
"insert",
"(",
"'ILS'",
",",
"'376'",
",",
"'IS'",
",",
"410",
",",
"'Israeli Shekel'",
",",
"dfr",
",",
"'IL'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'\\u20aa'",
",",
"html",
"=",
"'₪'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'AED'",
",",
"'784'",
",",
"'AE'",
",",
"410",
",",
"'United Arab Emirates Dirham'",
",",
"dfr",
",",
"'AE'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'QAR'",
",",
"'634'",
",",
"'QA'",
",",
"410",
",",
"'Qatari Riyal'",
",",
"dfr",
",",
"'QA'",
",",
"symbol",
"=",
"r'\\ufdfc'",
",",
"html",
"=",
"'﷼'",
")",
"# TODO: Check towletters code and position",
"insert",
"(",
"'SAR'",
",",
"'682'",
",",
"'SR'",
",",
"410",
",",
"'Saudi Riyal'",
",",
"dfr",
",",
"'SA'",
",",
"symbol",
"=",
"r'\\ufdfc'",
",",
"html",
"=",
"'﷼'",
")",
"insert",
"(",
"'EGP'",
",",
"'818'",
",",
"'EP'",
",",
"550",
",",
"'Egyptian Pound'",
",",
"dfr",
",",
"'EG'",
",",
"symbol",
"=",
"r'\\u00a3'",
",",
"html",
"=",
"'£'",
")",
"insert",
"(",
"'ZAR'",
",",
"'710'",
",",
"'SA'",
",",
"750",
",",
"'South African Rand'",
",",
"dfr",
",",
"'ZA'",
",",
"'ACT/365'",
",",
"'ACT/365'",
",",
"symbol",
"=",
"r'R'",
",",
"html",
"=",
"'R'",
")",
"# BITCOIN",
"insert",
"(",
"'XBT'",
",",
"'000'",
",",
"'BT'",
",",
"-",
"1",
",",
"'Bitcoin'",
",",
"8",
",",
"'WW'",
",",
"symbol",
"=",
"r'\\u0e3f'",
",",
"html",
"=",
"'฿'",
")"
] | 39.958333 | 11.446429 |
def catalogue_mt_filter(self, mt_table, flag=None):
"""
Filter the catalogue using a magnitude-time table. The table has
two columns and n-rows.
:param nump.ndarray mt_table:
Magnitude time table with n-rows where column 1 is year and column
2 is magnitude
"""
if flag is None:
# No flag defined, therefore all events are initially valid
flag = np.ones(self.get_number_events(), dtype=bool)
for comp_val in mt_table:
id0 = np.logical_and(self.data['year'].astype(float) < comp_val[0],
self.data['magnitude'] < comp_val[1])
print(id0)
flag[id0] = False
if not np.all(flag):
self.purge_catalogue(flag) | [
"def",
"catalogue_mt_filter",
"(",
"self",
",",
"mt_table",
",",
"flag",
"=",
"None",
")",
":",
"if",
"flag",
"is",
"None",
":",
"# No flag defined, therefore all events are initially valid",
"flag",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"get_number_events",
"(",
")",
",",
"dtype",
"=",
"bool",
")",
"for",
"comp_val",
"in",
"mt_table",
":",
"id0",
"=",
"np",
".",
"logical_and",
"(",
"self",
".",
"data",
"[",
"'year'",
"]",
".",
"astype",
"(",
"float",
")",
"<",
"comp_val",
"[",
"0",
"]",
",",
"self",
".",
"data",
"[",
"'magnitude'",
"]",
"<",
"comp_val",
"[",
"1",
"]",
")",
"print",
"(",
"id0",
")",
"flag",
"[",
"id0",
"]",
"=",
"False",
"if",
"not",
"np",
".",
"all",
"(",
"flag",
")",
":",
"self",
".",
"purge_catalogue",
"(",
"flag",
")"
] | 36.904762 | 19.857143 |
def _get_id_from_username(self, username):
"""Looks up a username's id
:param string username: Username to lookup
:returns: The id that matches username.
"""
_mask = "mask[id, username]"
_filter = {'users': {'username': utils.query_filter(username)}}
user = self.list_users(_mask, _filter)
if len(user) == 1:
return [user[0]['id']]
elif len(user) > 1:
raise exceptions.SoftLayerError("Multiple users found with the name: %s" % username)
else:
raise exceptions.SoftLayerError("Unable to find user id for %s" % username) | [
"def",
"_get_id_from_username",
"(",
"self",
",",
"username",
")",
":",
"_mask",
"=",
"\"mask[id, username]\"",
"_filter",
"=",
"{",
"'users'",
":",
"{",
"'username'",
":",
"utils",
".",
"query_filter",
"(",
"username",
")",
"}",
"}",
"user",
"=",
"self",
".",
"list_users",
"(",
"_mask",
",",
"_filter",
")",
"if",
"len",
"(",
"user",
")",
"==",
"1",
":",
"return",
"[",
"user",
"[",
"0",
"]",
"[",
"'id'",
"]",
"]",
"elif",
"len",
"(",
"user",
")",
">",
"1",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"Multiple users found with the name: %s\"",
"%",
"username",
")",
"else",
":",
"raise",
"exceptions",
".",
"SoftLayerError",
"(",
"\"Unable to find user id for %s\"",
"%",
"username",
")"
] | 41.4 | 17.533333 |
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Port_channel = ET.SubElement(interfacetype, "Port-channel")
Port_channel = ET.SubElement(Port_channel, "Port-channel")
Port_channel.text = kwargs.pop('Port_channel')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"hide_arp_holder",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"hide-arp-holder\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-arp\"",
")",
"arp_entry",
"=",
"ET",
".",
"SubElement",
"(",
"hide_arp_holder",
",",
"\"arp-entry\"",
")",
"arp_ip_address_key",
"=",
"ET",
".",
"SubElement",
"(",
"arp_entry",
",",
"\"arp-ip-address\"",
")",
"arp_ip_address_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'arp_ip_address'",
")",
"interfacetype",
"=",
"ET",
".",
"SubElement",
"(",
"arp_entry",
",",
"\"interfacetype\"",
")",
"Port_channel",
"=",
"ET",
".",
"SubElement",
"(",
"interfacetype",
",",
"\"Port-channel\"",
")",
"Port_channel",
"=",
"ET",
".",
"SubElement",
"(",
"Port_channel",
",",
"\"Port-channel\"",
")",
"Port_channel",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'Port_channel'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 53.866667 | 23.4 |
def _get_merged_params_string(self):
"""Returns the merged nextflow params string from a dictionary object.
The params dict should be a set of key:value pairs with the
parameter name, and the default parameter value::
self.params = {
"genomeSize": 2.1,
"minCoverage": 15
}
The values are then added to the string as they are. For instance,
a ``2.1`` float will appear as ``param = 2.1`` and a
``"'teste'" string will appear as ``param = 'teste'`` (Note the
string).
Identical parameters in multiple processes will be merged into the same
param.
Returns
-------
str
Nextflow params configuration string
"""
params_temp = {}
for p in self.processes:
logger.debug("[{}] Adding parameters: {}".format(p.template,
p.params))
for param, val in p.params.items():
params_temp[param] = val["default"]
config_str = "\n\t" + "\n\t".join([
"{} = {}".format(param, val) for param, val in params_temp.items()
])
return config_str | [
"def",
"_get_merged_params_string",
"(",
"self",
")",
":",
"params_temp",
"=",
"{",
"}",
"for",
"p",
"in",
"self",
".",
"processes",
":",
"logger",
".",
"debug",
"(",
"\"[{}] Adding parameters: {}\"",
".",
"format",
"(",
"p",
".",
"template",
",",
"p",
".",
"params",
")",
")",
"for",
"param",
",",
"val",
"in",
"p",
".",
"params",
".",
"items",
"(",
")",
":",
"params_temp",
"[",
"param",
"]",
"=",
"val",
"[",
"\"default\"",
"]",
"config_str",
"=",
"\"\\n\\t\"",
"+",
"\"\\n\\t\"",
".",
"join",
"(",
"[",
"\"{} = {}\"",
".",
"format",
"(",
"param",
",",
"val",
")",
"for",
"param",
",",
"val",
"in",
"params_temp",
".",
"items",
"(",
")",
"]",
")",
"return",
"config_str"
] | 30.3 | 24.825 |
def get_requirements_file_from_url(url):
"""fetches the requiremets from the url"""
response = requests.get(url)
if response.status_code == 200:
return StringIO(response.text)
else:
return StringIO("") | [
"def",
"get_requirements_file_from_url",
"(",
"url",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"StringIO",
"(",
"response",
".",
"text",
")",
"else",
":",
"return",
"StringIO",
"(",
"\"\"",
")"
] | 28.375 | 12.375 |
def merge_featurecollection(*jsons):
"""
merge features into one featurecollection
Keyword arguments:
jsons -- jsons object list
return geojson featurecollection
"""
features = []
for json in jsons:
if json['type'] == 'FeatureCollection':
for feature in json['features']:
features.append(feature)
return {"type":'FeatureCollection', "features":features} | [
"def",
"merge_featurecollection",
"(",
"*",
"jsons",
")",
":",
"features",
"=",
"[",
"]",
"for",
"json",
"in",
"jsons",
":",
"if",
"json",
"[",
"'type'",
"]",
"==",
"'FeatureCollection'",
":",
"for",
"feature",
"in",
"json",
"[",
"'features'",
"]",
":",
"features",
".",
"append",
"(",
"feature",
")",
"return",
"{",
"\"type\"",
":",
"'FeatureCollection'",
",",
"\"features\"",
":",
"features",
"}"
] | 27.733333 | 12.666667 |
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rderiv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2013-09-08 - Written - Bovy (IAS)
"""
r2= R**2.+z**2.
rb= nu.sqrt(r2+self.b2)
return -(-self.b**3.-self.b*z**2.+(2.*R**2.-z**2.-self.b**2.)*rb)/\
rb**3./(self.b+rb)**3. | [
"def",
"_R2deriv",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"r2",
"=",
"R",
"**",
"2.",
"+",
"z",
"**",
"2.",
"rb",
"=",
"nu",
".",
"sqrt",
"(",
"r2",
"+",
"self",
".",
"b2",
")",
"return",
"-",
"(",
"-",
"self",
".",
"b",
"**",
"3.",
"-",
"self",
".",
"b",
"*",
"z",
"**",
"2.",
"+",
"(",
"2.",
"*",
"R",
"**",
"2.",
"-",
"z",
"**",
"2.",
"-",
"self",
".",
"b",
"**",
"2.",
")",
"*",
"rb",
")",
"/",
"rb",
"**",
"3.",
"/",
"(",
"self",
".",
"b",
"+",
"rb",
")",
"**",
"3."
] | 29.15 | 15.35 |
def probConn (self, preCellsTags, postCellsTags, connParam):
from .. import sim
''' Generates connections between all pre and post-syn cells based on probability values'''
if sim.cfg.verbose: print('Generating set of probabilistic connections (rule: %s) ...' % (connParam['label']))
allRands = self.generateRandsPrePost(preCellsTags, postCellsTags)
# get list of params that have a lambda function
paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam]
# copy the vars into args immediately and work out which keys are associated with lambda functions only once per method
funcKeys = {}
for paramStrFunc in paramsStrFunc:
connParam[paramStrFunc + 'Args'] = connParam[paramStrFunc + 'Vars'].copy()
funcKeys[paramStrFunc] = [key for key in connParam[paramStrFunc + 'Vars'] if callable(connParam[paramStrFunc + 'Vars'][key])]
# probabilistic connections with disynapticBias (deprecated)
if isinstance(connParam.get('disynapticBias', None), Number):
allPreGids = sim._gatherAllCellConnPreGids()
prePreGids = {gid: allPreGids[gid] for gid in preCellsTags}
postPreGids = {gid: allPreGids[gid] for gid in postCellsTags}
probMatrix = {(preCellGid,postCellGid): connParam['probabilityFunc'][preCellGid,postCellGid] if 'probabilityFunc' in connParam else connParam['probability']
for postCellGid,postCellTags in postCellsTags.items() # for each postsyn cell
for preCellGid, preCellTags in preCellsTags.items() # for each presyn cell
if postCellGid in self.gid2lid} # check if postsyn is in this node
connGids = self._disynapticBiasProb2(probMatrix, allRands, connParam['disynapticBias'], prePreGids, postPreGids)
for preCellGid, postCellGid in connGids:
for paramStrFunc in paramsStrFunc: # call lambda functions to get weight func args
connParam[paramStrFunc+'Args'] = {k:v if isinstance(v, Number) else v(preCellsTags[preCellGid],postCellsTags[postCellGid]) for k,v in connParam[paramStrFunc+'Vars'].items()}
self._addCellConn(connParam, preCellGid, postCellGid) # add connection
# standard probabilistic conenctions
else:
# calculate the conn preGids of the each pre and post cell
# for postCellGid,postCellTags in sorted(postCellsTags.items()): # for each postsyn cell
for postCellGid,postCellTags in postCellsTags.items(): # for each postsyn cell # for each postsyn cell
if postCellGid in self.gid2lid: # check if postsyn is in this node
for preCellGid, preCellTags in preCellsTags.items(): # for each presyn cell
probability = connParam['probabilityFunc'][preCellGid,postCellGid] if 'probabilityFunc' in connParam else connParam['probability']
if probability >= allRands[preCellGid,postCellGid]:
for paramStrFunc in paramsStrFunc: # call lambda functions to get weight func args
# update the relevant FuncArgs dict where lambda functions are known to exist in the corresponding FuncVars dict
for funcKey in funcKeys[paramStrFunc]:
connParam[paramStrFunc + 'Args'][funcKey] = connParam[paramStrFunc + 'Vars'][funcKey](preCellTags, postCellTags)
# connParam[paramStrFunc+'Args'] = {k:v if isinstance(v, Number) else v(preCellTags,postCellTags) for k,v in connParam[paramStrFunc+'Vars'].items()}
self._addCellConn(connParam, preCellGid, postCellGid) | [
"def",
"probConn",
"(",
"self",
",",
"preCellsTags",
",",
"postCellsTags",
",",
"connParam",
")",
":",
"from",
".",
".",
"import",
"sim",
"if",
"sim",
".",
"cfg",
".",
"verbose",
":",
"print",
"(",
"'Generating set of probabilistic connections (rule: %s) ...'",
"%",
"(",
"connParam",
"[",
"'label'",
"]",
")",
")",
"allRands",
"=",
"self",
".",
"generateRandsPrePost",
"(",
"preCellsTags",
",",
"postCellsTags",
")",
"# get list of params that have a lambda function",
"paramsStrFunc",
"=",
"[",
"param",
"for",
"param",
"in",
"[",
"p",
"+",
"'Func'",
"for",
"p",
"in",
"self",
".",
"connStringFuncParams",
"]",
"if",
"param",
"in",
"connParam",
"]",
"# copy the vars into args immediately and work out which keys are associated with lambda functions only once per method",
"funcKeys",
"=",
"{",
"}",
"for",
"paramStrFunc",
"in",
"paramsStrFunc",
":",
"connParam",
"[",
"paramStrFunc",
"+",
"'Args'",
"]",
"=",
"connParam",
"[",
"paramStrFunc",
"+",
"'Vars'",
"]",
".",
"copy",
"(",
")",
"funcKeys",
"[",
"paramStrFunc",
"]",
"=",
"[",
"key",
"for",
"key",
"in",
"connParam",
"[",
"paramStrFunc",
"+",
"'Vars'",
"]",
"if",
"callable",
"(",
"connParam",
"[",
"paramStrFunc",
"+",
"'Vars'",
"]",
"[",
"key",
"]",
")",
"]",
"# probabilistic connections with disynapticBias (deprecated)",
"if",
"isinstance",
"(",
"connParam",
".",
"get",
"(",
"'disynapticBias'",
",",
"None",
")",
",",
"Number",
")",
":",
"allPreGids",
"=",
"sim",
".",
"_gatherAllCellConnPreGids",
"(",
")",
"prePreGids",
"=",
"{",
"gid",
":",
"allPreGids",
"[",
"gid",
"]",
"for",
"gid",
"in",
"preCellsTags",
"}",
"postPreGids",
"=",
"{",
"gid",
":",
"allPreGids",
"[",
"gid",
"]",
"for",
"gid",
"in",
"postCellsTags",
"}",
"probMatrix",
"=",
"{",
"(",
"preCellGid",
",",
"postCellGid",
")",
":",
"connParam",
"[",
"'probabilityFunc'",
"]",
"[",
"preCellGid",
",",
"postCellGid",
"]",
"if",
"'probabilityFunc'",
"in",
"connParam",
"else",
"connParam",
"[",
"'probability'",
"]",
"for",
"postCellGid",
",",
"postCellTags",
"in",
"postCellsTags",
".",
"items",
"(",
")",
"# for each postsyn cell",
"for",
"preCellGid",
",",
"preCellTags",
"in",
"preCellsTags",
".",
"items",
"(",
")",
"# for each presyn cell",
"if",
"postCellGid",
"in",
"self",
".",
"gid2lid",
"}",
"# check if postsyn is in this node",
"connGids",
"=",
"self",
".",
"_disynapticBiasProb2",
"(",
"probMatrix",
",",
"allRands",
",",
"connParam",
"[",
"'disynapticBias'",
"]",
",",
"prePreGids",
",",
"postPreGids",
")",
"for",
"preCellGid",
",",
"postCellGid",
"in",
"connGids",
":",
"for",
"paramStrFunc",
"in",
"paramsStrFunc",
":",
"# call lambda functions to get weight func args",
"connParam",
"[",
"paramStrFunc",
"+",
"'Args'",
"]",
"=",
"{",
"k",
":",
"v",
"if",
"isinstance",
"(",
"v",
",",
"Number",
")",
"else",
"v",
"(",
"preCellsTags",
"[",
"preCellGid",
"]",
",",
"postCellsTags",
"[",
"postCellGid",
"]",
")",
"for",
"k",
",",
"v",
"in",
"connParam",
"[",
"paramStrFunc",
"+",
"'Vars'",
"]",
".",
"items",
"(",
")",
"}",
"self",
".",
"_addCellConn",
"(",
"connParam",
",",
"preCellGid",
",",
"postCellGid",
")",
"# add connection",
"# standard probabilistic conenctions ",
"else",
":",
"# calculate the conn preGids of the each pre and post cell",
"# for postCellGid,postCellTags in sorted(postCellsTags.items()): # for each postsyn cell",
"for",
"postCellGid",
",",
"postCellTags",
"in",
"postCellsTags",
".",
"items",
"(",
")",
":",
"# for each postsyn cell # for each postsyn cell",
"if",
"postCellGid",
"in",
"self",
".",
"gid2lid",
":",
"# check if postsyn is in this node",
"for",
"preCellGid",
",",
"preCellTags",
"in",
"preCellsTags",
".",
"items",
"(",
")",
":",
"# for each presyn cell",
"probability",
"=",
"connParam",
"[",
"'probabilityFunc'",
"]",
"[",
"preCellGid",
",",
"postCellGid",
"]",
"if",
"'probabilityFunc'",
"in",
"connParam",
"else",
"connParam",
"[",
"'probability'",
"]",
"if",
"probability",
">=",
"allRands",
"[",
"preCellGid",
",",
"postCellGid",
"]",
":",
"for",
"paramStrFunc",
"in",
"paramsStrFunc",
":",
"# call lambda functions to get weight func args",
"# update the relevant FuncArgs dict where lambda functions are known to exist in the corresponding FuncVars dict",
"for",
"funcKey",
"in",
"funcKeys",
"[",
"paramStrFunc",
"]",
":",
"connParam",
"[",
"paramStrFunc",
"+",
"'Args'",
"]",
"[",
"funcKey",
"]",
"=",
"connParam",
"[",
"paramStrFunc",
"+",
"'Vars'",
"]",
"[",
"funcKey",
"]",
"(",
"preCellTags",
",",
"postCellTags",
")",
"# connParam[paramStrFunc+'Args'] = {k:v if isinstance(v, Number) else v(preCellTags,postCellTags) for k,v in connParam[paramStrFunc+'Vars'].items()}",
"self",
".",
"_addCellConn",
"(",
"connParam",
",",
"preCellGid",
",",
"postCellGid",
")"
] | 76.020408 | 51.44898 |
def set_schedule(self, schedule):
    """Apply a schedule dict (as produced by `auto`) to this instance.

    Copies the annealing parameters onto the object: ``Tmax``/``Tmin``
    are stored as-is, while ``steps``/``updates`` are coerced to int.
    """
    tmax = schedule['tmax']
    tmin = schedule['tmin']
    self.Tmax = tmax
    self.Tmin = tmin
    # step/update counts must be integral
    self.steps = int(schedule['steps'])
    self.updates = int(schedule['updates'])
"def",
"set_schedule",
"(",
"self",
",",
"schedule",
")",
":",
"self",
".",
"Tmax",
"=",
"schedule",
"[",
"'tmax'",
"]",
"self",
".",
"Tmin",
"=",
"schedule",
"[",
"'tmin'",
"]",
"self",
".",
"steps",
"=",
"int",
"(",
"schedule",
"[",
"'steps'",
"]",
")",
"self",
".",
"updates",
"=",
"int",
"(",
"schedule",
"[",
"'updates'",
"]",
")"
] | 38.428571 | 3.571429 |
def cumulative_statistics(self):
    """
    Access the cumulative_statistics

    :returns: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsList
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsList
    """
    # Lazily construct the sub-resource list on first access and memoize it.
    if self._cumulative_statistics is None:
        workspace_sid = self._solution['workspace_sid']
        self._cumulative_statistics = WorkersCumulativeStatisticsList(
            self._version,
            workspace_sid=workspace_sid,
        )
    return self._cumulative_statistics
"def",
"cumulative_statistics",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cumulative_statistics",
"is",
"None",
":",
"self",
".",
"_cumulative_statistics",
"=",
"WorkersCumulativeStatisticsList",
"(",
"self",
".",
"_version",
",",
"workspace_sid",
"=",
"self",
".",
"_solution",
"[",
"'workspace_sid'",
"]",
",",
")",
"return",
"self",
".",
"_cumulative_statistics"
] | 46.461538 | 24 |
def find_cards(self, source=None, **filters):
    """
    Generate a card pool with all cards matching specified filters
    """
    # Explicit keyword filters win; otherwise fall back to the
    # instance-level defaults. Work on a copy so neither is mutated.
    new_filters = filters.copy() if filters else self.filters.copy()

    # Resolve any lazily-computed filter values against the source.
    for key, value in new_filters.items():
        if isinstance(value, LazyValue):
            new_filters[key] = value.evaluate(source)

    from .. import cards
    return cards.filter(**new_filters)
return cards.filter(**new_filters) | [
"def",
"find_cards",
"(",
"self",
",",
"source",
"=",
"None",
",",
"*",
"*",
"filters",
")",
":",
"if",
"not",
"filters",
":",
"new_filters",
"=",
"self",
".",
"filters",
".",
"copy",
"(",
")",
"else",
":",
"new_filters",
"=",
"filters",
".",
"copy",
"(",
")",
"for",
"k",
",",
"v",
"in",
"new_filters",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"LazyValue",
")",
":",
"new_filters",
"[",
"k",
"]",
"=",
"v",
".",
"evaluate",
"(",
"source",
")",
"from",
".",
".",
"import",
"cards",
"return",
"cards",
".",
"filter",
"(",
"*",
"*",
"new_filters",
")"
] | 24.8 | 14.4 |
async def dist(self, mesg):
    '''
    Distribute an existing event tuple.

    Args:
        mesg ((str,dict)): An event tuple.

    Example:
        await base.dist( ('foo',{'bar':'baz'}) )
    '''
    # Fini'd bases silently drop events.
    if self.isfini:
        return ()

    retn = []

    # Handlers registered for this specific event name.
    for func in self._syn_funcs.get(mesg[0], ()):
        try:
            retn.append(await s_coro.ornot(func, mesg))
        except asyncio.CancelledError:
            raise
        except Exception:
            # One failing handler must not stop event distribution.
            logger.exception('base %s error with mesg %s', self, mesg)

    # Link callbacks receive every event, regardless of name.
    for link in self._syn_links:
        try:
            retn.append(await link(mesg))
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception('base %s error with mesg %s', self, mesg)

    return retn
"async",
"def",
"dist",
"(",
"self",
",",
"mesg",
")",
":",
"if",
"self",
".",
"isfini",
":",
"return",
"(",
")",
"ret",
"=",
"[",
"]",
"for",
"func",
"in",
"self",
".",
"_syn_funcs",
".",
"get",
"(",
"mesg",
"[",
"0",
"]",
",",
"(",
")",
")",
":",
"try",
":",
"ret",
".",
"append",
"(",
"await",
"s_coro",
".",
"ornot",
"(",
"func",
",",
"mesg",
")",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"raise",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"'base %s error with mesg %s'",
",",
"self",
",",
"mesg",
")",
"for",
"func",
"in",
"self",
".",
"_syn_links",
":",
"try",
":",
"ret",
".",
"append",
"(",
"await",
"func",
"(",
"mesg",
")",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"raise",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"'base %s error with mesg %s'",
",",
"self",
",",
"mesg",
")",
"return",
"ret"
] | 25.088235 | 22.5 |
def track_field(field):
    """
    Returns whether the given field should be tracked by Auditlog.

    Untracked fields are many-to-many relations and relations to the Auditlog LogEntry model.

    :param field: The field to check.
    :type field: Field
    :return: Whether the given field should be tracked.
    :rtype: bool
    """
    from auditlog.models import LogEntry

    # Many-to-many relations are never tracked.
    if field.many_to_many:
        return False

    # Relations pointing at LogEntry itself are skipped (modern Django API).
    remote = getattr(field, 'remote_field', None)
    if remote is not None and remote.model == LogEntry:
        return False

    # Django 1.8 fallback API for the same check.
    rel = getattr(field, 'rel', None)
    if rel is not None and rel.to == LogEntry:
        return False

    return True
"def",
"track_field",
"(",
"field",
")",
":",
"from",
"auditlog",
".",
"models",
"import",
"LogEntry",
"# Do not track many to many relations",
"if",
"field",
".",
"many_to_many",
":",
"return",
"False",
"# Do not track relations to LogEntry",
"if",
"getattr",
"(",
"field",
",",
"'remote_field'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"field",
".",
"remote_field",
".",
"model",
"==",
"LogEntry",
":",
"return",
"False",
"# 1.8 check",
"elif",
"getattr",
"(",
"field",
",",
"'rel'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"field",
".",
"rel",
".",
"to",
"==",
"LogEntry",
":",
"return",
"False",
"return",
"True"
] | 29.52 | 23.04 |
def mark(self, channel_name, ts):
    """ https://api.slack.com/methods/channels.mark
    """
    # Slack's API wants the channel id, not its human-readable name.
    params = self.params
    params['channel'] = self.get_channel_id(channel_name)
    params['ts'] = ts
    endpoint = FromUrl('https://slack.com/api/channels.mark', self._requests)
    return endpoint(data=params).post()
"def",
"mark",
"(",
"self",
",",
"channel_name",
",",
"ts",
")",
":",
"channel_id",
"=",
"self",
".",
"get_channel_id",
"(",
"channel_name",
")",
"self",
".",
"params",
".",
"update",
"(",
"{",
"'channel'",
":",
"channel_id",
",",
"'ts'",
":",
"ts",
",",
"}",
")",
"return",
"FromUrl",
"(",
"'https://slack.com/api/channels.mark'",
",",
"self",
".",
"_requests",
")",
"(",
"data",
"=",
"self",
".",
"params",
")",
".",
"post",
"(",
")"
] | 39.888889 | 15.444444 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.