text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def destroy(self):
""" A reimplemented destructor that destroys the layout widget.
"""
layout = self.layout
if layout is not None:
layout.removeFromSuperview()
self.layout = None
super(UiKitViewGroup, self).destroy() | [
"def",
"destroy",
"(",
"self",
")",
":",
"layout",
"=",
"self",
".",
"layout",
"if",
"layout",
"is",
"not",
"None",
":",
"layout",
".",
"removeFromSuperview",
"(",
")",
"self",
".",
"layout",
"=",
"None",
"super",
"(",
"UiKitViewGroup",
",",
"self",
")",
".",
"destroy",
"(",
")"
] | 30.333333 | 11 |
def _get_gcloud_records(self, gcloud_zone, page_token=None):
""" Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
"""
gcloud_iterator = gcloud_zone.list_resource_record_sets(
page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record
# This is to get results which may be on a "paged" page.
# (if more than max_results) entries.
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(
gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record | [
"def",
"_get_gcloud_records",
"(",
"self",
",",
"gcloud_zone",
",",
"page_token",
"=",
"None",
")",
":",
"gcloud_iterator",
"=",
"gcloud_zone",
".",
"list_resource_record_sets",
"(",
"page_token",
"=",
"page_token",
")",
"for",
"gcloud_record",
"in",
"gcloud_iterator",
":",
"yield",
"gcloud_record",
"# This is to get results which may be on a \"paged\" page.",
"# (if more than max_results) entries.",
"if",
"gcloud_iterator",
".",
"next_page_token",
":",
"for",
"gcloud_record",
"in",
"self",
".",
"_get_gcloud_records",
"(",
"gcloud_zone",
",",
"gcloud_iterator",
".",
"next_page_token",
")",
":",
"# yield from is in python 3 only.",
"yield",
"gcloud_record"
] | 46.636364 | 15.363636 |
def intersects_all(self, other):
"""
Returns True if each segmentlist in other intersects the
corresponding segmentlist in self; returns False
if this is not the case, or if other is empty.
See also:
.intersects(), .all_intersects(), .all_intersects_all()
"""
return all(key in self and self[key].intersects(value) for key, value in other.iteritems()) and bool(other) | [
"def",
"intersects_all",
"(",
"self",
",",
"other",
")",
":",
"return",
"all",
"(",
"key",
"in",
"self",
"and",
"self",
"[",
"key",
"]",
".",
"intersects",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"other",
".",
"iteritems",
"(",
")",
")",
"and",
"bool",
"(",
"other",
")"
] | 34.181818 | 21.818182 |
def _getDistances(self, inputPattern, partitionId=None):
"""Return the distances from inputPattern to all stored patterns.
:param inputPattern The pattern from which distances to all other patterns
are returned
:param partitionId If provided, ignore all training vectors with this
partitionId.
"""
if not self._finishedLearning:
self.finishLearning()
self._finishedLearning = True
if self._vt is not None and len(self._vt) > 0:
inputPattern = numpy.dot(self._vt, inputPattern - self._mean)
sparseInput = self._sparsifyVector(inputPattern)
# Compute distances
dist = self._calcDistance(sparseInput)
# Invalidate results where category is -1
if self._specificIndexTraining:
dist[numpy.array(self._categoryList) == -1] = numpy.inf
# Ignore vectors with this partition id by setting their distances to inf
if partitionId is not None:
dist[self._partitionIdMap.get(partitionId, [])] = numpy.inf
return dist | [
"def",
"_getDistances",
"(",
"self",
",",
"inputPattern",
",",
"partitionId",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_finishedLearning",
":",
"self",
".",
"finishLearning",
"(",
")",
"self",
".",
"_finishedLearning",
"=",
"True",
"if",
"self",
".",
"_vt",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"_vt",
")",
">",
"0",
":",
"inputPattern",
"=",
"numpy",
".",
"dot",
"(",
"self",
".",
"_vt",
",",
"inputPattern",
"-",
"self",
".",
"_mean",
")",
"sparseInput",
"=",
"self",
".",
"_sparsifyVector",
"(",
"inputPattern",
")",
"# Compute distances",
"dist",
"=",
"self",
".",
"_calcDistance",
"(",
"sparseInput",
")",
"# Invalidate results where category is -1",
"if",
"self",
".",
"_specificIndexTraining",
":",
"dist",
"[",
"numpy",
".",
"array",
"(",
"self",
".",
"_categoryList",
")",
"==",
"-",
"1",
"]",
"=",
"numpy",
".",
"inf",
"# Ignore vectors with this partition id by setting their distances to inf",
"if",
"partitionId",
"is",
"not",
"None",
":",
"dist",
"[",
"self",
".",
"_partitionIdMap",
".",
"get",
"(",
"partitionId",
",",
"[",
"]",
")",
"]",
"=",
"numpy",
".",
"inf",
"return",
"dist"
] | 33.862069 | 21.586207 |
def _get_callsites(self, function_address):
"""
Get where a specific function is called.
:param function_address: Address of the target function
:return: A list of CFGNodes whose exits include a call/jump to the given function
"""
all_predecessors = []
nodes = self.get_all_nodes(function_address)
for n in nodes:
predecessors = list(self.get_predecessors(n))
all_predecessors.extend(predecessors)
return all_predecessors | [
"def",
"_get_callsites",
"(",
"self",
",",
"function_address",
")",
":",
"all_predecessors",
"=",
"[",
"]",
"nodes",
"=",
"self",
".",
"get_all_nodes",
"(",
"function_address",
")",
"for",
"n",
"in",
"nodes",
":",
"predecessors",
"=",
"list",
"(",
"self",
".",
"get_predecessors",
"(",
"n",
")",
")",
"all_predecessors",
".",
"extend",
"(",
"predecessors",
")",
"return",
"all_predecessors"
] | 35.2 | 20 |
def validate(cls, partial=True, **kwargs):
"""
Validate kwargs before setting attributes on the model
"""
data = kwargs
if not partial:
data = dict(**kwargs, **{col.name: None for col in cls.__table__.c
if col.name not in kwargs})
errors = defaultdict(list)
for name, value in data.items():
for validator in cls._get_validators(name):
try:
validator(value)
except ValidationError as e:
e.model = cls
e.column = name
errors[name].append(str(e))
if errors:
raise ValidationErrors(errors) | [
"def",
"validate",
"(",
"cls",
",",
"partial",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"kwargs",
"if",
"not",
"partial",
":",
"data",
"=",
"dict",
"(",
"*",
"*",
"kwargs",
",",
"*",
"*",
"{",
"col",
".",
"name",
":",
"None",
"for",
"col",
"in",
"cls",
".",
"__table__",
".",
"c",
"if",
"col",
".",
"name",
"not",
"in",
"kwargs",
"}",
")",
"errors",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"name",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"for",
"validator",
"in",
"cls",
".",
"_get_validators",
"(",
"name",
")",
":",
"try",
":",
"validator",
"(",
"value",
")",
"except",
"ValidationError",
"as",
"e",
":",
"e",
".",
"model",
"=",
"cls",
"e",
".",
"column",
"=",
"name",
"errors",
"[",
"name",
"]",
".",
"append",
"(",
"str",
"(",
"e",
")",
")",
"if",
"errors",
":",
"raise",
"ValidationErrors",
"(",
"errors",
")"
] | 34.095238 | 14 |
def _set_best(self, v, load=False):
"""
Setter method for best, mapped from YANG variable /routing_system/route_map/content/match/additional_paths/advertise_set/best (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_best is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_best() directly.
YANG Description: BGP Add-Path advertise best n paths
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="best", rest_name="best", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'code-name': u'additional-paths-advertise-set-best', u'cli-full-no': None, u'info': u'BGP Add-Path advertise best n paths'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """best must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="best", rest_name="best", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'code-name': u'additional-paths-advertise-set-best', u'cli-full-no': None, u'info': u'BGP Add-Path advertise best n paths'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)""",
})
self.__best = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_best",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"long",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"'0..4294967295'",
"]",
"}",
",",
"int_size",
"=",
"32",
")",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"u'1..16'",
"]",
"}",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"best\"",
",",
"rest_name",
"=",
"\"best\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'cli-full-command'",
":",
"None",
",",
"u'code-name'",
":",
"u'additional-paths-advertise-set-best'",
",",
"u'cli-full-no'",
":",
"None",
",",
"u'info'",
":",
"u'BGP Add-Path advertise best n paths'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-ip-policy'",
",",
"defining_module",
"=",
"'brocade-ip-policy'",
",",
"yang_type",
"=",
"'uint32'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"best must be of a type compatible with uint32\"\"\"",
",",
"'defined-type'",
":",
"\"uint32\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name=\"best\", rest_name=\"best\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'code-name': u'additional-paths-advertise-set-best', u'cli-full-no': None, u'info': u'BGP Add-Path advertise best n paths'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='uint32', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__best",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 87.583333 | 43.958333 |
def get_diff(original, fixed, file_name,
original_label='original', fixed_label='fixed'):
"""Return text of unified diff between original and fixed."""
original, fixed = original.splitlines(True), fixed.splitlines(True)
newline = '\n'
from difflib import unified_diff
diff = unified_diff(original, fixed,
os.path.join(original_label, file_name),
os.path.join(fixed_label, file_name),
lineterm=newline)
text = ''
for line in diff:
text += line
# Work around missing newline (http://bugs.python.org/issue2142).
if not line.endswith(newline):
text += newline + r'\ No newline at end of file' + newline
return text | [
"def",
"get_diff",
"(",
"original",
",",
"fixed",
",",
"file_name",
",",
"original_label",
"=",
"'original'",
",",
"fixed_label",
"=",
"'fixed'",
")",
":",
"original",
",",
"fixed",
"=",
"original",
".",
"splitlines",
"(",
"True",
")",
",",
"fixed",
".",
"splitlines",
"(",
"True",
")",
"newline",
"=",
"'\\n'",
"from",
"difflib",
"import",
"unified_diff",
"diff",
"=",
"unified_diff",
"(",
"original",
",",
"fixed",
",",
"os",
".",
"path",
".",
"join",
"(",
"original_label",
",",
"file_name",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"fixed_label",
",",
"file_name",
")",
",",
"lineterm",
"=",
"newline",
")",
"text",
"=",
"''",
"for",
"line",
"in",
"diff",
":",
"text",
"+=",
"line",
"# Work around missing newline (http://bugs.python.org/issue2142).",
"if",
"not",
"line",
".",
"endswith",
"(",
"newline",
")",
":",
"text",
"+=",
"newline",
"+",
"r'\\ No newline at end of file'",
"+",
"newline",
"return",
"text"
] | 41.5 | 17.777778 |
def report(self):
"""
Report elapsed time.
"""
if not self.end_time:
self.end()
print ("Time: {} mins".format((self.end_time - self.start_time )/ 60)) | [
"def",
"report",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"end_time",
":",
"self",
".",
"end",
"(",
")",
"print",
"(",
"\"Time: {} mins\"",
".",
"format",
"(",
"(",
"self",
".",
"end_time",
"-",
"self",
".",
"start_time",
")",
"/",
"60",
")",
")"
] | 28 | 14.571429 |
def categorical(pvals, size=None, random_state=None):
"""Return random integer from a categorical distribution
Parameters
----------
pvals : sequence of floats, length p
Probabilities of each of the ``p`` different outcomes. These
should sum to 1.
size : int or tuple of ints, optional
Defines the shape of the returned array of random integers. If None
(the default), returns a single float.
random_state: RandomState or an int seed, optional
A random number generator instance.
"""
cumsum = np.cumsum(pvals)
if size is None:
size = (1,)
axis = 0
elif isinstance(size, tuple):
size = size + (1,)
axis = len(size) - 1
else:
raise TypeError('size must be an int or tuple of ints')
random_state = check_random_state(random_state)
return np.sum(cumsum < random_state.random_sample(size), axis=axis) | [
"def",
"categorical",
"(",
"pvals",
",",
"size",
"=",
"None",
",",
"random_state",
"=",
"None",
")",
":",
"cumsum",
"=",
"np",
".",
"cumsum",
"(",
"pvals",
")",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"(",
"1",
",",
")",
"axis",
"=",
"0",
"elif",
"isinstance",
"(",
"size",
",",
"tuple",
")",
":",
"size",
"=",
"size",
"+",
"(",
"1",
",",
")",
"axis",
"=",
"len",
"(",
"size",
")",
"-",
"1",
"else",
":",
"raise",
"TypeError",
"(",
"'size must be an int or tuple of ints'",
")",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"return",
"np",
".",
"sum",
"(",
"cumsum",
"<",
"random_state",
".",
"random_sample",
"(",
"size",
")",
",",
"axis",
"=",
"axis",
")"
] | 34.807692 | 17.461538 |
def service_details(self, io_handler, service_id):
"""
Prints the details of the service with the given ID
"""
svc_ref = self._context.get_service_reference(
None, "({0}={1})".format(constants.SERVICE_ID, service_id)
)
if svc_ref is None:
io_handler.write_line("Service not found: {0}", service_id)
return False
lines = [
"ID............: {0}".format(
svc_ref.get_property(constants.SERVICE_ID)
),
"Rank..........: {0}".format(
svc_ref.get_property(constants.SERVICE_RANKING)
),
"Specifications: {0}".format(
svc_ref.get_property(constants.OBJECTCLASS)
),
"Bundle........: {0}".format(svc_ref.get_bundle()),
"Properties....:",
]
for key, value in sorted(svc_ref.get_properties().items()):
lines.append("\t{0} = {1}".format(key, value))
lines.append("Bundles using this service:")
for bundle in svc_ref.get_using_bundles():
lines.append("\t{0}".format(bundle))
lines.append("")
io_handler.write("\n".join(lines))
return None | [
"def",
"service_details",
"(",
"self",
",",
"io_handler",
",",
"service_id",
")",
":",
"svc_ref",
"=",
"self",
".",
"_context",
".",
"get_service_reference",
"(",
"None",
",",
"\"({0}={1})\"",
".",
"format",
"(",
"constants",
".",
"SERVICE_ID",
",",
"service_id",
")",
")",
"if",
"svc_ref",
"is",
"None",
":",
"io_handler",
".",
"write_line",
"(",
"\"Service not found: {0}\"",
",",
"service_id",
")",
"return",
"False",
"lines",
"=",
"[",
"\"ID............: {0}\"",
".",
"format",
"(",
"svc_ref",
".",
"get_property",
"(",
"constants",
".",
"SERVICE_ID",
")",
")",
",",
"\"Rank..........: {0}\"",
".",
"format",
"(",
"svc_ref",
".",
"get_property",
"(",
"constants",
".",
"SERVICE_RANKING",
")",
")",
",",
"\"Specifications: {0}\"",
".",
"format",
"(",
"svc_ref",
".",
"get_property",
"(",
"constants",
".",
"OBJECTCLASS",
")",
")",
",",
"\"Bundle........: {0}\"",
".",
"format",
"(",
"svc_ref",
".",
"get_bundle",
"(",
")",
")",
",",
"\"Properties....:\"",
",",
"]",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"svc_ref",
".",
"get_properties",
"(",
")",
".",
"items",
"(",
")",
")",
":",
"lines",
".",
"append",
"(",
"\"\\t{0} = {1}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
")",
"lines",
".",
"append",
"(",
"\"Bundles using this service:\"",
")",
"for",
"bundle",
"in",
"svc_ref",
".",
"get_using_bundles",
"(",
")",
":",
"lines",
".",
"append",
"(",
"\"\\t{0}\"",
".",
"format",
"(",
"bundle",
")",
")",
"lines",
".",
"append",
"(",
"\"\"",
")",
"io_handler",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
")",
"return",
"None"
] | 35.558824 | 18.382353 |
def get_users(self, omit_empty_organisms=False):
"""
Get all users known to this Apollo instance
:type omit_empty_organisms: bool
:param omit_empty_organisms: Will omit users having no access to any organism
:rtype: list of dicts
:return: list of user info dictionaries
"""
payload = {}
if omit_empty_organisms:
payload['omitEmptyOrganisms'] = omit_empty_organisms
res = self.post('loadUsers', payload)
data = [_fix_user(user) for user in res]
return data | [
"def",
"get_users",
"(",
"self",
",",
"omit_empty_organisms",
"=",
"False",
")",
":",
"payload",
"=",
"{",
"}",
"if",
"omit_empty_organisms",
":",
"payload",
"[",
"'omitEmptyOrganisms'",
"]",
"=",
"omit_empty_organisms",
"res",
"=",
"self",
".",
"post",
"(",
"'loadUsers'",
",",
"payload",
")",
"data",
"=",
"[",
"_fix_user",
"(",
"user",
")",
"for",
"user",
"in",
"res",
"]",
"return",
"data"
] | 34.375 | 15.5 |
def parseGameTree(self):
""" Called when "(" encountered, ends when a matching ")" encountered.
Parses and returns one 'GameTree' from 'self.data'. Raises
'GameTreeParseError' if a problem is encountered."""
g = GameTree()
while self.index < self.datalen:
match = self.reGameTreeNext.match(self.data, self.index)
if match:
self.index = match.end()
if match.group(1) == ";": # found start of node
if g.variations:
raise GameTreeParseError(
"A node was encountered after a variation.")
g.append(g.makeNode(self.parseNode()))
elif match.group(1) == "(": # found start of variation
g.variations = self.parseVariations()
else: # found end of GameTree ")"
return g
else: # error
raise GameTreeParseError
return g | [
"def",
"parseGameTree",
"(",
"self",
")",
":",
"g",
"=",
"GameTree",
"(",
")",
"while",
"self",
".",
"index",
"<",
"self",
".",
"datalen",
":",
"match",
"=",
"self",
".",
"reGameTreeNext",
".",
"match",
"(",
"self",
".",
"data",
",",
"self",
".",
"index",
")",
"if",
"match",
":",
"self",
".",
"index",
"=",
"match",
".",
"end",
"(",
")",
"if",
"match",
".",
"group",
"(",
"1",
")",
"==",
"\";\"",
":",
"# found start of node",
"if",
"g",
".",
"variations",
":",
"raise",
"GameTreeParseError",
"(",
"\"A node was encountered after a variation.\"",
")",
"g",
".",
"append",
"(",
"g",
".",
"makeNode",
"(",
"self",
".",
"parseNode",
"(",
")",
")",
")",
"elif",
"match",
".",
"group",
"(",
"1",
")",
"==",
"\"(\"",
":",
"# found start of variation",
"g",
".",
"variations",
"=",
"self",
".",
"parseVariations",
"(",
")",
"else",
":",
"# found end of GameTree \")\"",
"return",
"g",
"else",
":",
"# error",
"raise",
"GameTreeParseError",
"return",
"g"
] | 37.47619 | 14.095238 |
def availability_zone_list(request):
"""Utility method to retrieve a list of availability zones."""
try:
return api.nova.availability_zone_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve Nova availability zones.'))
return [] | [
"def",
"availability_zone_list",
"(",
"request",
")",
":",
"try",
":",
"return",
"api",
".",
"nova",
".",
"availability_zone_list",
"(",
"request",
")",
"except",
"Exception",
":",
"exceptions",
".",
"handle",
"(",
"request",
",",
"_",
"(",
"'Unable to retrieve Nova availability zones.'",
")",
")",
"return",
"[",
"]"
] | 39 | 16.75 |
def _checkremove_que(self, word):
"""If word ends in -que and if word is not in pass list, strip -que"""
in_que_pass_list = False
que_pass_list = ['atque',
'quoque',
'neque',
'itaque',
'absque',
'apsque',
'abusque',
'adaeque',
'adusque',
'denique',
'deque',
'susque',
'oblique',
'peraeque',
'plenisque',
'quandoque',
'quisque',
'quaeque',
'cuiusque',
'cuique',
'quemque',
'quamque',
'quaque',
'quique',
'quorumque',
'quarumque',
'quibusque',
'quosque',
'quasque',
'quotusquisque',
'quousque',
'ubique',
'undique',
'usque',
'uterque',
'utique',
'utroque',
'utribique',
'torque',
'coque',
'concoque',
'contorque',
'detorque',
'decoque',
'excoque',
'extorque',
'obtorque',
'optorque',
'retorque',
'recoque',
'attorque',
'incoque',
'intorque',
'praetorque']
if word not in que_pass_list:
word = re.sub(r'que$', '', word)
else:
in_que_pass_list = True
return word, in_que_pass_list | [
"def",
"_checkremove_que",
"(",
"self",
",",
"word",
")",
":",
"in_que_pass_list",
"=",
"False",
"que_pass_list",
"=",
"[",
"'atque'",
",",
"'quoque'",
",",
"'neque'",
",",
"'itaque'",
",",
"'absque'",
",",
"'apsque'",
",",
"'abusque'",
",",
"'adaeque'",
",",
"'adusque'",
",",
"'denique'",
",",
"'deque'",
",",
"'susque'",
",",
"'oblique'",
",",
"'peraeque'",
",",
"'plenisque'",
",",
"'quandoque'",
",",
"'quisque'",
",",
"'quaeque'",
",",
"'cuiusque'",
",",
"'cuique'",
",",
"'quemque'",
",",
"'quamque'",
",",
"'quaque'",
",",
"'quique'",
",",
"'quorumque'",
",",
"'quarumque'",
",",
"'quibusque'",
",",
"'quosque'",
",",
"'quasque'",
",",
"'quotusquisque'",
",",
"'quousque'",
",",
"'ubique'",
",",
"'undique'",
",",
"'usque'",
",",
"'uterque'",
",",
"'utique'",
",",
"'utroque'",
",",
"'utribique'",
",",
"'torque'",
",",
"'coque'",
",",
"'concoque'",
",",
"'contorque'",
",",
"'detorque'",
",",
"'decoque'",
",",
"'excoque'",
",",
"'extorque'",
",",
"'obtorque'",
",",
"'optorque'",
",",
"'retorque'",
",",
"'recoque'",
",",
"'attorque'",
",",
"'incoque'",
",",
"'intorque'",
",",
"'praetorque'",
"]",
"if",
"word",
"not",
"in",
"que_pass_list",
":",
"word",
"=",
"re",
".",
"sub",
"(",
"r'que$'",
",",
"''",
",",
"word",
")",
"else",
":",
"in_que_pass_list",
"=",
"True",
"return",
"word",
",",
"in_que_pass_list"
] | 32.712121 | 7.984848 |
def download_file_part_run(download_context):
"""
Function run by CreateProjectCommand to create the project.
Runs in a background process.
:param download_context: UploadContext: contains data service setup and project name to create.
"""
destination_dir, file_url_data_dict, seek_amt, bytes_to_read = download_context.params
project_file = ProjectFile(file_url_data_dict)
local_path = project_file.get_local_path(destination_dir)
retry_chunk_downloader = RetryChunkDownloader(project_file, local_path,
seek_amt, bytes_to_read,
download_context)
retry_chunk_downloader.run()
return 'ok' | [
"def",
"download_file_part_run",
"(",
"download_context",
")",
":",
"destination_dir",
",",
"file_url_data_dict",
",",
"seek_amt",
",",
"bytes_to_read",
"=",
"download_context",
".",
"params",
"project_file",
"=",
"ProjectFile",
"(",
"file_url_data_dict",
")",
"local_path",
"=",
"project_file",
".",
"get_local_path",
"(",
"destination_dir",
")",
"retry_chunk_downloader",
"=",
"RetryChunkDownloader",
"(",
"project_file",
",",
"local_path",
",",
"seek_amt",
",",
"bytes_to_read",
",",
"download_context",
")",
"retry_chunk_downloader",
".",
"run",
"(",
")",
"return",
"'ok'"
] | 51.285714 | 21.714286 |
def _from_string(cls, serialized):
"""
Return an instance of `cls` parsed from its `serialized` form.
Args:
cls: The :class:`OpaqueKey` subclass.
serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.
Raises:
InvalidKeyError: Should be raised if `serialized` is not a valid serialized key
understood by `cls`.
"""
try:
def_key, aside_type = _split_keys_v2(serialized)
return cls(DefinitionKey.from_string(def_key), aside_type)
except ValueError as exc:
raise InvalidKeyError(cls, exc.args) | [
"def",
"_from_string",
"(",
"cls",
",",
"serialized",
")",
":",
"try",
":",
"def_key",
",",
"aside_type",
"=",
"_split_keys_v2",
"(",
"serialized",
")",
"return",
"cls",
"(",
"DefinitionKey",
".",
"from_string",
"(",
"def_key",
")",
",",
"aside_type",
")",
"except",
"ValueError",
"as",
"exc",
":",
"raise",
"InvalidKeyError",
"(",
"cls",
",",
"exc",
".",
"args",
")"
] | 38.294118 | 22.529412 |
def _get_ngrams(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts | [
"def",
"_get_ngrams",
"(",
"segment",
",",
"max_order",
")",
":",
"ngram_counts",
"=",
"collections",
".",
"Counter",
"(",
")",
"for",
"order",
"in",
"range",
"(",
"1",
",",
"max_order",
"+",
"1",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"segment",
")",
"-",
"order",
"+",
"1",
")",
":",
"ngram",
"=",
"tuple",
"(",
"segment",
"[",
"i",
":",
"i",
"+",
"order",
"]",
")",
"ngram_counts",
"[",
"ngram",
"]",
"+=",
"1",
"return",
"ngram_counts"
] | 34.555556 | 17.055556 |
def is_entailed_by(self, other):
"""
Other is more specific than self. Other is bounded within self.
"""
other = self.coerce(other)
to_i = self.to_i
return to_i(other.low) >= to_i(self.low) and \
to_i(other.high) <= to_i(self.high) | [
"def",
"is_entailed_by",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"self",
".",
"coerce",
"(",
"other",
")",
"to_i",
"=",
"self",
".",
"to_i",
"return",
"to_i",
"(",
"other",
".",
"low",
")",
">=",
"to_i",
"(",
"self",
".",
"low",
")",
"and",
"to_i",
"(",
"other",
".",
"high",
")",
"<=",
"to_i",
"(",
"self",
".",
"high",
")"
] | 36.25 | 11 |
def drop_tree(self):
"""
Removes this element from the tree, including its children and
text. The tail text is joined to the previous element or
parent.
"""
parent = self.getparent()
assert parent is not None
if self.tail:
previous = self.getprevious()
if previous is None:
parent.text = (parent.text or '') + self.tail
else:
previous.tail = (previous.tail or '') + self.tail
parent.remove(self) | [
"def",
"drop_tree",
"(",
"self",
")",
":",
"parent",
"=",
"self",
".",
"getparent",
"(",
")",
"assert",
"parent",
"is",
"not",
"None",
"if",
"self",
".",
"tail",
":",
"previous",
"=",
"self",
".",
"getprevious",
"(",
")",
"if",
"previous",
"is",
"None",
":",
"parent",
".",
"text",
"=",
"(",
"parent",
".",
"text",
"or",
"''",
")",
"+",
"self",
".",
"tail",
"else",
":",
"previous",
".",
"tail",
"=",
"(",
"previous",
".",
"tail",
"or",
"''",
")",
"+",
"self",
".",
"tail",
"parent",
".",
"remove",
"(",
"self",
")"
] | 34.8 | 14.933333 |
def is_clicked(self, MouseStateType):
"""
Did the user depress and release the button to signify a click?
MouseStateType is the button to query. Values found under StateTypes.py
"""
return self.previous_mouse_state.query_state(MouseStateType) and (
not self.current_mouse_state.query_state(MouseStateType)) | [
"def",
"is_clicked",
"(",
"self",
",",
"MouseStateType",
")",
":",
"return",
"self",
".",
"previous_mouse_state",
".",
"query_state",
"(",
"MouseStateType",
")",
"and",
"(",
"not",
"self",
".",
"current_mouse_state",
".",
"query_state",
"(",
"MouseStateType",
")",
")"
] | 49.714286 | 18.857143 |
def clear_db():
"""Clear the entire db."""
cursor = '0'
while cursor != 0:
cursor, keys = DB.scan(cursor, match='*', count=5000)
if keys:
DB.delete(*keys) | [
"def",
"clear_db",
"(",
")",
":",
"cursor",
"=",
"'0'",
"while",
"cursor",
"!=",
"0",
":",
"cursor",
",",
"keys",
"=",
"DB",
".",
"scan",
"(",
"cursor",
",",
"match",
"=",
"'*'",
",",
"count",
"=",
"5000",
")",
"if",
"keys",
":",
"DB",
".",
"delete",
"(",
"*",
"keys",
")"
] | 26.857143 | 17.714286 |
def withFile(file, func, mode='r', expand=False):
"""Pass `file` to `func` and ensure the file is closed afterwards. If
`file` is a string, open according to `mode`; if `expand` is true also
expand user and vars.
"""
file = _normalizeToFile(file, mode=mode, expand=expand)
try: return func(file)
finally: file.close() | [
"def",
"withFile",
"(",
"file",
",",
"func",
",",
"mode",
"=",
"'r'",
",",
"expand",
"=",
"False",
")",
":",
"file",
"=",
"_normalizeToFile",
"(",
"file",
",",
"mode",
"=",
"mode",
",",
"expand",
"=",
"expand",
")",
"try",
":",
"return",
"func",
"(",
"file",
")",
"finally",
":",
"file",
".",
"close",
"(",
")"
] | 43.75 | 12.5 |
def uninstall(self):
"""
Remove agent's files from remote host
"""
log_filename = "agent_{host}.log".format(host=self.host)
data_filename = "agent_{host}.rawdata".format(host=self.host)
try:
if self.session:
self.session.send("stop\n")
self.session.close()
self.session = None
except BaseException:
logger.warning(
'Unable to correctly stop monitoring agent - session is broken. Pay attention to agent log (%s).',
log_filename,
exc_info=True)
else:
try:
self.ssh.get_file(
os.path.join(
self.path['AGENT_REMOTE_FOLDER'],
"_agent.log"),
log_filename)
self.ssh.get_file(
os.path.join(
self.path['AGENT_REMOTE_FOLDER'],
"monitoring.rawdata"),
data_filename)
self.ssh.rm_r(self.path['AGENT_REMOTE_FOLDER'])
except Exception:
logger.error("Unable to get agent artefacts", exc_info=True)
self._kill_agent()
return log_filename, data_filename | [
"def",
"uninstall",
"(",
"self",
")",
":",
"log_filename",
"=",
"\"agent_{host}.log\"",
".",
"format",
"(",
"host",
"=",
"self",
".",
"host",
")",
"data_filename",
"=",
"\"agent_{host}.rawdata\"",
".",
"format",
"(",
"host",
"=",
"self",
".",
"host",
")",
"try",
":",
"if",
"self",
".",
"session",
":",
"self",
".",
"session",
".",
"send",
"(",
"\"stop\\n\"",
")",
"self",
".",
"session",
".",
"close",
"(",
")",
"self",
".",
"session",
"=",
"None",
"except",
"BaseException",
":",
"logger",
".",
"warning",
"(",
"'Unable to correctly stop monitoring agent - session is broken. Pay attention to agent log (%s).'",
",",
"log_filename",
",",
"exc_info",
"=",
"True",
")",
"else",
":",
"try",
":",
"self",
".",
"ssh",
".",
"get_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
"[",
"'AGENT_REMOTE_FOLDER'",
"]",
",",
"\"_agent.log\"",
")",
",",
"log_filename",
")",
"self",
".",
"ssh",
".",
"get_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
"[",
"'AGENT_REMOTE_FOLDER'",
"]",
",",
"\"monitoring.rawdata\"",
")",
",",
"data_filename",
")",
"self",
".",
"ssh",
".",
"rm_r",
"(",
"self",
".",
"path",
"[",
"'AGENT_REMOTE_FOLDER'",
"]",
")",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"\"Unable to get agent artefacts\"",
",",
"exc_info",
"=",
"True",
")",
"self",
".",
"_kill_agent",
"(",
")",
"return",
"log_filename",
",",
"data_filename"
] | 37.264706 | 14.911765 |
def pyLocal(self, time_zone=''):
''' a method to report a python datetime from a labDT object
:param time_zone: [optional] string with timezone to report in
:return: string with date and time info
'''
# validate inputs
get_tz = get_localzone()
title = 'Timezone input for labDT.pyLocal'
if time_zone:
# if time_zone.lower() in ('utc', 'uct', 'universal', 'zulu'):
# raise ValueError('time_zone cannot be UTC. %s requires a local timezone value. Try:\nfor tz in pytz.all_timezones:\n print tz' % title)
try:
get_tz = tz.gettz(time_zone)
except:
raise ValueError('\n%s is not a valid timezone format. Try:\nfor tz in pytz.all_timezones:\n print tz' % title)
# construct python datetime from labDT
dT = self.astimezone(get_tz)
dt_kwargs = {
'year': dT.year,
'month': dT.month,
'day': dT.day,
'hour': dT.hour,
'minute': dT.minute,
'second': dT.second,
'microsecond': dT.microsecond,
'tzinfo': dT.tzinfo
}
return labDT(**dt_kwargs) | [
"def",
"pyLocal",
"(",
"self",
",",
"time_zone",
"=",
"''",
")",
":",
"# validate inputs\r",
"get_tz",
"=",
"get_localzone",
"(",
")",
"title",
"=",
"'Timezone input for labDT.pyLocal'",
"if",
"time_zone",
":",
"# if time_zone.lower() in ('utc', 'uct', 'universal', 'zulu'):\r",
"# raise ValueError('time_zone cannot be UTC. %s requires a local timezone value. Try:\\nfor tz in pytz.all_timezones:\\n print tz' % title)\r",
"try",
":",
"get_tz",
"=",
"tz",
".",
"gettz",
"(",
"time_zone",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'\\n%s is not a valid timezone format. Try:\\nfor tz in pytz.all_timezones:\\n print tz'",
"%",
"title",
")",
"# construct python datetime from labDT\r",
"dT",
"=",
"self",
".",
"astimezone",
"(",
"get_tz",
")",
"dt_kwargs",
"=",
"{",
"'year'",
":",
"dT",
".",
"year",
",",
"'month'",
":",
"dT",
".",
"month",
",",
"'day'",
":",
"dT",
".",
"day",
",",
"'hour'",
":",
"dT",
".",
"hour",
",",
"'minute'",
":",
"dT",
".",
"minute",
",",
"'second'",
":",
"dT",
".",
"second",
",",
"'microsecond'",
":",
"dT",
".",
"microsecond",
",",
"'tzinfo'",
":",
"dT",
".",
"tzinfo",
"}",
"return",
"labDT",
"(",
"*",
"*",
"dt_kwargs",
")"
] | 37.75 | 22.8125 |
def emit(self, event, *event_args):
"""Call the registered listeners for ``event``.
The listeners will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
"""
listeners = self._listeners[event][:]
for listener in listeners:
args = list(event_args) + list(listener.user_args)
result = listener.callback(*args)
if result is False:
self.off(event, listener.callback) | [
"def",
"emit",
"(",
"self",
",",
"event",
",",
"*",
"event_args",
")",
":",
"listeners",
"=",
"self",
".",
"_listeners",
"[",
"event",
"]",
"[",
":",
"]",
"for",
"listener",
"in",
"listeners",
":",
"args",
"=",
"list",
"(",
"event_args",
")",
"+",
"list",
"(",
"listener",
".",
"user_args",
")",
"result",
"=",
"listener",
".",
"callback",
"(",
"*",
"args",
")",
"if",
"result",
"is",
"False",
":",
"self",
".",
"off",
"(",
"event",
",",
"listener",
".",
"callback",
")"
] | 43 | 14.166667 |
def perform_remote_action(i):
"""
Input: { See 'perform_action' function }
Output: { See 'perform_action' function }
"""
# Import modules compatible with Python 2.x and 3.x
import urllib
try: import urllib.request as urllib2
except: import urllib2 # pragma: no cover
try: from urllib.parse import urlencode
except: from urllib import urlencode # pragma: no cover
rr={'return':0}
# Get action
act=i.get('action','')
# Check output
o=i.get('out','')
if o=='con':
# out('Initiating remote access ...')
# out('')
i['out']='con'
i['quiet']='yes'
if act=='pull':
i['out']='json'
else:
i['out']='json'
# # Clean up input
# if o!='json_file':
# rr['out']='json' # Decided to return json to show that it's remote ...
if 'cid' in i:
del(i['cid']) # already processed
# Get URL
url=i.get('remote_server_url','')
# Process i
if 'remote_server_url' in i: del(i['remote_server_url'])
# Pre process if push file ...
if act=='push':
# Check file
fn=i.get('filename','')
if fn=='':
x=i.get('cids',[])
if len(x)>0:
fn=x[0]
if fn=='':
return {'return':1, 'error':'filename is empty'}
if not os.path.isfile(fn):
return {'return':1, 'error':'file '+fn+' not found'}
rx=convert_file_to_upload_string({'filename':fn})
if rx['return']>0: return rx
i['file_content_base64']=rx['file_content_base64']
# Leave only filename without path
i['filename']=os.path.basename(fn)
# Prepare post variables
r=dumps_json({'dict':i, 'skip_indent':'yes'})
if r['return']>0: return r
s=r['string'].encode('utf8')
post=urlencode({'ck_json':s})
if sys.version_info[0]>2: post=post.encode('utf8')
# If auth
au=i.get('remote_server_user','')
if au!='':
del(i['remote_server_user'])
ap=i.get('remote_server_pass','')
if ap!='':
del(i['remote_server_pass'])
auth = urllib2.HTTPPasswordMgrWithDefaultRealm()
auth.add_password(None, url, au, ap)
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(auth)))
# Prepare request
request = urllib2.Request(url, post)
# Connect
try:
f=urllib2.urlopen(request)
except Exception as e:
return {'return':1, 'error':'Access to remote CK repository failed ('+format(e)+')'}
# Read from Internet
try:
s=f.read()
f.close()
except Exception as e:
return {'return':1, 'error':'Failed reading stream from remote CK web service ('+format(e)+')'}
# Check output
try: s=s.decode('utf8')
except Exception as e: pass
if o=='con' and act!='pull':
out(s.rstrip())
else:
# Try to convert output to dictionary
r=convert_json_str_to_dict({'str':s, 'skip_quote_replacement':'yes'})
if r['return']>0:
return {'return':1, 'error':'can\'t parse output from remote CK server ('+r['error']+'):\n'+s[:256]+'\n\n...)'}
d=r['dict']
if 'return' in d: d['return']=int(d['return']) # Fix for some strange behavior when 'return' is not integer - should check why ...
if d.get('return',0)>0:
return d
# Post process if pull file ...
if act=='pull':
if o!='json' and o!='json_file':
# Convert encoded file to real file ...
x=d.get('file_content_base64','')
fn=d.get('filename','')
if fn=='': fn=cfg['default_archive_name']
r=convert_upload_string_to_file({'file_content_base64':x, 'filename':fn})
if r['return']>0: return r
if 'file_content_base64' in d: del(d['file_content_base64'])
rr.update(d)
# Restore original output
i['out']=o
return rr | [
"def",
"perform_remote_action",
"(",
"i",
")",
":",
"# Import modules compatible with Python 2.x and 3.x",
"import",
"urllib",
"try",
":",
"import",
"urllib",
".",
"request",
"as",
"urllib2",
"except",
":",
"import",
"urllib2",
"# pragma: no cover",
"try",
":",
"from",
"urllib",
".",
"parse",
"import",
"urlencode",
"except",
":",
"from",
"urllib",
"import",
"urlencode",
"# pragma: no cover",
"rr",
"=",
"{",
"'return'",
":",
"0",
"}",
"# Get action",
"act",
"=",
"i",
".",
"get",
"(",
"'action'",
",",
"''",
")",
"# Check output",
"o",
"=",
"i",
".",
"get",
"(",
"'out'",
",",
"''",
")",
"if",
"o",
"==",
"'con'",
":",
"# out('Initiating remote access ...')",
"# out('')",
"i",
"[",
"'out'",
"]",
"=",
"'con'",
"i",
"[",
"'quiet'",
"]",
"=",
"'yes'",
"if",
"act",
"==",
"'pull'",
":",
"i",
"[",
"'out'",
"]",
"=",
"'json'",
"else",
":",
"i",
"[",
"'out'",
"]",
"=",
"'json'",
"# # Clean up input",
"# if o!='json_file': ",
"# rr['out']='json' # Decided to return json to show that it's remote ...",
"if",
"'cid'",
"in",
"i",
":",
"del",
"(",
"i",
"[",
"'cid'",
"]",
")",
"# already processed",
"# Get URL",
"url",
"=",
"i",
".",
"get",
"(",
"'remote_server_url'",
",",
"''",
")",
"# Process i",
"if",
"'remote_server_url'",
"in",
"i",
":",
"del",
"(",
"i",
"[",
"'remote_server_url'",
"]",
")",
"# Pre process if push file ...",
"if",
"act",
"==",
"'push'",
":",
"# Check file",
"fn",
"=",
"i",
".",
"get",
"(",
"'filename'",
",",
"''",
")",
"if",
"fn",
"==",
"''",
":",
"x",
"=",
"i",
".",
"get",
"(",
"'cids'",
",",
"[",
"]",
")",
"if",
"len",
"(",
"x",
")",
">",
"0",
":",
"fn",
"=",
"x",
"[",
"0",
"]",
"if",
"fn",
"==",
"''",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'filename is empty'",
"}",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fn",
")",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'file '",
"+",
"fn",
"+",
"' not found'",
"}",
"rx",
"=",
"convert_file_to_upload_string",
"(",
"{",
"'filename'",
":",
"fn",
"}",
")",
"if",
"rx",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"rx",
"i",
"[",
"'file_content_base64'",
"]",
"=",
"rx",
"[",
"'file_content_base64'",
"]",
"# Leave only filename without path",
"i",
"[",
"'filename'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
")",
"# Prepare post variables",
"r",
"=",
"dumps_json",
"(",
"{",
"'dict'",
":",
"i",
",",
"'skip_indent'",
":",
"'yes'",
"}",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"s",
"=",
"r",
"[",
"'string'",
"]",
".",
"encode",
"(",
"'utf8'",
")",
"post",
"=",
"urlencode",
"(",
"{",
"'ck_json'",
":",
"s",
"}",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">",
"2",
":",
"post",
"=",
"post",
".",
"encode",
"(",
"'utf8'",
")",
"# If auth",
"au",
"=",
"i",
".",
"get",
"(",
"'remote_server_user'",
",",
"''",
")",
"if",
"au",
"!=",
"''",
":",
"del",
"(",
"i",
"[",
"'remote_server_user'",
"]",
")",
"ap",
"=",
"i",
".",
"get",
"(",
"'remote_server_pass'",
",",
"''",
")",
"if",
"ap",
"!=",
"''",
":",
"del",
"(",
"i",
"[",
"'remote_server_pass'",
"]",
")",
"auth",
"=",
"urllib2",
".",
"HTTPPasswordMgrWithDefaultRealm",
"(",
")",
"auth",
".",
"add_password",
"(",
"None",
",",
"url",
",",
"au",
",",
"ap",
")",
"urllib2",
".",
"install_opener",
"(",
"urllib2",
".",
"build_opener",
"(",
"urllib2",
".",
"HTTPBasicAuthHandler",
"(",
"auth",
")",
")",
")",
"# Prepare request",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"url",
",",
"post",
")",
"# Connect",
"try",
":",
"f",
"=",
"urllib2",
".",
"urlopen",
"(",
"request",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'Access to remote CK repository failed ('",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"# Read from Internet",
"try",
":",
"s",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'Failed reading stream from remote CK web service ('",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"# Check output",
"try",
":",
"s",
"=",
"s",
".",
"decode",
"(",
"'utf8'",
")",
"except",
"Exception",
"as",
"e",
":",
"pass",
"if",
"o",
"==",
"'con'",
"and",
"act",
"!=",
"'pull'",
":",
"out",
"(",
"s",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"# Try to convert output to dictionary",
"r",
"=",
"convert_json_str_to_dict",
"(",
"{",
"'str'",
":",
"s",
",",
"'skip_quote_replacement'",
":",
"'yes'",
"}",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'can\\'t parse output from remote CK server ('",
"+",
"r",
"[",
"'error'",
"]",
"+",
"'):\\n'",
"+",
"s",
"[",
":",
"256",
"]",
"+",
"'\\n\\n...)'",
"}",
"d",
"=",
"r",
"[",
"'dict'",
"]",
"if",
"'return'",
"in",
"d",
":",
"d",
"[",
"'return'",
"]",
"=",
"int",
"(",
"d",
"[",
"'return'",
"]",
")",
"# Fix for some strange behavior when 'return' is not integer - should check why ...",
"if",
"d",
".",
"get",
"(",
"'return'",
",",
"0",
")",
">",
"0",
":",
"return",
"d",
"# Post process if pull file ...",
"if",
"act",
"==",
"'pull'",
":",
"if",
"o",
"!=",
"'json'",
"and",
"o",
"!=",
"'json_file'",
":",
"# Convert encoded file to real file ...",
"x",
"=",
"d",
".",
"get",
"(",
"'file_content_base64'",
",",
"''",
")",
"fn",
"=",
"d",
".",
"get",
"(",
"'filename'",
",",
"''",
")",
"if",
"fn",
"==",
"''",
":",
"fn",
"=",
"cfg",
"[",
"'default_archive_name'",
"]",
"r",
"=",
"convert_upload_string_to_file",
"(",
"{",
"'file_content_base64'",
":",
"x",
",",
"'filename'",
":",
"fn",
"}",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"if",
"'file_content_base64'",
"in",
"d",
":",
"del",
"(",
"d",
"[",
"'file_content_base64'",
"]",
")",
"rr",
".",
"update",
"(",
"d",
")",
"# Restore original output",
"i",
"[",
"'out'",
"]",
"=",
"o",
"return",
"rr"
] | 26.347222 | 23.291667 |
def to_native(self, value, context=None):
""" Schematics deserializer override
:return: ToOne instance
"""
if isinstance(value, ToOne):
return value
value = self._cast_rid(value)
return ToOne(self.rtype, self.field, rid=value) | [
"def",
"to_native",
"(",
"self",
",",
"value",
",",
"context",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"ToOne",
")",
":",
"return",
"value",
"value",
"=",
"self",
".",
"_cast_rid",
"(",
"value",
")",
"return",
"ToOne",
"(",
"self",
".",
"rtype",
",",
"self",
".",
"field",
",",
"rid",
"=",
"value",
")"
] | 25.363636 | 15.272727 |
def interfaces(self):
"""list[dict]: A list of dictionary items describing the operational
state of interfaces.
This method currently only lists the Physical Interfaces (
Gigabitethernet, tengigabitethernet, fortygigabitethernet,
hundredgigabitethernet) and Loopback interfaces. It currently
excludes VLAN interfaces, FCoE, Port-Channels, Management and Fibre
Channel ports.
"""
urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
int_ns = 'urn:brocade.com:mgmt:brocade-interface-ext'
result = []
has_more = ''
last_interface_name = ''
last_interface_type = ''
while (has_more == '') or (has_more == 'true'):
request_interface = self.get_interface_detail_request(
last_interface_name, last_interface_type)
interface_result = self._callback(request_interface, 'get')
has_more = interface_result.find('%shas-more' % urn).text
for item in interface_result.findall('%sinterface' % urn):
interface_type = item.find('%sinterface-type' % urn).text
interface_name = item.find('%sinterface-name' % urn).text
last_interface_type = interface_type
last_interface_name = interface_name
if "gigabitethernet" in interface_type:
interface_role = item.find('%sport-role' % urn).text
if_name = item.find('%sif-name' % urn).text
interface_state = item.find('%sif-state' % urn).text
interface_proto_state = item.find('%sline-protocol-state' %
urn).text
interface_mac = item.find(
'%scurrent-hardware-address' % urn).text
item_results = {'interface-type': interface_type,
'interface-name': interface_name,
'interface-role': interface_role,
'if-name': if_name,
'interface-state': interface_state,
'interface-proto-state':
interface_proto_state,
'interface-mac': interface_mac}
result.append(item_results)
# Loopback interfaces. Probably for other non-physical interfaces, too.
ip_result = []
request_interface = ET.Element('get-ip-interface', xmlns=int_ns)
interface_result = self._callback(request_interface, 'get')
for interface in interface_result.findall('%sinterface' % urn):
int_type = interface.find('%sinterface-type' % urn).text
int_name = interface.find('%sinterface-name' % urn).text
if int_type == 'unknown':
continue
int_state = interface.find('%sif-state' % urn).text
int_proto_state = interface.find('%sline-protocol-state' %
urn).text
ip_address = interface.find('.//%sipv4' % urn).text
results = {'interface-type': int_type,
'interface-name': int_name,
'interface-role': None,
'if-name': None,
'interface-state': int_state,
'interface-proto-state': int_proto_state,
'interface-mac': None,
'ip-address': ip_address}
x = next((x for x in result if int_type == x['interface-type'] and
int_name == x['interface-name']), None)
if x is not None:
results.update(x)
ip_result.append(results)
return ip_result | [
"def",
"interfaces",
"(",
"self",
")",
":",
"urn",
"=",
"\"{urn:brocade.com:mgmt:brocade-interface-ext}\"",
"int_ns",
"=",
"'urn:brocade.com:mgmt:brocade-interface-ext'",
"result",
"=",
"[",
"]",
"has_more",
"=",
"''",
"last_interface_name",
"=",
"''",
"last_interface_type",
"=",
"''",
"while",
"(",
"has_more",
"==",
"''",
")",
"or",
"(",
"has_more",
"==",
"'true'",
")",
":",
"request_interface",
"=",
"self",
".",
"get_interface_detail_request",
"(",
"last_interface_name",
",",
"last_interface_type",
")",
"interface_result",
"=",
"self",
".",
"_callback",
"(",
"request_interface",
",",
"'get'",
")",
"has_more",
"=",
"interface_result",
".",
"find",
"(",
"'%shas-more'",
"%",
"urn",
")",
".",
"text",
"for",
"item",
"in",
"interface_result",
".",
"findall",
"(",
"'%sinterface'",
"%",
"urn",
")",
":",
"interface_type",
"=",
"item",
".",
"find",
"(",
"'%sinterface-type'",
"%",
"urn",
")",
".",
"text",
"interface_name",
"=",
"item",
".",
"find",
"(",
"'%sinterface-name'",
"%",
"urn",
")",
".",
"text",
"last_interface_type",
"=",
"interface_type",
"last_interface_name",
"=",
"interface_name",
"if",
"\"gigabitethernet\"",
"in",
"interface_type",
":",
"interface_role",
"=",
"item",
".",
"find",
"(",
"'%sport-role'",
"%",
"urn",
")",
".",
"text",
"if_name",
"=",
"item",
".",
"find",
"(",
"'%sif-name'",
"%",
"urn",
")",
".",
"text",
"interface_state",
"=",
"item",
".",
"find",
"(",
"'%sif-state'",
"%",
"urn",
")",
".",
"text",
"interface_proto_state",
"=",
"item",
".",
"find",
"(",
"'%sline-protocol-state'",
"%",
"urn",
")",
".",
"text",
"interface_mac",
"=",
"item",
".",
"find",
"(",
"'%scurrent-hardware-address'",
"%",
"urn",
")",
".",
"text",
"item_results",
"=",
"{",
"'interface-type'",
":",
"interface_type",
",",
"'interface-name'",
":",
"interface_name",
",",
"'interface-role'",
":",
"interface_role",
",",
"'if-name'",
":",
"if_name",
",",
"'interface-state'",
":",
"interface_state",
",",
"'interface-proto-state'",
":",
"interface_proto_state",
",",
"'interface-mac'",
":",
"interface_mac",
"}",
"result",
".",
"append",
"(",
"item_results",
")",
"# Loopback interfaces. Probably for other non-physical interfaces, too.",
"ip_result",
"=",
"[",
"]",
"request_interface",
"=",
"ET",
".",
"Element",
"(",
"'get-ip-interface'",
",",
"xmlns",
"=",
"int_ns",
")",
"interface_result",
"=",
"self",
".",
"_callback",
"(",
"request_interface",
",",
"'get'",
")",
"for",
"interface",
"in",
"interface_result",
".",
"findall",
"(",
"'%sinterface'",
"%",
"urn",
")",
":",
"int_type",
"=",
"interface",
".",
"find",
"(",
"'%sinterface-type'",
"%",
"urn",
")",
".",
"text",
"int_name",
"=",
"interface",
".",
"find",
"(",
"'%sinterface-name'",
"%",
"urn",
")",
".",
"text",
"if",
"int_type",
"==",
"'unknown'",
":",
"continue",
"int_state",
"=",
"interface",
".",
"find",
"(",
"'%sif-state'",
"%",
"urn",
")",
".",
"text",
"int_proto_state",
"=",
"interface",
".",
"find",
"(",
"'%sline-protocol-state'",
"%",
"urn",
")",
".",
"text",
"ip_address",
"=",
"interface",
".",
"find",
"(",
"'.//%sipv4'",
"%",
"urn",
")",
".",
"text",
"results",
"=",
"{",
"'interface-type'",
":",
"int_type",
",",
"'interface-name'",
":",
"int_name",
",",
"'interface-role'",
":",
"None",
",",
"'if-name'",
":",
"None",
",",
"'interface-state'",
":",
"int_state",
",",
"'interface-proto-state'",
":",
"int_proto_state",
",",
"'interface-mac'",
":",
"None",
",",
"'ip-address'",
":",
"ip_address",
"}",
"x",
"=",
"next",
"(",
"(",
"x",
"for",
"x",
"in",
"result",
"if",
"int_type",
"==",
"x",
"[",
"'interface-type'",
"]",
"and",
"int_name",
"==",
"x",
"[",
"'interface-name'",
"]",
")",
",",
"None",
")",
"if",
"x",
"is",
"not",
"None",
":",
"results",
".",
"update",
"(",
"x",
")",
"ip_result",
".",
"append",
"(",
"results",
")",
"return",
"ip_result"
] | 51.283784 | 21.459459 |
def from_file(self, filename):
"""Read configuration from a .rc file.
`filename` is a file name to read.
"""
self.attempted_config_files.append(filename)
cp = HandyConfigParser()
files_read = cp.read(filename)
if files_read is not None: # return value changed in 2.4
self.config_files.extend(files_read)
for option_spec in self.CONFIG_FILE_OPTIONS:
self.set_attr_from_config_option(cp, *option_spec)
# [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
self.paths[option] = cp.getlist('paths', option) | [
"def",
"from_file",
"(",
"self",
",",
"filename",
")",
":",
"self",
".",
"attempted_config_files",
".",
"append",
"(",
"filename",
")",
"cp",
"=",
"HandyConfigParser",
"(",
")",
"files_read",
"=",
"cp",
".",
"read",
"(",
"filename",
")",
"if",
"files_read",
"is",
"not",
"None",
":",
"# return value changed in 2.4",
"self",
".",
"config_files",
".",
"extend",
"(",
"files_read",
")",
"for",
"option_spec",
"in",
"self",
".",
"CONFIG_FILE_OPTIONS",
":",
"self",
".",
"set_attr_from_config_option",
"(",
"cp",
",",
"*",
"option_spec",
")",
"# [paths] is special",
"if",
"cp",
".",
"has_section",
"(",
"'paths'",
")",
":",
"for",
"option",
"in",
"cp",
".",
"options",
"(",
"'paths'",
")",
":",
"self",
".",
"paths",
"[",
"option",
"]",
"=",
"cp",
".",
"getlist",
"(",
"'paths'",
",",
"option",
")"
] | 32.55 | 17.4 |
def gibbs_binding_energy(self, eads=False):
"""
Returns the adsorption energy or Gibb's binding energy
of an adsorbate on a surface
Args:
eads (bool): Whether to calculate the adsorption energy
(True) or the binding energy (False) which is just
adsorption energy normalized by number of adsorbates.
"""
n = self.get_unit_primitive_area
Nads = self.Nads_in_slab
BE = (self.energy - n * self.clean_entry.energy) / Nads - \
sum([ads.energy_per_atom for ads in self.adsorbates])
return BE * Nads if eads else BE | [
"def",
"gibbs_binding_energy",
"(",
"self",
",",
"eads",
"=",
"False",
")",
":",
"n",
"=",
"self",
".",
"get_unit_primitive_area",
"Nads",
"=",
"self",
".",
"Nads_in_slab",
"BE",
"=",
"(",
"self",
".",
"energy",
"-",
"n",
"*",
"self",
".",
"clean_entry",
".",
"energy",
")",
"/",
"Nads",
"-",
"sum",
"(",
"[",
"ads",
".",
"energy_per_atom",
"for",
"ads",
"in",
"self",
".",
"adsorbates",
"]",
")",
"return",
"BE",
"*",
"Nads",
"if",
"eads",
"else",
"BE"
] | 39.1875 | 17.1875 |
def _find_filepath_in_roots(filename):
"""Look for filename in all MEDIA_ROOTS, and return the first one found."""
for root in settings.DJANGO_STATIC_MEDIA_ROOTS:
filepath = _filename2filepath(filename, root)
if os.path.isfile(filepath):
return filepath, root
# havent found it in DJANGO_STATIC_MEDIA_ROOTS look for apps' files if we're
# in DEBUG mode
if settings.DEBUG:
try:
from django.contrib.staticfiles import finders
absolute_path = finders.find(filename)
if absolute_path:
root, filepath = os.path.split(absolute_path)
return absolute_path, root
except ImportError:
pass
return None, None | [
"def",
"_find_filepath_in_roots",
"(",
"filename",
")",
":",
"for",
"root",
"in",
"settings",
".",
"DJANGO_STATIC_MEDIA_ROOTS",
":",
"filepath",
"=",
"_filename2filepath",
"(",
"filename",
",",
"root",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"return",
"filepath",
",",
"root",
"# havent found it in DJANGO_STATIC_MEDIA_ROOTS look for apps' files if we're",
"# in DEBUG mode",
"if",
"settings",
".",
"DEBUG",
":",
"try",
":",
"from",
"django",
".",
"contrib",
".",
"staticfiles",
"import",
"finders",
"absolute_path",
"=",
"finders",
".",
"find",
"(",
"filename",
")",
"if",
"absolute_path",
":",
"root",
",",
"filepath",
"=",
"os",
".",
"path",
".",
"split",
"(",
"absolute_path",
")",
"return",
"absolute_path",
",",
"root",
"except",
"ImportError",
":",
"pass",
"return",
"None",
",",
"None"
] | 40.444444 | 14.5 |
def load_hgnc(adapter, genes=None, ensembl_lines=None, hgnc_lines=None, exac_lines=None, mim2gene_lines=None,
genemap_lines=None, hpo_lines=None, transcripts_lines=None, build='37', omim_api_key=''):
"""Load Genes and transcripts into the database
If no resources are provided the correct ones will be fetched.
Args:
adapter(scout.adapter.MongoAdapter)
genes(dict): If genes are already parsed
ensembl_lines(iterable(str)): Lines formated with ensembl gene information
hgnc_lines(iterable(str)): Lines with gene information from genenames.org
exac_lines(iterable(str)): Lines with information pLi-scores from ExAC
mim2gene(iterable(str)): Lines with map from omim id to gene symbol
genemap_lines(iterable(str)): Lines with information of omim entries
hpo_lines(iterable(str)): Lines information about map from hpo terms to genes
transcripts_lines(iterable): iterable with ensembl transcript lines
build(str): What build to use. Defaults to '37'
"""
gene_objs = load_hgnc_genes(
adapter=adapter,
genes = genes,
ensembl_lines=ensembl_lines,
hgnc_lines=hgnc_lines,
exac_lines=exac_lines,
mim2gene_lines=mim2gene_lines,
genemap_lines=genemap_lines,
hpo_lines=hpo_lines,
build=build,
omim_api_key=omim_api_key,
)
ensembl_genes = {}
for gene_obj in gene_objs:
ensembl_genes[gene_obj['ensembl_id']] = gene_obj
transcript_objs = load_transcripts(
adapter=adapter,
transcripts_lines=transcripts_lines,
build=build,
ensembl_genes=ensembl_genes) | [
"def",
"load_hgnc",
"(",
"adapter",
",",
"genes",
"=",
"None",
",",
"ensembl_lines",
"=",
"None",
",",
"hgnc_lines",
"=",
"None",
",",
"exac_lines",
"=",
"None",
",",
"mim2gene_lines",
"=",
"None",
",",
"genemap_lines",
"=",
"None",
",",
"hpo_lines",
"=",
"None",
",",
"transcripts_lines",
"=",
"None",
",",
"build",
"=",
"'37'",
",",
"omim_api_key",
"=",
"''",
")",
":",
"gene_objs",
"=",
"load_hgnc_genes",
"(",
"adapter",
"=",
"adapter",
",",
"genes",
"=",
"genes",
",",
"ensembl_lines",
"=",
"ensembl_lines",
",",
"hgnc_lines",
"=",
"hgnc_lines",
",",
"exac_lines",
"=",
"exac_lines",
",",
"mim2gene_lines",
"=",
"mim2gene_lines",
",",
"genemap_lines",
"=",
"genemap_lines",
",",
"hpo_lines",
"=",
"hpo_lines",
",",
"build",
"=",
"build",
",",
"omim_api_key",
"=",
"omim_api_key",
",",
")",
"ensembl_genes",
"=",
"{",
"}",
"for",
"gene_obj",
"in",
"gene_objs",
":",
"ensembl_genes",
"[",
"gene_obj",
"[",
"'ensembl_id'",
"]",
"]",
"=",
"gene_obj",
"transcript_objs",
"=",
"load_transcripts",
"(",
"adapter",
"=",
"adapter",
",",
"transcripts_lines",
"=",
"transcripts_lines",
",",
"build",
"=",
"build",
",",
"ensembl_genes",
"=",
"ensembl_genes",
")"
] | 41.121951 | 21.609756 |
def _args_checks_gen(self, decorated_function, function_spec, arg_specs):
""" Generate checks for positional argument testing
:param decorated_function: function decorator
:param function_spec: function inspect information
:param arg_specs: argument specification (same as arg_specs in :meth:`.Verifier.decorate`)
:return: internal structure, that is used by :meth:`.Verifier._args_checks_test`
"""
inspected_args = function_spec.args
args_check = {}
for i in range(len(inspected_args)):
arg_name = inspected_args[i]
if arg_name in arg_specs.keys():
args_check[arg_name] = self.check(arg_specs[arg_name], arg_name, decorated_function)
return args_check | [
"def",
"_args_checks_gen",
"(",
"self",
",",
"decorated_function",
",",
"function_spec",
",",
"arg_specs",
")",
":",
"inspected_args",
"=",
"function_spec",
".",
"args",
"args_check",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"inspected_args",
")",
")",
":",
"arg_name",
"=",
"inspected_args",
"[",
"i",
"]",
"if",
"arg_name",
"in",
"arg_specs",
".",
"keys",
"(",
")",
":",
"args_check",
"[",
"arg_name",
"]",
"=",
"self",
".",
"check",
"(",
"arg_specs",
"[",
"arg_name",
"]",
",",
"arg_name",
",",
"decorated_function",
")",
"return",
"args_check"
] | 37.166667 | 23.166667 |
def unlink_android(self, path, pkg):
""" Unlink's the android project to this library.
1. In the app's android/settings.gradle, it removes the following
lines (if they exist):
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir,
'../venv/packages/<project-name>/android')
2. In the app's android/app/build.gradle, it removes the following
line (if present)
compile project(':<project-name>')
3. In the app's
android/app/src/main/java/<bundle/id>/MainApplication.java,
it removes:
import <package>.<Name>Package;
new <Name>Package(),
If no comma exists it will remove the comma from the previous
line.
"""
bundle_id = self.ctx['bundle_id']
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, 'android', pkg))
if not new_packages:
print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
"unlink!".format(pkg)+Colors.RESET)
return
#: Unlink settings.gradle
if Link.is_settings_linked(settings_gradle, pkg):
#: Remove the two statements
new_settings = [
line for line in settings_gradle.split("\n")
if line.strip() not in [
"include ':{name}'".format(name=pkg),
"project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')".format(path=path,
name=pkg)
]
]
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("\t[Android] {} unlinked settings.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"settings.gradle!".format(pkg))
#: Unlink app/build.gradle
if Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = [
line for line in build_gradle.split("\n")
if line.strip() not in [
"compile project(':{name}')".format(name=pkg),
"api project(':{name}')".format(name=pkg),
]
]
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("\t[Android] {} unlinked in "
"app/build.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
new_app_java = [
line for line in new_app_java
if line.strip() not in [
"import {};".format(javacls),
"new {}()".format(javacls.split(".")[-1]),
"new {}(),".format(javacls.split(".")[-1]),
]
]
#: Now find the last package and remove the comma if it
#: exists
found = False
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
found = True
elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
j = i
#: We removed the last package so add a comma
if not found:
#: This kills any whitespace...
new_app_java[j] = new_app_java[j][
:new_app_java[j].rfind(',')]
else:
print("\t[Android] {} was not linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.RED+"\t[Android] {} Failed to unlink. "
"Reverting due to error: {}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise | [
"def",
"unlink_android",
"(",
"self",
",",
"path",
",",
"pkg",
")",
":",
"bundle_id",
"=",
"self",
".",
"ctx",
"[",
"'bundle_id'",
"]",
"#: Check if it's already linked",
"with",
"open",
"(",
"join",
"(",
"'android'",
",",
"'settings.gradle'",
")",
")",
"as",
"f",
":",
"settings_gradle",
"=",
"f",
".",
"read",
"(",
")",
"with",
"open",
"(",
"join",
"(",
"'android'",
",",
"'app'",
",",
"'build.gradle'",
")",
")",
"as",
"f",
":",
"build_gradle",
"=",
"f",
".",
"read",
"(",
")",
"#: Find the MainApplication.java",
"main_app_java_path",
"=",
"join",
"(",
"'android'",
",",
"'app'",
",",
"'src'",
",",
"'main'",
",",
"'java'",
",",
"join",
"(",
"*",
"bundle_id",
".",
"split",
"(",
"\".\"",
")",
")",
",",
"'MainApplication.java'",
")",
"with",
"open",
"(",
"main_app_java_path",
")",
"as",
"f",
":",
"main_application_java",
"=",
"f",
".",
"read",
"(",
")",
"try",
":",
"#: Now link all the EnamlPackages we can find in the new \"package\"",
"new_packages",
"=",
"Link",
".",
"find_packages",
"(",
"join",
"(",
"path",
",",
"'android'",
",",
"pkg",
")",
")",
"if",
"not",
"new_packages",
":",
"print",
"(",
"Colors",
".",
"RED",
"+",
"\"\\t[Android] {} No EnamlPackages found to \"",
"\"unlink!\"",
".",
"format",
"(",
"pkg",
")",
"+",
"Colors",
".",
"RESET",
")",
"return",
"#: Unlink settings.gradle",
"if",
"Link",
".",
"is_settings_linked",
"(",
"settings_gradle",
",",
"pkg",
")",
":",
"#: Remove the two statements",
"new_settings",
"=",
"[",
"line",
"for",
"line",
"in",
"settings_gradle",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"line",
".",
"strip",
"(",
")",
"not",
"in",
"[",
"\"include ':{name}'\"",
".",
"format",
"(",
"name",
"=",
"pkg",
")",
",",
"\"project(':{name}').projectDir = \"",
"\"new File(rootProject.projectDir, \"",
"\"'../{path}/android/{name}')\"",
".",
"format",
"(",
"path",
"=",
"path",
",",
"name",
"=",
"pkg",
")",
"]",
"]",
"with",
"open",
"(",
"join",
"(",
"'android'",
",",
"'settings.gradle'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"new_settings",
")",
")",
"print",
"(",
"\"\\t[Android] {} unlinked settings.gradle!\"",
".",
"format",
"(",
"pkg",
")",
")",
"else",
":",
"print",
"(",
"\"\\t[Android] {} was not linked in \"",
"\"settings.gradle!\"",
".",
"format",
"(",
"pkg",
")",
")",
"#: Unlink app/build.gradle",
"if",
"Link",
".",
"is_build_linked",
"(",
"build_gradle",
",",
"pkg",
")",
":",
"#: Add two statements",
"new_build",
"=",
"[",
"line",
"for",
"line",
"in",
"build_gradle",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"line",
".",
"strip",
"(",
")",
"not",
"in",
"[",
"\"compile project(':{name}')\"",
".",
"format",
"(",
"name",
"=",
"pkg",
")",
",",
"\"api project(':{name}')\"",
".",
"format",
"(",
"name",
"=",
"pkg",
")",
",",
"]",
"]",
"with",
"open",
"(",
"join",
"(",
"'android'",
",",
"'app'",
",",
"'build.gradle'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"new_build",
")",
")",
"print",
"(",
"\"\\t[Android] {} unlinked in \"",
"\"app/build.gradle!\"",
".",
"format",
"(",
"pkg",
")",
")",
"else",
":",
"print",
"(",
"\"\\t[Android] {} was not linked in \"",
"\"app/build.gradle!\"",
".",
"format",
"(",
"pkg",
")",
")",
"new_app_java",
"=",
"[",
"]",
"for",
"package",
"in",
"new_packages",
":",
"#: Add our import statement",
"javacls",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"package",
")",
"[",
"0",
"]",
".",
"replace",
"(",
"\"/\"",
",",
"\".\"",
")",
"if",
"Link",
".",
"is_app_linked",
"(",
"main_application_java",
",",
"pkg",
",",
"javacls",
")",
":",
"#: Reuse previous if avialable",
"new_app_java",
"=",
"(",
"new_app_java",
"or",
"main_application_java",
".",
"split",
"(",
"\"\\n\"",
")",
")",
"new_app_java",
"=",
"[",
"line",
"for",
"line",
"in",
"new_app_java",
"if",
"line",
".",
"strip",
"(",
")",
"not",
"in",
"[",
"\"import {};\"",
".",
"format",
"(",
"javacls",
")",
",",
"\"new {}()\"",
".",
"format",
"(",
"javacls",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
")",
",",
"\"new {}(),\"",
".",
"format",
"(",
"javacls",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
")",
",",
"]",
"]",
"#: Now find the last package and remove the comma if it",
"#: exists",
"found",
"=",
"False",
"j",
"=",
"0",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"new_app_java",
")",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"line",
".",
"strip",
"(",
")",
",",
"\"new *Package()\"",
")",
":",
"found",
"=",
"True",
"elif",
"fnmatch",
".",
"fnmatch",
"(",
"line",
".",
"strip",
"(",
")",
",",
"\"new *Package(),\"",
")",
":",
"j",
"=",
"i",
"#: We removed the last package so add a comma",
"if",
"not",
"found",
":",
"#: This kills any whitespace...",
"new_app_java",
"[",
"j",
"]",
"=",
"new_app_java",
"[",
"j",
"]",
"[",
":",
"new_app_java",
"[",
"j",
"]",
".",
"rfind",
"(",
"','",
")",
"]",
"else",
":",
"print",
"(",
"\"\\t[Android] {} was not linked in {}!\"",
".",
"format",
"(",
"pkg",
",",
"main_app_java_path",
")",
")",
"if",
"new_app_java",
":",
"with",
"open",
"(",
"main_app_java_path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"new_app_java",
")",
")",
"print",
"(",
"Colors",
".",
"GREEN",
"+",
"\"\\t[Android] {} unlinked successfully!\"",
".",
"format",
"(",
"pkg",
")",
"+",
"Colors",
".",
"RESET",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"Colors",
".",
"RED",
"+",
"\"\\t[Android] {} Failed to unlink. \"",
"\"Reverting due to error: {}\"",
".",
"format",
"(",
"pkg",
",",
"e",
")",
"+",
"Colors",
".",
"RESET",
")",
"#: Undo any changes",
"with",
"open",
"(",
"join",
"(",
"'android'",
",",
"'settings.gradle'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"settings_gradle",
")",
"with",
"open",
"(",
"join",
"(",
"'android'",
",",
"'app'",
",",
"'build.gradle'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"build_gradle",
")",
"with",
"open",
"(",
"main_app_java_path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"main_application_java",
")",
"#: Now blow up",
"raise"
] | 41.695946 | 20.628378 |
def update_scheduled_time(self, when):
"""
Updates a scheduled task's date to the given date. If the task is not
scheduled, a TaskNotFound exception is raised.
"""
tiger = self.tiger
ts = get_timestamp(when)
assert ts
pipeline = tiger.connection.pipeline()
key = tiger._key(SCHEDULED, self.queue)
tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline)
pipeline.zscore(key, self.id)
_, score = pipeline.execute()
if not score:
raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format(
self.id, self.queue, SCHEDULED
))
self._ts = ts | [
"def",
"update_scheduled_time",
"(",
"self",
",",
"when",
")",
":",
"tiger",
"=",
"self",
".",
"tiger",
"ts",
"=",
"get_timestamp",
"(",
"when",
")",
"assert",
"ts",
"pipeline",
"=",
"tiger",
".",
"connection",
".",
"pipeline",
"(",
")",
"key",
"=",
"tiger",
".",
"_key",
"(",
"SCHEDULED",
",",
"self",
".",
"queue",
")",
"tiger",
".",
"scripts",
".",
"zadd",
"(",
"key",
",",
"ts",
",",
"self",
".",
"id",
",",
"mode",
"=",
"'xx'",
",",
"client",
"=",
"pipeline",
")",
"pipeline",
".",
"zscore",
"(",
"key",
",",
"self",
".",
"id",
")",
"_",
",",
"score",
"=",
"pipeline",
".",
"execute",
"(",
")",
"if",
"not",
"score",
":",
"raise",
"TaskNotFound",
"(",
"'Task {} not found in queue \"{}\" in state \"{}\".'",
".",
"format",
"(",
"self",
".",
"id",
",",
"self",
".",
"queue",
",",
"SCHEDULED",
")",
")",
"self",
".",
"_ts",
"=",
"ts"
] | 33.047619 | 18.380952 |
def to_int(data):
"""
:params data: proquint
:returns: proquint decoded into an integer
:type data: string
:rtype: int
"""
if not isinstance(data, basestring):
raise TypeError('Input must be string')
res = 0
for part in data.split('-'):
if len(part) != 5:
raise ValueError('Malformed proquint')
for j in range(5):
try:
if not j % 2:
res <<= 4
res |= CONSONANTS.index(part[j])
else:
res <<= 2
res |= VOWELS.index(part[j])
except ValueError:
raise ValueError('Unknown character \'{!s}\' in proquint'.format(part[j]))
return res | [
"def",
"to_int",
"(",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"'Input must be string'",
")",
"res",
"=",
"0",
"for",
"part",
"in",
"data",
".",
"split",
"(",
"'-'",
")",
":",
"if",
"len",
"(",
"part",
")",
"!=",
"5",
":",
"raise",
"ValueError",
"(",
"'Malformed proquint'",
")",
"for",
"j",
"in",
"range",
"(",
"5",
")",
":",
"try",
":",
"if",
"not",
"j",
"%",
"2",
":",
"res",
"<<=",
"4",
"res",
"|=",
"CONSONANTS",
".",
"index",
"(",
"part",
"[",
"j",
"]",
")",
"else",
":",
"res",
"<<=",
"2",
"res",
"|=",
"VOWELS",
".",
"index",
"(",
"part",
"[",
"j",
"]",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Unknown character \\'{!s}\\' in proquint'",
".",
"format",
"(",
"part",
"[",
"j",
"]",
")",
")",
"return",
"res"
] | 29.2 | 15.6 |
def _async_route(self, msg, in_stream=None):
"""
Arrange for `msg` to be forwarded towards its destination. If its
destination is the local context, then arrange for it to be dispatched
using the local handlers.
This is a lower overhead version of :meth:`route` that may only be
called from the :class:`Broker` thread.
:param Stream in_stream:
If not :data:`None`, the stream the message arrived on. Used for
performing source route verification, to ensure sensitive messages
such as ``CALL_FUNCTION`` arrive only from trusted contexts.
"""
_vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
if len(msg.data) > self.max_message_size:
self._maybe_send_dead(msg, self.too_large_msg % (
self.max_message_size,
))
return
# Perform source verification.
if in_stream:
parent = self._stream_by_id.get(mitogen.parent_id)
expect = self._stream_by_id.get(msg.auth_id, parent)
if in_stream != expect:
LOG.error('%r: bad auth_id: got %r via %r, not %r: %r',
self, msg.auth_id, in_stream, expect, msg)
return
if msg.src_id != msg.auth_id:
expect = self._stream_by_id.get(msg.src_id, parent)
if in_stream != expect:
LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
self, msg.src_id, in_stream, expect, msg)
return
if in_stream.auth_id is not None:
msg.auth_id = in_stream.auth_id
# Maintain a set of IDs the source ever communicated with.
in_stream.egress_ids.add(msg.dst_id)
if msg.dst_id == mitogen.context_id:
return self._invoke(msg, in_stream)
out_stream = self._stream_by_id.get(msg.dst_id)
if out_stream is None:
out_stream = self._stream_by_id.get(mitogen.parent_id)
if out_stream is None:
self._maybe_send_dead(msg, self.no_route_msg,
msg.dst_id, mitogen.context_id)
return
if in_stream and self.unidirectional and not \
(in_stream.is_privileged or out_stream.is_privileged):
self._maybe_send_dead(msg, self.unidirectional_msg,
in_stream.remote_id, out_stream.remote_id)
return
out_stream._send(msg) | [
"def",
"_async_route",
"(",
"self",
",",
"msg",
",",
"in_stream",
"=",
"None",
")",
":",
"_vv",
"and",
"IOLOG",
".",
"debug",
"(",
"'%r._async_route(%r, %r)'",
",",
"self",
",",
"msg",
",",
"in_stream",
")",
"if",
"len",
"(",
"msg",
".",
"data",
")",
">",
"self",
".",
"max_message_size",
":",
"self",
".",
"_maybe_send_dead",
"(",
"msg",
",",
"self",
".",
"too_large_msg",
"%",
"(",
"self",
".",
"max_message_size",
",",
")",
")",
"return",
"# Perform source verification.",
"if",
"in_stream",
":",
"parent",
"=",
"self",
".",
"_stream_by_id",
".",
"get",
"(",
"mitogen",
".",
"parent_id",
")",
"expect",
"=",
"self",
".",
"_stream_by_id",
".",
"get",
"(",
"msg",
".",
"auth_id",
",",
"parent",
")",
"if",
"in_stream",
"!=",
"expect",
":",
"LOG",
".",
"error",
"(",
"'%r: bad auth_id: got %r via %r, not %r: %r'",
",",
"self",
",",
"msg",
".",
"auth_id",
",",
"in_stream",
",",
"expect",
",",
"msg",
")",
"return",
"if",
"msg",
".",
"src_id",
"!=",
"msg",
".",
"auth_id",
":",
"expect",
"=",
"self",
".",
"_stream_by_id",
".",
"get",
"(",
"msg",
".",
"src_id",
",",
"parent",
")",
"if",
"in_stream",
"!=",
"expect",
":",
"LOG",
".",
"error",
"(",
"'%r: bad src_id: got %r via %r, not %r: %r'",
",",
"self",
",",
"msg",
".",
"src_id",
",",
"in_stream",
",",
"expect",
",",
"msg",
")",
"return",
"if",
"in_stream",
".",
"auth_id",
"is",
"not",
"None",
":",
"msg",
".",
"auth_id",
"=",
"in_stream",
".",
"auth_id",
"# Maintain a set of IDs the source ever communicated with.",
"in_stream",
".",
"egress_ids",
".",
"add",
"(",
"msg",
".",
"dst_id",
")",
"if",
"msg",
".",
"dst_id",
"==",
"mitogen",
".",
"context_id",
":",
"return",
"self",
".",
"_invoke",
"(",
"msg",
",",
"in_stream",
")",
"out_stream",
"=",
"self",
".",
"_stream_by_id",
".",
"get",
"(",
"msg",
".",
"dst_id",
")",
"if",
"out_stream",
"is",
"None",
":",
"out_stream",
"=",
"self",
".",
"_stream_by_id",
".",
"get",
"(",
"mitogen",
".",
"parent_id",
")",
"if",
"out_stream",
"is",
"None",
":",
"self",
".",
"_maybe_send_dead",
"(",
"msg",
",",
"self",
".",
"no_route_msg",
",",
"msg",
".",
"dst_id",
",",
"mitogen",
".",
"context_id",
")",
"return",
"if",
"in_stream",
"and",
"self",
".",
"unidirectional",
"and",
"not",
"(",
"in_stream",
".",
"is_privileged",
"or",
"out_stream",
".",
"is_privileged",
")",
":",
"self",
".",
"_maybe_send_dead",
"(",
"msg",
",",
"self",
".",
"unidirectional_msg",
",",
"in_stream",
".",
"remote_id",
",",
"out_stream",
".",
"remote_id",
")",
"return",
"out_stream",
".",
"_send",
"(",
"msg",
")"
] | 39.650794 | 22.126984 |
def do_help(self, arg):
"""h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command.
"help pdb" shows the full pdb documentation.
"help exec" gives help on the ! command.
"""
if not arg:
return cmd.Cmd.do_help(self, arg)
try:
try:
topic = getattr(self, 'help_' + arg)
return topic()
except AttributeError:
command = getattr(self, 'do_' + arg)
except AttributeError:
self.error('No help for %r' % arg)
else:
if sys.flags.optimize >= 2:
self.error('No help for %r; please do not run Python with -OO '
'if you need command help' % arg)
return
self.message(command.__doc__.rstrip()) | [
"def",
"do_help",
"(",
"self",
",",
"arg",
")",
":",
"if",
"not",
"arg",
":",
"return",
"cmd",
".",
"Cmd",
".",
"do_help",
"(",
"self",
",",
"arg",
")",
"try",
":",
"try",
":",
"topic",
"=",
"getattr",
"(",
"self",
",",
"'help_'",
"+",
"arg",
")",
"return",
"topic",
"(",
")",
"except",
"AttributeError",
":",
"command",
"=",
"getattr",
"(",
"self",
",",
"'do_'",
"+",
"arg",
")",
"except",
"AttributeError",
":",
"self",
".",
"error",
"(",
"'No help for %r'",
"%",
"arg",
")",
"else",
":",
"if",
"sys",
".",
"flags",
".",
"optimize",
">=",
"2",
":",
"self",
".",
"error",
"(",
"'No help for %r; please do not run Python with -OO '",
"'if you need command help'",
"%",
"arg",
")",
"return",
"self",
".",
"message",
"(",
"command",
".",
"__doc__",
".",
"rstrip",
"(",
")",
")"
] | 38.434783 | 14.782609 |
def is_reduced_grid(nc, variable):
'''
Returns True if the feature-type of the variable corresponds to a reduced
horizontal grid.
:param netCDF4.Dataset nc: An open netCDF dataset
:param str variable: name of the variable to check
'''
axis_map = get_axis_map(nc, variable)
if 'X' not in axis_map:
return False
if 'Y' not in axis_map:
return False
if 'C' not in axis_map:
return False
compressed_coordinates = axis_map['C']
if len(compressed_coordinates) > 1:
return False
compressed_coordinate = axis_map['C'][0]
for dim in nc.variables[compressed_coordinate].compress.split():
if dim not in nc.dimensions:
return False
return True | [
"def",
"is_reduced_grid",
"(",
"nc",
",",
"variable",
")",
":",
"axis_map",
"=",
"get_axis_map",
"(",
"nc",
",",
"variable",
")",
"if",
"'X'",
"not",
"in",
"axis_map",
":",
"return",
"False",
"if",
"'Y'",
"not",
"in",
"axis_map",
":",
"return",
"False",
"if",
"'C'",
"not",
"in",
"axis_map",
":",
"return",
"False",
"compressed_coordinates",
"=",
"axis_map",
"[",
"'C'",
"]",
"if",
"len",
"(",
"compressed_coordinates",
")",
">",
"1",
":",
"return",
"False",
"compressed_coordinate",
"=",
"axis_map",
"[",
"'C'",
"]",
"[",
"0",
"]",
"for",
"dim",
"in",
"nc",
".",
"variables",
"[",
"compressed_coordinate",
"]",
".",
"compress",
".",
"split",
"(",
")",
":",
"if",
"dim",
"not",
"in",
"nc",
".",
"dimensions",
":",
"return",
"False",
"return",
"True"
] | 28.88 | 19.04 |
def get_available_course_modes(self, request, course_run_id, enterprise_catalog):
"""
Return the available course modes for the course run.
The provided EnterpriseCustomerCatalog is used to filter and order the
course modes returned using the EnterpriseCustomerCatalog's
field "enabled_course_modes".
"""
modes = EnrollmentApiClient().get_course_modes(course_run_id)
if not modes:
LOGGER.warning('Unable to get course modes for course run id {course_run_id}.'.format(
course_run_id=course_run_id
))
messages.add_generic_info_message_for_error(request)
if enterprise_catalog:
# filter and order course modes according to the enterprise catalog
modes = [mode for mode in modes if mode['slug'] in enterprise_catalog.enabled_course_modes]
modes.sort(key=lambda course_mode: enterprise_catalog.enabled_course_modes.index(course_mode['slug']))
if not modes:
LOGGER.info(
'No matching course modes found for course run {course_run_id} in '
'EnterpriseCustomerCatalog [{enterprise_catalog_uuid}]'.format(
course_run_id=course_run_id,
enterprise_catalog_uuid=enterprise_catalog,
)
)
messages.add_generic_info_message_for_error(request)
return modes | [
"def",
"get_available_course_modes",
"(",
"self",
",",
"request",
",",
"course_run_id",
",",
"enterprise_catalog",
")",
":",
"modes",
"=",
"EnrollmentApiClient",
"(",
")",
".",
"get_course_modes",
"(",
"course_run_id",
")",
"if",
"not",
"modes",
":",
"LOGGER",
".",
"warning",
"(",
"'Unable to get course modes for course run id {course_run_id}.'",
".",
"format",
"(",
"course_run_id",
"=",
"course_run_id",
")",
")",
"messages",
".",
"add_generic_info_message_for_error",
"(",
"request",
")",
"if",
"enterprise_catalog",
":",
"# filter and order course modes according to the enterprise catalog",
"modes",
"=",
"[",
"mode",
"for",
"mode",
"in",
"modes",
"if",
"mode",
"[",
"'slug'",
"]",
"in",
"enterprise_catalog",
".",
"enabled_course_modes",
"]",
"modes",
".",
"sort",
"(",
"key",
"=",
"lambda",
"course_mode",
":",
"enterprise_catalog",
".",
"enabled_course_modes",
".",
"index",
"(",
"course_mode",
"[",
"'slug'",
"]",
")",
")",
"if",
"not",
"modes",
":",
"LOGGER",
".",
"info",
"(",
"'No matching course modes found for course run {course_run_id} in '",
"'EnterpriseCustomerCatalog [{enterprise_catalog_uuid}]'",
".",
"format",
"(",
"course_run_id",
"=",
"course_run_id",
",",
"enterprise_catalog_uuid",
"=",
"enterprise_catalog",
",",
")",
")",
"messages",
".",
"add_generic_info_message_for_error",
"(",
"request",
")",
"return",
"modes"
] | 48.3 | 28.033333 |
def _evaluate(self,x,return_indices = False):
'''
Returns the level of the interpolated function at each value in x. Only
called internally by HARKinterpolator1D.__call__ (etc).
'''
return self._evalOrDer(x,True,False)[0] | [
"def",
"_evaluate",
"(",
"self",
",",
"x",
",",
"return_indices",
"=",
"False",
")",
":",
"return",
"self",
".",
"_evalOrDer",
"(",
"x",
",",
"True",
",",
"False",
")",
"[",
"0",
"]"
] | 42.833333 | 22.166667 |
def _get_json_content_from_folder(folder):
"""yield objects from json files in the folder and subfolders."""
for dirpath, dirnames, filenames in os.walk(folder):
for filename in filenames:
if filename.lower().endswith(".json"):
filepath = os.path.join(dirpath, filename)
with open(filepath, "rb") as file:
yield json.loads(file.read().decode("UTF-8")) | [
"def",
"_get_json_content_from_folder",
"(",
"folder",
")",
":",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"folder",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"if",
"filename",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"filename",
")",
"with",
"open",
"(",
"filepath",
",",
"\"rb\"",
")",
"as",
"file",
":",
"yield",
"json",
".",
"loads",
"(",
"file",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"UTF-8\"",
")",
")"
] | 53 | 10.875 |
def extraction(self, event_collection, timeframe=None, timezone=None, filters=None, latest=None,
email=None, property_names=None):
""" Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned
"""
params = self.get_params(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
filters=filters, latest=latest, email=email, property_names=property_names)
return self.api.query("extraction", params) | [
"def",
"extraction",
"(",
"self",
",",
"event_collection",
",",
"timeframe",
"=",
"None",
",",
"timezone",
"=",
"None",
",",
"filters",
"=",
"None",
",",
"latest",
"=",
"None",
",",
"email",
"=",
"None",
",",
"property_names",
"=",
"None",
")",
":",
"params",
"=",
"self",
".",
"get_params",
"(",
"event_collection",
"=",
"event_collection",
",",
"timeframe",
"=",
"timeframe",
",",
"timezone",
"=",
"timezone",
",",
"filters",
"=",
"filters",
",",
"latest",
"=",
"latest",
",",
"email",
"=",
"email",
",",
"property_names",
"=",
"property_names",
")",
"return",
"self",
".",
"api",
".",
"query",
"(",
"\"extraction\"",
",",
"params",
")"
] | 60.681818 | 33.818182 |
def __recv_log_set_exc_and_wait(self, msg, level=None, wait_seconds=CONN_RETRY_DELAY_SECONDS):
"""Equivalent to __send_log_set_exc_and_wait but for receiver thread"""
logger.log(
((logging.DEBUG if self.__recv_exc else logging.ERROR) if level is None else level),
msg,
exc_info=DEBUG_ENABLED
)
self.__recv_exc = exc_info()[1]
self.__end.wait(wait_seconds) | [
"def",
"__recv_log_set_exc_and_wait",
"(",
"self",
",",
"msg",
",",
"level",
"=",
"None",
",",
"wait_seconds",
"=",
"CONN_RETRY_DELAY_SECONDS",
")",
":",
"logger",
".",
"log",
"(",
"(",
"(",
"logging",
".",
"DEBUG",
"if",
"self",
".",
"__recv_exc",
"else",
"logging",
".",
"ERROR",
")",
"if",
"level",
"is",
"None",
"else",
"level",
")",
",",
"msg",
",",
"exc_info",
"=",
"DEBUG_ENABLED",
")",
"self",
".",
"__recv_exc",
"=",
"exc_info",
"(",
")",
"[",
"1",
"]",
"self",
".",
"__end",
".",
"wait",
"(",
"wait_seconds",
")"
] | 47 | 21.777778 |
def add_comment(self, line: str) -> None:
'''Keeping track of "last comment" for section and parameter '''
# the rule is like
#
# # comment line --> add to last comment
# blank line --> clears last comment
# [ ] --> use last comment
# parameter: --> use last comment
# All others: clear last comment
self._last_comment += (' ' if self._last_comment else '') + \
line.lstrip('#').strip() | [
"def",
"add_comment",
"(",
"self",
",",
"line",
":",
"str",
")",
"->",
"None",
":",
"# the rule is like",
"#",
"# # comment line --> add to last comment",
"# blank line --> clears last comment",
"# [ ] --> use last comment",
"# parameter: --> use last comment",
"# All others: clear last comment",
"self",
".",
"_last_comment",
"+=",
"(",
"' '",
"if",
"self",
".",
"_last_comment",
"else",
"''",
")",
"+",
"line",
".",
"lstrip",
"(",
"'#'",
")",
".",
"strip",
"(",
")"
] | 41.909091 | 11.909091 |
def do_handshake(self):
"""Perform a handshake with the peer
This method forces an explicit handshake to be performed with either
the client or server peer.
"""
_logger.debug("Initiating handshake...")
try:
self._wrap_socket_library_call(
lambda: SSL_do_handshake(self._ssl.value),
ERR_HANDSHAKE_TIMEOUT)
except openssl_error() as err:
if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
raise_ssl_error(ERR_PORT_UNREACHABLE, err)
raise
self._handshake_done = True
_logger.debug("...completed handshake") | [
"def",
"do_handshake",
"(",
"self",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Initiating handshake...\"",
")",
"try",
":",
"self",
".",
"_wrap_socket_library_call",
"(",
"lambda",
":",
"SSL_do_handshake",
"(",
"self",
".",
"_ssl",
".",
"value",
")",
",",
"ERR_HANDSHAKE_TIMEOUT",
")",
"except",
"openssl_error",
"(",
")",
"as",
"err",
":",
"if",
"err",
".",
"ssl_error",
"==",
"SSL_ERROR_SYSCALL",
"and",
"err",
".",
"result",
"==",
"-",
"1",
":",
"raise_ssl_error",
"(",
"ERR_PORT_UNREACHABLE",
",",
"err",
")",
"raise",
"self",
".",
"_handshake_done",
"=",
"True",
"_logger",
".",
"debug",
"(",
"\"...completed handshake\"",
")"
] | 36.277778 | 15.777778 |
def save(cls, dct, filename):
r"""
Saves data from the given dictionary into the specified file.
Parameters
----------
dct : dictionary
A dictionary to save to file, presumably obtained from the
``to_dict`` method of this class.
filename : string or path object
The filename to store the dictionary.
"""
fname = cls._parse_filename(filename=filename, ext='dct')
dct = sanitize_dict(dct)
with open(fname, 'wb') as f:
pickle.dump(dct, f) | [
"def",
"save",
"(",
"cls",
",",
"dct",
",",
"filename",
")",
":",
"fname",
"=",
"cls",
".",
"_parse_filename",
"(",
"filename",
"=",
"filename",
",",
"ext",
"=",
"'dct'",
")",
"dct",
"=",
"sanitize_dict",
"(",
"dct",
")",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"dct",
",",
"f",
")"
] | 30.5 | 17.222222 |
def recursive_model_update(model, props):
"""
Recursively updates attributes on a model including other
models. If the type of the new model matches the old model
properties are simply updated, otherwise the model is replaced.
"""
updates = {}
valid_properties = model.properties_with_values()
for k, v in props.items():
if isinstance(v, Model):
nested_model = getattr(model, k)
if type(v) is type(nested_model):
nested_props = v.properties_with_values(include_defaults=False)
recursive_model_update(nested_model, nested_props)
else:
setattr(model, k, v)
elif k in valid_properties and v != valid_properties[k]:
updates[k] = v
model.update(**updates) | [
"def",
"recursive_model_update",
"(",
"model",
",",
"props",
")",
":",
"updates",
"=",
"{",
"}",
"valid_properties",
"=",
"model",
".",
"properties_with_values",
"(",
")",
"for",
"k",
",",
"v",
"in",
"props",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"Model",
")",
":",
"nested_model",
"=",
"getattr",
"(",
"model",
",",
"k",
")",
"if",
"type",
"(",
"v",
")",
"is",
"type",
"(",
"nested_model",
")",
":",
"nested_props",
"=",
"v",
".",
"properties_with_values",
"(",
"include_defaults",
"=",
"False",
")",
"recursive_model_update",
"(",
"nested_model",
",",
"nested_props",
")",
"else",
":",
"setattr",
"(",
"model",
",",
"k",
",",
"v",
")",
"elif",
"k",
"in",
"valid_properties",
"and",
"v",
"!=",
"valid_properties",
"[",
"k",
"]",
":",
"updates",
"[",
"k",
"]",
"=",
"v",
"model",
".",
"update",
"(",
"*",
"*",
"updates",
")"
] | 41.052632 | 14.631579 |
def Email(v):
"""Verify that the value is an Email or not.
>>> s = Schema(Email())
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> s('t@x.com')
't@x.com'
"""
try:
if not v or "@" not in v:
raise EmailInvalid("Invalid Email")
user_part, domain_part = v.rsplit('@', 1)
if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
raise EmailInvalid("Invalid Email")
return v
except:
raise ValueError | [
"def",
"Email",
"(",
"v",
")",
":",
"try",
":",
"if",
"not",
"v",
"or",
"\"@\"",
"not",
"in",
"v",
":",
"raise",
"EmailInvalid",
"(",
"\"Invalid Email\"",
")",
"user_part",
",",
"domain_part",
"=",
"v",
".",
"rsplit",
"(",
"'@'",
",",
"1",
")",
"if",
"not",
"(",
"USER_REGEX",
".",
"match",
"(",
"user_part",
")",
"and",
"DOMAIN_REGEX",
".",
"match",
"(",
"domain_part",
")",
")",
":",
"raise",
"EmailInvalid",
"(",
"\"Invalid Email\"",
")",
"return",
"v",
"except",
":",
"raise",
"ValueError"
] | 29.565217 | 19.608696 |
def from_transformation_string(cls, transformation_string="a,b,c;0,0,0"):
"""
Construct SpaceGroupTransformation from its transformation string.
:param P: matrix
:param p: origin shift vector
:return:
"""
P, p = JonesFaithfulTransformation.parse_transformation_string(
transformation_string)
return cls(P, p) | [
"def",
"from_transformation_string",
"(",
"cls",
",",
"transformation_string",
"=",
"\"a,b,c;0,0,0\"",
")",
":",
"P",
",",
"p",
"=",
"JonesFaithfulTransformation",
".",
"parse_transformation_string",
"(",
"transformation_string",
")",
"return",
"cls",
"(",
"P",
",",
"p",
")"
] | 37.5 | 16.3 |
def _make_info(self, piece_size, progress, walker, piece_callback=None):
""" Create info dict.
"""
# These collect the file descriptions and piece hashes
file_list = []
pieces = []
# Initialize progress state
hashing_secs = time.time()
totalsize = -1 if self._fifo else self._calc_size()
totalhashed = 0
# Start a new piece
sha1sum = hashlib.sha1()
done = 0
filename = None
# Hash all files
for filename in walker:
# Assemble file info
filesize = os.path.getsize(filename)
filepath = filename[len(os.path.dirname(self.datapath) if self._fifo else self.datapath):].lstrip(os.sep)
file_list.append({
"length": filesize,
"path": [fmt.to_utf8(x) for x in fmt.to_unicode(filepath).replace(os.sep, '/').split('/')],
})
self.LOG.debug("Hashing %r, size %d..." % (filename, filesize))
# Open file and hash it
fileoffset = 0
handle = open(filename, "rb")
try:
while fileoffset < filesize:
# Read rest of piece or file, whatever is smaller
chunk = handle.read(min(filesize - fileoffset, piece_size - done))
sha1sum.update(chunk) # bogus pylint: disable=E1101
done += len(chunk)
fileoffset += len(chunk)
totalhashed += len(chunk)
# Piece is done
if done == piece_size:
pieces.append(sha1sum.digest()) # bogus pylint: disable=E1101
if piece_callback:
piece_callback(filename, pieces[-1])
# Start a new piece
sha1sum = hashlib.sha1()
done = 0
# Report progress
if progress:
progress(totalhashed, totalsize)
finally:
handle.close()
# Add hash of partial last piece
if done > 0:
pieces.append(sha1sum.digest()) # bogus pylint: disable=E1103
if piece_callback:
piece_callback(filename, pieces[-1])
# Build the meta dict
metainfo = {
"pieces": b"".join(pieces),
"piece length": piece_size,
"name": os.path.basename(self.datapath),
}
# Handle directory/FIFO vs. single file
if self._fifo or os.path.isdir(self.datapath):
metainfo["files"] = file_list
else:
metainfo["length"] = totalhashed
hashing_secs = time.time() - hashing_secs
self.LOG.info("Hashing of %s took %.1f secs (%s/s)" % (
fmt.human_size(totalhashed).strip(), hashing_secs, fmt.human_size(totalhashed / hashing_secs).strip(),
))
# Return validated info dict
return check_info(metainfo), totalhashed | [
"def",
"_make_info",
"(",
"self",
",",
"piece_size",
",",
"progress",
",",
"walker",
",",
"piece_callback",
"=",
"None",
")",
":",
"# These collect the file descriptions and piece hashes",
"file_list",
"=",
"[",
"]",
"pieces",
"=",
"[",
"]",
"# Initialize progress state",
"hashing_secs",
"=",
"time",
".",
"time",
"(",
")",
"totalsize",
"=",
"-",
"1",
"if",
"self",
".",
"_fifo",
"else",
"self",
".",
"_calc_size",
"(",
")",
"totalhashed",
"=",
"0",
"# Start a new piece",
"sha1sum",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"done",
"=",
"0",
"filename",
"=",
"None",
"# Hash all files",
"for",
"filename",
"in",
"walker",
":",
"# Assemble file info",
"filesize",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
"filepath",
"=",
"filename",
"[",
"len",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"datapath",
")",
"if",
"self",
".",
"_fifo",
"else",
"self",
".",
"datapath",
")",
":",
"]",
".",
"lstrip",
"(",
"os",
".",
"sep",
")",
"file_list",
".",
"append",
"(",
"{",
"\"length\"",
":",
"filesize",
",",
"\"path\"",
":",
"[",
"fmt",
".",
"to_utf8",
"(",
"x",
")",
"for",
"x",
"in",
"fmt",
".",
"to_unicode",
"(",
"filepath",
")",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"]",
",",
"}",
")",
"self",
".",
"LOG",
".",
"debug",
"(",
"\"Hashing %r, size %d...\"",
"%",
"(",
"filename",
",",
"filesize",
")",
")",
"# Open file and hash it",
"fileoffset",
"=",
"0",
"handle",
"=",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"try",
":",
"while",
"fileoffset",
"<",
"filesize",
":",
"# Read rest of piece or file, whatever is smaller",
"chunk",
"=",
"handle",
".",
"read",
"(",
"min",
"(",
"filesize",
"-",
"fileoffset",
",",
"piece_size",
"-",
"done",
")",
")",
"sha1sum",
".",
"update",
"(",
"chunk",
")",
"# bogus pylint: disable=E1101",
"done",
"+=",
"len",
"(",
"chunk",
")",
"fileoffset",
"+=",
"len",
"(",
"chunk",
")",
"totalhashed",
"+=",
"len",
"(",
"chunk",
")",
"# Piece is done",
"if",
"done",
"==",
"piece_size",
":",
"pieces",
".",
"append",
"(",
"sha1sum",
".",
"digest",
"(",
")",
")",
"# bogus pylint: disable=E1101",
"if",
"piece_callback",
":",
"piece_callback",
"(",
"filename",
",",
"pieces",
"[",
"-",
"1",
"]",
")",
"# Start a new piece",
"sha1sum",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"done",
"=",
"0",
"# Report progress",
"if",
"progress",
":",
"progress",
"(",
"totalhashed",
",",
"totalsize",
")",
"finally",
":",
"handle",
".",
"close",
"(",
")",
"# Add hash of partial last piece",
"if",
"done",
">",
"0",
":",
"pieces",
".",
"append",
"(",
"sha1sum",
".",
"digest",
"(",
")",
")",
"# bogus pylint: disable=E1103",
"if",
"piece_callback",
":",
"piece_callback",
"(",
"filename",
",",
"pieces",
"[",
"-",
"1",
"]",
")",
"# Build the meta dict",
"metainfo",
"=",
"{",
"\"pieces\"",
":",
"b\"\"",
".",
"join",
"(",
"pieces",
")",
",",
"\"piece length\"",
":",
"piece_size",
",",
"\"name\"",
":",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"datapath",
")",
",",
"}",
"# Handle directory/FIFO vs. single file",
"if",
"self",
".",
"_fifo",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"datapath",
")",
":",
"metainfo",
"[",
"\"files\"",
"]",
"=",
"file_list",
"else",
":",
"metainfo",
"[",
"\"length\"",
"]",
"=",
"totalhashed",
"hashing_secs",
"=",
"time",
".",
"time",
"(",
")",
"-",
"hashing_secs",
"self",
".",
"LOG",
".",
"info",
"(",
"\"Hashing of %s took %.1f secs (%s/s)\"",
"%",
"(",
"fmt",
".",
"human_size",
"(",
"totalhashed",
")",
".",
"strip",
"(",
")",
",",
"hashing_secs",
",",
"fmt",
".",
"human_size",
"(",
"totalhashed",
"/",
"hashing_secs",
")",
".",
"strip",
"(",
")",
",",
")",
")",
"# Return validated info dict",
"return",
"check_info",
"(",
"metainfo",
")",
",",
"totalhashed"
] | 36.341463 | 19.682927 |
def add_ipv4(self, id_network_ipv4, id_equipamento, descricao):
"""Allocate an IP on a network to an equipment.
Insert new IP for network and associate to the equipment
:param id_network_ipv4: ID for NetworkIPv4.
:param id_equipamento: ID for Equipment.
:param descricao: Description for IP.
:return: Following dictionary:
::
{'ip': {'id': < id_ip >,
'id_network_ipv4': < id_network_ipv4 >,
'oct1’: < oct1 >,
'oct2': < oct2 >,
'oct3': < oct3 >,
'oct4': < oct4 >,
'descricao': < descricao >}}
:raise InvalidParameterError: Invalid ID for NetworkIPv4 or Equipment.
:raise InvalidParameterError: The value of description is invalid.
:raise EquipamentoNaoExisteError: Equipment not found.
:raise RedeIPv4NaoExisteError: NetworkIPv4 not found.
:raise IPNaoDisponivelError: There is no network address is available to create the VLAN.
:raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
ip_map = dict()
ip_map['id_network_ipv4'] = id_network_ipv4
ip_map['description'] = descricao
ip_map['id_equipment'] = id_equipamento
code, xml = self.submit({'ip': ip_map}, 'POST', 'ipv4/')
return self.response(code, xml) | [
"def",
"add_ipv4",
"(",
"self",
",",
"id_network_ipv4",
",",
"id_equipamento",
",",
"descricao",
")",
":",
"ip_map",
"=",
"dict",
"(",
")",
"ip_map",
"[",
"'id_network_ipv4'",
"]",
"=",
"id_network_ipv4",
"ip_map",
"[",
"'description'",
"]",
"=",
"descricao",
"ip_map",
"[",
"'id_equipment'",
"]",
"=",
"id_equipamento",
"code",
",",
"xml",
"=",
"self",
".",
"submit",
"(",
"{",
"'ip'",
":",
"ip_map",
"}",
",",
"'POST'",
",",
"'ipv4/'",
")",
"return",
"self",
".",
"response",
"(",
"code",
",",
"xml",
")"
] | 39.605263 | 21.973684 |
def product_get(self, ids=None, names=None,
include_fields=None, exclude_fields=None,
ptype=None):
"""
Raw wrapper around Product.get
https://bugzilla.readthedocs.io/en/latest/api/core/v1/product.html#get-product
This does not perform any caching like other product API calls.
If ids, names, or ptype is not specified, we default to
ptype=accessible for historical reasons
@ids: List of product IDs to lookup
@names: List of product names to lookup
@ptype: Either 'accessible', 'selectable', or 'enterable'. If
specified, we return data for all those
@include_fields: Only include these fields in the output
@exclude_fields: Do not include these fields in the output
"""
if ids is None and names is None and ptype is None:
ptype = "accessible"
if ptype:
raw = None
if ptype == "accessible":
raw = self._proxy.Product.get_accessible_products()
elif ptype == "selectable":
raw = self._proxy.Product.get_selectable_products()
elif ptype == "enterable":
raw = self._proxy.Product.get_enterable_products()
if raw is None:
raise RuntimeError("Unknown ptype=%s" % ptype)
ids = raw['ids']
log.debug("For ptype=%s found ids=%s", ptype, ids)
kwargs = {}
if ids:
kwargs["ids"] = self._listify(ids)
if names:
kwargs["names"] = self._listify(names)
if include_fields:
kwargs["include_fields"] = include_fields
if exclude_fields:
kwargs["exclude_fields"] = exclude_fields
ret = self._proxy.Product.get(kwargs)
return ret['products'] | [
"def",
"product_get",
"(",
"self",
",",
"ids",
"=",
"None",
",",
"names",
"=",
"None",
",",
"include_fields",
"=",
"None",
",",
"exclude_fields",
"=",
"None",
",",
"ptype",
"=",
"None",
")",
":",
"if",
"ids",
"is",
"None",
"and",
"names",
"is",
"None",
"and",
"ptype",
"is",
"None",
":",
"ptype",
"=",
"\"accessible\"",
"if",
"ptype",
":",
"raw",
"=",
"None",
"if",
"ptype",
"==",
"\"accessible\"",
":",
"raw",
"=",
"self",
".",
"_proxy",
".",
"Product",
".",
"get_accessible_products",
"(",
")",
"elif",
"ptype",
"==",
"\"selectable\"",
":",
"raw",
"=",
"self",
".",
"_proxy",
".",
"Product",
".",
"get_selectable_products",
"(",
")",
"elif",
"ptype",
"==",
"\"enterable\"",
":",
"raw",
"=",
"self",
".",
"_proxy",
".",
"Product",
".",
"get_enterable_products",
"(",
")",
"if",
"raw",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown ptype=%s\"",
"%",
"ptype",
")",
"ids",
"=",
"raw",
"[",
"'ids'",
"]",
"log",
".",
"debug",
"(",
"\"For ptype=%s found ids=%s\"",
",",
"ptype",
",",
"ids",
")",
"kwargs",
"=",
"{",
"}",
"if",
"ids",
":",
"kwargs",
"[",
"\"ids\"",
"]",
"=",
"self",
".",
"_listify",
"(",
"ids",
")",
"if",
"names",
":",
"kwargs",
"[",
"\"names\"",
"]",
"=",
"self",
".",
"_listify",
"(",
"names",
")",
"if",
"include_fields",
":",
"kwargs",
"[",
"\"include_fields\"",
"]",
"=",
"include_fields",
"if",
"exclude_fields",
":",
"kwargs",
"[",
"\"exclude_fields\"",
"]",
"=",
"exclude_fields",
"ret",
"=",
"self",
".",
"_proxy",
".",
"Product",
".",
"get",
"(",
"kwargs",
")",
"return",
"ret",
"[",
"'products'",
"]"
] | 38.425532 | 18.255319 |
def parser_from_buffer(cls, fp):
"""Construct YamlParser from a file pointer."""
yaml = YAML(typ="safe")
return cls(yaml.load(fp)) | [
"def",
"parser_from_buffer",
"(",
"cls",
",",
"fp",
")",
":",
"yaml",
"=",
"YAML",
"(",
"typ",
"=",
"\"safe\"",
")",
"return",
"cls",
"(",
"yaml",
".",
"load",
"(",
"fp",
")",
")"
] | 37.75 | 6 |
def get_param_tuples(url_rule) -> List[Tuple[str, str]]:
"""
Returns a list of parameter tuples in a URL rule, eg::
url_rule = '/users/<string:username>/roles/<int:id>'
param_tuples = get_param_tuples(url_rule)
assert param_tuples == [('string', 'username'), ('int', 'id')]
"""
if not url_rule:
return []
return [(type_[:-1], name) for type_, name
in re.findall(PARAM_NAME_RE, url_rule)] | [
"def",
"get_param_tuples",
"(",
"url_rule",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
":",
"if",
"not",
"url_rule",
":",
"return",
"[",
"]",
"return",
"[",
"(",
"type_",
"[",
":",
"-",
"1",
"]",
",",
"name",
")",
"for",
"type_",
",",
"name",
"in",
"re",
".",
"findall",
"(",
"PARAM_NAME_RE",
",",
"url_rule",
")",
"]"
] | 36.75 | 16.083333 |
def traverse(self):
"""Traverse this linked list.
Yields:
PlasmaObjectFuture: PlasmaObjectFuture instances.
"""
current = self.head
while current is not None:
yield current
current = current.next | [
"def",
"traverse",
"(",
"self",
")",
":",
"current",
"=",
"self",
".",
"head",
"while",
"current",
"is",
"not",
"None",
":",
"yield",
"current",
"current",
"=",
"current",
".",
"next"
] | 26.3 | 14.7 |
def name(self) -> Optional[str]:
"""Returns name specified in Content-Disposition header or None
if missed or header is malformed.
"""
_, params = parse_content_disposition(
self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params, 'name') | [
"def",
"name",
"(",
"self",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"_",
",",
"params",
"=",
"parse_content_disposition",
"(",
"self",
".",
"headers",
".",
"get",
"(",
"CONTENT_DISPOSITION",
")",
")",
"return",
"content_disposition_filename",
"(",
"params",
",",
"'name'",
")"
] | 38.75 | 10.5 |
def _query(action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None,
url='https://api.linode.com/'):
'''
Make a web call to the Linode API.
'''
global LASTCALL
vm_ = get_configured_provider()
ratelimit_sleep = config.get_cloud_config_value(
'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
)
apikey = config.get_cloud_config_value(
'apikey', vm_, __opts__, search_global=False
)
if not isinstance(args, dict):
args = {}
if 'api_key' not in args.keys():
args['api_key'] = apikey
if action and 'api_action' not in args.keys():
args['api_action'] = '{0}.{1}'.format(action, command)
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
now = int(time.mktime(datetime.datetime.now().timetuple()))
if LASTCALL >= now:
time.sleep(ratelimit_sleep)
result = __utils__['http.query'](
url,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
text=True,
status=True,
hide_fields=['api_key', 'rootPass'],
opts=__opts__,
)
if 'ERRORARRAY' in result['dict']:
if result['dict']['ERRORARRAY']:
error_list = []
for error in result['dict']['ERRORARRAY']:
msg = error['ERRORMESSAGE']
if msg == "Authentication failed":
raise SaltCloudSystemExit(
'Linode API Key is expired or invalid'
)
else:
error_list.append(msg)
raise SaltCloudException(
'Linode API reported error(s): {}'.format(", ".join(error_list))
)
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
log.debug('Linode Response Status Code: %s', result['status'])
return result['dict'] | [
"def",
"_query",
"(",
"action",
"=",
"None",
",",
"command",
"=",
"None",
",",
"args",
"=",
"None",
",",
"method",
"=",
"'GET'",
",",
"header_dict",
"=",
"None",
",",
"data",
"=",
"None",
",",
"url",
"=",
"'https://api.linode.com/'",
")",
":",
"global",
"LASTCALL",
"vm_",
"=",
"get_configured_provider",
"(",
")",
"ratelimit_sleep",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'ratelimit_sleep'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"False",
",",
"default",
"=",
"0",
",",
")",
"apikey",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'apikey'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"False",
")",
"if",
"not",
"isinstance",
"(",
"args",
",",
"dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'api_key'",
"not",
"in",
"args",
".",
"keys",
"(",
")",
":",
"args",
"[",
"'api_key'",
"]",
"=",
"apikey",
"if",
"action",
"and",
"'api_action'",
"not",
"in",
"args",
".",
"keys",
"(",
")",
":",
"args",
"[",
"'api_action'",
"]",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"action",
",",
"command",
")",
"if",
"header_dict",
"is",
"None",
":",
"header_dict",
"=",
"{",
"}",
"if",
"method",
"!=",
"'POST'",
":",
"header_dict",
"[",
"'Accept'",
"]",
"=",
"'application/json'",
"decode",
"=",
"True",
"if",
"method",
"==",
"'DELETE'",
":",
"decode",
"=",
"False",
"now",
"=",
"int",
"(",
"time",
".",
"mktime",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"timetuple",
"(",
")",
")",
")",
"if",
"LASTCALL",
">=",
"now",
":",
"time",
".",
"sleep",
"(",
"ratelimit_sleep",
")",
"result",
"=",
"__utils__",
"[",
"'http.query'",
"]",
"(",
"url",
",",
"method",
",",
"params",
"=",
"args",
",",
"data",
"=",
"data",
",",
"header_dict",
"=",
"header_dict",
",",
"decode",
"=",
"decode",
",",
"decode_type",
"=",
"'json'",
",",
"text",
"=",
"True",
",",
"status",
"=",
"True",
",",
"hide_fields",
"=",
"[",
"'api_key'",
",",
"'rootPass'",
"]",
",",
"opts",
"=",
"__opts__",
",",
")",
"if",
"'ERRORARRAY'",
"in",
"result",
"[",
"'dict'",
"]",
":",
"if",
"result",
"[",
"'dict'",
"]",
"[",
"'ERRORARRAY'",
"]",
":",
"error_list",
"=",
"[",
"]",
"for",
"error",
"in",
"result",
"[",
"'dict'",
"]",
"[",
"'ERRORARRAY'",
"]",
":",
"msg",
"=",
"error",
"[",
"'ERRORMESSAGE'",
"]",
"if",
"msg",
"==",
"\"Authentication failed\"",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'Linode API Key is expired or invalid'",
")",
"else",
":",
"error_list",
".",
"append",
"(",
"msg",
")",
"raise",
"SaltCloudException",
"(",
"'Linode API reported error(s): {}'",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"error_list",
")",
")",
")",
"LASTCALL",
"=",
"int",
"(",
"time",
".",
"mktime",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"timetuple",
"(",
")",
")",
")",
"log",
".",
"debug",
"(",
"'Linode Response Status Code: %s'",
",",
"result",
"[",
"'status'",
"]",
")",
"return",
"result",
"[",
"'dict'",
"]"
] | 26.74359 | 20.487179 |
def _wait_for_state_change(self, target_states, update_interval=10):
"""
Blocking wait until target_state reached. update_interval is in seconds.
Warning: state change must begin before calling this method.
"""
while self.state not in target_states:
if self.state == 'error':
raise Exception('server is in error state')
# update server state every 10s
sleep(update_interval)
self.populate() | [
"def",
"_wait_for_state_change",
"(",
"self",
",",
"target_states",
",",
"update_interval",
"=",
"10",
")",
":",
"while",
"self",
".",
"state",
"not",
"in",
"target_states",
":",
"if",
"self",
".",
"state",
"==",
"'error'",
":",
"raise",
"Exception",
"(",
"'server is in error state'",
")",
"# update server state every 10s",
"sleep",
"(",
"update_interval",
")",
"self",
".",
"populate",
"(",
")"
] | 37.230769 | 17.384615 |
def parse_value(parser, event, node): # pylint: disable=unused-argument
""" Parse CIM/XML VALUE element and return the value"""
value = ''
(next_event, next_node) = six.next(parser)
if next_event == pulldom.CHARACTERS:
value = next_node.nodeValue
(next_event, next_node) = six.next(parser)
if not _is_end(next_event, next_node, 'VALUE'):
raise ParseError('Expecting end VALUE')
return value | [
"def",
"parse_value",
"(",
"parser",
",",
"event",
",",
"node",
")",
":",
"# pylint: disable=unused-argument",
"value",
"=",
"''",
"(",
"next_event",
",",
"next_node",
")",
"=",
"six",
".",
"next",
"(",
"parser",
")",
"if",
"next_event",
"==",
"pulldom",
".",
"CHARACTERS",
":",
"value",
"=",
"next_node",
".",
"nodeValue",
"(",
"next_event",
",",
"next_node",
")",
"=",
"six",
".",
"next",
"(",
"parser",
")",
"if",
"not",
"_is_end",
"(",
"next_event",
",",
"next_node",
",",
"'VALUE'",
")",
":",
"raise",
"ParseError",
"(",
"'Expecting end VALUE'",
")",
"return",
"value"
] | 26.8125 | 22.5 |
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line) | [
"def",
"expand_paths",
"(",
"inputs",
")",
":",
"seen",
"=",
"{",
"}",
"for",
"dirname",
"in",
"inputs",
":",
"dirname",
"=",
"normalize_path",
"(",
"dirname",
")",
"if",
"dirname",
"in",
"seen",
":",
"continue",
"seen",
"[",
"dirname",
"]",
"=",
"1",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
":",
"continue",
"files",
"=",
"os",
".",
"listdir",
"(",
"dirname",
")",
"yield",
"dirname",
",",
"files",
"for",
"name",
"in",
"files",
":",
"if",
"not",
"name",
".",
"endswith",
"(",
"'.pth'",
")",
":",
"# We only care about the .pth files",
"continue",
"if",
"name",
"in",
"(",
"'easy-install.pth'",
",",
"'setuptools.pth'",
")",
":",
"# Ignore .pth files that we control",
"continue",
"# Read the .pth file",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"name",
")",
")",
"lines",
"=",
"list",
"(",
"yield_lines",
"(",
"f",
")",
")",
"f",
".",
"close",
"(",
")",
"# Yield existing non-dupe, non-import directory lines from it",
"for",
"line",
"in",
"lines",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"\"import\"",
")",
":",
"line",
"=",
"normalize_path",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"if",
"line",
"not",
"in",
"seen",
":",
"seen",
"[",
"line",
"]",
"=",
"1",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"line",
")",
":",
"continue",
"yield",
"line",
",",
"os",
".",
"listdir",
"(",
"line",
")"
] | 31.282051 | 16.615385 |
def _message_in_range(self, message):
"""
Determine whether the given message is in the range or
it should be ignored (and avoid loading more chunks).
"""
# No entity means message IDs between chats may vary
if self.entity:
if self.reverse:
if message.id <= self.last_id or message.id >= self.max_id:
return False
else:
if message.id >= self.last_id or message.id <= self.min_id:
return False
return True | [
"def",
"_message_in_range",
"(",
"self",
",",
"message",
")",
":",
"# No entity means message IDs between chats may vary",
"if",
"self",
".",
"entity",
":",
"if",
"self",
".",
"reverse",
":",
"if",
"message",
".",
"id",
"<=",
"self",
".",
"last_id",
"or",
"message",
".",
"id",
">=",
"self",
".",
"max_id",
":",
"return",
"False",
"else",
":",
"if",
"message",
".",
"id",
">=",
"self",
".",
"last_id",
"or",
"message",
".",
"id",
"<=",
"self",
".",
"min_id",
":",
"return",
"False",
"return",
"True"
] | 36.2 | 17.666667 |
def query(self,
attributes=None,
filters=None,
only_unique=True,
use_attr_names=False,
dtypes = None
):
"""Queries the dataset to retrieve the contained data.
Args:
attributes (list[str]): Names of attributes to fetch in query.
Attribute names must correspond to valid attributes. See
the attributes property for a list of valid attributes.
filters (dict[str,any]): Dictionary of filters --> values
to filter the dataset by. Filter names and values must
correspond to valid filters and filter values. See the
filters property for a list of valid filters.
only_unique (bool): Whether to return only rows containing
unique values (True) or to include duplicate rows (False).
use_attr_names (bool): Whether to use the attribute names
as column names in the result (True) or the attribute
display names (False).
dtypes (dict[str,any]): Dictionary of attributes --> data types
to describe to pandas how the columns should be handled
Returns:
pandas.DataFrame: DataFrame containing the query results.
"""
# Example query from Ensembl biomart:
#
# <?xml version="1.0" encoding="UTF-8"?>
# <!DOCTYPE Query>
# <Query virtualSchemaName = "default" formatter = "TSV" header = "0"
# uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
# <Dataset name = "hsapiens_gene_ensembl" interface = "default" >
# <Filter name = "chromosome_name" value = "1,2"/>
# <Filter name = "end" value = "10000000"/>
# <Filter name = "start" value = "1"/>
# <Attribute name = "ensembl_gene_id" />
# <Attribute name = "ensembl_transcript_id" />
# </Dataset>
# </Query>
# Setup query element.
root = ElementTree.Element('Query')
root.set('virtualSchemaName', self._virtual_schema)
root.set('formatter', 'TSV')
root.set('header', '1')
root.set('uniqueRows', native_str(int(only_unique)))
root.set('datasetConfigVersion', '0.6')
# Add dataset element.
dataset = ElementTree.SubElement(root, 'Dataset')
dataset.set('name', self.name)
dataset.set('interface', 'default')
# Default to default attributes if none requested.
if attributes is None:
attributes = list(self.default_attributes.keys())
# Add attribute elements.
for name in attributes:
try:
attr = self.attributes[name]
self._add_attr_node(dataset, attr)
except KeyError:
raise BiomartException(
'Unknown attribute {}, check dataset attributes '
'for a list of valid attributes.'.format(name))
if filters is not None:
# Add filter elements.
for name, value in filters.items():
try:
filter_ = self.filters[name]
self._add_filter_node(dataset, filter_, value)
except KeyError:
raise BiomartException(
'Unknown filter {}, check dataset filters '
'for a list of valid filters.'.format(name))
# Fetch response.
response = self.get(query=ElementTree.tostring(root))
# Raise exception if an error occurred.
if 'Query ERROR' in response.text:
raise BiomartException(response.text)
# Parse results into a DataFrame.
try:
result = pd.read_csv(StringIO(response.text), sep='\t', dtype=dtypes)
# Type error is raised of a data type is not understood by pandas
except TypeError as err:
raise ValueError("Non valid data type is used in dtypes")
if use_attr_names:
# Rename columns with attribute names instead of display names.
column_map = {
self.attributes[attr].display_name: attr
for attr in attributes
}
result.rename(columns=column_map, inplace=True)
return result | [
"def",
"query",
"(",
"self",
",",
"attributes",
"=",
"None",
",",
"filters",
"=",
"None",
",",
"only_unique",
"=",
"True",
",",
"use_attr_names",
"=",
"False",
",",
"dtypes",
"=",
"None",
")",
":",
"# Example query from Ensembl biomart:",
"#",
"# <?xml version=\"1.0\" encoding=\"UTF-8\"?>",
"# <!DOCTYPE Query>",
"# <Query virtualSchemaName = \"default\" formatter = \"TSV\" header = \"0\"",
"# uniqueRows = \"0\" count = \"\" datasetConfigVersion = \"0.6\" >",
"# <Dataset name = \"hsapiens_gene_ensembl\" interface = \"default\" >",
"# <Filter name = \"chromosome_name\" value = \"1,2\"/>",
"# <Filter name = \"end\" value = \"10000000\"/>",
"# <Filter name = \"start\" value = \"1\"/>",
"# <Attribute name = \"ensembl_gene_id\" />",
"# <Attribute name = \"ensembl_transcript_id\" />",
"# </Dataset>",
"# </Query>",
"# Setup query element.",
"root",
"=",
"ElementTree",
".",
"Element",
"(",
"'Query'",
")",
"root",
".",
"set",
"(",
"'virtualSchemaName'",
",",
"self",
".",
"_virtual_schema",
")",
"root",
".",
"set",
"(",
"'formatter'",
",",
"'TSV'",
")",
"root",
".",
"set",
"(",
"'header'",
",",
"'1'",
")",
"root",
".",
"set",
"(",
"'uniqueRows'",
",",
"native_str",
"(",
"int",
"(",
"only_unique",
")",
")",
")",
"root",
".",
"set",
"(",
"'datasetConfigVersion'",
",",
"'0.6'",
")",
"# Add dataset element.",
"dataset",
"=",
"ElementTree",
".",
"SubElement",
"(",
"root",
",",
"'Dataset'",
")",
"dataset",
".",
"set",
"(",
"'name'",
",",
"self",
".",
"name",
")",
"dataset",
".",
"set",
"(",
"'interface'",
",",
"'default'",
")",
"# Default to default attributes if none requested.",
"if",
"attributes",
"is",
"None",
":",
"attributes",
"=",
"list",
"(",
"self",
".",
"default_attributes",
".",
"keys",
"(",
")",
")",
"# Add attribute elements.",
"for",
"name",
"in",
"attributes",
":",
"try",
":",
"attr",
"=",
"self",
".",
"attributes",
"[",
"name",
"]",
"self",
".",
"_add_attr_node",
"(",
"dataset",
",",
"attr",
")",
"except",
"KeyError",
":",
"raise",
"BiomartException",
"(",
"'Unknown attribute {}, check dataset attributes '",
"'for a list of valid attributes.'",
".",
"format",
"(",
"name",
")",
")",
"if",
"filters",
"is",
"not",
"None",
":",
"# Add filter elements.",
"for",
"name",
",",
"value",
"in",
"filters",
".",
"items",
"(",
")",
":",
"try",
":",
"filter_",
"=",
"self",
".",
"filters",
"[",
"name",
"]",
"self",
".",
"_add_filter_node",
"(",
"dataset",
",",
"filter_",
",",
"value",
")",
"except",
"KeyError",
":",
"raise",
"BiomartException",
"(",
"'Unknown filter {}, check dataset filters '",
"'for a list of valid filters.'",
".",
"format",
"(",
"name",
")",
")",
"# Fetch response.",
"response",
"=",
"self",
".",
"get",
"(",
"query",
"=",
"ElementTree",
".",
"tostring",
"(",
"root",
")",
")",
"# Raise exception if an error occurred.",
"if",
"'Query ERROR'",
"in",
"response",
".",
"text",
":",
"raise",
"BiomartException",
"(",
"response",
".",
"text",
")",
"# Parse results into a DataFrame.",
"try",
":",
"result",
"=",
"pd",
".",
"read_csv",
"(",
"StringIO",
"(",
"response",
".",
"text",
")",
",",
"sep",
"=",
"'\\t'",
",",
"dtype",
"=",
"dtypes",
")",
"# Type error is raised of a data type is not understood by pandas",
"except",
"TypeError",
"as",
"err",
":",
"raise",
"ValueError",
"(",
"\"Non valid data type is used in dtypes\"",
")",
"if",
"use_attr_names",
":",
"# Rename columns with attribute names instead of display names.",
"column_map",
"=",
"{",
"self",
".",
"attributes",
"[",
"attr",
"]",
".",
"display_name",
":",
"attr",
"for",
"attr",
"in",
"attributes",
"}",
"result",
".",
"rename",
"(",
"columns",
"=",
"column_map",
",",
"inplace",
"=",
"True",
")",
"return",
"result"
] | 40.415094 | 20.575472 |
def close_all_pages(self):
"""Closes all tabs of the states editor"""
states_to_be_closed = []
for state_identifier in self.tabs:
states_to_be_closed.append(state_identifier)
for state_identifier in states_to_be_closed:
self.close_page(state_identifier, delete=False) | [
"def",
"close_all_pages",
"(",
"self",
")",
":",
"states_to_be_closed",
"=",
"[",
"]",
"for",
"state_identifier",
"in",
"self",
".",
"tabs",
":",
"states_to_be_closed",
".",
"append",
"(",
"state_identifier",
")",
"for",
"state_identifier",
"in",
"states_to_be_closed",
":",
"self",
".",
"close_page",
"(",
"state_identifier",
",",
"delete",
"=",
"False",
")"
] | 45.285714 | 10.142857 |
def get_codon(seq, codon_no, start_offset):
"""
This function takes a sequece and a codon number and returns the codon
found in the sequence at that position
"""
seq = seq.replace("-","")
codon_start_pos = int(codon_no - 1)*3 - start_offset
codon = seq[codon_start_pos:codon_start_pos + 3]
return codon | [
"def",
"get_codon",
"(",
"seq",
",",
"codon_no",
",",
"start_offset",
")",
":",
"seq",
"=",
"seq",
".",
"replace",
"(",
"\"-\"",
",",
"\"\"",
")",
"codon_start_pos",
"=",
"int",
"(",
"codon_no",
"-",
"1",
")",
"*",
"3",
"-",
"start_offset",
"codon",
"=",
"seq",
"[",
"codon_start_pos",
":",
"codon_start_pos",
"+",
"3",
"]",
"return",
"codon"
] | 36.333333 | 11.444444 |
def sdb_get_or_set_hash(uri,
                        opts,
                        length=8,
                        chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)',
                        utils=None):
    '''
    Check if value exists in sdb. If it does, return, otherwise generate a
    random string and store it. This can be used for storing secrets in a
    centralized place.

    :param uri: sdb:// URI identifying the secret
    :param opts: Salt options dict used to load utils and back-end config
    :param length: number of characters to generate when no value exists
    :param chars: alphabet to draw random characters from
    :param utils: pre-loaded Salt utils (loaded from ``opts`` when None)
    :return: existing or newly generated value, or False for a non-sdb URI
    '''
    if not isinstance(uri, string_types) or not uri.startswith('sdb://'):
        return False
    if utils is None:
        utils = salt.loader.utils(opts)
    ret = sdb_get(uri, opts, utils=utils)
    if ret is None:
        # No stored value yet: generate and persist a new random secret,
        # then return it directly.  The previous `return ret or val`
        # raised a NameError whenever sdb_get() returned a falsy
        # non-None value (e.g. ''), because `val` was never bound.
        val = ''.join([random.SystemRandom().choice(chars) for _ in range(length)])
        sdb_set(uri, val, opts, utils)
        return val
    return ret
"def",
"sdb_get_or_set_hash",
"(",
"uri",
",",
"opts",
",",
"length",
"=",
"8",
",",
"chars",
"=",
"'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'",
",",
"utils",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"uri",
",",
"string_types",
")",
"or",
"not",
"uri",
".",
"startswith",
"(",
"'sdb://'",
")",
":",
"return",
"False",
"if",
"utils",
"is",
"None",
":",
"utils",
"=",
"salt",
".",
"loader",
".",
"utils",
"(",
"opts",
")",
"ret",
"=",
"sdb_get",
"(",
"uri",
",",
"opts",
",",
"utils",
"=",
"utils",
")",
"if",
"ret",
"is",
"None",
":",
"val",
"=",
"''",
".",
"join",
"(",
"[",
"random",
".",
"SystemRandom",
"(",
")",
".",
"choice",
"(",
"chars",
")",
"for",
"_",
"in",
"range",
"(",
"length",
")",
"]",
")",
"sdb_set",
"(",
"uri",
",",
"val",
",",
"opts",
",",
"utils",
")",
"return",
"ret",
"or",
"val"
] | 32.565217 | 23.869565 |
def ratio_split(amount, ratios):
    """ Split in_value according to the ratios specified in `ratios`
    This is special in that it ensures the returned values always sum to
    in_value (i.e. we avoid losses or gains due to rounding errors). As a
    result, this method returns a list of `Decimal` values with length equal
    to that of `ratios`.

    Examples:
        >>> from decimal import Decimal
        >>> ratio_split(Decimal('10'), [Decimal('1'), Decimal('2')])
        [Decimal('3.33'), Decimal('6.67')]

    Note the returned values sum to the original input of ``10``; a naive
    split would give ``3.33`` and ``6.66``, losing ``0.01``.

    Args:
        amount (Decimal): The amount to be split
        ratios (list[Decimal]): The ratios that will determine the split

    Returns: list(Decimal)
    """
    per_unit = amount / sum(ratios)
    exact = [per_unit * ratio for ratio in ratios]
    cents = Decimal("0.01")
    # Round each share to cents, remembering how much was cut off or
    # added by each rounding step.
    rounded = [share.quantize(cents) for share in exact]
    leftover = sum(share - r for share, r in zip(exact, rounded))
    # Fold the accumulated rounding error (positive or negative) into
    # the final share so the total matches exactly.
    rounded[-1] = (rounded[-1] + leftover).quantize(cents)
    assert sum(rounded) == amount
    return rounded
"def",
"ratio_split",
"(",
"amount",
",",
"ratios",
")",
":",
"ratio_total",
"=",
"sum",
"(",
"ratios",
")",
"divided_value",
"=",
"amount",
"/",
"ratio_total",
"values",
"=",
"[",
"]",
"for",
"ratio",
"in",
"ratios",
":",
"value",
"=",
"divided_value",
"*",
"ratio",
"values",
".",
"append",
"(",
"value",
")",
"# Now round the values, keeping track of the bits we cut off",
"rounded",
"=",
"[",
"v",
".",
"quantize",
"(",
"Decimal",
"(",
"\"0.01\"",
")",
")",
"for",
"v",
"in",
"values",
"]",
"remainders",
"=",
"[",
"v",
"-",
"rounded",
"[",
"i",
"]",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"values",
")",
"]",
"remainder",
"=",
"sum",
"(",
"remainders",
")",
"# Give the last person the (positive or negative) remainder",
"rounded",
"[",
"-",
"1",
"]",
"=",
"(",
"rounded",
"[",
"-",
"1",
"]",
"+",
"remainder",
")",
".",
"quantize",
"(",
"Decimal",
"(",
"\"0.01\"",
")",
")",
"assert",
"sum",
"(",
"rounded",
")",
"==",
"amount",
"return",
"rounded"
] | 34.956522 | 24.369565 |
def extract_acl(cls, acl):
    """ parse an individual ACL (i.e.: world:anyone:cdrwa)

    :param acl: ACL string of the form ``scheme:id:perms``
    :return: an ACL object built via make_acl/make_digest_acl
    :raises cls.BadACL: on malformed input or an unknown scheme
    """
    try:
        scheme, rest = acl.split(":", 1)
        credential = ":".join(rest.split(":")[0:-1])
        cdrwa = rest.split(":")[-1]
    except ValueError:
        raise cls.BadACL("Bad ACL: %s. Format is scheme:id:perms" % (acl))
    if scheme not in cls.valid_schemes:
        raise cls.BadACL("Invalid scheme: %s" % (acl))
    # Each permission flag is simply the presence of its character in the
    # perms field (idiomatic membership tests instead of
    # `True if ... else False`).
    create = "c" in cdrwa
    read = "r" in cdrwa
    write = "w" in cdrwa
    delete = "d" in cdrwa
    admin = "a" in cdrwa
    if scheme == "username_password":
        # Digest ACLs carry `user:password` as the credential.
        try:
            username, password = credential.split(":", 1)
        except ValueError:
            raise cls.BadACL("Bad ACL: %s. Format is scheme:id:perms" % (acl))
        return make_digest_acl(username,
                               password,
                               read,
                               write,
                               create,
                               delete,
                               admin)
    else:
        return make_acl(scheme,
                        credential,
                        read,
                        write,
                        create,
                        delete,
                        admin)
"def",
"extract_acl",
"(",
"cls",
",",
"acl",
")",
":",
"try",
":",
"scheme",
",",
"rest",
"=",
"acl",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"credential",
"=",
"\":\"",
".",
"join",
"(",
"rest",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
":",
"-",
"1",
"]",
")",
"cdrwa",
"=",
"rest",
".",
"split",
"(",
"\":\"",
")",
"[",
"-",
"1",
"]",
"except",
"ValueError",
":",
"raise",
"cls",
".",
"BadACL",
"(",
"\"Bad ACL: %s. Format is scheme:id:perms\"",
"%",
"(",
"acl",
")",
")",
"if",
"scheme",
"not",
"in",
"cls",
".",
"valid_schemes",
":",
"raise",
"cls",
".",
"BadACL",
"(",
"\"Invalid scheme: %s\"",
"%",
"(",
"acl",
")",
")",
"create",
"=",
"True",
"if",
"\"c\"",
"in",
"cdrwa",
"else",
"False",
"read",
"=",
"True",
"if",
"\"r\"",
"in",
"cdrwa",
"else",
"False",
"write",
"=",
"True",
"if",
"\"w\"",
"in",
"cdrwa",
"else",
"False",
"delete",
"=",
"True",
"if",
"\"d\"",
"in",
"cdrwa",
"else",
"False",
"admin",
"=",
"True",
"if",
"\"a\"",
"in",
"cdrwa",
"else",
"False",
"if",
"scheme",
"==",
"\"username_password\"",
":",
"try",
":",
"username",
",",
"password",
"=",
"credential",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"cls",
".",
"BadACL",
"(",
"\"Bad ACL: %s. Format is scheme:id:perms\"",
"%",
"(",
"acl",
")",
")",
"return",
"make_digest_acl",
"(",
"username",
",",
"password",
",",
"read",
",",
"write",
",",
"create",
",",
"delete",
",",
"admin",
")",
"else",
":",
"return",
"make_acl",
"(",
"scheme",
",",
"credential",
",",
"read",
",",
"write",
",",
"create",
",",
"delete",
",",
"admin",
")"
] | 38.578947 | 12.263158 |
def spkw05(handle, body, center, inframe, first, last, segid, gm, n, states,
           epochs):
    # see libspice args for solution to array[][N] problem
    """
    Write an SPK segment of type 5 given a time-ordered set of
    discrete states and epochs, and the gravitational parameter
    of a central body.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw05_c.html
    :param handle: Handle of an SPK file open for writing.
    :type handle: int
    :param body: Body code for ephemeris object.
    :type body: int
    :param center: Body code for the center of motion of the body.
    :type center: int
    :param inframe: The reference frame of the states.
    :type inframe: str
    :param first: First valid time for which states can be computed.
    :type first: float
    :param last: Last valid time for which states can be computed.
    :type last: float
    :param segid: Segment identifier.
    :type segid: str
    :param gm: Gravitational parameter of central body.
    :type gm: float
    :param n: Number of states and epochs.
    :type n: int
    :param states: States.
    :type states: Nx6-Element Array of floats
    :param epochs: Epochs.
    :type epochs: Array of floats
    """
    # Marshal every Python argument into the ctypes representation that
    # the CSPICE C routine expects before the foreign call.
    handle = ctypes.c_int(handle)
    body = ctypes.c_int(body)
    center = ctypes.c_int(center)
    inframe = stypes.stringToCharP(inframe)
    first = ctypes.c_double(first)
    last = ctypes.c_double(last)
    segid = stypes.stringToCharP(segid)
    gm = ctypes.c_double(gm)
    n = ctypes.c_int(n)
    # states is an Nx6 matrix, epochs a length-N vector (see docstring).
    states = stypes.toDoubleMatrix(states)
    epochs = stypes.toDoubleVector(epochs)
    # Delegate the actual segment write to the CSPICE library.
    libspice.spkw05_c(handle, body, center, inframe, first, last, segid, gm, n,
                      states, epochs)
"def",
"spkw05",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"gm",
",",
"n",
",",
"states",
",",
"epochs",
")",
":",
"# see libspice args for solution to array[][N] problem",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"body",
"=",
"ctypes",
".",
"c_int",
"(",
"body",
")",
"center",
"=",
"ctypes",
".",
"c_int",
"(",
"center",
")",
"inframe",
"=",
"stypes",
".",
"stringToCharP",
"(",
"inframe",
")",
"first",
"=",
"ctypes",
".",
"c_double",
"(",
"first",
")",
"last",
"=",
"ctypes",
".",
"c_double",
"(",
"last",
")",
"segid",
"=",
"stypes",
".",
"stringToCharP",
"(",
"segid",
")",
"gm",
"=",
"ctypes",
".",
"c_double",
"(",
"gm",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"n",
")",
"states",
"=",
"stypes",
".",
"toDoubleMatrix",
"(",
"states",
")",
"epochs",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"epochs",
")",
"libspice",
".",
"spkw05_c",
"(",
"handle",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"gm",
",",
"n",
",",
"states",
",",
"epochs",
")"
] | 36.652174 | 15.826087 |
def filter_indices(self, options, verbosity, *args, **kwargs):
    """Filter indices and execute an action for each index."""
    available = {
        idx.__class__.__name__: idx
        for idx in index_builder.indexes
    }
    # Explicit includes win; otherwise start from every known index.
    if options['index']:
        selected = set(options['index'])
    else:
        selected = set(available)
    # Drop excluded indices, rejecting names that do not exist.
    for excluded in options['exclude']:
        if excluded not in available:
            self.invalid_index(excluded)
            return
        selected.discard(excluded)
    # Run the action over whatever survives the include/exclude filters.
    for index_name in selected:
        try:
            index = available[index_name]
        except KeyError:
            self.invalid_index(index_name)
            return
        if verbosity > 0:
            self.stdout.write("Processing index '{}'...".format(index_name))
        self.handle_index(index, *args, **kwargs)
"def",
"filter_indices",
"(",
"self",
",",
"options",
",",
"verbosity",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"index_name_map",
"=",
"{",
"index",
".",
"__class__",
".",
"__name__",
":",
"index",
"for",
"index",
"in",
"index_builder",
".",
"indexes",
"}",
"# Process includes.",
"if",
"options",
"[",
"'index'",
"]",
":",
"indices",
"=",
"set",
"(",
"options",
"[",
"'index'",
"]",
")",
"else",
":",
"indices",
"=",
"set",
"(",
"index_name_map",
".",
"keys",
"(",
")",
")",
"# Process excludes.",
"for",
"index_name",
"in",
"options",
"[",
"'exclude'",
"]",
":",
"if",
"index_name",
"not",
"in",
"index_name_map",
":",
"self",
".",
"invalid_index",
"(",
"index_name",
")",
"return",
"indices",
".",
"discard",
"(",
"index_name",
")",
"# Execute action for each remaining index.",
"for",
"index_name",
"in",
"indices",
":",
"try",
":",
"index",
"=",
"index_name_map",
"[",
"index_name",
"]",
"except",
"KeyError",
":",
"self",
".",
"invalid_index",
"(",
"index_name",
")",
"return",
"if",
"verbosity",
">",
"0",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"\"Processing index '{}'...\"",
".",
"format",
"(",
"index_name",
")",
")",
"self",
".",
"handle_index",
"(",
"index",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 32.6875 | 16.875 |
def set_mutation_type(self, mut_type=''):
    """Sets the mutation type attribute to a single label based on
    attribute flags.

    Kwargs:
        mut_type (str): value to set self.mut_type
    """
    # Caller-supplied label overrides all flag-based classification.
    if mut_type:
        self.mutation_type = mut_type
        return
    # Invalid / unknown / incomplete mutations short-circuit first.
    if not self.is_valid:
        # does not correctly fall into a category
        self.mutation_type = 'not valid'
        return
    if self.unknown_effect:
        self.mutation_type = 'unknown effect'
        return
    if self.is_no_protein:
        self.mutation_type = 'no protein'
        return
    if self.is_missing_info:
        # mutation has a ?
        self.mutation_type = 'missing'
        return
    # Valid mutation: first matching flag wins.  Synonymous must be
    # checked before missense, since a mutation can be flagged as both
    # but is really synonymous in that case.  If no flag matches,
    # self.mutation_type is intentionally left untouched (same as the
    # original elif chain with no final else).
    if self.is_lost_stop:
        self.mutation_type = 'Nonstop_Mutation'
    elif self.is_lost_start:
        self.mutation_type = 'Translation_Start_Site'
    elif self.is_synonymous:
        self.mutation_type = 'Silent'
    elif self.is_missense:
        self.mutation_type = 'Missense_Mutation'
    elif self.is_indel:
        self.mutation_type = 'In_Frame_Indel'
    elif self.is_nonsense_mutation:
        self.mutation_type = 'Nonsense_Mutation'
    elif self.is_frame_shift:
        self.mutation_type = 'Frame_Shift_Indel'
"def",
"set_mutation_type",
"(",
"self",
",",
"mut_type",
"=",
"''",
")",
":",
"if",
"mut_type",
":",
"# user specifies a mutation type",
"self",
".",
"mutation_type",
"=",
"mut_type",
"else",
":",
"# mutation type is taken from object attributes",
"if",
"not",
"self",
".",
"is_valid",
":",
"# does not correctly fall into a category",
"self",
".",
"mutation_type",
"=",
"'not valid'",
"elif",
"self",
".",
"unknown_effect",
":",
"self",
".",
"mutation_type",
"=",
"'unknown effect'",
"elif",
"self",
".",
"is_no_protein",
":",
"self",
".",
"mutation_type",
"=",
"'no protein'",
"elif",
"self",
".",
"is_missing_info",
":",
"# mutation has a ?",
"self",
".",
"mutation_type",
"=",
"'missing'",
"else",
":",
"# valid mutation type to be counted",
"if",
"self",
".",
"is_lost_stop",
":",
"self",
".",
"mutation_type",
"=",
"'Nonstop_Mutation'",
"elif",
"self",
".",
"is_lost_start",
":",
"self",
".",
"mutation_type",
"=",
"'Translation_Start_Site'",
"elif",
"self",
".",
"is_synonymous",
":",
"# synonymous must go before missense since mutations",
"# can be categorized as synonymous and missense. Although",
"# in reality such cases are actually synonymous and not",
"# missense mutations.",
"self",
".",
"mutation_type",
"=",
"'Silent'",
"elif",
"self",
".",
"is_missense",
":",
"self",
".",
"mutation_type",
"=",
"'Missense_Mutation'",
"elif",
"self",
".",
"is_indel",
":",
"self",
".",
"mutation_type",
"=",
"'In_Frame_Indel'",
"elif",
"self",
".",
"is_nonsense_mutation",
":",
"self",
".",
"mutation_type",
"=",
"'Nonsense_Mutation'",
"elif",
"self",
".",
"is_frame_shift",
":",
"self",
".",
"mutation_type",
"=",
"'Frame_Shift_Indel'"
] | 43.904762 | 12.595238 |
def transfer_options(cls, obj, new_obj, backend=None):
    """
    Transfers options for all backends from one object to another.
    Drops any options defined in the supplied drop list.
    """
    if backend is None:
        backend = cls.current_backend
    # Build the option spec path "Type.Group.Label" (empty parts skipped);
    # the group falls back to the new type name when obj uses its default.
    type_name = type(new_obj).__name__
    obj_group = obj.group if obj.group != type(obj).__name__ else type_name
    spec = '.'.join(s for s in (type_name, obj_group, obj.label) if s)
    collected = []
    for option_group in Options._option_groups:
        opts = cls.lookup_options(backend, obj, option_group)
        if opts and opts.kwargs:
            collected.append(Options(option_group, **opts.kwargs))
    if collected:
        StoreOptions.set_options(new_obj, {spec: collected}, backend)
"def",
"transfer_options",
"(",
"cls",
",",
"obj",
",",
"new_obj",
",",
"backend",
"=",
"None",
")",
":",
"backend",
"=",
"cls",
".",
"current_backend",
"if",
"backend",
"is",
"None",
"else",
"backend",
"type_name",
"=",
"type",
"(",
"new_obj",
")",
".",
"__name__",
"group",
"=",
"type_name",
"if",
"obj",
".",
"group",
"==",
"type",
"(",
"obj",
")",
".",
"__name__",
"else",
"obj",
".",
"group",
"spec",
"=",
"'.'",
".",
"join",
"(",
"[",
"s",
"for",
"s",
"in",
"(",
"type_name",
",",
"group",
",",
"obj",
".",
"label",
")",
"if",
"s",
"]",
")",
"options",
"=",
"[",
"]",
"for",
"group",
"in",
"Options",
".",
"_option_groups",
":",
"opts",
"=",
"cls",
".",
"lookup_options",
"(",
"backend",
",",
"obj",
",",
"group",
")",
"if",
"opts",
"and",
"opts",
".",
"kwargs",
":",
"options",
".",
"append",
"(",
"Options",
"(",
"group",
",",
"*",
"*",
"opts",
".",
"kwargs",
")",
")",
"if",
"options",
":",
"StoreOptions",
".",
"set_options",
"(",
"new_obj",
",",
"{",
"spec",
":",
"options",
"}",
",",
"backend",
")"
] | 50.533333 | 19.866667 |
def capture_delete_records(records):
    """Writes all of our delete events to DynamoDB."""
    for rec in records:
        model = create_delete_model(rec)
        if not model:
            # No model could be built for this event; nothing to delete.
            LOG.warning(f'[?] Unable to delete security group. Security group does not exist. Record: {rec}')
            continue
        try:
            # Conditional delete: only remove the item if our event is
            # at least as new as the stored one.
            model.delete(condition=(CurrentSecurityGroupModel.eventTime <= rec['detail']['eventTime']))
        except DeleteError:
            LOG.warning(f'[X] Unable to delete security group. Security group does not exist. Record: {rec}')
"def",
"capture_delete_records",
"(",
"records",
")",
":",
"for",
"rec",
"in",
"records",
":",
"model",
"=",
"create_delete_model",
"(",
"rec",
")",
"if",
"model",
":",
"try",
":",
"model",
".",
"delete",
"(",
"condition",
"=",
"(",
"CurrentSecurityGroupModel",
".",
"eventTime",
"<=",
"rec",
"[",
"'detail'",
"]",
"[",
"'eventTime'",
"]",
")",
")",
"except",
"DeleteError",
":",
"LOG",
".",
"warning",
"(",
"f'[X] Unable to delete security group. Security group does not exist. Record: {rec}'",
")",
"else",
":",
"LOG",
".",
"warning",
"(",
"f'[?] Unable to delete security group. Security group does not exist. Record: {rec}'",
")"
] | 50.818182 | 28.454545 |
def aliasstr(self):
    '''Concatenate the aliases tuple into a string.'''
    # Each alias is prefixed with the namespace and rendered via repr().
    rendered = [repr(self.ns + alias) for alias in self.aliases]
    return ', '.join(rendered)
"def",
"aliasstr",
"(",
"self",
")",
":",
"return",
"', '",
".",
"join",
"(",
"repr",
"(",
"self",
".",
"ns",
"+",
"x",
")",
"for",
"x",
"in",
"self",
".",
"aliases",
")"
] | 43.333333 | 17.333333 |
def guess_type(self, path):
    """Guess the type of a file.

    Argument is a PATH (a filename).

    Return value is a string of the form type/subtype,
    usable for a MIME Content-type header.

    The lookup consults self.extensions_map: first with the extension
    as-is, then lowercased, finally falling back to the '' entry
    (application/octet-stream by convention of the callers).
    """
    _, ext = posixpath.splitext(path)
    mapping = self.extensions_map
    # Exact (case-sensitive) match first, then case-insensitive.
    if ext in mapping:
        return mapping[ext]
    lowered = ext.lower()
    if lowered in mapping:
        return mapping[lowered]
    return mapping['']
"def",
"guess_type",
"(",
"self",
",",
"path",
")",
":",
"base",
",",
"ext",
"=",
"posixpath",
".",
"splitext",
"(",
"path",
")",
"if",
"ext",
"in",
"self",
".",
"extensions_map",
":",
"return",
"self",
".",
"extensions_map",
"[",
"ext",
"]",
"ext",
"=",
"ext",
".",
"lower",
"(",
")",
"if",
"ext",
"in",
"self",
".",
"extensions_map",
":",
"return",
"self",
".",
"extensions_map",
"[",
"ext",
"]",
"else",
":",
"return",
"self",
".",
"extensions_map",
"[",
"''",
"]"
] | 32.956522 | 16.913043 |
def convert_activation(builder, layer, input_names, output_names, keras_layer):
    """
    Convert an activation layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])
    non_linearity = _get_activation_name_from_keras_layer(keras_layer)
    # Add a non-linearity layer
    if non_linearity == 'SOFTMAX':
        builder.add_softmax(name = layer, input_name = input_name,
                            output_name = output_name)
        return
    if non_linearity == 'RELU6':
        # No direct support of RELU with max-activation value - use negate and
        # clip layers
        # Emulates relu6(x) = -max(-relu(x), -6) with four primitive layers.
        relu_output_name = output_name + '_relu'
        builder.add_activation(layer, 'RELU', input_name, relu_output_name)
        # negate it
        neg_output_name = relu_output_name + '_neg'
        builder.add_activation(layer+'__neg__', 'LINEAR', relu_output_name,
                neg_output_name,[-1.0, 0])
        # apply threshold
        clip_output_name = relu_output_name + '_clip'
        builder.add_unary(layer+'__clip__', neg_output_name, clip_output_name,
                'threshold', alpha = -6.0)
        # negate it back
        builder.add_activation(layer+'_neg2', 'LINEAR', clip_output_name,
                output_name,[-1.0, 0])
        return
    if non_linearity == 'SELU':
        # SELU(x) = scale * ELU(x, alpha): ELU layer followed by an
        # elementwise multiply by the fixed SELU scale constant.
        elu_output_name = output_name + '_elu'
        builder.add_activation(layer+'__elu__', 'ELU', input_name, elu_output_name,
                params=1.6732)
        builder.add_elementwise(layer,
                input_names=elu_output_name,
                output_name=output_name,
                mode='MULTIPLY',
                alpha=1.0507)
        return
    # Remaining activations map 1:1 to a builder activation layer; only
    # the parameter payload differs per non-linearity.
    params = None
    if non_linearity == 'UNIT_ELU':
        params = 1.0
        non_linearity = 'ELU'
    elif non_linearity == 'LEAKYRELU':
        params = [keras_layer.alpha]
    elif non_linearity == 'PRELU':
        # PRELU weight sharing must cover the spatial (and optionally
        # channel) axes; anything else is unsupported here.
        shared_axes = list(keras_layer.shared_axes)
        if not (shared_axes == [1,2,3] or shared_axes == [1,2]):
            _utils.raise_error_unsupported_scenario(
                "Shared axis not being [1,2,3] or [1,2]",
                'parametric_relu', layer)
        params = _keras.backend.eval(keras_layer.weights[0])
    elif non_linearity == 'ELU':
        params = keras_layer.alpha
    elif non_linearity == 'THRESHOLDEDRELU':
        params = keras_layer.theta
    else:
        pass # do nothing to parameters
    builder.add_activation(name = layer,
        non_linearity = non_linearity,
        input_name = input_name, output_name = output_name,
        params = params)
"def",
"convert_activation",
"(",
"builder",
",",
"layer",
",",
"input_names",
",",
"output_names",
",",
"keras_layer",
")",
":",
"# Get input and output names",
"input_name",
",",
"output_name",
"=",
"(",
"input_names",
"[",
"0",
"]",
",",
"output_names",
"[",
"0",
"]",
")",
"non_linearity",
"=",
"_get_activation_name_from_keras_layer",
"(",
"keras_layer",
")",
"# Add a non-linearity layer",
"if",
"non_linearity",
"==",
"'SOFTMAX'",
":",
"builder",
".",
"add_softmax",
"(",
"name",
"=",
"layer",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
")",
"return",
"if",
"non_linearity",
"==",
"'RELU6'",
":",
"# No direct support of RELU with max-activation value - use negate and",
"# clip layers",
"relu_output_name",
"=",
"output_name",
"+",
"'_relu'",
"builder",
".",
"add_activation",
"(",
"layer",
",",
"'RELU'",
",",
"input_name",
",",
"relu_output_name",
")",
"# negate it",
"neg_output_name",
"=",
"relu_output_name",
"+",
"'_neg'",
"builder",
".",
"add_activation",
"(",
"layer",
"+",
"'__neg__'",
",",
"'LINEAR'",
",",
"relu_output_name",
",",
"neg_output_name",
",",
"[",
"-",
"1.0",
",",
"0",
"]",
")",
"# apply threshold",
"clip_output_name",
"=",
"relu_output_name",
"+",
"'_clip'",
"builder",
".",
"add_unary",
"(",
"layer",
"+",
"'__clip__'",
",",
"neg_output_name",
",",
"clip_output_name",
",",
"'threshold'",
",",
"alpha",
"=",
"-",
"6.0",
")",
"# negate it back",
"builder",
".",
"add_activation",
"(",
"layer",
"+",
"'_neg2'",
",",
"'LINEAR'",
",",
"clip_output_name",
",",
"output_name",
",",
"[",
"-",
"1.0",
",",
"0",
"]",
")",
"return",
"if",
"non_linearity",
"==",
"'SELU'",
":",
"elu_output_name",
"=",
"output_name",
"+",
"'_elu'",
"builder",
".",
"add_activation",
"(",
"layer",
"+",
"'__elu__'",
",",
"'ELU'",
",",
"input_name",
",",
"elu_output_name",
",",
"params",
"=",
"1.6732",
")",
"builder",
".",
"add_elementwise",
"(",
"layer",
",",
"input_names",
"=",
"elu_output_name",
",",
"output_name",
"=",
"output_name",
",",
"mode",
"=",
"'MULTIPLY'",
",",
"alpha",
"=",
"1.0507",
")",
"return",
"params",
"=",
"None",
"if",
"non_linearity",
"==",
"'UNIT_ELU'",
":",
"params",
"=",
"1.0",
"non_linearity",
"=",
"'ELU'",
"elif",
"non_linearity",
"==",
"'LEAKYRELU'",
":",
"params",
"=",
"[",
"keras_layer",
".",
"alpha",
"]",
"elif",
"non_linearity",
"==",
"'PRELU'",
":",
"shared_axes",
"=",
"list",
"(",
"keras_layer",
".",
"shared_axes",
")",
"if",
"not",
"(",
"shared_axes",
"==",
"[",
"1",
",",
"2",
",",
"3",
"]",
"or",
"shared_axes",
"==",
"[",
"1",
",",
"2",
"]",
")",
":",
"_utils",
".",
"raise_error_unsupported_scenario",
"(",
"\"Shared axis not being [1,2,3] or [1,2]\"",
",",
"'parametric_relu'",
",",
"layer",
")",
"params",
"=",
"_keras",
".",
"backend",
".",
"eval",
"(",
"keras_layer",
".",
"weights",
"[",
"0",
"]",
")",
"elif",
"non_linearity",
"==",
"'ELU'",
":",
"params",
"=",
"keras_layer",
".",
"alpha",
"elif",
"non_linearity",
"==",
"'THRESHOLDEDRELU'",
":",
"params",
"=",
"keras_layer",
".",
"theta",
"else",
":",
"pass",
"# do nothing to parameters",
"builder",
".",
"add_activation",
"(",
"name",
"=",
"layer",
",",
"non_linearity",
"=",
"non_linearity",
",",
"input_name",
"=",
"input_name",
",",
"output_name",
"=",
"output_name",
",",
"params",
"=",
"params",
")"
] | 37.635135 | 16.283784 |
def addToPrepares(self, prepare: Prepare, sender: str):
    """
    Add the specified PREPARE to this replica's list of received
    PREPAREs and try sending COMMIT

    :param prepare: the PREPARE to add to the list
    :param sender: name of the node the PREPARE was received from
    """
    # BLS multi-sig:
    self._bls_bft_replica.process_prepare(prepare, sender)
    self.prepares.addVote(prepare, sender)
    # NOTE(review): presumably releases COMMITs queued for this
    # (viewNo, ppSeqNo) that arrived before enough PREPAREs — confirm.
    self.dequeue_commits(prepare.viewNo, prepare.ppSeqNo)
    self.tryCommit(prepare)
"def",
"addToPrepares",
"(",
"self",
",",
"prepare",
":",
"Prepare",
",",
"sender",
":",
"str",
")",
":",
"# BLS multi-sig:",
"self",
".",
"_bls_bft_replica",
".",
"process_prepare",
"(",
"prepare",
",",
"sender",
")",
"self",
".",
"prepares",
".",
"addVote",
"(",
"prepare",
",",
"sender",
")",
"self",
".",
"dequeue_commits",
"(",
"prepare",
".",
"viewNo",
",",
"prepare",
".",
"ppSeqNo",
")",
"self",
".",
"tryCommit",
"(",
"prepare",
")"
] | 35.538462 | 16.307692 |
def distribution_absent(name, region=None, key=None, keyid=None, profile=None, **kwargs):
    '''
    Ensure a distribution with the given Name tag does not exist.

    Note that CloudFront does not allow directly deleting an enabled
    Distribution. If such is requested, Salt will attempt to first update the
    distribution's status to Disabled, and once that returns success, to then
    delete the resource. THIS CAN TAKE SOME TIME, so be patient :)

    name (string)
        Name of the state definition.

    Name (string)
        Name of the CloudFront distribution to be managed. If not provided, the
        value of ``name`` will be used as a default. The purpose of this
        parameter is only to resolve it to a Resource ID, so be aware that an
        explicit value for ``Id`` below will override any value provided, or
        defaulted, here.

    Id (string)
        The Resource ID of a CloudFront distribution to be managed.

    region (string)
        Region to connect to

    key (string)
        Secret key to use

    keyid (string)
        Access key to use

    profile (dict or string)
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.

    Example:

    .. code-block:: yaml

        Ensure a distribution named my_distribution is gone:
          boto_cloudfront.distribution_absent:
          - Name: my_distribution
    '''
    # Kwarg overrides: an explicit Id wins over Name; `ref` is whichever
    # identifier we quote in log and comment messages.
    Name = kwargs['Name'] if 'Name' in kwargs else name
    Id = kwargs.get('Id')
    ref = kwargs['Id'] if 'Id' in kwargs else Name
    ret = {'name': Id if Id else Name, 'comment': '', 'changes': {}, 'result': True}
    authargs = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    if not Id:
        # Resolve the Name to a unique Resource ID via the distribution's
        # Comment field; fail on lookup error or ambiguity.
        res = __salt__['boto_cloudfront.get_distributions_by_comment'](Comment=Name, **authargs)
        if res is None:
            msg = 'Error dereferencing CloudFront distribution `{}` to a Resource ID.'.format(Name)
            log.error(msg)
            ret['comment'] = msg
            ret['result'] = False
            return ret
        if len(res) > 1:
            msg = ('Multiple CloudFront distibutions matched `{}`, no way to know which to'
                   ' delete.`.'.format(Name))
            log.error(msg)
            ret['comment'] = msg
            ret['result'] = False
            return ret
        if not res:
            # Nothing matched: the desired state already holds.
            msg = 'CloudFront Distribution `{}` already absent.'.format(Name)
            log.info(msg)
            ret['comment'] = msg
            ret['result'] = True
            return ret
        Id = res[0]['Id']
    if not __salt__['boto_cloudfront.distribution_exists'](Id=Id, **authargs):
        msg = 'CloudFront distribution `{}` already absent.'.format(ref)
        log.info(msg)
        ret['comment'] = msg
        return ret
    # Fetch current state: we need the ETag for conditional API calls and
    # the Enabled flag to know whether disabling must happen first.
    old = __salt__['boto_cloudfront.get_distribution_v2'](Id=Id, **authargs)
    if old is None:
        ret['result'] = False
        msg = 'Error getting state of CloudFront distribution `{}`.'.format(ref)
        log.error(msg)
        ret['comment'] = msg
        return ret
    currETag = old['ETag']
    Enabled = old['DistributionConfig']['Enabled']
    if __opts__['test']:
        # Dry-run: report what would happen without touching AWS.
        ret['result'] = None
        ret['comment'] = 'CloudFront distribution `{}` would be {}deleted.'.format(ref,
                ('disabled and ' if Enabled else ''))
        ret['pchanges'] = {'old': old, 'new': None}
        return ret
    comments = []
    if Enabled:
        # CloudFront refuses to delete an enabled distribution, so disable
        # it first and pick up the fresh ETag for the delete call.
        disabled = __salt__['boto_cloudfront.disable_distribution'](Id=Id, **authargs)
        if disabled is None:
            ret['result'] = False
            msg = 'Error disabling CloudFront distribution `{}`'.format(ref)
            log.error(msg)
            ret['comment'] = msg
            return ret
        comments += ['CloudFront distribution `{}` disabled.'.format(ref)]
        currETag = disabled['ETag']
    deleted = __salt__['boto_cloudfront.delete_distribution'](Id=Id, IfMatch=currETag, **authargs)
    if deleted is False:
        ret['result'] = False
        msg = 'Error deleting CloudFront distribution `{}`'.format(ref)
        comments += [msg]
        log.error(msg)
        ret['comment'] = ' '.join(comments)
        return ret
    msg = 'CloudFront distribution `{}` deleted.'.format(ref)
    comments += [msg]
    log.info(msg)
    ret['comment'] = ' '.join(comments)
    ret['changes'] = {'old': old, 'new': None}
    return ret
"def",
"distribution_absent",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"Name",
"=",
"kwargs",
"[",
"'Name'",
"]",
"if",
"'Name'",
"in",
"kwargs",
"else",
"name",
"Id",
"=",
"kwargs",
".",
"get",
"(",
"'Id'",
")",
"ref",
"=",
"kwargs",
"[",
"'Id'",
"]",
"if",
"'Id'",
"in",
"kwargs",
"else",
"Name",
"ret",
"=",
"{",
"'name'",
":",
"Id",
"if",
"Id",
"else",
"Name",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
"}",
"authargs",
"=",
"{",
"'region'",
":",
"region",
",",
"'key'",
":",
"key",
",",
"'keyid'",
":",
"keyid",
",",
"'profile'",
":",
"profile",
"}",
"if",
"not",
"Id",
":",
"res",
"=",
"__salt__",
"[",
"'boto_cloudfront.get_distributions_by_comment'",
"]",
"(",
"Comment",
"=",
"Name",
",",
"*",
"*",
"authargs",
")",
"if",
"res",
"is",
"None",
":",
"msg",
"=",
"'Error dereferencing CloudFront distribution `{}` to a Resource ID.'",
".",
"format",
"(",
"Name",
")",
"log",
".",
"error",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"if",
"len",
"(",
"res",
")",
">",
"1",
":",
"msg",
"=",
"(",
"'Multiple CloudFront distibutions matched `{}`, no way to know which to'",
"' delete.`.'",
".",
"format",
"(",
"Name",
")",
")",
"log",
".",
"error",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"if",
"not",
"res",
":",
"msg",
"=",
"'CloudFront Distribution `{}` already absent.'",
".",
"format",
"(",
"Name",
")",
"log",
".",
"info",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"return",
"ret",
"Id",
"=",
"res",
"[",
"0",
"]",
"[",
"'Id'",
"]",
"if",
"not",
"__salt__",
"[",
"'boto_cloudfront.distribution_exists'",
"]",
"(",
"Id",
"=",
"Id",
",",
"*",
"*",
"authargs",
")",
":",
"msg",
"=",
"'CloudFront distribution `{}` already absent.'",
".",
"format",
"(",
"ref",
")",
"log",
".",
"info",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"return",
"ret",
"old",
"=",
"__salt__",
"[",
"'boto_cloudfront.get_distribution_v2'",
"]",
"(",
"Id",
"=",
"Id",
",",
"*",
"*",
"authargs",
")",
"if",
"old",
"is",
"None",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"msg",
"=",
"'Error getting state of CloudFront distribution `{}`.'",
".",
"format",
"(",
"ref",
")",
"log",
".",
"error",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"return",
"ret",
"currETag",
"=",
"old",
"[",
"'ETag'",
"]",
"Enabled",
"=",
"old",
"[",
"'DistributionConfig'",
"]",
"[",
"'Enabled'",
"]",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'CloudFront distribution `{}` would be {}deleted.'",
".",
"format",
"(",
"ref",
",",
"(",
"'disabled and '",
"if",
"Enabled",
"else",
"''",
")",
")",
"ret",
"[",
"'pchanges'",
"]",
"=",
"{",
"'old'",
":",
"old",
",",
"'new'",
":",
"None",
"}",
"return",
"ret",
"comments",
"=",
"[",
"]",
"if",
"Enabled",
":",
"disabled",
"=",
"__salt__",
"[",
"'boto_cloudfront.disable_distribution'",
"]",
"(",
"Id",
"=",
"Id",
",",
"*",
"*",
"authargs",
")",
"if",
"disabled",
"is",
"None",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"msg",
"=",
"'Error disabling CloudFront distribution `{}`'",
".",
"format",
"(",
"ref",
")",
"log",
".",
"error",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"return",
"ret",
"comments",
"+=",
"[",
"'CloudFront distribution `{}` disabled.'",
".",
"format",
"(",
"ref",
")",
"]",
"currETag",
"=",
"disabled",
"[",
"'ETag'",
"]",
"deleted",
"=",
"__salt__",
"[",
"'boto_cloudfront.delete_distribution'",
"]",
"(",
"Id",
"=",
"Id",
",",
"IfMatch",
"=",
"currETag",
",",
"*",
"*",
"authargs",
")",
"if",
"deleted",
"is",
"False",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"msg",
"=",
"'Error deleting CloudFront distribution `{}`'",
".",
"format",
"(",
"ref",
")",
"comments",
"+=",
"[",
"msg",
"]",
"log",
".",
"error",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"' '",
".",
"join",
"(",
"comments",
")",
"return",
"ret",
"msg",
"=",
"'CloudFront distribution `{}` deleted.'",
".",
"format",
"(",
"ref",
")",
"comments",
"+=",
"[",
"msg",
"]",
"log",
".",
"info",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"' '",
".",
"join",
"(",
"comments",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"old",
",",
"'new'",
":",
"None",
"}",
"return",
"ret"
] | 36.10084 | 24.403361 |
def negate_gate(wordlen, input='x', output='~x'):
    """Two's complement negation: bitwise-invert the word, then add one.

    The intermediate "tmp" wires carry the inverted word into the
    incrementer; the composed circuit is returned.
    """
    inverted = bitwise_negate(wordlen, input, "tmp")
    plus_one = inc_gate(wordlen, "tmp", output)
    return inverted >> plus_one
"def",
"negate_gate",
"(",
"wordlen",
",",
"input",
"=",
"'x'",
",",
"output",
"=",
"'~x'",
")",
":",
"neg",
"=",
"bitwise_negate",
"(",
"wordlen",
",",
"input",
",",
"\"tmp\"",
")",
"inc",
"=",
"inc_gate",
"(",
"wordlen",
",",
"\"tmp\"",
",",
"output",
")",
"return",
"neg",
">>",
"inc"
] | 41.2 | 7.4 |
def split_by_line(content):
    """Split the given content into a list of items by newline.
    Both \r\n and \n are supported (TTY devices on POSIX systems use
    \r\n for newlines in some instances), including content that mixes
    the two styles.
    If the given content is an empty string or a string of only
    whitespace, an empty list will be returned. If the given
    content does not contain any newlines, it will be returned
    as the only element in a single item list.
    Leading and trailing whitespace is removed from all elements
    returned.
    :param str content: Content to split by newlines
    :return: List of items that were separated by newlines.
    :rtype: list
    """
    # Strip first so a single trailing \n or \r\n doesn't produce an
    # empty trailing element.
    stripped = content.strip()
    if not stripped:
        return []
    # BUG fix: the old code split on '\r\n' *or* '\n', so content mixing
    # both styles kept stray '\n' inside elements. Normalizing Windows
    # endings first makes every line boundary a split point.
    return _strip_all(stripped.replace('\r\n', '\n').split('\n'))
"def",
"split_by_line",
"(",
"content",
")",
":",
"# Make sure we don't end up splitting a string with",
"# just a single trailing \\n or \\r\\n into multiple parts.",
"stripped",
"=",
"content",
".",
"strip",
"(",
")",
"if",
"not",
"stripped",
":",
"return",
"[",
"]",
"if",
"'\\r\\n'",
"in",
"stripped",
":",
"return",
"_strip_all",
"(",
"stripped",
".",
"split",
"(",
"'\\r\\n'",
")",
")",
"if",
"'\\n'",
"in",
"stripped",
":",
"return",
"_strip_all",
"(",
"stripped",
".",
"split",
"(",
"'\\n'",
")",
")",
"return",
"_strip_all",
"(",
"[",
"stripped",
"]",
")"
] | 34.533333 | 19.866667 |
def blank_stim(self, type=None, fill=0):
    '''Return a blank copy of this stim.

    If ``type`` is not given, the blank is made the same type as the
    current stim. For a column stim the entries are filled with ``fill``.'''
    blank = copy.copy(self)
    blank.name = 'Blank'
    stim_type = self.type() if type is None else type
    if stim_type == "column":
        num_reps = self.reps
        if num_reps is None:
            if self.type() == "column":
                # Rep count unknown; read the file and count its entries.
                self.read_file()
                num_reps = len(self.column)
            else:
                nl.notify('Error: requested to return a blank column, but I can\'t figure out how many reps to make it!',level=nl.level.error)
        blank.column = [fill] * num_reps
        return blank
    if stim_type == "times":
        blank.times = []
    return blank
"def",
"blank_stim",
"(",
"self",
",",
"type",
"=",
"None",
",",
"fill",
"=",
"0",
")",
":",
"blank",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"blank",
".",
"name",
"=",
"'Blank'",
"if",
"type",
"==",
"None",
":",
"type",
"=",
"self",
".",
"type",
"(",
")",
"if",
"type",
"==",
"\"column\"",
":",
"num_reps",
"=",
"self",
".",
"reps",
"if",
"num_reps",
"==",
"None",
":",
"if",
"self",
".",
"type",
"(",
")",
"==",
"\"column\"",
":",
"self",
".",
"read_file",
"(",
")",
"num_reps",
"=",
"len",
"(",
"self",
".",
"column",
")",
"else",
":",
"nl",
".",
"notify",
"(",
"'Error: requested to return a blank column, but I can\\'t figure out how many reps to make it!'",
",",
"level",
"=",
"nl",
".",
"level",
".",
"error",
")",
"blank",
".",
"column",
"=",
"[",
"fill",
"]",
"*",
"num_reps",
"return",
"blank",
"if",
"type",
"==",
"\"times\"",
":",
"blank",
".",
"times",
"=",
"[",
"]",
"return",
"blank"
] | 41.85 | 18.35 |
def get_all_elements(
    node: astroid.node_classes.NodeNG
) -> Iterable[astroid.node_classes.NodeNG]:
    """Yield every atom contained in *node*, descending into nested
    lists and tuples; a non-sequence node is yielded as-is."""
    if not isinstance(node, (astroid.Tuple, astroid.List)):
        yield node
        return
    for element in node.elts:
        yield from get_all_elements(element)
"def",
"get_all_elements",
"(",
"node",
":",
"astroid",
".",
"node_classes",
".",
"NodeNG",
")",
"->",
"Iterable",
"[",
"astroid",
".",
"node_classes",
".",
"NodeNG",
"]",
":",
"if",
"isinstance",
"(",
"node",
",",
"(",
"astroid",
".",
"Tuple",
",",
"astroid",
".",
"List",
")",
")",
":",
"for",
"child",
"in",
"node",
".",
"elts",
":",
"for",
"e",
"in",
"get_all_elements",
"(",
"child",
")",
":",
"yield",
"e",
"else",
":",
"yield",
"node"
] | 34.9 | 12.4 |
def migrateUp(self):
    """
    Recreate the hooks in the site store to trigger this SubScheduler.

    Looks up the TimedEvent with the greatest time and, if one exists,
    re-registers its transient schedule.
    """
    latest = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending)
    if latest is None:
        return
    self._transientSchedule(latest.time, None)
"def",
"migrateUp",
"(",
"self",
")",
":",
"te",
"=",
"self",
".",
"store",
".",
"findFirst",
"(",
"TimedEvent",
",",
"sort",
"=",
"TimedEvent",
".",
"time",
".",
"descending",
")",
"if",
"te",
"is",
"not",
"None",
":",
"self",
".",
"_transientSchedule",
"(",
"te",
".",
"time",
",",
"None",
")"
] | 38.571429 | 16.571429 |
def fit_from_cfg(cls, df, cfgname, debug=False, outcfgname=None):
    """
    Load a model from a yaml configuration, fit it, and write the
    estimation results back to yaml.

    Parameters
    ----------
    df : DataFrame
        The dataframe which contains the columns to use for the estimation.
    cfgname : string
        The name of the yaml config file which describes the hedonic model.
    debug : boolean, optional (default False)
        Whether to generate debug information on the model.
    outcfgname : string, optional (default cfgname)
        The name of the output yaml config file where estimation results are written into.

    Returns
    -------
    RegressionModel which was used to fit
    """
    logger.debug('start: fit from configuration {}'.format(cfgname))
    model = cls.from_yaml(str_or_buffer=cfgname)
    fit_result = model.fit(df, debug=debug)
    print(fit_result.summary())
    if not outcfgname:
        outcfgname = cfgname
    model.to_yaml(str_or_buffer=outcfgname)
    logger.debug('finish: fit into configuration {}'.format(outcfgname))
    return model
"def",
"fit_from_cfg",
"(",
"cls",
",",
"df",
",",
"cfgname",
",",
"debug",
"=",
"False",
",",
"outcfgname",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'start: fit from configuration {}'",
".",
"format",
"(",
"cfgname",
")",
")",
"hm",
"=",
"cls",
".",
"from_yaml",
"(",
"str_or_buffer",
"=",
"cfgname",
")",
"ret",
"=",
"hm",
".",
"fit",
"(",
"df",
",",
"debug",
"=",
"debug",
")",
"print",
"(",
"ret",
".",
"summary",
"(",
")",
")",
"outcfgname",
"=",
"outcfgname",
"or",
"cfgname",
"hm",
".",
"to_yaml",
"(",
"str_or_buffer",
"=",
"outcfgname",
")",
"logger",
".",
"debug",
"(",
"'finish: fit into configuration {}'",
".",
"format",
"(",
"outcfgname",
")",
")",
"return",
"hm"
] | 41.12 | 19.92 |
def __parse(self, function_meta):
    """ Initialize this lazy function from its meta description.

    Args:
        function_meta (dict): function meta including name, args and kwargs
    """
    self._func = get_mapping_function(
        function_meta["func_name"],
        self.functions_mapping
    )
    self.func_name = self._func.__name__
    self._args = prepare_lazy_data(
        function_meta.get("args", []),
        self.functions_mapping,
        self.check_variables_set
    )
    self._kwargs = prepare_lazy_data(
        function_meta.get("kwargs", {}),
        self.functions_mapping,
        self.check_variables_set
    )
    # P() and ENV() are single-argument shortcuts: exactly one positional
    # argument and no keyword arguments are accepted.
    shortcut_errors = {
        "load_csv_file": "P() should only pass in one argument!",
        "get_os_environ": "ENV() should only pass in one argument!",
    }
    message = shortcut_errors.get(self.func_name)
    if message is not None:
        if len(self._args) != 1 or self._kwargs:
            raise exceptions.ParamsError(message)
        self._args = [self._args[0]]
"def",
"__parse",
"(",
"self",
",",
"function_meta",
")",
":",
"self",
".",
"_func",
"=",
"get_mapping_function",
"(",
"function_meta",
"[",
"\"func_name\"",
"]",
",",
"self",
".",
"functions_mapping",
")",
"self",
".",
"func_name",
"=",
"self",
".",
"_func",
".",
"__name__",
"self",
".",
"_args",
"=",
"prepare_lazy_data",
"(",
"function_meta",
".",
"get",
"(",
"\"args\"",
",",
"[",
"]",
")",
",",
"self",
".",
"functions_mapping",
",",
"self",
".",
"check_variables_set",
")",
"self",
".",
"_kwargs",
"=",
"prepare_lazy_data",
"(",
"function_meta",
".",
"get",
"(",
"\"kwargs\"",
",",
"{",
"}",
")",
",",
"self",
".",
"functions_mapping",
",",
"self",
".",
"check_variables_set",
")",
"if",
"self",
".",
"func_name",
"==",
"\"load_csv_file\"",
":",
"if",
"len",
"(",
"self",
".",
"_args",
")",
"!=",
"1",
"or",
"self",
".",
"_kwargs",
":",
"raise",
"exceptions",
".",
"ParamsError",
"(",
"\"P() should only pass in one argument!\"",
")",
"self",
".",
"_args",
"=",
"[",
"self",
".",
"_args",
"[",
"0",
"]",
"]",
"elif",
"self",
".",
"func_name",
"==",
"\"get_os_environ\"",
":",
"if",
"len",
"(",
"self",
".",
"_args",
")",
"!=",
"1",
"or",
"self",
".",
"_kwargs",
":",
"raise",
"exceptions",
".",
"ParamsError",
"(",
"\"ENV() should only pass in one argument!\"",
")",
"self",
".",
"_args",
"=",
"[",
"self",
".",
"_args",
"[",
"0",
"]",
"]"
] | 37.5 | 13.8 |
def id(self):
    """Return a unique id string for the detected board, or None.

    The BLINKA_FORCEBOARD environment variable, when set, overrides
    detection entirely (useful for testing, or for boards that do not
    report the right ID). Otherwise the id is derived from the detected
    chip: chips shared by several boards are resolved by a dedicated
    ``_*_id`` helper, while unambiguous chips map straight to a module
    constant.
    """
    # There are some times we want to trick the platform detection
    # say if a raspberry pi doesn't have the right ID, or for testing
    try:
        return os.environ['BLINKA_FORCEBOARD']
    except KeyError: # no forced board, continue with testing!
        pass
    chip_id = self.detector.chip.id
    board_id = None
    # Ambiguous chips -> helper methods; one-board chips -> constants.
    if chip_id == ap_chip.BCM2XXX:
        board_id = self._pi_id()
    elif chip_id == ap_chip.AM33XX:
        board_id = self._beaglebone_id()
    elif chip_id == ap_chip.GENERIC_X86:
        board_id = GENERIC_LINUX_PC
    elif chip_id == ap_chip.SUN8I:
        board_id = self._armbian_id()
    elif chip_id == ap_chip.SAMA5:
        board_id = self._sama5_id()
    elif chip_id == ap_chip.ESP8266:
        board_id = FEATHER_HUZZAH
    elif chip_id == ap_chip.SAMD21:
        board_id = FEATHER_M0_EXPRESS
    elif chip_id == ap_chip.STM32:
        board_id = PYBOARD
    elif chip_id == ap_chip.S805:
        board_id = ODROID_C1
    elif chip_id == ap_chip.S905:
        board_id = ODROID_C2
    elif chip_id == ap_chip.FT232H:
        board_id = FTDI_FT232H
    elif chip_id in (ap_chip.T210, ap_chip.T186, ap_chip.T194):
        board_id = self._tegra_id()
    # NOTE(review): unknown chips fall through and return None — assumed
    # intentional; callers should handle a None board id.
    return board_id
"def",
"id",
"(",
"self",
")",
":",
"# There are some times we want to trick the platform detection",
"# say if a raspberry pi doesn't have the right ID, or for testing",
"try",
":",
"return",
"os",
".",
"environ",
"[",
"'BLINKA_FORCEBOARD'",
"]",
"except",
"KeyError",
":",
"# no forced board, continue with testing!",
"pass",
"chip_id",
"=",
"self",
".",
"detector",
".",
"chip",
".",
"id",
"board_id",
"=",
"None",
"if",
"chip_id",
"==",
"ap_chip",
".",
"BCM2XXX",
":",
"board_id",
"=",
"self",
".",
"_pi_id",
"(",
")",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"AM33XX",
":",
"board_id",
"=",
"self",
".",
"_beaglebone_id",
"(",
")",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"GENERIC_X86",
":",
"board_id",
"=",
"GENERIC_LINUX_PC",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"SUN8I",
":",
"board_id",
"=",
"self",
".",
"_armbian_id",
"(",
")",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"SAMA5",
":",
"board_id",
"=",
"self",
".",
"_sama5_id",
"(",
")",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"ESP8266",
":",
"board_id",
"=",
"FEATHER_HUZZAH",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"SAMD21",
":",
"board_id",
"=",
"FEATHER_M0_EXPRESS",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"STM32",
":",
"board_id",
"=",
"PYBOARD",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"S805",
":",
"board_id",
"=",
"ODROID_C1",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"S905",
":",
"board_id",
"=",
"ODROID_C2",
"elif",
"chip_id",
"==",
"ap_chip",
".",
"FT232H",
":",
"board_id",
"=",
"FTDI_FT232H",
"elif",
"chip_id",
"in",
"(",
"ap_chip",
".",
"T210",
",",
"ap_chip",
".",
"T186",
",",
"ap_chip",
".",
"T194",
")",
":",
"board_id",
"=",
"self",
".",
"_tegra_id",
"(",
")",
"return",
"board_id"
] | 37.486486 | 10.513514 |
def __read_device(self):
    """Poll XInput for this gamepad's current state.

    Returns the XinputState on success, ``None`` when the device is not
    connected, and raises RuntimeError for any other XInput error code.
    """
    state = XinputState()
    status = self.manager.xinput.XInputGetState(
        self.__device_number, ctypes.byref(state))
    if status == XINPUT_ERROR_DEVICE_NOT_CONNECTED:
        return None
    if status != XINPUT_ERROR_SUCCESS:
        raise RuntimeError(
            "Unknown error %d attempting to get state of device %d" % (
                status, self.__device_number))
    return state
"def",
"__read_device",
"(",
"self",
")",
":",
"state",
"=",
"XinputState",
"(",
")",
"res",
"=",
"self",
".",
"manager",
".",
"xinput",
".",
"XInputGetState",
"(",
"self",
".",
"__device_number",
",",
"ctypes",
".",
"byref",
"(",
"state",
")",
")",
"if",
"res",
"==",
"XINPUT_ERROR_SUCCESS",
":",
"return",
"state",
"if",
"res",
"!=",
"XINPUT_ERROR_DEVICE_NOT_CONNECTED",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown error %d attempting to get state of device %d\"",
"%",
"(",
"res",
",",
"self",
".",
"__device_number",
")",
")",
"# else (device is not connected)",
"return",
"None"
] | 40.538462 | 11.615385 |
def install_payment_instruction(self, instruction,
                                token_type="Unrestricted",
                                transaction_id=None):
    """
    InstallPaymentInstruction

    :param instruction: The PaymentInstruction to send, for example:
        MyRole=='Caller' orSay 'Roles do not match';
    :param token_type: Token type; defaults to "Unrestricted".
    :param transaction_id: Caller reference; defaults to a freshly
        generated UUID when not supplied.
    :return: The service response from make_request.
    """
    if transaction_id is None:
        transaction_id = uuid.uuid4()
    # Build the request parameters in one literal instead of piecemeal
    # assignments; keys are fixed by the service API.
    params = {
        'PaymentInstruction': instruction,
        'TokenType': token_type,
        'CallerReference': transaction_id,
    }
    return self.make_request("InstallPaymentInstruction", params)
"def",
"install_payment_instruction",
"(",
"self",
",",
"instruction",
",",
"token_type",
"=",
"\"Unrestricted\"",
",",
"transaction_id",
"=",
"None",
")",
":",
"if",
"(",
"transaction_id",
"==",
"None",
")",
":",
"transaction_id",
"=",
"uuid",
".",
"uuid4",
"(",
")",
"params",
"=",
"{",
"}",
"params",
"[",
"'PaymentInstruction'",
"]",
"=",
"instruction",
"params",
"[",
"'TokenType'",
"]",
"=",
"token_type",
"params",
"[",
"'CallerReference'",
"]",
"=",
"transaction_id",
"response",
"=",
"self",
".",
"make_request",
"(",
"\"InstallPaymentInstruction\"",
",",
"params",
")",
"return",
"response"
] | 37.285714 | 14.714286 |
def get_anchor_format(a):
    """
    Extract the resource file-type format from the anchor.
    """
    # Matches either ".<ext>" or "format=<ext>", optionally followed by a
    # query string, at the end of the anchor — e.g. "...format=txt" or
    # "...download.mp4?...".
    match = re.search(r"(?:\.|format=)(\w+)(?:\?.*)?$", a)
    if match is None:
        return None
    return match.group(1)
"def",
"get_anchor_format",
"(",
"a",
")",
":",
"# (. or format=) then (file_extension) then (? or $)",
"# e.g. \"...format=txt\" or \"...download.mp4?...\"",
"fmt",
"=",
"re",
".",
"search",
"(",
"r\"(?:\\.|format=)(\\w+)(?:\\?.*)?$\"",
",",
"a",
")",
"return",
"fmt",
".",
"group",
"(",
"1",
")",
"if",
"fmt",
"else",
"None"
] | 33.333333 | 12.888889 |
def propagateClkRst(obj):
    """
    Distribute the "clk" clock and "rst" reset signals of *obj* to every
    subcomponent; the negated reset is offered on "rst_n" ports.
    """
    clock = obj.clk
    reset = obj.rst
    for child in obj._units:
        _tryConnect(clock, child, 'clk')
        # ~reset is built per child, matching the original wiring.
        _tryConnect(~reset, child, 'rst_n')
        _tryConnect(reset, child, 'rst')
"def",
"propagateClkRst",
"(",
"obj",
")",
":",
"clk",
"=",
"obj",
".",
"clk",
"rst",
"=",
"obj",
".",
"rst",
"for",
"u",
"in",
"obj",
".",
"_units",
":",
"_tryConnect",
"(",
"clk",
",",
"u",
",",
"'clk'",
")",
"_tryConnect",
"(",
"~",
"rst",
",",
"u",
",",
"'rst_n'",
")",
"_tryConnect",
"(",
"rst",
",",
"u",
",",
"'rst'",
")"
] | 24.636364 | 14.636364 |
def thread_tracker_exception(self, raised_exception):
    """ Report that an exception stopped the tracker's event registration.

    :param raised_exception: the exception that was raised
    :return: None
    """
    summary = 'Thread tracker execution was stopped by the exception. Exception: %s' % str(raised_exception)
    for line in (summary, 'Traceback:', traceback.format_exc()):
        print(line)
"def",
"thread_tracker_exception",
"(",
"self",
",",
"raised_exception",
")",
":",
"print",
"(",
"'Thread tracker execution was stopped by the exception. Exception: %s'",
"%",
"str",
"(",
"raised_exception",
")",
")",
"print",
"(",
"'Traceback:'",
")",
"print",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")"
] | 35.2 | 21.2 |
def clear_lock(self, key):
    """
    Delete the lock file associated with *key* from disk.
    """
    os.remove(self._get_lock_path(key))
"def",
"clear_lock",
"(",
"self",
",",
"key",
")",
":",
"lock_path",
"=",
"self",
".",
"_get_lock_path",
"(",
"key",
")",
"os",
".",
"remove",
"(",
"lock_path",
")"
] | 24.833333 | 6.833333 |
def getAngle(self, mode='deg'):
    """ Return the bend angle of this element.

    :param mode: 'deg' for degrees; anything else is treated as radians
    :return: deflecting angle, 0 when the bend-matrix flag is unset, or
        None (after printing a hint) if the matrix was never computed
    """
    if self.refresh is True:
        self.getMatrix()
    try:
        if not self.mflag:
            return 0
        if mode == 'deg':
            return self.bangle / np.pi * 180
        # radians requested
        return self.bangle
    except AttributeError:
        print("Please execute getMatrix() first.")
"def",
"getAngle",
"(",
"self",
",",
"mode",
"=",
"'deg'",
")",
":",
"if",
"self",
".",
"refresh",
"is",
"True",
":",
"self",
".",
"getMatrix",
"(",
")",
"try",
":",
"if",
"self",
".",
"mflag",
":",
"if",
"mode",
"==",
"'deg'",
":",
"return",
"self",
".",
"bangle",
"/",
"np",
".",
"pi",
"*",
"180",
"else",
":",
"# rad",
"return",
"self",
".",
"bangle",
"else",
":",
"return",
"0",
"except",
"AttributeError",
":",
"print",
"(",
"\"Please execute getMatrix() first.\"",
")"
] | 27.368421 | 13.263158 |
def time_slice(self, timerange, surf=False):
    '''
    Slice the object over a time range.

    :parameter timerange: time range to use; string bounds are converted
        through cnes_convert first.
    '''
    if isinstance(timerange[0], str):
        trange = cnes_convert(timerange)[0]
    else:
        trange = timerange
    return self.slice('date', trange, surf=surf)
"def",
"time_slice",
"(",
"self",
",",
"timerange",
",",
"surf",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"timerange",
"[",
"0",
"]",
",",
"str",
")",
":",
"trange",
"=",
"cnes_convert",
"(",
"timerange",
")",
"[",
"0",
"]",
"else",
":",
"trange",
"=",
"timerange",
"return",
"self",
".",
"slice",
"(",
"'date'",
",",
"trange",
",",
"surf",
"=",
"surf",
")"
] | 36.333333 | 17.666667 |
def _raw_split(itxt):
    """
    Parse HTML text into an array of alternating tags and text.

    Source code is a little unintuitive, because it is a state machine
    parser. For better understanding, look at http://bit.ly/1rXRcJj

    Example::
        >>> dhtmlparser._raw_split('<html><tag params="true"></html>')
        ['<html>', '<tag params="true">', '</html>']

    Args:
        itxt (str): Input HTML text, which will be parsed.

    Returns:
        list: List of strings (input splitted to tags and text).
    """
    echr = ""  # the quote character ("/') that opened the current string
    # buff holds the last four characters seen, most recent first
    # (buff[0] is the previous character; see the rotate at loop end).
    buff = ["", "", "", ""]
    content = ""  # characters accumulated for the current chunk
    array = []  # output chunks
    # NOTE(review): initial 0 is assumed to equal StateEnum.content —
    # confirm against the StateEnum definition.
    next_state = 0
    inside_tag = False
    escaped = False
    # Comparison targets are most-recent-first, matching buff's order:
    # ["-", "!", "<"] means the preceding text was "<!-".
    COMMENT_START = ["-", "!", "<"]
    COMMENT_END = ["-", "-"]
    # GC is paused for the per-character loop and re-enabled after —
    # presumably a speed optimization; verify before changing.
    gc.disable()
    for c in itxt:
        # content
        if next_state == StateEnum.content:
            if c == "<":
                if content:
                    array.append(content)
                content = c
                next_state = StateEnum.tag
                inside_tag = False
            else:
                content += c
        # html tag
        elif next_state == StateEnum.tag:
            if c == ">":
                array.append(content + c)
                content = ""
                next_state = StateEnum.content
            elif c == "'" or c == '"':
                echr = c
                content += c
                next_state = StateEnum.parameter
            elif c == "-" and buff[:3] == COMMENT_START:
                # "<!--" just completed: flush text before it, keep "<!-"
                # plus this "-" as the start of the comment chunk.
                if content[:-3]:
                    array.append(content[:-3])
                content = content[-3:] + c
                next_state = StateEnum.comment
            else:
                if c == "<":  # jump back into tag instead of content
                    array.append(content)
                    inside_tag = True
                    content = ""
                content += c
        # quotes "" / ''
        elif next_state == StateEnum.parameter:
            if c == echr and not escaped:  # end of quotes
                next_state = StateEnum.tag
            # unescaped end of line - this is good for invalid HTML like
            # <a href=something">..., because it allows recovery
            if c == "\n" and not escaped and buff[0] == ">":
                next_state = StateEnum.content
                inside_tag = False
            content += c
            escaped = not escaped if c == "\\" else False
        # html comments
        elif next_state == StateEnum.comment:
            if c == ">" and buff[:2] == COMMENT_END:
                # "-->" just completed; resume inside the interrupted tag
                # if the comment appeared within one.
                next_state = StateEnum.tag if inside_tag else StateEnum.content
                inside_tag = False
                array.append(content + c)
                content = ""
            else:
                content += c
        # rotate buffer
        buff = _rotate_buff(buff)
        buff[0] = c
    gc.enable()
    # Flush whatever trailing text/tag fragment remains.
    if content:
        array.append(content)
    return array
"def",
"_raw_split",
"(",
"itxt",
")",
":",
"echr",
"=",
"\"\"",
"buff",
"=",
"[",
"\"\"",
",",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"]",
"content",
"=",
"\"\"",
"array",
"=",
"[",
"]",
"next_state",
"=",
"0",
"inside_tag",
"=",
"False",
"escaped",
"=",
"False",
"COMMENT_START",
"=",
"[",
"\"-\"",
",",
"\"!\"",
",",
"\"<\"",
"]",
"COMMENT_END",
"=",
"[",
"\"-\"",
",",
"\"-\"",
"]",
"gc",
".",
"disable",
"(",
")",
"for",
"c",
"in",
"itxt",
":",
"# content",
"if",
"next_state",
"==",
"StateEnum",
".",
"content",
":",
"if",
"c",
"==",
"\"<\"",
":",
"if",
"content",
":",
"array",
".",
"append",
"(",
"content",
")",
"content",
"=",
"c",
"next_state",
"=",
"StateEnum",
".",
"tag",
"inside_tag",
"=",
"False",
"else",
":",
"content",
"+=",
"c",
"# html tag",
"elif",
"next_state",
"==",
"StateEnum",
".",
"tag",
":",
"if",
"c",
"==",
"\">\"",
":",
"array",
".",
"append",
"(",
"content",
"+",
"c",
")",
"content",
"=",
"\"\"",
"next_state",
"=",
"StateEnum",
".",
"content",
"elif",
"c",
"==",
"\"'\"",
"or",
"c",
"==",
"'\"'",
":",
"echr",
"=",
"c",
"content",
"+=",
"c",
"next_state",
"=",
"StateEnum",
".",
"parameter",
"elif",
"c",
"==",
"\"-\"",
"and",
"buff",
"[",
":",
"3",
"]",
"==",
"COMMENT_START",
":",
"if",
"content",
"[",
":",
"-",
"3",
"]",
":",
"array",
".",
"append",
"(",
"content",
"[",
":",
"-",
"3",
"]",
")",
"content",
"=",
"content",
"[",
"-",
"3",
":",
"]",
"+",
"c",
"next_state",
"=",
"StateEnum",
".",
"comment",
"else",
":",
"if",
"c",
"==",
"\"<\"",
":",
"# jump back into tag instead of content",
"array",
".",
"append",
"(",
"content",
")",
"inside_tag",
"=",
"True",
"content",
"=",
"\"\"",
"content",
"+=",
"c",
"# quotes \"\" / ''",
"elif",
"next_state",
"==",
"StateEnum",
".",
"parameter",
":",
"if",
"c",
"==",
"echr",
"and",
"not",
"escaped",
":",
"# end of quotes",
"next_state",
"=",
"StateEnum",
".",
"tag",
"# unescaped end of line - this is good for invalid HTML like",
"# <a href=something\">..., because it allows recovery",
"if",
"c",
"==",
"\"\\n\"",
"and",
"not",
"escaped",
"and",
"buff",
"[",
"0",
"]",
"==",
"\">\"",
":",
"next_state",
"=",
"StateEnum",
".",
"content",
"inside_tag",
"=",
"False",
"content",
"+=",
"c",
"escaped",
"=",
"not",
"escaped",
"if",
"c",
"==",
"\"\\\\\"",
"else",
"False",
"# html comments",
"elif",
"next_state",
"==",
"StateEnum",
".",
"comment",
":",
"if",
"c",
"==",
"\">\"",
"and",
"buff",
"[",
":",
"2",
"]",
"==",
"COMMENT_END",
":",
"next_state",
"=",
"StateEnum",
".",
"tag",
"if",
"inside_tag",
"else",
"StateEnum",
".",
"content",
"inside_tag",
"=",
"False",
"array",
".",
"append",
"(",
"content",
"+",
"c",
")",
"content",
"=",
"\"\"",
"else",
":",
"content",
"+=",
"c",
"# rotate buffer",
"buff",
"=",
"_rotate_buff",
"(",
"buff",
")",
"buff",
"[",
"0",
"]",
"=",
"c",
"gc",
".",
"enable",
"(",
")",
"if",
"content",
":",
"array",
".",
"append",
"(",
"content",
")",
"return",
"array"
] | 26.342593 | 20.972222 |
def ip_rtm_config_route_static_route_oif_static_route_dest(self, **kwargs):
    """Auto Generated Code

    Build the NETCONF config XML for the Brocade RTM static route
    (config/ip/rtm-config/route/static-route-oif/static-route-dest)
    and hand it to the callback.

    kwargs consumed:
        static_route_oif_type: list key for the static-route-oif entry.
        static_route_oif_name: list key for the static-route-oif entry.
        static_route_dest: value for the static-route-dest leaf.
        callback: optional; defaults to self._callback.

    Returns whatever the callback returns.
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static_route_oif = ET.SubElement(route, "static-route-oif")
    # The two *_key elements identify the list entry being configured.
    static_route_oif_type_key = ET.SubElement(static_route_oif, "static-route-oif-type")
    static_route_oif_type_key.text = kwargs.pop('static_route_oif_type')
    static_route_oif_name_key = ET.SubElement(static_route_oif, "static-route-oif-name")
    static_route_oif_name_key.text = kwargs.pop('static_route_oif_name')
    static_route_dest = ET.SubElement(static_route_oif, "static-route-dest")
    static_route_dest.text = kwargs.pop('static_route_dest')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
"def",
"ip_rtm_config_route_static_route_oif_static_route_dest",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"ip",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"ip\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-common-def\"",
")",
"rtm_config",
"=",
"ET",
".",
"SubElement",
"(",
"ip",
",",
"\"rtm-config\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-rtm\"",
")",
"route",
"=",
"ET",
".",
"SubElement",
"(",
"rtm_config",
",",
"\"route\"",
")",
"static_route_oif",
"=",
"ET",
".",
"SubElement",
"(",
"route",
",",
"\"static-route-oif\"",
")",
"static_route_oif_type_key",
"=",
"ET",
".",
"SubElement",
"(",
"static_route_oif",
",",
"\"static-route-oif-type\"",
")",
"static_route_oif_type_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'static_route_oif_type'",
")",
"static_route_oif_name_key",
"=",
"ET",
".",
"SubElement",
"(",
"static_route_oif",
",",
"\"static-route-oif-name\"",
")",
"static_route_oif_name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'static_route_oif_name'",
")",
"static_route_dest",
"=",
"ET",
".",
"SubElement",
"(",
"static_route_oif",
",",
"\"static-route-dest\"",
")",
"static_route_dest",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'static_route_dest'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 60.058824 | 28.470588 |
def parse_args(args, kwargs):
    """Return a kwargs dictionary by turning args into kwargs.

    Positional *args* are style strings: a foreground color name, an
    "on_<color>" background name, or a style name. They are folded into
    *kwargs* under 'fg', 'bg', and per-style boolean keys, which are then
    validated and returned.

    Raises ValueError for non-string args, unknown style strings,
    duplicate fg/bg specifications, unknown kwargs keys, or bad fg/bg
    values already present in kwargs.
    """
    if 'style' in kwargs:
        # A 'style' kwarg is just more positional style strings.
        args += (kwargs['style'],)
        del kwargs['style']
    for arg in args:
        if not isinstance(arg, (bytes, unicode)):
            raise ValueError("args must be strings:" + repr(args))
        if arg.lower() in FG_COLORS:
            if 'fg' in kwargs:
                raise ValueError("fg specified twice")
            kwargs['fg'] = FG_COLORS[arg]
        elif arg.lower().startswith('on_') and arg[3:].lower() in BG_COLORS:
            if 'bg' in kwargs:
                # BUG fix: this branch previously raised "fg specified
                # twice" for a duplicate *background*.
                raise ValueError("bg specified twice")
            kwargs['bg'] = BG_COLORS[arg[3:]]
        elif arg.lower() in STYLES:
            kwargs[arg] = True
        else:
            raise ValueError("couldn't process arg: " + repr(arg))
    for k in kwargs:
        if k not in ['fg', 'bg'] + list(STYLES.keys()):
            raise ValueError("Can't apply that transformation")
    if 'fg' in kwargs:
        # Accept either a color name or an already-resolved color code.
        if kwargs['fg'] in FG_COLORS:
            kwargs['fg'] = FG_COLORS[kwargs['fg']]
        if kwargs['fg'] not in list(FG_COLORS.values()):
            raise ValueError("Bad fg value: %r" % kwargs['fg'])
    if 'bg' in kwargs:
        if kwargs['bg'] in BG_COLORS:
            kwargs['bg'] = BG_COLORS[kwargs['bg']]
        if kwargs['bg'] not in list(BG_COLORS.values()):
            raise ValueError("Bad bg value: %r" % kwargs['bg'])
    return kwargs
"def",
"parse_args",
"(",
"args",
",",
"kwargs",
")",
":",
"if",
"'style'",
"in",
"kwargs",
":",
"args",
"+=",
"(",
"kwargs",
"[",
"'style'",
"]",
",",
")",
"del",
"kwargs",
"[",
"'style'",
"]",
"for",
"arg",
"in",
"args",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"(",
"bytes",
",",
"unicode",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"args must be strings:\"",
"+",
"repr",
"(",
"args",
")",
")",
"if",
"arg",
".",
"lower",
"(",
")",
"in",
"FG_COLORS",
":",
"if",
"'fg'",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"fg specified twice\"",
")",
"kwargs",
"[",
"'fg'",
"]",
"=",
"FG_COLORS",
"[",
"arg",
"]",
"elif",
"arg",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'on_'",
")",
"and",
"arg",
"[",
"3",
":",
"]",
".",
"lower",
"(",
")",
"in",
"BG_COLORS",
":",
"if",
"'bg'",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"\"fg specified twice\"",
")",
"kwargs",
"[",
"'bg'",
"]",
"=",
"BG_COLORS",
"[",
"arg",
"[",
"3",
":",
"]",
"]",
"elif",
"arg",
".",
"lower",
"(",
")",
"in",
"STYLES",
":",
"kwargs",
"[",
"arg",
"]",
"=",
"True",
"else",
":",
"raise",
"ValueError",
"(",
"\"couldn't process arg: \"",
"+",
"repr",
"(",
"arg",
")",
")",
"for",
"k",
"in",
"kwargs",
":",
"if",
"k",
"not",
"in",
"[",
"'fg'",
",",
"'bg'",
"]",
"+",
"list",
"(",
"STYLES",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Can't apply that transformation\"",
")",
"if",
"'fg'",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"'fg'",
"]",
"in",
"FG_COLORS",
":",
"kwargs",
"[",
"'fg'",
"]",
"=",
"FG_COLORS",
"[",
"kwargs",
"[",
"'fg'",
"]",
"]",
"if",
"kwargs",
"[",
"'fg'",
"]",
"not",
"in",
"list",
"(",
"FG_COLORS",
".",
"values",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Bad fg value: %r\"",
"%",
"kwargs",
"[",
"'fg'",
"]",
")",
"if",
"'bg'",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"'bg'",
"]",
"in",
"BG_COLORS",
":",
"kwargs",
"[",
"'bg'",
"]",
"=",
"BG_COLORS",
"[",
"kwargs",
"[",
"'bg'",
"]",
"]",
"if",
"kwargs",
"[",
"'bg'",
"]",
"not",
"in",
"list",
"(",
"BG_COLORS",
".",
"values",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Bad bg value: %r\"",
"%",
"kwargs",
"[",
"'bg'",
"]",
")",
"return",
"kwargs"
] | 43.875 | 15.34375 |
def get_stp_mst_detail_output_has_more(self, **kwargs):
    """Auto Generated Code

    Build the RPC XML for get-stp-mst-detail with the output/has-more
    leaf populated, and hand it to the callback.

    kwargs consumed:
        has_more: text value for the has-more leaf.
        callback: optional; defaults to self._callback.

    Returns whatever the callback returns.
    """
    # Dead-code fix: the generated original first built an unused
    # ET.Element("config") that was immediately overwritten.
    config = ET.Element("get_stp_mst_detail")
    output = ET.SubElement(config, "output")
    has_more = ET.SubElement(output, "has-more")
    has_more.text = kwargs.pop('has_more')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
"def",
"get_stp_mst_detail_output_has_more",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_stp_mst_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_stp_mst_detail\"",
")",
"config",
"=",
"get_stp_mst_detail",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_stp_mst_detail",
",",
"\"output\"",
")",
"has_more",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"has-more\"",
")",
"has_more",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'has_more'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 39.583333 | 12.333333 |
def get_called_sequence(self, section, fastq=False):
    """ Return the called sequence data, if present.

    :param section: ['template', 'complement' or '2D']
    :param fastq: If True, return the raw multiline fastq string;
        otherwise return a (name, sequence, qstring) tuple.
    :return: Either the fastq string or the (name, sequence, qstring) tuple.
    :raises KeyError: if the section holds no fastq data.
    """
    group = '{}/BaseCalled_{}'.format(self.group_name, section)
    fastq_data = self.handle.get_analysis_dataset(group, 'Fastq')
    if fastq_data is None:
        raise KeyError("No fastq data in: {} {}".format(group, self.filename))
    if fastq:
        return fastq_data
    # Four fastq lines: "@name", sequence, separator ("+"), qualities.
    header, sequence, _, qstring = fastq_data.strip().split('\n')
    return header[1:], sequence, qstring
"def",
"get_called_sequence",
"(",
"self",
",",
"section",
",",
"fastq",
"=",
"False",
")",
":",
"event_group",
"=",
"'{}/BaseCalled_{}'",
".",
"format",
"(",
"self",
".",
"group_name",
",",
"section",
")",
"data",
"=",
"self",
".",
"handle",
".",
"get_analysis_dataset",
"(",
"event_group",
",",
"'Fastq'",
")",
"if",
"data",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"No fastq data in: {} {}\"",
".",
"format",
"(",
"event_group",
",",
"self",
".",
"filename",
")",
")",
"if",
"fastq",
":",
"return",
"data",
"name",
",",
"sequence",
",",
"_",
",",
"qstring",
"=",
"data",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"name",
"=",
"name",
"[",
"1",
":",
"]",
"return",
"name",
",",
"sequence",
",",
"qstring"
] | 45.833333 | 22.5 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.