text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def load_user_catalog():
"""Return a catalog for the platform-specific user Intake directory"""
cat_dir = user_data_dir()
if not os.path.isdir(cat_dir):
return Catalog()
else:
return YAMLFilesCatalog(cat_dir) | [
"def",
"load_user_catalog",
"(",
")",
":",
"cat_dir",
"=",
"user_data_dir",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"cat_dir",
")",
":",
"return",
"Catalog",
"(",
")",
"else",
":",
"return",
"YAMLFilesCatalog",
"(",
"cat_dir",
")"
] | 33.428571 | 11.428571 |
def verify(self, windowSize=None):
"""Checks the stimulus, including expanded parameters for invalidating conditions
:param windowSize: acquistion (recording) window size (seconds)
:type windowSize: float
:returns: str -- error message, if any, 0 otherwise"""
if self.samplerate() is None:
return "Multiple recording files with conflicting samplerates"
msg = self._autoParams.verify()
if msg:
return msg
if self.traceCount() == 0:
return "Test is empty"
if windowSize is not None:
durations = self.expandFunction(self.duration)
# print 'windowSize', windowSize, 'self', durations[0], durations[-1]
# ranges are linear, so we only need to test first and last
if durations[0] > windowSize or durations[-1] > windowSize:
return "Stimulus duration exceeds window duration"
msg = self.verifyExpanded(self.samplerate())
if msg:
return msg
if self.caldb is None or self.calv is None:
return "Test reference voltage not set"
if None in self.voltage_limits:
return "Device voltage limits not set"
return 0 | [
"def",
"verify",
"(",
"self",
",",
"windowSize",
"=",
"None",
")",
":",
"if",
"self",
".",
"samplerate",
"(",
")",
"is",
"None",
":",
"return",
"\"Multiple recording files with conflicting samplerates\"",
"msg",
"=",
"self",
".",
"_autoParams",
".",
"verify",
"(",
")",
"if",
"msg",
":",
"return",
"msg",
"if",
"self",
".",
"traceCount",
"(",
")",
"==",
"0",
":",
"return",
"\"Test is empty\"",
"if",
"windowSize",
"is",
"not",
"None",
":",
"durations",
"=",
"self",
".",
"expandFunction",
"(",
"self",
".",
"duration",
")",
"# print 'windowSize', windowSize, 'self', durations[0], durations[-1]",
"# ranges are linear, so we only need to test first and last",
"if",
"durations",
"[",
"0",
"]",
">",
"windowSize",
"or",
"durations",
"[",
"-",
"1",
"]",
">",
"windowSize",
":",
"return",
"\"Stimulus duration exceeds window duration\"",
"msg",
"=",
"self",
".",
"verifyExpanded",
"(",
"self",
".",
"samplerate",
"(",
")",
")",
"if",
"msg",
":",
"return",
"msg",
"if",
"self",
".",
"caldb",
"is",
"None",
"or",
"self",
".",
"calv",
"is",
"None",
":",
"return",
"\"Test reference voltage not set\"",
"if",
"None",
"in",
"self",
".",
"voltage_limits",
":",
"return",
"\"Device voltage limits not set\"",
"return",
"0"
] | 45.185185 | 16.481481 |
def get_roles(self):
"""Get the roles associated with the hosts.
Returns
dict of role -> [host]
"""
machines = self.c_resources["machines"]
result = {}
for desc in machines:
roles = utils.get_roles_as_list(desc)
hosts = self._denormalize(desc)
for role in roles:
result.setdefault(role, [])
result[role].extend(hosts)
return result | [
"def",
"get_roles",
"(",
"self",
")",
":",
"machines",
"=",
"self",
".",
"c_resources",
"[",
"\"machines\"",
"]",
"result",
"=",
"{",
"}",
"for",
"desc",
"in",
"machines",
":",
"roles",
"=",
"utils",
".",
"get_roles_as_list",
"(",
"desc",
")",
"hosts",
"=",
"self",
".",
"_denormalize",
"(",
"desc",
")",
"for",
"role",
"in",
"roles",
":",
"result",
".",
"setdefault",
"(",
"role",
",",
"[",
"]",
")",
"result",
"[",
"role",
"]",
".",
"extend",
"(",
"hosts",
")",
"return",
"result"
] | 28.375 | 13.5 |
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
# NOTE: the calls to self._quote here handles non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment)) | [
"def",
"_write_line",
"(",
"self",
",",
"indent_string",
",",
"entry",
",",
"this_entry",
",",
"comment",
")",
":",
"# NOTE: the calls to self._quote here handles non-StringType values.",
"if",
"not",
"self",
".",
"unrepr",
":",
"val",
"=",
"self",
".",
"_decode_element",
"(",
"self",
".",
"_quote",
"(",
"this_entry",
")",
")",
"else",
":",
"val",
"=",
"repr",
"(",
"this_entry",
")",
"return",
"'%s%s%s%s%s'",
"%",
"(",
"indent_string",
",",
"self",
".",
"_decode_element",
"(",
"self",
".",
"_quote",
"(",
"entry",
",",
"multiline",
"=",
"False",
")",
")",
",",
"self",
".",
"_a_to_u",
"(",
"' = '",
")",
",",
"val",
",",
"self",
".",
"_decode_element",
"(",
"comment",
")",
")"
] | 51.583333 | 18.416667 |
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse:
"""
POST certification raw document
:param client: Client to connect to the api
:param certification_signed_raw: Certification raw document
:return:
"""
return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP) | [
"async",
"def",
"certify",
"(",
"client",
":",
"Client",
",",
"certification_signed_raw",
":",
"str",
")",
"->",
"ClientResponse",
":",
"return",
"await",
"client",
".",
"post",
"(",
"MODULE",
"+",
"'/certify'",
",",
"{",
"'cert'",
":",
"certification_signed_raw",
"}",
",",
"rtype",
"=",
"RESPONSE_AIOHTTP",
")"
] | 40.333333 | 23.888889 |
def send(evt):
"Process an outgoing communication"
# get the text written by the user (input textbox control)
msg = ctrl_input.value
# send the message (replace with socket/queue/etc.)
gui.alert(msg, "Message")
# record the message (update the UI)
log(msg)
ctrl_input.value = ""
ctrl_input.set_focus() | [
"def",
"send",
"(",
"evt",
")",
":",
"# get the text written by the user (input textbox control)",
"msg",
"=",
"ctrl_input",
".",
"value",
"# send the message (replace with socket/queue/etc.)",
"gui",
".",
"alert",
"(",
"msg",
",",
"\"Message\"",
")",
"# record the message (update the UI)",
"log",
"(",
"msg",
")",
"ctrl_input",
".",
"value",
"=",
"\"\"",
"ctrl_input",
".",
"set_focus",
"(",
")"
] | 32.8 | 14.6 |
def create_usage(self, sub_add_on, usage):
"""Record the usage on the given subscription add on and update the
usage object with returned xml"""
url = urljoin(self._url, '/add_ons/%s/usage' % (sub_add_on.add_on_code,))
return usage.post(url) | [
"def",
"create_usage",
"(",
"self",
",",
"sub_add_on",
",",
"usage",
")",
":",
"url",
"=",
"urljoin",
"(",
"self",
".",
"_url",
",",
"'/add_ons/%s/usage'",
"%",
"(",
"sub_add_on",
".",
"add_on_code",
",",
")",
")",
"return",
"usage",
".",
"post",
"(",
"url",
")"
] | 53.8 | 10.6 |
def name(self, name):
"""
Set the name of this object.
Tell the parent if the name has changed.
"""
from_name = self.name
assert isinstance(name, str)
self._name = name
if self.has_parent():
self._parent_._name_changed(self, from_name) | [
"def",
"name",
"(",
"self",
",",
"name",
")",
":",
"from_name",
"=",
"self",
".",
"name",
"assert",
"isinstance",
"(",
"name",
",",
"str",
")",
"self",
".",
"_name",
"=",
"name",
"if",
"self",
".",
"has_parent",
"(",
")",
":",
"self",
".",
"_parent_",
".",
"_name_changed",
"(",
"self",
",",
"from_name",
")"
] | 30.2 | 8.8 |
def open(filename, frame='unspecified'):
""" Opens a segmentation image """
data = Image.load_data(filename)
return SegmentationImage(data, frame) | [
"def",
"open",
"(",
"filename",
",",
"frame",
"=",
"'unspecified'",
")",
":",
"data",
"=",
"Image",
".",
"load_data",
"(",
"filename",
")",
"return",
"SegmentationImage",
"(",
"data",
",",
"frame",
")"
] | 41.75 | 1.25 |
def parse_rule(tokens, variables, neighbors, parents, is_merc):
""" Parse a rule set, return a list of declarations.
Requires a dictionary of declared variables. Selectors in the neighbors
list are simply grouped, and are generated from comma-delimited lists
of selectors in the stylesheet. Selectors in the parents list should
be combined with those found by this functions, and are generated
from nested, Less-style rulesets.
A rule set is a combination of selectors and declarations:
http://www.w3.org/TR/CSS2/syndata.html#rule-sets
Nesting is described in the Less CSS spec:
http://lesscss.org/#-nested-rules
To handle groups of selectors, use recursion:
http://www.w3.org/TR/CSS2/selector.html#grouping
"""
#
# Local helper function
#
def validate_selector_elements(elements, line, col):
if len(elements) > 2:
raise ParseException('Only two-element selectors are supported for Mapnik styles', line, col)
if len(elements) == 0:
raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)
if elements[0].names[0] not in ('Map', 'Layer') and elements[0].names[0][0] not in ('.', '#', '*'):
raise ParseException('All non-ID, non-class first elements must be "Layer" Mapnik styles', line, col)
if set([name[:1] for name in elements[0].names[1:]]) - set('#.'):
raise ParseException('All names after the first must be IDs or classes', line, col)
if len(elements) == 2 and elements[1].countTests():
raise ParseException('Only the first element in a selector may have attributes in Mapnik styles', line, col)
if len(elements) == 2 and elements[1].countIDs():
raise ParseException('Only the first element in a selector may have an ID in Mapnik styles', line, col)
if len(elements) == 2 and elements[1].countClasses():
raise ParseException('Only the first element in a selector may have a class in Mapnik styles', line, col)
def parse_variable_definition(tokens):
""" Look for variable value tokens after an @keyword, return an array.
"""
while True:
tname, tvalue, line, col = tokens.next()
if (tname, tvalue) == ('CHAR', ':'):
vtokens = []
while True:
tname, tvalue, line, col = tokens.next()
if (tname, tvalue) in (('CHAR', ';'), ('S', '\n')):
return vtokens
elif tname not in ('S', 'COMMENT'):
vtokens.append((tname, tvalue, line, col))
elif tname not in ('S', 'COMMENT'):
raise ParseException('Unexpected token in variable definition: "%s"' % tvalue, line, col)
#
# The work.
#
ElementClass = SelectorElement
element = None
elements = []
while True:
tname, tvalue, line, col = tokens.next()
if tname == 'ATKEYWORD':
#
# Likely variable definition:
# http://lesscss.org/#-variables
#
variables[tvalue] = parse_variable_definition(tokens)
elif (tname, tvalue) == ('CHAR', '&'):
#
# Start of a nested block with a "&" combinator
# http://lesscss.org/#-nested-rules
#
ElementClass = ConcatenatedElement
elif tname == 'S':
#
# Definitely no longer in a "&" combinator.
#
ElementClass = SelectorElement
elif tname == 'IDENT':
#
# Identifier always starts a new element.
#
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif tname == 'HASH':
#
# Hash is an ID selector:
# http://www.w3.org/TR/CSS2/selector.html#id-selectors
#
if not element:
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif (tname, tvalue) == ('CHAR', '.'):
while True:
tname, tvalue, line, col = tokens.next()
if tname == 'IDENT':
#
# Identifier after a period is a class selector:
# http://www.w3.org/TR/CSS2/selector.html#class-html
#
if not element:
element = ElementClass()
elements.append(element)
element.addName('.'+tvalue)
break
else:
raise ParseException('Malformed class selector', line, col)
elif (tname, tvalue) == ('CHAR', '*'):
#
# Asterisk character is a universal selector:
# http://www.w3.org/TR/CSS2/selector.html#universal-selector
#
if not element:
element = ElementClass()
elements.append(element)
element.addName(tvalue)
elif (tname, tvalue) == ('CHAR', '['):
#
# Left-bracket is the start of an attribute selector:
# http://www.w3.org/TR/CSS2/selector.html#attribute-selectors
#
if not element:
element = ElementClass()
elements.append(element)
test = parse_attribute(tokens, is_merc)
element.addTest(test)
elif (tname, tvalue) == ('CHAR', ','):
#
# Comma delineates one of a group of selectors:
# http://www.w3.org/TR/CSS2/selector.html#grouping
#
# Recurse here.
#
neighbors.append(Selector(*elements))
return parse_rule(tokens, variables, neighbors, parents, is_merc)
elif (tname, tvalue) == ('CHAR', '{'):
#
# Left-brace is the start of a block:
# http://www.w3.org/TR/CSS2/syndata.html#block
#
# Return a full block here.
#
class DummySelector:
def __init__(self, *elements):
self.elements = elements[:]
neighbors.append(DummySelector(*elements))
selectors = []
#
# Combine lists of parents and neighbors into a single list of
# selectors, for passing off to parse_block(). There might not
# be any parents, but there will definitely be neighbors.
#
for parent in (parents or [DummySelector()]):
for neighbor in neighbors:
if len(neighbor.elements) == 0:
raise ParseException('At least one element must be present in selectors for Mapnik styles', line, col)
elements = chain(parent.elements + neighbor.elements)
selector = Selector(deepcopy(elements.next()))
for element in elements:
if element.__class__ is ConcatenatedElement:
for name in element.names: selector.elements[-1].addName(deepcopy(name))
for test in element.tests: selector.elements[-1].addTest(deepcopy(test))
else:
selector.addElement(deepcopy(element))
# selector should be fully valid at this point.
validate_selector_elements(selector.elements, line, col)
selector.convertZoomTests(is_merc)
selectors.append(selector)
return parse_block(tokens, variables, selectors, is_merc)
elif tname not in ('S', 'COMMENT'):
raise ParseException('Unexpected token in selector: "%s"' % tvalue, line, col) | [
"def",
"parse_rule",
"(",
"tokens",
",",
"variables",
",",
"neighbors",
",",
"parents",
",",
"is_merc",
")",
":",
"#",
"# Local helper function",
"#",
"def",
"validate_selector_elements",
"(",
"elements",
",",
"line",
",",
"col",
")",
":",
"if",
"len",
"(",
"elements",
")",
">",
"2",
":",
"raise",
"ParseException",
"(",
"'Only two-element selectors are supported for Mapnik styles'",
",",
"line",
",",
"col",
")",
"if",
"len",
"(",
"elements",
")",
"==",
"0",
":",
"raise",
"ParseException",
"(",
"'At least one element must be present in selectors for Mapnik styles'",
",",
"line",
",",
"col",
")",
"if",
"elements",
"[",
"0",
"]",
".",
"names",
"[",
"0",
"]",
"not",
"in",
"(",
"'Map'",
",",
"'Layer'",
")",
"and",
"elements",
"[",
"0",
"]",
".",
"names",
"[",
"0",
"]",
"[",
"0",
"]",
"not",
"in",
"(",
"'.'",
",",
"'#'",
",",
"'*'",
")",
":",
"raise",
"ParseException",
"(",
"'All non-ID, non-class first elements must be \"Layer\" Mapnik styles'",
",",
"line",
",",
"col",
")",
"if",
"set",
"(",
"[",
"name",
"[",
":",
"1",
"]",
"for",
"name",
"in",
"elements",
"[",
"0",
"]",
".",
"names",
"[",
"1",
":",
"]",
"]",
")",
"-",
"set",
"(",
"'#.'",
")",
":",
"raise",
"ParseException",
"(",
"'All names after the first must be IDs or classes'",
",",
"line",
",",
"col",
")",
"if",
"len",
"(",
"elements",
")",
"==",
"2",
"and",
"elements",
"[",
"1",
"]",
".",
"countTests",
"(",
")",
":",
"raise",
"ParseException",
"(",
"'Only the first element in a selector may have attributes in Mapnik styles'",
",",
"line",
",",
"col",
")",
"if",
"len",
"(",
"elements",
")",
"==",
"2",
"and",
"elements",
"[",
"1",
"]",
".",
"countIDs",
"(",
")",
":",
"raise",
"ParseException",
"(",
"'Only the first element in a selector may have an ID in Mapnik styles'",
",",
"line",
",",
"col",
")",
"if",
"len",
"(",
"elements",
")",
"==",
"2",
"and",
"elements",
"[",
"1",
"]",
".",
"countClasses",
"(",
")",
":",
"raise",
"ParseException",
"(",
"'Only the first element in a selector may have a class in Mapnik styles'",
",",
"line",
",",
"col",
")",
"def",
"parse_variable_definition",
"(",
"tokens",
")",
":",
"\"\"\" Look for variable value tokens after an @keyword, return an array.\n \"\"\"",
"while",
"True",
":",
"tname",
",",
"tvalue",
",",
"line",
",",
"col",
"=",
"tokens",
".",
"next",
"(",
")",
"if",
"(",
"tname",
",",
"tvalue",
")",
"==",
"(",
"'CHAR'",
",",
"':'",
")",
":",
"vtokens",
"=",
"[",
"]",
"while",
"True",
":",
"tname",
",",
"tvalue",
",",
"line",
",",
"col",
"=",
"tokens",
".",
"next",
"(",
")",
"if",
"(",
"tname",
",",
"tvalue",
")",
"in",
"(",
"(",
"'CHAR'",
",",
"';'",
")",
",",
"(",
"'S'",
",",
"'\\n'",
")",
")",
":",
"return",
"vtokens",
"elif",
"tname",
"not",
"in",
"(",
"'S'",
",",
"'COMMENT'",
")",
":",
"vtokens",
".",
"append",
"(",
"(",
"tname",
",",
"tvalue",
",",
"line",
",",
"col",
")",
")",
"elif",
"tname",
"not",
"in",
"(",
"'S'",
",",
"'COMMENT'",
")",
":",
"raise",
"ParseException",
"(",
"'Unexpected token in variable definition: \"%s\"'",
"%",
"tvalue",
",",
"line",
",",
"col",
")",
"#",
"# The work.",
"#",
"ElementClass",
"=",
"SelectorElement",
"element",
"=",
"None",
"elements",
"=",
"[",
"]",
"while",
"True",
":",
"tname",
",",
"tvalue",
",",
"line",
",",
"col",
"=",
"tokens",
".",
"next",
"(",
")",
"if",
"tname",
"==",
"'ATKEYWORD'",
":",
"#",
"# Likely variable definition:",
"# http://lesscss.org/#-variables",
"#",
"variables",
"[",
"tvalue",
"]",
"=",
"parse_variable_definition",
"(",
"tokens",
")",
"elif",
"(",
"tname",
",",
"tvalue",
")",
"==",
"(",
"'CHAR'",
",",
"'&'",
")",
":",
"#",
"# Start of a nested block with a \"&\" combinator",
"# http://lesscss.org/#-nested-rules",
"#",
"ElementClass",
"=",
"ConcatenatedElement",
"elif",
"tname",
"==",
"'S'",
":",
"#",
"# Definitely no longer in a \"&\" combinator.",
"#",
"ElementClass",
"=",
"SelectorElement",
"elif",
"tname",
"==",
"'IDENT'",
":",
"#",
"# Identifier always starts a new element.",
"#",
"element",
"=",
"ElementClass",
"(",
")",
"elements",
".",
"append",
"(",
"element",
")",
"element",
".",
"addName",
"(",
"tvalue",
")",
"elif",
"tname",
"==",
"'HASH'",
":",
"#",
"# Hash is an ID selector:",
"# http://www.w3.org/TR/CSS2/selector.html#id-selectors",
"#",
"if",
"not",
"element",
":",
"element",
"=",
"ElementClass",
"(",
")",
"elements",
".",
"append",
"(",
"element",
")",
"element",
".",
"addName",
"(",
"tvalue",
")",
"elif",
"(",
"tname",
",",
"tvalue",
")",
"==",
"(",
"'CHAR'",
",",
"'.'",
")",
":",
"while",
"True",
":",
"tname",
",",
"tvalue",
",",
"line",
",",
"col",
"=",
"tokens",
".",
"next",
"(",
")",
"if",
"tname",
"==",
"'IDENT'",
":",
"#",
"# Identifier after a period is a class selector:",
"# http://www.w3.org/TR/CSS2/selector.html#class-html",
"#",
"if",
"not",
"element",
":",
"element",
"=",
"ElementClass",
"(",
")",
"elements",
".",
"append",
"(",
"element",
")",
"element",
".",
"addName",
"(",
"'.'",
"+",
"tvalue",
")",
"break",
"else",
":",
"raise",
"ParseException",
"(",
"'Malformed class selector'",
",",
"line",
",",
"col",
")",
"elif",
"(",
"tname",
",",
"tvalue",
")",
"==",
"(",
"'CHAR'",
",",
"'*'",
")",
":",
"#",
"# Asterisk character is a universal selector:",
"# http://www.w3.org/TR/CSS2/selector.html#universal-selector",
"#",
"if",
"not",
"element",
":",
"element",
"=",
"ElementClass",
"(",
")",
"elements",
".",
"append",
"(",
"element",
")",
"element",
".",
"addName",
"(",
"tvalue",
")",
"elif",
"(",
"tname",
",",
"tvalue",
")",
"==",
"(",
"'CHAR'",
",",
"'['",
")",
":",
"#",
"# Left-bracket is the start of an attribute selector:",
"# http://www.w3.org/TR/CSS2/selector.html#attribute-selectors",
"#",
"if",
"not",
"element",
":",
"element",
"=",
"ElementClass",
"(",
")",
"elements",
".",
"append",
"(",
"element",
")",
"test",
"=",
"parse_attribute",
"(",
"tokens",
",",
"is_merc",
")",
"element",
".",
"addTest",
"(",
"test",
")",
"elif",
"(",
"tname",
",",
"tvalue",
")",
"==",
"(",
"'CHAR'",
",",
"','",
")",
":",
"#",
"# Comma delineates one of a group of selectors:",
"# http://www.w3.org/TR/CSS2/selector.html#grouping",
"#",
"# Recurse here.",
"#",
"neighbors",
".",
"append",
"(",
"Selector",
"(",
"*",
"elements",
")",
")",
"return",
"parse_rule",
"(",
"tokens",
",",
"variables",
",",
"neighbors",
",",
"parents",
",",
"is_merc",
")",
"elif",
"(",
"tname",
",",
"tvalue",
")",
"==",
"(",
"'CHAR'",
",",
"'{'",
")",
":",
"#",
"# Left-brace is the start of a block:",
"# http://www.w3.org/TR/CSS2/syndata.html#block",
"#",
"# Return a full block here.",
"#",
"class",
"DummySelector",
":",
"def",
"__init__",
"(",
"self",
",",
"*",
"elements",
")",
":",
"self",
".",
"elements",
"=",
"elements",
"[",
":",
"]",
"neighbors",
".",
"append",
"(",
"DummySelector",
"(",
"*",
"elements",
")",
")",
"selectors",
"=",
"[",
"]",
"#",
"# Combine lists of parents and neighbors into a single list of",
"# selectors, for passing off to parse_block(). There might not",
"# be any parents, but there will definitely be neighbors.",
"#",
"for",
"parent",
"in",
"(",
"parents",
"or",
"[",
"DummySelector",
"(",
")",
"]",
")",
":",
"for",
"neighbor",
"in",
"neighbors",
":",
"if",
"len",
"(",
"neighbor",
".",
"elements",
")",
"==",
"0",
":",
"raise",
"ParseException",
"(",
"'At least one element must be present in selectors for Mapnik styles'",
",",
"line",
",",
"col",
")",
"elements",
"=",
"chain",
"(",
"parent",
".",
"elements",
"+",
"neighbor",
".",
"elements",
")",
"selector",
"=",
"Selector",
"(",
"deepcopy",
"(",
"elements",
".",
"next",
"(",
")",
")",
")",
"for",
"element",
"in",
"elements",
":",
"if",
"element",
".",
"__class__",
"is",
"ConcatenatedElement",
":",
"for",
"name",
"in",
"element",
".",
"names",
":",
"selector",
".",
"elements",
"[",
"-",
"1",
"]",
".",
"addName",
"(",
"deepcopy",
"(",
"name",
")",
")",
"for",
"test",
"in",
"element",
".",
"tests",
":",
"selector",
".",
"elements",
"[",
"-",
"1",
"]",
".",
"addTest",
"(",
"deepcopy",
"(",
"test",
")",
")",
"else",
":",
"selector",
".",
"addElement",
"(",
"deepcopy",
"(",
"element",
")",
")",
"# selector should be fully valid at this point.",
"validate_selector_elements",
"(",
"selector",
".",
"elements",
",",
"line",
",",
"col",
")",
"selector",
".",
"convertZoomTests",
"(",
"is_merc",
")",
"selectors",
".",
"append",
"(",
"selector",
")",
"return",
"parse_block",
"(",
"tokens",
",",
"variables",
",",
"selectors",
",",
"is_merc",
")",
"elif",
"tname",
"not",
"in",
"(",
"'S'",
",",
"'COMMENT'",
")",
":",
"raise",
"ParseException",
"(",
"'Unexpected token in selector: \"%s\"'",
"%",
"tvalue",
",",
"line",
",",
"col",
")"
] | 38.59434 | 22.90566 |
def handle_msg(self, payload):
"""
Handle message for network plugin protocol
:param payload: Received message
:type payload: dict
:return: Response to send (if set)
:rtype: None | dict
"""
self.debug(u"\n{}".format(pformat(payload)))
msg = payload['msg']
res = {
'msg': msg,
'error': ""
}
if msg == "plugin_list":
res['plugin_names'] = []
# Generate list of plugins available for frontend
for plugin in self.controller.plugins:
# Limit to plugins that work with this
if isinstance(plugin, SettablePlugin):
res['plugin_names'].append(plugin.name)
return res
elif msg == "plugin_get":
res['plugin'] = {}
plugin_name = payload.get('plugin_name')
plugin, err = self._plugin_get(plugin_name)
if not plugin:
res['error'] = err
return res
res['plugin_name'] = plugin_name
res['plugin'] = plugin.get_info()
return res
elif msg == "plugin_resource_list":
res['resource_names'] = []
plugin_name = payload.get('plugin_name')
plugin, err = self._plugin_get(plugin_name)
if not plugin:
res['error'] = err
return res
res['plugin_name'] = plugin_name
try:
res['resource_names'] = plugin.resource_get_list()
except PluginException as e:
if str(e) == "No resource path set":
self.debug(
u"Plugin '{}' has no resources".format(plugin.name)
)
else:
self.exception(
u"Failed to get resource list for plugin '{}'".format(
plugin.name
)
)
return res
elif msg == "plugin_resource_get":
res['resource'] = {}
plugin_name = payload.get('plugin_name')
resource_name = payload.get('resource_name')
if not resource_name:
res['error'] = "Resource name not set"
return res
plugin, err = self._plugin_get(plugin_name)
if not plugin:
res['error'] = err
return res
res['plugin_name'] = plugin_name
res['resource_name'] = resource_name
res['resource'] = dict(plugin.resource_get(resource_name))
if "path" in res['resource']:
del res['resource']['path']
return res
elif msg == "plugin_resource_load":
res['resource_data'] = ""
plugin_name = payload.get('plugin_name')
resource_name = payload.get('resource_name')
if not resource_name:
res['error'] = "Resource name not set"
return res
plugin, err = self._plugin_get(plugin_name)
if not plugin:
res['error'] = err
return res
res['plugin_name'] = plugin_name
res['resource_name'] = resource_name
resource_dict = plugin.resource_get(resource_name)
if not resource_dict:
res['error'] = u"Resource '{}' not found".format(resource_name)
return res
self.debug(u"Resource {}".format(resource_dict))
try:
with open(resource_dict['path'], 'rb') as f:
res['resource_data'] = base64.b64encode(f.read())
except:
self.exception(
u"Failed to load '{}'".format(resource_dict['path'])
)
res['error'] = u"Failed to load"
return res
elif msg == "plugin_data_get":
plugin_name = payload.get('plugin_name')
plugin, err = self._plugin_get(plugin_name)
if not plugin:
res['error'] = err
return res
res['plugin_name'] = plugin_name
res['data'] = plugin.get_data()
return res
elif msg == "plugin_data_set":
plugin_name = payload.get('plugin_name')
plugin, err = self._plugin_get(plugin_name)
if not plugin:
res['error'] = err
return res
res['plugin_name'] = plugin_name
data = payload.get('data')
if not data:
res['error'] = u"No data provided"
return res
try:
plugin.on_config(data)
except NotImplementedError:
res['error'] = u"Plugin does not support setting data"
return res
except:
self.exception(
u"Failed to set data for {}".format(plugin_name)
)
res['error'] = "Failed to set data"
return res
return {}
else:
self.error(u"Unknown cmd '{}'\n{}".format(msg, payload))
return {} | [
"def",
"handle_msg",
"(",
"self",
",",
"payload",
")",
":",
"self",
".",
"debug",
"(",
"u\"\\n{}\"",
".",
"format",
"(",
"pformat",
"(",
"payload",
")",
")",
")",
"msg",
"=",
"payload",
"[",
"'msg'",
"]",
"res",
"=",
"{",
"'msg'",
":",
"msg",
",",
"'error'",
":",
"\"\"",
"}",
"if",
"msg",
"==",
"\"plugin_list\"",
":",
"res",
"[",
"'plugin_names'",
"]",
"=",
"[",
"]",
"# Generate list of plugins available for frontend",
"for",
"plugin",
"in",
"self",
".",
"controller",
".",
"plugins",
":",
"# Limit to plugins that work with this",
"if",
"isinstance",
"(",
"plugin",
",",
"SettablePlugin",
")",
":",
"res",
"[",
"'plugin_names'",
"]",
".",
"append",
"(",
"plugin",
".",
"name",
")",
"return",
"res",
"elif",
"msg",
"==",
"\"plugin_get\"",
":",
"res",
"[",
"'plugin'",
"]",
"=",
"{",
"}",
"plugin_name",
"=",
"payload",
".",
"get",
"(",
"'plugin_name'",
")",
"plugin",
",",
"err",
"=",
"self",
".",
"_plugin_get",
"(",
"plugin_name",
")",
"if",
"not",
"plugin",
":",
"res",
"[",
"'error'",
"]",
"=",
"err",
"return",
"res",
"res",
"[",
"'plugin_name'",
"]",
"=",
"plugin_name",
"res",
"[",
"'plugin'",
"]",
"=",
"plugin",
".",
"get_info",
"(",
")",
"return",
"res",
"elif",
"msg",
"==",
"\"plugin_resource_list\"",
":",
"res",
"[",
"'resource_names'",
"]",
"=",
"[",
"]",
"plugin_name",
"=",
"payload",
".",
"get",
"(",
"'plugin_name'",
")",
"plugin",
",",
"err",
"=",
"self",
".",
"_plugin_get",
"(",
"plugin_name",
")",
"if",
"not",
"plugin",
":",
"res",
"[",
"'error'",
"]",
"=",
"err",
"return",
"res",
"res",
"[",
"'plugin_name'",
"]",
"=",
"plugin_name",
"try",
":",
"res",
"[",
"'resource_names'",
"]",
"=",
"plugin",
".",
"resource_get_list",
"(",
")",
"except",
"PluginException",
"as",
"e",
":",
"if",
"str",
"(",
"e",
")",
"==",
"\"No resource path set\"",
":",
"self",
".",
"debug",
"(",
"u\"Plugin '{}' has no resources\"",
".",
"format",
"(",
"plugin",
".",
"name",
")",
")",
"else",
":",
"self",
".",
"exception",
"(",
"u\"Failed to get resource list for plugin '{}'\"",
".",
"format",
"(",
"plugin",
".",
"name",
")",
")",
"return",
"res",
"elif",
"msg",
"==",
"\"plugin_resource_get\"",
":",
"res",
"[",
"'resource'",
"]",
"=",
"{",
"}",
"plugin_name",
"=",
"payload",
".",
"get",
"(",
"'plugin_name'",
")",
"resource_name",
"=",
"payload",
".",
"get",
"(",
"'resource_name'",
")",
"if",
"not",
"resource_name",
":",
"res",
"[",
"'error'",
"]",
"=",
"\"Resource name not set\"",
"return",
"res",
"plugin",
",",
"err",
"=",
"self",
".",
"_plugin_get",
"(",
"plugin_name",
")",
"if",
"not",
"plugin",
":",
"res",
"[",
"'error'",
"]",
"=",
"err",
"return",
"res",
"res",
"[",
"'plugin_name'",
"]",
"=",
"plugin_name",
"res",
"[",
"'resource_name'",
"]",
"=",
"resource_name",
"res",
"[",
"'resource'",
"]",
"=",
"dict",
"(",
"plugin",
".",
"resource_get",
"(",
"resource_name",
")",
")",
"if",
"\"path\"",
"in",
"res",
"[",
"'resource'",
"]",
":",
"del",
"res",
"[",
"'resource'",
"]",
"[",
"'path'",
"]",
"return",
"res",
"elif",
"msg",
"==",
"\"plugin_resource_load\"",
":",
"res",
"[",
"'resource_data'",
"]",
"=",
"\"\"",
"plugin_name",
"=",
"payload",
".",
"get",
"(",
"'plugin_name'",
")",
"resource_name",
"=",
"payload",
".",
"get",
"(",
"'resource_name'",
")",
"if",
"not",
"resource_name",
":",
"res",
"[",
"'error'",
"]",
"=",
"\"Resource name not set\"",
"return",
"res",
"plugin",
",",
"err",
"=",
"self",
".",
"_plugin_get",
"(",
"plugin_name",
")",
"if",
"not",
"plugin",
":",
"res",
"[",
"'error'",
"]",
"=",
"err",
"return",
"res",
"res",
"[",
"'plugin_name'",
"]",
"=",
"plugin_name",
"res",
"[",
"'resource_name'",
"]",
"=",
"resource_name",
"resource_dict",
"=",
"plugin",
".",
"resource_get",
"(",
"resource_name",
")",
"if",
"not",
"resource_dict",
":",
"res",
"[",
"'error'",
"]",
"=",
"u\"Resource '{}' not found\"",
".",
"format",
"(",
"resource_name",
")",
"return",
"res",
"self",
".",
"debug",
"(",
"u\"Resource {}\"",
".",
"format",
"(",
"resource_dict",
")",
")",
"try",
":",
"with",
"open",
"(",
"resource_dict",
"[",
"'path'",
"]",
",",
"'rb'",
")",
"as",
"f",
":",
"res",
"[",
"'resource_data'",
"]",
"=",
"base64",
".",
"b64encode",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
":",
"self",
".",
"exception",
"(",
"u\"Failed to load '{}'\"",
".",
"format",
"(",
"resource_dict",
"[",
"'path'",
"]",
")",
")",
"res",
"[",
"'error'",
"]",
"=",
"u\"Failed to load\"",
"return",
"res",
"elif",
"msg",
"==",
"\"plugin_data_get\"",
":",
"plugin_name",
"=",
"payload",
".",
"get",
"(",
"'plugin_name'",
")",
"plugin",
",",
"err",
"=",
"self",
".",
"_plugin_get",
"(",
"plugin_name",
")",
"if",
"not",
"plugin",
":",
"res",
"[",
"'error'",
"]",
"=",
"err",
"return",
"res",
"res",
"[",
"'plugin_name'",
"]",
"=",
"plugin_name",
"res",
"[",
"'data'",
"]",
"=",
"plugin",
".",
"get_data",
"(",
")",
"return",
"res",
"elif",
"msg",
"==",
"\"plugin_data_set\"",
":",
"plugin_name",
"=",
"payload",
".",
"get",
"(",
"'plugin_name'",
")",
"plugin",
",",
"err",
"=",
"self",
".",
"_plugin_get",
"(",
"plugin_name",
")",
"if",
"not",
"plugin",
":",
"res",
"[",
"'error'",
"]",
"=",
"err",
"return",
"res",
"res",
"[",
"'plugin_name'",
"]",
"=",
"plugin_name",
"data",
"=",
"payload",
".",
"get",
"(",
"'data'",
")",
"if",
"not",
"data",
":",
"res",
"[",
"'error'",
"]",
"=",
"u\"No data provided\"",
"return",
"res",
"try",
":",
"plugin",
".",
"on_config",
"(",
"data",
")",
"except",
"NotImplementedError",
":",
"res",
"[",
"'error'",
"]",
"=",
"u\"Plugin does not support setting data\"",
"return",
"res",
"except",
":",
"self",
".",
"exception",
"(",
"u\"Failed to set data for {}\"",
".",
"format",
"(",
"plugin_name",
")",
")",
"res",
"[",
"'error'",
"]",
"=",
"\"Failed to set data\"",
"return",
"res",
"return",
"{",
"}",
"else",
":",
"self",
".",
"error",
"(",
"u\"Unknown cmd '{}'\\n{}\"",
".",
"format",
"(",
"msg",
",",
"payload",
")",
")",
"return",
"{",
"}"
] | 38.119403 | 13.41791 |
def send_cmd_recv_rsp(self, cmd_code, cmd_data, timeout,
                          send_idm=True, check_status=True):
        """Send a command and receive a response.
        This low level method sends an arbitrary command with the
        8-bit integer *cmd_code*, followed by the captured tag
        identifier (IDm) if *send_idm* is :const:`True` and the byte
        string or bytearray *cmd_data*. It then waits *timeout*
        seconds for a response, verifies that the response is
        correctly formatted and, if *check_status* is :const:`True`,
        that the status flags do not indicate an error.
        All errors raise a :exc:`~nfc.tag.TagCommandError`
        exception. Errors from response status flags produce an
        :attr:`~nfc.tag.TagCommandError.errno` that is greater than
        255, all other errors are below 256.
        """
        idm = self.idm if send_idm else bytearray()
        # Frame layout (as built here): length byte | cmd_code | IDm | payload.
        # The length byte counts itself, the code byte, IDm and the payload.
        cmd = chr(2+len(idm)+len(cmd_data)) + chr(cmd_code) + idm + cmd_data
        log.debug(">> {0:02x} {1:02x} {2} {3} ({4}s)".format(
            cmd[0], cmd[1], hexlify(cmd[2:10]), hexlify(cmd[10:]), timeout))
        started = time.time()
        # Retry the exchange up to three times on low-level communication
        # errors; 'break' on the first successful exchange.
        for retry in range(3):
            try:
                rsp = self.clf.exchange(cmd, timeout)
                break
            except nfc.clf.CommunicationError as error:
                reason = error.__class__.__name__
                log.debug("%s after %d retries" % (reason, retry))
        else:
            # for-else: runs only when all retries failed; 'error' is still
            # bound to the last CommunicationError caught above.
            if type(error) is nfc.clf.TimeoutError:
                raise Type3TagCommandError(nfc.tag.TIMEOUT_ERROR)
            if type(error) is nfc.clf.TransmissionError:
                raise Type3TagCommandError(nfc.tag.RECEIVE_ERROR)
            if type(error) is nfc.clf.ProtocolError: # pragma: no branch
                raise Type3TagCommandError(nfc.tag.PROTOCOL_ERROR)
        # Byte 0 must equal the total frame length reported by the tag.
        if rsp[0] != len(rsp):
            log.debug("incorrect response length {0:02x}".format(rsp[0]))
            raise Type3TagCommandError(RSP_LENGTH_ERROR)
        # Byte 1 is the response code, defined as cmd_code + 1.
        if rsp[1] != cmd_code + 1:
            log.debug("incorrect response code {0:02x}".format(rsp[1]))
            raise Type3TagCommandError(RSP_CODE_ERROR)
        # Bytes 2-9 echo the tag identifier when the command carried one.
        if send_idm and rsp[2:10] != self.idm:
            log.debug("wrong tag or transaction id " + hexlify(rsp[2:10]))
            raise Type3TagCommandError(TAG_IDM_ERROR)
        if not send_idm:
            log.debug("<< {0:02x} {1:02x} {2}".format(
                rsp[0], rsp[1], hexlify(rsp[2:])))
            return rsp[2:]
        # Bytes 10-11 hold the status flags; a non-zero first status byte
        # signals an error and is raised as a 16-bit errno (> 255).
        if check_status and rsp[10] != 0:
            log.debug("tag returned error status " + hexlify(rsp[10:12]))
            raise Type3TagCommandError(unpack(">H", rsp[10:12])[0])
        if not check_status:
            log.debug("<< {0:02x} {1:02x} {2} {3}".format(
                rsp[0], rsp[1], hexlify(rsp[2:10]), hexlify(rsp[10:])))
            return rsp[10:]
        log.debug("<< {0:02x} {1:02x} {2} {3} {4} ({elapsed:f}s)".format(
            rsp[0], rsp[1], hexlify(rsp[2:10]), hexlify(rsp[10:12]),
            hexlify(rsp[12:]), elapsed=time.time()-started))
        return rsp[12:]
"def",
"send_cmd_recv_rsp",
"(",
"self",
",",
"cmd_code",
",",
"cmd_data",
",",
"timeout",
",",
"send_idm",
"=",
"True",
",",
"check_status",
"=",
"True",
")",
":",
"idm",
"=",
"self",
".",
"idm",
"if",
"send_idm",
"else",
"bytearray",
"(",
")",
"cmd",
"=",
"chr",
"(",
"2",
"+",
"len",
"(",
"idm",
")",
"+",
"len",
"(",
"cmd_data",
")",
")",
"+",
"chr",
"(",
"cmd_code",
")",
"+",
"idm",
"+",
"cmd_data",
"log",
".",
"debug",
"(",
"\">> {0:02x} {1:02x} {2} {3} ({4}s)\"",
".",
"format",
"(",
"cmd",
"[",
"0",
"]",
",",
"cmd",
"[",
"1",
"]",
",",
"hexlify",
"(",
"cmd",
"[",
"2",
":",
"10",
"]",
")",
",",
"hexlify",
"(",
"cmd",
"[",
"10",
":",
"]",
")",
",",
"timeout",
")",
")",
"started",
"=",
"time",
".",
"time",
"(",
")",
"for",
"retry",
"in",
"range",
"(",
"3",
")",
":",
"try",
":",
"rsp",
"=",
"self",
".",
"clf",
".",
"exchange",
"(",
"cmd",
",",
"timeout",
")",
"break",
"except",
"nfc",
".",
"clf",
".",
"CommunicationError",
"as",
"error",
":",
"reason",
"=",
"error",
".",
"__class__",
".",
"__name__",
"log",
".",
"debug",
"(",
"\"%s after %d retries\"",
"%",
"(",
"reason",
",",
"retry",
")",
")",
"else",
":",
"if",
"type",
"(",
"error",
")",
"is",
"nfc",
".",
"clf",
".",
"TimeoutError",
":",
"raise",
"Type3TagCommandError",
"(",
"nfc",
".",
"tag",
".",
"TIMEOUT_ERROR",
")",
"if",
"type",
"(",
"error",
")",
"is",
"nfc",
".",
"clf",
".",
"TransmissionError",
":",
"raise",
"Type3TagCommandError",
"(",
"nfc",
".",
"tag",
".",
"RECEIVE_ERROR",
")",
"if",
"type",
"(",
"error",
")",
"is",
"nfc",
".",
"clf",
".",
"ProtocolError",
":",
"# pragma: no branch",
"raise",
"Type3TagCommandError",
"(",
"nfc",
".",
"tag",
".",
"PROTOCOL_ERROR",
")",
"if",
"rsp",
"[",
"0",
"]",
"!=",
"len",
"(",
"rsp",
")",
":",
"log",
".",
"debug",
"(",
"\"incorrect response length {0:02x}\"",
".",
"format",
"(",
"rsp",
"[",
"0",
"]",
")",
")",
"raise",
"Type3TagCommandError",
"(",
"RSP_LENGTH_ERROR",
")",
"if",
"rsp",
"[",
"1",
"]",
"!=",
"cmd_code",
"+",
"1",
":",
"log",
".",
"debug",
"(",
"\"incorrect response code {0:02x}\"",
".",
"format",
"(",
"rsp",
"[",
"1",
"]",
")",
")",
"raise",
"Type3TagCommandError",
"(",
"RSP_CODE_ERROR",
")",
"if",
"send_idm",
"and",
"rsp",
"[",
"2",
":",
"10",
"]",
"!=",
"self",
".",
"idm",
":",
"log",
".",
"debug",
"(",
"\"wrong tag or transaction id \"",
"+",
"hexlify",
"(",
"rsp",
"[",
"2",
":",
"10",
"]",
")",
")",
"raise",
"Type3TagCommandError",
"(",
"TAG_IDM_ERROR",
")",
"if",
"not",
"send_idm",
":",
"log",
".",
"debug",
"(",
"\"<< {0:02x} {1:02x} {2}\"",
".",
"format",
"(",
"rsp",
"[",
"0",
"]",
",",
"rsp",
"[",
"1",
"]",
",",
"hexlify",
"(",
"rsp",
"[",
"2",
":",
"]",
")",
")",
")",
"return",
"rsp",
"[",
"2",
":",
"]",
"if",
"check_status",
"and",
"rsp",
"[",
"10",
"]",
"!=",
"0",
":",
"log",
".",
"debug",
"(",
"\"tag returned error status \"",
"+",
"hexlify",
"(",
"rsp",
"[",
"10",
":",
"12",
"]",
")",
")",
"raise",
"Type3TagCommandError",
"(",
"unpack",
"(",
"\">H\"",
",",
"rsp",
"[",
"10",
":",
"12",
"]",
")",
"[",
"0",
"]",
")",
"if",
"not",
"check_status",
":",
"log",
".",
"debug",
"(",
"\"<< {0:02x} {1:02x} {2} {3}\"",
".",
"format",
"(",
"rsp",
"[",
"0",
"]",
",",
"rsp",
"[",
"1",
"]",
",",
"hexlify",
"(",
"rsp",
"[",
"2",
":",
"10",
"]",
")",
",",
"hexlify",
"(",
"rsp",
"[",
"10",
":",
"]",
")",
")",
")",
"return",
"rsp",
"[",
"10",
":",
"]",
"log",
".",
"debug",
"(",
"\"<< {0:02x} {1:02x} {2} {3} {4} ({elapsed:f}s)\"",
".",
"format",
"(",
"rsp",
"[",
"0",
"]",
",",
"rsp",
"[",
"1",
"]",
",",
"hexlify",
"(",
"rsp",
"[",
"2",
":",
"10",
"]",
")",
",",
"hexlify",
"(",
"rsp",
"[",
"10",
":",
"12",
"]",
")",
",",
"hexlify",
"(",
"rsp",
"[",
"12",
":",
"]",
")",
",",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"started",
")",
")",
"return",
"rsp",
"[",
"12",
":",
"]"
] | 48.507937 | 20.857143 |
def _set_annotation_to_str(annotation_data: Mapping[str, Mapping[str, bool]], key: str) -> str:
"""Return a set annotation string."""
value = annotation_data[key]
if len(value) == 1:
return 'SET {} = "{}"'.format(key, list(value)[0])
x = ('"{}"'.format(v) for v in sorted(value))
return 'SET {} = {{{}}}'.format(key, ', '.join(x)) | [
"def",
"_set_annotation_to_str",
"(",
"annotation_data",
":",
"Mapping",
"[",
"str",
",",
"Mapping",
"[",
"str",
",",
"bool",
"]",
"]",
",",
"key",
":",
"str",
")",
"->",
"str",
":",
"value",
"=",
"annotation_data",
"[",
"key",
"]",
"if",
"len",
"(",
"value",
")",
"==",
"1",
":",
"return",
"'SET {} = \"{}\"'",
".",
"format",
"(",
"key",
",",
"list",
"(",
"value",
")",
"[",
"0",
"]",
")",
"x",
"=",
"(",
"'\"{}\"'",
".",
"format",
"(",
"v",
")",
"for",
"v",
"in",
"sorted",
"(",
"value",
")",
")",
"return",
"'SET {} = {{{}}}'",
".",
"format",
"(",
"key",
",",
"', '",
".",
"join",
"(",
"x",
")",
")"
] | 35.2 | 24.1 |
def read(self, entity=None, attrs=None, ignore=None, params=None):
        """Fetch an attribute missing from the server's response.
        For more information, see `Bugzilla #1237257
        <https://bugzilla.redhat.com/show_bug.cgi?id=1237257>`_.
        Add content_view_component to the response if needed, as
        :meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
        content_view_component.
        """
        if attrs is None:
            attrs = self.read_json()
        # Pre-6.1 servers return only the organization label; expand it.
        if _get_version(self._server_config) < Version('6.1'):
            org_label = attrs['organization']['label']
            attrs['organization'] = _get_org(
                self._server_config, org_label).get_values()
        ignore = set() if ignore is None else ignore
        ignore.add('content_view_component')
        result = super(ContentView, self).read(entity, attrs, ignore, params)
        # Rebuild content_view_component entities by hand, since the mixin
        # cannot initialize them from the raw attributes.
        components = attrs.get('content_view_components')
        if components:
            result.content_view_component = []
            for component in components:
                result.content_view_component.append(
                    ContentViewComponent(
                        self._server_config,
                        composite_content_view=result.id,
                        id=component['id'],
                    )
                )
        return result
"def",
"read",
"(",
"self",
",",
"entity",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"ignore",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"attrs",
"is",
"None",
":",
"attrs",
"=",
"self",
".",
"read_json",
"(",
")",
"if",
"_get_version",
"(",
"self",
".",
"_server_config",
")",
"<",
"Version",
"(",
"'6.1'",
")",
":",
"org",
"=",
"_get_org",
"(",
"self",
".",
"_server_config",
",",
"attrs",
"[",
"'organization'",
"]",
"[",
"'label'",
"]",
")",
"attrs",
"[",
"'organization'",
"]",
"=",
"org",
".",
"get_values",
"(",
")",
"if",
"ignore",
"is",
"None",
":",
"ignore",
"=",
"set",
"(",
")",
"ignore",
".",
"add",
"(",
"'content_view_component'",
")",
"result",
"=",
"super",
"(",
"ContentView",
",",
"self",
")",
".",
"read",
"(",
"entity",
",",
"attrs",
",",
"ignore",
",",
"params",
")",
"if",
"'content_view_components'",
"in",
"attrs",
"and",
"attrs",
"[",
"'content_view_components'",
"]",
":",
"result",
".",
"content_view_component",
"=",
"[",
"ContentViewComponent",
"(",
"self",
".",
"_server_config",
",",
"composite_content_view",
"=",
"result",
".",
"id",
",",
"id",
"=",
"content_view_component",
"[",
"'id'",
"]",
",",
")",
"for",
"content_view_component",
"in",
"attrs",
"[",
"'content_view_components'",
"]",
"]",
"return",
"result"
] | 43.133333 | 19.8 |
def remove(self, doc_type, doc_ids, **kwargs):
        """ Implements call to remove the documents from the index """
        try:
            # ignore is flagged as an unexpected-keyword-arg; ES python client documents that it can be used
            # pylint: disable=unexpected-keyword-arg
            delete_actions = []
            for doc_id in doc_ids:
                log.debug("Removing document of type %s and index %s", doc_type, doc_id)
                delete_actions.append({
                    '_op_type': 'delete',
                    "_index": self.index_name,
                    "_type": doc_type,
                    "_id": doc_id
                })
            bulk(self._es, delete_actions, **kwargs)
        except BulkIndexError as ex:
            # A 404 just means the document was already gone; only re-raise
            # when some deletion failed for a different reason.
            real_errors = [err for err in ex.errors if err['delete']['status'] != 404]
            if real_errors:
                log.exception("An error occurred while removing documents from the index.")
                raise
"def",
"remove",
"(",
"self",
",",
"doc_type",
",",
"doc_ids",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"# ignore is flagged as an unexpected-keyword-arg; ES python client documents that it can be used",
"# pylint: disable=unexpected-keyword-arg",
"actions",
"=",
"[",
"]",
"for",
"doc_id",
"in",
"doc_ids",
":",
"log",
".",
"debug",
"(",
"\"Removing document of type %s and index %s\"",
",",
"doc_type",
",",
"doc_id",
")",
"action",
"=",
"{",
"'_op_type'",
":",
"'delete'",
",",
"\"_index\"",
":",
"self",
".",
"index_name",
",",
"\"_type\"",
":",
"doc_type",
",",
"\"_id\"",
":",
"doc_id",
"}",
"actions",
".",
"append",
"(",
"action",
")",
"bulk",
"(",
"self",
".",
"_es",
",",
"actions",
",",
"*",
"*",
"kwargs",
")",
"except",
"BulkIndexError",
"as",
"ex",
":",
"valid_errors",
"=",
"[",
"error",
"for",
"error",
"in",
"ex",
".",
"errors",
"if",
"error",
"[",
"'delete'",
"]",
"[",
"'status'",
"]",
"!=",
"404",
"]",
"if",
"valid_errors",
":",
"log",
".",
"exception",
"(",
"\"An error occurred while removing documents from the index.\"",
")",
"raise"
] | 42.913043 | 20.130435 |
def get_checkpoint_files(self):
        """
        Return a list of checkpoint files for this DAG node and its job.
        """
        # Copy the node's own files so callers cannot mutate internal state.
        files = list(self.__checkpoint_files)
        job = self.job()
        if isinstance(job, CondorDAGJob):
            files += job.get_checkpoint_files()
        return files
"def",
"get_checkpoint_files",
"(",
"self",
")",
":",
"checkpoint_files",
"=",
"list",
"(",
"self",
".",
"__checkpoint_files",
")",
"if",
"isinstance",
"(",
"self",
".",
"job",
"(",
")",
",",
"CondorDAGJob",
")",
":",
"checkpoint_files",
"=",
"checkpoint_files",
"+",
"self",
".",
"job",
"(",
")",
".",
"get_checkpoint_files",
"(",
")",
"return",
"checkpoint_files"
] | 39.375 | 13.125 |
def curve_fit(self):
        """
        Fit `scipy_data_fitting.Fit.function` to the data and return the
        output of the configured curve fit function, caching the result
        on the instance.
        See `scipy_data_fitting.Fit.options` for details on how to control
        or override the curve fitting algorithm.
        """
        # Return the memoized result when a fit was already computed.
        if hasattr(self, '_curve_fit'):
            return self._curve_fit
        options = self.options.copy()
        fit_function = options.pop('fit_function')
        xdata = self.data.array[0]
        ydata = self.data.array[1]
        if fit_function == 'lmfit':
            self._curve_fit = lmfit.minimize(
                self.lmfit_fcn2min, self.lmfit_parameters,
                args=(xdata, ydata, self.data.error), **options)
        else:
            guesses = [prefix_factor(param) * param['guess']
                       for param in self.fitting_parameters]
            self._curve_fit = fit_function(
                self.function, xdata, ydata, guesses, **options)
        return self._curve_fit
"def",
"curve_fit",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_curve_fit'",
")",
":",
"options",
"=",
"self",
".",
"options",
".",
"copy",
"(",
")",
"fit_function",
"=",
"options",
".",
"pop",
"(",
"'fit_function'",
")",
"independent_values",
"=",
"self",
".",
"data",
".",
"array",
"[",
"0",
"]",
"dependent_values",
"=",
"self",
".",
"data",
".",
"array",
"[",
"1",
"]",
"if",
"fit_function",
"==",
"'lmfit'",
":",
"self",
".",
"_curve_fit",
"=",
"lmfit",
".",
"minimize",
"(",
"self",
".",
"lmfit_fcn2min",
",",
"self",
".",
"lmfit_parameters",
",",
"args",
"=",
"(",
"independent_values",
",",
"dependent_values",
",",
"self",
".",
"data",
".",
"error",
")",
",",
"*",
"*",
"options",
")",
"else",
":",
"p0",
"=",
"[",
"prefix_factor",
"(",
"param",
")",
"*",
"param",
"[",
"'guess'",
"]",
"for",
"param",
"in",
"self",
".",
"fitting_parameters",
"]",
"self",
".",
"_curve_fit",
"=",
"fit_function",
"(",
"self",
".",
"function",
",",
"independent_values",
",",
"dependent_values",
",",
"p0",
",",
"*",
"*",
"options",
")",
"return",
"self",
".",
"_curve_fit"
] | 43.916667 | 20.833333 |
def read(self, section, option):
        """Read a single option value from the backing file.

        Parameters
        ----------
        section : string
            Section.
        option : string
            Option.

        Returns
        -------
        string
            Value.
        """
        # Re-read the file on every call so concurrent writers are seen.
        self.config.read(self.filepath)
        raw_value = self.config.get(section, option)
        return tidy_headers._parse_item.string2item(raw_value, sep=", ")
"def",
"read",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"self",
".",
"config",
".",
"read",
"(",
"self",
".",
"filepath",
")",
"raw",
"=",
"self",
".",
"config",
".",
"get",
"(",
"section",
",",
"option",
")",
"out",
"=",
"tidy_headers",
".",
"_parse_item",
".",
"string2item",
"(",
"raw",
",",
"sep",
"=",
"\", \"",
")",
"return",
"out"
] | 22.157895 | 18.842105 |
def get_sub_balance(self, sub_id, _async=False):
        """Query the balance of each currency account for a sub-account.

        :param sub_id: sub-account id
        :param _async: run the request asynchronously
        :return: the sub-account balance response
        """
        path = '/v1/account/accounts/{sub-uid}'
        params = {'sub-uid': sub_id}
        return api_key_get(params, path, _async=_async)
"def",
"get_sub_balance",
"(",
"self",
",",
"sub_id",
",",
"_async",
"=",
"False",
")",
":",
"params",
"=",
"{",
"}",
"params",
"[",
"'sub-uid'",
"]",
"=",
"sub_id",
"path",
"=",
"'/v1/account/accounts/{sub-uid}'",
"return",
"api_key_get",
"(",
"params",
",",
"path",
",",
"_async",
"=",
"_async",
")"
] | 26 | 14.166667 |
def amounts(masses):
    """
    Calculate the amounts from the specified compound masses.

    :param masses: [kg] dictionary, e.g. {'SiO2': 3.0, 'FeO': 1.5}

    :returns: [kmol] dictionary
    """
    # Iterate items() directly instead of keys() plus a per-key lookup.
    return {compound: amount(compound, mass)
            for compound, mass in masses.items()}
"def",
"amounts",
"(",
"masses",
")",
":",
"return",
"{",
"compound",
":",
"amount",
"(",
"compound",
",",
"masses",
"[",
"compound",
"]",
")",
"for",
"compound",
"in",
"masses",
".",
"keys",
"(",
")",
"}"
] | 26.363636 | 19.454545 |
def _get_encoder_method(stream_type):
"""A function to get the python type to device cloud type converter function.
:param stream_type: The streams data type
:return: A function that when called with the python object will return the serializable
type for sending to the cloud. If there is no function for the given type, or the `stream_type`
is `None` the returned function will simply return the object unchanged.
"""
if stream_type is not None:
return DSTREAM_TYPE_MAP.get(stream_type.upper(), (lambda x: x, lambda x: x))[1]
else:
return lambda x: x | [
"def",
"_get_encoder_method",
"(",
"stream_type",
")",
":",
"if",
"stream_type",
"is",
"not",
"None",
":",
"return",
"DSTREAM_TYPE_MAP",
".",
"get",
"(",
"stream_type",
".",
"upper",
"(",
")",
",",
"(",
"lambda",
"x",
":",
"x",
",",
"lambda",
"x",
":",
"x",
")",
")",
"[",
"1",
"]",
"else",
":",
"return",
"lambda",
"x",
":",
"x"
] | 49.166667 | 24.666667 |
def cumulative_returns(returns, period, freq=None):
    """
    Builds cumulative returns from 'period' returns. This function simulates
    the cumulative effect that a series of gains or losses (the 'returns')
    have on an original amount of capital over a period of time.
    if F is the frequency at which returns are computed (e.g. 1 day if
    'returns' contains daily values) and N is the period for which the retuns
    are computed (e.g. returns after 1 day, 5 hours or 3 days) then:
    - if N <= F the cumulative retuns are trivially computed as Compound Return
    - if N > F (e.g. F 1 day, and N is 3 days) then the returns overlap and the
    cumulative returns are computed building and averaging N interleaved sub
    portfolios (started at subsequent periods 1,2,..,N) each one rebalancing
    every N periods. This correspond to an algorithm which trades the factor
    every single time it is computed, which is statistically more robust and
    with a lower volatity compared to an algorithm that trades the factor
    every N periods and whose returns depend on the specific starting day of
    trading.
    Also note that when the factor is not computed at a specific frequency, for
    exaple a factor representing a random event, it is not efficient to create
    multiples sub-portfolios as it is not certain when the factor will be
    traded and this would result in an underleveraged portfolio. In this case
    the simulated portfolio is fully invested whenever an event happens and if
    a subsequent event occur while the portfolio is still invested in a
    previous event then the portfolio is rebalanced and split equally among the
    active events.
    Parameters
    ----------
    returns: pd.Series
        pd.Series containing factor 'period' forward returns, the index
        contains timestamps at which the trades are computed and the values
        correspond to returns after 'period' time
    period: pandas.Timedelta or string
        Length of period for which the returns are computed (1 day, 2 mins,
        3 hours etc). It can be a Timedelta or a string in the format accepted
        by Timedelta constructor ('1 days', '1D', '30m', '3h', '1D1h', etc)
    freq : pandas DateOffset, optional
        Used to specify a particular trading calendar. If not present
        returns.index.freq will be used
    Returns
    -------
    Cumulative returns series : pd.Series
    Example:
        2015-07-16 09:30:00  -0.012143
        2015-07-16 12:30:00   0.012546
        2015-07-17 09:30:00   0.045350
        2015-07-17 12:30:00   0.065897
        2015-07-20 09:30:00   0.030957
    """
    # Accept 'period' as a string (e.g. '1D', '30m') by coercing to Timedelta.
    if not isinstance(period, pd.Timedelta):
        period = pd.Timedelta(period)
    if freq is None:
        freq = returns.index.freq
    if freq is None:
        freq = BDay()
        warnings.warn("'freq' not set, using business day calendar",
                      UserWarning)
    #
    # returns index contains factor computation timestamps, then add returns
    # timestamps too (factor timestamps + period) and save them to 'full_idx'
    # Cumulative returns will use 'full_idx' index,because we want a cumulative
    # returns value for each entry in 'full_idx'
    #
    trades_idx = returns.index.copy()
    returns_idx = utils.add_custom_calendar_timedelta(trades_idx, period, freq)
    full_idx = trades_idx.union(returns_idx)
    #
    # Build N sub_returns from the single returns Series. Each sub_retuns
    # stream will contain non-overlapping returns.
    # In the next step we'll compute the portfolio returns averaging the
    # returns happening on those overlapping returns streams
    #
    sub_returns = []
    while len(trades_idx) > 0:
        #
        # select non-overlapping returns starting with first timestamp in index
        #
        sub_index = []
        # NOTE(review): the locals 'next' (here) and 'slice' (below) shadow
        # Python builtins; kept as-is to avoid behavior-affecting renames.
        next = trades_idx.min()
        while next <= trades_idx.max():
            sub_index.append(next)
            next = utils.add_custom_calendar_timedelta(next, period, freq)
            # make sure to fetch the next available entry after 'period'
            try:
                i = trades_idx.get_loc(next, method='bfill')
                next = trades_idx[i]
            except KeyError:
                break
        sub_index = pd.DatetimeIndex(sub_index, tz=full_idx.tz)
        subret = returns[sub_index]
        # make the index to have all entries in 'full_idx'
        subret = subret.reindex(full_idx)
        #
        # compute intermediate returns values for each index in subret that are
        # in between the timestaps at which the factors are computed and the
        # timestamps at which the 'period' returns actually happen
        #
        for pret_idx in reversed(sub_index):
            pret = subret[pret_idx]
            # get all timestamps between factor computation and period returns
            pret_end_idx = \
                utils.add_custom_calendar_timedelta(pret_idx, period, freq)
            slice = subret[(subret.index > pret_idx) & (
                subret.index <= pret_end_idx)].index
            if pd.isnull(pret):
                continue
            # Per-interval compounded rate implied by 'ret' over 'period'
            # equal sub-intervals.
            def rate_of_returns(ret, period):
                return ((np.nansum(ret) + 1)**(1. / period)) - 1
            # compute intermediate 'period' returns values, note that this also
            # moves the final 'period' returns value from trading timestamp to
            # trading timestamp + 'period'
            for slice_idx in slice:
                sub_period = utils.diff_custom_calendar_timedeltas(
                    pret_idx, slice_idx, freq)
                subret[slice_idx] = rate_of_returns(pret, period / sub_period)
            subret[pret_idx] = np.nan
            # transform returns as percentage change from previous value
            subret[slice[1:]] = (subret[slice] + 1).pct_change()[slice[1:]]
        sub_returns.append(subret)
        trades_idx = trades_idx.difference(sub_index)
    #
    # Compute portfolio cumulative returns averaging the returns happening on
    # overlapping returns streams.
    #
    sub_portfolios = pd.concat(sub_returns, axis=1)
    portfolio = pd.Series(index=sub_portfolios.index)
    for i, (index, row) in enumerate(sub_portfolios.iterrows()):
        # check the active portfolios, count() returns non-nans elements
        active_subfolios = row.count()
        # fill forward portfolio value
        portfolio.iloc[i] = portfolio.iloc[i - 1] if i > 0 else 1.
        if active_subfolios <= 0:
            continue
        # current portfolio is the average of active sub_portfolios
        portfolio.iloc[i] *= (row + 1).mean(skipna=True)
    return portfolio
"def",
"cumulative_returns",
"(",
"returns",
",",
"period",
",",
"freq",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"period",
",",
"pd",
".",
"Timedelta",
")",
":",
"period",
"=",
"pd",
".",
"Timedelta",
"(",
"period",
")",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"returns",
".",
"index",
".",
"freq",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"BDay",
"(",
")",
"warnings",
".",
"warn",
"(",
"\"'freq' not set, using business day calendar\"",
",",
"UserWarning",
")",
"#",
"# returns index contains factor computation timestamps, then add returns",
"# timestamps too (factor timestamps + period) and save them to 'full_idx'",
"# Cumulative returns will use 'full_idx' index,because we want a cumulative",
"# returns value for each entry in 'full_idx'",
"#",
"trades_idx",
"=",
"returns",
".",
"index",
".",
"copy",
"(",
")",
"returns_idx",
"=",
"utils",
".",
"add_custom_calendar_timedelta",
"(",
"trades_idx",
",",
"period",
",",
"freq",
")",
"full_idx",
"=",
"trades_idx",
".",
"union",
"(",
"returns_idx",
")",
"#",
"# Build N sub_returns from the single returns Series. Each sub_retuns",
"# stream will contain non-overlapping returns.",
"# In the next step we'll compute the portfolio returns averaging the",
"# returns happening on those overlapping returns streams",
"#",
"sub_returns",
"=",
"[",
"]",
"while",
"len",
"(",
"trades_idx",
")",
">",
"0",
":",
"#",
"# select non-overlapping returns starting with first timestamp in index",
"#",
"sub_index",
"=",
"[",
"]",
"next",
"=",
"trades_idx",
".",
"min",
"(",
")",
"while",
"next",
"<=",
"trades_idx",
".",
"max",
"(",
")",
":",
"sub_index",
".",
"append",
"(",
"next",
")",
"next",
"=",
"utils",
".",
"add_custom_calendar_timedelta",
"(",
"next",
",",
"period",
",",
"freq",
")",
"# make sure to fetch the next available entry after 'period'",
"try",
":",
"i",
"=",
"trades_idx",
".",
"get_loc",
"(",
"next",
",",
"method",
"=",
"'bfill'",
")",
"next",
"=",
"trades_idx",
"[",
"i",
"]",
"except",
"KeyError",
":",
"break",
"sub_index",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"sub_index",
",",
"tz",
"=",
"full_idx",
".",
"tz",
")",
"subret",
"=",
"returns",
"[",
"sub_index",
"]",
"# make the index to have all entries in 'full_idx'",
"subret",
"=",
"subret",
".",
"reindex",
"(",
"full_idx",
")",
"#",
"# compute intermediate returns values for each index in subret that are",
"# in between the timestaps at which the factors are computed and the",
"# timestamps at which the 'period' returns actually happen",
"#",
"for",
"pret_idx",
"in",
"reversed",
"(",
"sub_index",
")",
":",
"pret",
"=",
"subret",
"[",
"pret_idx",
"]",
"# get all timestamps between factor computation and period returns",
"pret_end_idx",
"=",
"utils",
".",
"add_custom_calendar_timedelta",
"(",
"pret_idx",
",",
"period",
",",
"freq",
")",
"slice",
"=",
"subret",
"[",
"(",
"subret",
".",
"index",
">",
"pret_idx",
")",
"&",
"(",
"subret",
".",
"index",
"<=",
"pret_end_idx",
")",
"]",
".",
"index",
"if",
"pd",
".",
"isnull",
"(",
"pret",
")",
":",
"continue",
"def",
"rate_of_returns",
"(",
"ret",
",",
"period",
")",
":",
"return",
"(",
"(",
"np",
".",
"nansum",
"(",
"ret",
")",
"+",
"1",
")",
"**",
"(",
"1.",
"/",
"period",
")",
")",
"-",
"1",
"# compute intermediate 'period' returns values, note that this also",
"# moves the final 'period' returns value from trading timestamp to",
"# trading timestamp + 'period'",
"for",
"slice_idx",
"in",
"slice",
":",
"sub_period",
"=",
"utils",
".",
"diff_custom_calendar_timedeltas",
"(",
"pret_idx",
",",
"slice_idx",
",",
"freq",
")",
"subret",
"[",
"slice_idx",
"]",
"=",
"rate_of_returns",
"(",
"pret",
",",
"period",
"/",
"sub_period",
")",
"subret",
"[",
"pret_idx",
"]",
"=",
"np",
".",
"nan",
"# transform returns as percentage change from previous value",
"subret",
"[",
"slice",
"[",
"1",
":",
"]",
"]",
"=",
"(",
"subret",
"[",
"slice",
"]",
"+",
"1",
")",
".",
"pct_change",
"(",
")",
"[",
"slice",
"[",
"1",
":",
"]",
"]",
"sub_returns",
".",
"append",
"(",
"subret",
")",
"trades_idx",
"=",
"trades_idx",
".",
"difference",
"(",
"sub_index",
")",
"#",
"# Compute portfolio cumulative returns averaging the returns happening on",
"# overlapping returns streams.",
"#",
"sub_portfolios",
"=",
"pd",
".",
"concat",
"(",
"sub_returns",
",",
"axis",
"=",
"1",
")",
"portfolio",
"=",
"pd",
".",
"Series",
"(",
"index",
"=",
"sub_portfolios",
".",
"index",
")",
"for",
"i",
",",
"(",
"index",
",",
"row",
")",
"in",
"enumerate",
"(",
"sub_portfolios",
".",
"iterrows",
"(",
")",
")",
":",
"# check the active portfolios, count() returns non-nans elements",
"active_subfolios",
"=",
"row",
".",
"count",
"(",
")",
"# fill forward portfolio value",
"portfolio",
".",
"iloc",
"[",
"i",
"]",
"=",
"portfolio",
".",
"iloc",
"[",
"i",
"-",
"1",
"]",
"if",
"i",
">",
"0",
"else",
"1.",
"if",
"active_subfolios",
"<=",
"0",
":",
"continue",
"# current portfolio is the average of active sub_portfolios",
"portfolio",
".",
"iloc",
"[",
"i",
"]",
"*=",
"(",
"row",
"+",
"1",
")",
".",
"mean",
"(",
"skipna",
"=",
"True",
")",
"return",
"portfolio"
] | 40.276074 | 24.95092 |
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      bool: True if the file entry exists.
    """
    inode_number = getattr(path_spec, 'inode', None)
    file_location = getattr(path_spec, 'location', None)

    found = None
    try:
      # Opening a file by inode number is faster than opening a file by
      # location, so prefer the inode when the path specification has one.
      if inode_number is not None:
        found = self._tsk_file_system.open_meta(inode=inode_number)
      elif file_location is not None:
        found = self._tsk_file_system.open(file_location)
    except IOError:
      pass

    return found is not None
"def",
"FileEntryExistsByPathSpec",
"(",
"self",
",",
"path_spec",
")",
":",
"# Opening a file by inode number is faster than opening a file by location.",
"tsk_file",
"=",
"None",
"inode",
"=",
"getattr",
"(",
"path_spec",
",",
"'inode'",
",",
"None",
")",
"location",
"=",
"getattr",
"(",
"path_spec",
",",
"'location'",
",",
"None",
")",
"try",
":",
"if",
"inode",
"is",
"not",
"None",
":",
"tsk_file",
"=",
"self",
".",
"_tsk_file_system",
".",
"open_meta",
"(",
"inode",
"=",
"inode",
")",
"elif",
"location",
"is",
"not",
"None",
":",
"tsk_file",
"=",
"self",
".",
"_tsk_file_system",
".",
"open",
"(",
"location",
")",
"except",
"IOError",
":",
"pass",
"return",
"tsk_file",
"is",
"not",
"None"
] | 27.875 | 20.916667 |
def _close_rpc_interface(self, connection_id, callback):
        """Disable RPC interface for this IOTile device

        Args:
            connection_id (int): The unique identifier for the connection
            callback (callback): Callback to be called when this command finishes
                callback(conn_id, adapter_id, success, failure_reason)
        """
        try:
            context = self.connections.get_context(connection_id)
        except ArgumentError:
            callback(connection_id, self.id, False, "Could not find connection information")
            return

        self.connections.begin_operation(connection_id, 'close_interface', callback, self.get_config('default_timeout'))

        # Resolve the BLE characteristics needed to tear down notifications.
        try:
            tilebus_service = context['services'][TileBusService]
            header_char = tilebus_service[ReceiveHeaderChar]
            payload_char = tilebus_service[ReceivePayloadChar]
        except KeyError:
            self.connections.finish_operation(connection_id, False, "Can't find characteristics to open rpc interface")
            return

        # Disabling header notifications; the payload characteristic is
        # handed to the completion callback for its own teardown.
        self.bable.set_notification(
            enabled=False,
            connection_handle=context['connection_handle'],
            characteristic=header_char,
            on_notification_set=[self._on_interface_closed, context, payload_char],
            timeout=1.0
        )
"def",
"_close_rpc_interface",
"(",
"self",
",",
"connection_id",
",",
"callback",
")",
":",
"try",
":",
"context",
"=",
"self",
".",
"connections",
".",
"get_context",
"(",
"connection_id",
")",
"except",
"ArgumentError",
":",
"callback",
"(",
"connection_id",
",",
"self",
".",
"id",
",",
"False",
",",
"\"Could not find connection information\"",
")",
"return",
"self",
".",
"connections",
".",
"begin_operation",
"(",
"connection_id",
",",
"'close_interface'",
",",
"callback",
",",
"self",
".",
"get_config",
"(",
"'default_timeout'",
")",
")",
"try",
":",
"service",
"=",
"context",
"[",
"'services'",
"]",
"[",
"TileBusService",
"]",
"header_characteristic",
"=",
"service",
"[",
"ReceiveHeaderChar",
"]",
"payload_characteristic",
"=",
"service",
"[",
"ReceivePayloadChar",
"]",
"except",
"KeyError",
":",
"self",
".",
"connections",
".",
"finish_operation",
"(",
"connection_id",
",",
"False",
",",
"\"Can't find characteristics to open rpc interface\"",
")",
"return",
"self",
".",
"bable",
".",
"set_notification",
"(",
"enabled",
"=",
"False",
",",
"connection_handle",
"=",
"context",
"[",
"'connection_handle'",
"]",
",",
"characteristic",
"=",
"header_characteristic",
",",
"on_notification_set",
"=",
"[",
"self",
".",
"_on_interface_closed",
",",
"context",
",",
"payload_characteristic",
"]",
",",
"timeout",
"=",
"1.0",
")"
] | 42.0625 | 28.75 |
def error(self, i: int=None) -> str:
        """
        Build an error message header, optionally prefixed by an index
        """
        label = "[{}]".format(colors.red("error"))
        if i is None:
            return label
        return "{} {}".format(i, label)
"def",
"error",
"(",
"self",
",",
"i",
":",
"int",
"=",
"None",
")",
"->",
"str",
":",
"head",
"=",
"\"[\"",
"+",
"colors",
".",
"red",
"(",
"\"error\"",
")",
"+",
"\"]\"",
"if",
"i",
"is",
"not",
"None",
":",
"head",
"=",
"str",
"(",
"i",
")",
"+",
"\" \"",
"+",
"head",
"return",
"head"
] | 27.25 | 7 |
def _get_dest_file_and_url(self, filepath, page_meta={}):
""" Return tuple of the file destination and url """
filename = filepath.split("/")[-1]
filepath_base = filepath.replace(filename, "").rstrip("/")
slug = page_meta.get("slug")
fname = slugify(slug) if slug else filename \
.replace(".html", "") \
.replace(".md", "") \
.replace(".jade", "")
if page_meta.get("pretty_url") is False:
dest_file = os.path.join(filepath_base, "%s.html" % fname)
else:
dest_dir = filepath_base
if filename not in ["index.html", "index.md", "index.jade"]:
dest_dir = os.path.join(filepath_base, fname)
dest_file = os.path.join(dest_dir, "index.html")
url = "/" + dest_file.replace("index.html", "")
return dest_file, url | [
"def",
"_get_dest_file_and_url",
"(",
"self",
",",
"filepath",
",",
"page_meta",
"=",
"{",
"}",
")",
":",
"filename",
"=",
"filepath",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"filepath_base",
"=",
"filepath",
".",
"replace",
"(",
"filename",
",",
"\"\"",
")",
".",
"rstrip",
"(",
"\"/\"",
")",
"slug",
"=",
"page_meta",
".",
"get",
"(",
"\"slug\"",
")",
"fname",
"=",
"slugify",
"(",
"slug",
")",
"if",
"slug",
"else",
"filename",
".",
"replace",
"(",
"\".html\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\".md\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\".jade\"",
",",
"\"\"",
")",
"if",
"page_meta",
".",
"get",
"(",
"\"pretty_url\"",
")",
"is",
"False",
":",
"dest_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filepath_base",
",",
"\"%s.html\"",
"%",
"fname",
")",
"else",
":",
"dest_dir",
"=",
"filepath_base",
"if",
"filename",
"not",
"in",
"[",
"\"index.html\"",
",",
"\"index.md\"",
",",
"\"index.jade\"",
"]",
":",
"dest_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filepath_base",
",",
"fname",
")",
"dest_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest_dir",
",",
"\"index.html\"",
")",
"url",
"=",
"\"/\"",
"+",
"dest_file",
".",
"replace",
"(",
"\"index.html\"",
",",
"\"\"",
")",
"return",
"dest_file",
",",
"url"
] | 40.904762 | 17.571429 |
def _varFindLimitSpace(basedir, vars, space, part, lookup_fatal, depth):
''' limits the search space of space to part
basically does space.get(part, None), but with
templating for part and a few more things
'''
# Previous part couldn't be found, nothing to limit to
if space is None:
return space
# A part with escaped .s in it is compounded by { and }, remove them
if part[0] == '{' and part[-1] == '}':
part = part[1:-1]
# Template part to resolve variables within (${var$var2})
part = varReplace(basedir, part, vars, lookup_fatal, depth=depth + 1)
# Now find it
if part in space:
space = space[part]
elif "[" in part:
m = _LISTRE.search(part)
if not m:
return None
else:
try:
space = space[m.group(1)][int(m.group(2))]
except (KeyError, IndexError):
return None
else:
return None
# if space is a string, check if it's a reference to another variable
if isinstance(space, basestring):
space = template_ds(basedir, space, vars, lookup_fatal, depth)
return space | [
"def",
"_varFindLimitSpace",
"(",
"basedir",
",",
"vars",
",",
"space",
",",
"part",
",",
"lookup_fatal",
",",
"depth",
")",
":",
"# Previous part couldn't be found, nothing to limit to",
"if",
"space",
"is",
"None",
":",
"return",
"space",
"# A part with escaped .s in it is compounded by { and }, remove them",
"if",
"part",
"[",
"0",
"]",
"==",
"'{'",
"and",
"part",
"[",
"-",
"1",
"]",
"==",
"'}'",
":",
"part",
"=",
"part",
"[",
"1",
":",
"-",
"1",
"]",
"# Template part to resolve variables within (${var$var2})",
"part",
"=",
"varReplace",
"(",
"basedir",
",",
"part",
",",
"vars",
",",
"lookup_fatal",
",",
"depth",
"=",
"depth",
"+",
"1",
")",
"# Now find it",
"if",
"part",
"in",
"space",
":",
"space",
"=",
"space",
"[",
"part",
"]",
"elif",
"\"[\"",
"in",
"part",
":",
"m",
"=",
"_LISTRE",
".",
"search",
"(",
"part",
")",
"if",
"not",
"m",
":",
"return",
"None",
"else",
":",
"try",
":",
"space",
"=",
"space",
"[",
"m",
".",
"group",
"(",
"1",
")",
"]",
"[",
"int",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
"]",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"return",
"None",
"else",
":",
"return",
"None",
"# if space is a string, check if it's a reference to another variable",
"if",
"isinstance",
"(",
"space",
",",
"basestring",
")",
":",
"space",
"=",
"template_ds",
"(",
"basedir",
",",
"space",
",",
"vars",
",",
"lookup_fatal",
",",
"depth",
")",
"return",
"space"
] | 31.555556 | 22 |
def _incr_decr(self, command, key, value, default, time):
"""
Function which increments and decrements.
:param key: Key's name
:type key: six.string_types
:param value: Number to be (de|in)cremented
:type value: int
:param default: Default value if key does not exist.
:type default: int
:param time: Time in seconds to expire key.
:type time: int
:return: Actual value of the key on server
:rtype: int
"""
time = time if time >= 0 else self.MAXIMUM_EXPIRE_TIME
self._send(struct.pack(self.HEADER_STRUCT +
self.COMMANDS[command]['struct'] % len(key),
self.MAGIC['request'],
self.COMMANDS[command]['command'],
len(key),
20, 0, 0, len(key) + 20, 0, 0, value,
default, time, str_to_bytes(key)))
(magic, opcode, keylen, extlen, datatype, status, bodylen, opaque,
cas, extra_content) = self._get_response()
if status not in (self.STATUS['success'], self.STATUS['server_disconnected']):
raise MemcachedException('Code: %d Message: %s' % (status, extra_content), status)
if status == self.STATUS['server_disconnected']:
return 0
return struct.unpack('!Q', extra_content)[0] | [
"def",
"_incr_decr",
"(",
"self",
",",
"command",
",",
"key",
",",
"value",
",",
"default",
",",
"time",
")",
":",
"time",
"=",
"time",
"if",
"time",
">=",
"0",
"else",
"self",
".",
"MAXIMUM_EXPIRE_TIME",
"self",
".",
"_send",
"(",
"struct",
".",
"pack",
"(",
"self",
".",
"HEADER_STRUCT",
"+",
"self",
".",
"COMMANDS",
"[",
"command",
"]",
"[",
"'struct'",
"]",
"%",
"len",
"(",
"key",
")",
",",
"self",
".",
"MAGIC",
"[",
"'request'",
"]",
",",
"self",
".",
"COMMANDS",
"[",
"command",
"]",
"[",
"'command'",
"]",
",",
"len",
"(",
"key",
")",
",",
"20",
",",
"0",
",",
"0",
",",
"len",
"(",
"key",
")",
"+",
"20",
",",
"0",
",",
"0",
",",
"value",
",",
"default",
",",
"time",
",",
"str_to_bytes",
"(",
"key",
")",
")",
")",
"(",
"magic",
",",
"opcode",
",",
"keylen",
",",
"extlen",
",",
"datatype",
",",
"status",
",",
"bodylen",
",",
"opaque",
",",
"cas",
",",
"extra_content",
")",
"=",
"self",
".",
"_get_response",
"(",
")",
"if",
"status",
"not",
"in",
"(",
"self",
".",
"STATUS",
"[",
"'success'",
"]",
",",
"self",
".",
"STATUS",
"[",
"'server_disconnected'",
"]",
")",
":",
"raise",
"MemcachedException",
"(",
"'Code: %d Message: %s'",
"%",
"(",
"status",
",",
"extra_content",
")",
",",
"status",
")",
"if",
"status",
"==",
"self",
".",
"STATUS",
"[",
"'server_disconnected'",
"]",
":",
"return",
"0",
"return",
"struct",
".",
"unpack",
"(",
"'!Q'",
",",
"extra_content",
")",
"[",
"0",
"]"
] | 42.69697 | 20.393939 |
def request_load_source(self, py_db, seq, filename):
'''
:param str filename:
Note: must be already translated for the server.
'''
try:
assert filename.__class__ == str # i.e.: bytes on py2 and str on py3
with open(filename, 'r') as stream:
source = stream.read()
cmd = py_db.cmd_factory.make_load_source_message(seq, source)
except:
cmd = py_db.cmd_factory.make_error_message(seq, get_exception_traceback_str())
py_db.writer.add_command(cmd) | [
"def",
"request_load_source",
"(",
"self",
",",
"py_db",
",",
"seq",
",",
"filename",
")",
":",
"try",
":",
"assert",
"filename",
".",
"__class__",
"==",
"str",
"# i.e.: bytes on py2 and str on py3",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"stream",
":",
"source",
"=",
"stream",
".",
"read",
"(",
")",
"cmd",
"=",
"py_db",
".",
"cmd_factory",
".",
"make_load_source_message",
"(",
"seq",
",",
"source",
")",
"except",
":",
"cmd",
"=",
"py_db",
".",
"cmd_factory",
".",
"make_error_message",
"(",
"seq",
",",
"get_exception_traceback_str",
"(",
")",
")",
"py_db",
".",
"writer",
".",
"add_command",
"(",
"cmd",
")"
] | 37 | 24.733333 |
def status(cls):
"""Retrieve global status from status.gandi.net."""
return cls.json_get('%s/status' % cls.api_url, empty_key=True,
send_key=False) | [
"def",
"status",
"(",
"cls",
")",
":",
"return",
"cls",
".",
"json_get",
"(",
"'%s/status'",
"%",
"cls",
".",
"api_url",
",",
"empty_key",
"=",
"True",
",",
"send_key",
"=",
"False",
")"
] | 47 | 14.25 |
def get_all(self):
"""Returns the cache stats as a list of dicts."""
ret = []
for cache_name, stat in self.stats_per_cache.items():
ret.append({
'cache_name': cache_name,
'num_hits': len(stat.hit_targets),
'num_misses': len(stat.miss_targets),
'hits': stat.hit_targets,
'misses': stat.miss_targets
})
return ret | [
"def",
"get_all",
"(",
"self",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"cache_name",
",",
"stat",
"in",
"self",
".",
"stats_per_cache",
".",
"items",
"(",
")",
":",
"ret",
".",
"append",
"(",
"{",
"'cache_name'",
":",
"cache_name",
",",
"'num_hits'",
":",
"len",
"(",
"stat",
".",
"hit_targets",
")",
",",
"'num_misses'",
":",
"len",
"(",
"stat",
".",
"miss_targets",
")",
",",
"'hits'",
":",
"stat",
".",
"hit_targets",
",",
"'misses'",
":",
"stat",
".",
"miss_targets",
"}",
")",
"return",
"ret"
] | 30.666667 | 14.416667 |
def keys(self, index=None):
"""Returns a list of keys in the database
"""
if index is not None and index not in self._indexes:
raise ValueError('Index {} does not exist'.format(index))
db = self._indexes[index][0] if index else self._main_db
with self._lmdb.begin(db=db) as txn:
return [
key.decode()
for key in txn.cursor().iternext(keys=True, values=False)
] | [
"def",
"keys",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"not",
"None",
"and",
"index",
"not",
"in",
"self",
".",
"_indexes",
":",
"raise",
"ValueError",
"(",
"'Index {} does not exist'",
".",
"format",
"(",
"index",
")",
")",
"db",
"=",
"self",
".",
"_indexes",
"[",
"index",
"]",
"[",
"0",
"]",
"if",
"index",
"else",
"self",
".",
"_main_db",
"with",
"self",
".",
"_lmdb",
".",
"begin",
"(",
"db",
"=",
"db",
")",
"as",
"txn",
":",
"return",
"[",
"key",
".",
"decode",
"(",
")",
"for",
"key",
"in",
"txn",
".",
"cursor",
"(",
")",
".",
"iternext",
"(",
"keys",
"=",
"True",
",",
"values",
"=",
"False",
")",
"]"
] | 38.166667 | 18.5 |
def CrearLiquidacion(self, tipo_cbte, pto_vta, nro_cbte, fecha,
cod_deposito_acopio, tipo_compra,
variedad_tabaco, cod_provincia_origen_tabaco,
puerta=None, nro_tarjeta=None, horas=None, control=None,
nro_interno=None, iibb_emisor=None, fecha_inicio_actividad=None,
**kwargs):
"Inicializa internamente los datos de una liquidación para autorizar"
# creo el diccionario con los campos generales de la liquidación:
liq = dict(tipoComprobante=tipo_cbte,
nroComprobante=nro_cbte,
puntoVenta=pto_vta,
iibbEmisor=iibb_emisor,
codDepositoAcopio=cod_deposito_acopio,
fechaLiquidacion=fecha,
tipoCompra=tipo_compra,
condicionVenta=[],
variedadTabaco=variedad_tabaco,
codProvinciaOrigenTabaco=cod_provincia_origen_tabaco,
puerta=puerta,
nroTarjeta=nro_tarjeta,
horas=horas,
control=control,
nroInterno=nro_interno,
fechaInicioActividad=fecha_inicio_actividad,
)
self.solicitud = dict(liquidacion=liq,
receptor={},
romaneo=[],
precioClase=[],
retencion=[],
tributo=[],
)
return True | [
"def",
"CrearLiquidacion",
"(",
"self",
",",
"tipo_cbte",
",",
"pto_vta",
",",
"nro_cbte",
",",
"fecha",
",",
"cod_deposito_acopio",
",",
"tipo_compra",
",",
"variedad_tabaco",
",",
"cod_provincia_origen_tabaco",
",",
"puerta",
"=",
"None",
",",
"nro_tarjeta",
"=",
"None",
",",
"horas",
"=",
"None",
",",
"control",
"=",
"None",
",",
"nro_interno",
"=",
"None",
",",
"iibb_emisor",
"=",
"None",
",",
"fecha_inicio_actividad",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# creo el diccionario con los campos generales de la liquidación:",
"liq",
"=",
"dict",
"(",
"tipoComprobante",
"=",
"tipo_cbte",
",",
"nroComprobante",
"=",
"nro_cbte",
",",
"puntoVenta",
"=",
"pto_vta",
",",
"iibbEmisor",
"=",
"iibb_emisor",
",",
"codDepositoAcopio",
"=",
"cod_deposito_acopio",
",",
"fechaLiquidacion",
"=",
"fecha",
",",
"tipoCompra",
"=",
"tipo_compra",
",",
"condicionVenta",
"=",
"[",
"]",
",",
"variedadTabaco",
"=",
"variedad_tabaco",
",",
"codProvinciaOrigenTabaco",
"=",
"cod_provincia_origen_tabaco",
",",
"puerta",
"=",
"puerta",
",",
"nroTarjeta",
"=",
"nro_tarjeta",
",",
"horas",
"=",
"horas",
",",
"control",
"=",
"control",
",",
"nroInterno",
"=",
"nro_interno",
",",
"fechaInicioActividad",
"=",
"fecha_inicio_actividad",
",",
")",
"self",
".",
"solicitud",
"=",
"dict",
"(",
"liquidacion",
"=",
"liq",
",",
"receptor",
"=",
"{",
"}",
",",
"romaneo",
"=",
"[",
"]",
",",
"precioClase",
"=",
"[",
"]",
",",
"retencion",
"=",
"[",
"]",
",",
"tributo",
"=",
"[",
"]",
",",
")",
"return",
"True"
] | 46.545455 | 12 |
def conversations_read(self, id):
"""
Marks a single conversation as read.
Returns the updated `conversation dict`_.
WARNING: This method is currently not documented in the official API and
might therefore be unstable.
"""
id = self.__unpack_id(id)
url = '/api/v1/conversations/{0}/read'.format(str(id))
return self.__api_request('POST', url) | [
"def",
"conversations_read",
"(",
"self",
",",
"id",
")",
":",
"id",
"=",
"self",
".",
"__unpack_id",
"(",
"id",
")",
"url",
"=",
"'/api/v1/conversations/{0}/read'",
".",
"format",
"(",
"str",
"(",
"id",
")",
")",
"return",
"self",
".",
"__api_request",
"(",
"'POST'",
",",
"url",
")"
] | 35.083333 | 13.583333 |
def send_object(bucket, obj, expected_chksum=None,
logger_data=None, restricted=True, as_attachment=False):
"""Send an object for a given bucket.
:param bucket: The bucket (instance or id) to get the object from.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion`
instance.
:params expected_chksum: Expected checksum.
:param logger_data: The python logger.
:param kwargs: Keyword arguments passed to ``Object.send_file()``
:returns: A Flask response.
"""
if not obj.is_head:
check_permission(
current_permission_factory(obj, 'object-read-version'),
hidden=False
)
if expected_chksum and obj.file.checksum != expected_chksum:
current_app.logger.warning(
'File checksum mismatch detected.', extra=logger_data)
file_downloaded.send(current_app._get_current_object(), obj=obj)
return obj.send_file(restricted=restricted,
as_attachment=as_attachment) | [
"def",
"send_object",
"(",
"bucket",
",",
"obj",
",",
"expected_chksum",
"=",
"None",
",",
"logger_data",
"=",
"None",
",",
"restricted",
"=",
"True",
",",
"as_attachment",
"=",
"False",
")",
":",
"if",
"not",
"obj",
".",
"is_head",
":",
"check_permission",
"(",
"current_permission_factory",
"(",
"obj",
",",
"'object-read-version'",
")",
",",
"hidden",
"=",
"False",
")",
"if",
"expected_chksum",
"and",
"obj",
".",
"file",
".",
"checksum",
"!=",
"expected_chksum",
":",
"current_app",
".",
"logger",
".",
"warning",
"(",
"'File checksum mismatch detected.'",
",",
"extra",
"=",
"logger_data",
")",
"file_downloaded",
".",
"send",
"(",
"current_app",
".",
"_get_current_object",
"(",
")",
",",
"obj",
"=",
"obj",
")",
"return",
"obj",
".",
"send_file",
"(",
"restricted",
"=",
"restricted",
",",
"as_attachment",
"=",
"as_attachment",
")"
] | 43.08 | 20.68 |
def _del_cached_value(self, xblock):
"""Remove a value from the xblock's cache, if the cache exists."""
# pylint: disable=protected-access
if hasattr(xblock, '_field_data_cache') and self.name in xblock._field_data_cache:
del xblock._field_data_cache[self.name] | [
"def",
"_del_cached_value",
"(",
"self",
",",
"xblock",
")",
":",
"# pylint: disable=protected-access",
"if",
"hasattr",
"(",
"xblock",
",",
"'_field_data_cache'",
")",
"and",
"self",
".",
"name",
"in",
"xblock",
".",
"_field_data_cache",
":",
"del",
"xblock",
".",
"_field_data_cache",
"[",
"self",
".",
"name",
"]"
] | 58.6 | 13.4 |
def schema_file(self):
""" Gets the full path to the file in which to load configuration schema. """
path = os.getcwd() + '/' + self.lazy_folder
return path + self.schema_filename | [
"def",
"schema_file",
"(",
"self",
")",
":",
"path",
"=",
"os",
".",
"getcwd",
"(",
")",
"+",
"'/'",
"+",
"self",
".",
"lazy_folder",
"return",
"path",
"+",
"self",
".",
"schema_filename"
] | 50 | 7.75 |
def to_table(components, topo_info):
""" normalize raw logical plan info to table """
inputs, outputs = defaultdict(list), defaultdict(list)
for ctype, component in components.items():
if ctype == 'bolts':
for component_name, component_info in component.items():
for input_stream in component_info['inputs']:
input_name = input_stream['component_name']
inputs[component_name].append(input_name)
outputs[input_name].append(component_name)
info = []
spouts_instance = topo_info['physical_plan']['spouts']
bolts_instance = topo_info['physical_plan']['bolts']
for ctype, component in components.items():
# stages is an int so keep going
if ctype == "stages":
continue
for component_name, component_info in component.items():
row = [ctype[:-1], component_name]
if ctype == 'spouts':
row.append(len(spouts_instance[component_name]))
else:
row.append(len(bolts_instance[component_name]))
row.append(','.join(inputs.get(component_name, ['-'])))
row.append(','.join(outputs.get(component_name, ['-'])))
info.append(row)
header = ['type', 'name', 'parallelism', 'input', 'output']
return info, header | [
"def",
"to_table",
"(",
"components",
",",
"topo_info",
")",
":",
"inputs",
",",
"outputs",
"=",
"defaultdict",
"(",
"list",
")",
",",
"defaultdict",
"(",
"list",
")",
"for",
"ctype",
",",
"component",
"in",
"components",
".",
"items",
"(",
")",
":",
"if",
"ctype",
"==",
"'bolts'",
":",
"for",
"component_name",
",",
"component_info",
"in",
"component",
".",
"items",
"(",
")",
":",
"for",
"input_stream",
"in",
"component_info",
"[",
"'inputs'",
"]",
":",
"input_name",
"=",
"input_stream",
"[",
"'component_name'",
"]",
"inputs",
"[",
"component_name",
"]",
".",
"append",
"(",
"input_name",
")",
"outputs",
"[",
"input_name",
"]",
".",
"append",
"(",
"component_name",
")",
"info",
"=",
"[",
"]",
"spouts_instance",
"=",
"topo_info",
"[",
"'physical_plan'",
"]",
"[",
"'spouts'",
"]",
"bolts_instance",
"=",
"topo_info",
"[",
"'physical_plan'",
"]",
"[",
"'bolts'",
"]",
"for",
"ctype",
",",
"component",
"in",
"components",
".",
"items",
"(",
")",
":",
"# stages is an int so keep going",
"if",
"ctype",
"==",
"\"stages\"",
":",
"continue",
"for",
"component_name",
",",
"component_info",
"in",
"component",
".",
"items",
"(",
")",
":",
"row",
"=",
"[",
"ctype",
"[",
":",
"-",
"1",
"]",
",",
"component_name",
"]",
"if",
"ctype",
"==",
"'spouts'",
":",
"row",
".",
"append",
"(",
"len",
"(",
"spouts_instance",
"[",
"component_name",
"]",
")",
")",
"else",
":",
"row",
".",
"append",
"(",
"len",
"(",
"bolts_instance",
"[",
"component_name",
"]",
")",
")",
"row",
".",
"append",
"(",
"','",
".",
"join",
"(",
"inputs",
".",
"get",
"(",
"component_name",
",",
"[",
"'-'",
"]",
")",
")",
")",
"row",
".",
"append",
"(",
"','",
".",
"join",
"(",
"outputs",
".",
"get",
"(",
"component_name",
",",
"[",
"'-'",
"]",
")",
")",
")",
"info",
".",
"append",
"(",
"row",
")",
"header",
"=",
"[",
"'type'",
",",
"'name'",
",",
"'parallelism'",
",",
"'input'",
",",
"'output'",
"]",
"return",
"info",
",",
"header"
] | 42.821429 | 14.821429 |
def remove_quotes_around_tz(cls, timestr):
"""Remove quotes (single and double) around timezone otherwise
`dateutil.parser.parse` raises
"""
quoted = cls.QUOTED_TIMEZONE.match(timestr)
if quoted is not None:
return quoted.group(1) + quoted.group(2) | [
"def",
"remove_quotes_around_tz",
"(",
"cls",
",",
"timestr",
")",
":",
"quoted",
"=",
"cls",
".",
"QUOTED_TIMEZONE",
".",
"match",
"(",
"timestr",
")",
"if",
"quoted",
"is",
"not",
"None",
":",
"return",
"quoted",
".",
"group",
"(",
"1",
")",
"+",
"quoted",
".",
"group",
"(",
"2",
")"
] | 42 | 5.285714 |
def similarity(self, d, d_):
"""
Compute a similarity score for two documents.
Optionally pass in a `term_sim_ref` dict-like, which should be able
to take `term1, term2` as args and return their similarity.
"""
es = set([e.name for e in d.entities])
es_ = set([e.name for e in d_.entities])
e_weight = (len(es) + len(es_) - abs(len(es) - len(es_)))/2
e_score = sum(self.idf_entity[t] for t in es & es_)
toks = set(d.tokens)
toks_ = set(d_.tokens)
t_weight = (len(toks) + len(toks_) - abs(len(toks) - len(toks_)))/2
# If no term similarity reference is passed,
# look only at surface form overlap (i.e. exact overlap)
shared_toks = toks & toks_
overlap = [(t, t, self.idf[t]) for t in shared_toks]
t_score = sum(self.idf[t] for t in shared_toks)
if self.term_sim_ref is not None:
# Double-count exact overlaps b/c we are
# comparing bidirectional term pairs here
t_score *= 2
for toks1, toks2 in [(toks, toks_), (toks_, toks)]:
for t in toks1 - shared_toks:
best_match = max(toks2, key=lambda t_: self.term_sim_ref[t, t_])
sim = self.term_sim_ref[t, best_match]
t_score += sim * ((self.idf[t] + self.idf[best_match])/2)
if sim > 0:
overlap.append((t, best_match, sim * ((self.idf[t] + self.idf[best_match])/2)))
# Adjust term weight
#t_weight /= 2
t_weight = 1/t_weight if t_weight != 0 else 0
e_weight = 1/e_weight if e_weight != 0 else 0
t_score *= t_weight
e_score *= e_weight
if self.debug:
print('\n-------------------------')
print((d.id, d_.id))
print('DOC:', d.id)
print('DOC:', d_.id)
print('\tEntities:')
print('\t', es)
print('\t', es_)
print('\t\tEntity overlap:', es & es_)
print('\t\tEntity weight:', e_weight)
print('\t\tEntity score:', e_score)
print('\tTokens:')
print('\t\t', toks)
print('\t\t', toks_)
print('\t\tToken overlap:', overlap)
print('\t\tToken weight:', t_weight)
print('\t\tToken score:', t_score)
print('\tTotal score:', t_score + e_score)
return t_score + e_score | [
"def",
"similarity",
"(",
"self",
",",
"d",
",",
"d_",
")",
":",
"es",
"=",
"set",
"(",
"[",
"e",
".",
"name",
"for",
"e",
"in",
"d",
".",
"entities",
"]",
")",
"es_",
"=",
"set",
"(",
"[",
"e",
".",
"name",
"for",
"e",
"in",
"d_",
".",
"entities",
"]",
")",
"e_weight",
"=",
"(",
"len",
"(",
"es",
")",
"+",
"len",
"(",
"es_",
")",
"-",
"abs",
"(",
"len",
"(",
"es",
")",
"-",
"len",
"(",
"es_",
")",
")",
")",
"/",
"2",
"e_score",
"=",
"sum",
"(",
"self",
".",
"idf_entity",
"[",
"t",
"]",
"for",
"t",
"in",
"es",
"&",
"es_",
")",
"toks",
"=",
"set",
"(",
"d",
".",
"tokens",
")",
"toks_",
"=",
"set",
"(",
"d_",
".",
"tokens",
")",
"t_weight",
"=",
"(",
"len",
"(",
"toks",
")",
"+",
"len",
"(",
"toks_",
")",
"-",
"abs",
"(",
"len",
"(",
"toks",
")",
"-",
"len",
"(",
"toks_",
")",
")",
")",
"/",
"2",
"# If no term similarity reference is passed,",
"# look only at surface form overlap (i.e. exact overlap)",
"shared_toks",
"=",
"toks",
"&",
"toks_",
"overlap",
"=",
"[",
"(",
"t",
",",
"t",
",",
"self",
".",
"idf",
"[",
"t",
"]",
")",
"for",
"t",
"in",
"shared_toks",
"]",
"t_score",
"=",
"sum",
"(",
"self",
".",
"idf",
"[",
"t",
"]",
"for",
"t",
"in",
"shared_toks",
")",
"if",
"self",
".",
"term_sim_ref",
"is",
"not",
"None",
":",
"# Double-count exact overlaps b/c we are",
"# comparing bidirectional term pairs here",
"t_score",
"*=",
"2",
"for",
"toks1",
",",
"toks2",
"in",
"[",
"(",
"toks",
",",
"toks_",
")",
",",
"(",
"toks_",
",",
"toks",
")",
"]",
":",
"for",
"t",
"in",
"toks1",
"-",
"shared_toks",
":",
"best_match",
"=",
"max",
"(",
"toks2",
",",
"key",
"=",
"lambda",
"t_",
":",
"self",
".",
"term_sim_ref",
"[",
"t",
",",
"t_",
"]",
")",
"sim",
"=",
"self",
".",
"term_sim_ref",
"[",
"t",
",",
"best_match",
"]",
"t_score",
"+=",
"sim",
"*",
"(",
"(",
"self",
".",
"idf",
"[",
"t",
"]",
"+",
"self",
".",
"idf",
"[",
"best_match",
"]",
")",
"/",
"2",
")",
"if",
"sim",
">",
"0",
":",
"overlap",
".",
"append",
"(",
"(",
"t",
",",
"best_match",
",",
"sim",
"*",
"(",
"(",
"self",
".",
"idf",
"[",
"t",
"]",
"+",
"self",
".",
"idf",
"[",
"best_match",
"]",
")",
"/",
"2",
")",
")",
")",
"# Adjust term weight",
"#t_weight /= 2",
"t_weight",
"=",
"1",
"/",
"t_weight",
"if",
"t_weight",
"!=",
"0",
"else",
"0",
"e_weight",
"=",
"1",
"/",
"e_weight",
"if",
"e_weight",
"!=",
"0",
"else",
"0",
"t_score",
"*=",
"t_weight",
"e_score",
"*=",
"e_weight",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"'\\n-------------------------'",
")",
"print",
"(",
"(",
"d",
".",
"id",
",",
"d_",
".",
"id",
")",
")",
"print",
"(",
"'DOC:'",
",",
"d",
".",
"id",
")",
"print",
"(",
"'DOC:'",
",",
"d_",
".",
"id",
")",
"print",
"(",
"'\\tEntities:'",
")",
"print",
"(",
"'\\t'",
",",
"es",
")",
"print",
"(",
"'\\t'",
",",
"es_",
")",
"print",
"(",
"'\\t\\tEntity overlap:'",
",",
"es",
"&",
"es_",
")",
"print",
"(",
"'\\t\\tEntity weight:'",
",",
"e_weight",
")",
"print",
"(",
"'\\t\\tEntity score:'",
",",
"e_score",
")",
"print",
"(",
"'\\tTokens:'",
")",
"print",
"(",
"'\\t\\t'",
",",
"toks",
")",
"print",
"(",
"'\\t\\t'",
",",
"toks_",
")",
"print",
"(",
"'\\t\\tToken overlap:'",
",",
"overlap",
")",
"print",
"(",
"'\\t\\tToken weight:'",
",",
"t_weight",
")",
"print",
"(",
"'\\t\\tToken score:'",
",",
"t_score",
")",
"print",
"(",
"'\\tTotal score:'",
",",
"t_score",
"+",
"e_score",
")",
"return",
"t_score",
"+",
"e_score"
] | 38.587302 | 18.047619 |
async def async_set_operation_mode(
self, operation_mode: OperationMode, password: str = '') -> None:
"""
Set the operation mode on the base unit.
:param operation_mode: the operation mode to change to
:param password: if specified, will be used instead of the password
property when issuing the command
"""
await self._protocol.async_execute(
SetOpModeCommand(operation_mode),
password=password) | [
"async",
"def",
"async_set_operation_mode",
"(",
"self",
",",
"operation_mode",
":",
"OperationMode",
",",
"password",
":",
"str",
"=",
"''",
")",
"->",
"None",
":",
"await",
"self",
".",
"_protocol",
".",
"async_execute",
"(",
"SetOpModeCommand",
"(",
"operation_mode",
")",
",",
"password",
"=",
"password",
")"
] | 38.076923 | 17.153846 |
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, project_url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
project_url_name = urllib_parse.quote(project_name.lower())
if self.index_urls:
# Check that we have the url_name correctly spelled:
# Only check main index if index URL is given
main_index_url = Link(
mkurl_pypi_url(self.index_urls[0]),
trusted=True,
)
page = self._get_page(main_index_url)
if page is None and PyPI.netloc not in str(main_index_url):
warnings.warn(
"Failed to find %r at %s. It is suggested to upgrade "
"your index to support normalized names as the name in "
"/simple/{name}." % (project_name, main_index_url),
RemovedInPip8Warning,
)
project_url_name = self._find_url_name(
Link(self.index_urls[0], trusted=True),
project_url_name,
) or project_url_name
if project_url_name is not None:
return [mkurl_pypi_url(url) for url in self.index_urls]
return [] | [
"def",
"_get_index_urls_locations",
"(",
"self",
",",
"project_name",
")",
":",
"def",
"mkurl_pypi_url",
"(",
"url",
")",
":",
"loc",
"=",
"posixpath",
".",
"join",
"(",
"url",
",",
"project_url_name",
")",
"# For maximum compatibility with easy_install, ensure the path",
"# ends in a trailing slash. Although this isn't in the spec",
"# (and PyPI can handle it without the slash) some other index",
"# implementations might break if they relied on easy_install's",
"# behavior.",
"if",
"not",
"loc",
".",
"endswith",
"(",
"'/'",
")",
":",
"loc",
"=",
"loc",
"+",
"'/'",
"return",
"loc",
"project_url_name",
"=",
"urllib_parse",
".",
"quote",
"(",
"project_name",
".",
"lower",
"(",
")",
")",
"if",
"self",
".",
"index_urls",
":",
"# Check that we have the url_name correctly spelled:",
"# Only check main index if index URL is given",
"main_index_url",
"=",
"Link",
"(",
"mkurl_pypi_url",
"(",
"self",
".",
"index_urls",
"[",
"0",
"]",
")",
",",
"trusted",
"=",
"True",
",",
")",
"page",
"=",
"self",
".",
"_get_page",
"(",
"main_index_url",
")",
"if",
"page",
"is",
"None",
"and",
"PyPI",
".",
"netloc",
"not",
"in",
"str",
"(",
"main_index_url",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Failed to find %r at %s. It is suggested to upgrade \"",
"\"your index to support normalized names as the name in \"",
"\"/simple/{name}.\"",
"%",
"(",
"project_name",
",",
"main_index_url",
")",
",",
"RemovedInPip8Warning",
",",
")",
"project_url_name",
"=",
"self",
".",
"_find_url_name",
"(",
"Link",
"(",
"self",
".",
"index_urls",
"[",
"0",
"]",
",",
"trusted",
"=",
"True",
")",
",",
"project_url_name",
",",
")",
"or",
"project_url_name",
"if",
"project_url_name",
"is",
"not",
"None",
":",
"return",
"[",
"mkurl_pypi_url",
"(",
"url",
")",
"for",
"url",
"in",
"self",
".",
"index_urls",
"]",
"return",
"[",
"]"
] | 39.434783 | 21.152174 |
def stopEventLoop():
"""
Stop the current event loop if possible
returns True if it expects that it was successful, False otherwise
"""
stopper = PyObjCAppHelperRunLoopStopper_wrap.currentRunLoopStopper()
if stopper is None:
if NSApp() is not None:
NSApp().terminate_(None)
return True
return False
NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
0.0,
stopper,
'performStop:',
None,
False)
return True | [
"def",
"stopEventLoop",
"(",
")",
":",
"stopper",
"=",
"PyObjCAppHelperRunLoopStopper_wrap",
".",
"currentRunLoopStopper",
"(",
")",
"if",
"stopper",
"is",
"None",
":",
"if",
"NSApp",
"(",
")",
"is",
"not",
"None",
":",
"NSApp",
"(",
")",
".",
"terminate_",
"(",
"None",
")",
"return",
"True",
"return",
"False",
"NSTimer",
".",
"scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_",
"(",
"0.0",
",",
"stopper",
",",
"'performStop:'",
",",
"None",
",",
"False",
")",
"return",
"True"
] | 29 | 18.666667 |
def grid_model_params_ria(lvgd):
"""Determine grid model parameters for LV grids of sectors
retail/industrial and agricultural
Parameters
----------
lvgd : LVGridDistrictDing0
Low-voltage grid district object
Returns
-------
:obj:`dict`
Structural description of (parts of) LV grid topology
"""
# Choose retail/industrial and agricultural grid model
model_params_ria = {}
if ((lvgd.sector_count_retail +
lvgd.sector_count_industrial > 0) or
(lvgd.peak_load_retail +
lvgd.peak_load_industrial > 0)):
model_params_ria['retail/industrial'] = select_grid_model_ria(
lvgd, 'retail/industrial')
else:
model_params_ria['retail/industrial'] = None
if ((lvgd.sector_count_agricultural > 0) or
(lvgd.peak_load_agricultural > 0)):
model_params_ria['agricultural'] = select_grid_model_ria(lvgd,
'agricultural')
else:
model_params_ria['agricultural'] = None
return model_params_ria | [
"def",
"grid_model_params_ria",
"(",
"lvgd",
")",
":",
"# Choose retail/industrial and agricultural grid model",
"model_params_ria",
"=",
"{",
"}",
"if",
"(",
"(",
"lvgd",
".",
"sector_count_retail",
"+",
"lvgd",
".",
"sector_count_industrial",
">",
"0",
")",
"or",
"(",
"lvgd",
".",
"peak_load_retail",
"+",
"lvgd",
".",
"peak_load_industrial",
">",
"0",
")",
")",
":",
"model_params_ria",
"[",
"'retail/industrial'",
"]",
"=",
"select_grid_model_ria",
"(",
"lvgd",
",",
"'retail/industrial'",
")",
"else",
":",
"model_params_ria",
"[",
"'retail/industrial'",
"]",
"=",
"None",
"if",
"(",
"(",
"lvgd",
".",
"sector_count_agricultural",
">",
"0",
")",
"or",
"(",
"lvgd",
".",
"peak_load_agricultural",
">",
"0",
")",
")",
":",
"model_params_ria",
"[",
"'agricultural'",
"]",
"=",
"select_grid_model_ria",
"(",
"lvgd",
",",
"'agricultural'",
")",
"else",
":",
"model_params_ria",
"[",
"'agricultural'",
"]",
"=",
"None",
"return",
"model_params_ria"
] | 31.970588 | 18.882353 |
def _get_distance_scaling_term(self, C, rjb, mag):
"""
Returns the distance scaling component of the model
Equation 10, Page 63
"""
# Depth adjusted distance, equation 11 (Page 63)
rval = np.sqrt(rjb ** 2.0 + C["c11"] ** 2.0)
f_0, f_1, f_2 = self._get_distance_segment_coefficients(rval)
return ((C["c4"] + C["c5"] * mag) * f_0 +
(C["c6"] + C["c7"] * mag) * f_1 +
(C["c8"] + C["c9"] * mag) * f_2 +
(C["c10"] * rval)) | [
"def",
"_get_distance_scaling_term",
"(",
"self",
",",
"C",
",",
"rjb",
",",
"mag",
")",
":",
"# Depth adjusted distance, equation 11 (Page 63)",
"rval",
"=",
"np",
".",
"sqrt",
"(",
"rjb",
"**",
"2.0",
"+",
"C",
"[",
"\"c11\"",
"]",
"**",
"2.0",
")",
"f_0",
",",
"f_1",
",",
"f_2",
"=",
"self",
".",
"_get_distance_segment_coefficients",
"(",
"rval",
")",
"return",
"(",
"(",
"C",
"[",
"\"c4\"",
"]",
"+",
"C",
"[",
"\"c5\"",
"]",
"*",
"mag",
")",
"*",
"f_0",
"+",
"(",
"C",
"[",
"\"c6\"",
"]",
"+",
"C",
"[",
"\"c7\"",
"]",
"*",
"mag",
")",
"*",
"f_1",
"+",
"(",
"C",
"[",
"\"c8\"",
"]",
"+",
"C",
"[",
"\"c9\"",
"]",
"*",
"mag",
")",
"*",
"f_2",
"+",
"(",
"C",
"[",
"\"c10\"",
"]",
"*",
"rval",
")",
")"
] | 43.083333 | 10.916667 |
def write_struct_field(self, struct_name, field_name, values, x, y, p=0):
    """Write a value into a struct.

    This method is particularly useful for writing values into the ``sv``
    struct which contains some configuration data.  See ``sark.h`` for
    details.

    Parameters
    ----------
    struct_name : string
        Name of the struct to write to, e.g., `"sv"`
    field_name : string
        Name of the field to write, e.g., `"random"`
    values :
        Value(s) to be written into the field.

        .. warning::
            Fields which are arrays must currently be written in their
            entirety.
    """
    # Resolve the field descriptor, its address and struct format string.
    field, address, pack_chars = \
        self._get_struct_field_and_address(struct_name, field_name)

    # Scalar fields take one value; array fields must be supplied whole.
    if field.length == 1:
        data = struct.pack(pack_chars, values)
    else:
        assert len(values) == field.length
        data = struct.pack(pack_chars, *values)

    # Perform the write
    self.write(address, data, x, y, p)
"def",
"write_struct_field",
"(",
"self",
",",
"struct_name",
",",
"field_name",
",",
"values",
",",
"x",
",",
"y",
",",
"p",
"=",
"0",
")",
":",
"# Look up the struct and field",
"field",
",",
"address",
",",
"pack_chars",
"=",
"self",
".",
"_get_struct_field_and_address",
"(",
"struct_name",
",",
"field_name",
")",
"if",
"field",
".",
"length",
"!=",
"1",
":",
"assert",
"len",
"(",
"values",
")",
"==",
"field",
".",
"length",
"data",
"=",
"struct",
".",
"pack",
"(",
"pack_chars",
",",
"*",
"values",
")",
"else",
":",
"data",
"=",
"struct",
".",
"pack",
"(",
"pack_chars",
",",
"values",
")",
"# Perform the write",
"self",
".",
"write",
"(",
"address",
",",
"data",
",",
"x",
",",
"y",
",",
"p",
")"
] | 33.59375 | 20.21875 |
def list(cls, zone_id, options=None):
    """List zone records for a zone."""
    # Fall back to an empty option mapping when none (or a falsy value)
    # is supplied.
    opts = options or {}
    return cls.call('domain.zone.record.list', zone_id, 0, opts)
"def",
"list",
"(",
"cls",
",",
"zone_id",
",",
"options",
"=",
"None",
")",
":",
"options",
"=",
"options",
"if",
"options",
"else",
"{",
"}",
"return",
"cls",
".",
"call",
"(",
"'domain.zone.record.list'",
",",
"zone_id",
",",
"0",
",",
"options",
")"
] | 48.75 | 9.5 |
def sendCommand(self, command):
    """Send a RAPI command through the charger's web interface and parse the reply.

    :param command: RAPI command string to send (passed as the ``rapi`` query
        parameter).
    :return: list of whitespace-separated tokens from the charger's response.
    :raises AttributeError: if neither response format matches (``re.search``
        returns None and ``.group`` fails).
    """
    data = {'rapi': command}
    full_url = self.url + urllib.parse.urlencode(data)
    # BUG FIX: the original code called data.read() a second time for the
    # v1 fallback.  urlopen() returns a one-shot stream, so the second read
    # yielded an empty string and the fallback regex could never match.
    # Read the body exactly once and reuse it for both patterns.
    body = urllib.request.urlopen(full_url).read().decode('utf-8')
    response = re.search(r'\<p>>\$(.+)\<script', body)
    if response is None:  # version 1 firmware - https://github.com/OpenEVSE/ESP8266_WiFi_v1.x/blob/master/OpenEVSE_RAPI_WiFi_ESP8266.ino#L357
        response = re.search(r'\>\>\$(.+)\<p>', body)
    return response.group(1).split()
"def",
"sendCommand",
"(",
"self",
",",
"command",
")",
":",
"data",
"=",
"{",
"'rapi'",
":",
"command",
"}",
"full_url",
"=",
"self",
".",
"url",
"+",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"data",
")",
"data",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"full_url",
")",
"response",
"=",
"re",
".",
"search",
"(",
"'\\<p>>\\$(.+)\\<script'",
",",
"data",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"if",
"response",
"==",
"None",
":",
"#If we are using version 1 - https://github.com/OpenEVSE/ESP8266_WiFi_v1.x/blob/master/OpenEVSE_RAPI_WiFi_ESP8266.ino#L357",
"response",
"=",
"re",
".",
"search",
"(",
"'\\>\\>\\$(.+)\\<p>'",
",",
"data",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"return",
"response",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
")"
] | 64.777778 | 24.111111 |
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return a list of the best "good enough" matches.

    word is a sequence for which close matches are desired (typically a
    string); possibilities is a list of sequences to match against
    (typically a list of strings).

    n (default 3, must be > 0) caps how many matches are returned.
    cutoff (default 0.6) is a float in [0, 1]; candidates scoring below
    it are discarded.

    The best (no more than n) matches are returned sorted by similarity
    score, most similar first.

    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    """
    if not n > 0:
        raise ValueError("n must be > 0: %r" % (n,))
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
    matcher = SequenceMatcher()
    matcher.set_seq2(word)
    scored = []
    for candidate in possibilities:
        matcher.set_seq1(candidate)
        # The three ratios are ordered cheapest-first and each is an upper
        # bound on the next, so failing an early one skips the expensive one.
        if (matcher.real_quick_ratio() >= cutoff and
                matcher.quick_ratio() >= cutoff and
                matcher.ratio() >= cutoff):
            scored.append((matcher.ratio(), candidate))
    # Keep only the n highest-scoring candidates, best first.
    best = heapq.nlargest(n, scored)
    # Strip the scores before returning.
    return [candidate for _score, candidate in best]
"def",
"get_close_matches",
"(",
"word",
",",
"possibilities",
",",
"n",
"=",
"3",
",",
"cutoff",
"=",
"0.6",
")",
":",
"if",
"not",
"n",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"n must be > 0: %r\"",
"%",
"(",
"n",
",",
")",
")",
"if",
"not",
"0.0",
"<=",
"cutoff",
"<=",
"1.0",
":",
"raise",
"ValueError",
"(",
"\"cutoff must be in [0.0, 1.0]: %r\"",
"%",
"(",
"cutoff",
",",
")",
")",
"result",
"=",
"[",
"]",
"s",
"=",
"SequenceMatcher",
"(",
")",
"s",
".",
"set_seq2",
"(",
"word",
")",
"for",
"x",
"in",
"possibilities",
":",
"s",
".",
"set_seq1",
"(",
"x",
")",
"if",
"s",
".",
"real_quick_ratio",
"(",
")",
">=",
"cutoff",
"and",
"s",
".",
"quick_ratio",
"(",
")",
">=",
"cutoff",
"and",
"s",
".",
"ratio",
"(",
")",
">=",
"cutoff",
":",
"result",
".",
"append",
"(",
"(",
"s",
".",
"ratio",
"(",
")",
",",
"x",
")",
")",
"# Move the best scorers to head of list",
"result",
"=",
"heapq",
".",
"nlargest",
"(",
"n",
",",
"result",
")",
"# Strip scores for the best n matches",
"return",
"[",
"x",
"for",
"score",
",",
"x",
"in",
"result",
"]"
] | 34.319149 | 20.468085 |
def _find_link_internal(self, link, args, kwargs):
"""Wrapper around find_link that deals with convenience special-cases:
* If ``link`` has an *href*-attribute, then return it. If not,
consider it as a ``url_regex`` argument.
* If searching for the link fails and debug is active, launch
a browser.
"""
if hasattr(link, 'attrs') and 'href' in link.attrs:
return link
# Check if "link" parameter should be treated as "url_regex"
# but reject obtaining it from both places.
if link and 'url_regex' in kwargs:
raise ValueError('link parameter cannot be treated as '
'url_regex because url_regex is already '
'present in keyword arguments')
elif link:
kwargs['url_regex'] = link
try:
return self.find_link(*args, **kwargs)
except LinkNotFoundError:
if self.get_debug():
print('find_link failed for', kwargs)
self.list_links()
self.launch_browser()
raise | [
"def",
"_find_link_internal",
"(",
"self",
",",
"link",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"link",
",",
"'attrs'",
")",
"and",
"'href'",
"in",
"link",
".",
"attrs",
":",
"return",
"link",
"# Check if \"link\" parameter should be treated as \"url_regex\"",
"# but reject obtaining it from both places.",
"if",
"link",
"and",
"'url_regex'",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'link parameter cannot be treated as '",
"'url_regex because url_regex is already '",
"'present in keyword arguments'",
")",
"elif",
"link",
":",
"kwargs",
"[",
"'url_regex'",
"]",
"=",
"link",
"try",
":",
"return",
"self",
".",
"find_link",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"LinkNotFoundError",
":",
"if",
"self",
".",
"get_debug",
"(",
")",
":",
"print",
"(",
"'find_link failed for'",
",",
"kwargs",
")",
"self",
".",
"list_links",
"(",
")",
"self",
".",
"launch_browser",
"(",
")",
"raise"
] | 38.310345 | 18.482759 |
def delete_user_by_email(self, id, email):
    """Deletes a specified connection user by its email.

    Args:
        id (str): The id of the connection (must be a database connection).

        email (str): The email of the user to delete.

    See: https://auth0.com/docs/api/management/v2#!/Connections/delete_users_by_email

    Returns:
        An empty dict.
    """
    endpoint = self._url(id) + '/users'
    return self.client.delete(endpoint, params={'email': email})
"def",
"delete_user_by_email",
"(",
"self",
",",
"id",
",",
"email",
")",
":",
"return",
"self",
".",
"client",
".",
"delete",
"(",
"self",
".",
"_url",
"(",
"id",
")",
"+",
"'/users'",
",",
"params",
"=",
"{",
"'email'",
":",
"email",
"}",
")"
] | 33.928571 | 26.714286 |
def create_option(cls, option, **kwargs):
    """Create Option

    Create a new Option.

    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_option(option, async=True)
    >>> result = thread.get()

    :param async bool
    :param Option option: Attributes of option to create (required)
    :return: Option
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous mode: hand back the request thread directly.
        return cls._create_option_with_http_info(option, **kwargs)
    # Synchronous mode: wait for the call and return the parsed data.
    result = cls._create_option_with_http_info(option, **kwargs)
    return result
"def",
"create_option",
"(",
"cls",
",",
"option",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_create_option_with_http_info",
"(",
"option",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_create_option_with_http_info",
"(",
"option",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 38.142857 | 17.904762 |
def query_helper(request,namespace, docid, configuration=None):
    """Does the actual query, called by query() or pub_query(), not directly.

    Validates every FQL query in the JSON POST body against the caller's
    namespace and (for non-"pub" configurations) write permissions, then
    forwards the whole batch to the FoLiA document server and relays its
    JSON reply.  Any validation or server failure is returned as an
    HttpResponseForbidden.
    """
    flatargs = {
        'customslicesize': request.POST.get('customslicesize',settings.CONFIGURATIONS[configuration].get('customslicesize','50')), #for pagination of search results
    }
    #stupid compatibility stuff
    # Decode the JSON request body; branches cover Python 2 vs 3 and newer
    # Django (request.body) vs older Django (request.raw_post_data).
    if sys.version < '3':
        if hasattr(request, 'body'):
            data = json.loads(unicode(request.body,'utf-8')) #pylint: disable=undefined-variable
        else: #older django
            data = json.loads(unicode(request.raw_post_data,'utf-8')) #pylint: disable=undefined-variable
    else:
        if hasattr(request, 'body'):
            data = json.loads(str(request.body,'utf-8'))
        else: #older django
            data = json.loads(str(request.raw_post_data,'utf-8'))
    if not data['queries']:
        return HttpResponseForbidden("No queries to run")
    # Validate each query individually before transmitting the batch.
    for query in data['queries']:
        #get document selector and check it doesn't violate the namespace
        docselector, query = getdocumentselector(query)
        if not docselector:
            return HttpResponseForbidden("Query does not start with a valid document selector (USE keyword)!")
        elif docselector[0] != namespace:
            return HttpResponseForbidden("Query would affect a different namespace than your current one, forbidden!")
        # GET / CQL / META queries are passed through unparsed; everything
        # else is parsed as FQL to catch syntax errors client-side.
        if query != "GET" and query[:4] != "CQL " and query[:4] != "META":
            #parse query on this end to catch syntax errors prior to sending, should be fast enough anyway
            #first resolve variables to dummies (real ones will be handled server-side) as it won't be valid FQL otherwise
            query = query.replace("$FOLIADOCSERVE_PROCESSOR", "PROCESSOR name \"foliadocserve\"")
            query = query.replace("$FLAT_PROCESSOR", "PROCESSOR name \"FLAT\" version \"" + VERSION + "\" host \"" + request.get_host() + "\" src \"" + request.build_absolute_uri("/") + "\"") #also another instance in comm.py
            try:
                query = fql.Query(query)
            except fql.SyntaxError as e:
                return HttpResponseForbidden("FQL Syntax Error: " + str(e))
            # Anything that declares or is not a plain SELECT needs write access.
            needwritepermission = query.declarations or query.action and query.action.action != "SELECT"
        else:
            needwritepermission = False
        if configuration != "pub":
            if needwritepermission and not flat.users.models.haswritepermission(request.user.username, namespace, request):
                return HttpResponseForbidden("Permission denied, no write access")
    query = "\n".join(data['queries']) #throw all queries on a big pile to transmit
    try:
        d = flat.comm.query(request, query,**flatargs)
    except Exception as e:
        # On Python 2 the error text must be explicitly encoded before
        # concatenation with the encoded prefix.
        if sys.version < '3':
            errmsg = docserveerror(e)['fatalerror_text']
            return HttpResponseForbidden("FoLiA Document Server error: ".encode('utf-8') + errmsg.encode('utf-8'))
        else:
            return HttpResponseForbidden("FoLiA Document Server error: " + docserveerror(e)['fatalerror_text'])
    return HttpResponse(json.dumps(d).encode('utf-8'), content_type='application/json')
"def",
"query_helper",
"(",
"request",
",",
"namespace",
",",
"docid",
",",
"configuration",
"=",
"None",
")",
":",
"flatargs",
"=",
"{",
"'customslicesize'",
":",
"request",
".",
"POST",
".",
"get",
"(",
"'customslicesize'",
",",
"settings",
".",
"CONFIGURATIONS",
"[",
"configuration",
"]",
".",
"get",
"(",
"'customslicesize'",
",",
"'50'",
")",
")",
",",
"#for pagination of search results",
"}",
"#stupid compatibility stuff",
"if",
"sys",
".",
"version",
"<",
"'3'",
":",
"if",
"hasattr",
"(",
"request",
",",
"'body'",
")",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"unicode",
"(",
"request",
".",
"body",
",",
"'utf-8'",
")",
")",
"#pylint: disable=undefined-variable",
"else",
":",
"#older django",
"data",
"=",
"json",
".",
"loads",
"(",
"unicode",
"(",
"request",
".",
"raw_post_data",
",",
"'utf-8'",
")",
")",
"#pylint: disable=undefined-variable",
"else",
":",
"if",
"hasattr",
"(",
"request",
",",
"'body'",
")",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"str",
"(",
"request",
".",
"body",
",",
"'utf-8'",
")",
")",
"else",
":",
"#older django",
"data",
"=",
"json",
".",
"loads",
"(",
"str",
"(",
"request",
".",
"raw_post_data",
",",
"'utf-8'",
")",
")",
"if",
"not",
"data",
"[",
"'queries'",
"]",
":",
"return",
"HttpResponseForbidden",
"(",
"\"No queries to run\"",
")",
"for",
"query",
"in",
"data",
"[",
"'queries'",
"]",
":",
"#get document selector and check it doesn't violate the namespace",
"docselector",
",",
"query",
"=",
"getdocumentselector",
"(",
"query",
")",
"if",
"not",
"docselector",
":",
"return",
"HttpResponseForbidden",
"(",
"\"Query does not start with a valid document selector (USE keyword)!\"",
")",
"elif",
"docselector",
"[",
"0",
"]",
"!=",
"namespace",
":",
"return",
"HttpResponseForbidden",
"(",
"\"Query would affect a different namespace than your current one, forbidden!\"",
")",
"if",
"query",
"!=",
"\"GET\"",
"and",
"query",
"[",
":",
"4",
"]",
"!=",
"\"CQL \"",
"and",
"query",
"[",
":",
"4",
"]",
"!=",
"\"META\"",
":",
"#parse query on this end to catch syntax errors prior to sending, should be fast enough anyway",
"#first resolve variables to dummies (real ones will be handled server-side) as it won't be valid FQL otherwise",
"query",
"=",
"query",
".",
"replace",
"(",
"\"$FOLIADOCSERVE_PROCESSOR\"",
",",
"\"PROCESSOR name \\\"foliadocserve\\\"\"",
")",
"query",
"=",
"query",
".",
"replace",
"(",
"\"$FLAT_PROCESSOR\"",
",",
"\"PROCESSOR name \\\"FLAT\\\" version \\\"\"",
"+",
"VERSION",
"+",
"\"\\\" host \\\"\"",
"+",
"request",
".",
"get_host",
"(",
")",
"+",
"\"\\\" src \\\"\"",
"+",
"request",
".",
"build_absolute_uri",
"(",
"\"/\"",
")",
"+",
"\"\\\"\"",
")",
"#also another instance in comm.py",
"try",
":",
"query",
"=",
"fql",
".",
"Query",
"(",
"query",
")",
"except",
"fql",
".",
"SyntaxError",
"as",
"e",
":",
"return",
"HttpResponseForbidden",
"(",
"\"FQL Syntax Error: \"",
"+",
"str",
"(",
"e",
")",
")",
"needwritepermission",
"=",
"query",
".",
"declarations",
"or",
"query",
".",
"action",
"and",
"query",
".",
"action",
".",
"action",
"!=",
"\"SELECT\"",
"else",
":",
"needwritepermission",
"=",
"False",
"if",
"configuration",
"!=",
"\"pub\"",
":",
"if",
"needwritepermission",
"and",
"not",
"flat",
".",
"users",
".",
"models",
".",
"haswritepermission",
"(",
"request",
".",
"user",
".",
"username",
",",
"namespace",
",",
"request",
")",
":",
"return",
"HttpResponseForbidden",
"(",
"\"Permission denied, no write access\"",
")",
"query",
"=",
"\"\\n\"",
".",
"join",
"(",
"data",
"[",
"'queries'",
"]",
")",
"#throw all queries on a big pile to transmit",
"try",
":",
"d",
"=",
"flat",
".",
"comm",
".",
"query",
"(",
"request",
",",
"query",
",",
"*",
"*",
"flatargs",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"sys",
".",
"version",
"<",
"'3'",
":",
"errmsg",
"=",
"docserveerror",
"(",
"e",
")",
"[",
"'fatalerror_text'",
"]",
"return",
"HttpResponseForbidden",
"(",
"\"FoLiA Document Server error: \"",
".",
"encode",
"(",
"'utf-8'",
")",
"+",
"errmsg",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"return",
"HttpResponseForbidden",
"(",
"\"FoLiA Document Server error: \"",
"+",
"docserveerror",
"(",
"e",
")",
"[",
"'fatalerror_text'",
"]",
")",
"return",
"HttpResponse",
"(",
"json",
".",
"dumps",
"(",
"d",
")",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"content_type",
"=",
"'application/json'",
")"
] | 57.090909 | 35.545455 |
def add_end_event_to_diagram(self, process_id, end_event_name="", end_event_definition=None, node_id=None):
    """
    Adds an EndEvent element to BPMN diagram.

    User-defined attributes:

    - name
    - event definition (creates a special type of end event). Supported event definitions
      * `terminate`: 'terminateEventDefinition',
      * `signal`: 'signalEventDefinition',
      * `error`: 'errorEventDefinition',
      * `escalation`: 'escalationEventDefinition',
      * `message`: 'messageEventDefinition',
      * `compensate`: 'compensateEventDefinition'.

    :param process_id: string object. ID of parent process,
    :param end_event_name: string object. Name of end event,
    :param end_event_definition: event definition name (one of the keys above).
        Unsupported or missing values yield an event with no definitions.
    :param node_id: string object. ID of node. Default value - None.
    :return: a tuple, where first value is endEvent ID, second a reference to created object,
    """
    end_event_id, end_event = self.add_flow_node_to_diagram(process_id, consts.Consts.end_event, end_event_name,
                                                            node_id)
    end_event_definitions = {"terminate": "terminateEventDefinition", "escalation": "escalationEventDefinition",
                             "message": "messageEventDefinition", "compensate": "compensateEventDefinition",
                             "signal": "signalEventDefinition", "error": "errorEventDefinition"}
    event_def_list = []
    # The previous six-way if/elif chain made the identical call once per
    # supported key; a single membership test against the mapping is
    # equivalent and keeps the supported set in one place.  The isinstance
    # guard preserves the old behavior (empty list, no TypeError) for
    # unhashable or non-string inputs such as lists.
    if isinstance(end_event_definition, str) and end_event_definition in end_event_definitions:
        event_def_list.append(
            self.add_event_definition_element(end_event_definition, end_event_definitions))
    self.diagram_graph.node[end_event_id][consts.Consts.event_definitions] = event_def_list
    return end_event_id, end_event
"def",
"add_end_event_to_diagram",
"(",
"self",
",",
"process_id",
",",
"end_event_name",
"=",
"\"\"",
",",
"end_event_definition",
"=",
"None",
",",
"node_id",
"=",
"None",
")",
":",
"end_event_id",
",",
"end_event",
"=",
"self",
".",
"add_flow_node_to_diagram",
"(",
"process_id",
",",
"consts",
".",
"Consts",
".",
"end_event",
",",
"end_event_name",
",",
"node_id",
")",
"end_event_definitions",
"=",
"{",
"\"terminate\"",
":",
"\"terminateEventDefinition\"",
",",
"\"escalation\"",
":",
"\"escalationEventDefinition\"",
",",
"\"message\"",
":",
"\"messageEventDefinition\"",
",",
"\"compensate\"",
":",
"\"compensateEventDefinition\"",
",",
"\"signal\"",
":",
"\"signalEventDefinition\"",
",",
"\"error\"",
":",
"\"errorEventDefinition\"",
"}",
"event_def_list",
"=",
"[",
"]",
"if",
"end_event_definition",
"==",
"\"terminate\"",
":",
"event_def_list",
".",
"append",
"(",
"self",
".",
"add_event_definition_element",
"(",
"\"terminate\"",
",",
"end_event_definitions",
")",
")",
"elif",
"end_event_definition",
"==",
"\"escalation\"",
":",
"event_def_list",
".",
"append",
"(",
"self",
".",
"add_event_definition_element",
"(",
"\"escalation\"",
",",
"end_event_definitions",
")",
")",
"elif",
"end_event_definition",
"==",
"\"message\"",
":",
"event_def_list",
".",
"append",
"(",
"self",
".",
"add_event_definition_element",
"(",
"\"message\"",
",",
"end_event_definitions",
")",
")",
"elif",
"end_event_definition",
"==",
"\"compensate\"",
":",
"event_def_list",
".",
"append",
"(",
"self",
".",
"add_event_definition_element",
"(",
"\"compensate\"",
",",
"end_event_definitions",
")",
")",
"elif",
"end_event_definition",
"==",
"\"signal\"",
":",
"event_def_list",
".",
"append",
"(",
"self",
".",
"add_event_definition_element",
"(",
"\"signal\"",
",",
"end_event_definitions",
")",
")",
"elif",
"end_event_definition",
"==",
"\"error\"",
":",
"event_def_list",
".",
"append",
"(",
"self",
".",
"add_event_definition_element",
"(",
"\"error\"",
",",
"end_event_definitions",
")",
")",
"self",
".",
"diagram_graph",
".",
"node",
"[",
"end_event_id",
"]",
"[",
"consts",
".",
"Consts",
".",
"event_definitions",
"]",
"=",
"event_def_list",
"return",
"end_event_id",
",",
"end_event"
] | 63.097561 | 32.756098 |
def horizontal_random_walk(size):
    """
    Generates random walks that start at the left side of the image and move towards the right.

    :param size: The size of the image
    :return: An iterator of paths, each an iterator of (x,y) coordinates
    """
    _width, height = size
    # Always step one pixel to the right; drift up, straight or down with
    # equal probability.
    step_weights = {(1, dy): 1 / 3.0 for dy in (-1, 0, 1)}
    # One walk per row, each starting on the left edge.
    left_edge = [(0, row) for row in range(height)]
    return random_walk_path(size, step_weights, left_edge)
"def",
"horizontal_random_walk",
"(",
"size",
")",
":",
"_",
",",
"height",
"=",
"size",
"distribution",
"=",
"{",
"(",
"1",
",",
"dy",
")",
":",
"1",
"/",
"3.0",
"for",
"dy",
"in",
"[",
"-",
"1",
",",
"0",
",",
"1",
"]",
"}",
"start_points",
"=",
"[",
"(",
"0",
",",
"y",
")",
"for",
"y",
"in",
"range",
"(",
"height",
")",
"]",
"return",
"random_walk_path",
"(",
"size",
",",
"distribution",
",",
"start_points",
")"
] | 44 | 16.4 |
def do_execute(self):
    """
    The actual execution of the actor.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    handler = self.storagehandler
    if handler is None:
        return "No storage handler available!"
    key = str(self.resolve_option("storage_name"))
    if key not in handler.storage:
        return "No storage item called '" + key + "' present!"
    # Wrap the stored value in a Token and emit it.
    self._output.append(Token(handler.storage[key]))
    return None
"def",
"do_execute",
"(",
"self",
")",
":",
"if",
"self",
".",
"storagehandler",
"is",
"None",
":",
"return",
"\"No storage handler available!\"",
"sname",
"=",
"str",
"(",
"self",
".",
"resolve_option",
"(",
"\"storage_name\"",
")",
")",
"if",
"sname",
"not",
"in",
"self",
".",
"storagehandler",
".",
"storage",
":",
"return",
"\"No storage item called '\"",
"+",
"sname",
"+",
"\"' present!\"",
"self",
".",
"_output",
".",
"append",
"(",
"Token",
"(",
"self",
".",
"storagehandler",
".",
"storage",
"[",
"sname",
"]",
")",
")",
"return",
"None"
] | 37 | 15.714286 |
def wavg(groupe, var):
    '''
    Return the weighted mean of column *var* within *groupe*, using the
    'pondmen' column as weights.
    '''
    values = groupe[var]
    weights = groupe['pondmen']
    return (values * weights).sum() / weights.sum()
"def",
"wavg",
"(",
"groupe",
",",
"var",
")",
":",
"d",
"=",
"groupe",
"[",
"var",
"]",
"w",
"=",
"groupe",
"[",
"'pondmen'",
"]",
"return",
"(",
"d",
"*",
"w",
")",
".",
"sum",
"(",
")",
"/",
"w",
".",
"sum",
"(",
")"
] | 26.285714 | 22.285714 |
def program_rtr_nwk_next_hop(self, rout_id, next_hop, cidr):
    """Program the next hop for all networks of a tenant. """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    route_args = ['route', 'add', '-net', cidr, 'gw', next_hop]
    # Apply the route inside the router's namespace; report failure upward.
    if not self.program_rtr(route_args, rout_id, namespace=namespace):
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
"def",
"program_rtr_nwk_next_hop",
"(",
"self",
",",
"rout_id",
",",
"next_hop",
",",
"cidr",
")",
":",
"namespace",
"=",
"self",
".",
"find_rtr_namespace",
"(",
"rout_id",
")",
"if",
"namespace",
"is",
"None",
":",
"LOG",
".",
"error",
"(",
"\"Unable to find namespace for router %s\"",
",",
"rout_id",
")",
"return",
"False",
"args",
"=",
"[",
"'route'",
",",
"'add'",
",",
"'-net'",
",",
"cidr",
",",
"'gw'",
",",
"next_hop",
"]",
"ret",
"=",
"self",
".",
"program_rtr",
"(",
"args",
",",
"rout_id",
",",
"namespace",
"=",
"namespace",
")",
"if",
"not",
"ret",
":",
"LOG",
".",
"error",
"(",
"\"Program router returned error for %s\"",
",",
"rout_id",
")",
"return",
"False",
"return",
"True"
] | 43.153846 | 20.461538 |
def p_if_then_part(p):
    """ if_then_part : IF expr then """
    # NOTE: the docstring above is the PLY/yacc grammar production for this
    # rule -- it is consumed by the parser generator and must not be
    # reworded as ordinary documentation.
    # Warn when the IF condition is a compile-time numeric constant, since
    # the branch outcome is then always the same.
    if is_number(p[2]):
        api.errmsg.warning_condition_is_always(p.lineno(1), bool(p[2].value))
    p[0] = p[2]
"def",
"p_if_then_part",
"(",
"p",
")",
":",
"if",
"is_number",
"(",
"p",
"[",
"2",
"]",
")",
":",
"api",
".",
"errmsg",
".",
"warning_condition_is_always",
"(",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"bool",
"(",
"p",
"[",
"2",
"]",
".",
"value",
")",
")",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"2",
"]"
] | 29.333333 | 22.833333 |
def check_errors(self, response):
    """Validate a decoded API response, raising on failure.

    On success the decoded payload is stored back on ``response._content``
    and ``None`` is returned.  On failure the first ERROR-level message is
    mapped to a specific exception type (NotFoundError, TargetExistsError,
    or the generic DynectError).

    :raises GeneralError: malformed response (no status, no messages,
        messages not iterable, or no ERROR-level message).
    """
    # Read content.
    content = response.content
    if 'status' not in content:
        raise self.GeneralError('We expect a status field.')
    # Return the decoded content if status is success.
    if content['status'] == 'success':
        response._content = content
        return
    # Expect messages if some kind of error.
    if 'msgs' not in content:
        # (typo "expcet" fixed in this message)
        raise self.GeneralError('We expect messages in case of error.')
    try:
        messages = list(content['msgs'])
    except TypeError:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only a non-iterable value is expected here.
        raise self.GeneralError("Messages must be a list.")
    # Try to find common errors in the response.
    for msg in messages:
        if 'LVL' in msg and msg['LVL'] == 'ERROR':
            # Check if is a not found error.
            if msg['ERR_CD'] == 'NOT_FOUND':
                raise self.NotFoundError(msg['INFO'])
            # Duplicated target.
            elif msg['ERR_CD'] == 'TARGET_EXISTS':
                raise self.TargetExistsError(msg['INFO'])
            # Some other error.
            else:
                raise self.DynectError(msg['INFO'])
    raise self.GeneralError("We need at least one error message.")
"def",
"check_errors",
"(",
"self",
",",
"response",
")",
":",
"# Read content.",
"content",
"=",
"response",
".",
"content",
"if",
"'status'",
"not",
"in",
"content",
":",
"raise",
"self",
".",
"GeneralError",
"(",
"'We expect a status field.'",
")",
"# Return the decoded content if status is success.",
"if",
"content",
"[",
"'status'",
"]",
"==",
"'success'",
":",
"response",
".",
"_content",
"=",
"content",
"return",
"# Expect messages if some kind of error.",
"if",
"'msgs'",
"not",
"in",
"content",
":",
"raise",
"self",
".",
"GeneralError",
"(",
"'We expcet messages in case of error.'",
")",
"try",
":",
"messages",
"=",
"list",
"(",
"content",
"[",
"'msgs'",
"]",
")",
"except",
":",
"raise",
"self",
".",
"GeneralError",
"(",
"\"Messages must be a list.\"",
")",
"# Try to found common errors in the response.",
"for",
"msg",
"in",
"messages",
":",
"if",
"'LVL'",
"in",
"msg",
"and",
"msg",
"[",
"'LVL'",
"]",
"==",
"'ERROR'",
":",
"# Check if is a not found error.",
"if",
"msg",
"[",
"'ERR_CD'",
"]",
"==",
"'NOT_FOUND'",
":",
"raise",
"self",
".",
"NotFoundError",
"(",
"msg",
"[",
"'INFO'",
"]",
")",
"# Duplicated target.",
"elif",
"msg",
"[",
"'ERR_CD'",
"]",
"==",
"'TARGET_EXISTS'",
":",
"raise",
"self",
".",
"TargetExistsError",
"(",
"msg",
"[",
"'INFO'",
"]",
")",
"# Some other error.",
"else",
":",
"raise",
"self",
".",
"DynectError",
"(",
"msg",
"[",
"'INFO'",
"]",
")",
"raise",
"self",
".",
"GeneralError",
"(",
"\"We need at least one error message.\"",
")"
] | 31.512195 | 20.878049 |
def uvindex_around_coords(self, lat, lon):
    """
    Queries the OWM Weather API for the Ultra Violet value sampled around
    the provided geocoordinates.  A *UVIndex* object instance is returned,
    encapsulating a *Location* object and the UV intensity value.

    :param lat: the location's latitude, must be between -90.0 and 90.0
    :type lat: int/float
    :param lon: the location's longitude, must be between -180.0 and 180.0
    :type lon: int/float
    :return: a *UVIndex* instance or ``None`` if data is not available
    :raises: *ParseResponseException* when OWM Weather API responses' data
        cannot be parsed, *APICallException* when OWM Weather API can not be
        reached, *ValueError* for wrong input values
    """
    # Validate coordinates first (longitude then latitude, as before).
    geo.assert_is_lon(lon)
    geo.assert_is_lat(lat)
    query_params = {'lon': lon, 'lat': lat}
    raw_json = self._uvapi.get_uvi(query_params)
    return self._parsers['uvindex'].parse_JSON(raw_json)
"def",
"uvindex_around_coords",
"(",
"self",
",",
"lat",
",",
"lon",
")",
":",
"geo",
".",
"assert_is_lon",
"(",
"lon",
")",
"geo",
".",
"assert_is_lat",
"(",
"lat",
")",
"params",
"=",
"{",
"'lon'",
":",
"lon",
",",
"'lat'",
":",
"lat",
"}",
"json_data",
"=",
"self",
".",
"_uvapi",
".",
"get_uvi",
"(",
"params",
")",
"uvindex",
"=",
"self",
".",
"_parsers",
"[",
"'uvindex'",
"]",
".",
"parse_JSON",
"(",
"json_data",
")",
"return",
"uvindex"
] | 48.727273 | 20.636364 |
def get_undo_redo_list_from_active_trail_history_item_to_version_id(self, version_id):
"""Perform fast search from currently active branch to specific version_id and collect all recovery steps.
"""
all_trail_action = [a.version_id for a in self.single_trail_history() if a is not None]
all_active_action = self.get_all_active_actions()
undo_redo_list = []
_undo_redo_list = []
intermediate_version_id = version_id
if self.with_verbose:
logger.verbose("Version_id : {0} in".format(intermediate_version_id))
logger.verbose("Active actions: {0} in: {1}".format(all_active_action,
intermediate_version_id in all_active_action))
logger.verbose("Trail actions : {0} in: {1}".format(all_trail_action,
intermediate_version_id in all_trail_action))
if intermediate_version_id not in all_trail_action:
# get undo to come from version_id to trail_action
while intermediate_version_id not in all_trail_action:
_undo_redo_list.insert(0, (intermediate_version_id, 'redo'))
intermediate_version_id = self.all_time_history[intermediate_version_id].prev_id
intermediate_goal_version_id = intermediate_version_id
else:
intermediate_goal_version_id = version_id
intermediate_version_id = self.trail_history[self.trail_pointer].version_id
if self.with_verbose:
logger.verbose("Version_id : {0} {1}".format(intermediate_goal_version_id, intermediate_version_id))
logger.verbose("Active actions: {0} in: {1}".format(all_active_action,
intermediate_version_id in all_active_action))
logger.verbose("Trail actions : {0} in: {1}".format(all_trail_action,
intermediate_version_id in all_trail_action))
# collect undo and redo on trail
if intermediate_goal_version_id in all_active_action:
# collect needed undo to reach intermediate version
while not intermediate_version_id == intermediate_goal_version_id:
undo_redo_list.append((intermediate_version_id, 'undo'))
intermediate_version_id = self.all_time_history[intermediate_version_id].prev_id
elif intermediate_goal_version_id in all_trail_action:
# collect needed redo to reach intermediate version
while not intermediate_version_id == intermediate_goal_version_id:
intermediate_version_id = self.all_time_history[intermediate_version_id].next_id
undo_redo_list.append((intermediate_version_id, 'redo'))
for elem in _undo_redo_list:
undo_redo_list.append(elem)
return undo_redo_list | [
"def",
"get_undo_redo_list_from_active_trail_history_item_to_version_id",
"(",
"self",
",",
"version_id",
")",
":",
"all_trail_action",
"=",
"[",
"a",
".",
"version_id",
"for",
"a",
"in",
"self",
".",
"single_trail_history",
"(",
")",
"if",
"a",
"is",
"not",
"None",
"]",
"all_active_action",
"=",
"self",
".",
"get_all_active_actions",
"(",
")",
"undo_redo_list",
"=",
"[",
"]",
"_undo_redo_list",
"=",
"[",
"]",
"intermediate_version_id",
"=",
"version_id",
"if",
"self",
".",
"with_verbose",
":",
"logger",
".",
"verbose",
"(",
"\"Version_id : {0} in\"",
".",
"format",
"(",
"intermediate_version_id",
")",
")",
"logger",
".",
"verbose",
"(",
"\"Active actions: {0} in: {1}\"",
".",
"format",
"(",
"all_active_action",
",",
"intermediate_version_id",
"in",
"all_active_action",
")",
")",
"logger",
".",
"verbose",
"(",
"\"Trail actions : {0} in: {1}\"",
".",
"format",
"(",
"all_trail_action",
",",
"intermediate_version_id",
"in",
"all_trail_action",
")",
")",
"if",
"intermediate_version_id",
"not",
"in",
"all_trail_action",
":",
"# get undo to come from version_id to trail_action",
"while",
"intermediate_version_id",
"not",
"in",
"all_trail_action",
":",
"_undo_redo_list",
".",
"insert",
"(",
"0",
",",
"(",
"intermediate_version_id",
",",
"'redo'",
")",
")",
"intermediate_version_id",
"=",
"self",
".",
"all_time_history",
"[",
"intermediate_version_id",
"]",
".",
"prev_id",
"intermediate_goal_version_id",
"=",
"intermediate_version_id",
"else",
":",
"intermediate_goal_version_id",
"=",
"version_id",
"intermediate_version_id",
"=",
"self",
".",
"trail_history",
"[",
"self",
".",
"trail_pointer",
"]",
".",
"version_id",
"if",
"self",
".",
"with_verbose",
":",
"logger",
".",
"verbose",
"(",
"\"Version_id : {0} {1}\"",
".",
"format",
"(",
"intermediate_goal_version_id",
",",
"intermediate_version_id",
")",
")",
"logger",
".",
"verbose",
"(",
"\"Active actions: {0} in: {1}\"",
".",
"format",
"(",
"all_active_action",
",",
"intermediate_version_id",
"in",
"all_active_action",
")",
")",
"logger",
".",
"verbose",
"(",
"\"Trail actions : {0} in: {1}\"",
".",
"format",
"(",
"all_trail_action",
",",
"intermediate_version_id",
"in",
"all_trail_action",
")",
")",
"# collect undo and redo on trail",
"if",
"intermediate_goal_version_id",
"in",
"all_active_action",
":",
"# collect needed undo to reach intermediate version",
"while",
"not",
"intermediate_version_id",
"==",
"intermediate_goal_version_id",
":",
"undo_redo_list",
".",
"append",
"(",
"(",
"intermediate_version_id",
",",
"'undo'",
")",
")",
"intermediate_version_id",
"=",
"self",
".",
"all_time_history",
"[",
"intermediate_version_id",
"]",
".",
"prev_id",
"elif",
"intermediate_goal_version_id",
"in",
"all_trail_action",
":",
"# collect needed redo to reach intermediate version",
"while",
"not",
"intermediate_version_id",
"==",
"intermediate_goal_version_id",
":",
"intermediate_version_id",
"=",
"self",
".",
"all_time_history",
"[",
"intermediate_version_id",
"]",
".",
"next_id",
"undo_redo_list",
".",
"append",
"(",
"(",
"intermediate_version_id",
",",
"'redo'",
")",
")",
"for",
"elem",
"in",
"_undo_redo_list",
":",
"undo_redo_list",
".",
"append",
"(",
"elem",
")",
"return",
"undo_redo_list"
] | 60.040816 | 32.591837 |
def PSHUFD(cpu, op0, op1, op3):
"""
Packed shuffle doublewords.
Copies doublewords from source operand (second operand) and inserts them in the destination operand
(first operand) at locations selected with the order operand (third operand).
:param cpu: current CPU.
:param op0: destination operand.
:param op1: source operand.
:param op3: order operand.
"""
size = op0.size
arg0 = op0.read()
arg1 = op1.read()
order = Operators.ZEXTEND(op3.read(), size)
arg0 = arg0 & 0xffffffffffffffffffffffffffffffff00000000000000000000000000000000
arg0 |= ((arg1 >> (((order >> 0) & 3) * 32)) & 0xffffffff)
arg0 |= ((arg1 >> (((order >> 2) & 3) * 32)) & 0xffffffff) << 32
arg0 |= ((arg1 >> (((order >> 4) & 3) * 32)) & 0xffffffff) << 64
arg0 |= ((arg1 >> (((order >> 6) & 3) * 32)) & 0xffffffff) << 96
op0.write(arg0) | [
"def",
"PSHUFD",
"(",
"cpu",
",",
"op0",
",",
"op1",
",",
"op3",
")",
":",
"size",
"=",
"op0",
".",
"size",
"arg0",
"=",
"op0",
".",
"read",
"(",
")",
"arg1",
"=",
"op1",
".",
"read",
"(",
")",
"order",
"=",
"Operators",
".",
"ZEXTEND",
"(",
"op3",
".",
"read",
"(",
")",
",",
"size",
")",
"arg0",
"=",
"arg0",
"&",
"0xffffffffffffffffffffffffffffffff00000000000000000000000000000000",
"arg0",
"|=",
"(",
"(",
"arg1",
">>",
"(",
"(",
"(",
"order",
">>",
"0",
")",
"&",
"3",
")",
"*",
"32",
")",
")",
"&",
"0xffffffff",
")",
"arg0",
"|=",
"(",
"(",
"arg1",
">>",
"(",
"(",
"(",
"order",
">>",
"2",
")",
"&",
"3",
")",
"*",
"32",
")",
")",
"&",
"0xffffffff",
")",
"<<",
"32",
"arg0",
"|=",
"(",
"(",
"arg1",
">>",
"(",
"(",
"(",
"order",
">>",
"4",
")",
"&",
"3",
")",
"*",
"32",
")",
")",
"&",
"0xffffffff",
")",
"<<",
"64",
"arg0",
"|=",
"(",
"(",
"arg1",
">>",
"(",
"(",
"(",
"order",
">>",
"6",
")",
"&",
"3",
")",
"*",
"32",
")",
")",
"&",
"0xffffffff",
")",
"<<",
"96",
"op0",
".",
"write",
"(",
"arg0",
")"
] | 39.125 | 22.916667 |
def flux_minimization(model, fixed, solver, weights={}):
"""Minimize flux of all reactions while keeping certain fluxes fixed.
The fixed reactions are given in a dictionary as reaction id
to value mapping. The weighted L1-norm of the fluxes is minimized.
Args:
model: MetabolicModel to solve.
fixed: dict of additional lower bounds on reaction fluxes.
solver: LP solver instance to use.
weights: dict of weights on the L1-norm terms.
Returns:
An iterator of reaction ID and reaction flux pairs.
"""
fba = FluxBalanceProblem(model, solver)
for reaction_id, value in iteritems(fixed):
flux = fba.get_flux_var(reaction_id)
fba.prob.add_linear_constraints(flux >= value)
fba.minimize_l1()
return ((reaction_id, fba.get_flux(reaction_id))
for reaction_id in model.reactions) | [
"def",
"flux_minimization",
"(",
"model",
",",
"fixed",
",",
"solver",
",",
"weights",
"=",
"{",
"}",
")",
":",
"fba",
"=",
"FluxBalanceProblem",
"(",
"model",
",",
"solver",
")",
"for",
"reaction_id",
",",
"value",
"in",
"iteritems",
"(",
"fixed",
")",
":",
"flux",
"=",
"fba",
".",
"get_flux_var",
"(",
"reaction_id",
")",
"fba",
".",
"prob",
".",
"add_linear_constraints",
"(",
"flux",
">=",
"value",
")",
"fba",
".",
"minimize_l1",
"(",
")",
"return",
"(",
"(",
"reaction_id",
",",
"fba",
".",
"get_flux",
"(",
"reaction_id",
")",
")",
"for",
"reaction_id",
"in",
"model",
".",
"reactions",
")"
] | 33.038462 | 20.653846 |
def getCurrentItem(self): # TODO: rename? getCurrentItemAndIndex? getCurrentTuple? getCurrent?
""" Find the current tree item (and the current index while we're at it)
Returns a tuple with the current item, and its index. The item may be None.
See also the notes at the top of this module on current item vs selected item(s).
"""
currentIndex = self.getRowCurrentIndex()
currentItem = self.model().getItem(currentIndex)
return currentItem, currentIndex | [
"def",
"getCurrentItem",
"(",
"self",
")",
":",
"# TODO: rename? getCurrentItemAndIndex? getCurrentTuple? getCurrent?",
"currentIndex",
"=",
"self",
".",
"getRowCurrentIndex",
"(",
")",
"currentItem",
"=",
"self",
".",
"model",
"(",
")",
".",
"getItem",
"(",
"currentIndex",
")",
"return",
"currentItem",
",",
"currentIndex"
] | 63.625 | 22.25 |
def get_vertical_orientation_property(value, is_bytes=False):
"""Get `VO` property."""
obj = unidata.ascii_vertical_orientation if is_bytes else unidata.unicode_vertical_orientation
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['verticalorientation'].get(negated, negated)
else:
value = unidata.unicode_alias['verticalorientation'].get(value, value)
return obj[value] | [
"def",
"get_vertical_orientation_property",
"(",
"value",
",",
"is_bytes",
"=",
"False",
")",
":",
"obj",
"=",
"unidata",
".",
"ascii_vertical_orientation",
"if",
"is_bytes",
"else",
"unidata",
".",
"unicode_vertical_orientation",
"if",
"value",
".",
"startswith",
"(",
"'^'",
")",
":",
"negated",
"=",
"value",
"[",
"1",
":",
"]",
"value",
"=",
"'^'",
"+",
"unidata",
".",
"unicode_alias",
"[",
"'verticalorientation'",
"]",
".",
"get",
"(",
"negated",
",",
"negated",
")",
"else",
":",
"value",
"=",
"unidata",
".",
"unicode_alias",
"[",
"'verticalorientation'",
"]",
".",
"get",
"(",
"value",
",",
"value",
")",
"return",
"obj",
"[",
"value",
"]"
] | 36.583333 | 29.916667 |
def search_queryset(self):
"""Get search query set"""
queryset = self.get_model_class().objects.get_queryset()
if self.get_model_config().list_select_related:
queryset = queryset.select_related(*self.get_model_config().list_select_related)
return watson.filter(queryset, self.get_search(), ranking=False) | [
"def",
"search_queryset",
"(",
"self",
")",
":",
"queryset",
"=",
"self",
".",
"get_model_class",
"(",
")",
".",
"objects",
".",
"get_queryset",
"(",
")",
"if",
"self",
".",
"get_model_config",
"(",
")",
".",
"list_select_related",
":",
"queryset",
"=",
"queryset",
".",
"select_related",
"(",
"*",
"self",
".",
"get_model_config",
"(",
")",
".",
"list_select_related",
")",
"return",
"watson",
".",
"filter",
"(",
"queryset",
",",
"self",
".",
"get_search",
"(",
")",
",",
"ranking",
"=",
"False",
")"
] | 42.875 | 27.125 |
def save(self):
"""Store config back to file."""
try:
os.makedirs(os.path.dirname(self._configfile))
except:
pass
with open(self._configfile, 'w') as f:
self._config.write(f) | [
"def",
"save",
"(",
"self",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"_configfile",
")",
")",
"except",
":",
"pass",
"with",
"open",
"(",
"self",
".",
"_configfile",
",",
"'w'",
")",
"as",
"f",
":",
"self",
".",
"_config",
".",
"write",
"(",
"f",
")"
] | 26.111111 | 19.222222 |
def gofwrapper(f, loss_function='squared'):
"""
Goodness-of-fit decorator function for likelihoods
==================================================
Generates goodness-of-fit points for data likelihoods.
Wrap function f(*args, **kwds) where f is a likelihood.
Assume args = (x, parameter1, parameter2, ...)
Before passing the arguments to the function, the wrapper makes sure that
the parameters have the same shape as x.
"""
name = f.__name__[:-5]
# Take a snapshot of the main namespace.
# Find the functions needed to compute the gof points.
expval_func = eval(name + '_expval')
random_func = eval('r' + name)
def wrapper(*args, **kwds):
"""
This wraps a likelihood.
"""
"""Return gof points."""
# Calculate loss
loss = kwds.pop('gof', loss_functions[loss_function])
# Expected value, given parameters
expval = expval_func(*args[1:], **kwds)
y = random_func(size=len(args[0]), *args[1:], **kwds)
f.gof_points = GOFpoints(args[0], y, expval, loss)
"""Return likelihood."""
return f(*args, **kwds)
# Assign function attributes to wrapper.
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
wrapper.name = name
return wrapper | [
"def",
"gofwrapper",
"(",
"f",
",",
"loss_function",
"=",
"'squared'",
")",
":",
"name",
"=",
"f",
".",
"__name__",
"[",
":",
"-",
"5",
"]",
"# Take a snapshot of the main namespace.",
"# Find the functions needed to compute the gof points.",
"expval_func",
"=",
"eval",
"(",
"name",
"+",
"'_expval'",
")",
"random_func",
"=",
"eval",
"(",
"'r'",
"+",
"name",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"\"\"\"\n This wraps a likelihood.\n \"\"\"",
"\"\"\"Return gof points.\"\"\"",
"# Calculate loss",
"loss",
"=",
"kwds",
".",
"pop",
"(",
"'gof'",
",",
"loss_functions",
"[",
"loss_function",
"]",
")",
"# Expected value, given parameters",
"expval",
"=",
"expval_func",
"(",
"*",
"args",
"[",
"1",
":",
"]",
",",
"*",
"*",
"kwds",
")",
"y",
"=",
"random_func",
"(",
"size",
"=",
"len",
"(",
"args",
"[",
"0",
"]",
")",
",",
"*",
"args",
"[",
"1",
":",
"]",
",",
"*",
"*",
"kwds",
")",
"f",
".",
"gof_points",
"=",
"GOFpoints",
"(",
"args",
"[",
"0",
"]",
",",
"y",
",",
"expval",
",",
"loss",
")",
"\"\"\"Return likelihood.\"\"\"",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"# Assign function attributes to wrapper.",
"wrapper",
".",
"__doc__",
"=",
"f",
".",
"__doc__",
"wrapper",
".",
"__name__",
"=",
"f",
".",
"__name__",
"wrapper",
".",
"name",
"=",
"name",
"return",
"wrapper"
] | 28.377778 | 18.022222 |
def add_inverse_query(self, key_val={}):
"""
Add an es_dsl inverse query object to the es_dsl Search object
:param key_val: a key-value pair(dict) containing the query to be added to the search object
:returns: self, which allows the method to be chainable with the other methods
"""
q = Q("match", **key_val)
self.search = self.search.query(~q)
return self | [
"def",
"add_inverse_query",
"(",
"self",
",",
"key_val",
"=",
"{",
"}",
")",
":",
"q",
"=",
"Q",
"(",
"\"match\"",
",",
"*",
"*",
"key_val",
")",
"self",
".",
"search",
"=",
"self",
".",
"search",
".",
"query",
"(",
"~",
"q",
")",
"return",
"self"
] | 37.545455 | 22.454545 |
def is_correct(self):
# pylint: disable=too-many-branches
"""
Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
if hasattr(self, 'trigger') and getattr(self, 'trigger', None):
self.add_warning("[%s::%s] 'trigger' property is not allowed"
% (self.my_type, self.get_name()))
# If no notif period, set it to None, mean 24x7
if not hasattr(self, 'notification_period'):
self.notification_period = None
# If freshness_threshold is not set, use check interval or retry interval
if hasattr(self, 'freshness_threshold') and not self.freshness_threshold:
if getattr(self, 'check_interval', 0):
self.freshness_threshold = self.check_interval * 60
# self.add_warning("[%s::%s] using check interval as a freshness threshold: %d s"
# % (self.my_type, self.get_name(), self.freshness_threshold))
elif getattr(self, 'retry_interval', 0):
self.freshness_threshold = self.retry_interval * 60
# self.add_warning("[%s::%s] using retry interval as a freshness threshold: %d s"
# % (self.my_type, self.get_name(), self.freshness_threshold))
# If we got an event handler, it should be valid
if getattr(self, 'event_handler', None) and not self.event_handler.is_valid():
self.add_error("[%s::%s] event_handler '%s' is invalid"
% (self.my_type, self.get_name(), self.event_handler.command))
state = False
if not hasattr(self, 'check_command'):
# todo: This should never happen because the default exists as an empty string
self.add_error("[%s::%s] no property check_command" % (self.my_type, self.get_name()))
state = False
# Ok got a command, but maybe it's invalid
else:
# if not self.check_command:
# self.add_warning("[%s::%s] no check_command, will always be considered as Up"
# % (self.my_type, self.get_name()))
if self.check_command and not self.check_command.is_valid():
self.add_error("[%s::%s] check_command '%s' invalid"
% (self.my_type, self.get_name(), self.check_command.command))
state = False
if self.got_business_rule:
if not self.business_rule.is_valid():
self.add_error("[%s::%s] business_rule invalid"
% (self.my_type, self.get_name()))
for bperror in self.business_rule.configuration_errors:
self.add_error("[%s::%s]: %s" % (self.my_type, self.get_name(), bperror))
state = False
if not hasattr(self, 'notification_interval') \
and self.notifications_enabled is True: # pragma: no cover, should never happen
self.add_error("[%s::%s] no notification_interval but notifications enabled"
% (self.my_type, self.get_name()))
state = False
# if no check_period, means 24x7, like for services
if not hasattr(self, 'check_period'):
self.check_period = None
state = super(SchedulingItem, self).is_correct()
return state | [
"def",
"is_correct",
"(",
"self",
")",
":",
"# pylint: disable=too-many-branches",
"state",
"=",
"True",
"if",
"hasattr",
"(",
"self",
",",
"'trigger'",
")",
"and",
"getattr",
"(",
"self",
",",
"'trigger'",
",",
"None",
")",
":",
"self",
".",
"add_warning",
"(",
"\"[%s::%s] 'trigger' property is not allowed\"",
"%",
"(",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
")",
")",
"# If no notif period, set it to None, mean 24x7",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'notification_period'",
")",
":",
"self",
".",
"notification_period",
"=",
"None",
"# If freshness_threshold is not set, use check interval or retry interval",
"if",
"hasattr",
"(",
"self",
",",
"'freshness_threshold'",
")",
"and",
"not",
"self",
".",
"freshness_threshold",
":",
"if",
"getattr",
"(",
"self",
",",
"'check_interval'",
",",
"0",
")",
":",
"self",
".",
"freshness_threshold",
"=",
"self",
".",
"check_interval",
"*",
"60",
"# self.add_warning(\"[%s::%s] using check interval as a freshness threshold: %d s\"",
"# % (self.my_type, self.get_name(), self.freshness_threshold))",
"elif",
"getattr",
"(",
"self",
",",
"'retry_interval'",
",",
"0",
")",
":",
"self",
".",
"freshness_threshold",
"=",
"self",
".",
"retry_interval",
"*",
"60",
"# self.add_warning(\"[%s::%s] using retry interval as a freshness threshold: %d s\"",
"# % (self.my_type, self.get_name(), self.freshness_threshold))",
"# If we got an event handler, it should be valid",
"if",
"getattr",
"(",
"self",
",",
"'event_handler'",
",",
"None",
")",
"and",
"not",
"self",
".",
"event_handler",
".",
"is_valid",
"(",
")",
":",
"self",
".",
"add_error",
"(",
"\"[%s::%s] event_handler '%s' is invalid\"",
"%",
"(",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
",",
"self",
".",
"event_handler",
".",
"command",
")",
")",
"state",
"=",
"False",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'check_command'",
")",
":",
"# todo: This should never happen because the default exists as an empty string",
"self",
".",
"add_error",
"(",
"\"[%s::%s] no property check_command\"",
"%",
"(",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
")",
")",
"state",
"=",
"False",
"# Ok got a command, but maybe it's invalid",
"else",
":",
"# if not self.check_command:",
"# self.add_warning(\"[%s::%s] no check_command, will always be considered as Up\"",
"# % (self.my_type, self.get_name()))",
"if",
"self",
".",
"check_command",
"and",
"not",
"self",
".",
"check_command",
".",
"is_valid",
"(",
")",
":",
"self",
".",
"add_error",
"(",
"\"[%s::%s] check_command '%s' invalid\"",
"%",
"(",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
",",
"self",
".",
"check_command",
".",
"command",
")",
")",
"state",
"=",
"False",
"if",
"self",
".",
"got_business_rule",
":",
"if",
"not",
"self",
".",
"business_rule",
".",
"is_valid",
"(",
")",
":",
"self",
".",
"add_error",
"(",
"\"[%s::%s] business_rule invalid\"",
"%",
"(",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
")",
")",
"for",
"bperror",
"in",
"self",
".",
"business_rule",
".",
"configuration_errors",
":",
"self",
".",
"add_error",
"(",
"\"[%s::%s]: %s\"",
"%",
"(",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
",",
"bperror",
")",
")",
"state",
"=",
"False",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'notification_interval'",
")",
"and",
"self",
".",
"notifications_enabled",
"is",
"True",
":",
"# pragma: no cover, should never happen",
"self",
".",
"add_error",
"(",
"\"[%s::%s] no notification_interval but notifications enabled\"",
"%",
"(",
"self",
".",
"my_type",
",",
"self",
".",
"get_name",
"(",
")",
")",
")",
"state",
"=",
"False",
"# if no check_period, means 24x7, like for services",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'check_period'",
")",
":",
"self",
".",
"check_period",
"=",
"None",
"state",
"=",
"super",
"(",
"SchedulingItem",
",",
"self",
")",
".",
"is_correct",
"(",
")",
"return",
"state"
] | 50.253521 | 27.267606 |
def get_client_index_from_id(self, client_id):
"""Return client index from id"""
for index, client in enumerate(self.clients):
if id(client) == client_id:
return index | [
"def",
"get_client_index_from_id",
"(",
"self",
",",
"client_id",
")",
":",
"for",
"index",
",",
"client",
"in",
"enumerate",
"(",
"self",
".",
"clients",
")",
":",
"if",
"id",
"(",
"client",
")",
"==",
"client_id",
":",
"return",
"index"
] | 42.2 | 6.6 |
async def get_top_clans(self):
'''Get a list of top clans, info is only brief,
call get_clan() on each of the ClanInfo objects
to get full clan info'''
url = self.BASE + '/top/clans'
data = await self.request(url)
return [ClanInfo(self, c) for c in data.get('clans')] | [
"async",
"def",
"get_top_clans",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"BASE",
"+",
"'/top/clans'",
"data",
"=",
"await",
"self",
".",
"request",
"(",
"url",
")",
"return",
"[",
"ClanInfo",
"(",
"self",
",",
"c",
")",
"for",
"c",
"in",
"data",
".",
"get",
"(",
"'clans'",
")",
"]"
] | 34.555556 | 17.222222 |
def write_data(self, command, data, timeout=None):
"""Shortcut for writing specifically a DataMessage."""
self.write_message(FilesyncMessageTypes.DataMessage(command, data), timeout) | [
"def",
"write_data",
"(",
"self",
",",
"command",
",",
"data",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"write_message",
"(",
"FilesyncMessageTypes",
".",
"DataMessage",
"(",
"command",
",",
"data",
")",
",",
"timeout",
")"
] | 62.666667 | 16.666667 |
def deactivate_mfa_device(self, user_name, serial_number):
"""
Deactivates the specified MFA device and removes it from
association with the user.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param seriasl_number: The serial number which uniquely identifies
the MFA device.
"""
params = {'UserName' : user_name,
'SerialNumber' : serial_number}
return self.get_response('DeactivateMFADevice', params) | [
"def",
"deactivate_mfa_device",
"(",
"self",
",",
"user_name",
",",
"serial_number",
")",
":",
"params",
"=",
"{",
"'UserName'",
":",
"user_name",
",",
"'SerialNumber'",
":",
"serial_number",
"}",
"return",
"self",
".",
"get_response",
"(",
"'DeactivateMFADevice'",
",",
"params",
")"
] | 35.9375 | 16.0625 |
def _remove_duplicate_points(points, groups):
''' Removes the duplicate points from the beginning of a section,
if they are present in points-groups representation.
Returns:
points, groups with unique points.
'''
group_initial_ids = groups[:, GPFIRST]
to_be_reduced = np.zeros(len(group_initial_ids))
to_be_removed = []
for ig, g in enumerate(groups):
iid, typ, pid = g[GPFIRST], g[GTYPE], g[GPID]
# Remove first point from sections that are
# not the root section, a soma, or a child of a soma
if pid != -1 and typ != 1 and groups[pid][GTYPE] != 1:
# Remove duplicate from list of points
to_be_removed.append(iid)
# Reduce the id of the following sections
# in groups structure by one
to_be_reduced[ig + 1:] += 1
groups[:, GPFIRST] = groups[:, GPFIRST] - to_be_reduced
points = np.delete(points, to_be_removed, axis=0)
return points, groups | [
"def",
"_remove_duplicate_points",
"(",
"points",
",",
"groups",
")",
":",
"group_initial_ids",
"=",
"groups",
"[",
":",
",",
"GPFIRST",
"]",
"to_be_reduced",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"group_initial_ids",
")",
")",
"to_be_removed",
"=",
"[",
"]",
"for",
"ig",
",",
"g",
"in",
"enumerate",
"(",
"groups",
")",
":",
"iid",
",",
"typ",
",",
"pid",
"=",
"g",
"[",
"GPFIRST",
"]",
",",
"g",
"[",
"GTYPE",
"]",
",",
"g",
"[",
"GPID",
"]",
"# Remove first point from sections that are",
"# not the root section, a soma, or a child of a soma",
"if",
"pid",
"!=",
"-",
"1",
"and",
"typ",
"!=",
"1",
"and",
"groups",
"[",
"pid",
"]",
"[",
"GTYPE",
"]",
"!=",
"1",
":",
"# Remove duplicate from list of points",
"to_be_removed",
".",
"append",
"(",
"iid",
")",
"# Reduce the id of the following sections",
"# in groups structure by one",
"to_be_reduced",
"[",
"ig",
"+",
"1",
":",
"]",
"+=",
"1",
"groups",
"[",
":",
",",
"GPFIRST",
"]",
"=",
"groups",
"[",
":",
",",
"GPFIRST",
"]",
"-",
"to_be_reduced",
"points",
"=",
"np",
".",
"delete",
"(",
"points",
",",
"to_be_removed",
",",
"axis",
"=",
"0",
")",
"return",
"points",
",",
"groups"
] | 33.241379 | 19.655172 |
def _add_point_scalar(self, scalars, name, set_active=False, deep=True):
"""
Adds point scalars to the mesh
Parameters
----------
scalars : numpy.ndarray
Numpy array of scalars. Must match number of points.
name : str
Name of point scalars to add.
set_active : bool, optional
Sets the scalars to the active plotting scalars. Default False.
deep : bool, optional
Does not copy scalars when False. A reference to the scalars
must be kept to avoid a segfault.
"""
if not isinstance(scalars, np.ndarray):
raise TypeError('Input must be a numpy.ndarray')
if scalars.shape[0] != self.n_points:
raise Exception('Number of scalars must match the number of ' +
'points')
# need to track which arrays are boolean as all boolean arrays
# must be stored as uint8
if scalars.dtype == np.bool:
scalars = scalars.view(np.uint8)
if name not in self._point_bool_array_names:
self._point_bool_array_names.append(name)
if not scalars.flags.c_contiguous:
scalars = np.ascontiguousarray(scalars)
vtkarr = numpy_to_vtk(scalars, deep=deep)
vtkarr.SetName(name)
self.GetPointData().AddArray(vtkarr)
if set_active or self.active_scalar_info[1] is None:
self.GetPointData().SetActiveScalars(name)
self._active_scalar_info = [POINT_DATA_FIELD, name] | [
"def",
"_add_point_scalar",
"(",
"self",
",",
"scalars",
",",
"name",
",",
"set_active",
"=",
"False",
",",
"deep",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"scalars",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"TypeError",
"(",
"'Input must be a numpy.ndarray'",
")",
"if",
"scalars",
".",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"n_points",
":",
"raise",
"Exception",
"(",
"'Number of scalars must match the number of '",
"+",
"'points'",
")",
"# need to track which arrays are boolean as all boolean arrays",
"# must be stored as uint8",
"if",
"scalars",
".",
"dtype",
"==",
"np",
".",
"bool",
":",
"scalars",
"=",
"scalars",
".",
"view",
"(",
"np",
".",
"uint8",
")",
"if",
"name",
"not",
"in",
"self",
".",
"_point_bool_array_names",
":",
"self",
".",
"_point_bool_array_names",
".",
"append",
"(",
"name",
")",
"if",
"not",
"scalars",
".",
"flags",
".",
"c_contiguous",
":",
"scalars",
"=",
"np",
".",
"ascontiguousarray",
"(",
"scalars",
")",
"vtkarr",
"=",
"numpy_to_vtk",
"(",
"scalars",
",",
"deep",
"=",
"deep",
")",
"vtkarr",
".",
"SetName",
"(",
"name",
")",
"self",
".",
"GetPointData",
"(",
")",
".",
"AddArray",
"(",
"vtkarr",
")",
"if",
"set_active",
"or",
"self",
".",
"active_scalar_info",
"[",
"1",
"]",
"is",
"None",
":",
"self",
".",
"GetPointData",
"(",
")",
".",
"SetActiveScalars",
"(",
"name",
")",
"self",
".",
"_active_scalar_info",
"=",
"[",
"POINT_DATA_FIELD",
",",
"name",
"]"
] | 35.627907 | 19.255814 |
def _build_ex_tree(self):
"""Construct exception tree from trace."""
# Load exception data into tree structure
sep = self._exh_obj.callables_separator
data = self._exh_obj.exceptions_db
if not data:
raise RuntimeError("Exceptions database is empty")
# Add root node to exceptions, needed when tracing done
# through test runner which is excluded from callable path
for item in data:
item["name"] = "root{sep}{name}".format(sep=sep, name=item["name"])
self._tobj = ptrie.Trie(sep)
try:
self._tobj.add_nodes(data)
except ValueError as eobj:
if str(eobj).startswith("Illegal node name"):
raise RuntimeError("Exceptions do not have a common callable")
raise
# Find closest root node to first multi-leaf branching or first
# callable with exceptions and make that the root node
node = self._tobj.root_name
while (len(self._tobj.get_children(node)) == 1) and (
not self._tobj.get_data(node)
):
node = self._tobj.get_children(node)[0]
if not self._tobj.is_root(node): # pragma: no branch
self._tobj.make_root(node)
nsep = self._tobj.node_separator
prefix = nsep.join(node.split(self._tobj.node_separator)[:-1])
self._tobj.delete_prefix(prefix)
self._print_ex_tree() | [
"def",
"_build_ex_tree",
"(",
"self",
")",
":",
"# Load exception data into tree structure",
"sep",
"=",
"self",
".",
"_exh_obj",
".",
"callables_separator",
"data",
"=",
"self",
".",
"_exh_obj",
".",
"exceptions_db",
"if",
"not",
"data",
":",
"raise",
"RuntimeError",
"(",
"\"Exceptions database is empty\"",
")",
"# Add root node to exceptions, needed when tracing done",
"# through test runner which is excluded from callable path",
"for",
"item",
"in",
"data",
":",
"item",
"[",
"\"name\"",
"]",
"=",
"\"root{sep}{name}\"",
".",
"format",
"(",
"sep",
"=",
"sep",
",",
"name",
"=",
"item",
"[",
"\"name\"",
"]",
")",
"self",
".",
"_tobj",
"=",
"ptrie",
".",
"Trie",
"(",
"sep",
")",
"try",
":",
"self",
".",
"_tobj",
".",
"add_nodes",
"(",
"data",
")",
"except",
"ValueError",
"as",
"eobj",
":",
"if",
"str",
"(",
"eobj",
")",
".",
"startswith",
"(",
"\"Illegal node name\"",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Exceptions do not have a common callable\"",
")",
"raise",
"# Find closest root node to first multi-leaf branching or first",
"# callable with exceptions and make that the root node",
"node",
"=",
"self",
".",
"_tobj",
".",
"root_name",
"while",
"(",
"len",
"(",
"self",
".",
"_tobj",
".",
"get_children",
"(",
"node",
")",
")",
"==",
"1",
")",
"and",
"(",
"not",
"self",
".",
"_tobj",
".",
"get_data",
"(",
"node",
")",
")",
":",
"node",
"=",
"self",
".",
"_tobj",
".",
"get_children",
"(",
"node",
")",
"[",
"0",
"]",
"if",
"not",
"self",
".",
"_tobj",
".",
"is_root",
"(",
"node",
")",
":",
"# pragma: no branch",
"self",
".",
"_tobj",
".",
"make_root",
"(",
"node",
")",
"nsep",
"=",
"self",
".",
"_tobj",
".",
"node_separator",
"prefix",
"=",
"nsep",
".",
"join",
"(",
"node",
".",
"split",
"(",
"self",
".",
"_tobj",
".",
"node_separator",
")",
"[",
":",
"-",
"1",
"]",
")",
"self",
".",
"_tobj",
".",
"delete_prefix",
"(",
"prefix",
")",
"self",
".",
"_print_ex_tree",
"(",
")"
] | 45.83871 | 15.903226 |
def _parse_binary(v, header_d):
    """ Parses binary string.
    Note:
        <str> for py2 and <binary> for py3.

    Args:
        v: raw input value; may be any type.
        header_d: header information; unused here — presumably part of a
            common parser signature shared with sibling parsers (verify
            against callers).

    Returns:
        The stripped binary representation of ``v`` (``str`` on py2,
        ``bytes`` on py3), or ``None`` when ``v`` nullifies to None.
    """
    # This is often a no-op, but it occasionally converts numbers into strings
    v = nullify(v)
    if v is None:
        return None
    if six.PY2:
        try:
            return six.binary_type(v).strip()
        except UnicodeEncodeError:
            # Encoding to a byte string failed; fall back to unicode text.
            return six.text_type(v).strip()
    else:
        # py3: encode text to UTF-8 bytes.
        try:
            return six.binary_type(v, 'utf-8').strip()
        except UnicodeEncodeError:
            return six.text_type(v).strip()
"def",
"_parse_binary",
"(",
"v",
",",
"header_d",
")",
":",
"# This is often a no-op, but it ocassionally converts numbers into strings",
"v",
"=",
"nullify",
"(",
"v",
")",
"if",
"v",
"is",
"None",
":",
"return",
"None",
"if",
"six",
".",
"PY2",
":",
"try",
":",
"return",
"six",
".",
"binary_type",
"(",
"v",
")",
".",
"strip",
"(",
")",
"except",
"UnicodeEncodeError",
":",
"return",
"six",
".",
"text_type",
"(",
"v",
")",
".",
"strip",
"(",
")",
"else",
":",
"# py3",
"try",
":",
"return",
"six",
".",
"binary_type",
"(",
"v",
",",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"except",
"UnicodeEncodeError",
":",
"return",
"six",
".",
"text_type",
"(",
"v",
")",
".",
"strip",
"(",
")"
] | 21.730769 | 21.653846 |
def iter_descendants_labels(self, ontology, iri, size=None, sleep=None):
        """Iterate over the labels of all descendants of the given term.

        :param str ontology: The name of the ontology
        :param str iri: The IRI of a term
        :param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
        :param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
        :rtype: iter[str]
        """
        descendants = self.iter_descendants(ontology, iri, size=size, sleep=sleep)
        for term_label in _help_iterate_labels(descendants):
            yield term_label
"def",
"iter_descendants_labels",
"(",
"self",
",",
"ontology",
",",
"iri",
",",
"size",
"=",
"None",
",",
"sleep",
"=",
"None",
")",
":",
"for",
"label",
"in",
"_help_iterate_labels",
"(",
"self",
".",
"iter_descendants",
"(",
"ontology",
",",
"iri",
",",
"size",
"=",
"size",
",",
"sleep",
"=",
"sleep",
")",
")",
":",
"yield",
"label"
] | 54.181818 | 27.090909 |
def _date(val, offset=None):
""" A special pseudo-type for pipeline arguments.
This allows us to parse dates as Python datetimes, including special values like 'now'
and 'today', as well as apply offsets to the datetime.
Args:
val: a string containing the value for the datetime. This can be 'now', 'today' (midnight at
start of day), 'yesterday' (midnight at start of yesterday), or a formatted date that
will be passed to the datetime constructor. Note that 'now' etc are assumed to
be in UTC.
offset: for date arguments a string containing a comma-separated list of
relative offsets to apply of the form <n><u> where <n> is an integer and
<u> is a single character unit (d=day, m=month, y=year, h=hour, m=minute).
Returns:
A Python datetime resulting from starting at <val> and applying the sequence of deltas
specified in <offset>.
"""
if val is None:
return val
if val == '' or val == 'now':
when = datetime.datetime.utcnow()
elif val == 'today':
dt = datetime.datetime.utcnow()
when = datetime.datetime(dt.year, dt.month, dt.day)
elif val == 'yesterday':
dt = datetime.datetime.utcnow() - datetime.timedelta(1)
when = datetime.datetime(dt.year, dt.month, dt.day)
else:
when = datetime.datetime.strptime(val, "%Y%m%d")
if offset is not None:
for part in offset.split(','):
unit = part[-1]
quantity = int(part[:-1])
# We can use timedelta for days and under, but not for years and months
if unit == 'y':
when = datetime.datetime(year=when.year + quantity, month=when.month, day=when.day,
hour=when.hour, minute=when.minute)
elif unit == 'm':
new_year = when.year
new_month = when.month + quantity
if new_month < 1:
new_month = -new_month
new_year += 1 + (new_month // 12)
new_month = 12 - new_month % 12
elif new_month > 12:
new_year += (new_month - 1) // 12
new_month = 1 + (new_month - 1) % 12
when = datetime.datetime(year=new_year, month=new_month, day=when.day,
hour=when.hour, minute=when.minute)
elif unit == 'd':
when += datetime.timedelta(days=quantity)
elif unit == 'h':
when += datetime.timedelta(hours=quantity)
elif unit == 'M':
when += datetime.timedelta(minutes=quantity)
return when | [
"def",
"_date",
"(",
"val",
",",
"offset",
"=",
"None",
")",
":",
"if",
"val",
"is",
"None",
":",
"return",
"val",
"if",
"val",
"==",
"''",
"or",
"val",
"==",
"'now'",
":",
"when",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"elif",
"val",
"==",
"'today'",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"dt",
".",
"year",
",",
"dt",
".",
"month",
",",
"dt",
".",
"day",
")",
"elif",
"val",
"==",
"'yesterday'",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"1",
")",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"dt",
".",
"year",
",",
"dt",
".",
"month",
",",
"dt",
".",
"day",
")",
"else",
":",
"when",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"val",
",",
"\"%Y%m%d\"",
")",
"if",
"offset",
"is",
"not",
"None",
":",
"for",
"part",
"in",
"offset",
".",
"split",
"(",
"','",
")",
":",
"unit",
"=",
"part",
"[",
"-",
"1",
"]",
"quantity",
"=",
"int",
"(",
"part",
"[",
":",
"-",
"1",
"]",
")",
"# We can use timedelta for days and under, but not for years and months",
"if",
"unit",
"==",
"'y'",
":",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"year",
"=",
"when",
".",
"year",
"+",
"quantity",
",",
"month",
"=",
"when",
".",
"month",
",",
"day",
"=",
"when",
".",
"day",
",",
"hour",
"=",
"when",
".",
"hour",
",",
"minute",
"=",
"when",
".",
"minute",
")",
"elif",
"unit",
"==",
"'m'",
":",
"new_year",
"=",
"when",
".",
"year",
"new_month",
"=",
"when",
".",
"month",
"+",
"quantity",
"if",
"new_month",
"<",
"1",
":",
"new_month",
"=",
"-",
"new_month",
"new_year",
"+=",
"1",
"+",
"(",
"new_month",
"//",
"12",
")",
"new_month",
"=",
"12",
"-",
"new_month",
"%",
"12",
"elif",
"new_month",
">",
"12",
":",
"new_year",
"+=",
"(",
"new_month",
"-",
"1",
")",
"//",
"12",
"new_month",
"=",
"1",
"+",
"(",
"new_month",
"-",
"1",
")",
"%",
"12",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"year",
"=",
"new_year",
",",
"month",
"=",
"new_month",
",",
"day",
"=",
"when",
".",
"day",
",",
"hour",
"=",
"when",
".",
"hour",
",",
"minute",
"=",
"when",
".",
"minute",
")",
"elif",
"unit",
"==",
"'d'",
":",
"when",
"+=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"quantity",
")",
"elif",
"unit",
"==",
"'h'",
":",
"when",
"+=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"quantity",
")",
"elif",
"unit",
"==",
"'M'",
":",
"when",
"+=",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"quantity",
")",
"return",
"when"
] | 40.576271 | 21.762712 |
def bech32_decode(bech):
    """Validate a Bech32 string, and determine HRP and data.

    Follows the BIP-173 decoding rules. Returns ``(hrp, data)`` where
    ``hrp`` is the lower-cased human-readable part and ``data`` is the list
    of 5-bit values with the 6 checksum values stripped, or ``(None, None)``
    when the string is invalid.
    """
    # Reject non-printable ASCII characters and mixed-case strings; both
    # are invalid per BIP-173.
    if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
            (bech.lower() != bech and bech.upper() != bech)):
        return None, None
    bech = bech.lower()
    # The *last* '1' separates the human-readable part from the data part.
    pos = bech.rfind('1')
    # Require a non-empty HRP, at least 6 checksum characters after the
    # separator, and a total length of at most 90 characters.
    if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
        return None, None
    if not all(x in CHARSET for x in bech[pos+1:]):
        return None, None
    hrp = bech[:pos]
    # Map each data character to its 5-bit value via the Bech32 charset.
    data = [CHARSET.find(x) for x in bech[pos+1:]]
    if not bech32_verify_checksum(hrp, data):
        return None, None
    # Strip the 6 checksum values before returning.
    return hrp, data[:-6]
"def",
"bech32_decode",
"(",
"bech",
")",
":",
"if",
"(",
"(",
"any",
"(",
"ord",
"(",
"x",
")",
"<",
"33",
"or",
"ord",
"(",
"x",
")",
">",
"126",
"for",
"x",
"in",
"bech",
")",
")",
"or",
"(",
"bech",
".",
"lower",
"(",
")",
"!=",
"bech",
"and",
"bech",
".",
"upper",
"(",
")",
"!=",
"bech",
")",
")",
":",
"return",
"None",
",",
"None",
"bech",
"=",
"bech",
".",
"lower",
"(",
")",
"pos",
"=",
"bech",
".",
"rfind",
"(",
"'1'",
")",
"if",
"pos",
"<",
"1",
"or",
"pos",
"+",
"7",
">",
"len",
"(",
"bech",
")",
"or",
"len",
"(",
"bech",
")",
">",
"90",
":",
"return",
"None",
",",
"None",
"if",
"not",
"all",
"(",
"x",
"in",
"CHARSET",
"for",
"x",
"in",
"bech",
"[",
"pos",
"+",
"1",
":",
"]",
")",
":",
"return",
"None",
",",
"None",
"hrp",
"=",
"bech",
"[",
":",
"pos",
"]",
"data",
"=",
"[",
"CHARSET",
".",
"find",
"(",
"x",
")",
"for",
"x",
"in",
"bech",
"[",
"pos",
"+",
"1",
":",
"]",
"]",
"if",
"not",
"bech32_verify_checksum",
"(",
"hrp",
",",
"data",
")",
":",
"return",
"None",
",",
"None",
"return",
"hrp",
",",
"data",
"[",
":",
"-",
"6",
"]"
] | 37.625 | 14.0625 |
def setTabURLs(tabs, webTranslator):
    """
    Ensure every L{Tab} in C{tabs} — and, recursively, every descendant
    tab — has its C{linkURL} attribute populated.

    Tabs that already carry a C{linkURL} are left untouched; for the rest
    the URL is derived from the tab's C{storeID} via C{webTranslator}.

    @param tabs: sequence of L{Tab} instances
    @param webTranslator: L{xmantissa.ixmantissa.IWebTranslator}
    implementor

    @return: None
    """
    for entry in tabs:
        if not entry.linkURL:
            entry.linkURL = webTranslator.linkTo(entry.storeID)
        setTabURLs(entry.children, webTranslator)
"def",
"setTabURLs",
"(",
"tabs",
",",
"webTranslator",
")",
":",
"for",
"tab",
"in",
"tabs",
":",
"if",
"not",
"tab",
".",
"linkURL",
":",
"tab",
".",
"linkURL",
"=",
"webTranslator",
".",
"linkTo",
"(",
"tab",
".",
"storeID",
")",
"setTabURLs",
"(",
"tab",
".",
"children",
",",
"webTranslator",
")"
] | 29.4375 | 16.4375 |
def configure(ctx, integration, args, show_args, editable):
    """Configure an integration with default parameters.

    You can still provide one-off integration arguments to :func:`honeycomb.commands.service.run` if required.

    :param ctx: Click context; ``ctx.obj["HOME"]`` holds the honeycomb home path.
    :param integration: Name of the integration to configure.
    :param args: Iterable of ``key=value`` argument strings supplied by the user.
    :param show_args: If True, only print the integration's accepted arguments and return.
    :param editable: Treat the integration as an editable (development) plugin path.
    """
    home = ctx.obj["HOME"]
    integration_path = plugin_utils.get_plugin_path(home, defs.INTEGRATIONS, integration, editable)
    logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
                 extra={"command": ctx.command.name, "params": ctx.params})
    logger.debug("loading {} ({})".format(integration, integration_path))
    integration = register_integration(integration_path)
    if show_args:
        return plugin_utils.print_plugin_args(integration_path)
    # get our integration class instance
    integration_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(integration_path))
    # Persist the parsed arguments next to the plugin so later runs pick them up.
    args_file = os.path.join(integration_path, defs.ARGS_JSON)
    with open(args_file, "w") as f:
        data = json.dumps(integration_args)
        logger.debug("writing %s to %s", data, args_file)
        # BUG FIX: reuse the already-serialized payload instead of calling
        # json.dumps a second time on the same object.
        f.write(data)
    click.secho("[*] {0} has been configured, make sure to test it with `honeycomb integration test {0}`"
                .format(integration.name))
"def",
"configure",
"(",
"ctx",
",",
"integration",
",",
"args",
",",
"show_args",
",",
"editable",
")",
":",
"home",
"=",
"ctx",
".",
"obj",
"[",
"\"HOME\"",
"]",
"integration_path",
"=",
"plugin_utils",
".",
"get_plugin_path",
"(",
"home",
",",
"defs",
".",
"INTEGRATIONS",
",",
"integration",
",",
"editable",
")",
"logger",
".",
"debug",
"(",
"\"running command %s (%s)\"",
",",
"ctx",
".",
"command",
".",
"name",
",",
"ctx",
".",
"params",
",",
"extra",
"=",
"{",
"\"command\"",
":",
"ctx",
".",
"command",
".",
"name",
",",
"\"params\"",
":",
"ctx",
".",
"params",
"}",
")",
"logger",
".",
"debug",
"(",
"\"loading {} ({})\"",
".",
"format",
"(",
"integration",
",",
"integration_path",
")",
")",
"integration",
"=",
"register_integration",
"(",
"integration_path",
")",
"if",
"show_args",
":",
"return",
"plugin_utils",
".",
"print_plugin_args",
"(",
"integration_path",
")",
"# get our integration class instance",
"integration_args",
"=",
"plugin_utils",
".",
"parse_plugin_args",
"(",
"args",
",",
"config_utils",
".",
"get_config_parameters",
"(",
"integration_path",
")",
")",
"args_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"integration_path",
",",
"defs",
".",
"ARGS_JSON",
")",
"with",
"open",
"(",
"args_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"integration_args",
")",
"logger",
".",
"debug",
"(",
"\"writing %s to %s\"",
",",
"data",
",",
"args_file",
")",
"f",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"integration_args",
")",
")",
"click",
".",
"secho",
"(",
"\"[*] {0} has been configured, make sure to test it with `honeycomb integration test {0}`\"",
".",
"format",
"(",
"integration",
".",
"name",
")",
")"
] | 44.857143 | 28.464286 |
def update(self, labels, preds, masks=None):
        # pylint: disable=arguments-differ
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.
        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        masks : list of `NDArray` or None, optional
            Masks for samples, with the same shape as `labels`. value of its element must
            be either 1 or 0. If None, all samples are considered valid.
        """
        labels, preds = check_label_shapes(labels, preds, True)
        # Pair every (label, pred) with a None mask when no masks are given.
        masks = [None] * len(labels) if masks is None else masks
        for label, pred_label, mask in zip(labels, preds, masks):
            if pred_label.shape != label.shape:
                # Predictions carry per-class scores; reduce them to the
                # index of the best-scoring class along self.axis.
                # TODO(haibin) topk does not support fp16. Issue tracked at:
                # https://github.com/apache/incubator-mxnet/issues/14125
                # topk is used because argmax is slow:
                # https://github.com/apache/incubator-mxnet/issues/11061
                pred_label = ndarray.topk(pred_label.astype('float32', copy=False),
                                          k=1, ret_typ='indices', axis=self.axis)
            # flatten before checking shapes to avoid shape miss match
            pred_label = pred_label.astype('int32', copy=False).reshape((-1,))
            label = label.astype('int32', copy=False).reshape((-1,))
            check_label_shapes(label, pred_label)
            if mask is not None:
                # Positions whose mask is 0 contribute neither to the
                # correct count nor to the instance count.
                mask = mask.astype('int32', copy=False).reshape((-1,))
                check_label_shapes(label, mask)
                num_correct = ((pred_label == label) * mask).sum().asscalar()
                num_inst = mask.sum().asscalar()
            else:
                num_correct = (pred_label == label).sum().asscalar()
                num_inst = len(label)
            # Update both the resettable and the global accumulators.
            self.sum_metric += num_correct
            self.global_sum_metric += num_correct
            self.num_inst += num_inst
            self.global_num_inst += num_inst
"def",
"update",
"(",
"self",
",",
"labels",
",",
"preds",
",",
"masks",
"=",
"None",
")",
":",
"# pylint: disable=arguments-differ",
"labels",
",",
"preds",
"=",
"check_label_shapes",
"(",
"labels",
",",
"preds",
",",
"True",
")",
"masks",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"labels",
")",
"if",
"masks",
"is",
"None",
"else",
"masks",
"for",
"label",
",",
"pred_label",
",",
"mask",
"in",
"zip",
"(",
"labels",
",",
"preds",
",",
"masks",
")",
":",
"if",
"pred_label",
".",
"shape",
"!=",
"label",
".",
"shape",
":",
"# TODO(haibin) topk does not support fp16. Issue tracked at:",
"# https://github.com/apache/incubator-mxnet/issues/14125",
"# topk is used because argmax is slow:",
"# https://github.com/apache/incubator-mxnet/issues/11061",
"pred_label",
"=",
"ndarray",
".",
"topk",
"(",
"pred_label",
".",
"astype",
"(",
"'float32'",
",",
"copy",
"=",
"False",
")",
",",
"k",
"=",
"1",
",",
"ret_typ",
"=",
"'indices'",
",",
"axis",
"=",
"self",
".",
"axis",
")",
"# flatten before checking shapes to avoid shape miss match",
"pred_label",
"=",
"pred_label",
".",
"astype",
"(",
"'int32'",
",",
"copy",
"=",
"False",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
"label",
"=",
"label",
".",
"astype",
"(",
"'int32'",
",",
"copy",
"=",
"False",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
"check_label_shapes",
"(",
"label",
",",
"pred_label",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"mask",
"=",
"mask",
".",
"astype",
"(",
"'int32'",
",",
"copy",
"=",
"False",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
")",
")",
"check_label_shapes",
"(",
"label",
",",
"mask",
")",
"num_correct",
"=",
"(",
"(",
"pred_label",
"==",
"label",
")",
"*",
"mask",
")",
".",
"sum",
"(",
")",
".",
"asscalar",
"(",
")",
"num_inst",
"=",
"mask",
".",
"sum",
"(",
")",
".",
"asscalar",
"(",
")",
"else",
":",
"num_correct",
"=",
"(",
"pred_label",
"==",
"label",
")",
".",
"sum",
"(",
")",
".",
"asscalar",
"(",
")",
"num_inst",
"=",
"len",
"(",
"label",
")",
"self",
".",
"sum_metric",
"+=",
"num_correct",
"self",
".",
"global_sum_metric",
"+=",
"num_correct",
"self",
".",
"num_inst",
"+=",
"num_inst",
"self",
".",
"global_num_inst",
"+=",
"num_inst"
] | 50.045455 | 22.022727 |
def ScanForFileSystem(self, source_path_spec):
    """Scans the path specification for a supported file system format.

    Args:
      source_path_spec (PathSpec): source path specification.

    Returns:
      PathSpec: file system path specification or None if no supported file
          system type was found.

    Raises:
      BackEndError: if the source cannot be scanned or more than one file
          system type is found.
    """
    if source_path_spec.type_indicator == (
        definitions.TYPE_INDICATOR_APFS_CONTAINER):
      # TODO: consider changes this when upstream changes have been made.
      # Currently pyfsapfs does not support reading from a volume as a device.
      # Also see: https://github.com/log2timeline/dfvfs/issues/332
      return path_spec_factory.Factory.NewPathSpec(
          definitions.TYPE_INDICATOR_APFS, location='/',
          parent=source_path_spec)
    try:
      type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(
          source_path_spec, resolver_context=self._resolver_context)
    except RuntimeError as exception:
      raise errors.BackEndError((
          'Unable to process source path specification with error: '
          '{0!s}').format(exception))
    if not type_indicators:
      return None
    type_indicator = type_indicators[0]
    if len(type_indicators) > 1:
      # Multiple candidate types are only acceptable when the preferred
      # NTFS back-end is among them; otherwise the detection is ambiguous.
      if definitions.PREFERRED_NTFS_BACK_END not in type_indicators:
        raise errors.BackEndError(
            'Unsupported source found more than one file system types.')
      type_indicator = definitions.PREFERRED_NTFS_BACK_END
    # TODO: determine root location from file system or path specification.
    if type_indicator == definitions.TYPE_INDICATOR_NTFS:
      root_location = '\\'
    else:
      root_location = '/'
    file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
        type_indicator, location=root_location, parent=source_path_spec)
    if type_indicator == definitions.TYPE_INDICATOR_TSK:
      # Check if the file system can be opened since the file system by
      # signature detection results in false positives.
      try:
        file_system = resolver.Resolver.OpenFileSystem(
            file_system_path_spec, resolver_context=self._resolver_context)
        file_system.Close()
      except errors.BackEndError:
        file_system_path_spec = None
    return file_system_path_spec
"def",
"ScanForFileSystem",
"(",
"self",
",",
"source_path_spec",
")",
":",
"if",
"source_path_spec",
".",
"type_indicator",
"==",
"(",
"definitions",
".",
"TYPE_INDICATOR_APFS_CONTAINER",
")",
":",
"# TODO: consider changes this when upstream changes have been made.",
"# Currently pyfsapfs does not support reading from a volume as a device.",
"# Also see: https://github.com/log2timeline/dfvfs/issues/332",
"return",
"path_spec_factory",
".",
"Factory",
".",
"NewPathSpec",
"(",
"definitions",
".",
"TYPE_INDICATOR_APFS",
",",
"location",
"=",
"'/'",
",",
"parent",
"=",
"source_path_spec",
")",
"try",
":",
"type_indicators",
"=",
"analyzer",
".",
"Analyzer",
".",
"GetFileSystemTypeIndicators",
"(",
"source_path_spec",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"except",
"RuntimeError",
"as",
"exception",
":",
"raise",
"errors",
".",
"BackEndError",
"(",
"(",
"'Unable to process source path specification with error: '",
"'{0!s}'",
")",
".",
"format",
"(",
"exception",
")",
")",
"if",
"not",
"type_indicators",
":",
"return",
"None",
"type_indicator",
"=",
"type_indicators",
"[",
"0",
"]",
"if",
"len",
"(",
"type_indicators",
")",
">",
"1",
":",
"if",
"definitions",
".",
"PREFERRED_NTFS_BACK_END",
"not",
"in",
"type_indicators",
":",
"raise",
"errors",
".",
"BackEndError",
"(",
"'Unsupported source found more than one file system types.'",
")",
"type_indicator",
"=",
"definitions",
".",
"PREFERRED_NTFS_BACK_END",
"# TODO: determine root location from file system or path specification.",
"if",
"type_indicator",
"==",
"definitions",
".",
"TYPE_INDICATOR_NTFS",
":",
"root_location",
"=",
"'\\\\'",
"else",
":",
"root_location",
"=",
"'/'",
"file_system_path_spec",
"=",
"path_spec_factory",
".",
"Factory",
".",
"NewPathSpec",
"(",
"type_indicator",
",",
"location",
"=",
"root_location",
",",
"parent",
"=",
"source_path_spec",
")",
"if",
"type_indicator",
"==",
"definitions",
".",
"TYPE_INDICATOR_TSK",
":",
"# Check if the file system can be opened since the file system by",
"# signature detection results in false positives.",
"try",
":",
"file_system",
"=",
"resolver",
".",
"Resolver",
".",
"OpenFileSystem",
"(",
"file_system_path_spec",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"file_system",
".",
"Close",
"(",
")",
"except",
"errors",
".",
"BackEndError",
":",
"file_system_path_spec",
"=",
"None",
"return",
"file_system_path_spec"
] | 37.548387 | 22.387097 |
def haversine_distance(origin, destination):
    """
    Calculate the Haversine distance.

    Parameters
    ----------
    origin : tuple of float
        (lat, long)
    destination : tuple of float
        (lat, long)

    Returns
    -------
    distance_in_km : float

    Raises
    ------
    ValueError
        If a latitude is outside [-90, +90] or a longitude is outside
        [-180, +180].

    Examples
    --------
    >>> munich = (48.1372, 11.5756)
    >>> berlin = (52.5186, 13.4083)
    >>> round(haversine_distance(munich, berlin), 1)
    504.2

    >>> new_york_city = (40.712777777778, -74.005833333333)  # NYC
    >>> round(haversine_distance(berlin, new_york_city), 1)
    6385.3
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    if not (-90.0 <= lat1 <= 90):
        raise ValueError('lat1={:2.2f}, but must be in [-90,+90]'.format(lat1))
    if not (-90.0 <= lat2 <= 90):
        raise ValueError('lat2={:2.2f}, but must be in [-90,+90]'.format(lat2))
    # BUG FIX: the longitude checks previously interpolated lat1 into both
    # messages and labeled the second one 'lon1'; report the actual
    # offending longitude value instead.
    if not (-180.0 <= lon1 <= 180):
        raise ValueError('lon1={:2.2f}, but must be in [-180,+180]'
                         .format(lon1))
    if not (-180.0 <= lon2 <= 180):
        raise ValueError('lon2={:2.2f}, but must be in [-180,+180]'
                         .format(lon2))
    radius = 6371  # mean Earth radius, km

    # Standard haversine formula on the great circle between the points.
    dlat = math_stl.radians(lat2 - lat1)
    dlon = math_stl.radians(lon2 - lon1)
    a = (math_stl.sin(dlat / 2) * math_stl.sin(dlat / 2) +
         math_stl.cos(math_stl.radians(lat1)) *
         math_stl.cos(math_stl.radians(lat2)) *
         math_stl.sin(dlon / 2) * math_stl.sin(dlon / 2))
    c = 2 * math_stl.atan2(math_stl.sqrt(a), math_stl.sqrt(1 - a))
    d = radius * c
    return d
"def",
"haversine_distance",
"(",
"origin",
",",
"destination",
")",
":",
"lat1",
",",
"lon1",
"=",
"origin",
"lat2",
",",
"lon2",
"=",
"destination",
"if",
"not",
"(",
"-",
"90.0",
"<=",
"lat1",
"<=",
"90",
")",
":",
"raise",
"ValueError",
"(",
"'lat1={:2.2f}, but must be in [-90,+90]'",
".",
"format",
"(",
"lat1",
")",
")",
"if",
"not",
"(",
"-",
"90.0",
"<=",
"lat2",
"<=",
"90",
")",
":",
"raise",
"ValueError",
"(",
"'lat2={:2.2f}, but must be in [-90,+90]'",
".",
"format",
"(",
"lat2",
")",
")",
"if",
"not",
"(",
"-",
"180.0",
"<=",
"lon1",
"<=",
"180",
")",
":",
"raise",
"ValueError",
"(",
"'lon1={:2.2f}, but must be in [-180,+180]'",
".",
"format",
"(",
"lat1",
")",
")",
"if",
"not",
"(",
"-",
"180.0",
"<=",
"lon2",
"<=",
"180",
")",
":",
"raise",
"ValueError",
"(",
"'lon1={:2.2f}, but must be in [-180,+180]'",
".",
"format",
"(",
"lat1",
")",
")",
"radius",
"=",
"6371",
"# km",
"dlat",
"=",
"math_stl",
".",
"radians",
"(",
"lat2",
"-",
"lat1",
")",
"dlon",
"=",
"math_stl",
".",
"radians",
"(",
"lon2",
"-",
"lon1",
")",
"a",
"=",
"(",
"math_stl",
".",
"sin",
"(",
"dlat",
"/",
"2",
")",
"*",
"math_stl",
".",
"sin",
"(",
"dlat",
"/",
"2",
")",
"+",
"math_stl",
".",
"cos",
"(",
"math_stl",
".",
"radians",
"(",
"lat1",
")",
")",
"*",
"math_stl",
".",
"cos",
"(",
"math_stl",
".",
"radians",
"(",
"lat2",
")",
")",
"*",
"math_stl",
".",
"sin",
"(",
"dlon",
"/",
"2",
")",
"*",
"math_stl",
".",
"sin",
"(",
"dlon",
"/",
"2",
")",
")",
"c",
"=",
"2",
"*",
"math_stl",
".",
"atan2",
"(",
"math_stl",
".",
"sqrt",
"(",
"a",
")",
",",
"math_stl",
".",
"sqrt",
"(",
"1",
"-",
"a",
")",
")",
"d",
"=",
"radius",
"*",
"c",
"return",
"d"
] | 30.46 | 18.94 |
def date_created(self):
        """Tuple ``(year, month, day)`` on which the Scopus record was created.

        Falls back to ``(None, None, None)`` when the institution profile
        carries no creation date.
        """
        node = self.xml.find('institution-profile/date-created')
        if node is None:
            return (None, None, None)
        attrs = node.attrib
        return (int(attrs['year']), int(attrs['month']), int(attrs['day']))
"def",
"date_created",
"(",
"self",
")",
":",
"date_created",
"=",
"self",
".",
"xml",
".",
"find",
"(",
"'institution-profile/date-created'",
")",
"if",
"date_created",
"is",
"not",
"None",
":",
"date_created",
"=",
"(",
"int",
"(",
"date_created",
".",
"attrib",
"[",
"'year'",
"]",
")",
",",
"int",
"(",
"date_created",
".",
"attrib",
"[",
"'month'",
"]",
")",
",",
"int",
"(",
"date_created",
".",
"attrib",
"[",
"'day'",
"]",
")",
")",
"else",
":",
"date_created",
"=",
"(",
"None",
",",
"None",
",",
"None",
")",
"return",
"date_created"
] | 44.8 | 16.1 |
def verts_str(verts, pad=1):
    r"""Format a list of integer (x, y) vertices as a readable string.

    Args:
        verts (list or None): sequence of 2-tuples of integers, or None.
        pad (int): minimum field width for each coordinate (default 1).

    Returns:
        str: e.g. ``'(1, 2), (3, 4)'``, or ``'None'`` when ``verts`` is None.

    Example:
        >>> verts_str([(1, 2), (3, 4)])
        '(1, 2), (3, 4)'
    """
    if verts is None:
        return 'None'
    # Build the per-vertex %-format once. The original constructed it by
    # joining a single-element list multiplied by 1 (a no-op) and routed the
    # pad through six.text_type where plain str() suffices.
    fmtstr = '(%{pad}d, %{pad}d)'.format(pad=pad)
    return ', '.join([fmtstr % vert for vert in verts])
"def",
"verts_str",
"(",
"verts",
",",
"pad",
"=",
"1",
")",
":",
"if",
"verts",
"is",
"None",
":",
"return",
"'None'",
"fmtstr",
"=",
"', '",
".",
"join",
"(",
"[",
"'%'",
"+",
"six",
".",
"text_type",
"(",
"pad",
")",
"+",
"'d'",
"+",
"', %'",
"+",
"six",
".",
"text_type",
"(",
"pad",
")",
"+",
"'d'",
"]",
"*",
"1",
")",
"return",
"', '",
".",
"join",
"(",
"[",
"'('",
"+",
"fmtstr",
"%",
"vert",
"+",
"')'",
"for",
"vert",
"in",
"verts",
"]",
")"
] | 45 | 16.428571 |
def data_integrity(components, components_data):
    """
    Check grid data for integrity
    Parameters
    ----------
    components: dict
        Grid components
    components_data: dict
        Grid component data (such as p,q and v set points)
    Returns
    -------
    None
        Logs a short report and exits the process with status 1 when a
        checked component type has mismatching object/dataset counts.
    """
    data_check = {}
    # NOTE(review): only 'Bus' and 'Load' are validated; the commented-out
    # expression suggests all keys of components_data may have been intended.
    for comp in ['Bus', 'Load']: # list(components_data.keys()):
        data_check[comp] = {}
        # Non-zero difference means inconsistent input data for this type.
        data_check[comp]['length_diff'] = len(components[comp]) - len(
            components_data[comp])
    # print short report to user and exit program on any count mismatch
    for comp in list(data_check.keys()):
        if data_check[comp]['length_diff'] != 0:
            # NOTE(review): logger.exception is normally reserved for use
            # inside an except block; logger.error may be intended here.
            logger.exception("{comp} data is invalid. You supplied {no_comp} {comp} "
                             "objects and {no_data} datasets. Check you grid data "
                             "and try again".format(comp=comp,
                                                    no_comp=len(components[comp]),
                                                    no_data=len(components_data[comp])))
            sys.exit(1)
"def",
"data_integrity",
"(",
"components",
",",
"components_data",
")",
":",
"data_check",
"=",
"{",
"}",
"for",
"comp",
"in",
"[",
"'Bus'",
",",
"'Load'",
"]",
":",
"# list(components_data.keys()):",
"data_check",
"[",
"comp",
"]",
"=",
"{",
"}",
"data_check",
"[",
"comp",
"]",
"[",
"'length_diff'",
"]",
"=",
"len",
"(",
"components",
"[",
"comp",
"]",
")",
"-",
"len",
"(",
"components_data",
"[",
"comp",
"]",
")",
"# print short report to user and exit program if not integer",
"for",
"comp",
"in",
"list",
"(",
"data_check",
".",
"keys",
"(",
")",
")",
":",
"if",
"data_check",
"[",
"comp",
"]",
"[",
"'length_diff'",
"]",
"!=",
"0",
":",
"logger",
".",
"exception",
"(",
"\"{comp} data is invalid. You supplied {no_comp} {comp} \"",
"\"objects and {no_data} datasets. Check you grid data \"",
"\"and try again\"",
".",
"format",
"(",
"comp",
"=",
"comp",
",",
"no_comp",
"=",
"len",
"(",
"components",
"[",
"comp",
"]",
")",
",",
"no_data",
"=",
"len",
"(",
"components_data",
"[",
"comp",
"]",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | 32.870968 | 22.354839 |
def branches(config, **kwargs):
    """
    Show current branch points.

    Thin wrapper that enters the ``alembic_lock`` context for this
    application's engine and Alembic configuration, then runs Alembic's
    ``branches`` command.

    :param config: application config exposing ``registry["sqlalchemy.engine"]``
        and ``alembic_config()``.
    :param kwargs: forwarded unchanged to ``alembic.command.branches``.
    """
    with alembic_lock(
        config.registry["sqlalchemy.engine"], config.alembic_config()
    ) as alembic_config:
        alembic.command.branches(alembic_config, **kwargs)
"def",
"branches",
"(",
"config",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"alembic_lock",
"(",
"config",
".",
"registry",
"[",
"\"sqlalchemy.engine\"",
"]",
",",
"config",
".",
"alembic_config",
"(",
")",
")",
"as",
"alembic_config",
":",
"alembic",
".",
"command",
".",
"branches",
"(",
"alembic_config",
",",
"*",
"*",
"kwargs",
")"
] | 31.125 | 12.375 |
def get_largest_schedule_within_budget(self, budget, proportion_discard):
        """
        Return the largest hyperband schedule whose cost fits within budget.

        The original hyperband algorithm is parameterized by R, the maximum
        number of resources per configuration; since only a total budget is
        given here, candidate schedules are generated for every R and the
        most expensive affordable one is selected.

        TODO(maxlam): Possibly binary search it if this becomes a bottleneck.

        Args:
            budget: total budget of the schedule.
            proportion_discard: hyperband parameter that specifies
                the proportion of configurations to discard per iteration.
        """
        # Enumerate candidate schedules for each R and keep the affordable ones.
        affordable = []
        for max_resources in range(1, budget):
            candidate = self.generate_hyperband_schedule(max_resources, proportion_discard)
            candidate_cost = self.compute_schedule_cost(candidate)
            if candidate_cost <= budget:
                affordable.append((candidate, candidate_cost))

        # Pick the schedule that uses as much of the budget as possible
        # (stable descending sort, then take the head).
        ordered = sorted(affordable, key=lambda pair: pair[1], reverse=True)
        return ordered[0][0]
"def",
"get_largest_schedule_within_budget",
"(",
"self",
",",
"budget",
",",
"proportion_discard",
")",
":",
"# Exhaustively generate schedules and check if",
"# they're within budget, adding to a list.",
"valid_schedules_and_costs",
"=",
"[",
"]",
"for",
"R",
"in",
"range",
"(",
"1",
",",
"budget",
")",
":",
"schedule",
"=",
"self",
".",
"generate_hyperband_schedule",
"(",
"R",
",",
"proportion_discard",
")",
"cost",
"=",
"self",
".",
"compute_schedule_cost",
"(",
"schedule",
")",
"if",
"cost",
"<=",
"budget",
":",
"valid_schedules_and_costs",
".",
"append",
"(",
"(",
"schedule",
",",
"cost",
")",
")",
"# Choose a valid schedule that maximizes usage of the budget.",
"valid_schedules_and_costs",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"valid_schedules_and_costs",
"[",
"0",
"]",
"[",
"0",
"]"
] | 46.4 | 21.92 |
def subalignment(alnfle, subtype, alntype="fasta"):
    """
    Subset synonymous or fourfold degenerate sites from an alignment
    input should be a codon alignment

    Args:
        alnfle (str): path to the input alignment file.
        subtype (str): "synonymous" keeps codon columns where every sequence
            encodes the same amino acid; "fourfold" keeps columns where every
            sequence's codon is in the fourfold degenerate set.
        alntype (str): alignment format understood by Bio.AlignIO.

    Returns:
        str or None: path of the written sub-alignment file, or None when no
        site matched (a message is printed to stderr in that case).
    """
    aln = AlignIO.read(alnfle, alntype)
    alnlen = aln.get_alignment_length()
    nseq = len(aln)
    subaln = None
    # Output file name: "<input-stem>_<subtype>.<alntype>".
    subalnfile = alnfle.rsplit(".", 1)[0] + "_{0}.{1}".format(subtype, alntype)
    if subtype == "synonymous":
        # Walk the alignment codon by codon (3 columns at a time).
        for j in range( 0, alnlen, 3 ):
            aa = None
            for i in range(nseq):
                codon = str(aln[i, j: j + 3].seq)
                if codon not in CODON_TRANSLATION:
                    break
                if aa and CODON_TRANSLATION[codon] != aa:
                    break
                else:
                    aa = CODON_TRANSLATION[codon]
            else:
                # for/else: reached only when no sequence broke out, i.e.
                # every sequence's codon translates to the same amino acid.
                if subaln is None:
                    subaln = aln[:, j: j + 3]
                else:
                    subaln += aln[:, j: j + 3]
    if subtype == "fourfold":
        for j in range( 0, alnlen, 3 ):
            for i in range(nseq):
                codon = str(aln[i, j: j + 3].seq)
                if codon not in FOURFOLD:
                    break
            else:
                # Keep the column when every sequence's codon is fourfold
                # degenerate.
                if subaln is None:
                    subaln = aln[:, j: j + 3]
                else:
                    subaln += aln[:, j: j + 3]
    if subaln:
        AlignIO.write(subaln, subalnfile, alntype)
        return subalnfile
    else:
        print("No sites {0} selected.".format(subtype), file=sys.stderr)
        return None
"def",
"subalignment",
"(",
"alnfle",
",",
"subtype",
",",
"alntype",
"=",
"\"fasta\"",
")",
":",
"aln",
"=",
"AlignIO",
".",
"read",
"(",
"alnfle",
",",
"alntype",
")",
"alnlen",
"=",
"aln",
".",
"get_alignment_length",
"(",
")",
"nseq",
"=",
"len",
"(",
"aln",
")",
"subaln",
"=",
"None",
"subalnfile",
"=",
"alnfle",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\"_{0}.{1}\"",
".",
"format",
"(",
"subtype",
",",
"alntype",
")",
"if",
"subtype",
"==",
"\"synonymous\"",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"alnlen",
",",
"3",
")",
":",
"aa",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"nseq",
")",
":",
"codon",
"=",
"str",
"(",
"aln",
"[",
"i",
",",
"j",
":",
"j",
"+",
"3",
"]",
".",
"seq",
")",
"if",
"codon",
"not",
"in",
"CODON_TRANSLATION",
":",
"break",
"if",
"aa",
"and",
"CODON_TRANSLATION",
"[",
"codon",
"]",
"!=",
"aa",
":",
"break",
"else",
":",
"aa",
"=",
"CODON_TRANSLATION",
"[",
"codon",
"]",
"else",
":",
"if",
"subaln",
"is",
"None",
":",
"subaln",
"=",
"aln",
"[",
":",
",",
"j",
":",
"j",
"+",
"3",
"]",
"else",
":",
"subaln",
"+=",
"aln",
"[",
":",
",",
"j",
":",
"j",
"+",
"3",
"]",
"if",
"subtype",
"==",
"\"fourfold\"",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"alnlen",
",",
"3",
")",
":",
"for",
"i",
"in",
"range",
"(",
"nseq",
")",
":",
"codon",
"=",
"str",
"(",
"aln",
"[",
"i",
",",
"j",
":",
"j",
"+",
"3",
"]",
".",
"seq",
")",
"if",
"codon",
"not",
"in",
"FOURFOLD",
":",
"break",
"else",
":",
"if",
"subaln",
"is",
"None",
":",
"subaln",
"=",
"aln",
"[",
":",
",",
"j",
":",
"j",
"+",
"3",
"]",
"else",
":",
"subaln",
"+=",
"aln",
"[",
":",
",",
"j",
":",
"j",
"+",
"3",
"]",
"if",
"subaln",
":",
"AlignIO",
".",
"write",
"(",
"subaln",
",",
"subalnfile",
",",
"alntype",
")",
"return",
"subalnfile",
"else",
":",
"print",
"(",
"\"No sites {0} selected.\"",
".",
"format",
"(",
"subtype",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"None"
] | 31.787234 | 15.191489 |
def report_messages_stats(sect, stats, _):
    """Append a per-message-id occurrence table to *sect*.

    Builds a two-column report table ("message id" / "occurrences"),
    most frequent message first, skipping informational messages
    (ids starting with "I").  Raises EmptyReportError when no
    messages were recorded at all.
    """
    if not stats["by_msg"]:
        # don't print this report when we didn't detect any errors
        raise exceptions.EmptyReportError()
    # Sort (count, id) pairs so ordering is primarily by frequency,
    # with message id as the tie-breaker.
    in_order = sorted(
        [
            (value, msg_id)
            for msg_id, value in stats["by_msg"].items()
            if not msg_id.startswith("I")
        ]
    )
    in_order.reverse()  # descending: most frequent message first
    lines = ("message id", "occurrences")  # table header row
    for value, msg_id in in_order:
        lines += (msg_id, str(value))
    sect.append(report_nodes.Table(children=lines, cols=2, rheaders=1)) | [
"def",
"report_messages_stats",
"(",
"sect",
",",
"stats",
",",
"_",
")",
":",
"if",
"not",
"stats",
"[",
"\"by_msg\"",
"]",
":",
"# don't print this report when we didn't detected any errors",
"raise",
"exceptions",
".",
"EmptyReportError",
"(",
")",
"in_order",
"=",
"sorted",
"(",
"[",
"(",
"value",
",",
"msg_id",
")",
"for",
"msg_id",
",",
"value",
"in",
"stats",
"[",
"\"by_msg\"",
"]",
".",
"items",
"(",
")",
"if",
"not",
"msg_id",
".",
"startswith",
"(",
"\"I\"",
")",
"]",
")",
"in_order",
".",
"reverse",
"(",
")",
"lines",
"=",
"(",
"\"message id\"",
",",
"\"occurrences\"",
")",
"for",
"value",
",",
"msg_id",
"in",
"in_order",
":",
"lines",
"+=",
"(",
"msg_id",
",",
"str",
"(",
"value",
")",
")",
"sect",
".",
"append",
"(",
"report_nodes",
".",
"Table",
"(",
"children",
"=",
"lines",
",",
"cols",
"=",
"2",
",",
"rheaders",
"=",
"1",
")",
")"
] | 34.647059 | 14.705882 |
def ConvertStringToFilename(name):
    """Converts an unicode string to a filesystem safe filename.

    For maximum compatibility we escape all chars which are not alphanumeric (in
    the unicode sense).

    Args:
      name: a unicode string that is part of a subject.

    Returns:
      A safe filename with escaped special chars.
    """
    # Percent-encode every non-word character (unicode-aware via re.UNICODE),
    # then strip any trailing "/" so the result never looks like a directory.
    return re.sub(
        r"\W", lambda x: "%%%02X" % ord(x.group(0)), name,
        flags=re.UNICODE).rstrip("/") | [
"def",
"ConvertStringToFilename",
"(",
"name",
")",
":",
"return",
"re",
".",
"sub",
"(",
"r\"\\W\"",
",",
"lambda",
"x",
":",
"\"%%%02X\"",
"%",
"ord",
"(",
"x",
".",
"group",
"(",
"0",
")",
")",
",",
"name",
",",
"flags",
"=",
"re",
".",
"UNICODE",
")",
".",
"rstrip",
"(",
"\"/\"",
")"
] | 28.2 | 20.666667 |
def set_execution_state(self, execution_state):
    """Set the current execution state and derive cluster/environ from it.

    A falsy *execution_state* clears the cached state and everything
    derived from it; otherwise cluster, environ and zone are recomputed
    via get_execution_state_dc_environ.  Registered watches are
    triggered afterwards.
    """
    if not execution_state:
        # Clearing: drop the cached state and all derived attributes.
        self.execution_state = None
        self.cluster = None
        self.environ = None
    else:
        self.execution_state = execution_state
        cluster, environ = self.get_execution_state_dc_environ(execution_state)
        self.cluster = cluster
        self.environ = environ
        self.zone = cluster  # NOTE(review): zone mirrors cluster — confirm intended
    # Notify observers of the change.  NOTE(review): assumed unconditional
    # (runs for both branches) — original indentation lost in this dump; confirm.
    self.trigger_watches() | [
"def",
"set_execution_state",
"(",
"self",
",",
"execution_state",
")",
":",
"if",
"not",
"execution_state",
":",
"self",
".",
"execution_state",
"=",
"None",
"self",
".",
"cluster",
"=",
"None",
"self",
".",
"environ",
"=",
"None",
"else",
":",
"self",
".",
"execution_state",
"=",
"execution_state",
"cluster",
",",
"environ",
"=",
"self",
".",
"get_execution_state_dc_environ",
"(",
"execution_state",
")",
"self",
".",
"cluster",
"=",
"cluster",
"self",
".",
"environ",
"=",
"environ",
"self",
".",
"zone",
"=",
"cluster",
"self",
".",
"trigger_watches",
"(",
")"
] | 32.692308 | 14 |
def store_dummy_router_net(self, net_id, subnet_id, rtr_id):
    """Storing the router attributes. """
    # Cache the IDs of the dummy network/subnet/router so later calls can
    # reference them without re-querying wherever they were created.
    self.dummy_net_id = net_id
    self.dummy_subnet_id = subnet_id
    self.dummy_router_id = rtr_id | [
"def",
"store_dummy_router_net",
"(",
"self",
",",
"net_id",
",",
"subnet_id",
",",
"rtr_id",
")",
":",
"self",
".",
"dummy_net_id",
"=",
"net_id",
"self",
".",
"dummy_subnet_id",
"=",
"subnet_id",
"self",
".",
"dummy_router_id",
"=",
"rtr_id"
] | 43.2 | 5.8 |
def datapoint(self, ind, field_names=None):
    """ Loads a tensor datapoint for a given global index.

    Parameters
    ----------
    ind : int
        global index in the tensor
    field_names : :obj:`list` of str
        field names to load (defaults to every field in the dataset)

    Returns
    -------
    :obj:`TensorDatapoint`
        the desired tensor datapoint

    Raises
    ------
    ValueError
        If *ind* is outside the dataset.
    """
    # flush if necessary: pending writes must reach disk before reading
    if self._has_unsaved_data:
        self.flush()
    # check valid input
    if ind >= self._num_datapoints:
        raise ValueError('Index %d larger than the number of datapoints in the dataset (%d)' %(ind, self._num_datapoints))
    # load the field names
    if field_names is None:
        field_names = self.field_names
    # return the datapoint
    datapoint = TensorDatapoint(field_names)
    file_num = self._index_to_file_num[ind]
    for field_name in field_names:
        # Data is chunked into per-file tensors: map the global index to a
        # file number plus an offset within that file, then read the slice.
        tensor = self.tensor(field_name, file_num)
        tensor_index = ind % self._datapoints_per_file
        datapoint[field_name] = tensor.datapoint(tensor_index)
    return datapoint | [
"def",
"datapoint",
"(",
"self",
",",
"ind",
",",
"field_names",
"=",
"None",
")",
":",
"# flush if necessary",
"if",
"self",
".",
"_has_unsaved_data",
":",
"self",
".",
"flush",
"(",
")",
"# check valid input",
"if",
"ind",
">=",
"self",
".",
"_num_datapoints",
":",
"raise",
"ValueError",
"(",
"'Index %d larger than the number of datapoints in the dataset (%d)'",
"%",
"(",
"ind",
",",
"self",
".",
"_num_datapoints",
")",
")",
"# load the field names",
"if",
"field_names",
"is",
"None",
":",
"field_names",
"=",
"self",
".",
"field_names",
"# return the datapoint",
"datapoint",
"=",
"TensorDatapoint",
"(",
"field_names",
")",
"file_num",
"=",
"self",
".",
"_index_to_file_num",
"[",
"ind",
"]",
"for",
"field_name",
"in",
"field_names",
":",
"tensor",
"=",
"self",
".",
"tensor",
"(",
"field_name",
",",
"file_num",
")",
"tensor_index",
"=",
"ind",
"%",
"self",
".",
"_datapoints_per_file",
"datapoint",
"[",
"field_name",
"]",
"=",
"tensor",
".",
"datapoint",
"(",
"tensor_index",
")",
"return",
"datapoint"
] | 32.342857 | 16.828571 |
def GetSyncMoConfig(ConfigDoc):
    """ Internal support method for SyncManagedObject.

    Parses every <mo> element of *ConfigDoc* (an XML DOM document) into a
    SyncMoConfig object and returns a dict keyed by class id.
    """
    moConfigMap = {}
    configList = ConfigDoc.getElementsByTagName("mo")
    for moConfigNode in configList:
        # Reset per-<mo> attributes; any XML attribute that is absent
        # simply stays None.
        classId = None
        noun = None
        version = None
        actionVersion = None
        action = None
        ignoreReason = None
        status = None
        excludeList = None
        if moConfigNode.hasAttribute("classid"):
            classId = moConfigNode.getAttribute("classid")
        if moConfigNode.hasAttribute("noun"):
            noun = moConfigNode.getAttribute("noun")
        if moConfigNode.hasAttribute("version"):
            version = moConfigNode.getAttribute("version")
        if moConfigNode.hasAttribute("actionVersion"):
            actionVersion = moConfigNode.getAttribute("actionVersion")
        if moConfigNode.hasAttribute("action"):
            action = moConfigNode.getAttribute("action")
        if moConfigNode.hasAttribute("ignoreReason"):
            ignoreReason = moConfigNode.getAttribute("ignoreReason")
        if moConfigNode.hasAttribute("status"):
            status = moConfigNode.getAttribute("status")
        if moConfigNode.hasAttribute("excludeList"):
            excludeList = moConfigNode.getAttribute("excludeList")
        # SyncMoConfig Object — only built when a classid was present.
        moConfig = None
        if classId:
            moConfig = SyncMoConfig(classId, noun, version, actionVersion, action, ignoreReason, status,
                                    excludeList)
        if moConfig:
            if classId in moConfigMap:
                # NOTE(review): this branch replaces the previously stored
                # *list* with a single SyncMoConfig object, while the else
                # branch stores a list — later entries for a repeated classid
                # are effectively dropped.  Looks like it should append to the
                # existing list instead; confirm against callers before fixing.
                moConfigMap[classId] = moConfig
            else:
                moConfigList = []
                moConfigList.append(moConfig)
                moConfigMap[classId] = moConfigList
    return moConfigMap | [
"def",
"GetSyncMoConfig",
"(",
"ConfigDoc",
")",
":",
"moConfigMap",
"=",
"{",
"}",
"configList",
"=",
"ConfigDoc",
".",
"getElementsByTagName",
"(",
"\"mo\"",
")",
"for",
"moConfigNode",
"in",
"configList",
":",
"classId",
"=",
"None",
"noun",
"=",
"None",
"version",
"=",
"None",
"actionVersion",
"=",
"None",
"action",
"=",
"None",
"ignoreReason",
"=",
"None",
"status",
"=",
"None",
"excludeList",
"=",
"None",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"classid\"",
")",
":",
"classId",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"classid\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"noun\"",
")",
":",
"noun",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"noun\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"version\"",
")",
":",
"version",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"version\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"actionVersion\"",
")",
":",
"actionVersion",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"actionVersion\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"action\"",
")",
":",
"action",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"action\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"ignoreReason\"",
")",
":",
"ignoreReason",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"ignoreReason\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"status\"",
")",
":",
"status",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"status\"",
")",
"if",
"moConfigNode",
".",
"hasAttribute",
"(",
"\"excludeList\"",
")",
":",
"excludeList",
"=",
"moConfigNode",
".",
"getAttribute",
"(",
"\"excludeList\"",
")",
"# SyncMoConfig Object",
"moConfig",
"=",
"None",
"if",
"classId",
":",
"moConfig",
"=",
"SyncMoConfig",
"(",
"classId",
",",
"noun",
",",
"version",
",",
"actionVersion",
",",
"action",
",",
"ignoreReason",
",",
"status",
",",
"excludeList",
")",
"if",
"moConfig",
":",
"if",
"classId",
"in",
"moConfigMap",
":",
"moConfigMap",
"[",
"classId",
"]",
"=",
"moConfig",
"else",
":",
"moConfigList",
"=",
"[",
"]",
"moConfigList",
".",
"append",
"(",
"moConfig",
")",
"moConfigMap",
"[",
"classId",
"]",
"=",
"moConfigList",
"return",
"moConfigMap"
] | 27.072727 | 20.527273 |
def rels_xml_for(self, source_uri):
    """
    Return rels item XML for source with *source_uri* or None if no rels
    item is present.
    """
    # EAFP: a missing rels part surfaces as KeyError from blob_for, which
    # is translated into a None return rather than propagated.
    try:
        rels_xml = self.blob_for(source_uri.rels_uri)
    except KeyError:
        rels_xml = None
    return rels_xml | [
"def",
"rels_xml_for",
"(",
"self",
",",
"source_uri",
")",
":",
"try",
":",
"rels_xml",
"=",
"self",
".",
"blob_for",
"(",
"source_uri",
".",
"rels_uri",
")",
"except",
"KeyError",
":",
"rels_xml",
"=",
"None",
"return",
"rels_xml"
] | 30 | 14.8 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.