text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def clear():
"""Clears the current context figure of all marks axes and grid lines."""
fig = _context['figure']
if fig is not None:
fig.marks = []
fig.axes = []
setattr(fig, 'axis_registry', {})
_context['scales'] = {}
key = _context['current_key']
if key is not None:
_context['scale_registry'][key] = {} | [
"def",
"clear",
"(",
")",
":",
"fig",
"=",
"_context",
"[",
"'figure'",
"]",
"if",
"fig",
"is",
"not",
"None",
":",
"fig",
".",
"marks",
"=",
"[",
"]",
"fig",
".",
"axes",
"=",
"[",
"]",
"setattr",
"(",
"fig",
",",
"'axis_registry'",
",",
"{",
"}",
")",
"_context",
"[",
"'scales'",
"]",
"=",
"{",
"}",
"key",
"=",
"_context",
"[",
"'current_key'",
"]",
"if",
"key",
"is",
"not",
"None",
":",
"_context",
"[",
"'scale_registry'",
"]",
"[",
"key",
"]",
"=",
"{",
"}"
] | 33.363636 | 11.636364 |
def get_request(self, url):
"""Send a get request.
warning: old api.
:return: a dict or raise Exception.
"""
resp = self.session.get(url, timeout=self.timeout,
proxies=self.proxies)
result = resp.json()
if result['code'] != 200:
LOG.error('Return %s when try to get %s', result, url)
raise GetRequestIllegal(result)
else:
return result | [
"def",
"get_request",
"(",
"self",
",",
"url",
")",
":",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"proxies",
"=",
"self",
".",
"proxies",
")",
"result",
"=",
"resp",
".",
"json",
"(",
")",
"if",
"result",
"[",
"'code'",
"]",
"!=",
"200",
":",
"LOG",
".",
"error",
"(",
"'Return %s when try to get %s'",
",",
"result",
",",
"url",
")",
"raise",
"GetRequestIllegal",
"(",
"result",
")",
"else",
":",
"return",
"result"
] | 30.333333 | 15.466667 |
def get_fault(fault_id=None):
"""Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.') | [
"def",
"get_fault",
"(",
"fault_id",
"=",
"None",
")",
":",
"counts",
"=",
"__get_table_limits",
"(",
")",
"result",
"=",
"None",
"id_",
"=",
"0",
"try",
":",
"if",
"isinstance",
"(",
"fault_id",
",",
"int",
")",
":",
"id_",
"=",
"fault_id",
"elif",
"isinstance",
"(",
"fault_id",
",",
"float",
")",
":",
"print",
"(",
"\"\"\"ValueError: Floating point number detected.\n Rounding number to 0 decimal places.\"\"\"",
")",
"id_",
"=",
"round",
"(",
"fault_id",
")",
"else",
":",
"id_",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"counts",
"[",
"'max_fau'",
"]",
")",
"except",
"ValueError",
":",
"print",
"(",
"\"ValueError: Incorrect parameter type detected.\"",
")",
"if",
"id_",
"<=",
"counts",
"[",
"'max_fau'",
"]",
":",
"fault",
"=",
"__get_fault",
"(",
"counts",
",",
"fault_id",
"=",
"id_",
")",
"else",
":",
"print",
"(",
"\"\"\"ValueError: Parameter integer is too high.\n Maximum permitted value is {0}.\"\"\"",
".",
"format",
"(",
"str",
"(",
"counts",
"[",
"'max_fau'",
"]",
")",
")",
")",
"id_",
"=",
"counts",
"[",
"'max_fau'",
"]",
"fault",
"=",
"__get_fault",
"(",
"counts",
",",
"fault_id",
"=",
"id_",
")",
"if",
"fault",
"is",
"not",
"None",
":",
"while",
"fault",
"[",
"0",
"]",
"==",
"'n'",
":",
"if",
"id_",
"is",
"not",
"None",
":",
"fault",
"=",
"__get_fault",
"(",
"counts",
",",
"None",
")",
"else",
":",
"fault",
"=",
"__get_fault",
"(",
"counts",
",",
"id_",
")",
"if",
"fault",
"[",
"0",
"]",
"==",
"'y'",
":",
"result",
"=",
"__process_sentence",
"(",
"fault",
",",
"counts",
")",
"return",
"result",
"else",
":",
"print",
"(",
"'ValueError: _fault cannot be None.'",
")"
] | 33.222222 | 17.822222 |
def _parse_dict(element, definition):
"""Parse xml element by a definition given in dict format.
:param element: ElementTree element
:param definition: definition schema
:type definition: dict
:return: parsed xml
:rtype: dict
"""
sub_dict = {}
for name, subdef in viewitems(definition):
(name, required) = _parse_name(name)
sub_dict[name] = xml_to_json(element, subdef, required)
return sub_dict | [
"def",
"_parse_dict",
"(",
"element",
",",
"definition",
")",
":",
"sub_dict",
"=",
"{",
"}",
"for",
"name",
",",
"subdef",
"in",
"viewitems",
"(",
"definition",
")",
":",
"(",
"name",
",",
"required",
")",
"=",
"_parse_name",
"(",
"name",
")",
"sub_dict",
"[",
"name",
"]",
"=",
"xml_to_json",
"(",
"element",
",",
"subdef",
",",
"required",
")",
"return",
"sub_dict"
] | 25.823529 | 17.411765 |
def verify(self):
'''Check that the database accurately describes the state of the repository'''
c = self.database.cursor()
non_exist = set()
no_db_entry = set(os.listdir(self.cache_dir))
try:
no_db_entry.remove('file_database.db')
no_db_entry.remove('file_database.db-journal')
except:
pass
for row in c.execute("SELECT path FROM files"):
path = row[0]
repo_path = os.path.join(self.cache_dir, path)
if os.path.exists(repo_path):
no_db_entry.remove(path)
else:
non_exist.add(path)
if len(non_exist) > 0:
raise Exception(
"Found {} records in db for files that don't exist: {}" .format(
len(non_exist),
','.join(non_exist)))
if len(no_db_entry) > 0:
raise Exception("Found {} files that don't have db entries: {}"
.format(len(no_db_entry), ','.join(no_db_entry))) | [
"def",
"verify",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"database",
".",
"cursor",
"(",
")",
"non_exist",
"=",
"set",
"(",
")",
"no_db_entry",
"=",
"set",
"(",
"os",
".",
"listdir",
"(",
"self",
".",
"cache_dir",
")",
")",
"try",
":",
"no_db_entry",
".",
"remove",
"(",
"'file_database.db'",
")",
"no_db_entry",
".",
"remove",
"(",
"'file_database.db-journal'",
")",
"except",
":",
"pass",
"for",
"row",
"in",
"c",
".",
"execute",
"(",
"\"SELECT path FROM files\"",
")",
":",
"path",
"=",
"row",
"[",
"0",
"]",
"repo_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache_dir",
",",
"path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"repo_path",
")",
":",
"no_db_entry",
".",
"remove",
"(",
"path",
")",
"else",
":",
"non_exist",
".",
"add",
"(",
"path",
")",
"if",
"len",
"(",
"non_exist",
")",
">",
"0",
":",
"raise",
"Exception",
"(",
"\"Found {} records in db for files that don't exist: {}\"",
".",
"format",
"(",
"len",
"(",
"non_exist",
")",
",",
"','",
".",
"join",
"(",
"non_exist",
")",
")",
")",
"if",
"len",
"(",
"no_db_entry",
")",
">",
"0",
":",
"raise",
"Exception",
"(",
"\"Found {} files that don't have db entries: {}\"",
".",
"format",
"(",
"len",
"(",
"no_db_entry",
")",
",",
"','",
".",
"join",
"(",
"no_db_entry",
")",
")",
")"
] | 32.34375 | 22.28125 |
def sents(self, fileids=None) -> Generator[str, str, None]:
"""
:param fileids:
:return: A generator of sentences
"""
for para in self.paras(fileids):
sentences = self._sent_tokenizer.tokenize(para)
for sentence in sentences:
yield sentence | [
"def",
"sents",
"(",
"self",
",",
"fileids",
"=",
"None",
")",
"->",
"Generator",
"[",
"str",
",",
"str",
",",
"None",
"]",
":",
"for",
"para",
"in",
"self",
".",
"paras",
"(",
"fileids",
")",
":",
"sentences",
"=",
"self",
".",
"_sent_tokenizer",
".",
"tokenize",
"(",
"para",
")",
"for",
"sentence",
"in",
"sentences",
":",
"yield",
"sentence"
] | 34.666667 | 7.555556 |
def check_output(self, make_ndx_output, message=None, err=None):
"""Simple tests to flag problems with a ``make_ndx`` run."""
if message is None:
message = ""
else:
message = '\n' + message
def format(output, w=60):
hrule = "====[ GromacsError (diagnostic output) ]".ljust(w,"=")
return hrule + '\n' + str(output) + hrule
rc = True
if self._is_empty_group(make_ndx_output):
warnings.warn("Selection produced empty group.{message!s}".format(**vars()), category=GromacsValueWarning)
rc = False
if self._has_syntax_error(make_ndx_output):
rc = False
out_formatted = format(make_ndx_output)
raise GromacsError("make_ndx encountered a Syntax Error, "
"%(message)s\noutput:\n%(out_formatted)s" % vars())
if make_ndx_output.strip() == "":
rc = False
out_formatted = format(err)
raise GromacsError("make_ndx produced no output, "
"%(message)s\nerror output:\n%(out_formatted)s" % vars())
return rc | [
"def",
"check_output",
"(",
"self",
",",
"make_ndx_output",
",",
"message",
"=",
"None",
",",
"err",
"=",
"None",
")",
":",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"\"\"",
"else",
":",
"message",
"=",
"'\\n'",
"+",
"message",
"def",
"format",
"(",
"output",
",",
"w",
"=",
"60",
")",
":",
"hrule",
"=",
"\"====[ GromacsError (diagnostic output) ]\"",
".",
"ljust",
"(",
"w",
",",
"\"=\"",
")",
"return",
"hrule",
"+",
"'\\n'",
"+",
"str",
"(",
"output",
")",
"+",
"hrule",
"rc",
"=",
"True",
"if",
"self",
".",
"_is_empty_group",
"(",
"make_ndx_output",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Selection produced empty group.{message!s}\"",
".",
"format",
"(",
"*",
"*",
"vars",
"(",
")",
")",
",",
"category",
"=",
"GromacsValueWarning",
")",
"rc",
"=",
"False",
"if",
"self",
".",
"_has_syntax_error",
"(",
"make_ndx_output",
")",
":",
"rc",
"=",
"False",
"out_formatted",
"=",
"format",
"(",
"make_ndx_output",
")",
"raise",
"GromacsError",
"(",
"\"make_ndx encountered a Syntax Error, \"",
"\"%(message)s\\noutput:\\n%(out_formatted)s\"",
"%",
"vars",
"(",
")",
")",
"if",
"make_ndx_output",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"rc",
"=",
"False",
"out_formatted",
"=",
"format",
"(",
"err",
")",
"raise",
"GromacsError",
"(",
"\"make_ndx produced no output, \"",
"\"%(message)s\\nerror output:\\n%(out_formatted)s\"",
"%",
"vars",
"(",
")",
")",
"return",
"rc"
] | 45.76 | 21.28 |
def create_widget(self, dim, holomap=None, editable=False):
""""
Given a Dimension creates bokeh widgets to select along that
dimension. For numeric data a slider widget is created which
may be either discrete, if a holomap is supplied or the
Dimension.values are set, or a continuous widget for
DynamicMaps. If the slider is discrete the returned mapping
defines a mapping between values and labels making it possible
sync the two slider and label widgets. For non-numeric data
a simple dropdown selection widget is generated.
"""
label, mapping = None, None
if holomap is None:
if dim.values:
if dim.default is None:
default = dim.values[0]
elif dim.default not in dim.values:
raise ValueError("%s dimension default %r is not in dimension values: %s"
% (dim, dim.default, dim.values))
else:
default = dim.default
value = dim.values.index(default)
if all(isnumeric(v) for v in dim.values):
values = sorted(dim.values)
labels = [unicode(dim.pprint_value(v)) for v in values]
if editable:
label = AutocompleteInput(value=labels[value], completions=labels,
title=dim.pprint_label)
else:
label = Div(text='<b>%s</b>' % dim.pprint_value_string(labels[value]))
widget = Slider(value=value, start=0, end=len(dim.values)-1, title=None, step=1)
mapping = list(enumerate(zip(values, labels)))
else:
values = [(v, dim.pprint_value(v)) for v in dim.values]
widget = Select(title=dim.pprint_label, value=values[value][0],
options=values)
else:
start = dim.soft_range[0] if dim.soft_range[0] else dim.range[0]
end = dim.soft_range[1] if dim.soft_range[1] else dim.range[1]
dim_range = end - start
int_type = isinstance(dim.type, type) and issubclass(dim.type, int)
if dim.step is not None:
step = dim.step
elif isinstance(dim_range, int) or int_type:
step = 1
else:
step = 10**((round(math.log10(dim_range))-3))
if dim.default is None:
default = start
elif (dim.default < start or dim.default > end):
raise ValueError("%s dimension default %r is not in the provided range: %s"
% (dim, dim.default, (start, end)))
else:
default = dim.default
if editable:
label = TextInput(value=str(default), title=dim.pprint_label)
else:
label = Div(text='<b>%s</b>' % dim.pprint_value_string(default))
widget = Slider(value=default, start=start,
end=end, step=step, title=None)
else:
values = (dim.values if dim.values else
list(unique_array(holomap.dimension_values(dim.name))))
if dim.default is None:
default = values[0]
elif dim.default not in values:
raise ValueError("%s dimension default %r is not in dimension values: %s"
% (dim, dim.default, values))
else:
default = dim.default
if isinstance(values[0], np.datetime64) or isnumeric(values[0]):
values = sorted(values)
labels = [dim.pprint_value(v) for v in values]
value = values.index(default)
if editable:
label = AutocompleteInput(value=labels[value], completions=labels,
title=dim.pprint_label)
else:
label = Div(text='<b>%s</b>' % (dim.pprint_value_string(labels[value])))
widget = Slider(value=value, start=0, end=len(values)-1, title=None, step=1)
else:
labels = [dim.pprint_value(v) for v in values]
widget = Select(title=dim.pprint_label, value=default,
options=list(zip(values, labels)))
mapping = list(enumerate(zip(values, labels)))
return widget, label, mapping | [
"def",
"create_widget",
"(",
"self",
",",
"dim",
",",
"holomap",
"=",
"None",
",",
"editable",
"=",
"False",
")",
":",
"label",
",",
"mapping",
"=",
"None",
",",
"None",
"if",
"holomap",
"is",
"None",
":",
"if",
"dim",
".",
"values",
":",
"if",
"dim",
".",
"default",
"is",
"None",
":",
"default",
"=",
"dim",
".",
"values",
"[",
"0",
"]",
"elif",
"dim",
".",
"default",
"not",
"in",
"dim",
".",
"values",
":",
"raise",
"ValueError",
"(",
"\"%s dimension default %r is not in dimension values: %s\"",
"%",
"(",
"dim",
",",
"dim",
".",
"default",
",",
"dim",
".",
"values",
")",
")",
"else",
":",
"default",
"=",
"dim",
".",
"default",
"value",
"=",
"dim",
".",
"values",
".",
"index",
"(",
"default",
")",
"if",
"all",
"(",
"isnumeric",
"(",
"v",
")",
"for",
"v",
"in",
"dim",
".",
"values",
")",
":",
"values",
"=",
"sorted",
"(",
"dim",
".",
"values",
")",
"labels",
"=",
"[",
"unicode",
"(",
"dim",
".",
"pprint_value",
"(",
"v",
")",
")",
"for",
"v",
"in",
"values",
"]",
"if",
"editable",
":",
"label",
"=",
"AutocompleteInput",
"(",
"value",
"=",
"labels",
"[",
"value",
"]",
",",
"completions",
"=",
"labels",
",",
"title",
"=",
"dim",
".",
"pprint_label",
")",
"else",
":",
"label",
"=",
"Div",
"(",
"text",
"=",
"'<b>%s</b>'",
"%",
"dim",
".",
"pprint_value_string",
"(",
"labels",
"[",
"value",
"]",
")",
")",
"widget",
"=",
"Slider",
"(",
"value",
"=",
"value",
",",
"start",
"=",
"0",
",",
"end",
"=",
"len",
"(",
"dim",
".",
"values",
")",
"-",
"1",
",",
"title",
"=",
"None",
",",
"step",
"=",
"1",
")",
"mapping",
"=",
"list",
"(",
"enumerate",
"(",
"zip",
"(",
"values",
",",
"labels",
")",
")",
")",
"else",
":",
"values",
"=",
"[",
"(",
"v",
",",
"dim",
".",
"pprint_value",
"(",
"v",
")",
")",
"for",
"v",
"in",
"dim",
".",
"values",
"]",
"widget",
"=",
"Select",
"(",
"title",
"=",
"dim",
".",
"pprint_label",
",",
"value",
"=",
"values",
"[",
"value",
"]",
"[",
"0",
"]",
",",
"options",
"=",
"values",
")",
"else",
":",
"start",
"=",
"dim",
".",
"soft_range",
"[",
"0",
"]",
"if",
"dim",
".",
"soft_range",
"[",
"0",
"]",
"else",
"dim",
".",
"range",
"[",
"0",
"]",
"end",
"=",
"dim",
".",
"soft_range",
"[",
"1",
"]",
"if",
"dim",
".",
"soft_range",
"[",
"1",
"]",
"else",
"dim",
".",
"range",
"[",
"1",
"]",
"dim_range",
"=",
"end",
"-",
"start",
"int_type",
"=",
"isinstance",
"(",
"dim",
".",
"type",
",",
"type",
")",
"and",
"issubclass",
"(",
"dim",
".",
"type",
",",
"int",
")",
"if",
"dim",
".",
"step",
"is",
"not",
"None",
":",
"step",
"=",
"dim",
".",
"step",
"elif",
"isinstance",
"(",
"dim_range",
",",
"int",
")",
"or",
"int_type",
":",
"step",
"=",
"1",
"else",
":",
"step",
"=",
"10",
"**",
"(",
"(",
"round",
"(",
"math",
".",
"log10",
"(",
"dim_range",
")",
")",
"-",
"3",
")",
")",
"if",
"dim",
".",
"default",
"is",
"None",
":",
"default",
"=",
"start",
"elif",
"(",
"dim",
".",
"default",
"<",
"start",
"or",
"dim",
".",
"default",
">",
"end",
")",
":",
"raise",
"ValueError",
"(",
"\"%s dimension default %r is not in the provided range: %s\"",
"%",
"(",
"dim",
",",
"dim",
".",
"default",
",",
"(",
"start",
",",
"end",
")",
")",
")",
"else",
":",
"default",
"=",
"dim",
".",
"default",
"if",
"editable",
":",
"label",
"=",
"TextInput",
"(",
"value",
"=",
"str",
"(",
"default",
")",
",",
"title",
"=",
"dim",
".",
"pprint_label",
")",
"else",
":",
"label",
"=",
"Div",
"(",
"text",
"=",
"'<b>%s</b>'",
"%",
"dim",
".",
"pprint_value_string",
"(",
"default",
")",
")",
"widget",
"=",
"Slider",
"(",
"value",
"=",
"default",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"step",
"=",
"step",
",",
"title",
"=",
"None",
")",
"else",
":",
"values",
"=",
"(",
"dim",
".",
"values",
"if",
"dim",
".",
"values",
"else",
"list",
"(",
"unique_array",
"(",
"holomap",
".",
"dimension_values",
"(",
"dim",
".",
"name",
")",
")",
")",
")",
"if",
"dim",
".",
"default",
"is",
"None",
":",
"default",
"=",
"values",
"[",
"0",
"]",
"elif",
"dim",
".",
"default",
"not",
"in",
"values",
":",
"raise",
"ValueError",
"(",
"\"%s dimension default %r is not in dimension values: %s\"",
"%",
"(",
"dim",
",",
"dim",
".",
"default",
",",
"values",
")",
")",
"else",
":",
"default",
"=",
"dim",
".",
"default",
"if",
"isinstance",
"(",
"values",
"[",
"0",
"]",
",",
"np",
".",
"datetime64",
")",
"or",
"isnumeric",
"(",
"values",
"[",
"0",
"]",
")",
":",
"values",
"=",
"sorted",
"(",
"values",
")",
"labels",
"=",
"[",
"dim",
".",
"pprint_value",
"(",
"v",
")",
"for",
"v",
"in",
"values",
"]",
"value",
"=",
"values",
".",
"index",
"(",
"default",
")",
"if",
"editable",
":",
"label",
"=",
"AutocompleteInput",
"(",
"value",
"=",
"labels",
"[",
"value",
"]",
",",
"completions",
"=",
"labels",
",",
"title",
"=",
"dim",
".",
"pprint_label",
")",
"else",
":",
"label",
"=",
"Div",
"(",
"text",
"=",
"'<b>%s</b>'",
"%",
"(",
"dim",
".",
"pprint_value_string",
"(",
"labels",
"[",
"value",
"]",
")",
")",
")",
"widget",
"=",
"Slider",
"(",
"value",
"=",
"value",
",",
"start",
"=",
"0",
",",
"end",
"=",
"len",
"(",
"values",
")",
"-",
"1",
",",
"title",
"=",
"None",
",",
"step",
"=",
"1",
")",
"else",
":",
"labels",
"=",
"[",
"dim",
".",
"pprint_value",
"(",
"v",
")",
"for",
"v",
"in",
"values",
"]",
"widget",
"=",
"Select",
"(",
"title",
"=",
"dim",
".",
"pprint_label",
",",
"value",
"=",
"default",
",",
"options",
"=",
"list",
"(",
"zip",
"(",
"values",
",",
"labels",
")",
")",
")",
"mapping",
"=",
"list",
"(",
"enumerate",
"(",
"zip",
"(",
"values",
",",
"labels",
")",
")",
")",
"return",
"widget",
",",
"label",
",",
"mapping"
] | 51.831461 | 22.606742 |
def set_data_dir(directory=None, create=False, save=False):
"""Set vispy data download directory
Parameters
----------
directory : str | None
The directory to use.
create : bool
If True, create directory if it doesn't exist.
save : bool
If True, save the configuration to the vispy config.
"""
if directory is None:
directory = _data_path
if _data_path is None:
raise IOError('default path cannot be determined, please '
'set it manually (directory != None)')
if not op.isdir(directory):
if not create:
raise IOError('directory "%s" does not exist, perhaps try '
'create=True to create it?' % directory)
os.mkdir(directory)
config.update(data_path=directory)
if save:
save_config(data_path=directory) | [
"def",
"set_data_dir",
"(",
"directory",
"=",
"None",
",",
"create",
"=",
"False",
",",
"save",
"=",
"False",
")",
":",
"if",
"directory",
"is",
"None",
":",
"directory",
"=",
"_data_path",
"if",
"_data_path",
"is",
"None",
":",
"raise",
"IOError",
"(",
"'default path cannot be determined, please '",
"'set it manually (directory != None)'",
")",
"if",
"not",
"op",
".",
"isdir",
"(",
"directory",
")",
":",
"if",
"not",
"create",
":",
"raise",
"IOError",
"(",
"'directory \"%s\" does not exist, perhaps try '",
"'create=True to create it?'",
"%",
"directory",
")",
"os",
".",
"mkdir",
"(",
"directory",
")",
"config",
".",
"update",
"(",
"data_path",
"=",
"directory",
")",
"if",
"save",
":",
"save_config",
"(",
"data_path",
"=",
"directory",
")"
] | 34.44 | 17.36 |
def store_file(self, path, data, fast_lane=True):
"""
Store the file in temp folder and stream it to server if online.
This makes sure that we have all newest data of this file on the server directly.
This method always overwrites the content of path. If you want to append always the content,
use Git.stream_file() instead.
At the end of the job, the content the server received is stored as git blob on the server. It is then committed
locally and pushed. Git detects that the server already has the version (through the continuous streaming)
and won't push it again.
"""
self.stream_files_lock.acquire()
try:
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path))
if hasattr(data, 'encode'):
data = data.encode("utf-8", 'replace')
already_set = path in self.store_files and self.store_files[path] == data
if is_debug3():
sys.__stderr__.write('git:store_file(%s, %s, %s), already_set=%s\n'
% (str(path), str(data)[0:180], str(fast_lane), str(already_set)))
if already_set:
return
open(full_path, 'wb').write(data)
self.store_files[path] = data
if self.client.online is not False:
self.client.send({'type': 'store-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files')
finally:
self.stream_files_lock.release() | [
"def",
"store_file",
"(",
"self",
",",
"path",
",",
"data",
",",
"fast_lane",
"=",
"True",
")",
":",
"self",
".",
"stream_files_lock",
".",
"acquire",
"(",
")",
"try",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"self",
".",
"temp_path",
"+",
"'/store-blob/'",
"+",
"self",
".",
"job_id",
"+",
"'/'",
"+",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"full_path",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"full_path",
")",
")",
"if",
"hasattr",
"(",
"data",
",",
"'encode'",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"\"utf-8\"",
",",
"'replace'",
")",
"already_set",
"=",
"path",
"in",
"self",
".",
"store_files",
"and",
"self",
".",
"store_files",
"[",
"path",
"]",
"==",
"data",
"if",
"is_debug3",
"(",
")",
":",
"sys",
".",
"__stderr__",
".",
"write",
"(",
"'git:store_file(%s, %s, %s), already_set=%s\\n'",
"%",
"(",
"str",
"(",
"path",
")",
",",
"str",
"(",
"data",
")",
"[",
"0",
":",
"180",
"]",
",",
"str",
"(",
"fast_lane",
")",
",",
"str",
"(",
"already_set",
")",
")",
")",
"if",
"already_set",
":",
"return",
"open",
"(",
"full_path",
",",
"'wb'",
")",
".",
"write",
"(",
"data",
")",
"self",
".",
"store_files",
"[",
"path",
"]",
"=",
"data",
"if",
"self",
".",
"client",
".",
"online",
"is",
"not",
"False",
":",
"self",
".",
"client",
".",
"send",
"(",
"{",
"'type'",
":",
"'store-blob'",
",",
"'path'",
":",
"path",
",",
"'data'",
":",
"data",
"}",
",",
"channel",
"=",
"''",
"if",
"fast_lane",
"else",
"'files'",
")",
"finally",
":",
"self",
".",
"stream_files_lock",
".",
"release",
"(",
")"
] | 42.2 | 29.8 |
def timestamp_to_datetime(timestamp):
'''
1514736000 --> datetime object
:param int timestamp: unix timestamp (int)
:return: datetime object or None
:rtype: datetime or None
'''
if isinstance(timestamp, (int, float, str)):
try:
timestamp = float(timestamp)
if timestamp.is_integer():
timestamp = int(timestamp)
except:
return None
temp = str(timestamp).split('.')[0]
if len(temp) == 13:
timestamp = timestamp / 1000.0
if len(temp) < 10:
return None
else:
return None
return datetime.fromtimestamp(timestamp) | [
"def",
"timestamp_to_datetime",
"(",
"timestamp",
")",
":",
"if",
"isinstance",
"(",
"timestamp",
",",
"(",
"int",
",",
"float",
",",
"str",
")",
")",
":",
"try",
":",
"timestamp",
"=",
"float",
"(",
"timestamp",
")",
"if",
"timestamp",
".",
"is_integer",
"(",
")",
":",
"timestamp",
"=",
"int",
"(",
"timestamp",
")",
"except",
":",
"return",
"None",
"temp",
"=",
"str",
"(",
"timestamp",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"temp",
")",
"==",
"13",
":",
"timestamp",
"=",
"timestamp",
"/",
"1000.0",
"if",
"len",
"(",
"temp",
")",
"<",
"10",
":",
"return",
"None",
"else",
":",
"return",
"None",
"return",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
")"
] | 26.071429 | 17.857143 |
def _scatter_ndarray(ar, axis=-1, destination=None, blocksize=None):
"""Turn a numpy ndarray into a DistArray or RemoteArray
Args:
ar (array_like)
axis (int, optional): specifies along which axis to split the array to
distribute it. The default is to split along the last axis. `None` means
do not distribute.
destination (int or list of int, optional): Optionally force the array to
go to a specific engine. If an array is to be scattered along an axis,
this should be a list of engine ids with the same length as that axis.
blocksize (int): Optionally control the size of intervals into which the
distributed axis is split (the default splits the distributed axis
evenly over all computing engines).
"""
from .arrays import DistArray, RemoteArray
shape = ar.shape
ndim = len(shape)
if axis is None:
return _directed_scatter([ar], destination=[destination],
blocksize=blocksize)[0]
if axis < -ndim or axis > ndim - 1:
raise DistobValueError('axis out of range')
if axis < 0:
axis = ndim + axis
n = shape[axis]
if n == 1:
return _directed_scatter([ar], destination=[destination])[0]
if isinstance(destination, collections.Sequence):
ne = len(destination) # number of engines to scatter array to
else:
if distob.engine is None:
setup_engines()
ne = distob.engine.nengines # by default scatter across all engines
if blocksize is None:
blocksize = ((n - 1) // ne) + 1
if blocksize > n:
blocksize = n
if isinstance(ar, DistArray):
if axis == ar._distaxis:
return ar
else:
raise DistobError('Currently can only scatter one axis of array')
# Currently, if requested to scatter an array that is already Remote and
# large, first get whole array locally, then scatter. Not really optimal.
if isinstance(ar, RemoteArray) and n > blocksize:
ar = ar._ob
s = slice(None)
subarrays = []
low = 0
for i in range(0, n // blocksize):
high = low + blocksize
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
low += blocksize
if n % blocksize != 0:
high = low + (n % blocksize)
index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - 1)
subarrays.append(ar[index])
subarrays = _directed_scatter(subarrays, destination=destination)
return DistArray(subarrays, axis) | [
"def",
"_scatter_ndarray",
"(",
"ar",
",",
"axis",
"=",
"-",
"1",
",",
"destination",
"=",
"None",
",",
"blocksize",
"=",
"None",
")",
":",
"from",
".",
"arrays",
"import",
"DistArray",
",",
"RemoteArray",
"shape",
"=",
"ar",
".",
"shape",
"ndim",
"=",
"len",
"(",
"shape",
")",
"if",
"axis",
"is",
"None",
":",
"return",
"_directed_scatter",
"(",
"[",
"ar",
"]",
",",
"destination",
"=",
"[",
"destination",
"]",
",",
"blocksize",
"=",
"blocksize",
")",
"[",
"0",
"]",
"if",
"axis",
"<",
"-",
"ndim",
"or",
"axis",
">",
"ndim",
"-",
"1",
":",
"raise",
"DistobValueError",
"(",
"'axis out of range'",
")",
"if",
"axis",
"<",
"0",
":",
"axis",
"=",
"ndim",
"+",
"axis",
"n",
"=",
"shape",
"[",
"axis",
"]",
"if",
"n",
"==",
"1",
":",
"return",
"_directed_scatter",
"(",
"[",
"ar",
"]",
",",
"destination",
"=",
"[",
"destination",
"]",
")",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"destination",
",",
"collections",
".",
"Sequence",
")",
":",
"ne",
"=",
"len",
"(",
"destination",
")",
"# number of engines to scatter array to",
"else",
":",
"if",
"distob",
".",
"engine",
"is",
"None",
":",
"setup_engines",
"(",
")",
"ne",
"=",
"distob",
".",
"engine",
".",
"nengines",
"# by default scatter across all engines",
"if",
"blocksize",
"is",
"None",
":",
"blocksize",
"=",
"(",
"(",
"n",
"-",
"1",
")",
"//",
"ne",
")",
"+",
"1",
"if",
"blocksize",
">",
"n",
":",
"blocksize",
"=",
"n",
"if",
"isinstance",
"(",
"ar",
",",
"DistArray",
")",
":",
"if",
"axis",
"==",
"ar",
".",
"_distaxis",
":",
"return",
"ar",
"else",
":",
"raise",
"DistobError",
"(",
"'Currently can only scatter one axis of array'",
")",
"# Currently, if requested to scatter an array that is already Remote and",
"# large, first get whole array locally, then scatter. Not really optimal.",
"if",
"isinstance",
"(",
"ar",
",",
"RemoteArray",
")",
"and",
"n",
">",
"blocksize",
":",
"ar",
"=",
"ar",
".",
"_ob",
"s",
"=",
"slice",
"(",
"None",
")",
"subarrays",
"=",
"[",
"]",
"low",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"n",
"//",
"blocksize",
")",
":",
"high",
"=",
"low",
"+",
"blocksize",
"index",
"=",
"(",
"s",
",",
")",
"*",
"axis",
"+",
"(",
"slice",
"(",
"low",
",",
"high",
")",
",",
")",
"+",
"(",
"s",
",",
")",
"*",
"(",
"ndim",
"-",
"axis",
"-",
"1",
")",
"subarrays",
".",
"append",
"(",
"ar",
"[",
"index",
"]",
")",
"low",
"+=",
"blocksize",
"if",
"n",
"%",
"blocksize",
"!=",
"0",
":",
"high",
"=",
"low",
"+",
"(",
"n",
"%",
"blocksize",
")",
"index",
"=",
"(",
"s",
",",
")",
"*",
"axis",
"+",
"(",
"slice",
"(",
"low",
",",
"high",
")",
",",
")",
"+",
"(",
"s",
",",
")",
"*",
"(",
"ndim",
"-",
"axis",
"-",
"1",
")",
"subarrays",
".",
"append",
"(",
"ar",
"[",
"index",
"]",
")",
"subarrays",
"=",
"_directed_scatter",
"(",
"subarrays",
",",
"destination",
"=",
"destination",
")",
"return",
"DistArray",
"(",
"subarrays",
",",
"axis",
")"
] | 42.066667 | 19.933333 |
def enable(self):
"""Return True|False if the AMP is enabled in the configuration file (enable=true|false)."""
ret = self.get('enable')
if ret is None:
return False
else:
return ret.lower().startswith('true') | [
"def",
"enable",
"(",
"self",
")",
":",
"ret",
"=",
"self",
".",
"get",
"(",
"'enable'",
")",
"if",
"ret",
"is",
"None",
":",
"return",
"False",
"else",
":",
"return",
"ret",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'true'",
")"
] | 36.857143 | 14.285714 |
def estimateHE(args):
"""
%prog estimateHE clustSfile
Estimate heterozygosity (H) and error rate (E). Idea borrowed heavily from
the PyRad paper.
"""
p = OptionParser(estimateHE.__doc__)
add_consensus_options(p)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
clustSfile, = args
HEfile = clustSfile.rsplit(".", 1)[0] + ".HE"
if not need_update(clustSfile, HEfile):
logging.debug("File `{0}` found. Computation skipped.".format(HEfile))
return HEfile
D = []
for d in cons(clustSfile, opts.mindepth):
D.extend(d)
logging.debug("Computing base frequencies ...")
P = makeP(D)
C = makeC(D)
logging.debug("Solving log-likelihood function ...")
x0 = [.01, .001] # initital values
H, E = scipy.optimize.fmin(LL, x0, args=(P, C))
fw = must_open(HEfile, "w")
print(H, E, file=fw)
fw.close()
return HEfile | [
"def",
"estimateHE",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"estimateHE",
".",
"__doc__",
")",
"add_consensus_options",
"(",
"p",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"clustSfile",
",",
"=",
"args",
"HEfile",
"=",
"clustSfile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\".HE\"",
"if",
"not",
"need_update",
"(",
"clustSfile",
",",
"HEfile",
")",
":",
"logging",
".",
"debug",
"(",
"\"File `{0}` found. Computation skipped.\"",
".",
"format",
"(",
"HEfile",
")",
")",
"return",
"HEfile",
"D",
"=",
"[",
"]",
"for",
"d",
"in",
"cons",
"(",
"clustSfile",
",",
"opts",
".",
"mindepth",
")",
":",
"D",
".",
"extend",
"(",
"d",
")",
"logging",
".",
"debug",
"(",
"\"Computing base frequencies ...\"",
")",
"P",
"=",
"makeP",
"(",
"D",
")",
"C",
"=",
"makeC",
"(",
"D",
")",
"logging",
".",
"debug",
"(",
"\"Solving log-likelihood function ...\"",
")",
"x0",
"=",
"[",
".01",
",",
".001",
"]",
"# initital values",
"H",
",",
"E",
"=",
"scipy",
".",
"optimize",
".",
"fmin",
"(",
"LL",
",",
"x0",
",",
"args",
"=",
"(",
"P",
",",
"C",
")",
")",
"fw",
"=",
"must_open",
"(",
"HEfile",
",",
"\"w\"",
")",
"print",
"(",
"H",
",",
"E",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"return",
"HEfile"
] | 25.75 | 19.694444 |
def ang_sep(row, ra1, dec1):
    """
    Calculate the angular separation between two sky coordinates.

    Uses the Vincenty formula (http://en.wikipedia.org/wiki/Great-circle_distance)
    and adapts from astropy's SkyCoord.
    Written to be used within the Database.search() method

    Parameters
    ----------
    row: dict, pandas Row
        Coordinate structure containing ra and dec keys in decimal degrees
    ra1: float
        RA to compare with, in decimal degrees
    dec1: float
        Dec to compare with, in decimal degrees

    Returns
    -------
    Angular distance, in degrees, between the coordinates
    """
    # Work in radians; RA plays the role of longitude and Dec of latitude
    # in the great-circle formula.
    dlon = math.radians(row['ra'] - ra1)
    sdlon = math.sin(dlon)
    cdlon = math.cos(dlon)
    slat1 = math.sin(math.radians(dec1))
    slat2 = math.sin(math.radians(row['dec']))
    clat1 = math.cos(math.radians(dec1))
    clat2 = math.cos(math.radians(row['dec']))
    num1 = clat2 * sdlon
    num2 = clat1 * slat2 - slat1 * clat2 * cdlon
    numerator = math.hypot(num1, num2)
    denominator = slat1 * slat2 + clat1 * clat2 * cdlon
    # math.atan2 suffices for scalar inputs -- no need to pull in numpy,
    # and the rest of the function already uses the math module.
    return math.degrees(math.atan2(numerator, denominator))
"def",
"ang_sep",
"(",
"row",
",",
"ra1",
",",
"dec1",
")",
":",
"factor",
"=",
"math",
".",
"pi",
"/",
"180",
"sdlon",
"=",
"math",
".",
"sin",
"(",
"(",
"row",
"[",
"'ra'",
"]",
"-",
"ra1",
")",
"*",
"factor",
")",
"# RA is longitude",
"cdlon",
"=",
"math",
".",
"cos",
"(",
"(",
"row",
"[",
"'ra'",
"]",
"-",
"ra1",
")",
"*",
"factor",
")",
"slat1",
"=",
"math",
".",
"sin",
"(",
"dec1",
"*",
"factor",
")",
"# Dec is latitude",
"slat2",
"=",
"math",
".",
"sin",
"(",
"row",
"[",
"'dec'",
"]",
"*",
"factor",
")",
"clat1",
"=",
"math",
".",
"cos",
"(",
"dec1",
"*",
"factor",
")",
"clat2",
"=",
"math",
".",
"cos",
"(",
"row",
"[",
"'dec'",
"]",
"*",
"factor",
")",
"num1",
"=",
"clat2",
"*",
"sdlon",
"num2",
"=",
"clat1",
"*",
"slat2",
"-",
"slat1",
"*",
"clat2",
"*",
"cdlon",
"numerator",
"=",
"math",
".",
"sqrt",
"(",
"num1",
"**",
"2",
"+",
"num2",
"**",
"2",
")",
"denominator",
"=",
"slat1",
"*",
"slat2",
"+",
"clat1",
"*",
"clat2",
"*",
"cdlon",
"return",
"np",
".",
"arctan2",
"(",
"numerator",
",",
"denominator",
")",
"/",
"factor"
] | 33.558824 | 20.382353 |
def _git(*command_parts, **kwargs):
    """ Convenience function for running git commands. Automatically deals with exceptions and unicode. """
    # Special arguments passed to sh: http://amoffat.github.io/sh/special_arguments.html
    sh_kwargs = {'_tty_out': False}
    sh_kwargs.update(kwargs)
    try:
        result = sh.git(*command_parts, **sh_kwargs)  # pylint: disable=unexpected-keyword-arg
        # sh raises for non-zero exit codes by default, so reaching this point
        # with exit_code > 0 means the caller explicitly allowed it: hand back
        # the raw result object instead of just the output text.
        if getattr(result, 'exit_code', 0) > 0:
            return result
        return ustr(result)
    except CommandNotFound:
        raise GitNotInstalledError()
    except ErrorReturnCode as err:  # git itself reported a failure
        error_msg = err.stderr.strip()
        if '_cwd' in sh_kwargs and b"not a git repository" in error_msg.lower():
            error_msg = u"{0} is not a git repository.".format(sh_kwargs['_cwd'])
        else:
            error_msg = u"An error occurred while executing '{0}': {1}".format(err.full_cmd, error_msg)
        raise GitContextError(error_msg)
"def",
"_git",
"(",
"*",
"command_parts",
",",
"*",
"*",
"kwargs",
")",
":",
"# Special arguments passed to sh: http://amoffat.github.io/sh/special_arguments.html",
"git_kwargs",
"=",
"{",
"'_tty_out'",
":",
"False",
"}",
"git_kwargs",
".",
"update",
"(",
"kwargs",
")",
"try",
":",
"result",
"=",
"sh",
".",
"git",
"(",
"*",
"command_parts",
",",
"*",
"*",
"git_kwargs",
")",
"# pylint: disable=unexpected-keyword-arg",
"# If we reach this point and the result has an exit_code that is larger than 0, this means that we didn't",
"# get an exception (which is the default sh behavior for non-zero exit codes) and so the user is expecting",
"# a non-zero exit code -> just return the entire result",
"if",
"hasattr",
"(",
"result",
",",
"'exit_code'",
")",
"and",
"result",
".",
"exit_code",
">",
"0",
":",
"return",
"result",
"return",
"ustr",
"(",
"result",
")",
"except",
"CommandNotFound",
":",
"raise",
"GitNotInstalledError",
"(",
")",
"except",
"ErrorReturnCode",
"as",
"e",
":",
"# Something went wrong while executing the git command",
"error_msg",
"=",
"e",
".",
"stderr",
".",
"strip",
"(",
")",
"if",
"'_cwd'",
"in",
"git_kwargs",
"and",
"b\"not a git repository\"",
"in",
"error_msg",
".",
"lower",
"(",
")",
":",
"error_msg",
"=",
"u\"{0} is not a git repository.\"",
".",
"format",
"(",
"git_kwargs",
"[",
"'_cwd'",
"]",
")",
"else",
":",
"error_msg",
"=",
"u\"An error occurred while executing '{0}': {1}\"",
".",
"format",
"(",
"e",
".",
"full_cmd",
",",
"error_msg",
")",
"raise",
"GitContextError",
"(",
"error_msg",
")"
] | 59.5 | 28.090909 |
def write_config(self, initialize_indices=False):
    """
    Persist the current configuration to disk, optionally initializing
    the elasticsearch indices for every jackal document type.
    """
    if not os.path.exists(self.config_dir):
        os.mkdir(self.config_dir)
    with open(self.config_file, 'w') as configfile:
        self.config.write(configfile)
    if initialize_indices:
        index = self.get('jackal', 'index')
        from jackal import Host, Range, Service, User, Credential, Log
        from jackal.core import create_connection
        create_connection(self)
        # Each document type lives in its own "<index>-<suffix>" index.
        doc_types = (
            (Host, 'hosts'), (Range, 'ranges'), (Service, 'services'),
            (User, 'users'), (Credential, 'creds'), (Log, 'log'),
        )
        for doc_type, suffix in doc_types:
            doc_type.init(index="{}-{}".format(index, suffix))
"def",
"write_config",
"(",
"self",
",",
"initialize_indices",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"config_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"config_dir",
")",
"with",
"open",
"(",
"self",
".",
"config_file",
",",
"'w'",
")",
"as",
"configfile",
":",
"self",
".",
"config",
".",
"write",
"(",
"configfile",
")",
"if",
"initialize_indices",
":",
"index",
"=",
"self",
".",
"get",
"(",
"'jackal'",
",",
"'index'",
")",
"from",
"jackal",
"import",
"Host",
",",
"Range",
",",
"Service",
",",
"User",
",",
"Credential",
",",
"Log",
"from",
"jackal",
".",
"core",
"import",
"create_connection",
"create_connection",
"(",
"self",
")",
"Host",
".",
"init",
"(",
"index",
"=",
"\"{}-hosts\"",
".",
"format",
"(",
"index",
")",
")",
"Range",
".",
"init",
"(",
"index",
"=",
"\"{}-ranges\"",
".",
"format",
"(",
"index",
")",
")",
"Service",
".",
"init",
"(",
"index",
"=",
"\"{}-services\"",
".",
"format",
"(",
"index",
")",
")",
"User",
".",
"init",
"(",
"index",
"=",
"\"{}-users\"",
".",
"format",
"(",
"index",
")",
")",
"Credential",
".",
"init",
"(",
"index",
"=",
"\"{}-creds\"",
".",
"format",
"(",
"index",
")",
")",
"Log",
".",
"init",
"(",
"index",
"=",
"\"{}-log\"",
".",
"format",
"(",
"index",
")",
")"
] | 41.809524 | 13.904762 |
def check_against_chunks(self, chunks):
    # type: (Iterator[bytes]) -> None
    """Check good hashes against ones built from iterable of chunks of
    data.

    Raise HashMismatch if none match.
    """
    digests = {}
    for algo_name in iterkeys(self._allowed):
        try:
            digests[algo_name] = hashlib.new(algo_name)
        except (ValueError, TypeError):
            raise InstallationError('Unknown hash name: %s' % algo_name)
    # Feed every chunk into every digest in a single pass over the data.
    for chunk in chunks:
        for digest in itervalues(digests):
            digest.update(chunk)
    # Any single matching digest is enough to accept the data.
    for algo_name, digest in iteritems(digests):
        if digest.hexdigest() in self._allowed[algo_name]:
            return
    self._raise(digests)
"def",
"check_against_chunks",
"(",
"self",
",",
"chunks",
")",
":",
"# type: (Iterator[bytes]) -> None",
"gots",
"=",
"{",
"}",
"for",
"hash_name",
"in",
"iterkeys",
"(",
"self",
".",
"_allowed",
")",
":",
"try",
":",
"gots",
"[",
"hash_name",
"]",
"=",
"hashlib",
".",
"new",
"(",
"hash_name",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"raise",
"InstallationError",
"(",
"'Unknown hash name: %s'",
"%",
"hash_name",
")",
"for",
"chunk",
"in",
"chunks",
":",
"for",
"hash",
"in",
"itervalues",
"(",
"gots",
")",
":",
"hash",
".",
"update",
"(",
"chunk",
")",
"for",
"hash_name",
",",
"got",
"in",
"iteritems",
"(",
"gots",
")",
":",
"if",
"got",
".",
"hexdigest",
"(",
")",
"in",
"self",
".",
"_allowed",
"[",
"hash_name",
"]",
":",
"return",
"self",
".",
"_raise",
"(",
"gots",
")"
] | 31.782609 | 16.434783 |
def cache_component_verify_ticket(self, msg, signature, timestamp, nonce):
    """
    Handle the component_verify_ticket message pushed by the WeChat server.

    :params msg: encrypted message content
    :params signature: message signature
    :params timestamp: timestamp
    :params nonce: random nonce
    """
    warnings.warn('`cache_component_verify_ticket` method of `WeChatComponent` is deprecated,'
                  'Use `parse_message` instead',
                  DeprecationWarning, stacklevel=2)
    # Decrypt the pushed XML payload, parse out the ticket message and
    # store the verify ticket in the session, keyed by the message type.
    content = self.crypto.decrypt_message(msg, signature, timestamp, nonce)
    message = xmltodict.parse(to_text(content))['xml']
    o = ComponentVerifyTicketMessage(message)
    self.session.set(o.type, o.verify_ticket) | [
"def",
"cache_component_verify_ticket",
"(",
"self",
",",
"msg",
",",
"signature",
",",
"timestamp",
",",
"nonce",
")",
":",
"warnings",
".",
"warn",
"(",
"'`cache_component_verify_ticket` method of `WeChatComponent` is deprecated,'",
"'Use `parse_message` instead'",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"content",
"=",
"self",
".",
"crypto",
".",
"decrypt_message",
"(",
"msg",
",",
"signature",
",",
"timestamp",
",",
"nonce",
")",
"message",
"=",
"xmltodict",
".",
"parse",
"(",
"to_text",
"(",
"content",
")",
")",
"[",
"'xml'",
"]",
"o",
"=",
"ComponentVerifyTicketMessage",
"(",
"message",
")",
"self",
".",
"session",
".",
"set",
"(",
"o",
".",
"type",
",",
"o",
".",
"verify_ticket",
")"
] | 43.875 | 18.5 |
def implied_feature(implicit_value):
    """ Returns the implicit feature associated with the given implicit value.
    """
    assert isinstance(implicit_value, basestring)
    # Only the leading dash-separated component identifies the feature.
    feature_name = implicit_value.split('-')[0]
    if feature_name not in __implicit_features:
        raise InvalidValue("'%s' is not a value of an implicit feature" % implicit_value)
    return __implicit_features[feature_name]
"def",
"implied_feature",
"(",
"implicit_value",
")",
":",
"assert",
"isinstance",
"(",
"implicit_value",
",",
"basestring",
")",
"components",
"=",
"implicit_value",
".",
"split",
"(",
"'-'",
")",
"if",
"components",
"[",
"0",
"]",
"not",
"in",
"__implicit_features",
":",
"raise",
"InvalidValue",
"(",
"\"'%s' is not a value of an implicit feature\"",
"%",
"implicit_value",
")",
"return",
"__implicit_features",
"[",
"components",
"[",
"0",
"]",
"]"
] | 39.6 | 15.7 |
def sX(qubit: Qubit, coefficient: complex = 1.0) -> Pauli:
    """Return the Pauli sigma_X operator acting on the given qubit.

    Args:
        qubit: The qubit the operator acts upon.
        coefficient: Scalar multiplier for the operator (defaults to 1.0).

    Returns:
        The corresponding Pauli element, ``coefficient * X(qubit)``.
    """
    return Pauli.sigma(qubit, 'X', coefficient) | [
"def",
"sX",
"(",
"qubit",
":",
"Qubit",
",",
"coefficient",
":",
"complex",
"=",
"1.0",
")",
"->",
"Pauli",
":",
"return",
"Pauli",
".",
"sigma",
"(",
"qubit",
",",
"'X'",
",",
"coefficient",
")"
] | 58 | 8.333333 |
def get_lab_managers_formatted_emails(self):
    """Returns a list with lab managers formatted emails
    """
    managers = api.get_users_by_roles("LabManager")
    # Pair each manager's full name with their email address, then format.
    name_email_pairs = [(manager.getProperty("fullname"),
                         manager.getProperty("email"))
                        for manager in managers]
    return [self.get_formatted_email(pair) for pair in name_email_pairs]
"def",
"get_lab_managers_formatted_emails",
"(",
"self",
")",
":",
"users",
"=",
"api",
".",
"get_users_by_roles",
"(",
"\"LabManager\"",
")",
"users",
"=",
"map",
"(",
"lambda",
"user",
":",
"(",
"user",
".",
"getProperty",
"(",
"\"fullname\"",
")",
",",
"user",
".",
"getProperty",
"(",
"\"email\"",
")",
")",
",",
"users",
")",
"return",
"map",
"(",
"self",
".",
"get_formatted_email",
",",
"users",
")"
] | 49.857143 | 11.142857 |
def between_time(self, start_time, end_time, include_start=True,
                 include_end=True, axis=None):
    """
    Select values between particular times of the day (e.g., 9:00-9:30 AM).

    By setting ``start_time`` to be later than ``end_time``,
    you can get the times that are *not* between the two times.

    Parameters
    ----------
    start_time : datetime.time or str
    end_time : datetime.time or str
    include_start : bool, default True
    include_end : bool, default True
    axis : {0 or 'index', 1 or 'columns'}, default 0

        .. versionadded:: 0.24.0

    Returns
    -------
    Series or DataFrame

    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`

    See Also
    --------
    at_time : Select values at a particular time of the day.
    first : Select initial periods of time series based on a date offset.
    last : Select final periods of time series based on a date offset.
    DatetimeIndex.indexer_between_time : Get just the index locations for
        values between particular times of the day.

    Examples
    --------
    >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
    >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
    >>> ts
                         A
    2018-04-09 00:00:00  1
    2018-04-10 00:20:00  2
    2018-04-11 00:40:00  3
    2018-04-12 01:00:00  4
    >>> ts.between_time('0:15', '0:45')
                         A
    2018-04-10 00:20:00  2
    2018-04-11 00:40:00  3

    You get the times that are *not* between two times by setting
    ``start_time`` later than ``end_time``:

    >>> ts.between_time('0:45', '0:15')
                         A
    2018-04-09 00:00:00  1
    2018-04-12 01:00:00  4
    """
    # Default to the statistics axis (the index, for a DataFrame).
    if axis is None:
        axis = self._stat_axis_number
    axis = self._get_axis_number(axis)
    index = self._get_axis(axis)
    try:
        locs = index.indexer_between_time(
            start_time, end_time,
            include_start=include_start,
            include_end=include_end)
    except AttributeError:
        # Only a DatetimeIndex implements indexer_between_time.
        raise TypeError('Index must be DatetimeIndex')
    return self._take(locs, axis=axis)
"def",
"between_time",
"(",
"self",
",",
"start_time",
",",
"end_time",
",",
"include_start",
"=",
"True",
",",
"include_end",
"=",
"True",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"self",
".",
"_stat_axis_number",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"index",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
"try",
":",
"indexer",
"=",
"index",
".",
"indexer_between_time",
"(",
"start_time",
",",
"end_time",
",",
"include_start",
"=",
"include_start",
",",
"include_end",
"=",
"include_end",
")",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"'Index must be DatetimeIndex'",
")",
"return",
"self",
".",
"_take",
"(",
"indexer",
",",
"axis",
"=",
"axis",
")"
] | 32.277778 | 19.666667 |
def old_signing_text(self):
    """Return the text needed for signing using SignatureVersion 1.

    Parameters are concatenated as ``key`` + ``value`` pairs, ordered
    case-insensitively by key, as the SigV1 scheme requires.
    """
    # sorted(cmp=...) and the cmp() builtin are Python 2 only (removed in
    # Python 3); a key function yields the same case-insensitive ordering.
    pairs = sorted(self.params.items(), key=lambda kv: kv[0].lower())
    return "".join("%s%s" % (key, value) for key, value in pairs)
"def",
"old_signing_text",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"lower_cmp",
"=",
"lambda",
"x",
",",
"y",
":",
"cmp",
"(",
"x",
"[",
"0",
"]",
".",
"lower",
"(",
")",
",",
"y",
"[",
"0",
"]",
".",
"lower",
"(",
")",
")",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"self",
".",
"params",
".",
"items",
"(",
")",
",",
"cmp",
"=",
"lower_cmp",
")",
":",
"result",
".",
"append",
"(",
"\"%s%s\"",
"%",
"(",
"key",
",",
"value",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"result",
")"
] | 47.285714 | 15 |
def parse_package(self, p_term):
    """Parses package fields."""
    # A package must carry a name; register an error and fall back to a
    # placeholder so the remaining fields can still be parsed.
    if (p_term, self.spdx_namespace['name'], None) not in self.graph:
        self.error = True
        self.logger.log('Package must have a name.')
        self.builder.create_package(self.doc, 'dummy_package')
    else:
        for _s, _p, o in self.graph.triples((p_term, self.spdx_namespace['name'], None)):
            try:
                self.builder.create_package(self.doc, six.text_type(o))
            except CardinalityError:
                self.more_than_one_error('Package name')
                break
    # Dispatch each optional package field to its dedicated handler.
    field_handlers = (
        (self.p_pkg_vinfo, self.spdx_namespace['versionInfo']),
        (self.p_pkg_fname, self.spdx_namespace['packageFileName']),
        (self.p_pkg_suppl, self.spdx_namespace['supplier']),
        (self.p_pkg_originator, self.spdx_namespace['originator']),
        (self.p_pkg_down_loc, self.spdx_namespace['downloadLocation']),
        (self.p_pkg_homepg, self.doap_namespace['homepage']),
        (self.p_pkg_chk_sum, self.spdx_namespace['checksum']),
        (self.p_pkg_src_info, self.spdx_namespace['sourceInfo']),
        (self.p_pkg_verif_code, self.spdx_namespace['packageVerificationCode']),
        (self.p_pkg_lic_conc, self.spdx_namespace['licenseConcluded']),
        (self.p_pkg_lic_decl, self.spdx_namespace['licenseDeclared']),
        (self.p_pkg_lics_info_from_files, self.spdx_namespace['licenseInfoFromFiles']),
        (self.p_pkg_comments_on_lics, self.spdx_namespace['licenseComments']),
        (self.p_pkg_cr_text, self.spdx_namespace['copyrightText']),
        (self.p_pkg_summary, self.spdx_namespace['summary']),
        (self.p_pkg_descr, self.spdx_namespace['description']),
    )
    for handler, predicate in field_handlers:
        handler(p_term, predicate)
"def",
"parse_package",
"(",
"self",
",",
"p_term",
")",
":",
"# Check there is a pacakge name",
"if",
"not",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'name'",
"]",
",",
"None",
")",
"in",
"self",
".",
"graph",
":",
"self",
".",
"error",
"=",
"True",
"self",
".",
"logger",
".",
"log",
"(",
"'Package must have a name.'",
")",
"# Create dummy package so that we may continue parsing the rest of",
"# the package fields.",
"self",
".",
"builder",
".",
"create_package",
"(",
"self",
".",
"doc",
",",
"'dummy_package'",
")",
"else",
":",
"for",
"_s",
",",
"_p",
",",
"o",
"in",
"self",
".",
"graph",
".",
"triples",
"(",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'name'",
"]",
",",
"None",
")",
")",
":",
"try",
":",
"self",
".",
"builder",
".",
"create_package",
"(",
"self",
".",
"doc",
",",
"six",
".",
"text_type",
"(",
"o",
")",
")",
"except",
"CardinalityError",
":",
"self",
".",
"more_than_one_error",
"(",
"'Package name'",
")",
"break",
"self",
".",
"p_pkg_vinfo",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'versionInfo'",
"]",
")",
"self",
".",
"p_pkg_fname",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'packageFileName'",
"]",
")",
"self",
".",
"p_pkg_suppl",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'supplier'",
"]",
")",
"self",
".",
"p_pkg_originator",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'originator'",
"]",
")",
"self",
".",
"p_pkg_down_loc",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'downloadLocation'",
"]",
")",
"self",
".",
"p_pkg_homepg",
"(",
"p_term",
",",
"self",
".",
"doap_namespace",
"[",
"'homepage'",
"]",
")",
"self",
".",
"p_pkg_chk_sum",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'checksum'",
"]",
")",
"self",
".",
"p_pkg_src_info",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'sourceInfo'",
"]",
")",
"self",
".",
"p_pkg_verif_code",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'packageVerificationCode'",
"]",
")",
"self",
".",
"p_pkg_lic_conc",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'licenseConcluded'",
"]",
")",
"self",
".",
"p_pkg_lic_decl",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'licenseDeclared'",
"]",
")",
"self",
".",
"p_pkg_lics_info_from_files",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'licenseInfoFromFiles'",
"]",
")",
"self",
".",
"p_pkg_comments_on_lics",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'licenseComments'",
"]",
")",
"self",
".",
"p_pkg_cr_text",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'copyrightText'",
"]",
")",
"self",
".",
"p_pkg_summary",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'summary'",
"]",
")",
"self",
".",
"p_pkg_descr",
"(",
"p_term",
",",
"self",
".",
"spdx_namespace",
"[",
"'description'",
"]",
")"
] | 58.818182 | 26.757576 |
def extract_init_args(instance):
    """
    Given an instance, and under the assumption that member variables have the
    same name as the __init__ arguments, extract the arguments so they can
    be used to reconstruct the instance when deserializing.

    :param instance: object whose ``__init__`` argument values are recovered
        from same-named attributes.
    :return: list of attribute values in ``__init__`` declaration order.
    """
    cls = instance.__class__
    # inspect.getargspec was deprecated and removed in Python 3.11;
    # inspect.signature is the portable replacement.  Restrict to plain
    # positional parameters to match getargspec's `.args` semantics.
    params = inspect.signature(cls.__init__).parameters
    arg_names = [
        name for name, param in params.items()
        if name != 'self' and param.kind in (param.POSITIONAL_ONLY,
                                             param.POSITIONAL_OR_KEYWORD)
    ]
    return [instance.__dict__[name] for name in arg_names]
"def",
"extract_init_args",
"(",
"instance",
")",
":",
"cls",
"=",
"instance",
".",
"__class__",
"args",
"=",
"[",
"x",
"for",
"x",
"in",
"inspect",
".",
"getargspec",
"(",
"cls",
".",
"__init__",
")",
".",
"args",
"if",
"x",
"!=",
"'self'",
"]",
"return",
"[",
"instance",
".",
"__dict__",
"[",
"key",
"]",
"for",
"key",
"in",
"args",
"]"
] | 45.666667 | 17.444444 |
def segwit_sighash(self, index, script, prevout_value=None,
                   sighash_type=None, anyone_can_pay=False):
    '''
    Calculate the BIP143 (segwit-style) sighash digest for the input at
    `index`, assembling the preimage fields in the order the spec defines.
    https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki

    :param index: index into self.tx_ins of the input being signed
    :param script: script code of the input, fed to the script-code
        adjustment helper (serialized as scripts inside CTxOuts)
    :param prevout_value: value of the spent output, appended verbatim --
        expected as 8-byte little endian per the step-6 field
    :param sighash_type: sighash type, forwarded to the sequence/outputs
        hashing helpers and the final adjustment
    :param anyone_can_pay: whether the ANYONECANPAY modifier applies
    :return: double-SHA256 of the assembled preimage bytes
    '''
    data = ByteData()
    # 1. nVersion of the transaction (4-byte little endian)
    data += self.version
    # 2. hashPrevouts (32-byte hash)
    data += self._hash_prevouts(anyone_can_pay=anyone_can_pay)
    # 3. hashSequence (32-byte hash)
    data += self._hash_sequence(sighash_type=sighash_type,
                                anyone_can_pay=anyone_can_pay)
    # 4. outpoint (32-byte hash + 4-byte little endian)
    data += self.tx_ins[index].outpoint
    # 5. scriptCode of the input (serialized as scripts inside CTxOuts)
    data += self._adjusted_script_code(script=script)
    # 6. value of the output spent by this input (8-byte little endian)
    data += prevout_value
    # 7. nSequence of the input (4-byte little endian)
    data += self.tx_ins[index].sequence
    # 8. hashOutputs (32-byte hash)
    data += self._hash_outputs(index=index, sighash_type=sighash_type)
    # 9. nLocktime of the transaction (4-byte little endian)
    data += self.lock_time
    # 10. sighash type of the signature (4-byte little endian)
    data += self._segwit_sighash_adjustment(sighash_type=sighash_type,
                                            anyone_can_pay=anyone_can_pay)
    return utils.hash256(data.to_bytes()) | [
"def",
"segwit_sighash",
"(",
"self",
",",
"index",
",",
"script",
",",
"prevout_value",
"=",
"None",
",",
"sighash_type",
"=",
"None",
",",
"anyone_can_pay",
"=",
"False",
")",
":",
"data",
"=",
"ByteData",
"(",
")",
"# 1. nVersion of the transaction (4-byte little endian)",
"data",
"+=",
"self",
".",
"version",
"# 2. hashPrevouts (32-byte hash)",
"data",
"+=",
"self",
".",
"_hash_prevouts",
"(",
"anyone_can_pay",
"=",
"anyone_can_pay",
")",
"# 3. hashSequence (32-byte hash)",
"data",
"+=",
"self",
".",
"_hash_sequence",
"(",
"sighash_type",
"=",
"sighash_type",
",",
"anyone_can_pay",
"=",
"anyone_can_pay",
")",
"# 4. outpoint (32-byte hash + 4-byte little endian)",
"data",
"+=",
"self",
".",
"tx_ins",
"[",
"index",
"]",
".",
"outpoint",
"# 5. scriptCode of the input (serialized as scripts inside CTxOuts)",
"data",
"+=",
"self",
".",
"_adjusted_script_code",
"(",
"script",
"=",
"script",
")",
"# 6. value of the output spent by this input (8-byte little endian)",
"data",
"+=",
"prevout_value",
"# 7. nSequence of the input (4-byte little endian)",
"data",
"+=",
"self",
".",
"tx_ins",
"[",
"index",
"]",
".",
"sequence",
"# 8. hashOutputs (32-byte hash)",
"data",
"+=",
"self",
".",
"_hash_outputs",
"(",
"index",
"=",
"index",
",",
"sighash_type",
"=",
"sighash_type",
")",
"# 9. nLocktime of the transaction (4-byte little endian)",
"data",
"+=",
"self",
".",
"lock_time",
"# 10. sighash type of the signature (4-byte little endian)",
"data",
"+=",
"self",
".",
"_segwit_sighash_adjustment",
"(",
"sighash_type",
"=",
"sighash_type",
",",
"anyone_can_pay",
"=",
"anyone_can_pay",
")",
"return",
"utils",
".",
"hash256",
"(",
"data",
".",
"to_bytes",
"(",
")",
")"
] | 38.928571 | 24.97619 |
def request_port_forward(self, address, port, handler=None):
    """
    Ask the server to forward TCP connections from a listening port on
    the server, across this SSH session.

    If a handler is given, that handler is called from a different thread
    whenever a forwarded connection arrives.  The handler parameters are::

        handler(channel, (origin_addr, origin_port), (server_addr, server_port))

    where C{server_addr} and C{server_port} are the address and port that
    the server was listening on.

    If no handler is set, the default behavior is to send new incoming
    forwarded connections into the accept queue, to be picked up via
    L{accept}.

    @param address: the address to bind when forwarding
    @type address: str
    @param port: the port to forward, or 0 to ask the server to allocate
        any port
    @type port: int
    @param handler: optional handler for incoming forwarded connections
    @type handler: function(Channel, (str, int), (str, int))
    @return: the port # allocated by the server
    @rtype: int

    @raise SSHException: if the server refused the TCP forward request
    """
    if not self.active:
        raise SSHException('SSH session not active')
    address = str(address)
    port = int(port)
    response = self.global_request('tcpip-forward', (address, port), wait=True)
    if response is None:
        raise SSHException('TCP forwarding request denied')
    if port == 0:
        # Port 0 asks the server to pick one; it reports its choice back.
        port = response.get_int()
    if handler is None:
        # Tuple parameter unpacking in 'def' signatures is Python-2-only
        # syntax (a SyntaxError on Python 3); accept the address tuples as
        # plain parameters instead -- the call signature is unchanged.
        def default_handler(channel, src_addr_port, dest_addr_port):
            self._queue_incoming_channel(channel)
        handler = default_handler
    self._tcp_handler = handler
    return port
"def",
"request_port_forward",
"(",
"self",
",",
"address",
",",
"port",
",",
"handler",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"active",
":",
"raise",
"SSHException",
"(",
"'SSH session not active'",
")",
"address",
"=",
"str",
"(",
"address",
")",
"port",
"=",
"int",
"(",
"port",
")",
"response",
"=",
"self",
".",
"global_request",
"(",
"'tcpip-forward'",
",",
"(",
"address",
",",
"port",
")",
",",
"wait",
"=",
"True",
")",
"if",
"response",
"is",
"None",
":",
"raise",
"SSHException",
"(",
"'TCP forwarding request denied'",
")",
"if",
"port",
"==",
"0",
":",
"port",
"=",
"response",
".",
"get_int",
"(",
")",
"if",
"handler",
"is",
"None",
":",
"def",
"default_handler",
"(",
"channel",
",",
"(",
"src_addr",
",",
"src_port",
")",
",",
"(",
"dest_addr",
",",
"dest_port",
")",
")",
":",
"self",
".",
"_queue_incoming_channel",
"(",
"channel",
")",
"handler",
"=",
"default_handler",
"self",
".",
"_tcp_handler",
"=",
"handler",
"return",
"port"
] | 41.590909 | 23.5 |
def modify_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None, PrimaryClusterId=None, SnapshottingClusterId=None, AutomaticFailoverEnabled=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, PreferredMaintenanceWindow=None, NotificationTopicArn=None, CacheParameterGroupName=None, NotificationTopicStatus=None, ApplyImmediately=None, EngineVersion=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, CacheNodeType=None, NodeGroupId=None):
"""
Modifies the settings for a replication group.
See also: AWS API Documentation
:example: response = client.modify_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
SnapshottingClusterId='string',
AutomaticFailoverEnabled=True|False,
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
CacheParameterGroupName='string',
NotificationTopicStatus='string',
ApplyImmediately=True|False,
EngineVersion='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
CacheNodeType='string',
NodeGroupId='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The identifier of the replication group to modify.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters.
:type PrimaryClusterId: string
:param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas.
:type SnapshottingClusterId: string
:param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.
Valid values: true | false
Note
ElastiCache Multi-AZ replication groups are not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.
This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC).
Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default .
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group.
This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.
Note
The Amazon SNS topic owner must be same as the replication group owner.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.
:type NotificationTopicStatus: string
:param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active .
Valid values: active | inactive
:type ApplyImmediately: boolean
:param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.
If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.
Valid values: true | false
Default: false
:type EngineVersion: string
:param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId .
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
:type CacheNodeType: string
:param CacheNodeType: A valid cache node type that you want to scale this replication group to.
:type NodeGroupId: string
:param NodeGroupId: The name of the Node Group (called shard in the console).
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
"""
pass | [
"def",
"modify_replication_group",
"(",
"ReplicationGroupId",
"=",
"None",
",",
"ReplicationGroupDescription",
"=",
"None",
",",
"PrimaryClusterId",
"=",
"None",
",",
"SnapshottingClusterId",
"=",
"None",
",",
"AutomaticFailoverEnabled",
"=",
"None",
",",
"CacheSecurityGroupNames",
"=",
"None",
",",
"SecurityGroupIds",
"=",
"None",
",",
"PreferredMaintenanceWindow",
"=",
"None",
",",
"NotificationTopicArn",
"=",
"None",
",",
"CacheParameterGroupName",
"=",
"None",
",",
"NotificationTopicStatus",
"=",
"None",
",",
"ApplyImmediately",
"=",
"None",
",",
"EngineVersion",
"=",
"None",
",",
"AutoMinorVersionUpgrade",
"=",
"None",
",",
"SnapshotRetentionLimit",
"=",
"None",
",",
"SnapshotWindow",
"=",
"None",
",",
"CacheNodeType",
"=",
"None",
",",
"NodeGroupId",
"=",
"None",
")",
":",
"pass"
] | 49.027322 | 35.005464 |
def _fetch_stock_data(self, stock_list):
"""因为 timekline 的返回没有带对应的股票代码,所以要手动带上"""
res = super()._fetch_stock_data(stock_list)
with_stock = []
for stock, resp in zip(stock_list, res):
if resp is not None:
with_stock.append((stock, resp))
return with_stock | [
"def",
"_fetch_stock_data",
"(",
"self",
",",
"stock_list",
")",
":",
"res",
"=",
"super",
"(",
")",
".",
"_fetch_stock_data",
"(",
"stock_list",
")",
"with_stock",
"=",
"[",
"]",
"for",
"stock",
",",
"resp",
"in",
"zip",
"(",
"stock_list",
",",
"res",
")",
":",
"if",
"resp",
"is",
"not",
"None",
":",
"with_stock",
".",
"append",
"(",
"(",
"stock",
",",
"resp",
")",
")",
"return",
"with_stock"
] | 35 | 11.888889 |
def cli(env, ack_all):
"""Summary and acknowledgement of upcoming and ongoing maintenance events"""
manager = AccountManager(env.client)
events = manager.get_upcoming_events()
if ack_all:
for event in events:
result = manager.ack_event(event['id'])
event['acknowledgedFlag'] = result
env.fout(event_table(events)) | [
"def",
"cli",
"(",
"env",
",",
"ack_all",
")",
":",
"manager",
"=",
"AccountManager",
"(",
"env",
".",
"client",
")",
"events",
"=",
"manager",
".",
"get_upcoming_events",
"(",
")",
"if",
"ack_all",
":",
"for",
"event",
"in",
"events",
":",
"result",
"=",
"manager",
".",
"ack_event",
"(",
"event",
"[",
"'id'",
"]",
")",
"event",
"[",
"'acknowledgedFlag'",
"]",
"=",
"result",
"env",
".",
"fout",
"(",
"event_table",
"(",
"events",
")",
")"
] | 32.454545 | 14.636364 |
def serialize(self, user=None):
"""
Serializes message for given user.
Note:
Should be called before first save(). Otherwise "is_update" will get wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
"""
return {
'content': self.body,
'type': self.typ,
'updated_at': self.updated_at,
'timestamp': self.updated_at,
'is_update': not hasattr(self, 'unsaved'),
'attachments': [attachment.serialize() for attachment in self.attachment_set],
'title': self.msg_title,
'url': self.url,
'sender_name': self.sender.full_name,
'sender_key': self.sender.key,
'channel_key': self.channel.key,
'cmd': 'message',
'avatar_url': self.sender.avatar,
'key': self.key,
} | [
"def",
"serialize",
"(",
"self",
",",
"user",
"=",
"None",
")",
":",
"return",
"{",
"'content'",
":",
"self",
".",
"body",
",",
"'type'",
":",
"self",
".",
"typ",
",",
"'updated_at'",
":",
"self",
".",
"updated_at",
",",
"'timestamp'",
":",
"self",
".",
"updated_at",
",",
"'is_update'",
":",
"not",
"hasattr",
"(",
"self",
",",
"'unsaved'",
")",
",",
"'attachments'",
":",
"[",
"attachment",
".",
"serialize",
"(",
")",
"for",
"attachment",
"in",
"self",
".",
"attachment_set",
"]",
",",
"'title'",
":",
"self",
".",
"msg_title",
",",
"'url'",
":",
"self",
".",
"url",
",",
"'sender_name'",
":",
"self",
".",
"sender",
".",
"full_name",
",",
"'sender_key'",
":",
"self",
".",
"sender",
".",
"key",
",",
"'channel_key'",
":",
"self",
".",
"channel",
".",
"key",
",",
"'cmd'",
":",
"'message'",
",",
"'avatar_url'",
":",
"self",
".",
"sender",
".",
"avatar",
",",
"'key'",
":",
"self",
".",
"key",
",",
"}"
] | 32.206897 | 16.965517 |
def upload_image(self,
image_file,
referer_url=None,
title=None,
desc=None,
created_at=None,
collection_id=None):
"""Upload an image
:param image_file: File-like object of an image file
:param referer_url: Referer site URL
:param title: Site title
:param desc: Comment
:param created_at: Image's created time in unix time
:param collection_id: Collection ID
"""
url = self.upload_url + '/api/upload'
data = {}
if referer_url is not None:
data['referer_url'] = referer_url
if title is not None:
data['title'] = title
if desc is not None:
data['desc'] = desc
if created_at is not None:
data['created_at'] = str(created_at)
if collection_id is not None:
data['collection_id'] = collection_id
files = {
'imagedata': image_file
}
response = self._request_url(
url, 'post', data=data, files=files, with_access_token=True)
headers, result = self._parse_and_check(response)
return Image.from_dict(result) | [
"def",
"upload_image",
"(",
"self",
",",
"image_file",
",",
"referer_url",
"=",
"None",
",",
"title",
"=",
"None",
",",
"desc",
"=",
"None",
",",
"created_at",
"=",
"None",
",",
"collection_id",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"upload_url",
"+",
"'/api/upload'",
"data",
"=",
"{",
"}",
"if",
"referer_url",
"is",
"not",
"None",
":",
"data",
"[",
"'referer_url'",
"]",
"=",
"referer_url",
"if",
"title",
"is",
"not",
"None",
":",
"data",
"[",
"'title'",
"]",
"=",
"title",
"if",
"desc",
"is",
"not",
"None",
":",
"data",
"[",
"'desc'",
"]",
"=",
"desc",
"if",
"created_at",
"is",
"not",
"None",
":",
"data",
"[",
"'created_at'",
"]",
"=",
"str",
"(",
"created_at",
")",
"if",
"collection_id",
"is",
"not",
"None",
":",
"data",
"[",
"'collection_id'",
"]",
"=",
"collection_id",
"files",
"=",
"{",
"'imagedata'",
":",
"image_file",
"}",
"response",
"=",
"self",
".",
"_request_url",
"(",
"url",
",",
"'post'",
",",
"data",
"=",
"data",
",",
"files",
"=",
"files",
",",
"with_access_token",
"=",
"True",
")",
"headers",
",",
"result",
"=",
"self",
".",
"_parse_and_check",
"(",
"response",
")",
"return",
"Image",
".",
"from_dict",
"(",
"result",
")"
] | 35.228571 | 10.628571 |
def compound_statements(logical_line):
"""
Compound statements (multiple statements on the same line) are
generally discouraged.
"""
line = logical_line
found = line.find(':')
if -1 < found < len(line) - 1:
before = line[:found]
if (before.count('{') <= before.count('}') and # {'a': 1} (dict)
before.count('[') <= before.count(']') and # [1:2] (slice)
not re.search(r'\blambda\b', before)): # lambda x: x
return found, "E701 multiple statements on one line (colon)"
found = line.find(';')
if -1 < found:
return found, "E702 multiple statements on one line (semicolon)" | [
"def",
"compound_statements",
"(",
"logical_line",
")",
":",
"line",
"=",
"logical_line",
"found",
"=",
"line",
".",
"find",
"(",
"':'",
")",
"if",
"-",
"1",
"<",
"found",
"<",
"len",
"(",
"line",
")",
"-",
"1",
":",
"before",
"=",
"line",
"[",
":",
"found",
"]",
"if",
"(",
"before",
".",
"count",
"(",
"'{'",
")",
"<=",
"before",
".",
"count",
"(",
"'}'",
")",
"and",
"# {'a': 1} (dict)",
"before",
".",
"count",
"(",
"'['",
")",
"<=",
"before",
".",
"count",
"(",
"']'",
")",
"and",
"# [1:2] (slice)",
"not",
"re",
".",
"search",
"(",
"r'\\blambda\\b'",
",",
"before",
")",
")",
":",
"# lambda x: x",
"return",
"found",
",",
"\"E701 multiple statements on one line (colon)\"",
"found",
"=",
"line",
".",
"find",
"(",
"';'",
")",
"if",
"-",
"1",
"<",
"found",
":",
"return",
"found",
",",
"\"E702 multiple statements on one line (semicolon)\""
] | 40.875 | 17.5 |
def reset(self):
"""Reset coincidence counter"""
self.counts = defaultdict(partial(np.zeros, (465, self.tmax * 2 + 1)))
self.n_timeslices = defaultdict(int) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"counts",
"=",
"defaultdict",
"(",
"partial",
"(",
"np",
".",
"zeros",
",",
"(",
"465",
",",
"self",
".",
"tmax",
"*",
"2",
"+",
"1",
")",
")",
")",
"self",
".",
"n_timeslices",
"=",
"defaultdict",
"(",
"int",
")"
] | 44.25 | 16.5 |
def check_applied(result):
"""
Raises LWTException if it looks like a failed LWT request. A LWTException
won't be raised in the special case in which there are several failed LWT
in a :class:`~cqlengine.query.BatchQuery`.
"""
try:
applied = result.was_applied
except Exception:
applied = True # result was not LWT form
if not applied:
raise LWTException(result.one()) | [
"def",
"check_applied",
"(",
"result",
")",
":",
"try",
":",
"applied",
"=",
"result",
".",
"was_applied",
"except",
"Exception",
":",
"applied",
"=",
"True",
"# result was not LWT form",
"if",
"not",
"applied",
":",
"raise",
"LWTException",
"(",
"result",
".",
"one",
"(",
")",
")"
] | 34.5 | 15 |
def _available_formats_helper(count_flag, format_flag):
"""Helper for available_formats() and available_subtypes()."""
count = _ffi.new("int*")
_snd.sf_command(_ffi.NULL, count_flag, count, _ffi.sizeof("int"))
for format_int in range(count[0]):
yield _format_info(format_int, format_flag) | [
"def",
"_available_formats_helper",
"(",
"count_flag",
",",
"format_flag",
")",
":",
"count",
"=",
"_ffi",
".",
"new",
"(",
"\"int*\"",
")",
"_snd",
".",
"sf_command",
"(",
"_ffi",
".",
"NULL",
",",
"count_flag",
",",
"count",
",",
"_ffi",
".",
"sizeof",
"(",
"\"int\"",
")",
")",
"for",
"format_int",
"in",
"range",
"(",
"count",
"[",
"0",
"]",
")",
":",
"yield",
"_format_info",
"(",
"format_int",
",",
"format_flag",
")"
] | 51.166667 | 11.5 |
async def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
await self.callHandlers(record) | [
"async",
"def",
"handle",
"(",
"self",
",",
"record",
")",
":",
"if",
"(",
"not",
"self",
".",
"disabled",
")",
"and",
"self",
".",
"filter",
"(",
"record",
")",
":",
"await",
"self",
".",
"callHandlers",
"(",
"record",
")"
] | 39 | 16.333333 |
def list_ingress_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_ingress_for_all_namespaces # noqa: E501
list or watch objects of kind Ingress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_ingress_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1IngressList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_ingress_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_ingress_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
return data | [
"def",
"list_ingress_for_all_namespaces",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"list_ingress_for_all_namespaces_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_ingress_for_all_namespaces_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 164.137931 | 135.068966 |
def main(self, config_filename):
"""
The "main" of the wrapper generator. Returns 0 on success, 1 if one or more errors occurred.
:param str config_filename: The name of the configuration file.
:rtype: int
"""
self._read_configuration_file(config_filename)
if self._wrapper_class_name:
self._io.title('Wrapper')
self.__generate_wrapper_class()
else:
self._io.log_verbose('Wrapper not enabled')
return 0 | [
"def",
"main",
"(",
"self",
",",
"config_filename",
")",
":",
"self",
".",
"_read_configuration_file",
"(",
"config_filename",
")",
"if",
"self",
".",
"_wrapper_class_name",
":",
"self",
".",
"_io",
".",
"title",
"(",
"'Wrapper'",
")",
"self",
".",
"__generate_wrapper_class",
"(",
")",
"else",
":",
"self",
".",
"_io",
".",
"log_verbose",
"(",
"'Wrapper not enabled'",
")",
"return",
"0"
] | 27.666667 | 22.777778 |
def generator(self, Xgen, Xexc, Xgov, Vgen):
""" Generator model.
Based on Generator.m from MatDyn by Stijn Cole, developed at Katholieke
Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/
matdyn/} for more information.
"""
generators = self.dyn_generators
omegas = 2 * pi * self.freq
F = zeros(Xgen.shape)
typ1 = [g._i for g in generators if g.model == CLASSICAL]
typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]
# Generator type 1: classical model
omega = Xgen[typ1, 1]
Pm0 = Xgov[typ1, 0]
H = array([g.h for g in generators])[typ1]
D = array([g.d for g in generators])[typ1]
Pe = Vgen[typ1, 2]
ddelta = omega = omegas
domega = pi * self.freq / H * (-D * (omega - omegas) + Pm0 - Pe)
dEq = zeros(len(typ1))
F[typ1, :] = c_[ddelta, domega, dEq]
# Generator type 2: 4th order model
omega = Xgen[typ2, 1]
Eq_tr = Xgen[typ2, 2]
Ed_tr = Xgen[typ2, 3]
H = array([g.h for g in generators])
D = array([g.d for g in generators])
xd = array([g.xd for g in generators])
xq = array([g.xq for g in generators])
xd_tr = array([g.xd_tr for g in generators])
xq_tr = array([g.xq_tr for g in generators])
Td0_tr = array([g.td for g in generators])
Tq0_tr = array([g.tq for g in generators])
Id = Vgen[typ2, 0]
Iq = Vgen[typ2, 1]
Pe = Vgen[typ2, 2]
Efd = Xexc[typ2, 0]
Pm = Xgov[typ2, 0]
ddelta = omega - omegas
domega = pi * self.freq / H * (-D * (omega - omegas) + Pm - Pe)
dEq = 1 / Td0_tr * (Efd - Eq_tr + (xd - xd_tr) * Id)
dEd = 1 / Tq0_tr * (-Ed_tr - (xq - xq_tr) * Iq)
F[typ2, :] = c_[ddelta, domega, dEq, dEd]
# Generator type 3:
# Generator type 4:
return F | [
"def",
"generator",
"(",
"self",
",",
"Xgen",
",",
"Xexc",
",",
"Xgov",
",",
"Vgen",
")",
":",
"generators",
"=",
"self",
".",
"dyn_generators",
"omegas",
"=",
"2",
"*",
"pi",
"*",
"self",
".",
"freq",
"F",
"=",
"zeros",
"(",
"Xgen",
".",
"shape",
")",
"typ1",
"=",
"[",
"g",
".",
"_i",
"for",
"g",
"in",
"generators",
"if",
"g",
".",
"model",
"==",
"CLASSICAL",
"]",
"typ2",
"=",
"[",
"g",
".",
"_i",
"for",
"g",
"in",
"generators",
"if",
"g",
".",
"model",
"==",
"FOURTH_ORDER",
"]",
"# Generator type 1: classical model",
"omega",
"=",
"Xgen",
"[",
"typ1",
",",
"1",
"]",
"Pm0",
"=",
"Xgov",
"[",
"typ1",
",",
"0",
"]",
"H",
"=",
"array",
"(",
"[",
"g",
".",
"h",
"for",
"g",
"in",
"generators",
"]",
")",
"[",
"typ1",
"]",
"D",
"=",
"array",
"(",
"[",
"g",
".",
"d",
"for",
"g",
"in",
"generators",
"]",
")",
"[",
"typ1",
"]",
"Pe",
"=",
"Vgen",
"[",
"typ1",
",",
"2",
"]",
"ddelta",
"=",
"omega",
"=",
"omegas",
"domega",
"=",
"pi",
"*",
"self",
".",
"freq",
"/",
"H",
"*",
"(",
"-",
"D",
"*",
"(",
"omega",
"-",
"omegas",
")",
"+",
"Pm0",
"-",
"Pe",
")",
"dEq",
"=",
"zeros",
"(",
"len",
"(",
"typ1",
")",
")",
"F",
"[",
"typ1",
",",
":",
"]",
"=",
"c_",
"[",
"ddelta",
",",
"domega",
",",
"dEq",
"]",
"# Generator type 2: 4th order model",
"omega",
"=",
"Xgen",
"[",
"typ2",
",",
"1",
"]",
"Eq_tr",
"=",
"Xgen",
"[",
"typ2",
",",
"2",
"]",
"Ed_tr",
"=",
"Xgen",
"[",
"typ2",
",",
"3",
"]",
"H",
"=",
"array",
"(",
"[",
"g",
".",
"h",
"for",
"g",
"in",
"generators",
"]",
")",
"D",
"=",
"array",
"(",
"[",
"g",
".",
"d",
"for",
"g",
"in",
"generators",
"]",
")",
"xd",
"=",
"array",
"(",
"[",
"g",
".",
"xd",
"for",
"g",
"in",
"generators",
"]",
")",
"xq",
"=",
"array",
"(",
"[",
"g",
".",
"xq",
"for",
"g",
"in",
"generators",
"]",
")",
"xd_tr",
"=",
"array",
"(",
"[",
"g",
".",
"xd_tr",
"for",
"g",
"in",
"generators",
"]",
")",
"xq_tr",
"=",
"array",
"(",
"[",
"g",
".",
"xq_tr",
"for",
"g",
"in",
"generators",
"]",
")",
"Td0_tr",
"=",
"array",
"(",
"[",
"g",
".",
"td",
"for",
"g",
"in",
"generators",
"]",
")",
"Tq0_tr",
"=",
"array",
"(",
"[",
"g",
".",
"tq",
"for",
"g",
"in",
"generators",
"]",
")",
"Id",
"=",
"Vgen",
"[",
"typ2",
",",
"0",
"]",
"Iq",
"=",
"Vgen",
"[",
"typ2",
",",
"1",
"]",
"Pe",
"=",
"Vgen",
"[",
"typ2",
",",
"2",
"]",
"Efd",
"=",
"Xexc",
"[",
"typ2",
",",
"0",
"]",
"Pm",
"=",
"Xgov",
"[",
"typ2",
",",
"0",
"]",
"ddelta",
"=",
"omega",
"-",
"omegas",
"domega",
"=",
"pi",
"*",
"self",
".",
"freq",
"/",
"H",
"*",
"(",
"-",
"D",
"*",
"(",
"omega",
"-",
"omegas",
")",
"+",
"Pm",
"-",
"Pe",
")",
"dEq",
"=",
"1",
"/",
"Td0_tr",
"*",
"(",
"Efd",
"-",
"Eq_tr",
"+",
"(",
"xd",
"-",
"xd_tr",
")",
"*",
"Id",
")",
"dEd",
"=",
"1",
"/",
"Tq0_tr",
"*",
"(",
"-",
"Ed_tr",
"-",
"(",
"xq",
"-",
"xq_tr",
")",
"*",
"Iq",
")",
"F",
"[",
"typ2",
",",
":",
"]",
"=",
"c_",
"[",
"ddelta",
",",
"domega",
",",
"dEq",
",",
"dEd",
"]",
"# Generator type 3:",
"# Generator type 4:",
"return",
"F"
] | 30.142857 | 19.873016 |
def report(function, *args, **kwds):
"""Run a function, catch, report and discard exceptions"""
try:
function(*args, **kwds)
except Exception:
traceback.print_exc() | [
"def",
"report",
"(",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"try",
":",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"except",
"Exception",
":",
"traceback",
".",
"print_exc",
"(",
")"
] | 31.166667 | 12.5 |
def _get_cells_headers_ids(self, hed, index):
"""
Returns a list with ids of rows of same column.
:param hed: The list that represents the table header.
:type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
:param index: The index of columns.
:type index: int
:return: The list with ids of rows of same column.
:rtype: list(str)
"""
# pylint: disable=no-self-use
ids = []
for row in hed:
if row[index].get_tag_name() == 'TH':
ids.append(row[index].get_attribute('id'))
return ids | [
"def",
"_get_cells_headers_ids",
"(",
"self",
",",
"hed",
",",
"index",
")",
":",
"# pylint: disable=no-self-use",
"ids",
"=",
"[",
"]",
"for",
"row",
"in",
"hed",
":",
"if",
"row",
"[",
"index",
"]",
".",
"get_tag_name",
"(",
")",
"==",
"'TH'",
":",
"ids",
".",
"append",
"(",
"row",
"[",
"index",
"]",
".",
"get_attribute",
"(",
"'id'",
")",
")",
"return",
"ids"
] | 34.111111 | 17 |
def trainable_params_(m):
""" Returns a list of trainable parameters in the model m. (i.e., those that require gradients.) """
if isinstance(m, collections.Sequence):
return [p for p in m if p.requires_grad]
else:
return [p for p in m.parameters() if p.requires_grad] | [
"def",
"trainable_params_",
"(",
"m",
")",
":",
"if",
"isinstance",
"(",
"m",
",",
"collections",
".",
"Sequence",
")",
":",
"return",
"[",
"p",
"for",
"p",
"in",
"m",
"if",
"p",
".",
"requires_grad",
"]",
"else",
":",
"return",
"[",
"p",
"for",
"p",
"in",
"m",
".",
"parameters",
"(",
")",
"if",
"p",
".",
"requires_grad",
"]"
] | 48.333333 | 13 |
def parse_line(self, line):
"""Parse a line of input.
The input line is tokenized using the same rules as the way bash shell
tokenizes inputs. All quoting and escaping rules from the bash shell
apply here too.
The following cases are handled by __exec_line__():
1. Empty line.
2. The input line is completely made of whitespace characters.
3. The input line is the EOF character.
4. The first token, as tokenized by shlex.split(), is '!'.
5. Internal commands, i.e., commands registered with internal =
True
Arguments:
The line to parse.
Returns:
A tuple (cmd, args). The first element cmd must be a python3 string.
The second element is, by default, a list of strings representing
the arguments, as tokenized by shlex.split().
How to overload parse_line():
1. The signature of the method must be the same.
2. The return value must be a tuple (cmd, args), where the cmd is
a string representing the first token, and args is a list of
strings.
"""
toks = shlex.split(line)
# Safe to index the 0-th element because this line would have been
# parsed by __exec_line__ if toks is an empty list.
return ( toks[0], [] if len(toks) == 1 else toks[1:] ) | [
"def",
"parse_line",
"(",
"self",
",",
"line",
")",
":",
"toks",
"=",
"shlex",
".",
"split",
"(",
"line",
")",
"# Safe to index the 0-th element because this line would have been",
"# parsed by __exec_line__ if toks is an empty list.",
"return",
"(",
"toks",
"[",
"0",
"]",
",",
"[",
"]",
"if",
"len",
"(",
"toks",
")",
"==",
"1",
"else",
"toks",
"[",
"1",
":",
"]",
")"
] | 42.818182 | 24.636364 |
def to_dict(self, remove_nones=False):
"""
Creates a dictionary representation of the enclave.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the EnclavePermissions object.
"""
d = super().to_dict(remove_nones=remove_nones)
d.update({
'read': self.read,
'create': self.create,
'update': self.update
})
return d | [
"def",
"to_dict",
"(",
"self",
",",
"remove_nones",
"=",
"False",
")",
":",
"d",
"=",
"super",
"(",
")",
".",
"to_dict",
"(",
"remove_nones",
"=",
"remove_nones",
")",
"d",
".",
"update",
"(",
"{",
"'read'",
":",
"self",
".",
"read",
",",
"'create'",
":",
"self",
".",
"create",
",",
"'update'",
":",
"self",
".",
"update",
"}",
")",
"return",
"d"
] | 30 | 24.117647 |
def sde(self):
"""
Return the state space representation of the covariance.
"""
variance = float(self.variance.values)
lengthscale = float(self.lengthscale.values)
lamda = np.sqrt(5.0)/lengthscale
kappa = 5.0/3.0*variance/lengthscale**2
F = np.array(((0, 1,0), (0, 0, 1), (-lamda**3, -3.0*lamda**2, -3*lamda)))
L = np.array(((0,),(0,),(1,)))
Qc = np.array((((variance*400.0*np.sqrt(5.0)/3.0/lengthscale**5),),))
H = np.array(((1,0,0),))
Pinf = np.array(((variance,0,-kappa), (0, kappa, 0), (-kappa, 0, 25.0*variance/lengthscale**4)))
P0 = Pinf.copy()
# Allocate space for the derivatives
dF = np.empty((3,3,2))
dQc = np.empty((1,1,2))
dPinf = np.empty((3,3,2))
# The partial derivatives
dFvariance = np.zeros((3,3))
dFlengthscale = np.array(((0,0,0),(0,0,0),(15.0*np.sqrt(5.0)/lengthscale**4,
30.0/lengthscale**3, 3*np.sqrt(5.0)/lengthscale**2)))
dQcvariance = np.array((((400*np.sqrt(5)/3/lengthscale**5,),)))
dQclengthscale = np.array((((-variance*2000*np.sqrt(5)/3/lengthscale**6,),)))
dPinf_variance = Pinf/variance
kappa2 = -2.0*kappa/lengthscale
dPinf_lengthscale = np.array(((0,0,-kappa2),(0,kappa2,0),(-kappa2,
0,-100*variance/lengthscale**5)))
# Combine the derivatives
dF[:,:,0] = dFvariance
dF[:,:,1] = dFlengthscale
dQc[:,:,0] = dQcvariance
dQc[:,:,1] = dQclengthscale
dPinf[:,:,0] = dPinf_variance
dPinf[:,:,1] = dPinf_lengthscale
dP0 = dPinf.copy()
return (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0) | [
"def",
"sde",
"(",
"self",
")",
":",
"variance",
"=",
"float",
"(",
"self",
".",
"variance",
".",
"values",
")",
"lengthscale",
"=",
"float",
"(",
"self",
".",
"lengthscale",
".",
"values",
")",
"lamda",
"=",
"np",
".",
"sqrt",
"(",
"5.0",
")",
"/",
"lengthscale",
"kappa",
"=",
"5.0",
"/",
"3.0",
"*",
"variance",
"/",
"lengthscale",
"**",
"2",
"F",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"0",
",",
"1",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
",",
"1",
")",
",",
"(",
"-",
"lamda",
"**",
"3",
",",
"-",
"3.0",
"*",
"lamda",
"**",
"2",
",",
"-",
"3",
"*",
"lamda",
")",
")",
")",
"L",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"0",
",",
")",
",",
"(",
"0",
",",
")",
",",
"(",
"1",
",",
")",
")",
")",
"Qc",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"(",
"variance",
"*",
"400.0",
"*",
"np",
".",
"sqrt",
"(",
"5.0",
")",
"/",
"3.0",
"/",
"lengthscale",
"**",
"5",
")",
",",
")",
",",
")",
")",
"H",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"1",
",",
"0",
",",
"0",
")",
",",
")",
")",
"Pinf",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"variance",
",",
"0",
",",
"-",
"kappa",
")",
",",
"(",
"0",
",",
"kappa",
",",
"0",
")",
",",
"(",
"-",
"kappa",
",",
"0",
",",
"25.0",
"*",
"variance",
"/",
"lengthscale",
"**",
"4",
")",
")",
")",
"P0",
"=",
"Pinf",
".",
"copy",
"(",
")",
"# Allocate space for the derivatives ",
"dF",
"=",
"np",
".",
"empty",
"(",
"(",
"3",
",",
"3",
",",
"2",
")",
")",
"dQc",
"=",
"np",
".",
"empty",
"(",
"(",
"1",
",",
"1",
",",
"2",
")",
")",
"dPinf",
"=",
"np",
".",
"empty",
"(",
"(",
"3",
",",
"3",
",",
"2",
")",
")",
"# The partial derivatives ",
"dFvariance",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
")",
"dFlengthscale",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"0",
",",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
",",
"0",
")",
",",
"(",
"15.0",
"*",
"np",
".",
"sqrt",
"(",
"5.0",
")",
"/",
"lengthscale",
"**",
"4",
",",
"30.0",
"/",
"lengthscale",
"**",
"3",
",",
"3",
"*",
"np",
".",
"sqrt",
"(",
"5.0",
")",
"/",
"lengthscale",
"**",
"2",
")",
")",
")",
"dQcvariance",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"(",
"400",
"*",
"np",
".",
"sqrt",
"(",
"5",
")",
"/",
"3",
"/",
"lengthscale",
"**",
"5",
",",
")",
",",
")",
")",
")",
"dQclengthscale",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"(",
"-",
"variance",
"*",
"2000",
"*",
"np",
".",
"sqrt",
"(",
"5",
")",
"/",
"3",
"/",
"lengthscale",
"**",
"6",
",",
")",
",",
")",
")",
")",
"dPinf_variance",
"=",
"Pinf",
"/",
"variance",
"kappa2",
"=",
"-",
"2.0",
"*",
"kappa",
"/",
"lengthscale",
"dPinf_lengthscale",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"0",
",",
"0",
",",
"-",
"kappa2",
")",
",",
"(",
"0",
",",
"kappa2",
",",
"0",
")",
",",
"(",
"-",
"kappa2",
",",
"0",
",",
"-",
"100",
"*",
"variance",
"/",
"lengthscale",
"**",
"5",
")",
")",
")",
"# Combine the derivatives ",
"dF",
"[",
":",
",",
":",
",",
"0",
"]",
"=",
"dFvariance",
"dF",
"[",
":",
",",
":",
",",
"1",
"]",
"=",
"dFlengthscale",
"dQc",
"[",
":",
",",
":",
",",
"0",
"]",
"=",
"dQcvariance",
"dQc",
"[",
":",
",",
":",
",",
"1",
"]",
"=",
"dQclengthscale",
"dPinf",
"[",
":",
",",
":",
",",
"0",
"]",
"=",
"dPinf_variance",
"dPinf",
"[",
":",
",",
":",
",",
"1",
"]",
"=",
"dPinf_lengthscale",
"dP0",
"=",
"dPinf",
".",
"copy",
"(",
")",
"return",
"(",
"F",
",",
"L",
",",
"Qc",
",",
"H",
",",
"Pinf",
",",
"P0",
",",
"dF",
",",
"dQc",
",",
"dPinf",
",",
"dP0",
")"
] | 42.272727 | 18.545455 |
def _CreateTaskStorageWriter(self, path, task):
"""Creates a task storage writer.
Args:
path (str): path to the storage file.
task (Task): task.
Returns:
SQLiteStorageFileWriter: storage writer.
"""
return SQLiteStorageFileWriter(
self._session, path,
storage_type=definitions.STORAGE_TYPE_TASK, task=task) | [
"def",
"_CreateTaskStorageWriter",
"(",
"self",
",",
"path",
",",
"task",
")",
":",
"return",
"SQLiteStorageFileWriter",
"(",
"self",
".",
"_session",
",",
"path",
",",
"storage_type",
"=",
"definitions",
".",
"STORAGE_TYPE_TASK",
",",
"task",
"=",
"task",
")"
] | 26.923077 | 16.153846 |
def polyfit2d(x, y, z, order=3 #bounds=None
):
'''
fit unstructured data
'''
ncols = (order + 1)**2
G = np.zeros((x.size, ncols))
ij = itertools.product(list(range(order+1)), list(range(order+1)))
for k, (i,j) in enumerate(ij):
G[:,k] = x**i * y**j
m = np.linalg.lstsq(G, z)[0]
return m | [
"def",
"polyfit2d",
"(",
"x",
",",
"y",
",",
"z",
",",
"order",
"=",
"3",
"#bounds=None\r",
")",
":",
"ncols",
"=",
"(",
"order",
"+",
"1",
")",
"**",
"2",
"G",
"=",
"np",
".",
"zeros",
"(",
"(",
"x",
".",
"size",
",",
"ncols",
")",
")",
"ij",
"=",
"itertools",
".",
"product",
"(",
"list",
"(",
"range",
"(",
"order",
"+",
"1",
")",
")",
",",
"list",
"(",
"range",
"(",
"order",
"+",
"1",
")",
")",
")",
"for",
"k",
",",
"(",
"i",
",",
"j",
")",
"in",
"enumerate",
"(",
"ij",
")",
":",
"G",
"[",
":",
",",
"k",
"]",
"=",
"x",
"**",
"i",
"*",
"y",
"**",
"j",
"m",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"G",
",",
"z",
")",
"[",
"0",
"]",
"return",
"m"
] | 28.75 | 17.083333 |
def solution(self, x0, y0):
""" Create a solution function ``y(x)`` such that ``y(x0) = y0``.
A list of solution values ``[y(x0), y(x1) ...]`` is returned if the
function is called with a list ``[x0, x1 ...]`` of ``x`` values.
"""
def soln(x):
if numpy.size(x) > 1:
x = [soln.x] + list(x)
ans = self(soln.y, interval=x)
soln.x = x[-1]
soln.y = ans[-1]
return ans
else:
soln.y = self(soln.y, interval=(soln.x, x))
soln.x = x
return soln.y
soln.x = x0
soln.y = y0
return soln | [
"def",
"solution",
"(",
"self",
",",
"x0",
",",
"y0",
")",
":",
"def",
"soln",
"(",
"x",
")",
":",
"if",
"numpy",
".",
"size",
"(",
"x",
")",
">",
"1",
":",
"x",
"=",
"[",
"soln",
".",
"x",
"]",
"+",
"list",
"(",
"x",
")",
"ans",
"=",
"self",
"(",
"soln",
".",
"y",
",",
"interval",
"=",
"x",
")",
"soln",
".",
"x",
"=",
"x",
"[",
"-",
"1",
"]",
"soln",
".",
"y",
"=",
"ans",
"[",
"-",
"1",
"]",
"return",
"ans",
"else",
":",
"soln",
".",
"y",
"=",
"self",
"(",
"soln",
".",
"y",
",",
"interval",
"=",
"(",
"soln",
".",
"x",
",",
"x",
")",
")",
"soln",
".",
"x",
"=",
"x",
"return",
"soln",
".",
"y",
"soln",
".",
"x",
"=",
"x0",
"soln",
".",
"y",
"=",
"y0",
"return",
"soln"
] | 33.55 | 15.85 |
def fillkeys(Recs):
"""
reconciles keys of dictionaries within Recs.
"""
keylist, OutRecs = [], []
for rec in Recs:
for key in list(rec.keys()):
if key not in keylist:
keylist.append(key)
for rec in Recs:
for key in keylist:
if key not in list(rec.keys()):
rec[key] = ""
OutRecs.append(rec)
return OutRecs, keylist | [
"def",
"fillkeys",
"(",
"Recs",
")",
":",
"keylist",
",",
"OutRecs",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"rec",
"in",
"Recs",
":",
"for",
"key",
"in",
"list",
"(",
"rec",
".",
"keys",
"(",
")",
")",
":",
"if",
"key",
"not",
"in",
"keylist",
":",
"keylist",
".",
"append",
"(",
"key",
")",
"for",
"rec",
"in",
"Recs",
":",
"for",
"key",
"in",
"keylist",
":",
"if",
"key",
"not",
"in",
"list",
"(",
"rec",
".",
"keys",
"(",
")",
")",
":",
"rec",
"[",
"key",
"]",
"=",
"\"\"",
"OutRecs",
".",
"append",
"(",
"rec",
")",
"return",
"OutRecs",
",",
"keylist"
] | 27.2 | 9.866667 |
def create_script_fact(self):
"""
appends the CREATE TABLE, index etc to self.ddl_text
"""
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += '-- CREATE Fact Table - ' + self.fact_table + '\n'
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += 'DROP TABLE ' + self.fact_table + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + self.fact_table + ' (\n'
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in self.col_list])
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n' | [
"def",
"create_script_fact",
"(",
"self",
")",
":",
"self",
".",
"ddl_text",
"+=",
"'---------------------------------------------\\n'",
"self",
".",
"ddl_text",
"+=",
"'-- CREATE Fact Table - '",
"+",
"self",
".",
"fact_table",
"+",
"'\\n'",
"self",
".",
"ddl_text",
"+=",
"'---------------------------------------------\\n'",
"self",
".",
"ddl_text",
"+=",
"'DROP TABLE '",
"+",
"self",
".",
"fact_table",
"+",
"' CASCADE CONSTRAINTS;\\n'",
"self",
".",
"ddl_text",
"+=",
"'CREATE TABLE '",
"+",
"self",
".",
"fact_table",
"+",
"' (\\n'",
"self",
".",
"ddl_text",
"+=",
"' '",
".",
"join",
"(",
"[",
"col",
"+",
"' VARCHAR2(200), \\n'",
"for",
"col",
"in",
"self",
".",
"col_list",
"]",
")",
"self",
".",
"ddl_text",
"+=",
"' '",
"+",
"self",
".",
"date_updated_col",
"+",
"' DATE \\n'",
"# + src_table + '; \\n'",
"self",
".",
"ddl_text",
"+=",
"');\\n'"
] | 57.666667 | 25.833333 |
def extrair_logs(self):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.extrair_logs`.
:return: Uma resposta SAT especializada em ``ExtrairLogs``.
:rtype: satcfe.resposta.extrairlogs.RespostaExtrairLogs
"""
resp = self._http_post('extrairlogs')
conteudo = resp.json()
return RespostaExtrairLogs.analisar(conteudo.get('retorno')) | [
"def",
"extrair_logs",
"(",
"self",
")",
":",
"resp",
"=",
"self",
".",
"_http_post",
"(",
"'extrairlogs'",
")",
"conteudo",
"=",
"resp",
".",
"json",
"(",
")",
"return",
"RespostaExtrairLogs",
".",
"analisar",
"(",
"conteudo",
".",
"get",
"(",
"'retorno'",
")",
")"
] | 41.333333 | 16.666667 |
def get_temperature(self):
"""
Returns the current environment temperature.
Attention: Returns None if the value can't be queried or is unknown.
"""
#raise NotImplementedError("This should work according to the AVM docs, but don't...")
value = self.box.homeautoswitch("gettemperature", self.actor_id)
if value.isdigit():
self.temperature = float(value)/10
else:
self.temperature = None
return self.temperature | [
"def",
"get_temperature",
"(",
"self",
")",
":",
"#raise NotImplementedError(\"This should work according to the AVM docs, but don't...\")",
"value",
"=",
"self",
".",
"box",
".",
"homeautoswitch",
"(",
"\"gettemperature\"",
",",
"self",
".",
"actor_id",
")",
"if",
"value",
".",
"isdigit",
"(",
")",
":",
"self",
".",
"temperature",
"=",
"float",
"(",
"value",
")",
"/",
"10",
"else",
":",
"self",
".",
"temperature",
"=",
"None",
"return",
"self",
".",
"temperature"
] | 41.166667 | 17.333333 |
def install_documentation(path="./Litho1pt0-Notebooks"):
"""Install the example notebooks for litho1pt0 in the given location
WARNING: If the path exists, the Notebook files will be written into the path
and will overwrite any existing files with which they collide. The default
path ("./Litho1pt0-Notebooks") is chosen to make collision less likely / problematic
The documentation for litho1pt0 is in the form of jupyter notebooks.
Some dependencies exist for the notebooks to be useful:
- matplotlib: for some diagrams
- cartopy: for plotting map examples
litho1pt0 dependencies are explicitly imported into the notebooks including:
- stripy (for interpolating on the sphere)
- numpy
- scipy (for k-d tree point location)
"""
## Question - overwrite or not ? shutils fails if directory exists.
Notebooks_Path = _pkg_resources.resource_filename('litho1pt0', 'Notebooks')
ct = _dir_util.copy_tree(Notebooks_Path, path, preserve_mode=1, preserve_times=1, preserve_symlinks=1, update=0, verbose=1, dry_run=0)
return | [
"def",
"install_documentation",
"(",
"path",
"=",
"\"./Litho1pt0-Notebooks\"",
")",
":",
"## Question - overwrite or not ? shutils fails if directory exists.",
"Notebooks_Path",
"=",
"_pkg_resources",
".",
"resource_filename",
"(",
"'litho1pt0'",
",",
"'Notebooks'",
")",
"ct",
"=",
"_dir_util",
".",
"copy_tree",
"(",
"Notebooks_Path",
",",
"path",
",",
"preserve_mode",
"=",
"1",
",",
"preserve_times",
"=",
"1",
",",
"preserve_symlinks",
"=",
"1",
",",
"update",
"=",
"0",
",",
"verbose",
"=",
"1",
",",
"dry_run",
"=",
"0",
")",
"return"
] | 37.206897 | 31.586207 |
def __get_gui_handle(self, root_dir):
""" get the filepath and filehandle to the .env file for the environment """
gui_path = os.path.join(root_dir, '.gui')
fh = open(gui_path, "w+")
return (gui_path, fh) | [
"def",
"__get_gui_handle",
"(",
"self",
",",
"root_dir",
")",
":",
"gui_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'.gui'",
")",
"fh",
"=",
"open",
"(",
"gui_path",
",",
"\"w+\"",
")",
"return",
"(",
"gui_path",
",",
"fh",
")"
] | 46.4 | 6 |
def verify(self, obj):
"""Verify that the object conforms to this verifier's schema.
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the object, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
"""
if obj not in self.options:
raise ValidationError("Object is not in list of enumerated options",
reason='not in list of enumerated options', object=obj, options=self.options)
return obj | [
"def",
"verify",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
"not",
"in",
"self",
".",
"options",
":",
"raise",
"ValidationError",
"(",
"\"Object is not in list of enumerated options\"",
",",
"reason",
"=",
"'not in list of enumerated options'",
",",
"object",
"=",
"obj",
",",
"options",
"=",
"self",
".",
"options",
")",
"return",
"obj"
] | 37.529412 | 27.764706 |
def getbyuuid(self, uuid):
"""Get a schema by given uuid.
:param str uuid: schema uuid to retrieve.
:rtype: Schema
:raises: KeyError if uuid is not registered already.
"""
if uuid not in self._schbyuuid:
raise KeyError('uuid {0} not registered'.format(uuid))
return self._schbyuuid[uuid] | [
"def",
"getbyuuid",
"(",
"self",
",",
"uuid",
")",
":",
"if",
"uuid",
"not",
"in",
"self",
".",
"_schbyuuid",
":",
"raise",
"KeyError",
"(",
"'uuid {0} not registered'",
".",
"format",
"(",
"uuid",
")",
")",
"return",
"self",
".",
"_schbyuuid",
"[",
"uuid",
"]"
] | 31.545455 | 15.636364 |
def get_layer_by_name(self, name):
"""Return a layer by name
:param name: Name of layer. Case-sensitive.
:rtype: Layer object if found, otherwise ValueError
"""
try:
return self.layernames[name]
except KeyError:
msg = 'Layer "{0}" not found.'
logger.debug(msg.format(name))
raise ValueError | [
"def",
"get_layer_by_name",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"return",
"self",
".",
"layernames",
"[",
"name",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"'Layer \"{0}\" not found.'",
"logger",
".",
"debug",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"raise",
"ValueError"
] | 31.416667 | 11.416667 |
def get_service_framework_id(
service_name,
inactive=False,
completed=False
):
""" Get the framework ID for a service
:param service_name: the service name
:type service_name: str
:param inactive: whether to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a framework id
:rtype: str, or None
"""
service = get_service(service_name, inactive, completed)
if service is not None and service['id']:
return service['id']
return None | [
"def",
"get_service_framework_id",
"(",
"service_name",
",",
"inactive",
"=",
"False",
",",
"completed",
"=",
"False",
")",
":",
"service",
"=",
"get_service",
"(",
"service_name",
",",
"inactive",
",",
"completed",
")",
"if",
"service",
"is",
"not",
"None",
"and",
"service",
"[",
"'id'",
"]",
":",
"return",
"service",
"[",
"'id'",
"]",
"return",
"None"
] | 26.565217 | 18.521739 |
def getCallSetByName(self, name):
"""
Returns a CallSet with the specified name, or raises a
CallSetNameNotFoundException if it does not exist.
"""
if name not in self._callSetNameMap:
raise exceptions.CallSetNameNotFoundException(name)
return self._callSetNameMap[name] | [
"def",
"getCallSetByName",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_callSetNameMap",
":",
"raise",
"exceptions",
".",
"CallSetNameNotFoundException",
"(",
"name",
")",
"return",
"self",
".",
"_callSetNameMap",
"[",
"name",
"]"
] | 40.375 | 9.375 |
def find_channel_groups(chan):
"""Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.
Parameters
----------
chan : instance of Channels
channels to group
Returns
-------
groups : dict
channel groups: key is the common string, and the item is a list of
labels
"""
labels = chan.return_label()
group_names = {match('([A-Za-z ]+)\d+', label).group(1) for label in labels}
groups = {}
for group_name in group_names:
groups[group_name] = [label for label in labels if label.startswith(group_name)]
return groups | [
"def",
"find_channel_groups",
"(",
"chan",
")",
":",
"labels",
"=",
"chan",
".",
"return_label",
"(",
")",
"group_names",
"=",
"{",
"match",
"(",
"'([A-Za-z ]+)\\d+'",
",",
"label",
")",
".",
"group",
"(",
"1",
")",
"for",
"label",
"in",
"labels",
"}",
"groups",
"=",
"{",
"}",
"for",
"group_name",
"in",
"group_names",
":",
"groups",
"[",
"group_name",
"]",
"=",
"[",
"label",
"for",
"label",
"in",
"labels",
"if",
"label",
".",
"startswith",
"(",
"group_name",
")",
"]",
"return",
"groups"
] | 28.875 | 24.333333 |
def fromxml(node):
"""Static method returning an MetaField instance (any subclass of AbstractMetaField) from the given XML description. Node can be a string or an etree._Element."""
if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access
node = parsexmlstring(node)
if node.tag.lower() != 'meta':
raise Exception("Expected meta tag but got '" + node.tag + "' instead")
key = node.attrib['id']
if node.text:
value = node.text
else:
value = None
operator = 'set'
if 'operator' in node.attrib:
operator= node.attrib['operator']
if operator == 'set':
cls = SetMetaField
elif operator == 'unset':
cls = UnsetMetaField
elif operator == 'copy':
cls = CopyMetaField
elif operator == 'parameter':
cls = ParameterMetaField
return cls(key, value) | [
"def",
"fromxml",
"(",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"ElementTree",
".",
"_Element",
")",
":",
"#pylint: disable=protected-access",
"node",
"=",
"parsexmlstring",
"(",
"node",
")",
"if",
"node",
".",
"tag",
".",
"lower",
"(",
")",
"!=",
"'meta'",
":",
"raise",
"Exception",
"(",
"\"Expected meta tag but got '\"",
"+",
"node",
".",
"tag",
"+",
"\"' instead\"",
")",
"key",
"=",
"node",
".",
"attrib",
"[",
"'id'",
"]",
"if",
"node",
".",
"text",
":",
"value",
"=",
"node",
".",
"text",
"else",
":",
"value",
"=",
"None",
"operator",
"=",
"'set'",
"if",
"'operator'",
"in",
"node",
".",
"attrib",
":",
"operator",
"=",
"node",
".",
"attrib",
"[",
"'operator'",
"]",
"if",
"operator",
"==",
"'set'",
":",
"cls",
"=",
"SetMetaField",
"elif",
"operator",
"==",
"'unset'",
":",
"cls",
"=",
"UnsetMetaField",
"elif",
"operator",
"==",
"'copy'",
":",
"cls",
"=",
"CopyMetaField",
"elif",
"operator",
"==",
"'parameter'",
":",
"cls",
"=",
"ParameterMetaField",
"return",
"cls",
"(",
"key",
",",
"value",
")"
] | 39.541667 | 13.791667 |
def list_pkgs(versions_as_list=False,
jail=None,
chroot=None,
root=None,
with_origin=False,
**kwargs):
'''
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
jail
List the packages in the specified jail
chroot
List the packages in the specified chroot (ignored if ``jail`` is
specified)
root
List the packages in the specified root (ignored if ``jail`` is
specified)
with_origin : False
Return a nested dictionary containing both the origin name and version
for each installed package.
.. versionadded:: 2014.1.0
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs jail=<jail name or id>
salt '*' pkg.list_pkgs chroot=/path/to/chroot
'''
# not yet implemented or not applicable
if any([salt.utils.data.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
versions_as_list = salt.utils.data.is_true(versions_as_list)
contextkey_pkg = _contextkey(jail, chroot, root)
contextkey_origins = _contextkey(jail, chroot, root, prefix='pkg.origin')
if contextkey_pkg in __context__:
ret = copy.deepcopy(__context__[contextkey_pkg])
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
if salt.utils.data.is_true(with_origin):
origins = __context__.get(contextkey_origins, {})
return dict([
(x, {'origin': origins.get(x, ''), 'version': y})
for x, y in six.iteritems(ret)
])
return ret
ret = {}
origins = {}
out = __salt__['cmd.run_stdout'](
_pkg(jail, chroot, root) + ['info', '-ao'],
output_loglevel='trace',
python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
if not line:
continue
try:
pkg, origin = line.split()
pkgname, pkgver = pkg.rsplit('-', 1)
except ValueError:
continue
__salt__['pkg_resource.add_pkg'](ret, pkgname, pkgver)
origins[pkgname] = origin
__salt__['pkg_resource.sort_pkglist'](ret)
__context__[contextkey_pkg] = copy.deepcopy(ret)
__context__[contextkey_origins] = origins
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
if salt.utils.data.is_true(with_origin):
return dict([
(x, {'origin': origins.get(x, ''), 'version': y})
for x, y in six.iteritems(ret)
])
return ret | [
"def",
"list_pkgs",
"(",
"versions_as_list",
"=",
"False",
",",
"jail",
"=",
"None",
",",
"chroot",
"=",
"None",
",",
"root",
"=",
"None",
",",
"with_origin",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# not yet implemented or not applicable",
"if",
"any",
"(",
"[",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"kwargs",
".",
"get",
"(",
"x",
")",
")",
"for",
"x",
"in",
"(",
"'removed'",
",",
"'purge_desired'",
")",
"]",
")",
":",
"return",
"{",
"}",
"versions_as_list",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"versions_as_list",
")",
"contextkey_pkg",
"=",
"_contextkey",
"(",
"jail",
",",
"chroot",
",",
"root",
")",
"contextkey_origins",
"=",
"_contextkey",
"(",
"jail",
",",
"chroot",
",",
"root",
",",
"prefix",
"=",
"'pkg.origin'",
")",
"if",
"contextkey_pkg",
"in",
"__context__",
":",
"ret",
"=",
"copy",
".",
"deepcopy",
"(",
"__context__",
"[",
"contextkey_pkg",
"]",
")",
"if",
"not",
"versions_as_list",
":",
"__salt__",
"[",
"'pkg_resource.stringify'",
"]",
"(",
"ret",
")",
"if",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"with_origin",
")",
":",
"origins",
"=",
"__context__",
".",
"get",
"(",
"contextkey_origins",
",",
"{",
"}",
")",
"return",
"dict",
"(",
"[",
"(",
"x",
",",
"{",
"'origin'",
":",
"origins",
".",
"get",
"(",
"x",
",",
"''",
")",
",",
"'version'",
":",
"y",
"}",
")",
"for",
"x",
",",
"y",
"in",
"six",
".",
"iteritems",
"(",
"ret",
")",
"]",
")",
"return",
"ret",
"ret",
"=",
"{",
"}",
"origins",
"=",
"{",
"}",
"out",
"=",
"__salt__",
"[",
"'cmd.run_stdout'",
"]",
"(",
"_pkg",
"(",
"jail",
",",
"chroot",
",",
"root",
")",
"+",
"[",
"'info'",
",",
"'-ao'",
"]",
",",
"output_loglevel",
"=",
"'trace'",
",",
"python_shell",
"=",
"False",
")",
"for",
"line",
"in",
"salt",
".",
"utils",
".",
"itertools",
".",
"split",
"(",
"out",
",",
"'\\n'",
")",
":",
"if",
"not",
"line",
":",
"continue",
"try",
":",
"pkg",
",",
"origin",
"=",
"line",
".",
"split",
"(",
")",
"pkgname",
",",
"pkgver",
"=",
"pkg",
".",
"rsplit",
"(",
"'-'",
",",
"1",
")",
"except",
"ValueError",
":",
"continue",
"__salt__",
"[",
"'pkg_resource.add_pkg'",
"]",
"(",
"ret",
",",
"pkgname",
",",
"pkgver",
")",
"origins",
"[",
"pkgname",
"]",
"=",
"origin",
"__salt__",
"[",
"'pkg_resource.sort_pkglist'",
"]",
"(",
"ret",
")",
"__context__",
"[",
"contextkey_pkg",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"ret",
")",
"__context__",
"[",
"contextkey_origins",
"]",
"=",
"origins",
"if",
"not",
"versions_as_list",
":",
"__salt__",
"[",
"'pkg_resource.stringify'",
"]",
"(",
"ret",
")",
"if",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"with_origin",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"x",
",",
"{",
"'origin'",
":",
"origins",
".",
"get",
"(",
"x",
",",
"''",
")",
",",
"'version'",
":",
"y",
"}",
")",
"for",
"x",
",",
"y",
"in",
"six",
".",
"iteritems",
"(",
"ret",
")",
"]",
")",
"return",
"ret"
] | 30.564706 | 19.905882 |
def main():
"""
NAME
mst_magic.py
DESCRIPTION
converts MsT data (T,M) to measurements format files
SYNTAX
mst_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify T,M format input file, required
-spn SPEC: specimen name, required
-fsa SFILE: name with sample, site, location information
-F FILE: specify output file, default is measurements.txt
-dc H: specify applied field during measurement, default is 0.5 T
-DM NUM: output to MagIC data model 2.5 or 3, default 3
-syn : This is a synthetic specimen and has no sample/site/location information
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
INPUT files:
T M: T is in Centigrade and M is uncalibrated magnitude
"""
#
# get command line arguments
#
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", ".")
user = pmag.get_named_arg("-usr", "")
labfield = pmag.get_named_arg("-dc", '0.5')
meas_file = pmag.get_named_arg("-F", "measurements.txt")
samp_file = pmag.get_named_arg("-fsa", "samples.txt")
try:
infile = pmag.get_named_arg("-f", reqd=True)
except pmag.MissingCommandLineArgException:
print(main.__doc__)
print("-f is required option")
sys.exit()
specnum = int(pmag.get_named_arg("-spc", 0))
location = pmag.get_named_arg("-loc", "")
specimen_name = pmag.get_named_arg("-spn", reqd=True)
syn = 0
if "-syn" in args:
syn = 1
samp_con = pmag.get_named_arg("-ncn", "1")
if "-ncn" in args:
ind = args.index("-ncn")
samp_con = sys.argv[ind+1]
data_model_num = int(pmag.get_named_arg("-DM", 3))
convert.mst(infile, specimen_name, dir_path, "", meas_file, samp_file,
user, specnum, samp_con, labfield, location, syn, data_model_num) | [
"def",
"main",
"(",
")",
":",
"#",
"# get command line arguments",
"#",
"args",
"=",
"sys",
".",
"argv",
"if",
"\"-h\"",
"in",
"args",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"dir_path",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-WD\"",
",",
"\".\"",
")",
"user",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-usr\"",
",",
"\"\"",
")",
"labfield",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-dc\"",
",",
"'0.5'",
")",
"meas_file",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-F\"",
",",
"\"measurements.txt\"",
")",
"samp_file",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-fsa\"",
",",
"\"samples.txt\"",
")",
"try",
":",
"infile",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-f\"",
",",
"reqd",
"=",
"True",
")",
"except",
"pmag",
".",
"MissingCommandLineArgException",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"print",
"(",
"\"-f is required option\"",
")",
"sys",
".",
"exit",
"(",
")",
"specnum",
"=",
"int",
"(",
"pmag",
".",
"get_named_arg",
"(",
"\"-spc\"",
",",
"0",
")",
")",
"location",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-loc\"",
",",
"\"\"",
")",
"specimen_name",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-spn\"",
",",
"reqd",
"=",
"True",
")",
"syn",
"=",
"0",
"if",
"\"-syn\"",
"in",
"args",
":",
"syn",
"=",
"1",
"samp_con",
"=",
"pmag",
".",
"get_named_arg",
"(",
"\"-ncn\"",
",",
"\"1\"",
")",
"if",
"\"-ncn\"",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-ncn\"",
")",
"samp_con",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"data_model_num",
"=",
"int",
"(",
"pmag",
".",
"get_named_arg",
"(",
"\"-DM\"",
",",
"3",
")",
")",
"convert",
".",
"mst",
"(",
"infile",
",",
"specimen_name",
",",
"dir_path",
",",
"\"\"",
",",
"meas_file",
",",
"samp_file",
",",
"user",
",",
"specnum",
",",
"samp_con",
",",
"labfield",
",",
"location",
",",
"syn",
",",
"data_model_num",
")"
] | 41.652778 | 22.875 |
def get_order_line_item_by_id(cls, order_line_item_id, **kwargs):
"""Find OrderLineItem
Return single instance of OrderLineItem by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_order_line_item_by_id(order_line_item_id, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to return (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs)
else:
(data) = cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs)
return data | [
"def",
"get_order_line_item_by_id",
"(",
"cls",
",",
"order_line_item_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_get_order_line_item_by_id_with_http_info",
"(",
"order_line_item_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_get_order_line_item_by_id_with_http_info",
"(",
"order_line_item_id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 45.142857 | 22.761905 |
def constraints(self):
"""
Returns full :class:`list` of :class:`Constraint <cqparts.constraint.Constraint>` instances, after
a successful :meth:`build`
:return: list of named :class:`Constraint <cqparts.constraint.Constraint>` instances
:rtype: :class:`list`
For more information read about the :ref:`parts_assembly-build-cycle` .
"""
if self._constraints is None:
self.build(recursive=False)
return self._constraints | [
"def",
"constraints",
"(",
"self",
")",
":",
"if",
"self",
".",
"_constraints",
"is",
"None",
":",
"self",
".",
"build",
"(",
"recursive",
"=",
"False",
")",
"return",
"self",
".",
"_constraints"
] | 37.846154 | 21.846154 |
def safe_joinall(greenlets, timeout=None, raise_error=False):
    """
    Wrapper around ``gevent.joinall``: if the greenlet waiting on the joins
    is killed, every greenlet it was joining on is killed as well.
    """
    greenlets = list(greenlets)
    try:
        gevent.joinall(greenlets, timeout=timeout, raise_error=raise_error)
    except gevent.GreenletExit:
        # The joining greenlet was killed: propagate the kill to every
        # greenlet that has not finished yet, then re-raise.
        for greenlet in greenlets:
            if not greenlet.ready():
                greenlet.kill()
        raise
    return greenlets
"def",
"safe_joinall",
"(",
"greenlets",
",",
"timeout",
"=",
"None",
",",
"raise_error",
"=",
"False",
")",
":",
"greenlets",
"=",
"list",
"(",
"greenlets",
")",
"try",
":",
"gevent",
".",
"joinall",
"(",
"greenlets",
",",
"timeout",
"=",
"timeout",
",",
"raise_error",
"=",
"raise_error",
")",
"except",
"gevent",
".",
"GreenletExit",
":",
"[",
"greenlet",
".",
"kill",
"(",
")",
"for",
"greenlet",
"in",
"greenlets",
"if",
"not",
"greenlet",
".",
"ready",
"(",
")",
"]",
"raise",
"return",
"greenlets"
] | 37.833333 | 23.833333 |
def random(self, namespace=0):
    """
    Return the query string for fetching a random page.
    """
    # Base 'list' API query using the random generator module.
    query = self.LIST.substitute(
        WIKI=self.uri,
        ENDPOINT=self.endpoint,
        LIST='random')
    query += "&rnlimit=1&rnnamespace=%d" % namespace
    # Food emoji used as a playful status marker for this action.
    emoji = [
        u'\U0001f32f',  # burrito or wrap
        u'\U0001f355',  # slice of pizza
        u'\U0001f35c',  # steaming bowl of ramen
        u'\U0001f363',  # sushi
        u'\U0001f369',  # doughnut
        u'\U0001f36a',  # cookie
        u'\U0001f36d',  # lollipop
        u'\U0001f370',  # strawberry shortcake
    ]
    action = 'random:%d' % namespace if namespace else 'random'
    self.set_status(action, random.choice(emoji))
    return query
"def",
"random",
"(",
"self",
",",
"namespace",
"=",
"0",
")",
":",
"query",
"=",
"self",
".",
"LIST",
".",
"substitute",
"(",
"WIKI",
"=",
"self",
".",
"uri",
",",
"ENDPOINT",
"=",
"self",
".",
"endpoint",
",",
"LIST",
"=",
"'random'",
")",
"query",
"+=",
"\"&rnlimit=1&rnnamespace=%d\"",
"%",
"namespace",
"emoji",
"=",
"[",
"u'\\U0001f32f'",
",",
"# burrito or wrap",
"u'\\U0001f355'",
",",
"# slice of pizza",
"u'\\U0001f35c'",
",",
"# steaming bowl of ramen",
"u'\\U0001f363'",
",",
"# sushi",
"u'\\U0001f369'",
",",
"# doughnut",
"u'\\U0001f36a'",
",",
"# cookie",
"u'\\U0001f36d'",
",",
"# lollipop",
"u'\\U0001f370'",
",",
"# strawberry shortcake",
"]",
"action",
"=",
"'random'",
"if",
"namespace",
":",
"action",
"=",
"'random:%d'",
"%",
"namespace",
"self",
".",
"set_status",
"(",
"action",
",",
"random",
".",
"choice",
"(",
"emoji",
")",
")",
"return",
"query"
] | 28.678571 | 14.107143 |
def _statsd_address(self):
    """Return the address of the statsd server stats are sent to.

    :return: tuple(host, port)
    """
    # Fall back to the class-level defaults when the 'statsd' settings
    # section is missing or does not override host/port.
    statsd = self.application.settings.get('statsd', {})
    return (statsd.get('host', self.STATSD_HOST),
            statsd.get('port', self.STATSD_PORT))
"def",
"_statsd_address",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"application",
".",
"settings",
".",
"get",
"(",
"'statsd'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'host'",
",",
"self",
".",
"STATSD_HOST",
")",
",",
"self",
".",
"application",
".",
"settings",
".",
"get",
"(",
"'statsd'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'port'",
",",
"self",
".",
"STATSD_PORT",
")",
")"
] | 41.230769 | 19.923077 |
def register_plugin(self, name, plugin):
    """
    Register ``plugin`` under ``name`` as an active plugin, replacing any
    plugin already registered with that name, and expose it as an
    attribute of ``self``.
    """
    # An existing plugin with the same name must be released first.
    if self.has_plugin(name):
        self.release_plugin(name)
    self._active_plugins[name] = plugin
    setattr(self, name, plugin)
    return plugin
"def",
"register_plugin",
"(",
"self",
",",
"name",
",",
"plugin",
")",
":",
"if",
"self",
".",
"has_plugin",
"(",
"name",
")",
":",
"self",
".",
"release_plugin",
"(",
"name",
")",
"self",
".",
"_active_plugins",
"[",
"name",
"]",
"=",
"plugin",
"setattr",
"(",
"self",
",",
"name",
",",
"plugin",
")",
"return",
"plugin"
] | 34.222222 | 8.222222 |
def generate_py_units(data):
    """Generate the list of units in units.py."""
    # Bucket the known unit types by race, keeping name order stable.
    units = collections.defaultdict(list)
    for unit in sorted(data.units, key=lambda u: u.name):
        if unit.unit_id in static_data.UNIT_TYPES:
            units[unit.race].append(unit)

    def print_race(name, race):
        # Emit one IntEnum class per race with a member per unit.
        print("class %s(enum.IntEnum):" % name)
        print('  """%s units."""' % name)
        for unit in units[race]:
            print("  %s = %s" % (unit.name, unit.unit_id))
        print("\n")

    print_race("Neutral", sc_common.NoRace)
    print_race("Protoss", sc_common.Protoss)
    print_race("Terran", sc_common.Terran)
    print_race("Zerg", sc_common.Zerg)
"def",
"generate_py_units",
"(",
"data",
")",
":",
"units",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"unit",
"in",
"sorted",
"(",
"data",
".",
"units",
",",
"key",
"=",
"lambda",
"a",
":",
"a",
".",
"name",
")",
":",
"if",
"unit",
".",
"unit_id",
"in",
"static_data",
".",
"UNIT_TYPES",
":",
"units",
"[",
"unit",
".",
"race",
"]",
".",
"append",
"(",
"unit",
")",
"def",
"print_race",
"(",
"name",
",",
"race",
")",
":",
"print",
"(",
"\"class %s(enum.IntEnum):\"",
"%",
"name",
")",
"print",
"(",
"' \"\"\"%s units.\"\"\"'",
"%",
"name",
")",
"for",
"unit",
"in",
"units",
"[",
"race",
"]",
":",
"print",
"(",
"\" %s = %s\"",
"%",
"(",
"unit",
".",
"name",
",",
"unit",
".",
"unit_id",
")",
")",
"print",
"(",
"\"\\n\"",
")",
"print_race",
"(",
"\"Neutral\"",
",",
"sc_common",
".",
"NoRace",
")",
"print_race",
"(",
"\"Protoss\"",
",",
"sc_common",
".",
"Protoss",
")",
"print_race",
"(",
"\"Terran\"",
",",
"sc_common",
".",
"Terran",
")",
"print_race",
"(",
"\"Zerg\"",
",",
"sc_common",
".",
"Zerg",
")"
] | 34.055556 | 10.5 |
def netconf_state_statistics_out_notifications(self, **kwargs):
    """Auto Generated Code

    Build a <config> element carrying
    netconf-state/statistics/out-notifications (value taken from the
    required ``out_notifications`` kwarg) and pass it to the callback.
    """
    cfg = ET.Element("config")
    state_el = ET.SubElement(
        cfg, "netconf-state",
        xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
    stats_el = ET.SubElement(state_el, "statistics")
    notif_el = ET.SubElement(stats_el, "out-notifications")
    notif_el.text = kwargs.pop('out_notifications')
    # Caller may override the default callback via kwargs.
    callback = kwargs.pop('callback', self._callback)
    return callback(cfg)
"def",
"netconf_state_statistics_out_notifications",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"netconf_state",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"netconf-state\"",
",",
"xmlns",
"=",
"\"urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring\"",
")",
"statistics",
"=",
"ET",
".",
"SubElement",
"(",
"netconf_state",
",",
"\"statistics\"",
")",
"out_notifications",
"=",
"ET",
".",
"SubElement",
"(",
"statistics",
",",
"\"out-notifications\"",
")",
"out_notifications",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'out_notifications'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 50.272727 | 23.272727 |
def GetNeighbors(ID, model=None, neighbors=None, mag_range=None,
                 cdpp_range=None, aperture_name=None,
                 cadence='lc', **kwargs):
    '''
    Return `neighbors` random bright stars on the same module as `EPIC`.

    :param int ID: The target ID number
    :param str model: The :py:obj:`everest` model name. Only used when imposing CDPP bounds. Default :py:obj:`None`
    :param int neighbors: Number of neighbors to return. Default None
    :param str aperture_name: The name of the aperture to use. Select `custom` to call \
           :py:func:`GetCustomAperture`. Default :py:obj:`None`
    :param str cadence: The light curve cadence. Default `lc`
    :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. Default :py:obj:`None`
    :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. Default :py:obj:`None`
    '''
    # Placeholder: neighbor lookup is not implemented for this mission.
    raise NotImplementedError('This mission is not yet supported.')
"def",
"GetNeighbors",
"(",
"ID",
",",
"model",
"=",
"None",
",",
"neighbors",
"=",
"None",
",",
"mag_range",
"=",
"None",
",",
"cdpp_range",
"=",
"None",
",",
"aperture_name",
"=",
"None",
",",
"cadence",
"=",
"'lc'",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"NotImplementedError",
"(",
"'This mission is not yet supported.'",
")"
] | 52.222222 | 33 |
def get_east_asian_width_property(value, is_bytes=False):
    """Get `EAST ASIAN WIDTH` property."""
    # Pick the ASCII or full Unicode table depending on the target mode.
    table = unidata.ascii_east_asian_width if is_bytes else unidata.unicode_east_asian_width
    alias = unidata.unicode_alias['eastasianwidth']
    if value.startswith('^'):
        # Negated property: canonicalize the name after the '^' marker.
        target = value[1:]
        key = '^' + alias.get(target, target)
    else:
        key = alias.get(value, value)
    return table[key]
"def",
"get_east_asian_width_property",
"(",
"value",
",",
"is_bytes",
"=",
"False",
")",
":",
"obj",
"=",
"unidata",
".",
"ascii_east_asian_width",
"if",
"is_bytes",
"else",
"unidata",
".",
"unicode_east_asian_width",
"if",
"value",
".",
"startswith",
"(",
"'^'",
")",
":",
"negated",
"=",
"value",
"[",
"1",
":",
"]",
"value",
"=",
"'^'",
"+",
"unidata",
".",
"unicode_alias",
"[",
"'eastasianwidth'",
"]",
".",
"get",
"(",
"negated",
",",
"negated",
")",
"else",
":",
"value",
"=",
"unidata",
".",
"unicode_alias",
"[",
"'eastasianwidth'",
"]",
".",
"get",
"(",
"value",
",",
"value",
")",
"return",
"obj",
"[",
"value",
"]"
] | 35.916667 | 28.083333 |
def sell(self, no, price, value):
    ''' Sell (賣): credit the proceeds, reduce the holding for ``no`` and
    record the sale price under ``avgprice[no]['sell']``.

    :param no: instrument identifier (dict key)
    :param price: unit sale price
    :param value: quantity sold
    '''
    # Proceeds from the sale.
    self.money += price * value
    # A sale reduces the position; a missing entry starts from 0.
    # (Replaces the original bare `except:` fallback, which would also have
    # swallowed unrelated errors such as KeyboardInterrupt.)
    self.store[no] = self.store.get(no, 0) - value
    # Create the nested containers on first use, then append the price —
    # equivalent to the original's nested try/except KeyError pyramid.
    self.avgprice.setdefault(no, {}).setdefault('sell', []).append(price)
"def",
"sell",
"(",
"self",
",",
"no",
",",
"price",
",",
"value",
")",
":",
"self",
".",
"money",
"+=",
"price",
"*",
"value",
"try",
":",
"self",
".",
"store",
"[",
"no",
"]",
"+=",
"-",
"value",
"except",
":",
"self",
".",
"store",
"[",
"no",
"]",
"=",
"-",
"value",
"try",
":",
"self",
".",
"avgprice",
"[",
"no",
"]",
"[",
"'sell'",
"]",
"+=",
"[",
"price",
"]",
"except",
":",
"try",
":",
"self",
".",
"avgprice",
"[",
"no",
"]",
"[",
"'sell'",
"]",
"=",
"[",
"price",
"]",
"except",
":",
"self",
".",
"avgprice",
"[",
"no",
"]",
"=",
"{",
"}",
"self",
".",
"avgprice",
"[",
"no",
"]",
"[",
"'sell'",
"]",
"=",
"[",
"price",
"]"
] | 23.533333 | 17.533333 |
def _loop_wrapper_func(func, args, shared_mem_run, shared_mem_pause, interval, sigint, sigterm, name,
                       logging_level, conn_send, func_running, log_queue):
    """
    Loop body executed as a separate process (that's why this function is
    declared static).

    Repeatedly calls ``func(*args)`` every ``interval`` seconds until one of:
    ``shared_mem_run`` is cleared by the parent, ``func`` returns True,
    ``func`` raises (exits with code -1), or a handled signal raises
    ``LoopInterruptError``.  While ``shared_mem_pause`` is set, ``func`` is
    not called and the loop merely sleeps.
    """
    prefix = get_identifier(name) + ' '
    # Rebind the module-level logger so records from this child process carry
    # the loop's identifier and flow through the shared log queue.
    global log
    log = logging.getLogger(__name__+".log_{}".format(get_identifier(name, bold=False)))
    log.setLevel(logging_level)
    log.addHandler(QueueHandler(log_queue))
    # Redirect stdout through the pipe back to the parent process.
    sys.stdout = StdoutPipe(conn_send)
    log.debug("enter wrapper_func")
    # Install SIGINT/SIGTERM handling for this loop process.
    SIG_handler_Loop(sigint, sigterm, log, prefix)
    # Signal the parent that the loop body is now live.
    func_running.value = True
    error = False
    while shared_mem_run.value:
        try:
            # in pause mode, simply sleep
            if shared_mem_pause.value:
                quit_loop = False
            else:
                # if not pause mode -> call func and see what happens
                try:
                    quit_loop = func(*args)
                except LoopInterruptError:
                    raise
                except Exception as e:
                    log.error("error %s occurred in loop calling 'func(*args)'", type(e))
                    log.info("show traceback.print_exc()\n%s", traceback.format_exc())
                    error = True
                    break
            if quit_loop is True:
                log.debug("loop stooped because func returned True")
                break
            time.sleep(interval)
        except LoopInterruptError:
            # Raised by the signal handler: leave the loop promptly.
            log.debug("quit wrapper_func due to InterruptedError")
            break
    func_running.value = False
    if error:
        # Non-zero exit code tells the parent the wrapped func failed.
        sys.exit(-1)
    else:
        log.debug("wrapper_func terminates gracefully")
    # gets rid of the following warnings
    #   Exception ignored in: <_io.FileIO name='/dev/null' mode='rb'>
    #   ResourceWarning: unclosed file <_io.TextIOWrapper name='/dev/null' mode='r' encoding='UTF-8'>
    try:
        if mp.get_start_method() == "spawn":
            sys.stdin.close()
    except AttributeError:
        # mp.get_start_method() does not exist on older Python versions.
        pass
"def",
"_loop_wrapper_func",
"(",
"func",
",",
"args",
",",
"shared_mem_run",
",",
"shared_mem_pause",
",",
"interval",
",",
"sigint",
",",
"sigterm",
",",
"name",
",",
"logging_level",
",",
"conn_send",
",",
"func_running",
",",
"log_queue",
")",
":",
"prefix",
"=",
"get_identifier",
"(",
"name",
")",
"+",
"' '",
"global",
"log",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
"+",
"\".log_{}\"",
".",
"format",
"(",
"get_identifier",
"(",
"name",
",",
"bold",
"=",
"False",
")",
")",
")",
"log",
".",
"setLevel",
"(",
"logging_level",
")",
"log",
".",
"addHandler",
"(",
"QueueHandler",
"(",
"log_queue",
")",
")",
"sys",
".",
"stdout",
"=",
"StdoutPipe",
"(",
"conn_send",
")",
"log",
".",
"debug",
"(",
"\"enter wrapper_func\"",
")",
"SIG_handler_Loop",
"(",
"sigint",
",",
"sigterm",
",",
"log",
",",
"prefix",
")",
"func_running",
".",
"value",
"=",
"True",
"error",
"=",
"False",
"while",
"shared_mem_run",
".",
"value",
":",
"try",
":",
"# in pause mode, simply sleep",
"if",
"shared_mem_pause",
".",
"value",
":",
"quit_loop",
"=",
"False",
"else",
":",
"# if not pause mode -> call func and see what happens",
"try",
":",
"quit_loop",
"=",
"func",
"(",
"*",
"args",
")",
"except",
"LoopInterruptError",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"error %s occurred in loop calling 'func(*args)'\"",
",",
"type",
"(",
"e",
")",
")",
"log",
".",
"info",
"(",
"\"show traceback.print_exc()\\n%s\"",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"error",
"=",
"True",
"break",
"if",
"quit_loop",
"is",
"True",
":",
"log",
".",
"debug",
"(",
"\"loop stooped because func returned True\"",
")",
"break",
"time",
".",
"sleep",
"(",
"interval",
")",
"except",
"LoopInterruptError",
":",
"log",
".",
"debug",
"(",
"\"quit wrapper_func due to InterruptedError\"",
")",
"break",
"func_running",
".",
"value",
"=",
"False",
"if",
"error",
":",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"else",
":",
"log",
".",
"debug",
"(",
"\"wrapper_func terminates gracefully\"",
")",
"# gets rid of the following warnings",
"# Exception ignored in: <_io.FileIO name='/dev/null' mode='rb'>",
"# ResourceWarning: unclosed file <_io.TextIOWrapper name='/dev/null' mode='r' encoding='UTF-8'>",
"try",
":",
"if",
"mp",
".",
"get_start_method",
"(",
")",
"==",
"\"spawn\"",
":",
"sys",
".",
"stdin",
".",
"close",
"(",
")",
"except",
"AttributeError",
":",
"pass"
] | 33.704918 | 21.737705 |
def build(self):
    '''Constructs the term doc matrix.

    Returns
    -------
    TermDocMatrix
    '''
    # Fresh factories and index stores shared with the parse pipeline.
    X_factory = CSRMatrixFactory()
    mX_factory = CSRMatrixFactory()
    term_idx_store = IndexStore()
    metadata_idx_store = IndexStore()
    parse_pipeline = ParsePipelineFactoryWithoutCategories(
        self.get_nlp(), X_factory, mX_factory,
        term_idx_store, metadata_idx_store, self)
    df = self._clean_and_filter_nulls_and_empties_from_dataframe()
    return self._apply_pipeline_and_get_build_instance(
        X_factory, mX_factory, df, parse_pipeline,
        term_idx_store, metadata_idx_store)
"def",
"build",
"(",
"self",
")",
":",
"X_factory",
"=",
"CSRMatrixFactory",
"(",
")",
"mX_factory",
"=",
"CSRMatrixFactory",
"(",
")",
"term_idx_store",
"=",
"IndexStore",
"(",
")",
"metadata_idx_store",
"=",
"IndexStore",
"(",
")",
"parse_pipeline",
"=",
"ParsePipelineFactoryWithoutCategories",
"(",
"self",
".",
"get_nlp",
"(",
")",
",",
"X_factory",
",",
"mX_factory",
",",
"term_idx_store",
",",
"metadata_idx_store",
",",
"self",
")",
"df",
"=",
"self",
".",
"_clean_and_filter_nulls_and_empties_from_dataframe",
"(",
")",
"tdm",
"=",
"self",
".",
"_apply_pipeline_and_get_build_instance",
"(",
"X_factory",
",",
"mX_factory",
",",
"df",
",",
"parse_pipeline",
",",
"term_idx_store",
",",
"metadata_idx_store",
")",
"return",
"tdm"
] | 42.666667 | 22.666667 |
def return_job(self, job):
    """
    Return a job to its source job set so it can be run again later.
    """
    if self._closed:
        return
    js = self._job_sources[job]
    if self._ready_callbacks:
        # Someone is already waiting for a job: hand it straight over.
        self._ready_callbacks.popleft()(job)
    else:
        del self._job_sources[job]
        js.return_job(job)
"def",
"return_job",
"(",
"self",
",",
"job",
")",
":",
"if",
"self",
".",
"_closed",
":",
"return",
"js",
"=",
"self",
".",
"_job_sources",
"[",
"job",
"]",
"if",
"len",
"(",
"self",
".",
"_ready_callbacks",
")",
">",
"0",
":",
"callback",
"=",
"self",
".",
"_ready_callbacks",
".",
"popleft",
"(",
")",
"callback",
"(",
"job",
")",
"else",
":",
"del",
"self",
".",
"_job_sources",
"[",
"job",
"]",
"js",
".",
"return_job",
"(",
"job",
")"
] | 26.2 | 15.533333 |
def get_current(self):
    """
    Return a Panel holding the data currently in view. It is not safe to
    persist these objects because internal data might change.
    """
    # Window of the ring buffer currently considered "in view".
    window = slice(self._oldest_frame_idx(), self._pos)
    dates = pd.DatetimeIndex(deepcopy(self.date_buf[window]), tz='utc')
    return pd.Panel(self.buffer.values[:, window, :], self.items,
                    dates, self.minor_axis, dtype=self.dtype)
"def",
"get_current",
"(",
"self",
")",
":",
"where",
"=",
"slice",
"(",
"self",
".",
"_oldest_frame_idx",
"(",
")",
",",
"self",
".",
"_pos",
")",
"major_axis",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"deepcopy",
"(",
"self",
".",
"date_buf",
"[",
"where",
"]",
")",
",",
"tz",
"=",
"'utc'",
")",
"return",
"pd",
".",
"Panel",
"(",
"self",
".",
"buffer",
".",
"values",
"[",
":",
",",
"where",
",",
":",
"]",
",",
"self",
".",
"items",
",",
"major_axis",
",",
"self",
".",
"minor_axis",
",",
"dtype",
"=",
"self",
".",
"dtype",
")"
] | 45.4 | 22.8 |
def _get_version_mode(self, mode=None):
    """Return the VersionMode registered under ``mode``, creating it on demand.

    A ``mode`` of None designates the 'base' mode.
    """
    vm = self._version_modes.get(mode)
    if not vm:
        # First request for this mode: create and register it.
        vm = self._version_modes[mode] = VersionMode(name=mode)
    return vm
"def",
"_get_version_mode",
"(",
"self",
",",
"mode",
"=",
"None",
")",
":",
"version_mode",
"=",
"self",
".",
"_version_modes",
".",
"get",
"(",
"mode",
")",
"if",
"not",
"version_mode",
":",
"version_mode",
"=",
"self",
".",
"_version_modes",
"[",
"mode",
"]",
"=",
"VersionMode",
"(",
"name",
"=",
"mode",
")",
"return",
"version_mode"
] | 38.777778 | 15.777778 |
def get_objective_lookup_session_for_objective_bank(self, objective_bank_id=None):
    """Gets the OsidSession associated with the objective lookup
    service for the given objective bank.

    arg:    objectiveBankId (osid.id.Id): the Id of the objective bank
    return: (osid.learning.ObjectiveLookupSession) - an
            ObjectiveLookupSession
    raise:  NotFound - objectiveBankId not found
    raise:  NullArgument - objectiveBankId is null
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_objective_lookup() or
            supports_visible_federation() is false
    compliance: optional - This method must be implemented if
            supports_objective_lookup() and
            supports_visible_federation() are true.
    """
    # Guard clauses: required argument and supported service.
    if not objective_bank_id:
        raise NullArgument
    if not self.supports_objective_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        return sessions.ObjectiveLookupSession(objective_bank_id, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
"def",
"get_objective_lookup_session_for_objective_bank",
"(",
"self",
",",
"objective_bank_id",
"=",
"None",
")",
":",
"if",
"not",
"objective_bank_id",
":",
"raise",
"NullArgument",
"if",
"not",
"self",
".",
"supports_objective_lookup",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"raise",
"OperationFailed",
"(",
")",
"try",
":",
"session",
"=",
"sessions",
".",
"ObjectiveLookupSession",
"(",
"objective_bank_id",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"except",
"AttributeError",
":",
"raise",
"OperationFailed",
"(",
")",
"return",
"session"
] | 41.774194 | 17.225806 |
def princomp(x):
"""Determine the principal components of a vector of measurements
Determine the principal components of a vector of measurements
x should be a M x N numpy array composed of M observations of n variables
The output is:
coeffs - the NxN correlation matrix that can be used to transform x into its components
The code for this function is based on "A Tutorial on Principal Component
Analysis", Shlens, 2005 http://www.snl.salk.edu/~shlens/pub/notes/pca.pdf
(unpublished)
"""
(M,N) = x.shape
Mean = x.mean(0)
y = x - Mean
cov = numpy.dot(y.transpose(),y) / (M-1)
(V,PC) = numpy.linalg.eig(cov)
order = (-V).argsort()
coeff = PC[:,order]
return coeff | [
"def",
"princomp",
"(",
"x",
")",
":",
"(",
"M",
",",
"N",
")",
"=",
"x",
".",
"shape",
"Mean",
"=",
"x",
".",
"mean",
"(",
"0",
")",
"y",
"=",
"x",
"-",
"Mean",
"cov",
"=",
"numpy",
".",
"dot",
"(",
"y",
".",
"transpose",
"(",
")",
",",
"y",
")",
"/",
"(",
"M",
"-",
"1",
")",
"(",
"V",
",",
"PC",
")",
"=",
"numpy",
".",
"linalg",
".",
"eig",
"(",
"cov",
")",
"order",
"=",
"(",
"-",
"V",
")",
".",
"argsort",
"(",
")",
"coeff",
"=",
"PC",
"[",
":",
",",
"order",
"]",
"return",
"coeff"
] | 35.142857 | 23.238095 |
def _find_players(self, year):
    """
    Find all player IDs for the requested team.

    For the requested team and year (if applicable), pull the roster table
    and parse the player ID for all players on the roster. In slim mode the
    players are stored as an ID -> name mapping; otherwise a Player instance
    is created per ID and collected in the 'players' property.

    Parameters
    ----------
    year : string
        The 6-digit string representing the year to pull the team's roster
        from.
    """
    if not year:
        year = utils._find_year_for_season('nhl')
    url = self._create_url(year)
    page = self._pull_team_page(url)
    if not page:
        raise ValueError("Can't pull requested team page. Ensure the "
                         "following URL exists: %s" % url)
    for player in page('table#roster tbody tr').items():
        player_id = self._get_id(player)
        if self._slim:
            # Slim mode: only keep the ID -> name mapping.
            self._players[player_id] = self._get_name(player)
        else:
            self._players.append(Player(player_id))
"def",
"_find_players",
"(",
"self",
",",
"year",
")",
":",
"if",
"not",
"year",
":",
"year",
"=",
"utils",
".",
"_find_year_for_season",
"(",
"'nhl'",
")",
"url",
"=",
"self",
".",
"_create_url",
"(",
"year",
")",
"page",
"=",
"self",
".",
"_pull_team_page",
"(",
"url",
")",
"if",
"not",
"page",
":",
"output",
"=",
"(",
"\"Can't pull requested team page. Ensure the following \"",
"\"URL exists: %s\"",
"%",
"url",
")",
"raise",
"ValueError",
"(",
"output",
")",
"for",
"player",
"in",
"page",
"(",
"'table#roster tbody tr'",
")",
".",
"items",
"(",
")",
":",
"player_id",
"=",
"self",
".",
"_get_id",
"(",
"player",
")",
"if",
"self",
".",
"_slim",
":",
"name",
"=",
"self",
".",
"_get_name",
"(",
"player",
")",
"self",
".",
"_players",
"[",
"player_id",
"]",
"=",
"name",
"else",
":",
"player_instance",
"=",
"Player",
"(",
"player_id",
")",
"self",
".",
"_players",
".",
"append",
"(",
"player_instance",
")"
] | 38.90625 | 18.71875 |
def num_features_model(m: nn.Module) -> int:
    "Return the number of output features for `model`."
    # Probe with growing square inputs: some models raise on inputs that are
    # too small (e.g. heavy downsampling), so double the size until a forward
    # pass succeeds, giving up beyond 2048.
    sz = 64
    while True:
        try:
            return model_sizes(m, size=(sz, sz))[-1][1]
        except Exception:
            # The exception itself is irrelevant; retry with a larger input.
            sz *= 2
            if sz > 2048:
                raise
"def",
"num_features_model",
"(",
"m",
":",
"nn",
".",
"Module",
")",
"->",
"int",
":",
"sz",
"=",
"64",
"while",
"True",
":",
"try",
":",
"return",
"model_sizes",
"(",
"m",
",",
"size",
"=",
"(",
"sz",
",",
"sz",
")",
")",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"except",
"Exception",
"as",
"e",
":",
"sz",
"*=",
"2",
"if",
"sz",
">",
"2048",
":",
"raise"
] | 32.125 | 15.625 |
def requested_perm(self, perm, obj, check_groups=True):
    """
    Check whether the user requested permission ``perm`` for ``obj``.

    Delegates to :meth:`has_perm` with its final positional flag forced to
    False (presumably "don't require the permission to be granted" —
    confirm against has_perm's signature).
    """
    return self.has_perm(perm, obj, check_groups, False)
"def",
"requested_perm",
"(",
"self",
",",
"perm",
",",
"obj",
",",
"check_groups",
"=",
"True",
")",
":",
"return",
"self",
".",
"has_perm",
"(",
"perm",
",",
"obj",
",",
"check_groups",
",",
"False",
")"
] | 40.4 | 12 |
def get(method, hmc, uri, uri_parms, logon_required):
    """Operation: List CPCs."""
    filter_args = parse_query_parms(method, uri, uri_parms[0])
    result_cpcs = []
    for cpc in hmc.cpcs.list(filter_args):
        # Expose only the summary properties in the list result.
        summary = {prop: cpc.properties[prop]
                   for prop in cpc.properties
                   if prop in ('object-uri', 'name', 'status')}
        result_cpcs.append(summary)
    return {'cpcs': result_cpcs}
"def",
"get",
"(",
"method",
",",
"hmc",
",",
"uri",
",",
"uri_parms",
",",
"logon_required",
")",
":",
"query_str",
"=",
"uri_parms",
"[",
"0",
"]",
"result_cpcs",
"=",
"[",
"]",
"filter_args",
"=",
"parse_query_parms",
"(",
"method",
",",
"uri",
",",
"query_str",
")",
"for",
"cpc",
"in",
"hmc",
".",
"cpcs",
".",
"list",
"(",
"filter_args",
")",
":",
"result_cpc",
"=",
"{",
"}",
"for",
"prop",
"in",
"cpc",
".",
"properties",
":",
"if",
"prop",
"in",
"(",
"'object-uri'",
",",
"'name'",
",",
"'status'",
")",
":",
"result_cpc",
"[",
"prop",
"]",
"=",
"cpc",
".",
"properties",
"[",
"prop",
"]",
"result_cpcs",
".",
"append",
"(",
"result_cpc",
")",
"return",
"{",
"'cpcs'",
":",
"result_cpcs",
"}"
] | 43 | 10.416667 |
def timeslide_durations(start1, start2, end1, end2, timeslide_offsets):
    """ Find the coincident time for each timeslide.

    The first detector's analyzed segments are slid to the right by each
    offset in ``timeslide_offsets`` and intersected with the second
    detector's segments.

    Parameters
    ----------
    start1: numpy.ndarray
        Array of the start of valid analyzed times for detector 1
    start2: numpy.ndarray
        Array of the start of valid analyzed times for detector 2
    end1: numpy.ndarray
        Array of the end of valid analyzed times for detector 1
    end2: numpy.ndarray
        Array of the end of valid analyzed times for detector 2
    timeslide_offsets: numpy.ndarray
        Array of offsets (in seconds) for each timeslide

    Returns
    --------
    durations: numpy.ndarray
        Array of coincident time for each timeslide in the offset array
    """
    from . import veto
    # Detector 2's segments are fixed; detector 1's are shifted per slide.
    seg2 = veto.start_end_to_segments(start2, end2)
    durations = [
        abs((veto.start_end_to_segments(start1 + offset, end1 + offset) & seg2).coalesce())
        for offset in timeslide_offsets
    ]
    return numpy.array(durations)
"def",
"timeslide_durations",
"(",
"start1",
",",
"start2",
",",
"end1",
",",
"end2",
",",
"timeslide_offsets",
")",
":",
"from",
".",
"import",
"veto",
"durations",
"=",
"[",
"]",
"seg2",
"=",
"veto",
".",
"start_end_to_segments",
"(",
"start2",
",",
"end2",
")",
"for",
"offset",
"in",
"timeslide_offsets",
":",
"seg1",
"=",
"veto",
".",
"start_end_to_segments",
"(",
"start1",
"+",
"offset",
",",
"end1",
"+",
"offset",
")",
"durations",
".",
"append",
"(",
"abs",
"(",
"(",
"seg1",
"&",
"seg2",
")",
".",
"coalesce",
"(",
")",
")",
")",
"return",
"numpy",
".",
"array",
"(",
"durations",
")"
] | 37.580645 | 21.483871 |
def update_tab_label(self, state_m):
    """Update the tab label of the given state's editor tab.

    :param rafcon.state_machine.states.state.State state_m: State model whose tab label is to be updated
    """
    state_identifier = self.get_state_identifier(state_m)
    # The tab may be currently open or in the closed-tabs cache.
    if state_identifier in self.tabs:
        tab_info = self.tabs[state_identifier]
    elif state_identifier in self.closed_tabs:
        tab_info = self.closed_tabs[state_identifier]
    else:
        return
    set_tab_label_texts(tab_info['page'].title_label, state_m,
                        tab_info['source_code_view_is_dirty'])
"def",
"update_tab_label",
"(",
"self",
",",
"state_m",
")",
":",
"state_identifier",
"=",
"self",
".",
"get_state_identifier",
"(",
"state_m",
")",
"if",
"state_identifier",
"not",
"in",
"self",
".",
"tabs",
"and",
"state_identifier",
"not",
"in",
"self",
".",
"closed_tabs",
":",
"return",
"tab_info",
"=",
"self",
".",
"tabs",
"[",
"state_identifier",
"]",
"if",
"state_identifier",
"in",
"self",
".",
"tabs",
"else",
"self",
".",
"closed_tabs",
"[",
"state_identifier",
"]",
"page",
"=",
"tab_info",
"[",
"'page'",
"]",
"set_tab_label_texts",
"(",
"page",
".",
"title_label",
",",
"state_m",
",",
"tab_info",
"[",
"'source_code_view_is_dirty'",
"]",
")"
] | 54.454545 | 31.454545 |
def console_host(self, new_host):
    """
    If remote console connections are allowed, the console host must be
    bound to 0.0.0.0; otherwise use the requested host.
    """
    server_config = Config.instance().get_section_config("Server")
    if server_config.getboolean("allow_remote_console"):
        log.warning("Remote console connections are allowed")
        self._console_host = "0.0.0.0"
    else:
        self._console_host = new_host
"def",
"console_host",
"(",
"self",
",",
"new_host",
")",
":",
"server_config",
"=",
"Config",
".",
"instance",
"(",
")",
".",
"get_section_config",
"(",
"\"Server\"",
")",
"remote_console_connections",
"=",
"server_config",
".",
"getboolean",
"(",
"\"allow_remote_console\"",
")",
"if",
"remote_console_connections",
":",
"log",
".",
"warning",
"(",
"\"Remote console connections are allowed\"",
")",
"self",
".",
"_console_host",
"=",
"\"0.0.0.0\"",
"else",
":",
"self",
".",
"_console_host",
"=",
"new_host"
] | 43.909091 | 15.727273 |
async def put(self, cid):
"""Update description for content
Accepts:
Query string args:
- "cid" - int
Request body parameters:
- message (signed dict):
- "description" - str
- "coinid" - str
Returns:
dict with following fields:
- "confirmed": None
- "txid" - str
- "description" - str
- "content" - str
- "read_access" - int
- "write_access" - int
- "cid" - int
- "txid" - str
- "seller_pubkey" - str
- "seller_access_string": None or str
Verified: True
"""
if settings.SIGNATURE_VERIFICATION:
super().verify()
try:
body = json.loads(self.request.body)
except:
self.set_status(400)
self.write({"error":400, "reason":"Unexpected data format. JSON required"})
raise tornado.web.Finish
# Get data from signed message
public_key = body.get("public_key", None)
if isinstance(body["message"], str):
message = json.loads(body["message"])
elif isinstance(body["message"], dict):
message = body["message"]
descr = message.get("description")
coinid = message.get("coinid")
if not coinid in settings.bridges.keys():
self.set_status(400)
self.write({"error":400, "reason":"Unknown coin id"})
raise tornado.web.Finish
# Check if all required data exists
if not all([public_key, descr, coinid]):
self.set_status(400)
self.write({"error":400, "reason":"Missed required fields"})
raise tornado.web.Finish
owneraddr = self.account.validator[coinid](public_key)
# Get content owner
response = await self.account.blockchain.ownerbycid(cid=cid)
if isinstance(response, dict):
if "error" in response.keys():
error_code = response["error"]
self.set_status(error_code)
self.write({"error":error_code, "reason":response["error"]})
raise tornado.web.Finish
# Check if current content belongs to current user
if response != owneraddr:
self.set_status(403)
self.write({"error":403, "reason":"Owner does not match."})
raise tornado.web.Finish
# Set fee
fee = await billing.update_description_fee(owneraddr=owneraddr,cid=cid,
description=descr)
# Set bridge url
if coinid in settings.bridges.keys():
self.account.blockchain.setendpoint(settings.bridges[coinid])
else:
self.set_status(400)
self.write({"error":400, "reason":"Invalid coinid"})
raise tornado.web.Finish
# Set description for content. Make request to the bridge
request = await self.account.blockchain.setdescrforcid(cid=cid, descr=descr,
owneraddr=owneraddr)
if "error" in request.keys():
self.set_status(request["error"])
self.write(request)
raise tornado.web.Finish
self.write({"cid":cid, "description":descr,
"coinid":coinid, "owneraddr": owneraddr}) | [
"async",
"def",
"put",
"(",
"self",
",",
"cid",
")",
":",
"if",
"settings",
".",
"SIGNATURE_VERIFICATION",
":",
"super",
"(",
")",
".",
"verify",
"(",
")",
"try",
":",
"body",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"request",
".",
"body",
")",
"except",
":",
"self",
".",
"set_status",
"(",
"400",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Unexpected data format. JSON required\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Get data from signed message",
"public_key",
"=",
"body",
".",
"get",
"(",
"\"public_key\"",
",",
"None",
")",
"if",
"isinstance",
"(",
"body",
"[",
"\"message\"",
"]",
",",
"str",
")",
":",
"message",
"=",
"json",
".",
"loads",
"(",
"body",
"[",
"\"message\"",
"]",
")",
"elif",
"isinstance",
"(",
"body",
"[",
"\"message\"",
"]",
",",
"dict",
")",
":",
"message",
"=",
"body",
"[",
"\"message\"",
"]",
"descr",
"=",
"message",
".",
"get",
"(",
"\"description\"",
")",
"coinid",
"=",
"message",
".",
"get",
"(",
"\"coinid\"",
")",
"if",
"not",
"coinid",
"in",
"settings",
".",
"bridges",
".",
"keys",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"400",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Unknown coin id\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Check if all required data exists",
"if",
"not",
"all",
"(",
"[",
"public_key",
",",
"descr",
",",
"coinid",
"]",
")",
":",
"self",
".",
"set_status",
"(",
"400",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Missed required fields\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"owneraddr",
"=",
"self",
".",
"account",
".",
"validator",
"[",
"coinid",
"]",
"(",
"public_key",
")",
"# Get content owner",
"response",
"=",
"await",
"self",
".",
"account",
".",
"blockchain",
".",
"ownerbycid",
"(",
"cid",
"=",
"cid",
")",
"if",
"isinstance",
"(",
"response",
",",
"dict",
")",
":",
"if",
"\"error\"",
"in",
"response",
".",
"keys",
"(",
")",
":",
"error_code",
"=",
"response",
"[",
"\"error\"",
"]",
"self",
".",
"set_status",
"(",
"error_code",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"error_code",
",",
"\"reason\"",
":",
"response",
"[",
"\"error\"",
"]",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Check if current content belongs to current user",
"if",
"response",
"!=",
"owneraddr",
":",
"self",
".",
"set_status",
"(",
"403",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"403",
",",
"\"reason\"",
":",
"\"Owner does not match.\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Set fee",
"fee",
"=",
"await",
"billing",
".",
"update_description_fee",
"(",
"owneraddr",
"=",
"owneraddr",
",",
"cid",
"=",
"cid",
",",
"description",
"=",
"descr",
")",
"# Set bridge url",
"if",
"coinid",
"in",
"settings",
".",
"bridges",
".",
"keys",
"(",
")",
":",
"self",
".",
"account",
".",
"blockchain",
".",
"setendpoint",
"(",
"settings",
".",
"bridges",
"[",
"coinid",
"]",
")",
"else",
":",
"self",
".",
"set_status",
"(",
"400",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Invalid coinid\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Set description for content. Make request to the bridge",
"request",
"=",
"await",
"self",
".",
"account",
".",
"blockchain",
".",
"setdescrforcid",
"(",
"cid",
"=",
"cid",
",",
"descr",
"=",
"descr",
",",
"owneraddr",
"=",
"owneraddr",
")",
"if",
"\"error\"",
"in",
"request",
".",
"keys",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"request",
"[",
"\"error\"",
"]",
")",
"self",
".",
"write",
"(",
"request",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"self",
".",
"write",
"(",
"{",
"\"cid\"",
":",
"cid",
",",
"\"description\"",
":",
"descr",
",",
"\"coinid\"",
":",
"coinid",
",",
"\"owneraddr\"",
":",
"owneraddr",
"}",
")"
] | 27.536082 | 18.773196 |
def alter_columns(op, name, *columns, **kwargs):
"""Alter columns from a table.
Parameters
----------
name : str
The name of the table.
*columns
The new columns to have.
selection_string : str, optional
The string to use in the selection. If not provided, it will select all
of the new columns from the old table.
Notes
-----
The columns are passed explicitly because this should only be used in a
downgrade where ``zipline.assets.asset_db_schema`` could change.
"""
selection_string = kwargs.pop('selection_string', None)
if kwargs:
raise TypeError(
'alter_columns received extra arguments: %r' % sorted(kwargs),
)
if selection_string is None:
selection_string = ', '.join(column.name for column in columns)
tmp_name = '_alter_columns_' + name
op.rename_table(name, tmp_name)
for column in columns:
# Clear any indices that already exist on this table, otherwise we will
# fail to create the table because the indices will already be present.
# When we create the table below, the indices that we want to preserve
# will just get recreated.
for table in name, tmp_name:
try:
op.drop_index('ix_%s_%s' % (table, column.name))
except sa.exc.OperationalError:
pass
op.create_table(name, *columns)
op.execute(
'insert into %s select %s from %s' % (
name,
selection_string,
tmp_name,
),
)
op.drop_table(tmp_name) | [
"def",
"alter_columns",
"(",
"op",
",",
"name",
",",
"*",
"columns",
",",
"*",
"*",
"kwargs",
")",
":",
"selection_string",
"=",
"kwargs",
".",
"pop",
"(",
"'selection_string'",
",",
"None",
")",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'alter_columns received extra arguments: %r'",
"%",
"sorted",
"(",
"kwargs",
")",
",",
")",
"if",
"selection_string",
"is",
"None",
":",
"selection_string",
"=",
"', '",
".",
"join",
"(",
"column",
".",
"name",
"for",
"column",
"in",
"columns",
")",
"tmp_name",
"=",
"'_alter_columns_'",
"+",
"name",
"op",
".",
"rename_table",
"(",
"name",
",",
"tmp_name",
")",
"for",
"column",
"in",
"columns",
":",
"# Clear any indices that already exist on this table, otherwise we will",
"# fail to create the table because the indices will already be present.",
"# When we create the table below, the indices that we want to preserve",
"# will just get recreated.",
"for",
"table",
"in",
"name",
",",
"tmp_name",
":",
"try",
":",
"op",
".",
"drop_index",
"(",
"'ix_%s_%s'",
"%",
"(",
"table",
",",
"column",
".",
"name",
")",
")",
"except",
"sa",
".",
"exc",
".",
"OperationalError",
":",
"pass",
"op",
".",
"create_table",
"(",
"name",
",",
"*",
"columns",
")",
"op",
".",
"execute",
"(",
"'insert into %s select %s from %s'",
"%",
"(",
"name",
",",
"selection_string",
",",
"tmp_name",
",",
")",
",",
")",
"op",
".",
"drop_table",
"(",
"tmp_name",
")"
] | 31.918367 | 21.530612 |
def generate_relay_config(project):
"""
Generate Relay Proxy Configuration.
Generate a ld-relay.conf file to quickly spin up a relay proxy.
Right now this is mostly used for integration testing.
:param project: LaunchDarkly project key
"""
ld_api = LaunchDarklyApi(
os.environ.get('LD_API_KEY'),
project_key=project
)
config = ConfigGenerator()
envs = ld_api.get_environments(project)
config.generate_relay_config(envs) | [
"def",
"generate_relay_config",
"(",
"project",
")",
":",
"ld_api",
"=",
"LaunchDarklyApi",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'LD_API_KEY'",
")",
",",
"project_key",
"=",
"project",
")",
"config",
"=",
"ConfigGenerator",
"(",
")",
"envs",
"=",
"ld_api",
".",
"get_environments",
"(",
"project",
")",
"config",
".",
"generate_relay_config",
"(",
"envs",
")"
] | 27.411765 | 14.823529 |
def lr(self, lis, op):
"""performs this operation on a list from *left to right*
op must take 2 args
a,b,c => op(op(a, b), c)"""
it = iter(lis)
res = trans(it.next())
for e in it:
e = trans(e)
res = op(res, e)
return res | [
"def",
"lr",
"(",
"self",
",",
"lis",
",",
"op",
")",
":",
"it",
"=",
"iter",
"(",
"lis",
")",
"res",
"=",
"trans",
"(",
"it",
".",
"next",
"(",
")",
")",
"for",
"e",
"in",
"it",
":",
"e",
"=",
"trans",
"(",
"e",
")",
"res",
"=",
"op",
"(",
"res",
",",
"e",
")",
"return",
"res"
] | 29.2 | 12.9 |
def s2a(s):
"""
convert 6 element "s" list to 3,3 a matrix (see Tauxe 1998)
"""
a = np.zeros((3, 3,), 'f') # make the a matrix
for i in range(3):
a[i][i] = s[i]
a[0][1], a[1][0] = s[3], s[3]
a[1][2], a[2][1] = s[4], s[4]
a[0][2], a[2][0] = s[5], s[5]
return a | [
"def",
"s2a",
"(",
"s",
")",
":",
"a",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
",",
")",
",",
"'f'",
")",
"# make the a matrix",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"a",
"[",
"i",
"]",
"[",
"i",
"]",
"=",
"s",
"[",
"i",
"]",
"a",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"a",
"[",
"1",
"]",
"[",
"0",
"]",
"=",
"s",
"[",
"3",
"]",
",",
"s",
"[",
"3",
"]",
"a",
"[",
"1",
"]",
"[",
"2",
"]",
",",
"a",
"[",
"2",
"]",
"[",
"1",
"]",
"=",
"s",
"[",
"4",
"]",
",",
"s",
"[",
"4",
"]",
"a",
"[",
"0",
"]",
"[",
"2",
"]",
",",
"a",
"[",
"2",
"]",
"[",
"0",
"]",
"=",
"s",
"[",
"5",
"]",
",",
"s",
"[",
"5",
"]",
"return",
"a"
] | 26.818182 | 13.545455 |
def multipart_listuploads(self, bucket):
"""List objects in a bucket.
:param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
:returns: The Flask response.
"""
return self.make_response(
data=MultipartObject.query_by_bucket(bucket).limit(1000).all(),
context={
'class': MultipartObject,
'bucket': bucket,
'many': True,
}
) | [
"def",
"multipart_listuploads",
"(",
"self",
",",
"bucket",
")",
":",
"return",
"self",
".",
"make_response",
"(",
"data",
"=",
"MultipartObject",
".",
"query_by_bucket",
"(",
"bucket",
")",
".",
"limit",
"(",
"1000",
")",
".",
"all",
"(",
")",
",",
"context",
"=",
"{",
"'class'",
":",
"MultipartObject",
",",
"'bucket'",
":",
"bucket",
",",
"'many'",
":",
"True",
",",
"}",
")"
] | 32.5 | 15.428571 |
def parseUri(stream, uri=None):
"""Read an XML document from a URI, and return a :mod:`lxml.etree`
document."""
return etree.parse(stream, parser=_get_xmlparser(), base_url=uri) | [
"def",
"parseUri",
"(",
"stream",
",",
"uri",
"=",
"None",
")",
":",
"return",
"etree",
".",
"parse",
"(",
"stream",
",",
"parser",
"=",
"_get_xmlparser",
"(",
")",
",",
"base_url",
"=",
"uri",
")"
] | 46.5 | 9.5 |
def _convert_choices(self, choices):
"""Auto create display values then call super method"""
final_choices = []
for choice in choices:
if isinstance(choice, ChoiceEntry):
final_choices.append(choice)
continue
original_choice = choice
choice = list(choice)
length = len(choice)
assert 2 <= length <= 4, 'Invalid number of entries in %s' % (original_choice,)
final_choice = []
# do we have attributes?
if length > 2 and isinstance(choice[-1], Mapping):
final_choice.append(choice.pop())
elif length == 4:
attributes = choice.pop()
assert attributes is None or isinstance(attributes, Mapping), 'Last argument must be a dict-like object in %s' % (original_choice,)
if attributes:
final_choice.append(attributes)
# the constant
final_choice.insert(0, choice.pop(0))
# the db value
final_choice.insert(1, choice.pop(0))
if len(choice):
# we were given a display value
final_choice.insert(2, choice.pop(0))
else:
# no display value, we compute it from the constant
final_choice.insert(2, self.display_transform(final_choice[0]))
final_choices.append(final_choice)
return super(AutoDisplayChoices, self)._convert_choices(final_choices) | [
"def",
"_convert_choices",
"(",
"self",
",",
"choices",
")",
":",
"final_choices",
"=",
"[",
"]",
"for",
"choice",
"in",
"choices",
":",
"if",
"isinstance",
"(",
"choice",
",",
"ChoiceEntry",
")",
":",
"final_choices",
".",
"append",
"(",
"choice",
")",
"continue",
"original_choice",
"=",
"choice",
"choice",
"=",
"list",
"(",
"choice",
")",
"length",
"=",
"len",
"(",
"choice",
")",
"assert",
"2",
"<=",
"length",
"<=",
"4",
",",
"'Invalid number of entries in %s'",
"%",
"(",
"original_choice",
",",
")",
"final_choice",
"=",
"[",
"]",
"# do we have attributes?",
"if",
"length",
">",
"2",
"and",
"isinstance",
"(",
"choice",
"[",
"-",
"1",
"]",
",",
"Mapping",
")",
":",
"final_choice",
".",
"append",
"(",
"choice",
".",
"pop",
"(",
")",
")",
"elif",
"length",
"==",
"4",
":",
"attributes",
"=",
"choice",
".",
"pop",
"(",
")",
"assert",
"attributes",
"is",
"None",
"or",
"isinstance",
"(",
"attributes",
",",
"Mapping",
")",
",",
"'Last argument must be a dict-like object in %s'",
"%",
"(",
"original_choice",
",",
")",
"if",
"attributes",
":",
"final_choice",
".",
"append",
"(",
"attributes",
")",
"# the constant",
"final_choice",
".",
"insert",
"(",
"0",
",",
"choice",
".",
"pop",
"(",
"0",
")",
")",
"# the db value",
"final_choice",
".",
"insert",
"(",
"1",
",",
"choice",
".",
"pop",
"(",
"0",
")",
")",
"if",
"len",
"(",
"choice",
")",
":",
"# we were given a display value",
"final_choice",
".",
"insert",
"(",
"2",
",",
"choice",
".",
"pop",
"(",
"0",
")",
")",
"else",
":",
"# no display value, we compute it from the constant",
"final_choice",
".",
"insert",
"(",
"2",
",",
"self",
".",
"display_transform",
"(",
"final_choice",
"[",
"0",
"]",
")",
")",
"final_choices",
".",
"append",
"(",
"final_choice",
")",
"return",
"super",
"(",
"AutoDisplayChoices",
",",
"self",
")",
".",
"_convert_choices",
"(",
"final_choices",
")"
] | 34.883721 | 22.395349 |
def sync_code(self):
"""Sync in code files and the meta file, avoiding syncing the larger files"""
from ambry.orm.file import File
from ambry.bundle.files import BuildSourceFile
self.dstate = self.STATES.BUILDING
synced = 0
for fc in [File.BSFILE.BUILD, File.BSFILE.META, File.BSFILE.LIB, File.BSFILE.TEST, File.BSFILE.DOC]:
bsf = self.build_source_files.file(fc)
if bsf.fs_is_newer:
self.log('Syncing {}'.format(bsf.file_name))
bsf.sync(BuildSourceFile.SYNC_DIR.FILE_TO_RECORD)
synced += 1
# Only the metadata needs to be driven to the objects, since the other files are used as code,
# directly from the file record.
self.build_source_files.file(File.BSFILE.META).record_to_objects()
return synced | [
"def",
"sync_code",
"(",
"self",
")",
":",
"from",
"ambry",
".",
"orm",
".",
"file",
"import",
"File",
"from",
"ambry",
".",
"bundle",
".",
"files",
"import",
"BuildSourceFile",
"self",
".",
"dstate",
"=",
"self",
".",
"STATES",
".",
"BUILDING",
"synced",
"=",
"0",
"for",
"fc",
"in",
"[",
"File",
".",
"BSFILE",
".",
"BUILD",
",",
"File",
".",
"BSFILE",
".",
"META",
",",
"File",
".",
"BSFILE",
".",
"LIB",
",",
"File",
".",
"BSFILE",
".",
"TEST",
",",
"File",
".",
"BSFILE",
".",
"DOC",
"]",
":",
"bsf",
"=",
"self",
".",
"build_source_files",
".",
"file",
"(",
"fc",
")",
"if",
"bsf",
".",
"fs_is_newer",
":",
"self",
".",
"log",
"(",
"'Syncing {}'",
".",
"format",
"(",
"bsf",
".",
"file_name",
")",
")",
"bsf",
".",
"sync",
"(",
"BuildSourceFile",
".",
"SYNC_DIR",
".",
"FILE_TO_RECORD",
")",
"synced",
"+=",
"1",
"# Only the metadata needs to be driven to the objects, since the other files are used as code,",
"# directly from the file record.",
"self",
".",
"build_source_files",
".",
"file",
"(",
"File",
".",
"BSFILE",
".",
"META",
")",
".",
"record_to_objects",
"(",
")",
"return",
"synced"
] | 39.809524 | 24.714286 |
def decode_tx_packet(packet: str) -> dict:
"""Break packet down into primitives, and do basic interpretation.
>>> decode_packet('10;Kaku;ID=41;SWITCH=1;CMD=ON;') == {
... 'node': 'gateway',
... 'protocol': 'kaku',
... 'id': '000041',
... 'switch': '1',
... 'command': 'on',
... }
True
"""
node_id, protocol, attrs = packet.split(DELIM, 2)
data = cast(Dict[str, Any], {
'node': PacketHeader(node_id).name,
})
data['protocol'] = protocol.lower()
for i, attr in enumerate(filter(None, attrs.strip(DELIM).split(DELIM))):
if i == 0:
data['id'] = attr
if i == 1:
data['switch'] = attr
if i == 2:
data['command'] = attr
# correct KaKu device address
if data.get('protocol', '') == 'kaku' and len(data['id']) != 6:
data['id'] = '0000' + data['id']
return data | [
"def",
"decode_tx_packet",
"(",
"packet",
":",
"str",
")",
"->",
"dict",
":",
"node_id",
",",
"protocol",
",",
"attrs",
"=",
"packet",
".",
"split",
"(",
"DELIM",
",",
"2",
")",
"data",
"=",
"cast",
"(",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"{",
"'node'",
":",
"PacketHeader",
"(",
"node_id",
")",
".",
"name",
",",
"}",
")",
"data",
"[",
"'protocol'",
"]",
"=",
"protocol",
".",
"lower",
"(",
")",
"for",
"i",
",",
"attr",
"in",
"enumerate",
"(",
"filter",
"(",
"None",
",",
"attrs",
".",
"strip",
"(",
"DELIM",
")",
".",
"split",
"(",
"DELIM",
")",
")",
")",
":",
"if",
"i",
"==",
"0",
":",
"data",
"[",
"'id'",
"]",
"=",
"attr",
"if",
"i",
"==",
"1",
":",
"data",
"[",
"'switch'",
"]",
"=",
"attr",
"if",
"i",
"==",
"2",
":",
"data",
"[",
"'command'",
"]",
"=",
"attr",
"# correct KaKu device address",
"if",
"data",
".",
"get",
"(",
"'protocol'",
",",
"''",
")",
"==",
"'kaku'",
"and",
"len",
"(",
"data",
"[",
"'id'",
"]",
")",
"!=",
"6",
":",
"data",
"[",
"'id'",
"]",
"=",
"'0000'",
"+",
"data",
"[",
"'id'",
"]",
"return",
"data"
] | 27.060606 | 18.969697 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.