text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def insertCallSet(self, callSet):
"""
Inserts a the specified callSet into this repository.
"""
try:
models.Callset.create(
id=callSet.getId(),
name=callSet.getLocalId(),
variantsetid=callSet.getParentContainer().getId(),
biosampleid=callSet.getBiosampleId(),
attributes=json.dumps(callSet.getAttributes()))
except Exception as e:
raise exceptions.RepoManagerException(e) | [
"def",
"insertCallSet",
"(",
"self",
",",
"callSet",
")",
":",
"try",
":",
"models",
".",
"Callset",
".",
"create",
"(",
"id",
"=",
"callSet",
".",
"getId",
"(",
")",
",",
"name",
"=",
"callSet",
".",
"getLocalId",
"(",
")",
",",
"variantsetid",
"=",
"callSet",
".",
"getParentContainer",
"(",
")",
".",
"getId",
"(",
")",
",",
"biosampleid",
"=",
"callSet",
".",
"getBiosampleId",
"(",
")",
",",
"attributes",
"=",
"json",
".",
"dumps",
"(",
"callSet",
".",
"getAttributes",
"(",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"RepoManagerException",
"(",
"e",
")"
] | 38.692308 | 11.769231 |
def update_section(self, section_id, name, sis_section_id):
"""
Update a canvas section with the given section id.
https://canvas.instructure.com/doc/api/sections.html#method.sections.update
"""
url = SECTIONS_API.format(section_id)
body = {"course_section": {}}
if name:
body["course_section"]["name"] = name
if sis_section_id:
body["course_section"]["sis_section_id"] = sis_section_id
return CanvasSection(data=self._put_resource(url, body)) | [
"def",
"update_section",
"(",
"self",
",",
"section_id",
",",
"name",
",",
"sis_section_id",
")",
":",
"url",
"=",
"SECTIONS_API",
".",
"format",
"(",
"section_id",
")",
"body",
"=",
"{",
"\"course_section\"",
":",
"{",
"}",
"}",
"if",
"name",
":",
"body",
"[",
"\"course_section\"",
"]",
"[",
"\"name\"",
"]",
"=",
"name",
"if",
"sis_section_id",
":",
"body",
"[",
"\"course_section\"",
"]",
"[",
"\"sis_section_id\"",
"]",
"=",
"sis_section_id",
"return",
"CanvasSection",
"(",
"data",
"=",
"self",
".",
"_put_resource",
"(",
"url",
",",
"body",
")",
")"
] | 33 | 21.75 |
def xor(s, pad):
'''XOR a given string ``s`` with the one-time-pad ``pad``'''
from itertools import cycle
s = bytearray(force_bytes(s, encoding='latin-1'))
pad = bytearray(force_bytes(pad, encoding='latin-1'))
return binary_type(bytearray(x ^ y for x, y in zip(s, cycle(pad)))) | [
"def",
"xor",
"(",
"s",
",",
"pad",
")",
":",
"from",
"itertools",
"import",
"cycle",
"s",
"=",
"bytearray",
"(",
"force_bytes",
"(",
"s",
",",
"encoding",
"=",
"'latin-1'",
")",
")",
"pad",
"=",
"bytearray",
"(",
"force_bytes",
"(",
"pad",
",",
"encoding",
"=",
"'latin-1'",
")",
")",
"return",
"binary_type",
"(",
"bytearray",
"(",
"x",
"^",
"y",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"s",
",",
"cycle",
"(",
"pad",
")",
")",
")",
")"
] | 48.666667 | 19.666667 |
def analyse(node, env, non_generic=None):
"""Computes the type of the expression given by node.
The type of the node is computed in the context of the context of the
supplied type environment env. Data types can be introduced into the
language simply by having a predefined set of identifiers in the initial
environment. Environment; this way there is no need to change the syntax
or more importantly, the type-checking program when extending the language.
Args:
node: The root of the abstract syntax tree.
env: The type environment is a mapping of expression identifier names
to type assignments.
non_generic: A set of non-generic variables, or None
Returns:
The computed type of the expression.
Raises:
InferenceError: The type of the expression could not be inferred,
PythranTypeError: InferenceError with user friendly message + location
"""
if non_generic is None:
non_generic = set()
# expr
if isinstance(node, gast.Name):
if isinstance(node.ctx, (gast.Store)):
new_type = TypeVariable()
non_generic.add(new_type)
env[node.id] = new_type
return get_type(node.id, env, non_generic)
elif isinstance(node, gast.Num):
if isinstance(node.n, int):
return Integer()
elif isinstance(node.n, float):
return Float()
elif isinstance(node.n, complex):
return Complex()
else:
raise NotImplementedError
elif isinstance(node, gast.Str):
return Str()
elif isinstance(node, gast.Compare):
left_type = analyse(node.left, env, non_generic)
comparators_type = [analyse(comparator, env, non_generic)
for comparator in node.comparators]
ops_type = [analyse(op, env, non_generic)
for op in node.ops]
prev_type = left_type
result_type = TypeVariable()
for op_type, comparator_type in zip(ops_type, comparators_type):
try:
unify(Function([prev_type, comparator_type], result_type),
op_type)
prev_type = comparator_type
except InferenceError:
raise PythranTypeError(
"Invalid comparison, between `{}` and `{}`".format(
prev_type,
comparator_type
),
node)
return result_type
elif isinstance(node, gast.Call):
if is_getattr(node):
self_type = analyse(node.args[0], env, non_generic)
attr_name = node.args[1].s
_, attr_signature = attributes[attr_name]
attr_type = tr(attr_signature)
result_type = TypeVariable()
try:
unify(Function([self_type], result_type), attr_type)
except InferenceError:
if isinstance(prune(attr_type), MultiType):
msg = 'no attribute found, tried:\n{}'.format(attr_type)
else:
msg = 'tried {}'.format(attr_type)
raise PythranTypeError(
"Invalid attribute for getattr call with self"
"of type `{}`, {}".format(self_type, msg), node)
else:
fun_type = analyse(node.func, env, non_generic)
arg_types = [analyse(arg, env, non_generic) for arg in node.args]
result_type = TypeVariable()
try:
unify(Function(arg_types, result_type), fun_type)
except InferenceError:
# recover original type
fun_type = analyse(node.func, env, non_generic)
if isinstance(prune(fun_type), MultiType):
msg = 'no overload found, tried:\n{}'.format(fun_type)
else:
msg = 'tried {}'.format(fun_type)
raise PythranTypeError(
"Invalid argument type for function call to "
"`Callable[[{}], ...]`, {}"
.format(', '.join('{}'.format(at) for at in arg_types),
msg),
node)
return result_type
elif isinstance(node, gast.IfExp):
test_type = analyse(node.test, env, non_generic)
unify(Function([test_type], Bool()),
tr(MODULES['__builtin__']['bool_']))
if is_test_is_none(node.test):
none_id = node.test.left.id
body_env = env.copy()
body_env[none_id] = NoneType
else:
none_id = None
body_env = env
body_type = analyse(node.body, body_env, non_generic)
if none_id:
orelse_env = env.copy()
if is_option_type(env[none_id]):
orelse_env[none_id] = prune(env[none_id]).types[0]
else:
orelse_env[none_id] = TypeVariable()
else:
orelse_env = env
orelse_type = analyse(node.orelse, orelse_env, non_generic)
try:
return merge_unify(body_type, orelse_type)
except InferenceError:
raise PythranTypeError(
"Incompatible types from different branches:"
"`{}` and `{}`".format(
body_type,
orelse_type
),
node
)
elif isinstance(node, gast.UnaryOp):
operand_type = analyse(node.operand, env, non_generic)
op_type = analyse(node.op, env, non_generic)
result_type = TypeVariable()
try:
unify(Function([operand_type], result_type), op_type)
return result_type
except InferenceError:
raise PythranTypeError(
"Invalid operand for `{}`: `{}`".format(
symbol_of[type(node.op)],
operand_type
),
node
)
elif isinstance(node, gast.BinOp):
left_type = analyse(node.left, env, non_generic)
op_type = analyse(node.op, env, non_generic)
right_type = analyse(node.right, env, non_generic)
result_type = TypeVariable()
try:
unify(Function([left_type, right_type], result_type), op_type)
except InferenceError:
raise PythranTypeError(
"Invalid operand for `{}`: `{}` and `{}`".format(
symbol_of[type(node.op)],
left_type,
right_type),
node
)
return result_type
elif isinstance(node, gast.Pow):
return tr(MODULES['numpy']['power'])
elif isinstance(node, gast.Sub):
return tr(MODULES['operator_']['sub'])
elif isinstance(node, (gast.USub, gast.UAdd)):
return tr(MODULES['operator_']['pos'])
elif isinstance(node, (gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt,
gast.GtE, gast.Is, gast.IsNot)):
return tr(MODULES['operator_']['eq'])
elif isinstance(node, (gast.In, gast.NotIn)):
contains_sig = tr(MODULES['operator_']['contains'])
contains_sig.types[:-1] = reversed(contains_sig.types[:-1])
return contains_sig
elif isinstance(node, gast.Add):
return tr(MODULES['operator_']['add'])
elif isinstance(node, gast.Mult):
return tr(MODULES['operator_']['mul'])
elif isinstance(node, gast.MatMult):
return tr(MODULES['operator_']['matmul'])
elif isinstance(node, (gast.Div, gast.FloorDiv)):
return tr(MODULES['operator_']['floordiv'])
elif isinstance(node, gast.Mod):
return tr(MODULES['operator_']['mod'])
elif isinstance(node, (gast.LShift, gast.RShift)):
return tr(MODULES['operator_']['lshift'])
elif isinstance(node, (gast.BitXor, gast.BitAnd, gast.BitOr)):
return tr(MODULES['operator_']['lshift'])
elif isinstance(node, gast.List):
new_type = TypeVariable()
for elt in node.elts:
elt_type = analyse(elt, env, non_generic)
try:
unify(new_type, elt_type)
except InferenceError:
raise PythranTypeError(
"Incompatible list element type `{}` and `{}`".format(
new_type, elt_type),
node
)
return List(new_type)
elif isinstance(node, gast.Set):
new_type = TypeVariable()
for elt in node.elts:
elt_type = analyse(elt, env, non_generic)
try:
unify(new_type, elt_type)
except InferenceError:
raise PythranTypeError(
"Incompatible set element type `{}` and `{}`".format(
new_type, elt_type),
node
)
return Set(new_type)
elif isinstance(node, gast.Dict):
new_key_type = TypeVariable()
for key in node.keys:
key_type = analyse(key, env, non_generic)
try:
unify(new_key_type, key_type)
except InferenceError:
raise PythranTypeError(
"Incompatible dict key type `{}` and `{}`".format(
new_key_type, key_type),
node
)
new_value_type = TypeVariable()
for value in node.values:
value_type = analyse(value, env, non_generic)
try:
unify(new_value_type, value_type)
except InferenceError:
raise PythranTypeError(
"Incompatible dict value type `{}` and `{}`".format(
new_value_type, value_type),
node
)
return Dict(new_key_type, new_value_type)
elif isinstance(node, gast.Tuple):
return Tuple([analyse(elt, env, non_generic) for elt in node.elts])
elif isinstance(node, gast.Index):
return analyse(node.value, env, non_generic)
elif isinstance(node, gast.Slice):
def unify_int_or_none(t, name):
try:
unify(t, Integer())
except InferenceError:
try:
unify(t, NoneType)
except InferenceError:
raise PythranTypeError(
"Invalid slice {} type `{}`, expecting int or None"
.format(name, t)
)
if node.lower:
lower_type = analyse(node.lower, env, non_generic)
unify_int_or_none(lower_type, 'lower bound')
else:
lower_type = Integer()
if node.upper:
upper_type = analyse(node.upper, env, non_generic)
unify_int_or_none(upper_type, 'upper bound')
else:
upper_type = Integer()
if node.step:
step_type = analyse(node.step, env, non_generic)
unify_int_or_none(step_type, 'step')
else:
step_type = Integer()
return Slice
elif isinstance(node, gast.ExtSlice):
return [analyse(dim, env, non_generic) for dim in node.dims]
elif isinstance(node, gast.NameConstant):
if node.value is None:
return env['None']
elif isinstance(node, gast.Subscript):
new_type = TypeVariable()
value_type = prune(analyse(node.value, env, non_generic))
try:
slice_type = prune(analyse(node.slice, env, non_generic))
except PythranTypeError as e:
raise PythranTypeError(e.msg, node)
if isinstance(node.slice, gast.ExtSlice):
nbslice = len(node.slice.dims)
dtype = TypeVariable()
try:
unify(Array(dtype, nbslice), clone(value_type))
except InferenceError:
raise PythranTypeError(
"Dimension mismatch when slicing `{}`".format(value_type),
node)
return TypeVariable() # FIXME
elif isinstance(node.slice, gast.Index):
# handle tuples in a special way
isnum = isinstance(node.slice.value, gast.Num)
if isnum and is_tuple_type(value_type):
try:
unify(prune(prune(value_type.types[0]).types[0])
.types[node.slice.value.n],
new_type)
return new_type
except IndexError:
raise PythranTypeError(
"Invalid tuple indexing, "
"out-of-bound index `{}` for type `{}`".format(
node.slice.value.n,
value_type),
node)
try:
unify(tr(MODULES['operator_']['getitem']),
Function([value_type, slice_type], new_type))
except InferenceError:
raise PythranTypeError(
"Invalid subscripting of `{}` by `{}`".format(
value_type,
slice_type),
node)
return new_type
return new_type
elif isinstance(node, gast.Attribute):
from pythran.utils import attr_to_path
obj, path = attr_to_path(node)
if obj.signature is typing.Any:
return TypeVariable()
else:
return tr(obj)
# stmt
elif isinstance(node, gast.Import):
for alias in node.names:
if alias.name not in MODULES:
raise NotImplementedError("unknown module: %s " % alias.name)
if alias.asname is None:
target = alias.name
else:
target = alias.asname
env[target] = tr(MODULES[alias.name])
return env
elif isinstance(node, gast.ImportFrom):
if node.module not in MODULES:
raise NotImplementedError("unknown module: %s" % node.module)
for alias in node.names:
if alias.name not in MODULES[node.module]:
raise NotImplementedError(
"unknown function: %s in %s" % (alias.name, node.module))
if alias.asname is None:
target = alias.name
else:
target = alias.asname
env[target] = tr(MODULES[node.module][alias.name])
return env
elif isinstance(node, gast.FunctionDef):
ftypes = []
for i in range(1 + len(node.args.defaults)):
new_env = env.copy()
new_non_generic = non_generic.copy()
# reset return special variables
new_env.pop('@ret', None)
new_env.pop('@gen', None)
hy = HasYield()
for stmt in node.body:
hy.visit(stmt)
new_env['@gen'] = hy.has_yield
arg_types = []
istop = len(node.args.args) - i
for arg in node.args.args[:istop]:
arg_type = TypeVariable()
new_env[arg.id] = arg_type
new_non_generic.add(arg_type)
arg_types.append(arg_type)
for arg, expr in zip(node.args.args[istop:],
node.args.defaults[-i:]):
arg_type = analyse(expr, new_env, new_non_generic)
new_env[arg.id] = arg_type
analyse_body(node.body, new_env, new_non_generic)
result_type = new_env.get('@ret', NoneType)
if new_env['@gen']:
result_type = Generator(result_type)
ftype = Function(arg_types, result_type)
ftypes.append(ftype)
if len(ftypes) == 1:
ftype = ftypes[0]
env[node.name] = ftype
else:
env[node.name] = MultiType(ftypes)
return env
elif isinstance(node, gast.Module):
analyse_body(node.body, env, non_generic)
return env
elif isinstance(node, (gast.Pass, gast.Break, gast.Continue)):
return env
elif isinstance(node, gast.Expr):
analyse(node.value, env, non_generic)
return env
elif isinstance(node, gast.Delete):
for target in node.targets:
if isinstance(target, gast.Name):
if target.id in env:
del env[target.id]
else:
raise PythranTypeError(
"Invalid del: unbound identifier `{}`".format(
target.id),
node)
else:
analyse(target, env, non_generic)
return env
elif isinstance(node, gast.Print):
if node.dest is not None:
analyse(node.dest, env, non_generic)
for value in node.values:
analyse(value, env, non_generic)
return env
elif isinstance(node, gast.Assign):
defn_type = analyse(node.value, env, non_generic)
for target in node.targets:
target_type = analyse(target, env, non_generic)
try:
unify(target_type, defn_type)
except InferenceError:
raise PythranTypeError(
"Invalid assignment from type `{}` to type `{}`".format(
target_type,
defn_type),
node)
return env
elif isinstance(node, gast.AugAssign):
# FIMXE: not optimal: evaluates type of node.value twice
fake_target = deepcopy(node.target)
fake_target.ctx = gast.Load()
fake_op = gast.BinOp(fake_target, node.op, node.value)
gast.copy_location(fake_op, node)
res_type = analyse(fake_op, env, non_generic)
target_type = analyse(node.target, env, non_generic)
try:
unify(target_type, res_type)
except InferenceError:
raise PythranTypeError(
"Invalid update operand for `{}`: `{}` and `{}`".format(
symbol_of[type(node.op)],
res_type,
target_type
),
node
)
return env
elif isinstance(node, gast.Raise):
return env # TODO
elif isinstance(node, gast.Return):
if env['@gen']:
return env
if node.value is None:
ret_type = NoneType
else:
ret_type = analyse(node.value, env, non_generic)
if '@ret' in env:
try:
ret_type = merge_unify(env['@ret'], ret_type)
except InferenceError:
raise PythranTypeError(
"function may returns with incompatible types "
"`{}` and `{}`".format(env['@ret'], ret_type),
node
)
env['@ret'] = ret_type
return env
elif isinstance(node, gast.Yield):
assert env['@gen']
assert node.value is not None
if node.value is None:
ret_type = NoneType
else:
ret_type = analyse(node.value, env, non_generic)
if '@ret' in env:
try:
ret_type = merge_unify(env['@ret'], ret_type)
except InferenceError:
raise PythranTypeError(
"function may yields incompatible types "
"`{}` and `{}`".format(env['@ret'], ret_type),
node
)
env['@ret'] = ret_type
return env
elif isinstance(node, gast.For):
iter_type = analyse(node.iter, env, non_generic)
target_type = analyse(node.target, env, non_generic)
unify(Collection(TypeVariable(), TypeVariable(), TypeVariable(),
target_type),
iter_type)
analyse_body(node.body, env, non_generic)
analyse_body(node.orelse, env, non_generic)
return env
elif isinstance(node, gast.If):
test_type = analyse(node.test, env, non_generic)
unify(Function([test_type], Bool()),
tr(MODULES['__builtin__']['bool_']))
body_env = env.copy()
body_non_generic = non_generic.copy()
if is_test_is_none(node.test):
none_id = node.test.left.id
body_env[none_id] = NoneType
else:
none_id = None
analyse_body(node.body, body_env, body_non_generic)
orelse_env = env.copy()
orelse_non_generic = non_generic.copy()
if none_id:
if is_option_type(env[none_id]):
orelse_env[none_id] = prune(env[none_id]).types[0]
else:
orelse_env[none_id] = TypeVariable()
analyse_body(node.orelse, orelse_env, orelse_non_generic)
for var in body_env:
if var not in env:
if var in orelse_env:
try:
new_type = merge_unify(body_env[var], orelse_env[var])
except InferenceError:
raise PythranTypeError(
"Incompatible types from different branches for "
"`{}`: `{}` and `{}`".format(
var,
body_env[var],
orelse_env[var]
),
node
)
else:
new_type = body_env[var]
env[var] = new_type
for var in orelse_env:
if var not in env:
# may not be unified by the prev loop if a del occured
if var in body_env:
new_type = merge_unify(orelse_env[var], body_env[var])
else:
new_type = orelse_env[var]
env[var] = new_type
if none_id:
try:
new_type = merge_unify(body_env[none_id], orelse_env[none_id])
except InferenceError:
msg = ("Inconsistent types while merging values of `{}` from "
"conditional branches: `{}` and `{}`")
err = msg.format(none_id,
body_env[none_id],
orelse_env[none_id])
raise PythranTypeError(err, node)
env[none_id] = new_type
return env
elif isinstance(node, gast.While):
test_type = analyse(node.test, env, non_generic)
unify(Function([test_type], Bool()),
tr(MODULES['__builtin__']['bool_']))
analyse_body(node.body, env, non_generic)
analyse_body(node.orelse, env, non_generic)
return env
elif isinstance(node, gast.Try):
analyse_body(node.body, env, non_generic)
for handler in node.handlers:
analyse(handler, env, non_generic)
analyse_body(node.orelse, env, non_generic)
analyse_body(node.finalbody, env, non_generic)
return env
elif isinstance(node, gast.ExceptHandler):
if(node.name):
new_type = ExceptionType
non_generic.add(new_type)
if node.name.id in env:
unify(env[node.name.id], new_type)
else:
env[node.name.id] = new_type
analyse_body(node.body, env, non_generic)
return env
elif isinstance(node, gast.Assert):
if node.msg:
analyse(node.msg, env, non_generic)
analyse(node.test, env, non_generic)
return env
elif isinstance(node, gast.UnaryOp):
operand_type = analyse(node.operand, env, non_generic)
return_type = TypeVariable()
op_type = analyse(node.op, env, non_generic)
unify(Function([operand_type], return_type), op_type)
return return_type
elif isinstance(node, gast.Invert):
return MultiType([Function([Bool()], Integer()),
Function([Integer()], Integer())])
elif isinstance(node, gast.Not):
return tr(MODULES['__builtin__']['bool_'])
elif isinstance(node, gast.BoolOp):
op_type = analyse(node.op, env, non_generic)
value_types = [analyse(value, env, non_generic)
for value in node.values]
for value_type in value_types:
unify(Function([value_type], Bool()),
tr(MODULES['__builtin__']['bool_']))
return_type = TypeVariable()
prev_type = value_types[0]
for value_type in value_types[1:]:
unify(Function([prev_type, value_type], return_type), op_type)
prev_type = value_type
return return_type
elif isinstance(node, (gast.And, gast.Or)):
x_type = TypeVariable()
return MultiType([
Function([x_type, x_type], x_type),
Function([TypeVariable(), TypeVariable()], TypeVariable()),
])
raise RuntimeError("Unhandled syntax node {0}".format(type(node))) | [
"def",
"analyse",
"(",
"node",
",",
"env",
",",
"non_generic",
"=",
"None",
")",
":",
"if",
"non_generic",
"is",
"None",
":",
"non_generic",
"=",
"set",
"(",
")",
"# expr",
"if",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Name",
")",
":",
"if",
"isinstance",
"(",
"node",
".",
"ctx",
",",
"(",
"gast",
".",
"Store",
")",
")",
":",
"new_type",
"=",
"TypeVariable",
"(",
")",
"non_generic",
".",
"add",
"(",
"new_type",
")",
"env",
"[",
"node",
".",
"id",
"]",
"=",
"new_type",
"return",
"get_type",
"(",
"node",
".",
"id",
",",
"env",
",",
"non_generic",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Num",
")",
":",
"if",
"isinstance",
"(",
"node",
".",
"n",
",",
"int",
")",
":",
"return",
"Integer",
"(",
")",
"elif",
"isinstance",
"(",
"node",
".",
"n",
",",
"float",
")",
":",
"return",
"Float",
"(",
")",
"elif",
"isinstance",
"(",
"node",
".",
"n",
",",
"complex",
")",
":",
"return",
"Complex",
"(",
")",
"else",
":",
"raise",
"NotImplementedError",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Str",
")",
":",
"return",
"Str",
"(",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Compare",
")",
":",
"left_type",
"=",
"analyse",
"(",
"node",
".",
"left",
",",
"env",
",",
"non_generic",
")",
"comparators_type",
"=",
"[",
"analyse",
"(",
"comparator",
",",
"env",
",",
"non_generic",
")",
"for",
"comparator",
"in",
"node",
".",
"comparators",
"]",
"ops_type",
"=",
"[",
"analyse",
"(",
"op",
",",
"env",
",",
"non_generic",
")",
"for",
"op",
"in",
"node",
".",
"ops",
"]",
"prev_type",
"=",
"left_type",
"result_type",
"=",
"TypeVariable",
"(",
")",
"for",
"op_type",
",",
"comparator_type",
"in",
"zip",
"(",
"ops_type",
",",
"comparators_type",
")",
":",
"try",
":",
"unify",
"(",
"Function",
"(",
"[",
"prev_type",
",",
"comparator_type",
"]",
",",
"result_type",
")",
",",
"op_type",
")",
"prev_type",
"=",
"comparator_type",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid comparison, between `{}` and `{}`\"",
".",
"format",
"(",
"prev_type",
",",
"comparator_type",
")",
",",
"node",
")",
"return",
"result_type",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Call",
")",
":",
"if",
"is_getattr",
"(",
"node",
")",
":",
"self_type",
"=",
"analyse",
"(",
"node",
".",
"args",
"[",
"0",
"]",
",",
"env",
",",
"non_generic",
")",
"attr_name",
"=",
"node",
".",
"args",
"[",
"1",
"]",
".",
"s",
"_",
",",
"attr_signature",
"=",
"attributes",
"[",
"attr_name",
"]",
"attr_type",
"=",
"tr",
"(",
"attr_signature",
")",
"result_type",
"=",
"TypeVariable",
"(",
")",
"try",
":",
"unify",
"(",
"Function",
"(",
"[",
"self_type",
"]",
",",
"result_type",
")",
",",
"attr_type",
")",
"except",
"InferenceError",
":",
"if",
"isinstance",
"(",
"prune",
"(",
"attr_type",
")",
",",
"MultiType",
")",
":",
"msg",
"=",
"'no attribute found, tried:\\n{}'",
".",
"format",
"(",
"attr_type",
")",
"else",
":",
"msg",
"=",
"'tried {}'",
".",
"format",
"(",
"attr_type",
")",
"raise",
"PythranTypeError",
"(",
"\"Invalid attribute for getattr call with self\"",
"\"of type `{}`, {}\"",
".",
"format",
"(",
"self_type",
",",
"msg",
")",
",",
"node",
")",
"else",
":",
"fun_type",
"=",
"analyse",
"(",
"node",
".",
"func",
",",
"env",
",",
"non_generic",
")",
"arg_types",
"=",
"[",
"analyse",
"(",
"arg",
",",
"env",
",",
"non_generic",
")",
"for",
"arg",
"in",
"node",
".",
"args",
"]",
"result_type",
"=",
"TypeVariable",
"(",
")",
"try",
":",
"unify",
"(",
"Function",
"(",
"arg_types",
",",
"result_type",
")",
",",
"fun_type",
")",
"except",
"InferenceError",
":",
"# recover original type",
"fun_type",
"=",
"analyse",
"(",
"node",
".",
"func",
",",
"env",
",",
"non_generic",
")",
"if",
"isinstance",
"(",
"prune",
"(",
"fun_type",
")",
",",
"MultiType",
")",
":",
"msg",
"=",
"'no overload found, tried:\\n{}'",
".",
"format",
"(",
"fun_type",
")",
"else",
":",
"msg",
"=",
"'tried {}'",
".",
"format",
"(",
"fun_type",
")",
"raise",
"PythranTypeError",
"(",
"\"Invalid argument type for function call to \"",
"\"`Callable[[{}], ...]`, {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"'{}'",
".",
"format",
"(",
"at",
")",
"for",
"at",
"in",
"arg_types",
")",
",",
"msg",
")",
",",
"node",
")",
"return",
"result_type",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"IfExp",
")",
":",
"test_type",
"=",
"analyse",
"(",
"node",
".",
"test",
",",
"env",
",",
"non_generic",
")",
"unify",
"(",
"Function",
"(",
"[",
"test_type",
"]",
",",
"Bool",
"(",
")",
")",
",",
"tr",
"(",
"MODULES",
"[",
"'__builtin__'",
"]",
"[",
"'bool_'",
"]",
")",
")",
"if",
"is_test_is_none",
"(",
"node",
".",
"test",
")",
":",
"none_id",
"=",
"node",
".",
"test",
".",
"left",
".",
"id",
"body_env",
"=",
"env",
".",
"copy",
"(",
")",
"body_env",
"[",
"none_id",
"]",
"=",
"NoneType",
"else",
":",
"none_id",
"=",
"None",
"body_env",
"=",
"env",
"body_type",
"=",
"analyse",
"(",
"node",
".",
"body",
",",
"body_env",
",",
"non_generic",
")",
"if",
"none_id",
":",
"orelse_env",
"=",
"env",
".",
"copy",
"(",
")",
"if",
"is_option_type",
"(",
"env",
"[",
"none_id",
"]",
")",
":",
"orelse_env",
"[",
"none_id",
"]",
"=",
"prune",
"(",
"env",
"[",
"none_id",
"]",
")",
".",
"types",
"[",
"0",
"]",
"else",
":",
"orelse_env",
"[",
"none_id",
"]",
"=",
"TypeVariable",
"(",
")",
"else",
":",
"orelse_env",
"=",
"env",
"orelse_type",
"=",
"analyse",
"(",
"node",
".",
"orelse",
",",
"orelse_env",
",",
"non_generic",
")",
"try",
":",
"return",
"merge_unify",
"(",
"body_type",
",",
"orelse_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Incompatible types from different branches:\"",
"\"`{}` and `{}`\"",
".",
"format",
"(",
"body_type",
",",
"orelse_type",
")",
",",
"node",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"UnaryOp",
")",
":",
"operand_type",
"=",
"analyse",
"(",
"node",
".",
"operand",
",",
"env",
",",
"non_generic",
")",
"op_type",
"=",
"analyse",
"(",
"node",
".",
"op",
",",
"env",
",",
"non_generic",
")",
"result_type",
"=",
"TypeVariable",
"(",
")",
"try",
":",
"unify",
"(",
"Function",
"(",
"[",
"operand_type",
"]",
",",
"result_type",
")",
",",
"op_type",
")",
"return",
"result_type",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid operand for `{}`: `{}`\"",
".",
"format",
"(",
"symbol_of",
"[",
"type",
"(",
"node",
".",
"op",
")",
"]",
",",
"operand_type",
")",
",",
"node",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"BinOp",
")",
":",
"left_type",
"=",
"analyse",
"(",
"node",
".",
"left",
",",
"env",
",",
"non_generic",
")",
"op_type",
"=",
"analyse",
"(",
"node",
".",
"op",
",",
"env",
",",
"non_generic",
")",
"right_type",
"=",
"analyse",
"(",
"node",
".",
"right",
",",
"env",
",",
"non_generic",
")",
"result_type",
"=",
"TypeVariable",
"(",
")",
"try",
":",
"unify",
"(",
"Function",
"(",
"[",
"left_type",
",",
"right_type",
"]",
",",
"result_type",
")",
",",
"op_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid operand for `{}`: `{}` and `{}`\"",
".",
"format",
"(",
"symbol_of",
"[",
"type",
"(",
"node",
".",
"op",
")",
"]",
",",
"left_type",
",",
"right_type",
")",
",",
"node",
")",
"return",
"result_type",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Pow",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'numpy'",
"]",
"[",
"'power'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Sub",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'sub'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"USub",
",",
"gast",
".",
"UAdd",
")",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'pos'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"Eq",
",",
"gast",
".",
"NotEq",
",",
"gast",
".",
"Lt",
",",
"gast",
".",
"LtE",
",",
"gast",
".",
"Gt",
",",
"gast",
".",
"GtE",
",",
"gast",
".",
"Is",
",",
"gast",
".",
"IsNot",
")",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'eq'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"In",
",",
"gast",
".",
"NotIn",
")",
")",
":",
"contains_sig",
"=",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'contains'",
"]",
")",
"contains_sig",
".",
"types",
"[",
":",
"-",
"1",
"]",
"=",
"reversed",
"(",
"contains_sig",
".",
"types",
"[",
":",
"-",
"1",
"]",
")",
"return",
"contains_sig",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Add",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'add'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Mult",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'mul'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"MatMult",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'matmul'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"Div",
",",
"gast",
".",
"FloorDiv",
")",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'floordiv'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Mod",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'mod'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"LShift",
",",
"gast",
".",
"RShift",
")",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'lshift'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"BitXor",
",",
"gast",
".",
"BitAnd",
",",
"gast",
".",
"BitOr",
")",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'lshift'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"List",
")",
":",
"new_type",
"=",
"TypeVariable",
"(",
")",
"for",
"elt",
"in",
"node",
".",
"elts",
":",
"elt_type",
"=",
"analyse",
"(",
"elt",
",",
"env",
",",
"non_generic",
")",
"try",
":",
"unify",
"(",
"new_type",
",",
"elt_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Incompatible list element type `{}` and `{}`\"",
".",
"format",
"(",
"new_type",
",",
"elt_type",
")",
",",
"node",
")",
"return",
"List",
"(",
"new_type",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Set",
")",
":",
"new_type",
"=",
"TypeVariable",
"(",
")",
"for",
"elt",
"in",
"node",
".",
"elts",
":",
"elt_type",
"=",
"analyse",
"(",
"elt",
",",
"env",
",",
"non_generic",
")",
"try",
":",
"unify",
"(",
"new_type",
",",
"elt_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Incompatible set element type `{}` and `{}`\"",
".",
"format",
"(",
"new_type",
",",
"elt_type",
")",
",",
"node",
")",
"return",
"Set",
"(",
"new_type",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Dict",
")",
":",
"new_key_type",
"=",
"TypeVariable",
"(",
")",
"for",
"key",
"in",
"node",
".",
"keys",
":",
"key_type",
"=",
"analyse",
"(",
"key",
",",
"env",
",",
"non_generic",
")",
"try",
":",
"unify",
"(",
"new_key_type",
",",
"key_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Incompatible dict key type `{}` and `{}`\"",
".",
"format",
"(",
"new_key_type",
",",
"key_type",
")",
",",
"node",
")",
"new_value_type",
"=",
"TypeVariable",
"(",
")",
"for",
"value",
"in",
"node",
".",
"values",
":",
"value_type",
"=",
"analyse",
"(",
"value",
",",
"env",
",",
"non_generic",
")",
"try",
":",
"unify",
"(",
"new_value_type",
",",
"value_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Incompatible dict value type `{}` and `{}`\"",
".",
"format",
"(",
"new_value_type",
",",
"value_type",
")",
",",
"node",
")",
"return",
"Dict",
"(",
"new_key_type",
",",
"new_value_type",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Tuple",
")",
":",
"return",
"Tuple",
"(",
"[",
"analyse",
"(",
"elt",
",",
"env",
",",
"non_generic",
")",
"for",
"elt",
"in",
"node",
".",
"elts",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Index",
")",
":",
"return",
"analyse",
"(",
"node",
".",
"value",
",",
"env",
",",
"non_generic",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Slice",
")",
":",
"def",
"unify_int_or_none",
"(",
"t",
",",
"name",
")",
":",
"try",
":",
"unify",
"(",
"t",
",",
"Integer",
"(",
")",
")",
"except",
"InferenceError",
":",
"try",
":",
"unify",
"(",
"t",
",",
"NoneType",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid slice {} type `{}`, expecting int or None\"",
".",
"format",
"(",
"name",
",",
"t",
")",
")",
"if",
"node",
".",
"lower",
":",
"lower_type",
"=",
"analyse",
"(",
"node",
".",
"lower",
",",
"env",
",",
"non_generic",
")",
"unify_int_or_none",
"(",
"lower_type",
",",
"'lower bound'",
")",
"else",
":",
"lower_type",
"=",
"Integer",
"(",
")",
"if",
"node",
".",
"upper",
":",
"upper_type",
"=",
"analyse",
"(",
"node",
".",
"upper",
",",
"env",
",",
"non_generic",
")",
"unify_int_or_none",
"(",
"upper_type",
",",
"'upper bound'",
")",
"else",
":",
"upper_type",
"=",
"Integer",
"(",
")",
"if",
"node",
".",
"step",
":",
"step_type",
"=",
"analyse",
"(",
"node",
".",
"step",
",",
"env",
",",
"non_generic",
")",
"unify_int_or_none",
"(",
"step_type",
",",
"'step'",
")",
"else",
":",
"step_type",
"=",
"Integer",
"(",
")",
"return",
"Slice",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"ExtSlice",
")",
":",
"return",
"[",
"analyse",
"(",
"dim",
",",
"env",
",",
"non_generic",
")",
"for",
"dim",
"in",
"node",
".",
"dims",
"]",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"NameConstant",
")",
":",
"if",
"node",
".",
"value",
"is",
"None",
":",
"return",
"env",
"[",
"'None'",
"]",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Subscript",
")",
":",
"new_type",
"=",
"TypeVariable",
"(",
")",
"value_type",
"=",
"prune",
"(",
"analyse",
"(",
"node",
".",
"value",
",",
"env",
",",
"non_generic",
")",
")",
"try",
":",
"slice_type",
"=",
"prune",
"(",
"analyse",
"(",
"node",
".",
"slice",
",",
"env",
",",
"non_generic",
")",
")",
"except",
"PythranTypeError",
"as",
"e",
":",
"raise",
"PythranTypeError",
"(",
"e",
".",
"msg",
",",
"node",
")",
"if",
"isinstance",
"(",
"node",
".",
"slice",
",",
"gast",
".",
"ExtSlice",
")",
":",
"nbslice",
"=",
"len",
"(",
"node",
".",
"slice",
".",
"dims",
")",
"dtype",
"=",
"TypeVariable",
"(",
")",
"try",
":",
"unify",
"(",
"Array",
"(",
"dtype",
",",
"nbslice",
")",
",",
"clone",
"(",
"value_type",
")",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Dimension mismatch when slicing `{}`\"",
".",
"format",
"(",
"value_type",
")",
",",
"node",
")",
"return",
"TypeVariable",
"(",
")",
"# FIXME",
"elif",
"isinstance",
"(",
"node",
".",
"slice",
",",
"gast",
".",
"Index",
")",
":",
"# handle tuples in a special way",
"isnum",
"=",
"isinstance",
"(",
"node",
".",
"slice",
".",
"value",
",",
"gast",
".",
"Num",
")",
"if",
"isnum",
"and",
"is_tuple_type",
"(",
"value_type",
")",
":",
"try",
":",
"unify",
"(",
"prune",
"(",
"prune",
"(",
"value_type",
".",
"types",
"[",
"0",
"]",
")",
".",
"types",
"[",
"0",
"]",
")",
".",
"types",
"[",
"node",
".",
"slice",
".",
"value",
".",
"n",
"]",
",",
"new_type",
")",
"return",
"new_type",
"except",
"IndexError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid tuple indexing, \"",
"\"out-of-bound index `{}` for type `{}`\"",
".",
"format",
"(",
"node",
".",
"slice",
".",
"value",
".",
"n",
",",
"value_type",
")",
",",
"node",
")",
"try",
":",
"unify",
"(",
"tr",
"(",
"MODULES",
"[",
"'operator_'",
"]",
"[",
"'getitem'",
"]",
")",
",",
"Function",
"(",
"[",
"value_type",
",",
"slice_type",
"]",
",",
"new_type",
")",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid subscripting of `{}` by `{}`\"",
".",
"format",
"(",
"value_type",
",",
"slice_type",
")",
",",
"node",
")",
"return",
"new_type",
"return",
"new_type",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Attribute",
")",
":",
"from",
"pythran",
".",
"utils",
"import",
"attr_to_path",
"obj",
",",
"path",
"=",
"attr_to_path",
"(",
"node",
")",
"if",
"obj",
".",
"signature",
"is",
"typing",
".",
"Any",
":",
"return",
"TypeVariable",
"(",
")",
"else",
":",
"return",
"tr",
"(",
"obj",
")",
"# stmt",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Import",
")",
":",
"for",
"alias",
"in",
"node",
".",
"names",
":",
"if",
"alias",
".",
"name",
"not",
"in",
"MODULES",
":",
"raise",
"NotImplementedError",
"(",
"\"unknown module: %s \"",
"%",
"alias",
".",
"name",
")",
"if",
"alias",
".",
"asname",
"is",
"None",
":",
"target",
"=",
"alias",
".",
"name",
"else",
":",
"target",
"=",
"alias",
".",
"asname",
"env",
"[",
"target",
"]",
"=",
"tr",
"(",
"MODULES",
"[",
"alias",
".",
"name",
"]",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"ImportFrom",
")",
":",
"if",
"node",
".",
"module",
"not",
"in",
"MODULES",
":",
"raise",
"NotImplementedError",
"(",
"\"unknown module: %s\"",
"%",
"node",
".",
"module",
")",
"for",
"alias",
"in",
"node",
".",
"names",
":",
"if",
"alias",
".",
"name",
"not",
"in",
"MODULES",
"[",
"node",
".",
"module",
"]",
":",
"raise",
"NotImplementedError",
"(",
"\"unknown function: %s in %s\"",
"%",
"(",
"alias",
".",
"name",
",",
"node",
".",
"module",
")",
")",
"if",
"alias",
".",
"asname",
"is",
"None",
":",
"target",
"=",
"alias",
".",
"name",
"else",
":",
"target",
"=",
"alias",
".",
"asname",
"env",
"[",
"target",
"]",
"=",
"tr",
"(",
"MODULES",
"[",
"node",
".",
"module",
"]",
"[",
"alias",
".",
"name",
"]",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"FunctionDef",
")",
":",
"ftypes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
"+",
"len",
"(",
"node",
".",
"args",
".",
"defaults",
")",
")",
":",
"new_env",
"=",
"env",
".",
"copy",
"(",
")",
"new_non_generic",
"=",
"non_generic",
".",
"copy",
"(",
")",
"# reset return special variables",
"new_env",
".",
"pop",
"(",
"'@ret'",
",",
"None",
")",
"new_env",
".",
"pop",
"(",
"'@gen'",
",",
"None",
")",
"hy",
"=",
"HasYield",
"(",
")",
"for",
"stmt",
"in",
"node",
".",
"body",
":",
"hy",
".",
"visit",
"(",
"stmt",
")",
"new_env",
"[",
"'@gen'",
"]",
"=",
"hy",
".",
"has_yield",
"arg_types",
"=",
"[",
"]",
"istop",
"=",
"len",
"(",
"node",
".",
"args",
".",
"args",
")",
"-",
"i",
"for",
"arg",
"in",
"node",
".",
"args",
".",
"args",
"[",
":",
"istop",
"]",
":",
"arg_type",
"=",
"TypeVariable",
"(",
")",
"new_env",
"[",
"arg",
".",
"id",
"]",
"=",
"arg_type",
"new_non_generic",
".",
"add",
"(",
"arg_type",
")",
"arg_types",
".",
"append",
"(",
"arg_type",
")",
"for",
"arg",
",",
"expr",
"in",
"zip",
"(",
"node",
".",
"args",
".",
"args",
"[",
"istop",
":",
"]",
",",
"node",
".",
"args",
".",
"defaults",
"[",
"-",
"i",
":",
"]",
")",
":",
"arg_type",
"=",
"analyse",
"(",
"expr",
",",
"new_env",
",",
"new_non_generic",
")",
"new_env",
"[",
"arg",
".",
"id",
"]",
"=",
"arg_type",
"analyse_body",
"(",
"node",
".",
"body",
",",
"new_env",
",",
"new_non_generic",
")",
"result_type",
"=",
"new_env",
".",
"get",
"(",
"'@ret'",
",",
"NoneType",
")",
"if",
"new_env",
"[",
"'@gen'",
"]",
":",
"result_type",
"=",
"Generator",
"(",
"result_type",
")",
"ftype",
"=",
"Function",
"(",
"arg_types",
",",
"result_type",
")",
"ftypes",
".",
"append",
"(",
"ftype",
")",
"if",
"len",
"(",
"ftypes",
")",
"==",
"1",
":",
"ftype",
"=",
"ftypes",
"[",
"0",
"]",
"env",
"[",
"node",
".",
"name",
"]",
"=",
"ftype",
"else",
":",
"env",
"[",
"node",
".",
"name",
"]",
"=",
"MultiType",
"(",
"ftypes",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Module",
")",
":",
"analyse_body",
"(",
"node",
".",
"body",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"Pass",
",",
"gast",
".",
"Break",
",",
"gast",
".",
"Continue",
")",
")",
":",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Expr",
")",
":",
"analyse",
"(",
"node",
".",
"value",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Delete",
")",
":",
"for",
"target",
"in",
"node",
".",
"targets",
":",
"if",
"isinstance",
"(",
"target",
",",
"gast",
".",
"Name",
")",
":",
"if",
"target",
".",
"id",
"in",
"env",
":",
"del",
"env",
"[",
"target",
".",
"id",
"]",
"else",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid del: unbound identifier `{}`\"",
".",
"format",
"(",
"target",
".",
"id",
")",
",",
"node",
")",
"else",
":",
"analyse",
"(",
"target",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Print",
")",
":",
"if",
"node",
".",
"dest",
"is",
"not",
"None",
":",
"analyse",
"(",
"node",
".",
"dest",
",",
"env",
",",
"non_generic",
")",
"for",
"value",
"in",
"node",
".",
"values",
":",
"analyse",
"(",
"value",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Assign",
")",
":",
"defn_type",
"=",
"analyse",
"(",
"node",
".",
"value",
",",
"env",
",",
"non_generic",
")",
"for",
"target",
"in",
"node",
".",
"targets",
":",
"target_type",
"=",
"analyse",
"(",
"target",
",",
"env",
",",
"non_generic",
")",
"try",
":",
"unify",
"(",
"target_type",
",",
"defn_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid assignment from type `{}` to type `{}`\"",
".",
"format",
"(",
"target_type",
",",
"defn_type",
")",
",",
"node",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"AugAssign",
")",
":",
"# FIMXE: not optimal: evaluates type of node.value twice",
"fake_target",
"=",
"deepcopy",
"(",
"node",
".",
"target",
")",
"fake_target",
".",
"ctx",
"=",
"gast",
".",
"Load",
"(",
")",
"fake_op",
"=",
"gast",
".",
"BinOp",
"(",
"fake_target",
",",
"node",
".",
"op",
",",
"node",
".",
"value",
")",
"gast",
".",
"copy_location",
"(",
"fake_op",
",",
"node",
")",
"res_type",
"=",
"analyse",
"(",
"fake_op",
",",
"env",
",",
"non_generic",
")",
"target_type",
"=",
"analyse",
"(",
"node",
".",
"target",
",",
"env",
",",
"non_generic",
")",
"try",
":",
"unify",
"(",
"target_type",
",",
"res_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Invalid update operand for `{}`: `{}` and `{}`\"",
".",
"format",
"(",
"symbol_of",
"[",
"type",
"(",
"node",
".",
"op",
")",
"]",
",",
"res_type",
",",
"target_type",
")",
",",
"node",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Raise",
")",
":",
"return",
"env",
"# TODO",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Return",
")",
":",
"if",
"env",
"[",
"'@gen'",
"]",
":",
"return",
"env",
"if",
"node",
".",
"value",
"is",
"None",
":",
"ret_type",
"=",
"NoneType",
"else",
":",
"ret_type",
"=",
"analyse",
"(",
"node",
".",
"value",
",",
"env",
",",
"non_generic",
")",
"if",
"'@ret'",
"in",
"env",
":",
"try",
":",
"ret_type",
"=",
"merge_unify",
"(",
"env",
"[",
"'@ret'",
"]",
",",
"ret_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"function may returns with incompatible types \"",
"\"`{}` and `{}`\"",
".",
"format",
"(",
"env",
"[",
"'@ret'",
"]",
",",
"ret_type",
")",
",",
"node",
")",
"env",
"[",
"'@ret'",
"]",
"=",
"ret_type",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Yield",
")",
":",
"assert",
"env",
"[",
"'@gen'",
"]",
"assert",
"node",
".",
"value",
"is",
"not",
"None",
"if",
"node",
".",
"value",
"is",
"None",
":",
"ret_type",
"=",
"NoneType",
"else",
":",
"ret_type",
"=",
"analyse",
"(",
"node",
".",
"value",
",",
"env",
",",
"non_generic",
")",
"if",
"'@ret'",
"in",
"env",
":",
"try",
":",
"ret_type",
"=",
"merge_unify",
"(",
"env",
"[",
"'@ret'",
"]",
",",
"ret_type",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"function may yields incompatible types \"",
"\"`{}` and `{}`\"",
".",
"format",
"(",
"env",
"[",
"'@ret'",
"]",
",",
"ret_type",
")",
",",
"node",
")",
"env",
"[",
"'@ret'",
"]",
"=",
"ret_type",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"For",
")",
":",
"iter_type",
"=",
"analyse",
"(",
"node",
".",
"iter",
",",
"env",
",",
"non_generic",
")",
"target_type",
"=",
"analyse",
"(",
"node",
".",
"target",
",",
"env",
",",
"non_generic",
")",
"unify",
"(",
"Collection",
"(",
"TypeVariable",
"(",
")",
",",
"TypeVariable",
"(",
")",
",",
"TypeVariable",
"(",
")",
",",
"target_type",
")",
",",
"iter_type",
")",
"analyse_body",
"(",
"node",
".",
"body",
",",
"env",
",",
"non_generic",
")",
"analyse_body",
"(",
"node",
".",
"orelse",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"If",
")",
":",
"test_type",
"=",
"analyse",
"(",
"node",
".",
"test",
",",
"env",
",",
"non_generic",
")",
"unify",
"(",
"Function",
"(",
"[",
"test_type",
"]",
",",
"Bool",
"(",
")",
")",
",",
"tr",
"(",
"MODULES",
"[",
"'__builtin__'",
"]",
"[",
"'bool_'",
"]",
")",
")",
"body_env",
"=",
"env",
".",
"copy",
"(",
")",
"body_non_generic",
"=",
"non_generic",
".",
"copy",
"(",
")",
"if",
"is_test_is_none",
"(",
"node",
".",
"test",
")",
":",
"none_id",
"=",
"node",
".",
"test",
".",
"left",
".",
"id",
"body_env",
"[",
"none_id",
"]",
"=",
"NoneType",
"else",
":",
"none_id",
"=",
"None",
"analyse_body",
"(",
"node",
".",
"body",
",",
"body_env",
",",
"body_non_generic",
")",
"orelse_env",
"=",
"env",
".",
"copy",
"(",
")",
"orelse_non_generic",
"=",
"non_generic",
".",
"copy",
"(",
")",
"if",
"none_id",
":",
"if",
"is_option_type",
"(",
"env",
"[",
"none_id",
"]",
")",
":",
"orelse_env",
"[",
"none_id",
"]",
"=",
"prune",
"(",
"env",
"[",
"none_id",
"]",
")",
".",
"types",
"[",
"0",
"]",
"else",
":",
"orelse_env",
"[",
"none_id",
"]",
"=",
"TypeVariable",
"(",
")",
"analyse_body",
"(",
"node",
".",
"orelse",
",",
"orelse_env",
",",
"orelse_non_generic",
")",
"for",
"var",
"in",
"body_env",
":",
"if",
"var",
"not",
"in",
"env",
":",
"if",
"var",
"in",
"orelse_env",
":",
"try",
":",
"new_type",
"=",
"merge_unify",
"(",
"body_env",
"[",
"var",
"]",
",",
"orelse_env",
"[",
"var",
"]",
")",
"except",
"InferenceError",
":",
"raise",
"PythranTypeError",
"(",
"\"Incompatible types from different branches for \"",
"\"`{}`: `{}` and `{}`\"",
".",
"format",
"(",
"var",
",",
"body_env",
"[",
"var",
"]",
",",
"orelse_env",
"[",
"var",
"]",
")",
",",
"node",
")",
"else",
":",
"new_type",
"=",
"body_env",
"[",
"var",
"]",
"env",
"[",
"var",
"]",
"=",
"new_type",
"for",
"var",
"in",
"orelse_env",
":",
"if",
"var",
"not",
"in",
"env",
":",
"# may not be unified by the prev loop if a del occured",
"if",
"var",
"in",
"body_env",
":",
"new_type",
"=",
"merge_unify",
"(",
"orelse_env",
"[",
"var",
"]",
",",
"body_env",
"[",
"var",
"]",
")",
"else",
":",
"new_type",
"=",
"orelse_env",
"[",
"var",
"]",
"env",
"[",
"var",
"]",
"=",
"new_type",
"if",
"none_id",
":",
"try",
":",
"new_type",
"=",
"merge_unify",
"(",
"body_env",
"[",
"none_id",
"]",
",",
"orelse_env",
"[",
"none_id",
"]",
")",
"except",
"InferenceError",
":",
"msg",
"=",
"(",
"\"Inconsistent types while merging values of `{}` from \"",
"\"conditional branches: `{}` and `{}`\"",
")",
"err",
"=",
"msg",
".",
"format",
"(",
"none_id",
",",
"body_env",
"[",
"none_id",
"]",
",",
"orelse_env",
"[",
"none_id",
"]",
")",
"raise",
"PythranTypeError",
"(",
"err",
",",
"node",
")",
"env",
"[",
"none_id",
"]",
"=",
"new_type",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"While",
")",
":",
"test_type",
"=",
"analyse",
"(",
"node",
".",
"test",
",",
"env",
",",
"non_generic",
")",
"unify",
"(",
"Function",
"(",
"[",
"test_type",
"]",
",",
"Bool",
"(",
")",
")",
",",
"tr",
"(",
"MODULES",
"[",
"'__builtin__'",
"]",
"[",
"'bool_'",
"]",
")",
")",
"analyse_body",
"(",
"node",
".",
"body",
",",
"env",
",",
"non_generic",
")",
"analyse_body",
"(",
"node",
".",
"orelse",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Try",
")",
":",
"analyse_body",
"(",
"node",
".",
"body",
",",
"env",
",",
"non_generic",
")",
"for",
"handler",
"in",
"node",
".",
"handlers",
":",
"analyse",
"(",
"handler",
",",
"env",
",",
"non_generic",
")",
"analyse_body",
"(",
"node",
".",
"orelse",
",",
"env",
",",
"non_generic",
")",
"analyse_body",
"(",
"node",
".",
"finalbody",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"ExceptHandler",
")",
":",
"if",
"(",
"node",
".",
"name",
")",
":",
"new_type",
"=",
"ExceptionType",
"non_generic",
".",
"add",
"(",
"new_type",
")",
"if",
"node",
".",
"name",
".",
"id",
"in",
"env",
":",
"unify",
"(",
"env",
"[",
"node",
".",
"name",
".",
"id",
"]",
",",
"new_type",
")",
"else",
":",
"env",
"[",
"node",
".",
"name",
".",
"id",
"]",
"=",
"new_type",
"analyse_body",
"(",
"node",
".",
"body",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Assert",
")",
":",
"if",
"node",
".",
"msg",
":",
"analyse",
"(",
"node",
".",
"msg",
",",
"env",
",",
"non_generic",
")",
"analyse",
"(",
"node",
".",
"test",
",",
"env",
",",
"non_generic",
")",
"return",
"env",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"UnaryOp",
")",
":",
"operand_type",
"=",
"analyse",
"(",
"node",
".",
"operand",
",",
"env",
",",
"non_generic",
")",
"return_type",
"=",
"TypeVariable",
"(",
")",
"op_type",
"=",
"analyse",
"(",
"node",
".",
"op",
",",
"env",
",",
"non_generic",
")",
"unify",
"(",
"Function",
"(",
"[",
"operand_type",
"]",
",",
"return_type",
")",
",",
"op_type",
")",
"return",
"return_type",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Invert",
")",
":",
"return",
"MultiType",
"(",
"[",
"Function",
"(",
"[",
"Bool",
"(",
")",
"]",
",",
"Integer",
"(",
")",
")",
",",
"Function",
"(",
"[",
"Integer",
"(",
")",
"]",
",",
"Integer",
"(",
")",
")",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Not",
")",
":",
"return",
"tr",
"(",
"MODULES",
"[",
"'__builtin__'",
"]",
"[",
"'bool_'",
"]",
")",
"elif",
"isinstance",
"(",
"node",
",",
"gast",
".",
"BoolOp",
")",
":",
"op_type",
"=",
"analyse",
"(",
"node",
".",
"op",
",",
"env",
",",
"non_generic",
")",
"value_types",
"=",
"[",
"analyse",
"(",
"value",
",",
"env",
",",
"non_generic",
")",
"for",
"value",
"in",
"node",
".",
"values",
"]",
"for",
"value_type",
"in",
"value_types",
":",
"unify",
"(",
"Function",
"(",
"[",
"value_type",
"]",
",",
"Bool",
"(",
")",
")",
",",
"tr",
"(",
"MODULES",
"[",
"'__builtin__'",
"]",
"[",
"'bool_'",
"]",
")",
")",
"return_type",
"=",
"TypeVariable",
"(",
")",
"prev_type",
"=",
"value_types",
"[",
"0",
"]",
"for",
"value_type",
"in",
"value_types",
"[",
"1",
":",
"]",
":",
"unify",
"(",
"Function",
"(",
"[",
"prev_type",
",",
"value_type",
"]",
",",
"return_type",
")",
",",
"op_type",
")",
"prev_type",
"=",
"value_type",
"return",
"return_type",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"And",
",",
"gast",
".",
"Or",
")",
")",
":",
"x_type",
"=",
"TypeVariable",
"(",
")",
"return",
"MultiType",
"(",
"[",
"Function",
"(",
"[",
"x_type",
",",
"x_type",
"]",
",",
"x_type",
")",
",",
"Function",
"(",
"[",
"TypeVariable",
"(",
")",
",",
"TypeVariable",
"(",
")",
"]",
",",
"TypeVariable",
"(",
")",
")",
",",
"]",
")",
"raise",
"RuntimeError",
"(",
"\"Unhandled syntax node {0}\"",
".",
"format",
"(",
"type",
"(",
"node",
")",
")",
")"
] | 37.733025 | 14.825617 |
def transform(self, matrix):
"""Modifies the current transformation matrix (CTM)
by applying :obj:`matrix` as an additional transformation.
The new transformation of user space takes place
after any existing transformation.
:param matrix:
A transformation :class:`Matrix`
to be applied to the user-space axes.
"""
cairo.cairo_transform(self._pointer, matrix._pointer)
self._check_status() | [
"def",
"transform",
"(",
"self",
",",
"matrix",
")",
":",
"cairo",
".",
"cairo_transform",
"(",
"self",
".",
"_pointer",
",",
"matrix",
".",
"_pointer",
")",
"self",
".",
"_check_status",
"(",
")"
] | 35.846154 | 15.384615 |
def write(self, symbol, data, prune_previous_version=True, metadata=None, **kwargs):
"""
Records a write request to be actioned on context exit. Takes exactly the same parameters as the regular
library write call.
"""
if data is not None:
# We only write data if existing data is None or the Timeseries data has changed or metadata has changed
if self.base_ts.data is None or not are_equals(data, self.base_ts.data) or metadata != self.base_ts.metadata:
self._do_write = True
self._write = partial(self._version_store.write, symbol, data, prune_previous_version=prune_previous_version,
metadata=metadata, **kwargs) | [
"def",
"write",
"(",
"self",
",",
"symbol",
",",
"data",
",",
"prune_previous_version",
"=",
"True",
",",
"metadata",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"data",
"is",
"not",
"None",
":",
"# We only write data if existing data is None or the Timeseries data has changed or metadata has changed",
"if",
"self",
".",
"base_ts",
".",
"data",
"is",
"None",
"or",
"not",
"are_equals",
"(",
"data",
",",
"self",
".",
"base_ts",
".",
"data",
")",
"or",
"metadata",
"!=",
"self",
".",
"base_ts",
".",
"metadata",
":",
"self",
".",
"_do_write",
"=",
"True",
"self",
".",
"_write",
"=",
"partial",
"(",
"self",
".",
"_version_store",
".",
"write",
",",
"symbol",
",",
"data",
",",
"prune_previous_version",
"=",
"prune_previous_version",
",",
"metadata",
"=",
"metadata",
",",
"*",
"*",
"kwargs",
")"
] | 65.636364 | 36 |
def get_frequency_grid(times,
samplesperpeak=5,
nyquistfactor=5,
minfreq=None,
maxfreq=None,
returnf0dfnf=False):
'''This calculates a frequency grid for the period finding functions in this
module.
Based on the autofrequency function in astropy.stats.lombscargle.
http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency
Parameters
----------
times : np.array
The times to use to generate the frequency grid over.
samplesperpeak : int
The minimum sample coverage each frequency point in the grid will get.
nyquistfactor : int
The multiplier over the Nyquist rate to use.
minfreq,maxfreq : float or None
If not None, these will be the limits of the frequency grid generated.
returnf0dfnf : bool
If this is True, will return the values of `f0`, `df`, and `Nf`
generated for this grid.
Returns
-------
np.array
A grid of frequencies.
'''
baseline = times.max() - times.min()
nsamples = times.size
df = 1. / baseline / samplesperpeak
if minfreq is not None:
f0 = minfreq
else:
f0 = 0.5 * df
if maxfreq is not None:
Nf = int(np.ceil((maxfreq - f0) / df))
else:
Nf = int(0.5 * samplesperpeak * nyquistfactor * nsamples)
if returnf0dfnf:
return f0, df, Nf, f0 + df * np.arange(Nf)
else:
return f0 + df * np.arange(Nf) | [
"def",
"get_frequency_grid",
"(",
"times",
",",
"samplesperpeak",
"=",
"5",
",",
"nyquistfactor",
"=",
"5",
",",
"minfreq",
"=",
"None",
",",
"maxfreq",
"=",
"None",
",",
"returnf0dfnf",
"=",
"False",
")",
":",
"baseline",
"=",
"times",
".",
"max",
"(",
")",
"-",
"times",
".",
"min",
"(",
")",
"nsamples",
"=",
"times",
".",
"size",
"df",
"=",
"1.",
"/",
"baseline",
"/",
"samplesperpeak",
"if",
"minfreq",
"is",
"not",
"None",
":",
"f0",
"=",
"minfreq",
"else",
":",
"f0",
"=",
"0.5",
"*",
"df",
"if",
"maxfreq",
"is",
"not",
"None",
":",
"Nf",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"maxfreq",
"-",
"f0",
")",
"/",
"df",
")",
")",
"else",
":",
"Nf",
"=",
"int",
"(",
"0.5",
"*",
"samplesperpeak",
"*",
"nyquistfactor",
"*",
"nsamples",
")",
"if",
"returnf0dfnf",
":",
"return",
"f0",
",",
"df",
",",
"Nf",
",",
"f0",
"+",
"df",
"*",
"np",
".",
"arange",
"(",
"Nf",
")",
"else",
":",
"return",
"f0",
"+",
"df",
"*",
"np",
".",
"arange",
"(",
"Nf",
")"
] | 25.533333 | 25.166667 |
def storage_volumes(self):
"""
:class:`~zhmcclient.StorageVolumeManager`: Access to the
:term:`storage volumes <storage volume>` in this storage group.
"""
# We do here some lazy loading.
if not self._storage_volumes:
self._storage_volumes = StorageVolumeManager(self)
return self._storage_volumes | [
"def",
"storage_volumes",
"(",
"self",
")",
":",
"# We do here some lazy loading.",
"if",
"not",
"self",
".",
"_storage_volumes",
":",
"self",
".",
"_storage_volumes",
"=",
"StorageVolumeManager",
"(",
"self",
")",
"return",
"self",
".",
"_storage_volumes"
] | 39.666667 | 11 |
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
serv = _get_serv(ret=None)
ret = {}
for minion, data in six.iteritems(serv.hgetall('ret:{0}'.format(jid))):
if data:
ret[minion] = salt.utils.json.loads(data)
return ret | [
"def",
"get_jid",
"(",
"jid",
")",
":",
"serv",
"=",
"_get_serv",
"(",
"ret",
"=",
"None",
")",
"ret",
"=",
"{",
"}",
"for",
"minion",
",",
"data",
"in",
"six",
".",
"iteritems",
"(",
"serv",
".",
"hgetall",
"(",
"'ret:{0}'",
".",
"format",
"(",
"jid",
")",
")",
")",
":",
"if",
"data",
":",
"ret",
"[",
"minion",
"]",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"data",
")",
"return",
"ret"
] | 30.5 | 25.9 |
def on_deleted(self, event):
"""
Event Handler when a file is deleted
"""
key = 'filesystem:file_deleted'
data = {
'filepath': event.src_path,
'is_directory': event.is_directory,
'dirpath': os.path.dirname(event.src_path)
}
bmsg = BroadcastMessage(key=key, data=data)
BroadcastManager.broadcast(bmsg) | [
"def",
"on_deleted",
"(",
"self",
",",
"event",
")",
":",
"key",
"=",
"'filesystem:file_deleted'",
"data",
"=",
"{",
"'filepath'",
":",
"event",
".",
"src_path",
",",
"'is_directory'",
":",
"event",
".",
"is_directory",
",",
"'dirpath'",
":",
"os",
".",
"path",
".",
"dirname",
"(",
"event",
".",
"src_path",
")",
"}",
"bmsg",
"=",
"BroadcastMessage",
"(",
"key",
"=",
"key",
",",
"data",
"=",
"data",
")",
"BroadcastManager",
".",
"broadcast",
"(",
"bmsg",
")"
] | 29.923077 | 11.153846 |
def _get_report(self, with_line_nums=True):
"""
Returns a report which includes each distinct error only once, together
with a list of the input lines where the error occurs. The latter will
be omitted if flag is set to False.
Helper for the get_report method.
"""
templ = '{} ← {}' if with_line_nums else '{}'
return '\n'.join([
templ.format(error.string, ','.join(map(str, sorted(set(lines)))))
for error, lines in self.errors.items()]) | [
"def",
"_get_report",
"(",
"self",
",",
"with_line_nums",
"=",
"True",
")",
":",
"templ",
"=",
"'{} ← {}' i",
" w",
"th_line_nums e",
"se '",
"}'",
"return",
"'\\n'",
".",
"join",
"(",
"[",
"templ",
".",
"format",
"(",
"error",
".",
"string",
",",
"','",
".",
"join",
"(",
"map",
"(",
"str",
",",
"sorted",
"(",
"set",
"(",
"lines",
")",
")",
")",
")",
")",
"for",
"error",
",",
"lines",
"in",
"self",
".",
"errors",
".",
"items",
"(",
")",
"]",
")"
] | 34.615385 | 16.615385 |
def CreateLibSymlinks(env, symlinks):
"""Physically creates symlinks. The symlinks argument must be a list in
form [ (link, linktarget), ... ], where link and linktarget are SCons
nodes.
"""
Verbose = False
for link, linktgt in symlinks:
linktgt = link.get_dir().rel_path(linktgt)
link = link.get_path()
if(Verbose):
print("CreateLibSymlinks: preparing to add symlink %r -> %r" % (link, linktgt))
# Delete the (previously created) symlink if exists. Let only symlinks
# to be deleted to prevent accidental deletion of source files...
if env.fs.islink(link):
env.fs.unlink(link)
if(Verbose):
print("CreateLibSymlinks: removed old symlink %r" % link)
# If a file or directory exists with the same name as link, an OSError
# will be thrown, which should be enough, I think.
env.fs.symlink(linktgt, link)
if(Verbose):
print("CreateLibSymlinks: add symlink %r -> %r" % (link, linktgt))
return 0 | [
"def",
"CreateLibSymlinks",
"(",
"env",
",",
"symlinks",
")",
":",
"Verbose",
"=",
"False",
"for",
"link",
",",
"linktgt",
"in",
"symlinks",
":",
"linktgt",
"=",
"link",
".",
"get_dir",
"(",
")",
".",
"rel_path",
"(",
"linktgt",
")",
"link",
"=",
"link",
".",
"get_path",
"(",
")",
"if",
"(",
"Verbose",
")",
":",
"print",
"(",
"\"CreateLibSymlinks: preparing to add symlink %r -> %r\"",
"%",
"(",
"link",
",",
"linktgt",
")",
")",
"# Delete the (previously created) symlink if exists. Let only symlinks",
"# to be deleted to prevent accidental deletion of source files...",
"if",
"env",
".",
"fs",
".",
"islink",
"(",
"link",
")",
":",
"env",
".",
"fs",
".",
"unlink",
"(",
"link",
")",
"if",
"(",
"Verbose",
")",
":",
"print",
"(",
"\"CreateLibSymlinks: removed old symlink %r\"",
"%",
"link",
")",
"# If a file or directory exists with the same name as link, an OSError",
"# will be thrown, which should be enough, I think.",
"env",
".",
"fs",
".",
"symlink",
"(",
"linktgt",
",",
"link",
")",
"if",
"(",
"Verbose",
")",
":",
"print",
"(",
"\"CreateLibSymlinks: add symlink %r -> %r\"",
"%",
"(",
"link",
",",
"linktgt",
")",
")",
"return",
"0"
] | 43.291667 | 21.125 |
def predict_epitopes_from_args(args):
"""
Returns an epitope collection from the given commandline arguments.
Parameters
----------
args : argparse.Namespace
Parsed commandline arguments for Topiary
"""
mhc_model = mhc_binding_predictor_from_args(args)
variants = variant_collection_from_args(args)
gene_expression_dict = rna_gene_expression_dict_from_args(args)
transcript_expression_dict = rna_transcript_expression_dict_from_args(args)
predictor = TopiaryPredictor(
mhc_model=mhc_model,
padding_around_mutation=args.padding_around_mutation,
ic50_cutoff=args.ic50_cutoff,
percentile_cutoff=args.percentile_cutoff,
min_transcript_expression=args.rna_min_transcript_expression,
min_gene_expression=args.rna_min_gene_expression,
only_novel_epitopes=args.only_novel_epitopes,
raise_on_error=not args.skip_variant_errors)
return predictor.predict_from_variants(
variants=variants,
transcript_expression_dict=transcript_expression_dict,
gene_expression_dict=gene_expression_dict) | [
"def",
"predict_epitopes_from_args",
"(",
"args",
")",
":",
"mhc_model",
"=",
"mhc_binding_predictor_from_args",
"(",
"args",
")",
"variants",
"=",
"variant_collection_from_args",
"(",
"args",
")",
"gene_expression_dict",
"=",
"rna_gene_expression_dict_from_args",
"(",
"args",
")",
"transcript_expression_dict",
"=",
"rna_transcript_expression_dict_from_args",
"(",
"args",
")",
"predictor",
"=",
"TopiaryPredictor",
"(",
"mhc_model",
"=",
"mhc_model",
",",
"padding_around_mutation",
"=",
"args",
".",
"padding_around_mutation",
",",
"ic50_cutoff",
"=",
"args",
".",
"ic50_cutoff",
",",
"percentile_cutoff",
"=",
"args",
".",
"percentile_cutoff",
",",
"min_transcript_expression",
"=",
"args",
".",
"rna_min_transcript_expression",
",",
"min_gene_expression",
"=",
"args",
".",
"rna_min_gene_expression",
",",
"only_novel_epitopes",
"=",
"args",
".",
"only_novel_epitopes",
",",
"raise_on_error",
"=",
"not",
"args",
".",
"skip_variant_errors",
")",
"return",
"predictor",
".",
"predict_from_variants",
"(",
"variants",
"=",
"variants",
",",
"transcript_expression_dict",
"=",
"transcript_expression_dict",
",",
"gene_expression_dict",
"=",
"gene_expression_dict",
")"
] | 40.555556 | 16.481481 |
def start_watching(self, cluster, callback):
"""
Initiates the "watching" of a cluster's associated znode.
This is done via kazoo's ChildrenWatch object. When a cluster's
znode's child nodes are updated, a callback is fired and we update
the cluster's `nodes` attribute based on the existing child znodes
and fire a passed-in callback with no arguments once done.
If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL`
seconds before trying again as long as no ChildrenWatch exists for
the given cluster yet and we are not in the process of shutting down.
"""
logger.debug("starting to watch cluster %s", cluster.name)
wait_on_any(self.connected, self.shutdown)
logger.debug("done waiting on (connected, shutdown)")
znode_path = "/".join([self.base_path, cluster.name])
self.stop_events[znode_path] = threading.Event()
def should_stop():
return (
znode_path not in self.stop_events or
self.stop_events[znode_path].is_set() or
self.shutdown.is_set()
)
while not should_stop():
try:
if self.client.exists(znode_path):
break
except exceptions.ConnectionClosedError:
break
wait_on_any(
self.stop_events[znode_path], self.shutdown,
timeout=NO_NODE_INTERVAL
)
logger.debug("setting up ChildrenWatch for %s", znode_path)
@self.client.ChildrenWatch(znode_path)
def watch(children):
if should_stop():
return False
logger.debug("znode children changed! (%s)", znode_path)
new_nodes = []
for child in children:
child_path = "/".join([znode_path, child])
try:
new_nodes.append(
Node.deserialize(self.client.get(child_path)[0])
)
except ValueError:
logger.exception("Invalid node at path '%s'", child)
continue
cluster.nodes = new_nodes
callback() | [
"def",
"start_watching",
"(",
"self",
",",
"cluster",
",",
"callback",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting to watch cluster %s\"",
",",
"cluster",
".",
"name",
")",
"wait_on_any",
"(",
"self",
".",
"connected",
",",
"self",
".",
"shutdown",
")",
"logger",
".",
"debug",
"(",
"\"done waiting on (connected, shutdown)\"",
")",
"znode_path",
"=",
"\"/\"",
".",
"join",
"(",
"[",
"self",
".",
"base_path",
",",
"cluster",
".",
"name",
"]",
")",
"self",
".",
"stop_events",
"[",
"znode_path",
"]",
"=",
"threading",
".",
"Event",
"(",
")",
"def",
"should_stop",
"(",
")",
":",
"return",
"(",
"znode_path",
"not",
"in",
"self",
".",
"stop_events",
"or",
"self",
".",
"stop_events",
"[",
"znode_path",
"]",
".",
"is_set",
"(",
")",
"or",
"self",
".",
"shutdown",
".",
"is_set",
"(",
")",
")",
"while",
"not",
"should_stop",
"(",
")",
":",
"try",
":",
"if",
"self",
".",
"client",
".",
"exists",
"(",
"znode_path",
")",
":",
"break",
"except",
"exceptions",
".",
"ConnectionClosedError",
":",
"break",
"wait_on_any",
"(",
"self",
".",
"stop_events",
"[",
"znode_path",
"]",
",",
"self",
".",
"shutdown",
",",
"timeout",
"=",
"NO_NODE_INTERVAL",
")",
"logger",
".",
"debug",
"(",
"\"setting up ChildrenWatch for %s\"",
",",
"znode_path",
")",
"@",
"self",
".",
"client",
".",
"ChildrenWatch",
"(",
"znode_path",
")",
"def",
"watch",
"(",
"children",
")",
":",
"if",
"should_stop",
"(",
")",
":",
"return",
"False",
"logger",
".",
"debug",
"(",
"\"znode children changed! (%s)\"",
",",
"znode_path",
")",
"new_nodes",
"=",
"[",
"]",
"for",
"child",
"in",
"children",
":",
"child_path",
"=",
"\"/\"",
".",
"join",
"(",
"[",
"znode_path",
",",
"child",
"]",
")",
"try",
":",
"new_nodes",
".",
"append",
"(",
"Node",
".",
"deserialize",
"(",
"self",
".",
"client",
".",
"get",
"(",
"child_path",
")",
"[",
"0",
"]",
")",
")",
"except",
"ValueError",
":",
"logger",
".",
"exception",
"(",
"\"Invalid node at path '%s'\"",
",",
"child",
")",
"continue",
"cluster",
".",
"nodes",
"=",
"new_nodes",
"callback",
"(",
")"
] | 35.548387 | 21.903226 |
def url_to_prefix(self, id):
'''url_to_prefix
High-level api: Convert an identifier from `{namespace}tagname` notation
to `prefix:tagname` notation. If the identifier does not have a
namespace, it is assumed that the whole identifier is a tag name.
Parameters
----------
id : `str`
Identifier in `{namespace}tagname` notation.
Returns
-------
str
Identifier in `prefix:tagname` notation.
'''
ret = re.search('^{(.+)}(.+)$', id)
if ret:
return self.urls[ret.group(1)] + ':' + ret.group(2)
else:
return id | [
"def",
"url_to_prefix",
"(",
"self",
",",
"id",
")",
":",
"ret",
"=",
"re",
".",
"search",
"(",
"'^{(.+)}(.+)$'",
",",
"id",
")",
"if",
"ret",
":",
"return",
"self",
".",
"urls",
"[",
"ret",
".",
"group",
"(",
"1",
")",
"]",
"+",
"':'",
"+",
"ret",
".",
"group",
"(",
"2",
")",
"else",
":",
"return",
"id"
] | 25.8 | 26.84 |
def dangling(prune=False, force=False):
'''
Return top-level images (those on which no other images depend) which do
not have a tag assigned to them. These include:
- Images which were once tagged but were later untagged, such as those
which were superseded by committing a new copy of an existing tagged
image.
- Images which were loaded using :py:func:`docker.load
<salt.modules.dockermod.load>` (or the ``docker load`` Docker CLI
command), but not tagged.
prune : False
Remove these images
force : False
If ``True``, and if ``prune=True``, then forcibly remove these images.
**RETURN DATA**
If ``prune=False``, the return data will be a list of dangling image IDs.
If ``prune=True``, the return data will be a dictionary with each key being
the ID of the dangling image, and the following information for each image:
- ``Comment`` - Any error encountered when trying to prune a dangling image
*(Only present if prune failed)*
- ``Removed`` - A boolean (``True`` if prune was successful, ``False`` if
not)
CLI Example:
.. code-block:: bash
salt myminion docker.dangling
salt myminion docker.dangling prune=True
'''
all_images = images(all=True)
dangling_images = [x[:12] for x in _get_top_level_images(all_images)
if all_images[x]['RepoTags'] is None]
if not prune:
return dangling_images
ret = {}
for image in dangling_images:
try:
ret.setdefault(image, {})['Removed'] = rmi(image, force=force)
except Exception as exc:
err = exc.__str__()
log.error(err)
ret.setdefault(image, {})['Comment'] = err
ret[image]['Removed'] = False
return ret | [
"def",
"dangling",
"(",
"prune",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"all_images",
"=",
"images",
"(",
"all",
"=",
"True",
")",
"dangling_images",
"=",
"[",
"x",
"[",
":",
"12",
"]",
"for",
"x",
"in",
"_get_top_level_images",
"(",
"all_images",
")",
"if",
"all_images",
"[",
"x",
"]",
"[",
"'RepoTags'",
"]",
"is",
"None",
"]",
"if",
"not",
"prune",
":",
"return",
"dangling_images",
"ret",
"=",
"{",
"}",
"for",
"image",
"in",
"dangling_images",
":",
"try",
":",
"ret",
".",
"setdefault",
"(",
"image",
",",
"{",
"}",
")",
"[",
"'Removed'",
"]",
"=",
"rmi",
"(",
"image",
",",
"force",
"=",
"force",
")",
"except",
"Exception",
"as",
"exc",
":",
"err",
"=",
"exc",
".",
"__str__",
"(",
")",
"log",
".",
"error",
"(",
"err",
")",
"ret",
".",
"setdefault",
"(",
"image",
",",
"{",
"}",
")",
"[",
"'Comment'",
"]",
"=",
"err",
"ret",
"[",
"image",
"]",
"[",
"'Removed'",
"]",
"=",
"False",
"return",
"ret"
] | 32.054545 | 26.2 |
def create_token_file(username=id_generator(), password=id_generator()):
'''
Store the admins password for further retrieve
'''
cozy_ds_uid = helpers.get_uid('cozy-data-system')
if not os.path.isfile(LOGIN_FILENAME):
with open(LOGIN_FILENAME, 'w+') as token_file:
token_file.write("{0}\n{1}".format(username, password))
helpers.file_rights(LOGIN_FILENAME, mode=0400, uid=cozy_ds_uid, gid=0) | [
"def",
"create_token_file",
"(",
"username",
"=",
"id_generator",
"(",
")",
",",
"password",
"=",
"id_generator",
"(",
")",
")",
":",
"cozy_ds_uid",
"=",
"helpers",
".",
"get_uid",
"(",
"'cozy-data-system'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"LOGIN_FILENAME",
")",
":",
"with",
"open",
"(",
"LOGIN_FILENAME",
",",
"'w+'",
")",
"as",
"token_file",
":",
"token_file",
".",
"write",
"(",
"\"{0}\\n{1}\"",
".",
"format",
"(",
"username",
",",
"password",
")",
")",
"helpers",
".",
"file_rights",
"(",
"LOGIN_FILENAME",
",",
"mode",
"=",
"0400",
",",
"uid",
"=",
"cozy_ds_uid",
",",
"gid",
"=",
"0",
")"
] | 39.090909 | 25.636364 |
def ai(x, context=None):
"""
Return the Airy function of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_ai,
(BigFloat._implicit_convert(x),),
context,
) | [
"def",
"ai",
"(",
"x",
",",
"context",
"=",
"None",
")",
":",
"return",
"_apply_function_in_current_context",
"(",
"BigFloat",
",",
"mpfr",
".",
"mpfr_ai",
",",
"(",
"BigFloat",
".",
"_implicit_convert",
"(",
"x",
")",
",",
")",
",",
"context",
",",
")"
] | 19.818182 | 15.454545 |
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name, feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name, 1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name, feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name, 0) | [
"def",
"_finalize_features",
"(",
"self",
")",
":",
"# First, flag all the enabled items (and thus their dependencies)",
"for",
"name",
",",
"feature",
"in",
"self",
".",
"features",
".",
"items",
"(",
")",
":",
"enabled",
"=",
"self",
".",
"feature_is_included",
"(",
"name",
")",
"if",
"enabled",
"or",
"(",
"enabled",
"is",
"None",
"and",
"feature",
".",
"include_by_default",
"(",
")",
")",
":",
"feature",
".",
"include_in",
"(",
"self",
")",
"self",
".",
"_set_feature",
"(",
"name",
",",
"1",
")",
"# Then disable the rest, so that off-by-default features don't",
"# get flagged as errors when they're required by an enabled feature",
"for",
"name",
",",
"feature",
"in",
"self",
".",
"features",
".",
"items",
"(",
")",
":",
"if",
"not",
"self",
".",
"feature_is_included",
"(",
"name",
")",
":",
"feature",
".",
"exclude_from",
"(",
"self",
")",
"self",
".",
"_set_feature",
"(",
"name",
",",
"0",
")"
] | 47.8125 | 17.25 |
def sg_parallel(func):
r"""Decorates function as multiple gpu support towers.
Args:
func: function to decorate
"""
@wraps(func)
def wrapper(**kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
kwargs: keyword arguments. The wrapped function will be provided with gpu_index argument.
"""
# parse option
opt = tf.sg_opt(kwargs)
# loop for all available GPUs
res = []
for i in range(sg_gpus()):
# specify device
with tf.device('/gpu:%d' % i):
# give new scope only to operation
with tf.name_scope('gpu_%d' % i):
# save reuse flag
with sg_context(reuse=(True if i > 0 else False)):
# call function
res.append(func(opt * tf.sg_opt(gpu_index=i)))
return res
return wrapper | [
"def",
"sg_parallel",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"*",
"kwargs",
")",
":",
"r\"\"\"Manages arguments of `tf.sg_opt`.\n\n Args:\n kwargs: keyword arguments. The wrapped function will be provided with gpu_index argument.\n \"\"\"",
"# parse option",
"opt",
"=",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
"# loop for all available GPUs",
"res",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"sg_gpus",
"(",
")",
")",
":",
"# specify device",
"with",
"tf",
".",
"device",
"(",
"'/gpu:%d'",
"%",
"i",
")",
":",
"# give new scope only to operation",
"with",
"tf",
".",
"name_scope",
"(",
"'gpu_%d'",
"%",
"i",
")",
":",
"# save reuse flag",
"with",
"sg_context",
"(",
"reuse",
"=",
"(",
"True",
"if",
"i",
">",
"0",
"else",
"False",
")",
")",
":",
"# call function",
"res",
".",
"append",
"(",
"func",
"(",
"opt",
"*",
"tf",
".",
"sg_opt",
"(",
"gpu_index",
"=",
"i",
")",
")",
")",
"return",
"res",
"return",
"wrapper"
] | 30.033333 | 18 |
def _cache(self, key, val):
"""
Request that a key/value pair be considered for caching.
"""
cache_size = (1 if util.dimensionless_contents(self.streams, self.kdims)
else self.cache_size)
if len(self) >= cache_size:
first_key = next(k for k in self.data)
self.data.pop(first_key)
self[key] = val | [
"def",
"_cache",
"(",
"self",
",",
"key",
",",
"val",
")",
":",
"cache_size",
"=",
"(",
"1",
"if",
"util",
".",
"dimensionless_contents",
"(",
"self",
".",
"streams",
",",
"self",
".",
"kdims",
")",
"else",
"self",
".",
"cache_size",
")",
"if",
"len",
"(",
"self",
")",
">=",
"cache_size",
":",
"first_key",
"=",
"next",
"(",
"k",
"for",
"k",
"in",
"self",
".",
"data",
")",
"self",
".",
"data",
".",
"pop",
"(",
"first_key",
")",
"self",
"[",
"key",
"]",
"=",
"val"
] | 38 | 11.6 |
def _find_convertable_object(self, data):
"""
Get the first instance of a `self.pod_types`
"""
data = list(data)
convertable_object_idxs = [
idx
for idx, obj
in enumerate(data)
if obj.get('kind') in self.pod_types.keys()
]
if len(convertable_object_idxs) < 1:
raise Exception("Kubernetes config didn't contain any of {}".format(
', '.join(self.pod_types.keys())
))
return list(data)[convertable_object_idxs[0]] | [
"def",
"_find_convertable_object",
"(",
"self",
",",
"data",
")",
":",
"data",
"=",
"list",
"(",
"data",
")",
"convertable_object_idxs",
"=",
"[",
"idx",
"for",
"idx",
",",
"obj",
"in",
"enumerate",
"(",
"data",
")",
"if",
"obj",
".",
"get",
"(",
"'kind'",
")",
"in",
"self",
".",
"pod_types",
".",
"keys",
"(",
")",
"]",
"if",
"len",
"(",
"convertable_object_idxs",
")",
"<",
"1",
":",
"raise",
"Exception",
"(",
"\"Kubernetes config didn't contain any of {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"self",
".",
"pod_types",
".",
"keys",
"(",
")",
")",
")",
")",
"return",
"list",
"(",
"data",
")",
"[",
"convertable_object_idxs",
"[",
"0",
"]",
"]"
] | 34.1875 | 13.8125 |
def pprint(walker):
"""Pretty printer for tree walkers
Takes a TreeWalker instance and pretty prints the output of walking the tree.
:arg walker: a TreeWalker instance
"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output) | [
"def",
"pprint",
"(",
"walker",
")",
":",
"output",
"=",
"[",
"]",
"indent",
"=",
"0",
"for",
"token",
"in",
"concatenateCharacterTokens",
"(",
"walker",
")",
":",
"type",
"=",
"token",
"[",
"\"type\"",
"]",
"if",
"type",
"in",
"(",
"\"StartTag\"",
",",
"\"EmptyTag\"",
")",
":",
"# tag name",
"if",
"token",
"[",
"\"namespace\"",
"]",
"and",
"token",
"[",
"\"namespace\"",
"]",
"!=",
"constants",
".",
"namespaces",
"[",
"\"html\"",
"]",
":",
"if",
"token",
"[",
"\"namespace\"",
"]",
"in",
"constants",
".",
"prefixes",
":",
"ns",
"=",
"constants",
".",
"prefixes",
"[",
"token",
"[",
"\"namespace\"",
"]",
"]",
"else",
":",
"ns",
"=",
"token",
"[",
"\"namespace\"",
"]",
"name",
"=",
"\"%s %s\"",
"%",
"(",
"ns",
",",
"token",
"[",
"\"name\"",
"]",
")",
"else",
":",
"name",
"=",
"token",
"[",
"\"name\"",
"]",
"output",
".",
"append",
"(",
"\"%s<%s>\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"name",
")",
")",
"indent",
"+=",
"2",
"# attributes (sorted for consistent ordering)",
"attrs",
"=",
"token",
"[",
"\"data\"",
"]",
"for",
"(",
"namespace",
",",
"localname",
")",
",",
"value",
"in",
"sorted",
"(",
"attrs",
".",
"items",
"(",
")",
")",
":",
"if",
"namespace",
":",
"if",
"namespace",
"in",
"constants",
".",
"prefixes",
":",
"ns",
"=",
"constants",
".",
"prefixes",
"[",
"namespace",
"]",
"else",
":",
"ns",
"=",
"namespace",
"name",
"=",
"\"%s %s\"",
"%",
"(",
"ns",
",",
"localname",
")",
"else",
":",
"name",
"=",
"localname",
"output",
".",
"append",
"(",
"\"%s%s=\\\"%s\\\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"name",
",",
"value",
")",
")",
"# self-closing",
"if",
"type",
"==",
"\"EmptyTag\"",
":",
"indent",
"-=",
"2",
"elif",
"type",
"==",
"\"EndTag\"",
":",
"indent",
"-=",
"2",
"elif",
"type",
"==",
"\"Comment\"",
":",
"output",
".",
"append",
"(",
"\"%s<!-- %s -->\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"data\"",
"]",
")",
")",
"elif",
"type",
"==",
"\"Doctype\"",
":",
"if",
"token",
"[",
"\"name\"",
"]",
":",
"if",
"token",
"[",
"\"publicId\"",
"]",
":",
"output",
".",
"append",
"(",
"\"\"\"%s<!DOCTYPE %s \"%s\" \"%s\">\"\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"name\"",
"]",
",",
"token",
"[",
"\"publicId\"",
"]",
",",
"token",
"[",
"\"systemId\"",
"]",
"if",
"token",
"[",
"\"systemId\"",
"]",
"else",
"\"\"",
")",
")",
"elif",
"token",
"[",
"\"systemId\"",
"]",
":",
"output",
".",
"append",
"(",
"\"\"\"%s<!DOCTYPE %s \"\" \"%s\">\"\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"name\"",
"]",
",",
"token",
"[",
"\"systemId\"",
"]",
")",
")",
"else",
":",
"output",
".",
"append",
"(",
"\"%s<!DOCTYPE %s>\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"name\"",
"]",
")",
")",
"else",
":",
"output",
".",
"append",
"(",
"\"%s<!DOCTYPE >\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
")",
")",
"elif",
"type",
"==",
"\"Characters\"",
":",
"output",
".",
"append",
"(",
"\"%s\\\"%s\\\"\"",
"%",
"(",
"\" \"",
"*",
"indent",
",",
"token",
"[",
"\"data\"",
"]",
")",
")",
"elif",
"type",
"==",
"\"SpaceCharacters\"",
":",
"assert",
"False",
",",
"\"concatenateCharacterTokens should have got rid of all Space tokens\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown token type, %s\"",
"%",
"type",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"output",
")"
] | 37.92 | 19.186667 |
def http_request(self,
verb,
uri,
data=None,
headers=None,
files=None,
response_format=None,
is_rdf = True,
stream = False
):
'''
Primary route for all HTTP requests to repository. Ability to set most parameters for requests library,
with some additional convenience parameters as well.
Args:
verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.
uri (rdflib.term.URIRef,str): input URI
data (str,file): payload of data to send for request, may be overridden in preperation of request
headers (dict): optional dictionary of headers passed directly to requests.request
files (dict): optional dictionary of files passed directly to requests.request
response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.
is_rdf (bool): if True, set Accept header based on combination of response_format and headers
stream (bool): passed directly to requests.request for stream parameter
Returns:
requests.models.Response
'''
# set content negotiated response format for RDFSources
if is_rdf:
'''
Acceptable content negotiated response formats include:
application/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)
application/n-triples
application/rdf+xml
text/n3 (or text/rdf+n3)
text/plain
text/turtle (or application/x-turtle)
'''
# set for GET requests only
if verb == 'GET':
# if no response_format has been requested to this point, use repository instance default
if not response_format:
response_format = self.repo.default_serialization
# if headers present, append
if headers and 'Accept' not in headers.keys():
headers['Accept'] = response_format
# if headers are blank, init dictionary
else:
headers = {'Accept':response_format}
# prepare uri for HTTP request
if type(uri) == rdflib.term.URIRef:
uri = uri.toPython()
logger.debug("%s request for %s, format %s, headers %s" %
(verb, uri, response_format, headers))
# manually prepare request
session = requests.Session()
request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files)
prepped_request = session.prepare_request(request)
response = session.send(prepped_request,
stream=stream,
)
return response | [
"def",
"http_request",
"(",
"self",
",",
"verb",
",",
"uri",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"files",
"=",
"None",
",",
"response_format",
"=",
"None",
",",
"is_rdf",
"=",
"True",
",",
"stream",
"=",
"False",
")",
":",
"# set content negotiated response format for RDFSources",
"if",
"is_rdf",
":",
"'''\n\t\t\tAcceptable content negotiated response formats include:\n\t\t\t\tapplication/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)\n\t\t\t\tapplication/n-triples\n\t\t\t\tapplication/rdf+xml\n\t\t\t\ttext/n3 (or text/rdf+n3)\n\t\t\t\ttext/plain\n\t\t\t\ttext/turtle (or application/x-turtle)\n\t\t\t'''",
"# set for GET requests only",
"if",
"verb",
"==",
"'GET'",
":",
"# if no response_format has been requested to this point, use repository instance default",
"if",
"not",
"response_format",
":",
"response_format",
"=",
"self",
".",
"repo",
".",
"default_serialization",
"# if headers present, append",
"if",
"headers",
"and",
"'Accept'",
"not",
"in",
"headers",
".",
"keys",
"(",
")",
":",
"headers",
"[",
"'Accept'",
"]",
"=",
"response_format",
"# if headers are blank, init dictionary",
"else",
":",
"headers",
"=",
"{",
"'Accept'",
":",
"response_format",
"}",
"# prepare uri for HTTP request",
"if",
"type",
"(",
"uri",
")",
"==",
"rdflib",
".",
"term",
".",
"URIRef",
":",
"uri",
"=",
"uri",
".",
"toPython",
"(",
")",
"logger",
".",
"debug",
"(",
"\"%s request for %s, format %s, headers %s\"",
"%",
"(",
"verb",
",",
"uri",
",",
"response_format",
",",
"headers",
")",
")",
"# manually prepare request",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"request",
"=",
"requests",
".",
"Request",
"(",
"verb",
",",
"uri",
",",
"auth",
"=",
"(",
"self",
".",
"repo",
".",
"username",
",",
"self",
".",
"repo",
".",
"password",
")",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"files",
"=",
"files",
")",
"prepped_request",
"=",
"session",
".",
"prepare_request",
"(",
"request",
")",
"response",
"=",
"session",
".",
"send",
"(",
"prepped_request",
",",
"stream",
"=",
"stream",
",",
")",
"return",
"response"
] | 35.104478 | 26.895522 |
def accel_ES(q: np.ndarray):
"""
Compute the gravitational accelerations in the earth-sun system.
q in row vector of 6 elements: sun (x, y, z), earth (x, y, z)
"""
# Number of celestial bodies
num_bodies: int = 2
# Number of dimensions in arrays; 3 spatial dimensions times the number of bodies
dims = 3 * num_bodies
# Body 0 is the sun; Body 1 is the earth
m0 = mass[0]
m1 = mass[1]
# Extract position of the sun and earth as 3-vectors
pos_0 = q[slices[0]]
pos_1 = q[slices[1]]
# Displacement vector from sun to earth
dv_01: np.ndarray = pos_1 - pos_0
# Distance from sun to earth
r_01: float = np.linalg.norm(dv_01)
# Unit vector pointing from sun to earth
udv_01 = dv_01 / r_01
# The force between these has magnitude G*m0*m1 / r^2
f_01: float = (G * m0 * m1) / (r_01 ** 2)
# Initialize acceleration as 6x1 array
a: np.ndarray = np.zeros(dims)
# The force vectors are attractive
a[slices[0]] += f_01 * udv_01 / m0
a[slices[1]] -= f_01 * udv_01 / m1
# Return the acceleration vector
return a | [
"def",
"accel_ES",
"(",
"q",
":",
"np",
".",
"ndarray",
")",
":",
"# Number of celestial bodies",
"num_bodies",
":",
"int",
"=",
"2",
"# Number of dimensions in arrays; 3 spatial dimensions times the number of bodies",
"dims",
"=",
"3",
"*",
"num_bodies",
"# Body 0 is the sun; Body 1 is the earth",
"m0",
"=",
"mass",
"[",
"0",
"]",
"m1",
"=",
"mass",
"[",
"1",
"]",
"# Extract position of the sun and earth as 3-vectors",
"pos_0",
"=",
"q",
"[",
"slices",
"[",
"0",
"]",
"]",
"pos_1",
"=",
"q",
"[",
"slices",
"[",
"1",
"]",
"]",
"# Displacement vector from sun to earth",
"dv_01",
":",
"np",
".",
"ndarray",
"=",
"pos_1",
"-",
"pos_0",
"# Distance from sun to earth",
"r_01",
":",
"float",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"dv_01",
")",
"# Unit vector pointing from sun to earth",
"udv_01",
"=",
"dv_01",
"/",
"r_01",
"# The force between these has magnitude G*m0*m1 / r^2",
"f_01",
":",
"float",
"=",
"(",
"G",
"*",
"m0",
"*",
"m1",
")",
"/",
"(",
"r_01",
"**",
"2",
")",
"# Initialize acceleration as 6x1 array",
"a",
":",
"np",
".",
"ndarray",
"=",
"np",
".",
"zeros",
"(",
"dims",
")",
"# The force vectors are attractive",
"a",
"[",
"slices",
"[",
"0",
"]",
"]",
"+=",
"f_01",
"*",
"udv_01",
"/",
"m0",
"a",
"[",
"slices",
"[",
"1",
"]",
"]",
"-=",
"f_01",
"*",
"udv_01",
"/",
"m1",
"# Return the acceleration vector",
"return",
"a"
] | 27.3 | 18.5 |
def cli(env, volume_id, lun_id):
"""Set the LUN ID on an existing block storage volume.
The LUN ID only takes effect during the Host Authorization process. It is
recommended (but not necessary) to de-authorize all hosts before using this
method. See `block access-revoke`.
VOLUME_ID - the volume ID on which to set the LUN ID.
LUN_ID - recommended range is an integer between 0 and 255. Advanced users
can use an integer between 0 and 4095.
"""
block_storage_manager = SoftLayer.BlockStorageManager(env.client)
res = block_storage_manager.create_or_update_lun_id(volume_id, lun_id)
if 'value' in res and lun_id == res['value']:
click.echo(
'Block volume with id %s is reporting LUN ID %s' % (res['volumeId'], res['value']))
else:
click.echo(
'Failed to confirm the new LUN ID on volume %s' % (volume_id)) | [
"def",
"cli",
"(",
"env",
",",
"volume_id",
",",
"lun_id",
")",
":",
"block_storage_manager",
"=",
"SoftLayer",
".",
"BlockStorageManager",
"(",
"env",
".",
"client",
")",
"res",
"=",
"block_storage_manager",
".",
"create_or_update_lun_id",
"(",
"volume_id",
",",
"lun_id",
")",
"if",
"'value'",
"in",
"res",
"and",
"lun_id",
"==",
"res",
"[",
"'value'",
"]",
":",
"click",
".",
"echo",
"(",
"'Block volume with id %s is reporting LUN ID %s'",
"%",
"(",
"res",
"[",
"'volumeId'",
"]",
",",
"res",
"[",
"'value'",
"]",
")",
")",
"else",
":",
"click",
".",
"echo",
"(",
"'Failed to confirm the new LUN ID on volume %s'",
"%",
"(",
"volume_id",
")",
")"
] | 38.086957 | 26.826087 |
def get_full_perm(perm, obj):
"""Join action with the content type of ``obj``.
Permission is returned in the format of ``<action>_<object_type>``.
"""
ctype = ContentType.objects.get_for_model(obj)
# Camel case class names are converted into a space-separated
# content types, so spaces have to be removed.
ctype = str(ctype).replace(' ', '')
return '{}_{}'.format(perm.lower(), ctype) | [
"def",
"get_full_perm",
"(",
"perm",
",",
"obj",
")",
":",
"ctype",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
")",
"# Camel case class names are converted into a space-separated",
"# content types, so spaces have to be removed.",
"ctype",
"=",
"str",
"(",
"ctype",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"return",
"'{}_{}'",
".",
"format",
"(",
"perm",
".",
"lower",
"(",
")",
",",
"ctype",
")"
] | 37.181818 | 15.818182 |
def urlopen(url, headers={}, data=None, retries=RETRIES, timeout=TIMEOUT):
'''打开一个http连接, 并返回Request.
headers 是一个dict. 默认提供了一些项目, 比如User-Agent, Referer等, 就
不需要重复加入了.
这个函数只能用于http请求, 不可以用于下载大文件.
如果服务器支持gzip压缩的话, 就会使用gzip对数据进行压缩, 然后在本地自动
解压.
req.data 里面放着的是最终的http数据内容, 通常都是UTF-8编码的文本.
'''
headers_merged = default_headers.copy()
for key in headers.keys():
headers_merged[key] = headers[key]
opener = urllib.request.build_opener(ForbiddenHandler)
opener.addheaders = [(k, v) for k,v in headers_merged.items()]
for i in range(retries):
try:
req = opener.open(url, data=data, timeout=timeout)
encoding = req.headers.get('Content-encoding')
req.data = req.read()
if encoding == 'gzip':
req.data = gzip.decompress(req.data)
elif encoding == 'deflate':
req.data = zlib.decompress(req.data, -zlib.MAX_WBITS)
return req
except OSError:
logger.error(traceback.format_exc())
except:
logger.error(traceback.format_exc())
return None | [
"def",
"urlopen",
"(",
"url",
",",
"headers",
"=",
"{",
"}",
",",
"data",
"=",
"None",
",",
"retries",
"=",
"RETRIES",
",",
"timeout",
"=",
"TIMEOUT",
")",
":",
"headers_merged",
"=",
"default_headers",
".",
"copy",
"(",
")",
"for",
"key",
"in",
"headers",
".",
"keys",
"(",
")",
":",
"headers_merged",
"[",
"key",
"]",
"=",
"headers",
"[",
"key",
"]",
"opener",
"=",
"urllib",
".",
"request",
".",
"build_opener",
"(",
"ForbiddenHandler",
")",
"opener",
".",
"addheaders",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"headers_merged",
".",
"items",
"(",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"retries",
")",
":",
"try",
":",
"req",
"=",
"opener",
".",
"open",
"(",
"url",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"timeout",
")",
"encoding",
"=",
"req",
".",
"headers",
".",
"get",
"(",
"'Content-encoding'",
")",
"req",
".",
"data",
"=",
"req",
".",
"read",
"(",
")",
"if",
"encoding",
"==",
"'gzip'",
":",
"req",
".",
"data",
"=",
"gzip",
".",
"decompress",
"(",
"req",
".",
"data",
")",
"elif",
"encoding",
"==",
"'deflate'",
":",
"req",
".",
"data",
"=",
"zlib",
".",
"decompress",
"(",
"req",
".",
"data",
",",
"-",
"zlib",
".",
"MAX_WBITS",
")",
"return",
"req",
"except",
"OSError",
":",
"logger",
".",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"except",
":",
"logger",
".",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"None"
] | 33.294118 | 19 |
def _srcRect_x(self, attr_name):
"""
Value of `p:blipFill/a:srcRect/@{attr_name}` or 0.0 if not present.
"""
srcRect = self.blipFill.srcRect
if srcRect is None:
return 0.0
return getattr(srcRect, attr_name) | [
"def",
"_srcRect_x",
"(",
"self",
",",
"attr_name",
")",
":",
"srcRect",
"=",
"self",
".",
"blipFill",
".",
"srcRect",
"if",
"srcRect",
"is",
"None",
":",
"return",
"0.0",
"return",
"getattr",
"(",
"srcRect",
",",
"attr_name",
")"
] | 32.375 | 9.625 |
def bin(sample, options={}, **kwargs):
"""Draw a histogram in the current context figure.
Parameters
----------
sample: numpy.ndarray, 1d
The sample for which the histogram must be generated.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x'
is required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is
required for that mark, axes_options['x'] contains optional
keyword arguments for the constructor of the corresponding axis type.
"""
kwargs['sample'] = sample
scales = kwargs.pop('scales', {})
for xy in ['x', 'y']:
if xy not in scales:
dimension = _get_attribute_dimension(xy, Bars)
if dimension in _context['scales']:
scales[xy] = _context['scales'][dimension]
else:
scales[xy] = LinearScale(**options.get(xy, {}))
_context['scales'][dimension] = scales[xy]
kwargs['scales'] = scales
return _draw_mark(Bins, options=options, **kwargs) | [
"def",
"bin",
"(",
"sample",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'sample'",
"]",
"=",
"sample",
"scales",
"=",
"kwargs",
".",
"pop",
"(",
"'scales'",
",",
"{",
"}",
")",
"for",
"xy",
"in",
"[",
"'x'",
",",
"'y'",
"]",
":",
"if",
"xy",
"not",
"in",
"scales",
":",
"dimension",
"=",
"_get_attribute_dimension",
"(",
"xy",
",",
"Bars",
")",
"if",
"dimension",
"in",
"_context",
"[",
"'scales'",
"]",
":",
"scales",
"[",
"xy",
"]",
"=",
"_context",
"[",
"'scales'",
"]",
"[",
"dimension",
"]",
"else",
":",
"scales",
"[",
"xy",
"]",
"=",
"LinearScale",
"(",
"*",
"*",
"options",
".",
"get",
"(",
"xy",
",",
"{",
"}",
")",
")",
"_context",
"[",
"'scales'",
"]",
"[",
"dimension",
"]",
"=",
"scales",
"[",
"xy",
"]",
"kwargs",
"[",
"'scales'",
"]",
"=",
"scales",
"return",
"_draw_mark",
"(",
"Bins",
",",
"options",
"=",
"options",
",",
"*",
"*",
"kwargs",
")"
] | 44.851852 | 16.888889 |
def is_valid(self, qstr=None):
"""Return True if string is valid"""
if qstr is None:
qstr = self.currentText()
return osp.isfile(to_text_string(qstr)) | [
"def",
"is_valid",
"(",
"self",
",",
"qstr",
"=",
"None",
")",
":",
"if",
"qstr",
"is",
"None",
":",
"qstr",
"=",
"self",
".",
"currentText",
"(",
")",
"return",
"osp",
".",
"isfile",
"(",
"to_text_string",
"(",
"qstr",
")",
")"
] | 37.2 | 6.6 |
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default= ``128``) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method :class:`ParserElement.enablePackrat`.
For best results, call ``enablePackrat()`` immediately after
importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache | [
"def",
"enablePackrat",
"(",
"cache_size_limit",
"=",
"128",
")",
":",
"if",
"not",
"ParserElement",
".",
"_packratEnabled",
":",
"ParserElement",
".",
"_packratEnabled",
"=",
"True",
"if",
"cache_size_limit",
"is",
"None",
":",
"ParserElement",
".",
"packrat_cache",
"=",
"ParserElement",
".",
"_UnboundedCache",
"(",
")",
"else",
":",
"ParserElement",
".",
"packrat_cache",
"=",
"ParserElement",
".",
"_FifoCache",
"(",
"cache_size_limit",
")",
"ParserElement",
".",
"_parse",
"=",
"ParserElement",
".",
"_parseCache"
] | 49.151515 | 25.909091 |
def make_tone(freq, db, dur, risefall, samplerate, caldb=100, calv=0.1):
"""
Produce a pure tone signal
:param freq: Frequency of the tone to be produced (Hz)
:type freq: int
:param db: Intensity of the tone in dB SPL
:type db: int
:param dur: duration (seconds)
:type dur: float
:param risefall: linear rise fall of (seconds)
:type risefall: float
:param samplerate: generation frequency of tone (Hz)
:type samplerate: int
:param caldb: Reference intensity (dB SPL). Together with calv, provides a reference point for what intensity equals what output voltage level
:type caldb: int
:param calv: Reference voltage (V). Together with caldb, provides a reference point for what intensity equals what output voltage level
:type calv: float
:returns: tone, timevals -- the signal and the time index values
"""
if risefall > dur:
raise ValueError('Duration must be greater than risefall time')
if samplerate <= 0:
raise ValueError("Samplerate must be greater than 0")
if caldb <= 0:
raise ValueError("Calibration dB SPL must be greater than 0")
npts = int(dur * samplerate)
amp = (10 ** ((db - caldb) / 20) * calv)
if USE_RMS:
amp = amp * 1.414213562373
if VERBOSE:
print(
"current dB: {}, fs: {}, current frequency: {} kHz, AO Amp: {:.6f}".format(db, samplerate, freq / 1000, amp))
print("cal dB: {}, V at cal dB: {}".format(caldb, calv))
tone = amp * np.sin((freq * dur) * np.linspace(0, 2 * np.pi, npts))
# print 'tone max', np.amax(tone)
if risefall > 0:
rf_npts = int(risefall * samplerate) // 2
# print('amp {}, freq {}, npts {}, rf_npts {}'.format(amp,freq,npts,rf_npts))
wnd = hann(rf_npts * 2) # cosine taper
tone[:rf_npts] = tone[:rf_npts] * wnd[:rf_npts]
tone[-rf_npts:] = tone[-rf_npts:] * wnd[rf_npts:]
timevals = np.arange(npts) / samplerate
return tone, timevals | [
"def",
"make_tone",
"(",
"freq",
",",
"db",
",",
"dur",
",",
"risefall",
",",
"samplerate",
",",
"caldb",
"=",
"100",
",",
"calv",
"=",
"0.1",
")",
":",
"if",
"risefall",
">",
"dur",
":",
"raise",
"ValueError",
"(",
"'Duration must be greater than risefall time'",
")",
"if",
"samplerate",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Samplerate must be greater than 0\"",
")",
"if",
"caldb",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Calibration dB SPL must be greater than 0\"",
")",
"npts",
"=",
"int",
"(",
"dur",
"*",
"samplerate",
")",
"amp",
"=",
"(",
"10",
"**",
"(",
"(",
"db",
"-",
"caldb",
")",
"/",
"20",
")",
"*",
"calv",
")",
"if",
"USE_RMS",
":",
"amp",
"=",
"amp",
"*",
"1.414213562373",
"if",
"VERBOSE",
":",
"print",
"(",
"\"current dB: {}, fs: {}, current frequency: {} kHz, AO Amp: {:.6f}\"",
".",
"format",
"(",
"db",
",",
"samplerate",
",",
"freq",
"/",
"1000",
",",
"amp",
")",
")",
"print",
"(",
"\"cal dB: {}, V at cal dB: {}\"",
".",
"format",
"(",
"caldb",
",",
"calv",
")",
")",
"tone",
"=",
"amp",
"*",
"np",
".",
"sin",
"(",
"(",
"freq",
"*",
"dur",
")",
"*",
"np",
".",
"linspace",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
",",
"npts",
")",
")",
"# print 'tone max', np.amax(tone) ",
"if",
"risefall",
">",
"0",
":",
"rf_npts",
"=",
"int",
"(",
"risefall",
"*",
"samplerate",
")",
"//",
"2",
"# print('amp {}, freq {}, npts {}, rf_npts {}'.format(amp,freq,npts,rf_npts))",
"wnd",
"=",
"hann",
"(",
"rf_npts",
"*",
"2",
")",
"# cosine taper",
"tone",
"[",
":",
"rf_npts",
"]",
"=",
"tone",
"[",
":",
"rf_npts",
"]",
"*",
"wnd",
"[",
":",
"rf_npts",
"]",
"tone",
"[",
"-",
"rf_npts",
":",
"]",
"=",
"tone",
"[",
"-",
"rf_npts",
":",
"]",
"*",
"wnd",
"[",
"rf_npts",
":",
"]",
"timevals",
"=",
"np",
".",
"arange",
"(",
"npts",
")",
"/",
"samplerate",
"return",
"tone",
",",
"timevals"
] | 39.02 | 24.78 |
def submit_async_work_chain(self, work_chain, workunit_parent, done_hook=None):
"""Submit work to be executed in the background.
- work_chain: An iterable of Work instances. Will be invoked serially. Each instance may
have a different cardinality. There is no output-input chaining: the argument
tuples must already be present in each work instance. If any work throws an
exception no subsequent work in the chain will be attempted.
- workunit_parent: Work is accounted for under this workunit.
- done_hook: If not None, invoked with no args after all work is done, or on error.
"""
def done():
if done_hook:
done_hook()
with self._pending_workchains_cond:
self._pending_workchains -= 1
self._pending_workchains_cond.notify()
def error(e):
done()
self._run_tracker.log(Report.ERROR, '{}'.format(e))
# We filter out Nones defensively. There shouldn't be any, but if a bug causes one,
# Pants might hang indefinitely without this filtering.
work_iter = (_f for _f in work_chain if _f)
def submit_next():
try:
self.submit_async_work(next(work_iter), workunit_parent=workunit_parent,
on_success=lambda x: submit_next(), on_failure=error)
except StopIteration:
done() # The success case.
with self._pending_workchains_cond:
self._pending_workchains += 1
try:
submit_next()
except Exception as e: # Handles errors in the submission code.
done()
self._run_tracker.log(Report.ERROR, '{}'.format(e))
raise | [
"def",
"submit_async_work_chain",
"(",
"self",
",",
"work_chain",
",",
"workunit_parent",
",",
"done_hook",
"=",
"None",
")",
":",
"def",
"done",
"(",
")",
":",
"if",
"done_hook",
":",
"done_hook",
"(",
")",
"with",
"self",
".",
"_pending_workchains_cond",
":",
"self",
".",
"_pending_workchains",
"-=",
"1",
"self",
".",
"_pending_workchains_cond",
".",
"notify",
"(",
")",
"def",
"error",
"(",
"e",
")",
":",
"done",
"(",
")",
"self",
".",
"_run_tracker",
".",
"log",
"(",
"Report",
".",
"ERROR",
",",
"'{}'",
".",
"format",
"(",
"e",
")",
")",
"# We filter out Nones defensively. There shouldn't be any, but if a bug causes one,",
"# Pants might hang indefinitely without this filtering.",
"work_iter",
"=",
"(",
"_f",
"for",
"_f",
"in",
"work_chain",
"if",
"_f",
")",
"def",
"submit_next",
"(",
")",
":",
"try",
":",
"self",
".",
"submit_async_work",
"(",
"next",
"(",
"work_iter",
")",
",",
"workunit_parent",
"=",
"workunit_parent",
",",
"on_success",
"=",
"lambda",
"x",
":",
"submit_next",
"(",
")",
",",
"on_failure",
"=",
"error",
")",
"except",
"StopIteration",
":",
"done",
"(",
")",
"# The success case.",
"with",
"self",
".",
"_pending_workchains_cond",
":",
"self",
".",
"_pending_workchains",
"+=",
"1",
"try",
":",
"submit_next",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# Handles errors in the submission code.",
"done",
"(",
")",
"self",
".",
"_run_tracker",
".",
"log",
"(",
"Report",
".",
"ERROR",
",",
"'{}'",
".",
"format",
"(",
"e",
")",
")",
"raise"
] | 40.3 | 25.975 |
def u2handlers(self):
"""
Get a collection of urllib handlers.
@return: A list of handlers to be installed in the opener.
@rtype: [Handler,...]
"""
handlers = []
handlers.append(u2.ProxyHandler(self.proxy))
return handlers | [
"def",
"u2handlers",
"(",
"self",
")",
":",
"handlers",
"=",
"[",
"]",
"handlers",
".",
"append",
"(",
"u2",
".",
"ProxyHandler",
"(",
"self",
".",
"proxy",
")",
")",
"return",
"handlers"
] | 30.888889 | 12 |
def getPublicIP():
"""Get the IP that this machine uses to contact the internet.
If behind a NAT, this will still be this computer's IP, and not the router's."""
try:
# Try to get the internet-facing IP by attempting a connection
# to a non-existent server and reading what IP was used.
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
# 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so
# there is guaranteed to be no one listening on the other
# end (and we won't accidentally DOS anyone).
sock.connect(('203.0.113.1', 1))
ip = sock.getsockname()[0]
return ip
except:
# Something went terribly wrong. Just give loopback rather
# than killing everything, because this is often called just
# to provide a default argument
return '127.0.0.1' | [
"def",
"getPublicIP",
"(",
")",
":",
"try",
":",
"# Try to get the internet-facing IP by attempting a connection",
"# to a non-existent server and reading what IP was used.",
"with",
"closing",
"(",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
")",
"as",
"sock",
":",
"# 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so",
"# there is guaranteed to be no one listening on the other",
"# end (and we won't accidentally DOS anyone).",
"sock",
".",
"connect",
"(",
"(",
"'203.0.113.1'",
",",
"1",
")",
")",
"ip",
"=",
"sock",
".",
"getsockname",
"(",
")",
"[",
"0",
"]",
"return",
"ip",
"except",
":",
"# Something went terribly wrong. Just give loopback rather",
"# than killing everything, because this is often called just",
"# to provide a default argument",
"return",
"'127.0.0.1'"
] | 47 | 20.526316 |
def add_to_document(self, parent):
"""Adds an ``Argument`` object to this ElementTree document.
Adds an <arg> subelement to the parent element, typically <args>
and sets up its subelements with their respective text.
:param parent: An ``ET.Element`` to be the parent of a new <arg> subelement
:returns: An ``ET.Element`` object representing this argument.
"""
arg = ET.SubElement(parent, "arg")
arg.set("name", self.name)
if self.title is not None:
ET.SubElement(arg, "title").text = self.title
if self.description is not None:
ET.SubElement(arg, "description").text = self.description
if self.validation is not None:
ET.SubElement(arg, "validation").text = self.validation
# add all other subelements to this Argument, represented by (tag, text)
subelements = [
("data_type", self.data_type),
("required_on_edit", self.required_on_edit),
("required_on_create", self.required_on_create)
]
for name, value in subelements:
ET.SubElement(arg, name).text = str(value).lower()
return arg | [
"def",
"add_to_document",
"(",
"self",
",",
"parent",
")",
":",
"arg",
"=",
"ET",
".",
"SubElement",
"(",
"parent",
",",
"\"arg\"",
")",
"arg",
".",
"set",
"(",
"\"name\"",
",",
"self",
".",
"name",
")",
"if",
"self",
".",
"title",
"is",
"not",
"None",
":",
"ET",
".",
"SubElement",
"(",
"arg",
",",
"\"title\"",
")",
".",
"text",
"=",
"self",
".",
"title",
"if",
"self",
".",
"description",
"is",
"not",
"None",
":",
"ET",
".",
"SubElement",
"(",
"arg",
",",
"\"description\"",
")",
".",
"text",
"=",
"self",
".",
"description",
"if",
"self",
".",
"validation",
"is",
"not",
"None",
":",
"ET",
".",
"SubElement",
"(",
"arg",
",",
"\"validation\"",
")",
".",
"text",
"=",
"self",
".",
"validation",
"# add all other subelements to this Argument, represented by (tag, text)",
"subelements",
"=",
"[",
"(",
"\"data_type\"",
",",
"self",
".",
"data_type",
")",
",",
"(",
"\"required_on_edit\"",
",",
"self",
".",
"required_on_edit",
")",
",",
"(",
"\"required_on_create\"",
",",
"self",
".",
"required_on_create",
")",
"]",
"for",
"name",
",",
"value",
"in",
"subelements",
":",
"ET",
".",
"SubElement",
"(",
"arg",
",",
"name",
")",
".",
"text",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"return",
"arg"
] | 36.59375 | 22.25 |
def create(self, subscription_id, name, parameters, type='analysis', service='facebook'):
""" Create a PYLON task
:param subscription_id: The ID of the recording to create the task for
:type subscription_id: str
:param name: The name of the new task
:type name: str
:param parameters: The parameters for this task
:type parameters: dict
:param type: The type of analysis to create, currently only 'analysis' is accepted
:type type: str
:param service: The PYLON service (facebook)
:type service: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
"""
params = {
'subscription_id': subscription_id,
'name': name,
'parameters': parameters,
'type': type
}
return self.request.post(service + '/task/', params) | [
"def",
"create",
"(",
"self",
",",
"subscription_id",
",",
"name",
",",
"parameters",
",",
"type",
"=",
"'analysis'",
",",
"service",
"=",
"'facebook'",
")",
":",
"params",
"=",
"{",
"'subscription_id'",
":",
"subscription_id",
",",
"'name'",
":",
"name",
",",
"'parameters'",
":",
"parameters",
",",
"'type'",
":",
"type",
"}",
"return",
"self",
".",
"request",
".",
"post",
"(",
"service",
"+",
"'/task/'",
",",
"params",
")"
] | 40.666667 | 20.666667 |
def runSql(self, migrationName, version):
"""
Given a migration name and version lookup the sql file and run it.
"""
sys.stdout.write("Running migration %s to version %s: ..."%(migrationName, version))
sqlPath = os.path.join(self.migrationDirectory, migrationName)
sql = open(sqlPath, "r").read()
try:
if self.session.is_active:
print "session is active"
self.session.commit()
self.session.begin()
executeBatch(self.session, sql)
self.session.add(models.Migration(version, migrationName))
except:
print "\n"
self.session.rollback()
raise
else:
self.session.commit()
sys.stdout.write("\r")
sys.stdout.flush()
sys.stdout.write("Running migration %s to version %s: SUCCESS!\n"%(migrationName, version)) | [
"def",
"runSql",
"(",
"self",
",",
"migrationName",
",",
"version",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Running migration %s to version %s: ...\"",
"%",
"(",
"migrationName",
",",
"version",
")",
")",
"sqlPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"migrationDirectory",
",",
"migrationName",
")",
"sql",
"=",
"open",
"(",
"sqlPath",
",",
"\"r\"",
")",
".",
"read",
"(",
")",
"try",
":",
"if",
"self",
".",
"session",
".",
"is_active",
":",
"print",
"\"session is active\"",
"self",
".",
"session",
".",
"commit",
"(",
")",
"self",
".",
"session",
".",
"begin",
"(",
")",
"executeBatch",
"(",
"self",
".",
"session",
",",
"sql",
")",
"self",
".",
"session",
".",
"add",
"(",
"models",
".",
"Migration",
"(",
"version",
",",
"migrationName",
")",
")",
"except",
":",
"print",
"\"\\n\"",
"self",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"else",
":",
"self",
".",
"session",
".",
"commit",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Running migration %s to version %s: SUCCESS!\\n\"",
"%",
"(",
"migrationName",
",",
"version",
")",
")"
] | 39.173913 | 16.565217 |
def extend_access_token(self, app_id, app_secret):
"""
Extends the expiration time of a valid OAuth access token. See
<https://developers.facebook.com/roadmap/offline-access-removal/
#extend_token>
"""
args = {
"client_id": app_id,
"client_secret": app_secret,
"grant_type": "fb_exchange_token",
"fb_exchange_token": self.access_token,
}
response = urllib2.urlopen("https://graph.facebook.com/oauth/"
"access_token?" +
urllib.parse.urlencode(args)).read().decode('utf-8')
query_str = parse_qs(response)
if "access_token" in query_str:
result = {"accesstoken": query_str["access_token"][0]}
if "expires" in query_str:
result["expire"] = query_str["expires"][0]
return result
else:
response = json.loads(response)
raise GraphAPIError(response) | [
"def",
"extend_access_token",
"(",
"self",
",",
"app_id",
",",
"app_secret",
")",
":",
"args",
"=",
"{",
"\"client_id\"",
":",
"app_id",
",",
"\"client_secret\"",
":",
"app_secret",
",",
"\"grant_type\"",
":",
"\"fb_exchange_token\"",
",",
"\"fb_exchange_token\"",
":",
"self",
".",
"access_token",
",",
"}",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"\"https://graph.facebook.com/oauth/\"",
"\"access_token?\"",
"+",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"args",
")",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"query_str",
"=",
"parse_qs",
"(",
"response",
")",
"if",
"\"access_token\"",
"in",
"query_str",
":",
"result",
"=",
"{",
"\"accesstoken\"",
":",
"query_str",
"[",
"\"access_token\"",
"]",
"[",
"0",
"]",
"}",
"if",
"\"expires\"",
"in",
"query_str",
":",
"result",
"[",
"\"expire\"",
"]",
"=",
"query_str",
"[",
"\"expires\"",
"]",
"[",
"0",
"]",
"return",
"result",
"else",
":",
"response",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"raise",
"GraphAPIError",
"(",
"response",
")"
] | 40 | 15.76 |
def _is_group(token):
"""
sqlparse 0.2.2 changed it from a callable to a bool property
"""
is_group = token.is_group
if isinstance(is_group, bool):
return is_group
else:
return is_group() | [
"def",
"_is_group",
"(",
"token",
")",
":",
"is_group",
"=",
"token",
".",
"is_group",
"if",
"isinstance",
"(",
"is_group",
",",
"bool",
")",
":",
"return",
"is_group",
"else",
":",
"return",
"is_group",
"(",
")"
] | 24.333333 | 13.666667 |
def strduration_long (duration, do_translate=True):
"""Turn a time value in seconds into x hours, x minutes, etc."""
if do_translate:
# use global translator functions
global _, _n
else:
# do not translate
_ = lambda x: x
_n = lambda a, b, n: a if n==1 else b
if duration < 0:
duration = abs(duration)
prefix = "-"
else:
prefix = ""
if duration < 1:
return _("%(prefix)s%(duration).02f seconds") % \
{"prefix": prefix, "duration": duration}
# translation dummies
_n("%d second", "%d seconds", 1)
_n("%d minute", "%d minutes", 1)
_n("%d hour", "%d hours", 1)
_n("%d day", "%d days", 1)
_n("%d year", "%d years", 1)
cutoffs = [
(60, "%d second", "%d seconds"),
(60, "%d minute", "%d minutes"),
(24, "%d hour", "%d hours"),
(365, "%d day", "%d days"),
(None, "%d year", "%d years"),
]
time_str = []
for divisor, single, plural in cutoffs:
if duration < 1:
break
if divisor is None:
duration, unit = 0, duration
else:
duration, unit = divmod(duration, divisor)
if unit:
time_str.append(_n(single, plural, unit) % unit)
time_str.reverse()
if len(time_str) > 2:
time_str.pop()
return "%s%s" % (prefix, ", ".join(time_str)) | [
"def",
"strduration_long",
"(",
"duration",
",",
"do_translate",
"=",
"True",
")",
":",
"if",
"do_translate",
":",
"# use global translator functions",
"global",
"_",
",",
"_n",
"else",
":",
"# do not translate",
"_",
"=",
"lambda",
"x",
":",
"x",
"_n",
"=",
"lambda",
"a",
",",
"b",
",",
"n",
":",
"a",
"if",
"n",
"==",
"1",
"else",
"b",
"if",
"duration",
"<",
"0",
":",
"duration",
"=",
"abs",
"(",
"duration",
")",
"prefix",
"=",
"\"-\"",
"else",
":",
"prefix",
"=",
"\"\"",
"if",
"duration",
"<",
"1",
":",
"return",
"_",
"(",
"\"%(prefix)s%(duration).02f seconds\"",
")",
"%",
"{",
"\"prefix\"",
":",
"prefix",
",",
"\"duration\"",
":",
"duration",
"}",
"# translation dummies",
"_n",
"(",
"\"%d second\"",
",",
"\"%d seconds\"",
",",
"1",
")",
"_n",
"(",
"\"%d minute\"",
",",
"\"%d minutes\"",
",",
"1",
")",
"_n",
"(",
"\"%d hour\"",
",",
"\"%d hours\"",
",",
"1",
")",
"_n",
"(",
"\"%d day\"",
",",
"\"%d days\"",
",",
"1",
")",
"_n",
"(",
"\"%d year\"",
",",
"\"%d years\"",
",",
"1",
")",
"cutoffs",
"=",
"[",
"(",
"60",
",",
"\"%d second\"",
",",
"\"%d seconds\"",
")",
",",
"(",
"60",
",",
"\"%d minute\"",
",",
"\"%d minutes\"",
")",
",",
"(",
"24",
",",
"\"%d hour\"",
",",
"\"%d hours\"",
")",
",",
"(",
"365",
",",
"\"%d day\"",
",",
"\"%d days\"",
")",
",",
"(",
"None",
",",
"\"%d year\"",
",",
"\"%d years\"",
")",
",",
"]",
"time_str",
"=",
"[",
"]",
"for",
"divisor",
",",
"single",
",",
"plural",
"in",
"cutoffs",
":",
"if",
"duration",
"<",
"1",
":",
"break",
"if",
"divisor",
"is",
"None",
":",
"duration",
",",
"unit",
"=",
"0",
",",
"duration",
"else",
":",
"duration",
",",
"unit",
"=",
"divmod",
"(",
"duration",
",",
"divisor",
")",
"if",
"unit",
":",
"time_str",
".",
"append",
"(",
"_n",
"(",
"single",
",",
"plural",
",",
"unit",
")",
"%",
"unit",
")",
"time_str",
".",
"reverse",
"(",
")",
"if",
"len",
"(",
"time_str",
")",
">",
"2",
":",
"time_str",
".",
"pop",
"(",
")",
"return",
"\"%s%s\"",
"%",
"(",
"prefix",
",",
"\", \"",
".",
"join",
"(",
"time_str",
")",
")"
] | 31 | 13.954545 |
def get_templates(model):
""" Return a list of templates usable by a model. """
for template_name, template in templates.items():
if issubclass(template.model, model):
yield (template_name, template.layout._meta.verbose_name) | [
"def",
"get_templates",
"(",
"model",
")",
":",
"for",
"template_name",
",",
"template",
"in",
"templates",
".",
"items",
"(",
")",
":",
"if",
"issubclass",
"(",
"template",
".",
"model",
",",
"model",
")",
":",
"yield",
"(",
"template_name",
",",
"template",
".",
"layout",
".",
"_meta",
".",
"verbose_name",
")"
] | 49.8 | 12.4 |
def sorted(cls, items, orders):
'''Returns the elements in `items` sorted according to `orders`'''
return sorted(items, cmp=cls.multipleOrderComparison(orders)) | [
"def",
"sorted",
"(",
"cls",
",",
"items",
",",
"orders",
")",
":",
"return",
"sorted",
"(",
"items",
",",
"cmp",
"=",
"cls",
".",
"multipleOrderComparison",
"(",
"orders",
")",
")"
] | 55.333333 | 21.333333 |
def import_complex_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN complex gateway.
In addition to attributes inherited from Gateway type, complex gateway
has additional attribute default flow (default value - none).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'complexGateway' element.
"""
element_id = element.getAttribute(consts.Consts.id)
BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element)
diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \
if element.hasAttribute(consts.Consts.default) else None | [
"def",
"import_complex_gateway_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"element",
")",
":",
"element_id",
"=",
"element",
".",
"getAttribute",
"(",
"consts",
".",
"Consts",
".",
"id",
")",
"BpmnDiagramGraphImport",
".",
"import_gateway_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"element",
")",
"diagram_graph",
".",
"node",
"[",
"element_id",
"]",
"[",
"consts",
".",
"Consts",
".",
"default",
"]",
"=",
"element",
".",
"getAttribute",
"(",
"consts",
".",
"Consts",
".",
"default",
")",
"if",
"element",
".",
"hasAttribute",
"(",
"consts",
".",
"Consts",
".",
"default",
")",
"else",
"None"
] | 66.75 | 36.5 |
def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D | [
"def",
"_cldf2wld",
"(",
"dataset",
")",
":",
"header",
"=",
"[",
"f",
"for",
"f",
"in",
"dataset",
".",
"dataset",
".",
"lexeme_class",
".",
"fieldnames",
"(",
")",
"if",
"f",
"!=",
"'ID'",
"]",
"D",
"=",
"{",
"0",
":",
"[",
"'lid'",
"]",
"+",
"[",
"h",
".",
"lower",
"(",
")",
"for",
"h",
"in",
"header",
"]",
"}",
"for",
"idx",
",",
"row",
"in",
"enumerate",
"(",
"dataset",
".",
"objects",
"[",
"'FormTable'",
"]",
")",
":",
"row",
"=",
"deepcopy",
"(",
"row",
")",
"row",
"[",
"'Segments'",
"]",
"=",
"' '",
".",
"join",
"(",
"row",
"[",
"'Segments'",
"]",
")",
"D",
"[",
"idx",
"+",
"1",
"]",
"=",
"[",
"row",
"[",
"'ID'",
"]",
"]",
"+",
"[",
"row",
"[",
"h",
"]",
"for",
"h",
"in",
"header",
"]",
"return",
"D"
] | 47.444444 | 17.555556 |
def download_and_unpack(self, url, *paths, **kw):
"""
Download a zipfile and immediately unpack selected content.
:param url:
:param paths:
:param kw:
:return:
"""
with self.temp_download(url, 'ds.zip', log=kw.pop('log', None)) as zipp:
with TemporaryDirectory() as tmpdir:
with zipfile.ZipFile(zipp.as_posix()) as zipf:
for path in paths:
zipf.extract(as_posix(path), path=tmpdir.as_posix())
copy(tmpdir.joinpath(path), self) | [
"def",
"download_and_unpack",
"(",
"self",
",",
"url",
",",
"*",
"paths",
",",
"*",
"*",
"kw",
")",
":",
"with",
"self",
".",
"temp_download",
"(",
"url",
",",
"'ds.zip'",
",",
"log",
"=",
"kw",
".",
"pop",
"(",
"'log'",
",",
"None",
")",
")",
"as",
"zipp",
":",
"with",
"TemporaryDirectory",
"(",
")",
"as",
"tmpdir",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zipp",
".",
"as_posix",
"(",
")",
")",
"as",
"zipf",
":",
"for",
"path",
"in",
"paths",
":",
"zipf",
".",
"extract",
"(",
"as_posix",
"(",
"path",
")",
",",
"path",
"=",
"tmpdir",
".",
"as_posix",
"(",
")",
")",
"copy",
"(",
"tmpdir",
".",
"joinpath",
"(",
"path",
")",
",",
"self",
")"
] | 38.2 | 19.133333 |
def run_before_script(script_file, cwd=None):
"""Function to wrap try/except for subprocess.check_call()."""
try:
proc = subprocess.Popen(
shlex.split(str(script_file)),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
)
for line in iter(proc.stdout.readline, b''):
sys.stdout.write(console_to_str(line))
proc.wait()
if proc.returncode:
stderr = proc.stderr.read()
proc.stderr.close()
stderr = console_to_str(stderr).split('\n')
stderr = '\n'.join(list(filter(None, stderr))) # filter empty
raise exc.BeforeLoadScriptError(
proc.returncode, os.path.abspath(script_file), stderr
)
return proc.returncode
except OSError as e:
if e.errno == 2:
raise exc.BeforeLoadScriptNotExists(e, os.path.abspath(script_file))
else:
raise e | [
"def",
"run_before_script",
"(",
"script_file",
",",
"cwd",
"=",
"None",
")",
":",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"shlex",
".",
"split",
"(",
"str",
"(",
"script_file",
")",
")",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"cwd",
"=",
"cwd",
",",
")",
"for",
"line",
"in",
"iter",
"(",
"proc",
".",
"stdout",
".",
"readline",
",",
"b''",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"console_to_str",
"(",
"line",
")",
")",
"proc",
".",
"wait",
"(",
")",
"if",
"proc",
".",
"returncode",
":",
"stderr",
"=",
"proc",
".",
"stderr",
".",
"read",
"(",
")",
"proc",
".",
"stderr",
".",
"close",
"(",
")",
"stderr",
"=",
"console_to_str",
"(",
"stderr",
")",
".",
"split",
"(",
"'\\n'",
")",
"stderr",
"=",
"'\\n'",
".",
"join",
"(",
"list",
"(",
"filter",
"(",
"None",
",",
"stderr",
")",
")",
")",
"# filter empty",
"raise",
"exc",
".",
"BeforeLoadScriptError",
"(",
"proc",
".",
"returncode",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"script_file",
")",
",",
"stderr",
")",
"return",
"proc",
".",
"returncode",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"2",
":",
"raise",
"exc",
".",
"BeforeLoadScriptNotExists",
"(",
"e",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"script_file",
")",
")",
"else",
":",
"raise",
"e"
] | 32.931034 | 18.37931 |
def dims_knight(self, move):
'''Knight on the rim is dim'''
if self.board.piece_type_at(move.from_square) == chess.KNIGHT:
rim = SquareSet(
chess.BB_RANK_1 | \
chess.BB_RANK_8 | \
chess.BB_FILE_A | \
chess.BB_FILE_H)
return move.to_square in rim | [
"def",
"dims_knight",
"(",
"self",
",",
"move",
")",
":",
"if",
"self",
".",
"board",
".",
"piece_type_at",
"(",
"move",
".",
"from_square",
")",
"==",
"chess",
".",
"KNIGHT",
":",
"rim",
"=",
"SquareSet",
"(",
"chess",
".",
"BB_RANK_1",
"|",
"chess",
".",
"BB_RANK_8",
"|",
"chess",
".",
"BB_FILE_A",
"|",
"chess",
".",
"BB_FILE_H",
")",
"return",
"move",
".",
"to_square",
"in",
"rim"
] | 33 | 12.777778 |
def get_plugins(modules, classes):
"""Find all given (sub-)classes in all modules.
@param modules: the modules to search
@ptype modules: iterator of modules
@return: found classes
@rytpe: iterator of class objects
"""
for module in modules:
for plugin in get_module_plugins(module, classes):
yield plugin | [
"def",
"get_plugins",
"(",
"modules",
",",
"classes",
")",
":",
"for",
"module",
"in",
"modules",
":",
"for",
"plugin",
"in",
"get_module_plugins",
"(",
"module",
",",
"classes",
")",
":",
"yield",
"plugin"
] | 34.3 | 7.3 |
def order_by(self, field_path, **kwargs):
"""Create an "order by" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.order_by` for
more information on this method.
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
kwargs (Dict[str, Any]): The keyword arguments to pass along
to the query. The only supported keyword is ``direction``,
see :meth:`~.firestore_v1beta1.query.Query.order_by` for
more information.
Returns:
~.firestore_v1beta1.query.Query: An "order by" query.
"""
query = query_mod.Query(self)
return query.order_by(field_path, **kwargs) | [
"def",
"order_by",
"(",
"self",
",",
"field_path",
",",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"query_mod",
".",
"Query",
"(",
"self",
")",
"return",
"query",
".",
"order_by",
"(",
"field_path",
",",
"*",
"*",
"kwargs",
")"
] | 39.85 | 20.85 |
def setActiveCamera(self,name):
"""
Sets the active camera.
This method also calls the :py:meth:`Camera.on_activate() <peng3d.camera.Camera.on_activate>` event handler if the camera is not already active.
"""
if name == self.activeCamera:
return # Cam is already active
if name not in self.world.cameras:
raise ValueError("Unknown camera name")
old = self.activeCamera
self.activeCamera = name
self.cam.on_activate(old) | [
"def",
"setActiveCamera",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"==",
"self",
".",
"activeCamera",
":",
"return",
"# Cam is already active",
"if",
"name",
"not",
"in",
"self",
".",
"world",
".",
"cameras",
":",
"raise",
"ValueError",
"(",
"\"Unknown camera name\"",
")",
"old",
"=",
"self",
".",
"activeCamera",
"self",
".",
"activeCamera",
"=",
"name",
"self",
".",
"cam",
".",
"on_activate",
"(",
"old",
")"
] | 39.384615 | 15.692308 |
def _parse_string(self, line):
"""
Consume the complete string until next " or \n
"""
log.debug("*** parse STRING: >>>%r<<<", line)
parts = self.regex_split_string.split(line, maxsplit=1)
if len(parts) == 1: # end
return parts[0], None
pre, match, post = parts
log.debug("\tpre: >>>%r<<<", pre)
log.debug("\tmatch: >>>%r<<<", match)
log.debug("\tpost: >>>%r<<<", post)
pre = pre + match
log.debug("Parse string result: %r,%r", pre, post)
return pre, post | [
"def",
"_parse_string",
"(",
"self",
",",
"line",
")",
":",
"log",
".",
"debug",
"(",
"\"*** parse STRING: >>>%r<<<\"",
",",
"line",
")",
"parts",
"=",
"self",
".",
"regex_split_string",
".",
"split",
"(",
"line",
",",
"maxsplit",
"=",
"1",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"# end",
"return",
"parts",
"[",
"0",
"]",
",",
"None",
"pre",
",",
"match",
",",
"post",
"=",
"parts",
"log",
".",
"debug",
"(",
"\"\\tpre: >>>%r<<<\"",
",",
"pre",
")",
"log",
".",
"debug",
"(",
"\"\\tmatch: >>>%r<<<\"",
",",
"match",
")",
"log",
".",
"debug",
"(",
"\"\\tpost: >>>%r<<<\"",
",",
"post",
")",
"pre",
"=",
"pre",
"+",
"match",
"log",
".",
"debug",
"(",
"\"Parse string result: %r,%r\"",
",",
"pre",
",",
"post",
")",
"return",
"pre",
",",
"post"
] | 34.8125 | 11.1875 |
def create_model(name, *attributes, **params):
'''Create a :class:`Model` class for objects requiring
and interface similar to :class:`StdModel`. We refers to this type
of models as :ref:`local models <local-models>` since instances of such
models are not persistent on a :class:`stdnet.BackendDataServer`.
:param name: Name of the model class.
:param attributes: positiona attribute names. These are the only attribute
available to the model during the default constructor.
:param params: key-valued parameter to pass to the :class:`ModelMeta`
constructor.
:return: a local :class:`Model` class.
'''
params['register'] = False
params['attributes'] = attributes
kwargs = {'manager_class': params.pop('manager_class', Manager),
'Meta': params}
return ModelType(name, (StdModel,), kwargs) | [
"def",
"create_model",
"(",
"name",
",",
"*",
"attributes",
",",
"*",
"*",
"params",
")",
":",
"params",
"[",
"'register'",
"]",
"=",
"False",
"params",
"[",
"'attributes'",
"]",
"=",
"attributes",
"kwargs",
"=",
"{",
"'manager_class'",
":",
"params",
".",
"pop",
"(",
"'manager_class'",
",",
"Manager",
")",
",",
"'Meta'",
":",
"params",
"}",
"return",
"ModelType",
"(",
"name",
",",
"(",
"StdModel",
",",
")",
",",
"kwargs",
")"
] | 45.333333 | 19.333333 |
def _parse_json(s):
' parse str into JsonDict '
def _obj_hook(pairs):
' convert json object to python object '
o = JsonDict()
for k, v in pairs.iteritems():
o[str(k)] = v
return o
return json.loads(s, object_hook=_obj_hook) | [
"def",
"_parse_json",
"(",
"s",
")",
":",
"def",
"_obj_hook",
"(",
"pairs",
")",
":",
"' convert json object to python object '",
"o",
"=",
"JsonDict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"pairs",
".",
"iteritems",
"(",
")",
":",
"o",
"[",
"str",
"(",
"k",
")",
"]",
"=",
"v",
"return",
"o",
"return",
"json",
".",
"loads",
"(",
"s",
",",
"object_hook",
"=",
"_obj_hook",
")"
] | 27.1 | 15.9 |
def _get_float_remainder(fvalue, signs=9):
"""
Get remainder of float, i.e. 2.05 -> '05'
@param fvalue: input value
@type fvalue: C{integer types}, C{float} or C{Decimal}
@param signs: maximum number of signs
@type signs: C{integer types}
@return: remainder
@rtype: C{str}
@raise ValueError: fvalue is negative
@raise ValueError: signs overflow
"""
check_positive(fvalue)
if isinstance(fvalue, six.integer_types):
return "0"
if isinstance(fvalue, Decimal) and fvalue.as_tuple()[2] == 0:
# Decimal.as_tuple() -> (sign, digit_tuple, exponent)
# если экспонента "0" -- значит дробной части нет
return "0"
signs = min(signs, len(FRACTIONS))
# нужно remainder в строке, потому что дробные X.0Y
# будут "ломаться" до X.Y
remainder = str(fvalue).split('.')[1]
iremainder = int(remainder)
orig_remainder = remainder
factor = len(str(remainder)) - signs
if factor > 0:
# после запятой цифр больше чем signs, округляем
iremainder = int(round(iremainder / (10.0**factor)))
format = "%%0%dd" % min(len(remainder), signs)
remainder = format % iremainder
if len(remainder) > signs:
# при округлении цифр вида 0.998 ругаться
raise ValueError("Signs overflow: I can't round only fractional part \
of %s to fit %s in %d signs" % \
(str(fvalue), orig_remainder, signs))
return remainder | [
"def",
"_get_float_remainder",
"(",
"fvalue",
",",
"signs",
"=",
"9",
")",
":",
"check_positive",
"(",
"fvalue",
")",
"if",
"isinstance",
"(",
"fvalue",
",",
"six",
".",
"integer_types",
")",
":",
"return",
"\"0\"",
"if",
"isinstance",
"(",
"fvalue",
",",
"Decimal",
")",
"and",
"fvalue",
".",
"as_tuple",
"(",
")",
"[",
"2",
"]",
"==",
"0",
":",
"# Decimal.as_tuple() -> (sign, digit_tuple, exponent)",
"# если экспонента \"0\" -- значит дробной части нет",
"return",
"\"0\"",
"signs",
"=",
"min",
"(",
"signs",
",",
"len",
"(",
"FRACTIONS",
")",
")",
"# нужно remainder в строке, потому что дробные X.0Y",
"# будут \"ломаться\" до X.Y",
"remainder",
"=",
"str",
"(",
"fvalue",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"iremainder",
"=",
"int",
"(",
"remainder",
")",
"orig_remainder",
"=",
"remainder",
"factor",
"=",
"len",
"(",
"str",
"(",
"remainder",
")",
")",
"-",
"signs",
"if",
"factor",
">",
"0",
":",
"# после запятой цифр больше чем signs, округляем",
"iremainder",
"=",
"int",
"(",
"round",
"(",
"iremainder",
"/",
"(",
"10.0",
"**",
"factor",
")",
")",
")",
"format",
"=",
"\"%%0%dd\"",
"%",
"min",
"(",
"len",
"(",
"remainder",
")",
",",
"signs",
")",
"remainder",
"=",
"format",
"%",
"iremainder",
"if",
"len",
"(",
"remainder",
")",
">",
"signs",
":",
"# при округлении цифр вида 0.998 ругаться",
"raise",
"ValueError",
"(",
"\"Signs overflow: I can't round only fractional part \\\n of %s to fit %s in %d signs\"",
"%",
"(",
"str",
"(",
"fvalue",
")",
",",
"orig_remainder",
",",
"signs",
")",
")",
"return",
"remainder"
] | 30.87234 | 18.106383 |
def send_error_email(subject, message, additional_recipients=None):
"""
Sends an email to the configured error email, if it's configured.
"""
recipients = _email_recipients(additional_recipients)
sender = email().sender
send_email(
subject=subject,
message=message,
sender=sender,
recipients=recipients
) | [
"def",
"send_error_email",
"(",
"subject",
",",
"message",
",",
"additional_recipients",
"=",
"None",
")",
":",
"recipients",
"=",
"_email_recipients",
"(",
"additional_recipients",
")",
"sender",
"=",
"email",
"(",
")",
".",
"sender",
"send_email",
"(",
"subject",
"=",
"subject",
",",
"message",
"=",
"message",
",",
"sender",
"=",
"sender",
",",
"recipients",
"=",
"recipients",
")"
] | 29.416667 | 17.25 |
def next(self):
"""Return the next transaction object.
StopIteration will be propagated from self.csvreader.next()
"""
try:
return self.dict_to_xn(self.csvreader.next())
except MetadataException:
# row was metadata; proceed to next row
return next(self) | [
"def",
"next",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"dict_to_xn",
"(",
"self",
".",
"csvreader",
".",
"next",
"(",
")",
")",
"except",
"MetadataException",
":",
"# row was metadata; proceed to next row",
"return",
"next",
"(",
"self",
")"
] | 32.1 | 16.6 |
def delete(instance_id, profile=None, **kwargs):
'''
Delete an instance
instance_id
ID of the instance to be deleted
CLI Example:
.. code-block:: bash
salt '*' nova.delete 1138
'''
conn = _auth(profile, **kwargs)
return conn.delete(instance_id) | [
"def",
"delete",
"(",
"instance_id",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
",",
"*",
"*",
"kwargs",
")",
"return",
"conn",
".",
"delete",
"(",
"instance_id",
")"
] | 17.625 | 23.375 |
def get_value(self):
"""
Evaluate self.expr to get the parameter's value
"""
if (self._value is None) and (self.expr is not None):
self._value = self.expr.get_value()
return self._value | [
"def",
"get_value",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"_value",
"is",
"None",
")",
"and",
"(",
"self",
".",
"expr",
"is",
"not",
"None",
")",
":",
"self",
".",
"_value",
"=",
"self",
".",
"expr",
".",
"get_value",
"(",
")",
"return",
"self",
".",
"_value"
] | 28.875 | 14.625 |
def _add_module(self, aModule):
"""
Private method to add a module object to the snapshot.
@type aModule: L{Module}
@param aModule: Module object.
"""
## if not isinstance(aModule, Module):
## if hasattr(aModule, '__class__'):
## typename = aModule.__class__.__name__
## else:
## typename = str(type(aModule))
## msg = "Expected Module, got %s instead" % typename
## raise TypeError(msg)
lpBaseOfDll = aModule.get_base()
## if lpBaseOfDll in self.__moduleDict:
## msg = "Module already exists: %d" % lpBaseOfDll
## raise KeyError(msg)
aModule.set_process(self)
self.__moduleDict[lpBaseOfDll] = aModule | [
"def",
"_add_module",
"(",
"self",
",",
"aModule",
")",
":",
"## if not isinstance(aModule, Module):",
"## if hasattr(aModule, '__class__'):",
"## typename = aModule.__class__.__name__",
"## else:",
"## typename = str(type(aModule))",
"## msg = \"Expected Module, got %s instead\" % typename",
"## raise TypeError(msg)",
"lpBaseOfDll",
"=",
"aModule",
".",
"get_base",
"(",
")",
"## if lpBaseOfDll in self.__moduleDict:",
"## msg = \"Module already exists: %d\" % lpBaseOfDll",
"## raise KeyError(msg)",
"aModule",
".",
"set_process",
"(",
"self",
")",
"self",
".",
"__moduleDict",
"[",
"lpBaseOfDll",
"]",
"=",
"aModule"
] | 37.9 | 10.7 |
def values(self):
"""Gets user inputs
:returns: dict of inputs:
| *'fontsz'*: int -- font size for text throughout the GUI
| *'display_attributes'*: dict -- what attributes of stimuli to report as they are being presented
"""
result = {}
result['fontsz'] = self.ui.fontszSpnbx.value()
result['display_attributes'] = self.ui.detailWidget.getCheckedDetails()
return result | [
"def",
"values",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"result",
"[",
"'fontsz'",
"]",
"=",
"self",
".",
"ui",
".",
"fontszSpnbx",
".",
"value",
"(",
")",
"result",
"[",
"'display_attributes'",
"]",
"=",
"self",
".",
"ui",
".",
"detailWidget",
".",
"getCheckedDetails",
"(",
")",
"return",
"result"
] | 39.363636 | 23.181818 |
def _serialize_int(value, size=32, padding=0):
"""
Translates a signed python integral or a BitVec into a 32 byte string, MSB first
"""
if size <= 0 or size > 32:
raise ValueError
if not isinstance(value, (int, BitVec)):
raise ValueError
if issymbolic(value):
buf = ArrayVariable(index_bits=256, index_max=32, value_bits=8, name='temp{}'.format(uuid.uuid1()))
value = Operators.SEXTEND(value, value.size, size * 8)
buf = ArrayProxy(buf.write_BE(padding, value, size))
else:
value = int(value)
buf = bytearray()
for _ in range(padding):
buf.append(0)
for position in reversed(range(size)):
buf.append(Operators.EXTRACT(value, position * 8, 8))
return buf | [
"def",
"_serialize_int",
"(",
"value",
",",
"size",
"=",
"32",
",",
"padding",
"=",
"0",
")",
":",
"if",
"size",
"<=",
"0",
"or",
"size",
">",
"32",
":",
"raise",
"ValueError",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"BitVec",
")",
")",
":",
"raise",
"ValueError",
"if",
"issymbolic",
"(",
"value",
")",
":",
"buf",
"=",
"ArrayVariable",
"(",
"index_bits",
"=",
"256",
",",
"index_max",
"=",
"32",
",",
"value_bits",
"=",
"8",
",",
"name",
"=",
"'temp{}'",
".",
"format",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
")",
"value",
"=",
"Operators",
".",
"SEXTEND",
"(",
"value",
",",
"value",
".",
"size",
",",
"size",
"*",
"8",
")",
"buf",
"=",
"ArrayProxy",
"(",
"buf",
".",
"write_BE",
"(",
"padding",
",",
"value",
",",
"size",
")",
")",
"else",
":",
"value",
"=",
"int",
"(",
"value",
")",
"buf",
"=",
"bytearray",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"padding",
")",
":",
"buf",
".",
"append",
"(",
"0",
")",
"for",
"position",
"in",
"reversed",
"(",
"range",
"(",
"size",
")",
")",
":",
"buf",
".",
"append",
"(",
"Operators",
".",
"EXTRACT",
"(",
"value",
",",
"position",
"*",
"8",
",",
"8",
")",
")",
"return",
"buf"
] | 39.904762 | 18.47619 |
def parse_neighbors(neighbors, vars=[]):
"""Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping
regions to neighbors. The syntax is a region name followed by a ':'
followed by zero or more region names, followed by ';', repeated for
each region name. If you say 'X: Y' you don't need 'Y: X'.
>>> parse_neighbors('X: Y Z; Y: Z')
{'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}
"""
dict = DefaultDict([])
for var in vars:
dict[var] = []
specs = [spec.split(':') for spec in neighbors.split(';')]
for (A, Aneighbors) in specs:
A = A.strip()
dict.setdefault(A, [])
for B in Aneighbors.split():
dict[A].append(B)
dict[B].append(A)
return dict | [
"def",
"parse_neighbors",
"(",
"neighbors",
",",
"vars",
"=",
"[",
"]",
")",
":",
"dict",
"=",
"DefaultDict",
"(",
"[",
"]",
")",
"for",
"var",
"in",
"vars",
":",
"dict",
"[",
"var",
"]",
"=",
"[",
"]",
"specs",
"=",
"[",
"spec",
".",
"split",
"(",
"':'",
")",
"for",
"spec",
"in",
"neighbors",
".",
"split",
"(",
"';'",
")",
"]",
"for",
"(",
"A",
",",
"Aneighbors",
")",
"in",
"specs",
":",
"A",
"=",
"A",
".",
"strip",
"(",
")",
"dict",
".",
"setdefault",
"(",
"A",
",",
"[",
"]",
")",
"for",
"B",
"in",
"Aneighbors",
".",
"split",
"(",
")",
":",
"dict",
"[",
"A",
"]",
".",
"append",
"(",
"B",
")",
"dict",
"[",
"B",
"]",
".",
"append",
"(",
"A",
")",
"return",
"dict"
] | 39 | 13.894737 |
def pause(self, pause=0):
"""Insert a `pause`, in seconds."""
fr = self.frames[-1]
n = int(self.fps * pause)
for i in range(n):
fr2 = "/tmp/vpvid/" + str(len(self.frames)) + ".png"
self.frames.append(fr2)
os.system("cp -f %s %s" % (fr, fr2)) | [
"def",
"pause",
"(",
"self",
",",
"pause",
"=",
"0",
")",
":",
"fr",
"=",
"self",
".",
"frames",
"[",
"-",
"1",
"]",
"n",
"=",
"int",
"(",
"self",
".",
"fps",
"*",
"pause",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"fr2",
"=",
"\"/tmp/vpvid/\"",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"frames",
")",
")",
"+",
"\".png\"",
"self",
".",
"frames",
".",
"append",
"(",
"fr2",
")",
"os",
".",
"system",
"(",
"\"cp -f %s %s\"",
"%",
"(",
"fr",
",",
"fr2",
")",
")"
] | 37.75 | 10.625 |
def connect_bulk(self, si, logger, vcenter_data_model, request):
"""
:param si:
:param logger:
:param VMwarevCenterResourceModel vcenter_data_model:
:param request:
:return:
"""
self.logger = logger
self.logger.info('Apply connectivity changes has started')
self.logger.debug('Apply connectivity changes has started with the requet: {0}'.format(request))
holder = DeployDataHolder(jsonpickle.decode(request))
self.vcenter_data_model = vcenter_data_model
if vcenter_data_model.reserved_networks:
self.reserved_networks = [name.strip() for name in vcenter_data_model.reserved_networks.split(',')]
if not vcenter_data_model.default_dvswitch:
return self._handle_no_dvswitch_error(holder)
dvswitch_location = VMLocation.create_from_full_path(vcenter_data_model.default_dvswitch)
self.dv_switch_path = VMLocation.combine([vcenter_data_model.default_datacenter, dvswitch_location.path])
self.dv_switch_name = dvswitch_location.name
self.default_network = VMLocation.combine(
[vcenter_data_model.default_datacenter, vcenter_data_model.holding_network])
mappings = self._map_requsets(holder.driverRequest.actions)
self.logger.debug('Connectivity actions mappings: {0}'.format(jsonpickle.encode(mappings, unpicklable=False)))
pool = ThreadPool()
async_results = self._run_async_connection_actions(si, mappings, pool, logger)
results = self._get_async_results(async_results, pool)
self.logger.info('Apply connectivity changes done')
self.logger.debug('Apply connectivity has finished with the results: {0}'.format(jsonpickle.encode(results,
unpicklable=False)))
return results | [
"def",
"connect_bulk",
"(",
"self",
",",
"si",
",",
"logger",
",",
"vcenter_data_model",
",",
"request",
")",
":",
"self",
".",
"logger",
"=",
"logger",
"self",
".",
"logger",
".",
"info",
"(",
"'Apply connectivity changes has started'",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Apply connectivity changes has started with the requet: {0}'",
".",
"format",
"(",
"request",
")",
")",
"holder",
"=",
"DeployDataHolder",
"(",
"jsonpickle",
".",
"decode",
"(",
"request",
")",
")",
"self",
".",
"vcenter_data_model",
"=",
"vcenter_data_model",
"if",
"vcenter_data_model",
".",
"reserved_networks",
":",
"self",
".",
"reserved_networks",
"=",
"[",
"name",
".",
"strip",
"(",
")",
"for",
"name",
"in",
"vcenter_data_model",
".",
"reserved_networks",
".",
"split",
"(",
"','",
")",
"]",
"if",
"not",
"vcenter_data_model",
".",
"default_dvswitch",
":",
"return",
"self",
".",
"_handle_no_dvswitch_error",
"(",
"holder",
")",
"dvswitch_location",
"=",
"VMLocation",
".",
"create_from_full_path",
"(",
"vcenter_data_model",
".",
"default_dvswitch",
")",
"self",
".",
"dv_switch_path",
"=",
"VMLocation",
".",
"combine",
"(",
"[",
"vcenter_data_model",
".",
"default_datacenter",
",",
"dvswitch_location",
".",
"path",
"]",
")",
"self",
".",
"dv_switch_name",
"=",
"dvswitch_location",
".",
"name",
"self",
".",
"default_network",
"=",
"VMLocation",
".",
"combine",
"(",
"[",
"vcenter_data_model",
".",
"default_datacenter",
",",
"vcenter_data_model",
".",
"holding_network",
"]",
")",
"mappings",
"=",
"self",
".",
"_map_requsets",
"(",
"holder",
".",
"driverRequest",
".",
"actions",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Connectivity actions mappings: {0}'",
".",
"format",
"(",
"jsonpickle",
".",
"encode",
"(",
"mappings",
",",
"unpicklable",
"=",
"False",
")",
")",
")",
"pool",
"=",
"ThreadPool",
"(",
")",
"async_results",
"=",
"self",
".",
"_run_async_connection_actions",
"(",
"si",
",",
"mappings",
",",
"pool",
",",
"logger",
")",
"results",
"=",
"self",
".",
"_get_async_results",
"(",
"async_results",
",",
"pool",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Apply connectivity changes done'",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Apply connectivity has finished with the results: {0}'",
".",
"format",
"(",
"jsonpickle",
".",
"encode",
"(",
"results",
",",
"unpicklable",
"=",
"False",
")",
")",
")",
"return",
"results"
] | 47.175 | 32.825 |
def _array_slice(array, index):
"""Slice or index `array` at `index`.
Parameters
----------
index : int or ibis.expr.types.IntegerValue or slice
Returns
-------
sliced_array : ibis.expr.types.ValueExpr
If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then
the return type is the element type of `array`. If `index` is a
``slice`` then the return type is the same type as the input.
"""
if isinstance(index, slice):
start = index.start
stop = index.stop
if (start is not None and start < 0) or (
stop is not None and stop < 0
):
raise ValueError('negative slicing not yet supported')
step = index.step
if step is not None and step != 1:
raise NotImplementedError('step can only be 1')
op = ops.ArraySlice(array, start if start is not None else 0, stop)
else:
op = ops.ArrayIndex(array, index)
return op.to_expr() | [
"def",
"_array_slice",
"(",
"array",
",",
"index",
")",
":",
"if",
"isinstance",
"(",
"index",
",",
"slice",
")",
":",
"start",
"=",
"index",
".",
"start",
"stop",
"=",
"index",
".",
"stop",
"if",
"(",
"start",
"is",
"not",
"None",
"and",
"start",
"<",
"0",
")",
"or",
"(",
"stop",
"is",
"not",
"None",
"and",
"stop",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'negative slicing not yet supported'",
")",
"step",
"=",
"index",
".",
"step",
"if",
"step",
"is",
"not",
"None",
"and",
"step",
"!=",
"1",
":",
"raise",
"NotImplementedError",
"(",
"'step can only be 1'",
")",
"op",
"=",
"ops",
".",
"ArraySlice",
"(",
"array",
",",
"start",
"if",
"start",
"is",
"not",
"None",
"else",
"0",
",",
"stop",
")",
"else",
":",
"op",
"=",
"ops",
".",
"ArrayIndex",
"(",
"array",
",",
"index",
")",
"return",
"op",
".",
"to_expr",
"(",
")"
] | 31.354839 | 21.290323 |
def get_qpimage(self, index):
"""Return a single QPImage of the series
Parameters
----------
index: int or str
Index or identifier of the QPImage
Notes
-----
Instead of ``qps.get_qpimage(index)``, it is possible
to use the short-hand ``qps[index]``.
"""
if isinstance(index, str):
# search for the identifier
for ii in range(len(self)):
qpi = self[ii]
if "identifier" in qpi and qpi["identifier"] == index:
group = self.h5["qpi_{}".format(ii)]
break
else:
msg = "QPImage identifier '{}' not found!".format(index)
raise KeyError(msg)
else:
# integer index
if index < -len(self):
msg = "Index {} out of bounds for QPSeries of size {}!".format(
index, len(self))
raise ValueError(msg)
elif index < 0:
index += len(self)
name = "qpi_{}".format(index)
if name in self.h5:
group = self.h5[name]
else:
msg = "Index {} not found for QPSeries of length {}".format(
index, len(self))
raise KeyError(msg)
return QPImage(h5file=group) | [
"def",
"get_qpimage",
"(",
"self",
",",
"index",
")",
":",
"if",
"isinstance",
"(",
"index",
",",
"str",
")",
":",
"# search for the identifier",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"self",
")",
")",
":",
"qpi",
"=",
"self",
"[",
"ii",
"]",
"if",
"\"identifier\"",
"in",
"qpi",
"and",
"qpi",
"[",
"\"identifier\"",
"]",
"==",
"index",
":",
"group",
"=",
"self",
".",
"h5",
"[",
"\"qpi_{}\"",
".",
"format",
"(",
"ii",
")",
"]",
"break",
"else",
":",
"msg",
"=",
"\"QPImage identifier '{}' not found!\"",
".",
"format",
"(",
"index",
")",
"raise",
"KeyError",
"(",
"msg",
")",
"else",
":",
"# integer index",
"if",
"index",
"<",
"-",
"len",
"(",
"self",
")",
":",
"msg",
"=",
"\"Index {} out of bounds for QPSeries of size {}!\"",
".",
"format",
"(",
"index",
",",
"len",
"(",
"self",
")",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"elif",
"index",
"<",
"0",
":",
"index",
"+=",
"len",
"(",
"self",
")",
"name",
"=",
"\"qpi_{}\"",
".",
"format",
"(",
"index",
")",
"if",
"name",
"in",
"self",
".",
"h5",
":",
"group",
"=",
"self",
".",
"h5",
"[",
"name",
"]",
"else",
":",
"msg",
"=",
"\"Index {} not found for QPSeries of length {}\"",
".",
"format",
"(",
"index",
",",
"len",
"(",
"self",
")",
")",
"raise",
"KeyError",
"(",
"msg",
")",
"return",
"QPImage",
"(",
"h5file",
"=",
"group",
")"
] | 34.410256 | 14.589744 |
def get_specificity(self, scalar=None):
"""True_Negative / (True_Negative + False_Positive)"""
if ((not self._scalar_stats and not scalar and self._num_classes > 2) or
((scalar is False or self._scalar_stats is False) and self._num_classes > 1)):
spec = PrettyDict()
for pos_label in self.columns:
neg_labels = [label for label in self.columns if label != pos_label]
tn = sum(self[label][label] for label in neg_labels)
# fp = self[pos_label][neg_labels].sum()
fp = self.loc[neg_labels].sum()[pos_label]
assert(self[pos_label][neg_labels].sum() == fp)
spec[pos_label] = float(tn) / (tn + fp)
return pd.Series(spec)
return self._binary_specificity | [
"def",
"get_specificity",
"(",
"self",
",",
"scalar",
"=",
"None",
")",
":",
"if",
"(",
"(",
"not",
"self",
".",
"_scalar_stats",
"and",
"not",
"scalar",
"and",
"self",
".",
"_num_classes",
">",
"2",
")",
"or",
"(",
"(",
"scalar",
"is",
"False",
"or",
"self",
".",
"_scalar_stats",
"is",
"False",
")",
"and",
"self",
".",
"_num_classes",
">",
"1",
")",
")",
":",
"spec",
"=",
"PrettyDict",
"(",
")",
"for",
"pos_label",
"in",
"self",
".",
"columns",
":",
"neg_labels",
"=",
"[",
"label",
"for",
"label",
"in",
"self",
".",
"columns",
"if",
"label",
"!=",
"pos_label",
"]",
"tn",
"=",
"sum",
"(",
"self",
"[",
"label",
"]",
"[",
"label",
"]",
"for",
"label",
"in",
"neg_labels",
")",
"# fp = self[pos_label][neg_labels].sum()",
"fp",
"=",
"self",
".",
"loc",
"[",
"neg_labels",
"]",
".",
"sum",
"(",
")",
"[",
"pos_label",
"]",
"assert",
"(",
"self",
"[",
"pos_label",
"]",
"[",
"neg_labels",
"]",
".",
"sum",
"(",
")",
"==",
"fp",
")",
"spec",
"[",
"pos_label",
"]",
"=",
"float",
"(",
"tn",
")",
"/",
"(",
"tn",
"+",
"fp",
")",
"return",
"pd",
".",
"Series",
"(",
"spec",
")",
"return",
"self",
".",
"_binary_specificity"
] | 57.5 | 18.357143 |
def find_vmrun(self):
"""
Searches for vmrun.
:returns: path to vmrun
"""
# look for vmrun
vmrun_path = self.config.get_section_config("VMware").get("vmrun_path")
if not vmrun_path:
if sys.platform.startswith("win"):
vmrun_path = shutil.which("vmrun")
if vmrun_path is None:
# look for vmrun.exe using the VMware Workstation directory listed in the registry
vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
if vmrun_path is None:
# look for vmrun.exe using the VIX directory listed in the registry
vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware VIX")
elif sys.platform.startswith("darwin"):
vmrun_path = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
else:
vmrun_path = "vmrun"
if vmrun_path and not os.path.isabs(vmrun_path):
vmrun_path = shutil.which(vmrun_path)
if not vmrun_path:
raise VMwareError("Could not find VMware vmrun, please make sure it is installed")
if not os.path.isfile(vmrun_path):
raise VMwareError("vmrun {} is not accessible".format(vmrun_path))
if not os.access(vmrun_path, os.X_OK):
raise VMwareError("vmrun is not executable")
if os.path.basename(vmrun_path).lower() not in ["vmrun", "vmrun.exe"]:
raise VMwareError("Invalid vmrun executable name {}".format(os.path.basename(vmrun_path)))
self._vmrun_path = vmrun_path
return vmrun_path | [
"def",
"find_vmrun",
"(",
"self",
")",
":",
"# look for vmrun",
"vmrun_path",
"=",
"self",
".",
"config",
".",
"get_section_config",
"(",
"\"VMware\"",
")",
".",
"get",
"(",
"\"vmrun_path\"",
")",
"if",
"not",
"vmrun_path",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"win\"",
")",
":",
"vmrun_path",
"=",
"shutil",
".",
"which",
"(",
"\"vmrun\"",
")",
"if",
"vmrun_path",
"is",
"None",
":",
"# look for vmrun.exe using the VMware Workstation directory listed in the registry",
"vmrun_path",
"=",
"self",
".",
"_find_vmrun_registry",
"(",
"r\"SOFTWARE\\Wow6432Node\\VMware, Inc.\\VMware Workstation\"",
")",
"if",
"vmrun_path",
"is",
"None",
":",
"# look for vmrun.exe using the VIX directory listed in the registry",
"vmrun_path",
"=",
"self",
".",
"_find_vmrun_registry",
"(",
"r\"SOFTWARE\\Wow6432Node\\VMware, Inc.\\VMware VIX\"",
")",
"elif",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"darwin\"",
")",
":",
"vmrun_path",
"=",
"\"/Applications/VMware Fusion.app/Contents/Library/vmrun\"",
"else",
":",
"vmrun_path",
"=",
"\"vmrun\"",
"if",
"vmrun_path",
"and",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"vmrun_path",
")",
":",
"vmrun_path",
"=",
"shutil",
".",
"which",
"(",
"vmrun_path",
")",
"if",
"not",
"vmrun_path",
":",
"raise",
"VMwareError",
"(",
"\"Could not find VMware vmrun, please make sure it is installed\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"vmrun_path",
")",
":",
"raise",
"VMwareError",
"(",
"\"vmrun {} is not accessible\"",
".",
"format",
"(",
"vmrun_path",
")",
")",
"if",
"not",
"os",
".",
"access",
"(",
"vmrun_path",
",",
"os",
".",
"X_OK",
")",
":",
"raise",
"VMwareError",
"(",
"\"vmrun is not executable\"",
")",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"vmrun_path",
")",
".",
"lower",
"(",
")",
"not",
"in",
"[",
"\"vmrun\"",
",",
"\"vmrun.exe\"",
"]",
":",
"raise",
"VMwareError",
"(",
"\"Invalid vmrun executable name {}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"vmrun_path",
")",
")",
")",
"self",
".",
"_vmrun_path",
"=",
"vmrun_path",
"return",
"vmrun_path"
] | 46.027027 | 25.540541 |
def loadsItem(self, data, contentType=None, version=None):
'''
[OPTIONAL] Identical to :meth:`loadItem`, except the serialized
form is provided as a string representation in `data` instead of
as a stream. The default implementation just wraps
:meth:`loadItem`.
'''
buf = six.StringIO(data)
return self.loadItem(buf, contentType, version) | [
"def",
"loadsItem",
"(",
"self",
",",
"data",
",",
"contentType",
"=",
"None",
",",
"version",
"=",
"None",
")",
":",
"buf",
"=",
"six",
".",
"StringIO",
"(",
"data",
")",
"return",
"self",
".",
"loadItem",
"(",
"buf",
",",
"contentType",
",",
"version",
")"
] | 40.111111 | 21.666667 |
def parse_pubmed_url(pubmed_url):
"""Get PubMed ID (pmid) from PubMed URL."""
parse_result = urlparse(pubmed_url)
pattern = re.compile(r'^/pubmed/(\d+)$')
pmid = pattern.match(parse_result.path).group(1)
return pmid | [
"def",
"parse_pubmed_url",
"(",
"pubmed_url",
")",
":",
"parse_result",
"=",
"urlparse",
"(",
"pubmed_url",
")",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'^/pubmed/(\\d+)$'",
")",
"pmid",
"=",
"pattern",
".",
"match",
"(",
"parse_result",
".",
"path",
")",
".",
"group",
"(",
"1",
")",
"return",
"pmid"
] | 41.666667 | 9.166667 |
def get_center_offset(self):
""" Return x, y pair that will change world coords to screen coords
:return: int, int
"""
return (-self.view_rect.centerx + self._half_width,
-self.view_rect.centery + self._half_height) | [
"def",
"get_center_offset",
"(",
"self",
")",
":",
"return",
"(",
"-",
"self",
".",
"view_rect",
".",
"centerx",
"+",
"self",
".",
"_half_width",
",",
"-",
"self",
".",
"view_rect",
".",
"centery",
"+",
"self",
".",
"_half_height",
")"
] | 43 | 11 |
def _webdav_move_copy(self, remote_path_source, remote_path_target,
operation):
"""Copies or moves a remote file or directory
:param remote_path_source: source file or folder to copy / move
:param remote_path_target: target file to which to copy / move
:param operation: MOVE or COPY
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
"""
if operation != "MOVE" and operation != "COPY":
return False
if remote_path_target[-1] == '/':
remote_path_target += os.path.basename(remote_path_source)
if not (remote_path_target[0] == '/'):
remote_path_target = '/' + remote_path_target
remote_path_source = self._normalize_path(remote_path_source)
headers = {
'Destination': self._webdav_url + parse.quote(
self._encode_string(remote_path_target))
}
return self._make_dav_request(
operation,
remote_path_source,
headers=headers
) | [
"def",
"_webdav_move_copy",
"(",
"self",
",",
"remote_path_source",
",",
"remote_path_target",
",",
"operation",
")",
":",
"if",
"operation",
"!=",
"\"MOVE\"",
"and",
"operation",
"!=",
"\"COPY\"",
":",
"return",
"False",
"if",
"remote_path_target",
"[",
"-",
"1",
"]",
"==",
"'/'",
":",
"remote_path_target",
"+=",
"os",
".",
"path",
".",
"basename",
"(",
"remote_path_source",
")",
"if",
"not",
"(",
"remote_path_target",
"[",
"0",
"]",
"==",
"'/'",
")",
":",
"remote_path_target",
"=",
"'/'",
"+",
"remote_path_target",
"remote_path_source",
"=",
"self",
".",
"_normalize_path",
"(",
"remote_path_source",
")",
"headers",
"=",
"{",
"'Destination'",
":",
"self",
".",
"_webdav_url",
"+",
"parse",
".",
"quote",
"(",
"self",
".",
"_encode_string",
"(",
"remote_path_target",
")",
")",
"}",
"return",
"self",
".",
"_make_dav_request",
"(",
"operation",
",",
"remote_path_source",
",",
"headers",
"=",
"headers",
")"
] | 35.03125 | 22.09375 |
def parse_union_member_types(lexer: Lexer) -> List[NamedTypeNode]:
"""UnionMemberTypes"""
types: List[NamedTypeNode] = []
if expect_optional_token(lexer, TokenKind.EQUALS):
# optional leading pipe
expect_optional_token(lexer, TokenKind.PIPE)
append = types.append
while True:
append(parse_named_type(lexer))
if not expect_optional_token(lexer, TokenKind.PIPE):
break
return types | [
"def",
"parse_union_member_types",
"(",
"lexer",
":",
"Lexer",
")",
"->",
"List",
"[",
"NamedTypeNode",
"]",
":",
"types",
":",
"List",
"[",
"NamedTypeNode",
"]",
"=",
"[",
"]",
"if",
"expect_optional_token",
"(",
"lexer",
",",
"TokenKind",
".",
"EQUALS",
")",
":",
"# optional leading pipe",
"expect_optional_token",
"(",
"lexer",
",",
"TokenKind",
".",
"PIPE",
")",
"append",
"=",
"types",
".",
"append",
"while",
"True",
":",
"append",
"(",
"parse_named_type",
"(",
"lexer",
")",
")",
"if",
"not",
"expect_optional_token",
"(",
"lexer",
",",
"TokenKind",
".",
"PIPE",
")",
":",
"break",
"return",
"types"
] | 38 | 14 |
def list_guests(self, host_id, tags=None, cpus=None, memory=None, hostname=None,
domain=None, local_disk=None, nic_speed=None, public_ip=None,
private_ip=None, **kwargs):
"""Retrieve a list of all virtual servers on the dedicated host.
Example::
# Print out a list of instances with 4 cpu cores in the host id 12345.
for vsi in mgr.list_guests(host_id=12345, cpus=4):
print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress']
# Using a custom object-mask. Will get ONLY what is specified
object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
for vsi in mgr.list_guests(mask=object_mask,cpus=4):
print vsi
:param integer host_id: the identifier of dedicated host
:param list tags: filter based on list of tags
:param integer cpus: filter based on number of CPUS
:param integer memory: filter based on amount of memory
:param string hostname: filter based on hostname
:param string domain: filter based on domain
:param string local_disk: filter based on local_disk
:param integer nic_speed: filter based on network speed (in MBPS)
:param string public_ip: filter based on public ip address
:param string private_ip: filter based on private ip address
:param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
:returns: Returns a list of dictionaries representing the matching
virtual servers
"""
if 'mask' not in kwargs:
items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'lastKnownPowerState.name',
'hourlyBillingFlag',
'powerState',
'maxCpu',
'maxMemory',
'datacenter',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
kwargs['mask'] = "mask[%s]" % ','.join(items)
_filter = utils.NestedDict(kwargs.get('filter') or {})
if tags:
_filter['guests']['tagReferences']['tag']['name'] = {
'operation': 'in',
'options': [{'name': 'data', 'value': tags}],
}
if cpus:
_filter['guests']['maxCpu'] = utils.query_filter(cpus)
if memory:
_filter['guests']['maxMemory'] = utils.query_filter(memory)
if hostname:
_filter['guests']['hostname'] = utils.query_filter(hostname)
if domain:
_filter['guests']['domain'] = utils.query_filter(domain)
if local_disk is not None:
_filter['guests']['localDiskFlag'] = (
utils.query_filter(bool(local_disk)))
if nic_speed:
_filter['guests']['networkComponents']['maxSpeed'] = (
utils.query_filter(nic_speed))
if public_ip:
_filter['guests']['primaryIpAddress'] = (
utils.query_filter(public_ip))
if private_ip:
_filter['guests']['primaryBackendIpAddress'] = (
utils.query_filter(private_ip))
kwargs['filter'] = _filter.to_dict()
kwargs['iter'] = True
return self.host.getGuests(id=host_id, **kwargs) | [
"def",
"list_guests",
"(",
"self",
",",
"host_id",
",",
"tags",
"=",
"None",
",",
"cpus",
"=",
"None",
",",
"memory",
"=",
"None",
",",
"hostname",
"=",
"None",
",",
"domain",
"=",
"None",
",",
"local_disk",
"=",
"None",
",",
"nic_speed",
"=",
"None",
",",
"public_ip",
"=",
"None",
",",
"private_ip",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'mask'",
"not",
"in",
"kwargs",
":",
"items",
"=",
"[",
"'id'",
",",
"'globalIdentifier'",
",",
"'hostname'",
",",
"'domain'",
",",
"'fullyQualifiedDomainName'",
",",
"'primaryBackendIpAddress'",
",",
"'primaryIpAddress'",
",",
"'lastKnownPowerState.name'",
",",
"'hourlyBillingFlag'",
",",
"'powerState'",
",",
"'maxCpu'",
",",
"'maxMemory'",
",",
"'datacenter'",
",",
"'activeTransaction.transactionStatus[friendlyName,name]'",
",",
"'status'",
",",
"]",
"kwargs",
"[",
"'mask'",
"]",
"=",
"\"mask[%s]\"",
"%",
"','",
".",
"join",
"(",
"items",
")",
"_filter",
"=",
"utils",
".",
"NestedDict",
"(",
"kwargs",
".",
"get",
"(",
"'filter'",
")",
"or",
"{",
"}",
")",
"if",
"tags",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'tagReferences'",
"]",
"[",
"'tag'",
"]",
"[",
"'name'",
"]",
"=",
"{",
"'operation'",
":",
"'in'",
",",
"'options'",
":",
"[",
"{",
"'name'",
":",
"'data'",
",",
"'value'",
":",
"tags",
"}",
"]",
",",
"}",
"if",
"cpus",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'maxCpu'",
"]",
"=",
"utils",
".",
"query_filter",
"(",
"cpus",
")",
"if",
"memory",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'maxMemory'",
"]",
"=",
"utils",
".",
"query_filter",
"(",
"memory",
")",
"if",
"hostname",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'hostname'",
"]",
"=",
"utils",
".",
"query_filter",
"(",
"hostname",
")",
"if",
"domain",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'domain'",
"]",
"=",
"utils",
".",
"query_filter",
"(",
"domain",
")",
"if",
"local_disk",
"is",
"not",
"None",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'localDiskFlag'",
"]",
"=",
"(",
"utils",
".",
"query_filter",
"(",
"bool",
"(",
"local_disk",
")",
")",
")",
"if",
"nic_speed",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'networkComponents'",
"]",
"[",
"'maxSpeed'",
"]",
"=",
"(",
"utils",
".",
"query_filter",
"(",
"nic_speed",
")",
")",
"if",
"public_ip",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'primaryIpAddress'",
"]",
"=",
"(",
"utils",
".",
"query_filter",
"(",
"public_ip",
")",
")",
"if",
"private_ip",
":",
"_filter",
"[",
"'guests'",
"]",
"[",
"'primaryBackendIpAddress'",
"]",
"=",
"(",
"utils",
".",
"query_filter",
"(",
"private_ip",
")",
")",
"kwargs",
"[",
"'filter'",
"]",
"=",
"_filter",
".",
"to_dict",
"(",
")",
"kwargs",
"[",
"'iter'",
"]",
"=",
"True",
"return",
"self",
".",
"host",
".",
"getGuests",
"(",
"id",
"=",
"host_id",
",",
"*",
"*",
"kwargs",
")"
] | 38.3 | 22.311111 |
def variable_status(code: str,
exclude_variable: Union[set, None] = None,
jsonable_parameter: bool = True) -> tuple:
"""
Find the possible parameters and "global" variables from a python code.
This is achieved by parsing the abstract syntax tree.
Parameters
----------
code : str
Input code as string.
exclude_variable : set, None, optional
Variable to exclude.
jsonable_parameter: bool, True, optional
Consider only jsonable parameter
Returns
-------
tuple
(a set of possible parameter, a set of parameter to exclude, a dictionary of possible parameter )
A variable is a possible parameter if 1) it is not in the input exclude_variable,
2) the code contains only assignments, and 3) it is used only to bound objects.
The set of parameter to exclude is the union of the input exclude_variable and all names that looks like a global variable.
The dictionary of possible parameter {parameter name, parameter value} is available only if jsonable_parameter is True.
>>> variable_status("a=3")
({'a'}, {'a'}, {'a': 3})
>>> variable_status("a=3",jsonable_parameter=False)
({'a'}, {'a'}, {})
>>> variable_status("a += 1")
(set(), {'a'}, {})
>>> variable_status("def f(x,y=3):\\n\\t pass")
(set(), {'f'}, {})
>>> variable_status("class C(A):\\n\\t pass")
(set(), {'C'}, {})
>>> variable_status("import f")
(set(), {'f'}, {})
>>> variable_status("import f as g")
(set(), {'g'}, {})
>>> variable_status("from X import f")
(set(), {'f'}, {})
>>> variable_status("from X import f as g")
(set(), {'g'}, {})
"""
if exclude_variable is None:
exclude_variable = set()
else:
exclude_variable = copy.deepcopy(exclude_variable)
root = ast.parse(code)
store_variable_name = set()
assign_only = True
dict_parameter={}
for node in ast.iter_child_nodes(root):
if isinstance(node, ast.Assign):
for assign_node in ast.walk(node):
if isinstance(assign_node, ast.Name):
if isinstance(assign_node.ctx, ast.Store):
if jsonable_parameter is False:
store_variable_name |= {assign_node.id}
else:
exclude_variable |= {assign_node.id}
_is_literal_eval,_value=is_literal_eval(node.value)
if jsonable_parameter is True:
for assign_node in ast.iter_child_nodes(node):
if isinstance(assign_node, ast.Tuple):
i=0
for assign_tuple_node in ast.iter_child_nodes(assign_node):
if isinstance(assign_tuple_node, ast.Name):
if isinstance(_value,(collections.Iterable)) and is_jsonable(_value[i]) and _is_literal_eval:
dict_parameter[assign_tuple_node.id]=_value[i]
store_variable_name |= {assign_tuple_node.id}
else:
exclude_variable |= {assign_tuple_node.id}
i += 1
else:
if isinstance(assign_node, ast.Name):
if is_jsonable(_value) and _is_literal_eval:
dict_parameter[assign_node.id]=_value
store_variable_name |= {assign_node.id}
else:
exclude_variable |= {assign_node.id}
elif isinstance(node, ast.AugAssign):
for assign_node in ast.walk(node):
if isinstance(assign_node, ast.Name):
exclude_variable |= {assign_node.id}
# class and function
elif isinstance(node, (ast.FunctionDef, ast.ClassDef)):
assign_only = False
exclude_variable |= {node.name}
# import
elif isinstance(node, ast.Import):
assign_only = False
for node1 in ast.iter_child_nodes(node):
if node1.asname is not None:
exclude_variable |= {node1.asname}
else:
exclude_variable |= {node1.name}
# import from
elif isinstance(node, ast.ImportFrom):
assign_only = False
for node1 in ast.iter_child_nodes(node):
if node1.asname is not None:
exclude_variable |= {node1.asname}
else:
exclude_variable |= {node1.name}
else:
assign_only = False
if assign_only is True:
possible_parameter = store_variable_name-exclude_variable
if jsonable_parameter is True:
dict_parameter = {k:dict_parameter[k] for k in possible_parameter}
return (possible_parameter, store_variable_name | exclude_variable, dict_parameter)
return set(), store_variable_name | exclude_variable, {} | [
"def",
"variable_status",
"(",
"code",
":",
"str",
",",
"exclude_variable",
":",
"Union",
"[",
"set",
",",
"None",
"]",
"=",
"None",
",",
"jsonable_parameter",
":",
"bool",
"=",
"True",
")",
"->",
"tuple",
":",
"if",
"exclude_variable",
"is",
"None",
":",
"exclude_variable",
"=",
"set",
"(",
")",
"else",
":",
"exclude_variable",
"=",
"copy",
".",
"deepcopy",
"(",
"exclude_variable",
")",
"root",
"=",
"ast",
".",
"parse",
"(",
"code",
")",
"store_variable_name",
"=",
"set",
"(",
")",
"assign_only",
"=",
"True",
"dict_parameter",
"=",
"{",
"}",
"for",
"node",
"in",
"ast",
".",
"iter_child_nodes",
"(",
"root",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Assign",
")",
":",
"for",
"assign_node",
"in",
"ast",
".",
"walk",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"assign_node",
",",
"ast",
".",
"Name",
")",
":",
"if",
"isinstance",
"(",
"assign_node",
".",
"ctx",
",",
"ast",
".",
"Store",
")",
":",
"if",
"jsonable_parameter",
"is",
"False",
":",
"store_variable_name",
"|=",
"{",
"assign_node",
".",
"id",
"}",
"else",
":",
"exclude_variable",
"|=",
"{",
"assign_node",
".",
"id",
"}",
"_is_literal_eval",
",",
"_value",
"=",
"is_literal_eval",
"(",
"node",
".",
"value",
")",
"if",
"jsonable_parameter",
"is",
"True",
":",
"for",
"assign_node",
"in",
"ast",
".",
"iter_child_nodes",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"assign_node",
",",
"ast",
".",
"Tuple",
")",
":",
"i",
"=",
"0",
"for",
"assign_tuple_node",
"in",
"ast",
".",
"iter_child_nodes",
"(",
"assign_node",
")",
":",
"if",
"isinstance",
"(",
"assign_tuple_node",
",",
"ast",
".",
"Name",
")",
":",
"if",
"isinstance",
"(",
"_value",
",",
"(",
"collections",
".",
"Iterable",
")",
")",
"and",
"is_jsonable",
"(",
"_value",
"[",
"i",
"]",
")",
"and",
"_is_literal_eval",
":",
"dict_parameter",
"[",
"assign_tuple_node",
".",
"id",
"]",
"=",
"_value",
"[",
"i",
"]",
"store_variable_name",
"|=",
"{",
"assign_tuple_node",
".",
"id",
"}",
"else",
":",
"exclude_variable",
"|=",
"{",
"assign_tuple_node",
".",
"id",
"}",
"i",
"+=",
"1",
"else",
":",
"if",
"isinstance",
"(",
"assign_node",
",",
"ast",
".",
"Name",
")",
":",
"if",
"is_jsonable",
"(",
"_value",
")",
"and",
"_is_literal_eval",
":",
"dict_parameter",
"[",
"assign_node",
".",
"id",
"]",
"=",
"_value",
"store_variable_name",
"|=",
"{",
"assign_node",
".",
"id",
"}",
"else",
":",
"exclude_variable",
"|=",
"{",
"assign_node",
".",
"id",
"}",
"elif",
"isinstance",
"(",
"node",
",",
"ast",
".",
"AugAssign",
")",
":",
"for",
"assign_node",
"in",
"ast",
".",
"walk",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"assign_node",
",",
"ast",
".",
"Name",
")",
":",
"exclude_variable",
"|=",
"{",
"assign_node",
".",
"id",
"}",
"# class and function",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"ast",
".",
"FunctionDef",
",",
"ast",
".",
"ClassDef",
")",
")",
":",
"assign_only",
"=",
"False",
"exclude_variable",
"|=",
"{",
"node",
".",
"name",
"}",
"# import",
"elif",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Import",
")",
":",
"assign_only",
"=",
"False",
"for",
"node1",
"in",
"ast",
".",
"iter_child_nodes",
"(",
"node",
")",
":",
"if",
"node1",
".",
"asname",
"is",
"not",
"None",
":",
"exclude_variable",
"|=",
"{",
"node1",
".",
"asname",
"}",
"else",
":",
"exclude_variable",
"|=",
"{",
"node1",
".",
"name",
"}",
"# import from",
"elif",
"isinstance",
"(",
"node",
",",
"ast",
".",
"ImportFrom",
")",
":",
"assign_only",
"=",
"False",
"for",
"node1",
"in",
"ast",
".",
"iter_child_nodes",
"(",
"node",
")",
":",
"if",
"node1",
".",
"asname",
"is",
"not",
"None",
":",
"exclude_variable",
"|=",
"{",
"node1",
".",
"asname",
"}",
"else",
":",
"exclude_variable",
"|=",
"{",
"node1",
".",
"name",
"}",
"else",
":",
"assign_only",
"=",
"False",
"if",
"assign_only",
"is",
"True",
":",
"possible_parameter",
"=",
"store_variable_name",
"-",
"exclude_variable",
"if",
"jsonable_parameter",
"is",
"True",
":",
"dict_parameter",
"=",
"{",
"k",
":",
"dict_parameter",
"[",
"k",
"]",
"for",
"k",
"in",
"possible_parameter",
"}",
"return",
"(",
"possible_parameter",
",",
"store_variable_name",
"|",
"exclude_variable",
",",
"dict_parameter",
")",
"return",
"set",
"(",
")",
",",
"store_variable_name",
"|",
"exclude_variable",
",",
"{",
"}"
] | 40.36 | 21.864 |
def bulk_delete(self, *filters_or_records):
"""Shortcut to bulk delete records
.. versionadded:: 2.17.0
Args:
*filters_or_records (tuple) or (Record): Either a list of Records, or a list of filters.
Notes:
Requires Swimlane 2.17+
Examples:
::
# Bulk delete records by filter
app.records.bulk_delete(
('Field_1', 'equals', value1),
('Field_2', 'equals', value2)
)
# Bulk delete by record instances
record1 = app.records.get(tracking_id='APP-1')
record2 = app.records.get(tracking_id='APP-2')
record3 = app.records.get(tracking_id='APP-3')
app.records.bulk_delete(record1, record2, record3)
Returns:
:class:`string`: Bulk Modify Job ID
"""
_type = validate_filters_or_records(filters_or_records)
data_dict = {}
# build record_id list
if _type is Record:
record_ids = []
for record in filters_or_records:
record_ids.append(record.id)
data_dict['recordIds'] = record_ids
# build filters
else:
filters = []
record_stub = record_factory(self._app)
for filter_tuples in filters_or_records:
field = record_stub.get_field(filter_tuples[0])
filters.append({
"fieldId": field.id,
"filterType": filter_tuples[1],
"value": field.get_report(filter_tuples[2])
})
data_dict['filters'] = filters
return self._swimlane.request('DELETE', "app/{0}/record/batch".format(self._app.id), json=data_dict).text | [
"def",
"bulk_delete",
"(",
"self",
",",
"*",
"filters_or_records",
")",
":",
"_type",
"=",
"validate_filters_or_records",
"(",
"filters_or_records",
")",
"data_dict",
"=",
"{",
"}",
"# build record_id list",
"if",
"_type",
"is",
"Record",
":",
"record_ids",
"=",
"[",
"]",
"for",
"record",
"in",
"filters_or_records",
":",
"record_ids",
".",
"append",
"(",
"record",
".",
"id",
")",
"data_dict",
"[",
"'recordIds'",
"]",
"=",
"record_ids",
"# build filters",
"else",
":",
"filters",
"=",
"[",
"]",
"record_stub",
"=",
"record_factory",
"(",
"self",
".",
"_app",
")",
"for",
"filter_tuples",
"in",
"filters_or_records",
":",
"field",
"=",
"record_stub",
".",
"get_field",
"(",
"filter_tuples",
"[",
"0",
"]",
")",
"filters",
".",
"append",
"(",
"{",
"\"fieldId\"",
":",
"field",
".",
"id",
",",
"\"filterType\"",
":",
"filter_tuples",
"[",
"1",
"]",
",",
"\"value\"",
":",
"field",
".",
"get_report",
"(",
"filter_tuples",
"[",
"2",
"]",
")",
"}",
")",
"data_dict",
"[",
"'filters'",
"]",
"=",
"filters",
"return",
"self",
".",
"_swimlane",
".",
"request",
"(",
"'DELETE'",
",",
"\"app/{0}/record/batch\"",
".",
"format",
"(",
"self",
".",
"_app",
".",
"id",
")",
",",
"json",
"=",
"data_dict",
")",
".",
"text"
] | 33.709091 | 20.018182 |
def to_dict(self):
"""Convert instance to a serializable mapping."""
config = {}
for attr in dir(self):
if not attr.startswith('_'):
value = getattr(self, attr)
if not hasattr(value, '__call__'):
config[attr] = value
return config | [
"def",
"to_dict",
"(",
"self",
")",
":",
"config",
"=",
"{",
"}",
"for",
"attr",
"in",
"dir",
"(",
"self",
")",
":",
"if",
"not",
"attr",
".",
"startswith",
"(",
"'_'",
")",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"not",
"hasattr",
"(",
"value",
",",
"'__call__'",
")",
":",
"config",
"[",
"attr",
"]",
"=",
"value",
"return",
"config"
] | 35.333333 | 9.444444 |
def compute_hash(func, string):
"""compute hash of string using given hash function"""
h = func()
h.update(string)
return h.hexdigest() | [
"def",
"compute_hash",
"(",
"func",
",",
"string",
")",
":",
"h",
"=",
"func",
"(",
")",
"h",
".",
"update",
"(",
"string",
")",
"return",
"h",
".",
"hexdigest",
"(",
")"
] | 29.4 | 14.2 |
def update_entitlement(owner, repo, identifier, name, token, show_tokens):
"""Update an entitlement in a repository."""
client = get_entitlements_api()
data = {}
if name is not None:
data["name"] = name
if token is not None:
data["token"] = token
with catch_raise_api_exception():
data, _, headers = client.entitlements_partial_update_with_http_info(
owner=owner,
repo=repo,
identifier=identifier,
data=data,
show_tokens=show_tokens,
)
ratelimits.maybe_rate_limit(client, headers)
return data.to_dict() | [
"def",
"update_entitlement",
"(",
"owner",
",",
"repo",
",",
"identifier",
",",
"name",
",",
"token",
",",
"show_tokens",
")",
":",
"client",
"=",
"get_entitlements_api",
"(",
")",
"data",
"=",
"{",
"}",
"if",
"name",
"is",
"not",
"None",
":",
"data",
"[",
"\"name\"",
"]",
"=",
"name",
"if",
"token",
"is",
"not",
"None",
":",
"data",
"[",
"\"token\"",
"]",
"=",
"token",
"with",
"catch_raise_api_exception",
"(",
")",
":",
"data",
",",
"_",
",",
"headers",
"=",
"client",
".",
"entitlements_partial_update_with_http_info",
"(",
"owner",
"=",
"owner",
",",
"repo",
"=",
"repo",
",",
"identifier",
"=",
"identifier",
",",
"data",
"=",
"data",
",",
"show_tokens",
"=",
"show_tokens",
",",
")",
"ratelimits",
".",
"maybe_rate_limit",
"(",
"client",
",",
"headers",
")",
"return",
"data",
".",
"to_dict",
"(",
")"
] | 27.681818 | 19.863636 |
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data,
station_geo_data):
"""Initiates single MV grid_district including station and grid
Parameters
----------
poly_id: int
ID of grid_district according to database table. Also used as ID for created grid #TODO: check type
subst_id: int
ID of station according to database table #TODO: check type
grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
Polygon of grid district
station_geo_data: :shapely:`Shapely Point object<points>`
Point of station
Returns
-------
:shapely:`Shapely Polygon object<polygons>`
Description of return #TODO: check
"""
mv_station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data)
mv_grid = MVGridDing0(network=self,
id_db=poly_id,
station=mv_station)
mv_grid_district = MVGridDistrictDing0(id_db=poly_id,
mv_grid=mv_grid,
geo_data=grid_district_geo_data)
mv_grid.grid_district = mv_grid_district
mv_station.grid = mv_grid
self.add_mv_grid_district(mv_grid_district)
return mv_grid_district | [
"def",
"build_mv_grid_district",
"(",
"self",
",",
"poly_id",
",",
"subst_id",
",",
"grid_district_geo_data",
",",
"station_geo_data",
")",
":",
"mv_station",
"=",
"MVStationDing0",
"(",
"id_db",
"=",
"subst_id",
",",
"geo_data",
"=",
"station_geo_data",
")",
"mv_grid",
"=",
"MVGridDing0",
"(",
"network",
"=",
"self",
",",
"id_db",
"=",
"poly_id",
",",
"station",
"=",
"mv_station",
")",
"mv_grid_district",
"=",
"MVGridDistrictDing0",
"(",
"id_db",
"=",
"poly_id",
",",
"mv_grid",
"=",
"mv_grid",
",",
"geo_data",
"=",
"grid_district_geo_data",
")",
"mv_grid",
".",
"grid_district",
"=",
"mv_grid_district",
"mv_station",
".",
"grid",
"=",
"mv_grid",
"self",
".",
"add_mv_grid_district",
"(",
"mv_grid_district",
")",
"return",
"mv_grid_district"
] | 38.361111 | 22.361111 |
def release_address(self, address, vpnid):
"""Release a specific lease, called after delete_client_entry"""
query = address + "?action=releaseAddress&vpnId=" + vpnid
request_url = self._build_url(['Lease', query])
return self._do_request('DELETE', request_url) | [
"def",
"release_address",
"(",
"self",
",",
"address",
",",
"vpnid",
")",
":",
"query",
"=",
"address",
"+",
"\"?action=releaseAddress&vpnId=\"",
"+",
"vpnid",
"request_url",
"=",
"self",
".",
"_build_url",
"(",
"[",
"'Lease'",
",",
"query",
"]",
")",
"return",
"self",
".",
"_do_request",
"(",
"'DELETE'",
",",
"request_url",
")"
] | 57.6 | 11.2 |
def printdata(self) -> None:
""" Prints data to stdout """
np.set_printoptions(threshold=np.nan)
print(self.data)
np.set_printoptions(threshold=1000) | [
"def",
"printdata",
"(",
"self",
")",
"->",
"None",
":",
"np",
".",
"set_printoptions",
"(",
"threshold",
"=",
"np",
".",
"nan",
")",
"print",
"(",
"self",
".",
"data",
")",
"np",
".",
"set_printoptions",
"(",
"threshold",
"=",
"1000",
")"
] | 35.4 | 7.2 |
def copy(self):
"""
Deepcopy the parameter (with a new uniqueid). All other tags will remain
the same... so some other tag should be changed before attaching back to
a ParameterSet or Bundle.
:return: the copied :class:`Parameter` object
"""
s = self.to_json()
cpy = parameter_from_json(s)
# TODO: may need to subclass for Parameters that require bundle by using this line instead:
# cpy = parameter_from_json(s, bundle=self._bundle)
cpy.set_uniqueid(_uniqueid())
return cpy | [
"def",
"copy",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"to_json",
"(",
")",
"cpy",
"=",
"parameter_from_json",
"(",
"s",
")",
"# TODO: may need to subclass for Parameters that require bundle by using this line instead:",
"# cpy = parameter_from_json(s, bundle=self._bundle)",
"cpy",
".",
"set_uniqueid",
"(",
"_uniqueid",
"(",
")",
")",
"return",
"cpy"
] | 39.928571 | 20.5 |
def read(cls, iprot):
'''
Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.online.online_database_objects_list_item.OnlineDatabaseObjectsListItem
'''
init_kwds = {}
iprot.read_struct_begin()
while True:
ifield_name, ifield_type, _ifield_id = iprot.read_field_begin()
if ifield_type == 0: # STOP
break
elif ifield_name == 'detail_href':
init_kwds['detail_href'] = iprot.read_string()
elif ifield_name == 'record_type':
init_kwds['record_type'] = iprot.read_string()
elif ifield_name == 'title':
init_kwds['title'] = iprot.read_string()
elif ifield_name == 'thumbnail_url':
try:
init_kwds['thumbnail_url'] = iprot.read_string()
except (TypeError, ValueError,):
pass
iprot.read_field_end()
iprot.read_struct_end()
return cls(**init_kwds) | [
"def",
"read",
"(",
"cls",
",",
"iprot",
")",
":",
"init_kwds",
"=",
"{",
"}",
"iprot",
".",
"read_struct_begin",
"(",
")",
"while",
"True",
":",
"ifield_name",
",",
"ifield_type",
",",
"_ifield_id",
"=",
"iprot",
".",
"read_field_begin",
"(",
")",
"if",
"ifield_type",
"==",
"0",
":",
"# STOP",
"break",
"elif",
"ifield_name",
"==",
"'detail_href'",
":",
"init_kwds",
"[",
"'detail_href'",
"]",
"=",
"iprot",
".",
"read_string",
"(",
")",
"elif",
"ifield_name",
"==",
"'record_type'",
":",
"init_kwds",
"[",
"'record_type'",
"]",
"=",
"iprot",
".",
"read_string",
"(",
")",
"elif",
"ifield_name",
"==",
"'title'",
":",
"init_kwds",
"[",
"'title'",
"]",
"=",
"iprot",
".",
"read_string",
"(",
")",
"elif",
"ifield_name",
"==",
"'thumbnail_url'",
":",
"try",
":",
"init_kwds",
"[",
"'thumbnail_url'",
"]",
"=",
"iprot",
".",
"read_string",
"(",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
")",
":",
"pass",
"iprot",
".",
"read_field_end",
"(",
")",
"iprot",
".",
"read_struct_end",
"(",
")",
"return",
"cls",
"(",
"*",
"*",
"init_kwds",
")"
] | 37.5 | 21.633333 |
def stop_supporting_containers(get_container_name, extra_containers):
"""
Stop postgres and solr containers, along with any specified extra containers
"""
docker.remove_container(get_container_name('postgres'))
docker.remove_container(get_container_name('solr'))
for container in extra_containers:
docker.remove_container(get_container_name(container)) | [
"def",
"stop_supporting_containers",
"(",
"get_container_name",
",",
"extra_containers",
")",
":",
"docker",
".",
"remove_container",
"(",
"get_container_name",
"(",
"'postgres'",
")",
")",
"docker",
".",
"remove_container",
"(",
"get_container_name",
"(",
"'solr'",
")",
")",
"for",
"container",
"in",
"extra_containers",
":",
"docker",
".",
"remove_container",
"(",
"get_container_name",
"(",
"container",
")",
")"
] | 47.125 | 15.875 |
def ask(self, question, default=False):
""" Ask a y/n question to the user.
"""
choices = '[%s/%s]' % ('Y' if default else 'y', 'n' if default else 'N')
while True:
response = raw_input('%s %s' % (question, choices)).strip()
if not response:
return default
elif response in 'yYoO':
return True
elif response in 'nN':
return False | [
"def",
"ask",
"(",
"self",
",",
"question",
",",
"default",
"=",
"False",
")",
":",
"choices",
"=",
"'[%s/%s]'",
"%",
"(",
"'Y'",
"if",
"default",
"else",
"'y'",
",",
"'n'",
"if",
"default",
"else",
"'N'",
")",
"while",
"True",
":",
"response",
"=",
"raw_input",
"(",
"'%s %s'",
"%",
"(",
"question",
",",
"choices",
")",
")",
".",
"strip",
"(",
")",
"if",
"not",
"response",
":",
"return",
"default",
"elif",
"response",
"in",
"'yYoO'",
":",
"return",
"True",
"elif",
"response",
"in",
"'nN'",
":",
"return",
"False"
] | 37.166667 | 12.5 |
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10 * 1024 * 1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = ''.join(L)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None)
)
except: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
self.end_headers()
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1) | [
"def",
"do_POST",
"(",
"self",
")",
":",
"# Check that the path is legal",
"if",
"not",
"self",
".",
"is_rpc_path_valid",
"(",
")",
":",
"self",
".",
"report_404",
"(",
")",
"return",
"try",
":",
"# Get arguments by reading body of request.",
"# We read this in chunks to avoid straining",
"# socket.read(); around the 10 or 15Mb mark, some platforms",
"# begin to have problems (bug #792570).",
"max_chunk_size",
"=",
"10",
"*",
"1024",
"*",
"1024",
"size_remaining",
"=",
"int",
"(",
"self",
".",
"headers",
"[",
"\"content-length\"",
"]",
")",
"L",
"=",
"[",
"]",
"while",
"size_remaining",
":",
"chunk_size",
"=",
"min",
"(",
"size_remaining",
",",
"max_chunk_size",
")",
"L",
".",
"append",
"(",
"self",
".",
"rfile",
".",
"read",
"(",
"chunk_size",
")",
")",
"size_remaining",
"-=",
"len",
"(",
"L",
"[",
"-",
"1",
"]",
")",
"data",
"=",
"''",
".",
"join",
"(",
"L",
")",
"# In previous versions of SimpleXMLRPCServer, _dispatch",
"# could be overridden in this class, instead of in",
"# SimpleXMLRPCDispatcher. To maintain backwards compatibility,",
"# check to see if a subclass implements _dispatch and dispatch",
"# using that method if present.",
"response",
"=",
"self",
".",
"server",
".",
"_marshaled_dispatch",
"(",
"data",
",",
"getattr",
"(",
"self",
",",
"'_dispatch'",
",",
"None",
")",
")",
"except",
":",
"# This should only happen if the module is buggy",
"# internal error, report as HTTP server error",
"self",
".",
"send_response",
"(",
"500",
")",
"self",
".",
"end_headers",
"(",
")",
"else",
":",
"# got a valid XML RPC response",
"self",
".",
"send_response",
"(",
"200",
")",
"self",
".",
"send_header",
"(",
"\"Content-type\"",
",",
"\"text/xml\"",
")",
"self",
".",
"send_header",
"(",
"\"Content-length\"",
",",
"str",
"(",
"len",
"(",
"response",
")",
")",
")",
"self",
".",
"end_headers",
"(",
")",
"self",
".",
"wfile",
".",
"write",
"(",
"response",
")",
"# shut down the connection",
"self",
".",
"wfile",
".",
"flush",
"(",
")",
"self",
".",
"connection",
".",
"shutdown",
"(",
"1",
")"
] | 39.918367 | 17.632653 |
def load_and_save(path, output_path=None, use_nep8=True):
"""
Call `load_and_save` to load a Python file to be compiled to the .avm format and save the result.
By default, the resultant .avm file is saved along side the source file.
:param path: The path of the Python file to compile
:param output_path: Optional path to save the compiled `.avm` file
:return: the instance of the compiler
The following returns the compiler object for inspection
.. code-block:: python
from boa.compiler import Compiler
Compiler.load_and_save('path/to/your/file.py')
"""
compiler = Compiler.load(os.path.abspath(path), use_nep8=use_nep8)
data = compiler.write()
if output_path is None:
fullpath = os.path.realpath(path)
path, filename = os.path.split(fullpath)
newfilename = filename.replace('.py', '.avm')
output_path = '%s/%s' % (path, newfilename)
Compiler.write_file(data, output_path)
compiler.entry_module.export_debug(output_path)
return data | [
"def",
"load_and_save",
"(",
"path",
",",
"output_path",
"=",
"None",
",",
"use_nep8",
"=",
"True",
")",
":",
"compiler",
"=",
"Compiler",
".",
"load",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
",",
"use_nep8",
"=",
"use_nep8",
")",
"data",
"=",
"compiler",
".",
"write",
"(",
")",
"if",
"output_path",
"is",
"None",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"path",
")",
"path",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"fullpath",
")",
"newfilename",
"=",
"filename",
".",
"replace",
"(",
"'.py'",
",",
"'.avm'",
")",
"output_path",
"=",
"'%s/%s'",
"%",
"(",
"path",
",",
"newfilename",
")",
"Compiler",
".",
"write_file",
"(",
"data",
",",
"output_path",
")",
"compiler",
".",
"entry_module",
".",
"export_debug",
"(",
"output_path",
")",
"return",
"data"
] | 36.8 | 23.333333 |
def spin1_a(self):
"""Returns the dimensionless spin magnitude of mass 1."""
return coordinates.cartesian_to_spherical_rho(
self.spin1x, self.spin1y, self.spin1z) | [
"def",
"spin1_a",
"(",
"self",
")",
":",
"return",
"coordinates",
".",
"cartesian_to_spherical_rho",
"(",
"self",
".",
"spin1x",
",",
"self",
".",
"spin1y",
",",
"self",
".",
"spin1z",
")"
] | 52.75 | 17.5 |
def init_command_set(self, scrapyd_url):
    """Populate this mapping with the scrapyd API endpoints.

    Each entry maps a command name to a two-element list:
    ``[endpoint URL, supported HTTP method]``.
    """
    # Normalise the base URL so it always ends with a slash.
    if scrapyd_url[-1:] != '/':
        scrapyd_url = scrapyd_url + '/'
    get = http_utils.METHOD_GET
    post = http_utils.METHOD_POST
    # (command name, URL suffix, HTTP method) for every scrapyd endpoint,
    # registered in the same order as the original hand-written assignments.
    endpoints = (
        ('daemonstatus', 'daemonstatus.json', get),
        ('addversion', 'addversion.json', post),
        ('schedule', 'schedule.json', post),
        ('cancel', 'cancel.json', post),
        ('listprojects', 'listprojects.json', get),
        ('listversions', 'listversions.json', get),
        ('listspiders', 'listspiders.json', get),
        ('listjobs', 'listjobs.json', get),
        ('delversion', 'delversion.json', post),
        ('delproject', 'delproject.json', post),
        ('logs', 'logs/', get),
    )
    for command, suffix, method in endpoints:
        self[command] = [scrapyd_url + suffix, method]
"def",
"init_command_set",
"(",
"self",
",",
"scrapyd_url",
")",
":",
"if",
"scrapyd_url",
"[",
"-",
"1",
":",
"]",
"!=",
"'/'",
":",
"scrapyd_url",
"=",
"scrapyd_url",
"+",
"'/'",
"self",
"[",
"'daemonstatus'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'daemonstatus.json'",
",",
"http_utils",
".",
"METHOD_GET",
"]",
"self",
"[",
"'addversion'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'addversion.json'",
",",
"http_utils",
".",
"METHOD_POST",
"]",
"self",
"[",
"'schedule'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'schedule.json'",
",",
"http_utils",
".",
"METHOD_POST",
"]",
"self",
"[",
"'cancel'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'cancel.json'",
",",
"http_utils",
".",
"METHOD_POST",
"]",
"self",
"[",
"'listprojects'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'listprojects.json'",
",",
"http_utils",
".",
"METHOD_GET",
"]",
"self",
"[",
"'listversions'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'listversions.json'",
",",
"http_utils",
".",
"METHOD_GET",
"]",
"self",
"[",
"'listspiders'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'listspiders.json'",
",",
"http_utils",
".",
"METHOD_GET",
"]",
"self",
"[",
"'listjobs'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'listjobs.json'",
",",
"http_utils",
".",
"METHOD_GET",
"]",
"self",
"[",
"'delversion'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'delversion.json'",
",",
"http_utils",
".",
"METHOD_POST",
"]",
"self",
"[",
"'delproject'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'delproject.json'",
",",
"http_utils",
".",
"METHOD_POST",
"]",
"self",
"[",
"'logs'",
"]",
"=",
"[",
"scrapyd_url",
"+",
"'logs/'",
",",
"http_utils",
".",
"METHOD_GET",
"]"
] | 65.555556 | 33.777778 |
def sample(self, batch_size, batch_idxs=None):
    """Return a randomized batch of experiences.

    # Arguments
        batch_size (int): Number of experiences to return.
        batch_idxs (list of int): Optional explicit indexes to extract;
            when None, indexes are drawn at random.

    # Returns
        A list of `Experience` named tuples, each holding
        (state0, action, reward, state1, terminal1).
    """
    # It is not possible to tell whether the first state in the memory is terminal, because it
    # would require access to the "terminal" flag associated to the previous state. As a result
    # we will never return this first state (only using `self.terminals[0]` to know whether the
    # second state is terminal).
    # In addition we need enough entries to fill the desired window length.
    assert self.nb_entries >= self.window_length + 2, 'not enough entries in the memory'
    if batch_idxs is None:
        # Draw random indexes such that we have enough entries before each index to fill the
        # desired window length.
        batch_idxs = sample_batch_indexes(
            self.window_length, self.nb_entries - 1, size=batch_size)
    # Shift by one so `idx` always points at the *next* observation (state1).
    batch_idxs = np.array(batch_idxs) + 1
    assert np.min(batch_idxs) >= self.window_length + 1
    assert np.max(batch_idxs) < self.nb_entries
    assert len(batch_idxs) == batch_size
    # Create experiences
    experiences = []
    for idx in batch_idxs:
        terminal0 = self.terminals[idx - 2]
        while terminal0:
            # Skip this transition because the environment was reset here. Select a new, random
            # transition and use this instead. This may cause the batch to contain the same
            # transition twice.
            idx = sample_batch_indexes(self.window_length + 1, self.nb_entries, size=1)[0]
            terminal0 = self.terminals[idx - 2]
        assert self.window_length + 1 <= idx < self.nb_entries
        # This code is slightly complicated by the fact that subsequent observations might be
        # from different episodes. We ensure that an experience never spans multiple episodes.
        # This is probably not that important in practice but it seems cleaner.
        state0 = [self.observations[idx - 1]]
        for offset in range(0, self.window_length - 1):
            current_idx = idx - 2 - offset
            assert current_idx >= 1
            current_terminal = self.terminals[current_idx - 1]
            if current_terminal and not self.ignore_episode_boundaries:
                # The previously handled observation was terminal, don't add the current one.
                # Otherwise we would leak into a different episode.
                break
            state0.insert(0, self.observations[current_idx])
        # Left-pad with zeroed observations when the episode started inside the window.
        while len(state0) < self.window_length:
            state0.insert(0, zeroed_observation(state0[0]))
        action = self.actions[idx - 1]
        reward = self.rewards[idx - 1]
        terminal1 = self.terminals[idx - 1]
        # Okay, now we need to create the follow-up state. This is state0 shifted on timestep
        # to the right. Again, we need to be careful to not include an observation from the next
        # episode if the last state is terminal.
        state1 = [np.copy(x) for x in state0[1:]]
        state1.append(self.observations[idx])
        assert len(state0) == self.window_length
        assert len(state1) == len(state0)
        experiences.append(Experience(state0=state0, action=action, reward=reward,
                                      state1=state1, terminal1=terminal1))
    assert len(experiences) == batch_size
    return experiences
"def",
"sample",
"(",
"self",
",",
"batch_size",
",",
"batch_idxs",
"=",
"None",
")",
":",
"# It is not possible to tell whether the first state in the memory is terminal, because it",
"# would require access to the \"terminal\" flag associated to the previous state. As a result",
"# we will never return this first state (only using `self.terminals[0]` to know whether the",
"# second state is terminal).",
"# In addition we need enough entries to fill the desired window length.",
"assert",
"self",
".",
"nb_entries",
">=",
"self",
".",
"window_length",
"+",
"2",
",",
"'not enough entries in the memory'",
"if",
"batch_idxs",
"is",
"None",
":",
"# Draw random indexes such that we have enough entries before each index to fill the",
"# desired window length.",
"batch_idxs",
"=",
"sample_batch_indexes",
"(",
"self",
".",
"window_length",
",",
"self",
".",
"nb_entries",
"-",
"1",
",",
"size",
"=",
"batch_size",
")",
"batch_idxs",
"=",
"np",
".",
"array",
"(",
"batch_idxs",
")",
"+",
"1",
"assert",
"np",
".",
"min",
"(",
"batch_idxs",
")",
">=",
"self",
".",
"window_length",
"+",
"1",
"assert",
"np",
".",
"max",
"(",
"batch_idxs",
")",
"<",
"self",
".",
"nb_entries",
"assert",
"len",
"(",
"batch_idxs",
")",
"==",
"batch_size",
"# Create experiences",
"experiences",
"=",
"[",
"]",
"for",
"idx",
"in",
"batch_idxs",
":",
"terminal0",
"=",
"self",
".",
"terminals",
"[",
"idx",
"-",
"2",
"]",
"while",
"terminal0",
":",
"# Skip this transition because the environment was reset here. Select a new, random",
"# transition and use this instead. This may cause the batch to contain the same",
"# transition twice.",
"idx",
"=",
"sample_batch_indexes",
"(",
"self",
".",
"window_length",
"+",
"1",
",",
"self",
".",
"nb_entries",
",",
"size",
"=",
"1",
")",
"[",
"0",
"]",
"terminal0",
"=",
"self",
".",
"terminals",
"[",
"idx",
"-",
"2",
"]",
"assert",
"self",
".",
"window_length",
"+",
"1",
"<=",
"idx",
"<",
"self",
".",
"nb_entries",
"# This code is slightly complicated by the fact that subsequent observations might be",
"# from different episodes. We ensure that an experience never spans multiple episodes.",
"# This is probably not that important in practice but it seems cleaner.",
"state0",
"=",
"[",
"self",
".",
"observations",
"[",
"idx",
"-",
"1",
"]",
"]",
"for",
"offset",
"in",
"range",
"(",
"0",
",",
"self",
".",
"window_length",
"-",
"1",
")",
":",
"current_idx",
"=",
"idx",
"-",
"2",
"-",
"offset",
"assert",
"current_idx",
">=",
"1",
"current_terminal",
"=",
"self",
".",
"terminals",
"[",
"current_idx",
"-",
"1",
"]",
"if",
"current_terminal",
"and",
"not",
"self",
".",
"ignore_episode_boundaries",
":",
"# The previously handled observation was terminal, don't add the current one.",
"# Otherwise we would leak into a different episode.",
"break",
"state0",
".",
"insert",
"(",
"0",
",",
"self",
".",
"observations",
"[",
"current_idx",
"]",
")",
"while",
"len",
"(",
"state0",
")",
"<",
"self",
".",
"window_length",
":",
"state0",
".",
"insert",
"(",
"0",
",",
"zeroed_observation",
"(",
"state0",
"[",
"0",
"]",
")",
")",
"action",
"=",
"self",
".",
"actions",
"[",
"idx",
"-",
"1",
"]",
"reward",
"=",
"self",
".",
"rewards",
"[",
"idx",
"-",
"1",
"]",
"terminal1",
"=",
"self",
".",
"terminals",
"[",
"idx",
"-",
"1",
"]",
"# Okay, now we need to create the follow-up state. This is state0 shifted on timestep",
"# to the right. Again, we need to be careful to not include an observation from the next",
"# episode if the last state is terminal.",
"state1",
"=",
"[",
"np",
".",
"copy",
"(",
"x",
")",
"for",
"x",
"in",
"state0",
"[",
"1",
":",
"]",
"]",
"state1",
".",
"append",
"(",
"self",
".",
"observations",
"[",
"idx",
"]",
")",
"assert",
"len",
"(",
"state0",
")",
"==",
"self",
".",
"window_length",
"assert",
"len",
"(",
"state1",
")",
"==",
"len",
"(",
"state0",
")",
"experiences",
".",
"append",
"(",
"Experience",
"(",
"state0",
"=",
"state0",
",",
"action",
"=",
"action",
",",
"reward",
"=",
"reward",
",",
"state1",
"=",
"state1",
",",
"terminal1",
"=",
"terminal1",
")",
")",
"assert",
"len",
"(",
"experiences",
")",
"==",
"batch_size",
"return",
"experiences"
] | 53.188406 | 24.695652 |
def _unpack_truisms(self, c):
"""
Given a constraint, _unpack_truisms() returns a set of constraints that must be True
this constraint to be True.
"""
try:
op = getattr(self, '_unpack_truisms_'+c.op)
except AttributeError:
return set()
return op(c) | [
"def",
"_unpack_truisms",
"(",
"self",
",",
"c",
")",
":",
"try",
":",
"op",
"=",
"getattr",
"(",
"self",
",",
"'_unpack_truisms_'",
"+",
"c",
".",
"op",
")",
"except",
"AttributeError",
":",
"return",
"set",
"(",
")",
"return",
"op",
"(",
"c",
")"
] | 29 | 17.909091 |
def get_control_connection_host(self):
    """Return host metadata for the current control connection, or None.

    Resolves the control connection's endpoint first; when there is no
    live connection (or it has no endpoint), no metadata lookup is made.
    """
    conn = self.control_connection._connection
    if not conn:
        return None
    ep = conn.endpoint
    if not ep:
        return None
    return self.metadata.get_host(ep)
"def",
"get_control_connection_host",
"(",
"self",
")",
":",
"connection",
"=",
"self",
".",
"control_connection",
".",
"_connection",
"endpoint",
"=",
"connection",
".",
"endpoint",
"if",
"connection",
"else",
"None",
"return",
"self",
".",
"metadata",
".",
"get_host",
"(",
"endpoint",
")",
"if",
"endpoint",
"else",
"None"
] | 42.857143 | 11.714286 |
def Get(self):
    """Read the pending GrrMessage from the transaction log registry value.

    Returns None when the registry value is missing, has an unexpected
    type, or cannot be deserialized into a GrrMessage.
    """
    try:
        data, value_type = winreg.QueryValueEx(_GetServiceKey(), "Transaction")
    except OSError:
        # No transaction value is stored.
        return None
    if value_type != winreg.REG_BINARY:
        # Unexpected registry value type; treat as no transaction.
        return None
    try:
        return rdf_flows.GrrMessage.FromSerializedString(data)
    except message.Error:
        # Stored bytes are not a valid serialized message.
        return None
"def",
"Get",
"(",
"self",
")",
":",
"try",
":",
"value",
",",
"reg_type",
"=",
"winreg",
".",
"QueryValueEx",
"(",
"_GetServiceKey",
"(",
")",
",",
"\"Transaction\"",
")",
"except",
"OSError",
":",
"return",
"if",
"reg_type",
"!=",
"winreg",
".",
"REG_BINARY",
":",
"return",
"try",
":",
"return",
"rdf_flows",
".",
"GrrMessage",
".",
"FromSerializedString",
"(",
"value",
")",
"except",
"message",
".",
"Error",
":",
"return"
] | 25.428571 | 25 |
def get(self, request=None, timeout=1.0):
    """Get an NDEF message from the server. Temporarily connects
    to the default SNEP server if the client is not yet connected.

    .. deprecated:: 0.13
        Use :meth:`get_records` or :meth:`get_octets`.
    """
    if request is None:
        # Default to an NDEF message holding a single empty record.
        request = nfc.ndef.Message(nfc.ndef.Record())
    if not isinstance(request, nfc.ndef.Message):
        raise TypeError("request type must be nfc.ndef.Message")
    data = self._get(request, timeout)
    if data is None:
        return None
    try:
        return nfc.ndef.Message(data)
    except Exception as error:
        # Response bytes did not parse as NDEF; log and return None.
        log.error(repr(error))
        return None
"def",
"get",
"(",
"self",
",",
"request",
"=",
"None",
",",
"timeout",
"=",
"1.0",
")",
":",
"if",
"request",
"is",
"None",
":",
"request",
"=",
"nfc",
".",
"ndef",
".",
"Message",
"(",
"nfc",
".",
"ndef",
".",
"Record",
"(",
")",
")",
"if",
"not",
"isinstance",
"(",
"request",
",",
"nfc",
".",
"ndef",
".",
"Message",
")",
":",
"raise",
"TypeError",
"(",
"\"request type must be nfc.ndef.Message\"",
")",
"response_data",
"=",
"self",
".",
"_get",
"(",
"request",
",",
"timeout",
")",
"if",
"response_data",
"is",
"not",
"None",
":",
"try",
":",
"response",
"=",
"nfc",
".",
"ndef",
".",
"Message",
"(",
"response_data",
")",
"except",
"Exception",
"as",
"error",
":",
"log",
".",
"error",
"(",
"repr",
"(",
"error",
")",
")",
"else",
":",
"return",
"response"
] | 33.304348 | 18.391304 |
def backend_monitor(backend):
    """Monitor a single IBMQ backend.

    Prints a plain-text report of the backend's configuration and status,
    followed (for hardware backends only) by per-qubit calibration data
    and multi-qubit gate errors.

    Args:
        backend (IBMQBackend): Backend to monitor.
    Raises:
        QiskitError: Input is not a IBMQ backend.
    """
    if not isinstance(backend, IBMQBackend):
        raise QiskitError('Input variable is not of type IBMQBackend.')
    config = backend.configuration().to_dict()
    status = backend.status().to_dict()
    # Merge status and config; config keys win on collision.
    config_dict = {**status, **config}
    if not config['simulator']:
        # Calibration properties only exist for real hardware.
        props = backend.properties().to_dict()
    print(backend.name())
    print('='*len(backend.name()))
    print('Configuration')
    print('-'*13)
    offset = '    '
    # These keys are printed first, in a fixed order; everything else follows.
    upper_list = ['n_qubits', 'operational',
                  'status_msg', 'pending_jobs',
                  'basis_gates', 'local', 'simulator']
    lower_list = list(set(config_dict.keys()).difference(upper_list))
    # Remove gates because they are in a different tab
    lower_list.remove('gates')
    for item in upper_list+lower_list:
        print(offset+item+':', config_dict[item])
    # Stop here if simulator
    if config['simulator']:
        return
    print()
    qubit_header = 'Qubits [Name / Freq / T1 / T2 / U1 err / U2 err / U3 err / Readout err]'
    print(qubit_header)
    print('-'*len(qubit_header))
    sep = ' / '
    for qub in range(len(props['qubits'])):
        name = 'Q%s' % qub
        qubit_data = props['qubits'][qub]
        # Each qubit contributes three consecutive single-qubit gates (u1, u2, u3).
        gate_data = props['gates'][3*qub:3*qub+3]
        t1_info = qubit_data[0]
        t2_info = qubit_data[1]
        freq_info = qubit_data[2]
        readout_info = qubit_data[3]
        freq = str(round(freq_info['value'], 5))+' '+freq_info['unit']
        T1 = str(round(t1_info['value'],  # pylint: disable=invalid-name
                       5))+' ' + t1_info['unit']
        T2 = str(round(t2_info['value'],  # pylint: disable=invalid-name
                       5))+' ' + t2_info['unit']
        # pylint: disable=invalid-name
        U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))
        # pylint: disable=invalid-name
        U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))
        # pylint: disable=invalid-name
        U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))
        readout_error = str(round(readout_info['value'], 5))
        qstr = sep.join([name, freq, T1, T2, U1, U2, U3, readout_error])
        print(offset+qstr)
    print()
    # Gates after the single-qubit block (3 per qubit) are multi-qubit gates.
    multi_qubit_gates = props['gates'][3*config['n_qubits']:]
    multi_header = 'Multi-Qubit Gates [Name / Type / Gate Error]'
    print(multi_header)
    print('-'*len(multi_header))
    for gate in multi_qubit_gates:
        name = gate['name']
        ttype = gate['gate']
        error = str(round(gate['parameters'][0]['value'], 5))
        mstr = sep.join([name, ttype, error])
        print(offset+mstr)
"def",
"backend_monitor",
"(",
"backend",
")",
":",
"if",
"not",
"isinstance",
"(",
"backend",
",",
"IBMQBackend",
")",
":",
"raise",
"QiskitError",
"(",
"'Input variable is not of type IBMQBackend.'",
")",
"config",
"=",
"backend",
".",
"configuration",
"(",
")",
".",
"to_dict",
"(",
")",
"status",
"=",
"backend",
".",
"status",
"(",
")",
".",
"to_dict",
"(",
")",
"config_dict",
"=",
"{",
"*",
"*",
"status",
",",
"*",
"*",
"config",
"}",
"if",
"not",
"config",
"[",
"'simulator'",
"]",
":",
"props",
"=",
"backend",
".",
"properties",
"(",
")",
".",
"to_dict",
"(",
")",
"print",
"(",
"backend",
".",
"name",
"(",
")",
")",
"print",
"(",
"'='",
"*",
"len",
"(",
"backend",
".",
"name",
"(",
")",
")",
")",
"print",
"(",
"'Configuration'",
")",
"print",
"(",
"'-'",
"*",
"13",
")",
"offset",
"=",
"' '",
"upper_list",
"=",
"[",
"'n_qubits'",
",",
"'operational'",
",",
"'status_msg'",
",",
"'pending_jobs'",
",",
"'basis_gates'",
",",
"'local'",
",",
"'simulator'",
"]",
"lower_list",
"=",
"list",
"(",
"set",
"(",
"config_dict",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"upper_list",
")",
")",
"# Remove gates because they are in a different tab",
"lower_list",
".",
"remove",
"(",
"'gates'",
")",
"for",
"item",
"in",
"upper_list",
"+",
"lower_list",
":",
"print",
"(",
"offset",
"+",
"item",
"+",
"':'",
",",
"config_dict",
"[",
"item",
"]",
")",
"# Stop here if simulator",
"if",
"config",
"[",
"'simulator'",
"]",
":",
"return",
"print",
"(",
")",
"qubit_header",
"=",
"'Qubits [Name / Freq / T1 / T2 / U1 err / U2 err / U3 err / Readout err]'",
"print",
"(",
"qubit_header",
")",
"print",
"(",
"'-'",
"*",
"len",
"(",
"qubit_header",
")",
")",
"sep",
"=",
"' / '",
"for",
"qub",
"in",
"range",
"(",
"len",
"(",
"props",
"[",
"'qubits'",
"]",
")",
")",
":",
"name",
"=",
"'Q%s'",
"%",
"qub",
"qubit_data",
"=",
"props",
"[",
"'qubits'",
"]",
"[",
"qub",
"]",
"gate_data",
"=",
"props",
"[",
"'gates'",
"]",
"[",
"3",
"*",
"qub",
":",
"3",
"*",
"qub",
"+",
"3",
"]",
"t1_info",
"=",
"qubit_data",
"[",
"0",
"]",
"t2_info",
"=",
"qubit_data",
"[",
"1",
"]",
"freq_info",
"=",
"qubit_data",
"[",
"2",
"]",
"readout_info",
"=",
"qubit_data",
"[",
"3",
"]",
"freq",
"=",
"str",
"(",
"round",
"(",
"freq_info",
"[",
"'value'",
"]",
",",
"5",
")",
")",
"+",
"' '",
"+",
"freq_info",
"[",
"'unit'",
"]",
"T1",
"=",
"str",
"(",
"round",
"(",
"t1_info",
"[",
"'value'",
"]",
",",
"# pylint: disable=invalid-name",
"5",
")",
")",
"+",
"' '",
"+",
"t1_info",
"[",
"'unit'",
"]",
"T2",
"=",
"str",
"(",
"round",
"(",
"t2_info",
"[",
"'value'",
"]",
",",
"# pylint: disable=invalid-name",
"5",
")",
")",
"+",
"' '",
"+",
"t2_info",
"[",
"'unit'",
"]",
"# pylint: disable=invalid-name",
"U1",
"=",
"str",
"(",
"round",
"(",
"gate_data",
"[",
"0",
"]",
"[",
"'parameters'",
"]",
"[",
"0",
"]",
"[",
"'value'",
"]",
",",
"5",
")",
")",
"# pylint: disable=invalid-name",
"U2",
"=",
"str",
"(",
"round",
"(",
"gate_data",
"[",
"1",
"]",
"[",
"'parameters'",
"]",
"[",
"0",
"]",
"[",
"'value'",
"]",
",",
"5",
")",
")",
"# pylint: disable=invalid-name",
"U3",
"=",
"str",
"(",
"round",
"(",
"gate_data",
"[",
"2",
"]",
"[",
"'parameters'",
"]",
"[",
"0",
"]",
"[",
"'value'",
"]",
",",
"5",
")",
")",
"readout_error",
"=",
"str",
"(",
"round",
"(",
"readout_info",
"[",
"'value'",
"]",
",",
"5",
")",
")",
"qstr",
"=",
"sep",
".",
"join",
"(",
"[",
"name",
",",
"freq",
",",
"T1",
",",
"T2",
",",
"U1",
",",
"U2",
",",
"U3",
",",
"readout_error",
"]",
")",
"print",
"(",
"offset",
"+",
"qstr",
")",
"print",
"(",
")",
"multi_qubit_gates",
"=",
"props",
"[",
"'gates'",
"]",
"[",
"3",
"*",
"config",
"[",
"'n_qubits'",
"]",
":",
"]",
"multi_header",
"=",
"'Multi-Qubit Gates [Name / Type / Gate Error]'",
"print",
"(",
"multi_header",
")",
"print",
"(",
"'-'",
"*",
"len",
"(",
"multi_header",
")",
")",
"for",
"gate",
"in",
"multi_qubit_gates",
":",
"name",
"=",
"gate",
"[",
"'name'",
"]",
"ttype",
"=",
"gate",
"[",
"'gate'",
"]",
"error",
"=",
"str",
"(",
"round",
"(",
"gate",
"[",
"'parameters'",
"]",
"[",
"0",
"]",
"[",
"'value'",
"]",
",",
"5",
")",
")",
"mstr",
"=",
"sep",
".",
"join",
"(",
"[",
"name",
",",
"ttype",
",",
"error",
"]",
")",
"print",
"(",
"offset",
"+",
"mstr",
")"
] | 34.225 | 18.325 |
def eni_absent(
        name,
        release_eip=False,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the EC2 ENI is absent.

    .. versionadded:: 2016.3.0

    name
        Name tag associated with the ENI.

    release_eip
        True/False - release any EIP associated with the ENI

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    # Standard salt state return structure.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    r = __salt__['boto_ec2.get_network_interface'](
        name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    if 'error' in r:
        # The lookup itself failed (credentials, API error, ...).
        ret['result'] = False
        ret['comment'] = 'Error when attempting to find eni: {0}.'.format(
            r['error']['message']
        )
        return ret
    if not r['result']:
        # ENI does not exist; nothing to delete.
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be deleted.'
            ret['result'] = None
            return ret
    else:
        if __opts__['test']:
            # Dry-run: report what would happen without changing anything.
            ret['comment'] = 'ENI is set to be deleted.'
            if release_eip and 'allocationId' in r['result']:
                ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released'])
            ret['result'] = None
            return ret
        if 'id' in r['result']['attachment']:
            # ENI is attached to an instance; it must be detached first.
            result_detach = __salt__['boto_ec2.detach_network_interface'](
                name=name, force=True, region=region, key=key,
                keyid=keyid, profile=profile
            )
            if 'error' in result_detach:
                ret['result'] = False
                ret['comment'] = 'Failed to detach ENI: {0}'.format(
                    result_detach['error']['message']
                )
                return ret
            # TODO: Ensure the detach occurs before continuing
        result_delete = __salt__['boto_ec2.delete_network_interface'](
            name=name, region=region, key=key,
            keyid=keyid, profile=profile
        )
        if 'error' in result_delete:
            ret['result'] = False
            ret['comment'] = 'Failed to delete ENI: {0}'.format(
                result_delete['error']['message']
            )
            return ret
        ret['comment'] = 'Deleted ENI {0}'.format(name)
        ret['changes']['id'] = None
        if release_eip and 'allocationId' in r['result']:
            # Optionally release the Elastic IP that was allocated to the ENI.
            _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None,
                                                            allocation_id=r['result']['allocationId'],
                                                            region=region,
                                                            key=key,
                                                            keyid=keyid,
                                                            profile=profile)
            if not _ret:
                ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.'])
                ret['result'] = False
                return ret
            else:
                ret['comment'] = ' '.join([ret['comment'], 'EIP released.'])
                ret['changes']['eip released'] = True
    return ret
"def",
"eni_absent",
"(",
"name",
",",
"release_eip",
"=",
"False",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"r",
"=",
"__salt__",
"[",
"'boto_ec2.get_network_interface'",
"]",
"(",
"name",
"=",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"'error'",
"in",
"r",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Error when attempting to find eni: {0}.'",
".",
"format",
"(",
"r",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
")",
"return",
"ret",
"if",
"not",
"r",
"[",
"'result'",
"]",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'ENI is set to be deleted.'",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"else",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'ENI is set to be deleted.'",
"if",
"release_eip",
"and",
"'allocationId'",
"in",
"r",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"' '",
".",
"join",
"(",
"[",
"ret",
"[",
"'comment'",
"]",
",",
"'Allocated/associated EIP is set to be released'",
"]",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"if",
"'id'",
"in",
"r",
"[",
"'result'",
"]",
"[",
"'attachment'",
"]",
":",
"result_detach",
"=",
"__salt__",
"[",
"'boto_ec2.detach_network_interface'",
"]",
"(",
"name",
"=",
"name",
",",
"force",
"=",
"True",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"'error'",
"in",
"result_detach",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to detach ENI: {0}'",
".",
"format",
"(",
"result_detach",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
")",
"return",
"ret",
"# TODO: Ensure the detach occurs before continuing",
"result_delete",
"=",
"__salt__",
"[",
"'boto_ec2.delete_network_interface'",
"]",
"(",
"name",
"=",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"'error'",
"in",
"result_delete",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to delete ENI: {0}'",
".",
"format",
"(",
"result_delete",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
")",
"return",
"ret",
"ret",
"[",
"'comment'",
"]",
"=",
"'Deleted ENI {0}'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'id'",
"]",
"=",
"None",
"if",
"release_eip",
"and",
"'allocationId'",
"in",
"r",
"[",
"'result'",
"]",
":",
"_ret",
"=",
"__salt__",
"[",
"'boto_ec2.release_eip_address'",
"]",
"(",
"public_ip",
"=",
"None",
",",
"allocation_id",
"=",
"r",
"[",
"'result'",
"]",
"[",
"'allocationId'",
"]",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"_ret",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"' '",
".",
"join",
"(",
"[",
"ret",
"[",
"'comment'",
"]",
",",
"'Failed to release EIP allocated to the ENI.'",
"]",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"' '",
".",
"join",
"(",
"[",
"ret",
"[",
"'comment'",
"]",
",",
"'EIP released.'",
"]",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'eip released'",
"]",
"=",
"True",
"return",
"ret"
] | 36.195652 | 22.565217 |
def _enter_namespace(self, namespace_name):
"""
A namespace is usually an absolute file name of the grammar.
A special namespace '__base__' is used for BASETYPE namespace.
"""
if namespace_name not in self.namespaces:
self.namespaces[namespace_name] = {}
# BASETYPE namespace is imported in each namespace
# as the first namespace to be searched.
self._imported_namespaces[namespace_name] = \
[self.namespaces['__base__']]
self._namespace_stack.append(namespace_name) | [
"def",
"_enter_namespace",
"(",
"self",
",",
"namespace_name",
")",
":",
"if",
"namespace_name",
"not",
"in",
"self",
".",
"namespaces",
":",
"self",
".",
"namespaces",
"[",
"namespace_name",
"]",
"=",
"{",
"}",
"# BASETYPE namespace is imported in each namespace",
"# as the first namespace to be searched.",
"self",
".",
"_imported_namespaces",
"[",
"namespace_name",
"]",
"=",
"[",
"self",
".",
"namespaces",
"[",
"'__base__'",
"]",
"]",
"self",
".",
"_namespace_stack",
".",
"append",
"(",
"namespace_name",
")"
] | 40.571429 | 16.142857 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.