text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _set_qsfp28(self, v, load=False):
"""
Setter method for qsfp28, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfp28 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_qsfp28 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_qsfp28() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=qsfp28.qsfp28, is_container='container', presence=False, yang_name="qsfp28", rest_name="qsfp28", parent=self, choice=(u'interface-identifier', u'qsfp28'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """qsfp28 must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=qsfp28.qsfp28, is_container='container', presence=False, yang_name="qsfp28", rest_name="qsfp28", parent=self, choice=(u'interface-identifier', u'qsfp28'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)""",
})
self.__qsfp28 = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_qsfp28",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
... | 72.545455 | 34.590909 |
def run(self):
"""
Run the schedule
"""
self.main_task.thread.start()
self.main_task.thread.join() | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"main_task",
".",
"thread",
".",
"start",
"(",
")",
"self",
".",
"main_task",
".",
"thread",
".",
"join",
"(",
")"
] | 22.166667 | 8.166667 |
def parse(parser, argv=None, settings_key='settings', no_args_func=None):
"""
parser cliez app
:param argparse.ArgumentParser parser: an instance
of argparse.ArgumentParser
:param argv: argument list,default is `sys.argv`
:type argv: list or tuple
:param str settings: settings option name,
default is settings.
:param object no_args_func: a callable object.if no sub-parser matched,
parser will call it.
:return: an instance of `cliez.component.Component` or its subclass
"""
argv = argv or sys.argv
commands = command_list()
if type(argv) not in [list, tuple]:
raise TypeError("argv only can be list or tuple")
# match sub-parser
if len(argv) >= 2 and argv[1] in commands:
sub_parsers = parser.add_subparsers()
class_name = argv[1].capitalize() + 'Component'
from cliez.conf import (COMPONENT_ROOT,
LOGGING_CONFIG,
EPILOG,
GENERAL_ARGUMENTS)
sys.path.insert(0, os.path.dirname(COMPONENT_ROOT))
mod = importlib.import_module(
'{}.components.{}'.format(os.path.basename(COMPONENT_ROOT),
argv[1]))
# dynamic load component
klass = getattr(mod, class_name)
sub_parser = append_arguments(klass, sub_parsers, EPILOG,
GENERAL_ARGUMENTS)
options = parser.parse_args(argv[1:])
settings = Settings.bind(
getattr(options, settings_key)
) if settings_key and hasattr(options, settings_key) else None
obj = klass(parser, sub_parser, options, settings)
# init logger
logger_level = logging.CRITICAL
if hasattr(options, 'verbose'):
if options.verbose == 1:
logger_level = logging.ERROR
elif options.verbose == 2:
logger_level = logging.WARNING
elif options.verbose == 3:
logger_level = logging.INFO
obj.logger.setLevel(logging.INFO)
pass
if hasattr(options, 'debug') and options.debug:
logger_level = logging.DEBUG
# http lib use a strange way to logging
try:
import http.client as http_client
http_client.HTTPConnection.debuglevel = 1
except Exception:
# do nothing
pass
pass
loggers = LOGGING_CONFIG['loggers']
for k, v in loggers.items():
v.setdefault('level', logger_level)
if logger_level in [logging.INFO, logging.DEBUG]:
v['handlers'] = ['stdout']
pass
logging_config.dictConfig(LOGGING_CONFIG)
# this may not necessary
# obj.logger.setLevel(logger_level)
obj.run(options)
# return object to make unit test easy
return obj
# print all sub commands when user set.
if not parser.description and len(commands):
sub_parsers = parser.add_subparsers()
[sub_parsers.add_parser(v) for v in commands]
pass
pass
options = parser.parse_args(argv[1:])
if no_args_func and callable(no_args_func):
return no_args_func(options)
else:
parser._print_message("nothing to do...\n")
pass | [
"def",
"parse",
"(",
"parser",
",",
"argv",
"=",
"None",
",",
"settings_key",
"=",
"'settings'",
",",
"no_args_func",
"=",
"None",
")",
":",
"argv",
"=",
"argv",
"or",
"sys",
".",
"argv",
"commands",
"=",
"command_list",
"(",
")",
"if",
"type",
"(",
... | 32.15534 | 17.398058 |
def erank(self):
""" Effective rank of the TT-vector """
r = self.r
n = self.n
d = self.d
if d <= 1:
er = 0e0
else:
sz = _np.dot(n * r[0:d], r[1:])
if sz == 0:
er = 0e0
else:
b = r[0] * n[0] + n[d - 1] * r[d]
if d is 2:
er = sz * 1.0 / b
else:
a = _np.sum(n[1:d - 1])
er = (_np.sqrt(b * b + 4 * a * sz) - b) / (2 * a)
return er | [
"def",
"erank",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"r",
"n",
"=",
"self",
".",
"n",
"d",
"=",
"self",
".",
"d",
"if",
"d",
"<=",
"1",
":",
"er",
"=",
"0e0",
"else",
":",
"sz",
"=",
"_np",
".",
"dot",
"(",
"n",
"*",
"r",
"[",
... | 28.263158 | 16.736842 |
def ng_get_scope_property(self, element, prop):
"""
:Description: Will return value of property of element's scope.
:Warning: This will only work for angular.js 1.x.
:Warning: Requires angular debugging to be enabled.
:param element: Element for browser instance to target.
:param prop: Property of element's angular scope to target.
:type prop: string
:example: 'messages.total'
:return: string
"""
return self.browser.execute_script(
'return angular.element(arguments[0]).scope()%s;' % self.__d2b_notation(
prop=prop
), element) | [
"def",
"ng_get_scope_property",
"(",
"self",
",",
"element",
",",
"prop",
")",
":",
"return",
"self",
".",
"browser",
".",
"execute_script",
"(",
"'return angular.element(arguments[0]).scope()%s;'",
"%",
"self",
".",
"__d2b_notation",
"(",
"prop",
"=",
"prop",
")"... | 42.933333 | 16 |
def flair(self, name, text, css_class):
"""Sets flair for `user` in this subreddit (POST). Calls :meth:`narwal.Reddit.flairlist`.
:param name: name of the user
:param text: flair text to assign
:param css_class: CSS class to assign to flair text
"""
return self._reddit.flair(self.display_name, name, text, css_class) | [
"def",
"flair",
"(",
"self",
",",
"name",
",",
"text",
",",
"css_class",
")",
":",
"return",
"self",
".",
"_reddit",
".",
"flair",
"(",
"self",
".",
"display_name",
",",
"name",
",",
"text",
",",
"css_class",
")"
] | 46 | 11.375 |
def _get_SConscript_filenames(self, ls, kw):
"""
Convert the parameters passed to SConscript() calls into a list
of files and export variables. If the parameters are invalid,
throws SCons.Errors.UserError. Returns a tuple (l, e) where l
is a list of SConscript filenames and e is a list of exports.
"""
exports = []
if len(ls) == 0:
try:
dirs = kw["dirs"]
except KeyError:
raise SCons.Errors.UserError("Invalid SConscript usage - no parameters")
if not SCons.Util.is_List(dirs):
dirs = [ dirs ]
dirs = list(map(str, dirs))
name = kw.get('name', 'SConscript')
files = [os.path.join(n, name) for n in dirs]
elif len(ls) == 1:
files = ls[0]
elif len(ls) == 2:
files = ls[0]
exports = self.Split(ls[1])
else:
raise SCons.Errors.UserError("Invalid SConscript() usage - too many arguments")
if not SCons.Util.is_List(files):
files = [ files ]
if kw.get('exports'):
exports.extend(self.Split(kw['exports']))
variant_dir = kw.get('variant_dir') or kw.get('build_dir')
if variant_dir:
if len(files) != 1:
raise SCons.Errors.UserError("Invalid SConscript() usage - can only specify one SConscript with a variant_dir")
duplicate = kw.get('duplicate', 1)
src_dir = kw.get('src_dir')
if not src_dir:
src_dir, fname = os.path.split(str(files[0]))
files = [os.path.join(str(variant_dir), fname)]
else:
if not isinstance(src_dir, SCons.Node.Node):
src_dir = self.fs.Dir(src_dir)
fn = files[0]
if not isinstance(fn, SCons.Node.Node):
fn = self.fs.File(fn)
if fn.is_under(src_dir):
# Get path relative to the source directory.
fname = fn.get_path(src_dir)
files = [os.path.join(str(variant_dir), fname)]
else:
files = [fn.get_abspath()]
kw['src_dir'] = variant_dir
self.fs.VariantDir(variant_dir, src_dir, duplicate)
return (files, exports) | [
"def",
"_get_SConscript_filenames",
"(",
"self",
",",
"ls",
",",
"kw",
")",
":",
"exports",
"=",
"[",
"]",
"if",
"len",
"(",
"ls",
")",
"==",
"0",
":",
"try",
":",
"dirs",
"=",
"kw",
"[",
"\"dirs\"",
"]",
"except",
"KeyError",
":",
"raise",
"SCons"... | 34.761194 | 21.208955 |
def prt_txt_desc2nts(self, prt, desc2nts, prtfmt):
"""Print grouped and sorted GO IDs."""
# 1-D: data to print is a flat list of namedtuples
if 'flat' in desc2nts:
nts = desc2nts.get('flat')
# sys.stdout.write("FLAT NTS: {FLDS}\n".format(FLDS=" ".join(next(iter(nts))._fields)))
prt_txt(prt, nts, prtfmt)
# 2-D: data to print is a list of [(section, nts), ...
else:
for section, nts in desc2nts['sections']:
prt.write("\nSECTION: {SEC}\n".format(SEC=section))
prt_txt(prt, nts, prtfmt)
grprobj = self.sortobj.grprobj
dat = SummarySec2dHdrGos().summarize_sec2hdrnts(desc2nts['sections'])
ugos_y = dat['G'].intersection(grprobj.usrgos)
ugos_n = dat['U'].intersection(grprobj.usrgos)
return {'GO_DESC':'usr', 'SECs':len(dat['S']), 'GOs':len(ugos_y),
'UNGRP':len(ugos_n), 'undesc':'ungrpd'} | [
"def",
"prt_txt_desc2nts",
"(",
"self",
",",
"prt",
",",
"desc2nts",
",",
"prtfmt",
")",
":",
"# 1-D: data to print is a flat list of namedtuples",
"if",
"'flat'",
"in",
"desc2nts",
":",
"nts",
"=",
"desc2nts",
".",
"get",
"(",
"'flat'",
")",
"# sys.stdout.write(\... | 53.833333 | 18.166667 |
def stop_program(self, turn_off_load=True):
"""
Stops running programmed test sequence
:return: None
"""
self.__set_buffer_start(self.CMD_STOP_PROG)
self.__set_checksum()
self.__send_buffer()
if turn_off_load and self.load_on:
self.load_on = False | [
"def",
"stop_program",
"(",
"self",
",",
"turn_off_load",
"=",
"True",
")",
":",
"self",
".",
"__set_buffer_start",
"(",
"self",
".",
"CMD_STOP_PROG",
")",
"self",
".",
"__set_checksum",
"(",
")",
"self",
".",
"__send_buffer",
"(",
")",
"if",
"turn_off_load"... | 31.4 | 7.2 |
def update(self, period = None, start = None, stop = None, value = None):
"""Change the value for a given period.
:param period: Period where the value is modified. If set, `start` and `stop` should be `None`.
:param start: Start of the period. Instance of `openfisca_core.periods.Instant`. If set, `period` should be `None`.
:param stop: Stop of the period. Instance of `openfisca_core.periods.Instant`. If set, `period` should be `None`.
:param value: New value. If `None`, the parameter is removed from the legislation parameters for the given period.
"""
if period is not None:
if start is not None or stop is not None:
raise TypeError("Wrong input for 'update' method: use either 'update(period, value = value)' or 'update(start = start, stop = stop, value = value)'. You cannot both use 'period' and 'start' or 'stop'.")
if isinstance(period, str):
period = periods.period(period)
start = period.start
stop = period.stop
if start is None:
raise ValueError("You must provide either a start or a period")
start_str = str(start)
stop_str = str(stop.offset(1, 'day')) if stop else None
old_values = self.values_list
new_values = []
n = len(old_values)
i = 0
# Future intervals : not affected
if stop_str:
while (i < n) and (old_values[i].instant_str >= stop_str):
new_values.append(old_values[i])
i += 1
# Right-overlapped interval
if stop_str:
if new_values and (stop_str == new_values[-1].instant_str):
pass # such interval is empty
else:
if i < n:
overlapped_value = old_values[i].value
value_name = _compose_name(self.name, item_name = stop_str)
new_interval = ParameterAtInstant(value_name, stop_str, data = {'value': overlapped_value})
new_values.append(new_interval)
else:
value_name = _compose_name(self.name, item_name = stop_str)
new_interval = ParameterAtInstant(value_name, stop_str, data = {'value': None})
new_values.append(new_interval)
# Insert new interval
value_name = _compose_name(self.name, item_name = start_str)
new_interval = ParameterAtInstant(value_name, start_str, data = {'value': value})
new_values.append(new_interval)
# Remove covered intervals
while (i < n) and (old_values[i].instant_str >= start_str):
i += 1
# Past intervals : not affected
while i < n:
new_values.append(old_values[i])
i += 1
self.values_list = new_values | [
"def",
"update",
"(",
"self",
",",
"period",
"=",
"None",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"value",
"=",
"None",
")",
":",
"if",
"period",
"is",
"not",
"None",
":",
"if",
"start",
"is",
"not",
"None",
"or",
"stop",
"is",
... | 46.032787 | 27.377049 |
def deleteByteArray(self, context, page, returnError):
"""please override"""
returnError.contents.value = self.IllegalStateError
raise NotImplementedError("You must override this method.") | [
"def",
"deleteByteArray",
"(",
"self",
",",
"context",
",",
"page",
",",
"returnError",
")",
":",
"returnError",
".",
"contents",
".",
"value",
"=",
"self",
".",
"IllegalStateError",
"raise",
"NotImplementedError",
"(",
"\"You must override this method.\"",
")"
] | 52.25 | 15 |
def create( cls, length = sys.maxint, typecode = 'd', start_index = 0 ):
"""Construct a StepVector of the given length, with indices starting
at the given start_index and counting up to (but not including)
start_index + length.
The typecode may be:
'd' for float values (C type 'double'),
'i' for int values,
'b' for Boolean values,
'O' for arbitrary Python objects as value.
The vector is initialized with the value zero (or, for typecode 'O',
with None).
"""
if typecode == 'd':
swigclass = _StepVector_float
elif typecode == 'i':
swigclass = _StepVector_int
elif typecode == 'b':
swigclass = _StepVector_bool
elif typecode == 'O':
swigclass = _StepVector_obj
else:
raise ValueError, "unsupported typecode"
obj = cls()
obj._typecode = typecode
obj._swigobj = swigclass( )
obj.start = start_index
obj.stop = start_index + length
return obj | [
"def",
"create",
"(",
"cls",
",",
"length",
"=",
"sys",
".",
"maxint",
",",
"typecode",
"=",
"'d'",
",",
"start_index",
"=",
"0",
")",
":",
"if",
"typecode",
"==",
"'d'",
":",
"swigclass",
"=",
"_StepVector_float",
"elif",
"typecode",
"==",
"'i'",
":",... | 33.4 | 14.766667 |
def build_tree_from_distance_matrix(matrix, best_tree=False, params={},\
working_dir='/tmp'):
"""Returns a tree from a distance matrix.
matrix: a square Dict2D object (cogent.util.dict2d)
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Clearcut app controller.
The result will be an cogent.core.tree.PhyloNode object, or None if tree
fails.
"""
params['--out'] = get_tmp_filename(working_dir)
# Create instance of app controller, enable tree, disable alignment
app = Clearcut(InputHandler='_input_as_multiline_string', params=params, \
WorkingDir=working_dir, SuppressStdout=True,\
SuppressStderr=True)
#Turn off input as alignment
app.Parameters['-a'].off()
#Input is a distance matrix
app.Parameters['-d'].on()
if best_tree:
app.Parameters['-N'].on()
# Turn the dict2d object into the expected input format
matrix_input, int_keys = _matrix_input_from_dict2d(matrix)
# Collect result
result = app(matrix_input)
# Build tree
tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
# reassign to original names
for node in tree.tips():
node.Name = int_keys[node.Name]
# Clean up
result.cleanUp()
del(app, result, params)
return tree | [
"def",
"build_tree_from_distance_matrix",
"(",
"matrix",
",",
"best_tree",
"=",
"False",
",",
"params",
"=",
"{",
"}",
",",
"working_dir",
"=",
"'/tmp'",
")",
":",
"params",
"[",
"'--out'",
"]",
"=",
"get_tmp_filename",
"(",
"working_dir",
")",
"# Create insta... | 29.76087 | 23.456522 |
def close(self):
"""
Closes the worker. No more jobs will be handled by the worker, and any
running job is immediately returned to the job manager.
"""
if self._closed:
return
self._closed = True
if self._job is not None:
self._manager.return_job(self._job)
self._job = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closed",
":",
"return",
"self",
".",
"_closed",
"=",
"True",
"if",
"self",
".",
"_job",
"is",
"not",
"None",
":",
"self",
".",
"_manager",
".",
"return_job",
"(",
"self",
".",
"_job",
")",... | 25.428571 | 20.142857 |
def add_branch_node(self, tree_id, node_id, feature_index, feature_value,
branch_mode, true_child_id, false_child_id, relative_hit_rate = None,
missing_value_tracks_true_child = False):
"""
Add a branch node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
feature_index: int
Index of the feature in the input being split on.
feature_value: double or int
The value used in the feature comparison determining the traversal
direction from this node.
branch_mode: str
Branch mode of the node, specifying the condition under which the node
referenced by `true_child_id` is called next.
Must be one of the following:
- `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] <= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
if `input[feature_index] < feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] >= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
if `input[feature_index] > feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueEqual"`. Traverse to node `true_child_id`
if `input[feature_index] == feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
if `input[feature_index] != feature_value`, and `false_child_id`
otherwise.
true_child_id: int
ID of the child under the true condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
false_child_id: int
ID of the child under the false condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
relative_hit_rate: float [optional]
When the model is converted compiled by CoreML, this gives hints to
Core ML about which node is more likely to be hit on evaluation,
allowing for additional optimizations. The values can be on any scale,
with the values between child nodes being compared relative to each
other.
missing_value_tracks_true_child: bool [optional]
If the training data contains NaN values or missing values, then this
flag determines which direction a NaN value traverses.
"""
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.branchFeatureIndex = feature_index
spec_node.branchFeatureValue = feature_value
spec_node.trueChildNodeId = true_child_id
spec_node.falseChildNodeId = false_child_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value(branch_mode)
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
spec_node.missingValueTracksTrueChild = missing_value_tracks_true_child | [
"def",
"add_branch_node",
"(",
"self",
",",
"tree_id",
",",
"node_id",
",",
"feature_index",
",",
"feature_value",
",",
"branch_mode",
",",
"true_child_id",
",",
"false_child_id",
",",
"relative_hit_rate",
"=",
"None",
",",
"missing_value_tracks_true_child",
"=",
"F... | 42.943182 | 26.738636 |
def make_nn_funs(layer_sizes, L2_reg, noise_variance, nonlinearity=np.tanh):
"""These functions implement a standard multi-layer perceptron,
vectorized over both training examples and weight samples."""
shapes = list(zip(layer_sizes[:-1], layer_sizes[1:]))
num_weights = sum((m+1)*n for m, n in shapes)
def unpack_layers(weights):
num_weight_sets = len(weights)
for m, n in shapes:
yield weights[:, :m*n] .reshape((num_weight_sets, m, n)),\
weights[:, m*n:m*n+n].reshape((num_weight_sets, 1, n))
weights = weights[:, (m+1)*n:]
def predictions(weights, inputs):
"""weights is shape (num_weight_samples x num_weights)
inputs is shape (num_datapoints x D)"""
inputs = np.expand_dims(inputs, 0)
for W, b in unpack_layers(weights):
outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
inputs = nonlinearity(outputs)
return outputs
def logprob(weights, inputs, targets):
log_prior = -L2_reg * np.sum(weights**2, axis=1)
preds = predictions(weights, inputs)
log_lik = -np.sum((preds - targets)**2, axis=1)[:, 0] / noise_variance
return log_prior + log_lik
return num_weights, predictions, logprob | [
"def",
"make_nn_funs",
"(",
"layer_sizes",
",",
"L2_reg",
",",
"noise_variance",
",",
"nonlinearity",
"=",
"np",
".",
"tanh",
")",
":",
"shapes",
"=",
"list",
"(",
"zip",
"(",
"layer_sizes",
"[",
":",
"-",
"1",
"]",
",",
"layer_sizes",
"[",
"1",
":",
... | 43.344828 | 14.965517 |
def increment(self, key, cache=None, amount=1):
"""Query the server to increment the value of the key by the specified
amount. Negative amounts can be used to decrement.
Keyword arguments:
key -- the key the item is stored under. Required.
cache -- the cache the item belongs to. Defaults to None, which uses
self.name. If no name is set, raises a ValueError.
amount -- the amount to increment the value by. Can be negative to
decrement the value. Defaults to 1.
"""
if cache is None:
cache = self.name
if cache is None:
raise ValueError("Cache name must be set")
cache = quote_plus(cache)
key = quote_plus(key)
body = json.dumps({"amount": amount})
result = self.client.post("caches/%s/items/%s/increment" % (cache,
key), body, {"Content-Type": "application/json"})
result = result["body"]
return Item(values=result, cache=cache, key=key) | [
"def",
"increment",
"(",
"self",
",",
"key",
",",
"cache",
"=",
"None",
",",
"amount",
"=",
"1",
")",
":",
"if",
"cache",
"is",
"None",
":",
"cache",
"=",
"self",
".",
"name",
"if",
"cache",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cache n... | 42.083333 | 18.541667 |
def reset(self, state_size_changed=False):
"""Reset parser state.
Parameters
----------
state_size_changed : `bool`, optional
`True` if maximum state size changed (default: `False`).
"""
if state_size_changed:
self.state = deque(repeat('', self.state_size),
maxlen=self.state_size)
else:
self.state.extend(repeat('', self.state_size))
self.end = True | [
"def",
"reset",
"(",
"self",
",",
"state_size_changed",
"=",
"False",
")",
":",
"if",
"state_size_changed",
":",
"self",
".",
"state",
"=",
"deque",
"(",
"repeat",
"(",
"''",
",",
"self",
".",
"state_size",
")",
",",
"maxlen",
"=",
"self",
".",
"state_... | 33.5 | 16 |
def input_int_default(question="", default=0):
"""A function that works for both, Python 2.x and Python 3.x.
It asks the user for input and returns it as a string.
"""
answer = input_string(question)
if answer == "" or answer == "yes":
return default
else:
return int(answer) | [
"def",
"input_int_default",
"(",
"question",
"=",
"\"\"",
",",
"default",
"=",
"0",
")",
":",
"answer",
"=",
"input_string",
"(",
"question",
")",
"if",
"answer",
"==",
"\"\"",
"or",
"answer",
"==",
"\"yes\"",
":",
"return",
"default",
"else",
":",
"retu... | 34.444444 | 10.666667 |
def swipe(self, element, x, y, duration=None):
"""Swipe over an element
:param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
:param x: horizontal movement
:param y: vertical movement
:param duration: time to take the swipe, in ms
"""
if not self.driver_wrapper.is_mobile_test():
raise Exception('Swipe method is not implemented in Selenium')
# Get center coordinates of element
center = self.get_center(element)
initial_context = self.driver_wrapper.driver.current_context
if self.driver_wrapper.is_web_test() or initial_context != 'NATIVE_APP':
center = self.get_native_coords(center)
# Android needs absolute end coordinates and ios needs movement
end_x = x if self.driver_wrapper.is_ios_test() else center['x'] + x
end_y = y if self.driver_wrapper.is_ios_test() else center['y'] + y
self.driver_wrapper.driver.swipe(center['x'], center['y'], end_x, end_y, duration)
if self.driver_wrapper.is_web_test() or initial_context != 'NATIVE_APP':
self.driver_wrapper.driver.switch_to.context(initial_context) | [
"def",
"swipe",
"(",
"self",
",",
"element",
",",
"x",
",",
"y",
",",
"duration",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"driver_wrapper",
".",
"is_mobile_test",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Swipe method is not implemented in Seleniu... | 50.166667 | 25.708333 |
def compute_norm_tig(g, **kwargs):
r"""
Compute the :math:`\ell_2` norm of the Tig.
See :func:`compute_tig`.
Parameters
----------
g: Filter
The filter or filter bank.
kwargs: dict
Additional parameters to be passed to the
:func:`pygsp.filters.Filter.filter` method.
"""
tig = compute_tig(g, **kwargs)
return np.linalg.norm(tig, axis=1, ord=2) | [
"def",
"compute_norm_tig",
"(",
"g",
",",
"*",
"*",
"kwargs",
")",
":",
"tig",
"=",
"compute_tig",
"(",
"g",
",",
"*",
"*",
"kwargs",
")",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"tig",
",",
"axis",
"=",
"1",
",",
"ord",
"=",
"2",
")"
... | 26.266667 | 13.666667 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'counterexamples') and self.counterexamples is not None:
_dict['counterexamples'] = [
x._to_dict() for x in self.counterexamples
]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'counterexamples'",
")",
"and",
"self",
".",
"counterexamples",
"is",
"not",
"None",
":",
"_dict",
"[",
"'counterexamples'",
"]",
"=",
"[",
"x",
".",
... | 41.909091 | 19.090909 |
def emit(self, batch):
"""
:type batch:
:class:`~opencensus.ext.jaeger.trace_exporter.gen.jaeger.Batch`
:param batch: Object to emit Jaeger spans.
"""
udp_socket = None
try:
self.client._seqid = 0
# truncate and reset the position of BytesIO object
self.buffer._buffer.truncate(0)
self.buffer._buffer.seek(0)
self.client.emitBatch(batch)
buff = self.buffer.getvalue()
if len(buff) > self.max_packet_size:
logging.warn('Data exceeds the max UDP packet size; size {},\
max {}'.format(len(buff), self.max_packet_size))
else:
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(buff, self.address)
except Exception as e: # pragma: NO COVER
logging.error(getattr(e, 'message', e))
finally:
if udp_socket is not None:
udp_socket.close() | [
"def",
"emit",
"(",
"self",
",",
"batch",
")",
":",
"udp_socket",
"=",
"None",
"try",
":",
"self",
".",
"client",
".",
"_seqid",
"=",
"0",
"# truncate and reset the position of BytesIO object",
"self",
".",
"buffer",
".",
"_buffer",
".",
"truncate",
"(",
"0... | 37.777778 | 16.444444 |
def paint(self, painter, option, widget):
"""
Overloads the default QGraphicsItem paint event to update the \
node when necessary and call the draw method for the node.
:param painter <QPainter>
:param option <QStyleOptionGraphicsItem>
:param widget <QWidget>
"""
# rebuild when dirty
if self.isDirty():
self.rebuild()
painter.save()
painter.setOpacity( self.opacity() )
if self.drawHotspotsUnderneath():
self.drawHotspots(painter)
if self.isEnabled():
painter.setPen(self.penColor())
painter.setBrush(self.brush())
else:
painter.setPen(self.disabledPenColor())
painter.setBrush(self.disabledBrush())
# draw the item
self.draw(painter, option, widget)
if not self.drawHotspotsUnderneath():
self.drawHotspots(painter)
painter.restore() | [
"def",
"paint",
"(",
"self",
",",
"painter",
",",
"option",
",",
"widget",
")",
":",
"# rebuild when dirty",
"if",
"self",
".",
"isDirty",
"(",
")",
":",
"self",
".",
"rebuild",
"(",
")",
"painter",
".",
"save",
"(",
")",
"painter",
".",
"setOpacity",
... | 30.969697 | 14.363636 |
def get(self, name):
"""Gets the module with the given name if it exists in
this code parser."""
if name not in self.modules:
self.load_dependency(name, False, False, False)
if name in self.modules:
return self.modules[name]
else:
return None | [
"def",
"get",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"modules",
":",
"self",
".",
"load_dependency",
"(",
"name",
",",
"False",
",",
"False",
",",
"False",
")",
"if",
"name",
"in",
"self",
".",
"modules",
":",
... | 31 | 13.8 |
def _select_property(self, line):
"""try to match a property and load it"""
g = self.current['graph']
if not line:
out = g.all_properties
using_pattern = False
else:
using_pattern = True
if line.isdigit():
line = int(line)
out = g.get_property(line)
if out:
if type(out) == type([]):
choice = self._selectFromList(out, using_pattern, "property")
if choice:
self.currentEntity = {'name': choice.locale or choice.uri,
'object': choice, 'type': 'property'}
else:
self.currentEntity = {'name': out.locale or out.uri,
'object': out, 'type': 'property'}
# ..finally:
if self.currentEntity:
self._print_entity_intro(entity=self.currentEntity)
else:
print("not found") | [
"def",
"_select_property",
"(",
"self",
",",
"line",
")",
":",
"g",
"=",
"self",
".",
"current",
"[",
"'graph'",
"]",
"if",
"not",
"line",
":",
"out",
"=",
"g",
".",
"all_properties",
"using_pattern",
"=",
"False",
"else",
":",
"using_pattern",
"=",
"T... | 36.444444 | 18.777778 |
def convertPixelXYToLngLat(self, pixelX, pixelY, level):
'''
converts a pixel x, y to a latitude and longitude.
'''
mapSize = self.getMapDimensionsByZoomLevel(level)
x = (self.clipValue(pixelX, 0, mapSize - 1) / mapSize) - 0.5
y = 0.5 - (self.clipValue(pixelY, 0, mapSize - 1) / mapSize)
lat = 90 - 360 * math.atan(math.exp(-y * 2 * math.pi)) / math.pi
lng = 360 * x
return (lng, lat) | [
"def",
"convertPixelXYToLngLat",
"(",
"self",
",",
"pixelX",
",",
"pixelY",
",",
"level",
")",
":",
"mapSize",
"=",
"self",
".",
"getMapDimensionsByZoomLevel",
"(",
"level",
")",
"x",
"=",
"(",
"self",
".",
"clipValue",
"(",
"pixelX",
",",
"0",
",",
"map... | 37.25 | 25.916667 |
def get_beamarea_pix(self, ra, dec):
"""
Calculate the beam area in square pixels.
Parameters
----------
ra, dec : float
The sky coordinates at which the calculation is made
dec
Returns
-------
area : float
The beam area in square pixels.
"""
parea = abs(self.pixscale[0] * self.pixscale[1]) # in deg**2 at reference coords
barea = self.get_beamarea_deg2(ra, dec)
return barea / parea | [
"def",
"get_beamarea_pix",
"(",
"self",
",",
"ra",
",",
"dec",
")",
":",
"parea",
"=",
"abs",
"(",
"self",
".",
"pixscale",
"[",
"0",
"]",
"*",
"self",
".",
"pixscale",
"[",
"1",
"]",
")",
"# in deg**2 at reference coords",
"barea",
"=",
"self",
".",
... | 27.666667 | 19.333333 |
def get_args():
    """Parse and return the command line arguments."""
    parser = argparse.ArgumentParser(
        prog=__title__,
        description=__description__,
    )
    # (flags, help) pairs for the boolean switches, in display order.
    switches = [
        (('--testdb',), 'create and use a database with test users'),
        (('-v', '--verbose'), 'print a detailed log'),
        (('--debug',), 'print debug log'),
        (('--log-sql',), 'log sql transactions'),
        (('-V', '--version'), 'print version info and exit'),
        (('--tk',), 'use old tk interface'),
    ]
    for flags, help_text in switches:
        parser.add_argument(*flags, action='store_true', help=help_text)
    return parser.parse_args()
"def",
"get_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"__title__",
",",
"description",
"=",
"__description__",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--testdb'",
",",
"action",
"=",
"'store_true'",
","... | 27.16129 | 14.935484 |
def handle_new_task(self, task_name, record):
    """
    Register a starting task and emit its header line.

    Params:
        task_name (str): name of the task that is starting
        record (logging.LogRecord): log record with all the info

    Returns:
        None
    """
    # Rewrite the record so it renders as the task-start banner.
    record.task = task_name
    record.msg = ColorFormatter.colored('default', START_TASK_MSG)
    # Track the task with a bounded buffer of its records.
    self.tasks[task_name] = Task(name=task_name, maxlen=self.buffer_size)
    if self.should_show_by_depth():
        self.pretty_emit(record, is_header=True)
"def",
"handle_new_task",
"(",
"self",
",",
"task_name",
",",
"record",
")",
":",
"record",
".",
"msg",
"=",
"ColorFormatter",
".",
"colored",
"(",
"'default'",
",",
"START_TASK_MSG",
")",
"record",
".",
"task",
"=",
"task_name",
"self",
".",
"tasks",
"[",... | 33.235294 | 20.529412 |
def checkedIndexes(self):
    """
    Return the indexes of all checked items for this combobox.

    :return [<int>, ..]
    """
    if not self.isCheckable():
        return []
    model = self.model()
    checked = []
    for index in range(self.count()):
        if model.item(index).checkState():
            checked.append(index)
    return checked
"def",
"checkedIndexes",
"(",
"self",
")",
":",
"if",
"(",
"not",
"self",
".",
"isCheckable",
"(",
")",
")",
":",
"return",
"[",
"]",
"model",
"=",
"self",
".",
"model",
"(",
")",
"return",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"self",
".",
... | 29.181818 | 15.909091 |
def validate(self, document):
    """Check input for Python syntax errors."""
    source = document.text
    # A leading Ctrl-Z means EOF in a Python REPL, so always accept it.
    if source.startswith('\x1a'):
        return
    try:
        flags = self.get_compiler_flags() if self.get_compiler_flags else 0
        compile(source, '<input>', 'exec', flags=flags, dont_inherit=True)
    except SyntaxError as e:
        # Python 2.7 reports offset None for input like '4=4'; fall back
        # to column 1 in that case. (Looks fixed in Python 3.)
        index = document.translate_row_col_to_index(
            e.lineno - 1, (e.offset or 1) - 1)
        raise ValidationError(index, 'Syntax Error')
    except TypeError as e:
        # e.g. "compile() expected string without null bytes"
        raise ValidationError(0, str(e))
    except ValueError as e:
        # Python 2 raises ValueError instead of SyntaxError for some
        # invalid escape sequences such as "\x9".
        raise ValidationError(0, 'Syntax Error: %s' % e)
"def",
"validate",
"(",
"self",
",",
"document",
")",
":",
"# When the input starts with Ctrl-Z, always accept. This means EOF in a",
"# Python REPL.",
"if",
"document",
".",
"text",
".",
"startswith",
"(",
"'\\x1a'",
")",
":",
"return",
"try",
":",
"if",
"self",
".... | 41.413793 | 19.413793 |
def merge_errors(errors1, errors2):
    """Deeply merges two error messages. Error messages can be
    string, list of strings or dict of error messages (recursively).
    Format is the same as accepted by :exc:`ValidationError`.
    Returns new error messages.
    """
    # ``None`` is the identity element: merging with it yields the other side.
    if errors1 is None:
        return errors2
    elif errors2 is None:
        return errors1
    if isinstance(errors1, list):
        # An empty list is also treated as an identity element.
        if not errors1:
            return errors2
        if isinstance(errors2, list):
            # list + list -> concatenation
            return errors1 + errors2
        elif isinstance(errors2, dict):
            # list + dict -> fold the list into the dict's schema-level entry
            return dict(
                errors2,
                **{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))}
            )
        else:
            # list + scalar -> append the scalar
            return errors1 + [errors2]
    elif isinstance(errors1, dict):
        if isinstance(errors2, list):
            # dict + list -> fold the list into the dict's schema-level entry
            return dict(
                errors1,
                **{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)}
            )
        elif isinstance(errors2, dict):
            # dict + dict -> merge recursively, key by key
            errors = dict(errors1)
            for k, v in iteritems(errors2):
                if k in errors:
                    errors[k] = merge_errors(errors[k], v)
                else:
                    errors[k] = v
            return errors
        else:
            # dict + scalar -> fold the scalar into the schema-level entry
            return dict(
                errors1,
                **{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)}
            )
    else:
        if isinstance(errors2, list):
            # scalar + list -> prepend the scalar (empty list is identity)
            return [errors1] + errors2 if errors2 else errors1
        elif isinstance(errors2, dict):
            # scalar + dict -> fold the scalar into the schema-level entry
            return dict(
                errors2,
                **{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))}
            )
        else:
            # scalar + scalar -> two-element list
            return [errors1, errors2]
"def",
"merge_errors",
"(",
"errors1",
",",
"errors2",
")",
":",
"if",
"errors1",
"is",
"None",
":",
"return",
"errors2",
"elif",
"errors2",
"is",
"None",
":",
"return",
"errors1",
"if",
"isinstance",
"(",
"errors1",
",",
"list",
")",
":",
"if",
"not",
... | 31.90566 | 15.886792 |
def estimate_entropy(X, epsilon=None):
    r"""Estimate a dataset's Shannon entropy.

    This function can take datasets of mixed discrete and continuous
    features, and uses a set of heuristics to determine which functions
    to apply to each.

    Because this function is a subroutine in a mutual information estimator,
    we employ the Kozachenko Estimator[1] for continuous features when this
    function is _not_ used for mutual information and an adaptation of the
    Kraskov Estimator[2] when it is.

    Let X be made of continuous features c and discrete features d.
    To deal with both continuous and discrete features, We use the
    following reworking of entropy:

    $ H(X) = H(c,d) = \sum_{x \in d} p(x) \times H(c(x)) + H(d) $

    Where c(x) is a dataset that represents the rows of the continuous dataset
    in the same row as a discrete column with value x in the original dataset.

    Args:
        X (array-like): An array-like (np arr, pandas df, etc.) with shape
            (n_samples, n_features) or (n_samples)
        epsilon (array-like): An array with shape (n_samples, 1) that is
            the epsilon used in Kraskov Estimator. Represents the chebyshev
            distance from an element to its k-th nearest neighbor in the full
            dataset.

    Returns:
        float: A floating-point number representing the entropy in X.
            If the dataset is fully discrete, an exact calculation is done.
            If this is not the case and epsilon is not provided, this
            will be the Kozacheko Estimator of the dataset's entropy.
            If epsilon is provided, this is a partial estimation of the
            Kraskov entropy estimator. The bias is cancelled out when
            computing mutual information.

    References:
    .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
           information". Phys. Rev. E 69, 2004.
    .. [2] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
           of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16.
    """
    X = asarray2d(X)
    n_samples, n_features = X.shape
    if n_features < 1:
        return 0
    disc_mask = _get_discrete_columns(X)
    cont_mask = ~disc_mask
    # If our dataset is fully discrete/continuous, do something easier
    if np.all(disc_mask):
        return calculate_disc_entropy(X)
    elif np.all(cont_mask):
        return estimate_cont_entropy(X, epsilon)
    # Separate the dataset into discrete and continuous datasets d,c
    disc_features = asarray2d(X[:, disc_mask])
    cont_features = asarray2d(X[:, cont_mask])
    entropy = 0
    # Each unique discrete row is one value x with empirical probability p(x).
    uniques, counts = np.unique(disc_features, axis=0, return_counts=True)
    empirical_p = counts / n_samples
    # $\sum_{x \in d} p(x) \times H(c(x))$
    for i in range(counts.size):
        # Rows whose discrete part equals this unique discrete value.
        unique_mask = np.all(disc_features == uniques[i], axis=1)
        selected_cont_samples = cont_features[unique_mask, :]
        if epsilon is None:
            selected_epsilon = None
        else:
            selected_epsilon = epsilon[unique_mask, :]
        conditional_cont_entropy = estimate_cont_entropy(
            selected_cont_samples, selected_epsilon)
        entropy += empirical_p[i] * conditional_cont_entropy
    # H(d)
    entropy += calculate_disc_entropy(disc_features)
    if epsilon is None:
        # Clamp small negative estimates only in the standalone case; when
        # epsilon is given the bias cancels in the mutual-information caller.
        entropy = max(0, entropy)
    return entropy
"def",
"estimate_entropy",
"(",
"X",
",",
"epsilon",
"=",
"None",
")",
":",
"X",
"=",
"asarray2d",
"(",
"X",
")",
"n_samples",
",",
"n_features",
"=",
"X",
".",
"shape",
"if",
"n_features",
"<",
"1",
":",
"return",
"0",
"disc_mask",
"=",
"_get_discrete... | 40.54878 | 23.353659 |
def check_is_a_sequence(var, allow_none=False):
    """Raise ``TypeError`` unless :func:`is_a_sequence` accepts *var*."""
    if is_a_sequence(var, allow_none=allow_none):
        return
    raise TypeError("var must be a list or tuple, however type(var) is {}"
                    .format(type(var)))
"def",
"check_is_a_sequence",
"(",
"var",
",",
"allow_none",
"=",
"False",
")",
":",
"if",
"not",
"is_a_sequence",
"(",
"var",
",",
"allow_none",
"=",
"allow_none",
")",
":",
"raise",
"TypeError",
"(",
"\"var must be a list or tuple, however type(var) is {}\"",
".",... | 49.833333 | 10.166667 |
def publish(self, **kwargs):
    """
    Publishes the global workflow, so all users can find it and use it on the platform.

    The current user must be a developer of the workflow.
    """
    if self._dxid is None:
        # No cached ID: address the workflow by its name and alias instead.
        name_ref = 'globalworkflow-' + self._name
        return dxpy.api.global_workflow_publish(name_ref, alias=self._alias, **kwargs)
    return dxpy.api.global_workflow_publish(self._dxid, **kwargs)
"def",
"publish",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_dxid",
"is",
"not",
"None",
":",
"return",
"dxpy",
".",
"api",
".",
"global_workflow_publish",
"(",
"self",
".",
"_dxid",
",",
"*",
"*",
"kwargs",
")",
"else",
"... | 43.4 | 26.2 |
def add_from_child(self, resource, **kwargs):
    """Recursively add *resource* and all of its child resources under
    the current resource.
    """
    added = self.add(
        resource.member_name, resource.collection_name, **kwargs)
    for descendant in resource.children:
        added.add_from_child(descendant, **kwargs)
"def",
"add_from_child",
"(",
"self",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"new_resource",
"=",
"self",
".",
"add",
"(",
"resource",
".",
"member_name",
",",
"resource",
".",
"collection_name",
",",
"*",
"*",
"kwargs",
")",
"for",
"child",... | 38 | 13.555556 |
def url_quote(string, charset='utf-8', errors='strict', safe='/:'):
    """URL encode a single string with a given encoding.

    :param string: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    """
    # Coerce arbitrary objects to text, then text (and `safe`) to bytes.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    safe_bytes = frozenset(bytearray(safe) + _always_safe)
    quoted = bytearray()
    for byte in bytearray(string):
        if byte in safe_bytes:
            quoted.append(byte)
        else:
            # Percent-encode everything outside the safe set.
            quoted.extend(('%%%02X' % byte).encode('ascii'))
    return to_native(bytes(quoted))
"def",
"url_quote",
"(",
"string",
",",
"charset",
"=",
"'utf-8'",
",",
"errors",
"=",
"'strict'",
",",
"safe",
"=",
"'/:'",
")",
":",
"if",
"not",
"isinstance",
"(",
"string",
",",
"(",
"text_type",
",",
"bytes",
",",
"bytearray",
")",
")",
":",
"st... | 37.047619 | 12.238095 |
def msg_curse(self, args=None, max_width=None):
    """Return the dict to display in the curse interface."""
    # Init the return message
    # Only process if stats exist and display plugin enable...
    ret = []
    if not self.stats or args.disable_ports:
        return ret
    # Max size for the interface name
    name_max_width = max_width - 7
    # Build the string message
    for p in self.stats:
        if 'host' in p:
            # TCP port-scan entry: derive a short status label.
            if p['host'] is None:
                status = 'None'
            elif p['status'] is None:
                status = 'Scanning'
            elif isinstance(p['status'], bool_type) and p['status'] is True:
                status = 'Open'
            elif p['status'] == 0:
                status = 'Timeout'
            else:
                # Convert second to ms
                status = '{0:.0f}ms'.format(p['status'] * 1000.0)
            # Left column: truncated description; right column: status.
            msg = '{:{width}}'.format(p['description'][0:name_max_width],
                                      width=name_max_width)
            ret.append(self.curse_add_line(msg))
            msg = '{:>9}'.format(status)
            ret.append(self.curse_add_line(msg,
                                           self.get_ports_alert(p,
                                                                header=p['indice'] + '_rtt')))
            ret.append(self.curse_new_line())
        elif 'url' in p:
            # Web (URL) scan entry.
            msg = '{:{width}}'.format(p['description'][0:name_max_width],
                                      width=name_max_width)
            ret.append(self.curse_add_line(msg))
            if isinstance(p['status'], numbers.Number):
                status = 'Code {}'.format(p['status'])
            elif p['status'] is None:
                status = 'Scanning'
            else:
                status = p['status']
            msg = '{:>9}'.format(status)
            ret.append(self.curse_add_line(msg,
                                           self.get_web_alert(p,
                                                              header=p['indice'] + '_rtt')))
            ret.append(self.curse_new_line())
    # Delete the last empty line
    try:
        ret.pop()
    except IndexError:
        pass
    return ret
"def",
"msg_curse",
"(",
"self",
",",
"args",
"=",
"None",
",",
"max_width",
"=",
"None",
")",
":",
"# Init the return message",
"# Only process if stats exist and display plugin enable...",
"ret",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"stats",
"or",
"args",
... | 40.655172 | 17.551724 |
def init_app(self, app):
    '''Initializes the Flask application with this extension. It grabs
    the necessary configuration values from ``app.config``, those being
    HASHING_METHOD and HASHING_ROUNDS. HASHING_METHOD defaults to ``sha256``
    but can be any one of ``hashlib.algorithms``. HASHING_ROUNDS specifies
    the number of times to hash the input with the specified algorithm.
    This defaults to 1.

    :param app: Flask application object
    :raises ValueError: if HASHING_METHOD is not a supported algorithm
    :raises TypeError: if HASHING_ROUNDS is not an int
    '''
    self.algorithm = app.config.get('HASHING_METHOD', 'sha256')
    # Reject algorithms hashlib does not provide.
    if self.algorithm not in algs:
        raise ValueError('{} not one of {}'.format(self.algorithm, algs))
    self.rounds = app.config.get('HASHING_ROUNDS', 1)
    if not isinstance(self.rounds, int):
        raise TypeError('HASHING_ROUNDS must be type int')
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"self",
".",
"algorithm",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'HASHING_METHOD'",
",",
"'sha256'",
")",
"if",
"self",
".",
"algorithm",
"not",
"in",
"algs",
":",
"raise",
"ValueError",
"(",
... | 52.0625 | 24.5625 |
def get_new_category(self, api_category):
    """
    Instantiate a new Category from api data.

    :param api_category: the api data for the Category
    :return: the new Category
    """
    extra_fields = self.api_object_data("category", api_category)
    return Category(site_id=self.site_id,
                    wp_id=api_category["ID"],
                    **extra_fields)
"def",
"get_new_category",
"(",
"self",
",",
"api_category",
")",
":",
"return",
"Category",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_category",
"[",
"\"ID\"",
"]",
",",
"*",
"*",
"self",
".",
"api_object_data",
"(",
"\"category... | 37 | 12.2 |
def create_app():
    """ Flask application factory """

    # Create Flask app load app.config
    app = Flask(__name__)
    app.config.from_object(__name__+'.ConfigClass')

    # Initialize Flask-BabelEx
    babel = Babel(app)

    # Initialize Flask-SQLAlchemy
    db = SQLAlchemy(app)

    # Define the User data-model.
    # NB: Make sure to add flask_user UserMixin !!!
    class User(db.Model, UserMixin):
        __tablename__ = 'users'
        id = db.Column(db.Integer, primary_key=True)
        active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')

        # User authentication information. The collation='NOCASE' is required
        # to search case insensitively when USER_IFIND_MODE is 'nocase_collation'.
        email = db.Column(db.String(255, collation='NOCASE'), nullable=False, unique=True)
        email_confirmed_at = db.Column(db.DateTime())
        password = db.Column(db.String(255), nullable=False, server_default='')

        # User information
        first_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='')
        last_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='')

        # Define the relationship to Role via UserRoles
        roles = db.relationship('Role', secondary='user_roles')

    # Define the Role data-model
    class Role(db.Model):
        __tablename__ = 'roles'
        id = db.Column(db.Integer(), primary_key=True)
        name = db.Column(db.String(50), unique=True)

    # Define the UserRoles association table
    class UserRoles(db.Model):
        __tablename__ = 'user_roles'
        id = db.Column(db.Integer(), primary_key=True)
        user_id = db.Column(db.Integer(), db.ForeignKey('users.id', ondelete='CASCADE'))
        role_id = db.Column(db.Integer(), db.ForeignKey('roles.id', ondelete='CASCADE'))

    # Setup Flask-User and specify the User data-model
    user_manager = UserManager(app, db, User)

    # Create all database tables
    db.create_all()

    # Create 'member@example.com' user with no roles
    if not User.query.filter(User.email == 'member@example.com').first():
        user = User(
            email='member@example.com',
            email_confirmed_at=datetime.datetime.utcnow(),
            password=user_manager.hash_password('Password1'),
        )
        db.session.add(user)
        db.session.commit()

    # Create 'admin@example.com' user with 'Admin' and 'Agent' roles
    if not User.query.filter(User.email == 'admin@example.com').first():
        user = User(
            email='admin@example.com',
            email_confirmed_at=datetime.datetime.utcnow(),
            password=user_manager.hash_password('Password1'),
        )
        user.roles.append(Role(name='Admin'))
        user.roles.append(Role(name='Agent'))
        db.session.add(user)
        db.session.commit()

    # The Home page is accessible to anyone
    @app.route('/')
    def home_page():
        return render_template_string("""
            {% extends "flask_user_layout.html" %}
            {% block content %}
                <h2>{%trans%}Home page{%endtrans%}</h2>
                <p><a href={{ url_for('user.register') }}>{%trans%}Register{%endtrans%}</a></p>
                <p><a href={{ url_for('user.login') }}>{%trans%}Sign in{%endtrans%}</a></p>
                <p><a href={{ url_for('home_page') }}>{%trans%}Home Page{%endtrans%}</a> (accessible to anyone)</p>
                <p><a href={{ url_for('member_page') }}>{%trans%}Member Page{%endtrans%}</a> (login_required: member@example.com / Password1)</p>
                <p><a href={{ url_for('admin_page') }}>{%trans%}Admin Page{%endtrans%}</a> (role_required: admin@example.com / Password1')</p>
                <p><a href={{ url_for('user.logout') }}>{%trans%}Sign out{%endtrans%}</a></p>
            {% endblock %}
            """)

    # The Members page is only accessible to authenticated users
    @app.route('/members')
    @login_required    # Use of @login_required decorator
    def member_page():
        return render_template_string("""
            {% extends "flask_user_layout.html" %}
            {% block content %}
                <h2>{%trans%}Members page{%endtrans%}</h2>
                <p><a href={{ url_for('user.register') }}>{%trans%}Register{%endtrans%}</a></p>
                <p><a href={{ url_for('user.login') }}>{%trans%}Sign in{%endtrans%}</a></p>
                <p><a href={{ url_for('home_page') }}>{%trans%}Home Page{%endtrans%}</a> (accessible to anyone)</p>
                <p><a href={{ url_for('member_page') }}>{%trans%}Member Page{%endtrans%}</a> (login_required: member@example.com / Password1)</p>
                <p><a href={{ url_for('admin_page') }}>{%trans%}Admin Page{%endtrans%}</a> (role_required: admin@example.com / Password1')</p>
                <p><a href={{ url_for('user.logout') }}>{%trans%}Sign out{%endtrans%}</a></p>
            {% endblock %}
            """)

    # The Admin page requires an 'Admin' role.
    @app.route('/admin')
    @roles_required('Admin')    # Use of @roles_required decorator
    def admin_page():
        return render_template_string("""
            {% extends "flask_user_layout.html" %}
            {% block content %}
                <h2>{%trans%}Admin Page{%endtrans%}</h2>
                <p><a href={{ url_for('user.register') }}>{%trans%}Register{%endtrans%}</a></p>
                <p><a href={{ url_for('user.login') }}>{%trans%}Sign in{%endtrans%}</a></p>
                <p><a href={{ url_for('home_page') }}>{%trans%}Home Page{%endtrans%}</a> (accessible to anyone)</p>
                <p><a href={{ url_for('member_page') }}>{%trans%}Member Page{%endtrans%}</a> (login_required: member@example.com / Password1)</p>
                <p><a href={{ url_for('admin_page') }}>{%trans%}Admin Page{%endtrans%}</a> (role_required: admin@example.com / Password1')</p>
                <p><a href={{ url_for('user.logout') }}>{%trans%}Sign out{%endtrans%}</a></p>
            {% endblock %}
            """)

    return app
"def",
"create_app",
"(",
")",
":",
"# Create Flask app load app.config",
"app",
"=",
"Flask",
"(",
"__name__",
")",
"app",
".",
"config",
".",
"from_object",
"(",
"__name__",
"+",
"'.ConfigClass'",
")",
"# Initialize Flask-BabelEx",
"babel",
"=",
"Babel",
"(",
... | 48.632 | 28.472 |
def pop(self, key, timeout=1, is_async=False, only_read=False):
    """
    Remove *key* from the cache and return its stored value, or ``None``
    when the key is absent.

    Test:
        >>> cache = Cache(log_level=logging.WARNING)
        >>> cache.put('a', 0)
        >>> cache.pop('a')
        0
        >>> cache.pop('b') == None
        True
    """
    if key in self.cache_items:
        # Each cache entry is a mapping keyed by its own cache key.
        return self.cache_items.pop(key)[key]
    return None
"def",
"pop",
"(",
"self",
",",
"key",
",",
"timeout",
"=",
"1",
",",
"is_async",
"=",
"False",
",",
"only_read",
"=",
"False",
")",
":",
"if",
"key",
"not",
"in",
"self",
".",
"cache_items",
":",
"return",
"None",
"return",
"self",
".",
"cache_items... | 26.214286 | 15.357143 |
def OnCellTextRotation(self, event):
    """Cell text rotation event handler.

    Toggles the "angle" attribute of the selected cells inside an undo
    group, refreshes the grid and its attribute toolbar, then lets the
    event propagate.
    """
    with undo.group(_("Rotation")):
        self.grid.actions.toggle_attr("angle")
    self.grid.ForceRefresh()
    self.grid.update_attribute_toolbar()
    if is_gtk():
        try:
            wx.Yield()
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; wx.Yield failures are ordinary Exceptions, so catch
        # only those (best-effort yield on GTK).
        except Exception:
            pass
    event.Skip()
"def",
"OnCellTextRotation",
"(",
"self",
",",
"event",
")",
":",
"with",
"undo",
".",
"group",
"(",
"_",
"(",
"\"Rotation\"",
")",
")",
":",
"self",
".",
"grid",
".",
"actions",
".",
"toggle_attr",
"(",
"\"angle\"",
")",
"self",
".",
"grid",
".",
"F... | 21.647059 | 20.352941 |
def start_transaction(hostname, username, password, label):
    '''
    A function to connect to a bigip device and start a new transaction.

    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    label
        The name / alias for this transaction. The actual transaction
        id will be stored within a grain called ``bigip_f5_trans:<label>``

    CLI Example::

        salt '*' bigip.start_transaction bigip admin admin my_transaction
    '''
    #build the session
    bigip_session = _build_session(username, password)
    payload = {}
    #post to REST to get trans id
    try:
        response = bigip_session.post(
            BIG_IP_URL_BASE.format(host=hostname) + '/transaction',
            data=salt.utils.json.dumps(payload)
        )
    except requests.exceptions.ConnectionError as e:
        return _load_connection_error(hostname, e)
    #extract the trans_id
    data = _load_response(response)
    if data['code'] == 200:
        trans_id = data['content']['transId']
        # Persist the transaction id in a grain so later calls can look it
        # up by label.
        __salt__['grains.setval']('bigip_f5_trans', {label: trans_id})
        return 'Transaction: {trans_id} - has successfully been stored in the grain: bigip_f5_trans:{label}'.format(trans_id=trans_id,
                                                                                                                    label=label)
    else:
        # Non-200: hand the parsed error payload back to the caller.
        return data
"def",
"start_transaction",
"(",
"hostname",
",",
"username",
",",
"password",
",",
"label",
")",
":",
"#build the session",
"bigip_session",
"=",
"_build_session",
"(",
"username",
",",
"password",
")",
"payload",
"=",
"{",
"}",
"#post to REST to get trans id",
"... | 30.361702 | 28.446809 |
def exception(maxTBlevel=None):
    """Retrieve useful information about an exception.

    Returns a bunch (attribute-access dict) with the following information:

    * name: exception class name
    * cls: the exception class
    * exception: the exception instance
    * trace: the traceback instance
    * formatted: formatted traceback
    * args: arguments to the exception instance

    This functionality allows you to trap an exception in a method agnostic to
    differences between Python 2.x and 3.x.
    """
    # Capture the exception info BEFORE entering the try block: in the
    # original code `cls, exc, trbk` were assigned inside `try`, so if the
    # marrow import failed the `finally: del ...` raised UnboundLocalError
    # and masked the real error.
    cls, exc, trbk = sys.exc_info()
    try:
        from marrow.util.bunch import Bunch
        excName = cls.__name__
        excArgs = getattr(exc, 'args', None)
        excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel))
        return Bunch(
            name=excName,
            cls=cls,
            exception=exc,
            trace=trbk,
            formatted=excTb,
            args=excArgs
        )
    finally:
        # Drop local references to the traceback to break reference cycles.
        del cls, exc, trbk
"def",
"exception",
"(",
"maxTBlevel",
"=",
"None",
")",
":",
"try",
":",
"from",
"marrow",
".",
"util",
".",
"bunch",
"import",
"Bunch",
"cls",
",",
"exc",
",",
"trbk",
"=",
"sys",
".",
"exc_info",
"(",
")",
"excName",
"=",
"cls",
".",
"__name__",
... | 27.555556 | 19.083333 |
def create(path, saltenv=None):
    '''
    join `path` and `saltenv` into a 'salt://' URL.
    '''
    if salt.utils.platform.is_windows():
        path = salt.utils.path.sanitize_win_path(path)
    path = salt.utils.data.decode(path)
    # Carry the saltenv along as a query string when one is given.
    query = ''
    if saltenv:
        query = 'saltenv={0}'.format(saltenv)
    parts = ('file', '', path, '', query, '')
    url = salt.utils.data.decode(urlunparse(parts))
    # Swap the file:/// scheme prefix for salt://.
    prefix_len = len('file:///')
    return 'salt://{0}'.format(url[prefix_len:])
"def",
"create",
"(",
"path",
",",
"saltenv",
"=",
"None",
")",
":",
"if",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
":",
"path",
"=",
"salt",
".",
"utils",
".",
"path",
".",
"sanitize_win_path",
"(",
"path",
")",
"path",
... | 38.272727 | 19.363636 |
def _get_http_request(netloc, path="/", headers=None, ssl=False):
    """
    Actually gets the http. Moved this to it's own private method since
    it is called several times for following redirects

    :param netloc: host (optionally ``host:port``) to contact
    :param path: request path
    :param headers: optional request headers
    :param ssl: use HTTPS (default port 443) when True
    :return: dict with "request" and "response" sections
    """
    if ssl:
        port = 443
    else:
        port = 80
    host = netloc
    # An explicit "host:port" in the netloc overrides the default port.
    if len(netloc.split(":")) == 2:
        host, port = netloc.split(":")
    request = {"host": host,
               "port": port,
               "path": path,
               "ssl": ssl,
               "method": "GET"}
    if headers:
        request["headers"] = headers
    response = {}
    try:
        conn = ICHTTPConnection(host=host, port=port, timeout=10)
        conn.request(path, headers, ssl, timeout=10)
        response["status"] = conn.status
        response["reason"] = conn.reason
        response["headers"] = conn.headers
        body = conn.body
        try:
            response["body"] = body.encode('utf-8')
        except UnicodeDecodeError:
            # if utf-8 fails to encode, just use base64
            # NOTE(review): the 'base64' string codec only exists on
            # Python 2 — confirm this module is Python 2 only.
            response["body.b64"] = body.encode('base64')
    except Exception as err:
        # Best-effort scan: record the failure instead of raising.
        response["failure"] = str(err)
    result = {"response": response,
              "request": request}
    return result
"def",
"_get_http_request",
"(",
"netloc",
",",
"path",
"=",
"\"/\"",
",",
"headers",
"=",
"None",
",",
"ssl",
"=",
"False",
")",
":",
"if",
"ssl",
":",
"port",
"=",
"443",
"else",
":",
"port",
"=",
"80",
"host",
"=",
"netloc",
"if",
"len",
"(",
... | 23.886792 | 20.566038 |
def line(self, x0, y0, x1, y1):
    """Draw a line using Xiaolin Wu's antialiasing technique"""
    # clean params
    x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
    # Ensure the line runs top-to-bottom (y0 <= y1), swapping endpoints.
    if y0 > y1:
        y0, y1, x0, x1 = y1, y0, x1, x0
    dx = x1 - x0
    # sx is the horizontal step direction (+1 or -1); dx becomes |dx|.
    if dx < 0:
        sx = -1
    else:
        sx = 1
    dx *= sx
    dy = y1 - y0
    # 'easy' cases
    if dy == 0:
        # Horizontal line: no antialiasing needed.
        for x in range(x0, x1, sx):
            self.point(x, y0)
        return
    if dx == 0:
        # Vertical line.
        for y in range(y0, y1):
            self.point(x0, y)
        self.point(x1, y1)
        return
    if dx == dy:
        # Perfect 45-degree diagonal.
        for x in range(x0, x1, sx):
            self.point(x, y0)
            y0 += 1
        return
    # main loop
    self.point(x0, y0)
    # e_acc is a 16-bit fixed-point error accumulator (masked with 0xFFFF);
    # its top byte drives the pixel intensity split between the two pixels
    # straddling the ideal line.
    e_acc = 0
    if dy > dx:  # vertical displacement
        e = (dx << 16) // dy
        for i in range(y0, y1 - 1):
            e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
            # Accumulator wrapped past 0xFFFF: step one pixel in x.
            if e_acc <= e_acc_temp:
                x0 += sx
            w = 0xFF-(e_acc >> 8)
            self.point(x0, y0, intensity(self.color, w))
            y0 += 1
            # Paired pixel with the complementary coverage weight.
            self.point(x0 + sx, y0, intensity(self.color, (0xFF - w)))
        self.point(x1, y1)
        return
    # horizontal displacement
    e = (dy << 16) // dx
    for i in range(x0, x1 - sx, sx):
        e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
        if e_acc <= e_acc_temp:
            y0 += 1
        w = 0xFF-(e_acc >> 8)
        self.point(x0, y0, intensity(self.color, w))
        x0 += sx
        self.point(x0, y0 + 1, intensity(self.color, (0xFF-w)))
    self.point(x1, y1)
"def",
"line",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
")",
":",
"# clean params",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
"=",
"int",
"(",
"x0",
")",
",",
"int",
"(",
"y0",
")",
",",
"int",
"(",
"x1",
")",
",",
"int",
"("... | 30.403509 | 15.859649 |
def create(self, teamId, personId=None, personEmail=None,
           isModerator=False, **request_parameters):
    """Add someone to a team by Person ID or email address.

    Add someone to a team by Person ID or email address; optionally making
    them a moderator.

    Args:
        teamId(basestring): The team ID.
        personId(basestring): The person ID.
        personEmail(basestring): The email address of the person.
        isModerator(bool): Set to True to make the person a team moderator.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        TeamMembership: A TeamMembership object with the details of the
            created team membership.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    # Validate argument types up front; only teamId is mandatory.
    check_type(teamId, basestring, may_be_none=False)
    for value, expected in ((personId, basestring),
                            (personEmail, basestring),
                            (isModerator, bool)):
        check_type(value, expected)
    post_data = dict_from_items_with_values(
        request_parameters,
        teamId=teamId,
        personId=personId,
        personEmail=personEmail,
        isModerator=isModerator,
    )
    # API request
    json_data = self._session.post(API_ENDPOINT, json=post_data)
    # Return a team membership object created from the response JSON data
    return self._object_factory(OBJECT_TYPE, json_data)
"def",
"create",
"(",
"self",
",",
"teamId",
",",
"personId",
"=",
"None",
",",
"personEmail",
"=",
"None",
",",
"isModerator",
"=",
"False",
",",
"*",
"*",
"request_parameters",
")",
":",
"check_type",
"(",
"teamId",
",",
"basestring",
",",
"may_be_none",... | 37.428571 | 21.809524 |
def set_tree(self, tree):
    """ set_channel: records progress from creating the tree
        Args: tree (ChannelManager): manager Ricecooker is using
        Returns: None
    """
    self.tree = tree
    # Tree construction is done; advance recorded status to the
    # file-download phase.
    self.__record_progress(Status.DOWNLOAD_FILES)
"def",
"set_tree",
"(",
"self",
",",
"tree",
")",
":",
"self",
".",
"tree",
"=",
"tree",
"self",
".",
"__record_progress",
"(",
"Status",
".",
"DOWNLOAD_FILES",
")"
] | 38.571429 | 12.428571 |
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f show_pricing my-softlayerhw-config profile=my-profile

    If pricing sources have not been cached, they will be downloaded. Once they
    have been cached, they will not be updated automatically. To manually update
    all prices, use the following command:

    .. code-block:: bash

        salt-cloud -f update_pricing <provider>

    .. versionadded:: 2015.8.0
    '''
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        return {'Error': 'The requested profile was not found'}
    # Make sure the profile belongs to Softlayer HW
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'softlayer_hw':
        return {'Error': 'The requested profile does not belong to Softlayer HW'}
    raw = {}
    ret = {}
    ret['per_hour'] = 0
    conn = get_conn(service='SoftLayer_Product_Item_Price')
    for item in profile:
        # Skip profile metadata keys; the rest are price item ids.
        if item in ('profile', 'provider', 'location'):
            continue
        price = conn.getObject(id=profile[item])
        raw[item] = price
        ret['per_hour'] += decimal.Decimal(price.get('hourlyRecurringFee', 0))
    # Derive the longer-term estimates from the hourly total.
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = ret['per_day'] * 30
    ret['per_year'] = ret['per_week'] * 52
    if kwargs.get('raw', False):
        ret['_raw'] = raw
    return {profile['profile']: ret}
"def",
"show_pricing",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"profile",
"=",
"__opts__",
"[",
"'profiles'",
"]",
".",
"get",
"(",
"kwargs",
"[",
"'profile'",
"]",
",",
"{",
"}",
")",
"if",
"not",
"profile",
":",
"return",
... | 31.333333 | 22.745098 |
def set_context(self, context):
    """
    Switch this connection to a new session context.

    :param context: A :class:`Context` instance giving the new session
        context to use.
    """
    if isinstance(context, Context):
        _lib.SSL_set_SSL_CTX(self._ssl, context._context)
        self._context = context
    else:
        raise TypeError("context must be a Context instance")
"def",
"set_context",
"(",
"self",
",",
"context",
")",
":",
"if",
"not",
"isinstance",
"(",
"context",
",",
"Context",
")",
":",
"raise",
"TypeError",
"(",
"\"context must be a Context instance\"",
")",
"_lib",
".",
"SSL_set_SSL_CTX",
"(",
"self",
".",
"_ssl"... | 33.916667 | 17.25 |
def initialize_non_bfd(self, architecture=None, machine=None,
                       endian=ENDIAN_UNKNOWN):
    """Initialize underlying libOpcodes library not using BFD.

    Silently does nothing unless all three settings are provided.
    """
    if None in (architecture, machine, endian):
        return
    self.architecture = architecture
    self.machine = machine
    self.endian = endian
"def",
"initialize_non_bfd",
"(",
"self",
",",
"architecture",
"=",
"None",
",",
"machine",
"=",
"None",
",",
"endian",
"=",
"ENDIAN_UNKNOWN",
")",
":",
"if",
"None",
"in",
"[",
"architecture",
",",
"machine",
",",
"endian",
"]",
":",
"return",
"self",
"... | 32.8 | 16.5 |
def atlas_rank_peers_by_health( peer_list=None, peer_table=None, with_zero_requests=False, with_rank=False ):
"""
Get a ranking of peers to contact for a zonefile.
Peers are ranked by health (i.e. response ratio).
Optionally include peers we haven't talked to yet (@with_zero_requests)
Optionally return [(health, peer)] list instead of just [peer] list (@with_rank)
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_list is None:
peer_list = ptbl.keys()[:]
peer_health_ranking = [] # (health score, peer hostport)
for peer_hostport in peer_list:
reqcount = atlas_peer_get_request_count( peer_hostport, peer_table=ptbl )
if reqcount == 0 and not with_zero_requests:
continue
health_score = atlas_peer_get_health( peer_hostport, peer_table=ptbl)
peer_health_ranking.append( (health_score, peer_hostport) )
# sort on health
peer_health_ranking.sort()
peer_health_ranking.reverse()
if not with_rank:
return [peer_hp for _, peer_hp in peer_health_ranking]
else:
# include the score.
return peer_health_ranking | [
"def",
"atlas_rank_peers_by_health",
"(",
"peer_list",
"=",
"None",
",",
"peer_table",
"=",
"None",
",",
"with_zero_requests",
"=",
"False",
",",
"with_rank",
"=",
"False",
")",
":",
"with",
"AtlasPeerTableLocked",
"(",
"peer_table",
")",
"as",
"ptbl",
":",
"i... | 37.709677 | 23.774194 |
def conj(coll, *xs):
"""Conjoin xs to collection. New elements may be added in different positions
depending on the type of coll. conj returns the same type as coll. If coll
is None, return a list with xs conjoined."""
if coll is None:
l = llist.List.empty()
return l.cons(*xs)
if isinstance(coll, IPersistentCollection):
return coll.cons(*xs)
raise TypeError(
f"Object of type {type(coll)} does not implement Collection interface"
) | [
"def",
"conj",
"(",
"coll",
",",
"*",
"xs",
")",
":",
"if",
"coll",
"is",
"None",
":",
"l",
"=",
"llist",
".",
"List",
".",
"empty",
"(",
")",
"return",
"l",
".",
"cons",
"(",
"*",
"xs",
")",
"if",
"isinstance",
"(",
"coll",
",",
"IPersistentCo... | 40.166667 | 17.75 |
def remove_primary_analyses(self):
"""Remove analyses relocated to partitions
"""
for ar, analyses in self.analyses_to_remove.items():
analyses_ids = list(set(map(api.get_id, analyses)))
ar.manage_delObjects(analyses_ids)
self.analyses_to_remove = dict() | [
"def",
"remove_primary_analyses",
"(",
"self",
")",
":",
"for",
"ar",
",",
"analyses",
"in",
"self",
".",
"analyses_to_remove",
".",
"items",
"(",
")",
":",
"analyses_ids",
"=",
"list",
"(",
"set",
"(",
"map",
"(",
"api",
".",
"get_id",
",",
"analyses",
... | 43.428571 | 7.857143 |
def _make_celery_app(config):
"""This exposes the celery app. The app is actually created as part
of the configuration. However, this does make the celery app functional
as a stand-alone celery application.
This puts the pyramid configuration object on the celery app to be
used for making the registry available to tasks running inside the
celery worker process pool. See ``CustomTask.__call__``.
"""
# Tack the pyramid config on the celery app for later use.
config.registry.celery_app.conf['pyramid_config'] = config
return config.registry.celery_app | [
"def",
"_make_celery_app",
"(",
"config",
")",
":",
"# Tack the pyramid config on the celery app for later use.",
"config",
".",
"registry",
".",
"celery_app",
".",
"conf",
"[",
"'pyramid_config'",
"]",
"=",
"config",
"return",
"config",
".",
"registry",
".",
"celery_... | 44.846154 | 19.461538 |
def results(self, names=None, alpha=_alpha, mode='peak', **kwargs):
"""
Calculate the results for a set of parameters.
"""
if names is None: names = self.names
ret = odict()
for n in names:
ret[n] = getattr(self,'%s_interval'%mode)(n, **kwargs)
return ret | [
"def",
"results",
"(",
"self",
",",
"names",
"=",
"None",
",",
"alpha",
"=",
"_alpha",
",",
"mode",
"=",
"'peak'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"names",
"is",
"None",
":",
"names",
"=",
"self",
".",
"names",
"ret",
"=",
"odict",
"(",
... | 35 | 14.333333 |
def save_downloaded_file(filename, save_file_at, file_stream):
""" Save Downloaded File to Disk Helper Function
:param save_file_at: Path of where to save the file.
:param file_stream: File stream
:param filename: Name to save the file.
"""
filename = os.path.join(save_file_at, filename)
with open(filename, 'wb') as f:
f.write(file_stream)
f.flush() | [
"def",
"save_downloaded_file",
"(",
"filename",
",",
"save_file_at",
",",
"file_stream",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"save_file_at",
",",
"filename",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
"... | 38.363636 | 12.090909 |
def Chen_1979(Re, eD):
r'''Calculates Darcy friction factor using the method in Chen (1979) [2]_
as shown in [1]_.
.. math::
\frac{1}{\sqrt{f_f}} = -4\log\left[\frac{\epsilon}{3.7065D}
-\frac{5.0452}{Re}\log A_4\right]
.. math::
A_4 = \frac{(\epsilon/D)^{1.1098}}{2.8257}
+ \left(\frac{7.149}{Re}\right)^{0.8981}
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 4E3 <= Re <= 4E8; 1E-7 <= eD <= 5E-2.
Examples
--------
>>> Chen_1979(1E5, 1E-4)
0.018552817507472126
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Chen, Ning Hsing. "An Explicit Equation for Friction Factor in
Pipe." Industrial & Engineering Chemistry Fundamentals 18, no. 3
(August 1, 1979): 296-97. doi:10.1021/i160071a019.
'''
A4 = eD**1.1098/2.8257 + (7.149/Re)**0.8981
ff = (-4*log10(eD/3.7065 - 5.0452/Re*log10(A4)))**-2
return 4*ff | [
"def",
"Chen_1979",
"(",
"Re",
",",
"eD",
")",
":",
"A4",
"=",
"eD",
"**",
"1.1098",
"/",
"2.8257",
"+",
"(",
"7.149",
"/",
"Re",
")",
"**",
"0.8981",
"ff",
"=",
"(",
"-",
"4",
"*",
"log10",
"(",
"eD",
"/",
"3.7065",
"-",
"5.0452",
"/",
"Re",... | 27.847826 | 24.673913 |
def _make_splice_targets_dict(df, feature, strand):
"""Make dict mapping each donor to the location of all acceptors it splices
to or each acceptor to all donors it splices from.
Parameters
----------
df : pandas.DataFrame
Dataframe with splice junction information from external database
containing columns 'gene', 'chrom', 'start', 'end', 'strand',
'chrom:start', 'chrom:end', 'donor', 'acceptor', 'intron'.
feature : string
Either 'donor' or 'acceptor'.
strand : string
Either '+' or '-'.
Returns
-------
d : dict
If feature='donor', dict whose keys are all distinct donors in df and
whose values are the distinct locations (integers) of the acceptors that
donor splices to in a numpy array. If feature='acceptor', dict whose
keys are all distinct acceptors in df and whose values are the distinct
locations (integers) of the donors that acceptor splices from in a numpy
array.
"""
g = df[df.strand == strand].groupby(feature)
d = dict()
if strand == '+':
if feature == 'donor':
target = 'end'
if feature == 'acceptor':
target = 'start'
if strand == '-':
if feature == 'donor':
target = 'start'
if feature == 'acceptor':
target = 'end'
for k in g.groups.keys():
d[k] = np.array(list(set(df.ix[g.groups[k], target])))
d[k].sort()
return d | [
"def",
"_make_splice_targets_dict",
"(",
"df",
",",
"feature",
",",
"strand",
")",
":",
"g",
"=",
"df",
"[",
"df",
".",
"strand",
"==",
"strand",
"]",
".",
"groupby",
"(",
"feature",
")",
"d",
"=",
"dict",
"(",
")",
"if",
"strand",
"==",
"'+'",
":"... | 32.422222 | 22.6 |
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data | [
"def",
"lowdata_fmt",
"(",
")",
":",
"if",
"cherrypy",
".",
"request",
".",
"method",
".",
"upper",
"(",
")",
"!=",
"'POST'",
":",
"return",
"data",
"=",
"cherrypy",
".",
"request",
".",
"unserialized_data",
"# if the data was sent as urlencoded, we need to make i... | 36.291667 | 25.125 |
def _pack_prms():
"""if you introduce new 'save-able' parameter dictionaries, then you have
to include them here"""
config_dict = {
"Paths": prms.Paths.to_dict(),
"FileNames": prms.FileNames.to_dict(),
"Db": prms.Db.to_dict(),
"DbCols": prms.DbCols.to_dict(),
"DataSet": prms.DataSet.to_dict(),
"Reader": prms.Reader.to_dict(),
"Instruments": prms.Instruments.to_dict(),
# "excel_db_cols": prms.excel_db_cols.to_dict(),
# "excel_db_filename_cols": prms.excel_db_filename_cols.to_dict(),
"Batch": prms.Batch.to_dict(),
}
return config_dict | [
"def",
"_pack_prms",
"(",
")",
":",
"config_dict",
"=",
"{",
"\"Paths\"",
":",
"prms",
".",
"Paths",
".",
"to_dict",
"(",
")",
",",
"\"FileNames\"",
":",
"prms",
".",
"FileNames",
".",
"to_dict",
"(",
")",
",",
"\"Db\"",
":",
"prms",
".",
"Db",
".",
... | 36.647059 | 12.764706 |
def ungettext(singular, plural, number, context=None):
"""Always return a stripped string, localized if possible"""
singular_stripped = strip_whitespace(singular)
plural_stripped = strip_whitespace(plural)
if context:
singular = add_context(context, singular_stripped)
plural = add_context(context, plural_stripped)
else:
singular = singular_stripped
plural = plural_stripped
ret = django_nugettext(singular, plural, number)
# If the context isn't found, the string is returned as it came
if ret == singular:
return singular_stripped
elif ret == plural:
return plural_stripped
return ret | [
"def",
"ungettext",
"(",
"singular",
",",
"plural",
",",
"number",
",",
"context",
"=",
"None",
")",
":",
"singular_stripped",
"=",
"strip_whitespace",
"(",
"singular",
")",
"plural_stripped",
"=",
"strip_whitespace",
"(",
"plural",
")",
"if",
"context",
":",
... | 32.95 | 18.35 |
def non_fluents_scope(self) -> Dict[str, TensorFluent]:
'''Returns a partial scope with non-fluents.
Returns:
A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
'''
if self.__dict__.get('non_fluents') is None:
self._initialize_non_fluents()
return dict(self.non_fluents) | [
"def",
"non_fluents_scope",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
":",
"if",
"self",
".",
"__dict__",
".",
"get",
"(",
"'non_fluents'",
")",
"is",
"None",
":",
"self",
".",
"_initialize_non_fluents",
"(",
")",
"return",
"d... | 38.555556 | 19.888889 |
def _submit_changes_to_github_repo(path, url):
""" Temporarily commits local changes and submits them to
the GitHub repository that the user has specified. Then
reverts the changes to the git repository if a commit was
necessary. """
try:
repo = git.Repo(path)
except Exception:
raise RuntimeError('Couldn\'t locate a repository at `%s`.' % path)
commited = False
try:
try:
repo.delete_remote('trytravis')
except Exception:
pass
print('Adding a temporary remote to '
'`%s`...' % url)
remote = repo.create_remote('trytravis', url)
print('Adding all local changes...')
repo.git.add('--all')
try:
print('Committing local changes...')
timestamp = datetime.datetime.now().isoformat()
repo.git.commit(m='trytravis-' + timestamp)
commited = True
except git.exc.GitCommandError as e:
if 'nothing to commit' in str(e):
commited = False
else:
raise
commit = repo.head.commit.hexsha
committed_at = repo.head.commit.committed_datetime
print('Pushing to `trytravis` remote...')
remote.push(force=True)
finally:
if commited:
print('Reverting to old state...')
repo.git.reset('HEAD^')
try:
repo.delete_remote('trytravis')
except Exception:
pass
return commit, committed_at | [
"def",
"_submit_changes_to_github_repo",
"(",
"path",
",",
"url",
")",
":",
"try",
":",
"repo",
"=",
"git",
".",
"Repo",
"(",
"path",
")",
"except",
"Exception",
":",
"raise",
"RuntimeError",
"(",
"'Couldn\\'t locate a repository at `%s`.'",
"%",
"path",
")",
... | 32.911111 | 15.644444 |
def encodeRNAStructure(seq_vec, maxlen=None, seq_align="start",
W=240, L=160, U=1,
tmpdir="/tmp/RNAplfold/"):
"""Compute RNA secondary structure with RNAplfold implemented in
Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832).
# Note
Secondary structure is represented as the probability
to be in the following states:
- `["Pairedness", "Hairpin loop", "Internal loop", "Multi loop", "External region"]`
See Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832)
for more information.
# Arguments
seq_vec: list of DNA/RNA sequences
maxlen: Maximum sequence length. See `concise.preprocessing.pad_sequences` for more detail
seq_align: How to align the sequences of variable lengths. See `concise.preprocessing.pad_sequences` for more detail
W: Int; span - window length
L: Int; maxiumm span
U: Int; size of unpaired region
tmpdir: Where to store the intermediary files of RNAplfold.
# Note
Recommended parameters:
- for human, mouse use W, L, u : 240, 160, 1
- for fly, yeast use W, L, u : 80, 40, 1
# Returns
np.ndarray of shape `(len(seq_vec), maxlen, 5)`
"""
# extend the tmpdir with uuid string to allow for parallel execution
tmpdir = tmpdir + "/" + str(uuid4()) + "/"
if not isinstance(seq_vec, list):
seq_vec = seq_vec.tolist()
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
fasta_path = tmpdir + "/input.fasta"
write_fasta(fasta_path, seq_vec)
run_RNAplfold(fasta_path, tmpdir, W=W, L=L, U=U)
# 1. split the fasta into pieces
# 2. run_RNAplfold for each of them
# 3. Read the results
return read_RNAplfold(tmpdir, maxlen, seq_align=seq_align, pad_with="E") | [
"def",
"encodeRNAStructure",
"(",
"seq_vec",
",",
"maxlen",
"=",
"None",
",",
"seq_align",
"=",
"\"start\"",
",",
"W",
"=",
"240",
",",
"L",
"=",
"160",
",",
"U",
"=",
"1",
",",
"tmpdir",
"=",
"\"/tmp/RNAplfold/\"",
")",
":",
"# extend the tmpdir with uuid... | 39.06383 | 21.297872 |
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user] | [
"def",
"process",
"(",
"self",
",",
"message",
",",
"user",
")",
":",
"if",
"not",
"self",
".",
"_socket",
":",
"self",
".",
"connect",
"(",
")",
"self",
".",
"lhlo",
"(",
")",
"else",
":",
"self",
".",
"rset",
"(",
")",
"if",
"not",
"self",
".... | 27.5 | 17.083333 |
def build(self):
"""Builds a Solenoid using the defined attributes."""
self._molecules = []
if self.handedness == 'l':
handedness = -1
else:
handedness = 1
rot_ang = self.rot_ang * handedness
for i in range(self.num_of_repeats):
dup_unit = copy.deepcopy(self.repeat_unit)
z = (self.rise * i) * numpy.array([0, 0, 1])
dup_unit.translate(z)
dup_unit.rotate(rot_ang * i, [0, 0, 1])
self.extend(dup_unit)
self.relabel_all()
return | [
"def",
"build",
"(",
"self",
")",
":",
"self",
".",
"_molecules",
"=",
"[",
"]",
"if",
"self",
".",
"handedness",
"==",
"'l'",
":",
"handedness",
"=",
"-",
"1",
"else",
":",
"handedness",
"=",
"1",
"rot_ang",
"=",
"self",
".",
"rot_ang",
"*",
"hand... | 34.9375 | 12.375 |
def record(self):
# type: () -> bytes
'''
Return a string representation of the Directory Record date.
Parameters:
None.
Returns:
A string representing this Directory Record Date.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record Date not initialized')
return struct.pack(self.FMT, self.years_since_1900, self.month,
self.day_of_month, self.hour, self.minute,
self.second, self.gmtoffset) | [
"def",
"record",
"(",
"self",
")",
":",
"# type: () -> bytes",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Directory Record Date not initialized'",
")",
"return",
"struct",
".",
"pack",
"(",
"self",... | 35.3125 | 26.8125 |
def _set_channel_group(self, v, load=False):
"""
Setter method for channel_group, mapped from YANG variable /interface/ethernet/channel_group (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_channel_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_channel_group() directly.
YANG Description: A container of configuration leaf elements for managing
the channel-group membership.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=channel_group.channel_group, is_container='container', presence=False, yang_name="channel-group", rest_name="channel-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'LACP channel commands', u'cli-sequence-commands': None, u'cli-full-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_CHANNEL_GROUP_MEMBER'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """channel_group must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=channel_group.channel_group, is_container='container', presence=False, yang_name="channel-group", rest_name="channel-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'LACP channel commands', u'cli-sequence-commands': None, u'cli-full-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_CHANNEL_GROUP_MEMBER'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__channel_group = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_channel_group",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"... | 82.72 | 40.36 |
def checkFileExists(filename, directory=None):
"""
Checks to see if file specified exists in current or specified directory.
Default is current directory. Returns 1 if it exists, 0 if not found.
"""
if directory is not None:
fname = os.path.join(directory,filename)
else:
fname = filename
_exist = os.path.exists(fname)
return _exist | [
"def",
"checkFileExists",
"(",
"filename",
",",
"directory",
"=",
"None",
")",
":",
"if",
"directory",
"is",
"not",
"None",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"else",
":",
"fname",
"=",
"filename"... | 28.615385 | 19.384615 |
def num2hexstring(number, size=1, little_endian=False):
"""
Converts a number to a big endian hexstring of a suitable size, optionally little endian
:param {number} number
:param {number} size - The required size in hex chars, eg 2 for Uint8, 4 for Uint16. Defaults to 2.
:param {boolean} little_endian - Encode the hex in little endian form
:return {string}
"""
# if (type(number) != = 'number') throw new Error('num must be numeric')
# if (num < 0) throw new RangeError('num is unsigned (>= 0)')
# if (size % 1 !== 0) throw new Error('size must be a whole integer')
# if (!Number.isSafeInteger(num)) throw new RangeError(`num (${num}) must be a safe integer`)
size = size * 2
hexstring = hex(number)[2:]
if len(hexstring) % size != 0:
hexstring = ('0' * size + hexstring)[len(hexstring):]
if little_endian:
hexstring = reverse_hex(hexstring)
return hexstring | [
"def",
"num2hexstring",
"(",
"number",
",",
"size",
"=",
"1",
",",
"little_endian",
"=",
"False",
")",
":",
"# if (type(number) != = 'number') throw new Error('num must be numeric')",
"# if (num < 0) throw new RangeError('num is unsigned (>= 0)')",
"# if (size % 1 !== 0) throw new Er... | 48.526316 | 23.473684 |
def add_all_transport_reactions(model, boundaries, allow_duplicates=False):
"""Add all transport reactions to database and to model.
Add transport reactions for all boundaries. Boundaries are defined
by pairs (2-tuples) of compartment IDs. Transport reactions are
added for all compounds in the model, not just for compounds in the
two boundary compartments.
Args:
model: :class:`psamm.metabolicmodel.MetabolicModel`.
boundaries: Set of compartment boundary pairs.
Returns:
Set of IDs of reactions that were added.
"""
all_reactions = {}
if not allow_duplicates:
# TODO: Avoid adding reactions that already exist in the database.
# This should be integrated in the database.
for rxnid in model.database.reactions:
rx = model.database.get_reaction(rxnid)
all_reactions[rx] = rxnid
boundary_pairs = set()
for source, dest in boundaries:
if source != dest:
boundary_pairs.add(tuple(sorted((source, dest))))
added = set()
added_pairs = set()
initial_compounds = set(model.compounds)
reactions = set(model.database.reactions)
for compound in initial_compounds:
for c1, c2 in boundary_pairs:
compound1 = compound.in_compartment(c1)
compound2 = compound.in_compartment(c2)
pair = compound1, compound2
if pair in added_pairs:
continue
rxnid_tp = create_transport_id(reactions, compound1, compound2)
reaction_tp = Reaction(Direction.Both, {
compound1: -1,
compound2: 1
})
if reaction_tp not in all_reactions:
model.database.set_reaction(rxnid_tp, reaction_tp)
reactions.add(rxnid_tp)
else:
rxnid_tp = all_reactions[reaction_tp]
if not model.has_reaction(rxnid_tp):
added.add(rxnid_tp)
model.add_reaction(rxnid_tp)
added_pairs.add(pair)
return added | [
"def",
"add_all_transport_reactions",
"(",
"model",
",",
"boundaries",
",",
"allow_duplicates",
"=",
"False",
")",
":",
"all_reactions",
"=",
"{",
"}",
"if",
"not",
"allow_duplicates",
":",
"# TODO: Avoid adding reactions that already exist in the database.",
"# This should... | 34.220339 | 18.508475 |
def _validate_date_like_dtype(dtype):
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be casted to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
the frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('{error}'.format(error=e))
if typ != 'generic' and typ != 'ns':
msg = '{name!r} is too specific of a frequency, try passing {type!r}'
raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__)) | [
"def",
"_validate_date_like_dtype",
"(",
"dtype",
")",
":",
"try",
":",
"typ",
"=",
"np",
".",
"datetime_data",
"(",
"dtype",
")",
"[",
"0",
"]",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"'{error}'",
".",
"format",
"(",
"error",... | 32 | 21.826087 |
def expand_indent(line):
"""
Return the amount of indentation.
Tabs are expanded to the next multiple of 8.
>>> expand_indent(' ')
4
>>> expand_indent('\\t')
8
>>> expand_indent(' \\t')
8
>>> expand_indent(' \\t')
8
>>> expand_indent(' \\t')
16
"""
result = 0
for char in line:
if char == '\t':
result = result / 8 * 8 + 8
elif char == ' ':
result += 1
else:
break
return result | [
"def",
"expand_indent",
"(",
"line",
")",
":",
"result",
"=",
"0",
"for",
"char",
"in",
"line",
":",
"if",
"char",
"==",
"'\\t'",
":",
"result",
"=",
"result",
"/",
"8",
"*",
"8",
"+",
"8",
"elif",
"char",
"==",
"' '",
":",
"result",
"+=",
"1",
... | 20.08 | 17.92 |
def register(cache):
''' Registers a cache. '''
global caches
name = cache().name
if not caches.has_key(name):
caches[name] = cache | [
"def",
"register",
"(",
"cache",
")",
":",
"global",
"caches",
"name",
"=",
"cache",
"(",
")",
".",
"name",
"if",
"not",
"caches",
".",
"has_key",
"(",
"name",
")",
":",
"caches",
"[",
"name",
"]",
"=",
"cache"
] | 18.857143 | 21.142857 |
def simulate(self):
"""Generates a random integer in the available range."""
min_ = (-sys.maxsize - 1) if self._min is None else self._min
max_ = sys.maxsize if self._max is None else self._max
return random.randint(min_, max_) | [
"def",
"simulate",
"(",
"self",
")",
":",
"min_",
"=",
"(",
"-",
"sys",
".",
"maxsize",
"-",
"1",
")",
"if",
"self",
".",
"_min",
"is",
"None",
"else",
"self",
".",
"_min",
"max_",
"=",
"sys",
".",
"maxsize",
"if",
"self",
".",
"_max",
"is",
"N... | 51 | 14.6 |
def make_blob(self, store=current_store):
"""Gets the byte string of the image from the ``store``.
:param store: the storage which contains the image.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the binary data of the image
:rtype: :class:`str`
"""
with self.open_file(store) as f:
return f.read() | [
"def",
"make_blob",
"(",
"self",
",",
"store",
"=",
"current_store",
")",
":",
"with",
"self",
".",
"open_file",
"(",
"store",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] | 37.538462 | 15.307692 |
def device_gen(chain, urls):
"""Device object generator."""
itr = iter(urls)
last = next(itr)
for url in itr:
yield Device(chain, make_hop_info_from_url(last), driver_name='jumphost', is_target=False)
last = url
yield Device(chain, make_hop_info_from_url(last), driver_name='generic', is_target=True) | [
"def",
"device_gen",
"(",
"chain",
",",
"urls",
")",
":",
"itr",
"=",
"iter",
"(",
"urls",
")",
"last",
"=",
"next",
"(",
"itr",
")",
"for",
"url",
"in",
"itr",
":",
"yield",
"Device",
"(",
"chain",
",",
"make_hop_info_from_url",
"(",
"last",
")",
... | 41.125 | 25.625 |
def requires_private_key(func):
"""
Decorator for functions that require the private key to be defined.
"""
def func_wrapper(self, *args, **kwargs):
if hasattr(self, "_DiffieHellman__private_key"):
func(self, *args, **kwargs)
else:
self.generate_private_key()
func(self, *args, **kwargs)
return func_wrapper | [
"def",
"requires_private_key",
"(",
"func",
")",
":",
"def",
"func_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"_DiffieHellman__private_key\"",
")",
":",
"func",
"(",
"self",
",",
"*",
... | 28.384615 | 14.384615 |
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist | [
"def",
"_candidate_tempdir_list",
"(",
")",
":",
"dirlist",
"=",
"[",
"]",
"# First, try the environment.",
"for",
"envname",
"in",
"'TMPDIR'",
",",
"'TEMP'",
",",
"'TMP'",
":",
"dirname",
"=",
"_os",
".",
"getenv",
"(",
"envname",
")",
"if",
"dirname",
":",... | 28.583333 | 17.583333 |
def _clean_servers(self):
"""
Clean the list of running console servers
"""
if len(self._telnet_servers) > 0:
for telnet_server in self._telnet_servers:
telnet_server.close()
yield from telnet_server.wait_closed()
self._telnet_servers = [] | [
"def",
"_clean_servers",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"_telnet_servers",
")",
">",
"0",
":",
"for",
"telnet_server",
"in",
"self",
".",
"_telnet_servers",
":",
"telnet_server",
".",
"close",
"(",
")",
"yield",
"from",
"telnet_serve... | 35.444444 | 6.555556 |
def notify(self, level, value, target=None, ntype=None, rule=None):
"""Notify main reactor about event."""
# Did we see the event before?
if target in self.state and level == self.state[target]:
return False
# Do we see the event first time?
if target not in self.state and level == 'normal' \
and not self.reactor.options['send_initial']:
return False
self.state[target] = level
return self.reactor.notify(level, self, value, target=target, ntype=ntype, rule=rule) | [
"def",
"notify",
"(",
"self",
",",
"level",
",",
"value",
",",
"target",
"=",
"None",
",",
"ntype",
"=",
"None",
",",
"rule",
"=",
"None",
")",
":",
"# Did we see the event before?",
"if",
"target",
"in",
"self",
".",
"state",
"and",
"level",
"==",
"se... | 42.384615 | 20.384615 |
def _call(self, path, method, body=None, headers=None):
"""
Wrapper around http.do_call that transforms some HTTPError into
our own exceptions
"""
try:
resp = self.http.do_call(path, method, body, headers)
except http.HTTPError as err:
if err.status == 401:
raise PermissionError('Insufficient permissions to query ' +
'%s with user %s :%s' % (path, self.user, err))
raise
return resp | [
"def",
"_call",
"(",
"self",
",",
"path",
",",
"method",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"try",
":",
"resp",
"=",
"self",
".",
"http",
".",
"do_call",
"(",
"path",
",",
"method",
",",
"body",
",",
"headers",
")",
... | 38.461538 | 17.692308 |
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [{key}] which is not an axis or "
"data_column".format(key=k)) | [
"def",
"validate_min_itemsize",
"(",
"self",
",",
"min_itemsize",
")",
":",
"if",
"min_itemsize",
"is",
"None",
":",
"return",
"if",
"not",
"isinstance",
"(",
"min_itemsize",
",",
"dict",
")",
":",
"return",
"q",
"=",
"self",
".",
"queryables",
"(",
")",
... | 33.736842 | 14.105263 |
def fully_expanded_path(self):
"""
Returns the absolutely absolute path. Calls os.(
normpath, normcase, expandvars and expanduser).
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path))))) | [
"def",
"fully_expanded_path",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
"... | 35.2 | 8.6 |
def valuecount(table, field, value, missing=None):
"""
Count the number of occurrences of `value` under the given field. Returns
the absolute count and relative frequency as a pair. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar'],
... ['a', 1],
... ['b', 2],
... ['b', 7]]
>>> etl.valuecount(table, 'foo', 'b')
(2, 0.6666666666666666)
The `field` argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes.
"""
total = 0
vs = 0
for v in values(table, field, missing=missing):
total += 1
if v == value:
vs += 1
return vs, float(vs)/total | [
"def",
"valuecount",
"(",
"table",
",",
"field",
",",
"value",
",",
"missing",
"=",
"None",
")",
":",
"total",
"=",
"0",
"vs",
"=",
"0",
"for",
"v",
"in",
"values",
"(",
"table",
",",
"field",
",",
"missing",
"=",
"missing",
")",
":",
"total",
"+... | 29 | 18.92 |
def clear(self, *resource_types):
    """Drop the cache for each given APIResource class.

    Called with no arguments, clears every cached resource type.
    """
    targets = resource_types if resource_types else tuple(self.__caches.keys())
    for resource_cls in targets:
        # Empty the cache object before dropping it so no lingering
        # reference can see stale entries.
        cache = self.__caches[resource_cls]
        cache.clear()
        del self.__caches[resource_cls]
"def",
"clear",
"(",
"self",
",",
"*",
"resource_types",
")",
":",
"resource_types",
"=",
"resource_types",
"or",
"tuple",
"(",
"self",
".",
"__caches",
".",
"keys",
"(",
")",
")",
"for",
"cls",
"in",
"resource_types",
":",
"# Clear and delete cache instances ... | 49.75 | 16.75 |
def getenv(option_name, default=None):
    """Look up *option_name* in the environment under the FASTFOOD namespace.

    The environment variable consulted is ``<NAMESPACE>_<OPTION_NAME>``
    (both upper-cased); *default* is returned when it is unset.
    """
    variable = "{0}_{1}".format(NAMESPACE.upper(), option_name.upper())
    return os.environ.get(variable, default)
"def",
"getenv",
"(",
"option_name",
",",
"default",
"=",
"None",
")",
":",
"env",
"=",
"\"%s_%s\"",
"%",
"(",
"NAMESPACE",
".",
"upper",
"(",
")",
",",
"option_name",
".",
"upper",
"(",
")",
")",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"e... | 53 | 5.75 |
def insertDict(self, tblname, d, fields = None):
    '''Insert a dictionary whose keys match the fieldnames of tblname.

    :param tblname: name of the target table. NOTE(review): the table and
        field names are interpolated directly into the SQL string, so they
        must come from trusted code, never from user input.
    :param d: mapping of column name -> value.
    :param fields: explicit column ordering; defaults to sorted(d.keys()).
    :raises Exception: wraps any database error after logging it to stderr.
    '''
    if fields is None:
        fields = sorted(d.keys())
    # Initialize both so the except handler below can safely test them even
    # when the failure happens before either assignment completes.
    SQL = None
    values = None
    try:
        placeholders = ','.join(['%s'] * len(fields))
        SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (tblname, ', '.join(fields), placeholders)
        values = tuple([d[k] for k in fields])
        self.locked_execute(SQL, parameters = values)
    except Exception as e:
        # Only log the query text when it was actually built.
        if SQL and values:
            sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        sys.stderr.write("\nError: '%s'.\n" % (str(e)))
        sys.stderr.flush()
        raise Exception("Error occurred during database insertion: '%s'." % str(e))
"def",
"insertDict",
"(",
"self",
",",
"tblname",
",",
"d",
",",
"fields",
"=",
"None",
")",
":",
"if",
"fields",
"==",
"None",
":",
"fields",
"=",
"sorted",
"(",
"d",
".",
"keys",
"(",
")",
")",
"values",
"=",
"None",
"try",
":",
"SQL",
"=",
"... | 46.875 | 30 |
def bounding_sphere(self):
    """
    A minimum volume bounding sphere for the current mesh.

    The returned Sphere primitive carries an unpadded, exact
    sphere_radius: every vertex of the mesh lies within sphere_radius
    of sphere_center, although the faceted sphere geometry itself may
    not contain every vertex.

    Returns
    --------
    minball: trimesh.primitives.Sphere
      Sphere primitive containing current mesh
    """
    from . import primitives, nsphere

    fit_center, fit_radius = nsphere.minimum_nsphere(self)
    return primitives.Sphere(center=fit_center,
                             radius=fit_radius,
                             mutable=False)
"def",
"bounding_sphere",
"(",
"self",
")",
":",
"from",
".",
"import",
"primitives",
",",
"nsphere",
"center",
",",
"radius",
"=",
"nsphere",
".",
"minimum_nsphere",
"(",
"self",
")",
"minball",
"=",
"primitives",
".",
"Sphere",
"(",
"center",
"=",
"cente... | 38.65 | 17.65 |
def _prevNonCommentBlock(self, block):
"""Return the closest non-empty line, ignoring comments
(result <= line). Return -1 if the document
"""
block = self._prevNonEmptyBlock(block)
while block.isValid() and self._isCommentBlock(block):
block = self._prevNonEmptyBlock(block)
return block | [
"def",
"_prevNonCommentBlock",
"(",
"self",
",",
"block",
")",
":",
"block",
"=",
"self",
".",
"_prevNonEmptyBlock",
"(",
"block",
")",
"while",
"block",
".",
"isValid",
"(",
")",
"and",
"self",
".",
"_isCommentBlock",
"(",
"block",
")",
":",
"block",
"=... | 42.625 | 8.875 |
def _make_elastic_range(begin, end):
    """Generate an S-curved range of pages.

    Start from both left and right, adding exponentially growing indexes,
    until the two trends collide.
    """
    # Limit growth for huge numbers of pages.
    starting_factor = max(1, (end - begin) // 100)
    factor = _iter_factors(starting_factor)
    left_half, right_half = [], []
    # Removed a redundant duplicate assignment of right_val that
    # immediately followed this line in the original.
    left_val, right_val = begin, end
    while left_val < right_val:
        left_half.append(left_val)
        right_half.append(right_val)
        next_factor = next(factor)
        left_val = begin + next_factor
        right_val = end - next_factor
    # If the trends happen to meet exactly at one point, retain it.
    if left_val == right_val:
        left_half.append(left_val)
    right_half.reverse()
    return left_half + right_half
"def",
"_make_elastic_range",
"(",
"begin",
",",
"end",
")",
":",
"# Limit growth for huge numbers of pages.",
"starting_factor",
"=",
"max",
"(",
"1",
",",
"(",
"end",
"-",
"begin",
")",
"//",
"100",
")",
"factor",
"=",
"_iter_factors",
"(",
"starting_factor",
... | 35.478261 | 10 |
def handle_args():
    """
    Build the command-line parser and parse sys.argv; default values for
    database name and logfile path are defined here.
    """
    dbname_default = os.environ.get(
        'VOEVENTDB_DBNAME',
        dbconfig.testdb_corpus_url.database)
    logfile_default = os.path.expanduser("~/voeventdb_packet_ingest.log")
    parser = argparse.ArgumentParser(
        prog=os.path.basename(__file__),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    parser.description = """
    Ingest a packet from stdin and attempt to ingest into a voeventdb database.
    Usage:
    cat test.xml | voeventdb_ingest_packet.py -d mydb -l /tmp/my.log
    """
    parser.add_argument('-d', '--dbname', nargs='?',
                        default=str(dbname_default),
                        help='Database name')
    parser.add_argument('-l', '--logfile_path', nargs='?',
                        default=logfile_default,
                        )
    return parser.parse_args()
"def",
"handle_args",
"(",
")",
":",
"default_database_name",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'VOEVENTDB_DBNAME'",
",",
"dbconfig",
".",
"testdb_corpus_url",
".",
"database",
")",
"default_logfile_path",
"=",
"os",
".",
"path",
".",
"expanduser",
... | 29.612903 | 20.096774 |
def computeRange(corners):
    """Determine the x and y ranges spanned by an array of pixel positions.

    ``corners`` is assumed to be a 2-D numpy array whose first column holds
    x coordinates and second column y coordinates; returns two (min, max)
    pairs.
    """
    xs = corners[:, 0]
    ys = corners[:, 1]
    return (xs.min(), xs.max()), (ys.min(), ys.max())
"def",
"computeRange",
"(",
"corners",
")",
":",
"x",
"=",
"corners",
"[",
":",
",",
"0",
"]",
"y",
"=",
"corners",
"[",
":",
",",
"1",
"]",
"_xrange",
"=",
"(",
"np",
".",
"minimum",
".",
"reduce",
"(",
"x",
")",
",",
"np",
".",
"maximum",
"... | 40.285714 | 14.428571 |
def _ParseRecord(self, parser_mediator, text_file_object):
    """Parses an Opera global history record.

    A record consists of four consecutive text lines: title, URL,
    POSIX timestamp and popularity index.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      text_file_object (dfvfs.TextFile): text file.

    Returns:
      bool: True if the record was successfully parsed.
    """
    # Each field is read on its own line; a decode failure aborts the
    # record with an extraction warning instead of raising.
    try:
      title = text_file_object.readline()
    except UnicodeDecodeError:
      parser_mediator.ProduceExtractionWarning(
          'unable to read and decode title')
      return False
    # An empty read means end of file: no further records.
    if not title:
      return False
    try:
      url = text_file_object.readline()
    except UnicodeDecodeError:
      parser_mediator.ProduceExtractionWarning(
          'unable to read and decode url')
      return False
    try:
      timestamp = text_file_object.readline()
    except UnicodeDecodeError:
      parser_mediator.ProduceExtractionWarning(
          'unable to read and decode timestamp')
      return False
    try:
      popularity_index = text_file_object.readline()
    except UnicodeDecodeError:
      parser_mediator.ProduceExtractionWarning(
          'unable to read and decode popularity index')
      return False
    event_data = OperaGlobalHistoryEventData()
    event_data.url = url.strip()
    # Only store the title when it differs from the URL.
    title = title.strip()
    if title != event_data.url:
      event_data.title = title
    popularity_index = popularity_index.strip()
    try:
      event_data.popularity_index = int(popularity_index, 10)
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'unable to convert popularity index: {0:s}'.format(popularity_index))
    # NOTE(review): if the int() conversion above failed,
    # event_data.popularity_index presumably remains None, in which case
    # the `< 0` comparison below would raise TypeError on Python 3 —
    # confirm intended behavior.
    if event_data.popularity_index < 0:
      event_data.description = 'First and Only Visit'
    else:
      event_data.description = 'Last Visit'
    timestamp = timestamp.strip()
    try:
      timestamp = int(timestamp, 10)
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'unable to convert timestamp: {0:s}'.format(timestamp))
      timestamp = None
    # Fall back to a semantic "Invalid" time value when the timestamp
    # could not be parsed, so an event is still produced.
    if timestamp is None:
      date_time = dfdatetime_semantic_time.SemanticTime('Invalid')
    else:
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
    return True
"def",
"_ParseRecord",
"(",
"self",
",",
"parser_mediator",
",",
"text_file_object",
")",
":",
"try",
":",
"title",
"=",
"text_file_object",
".",
"readline",
"(",
")",
"except",
"UnicodeDecodeError",
":",
"parser_mediator",
".",
"ProduceExtractionWarning",
"(",
"'... | 29.3 | 20.525 |
def insert_completion(self, p_insert):
    """
    Insert the currently chosen completion (p_insert) at the proper
    place in the edit text and move the cursor just past it.
    """
    prefix, suffix = self._surrounding_text
    self.set_edit_text(prefix + p_insert + suffix)
    self.set_edit_pos(len(prefix) + len(p_insert))
"def",
"insert_completion",
"(",
"self",
",",
"p_insert",
")",
":",
"start",
",",
"end",
"=",
"self",
".",
"_surrounding_text",
"final_text",
"=",
"start",
"+",
"p_insert",
"+",
"end",
"self",
".",
"set_edit_text",
"(",
"final_text",
")",
"self",
".",
"set... | 42.222222 | 9.555556 |
def hexdump(src, length=16, sep='.'):
    """
    Print a classic hexdump of *src* to stdout.

    Hexdump function by sbz and 7h3rAm on Github:
    (https://gist.github.com/7h3rAm/5603718).

    :param src: Source, the string to be shown in hexadecimal format.
        NOTE(review): assumes a str of 8-bit characters (ord(x) is used
        per character); a Python 3 bytes object would yield ints — confirm.
    :param length: Number of hex characters to print in one row
    :param sep: Unprintable characters representation
    :return: None; the formatted dump is printed.
    """
    # Translation table: keep printable single characters (repr is exactly
    # three chars, e.g. 'A'), replace everything else with `sep`.
    filtr = ''.join([chr(x) if len(repr(chr(x))) == 3 else sep for x in range(256)])
    lines = []
    # range() instead of Python-2-only xrange(): identical iteration on
    # Python 2 and fixes NameError on Python 3.
    for c in range(0, len(src), length):
        chars = src[c:c + length]
        hexstring = ' '.join(["%02x" % ord(x) for x in chars])
        if len(hexstring) > 24:
            # Insert a mid-row gap after the first eight byte columns.
            hexstring = "%s %s" % (hexstring[:24], hexstring[24:])
        printable = ''.join(["%s" % ((ord(x) <= 127 and filtr[ord(x)]) or sep) for x in chars])
        lines.append("  %02x:   %-*s  |%s|\n" % (c, length * 3, hexstring, printable))
    print(''.join(lines))
"def",
"hexdump",
"(",
"src",
",",
"length",
"=",
"16",
",",
"sep",
"=",
"'.'",
")",
":",
"filtr",
"=",
"''",
".",
"join",
"(",
"[",
"(",
"len",
"(",
"repr",
"(",
"chr",
"(",
"x",
")",
")",
")",
"==",
"3",
")",
"and",
"chr",
"(",
"x",
")"... | 46.157895 | 19.105263 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.