text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def register_user(self, user, allow_login=None, send_email=None,
                  _force_login_without_confirmation=False):
    """
    Service method to register a user.

    Sends the `user_registered` signal.

    Returns True if the user has been logged in, False otherwise.
    """
    # A user may be logged in immediately when confirmation is not
    # required, explicitly disabled, or forcibly bypassed.
    can_login = (not self.security.confirmable
                 or self.security.login_without_confirmation
                 or _force_login_without_confirmation)
    if allow_login is not None:
        can_login = allow_login and can_login
    if can_login:
        user.active = True

    # confirmation token depends on having user.id set, which requires
    # the user be committed to the database
    self.user_manager.save(user, commit=True)

    token = None
    confirmation_link = None
    if self.security.confirmable and not _force_login_without_confirmation:
        token = self.security_utils_service.generate_confirmation_token(user)
        confirmation_link = url_for('security_controller.confirm_email',
                                    token=token, _external=True)

    user_registered.send(app._get_current_object(),
                         user=user, confirm_token=token)

    # send_email=None means "defer to the app configuration"
    wants_email = (send_email or (
        send_email is None
        and app.config.SECURITY_SEND_REGISTER_EMAIL))
    if wants_email:
        self.send_mail(_('flask_unchained.bundles.security:email_subject.register'),
                       to=user.email,
                       template='security/email/welcome.html',
                       user=user,
                       confirmation_link=confirmation_link)

    if can_login:
        return self.login_user(user, force=_force_login_without_confirmation)
    return False
"def",
"register_user",
"(",
"self",
",",
"user",
",",
"allow_login",
"=",
"None",
",",
"send_email",
"=",
"None",
",",
"_force_login_without_confirmation",
"=",
"False",
")",
":",
"should_login_user",
"=",
"(",
"not",
"self",
".",
"security",
".",
"confirmabl... | 44.595238 | 22.880952 |
def save(self):
    """
    Save this routing area on the Ariane server (create or update).

    Builds a JSON payload from the locally-set fields, POSTs it through
    the RoutingAreaService requester and, on success, refreshes this
    object's id and re-syncs any locations pending addition/removal.

    :return: self
    """
    LOGGER.debug("RoutingArea.save")
    post_payload = {}
    consolidated_loc_id = []

    if self.id is not None:
        post_payload['routingAreaID'] = self.id
    if self.name is not None:
        post_payload['routingAreaName'] = self.name
    if self.description is not None:
        post_payload['routingAreaDescription'] = self.description
    if self.type is not None:
        post_payload['routingAreaType'] = self.type
    if self.multicast is not None:
        post_payload['routingAreaMulticast'] = self.multicast

    if self.loc_ids is not None:
        consolidated_loc_id = copy.deepcopy(self.loc_ids)
        if self.loc_2_rm is not None:
            for loc_2_rm in self.loc_2_rm:
                if loc_2_rm.id is None:
                    # resolve the server-side id before removing it
                    loc_2_rm.sync()
                consolidated_loc_id.remove(loc_2_rm.id)
        if self.loc_2_add is not None:
            for loc_2_add in self.loc_2_add:
                if loc_2_add.id is None:
                    # persist the new location first so it has an id
                    loc_2_add.save()
                consolidated_loc_id.append(loc_2_add.id)
        post_payload['routingAreaLocationsID'] = consolidated_loc_id

    if self.subnet_ids is not None:
        post_payload['routingAreaSubnetsID'] = self.subnet_ids

    args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}}
    response = RoutingAreaService.requester.call(args)
    if response.rc != 0:
        LOGGER.warning(
            'RoutingArea.save - Problem while saving routing area' + self.name +
            '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
            " (" + str(response.rc) + ")"
        )
    else:
        self.id = response.response_content['routingAreaID']
        if self.loc_2_add is not None:
            for loc_2_add in self.loc_2_add:
                loc_2_add.sync()
            # BUGFIX: clear() was previously called unconditionally and
            # raised AttributeError whenever loc_2_add was None.
            self.loc_2_add.clear()
        if self.loc_2_rm is not None:
            for loc_2_rm in self.loc_2_rm:
                loc_2_rm.sync()
            # BUGFIX: same unconditional-clear problem for loc_2_rm.
            self.loc_2_rm.clear()
    self.sync()
    return self
"def",
"save",
"(",
"self",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"RoutingArea.save\"",
")",
"post_payload",
"=",
"{",
"}",
"consolidated_loc_id",
"=",
"[",
"]",
"if",
"self",
".",
"id",
"is",
"not",
"None",
":",
"post_payload",
"[",
"'routingAreaID'",
... | 37.716667 | 17.483333 |
def connect(self, obj, network):
    """Connect to the specified AP.

    Lists wpa_supplicant's configured networks on the interface named by
    ``obj['name']`` and, if one matches ``network.ssid``, selects it.
    Returns None when no configured networks exist.
    """
    network_summary = self._send_cmd_to_wpas(
        obj['name'],
        'LIST_NETWORKS',
        True)
    # First line of LIST_NETWORKS output is a header row; strip the
    # trailing newline before splitting.
    network_summary = network_summary[:-1].split('\n')
    if len(network_summary) == 1:
        # No configured networks to select from.
        # BUGFIX: previously returned the undefined name `networks`,
        # which raised NameError.
        return None
    for line in network_summary[1:]:
        values = line.split('\t')
        if values[1] == network.ssid:
            network_summary = self._send_cmd_to_wpas(
                obj['name'],
                'SELECT_NETWORK {}'.format(values[0]),
                True)
"def",
"connect",
"(",
"self",
",",
"obj",
",",
"network",
")",
":",
"network_summary",
"=",
"self",
".",
"_send_cmd_to_wpas",
"(",
"obj",
"[",
"'name'",
"]",
",",
"'LIST_NETWORKS'",
",",
"True",
")",
"network_summary",
"=",
"network_summary",
"[",
":",
"-... | 33.222222 | 13.888889 |
def apply(self, function):
    """
    Apply ``function`` in place to the color list of every cut.

    For each row or column in ``self.cuts``, its colors are read out as
    a list, mutated by ``function``, then written back to the layout.
    """
    for section in self.cuts:
        colors = self.read(section)
        function(colors)
        self.write(section, colors)
"def",
"apply",
"(",
"self",
",",
"function",
")",
":",
"for",
"cut",
"in",
"self",
".",
"cuts",
":",
"value",
"=",
"self",
".",
"read",
"(",
"cut",
")",
"function",
"(",
"value",
")",
"self",
".",
"write",
"(",
"cut",
",",
"value",
")"
] | 32.9 | 12.3 |
def post(self, path, args, wait=False):
    """POST an HTTP request to a daemon

    :param path: path to do the request
    :type path: str
    :param args: args to add in the request (mutated in place: each value
        is replaced by its serialized form)
    :type args: dict
    :param wait: True for a long timeout
    :type wait: bool
    :return: Content of the HTTP response if server returned 200
    :rtype: str
    :raises HTTPClientDataException: if the server answered a non-200 status
    :raises HTTPClientTimeoutException: if the request timed out
    :raises HTTPClientConnectionException: if the connection failed
    :raises HTTPClientException: for any other request error
    """
    uri = self.make_uri(path)
    timeout = self.make_timeout(wait)
    # NOTE: serializes values in place -- the caller's `args` dict is mutated.
    for (key, value) in list(args.items()):
        args[key] = serialize(value, True)
    try:
        logger.debug("post: %s, timeout: %s, params: %s", uri, timeout, args)
        rsp = self._requests_con.post(uri, json=args, timeout=timeout, verify=self.strong_ssl)
        logger.debug("got: %d - %s", rsp.status_code, rsp.text)
        if rsp.status_code != 200:
            raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
        return rsp.content
    # ConnectTimeout is checked alongside Timeout so timeouts are never
    # misreported as plain connection errors.
    except (requests.Timeout, requests.ConnectTimeout):
        raise HTTPClientTimeoutException(timeout, uri)
    except requests.ConnectionError as exp:
        raise HTTPClientConnectionException(uri, exp.args[0])
    except Exception as exp:
        raise HTTPClientException('Request error to %s: %s' % (uri, exp))
"def",
"post",
"(",
"self",
",",
"path",
",",
"args",
",",
"wait",
"=",
"False",
")",
":",
"uri",
"=",
"self",
".",
"make_uri",
"(",
"path",
")",
"timeout",
"=",
"self",
".",
"make_timeout",
"(",
"wait",
")",
"for",
"(",
"key",
",",
"value",
")",... | 44.586207 | 16.931034 |
def _update_classmethod(self, oldcm, newcm):
"""Update a classmethod update."""
# While we can't modify the classmethod object itself (it has no
# mutable attributes), we *can* extract the underlying function
# (by calling __get__(), which returns a method object) and update
# it in-place. We don't have the class available to pass to
# __get__() but any object except None will do.
self._update(None, None, oldcm.__get__(0), newcm.__get__(0)) | [
"def",
"_update_classmethod",
"(",
"self",
",",
"oldcm",
",",
"newcm",
")",
":",
"# While we can't modify the classmethod object itself (it has no",
"# mutable attributes), we *can* extract the underlying function",
"# (by calling __get__(), which returns a method object) and update",
"# i... | 61.75 | 21.5 |
def check_uniqueness(self, *args):
    """For a unique index, check if the given args are not used twice

    For the parameters, see BaseIndex.check_uniqueness
    """
    prepared = self.prepare_args(args, transform=False)
    self.get_unique_index().check_uniqueness(*prepared)
"def",
"check_uniqueness",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"get_unique_index",
"(",
")",
".",
"check_uniqueness",
"(",
"*",
"self",
".",
"prepare_args",
"(",
"args",
",",
"transform",
"=",
"False",
")",
")"
] | 38.285714 | 22.285714 |
def update(self, *args, **kwargs):
    '''
    Updates multiple attributes in a model. If ``args`` are provided, this
    method will assign attributes in the order returned by
    ``list(self._columns)`` until one or both are exhausted.

    If ``kwargs`` are provided, this method will assign attributes to the
    names provided, after ``args`` have been processed.
    '''
    # Positional values map onto the model columns in declaration order.
    for column, value in zip(self._columns, args):
        setattr(self, column, value)
    # Keyword values override/extend by explicit attribute name.
    for column, value in kwargs.items():
        setattr(self, column, value)
    return self
"def",
"update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"sa",
"=",
"setattr",
"for",
"a",
",",
"v",
"in",
"zip",
"(",
"self",
".",
"_columns",
",",
"args",
")",
":",
"sa",
"(",
"self",
",",
"a",
",",
"v",
")",
"fo... | 37.8 | 21.533333 |
def utime(self, *args, **kwargs):
    """ Set the access and modified times of the file specified by path. """
    # Delegate directly to os.utime on the extended (long-path-safe) path.
    target = self.extended_path
    os.utime(target, *args, **kwargs)
"def",
"utime",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"os",
".",
"utime",
"(",
"self",
".",
"extended_path",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 55.333333 | 6.666667 |
def python_value(self, dtype, dvalue):
    """Convert a CLIPS type into Python.

    Scalar types are handled through the CONVERTERS lookup table; the
    compound types (multifield, fact address, instance address) are
    wrapped into their higher-level Python counterparts.  Unrecognized
    types yield None.
    """
    try:
        return CONVERTERS[dtype](dvalue)
    except KeyError:
        # dtype has no scalar converter: fall through to compound types.
        if dtype == clips.common.CLIPSType.MULTIFIELD:
            # NOTE(review): dvalue is not passed here -- multifield_to_list()
            # presumably reads the value from self; confirm this is intended.
            return self.multifield_to_list()
        if dtype == clips.common.CLIPSType.FACT_ADDRESS:
            return clips.facts.new_fact(self._env, lib.to_pointer(dvalue))
        if dtype == clips.common.CLIPSType.INSTANCE_ADDRESS:
            return clips.classes.Instance(self._env, lib.to_pointer(dvalue))
        return None
"def",
"python_value",
"(",
"self",
",",
"dtype",
",",
"dvalue",
")",
":",
"try",
":",
"return",
"CONVERTERS",
"[",
"dtype",
"]",
"(",
"dvalue",
")",
"except",
"KeyError",
":",
"if",
"dtype",
"==",
"clips",
".",
"common",
".",
"CLIPSType",
".",
"MULTIF... | 44 | 19.923077 |
def _hyphens_to_dashes(self):
"""Transform hyphens to various kinds of dashes"""
problematic_hyphens = [(r'-([.,!)])', r'---\1'),
(r'(?<=\d)-(?=\d)', '--'),
(r'(?<=\s)-(?=\s)', '---')]
for problem_case in problematic_hyphens:
self._regex_replacement(*problem_case) | [
"def",
"_hyphens_to_dashes",
"(",
"self",
")",
":",
"problematic_hyphens",
"=",
"[",
"(",
"r'-([.,!)])'",
",",
"r'---\\1'",
")",
",",
"(",
"r'(?<=\\d)-(?=\\d)'",
",",
"'--'",
")",
",",
"(",
"r'(?<=\\s)-(?=\\s)'",
",",
"'---'",
")",
"]",
"for",
"problem_case",
... | 38.222222 | 16.666667 |
def unpublish_view(self, request, object_id):
    """
    Instantiates a class-based view that redirects to Wagtail's 'unpublish'
    view for models that extend 'Page' (if the user has sufficient
    permissions), so that the user can be reliably redirected back to the
    index_view once the action completes.  Override the
    'unpublish_view_class' attribute to change the view class used.
    """
    view = self.unpublish_view_class.as_view(
        model_admin=self, object_id=object_id)
    return view(request)
"def",
"unpublish_view",
"(",
"self",
",",
"request",
",",
"object_id",
")",
":",
"kwargs",
"=",
"{",
"'model_admin'",
":",
"self",
",",
"'object_id'",
":",
"object_id",
"}",
"view_class",
"=",
"self",
".",
"unpublish_view_class",
"return",
"view_class",
".",
... | 53.583333 | 18.416667 |
def get_weights_of_nn_sites(self, structure, n):
    """
    Get weight associated with each near neighbor of site with
    index n in structure.

    Args:
        structure (Structure): input structure.
        n (integer): index of site for which to determine the weights.
    Returns:
        weights (list of floats): near-neighbor weights.
    """
    neighbor_info = self.get_nn_info(structure, n)
    return [entry['weight'] for entry in neighbor_info]
"def",
"get_weights_of_nn_sites",
"(",
"self",
",",
"structure",
",",
"n",
")",
":",
"return",
"[",
"e",
"[",
"'weight'",
"]",
"for",
"e",
"in",
"self",
".",
"get_nn_info",
"(",
"structure",
",",
"n",
")",
"]"
] | 34.384615 | 20.692308 |
def distance_strength_of_connection(A, V, theta=2.0, relative_drop=True):
    """Distance based strength-of-connection.

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format
    V : array
        Coordinates of the vertices of the graph of A
    theta : float
        Row-wise drop tolerance; interpretation depends on relative_drop
    relative_drop : bool
        If false, then a connection must be within a distance of theta
        from a point to be strongly connected.
        If true, then the closest connection is always strong, and other points
        must be within theta times the smallest distance to be strong

    Returns
    -------
    C : csr_matrix
        C(i,j) = distance(point_i, point_j)
        Strength of connection matrix where strength values are
        distances, i.e. the smaller the value, the stronger the connection.
        Sparsity pattern of C is copied from A.

    Notes
    -----
    - theta is a drop tolerance that is applied row-wise
    - If a BSR matrix given, then the return matrix is still CSR. The strength
      is given between super nodes based on the BSR block size.

    Examples
    --------
    >>> from pyamg.gallery import load_example
    >>> from pyamg.strength import distance_strength_of_connection
    >>> data = load_example('airfoil')
    >>> A = data['A'].tocsr()
    >>> S = distance_strength_of_connection(data['A'], data['vertices'])
    """
    # Amalgamate for the supernode case: collapse each BSR block to a
    # single (super)node so distances are taken between supernodes.
    if sparse.isspmatrix_bsr(A):
        sn = int(A.shape[0] / A.blocksize[0])
        u = np.ones((A.data.shape[0],))
        A = sparse.csr_matrix((u, A.indices, A.indptr), shape=(sn, sn))
    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)
    dim = V.shape[1]
    # Create two arrays for differencing the different coordinates such
    # that C(i,j) = distance(point_i, point_j); rows expands indptr into
    # one row index per stored entry of A.
    cols = A.indices
    rows = np.repeat(np.arange(A.shape[0]), A.indptr[1:] - A.indptr[0:-1])
    # Insert difference for each coordinate into C
    C = (V[rows, 0] - V[cols, 0])**2
    for d in range(1, dim):
        C += (V[rows, d] - V[cols, d])**2
    C = np.sqrt(C)
    # Clamp tiny distances (e.g. duplicate coordinates) so the later
    # 1.0 / C.data inversion cannot divide by zero.
    C[C < 1e-6] = 1e-6
    C = sparse.csr_matrix((C, A.indices.copy(), A.indptr.copy()),
                          shape=A.shape)
    # Apply drop tolerance
    if relative_drop is True:
        if theta != np.inf:
            amg_core.apply_distance_filter(C.shape[0], theta, C.indptr,
                                           C.indices, C.data)
    else:
        amg_core.apply_absolute_distance_filter(C.shape[0], theta, C.indptr,
                                                C.indices, C.data)
    C.eliminate_zeros()
    # Ensure a nonzero diagonal (every node strongly connected to itself).
    C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
    # Standardized strength values require small values be weak and large
    # values be strong. So, we invert the distances.
    C.data = 1.0 / C.data
    # Scale C by the largest magnitude entry in each row
    C = scale_rows_by_largest_entry(C)
    return C
"def",
"distance_strength_of_connection",
"(",
"A",
",",
"V",
",",
"theta",
"=",
"2.0",
",",
"relative_drop",
"=",
"True",
")",
":",
"# Amalgamate for the supernode case",
"if",
"sparse",
".",
"isspmatrix_bsr",
"(",
"A",
")",
":",
"sn",
"=",
"int",
"(",
"A",... | 34.776471 | 22.494118 |
def render_dynamic_electrode_state_shapes(self):
    '''
    Render **dynamic** states reported by the electrode controller.

    **Dynamic** electrode states are only applied while a protocol is
    running -- _not_ while in real-time programming mode.

    See also :meth:`render_electrode_shapes()`.

    .. versionadded:: 0.12
    '''
    df_shapes = self.canvas.df_canvas_shapes.copy()
    # Only include shapes for electrodes reported as actuated
    # (i.e. entries with a positive dynamic state value).
    on_electrodes = self._dynamic_electrodes[self._dynamic_electrodes > 0]
    df_shapes = (df_shapes.set_index('id').loc[on_electrodes.index]
                 .reset_index())
    # Actuated electrodes are drawn smaller (shape_scale) and in a fixed
    # highlight color.
    return self.render_electrode_shapes(df_shapes=df_shapes,
                                        shape_scale=0.75,
                                        # Light blue
                                        fill=(136 / 255.,
                                              189 / 255.,
                                              230 / 255.))
"def",
"render_dynamic_electrode_state_shapes",
"(",
"self",
")",
":",
"df_shapes",
"=",
"self",
".",
"canvas",
".",
"df_canvas_shapes",
".",
"copy",
"(",
")",
"# Only include shapes for electrodes reported as actuated.",
"on_electrodes",
"=",
"self",
".",
"_dynamic_elect... | 42.791667 | 25.458333 |
def remove_lb_nodes(self, lb_id, node_ids):
    """
    Remove one or more nodes from a load balancer.

    :param string lb_id: Balancer id
    :param list node_ids: List of node ids
    """
    log.info("Removing load balancer nodes %s" % node_ids)
    # One DELETE request per node.
    for node_id in node_ids:
        endpoint = '/loadbalancers/%s/nodes/%s' % (lb_id, node_id)
        self._request('delete', endpoint)
"def",
"remove_lb_nodes",
"(",
"self",
",",
"lb_id",
",",
"node_ids",
")",
":",
"log",
".",
"info",
"(",
"\"Removing load balancer nodes %s\"",
"%",
"node_ids",
")",
"for",
"node_id",
"in",
"node_ids",
":",
"self",
".",
"_request",
"(",
"'delete'",
",",
"'/l... | 33 | 15.727273 |
def evaluate(condition):
    """
    Evaluate simple condition.

    >>> Condition.evaluate(' 2 == 2 ')
    True
    >>> Condition.evaluate(' not 2 == 2 ')
    False
    >>> Condition.evaluate(' not "abc" == "xyz" ')
    True
    >>> Condition.evaluate('2 in [2, 4, 6, 8, 10]')
    True
    >>> Condition.evaluate('5 in [2, 4, 6, 8, 10]')
    False
    >>> Condition.evaluate('"apple" in ["apple", "kiwi", "orange"]')
    True
    >>> Condition.evaluate('5 not in [2, 4, 6, 8, 10]')
    True
    >>> Condition.evaluate('"apple" not in ["kiwi", "orange"]')
    True

    Args:
        condition (str): Python condition as string.

    Returns:
        bool: True when condition evaluates to True.
    """
    # An empty condition is trivially true.
    if len(condition) == 0:
        return True
    try:
        rule_name, ast_tokens, evaluate_function = Condition.find_rule(condition)
        if not rule_name == 'undefined':
            return evaluate_function(ast_tokens)
    except AttributeError as exception:
        Logger.get_logger(__name__).error("Attribute error: %s", exception)
    # Unknown rule or evaluation failure both count as "not satisfied".
    return False
"def",
"evaluate",
"(",
"condition",
")",
":",
"success",
"=",
"False",
"if",
"len",
"(",
"condition",
")",
">",
"0",
":",
"try",
":",
"rule_name",
",",
"ast_tokens",
",",
"evaluate_function",
"=",
"Condition",
".",
"find_rule",
"(",
"condition",
")",
"i... | 32.605263 | 21.763158 |
def _TryPrintAsAnyMessage(self, message):
    """Serializes if message is a google.protobuf.Any field."""
    packed_message = _BuildMessageFromTypeName(message.TypeName(),
                                               self.descriptor_pool)
    if not packed_message:
        # Unknown packed type: let the caller fall back to normal printing.
        return False
    packed_message.MergeFromString(message.value)
    self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
    self._PrintMessageFieldValue(packed_message)
    self.out.write(' ' if self.as_one_line else '\n')
    return True
"def",
"_TryPrintAsAnyMessage",
"(",
"self",
",",
"message",
")",
":",
"packed_message",
"=",
"_BuildMessageFromTypeName",
"(",
"message",
".",
"TypeName",
"(",
")",
",",
"self",
".",
"descriptor_pool",
")",
"if",
"packed_message",
":",
"packed_message",
".",
"M... | 44.166667 | 17.916667 |
def _vax_to_ieee_single_float(data):
"""Converts a float in Vax format to IEEE format.
data should be a single string of chars that have been read in from
a binary file. These will be processed 4 at a time into float values.
Thus the total number of byte/chars in the string should be divisible
by 4.
Based on VAX data organization in a byte file, we need to do a bunch of
bitwise operations to separate out the numbers that correspond to the
sign, the exponent and the fraction portions of this floating point
number
role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF
bits : 1 2 9 10 32
bytes : byte2 byte1 byte4 byte3
"""
f = []
nfloat = int(len(data) / 4)
for i in range(nfloat):
byte2 = data[0 + i*4]
byte1 = data[1 + i*4]
byte4 = data[2 + i*4]
byte3 = data[3 + i*4]
# hex 0x80 = binary mask 10000000
# hex 0x7f = binary mask 01111111
sign = (byte1 & 0x80) >> 7
expon = ((byte1 & 0x7f) << 1) + ((byte2 & 0x80) >> 7)
fract = ((byte2 & 0x7f) << 16) + (byte3 << 8) + byte4
if sign == 0:
sign_mult = 1.0
else:
sign_mult = -1.0
if 0 < expon:
# note 16777216.0 == 2^24
val = sign_mult * (0.5 + (fract/16777216.0)) * pow(2.0, expon - 128.0)
f.append(val)
elif expon == 0 and sign == 0:
f.append(0)
else:
f.append(0)
# may want to raise an exception here ...
return f | [
"def",
"_vax_to_ieee_single_float",
"(",
"data",
")",
":",
"f",
"=",
"[",
"]",
"nfloat",
"=",
"int",
"(",
"len",
"(",
"data",
")",
"/",
"4",
")",
"for",
"i",
"in",
"range",
"(",
"nfloat",
")",
":",
"byte2",
"=",
"data",
"[",
"0",
"+",
"i",
"*",... | 32.16 | 23.2 |
def analytic(input_type, output_type):
    """Define an *analytic* user-defined function that takes N
    pandas Series or scalar values as inputs and produces N rows of output.

    Parameters
    ----------
    input_type : List[ibis.expr.datatypes.DataType]
        A list of the types found in :mod:`~ibis.expr.datatypes`. The
        length of this list must match the number of arguments to the
        function. Variadic arguments are not yet supported.
    output_type : ibis.expr.datatypes.DataType
        The return type of the function.

    Examples
    --------
    >>> import ibis
    >>> import ibis.expr.datatypes as dt
    >>> from ibis.pandas.udf import udf
    >>> @udf.analytic(input_type=[dt.double], output_type=dt.double)
    ... def zscore(series): # note the use of aggregate functions
    ...     return (series - series.mean()) / series.std()
    """
    # Analytic UDFs share the grouped-execution machinery, differing only
    # in their op base class and how the output type is derived.
    column_type_getter = operator.attrgetter('column_type')
    return udf._grouped(
        input_type,
        output_type,
        base_class=ops.AnalyticOp,
        output_type_method=column_type_getter,
    )
"def",
"analytic",
"(",
"input_type",
",",
"output_type",
")",
":",
"return",
"udf",
".",
"_grouped",
"(",
"input_type",
",",
"output_type",
",",
"base_class",
"=",
"ops",
".",
"AnalyticOp",
",",
"output_type_method",
"=",
"operator",
".",
"attrgetter",
"(",
... | 40.071429 | 19.392857 |
def _render_internal_label(self):
''' Render with a label inside the bar graph. '''
ncc = self._num_complete_chars
bar = self._lbl.center(self.iwidth)
cm_chars = self._comp_style(bar[:ncc])
em_chars = self._empt_style(bar[ncc:])
return f'{self._first}{cm_chars}{em_chars}{self._last}' | [
"def",
"_render_internal_label",
"(",
"self",
")",
":",
"ncc",
"=",
"self",
".",
"_num_complete_chars",
"bar",
"=",
"self",
".",
"_lbl",
".",
"center",
"(",
"self",
".",
"iwidth",
")",
"cm_chars",
"=",
"self",
".",
"_comp_style",
"(",
"bar",
"[",
":",
... | 46.571429 | 9.142857 |
def _setHeaders(self, request):
"""
Those headers will allow you to call API methods from web browsers, they require CORS:
https://en.wikipedia.org/wiki/Cross-origin_resource_sharing
"""
request.responseHeaders.addRawHeader(b'content-type', b'application/json')
request.responseHeaders.addRawHeader(b'Access-Control-Allow-Origin', b'*')
request.responseHeaders.addRawHeader(b'Access-Control-Allow-Methods', b'GET, POST, PUT, DELETE')
request.responseHeaders.addRawHeader(b'Access-Control-Allow-Headers', b'x-prototype-version,x-requested-with')
request.responseHeaders.addRawHeader(b'Access-Control-Max-Age', 2520)
return request | [
"def",
"_setHeaders",
"(",
"self",
",",
"request",
")",
":",
"request",
".",
"responseHeaders",
".",
"addRawHeader",
"(",
"b'content-type'",
",",
"b'application/json'",
")",
"request",
".",
"responseHeaders",
".",
"addRawHeader",
"(",
"b'Access-Control-Allow-Origin'",... | 63.909091 | 34.090909 |
def scan_for_valid_codon(codon_span, strand, seqid, genome, type='start'):
    """
    Given a codon span, strand and reference seqid, scan upstream/downstream
    to find a valid in-frame start/stop codon

    Returns the (start, end) span of the first valid codon found, or
    (None, None) if a codon of the opposite kind is encountered first.
    """
    s, e = codon_span[0], codon_span[1]
    while True:
        # Step one codon (3 bases) at a time to stay in frame; the scan
        # direction depends on the codon type and strand orientation.
        if (type == 'start' and strand == '+') or \
            (type == 'stop' and strand == '-'):
            s, e = s - 3, e - 3
        else:
            s, e = s + 3, e + 3
        codon = _fasta_slice(genome, seqid, s, e, strand)
        is_valid = is_valid_codon(codon, type=type)
        if not is_valid:
            if type == 'start':
                ## if we are scanning upstream for a valid start codon,
                ## stop scanning when we encounter a stop
                if is_valid_codon(codon, type='stop'):
                    return (None, None)
            elif type == 'stop':
                ## if we are scanning downstream for a valid stop codon,
                ## stop scanning when we encounter a start
                if is_valid_codon(codon, type='start'):
                    return (None, None)
            continue
        break
    return (s, e)
"def",
"scan_for_valid_codon",
"(",
"codon_span",
",",
"strand",
",",
"seqid",
",",
"genome",
",",
"type",
"=",
"'start'",
")",
":",
"s",
",",
"e",
"=",
"codon_span",
"[",
"0",
"]",
",",
"codon_span",
"[",
"1",
"]",
"while",
"True",
":",
"if",
"(",
... | 37.533333 | 16.8 |
def prune_unspecified_categories(modules, categories):
"""
Removes unspecified module categories.
Mutates dictionary and returns it.
"""
res = {}
for mod_name, mod_info in modules.items():
mod_categories = mod_info.get("categories", all_categories)
for category in categories:
if category in mod_categories:
break
else:
continue
for input_name, input_info in mod_info["inputs"].items():
for c in input_info["categories"]:
if c in categories:
break
else:
del mod_info["inputs"][input_name]
for output_name, output_info in mod_info["outputs"].items():
for c in output_info["categories"]:
if c in categories:
break
else:
del mod_info["outputs"][output_name]
res[mod_name] = mod_info
return res | [
"def",
"prune_unspecified_categories",
"(",
"modules",
",",
"categories",
")",
":",
"res",
"=",
"{",
"}",
"for",
"mod_name",
",",
"mod_info",
"in",
"modules",
".",
"items",
"(",
")",
":",
"mod_categories",
"=",
"mod_info",
".",
"get",
"(",
"\"categories\"",
... | 34.518519 | 13.333333 |
def to_snake_case(text):
    """Convert a CamelCase string to snake case.

    :param str text:
    :rtype: str
    :return:
    """
    # First split words that start with a capital followed by lowercase,
    # then split remaining lower/digit-to-capital boundaries (acronyms).
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
"def",
"to_snake_case",
"(",
"text",
")",
":",
"s1",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"text",
")",
"return",
"re",
".",
"sub",
"(",
"'([a-z0-9])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")... | 24.222222 | 17.777778 |
def _build_ds_from_instruction(instruction, ds_from_file_fn):
    """Map an instruction to a real datasets for one particular shard.

    Args:
        instruction: A `dict` of `tf.Tensor` containing the instruction to load
            the particular shard (filename, mask,...)
        ds_from_file_fn: `fct`, function which returns the dataset associated to
            the filename

    Returns:
        dataset: `tf.data.Dataset`, The shard loaded from the instruction
    """
    # Create the example and mask ds for this particular shard
    examples_ds = ds_from_file_fn(instruction["filepath"])
    mask_ds = _build_mask_ds(
        mask_offset=instruction["mask_offset"],
        mask=instruction["mask"],
    )
    # Zip the mask and real examples; both datasets advance in lockstep so
    # each example is paired with its keep/drop flag.
    ds = tf.data.Dataset.zip((examples_ds, mask_ds))
    # Filter according to the mask (only keep True)
    ds = ds.filter(lambda example, mask: mask)
    # Only keep the examples (drop the mask component)
    ds = ds.map(lambda example, mask: example)
    return ds
"def",
"_build_ds_from_instruction",
"(",
"instruction",
",",
"ds_from_file_fn",
")",
":",
"# Create the example and mask ds for this particular shard",
"examples_ds",
"=",
"ds_from_file_fn",
"(",
"instruction",
"[",
"\"filepath\"",
"]",
")",
"mask_ds",
"=",
"_build_mask_ds",... | 35.230769 | 19.576923 |
def add(self, name, attributes):
    """
    Add the relation to the Schema.

    :param name: The name of a relation.
    :param attributes: A list of attributes for the relation.
    :raise RelationReferenceError: Raised if the name already exists.
    """
    if name in self._data:
        message = 'Relation \'{name}\' already exists.'.format(name=name)
        raise RelationReferenceError(message)
    # Store a copy so later mutation of the caller's list does not
    # affect the schema.
    self._data[name] = attributes[:]
"def",
"add",
"(",
"self",
",",
"name",
",",
"attributes",
")",
":",
"if",
"name",
"in",
"self",
".",
"_data",
":",
"raise",
"RelationReferenceError",
"(",
"'Relation \\'{name}\\' already exists.'",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"self",
... | 41.636364 | 10.363636 |
def color_electrodes(self, config_nr, ax):
    """
    Color the electrodes used in specific configuration.
    Voltage electrodes are yellow, Current electrodes are red ?!
    """
    # NOTE(review): this loadtxt result is immediately overwritten by the
    # next assignment -- the file read appears to be dead code; confirm
    # whether reading options.config_file is still needed here.
    electrodes = np.loadtxt(options.config_file, skiprows=1)
    # Keep only fully-defined configurations (rows without NaN).
    electrodes = self.configs[~np.isnan(self.configs).any(1)]
    electrodes = electrodes.astype(int)
    conf = []
    for dim in range(0, electrodes.shape[1]):
        c = electrodes[config_nr, dim]
        # c = c.partition('0')
        # Each entry packs two electrode numbers as A*10000 + B; the -1
        # converts the 1-based electrode numbers to 0-based indices.
        a = np.round(c / 10000) - 1
        b = np.mod(c, 10000) - 1
        conf.append(a)
        conf.append(b)
    Ex, Ez = elem.get_electrodes()
    # Two yellow (voltage) then two red (current) markers.
    color = ['#ffed00', '#ffed00', '#ff0000', '#ff0000']
    ax.scatter(Ex[conf], Ez[conf], c=color, marker='s', s=60,
               clip_on=False, edgecolors='k')
"def",
"color_electrodes",
"(",
"self",
",",
"config_nr",
",",
"ax",
")",
":",
"electrodes",
"=",
"np",
".",
"loadtxt",
"(",
"options",
".",
"config_file",
",",
"skiprows",
"=",
"1",
")",
"electrodes",
"=",
"self",
".",
"configs",
"[",
"~",
"np",
".",
... | 40.238095 | 12.904762 |
def _get_attribute(self, attribute, name):
"""Device attribute getter"""
try:
if attribute is None:
attribute = self._attribute_file_open( name )
else:
attribute.seek(0)
return attribute, attribute.read().strip().decode()
except Exception as ex:
self._raise_friendly_access_error(ex, name) | [
"def",
"_get_attribute",
"(",
"self",
",",
"attribute",
",",
"name",
")",
":",
"try",
":",
"if",
"attribute",
"is",
"None",
":",
"attribute",
"=",
"self",
".",
"_attribute_file_open",
"(",
"name",
")",
"else",
":",
"attribute",
".",
"seek",
"(",
"0",
"... | 38.4 | 13.5 |
def as_cql_query(self, formatted=False):
    """
    Returns a CQL query that can be used to recreate this type.
    If `formatted` is set to :const:`True`, extra whitespace will
    be added to make the query more readable.
    """
    if formatted:
        field_join = ",\n"
        padding = "    "
        header_break = "\n"
        footer = "\n)"
    else:
        field_join = ", "
        padding = ""
        header_break = ""
        footer = ")"
    ret = "CREATE TYPE %s.%s (%s" % (
        protect_name(self.keyspace),
        protect_name(self.name),
        header_break)
    fields = ["%s %s" % (protect_name(field_name), field_type)
              for field_name, field_type in zip(self.field_names,
                                                self.field_types)]
    ret += field_join.join("%s%s" % (padding, field) for field in fields)
    return ret + footer
"def",
"as_cql_query",
"(",
"self",
",",
"formatted",
"=",
"False",
")",
":",
"ret",
"=",
"\"CREATE TYPE %s.%s (%s\"",
"%",
"(",
"protect_name",
"(",
"self",
".",
"keyspace",
")",
",",
"protect_name",
"(",
"self",
".",
"name",
")",
",",
"\"\\n\"",
"if",
... | 34.24 | 17.68 |
def fetch(self, value_obj=None):
    ''' Fetch the next value, wrapping it in a Value object.

    Returns None when the underlying iterable is exhausted; otherwise
    returns `value_obj` (or a fresh Value) carrying the fetched value.
    '''
    try:
        val = next(self.__iterable)
    except StopIteration:
        return None
    if value_obj is None:
        return Value(value=val)
    value_obj.value = val
    return value_obj
"def",
"fetch",
"(",
"self",
",",
"value_obj",
"=",
"None",
")",
":",
"val",
"=",
"None",
"try",
":",
"val",
"=",
"next",
"(",
"self",
".",
"__iterable",
")",
"except",
"StopIteration",
":",
"return",
"None",
"if",
"value_obj",
"is",
"None",
":",
"va... | 27.75 | 12.416667 |
def _encode_auth(auth):
    """
    A function compatible with Python 2.3-3.3 that will encode
    auth from a URL suitable for an HTTP header.
    >>> _encode_auth('username%3Apassword')
    u'dXNlcm5hbWU6cGFzc3dvcmQ='
    """
    unquoted = urllib2.unquote(auth)
    auth_bytes = unquoted.encode()
    # The legacy base64 interface keeps Python 2.3 compatibility; it
    # appends a trailing newline, stripped below.
    encoded = base64.encodestring(auth_bytes).decode()
    return encoded.rstrip()
"def",
"_encode_auth",
"(",
"auth",
")",
":",
"auth_s",
"=",
"urllib2",
".",
"unquote",
"(",
"auth",
")",
"# convert to bytes",
"auth_bytes",
"=",
"auth_s",
".",
"encode",
"(",
")",
"# use the legacy interface for Python 2.3 support",
"encoded_bytes",
"=",
"base64",... | 34.125 | 8.875 |
def direct_command(self, device_id, command, command2, extended_payload=None):
    """Wrapper to send posted direct command and get response. Level is 0-100.
    extended_payload is 14 bytes/28 chars..but last 2 chars is a generated checksum so leave off"""
    extended_payload = extended_payload or ''
    if not extended_payload:
        msg_type = '0'
        msg_type_desc = 'Standard'
    else:
        msg_type = '1'
        msg_type_desc = 'Extended'
        extended_payload = extended_payload.ljust(26, '0')
        ### Determine checksum to add onto the payload for I2CS support
        payload_bytes = [int("0x" + extended_payload[i:i + 2], 16)
                         for i in range(0, len(extended_payload) - 1, 2)]
        payload_bytes.insert(0, int("0x" + command2, 16))
        payload_bytes.insert(0, int("0x" + command, 16))
        # Sum all bytes, then keep only the last hex byte of the total.
        total = 0
        for byte_value in payload_bytes:
            total += byte_value
        low_byte_str = '0x' + hex(total)[-2:].zfill(2)
        # Two's complement of the low byte gives the checksum.
        complement = int(low_byte_str, 16) ^ 0xFF
        checksum_final = (format(int(hex(complement + 0x01), 16), 'x')).upper()
        extended_payload = extended_payload + checksum_final
    self.logger.info("direct_command: Device: %s Command: %s Command 2: %s Extended: %s MsgType: %s", device_id, command, command2, extended_payload, msg_type_desc)
    device_id = device_id.upper()
    command_url = (self.hub_url + '/3?' + "0262"
                   + device_id + msg_type + "F"
                   + command + command2 + extended_payload + "=I=3")
    return self.post_direct_command(command_url)
"def",
"direct_command",
"(",
"self",
",",
"device_id",
",",
"command",
",",
"command2",
",",
"extended_payload",
"=",
"None",
")",
":",
"extended_payload",
"=",
"extended_payload",
"or",
"''",
"if",
"not",
"extended_payload",
":",
"msg_type",
"=",
"'0'",
"msg... | 47.133333 | 18.755556 |
def query_os_kernel_log(self, max_messages):
"""Tries to get the kernel log (dmesg) of the guest OS.
in max_messages of type int
Max number of messages to return, counting from the end of the
log. If 0, there is no limit.
return dmesg of type str
The kernel log.
"""
if not isinstance(max_messages, baseinteger):
raise TypeError("max_messages can only be an instance of type baseinteger")
dmesg = self._call("queryOSKernelLog",
in_p=[max_messages])
return dmesg | [
"def",
"query_os_kernel_log",
"(",
"self",
",",
"max_messages",
")",
":",
"if",
"not",
"isinstance",
"(",
"max_messages",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"max_messages can only be an instance of type baseinteger\"",
")",
"dmesg",
"=",
"self"... | 35.9375 | 17.0625 |
def collect_random_trajectory(env, timesteps=1000):
"""Run a random policy to collect trajectories.
The rollout trajectory is saved to files in npz format.
Modify the DataCollectionWrapper wrapper to add new fields or change data formats.
"""
obs = env.reset()
dof = env.dof
for t in range(timesteps):
action = 0.5 * np.random.randn(dof)
obs, reward, done, info = env.step(action)
env.render()
if t % 100 == 0:
print(t) | [
"def",
"collect_random_trajectory",
"(",
"env",
",",
"timesteps",
"=",
"1000",
")",
":",
"obs",
"=",
"env",
".",
"reset",
"(",
")",
"dof",
"=",
"env",
".",
"dof",
"for",
"t",
"in",
"range",
"(",
"timesteps",
")",
":",
"action",
"=",
"0.5",
"*",
"np... | 29.9375 | 19.8125 |
def _generate_event_resources(self, lambda_function, execution_role, event_resources, lambda_alias=None):
"""Generates and returns the resources associated with this function's events.
:param model.lambda_.LambdaFunction lambda_function: generated Lambda function
:param iam.IAMRole execution_role: generated Lambda execution role
:param implicit_api: Global Implicit API resource where the implicit APIs get attached to, if necessary
:param implicit_api_stage: Global implicit API stage resource where implicit APIs get attached to, if necessary
:param event_resources: All the event sources associated with this Lambda function
:param model.lambda_.LambdaAlias lambda_alias: Optional Lambda Alias resource if we want to connect the
event sources to this alias
:returns: a list containing the function's event resources
:rtype: list
"""
resources = []
if self.Events:
for logical_id, event_dict in self.Events.items():
try:
eventsource = self.event_resolver.resolve_resource_type(event_dict).from_dict(
lambda_function.logical_id + logical_id, event_dict, logical_id)
except TypeError as e:
raise InvalidEventException(logical_id, "{}".format(e))
kwargs = {
# When Alias is provided, connect all event sources to the alias and *not* the function
'function': lambda_alias or lambda_function,
'role': execution_role,
}
for name, resource in event_resources[logical_id].items():
kwargs[name] = resource
resources += eventsource.to_cloudformation(**kwargs)
return resources | [
"def",
"_generate_event_resources",
"(",
"self",
",",
"lambda_function",
",",
"execution_role",
",",
"event_resources",
",",
"lambda_alias",
"=",
"None",
")",
":",
"resources",
"=",
"[",
"]",
"if",
"self",
".",
"Events",
":",
"for",
"logical_id",
",",
"event_d... | 53.235294 | 32.205882 |
def create_release_branch(self, branch_name):
"""
Create a new release branch.
:param branch_name: The name of the release branch to create (a string).
:raises: The following exceptions can be raised:
- :exc:`~exceptions.TypeError` when :attr:`release_scheme`
isn't set to 'branches'.
- :exc:`~exceptions.ValueError` when the branch name doesn't
match the configured :attr:`release_filter` or no parent
release branches are available.
This method automatically checks out the new release branch, but note
that the new branch may not actually exist until a commit has been made
on the branch.
"""
# Validate the release scheme.
self.ensure_release_scheme('branches')
# Validate the name of the release branch.
if self.compiled_filter.match(branch_name) is None:
msg = "The branch name '%s' doesn't match the release filter!"
raise ValueError(msg % branch_name)
# Make sure the local repository exists.
self.create()
# Figure out the correct parent release branch.
candidates = natsort([r.revision.branch for r in self.ordered_releases] + [branch_name])
index = candidates.index(branch_name) - 1
if index < 0:
msg = "Failed to determine suitable parent branch for release branch '%s'!"
raise ValueError(msg % branch_name)
parent_branch = candidates[index]
self.checkout(parent_branch)
self.create_branch(branch_name) | [
"def",
"create_release_branch",
"(",
"self",
",",
"branch_name",
")",
":",
"# Validate the release scheme.",
"self",
".",
"ensure_release_scheme",
"(",
"'branches'",
")",
"# Validate the name of the release branch.",
"if",
"self",
".",
"compiled_filter",
".",
"match",
"("... | 46.941176 | 19.647059 |
def append_tz_time_only(self, tag, timestamp=None, precision=3,
header=False):
"""Append a field with a TZTimeOnly value.
:param tag: Integer or string FIX tag number.
:param timestamp: Time value, see below.
:param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
:param header: Append to FIX header if True; default to body.
The `timestamp` value should be a local datetime, such as created
by datetime.datetime.now(); a float, being the number of seconds
since midnight 1 Jan 1970 UTC, such as returned by time.time();
or, None, in which case datetime.datetime.now() is used to
get the current UTC time.
Precision values other than None (minutes), zero (seconds),
3 (milliseconds), or 6 (microseconds) will raise an exception.
Note that prior to FIX 5.0, only values of 0 or 3 comply with the
standard."""
if timestamp is None:
t = datetime.datetime.now()
elif type(timestamp) is float:
t = datetime.datetime.fromtimestamp(timestamp)
else:
t = timestamp
now = time.mktime(t.timetuple()) + (t.microsecond * 1e-6)
utc = datetime.datetime.utcfromtimestamp(now)
td = t - utc
offset = int(((td.days * 86400) + td.seconds) / 60)
s = t.strftime("%H:%M")
if precision == 0:
s += t.strftime(":%S")
elif precision == 3:
s += t.strftime(":%S")
s += ".%03u" % (t.microsecond / 1000)
elif precision == 6:
s += t.strftime(":%S")
s += ".%06u" % t.microsecond
elif precision is not None:
raise ValueError("Precision should be one of "
"None, 0, 3 or 6 digits")
s += self._tz_offset_string(offset)
return self.append_pair(tag, s, header=header) | [
"def",
"append_tz_time_only",
"(",
"self",
",",
"tag",
",",
"timestamp",
"=",
"None",
",",
"precision",
"=",
"3",
",",
"header",
"=",
"False",
")",
":",
"if",
"timestamp",
"is",
"None",
":",
"t",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")"... | 40.191489 | 18.829787 |
def history_date(soup, date_type = None):
"""
Find a date in the history tag for the specific date_type
typical date_type values: received, accepted
"""
if(date_type == None):
return None
history_date = raw_parser.history_date(soup, date_type)
if history_date is None:
return None
(day, month, year) = ymd(history_date)
return date_struct(year, month, day) | [
"def",
"history_date",
"(",
"soup",
",",
"date_type",
"=",
"None",
")",
":",
"if",
"(",
"date_type",
"==",
"None",
")",
":",
"return",
"None",
"history_date",
"=",
"raw_parser",
".",
"history_date",
"(",
"soup",
",",
"date_type",
")",
"if",
"history_date",... | 30.538462 | 12.230769 |
def chk_date_arg(s):
"""Checks if the string `s` is a valid date string.
Return True of False."""
if re_date.search(s) is None:
return False
comp = s.split('-')
try:
dt = datetime.date(int(comp[0]), int(comp[1]), int(comp[2]))
return True
except Exception as e:
return False | [
"def",
"chk_date_arg",
"(",
"s",
")",
":",
"if",
"re_date",
".",
"search",
"(",
"s",
")",
"is",
"None",
":",
"return",
"False",
"comp",
"=",
"s",
".",
"split",
"(",
"'-'",
")",
"try",
":",
"dt",
"=",
"datetime",
".",
"date",
"(",
"int",
"(",
"c... | 26.666667 | 18.25 |
def compute(self, *args, **kwargs)->[Any, None]:
"""Compose and evaluate the function.
"""
return super().compute(
self.compose, *args, **kwargs
) | [
"def",
"compute",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"[",
"Any",
",",
"None",
"]",
":",
"return",
"super",
"(",
")",
".",
"compute",
"(",
"self",
".",
"compose",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 30.833333 | 8.166667 |
def day_of_week(abbr=False):
"""Return a random (abbreviated if `abbr`) day of week name."""
if abbr:
return random.choice(DAYS_ABBR)
else:
return random.choice(DAYS) | [
"def",
"day_of_week",
"(",
"abbr",
"=",
"False",
")",
":",
"if",
"abbr",
":",
"return",
"random",
".",
"choice",
"(",
"DAYS_ABBR",
")",
"else",
":",
"return",
"random",
".",
"choice",
"(",
"DAYS",
")"
] | 31.5 | 13 |
def getstruct(self, msgid, as_json=False, stream=sys.stdout):
"""Get and print the whole message.
as_json indicates whether to print the part list as JSON or not.
"""
parts = [part.get_content_type() for hdr, part in self._get(msgid)]
if as_json:
print(json.dumps(parts), file=stream)
else:
for c in parts:
print(c, file=stream) | [
"def",
"getstruct",
"(",
"self",
",",
"msgid",
",",
"as_json",
"=",
"False",
",",
"stream",
"=",
"sys",
".",
"stdout",
")",
":",
"parts",
"=",
"[",
"part",
".",
"get_content_type",
"(",
")",
"for",
"hdr",
",",
"part",
"in",
"self",
".",
"_get",
"("... | 37 | 18.272727 |
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs):
"""Scrape Matplotlib images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
**kwargs : dict
Additional keyword arguments to pass to
:meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
The ``format`` kwarg in particular is used to set the file extension
of the output file (currently only 'png' and 'svg' are supported).
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`.
"""
matplotlib, plt = _import_matplotlib()
image_path_iterator = block_vars['image_path_iterator']
image_paths = list()
for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
if 'format' in kwargs:
image_path = '%s.%s' % (os.path.splitext(image_path)[0],
kwargs['format'])
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_num)
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr) and \
attr not in kwargs:
kwargs[attr] = fig_attr
fig.savefig(image_path, **kwargs)
image_paths.append(image_path)
plt.close('all')
return figure_rst(image_paths, gallery_conf['src_dir']) | [
"def",
"matplotlib_scraper",
"(",
"block",
",",
"block_vars",
",",
"gallery_conf",
",",
"*",
"*",
"kwargs",
")",
":",
"matplotlib",
",",
"plt",
"=",
"_import_matplotlib",
"(",
")",
"image_path_iterator",
"=",
"block_vars",
"[",
"'image_path_iterator'",
"]",
"ima... | 41.681818 | 19.068182 |
def add_username(user, apps):
"""When using broser login, username was not stored so look it up"""
if not user:
return None
apps = [a for a in apps if a.instance == user.instance]
if not apps:
return None
from toot.api import verify_credentials
creds = verify_credentials(apps.pop(), user)
return User(user.instance, creds['username'], user.access_token) | [
"def",
"add_username",
"(",
"user",
",",
"apps",
")",
":",
"if",
"not",
"user",
":",
"return",
"None",
"apps",
"=",
"[",
"a",
"for",
"a",
"in",
"apps",
"if",
"a",
".",
"instance",
"==",
"user",
".",
"instance",
"]",
"if",
"not",
"apps",
":",
"ret... | 27.785714 | 22.785714 |
def handle_tick(self):
"""Internal callback every time 1 second has passed."""
self.uptime += 1
for name, interval in self.ticks.items():
if interval == 0:
continue
self.tick_counters[name] += 1
if self.tick_counters[name] == interval:
self.graph_input(self.TICK_STREAMS[name], self.uptime)
self.tick_counters[name] = 0 | [
"def",
"handle_tick",
"(",
"self",
")",
":",
"self",
".",
"uptime",
"+=",
"1",
"for",
"name",
",",
"interval",
"in",
"self",
".",
"ticks",
".",
"items",
"(",
")",
":",
"if",
"interval",
"==",
"0",
":",
"continue",
"self",
".",
"tick_counters",
"[",
... | 32.153846 | 18.230769 |
def readline(self, echo=None, prompt='', use_history=True):
"""Return a line of text, including the terminating LF
If echo is true always echo, if echo is false never echo
If echo is None follow the negotiated setting.
prompt is the current prompt to write (and rewrite if needed)
use_history controls if this current line uses (and adds to) the command history.
"""
line = []
insptr = 0
ansi = 0
histptr = len(self.history)
if self.DOECHO:
self.write(prompt)
self._current_prompt = prompt
else:
self._current_prompt = ''
self._current_line = ''
while True:
c = self.getc(block=True)
c = self.ansi_to_curses(c)
if c == theNULL:
continue
elif c == curses.KEY_LEFT:
if insptr > 0:
insptr = insptr - 1
self._readline_echo(self.CODES['CSRLEFT'], echo)
else:
self._readline_echo(BELL, echo)
continue
elif c == curses.KEY_RIGHT:
if insptr < len(line):
insptr = insptr + 1
self._readline_echo(self.CODES['CSRRIGHT'], echo)
else:
self._readline_echo(BELL, echo)
continue
elif c == curses.KEY_UP or c == curses.KEY_DOWN:
if not use_history:
self._readline_echo(BELL, echo)
continue
if c == curses.KEY_UP:
if histptr > 0:
histptr = histptr - 1
else:
self._readline_echo(BELL, echo)
continue
elif c == curses.KEY_DOWN:
if histptr < len(self.history):
histptr = histptr + 1
else:
self._readline_echo(BELL, echo)
continue
line = []
if histptr < len(self.history):
line.extend(self.history[histptr])
for char in range(insptr):
self._readline_echo(self.CODES['CSRLEFT'], echo)
self._readline_echo(self.CODES['DEOL'], echo)
self._readline_echo(''.join(line), echo)
insptr = len(line)
continue
elif c == chr(3):
self._readline_echo('\n' + curses.ascii.unctrl(c) + ' ABORT\n', echo)
return ''
elif c == chr(4):
if len(line) > 0:
self._readline_echo('\n' + curses.ascii.unctrl(c) + ' ABORT (QUIT)\n', echo)
return ''
self._readline_echo('\n' + curses.ascii.unctrl(c) + ' QUIT\n', echo)
return 'QUIT'
elif c == chr(10):
self._readline_echo(c, echo)
result = ''.join(line)
if use_history:
self.history.append(result)
if echo is False:
if prompt:
self.write( chr(10) )
log.debug('readline: %s(hidden text)', prompt)
else:
log.debug('readline: %s%r', prompt, result)
return result
elif c == curses.KEY_BACKSPACE or c == chr(127) or c == chr(8):
if insptr > 0:
self._readline_echo(self.CODES['CSRLEFT'] + self.CODES['DEL'], echo)
insptr = insptr - 1
del line[insptr]
else:
self._readline_echo(BELL, echo)
continue
elif c == curses.KEY_DC:
if insptr < len(line):
self._readline_echo(self.CODES['DEL'], echo)
del line[insptr]
else:
self._readline_echo(BELL, echo)
continue
else:
if ord(c) < 32:
c = curses.ascii.unctrl(c)
if len(line) > insptr:
self._readline_insert(c, echo, insptr, line)
else:
self._readline_echo(c, echo)
line[insptr:insptr] = c
insptr = insptr + len(c)
if self._readline_do_echo(echo):
self._current_line = line | [
"def",
"readline",
"(",
"self",
",",
"echo",
"=",
"None",
",",
"prompt",
"=",
"''",
",",
"use_history",
"=",
"True",
")",
":",
"line",
"=",
"[",
"]",
"insptr",
"=",
"0",
"ansi",
"=",
"0",
"histptr",
"=",
"len",
"(",
"self",
".",
"history",
")",
... | 39.495575 | 14.265487 |
def _wkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes wave kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any ther value, return unnormalized wave kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
"""
nv = eivals.shape[0]
wkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
wkt[idx] = np.sum(np.exp(-1j * t * eivals))
if isinstance(normalization, np.ndarray):
return hkt / normalization
if normalization == 'empty' or normalization == True:
return wkt / nv
if normalization == 'complete':
if normalized_laplacian:
return wkt / (1 + (nv - 1) * np.cos(timescales))
else:
return wkt / (1 + (nv - 1) * np.cos(nv * timescales))
return wkt | [
"def",
"_wkt",
"(",
"eivals",
",",
"timescales",
",",
"normalization",
",",
"normalized_laplacian",
")",
":",
"nv",
"=",
"eivals",
".",
"shape",
"[",
"0",
"]",
"wkt",
"=",
"np",
".",
"zeros",
"(",
"timescales",
".",
"shape",
")",
"for",
"idx",
",",
"... | 39.425 | 23.675 |
def files_walker(directory, filters_in=None, filters_out=None, flags=0):
"""
Defines a generator used to walk files using given filters.
Usage::
>>> for file in files_walker("./foundations/tests/tests_foundations/resources/standard/level_0"):
... print(file)
...
./foundations/tests/tests_foundations/resources/standard/level_0/level_1/level_2/standard.sIBLT
./foundations/tests/tests_foundations/resources/standard/level_0/level_1/lorem_ipsum.txt
./foundations/tests/tests_foundations/resources/standard/level_0/level_1/standard.rc
./foundations/tests/tests_foundations/resources/standard/level_0/standard.ibl
>>> for file in files_walker("./foundations/tests/tests_foundations/resources/standard/level_0", ("\.sIBLT",)):
... print(file)
...
./foundations/tests/tests_foundations/resources/standard/level_0/level_1/level_2/standard.sIBLT
:param directory: Directory to recursively walk.
:type directory: unicode
:param filters_in: Regex filters in list.
:type filters_in: tuple or list
:param filters_in: Regex filters out list.
:type filters_in: tuple or list
:param flags: Regex flags.
:type flags: int
:return: File.
:rtype: unicode
"""
if filters_in:
LOGGER.debug("> Current filters in: '{0}'.".format(filters_in))
if filters_out:
LOGGER.debug("> Current filters out: '{0}'.".format(filters_out))
for parent_directory, directories, files in os.walk(directory, topdown=False, followlinks=True):
for file in files:
LOGGER.debug("> Current file: '{0}' in '{1}'.".format(file, directory))
path = foundations.strings.to_forward_slashes(os.path.join(parent_directory, file))
if os.path.isfile(path):
if not foundations.strings.filter_words((path,), filters_in, filters_out, flags):
continue
LOGGER.debug("> '{0}' file filtered in!".format(path))
yield path | [
"def",
"files_walker",
"(",
"directory",
",",
"filters_in",
"=",
"None",
",",
"filters_out",
"=",
"None",
",",
"flags",
"=",
"0",
")",
":",
"if",
"filters_in",
":",
"LOGGER",
".",
"debug",
"(",
"\"> Current filters in: '{0}'.\"",
".",
"format",
"(",
"filters... | 42.744681 | 30.319149 |
def add_job_to_db(self, key, job):
"""Add job info to the database."""
job_msg = self.registry.deep_encode(job)
prov = prov_key(job_msg)
def set_link(duplicate_id):
self.cur.execute(
'update "jobs" set "link" = ?, "status" = ? where "id" = ?',
(duplicate_id, Status.DUPLICATE, key))
with self.lock:
self.cur.execute(
'select * from "jobs" where "prov" = ? '
'and (("result" is not null) or '
'("session" = ? and "link" is null))',
(prov, self.session))
rec = self.cur.fetchone()
rec = JobEntry(*rec) if rec is not None else None
self.cur.execute(
'update "jobs" set "prov" = ?, "version" = ?, "function" = ?, '
'"arguments" = ?, "status" = ? where "id" = ?',
(prov, job_msg['data']['hints'].get('version'),
json.dumps(job_msg['data']['function']),
json.dumps(job_msg['data']['arguments']),
Status.WAITING,
key))
if not rec:
# no duplicate found, go on
return 'initialized', None
set_link(rec.id)
if rec.result is not None and rec.status == Status.WORKFLOW:
# the found duplicate returned a workflow
if rec.link is not None:
# link is set, so result is fully realized
self.cur.execute(
'select * from "jobs" where "id" = ?',
(rec.link,))
rec = self.cur.fetchone()
assert rec is not None, "database integrity violation"
rec = JobEntry(*rec)
else:
# link is not set, the result is still waited upon
assert rec.session == self.session, \
"database integrity violation"
self.attached[rec.id].append(key)
return 'attached', None
if rec.result is not None:
# result is found! return it
result_value = self.registry.from_json(rec.result, deref=True)
result = ResultMessage(
key, 'retrieved', result_value, None)
return 'retrieved', result
if rec.session == self.session:
# still waiting for result, attach
self.attached[rec.id].append(key)
return 'attached', None | [
"def",
"add_job_to_db",
"(",
"self",
",",
"key",
",",
"job",
")",
":",
"job_msg",
"=",
"self",
".",
"registry",
".",
"deep_encode",
"(",
"job",
")",
"prov",
"=",
"prov_key",
"(",
"job_msg",
")",
"def",
"set_link",
"(",
"duplicate_id",
")",
":",
"self",... | 40.285714 | 16.746032 |
def addStepListener(listener):
"""addStepListener(traci.StepListener) -> bool
Append the step listener (its step function is called at the end of every call to traci.simulationStep())
Returns True if the listener was added successfully, False otherwise.
"""
if issubclass(type(listener), StepListener):
_stepListeners.append(listener)
return True
warnings.warn(
"Proposed listener's type must inherit from traci.StepListener. Not adding object of type '%s'" % type(listener))
return False | [
"def",
"addStepListener",
"(",
"listener",
")",
":",
"if",
"issubclass",
"(",
"type",
"(",
"listener",
")",
",",
"StepListener",
")",
":",
"_stepListeners",
".",
"append",
"(",
"listener",
")",
"return",
"True",
"warnings",
".",
"warn",
"(",
"\"Proposed list... | 44.166667 | 25.75 |
def get_config(variable, default=None):
""" Get configuration variable for strudel.* packages
Args:
variable (str): name of the config variable
default: value to use of config variable not set
Returns:
variable value
Order of search:
1. stutils.CONFIG
2. settings.py of the current folder
3. environment variable
Known config vars so far:
strudel.utils
ST_FS_CACHE_DURATION - duration of filesystem cache in seconds
ST_FS_CACHE_PATH - path to the folder to store filesystem cache
strudel.ecosystems
PYPI_SAVE_PATH - place to store downloaded PyPI packages
PYPI_TIMEOUT - network timeout for PyPI API
strudel.scraper
GITHUB_API_TOKENS - comma separated list of GitHub tokens
GITLAB_API_TOKENS - same for GitLab API
"""
if variable in CONFIG:
return CONFIG[variable]
if hasattr(settings, variable):
return getattr(settings, variable)
if variable in os.environ:
return os.environ[variable]
return default | [
"def",
"get_config",
"(",
"variable",
",",
"default",
"=",
"None",
")",
":",
"if",
"variable",
"in",
"CONFIG",
":",
"return",
"CONFIG",
"[",
"variable",
"]",
"if",
"hasattr",
"(",
"settings",
",",
"variable",
")",
":",
"return",
"getattr",
"(",
"settings... | 27.815789 | 20.236842 |
def forward_message(chat_id, from_chat_id, message_id,
**kwargs):
"""
Use this method to forward messages of any kind.
:param chat_id: Unique identifier for the message recipient — User or GroupChat id
:param from_chat_id: Unique identifier for the chat where the original message was sent — User or
GroupChat id
:param message_id: Unique message identifier
:param \*\*kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int
:type from_chat_id: int
:type message_id: int
:returns: On success, the sent Message is returned.
:rtype: Message
"""
# required args
params = dict(
chat_id=chat_id,
from_chat_id=from_chat_id,
message_id=message_id
)
return TelegramBotRPCRequest('forwardMessage', params=params, on_result=Message.from_result, **kwargs) | [
"def",
"forward_message",
"(",
"chat_id",
",",
"from_chat_id",
",",
"message_id",
",",
"*",
"*",
"kwargs",
")",
":",
"# required args",
"params",
"=",
"dict",
"(",
"chat_id",
"=",
"chat_id",
",",
"from_chat_id",
"=",
"from_chat_id",
",",
"message_id",
"=",
"... | 32.851852 | 24.259259 |
def on_connect(self, ws):
""" Todo """
self.logger.info("onconnect")
msg = {'op': self.IDENTIFY,
'd': {'token': self.token,
'properties': {'$os': 'lnx',
'$browser': 'discord_simple',
'$device': 'discord_simple',
'$refferer': '',
'$reffering_domain': ''},
'compress': False,
'large_threshold': 250,
'v' : 3}}
ws.send(json.dumps(msg)) | [
"def",
"on_connect",
"(",
"self",
",",
"ws",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"onconnect\"",
")",
"msg",
"=",
"{",
"'op'",
":",
"self",
".",
"IDENTIFY",
",",
"'d'",
":",
"{",
"'token'",
":",
"self",
".",
"token",
",",
"'properti... | 38.714286 | 9.714286 |
def _ProcessFileEntry(self, mediator, file_entry):
"""Processes a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry.
"""
display_name = mediator.GetDisplayName()
logger.debug(
'[ProcessFileEntry] processing file entry: {0:s}'.format(display_name))
reference_count = mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec)
try:
if self._IsMetadataFile(file_entry):
self._ProcessMetadataFile(mediator, file_entry)
else:
file_entry_processed = False
for data_stream in file_entry.data_streams:
if self._abort:
break
if self._CanSkipDataStream(file_entry, data_stream):
logger.debug((
'[ProcessFileEntry] Skipping datastream {0:s} for {1:s}: '
'{2:s}').format(
data_stream.name, file_entry.type_indicator, display_name))
continue
self._ProcessFileEntryDataStream(mediator, file_entry, data_stream)
file_entry_processed = True
if not file_entry_processed:
# For when the file entry does not contain a data stream.
self._ProcessFileEntryDataStream(mediator, file_entry, None)
finally:
new_reference_count = (
mediator.resolver_context.GetFileObjectReferenceCount(
file_entry.path_spec))
if reference_count != new_reference_count:
# Clean up after parsers that do not call close explicitly.
if mediator.resolver_context.ForceRemoveFileObject(
file_entry.path_spec):
logger.warning(
'File-object not explicitly closed for file: {0:s}'.format(
display_name))
logger.debug(
'[ProcessFileEntry] done processing file entry: {0:s}'.format(
display_name)) | [
"def",
"_ProcessFileEntry",
"(",
"self",
",",
"mediator",
",",
"file_entry",
")",
":",
"display_name",
"=",
"mediator",
".",
"GetDisplayName",
"(",
")",
"logger",
".",
"debug",
"(",
"'[ProcessFileEntry] processing file entry: {0:s}'",
".",
"format",
"(",
"display_na... | 35.363636 | 22.545455 |
def eeg_microstates_relabel(method, results, microstates_labels, reverse_microstates=None):
"""
Relabel the microstates.
"""
microstates = list(method['microstates'])
for index, microstate in enumerate(method['microstates']):
if microstate in list(reverse_microstates.keys()):
microstates[index] = reverse_microstates[microstate]
method["data"][index] = -1*method["data"][index]
if microstate in list(microstates_labels.keys()):
microstates[index] = microstates_labels[microstate]
method['microstates'] = np.array(microstates)
return(results, method) | [
"def",
"eeg_microstates_relabel",
"(",
"method",
",",
"results",
",",
"microstates_labels",
",",
"reverse_microstates",
"=",
"None",
")",
":",
"microstates",
"=",
"list",
"(",
"method",
"[",
"'microstates'",
"]",
")",
"for",
"index",
",",
"microstate",
"in",
"... | 32.526316 | 23.894737 |
def start_logging(gconfig, logpath):
'''Turn on logging and set up the global config.
This expects the :mod:`yakonfig` global configuration to be unset,
and establishes it. It starts the log system via the :mod:`dblogger`
setup. In addition to :mod:`dblogger`'s defaults, if `logpath` is
provided, a :class:`logging.handlers.RotatingFileHandler` is set to
write log messages to that file.
This should not be called if the target worker is
:class:`rejester.workers.ForkWorker`, since that manages logging on
its own.
:param dict gconfig: the :mod:`yakonfig` global configuration
:param str logpath: optional location to write a log file
'''
yakonfig.set_default_config([rejester, dblogger], config=gconfig)
if logpath:
formatter = dblogger.FixedWidthFormatter()
# TODO: do we want byte-size RotatingFileHandler or TimedRotatingFileHandler?
handler = logging.handlers.RotatingFileHandler(
logpath, maxBytes=10000000, backupCount=3)
handler.setFormatter(formatter)
logging.getLogger('').addHandler(handler) | [
"def",
"start_logging",
"(",
"gconfig",
",",
"logpath",
")",
":",
"yakonfig",
".",
"set_default_config",
"(",
"[",
"rejester",
",",
"dblogger",
"]",
",",
"config",
"=",
"gconfig",
")",
"if",
"logpath",
":",
"formatter",
"=",
"dblogger",
".",
"FixedWidthForma... | 43.72 | 24.12 |
def expand_branch_name(self, name):
"""
Expand branch names to their unambiguous form.
:param name: The name of a local or remote branch (a string).
:returns: The unambiguous form of the branch name (a string).
This internal method is used by methods like :func:`find_revision_id()`
and :func:`find_revision_number()` to detect and expand remote branch
names into their unambiguous form which is accepted by commands like
``git rev-parse`` and ``git rev-list --count``.
"""
# If no name is given we pick the default revision.
if not name:
return self.default_revision
# Run `git for-each-ref' once and remember the results.
branches = list(self.find_branches_raw())
# Check for an exact match against a local branch.
for prefix, other_name, revision_id in branches:
if prefix == 'refs/heads/' and name == other_name:
# If we find a local branch whose name exactly matches the name
# given by the caller then we consider the argument given by
# the caller unambiguous.
logger.debug("Branch name %r matches local branch.", name)
return name
# Check for an exact match against a remote branch.
for prefix, other_name, revision_id in branches:
if prefix.startswith('refs/remotes/') and name == other_name:
# If we find a remote branch whose name exactly matches the
# name given by the caller then we expand the name given by the
# caller into the full %(refname) emitted by `git for-each-ref'.
unambiguous_name = prefix + name
logger.debug("Branch name %r matches remote branch %r.", name, unambiguous_name)
return unambiguous_name
# As a fall back we return the given name without expanding it.
# This code path might not be necessary but was added out of
# conservativeness, with the goal of trying to guarantee
# backwards compatibility.
logger.debug("Failed to expand branch name %r.", name)
return name | [
"def",
"expand_branch_name",
"(",
"self",
",",
"name",
")",
":",
"# If no name is given we pick the default revision.",
"if",
"not",
"name",
":",
"return",
"self",
".",
"default_revision",
"# Run `git for-each-ref' once and remember the results.",
"branches",
"=",
"list",
"... | 54.075 | 22.825 |
def log_view(func):
"""
Helpful while debugging Selenium unittests.
e.g.: server response an error in AJAX requests
"""
@functools.wraps(func)
def view_logger(*args, **kwargs):
log.debug("call view %r", func.__name__)
try:
response = func(*args, **kwargs)
except Exception as err:
log.error("view exception: %s", err)
traceback.print_exc(file=sys.stderr)
raise
log.debug("Response: %s", response)
return response
return view_logger | [
"def",
"log_view",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"view_logger",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"debug",
"(",
"\"call view %r\"",
",",
"func",
".",
"__name__",
")",
... | 26.55 | 15.05 |
def siblings(self, **kwargs):
# type: (Any) -> Any
"""Retrieve the siblings of this `Part` as `Partset`.
Siblings are other Parts sharing the same parent of this `Part`, including the part itself.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no siblings.
:raises APIError: When an error occurs.
"""
if self.parent_id:
return self._client.parts(parent=self.parent_id, category=self.category, **kwargs)
else:
from pykechain.models.partset import PartSet
return PartSet(parts=[]) | [
"def",
"siblings",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Any) -> Any",
"if",
"self",
".",
"parent_id",
":",
"return",
"self",
".",
"_client",
".",
"parts",
"(",
"parent",
"=",
"self",
".",
"parent_id",
",",
"category",
"=",
"self",
... | 44.529412 | 24.058824 |
def find_first_match(basedir, string):
"""
return the first file that matches string starting from basedir
"""
matches = find(basedir, string)
return matches[0] if matches else matches | [
"def",
"find_first_match",
"(",
"basedir",
",",
"string",
")",
":",
"matches",
"=",
"find",
"(",
"basedir",
",",
"string",
")",
"return",
"matches",
"[",
"0",
"]",
"if",
"matches",
"else",
"matches"
] | 33.166667 | 6.5 |
def launch(self):
"""Launches pantsd in a subprocess.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: A Handle for the pantsd instance.
:rtype: PantsDaemon.Handle
"""
self.terminate(include_watchman=False)
self.watchman_launcher.maybe_launch()
self._logger.debug('launching pantsd')
self.daemon_spawn()
# Wait up to 60 seconds for pantsd to write its pidfile.
pantsd_pid = self.await_pid(60)
listening_port = self.read_named_socket('pailgun', int)
self._logger.debug('pantsd is running at pid {}, pailgun port is {}'
.format(self.pid, listening_port))
return self.Handle(pantsd_pid, listening_port, text_type(self._metadata_base_dir)) | [
"def",
"launch",
"(",
"self",
")",
":",
"self",
".",
"terminate",
"(",
"include_watchman",
"=",
"False",
")",
"self",
".",
"watchman_launcher",
".",
"maybe_launch",
"(",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"'launching pantsd'",
")",
"self",
"."... | 40.555556 | 17.444444 |
def diff_medians_abs(array_one, array_two):
"""
Computes the absolute (symmetric) difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty.
"""
abs_diff_medians = np.abs(diff_medians(array_one, array_two))
return abs_diff_medians | [
"def",
"diff_medians_abs",
"(",
"array_one",
",",
"array_two",
")",
":",
"abs_diff_medians",
"=",
"np",
".",
"abs",
"(",
"diff_medians",
"(",
"array_one",
",",
"array_two",
")",
")",
"return",
"abs_diff_medians"
] | 25.851852 | 26.074074 |
def get_ngrok_public_url():
"""Get the ngrok public HTTP URL from the local client API."""
try:
response = requests.get(url=NGROK_CLIENT_API_BASE_URL + "/tunnels",
headers={'content-type': 'application/json'})
response.raise_for_status()
except requests.exceptions.RequestException:
print("Could not connect to the ngrok client API; "
"assuming not running.")
return None
else:
for tunnel in response.json()["tunnels"]:
if tunnel.get("public_url", "").startswith("http://"):
print("Found ngrok public HTTP URL:", tunnel["public_url"])
return tunnel["public_url"] | [
"def",
"get_ngrok_public_url",
"(",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"NGROK_CLIENT_API_BASE_URL",
"+",
"\"/tunnels\"",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
")",
"response",
"... | 41.764706 | 20.882353 |
def _get_result(self) -> float:
"""Return current measurement result in lx."""
try:
data = self._bus.read_word_data(self._i2c_add, self._mode)
self._ok = True
except OSError as exc:
self.log_error("Bad reading in bus: %s", exc)
self._ok = False
return -1
count = data >> 8 | (data & 0xff) << 8
mode2coeff = 2 if self._high_res else 1
ratio = 1 / (1.2 * (self._mtreg / 69.0) * mode2coeff)
return ratio * count | [
"def",
"_get_result",
"(",
"self",
")",
"->",
"float",
":",
"try",
":",
"data",
"=",
"self",
".",
"_bus",
".",
"read_word_data",
"(",
"self",
".",
"_i2c_add",
",",
"self",
".",
"_mode",
")",
"self",
".",
"_ok",
"=",
"True",
"except",
"OSError",
"as",... | 36.571429 | 16 |
def eqdate(y):
"""
Like eq but compares datetime with y,m,d tuple.
Also accepts magic string 'TODAY'.
"""
y = datetime.date.today() if y == 'TODAY' else datetime.date(*y)
return lambda x: x == y | [
"def",
"eqdate",
"(",
"y",
")",
":",
"y",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"if",
"y",
"==",
"'TODAY'",
"else",
"datetime",
".",
"date",
"(",
"*",
"y",
")",
"return",
"lambda",
"x",
":",
"x",
"==",
"y"
] | 30.285714 | 11.428571 |
async def set_bucket(self, *,
chat: typing.Union[str, int, None] = None,
user: typing.Union[str, int, None] = None,
bucket: typing.Dict = None):
"""
Set bucket for user in chat
Chat or user is always required. If one of them is not provided,
you have to set missing value based on the provided one.
:param chat:
:param user:
:param bucket:
"""
raise NotImplementedError | [
"async",
"def",
"set_bucket",
"(",
"self",
",",
"*",
",",
"chat",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None",
",",
"user",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None... | 33.6 | 18.933333 |
def _add_has_where(self, has_query, relation, operator, count, boolean):
"""
Add the "has" condition where clause to the query.
:param has_query: The has query
:type has_query: Builder
:param relation: The relation to count
:type relation: orator.orm.relations.Relation
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:param boolean: The boolean value
:type boolean: str
:rtype: Builder
"""
self._merge_model_defined_relation_wheres_to_has_query(has_query, relation)
if isinstance(count, basestring) and count.isdigit():
count = QueryExpression(count)
return self.where(
QueryExpression("(%s)" % has_query.to_sql()), operator, count, boolean
) | [
"def",
"_add_has_where",
"(",
"self",
",",
"has_query",
",",
"relation",
",",
"operator",
",",
"count",
",",
"boolean",
")",
":",
"self",
".",
"_merge_model_defined_relation_wheres_to_has_query",
"(",
"has_query",
",",
"relation",
")",
"if",
"isinstance",
"(",
"... | 28.758621 | 21.517241 |
def _errstr(value):
"""Returns the value str, truncated to MAX_ERROR_STR_LEN characters. If
it's truncated, the returned value will have '...' on the end.
"""
value = str(value) # We won't make the caller convert value to a string each time.
if len(value) > MAX_ERROR_STR_LEN:
return value[:MAX_ERROR_STR_LEN] + '...'
else:
return value | [
"def",
"_errstr",
"(",
"value",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"# We won't make the caller convert value to a string each time.",
"if",
"len",
"(",
"value",
")",
">",
"MAX_ERROR_STR_LEN",
":",
"return",
"value",
"[",
":",
"MAX_ERROR_STR_LEN",
"]... | 36.8 | 19.4 |
def _get_model_fitting(self, mf_id):
"""
Retreive model fitting with identifier 'mf_id' from the list of model
fitting objects stored in self.model_fitting
"""
for model_fitting in self.model_fittings:
if model_fitting.activity.id == mf_id:
return model_fitting
raise Exception("Model fitting activity with id: " + str(mf_id) +
" not found.") | [
"def",
"_get_model_fitting",
"(",
"self",
",",
"mf_id",
")",
":",
"for",
"model_fitting",
"in",
"self",
".",
"model_fittings",
":",
"if",
"model_fitting",
".",
"activity",
".",
"id",
"==",
"mf_id",
":",
"return",
"model_fitting",
"raise",
"Exception",
"(",
"... | 39.363636 | 13.727273 |
def get_models(self):
"""
Get a list of content models the object subscribes to.
"""
try:
rels = self.rels_ext.content
except RequestFailed:
# if rels-ext can't be retrieved, confirm this object does not have a RELS-EXT
# (in which case, it does not have any content models)
if "RELS-EXT" not in self.ds_list.keys():
return []
else:
raise
return list(rels.objects(self.uriref, modelns.hasModel)) | [
"def",
"get_models",
"(",
"self",
")",
":",
"try",
":",
"rels",
"=",
"self",
".",
"rels_ext",
".",
"content",
"except",
"RequestFailed",
":",
"# if rels-ext can't be retrieved, confirm this object does not have a RELS-EXT",
"# (in which case, it does not have any content models... | 34.8 | 19.333333 |
def add(self):
"""
Add the currently tested element into the database.
"""
if self._authorization():
# We are authorized to work.
if self.epoch < int(PyFunceble.time()):
state = "past"
else:
state = "future"
if self.is_in_database():
# The element we are working with is in the database.
if (
str(self.epoch)
!= PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][
PyFunceble.INTERN["to_test"]
]["epoch"]
):
# The given epoch is diffent from the one saved.
# We update it.
PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][
PyFunceble.INTERN["to_test"]
].update(
{
"epoch": str(self.epoch),
"state": state,
"expiration_date": self.expiration_date,
}
)
elif self.is_time_older():
# The expiration date from the database is in the past.
if (
PyFunceble.INTERN["whois_db"][
PyFunceble.INTERN["file_to_test"]
][PyFunceble.INTERN["to_test"]]["state"]
!= "past"
): # pragma: no cover
# The state of the element in the datbase is not
# equal to `past`.
# We update it to `past`.
PyFunceble.INTERN["whois_db"][
PyFunceble.INTERN["file_to_test"]
][PyFunceble.INTERN["to_test"]].update({"state": "past"})
elif (
PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][
PyFunceble.INTERN["to_test"]
]["state"]
!= "future"
):
# * The expiration date from the database is in the future.
# and
# * The state of the element in the database is not
# equal to `future`.
# We update it to `future`.
PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][
PyFunceble.INTERN["to_test"]
].update({"state": "future"})
else:
# The element we are working with is not in the database.
if (
not PyFunceble.INTERN["file_to_test"]
in PyFunceble.INTERN["whois_db"]
):
# The file path is not in the database.
# We initiate it.
PyFunceble.INTERN["whois_db"][
PyFunceble.INTERN["file_to_test"]
] = {}
# We create the first dataset.
PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]].update(
{
PyFunceble.INTERN["to_test"]: {
"epoch": str(self.epoch),
"state": state,
"expiration_date": self.expiration_date,
}
}
)
# We do a safety backup of our database.
self._backup() | [
"def",
"add",
"(",
"self",
")",
":",
"if",
"self",
".",
"_authorization",
"(",
")",
":",
"# We are authorized to work.",
"if",
"self",
".",
"epoch",
"<",
"int",
"(",
"PyFunceble",
".",
"time",
"(",
")",
")",
":",
"state",
"=",
"\"past\"",
"else",
":",
... | 38.408602 | 20.27957 |
def is_same_filename (filename1, filename2):
"""Check if filename1 and filename2 are the same filename."""
return os.path.realpath(filename1) == os.path.realpath(filename2) | [
"def",
"is_same_filename",
"(",
"filename1",
",",
"filename2",
")",
":",
"return",
"os",
".",
"path",
".",
"realpath",
"(",
"filename1",
")",
"==",
"os",
".",
"path",
".",
"realpath",
"(",
"filename2",
")"
] | 59.333333 | 11 |
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore') | [
"def",
"_load_w2v",
"(",
"model_file",
"=",
"_f_model",
",",
"binary",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"model_file",
")",
":",
"print",
"(",
"\"os.path : \"",
",",
"os",
".",
"path",
")",
"raise",
"Exception",... | 37.111111 | 16.888889 |
def send(self):
"""
Send the message.
First, a message is constructed, then a session with the email
servers is created, finally the message is sent and the session
is stopped.
"""
self._generate_email()
if self.verbose:
print(
"Debugging info"
"\n--------------"
"\n{} Message created.".format(timestamp())
)
recipients = []
for i in (self.to, self.cc, self.bcc):
if i:
if isinstance(i, MutableSequence):
recipients += i
else:
recipients.append(i)
session = self._get_session()
if self.verbose:
print(timestamp(), "Login successful.")
session.sendmail(self.from_, recipients, self.message.as_string())
session.quit()
if self.verbose:
print(timestamp(), "Logged out.")
if self.verbose:
print(
timestamp(),
type(self).__name__ + " info:",
self.__str__(indentation="\n * "),
)
print("Message sent.") | [
"def",
"send",
"(",
"self",
")",
":",
"self",
".",
"_generate_email",
"(",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"Debugging info\"",
"\"\\n--------------\"",
"\"\\n{} Message created.\"",
".",
"format",
"(",
"timestamp",
"(",
")",
")",
")",
... | 27.404762 | 18.97619 |
def _count_tasks(self):
"""Count the number of tasks, both in the json and directory.
Returns
-------
num_tasks : int
The total number of all tasks included in the `tasks.json` file.
"""
self.log.warning("Tasks:")
tasks, task_names = self.catalog._load_task_list_from_file()
# Total number of all tasks
num_tasks = len(tasks)
# Number which are active by default
num_tasks_act = len([tt for tt, vv in tasks.items() if vv.active])
# Number of python files in the tasks directory
num_task_files = os.path.join(self.catalog.PATHS.tasks_dir, '*.py')
num_task_files = len(glob(num_task_files))
tasks_str = "{} ({} default active) with {} task-files.".format(
num_tasks, num_tasks_act, num_task_files)
self.log.warning(tasks_str)
return num_tasks | [
"def",
"_count_tasks",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Tasks:\"",
")",
"tasks",
",",
"task_names",
"=",
"self",
".",
"catalog",
".",
"_load_task_list_from_file",
"(",
")",
"# Total number of all tasks",
"num_tasks",
"=",
"len... | 40.045455 | 18.772727 |
def create_threadpool_executed_func(original_func):
"""
Returns a function wrapper that defers function calls execute inside gevent's threadpool but keeps any exception
or backtrace in the caller's context.
:param original_func: function to wrap
:returns: wrapper function
"""
def wrapped_func(*args, **kwargs):
try:
result = original_func(*args, **kwargs)
return True, result
except:
return False, sys.exc_info()
def new_func(*args, **kwargs):
status, result = gevent.get_hub().threadpool.apply(wrapped_func, args, kwargs)
if status:
return result
else:
six.reraise(*result)
new_func.__name__ = original_func.__name__
new_func.__doc__ = "(gevent-friendly)" + (" " + original_func.__doc__ if original_func.__doc__ is not None else "")
return new_func | [
"def",
"create_threadpool_executed_func",
"(",
"original_func",
")",
":",
"def",
"wrapped_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"result",
"=",
"original_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"T... | 37.956522 | 19.347826 |
def _fill_table_entry(self, row, col):
"""""
Fill an entry of the observation table.
Args:
row (str): The row of the observation table
col (str): The column of the observation table
Returns:
None
"""
self.observation_table[row, col] = self._membership_query(row + col) | [
"def",
"_fill_table_entry",
"(",
"self",
",",
"row",
",",
"col",
")",
":",
"self",
".",
"observation_table",
"[",
"row",
",",
"col",
"]",
"=",
"self",
".",
"_membership_query",
"(",
"row",
"+",
"col",
")"
] | 34.3 | 15.3 |
def _adjust_boundaries(self, boundary_indices, text_file, real_wave_mfcc, sync_root, force_aba_auto=False, leaf_level=False):
"""
Adjust boundaries as requested by the user.
Return the computed time map, that is,
a list of pairs ``[start_time, end_time]``,
of length equal to number of fragments + 2,
where the two extra elements are for
the HEAD (first) and TAIL (last).
"""
# boundary_indices contains the boundary indices in the all_mfcc of real_wave_mfcc
# starting with the (head-1st fragment) and ending with (-1th fragment-tail)
aba_parameters = self.task.configuration.aba_parameters()
if force_aba_auto:
self.log(u"Forced running algorithm: 'auto'")
aba_parameters["algorithm"] = (AdjustBoundaryAlgorithm.AUTO, [])
# note that the other aba settings (nonspeech and nozero)
# remain as specified by the user
self.log([u"ABA parameters: %s", aba_parameters])
aba = AdjustBoundaryAlgorithm(rconf=self.rconf, logger=self.logger)
aba.adjust(
aba_parameters=aba_parameters,
real_wave_mfcc=real_wave_mfcc,
boundary_indices=boundary_indices,
text_file=text_file,
allow_arbitrary_shift=leaf_level
)
aba.append_fragment_list_to_sync_root(sync_root=sync_root) | [
"def",
"_adjust_boundaries",
"(",
"self",
",",
"boundary_indices",
",",
"text_file",
",",
"real_wave_mfcc",
",",
"sync_root",
",",
"force_aba_auto",
"=",
"False",
",",
"leaf_level",
"=",
"False",
")",
":",
"# boundary_indices contains the boundary indices in the all_mfcc ... | 49.107143 | 19.321429 |
def public_ip_addresses_list_all(**kwargs):
'''
.. versionadded:: 2019.2.0
List all public IP addresses within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.public_ip_addresses_list_all
'''
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
pub_ips = __utils__['azurearm.paged_object_to_list'](netconn.public_ip_addresses.list_all())
for ip in pub_ips:
result[ip['name']] = ip
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | [
"def",
"public_ip_addresses_list_all",
"(",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"{",
"}",
"netconn",
"=",
"__utils__",
"[",
"'azurearm.get_client'",
"]",
"(",
"'network'",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"pub_ips",
"=",
"__utils__",
"[",... | 26.16 | 26.96 |
def drop_continuous_query(self, name, database=None):
"""Drop an existing continuous query for a database.
:param name: the name of continuous query to drop
:type name: str
:param database: the database for which the continuous query is
dropped. Defaults to current client's database
:type database: str
"""
query_string = (
"DROP CONTINUOUS QUERY {0} ON {1}"
).format(quote_ident(name), quote_ident(database or self._database))
self.query(query_string) | [
"def",
"drop_continuous_query",
"(",
"self",
",",
"name",
",",
"database",
"=",
"None",
")",
":",
"query_string",
"=",
"(",
"\"DROP CONTINUOUS QUERY {0} ON {1}\"",
")",
".",
"format",
"(",
"quote_ident",
"(",
"name",
")",
",",
"quote_ident",
"(",
"database",
"... | 41.384615 | 16.538462 |
def remove_docstrings(tokens):
"""
Removes docstrings from *tokens* which is expected to be a list equivalent
of `tokenize.generate_tokens()` (so we can update in-place).
"""
prev_tok_type = None
for index, tok in enumerate(tokens):
token_type = tok[0]
if token_type == tokenize.STRING:
if prev_tok_type == tokenize.INDENT:
# Definitely a docstring
tokens[index][1] = '' # Remove it
# Remove the leftover indentation and newline:
tokens[index-1][1] = ''
tokens[index-2][1] = ''
elif prev_tok_type == tokenize.NL:
# This captures whole-module docstrings:
if tokens[index+1][0] == tokenize.NEWLINE:
tokens[index][1] = ''
# Remove the trailing newline:
tokens[index+1][1] = ''
prev_tok_type = token_type | [
"def",
"remove_docstrings",
"(",
"tokens",
")",
":",
"prev_tok_type",
"=",
"None",
"for",
"index",
",",
"tok",
"in",
"enumerate",
"(",
"tokens",
")",
":",
"token_type",
"=",
"tok",
"[",
"0",
"]",
"if",
"token_type",
"==",
"tokenize",
".",
"STRING",
":",
... | 41.954545 | 9.227273 |
def evalsha(self, sha, numkeys, *keys_and_args):
"""Emulates evalsha"""
if not self.script_exists(sha)[0]:
raise RedisError("Sha not registered")
script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args) | [
"def",
"evalsha",
"(",
"self",
",",
"sha",
",",
"numkeys",
",",
"*",
"keys_and_args",
")",
":",
"if",
"not",
"self",
".",
"script_exists",
"(",
"sha",
")",
"[",
"0",
"]",
":",
"raise",
"RedisError",
"(",
"\"Sha not registered\"",
")",
"script_callable",
... | 44.777778 | 8.333333 |
def get_files_types(self):
"""
Return the files inside the APK with their associated types (by using python-magic)
At the same time, the CRC32 are calculated for the files.
:rtype: a dictionnary
"""
if self._files == {}:
# Generate File Types / CRC List
for i in self.get_files():
buffer = self._get_crc32(i)
self._files[i] = self._get_file_magic_name(buffer)
return self._files | [
"def",
"get_files_types",
"(",
"self",
")",
":",
"if",
"self",
".",
"_files",
"==",
"{",
"}",
":",
"# Generate File Types / CRC List",
"for",
"i",
"in",
"self",
".",
"get_files",
"(",
")",
":",
"buffer",
"=",
"self",
".",
"_get_crc32",
"(",
"i",
")",
"... | 31.933333 | 18.733333 |
def extracturls(mesg):
"""Given a text message, extract all the URLs found in the message, along
with their surrounding context. The output is a list of sequences of Chunk
objects, corresponding to the contextual regions extracted from the string.
"""
lines = NLRE.split(mesg)
# The number of lines of context above to provide.
# above_context = 1
# The number of lines of context below to provide.
# below_context = 1
# Plan here is to first transform lines into the form
# [line_fragments] where each fragment is a chunk as
# seen by parse_text_urls. Note that this means that
# lines with more than one entry or one entry that's
# a URL are the only lines containing URLs.
linechunks = [parse_text_urls(l) for l in lines]
return extract_with_context(linechunks,
lambda chunk: len(chunk) > 1 or
(len(chunk) == 1 and chunk[0].url is not None),
1, 1) | [
"def",
"extracturls",
"(",
"mesg",
")",
":",
"lines",
"=",
"NLRE",
".",
"split",
"(",
"mesg",
")",
"# The number of lines of context above to provide.",
"# above_context = 1",
"# The number of lines of context below to provide.",
"# below_context = 1",
"# Plan here is to first tr... | 39.72 | 20.92 |
def get_form_success_data(self, form):
"""
Allows customization of the JSON data returned when a valid form submission occurs.
"""
data = {
"html": render_to_string(
"pinax/teams/_invite_form.html",
{
"invite_form": self.get_unbound_form(),
"team": self.team
},
request=self.request
)
}
membership = self.membership
if membership is not None:
if membership.state == Membership.STATE_APPLIED:
fragment_class = ".applicants"
elif membership.state == Membership.STATE_INVITED:
fragment_class = ".invitees"
elif membership.state in (Membership.STATE_AUTO_JOINED, Membership.STATE_ACCEPTED):
fragment_class = {
Membership.ROLE_OWNER: ".owners",
Membership.ROLE_MANAGER: ".managers",
Membership.ROLE_MEMBER: ".members"
}[membership.role]
data.update({
"append-fragments": {
fragment_class: render_to_string(
"pinax/teams/_membership.html",
{
"membership": membership,
"team": self.team
},
request=self.request
)
}
})
return data | [
"def",
"get_form_success_data",
"(",
"self",
",",
"form",
")",
":",
"data",
"=",
"{",
"\"html\"",
":",
"render_to_string",
"(",
"\"pinax/teams/_invite_form.html\"",
",",
"{",
"\"invite_form\"",
":",
"self",
".",
"get_unbound_form",
"(",
")",
",",
"\"team\"",
":"... | 37.1 | 15.4 |
def fix_style(style='basic', ax=None, **kwargs):
'''
Add an extra formatting layer to an axe, that couldn't be changed directly
in matplotlib.rcParams or with styles. Apply this function to every axe
you created.
Parameters
----------
ax: a matplotlib axe.
If None, the last axe generated is used
style: string or list of string
['basic', 'article', 'poster', 'B&W','talk','origin']
one of the styles previously defined. It should match the style you
chose in set_style but nothing forces you to.
kwargs: dict
edit any of the style_params keys. ex:
>>> tight_layout=False
Examples
--------
plb.set_style('poster')
plt.plot(a,np.cos(a))
plb.fix_style('poster',**{'draggable_legend':False})
See Also
--------
:func:`~publib.publib.set_style`
:func:`~publib.tools.tools.reset_defaults`
'''
style = _read_style(style)
# Apply all styles
for s in style:
if not s in style_params.keys():
avail = [f.replace('.mplstyle', '') for f in os.listdir(
_get_lib()) if f.endswith('.mplstyle')]
raise ValueError('{0} is not a valid style. '.format(s) +
'Please pick a style from the list available in ' +
'{0}: {1}'.format(_get_lib(), avail))
_fix_style(style, ax, **kwargs) | [
"def",
"fix_style",
"(",
"style",
"=",
"'basic'",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"style",
"=",
"_read_style",
"(",
"style",
")",
"# Apply all styles",
"for",
"s",
"in",
"style",
":",
"if",
"not",
"s",
"in",
"style_params",
... | 30.521739 | 23.913043 |
def insert_level(df, label, level=0, copy=0, axis=0, level_name=None):
"""Add a new level to the index with the specified label. The newly created index will be a MultiIndex.
:param df: DataFrame
:param label: label to insert
:param copy: If True, copy the DataFrame before assigning new index
:param axis: If 0, then columns. If 1, then index
:return:
"""
df = df if not copy else df.copy()
src = df.columns if axis == 0 else df.index
current = [src.get_level_values(lvl) for lvl in range(src.nlevels)]
current.insert(level, [label] * len(src))
idx = pd.MultiIndex.from_arrays(current)
level_name and idx.set_names(level_name, level, inplace=1)
if axis == 0:
df.columns = idx
else:
df.index = idx
return df | [
"def",
"insert_level",
"(",
"df",
",",
"label",
",",
"level",
"=",
"0",
",",
"copy",
"=",
"0",
",",
"axis",
"=",
"0",
",",
"level_name",
"=",
"None",
")",
":",
"df",
"=",
"df",
"if",
"not",
"copy",
"else",
"df",
".",
"copy",
"(",
")",
"src",
... | 39.2 | 17.4 |
def request_many(self, queries, source='Datastream',
fields=None, options=None, symbol_set=None, tag=None):
"""General function to retrieve one record in raw format.
query - list of query strings for DWE system.
source - The name of datasource (default: "Datastream")
fields - Fields to be retrieved (used when the requester does not want all
fields to be delivered).
options - Options for specific data source. Many of datasources do not require
opptions string. Refer to the documentation of the specific
datasource for allowed syntax.
symbol_set - The symbol set used inside the instrument (used for mapping
identifiers within the request. Refer to the documentation for
the details.
tag - User-defined cookie that can be used to match up requests and response.
It will be returned back in the response. The string should not be
longer than 256 characters.
NB! source, options, symbol_set and tag are assumed to be identical for all
requests in the list
"""
if self.show_request:
print(('Requests:', queries))
if not isinstance(queries, list):
queries = [queries]
req = self.client.factory.create('ArrayOfRequestData')
req.RequestData = []
for q in queries:
rd = self.client.factory.create('RequestData')
rd.Source = source
rd.Instrument = q
if fields is not None:
rd.Fields = self.client.factory.create('ArrayOfString')
rd.Fields.string = fields
rd.SymbolSet = symbol_set
rd.Options = options
rd.Tag = tag
req.RequestData.append(rd)
return self.client.service.RequestRecords(self.userdata, req, 0)[0] | [
"def",
"request_many",
"(",
"self",
",",
"queries",
",",
"source",
"=",
"'Datastream'",
",",
"fields",
"=",
"None",
",",
"options",
"=",
"None",
",",
"symbol_set",
"=",
"None",
",",
"tag",
"=",
"None",
")",
":",
"if",
"self",
".",
"show_request",
":",
... | 46.214286 | 21.166667 |
def document_url(self):
"""
Constructs and returns the document URL.
:returns: Document URL
"""
if '_id' not in self or self['_id'] is None:
return None
# handle design document url
if self['_id'].startswith('_design/'):
return '/'.join((
self._database_host,
url_quote_plus(self._database_name),
'_design',
url_quote(self['_id'][8:], safe='')
))
# handle document url
return '/'.join((
self._database_host,
url_quote_plus(self._database_name),
url_quote(self['_id'], safe='')
)) | [
"def",
"document_url",
"(",
"self",
")",
":",
"if",
"'_id'",
"not",
"in",
"self",
"or",
"self",
"[",
"'_id'",
"]",
"is",
"None",
":",
"return",
"None",
"# handle design document url",
"if",
"self",
"[",
"'_id'",
"]",
".",
"startswith",
"(",
"'_design/'",
... | 28.125 | 14.458333 |
def do_setmode(self, arg):
''' shift from ASM to DISASM '''
op_modes = config.get_op_modes()
if arg in op_modes:
op_mode = op_modes[arg]
op_mode.cmdloop()
else:
print("Error: unknown operational mode, please use 'help setmode'.") | [
"def",
"do_setmode",
"(",
"self",
",",
"arg",
")",
":",
"op_modes",
"=",
"config",
".",
"get_op_modes",
"(",
")",
"if",
"arg",
"in",
"op_modes",
":",
"op_mode",
"=",
"op_modes",
"[",
"arg",
"]",
"op_mode",
".",
"cmdloop",
"(",
")",
"else",
":",
"prin... | 36.25 | 13.75 |
def email_on_invoice_change(cls, invoice, old_status, new_status):
''' Sends out all of the necessary notifications that the status of the
invoice has changed to:
- Invoice is now paid
- Invoice is now refunded
'''
# The statuses that we don't care about.
silent_status = [
commerce.Invoice.STATUS_VOID,
commerce.Invoice.STATUS_UNPAID,
]
if old_status == new_status:
return
if False and new_status in silent_status:
pass
cls.email(invoice, "invoice_updated") | [
"def",
"email_on_invoice_change",
"(",
"cls",
",",
"invoice",
",",
"old_status",
",",
"new_status",
")",
":",
"# The statuses that we don't care about.",
"silent_status",
"=",
"[",
"commerce",
".",
"Invoice",
".",
"STATUS_VOID",
",",
"commerce",
".",
"Invoice",
".",... | 27.571429 | 21.095238 |
def copy(self, src_fs, src_path, dst_fs, dst_path):
# type: (FS, Text, FS, Text) -> None
"""Copy a file from one fs to another."""
if self.queue is None:
# This should be the most performant for a single-thread
copy_file_internal(src_fs, src_path, dst_fs, dst_path)
else:
src_file = src_fs.openbin(src_path, "r")
try:
dst_file = dst_fs.openbin(dst_path, "w")
except Exception:
src_file.close()
raise
task = _CopyTask(src_file, dst_file)
self.queue.put(task) | [
"def",
"copy",
"(",
"self",
",",
"src_fs",
",",
"src_path",
",",
"dst_fs",
",",
"dst_path",
")",
":",
"# type: (FS, Text, FS, Text) -> None",
"if",
"self",
".",
"queue",
"is",
"None",
":",
"# This should be the most performant for a single-thread",
"copy_file_internal",... | 40.466667 | 14.133333 |
def add_paginated_grid_widget(self, part_model, delete=False, edit=True, export=True, clone=True,
                              new_instance=False, parent_part_instance=None, max_height=None, custom_title=False,
                              emphasize_edit=False, emphasize_clone=False, emphasize_new_instance=True,
                              sort_property=None, sort_direction=SortTable.ASCENDING, page_size=25,
                              collapse_filters=False):
    """
    Add a KE-chain paginatedGrid (e.g. paginated table widget) to the customization.
    The widget will be saved to KE-chain.
    :param emphasize_new_instance: Emphasize the New instance button (default True)
    :type emphasize_new_instance: bool
    :param emphasize_edit: Emphasize the Edit button (default False)
    :type emphasize_edit: bool
    :param emphasize_clone: Emphasize the Clone button (default False)
    :type emphasize_clone: bool
    :param new_instance: Show or hide the New instance button (default False). You need to provide a
        `parent_part_instance` in order for this to work.
    :type new_instance: bool
    :param export: Show or hide the Export Grid button (default True)
    :type export: bool
    :param edit: Show or hide the Edit button (default True)
    :type edit: bool
    :param clone: Show or hide the Clone button (default True)
    :type clone: bool
    :param delete: Show or hide the Delete button (default False)
    :type delete: bool
    :param page_size: Number of parts that will be shown per page in the grid.
    :type page_size: int
    :param collapse_filters: Hide or show the filters pane (default False)
    :type collapse_filters: bool
    :param part_model: The part model based on which all instances will be shown.
    :type part_model: :class:`Part` or UUID
    :param parent_part_instance: The parent part instance for which the instances will be shown or to which new
        instances will be added.
    :type parent_part_instance: :class:`Part` or UUID
    :param max_height: The max height of the paginated grid in pixels
    :type max_height: int or None
    :param custom_title: A custom title for the paginated grid::
        * False (default): Part instance name
        * String value: Custom title
        * None: No title
    :type custom_title: bool or basestring or None
    :param sort_property: The property model on which the part instances are being sorted on
    :type sort_property: :class:`Property` or UUID
    :param sort_direction: The direction on which the values of property instances are being sorted on::
        * ASC (default): Sort in ascending order
        * DESC: Sort in descending order
    :type sort_direction: basestring (see :class:`enums.SortTable`)
    :raises IllegalArgumentError: When unknown or illegal arguments are passed.
    """
    height = max_height
    # Check whether the part_model is uuid type or class `Part`
    if isinstance(part_model, Part):
        part_model_id = part_model.id
    elif isinstance(part_model, text_type) and is_uuid(part_model):
        part_model_id = part_model
        part_model = self._client.model(id=part_model_id)
    else:
        raise IllegalArgumentError("When using the add_paginated_grid_widget, part_model must be a Part or Part id."
                                   " Type is: {}".format(type(part_model)))
    # Check whether the parent_part_instance is uuid type or class `Part`
    if isinstance(parent_part_instance, Part):
        parent_part_instance_id = parent_part_instance.id
    elif isinstance(parent_part_instance, text_type) and is_uuid(parent_part_instance):
        parent_part_instance_id = parent_part_instance
        parent_part_instance = self._client.part(id=parent_part_instance_id)
    elif isinstance(parent_part_instance, type(None)):
        parent_part_instance_id = None
    else:
        raise IllegalArgumentError("When using the add_paginated_grid_widget, parent_part_instance must be a "
                                   "Part, Part id or None. Type is: {}".format(type(parent_part_instance)))
    # Check whether the sort_property is uuid type or class `Property`
    if isinstance(sort_property, Property):
        sort_property_id = sort_property.id
    elif isinstance(sort_property, text_type) and is_uuid(sort_property):
        sort_property_id = sort_property
        sort_property = self._client.property(id=sort_property_id, category=Category.MODEL)
    elif isinstance(sort_property, type(None)):
        sort_property_id = None
    else:
        raise IllegalArgumentError("When using the add_paginated_grid_widget, sort_property must be a "
                                   "Property, Property id or None. Type is: {}".format(type(sort_property)))
    # Assertions
    if not parent_part_instance and new_instance:
        raise IllegalArgumentError("If you want to allow the creation of new part instances, you must specify a "
                                   "parent_part_instance")
    if sort_property and sort_property.part.id != part_model.id:
        raise IllegalArgumentError("If you want to sort on a property, then sort_property must be located under "
                                   "part_model")
    # Add custom title
    if custom_title is False:
        show_title_value = "Default"
        title = part_model.name
    elif custom_title is None:
        show_title_value = "No title"
        title = ' '
    else:
        show_title_value = "Custom title"
        title = str(custom_title)
    # Set the collapse filters value
    if collapse_filters:
        collapse_filters_value = "Collapsed"
    else:
        collapse_filters_value = "Expanded"
    # Declare paginatedGrid config
    config = {
        "xtype": ComponentXType.FILTEREDGRID,
        "filter": {
            "activity_id": str(self.activity.id),
        },
        "grid": {
            "viewModel": {
                "data": {
                    "actions": {
                        "delete": delete,
                        "edit": edit,
                        "export": export,
                        "newInstance": new_instance,
                        "cloneInstance": clone
                    },
                    "sorters": [{
                        "direction": sort_direction,
                        "property": sort_property_id
                    }] if sort_property_id else [],
                    "ui": {
                        "newInstance": "primary-action" if emphasize_new_instance else "default-toolbar",
                        "edit": "primary-action" if emphasize_edit else "default-toolbar",
                        "cloneInstance": "primary-action" if emphasize_clone else "default-toolbar"
                    },
                    "pageSize": page_size
                }
            },
            "xtype": ComponentXType.PAGINATEDSUPERGRID,
            "title": title,
            "showTitleValue": show_title_value,
        },
        "maxHeight": height if height else None,
        "parentInstanceId": parent_part_instance_id,
        "partModelId": part_model_id,
        "collapseFilters": collapse_filters
    }
    # Declare the meta info for the paginatedGrid
    meta = {
        # Bug fix: was "defualt-toolbar" (typo); all sibling button UI keys
        # use "default-toolbar".
        "cloneButtonUi": "primary-action" if emphasize_clone else "default-toolbar",
        "cloneButtonVisible": clone,
        "primaryCloneUiValue": emphasize_clone,
        "parentInstanceId": parent_part_instance_id,
        "editButtonUi": "primary-action" if emphasize_edit else "default-toolbar",
        "editButtonVisible": edit,
        "customHeight": height if height else None,
        "primaryAddUiValue": emphasize_new_instance,
        "activityId": str(self.activity.id),
        "customTitle": title,
        "primaryEditUiValue": emphasize_edit,
        "downloadButtonVisible": export,
        "addButtonUi": "primary-action" if emphasize_new_instance else "default-toolbar",
        "deleteButtonVisible": delete,
        "addButtonVisible": new_instance,
        "showTitleValue": show_title_value,
        "partModelId": str(part_model_id),
        "showHeightValue": "Set height" if height else "Automatic height",
        "sortDirection": sort_direction,
        "sortedColumn": sort_property_id if sort_property_id else None,
        "collapseFilters": collapse_filters,
        "showCollapseFiltersValue": collapse_filters_value,
        "customPageSize": page_size
    }
    self._add_widget(dict(config=config, meta=meta, name=WidgetNames.FILTEREDGRIDWIDGET))
"def",
"add_paginated_grid_widget",
"(",
"self",
",",
"part_model",
",",
"delete",
"=",
"False",
",",
"edit",
"=",
"True",
",",
"export",
"=",
"True",
",",
"clone",
"=",
"True",
",",
"new_instance",
"=",
"False",
",",
"parent_part_instance",
"=",
"None",
"... | 50.892655 | 23.367232 |
def inflect(self):
    """Return the cached ``inflect`` engine, constructing it lazily."""
    if self._inflect is not None:
        return self._inflect
    # Import deferred so the dependency is only loaded when first needed.
    import inflect
    self._inflect = inflect.engine()
    return self._inflect
"def",
"inflect",
"(",
"self",
")",
":",
"if",
"self",
".",
"_inflect",
"is",
"None",
":",
"import",
"inflect",
"self",
".",
"_inflect",
"=",
"inflect",
".",
"engine",
"(",
")",
"return",
"self",
".",
"_inflect"
] | 27.142857 | 14.142857 |
def isMasterReqLatencyTooHigh(self):
    """
    Return whether the request latency of the master instance is greater
    than the acceptable threshold
    """
    # TODO for now, view_change procedure can take more that 15 minutes
    # (5 minutes for catchup and 10 minutes for primary's answer).
    # Therefore, view_change triggering by max latency is not indicative now.
    breach = self.masterReqLatencyTooHigh
    if not breach:
        # Fall back to scanning per-request latencies for the first one
        # exceeding the Lambda threshold; None when all are within bounds.
        breach = next(
            ((req_key, latency)
             for req_key, latency in self.masterReqLatencies.items()
             if latency > self.Lambda),
            None)
    if breach:
        logger.display("{}{} found master's latency {} to be higher than the threshold for request {}.".
                       format(MONITORING_PREFIX, self, breach[1], breach[0]))
    else:
        logger.trace("{} found master's latency to be lower than the "
                     "threshold for all requests.".format(self))
    return breach
"def",
"isMasterReqLatencyTooHigh",
"(",
"self",
")",
":",
"# TODO for now, view_change procedure can take more that 15 minutes",
"# (5 minutes for catchup and 10 minutes for primary's answer).",
"# Therefore, view_change triggering by max latency is not indicative now.",
"r",
"=",
"self",
"... | 48.789474 | 25 |
def find_1wf_files(self):
    """
    Abinit adds the idir-ipert index at the end of the 1WF file and this breaks the extension
    e.g. out_1WF4. This method scans the files in the directories and returns a list of namedtuple
    Each named tuple gives the `path` of the 1FK file and the `pertcase` index.

    Returns None if no 1WF file is found; otherwise the list is sorted by
    ascending pertcase.
    """
    regex = re.compile(r"out_1WF(\d+)(\.nc)?$")
    wf_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
    if not wf_paths: return None
    # Build list of (pertcase, path) tuples; group(1) is the pertcase digits,
    # the optional ".nc" extension (group 2) is irrelevant here.
    pertfile_list = []
    for path in wf_paths:
        match = regex.match(os.path.basename(path))
        pertfile_list.append((int(match.group(1)), path))
    # Sort on pertcase (DSU sort).
    pertfile_list.sort(key=lambda t: t[0])
    return [dict2namedtuple(pertcase=pertcase, path=path) for pertcase, path in pertfile_list]
"def",
"find_1wf_files",
"(",
"self",
")",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"r\"out_1WF(\\d+)(\\.nc)?$\"",
")",
"wf_paths",
"=",
"[",
"f",
"for",
"f",
"in",
"self",
".",
"list_filepaths",
"(",
")",
"if",
"regex",
".",
"match",
"(",
"os",
"... | 43.590909 | 23.318182 |
def render(self):
    """ Render the menu into a sorted by order multi dict """
    menu_list = []
    menu_index = 0
    # NOTE(review): deepcopy so per-request mutation below (active flags,
    # _id assignment) never leaks into the shared self.MENU definition.
    for _, menu in copy.deepcopy(self.MENU).items():
        subnav = []
        menu["kwargs"]["_id"] = str(menu_index)
        menu["kwargs"]["active"] = False
        if "visible" in menu["kwargs"]:
            menu["kwargs"]["visible"] = self._test_visibility(menu["kwargs"]["visible"])
        for s in menu["subnav"]:
            if s["title"]:
                s["title"] = self._get_title(s["title"])
            # A sub-entry matching the current request endpoint marks both
            # itself and its parent menu as active.
            if s["endpoint"] == request.endpoint:
                s["active"] = True
                menu["kwargs"]["active"] = True
            s["visible"] = self._test_visibility(s["visible"])
            # Each sub-entry consumes the next index as its _id.
            menu_index += 1
            s["_id"] = str(menu_index)
            subnav.append(s)
        _kwargs = menu["kwargs"]
        if menu["title"]:
            # Titled menu: emit a single entry carrying its sorted subnav.
            _kwargs.update({
                "subnav": self._sort(subnav),
                "order": menu["order"],
                "title": self._get_title(menu["title"])
            })
            menu_list.append(_kwargs)
        else:
            # Untitled menu: flatten its sub-entries into the top level.
            menu_list += subnav
        menu_index += 1
    return self._sort(menu_list)
"def",
"render",
"(",
"self",
")",
":",
"menu_list",
"=",
"[",
"]",
"menu_index",
"=",
"0",
"for",
"_",
",",
"menu",
"in",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"MENU",
")",
".",
"items",
"(",
")",
":",
"subnav",
"=",
"[",
"]",
"menu",
"["... | 34.105263 | 16.552632 |
def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):
    """Find the best matches for *query* among *choices*.

    Each choice is scored against the query and the results are returned
    best-first as (match, score) tuples — or (match, score, key) tuples
    when *choices* is a dictionary, in which case each value is matched
    and its key is included in the result.

    Arguments:
        query: The thing we want to find.
        choices: Iterable or dict-like collection of candidate matches.
        processor: Function f(a) -> b applied to the query and to each
            choice before matching (e.g. ``lambda x: x[0]`` to match on
            the first element). Defaults to fuzzywuzzy.utils.full_process().
        scorer: Function f(query, choice) -> int scoring one processed
            choice against the query. Defaults to fuzz.WRatio(), which
            expects string inputs.
        limit: Maximum number of results to return; None means all.
            Defaults to 5.

    Returns:
        List of tuples ordered by descending score. Example: searching
        for 'bird' in {'bard': 'train', 'dog': 'man'} may return
        [('train', 22, 'bard'), ('man', 0, 'dog')].
    """
    scored = extractWithoutOrder(query, choices, processor, scorer)
    if limit is None:
        # No cap requested: full descending sort by score.
        return sorted(scored, key=lambda item: item[1], reverse=True)
    # Capped: a bounded heap selection is cheaper than a full sort.
    return heapq.nlargest(limit, scored, key=lambda item: item[1])
"def",
"extract",
"(",
"query",
",",
"choices",
",",
"processor",
"=",
"default_processor",
",",
"scorer",
"=",
"default_scorer",
",",
"limit",
"=",
"5",
")",
":",
"sl",
"=",
"extractWithoutOrder",
"(",
"query",
",",
"choices",
",",
"processor",
",",
"scor... | 41.479167 | 27 |
def sendMessage(self, chat_id, text, parse_mode=None, disable_web=None, reply_msg_id=None, markup=None):
    """Send ``text`` to ``chat_id`` through the bot's sendMessage endpoint.

    On failure returns False; on success returns a Message object.
    """
    payload = {
        'chat_id': chat_id,
        'text': text,
        'parse_mode': parse_mode,
        'disable_web_page_preview': disable_web,
        'reply_to_message_id': reply_msg_id,
    }
    if markup:
        # Keyboard/markup objects must be serialized to JSON for the API.
        payload['reply_markup'] = json.dumps(markup)
    return _validate_response_msg(self._command('sendMessage', payload, method='post'))
"def",
"sendMessage",
"(",
"self",
",",
"chat_id",
",",
"text",
",",
"parse_mode",
"=",
"None",
",",
"disable_web",
"=",
"None",
",",
"reply_msg_id",
"=",
"None",
",",
"markup",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"'chat_id'",
":",
"chat_id",
"... | 45.272727 | 32.727273 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.