text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:
"""Try to add the variable with its replacement to the substitution.
This considers an existing replacement and will only succeed if the new replacement
can be merged with the old replacement. Merging can occur if either the two replacements
are equivalent. Replacements can also be merged if the old replacement for the variable_name was
unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:
>>> subst = Substitution({'x': Multiset(['a', 'b'])})
>>> subst.try_add_variable('x', ('a', 'b'))
>>> print(subst)
{x ↦ (a, b)}
Args:
variable:
The name of the variable to add.
replacement:
The replacement for the variable.
Raises:
ValueError:
if the variable cannot be merged because it conflicts with the existing
substitution for the variable_name.
"""
if variable_name not in self:
self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
else:
existing_value = self[variable_name]
if isinstance(existing_value, tuple):
if isinstance(replacement, Multiset):
if Multiset(existing_value) != replacement:
raise ValueError
elif replacement != existing_value:
raise ValueError
elif isinstance(existing_value, Multiset):
if not isinstance(replacement, (tuple, list, Multiset)):
raise ValueError
compare_value = Multiset(replacement)
if existing_value == compare_value:
if not isinstance(replacement, Multiset):
self[variable_name] = replacement
else:
raise ValueError
elif replacement != existing_value:
raise ValueError | [
"def",
"try_add_variable",
"(",
"self",
",",
"variable_name",
":",
"str",
",",
"replacement",
":",
"VariableReplacement",
")",
"->",
"None",
":",
"if",
"variable_name",
"not",
"in",
"self",
":",
"self",
"[",
"variable_name",
"]",
"=",
"replacement",
".",
"co... | 45.043478 | 22.673913 |
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ATODGAIN'
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = 'READNSE'
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain = self.getInstrParameter(instrpars['gain'], pri_header,
instrpars['gnkeyword'])
chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header,
instrpars['rnkeyword'])
chip._exptime = self.getInstrParameter(instrpars['exptime'], chip.header,
instrpars['expkeyword'])
if chip._gain is None or chip._rdnoise is None or chip._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
chip._effGain = chip._gain
self._assignSignature(chip._chip) #this is used in the static mask
self.doUnitConversions() | [
"def",
"setInstrumentParameters",
"(",
"self",
",",
"instrpars",
")",
":",
"pri_header",
"=",
"self",
".",
"_image",
"[",
"0",
"]",
".",
"header",
"if",
"self",
".",
"_isNotValid",
"(",
"instrpars",
"[",
"'gain'",
"]",
",",
"instrpars",
"[",
"'gnkeyword'",... | 49.6 | 26.6 |
def _update_criteria_with_filters(self, query, section_name):
"""
This method updates the 'query' dictionary with the criteria stored in
dashboard cookie.
:param query: A dictionary with search criteria.
:param section_name: The dashboard section name
:return: The 'query' dictionary
"""
if self.dashboard_cookie is None:
return query
cookie_criteria = self.dashboard_cookie.get(section_name)
if cookie_criteria == 'mine':
query['Creator'] = self.member.getId()
return query | [
"def",
"_update_criteria_with_filters",
"(",
"self",
",",
"query",
",",
"section_name",
")",
":",
"if",
"self",
".",
"dashboard_cookie",
"is",
"None",
":",
"return",
"query",
"cookie_criteria",
"=",
"self",
".",
"dashboard_cookie",
".",
"get",
"(",
"section_name... | 38.2 | 14.733333 |
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
theta_levels : array
One-dimensional array of desired theta surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Other Parameters
----------------
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
tmpk_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for theta levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Change when Python 2.7 no longer supported
# Pull out keyword arguments
tmpk_out = kwargs.pop('tmpk_out', False)
max_iters = kwargs.pop('max_iters', 50)
eps = kwargs.pop('eps', 1e-6)
axis = kwargs.pop('axis', 0)
bottom_up_search = kwargs.pop('bottom_up_search', True)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to('hPa')
temperature = temperature.to('kelvin')
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
slices = tuple(slices)
pres = np.broadcast_to(pres[slices], temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
theta_levels = np.asanyarray(theta_levels.to('kelvin')).reshape(-1)
isentlevels = theta_levels[np.argsort(theta_levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = mpconsts.kappa.m_as('dimensionless')
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(theta_levels):
raise ValueError('Input theta level out of data bounds')
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = mpconsts.P0 ** ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(pres_theta.m, theta_levels, axis,
from_below=bottom_up_search)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps, maxiter=max_iters)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if tmpk_out = true, calculate temperature and output as last item in list
if tmpk_out:
ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),
axis=axis)
if len(args) > 1:
ret.extend(others)
else:
ret.append(others)
return ret | [
"def",
"isentropic_interpolation",
"(",
"theta_levels",
",",
"pressure",
",",
"temperature",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# iteration function to be used later",
"# Calculates theta from linearly interpolated temperature and solves for pressure",
"def",... | 37.720779 | 24.422078 |
def init_celery(project_name):
""" init celery app without the need of redundant code """
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '%s.settings' % project_name)
app = Celery(project_name)
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(settings.INSTALLED_APPS, related_name='tasks')
return app | [
"def",
"init_celery",
"(",
"project_name",
")",
":",
"os",
".",
"environ",
".",
"setdefault",
"(",
"'DJANGO_SETTINGS_MODULE'",
",",
"'%s.settings'",
"%",
"project_name",
")",
"app",
"=",
"Celery",
"(",
"project_name",
")",
"app",
".",
"config_from_object",
"(",
... | 48.571429 | 18.571429 |
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value | [
"def",
"system_call",
"(",
"cmd",
",",
"*",
"*",
"kwargs",
")",
":",
"proc",
"=",
"Popen",
"(",
"cmd",
",",
"universal_newlines",
"=",
"True",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"# communicate pulls ... | 34.205882 | 20.294118 |
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters) | [
"def",
"list_articles",
"(",
"self",
",",
"project",
",",
"articleset",
",",
"page",
"=",
"1",
",",
"*",
"*",
"filters",
")",
":",
"url",
"=",
"URL",
".",
"article",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"return",
"self",
".",
"g... | 51 | 11 |
def conv2d_trans(ni:int, nf:int, ks:int=2, stride:int=2, padding:int=0, bias=False) -> nn.ConvTranspose2d:
"Create `nn.ConvTranspose2d` layer."
return nn.ConvTranspose2d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias) | [
"def",
"conv2d_trans",
"(",
"ni",
":",
"int",
",",
"nf",
":",
"int",
",",
"ks",
":",
"int",
"=",
"2",
",",
"stride",
":",
"int",
"=",
"2",
",",
"padding",
":",
"int",
"=",
"0",
",",
"bias",
"=",
"False",
")",
"->",
"nn",
".",
"ConvTranspose2d",... | 80.666667 | 40.666667 |
def fullversion():
'''
Return server version (``apachectl -V``)
CLI Example:
.. code-block:: bash
salt '*' apache.fullversion
'''
cmd = '{0} -V'.format(_detect_os())
ret = {}
ret['compiled_with'] = []
out = __salt__['cmd.run'](cmd).splitlines()
# Example
# -D APR_HAS_MMAP
define_re = re.compile(r'^\s+-D\s+')
for line in out:
if ': ' in line:
comps = line.split(': ')
if not comps:
continue
ret[comps[0].strip().lower().replace(' ', '_')] = comps[1].strip()
elif ' -D' in line:
cwith = define_re.sub('', line)
ret['compiled_with'].append(cwith)
return ret | [
"def",
"fullversion",
"(",
")",
":",
"cmd",
"=",
"'{0} -V'",
".",
"format",
"(",
"_detect_os",
"(",
")",
")",
"ret",
"=",
"{",
"}",
"ret",
"[",
"'compiled_with'",
"]",
"=",
"[",
"]",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")",
... | 25.555556 | 18.740741 |
def compute_hr(sig_len, qrs_inds, fs):
"""
Compute instantaneous heart rate from peak indices.
Parameters
----------
sig_len : int
The length of the corresponding signal
qrs_inds : numpy array
The qrs index locations
fs : int, or float
The corresponding signal's sampling frequency.
Returns
-------
heart_rate : numpy array
An array of the instantaneous heart rate, with the length of the
corresponding signal. Contains numpy.nan where heart rate could
not be computed.
"""
heart_rate = np.full(sig_len, np.nan, dtype='float32')
if len(qrs_inds) < 2:
return heart_rate
for i in range(0, len(qrs_inds)-2):
a = qrs_inds[i]
b = qrs_inds[i+1]
c = qrs_inds[i+2]
rr = (b-a) * (1.0 / fs) * 1000
hr = 60000.0 / rr
heart_rate[b+1:c+1] = hr
heart_rate[qrs_inds[-1]:] = heart_rate[qrs_inds[-1]]
return heart_rate | [
"def",
"compute_hr",
"(",
"sig_len",
",",
"qrs_inds",
",",
"fs",
")",
":",
"heart_rate",
"=",
"np",
".",
"full",
"(",
"sig_len",
",",
"np",
".",
"nan",
",",
"dtype",
"=",
"'float32'",
")",
"if",
"len",
"(",
"qrs_inds",
")",
"<",
"2",
":",
"return",... | 25.405405 | 19.945946 |
def build_on_entry(self, runnable, regime, on_entry):
"""
Build OnEntry start handler code.
@param on_entry: OnEntry start handler object
@type on_entry: lems.model.dynamics.OnEntry
@return: Generated OnEntry code
@rtype: list(string)
"""
on_entry_code = []
on_entry_code += ['if self.current_regime != self.last_regime:']
on_entry_code += [' self.last_regime = self.current_regime']
for action in on_entry.actions:
code = self.build_action(runnable, regime, action)
for line in code:
on_entry_code += [' ' + line]
return on_entry_code | [
"def",
"build_on_entry",
"(",
"self",
",",
"runnable",
",",
"regime",
",",
"on_entry",
")",
":",
"on_entry_code",
"=",
"[",
"]",
"on_entry_code",
"+=",
"[",
"'if self.current_regime != self.last_regime:'",
"]",
"on_entry_code",
"+=",
"[",
"' self.last_regime = self... | 30.090909 | 19.181818 |
def list(self, **kwargs):
"""Retrieve a list of objects.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
list: The list of objects, or a generator if `as_list` is False
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server cannot perform the request
"""
# Duplicate data to avoid messing with what the user sent us
data = kwargs.copy()
if self.gitlab.per_page:
data.setdefault('per_page', self.gitlab.per_page)
# We get the attributes that need some special transformation
types = getattr(self, '_types', {})
if types:
for attr_name, type_cls in types.items():
if attr_name in data.keys():
type_obj = type_cls(data[attr_name])
data[attr_name] = type_obj.get_for_api()
# Allow to overwrite the path, handy for custom listings
path = data.pop('path', self.path)
obj = self.gitlab.http_list(path, **data)
if isinstance(obj, list):
return [self._obj_cls(self, item) for item in obj]
else:
return base.RESTObjectList(self, self._obj_cls, obj) | [
"def",
"list",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Duplicate data to avoid messing with what the user sent us",
"data",
"=",
"kwargs",
".",
"copy",
"(",
")",
"if",
"self",
".",
"gitlab",
".",
"per_page",
":",
"data",
".",
"setdefault",
"(",
"'... | 40.025 | 23.2 |
def validate_ports_string(ports):
""" Validate that provided string has proper port numbers:
1. port number < 65535
2. range start < range end
"""
pattern = re.compile('^\\d+(-\\d+)?(,\\d+(-\\d+)?)*$')
if pattern.match(ports) is None:
return False
ranges = PortsRangeHelper._get_string_port_ranges(ports)
for r in ranges:
if r.start > r.end or r.start > 65535 or r.end > 65535:
return False
return True | [
"def",
"validate_ports_string",
"(",
"ports",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"'^\\\\d+(-\\\\d+)?(,\\\\d+(-\\\\d+)?)*$'",
")",
"if",
"pattern",
".",
"match",
"(",
"ports",
")",
"is",
"None",
":",
"return",
"False",
"ranges",
"=",
"PortsRan... | 36.428571 | 13.785714 |
def print(self, txt: str, hold: bool=False) -> None:
""" Conditionally print txt
:param txt: text to print
:param hold: If true, hang on to the text until another print comes through
:param hold: If true, drop both print statements if another hasn't intervened
:return:
"""
if hold:
self.held_prints[self.trace_depth] = txt
elif self.held_prints[self.trace_depth]:
if self.max_print_depth > self.trace_depth:
print(self.held_prints[self.trace_depth])
print(txt)
self.max_print_depth = self.trace_depth
del self.held_prints[self.trace_depth]
else:
print(txt)
self.max_print_depth = self.trace_depth | [
"def",
"print",
"(",
"self",
",",
"txt",
":",
"str",
",",
"hold",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"if",
"hold",
":",
"self",
".",
"held_prints",
"[",
"self",
".",
"trace_depth",
"]",
"=",
"txt",
"elif",
"self",
".",
"held_prints"... | 40 | 18 |
def _nucmer_command(self, ref, qry, outprefix):
'''Construct the nucmer command'''
if self.use_promer:
command = 'promer'
else:
command = 'nucmer'
command += ' -p ' + outprefix
if self.breaklen is not None:
command += ' -b ' + str(self.breaklen)
if self.diagdiff is not None and not self.use_promer:
command += ' -D ' + str(self.diagdiff)
if self.diagfactor:
command += ' -d ' + str(self.diagfactor)
if self.maxgap:
command += ' -g ' + str(self.maxgap)
if self.maxmatch:
command += ' --maxmatch'
if self.mincluster is not None:
command += ' -c ' + str(self.mincluster)
if not self.simplify and not self.use_promer:
command += ' --nosimplify'
return command + ' ' + ref + ' ' + qry | [
"def",
"_nucmer_command",
"(",
"self",
",",
"ref",
",",
"qry",
",",
"outprefix",
")",
":",
"if",
"self",
".",
"use_promer",
":",
"command",
"=",
"'promer'",
"else",
":",
"command",
"=",
"'nucmer'",
"command",
"+=",
"' -p '",
"+",
"outprefix",
"if",
"self... | 27.741935 | 18.774194 |
def fibonacci(n):
"""A recursive Fibonacci to exercise task switching."""
if n <= 1:
raise ndb.Return(n)
a, b = yield fibonacci(n - 1), fibonacci(n - 2)
raise ndb.Return(a + b) | [
"def",
"fibonacci",
"(",
"n",
")",
":",
"if",
"n",
"<=",
"1",
":",
"raise",
"ndb",
".",
"Return",
"(",
"n",
")",
"a",
",",
"b",
"=",
"yield",
"fibonacci",
"(",
"n",
"-",
"1",
")",
",",
"fibonacci",
"(",
"n",
"-",
"2",
")",
"raise",
"ndb",
"... | 30.5 | 15.333333 |
def push_func(self, cuin, callback):
"""Push a function for dfp.
:param cuin: str,unicode: Callback Unique Identifier Name.
:param callback: callable: Corresponding to the cuin to perform a function.
:raises: DFPError,NotCallableError: raises an exception
.. versionadded:: 2.3.0
"""
if cuin and isinstance(cuin, string_types) and callable(callback):
if cuin in self._dfp_funcs:
raise DFPError("The cuin already exists")
else:
self._dfp_funcs[cuin] = callback
else:
if not callable(callback):
raise NotCallableError("The cuin %s cannot be called back" % cuin)
raise DFPError("Invalid parameter") | [
"def",
"push_func",
"(",
"self",
",",
"cuin",
",",
"callback",
")",
":",
"if",
"cuin",
"and",
"isinstance",
"(",
"cuin",
",",
"string_types",
")",
"and",
"callable",
"(",
"callback",
")",
":",
"if",
"cuin",
"in",
"self",
".",
"_dfp_funcs",
":",
"raise"... | 37 | 21.3 |
def increment_failed_logins(self):
""" Increment failed logins counter"""
if not self.failed_logins:
self.failed_logins = 1
elif not self.failed_login_limit_reached():
self.failed_logins += 1
else:
self.reset_login_counter()
self.lock_account(30) | [
"def",
"increment_failed_logins",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"failed_logins",
":",
"self",
".",
"failed_logins",
"=",
"1",
"elif",
"not",
"self",
".",
"failed_login_limit_reached",
"(",
")",
":",
"self",
".",
"failed_logins",
"+=",
"1",... | 35.333333 | 7.777778 |
def open_mask_rle(mask_rle:str, shape:Tuple[int, int])->ImageSegment:
"Return `ImageSegment` object create from run-length encoded string in `mask_lre` with size in `shape`."
x = FloatTensor(rle_decode(str(mask_rle), shape).astype(np.uint8))
x = x.view(shape[1], shape[0], -1)
return ImageSegment(x.permute(2,0,1)) | [
"def",
"open_mask_rle",
"(",
"mask_rle",
":",
"str",
",",
"shape",
":",
"Tuple",
"[",
"int",
",",
"int",
"]",
")",
"->",
"ImageSegment",
":",
"x",
"=",
"FloatTensor",
"(",
"rle_decode",
"(",
"str",
"(",
"mask_rle",
")",
",",
"shape",
")",
".",
"astyp... | 65.2 | 26 |
def match(self, node):
"""Returns match for a given parse tree node.
Should return a true or false object (not necessarily a bool).
It may return a non-empty dict of matching sub-nodes as
returned by a matching pattern.
Subclass may override.
"""
results = {"node": node}
return self.pattern.match(node, results) and results | [
"def",
"match",
"(",
"self",
",",
"node",
")",
":",
"results",
"=",
"{",
"\"node\"",
":",
"node",
"}",
"return",
"self",
".",
"pattern",
".",
"match",
"(",
"node",
",",
"results",
")",
"and",
"results"
] | 34.545455 | 17.272727 |
def read_pixels(viewport=None, alpha=True, out_type='unsigned_byte'):
"""Read pixels from the currently selected buffer.
Under most circumstances, this function reads from the front buffer.
Unlike all other functions in vispy.gloo, this function directly executes
an OpenGL command.
Parameters
----------
viewport : array-like | None
4-element list of x, y, w, h parameters. If None (default),
the current GL viewport will be queried and used.
alpha : bool
If True (default), the returned array has 4 elements (RGBA).
If False, it has 3 (RGB).
out_type : str | dtype
Can be 'unsigned_byte' or 'float'. Note that this does not
use casting, but instead determines how values are read from
the current buffer. Can also be numpy dtypes ``np.uint8``,
``np.ubyte``, or ``np.float32``.
Returns
-------
pixels : array
3D array of pixels in np.uint8 or np.float32 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left corner
of the framebuffer at index [0, 0] in the returned array.
"""
# Check whether the GL context is direct or remote
context = get_current_canvas().context
if context.shared.parser.is_remote():
raise RuntimeError('Cannot use read_pixels() with remote GLIR parser')
finish() # noqa - finish first, also flushes GLIR commands
type_dict = {'unsigned_byte': gl.GL_UNSIGNED_BYTE,
np.uint8: gl.GL_UNSIGNED_BYTE,
'float': gl.GL_FLOAT,
np.float32: gl.GL_FLOAT}
type_ = _check_conversion(out_type, type_dict)
if viewport is None:
viewport = gl.glGetParameter(gl.GL_VIEWPORT)
viewport = np.array(viewport, int)
if viewport.ndim != 1 or viewport.size != 4:
raise ValueError('viewport should be 1D 4-element array-like, not %s'
% (viewport,))
x, y, w, h = viewport
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) # PACK, not UNPACK
fmt = gl.GL_RGBA if alpha else gl.GL_RGB
im = gl.glReadPixels(x, y, w, h, fmt, type_)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 4)
# reshape, flip, and return
if not isinstance(im, np.ndarray):
np_dtype = np.uint8 if type_ == gl.GL_UNSIGNED_BYTE else np.float32
im = np.frombuffer(im, np_dtype)
im.shape = h, w, (4 if alpha else 3) # RGBA vs RGB
im = im[::-1, :, :] # flip the image
return im | [
"def",
"read_pixels",
"(",
"viewport",
"=",
"None",
",",
"alpha",
"=",
"True",
",",
"out_type",
"=",
"'unsigned_byte'",
")",
":",
"# Check whether the GL context is direct or remote",
"context",
"=",
"get_current_canvas",
"(",
")",
".",
"context",
"if",
"context",
... | 41.810345 | 18.293103 |
def _cnvkit_fix(cnns, background_cnn, items, ckouts):
"""Normalize samples, correcting sources of bias.
"""
return [_cnvkit_fix_base(cnns, background_cnn, items, ckouts)] | [
"def",
"_cnvkit_fix",
"(",
"cnns",
",",
"background_cnn",
",",
"items",
",",
"ckouts",
")",
":",
"return",
"[",
"_cnvkit_fix_base",
"(",
"cnns",
",",
"background_cnn",
",",
"items",
",",
"ckouts",
")",
"]"
] | 44.75 | 9.75 |
def equate_initial(name1, name2):
"""
Evaluates whether names match, or one name is the initial of the other
"""
if len(name1) == 0 or len(name2) == 0:
return False
if len(name1) == 1 or len(name2) == 1:
return name1[0] == name2[0]
return name1 == name2 | [
"def",
"equate_initial",
"(",
"name1",
",",
"name2",
")",
":",
"if",
"len",
"(",
"name1",
")",
"==",
"0",
"or",
"len",
"(",
"name2",
")",
"==",
"0",
":",
"return",
"False",
"if",
"len",
"(",
"name1",
")",
"==",
"1",
"or",
"len",
"(",
"name2",
"... | 25.909091 | 15 |
def get_path(self, api_info):
"""Get the path portion of the URL to the method (for RESTful methods).
Request path can be specified in the method, and it could have a base
path prepended to it.
Args:
api_info: API information for this API, possibly including a base path.
This is the api_info property on the class that's been annotated for
this API.
Returns:
This method's request path (not including the http://.../{base_path}
prefix).
Raises:
ApiConfigurationError: If the path isn't properly formatted.
"""
path = self.__path or ''
if path and path[0] == '/':
# Absolute path, ignoring any prefixes. Just strip off the leading /.
path = path[1:]
else:
# Relative path.
if api_info.path:
path = '%s%s%s' % (api_info.path, '/' if path else '', path)
# Verify that the path seems valid.
parts = path.split('/')
for n, part in enumerate(parts):
r = _VALID_PART_RE if n < len(parts) - 1 else _VALID_LAST_PART_RE
if part and '{' in part and '}' in part:
if not r.match(part):
raise api_exceptions.ApiConfigurationError(
'Invalid path segment: %s (part of %s)' % (part, path))
return path | [
"def",
"get_path",
"(",
"self",
",",
"api_info",
")",
":",
"path",
"=",
"self",
".",
"__path",
"or",
"''",
"if",
"path",
"and",
"path",
"[",
"0",
"]",
"==",
"'/'",
":",
"# Absolute path, ignoring any prefixes. Just strip off the leading /.",
"path",
"=",
"pat... | 34.111111 | 23.111111 |
def load_agency_profile(cls, source):
'''
Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None
'''
if not isinstance(source, str_type):
# so it must be a text file
source = source.read()
new_agencies = json.loads(source)
cls._agencies.update(new_agencies) | [
"def",
"load_agency_profile",
"(",
"cls",
",",
"source",
")",
":",
"if",
"not",
"isinstance",
"(",
"source",
",",
"str_type",
")",
":",
"# so it must be a text file",
"source",
"=",
"source",
".",
"read",
"(",
")",
"new_agencies",
"=",
"json",
".",
"loads",
... | 39.066667 | 19.466667 |
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter):
"""
Draw the fold indicator/trigger (arrow).
:param top: Top position
:param mouse_over: Whether the mouse is over the indicator
:param collapsed: Whether the trigger is collapsed or not.
:param painter: QPainter
"""
rect = QtCore.QRect(0, top, self.sizeHint().width(),
self.sizeHint().height())
if self._native:
if os.environ['QT_API'].lower() not in PYQT5_API:
opt = QtGui.QStyleOptionViewItemV2()
else:
opt = QtWidgets.QStyleOptionViewItem()
opt.rect = rect
opt.state = (QtWidgets.QStyle.State_Active |
QtWidgets.QStyle.State_Item |
QtWidgets.QStyle.State_Children)
if not collapsed:
opt.state |= QtWidgets.QStyle.State_Open
if mouse_over:
opt.state |= (QtWidgets.QStyle.State_MouseOver |
QtWidgets.QStyle.State_Enabled |
QtWidgets.QStyle.State_Selected)
opt.palette.setBrush(QtGui.QPalette.Window,
self.palette().highlight())
opt.rect.translate(-2, 0)
self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch,
opt, painter, self)
else:
index = 0
if not collapsed:
index = 2
if mouse_over:
index += 1
QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect) | [
"def",
"_draw_fold_indicator",
"(",
"self",
",",
"top",
",",
"mouse_over",
",",
"collapsed",
",",
"painter",
")",
":",
"rect",
"=",
"QtCore",
".",
"QRect",
"(",
"0",
",",
"top",
",",
"self",
".",
"sizeHint",
"(",
")",
".",
"width",
"(",
")",
",",
"... | 43.605263 | 17.552632 |
def get_getter(cls, prop_name, # @NoSelf
user_getter=None, getter_takes_name=False):
"""This implementation returns the PROP_NAME value if there
exists such property. Otherwise there must exist a logical
getter (user_getter) which the value is taken from. If no
getter is found, None is returned (i.e. the property cannot
be created)"""
has_prop_variable = cls.has_prop_attribute(prop_name)
# WARNING! Deprecated
has_specific_getter = hasattr(cls, GET_PROP_NAME % \
{'prop_name' : prop_name})
has_general_getter = hasattr(cls, GET_GENERIC_NAME)
if not (has_prop_variable or
has_specific_getter or
has_general_getter or
user_getter):
return None
# when property variable is given, it overrides all the getters
if has_prop_variable:
if has_specific_getter or user_getter:
logger.warning("In class %s.%s ignoring custom logical getter "
"for property '%s' as a corresponding "
"attribute exists" \
% (cls.__module__, cls.__name__, prop_name))
# user_getter is ignored here, so it has not to be passed up
user_getter = None
getter_takes_name = False
else:
# uses logical getter. Sees if the getter needs to receive the
# property name (i.e. if the getter is used for multiple
# properties)
if user_getter:
pass
else:
if has_specific_getter:
_getter = getattr(cls, GET_PROP_NAME % \
{'prop_name' : prop_name})
_deps = type(cls)._get_old_style_getter_deps(cls,
prop_name,
_getter)
# this is done to delay getter call, to have
# bound methods to allow overloading of getter in
# derived classes
def __getter(self, deps=_deps):
_getter = getattr(self, GET_PROP_NAME % \
{'prop_name' : prop_name})
return _getter()
user_getter = __getter
getter_takes_name = False
else:
assert has_general_getter
_getter = getattr(cls, GET_GENERIC_NAME)
_deps = type(cls)._get_old_style_getter_deps(cls,
prop_name,
_getter)
def __getter(self, name, deps=_deps):
_getter = getattr(self, GET_GENERIC_NAME)
return _getter(name)
user_getter = __getter
getter_takes_name = True
return PropertyMeta.get_getter(cls, prop_name, user_getter,
getter_takes_name) | [
"def",
"get_getter",
"(",
"cls",
",",
"prop_name",
",",
"# @NoSelf",
"user_getter",
"=",
"None",
",",
"getter_takes_name",
"=",
"False",
")",
":",
"has_prop_variable",
"=",
"cls",
".",
"has_prop_attribute",
"(",
"prop_name",
")",
"# WARNING! Deprecated",
"has_spec... | 44.805556 | 21.125 |
def _loadConfiguration(self):
        """
        Load this module's configuration files (if any) and merge them
        into the application-wide configuration.
        :return: <void>
        """
        config_dir = os.path.join(self.path, "config")
        if not os.path.isdir(config_dir):
            return
        module_config = Config(config_dir)
        Config.mergeDictionaries(module_config.getData(), self.application.config)
"def",
"_loadConfiguration",
"(",
"self",
")",
":",
"configPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"\"config\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"configPath",
")",
":",
"return",
"config",
"=... | 25.923077 | 17.307692 |
def read_file(filename):
    """Return the contents of *filename*, or '' if it cannot be read."""
    logging.debug(_('Reading file: %s'), filename)
    contents = ''
    try:
        with open(filename) as handle:
            contents = handle.read()
    except OSError:
        logging.error(_('Error reading file: %s'), filename)
    return contents
"def",
"read_file",
"(",
"filename",
")",
":",
"logging",
".",
"debug",
"(",
"_",
"(",
"'Reading file: %s'",
")",
",",
"filename",
")",
"try",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"readable",
":",
"return",
"readable",
".",
"read",
"(",
")"... | 30.444444 | 14.222222 |
def unlock_repo(self, repo_name):
        """
        :calls: `DELETE /user/migrations/:migration_id/repos/:repo_name/lock`_
        :param repo_name: str
        :rtype: None
        """
        assert isinstance(repo_name, (str, unicode)), repo_name
        lock_url = self.url + "/repos/" + repo_name + "/lock"
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            lock_url,
            headers={"Accept": Consts.mediaTypeMigrationPreview},
        )
"def",
"unlock_repo",
"(",
"self",
",",
"repo_name",
")",
":",
"assert",
"isinstance",
"(",
"repo_name",
",",
"(",
"str",
",",
"unicode",
")",
")",
",",
"repo_name",
"headers",
",",
"data",
"=",
"self",
".",
"_requester",
".",
"requestJsonAndCheck",
"(",
... | 34.428571 | 17.714286 |
def axisfn(reverse=False, principal_node_type=xml.dom.Node.ELEMENT_NODE):
    """Axis function decorator.

    An axis function takes a node and returns a sequence of nodes along an
    XPath axis.  The decorator renames the function (underscores become
    hyphens, matching XPath axis names) and attaches two attributes that
    record the axis direction and the principal node type.
    """
    def decorate(func):
        func.__name__ = func.__name__.replace('_', '-')
        func.reverse = reverse
        func.principal_node_type = principal_node_type
        return func
    return decorate
"def",
"axisfn",
"(",
"reverse",
"=",
"False",
",",
"principal_node_type",
"=",
"xml",
".",
"dom",
".",
"Node",
".",
"ELEMENT_NODE",
")",
":",
"def",
"decorate",
"(",
"f",
")",
":",
"f",
".",
"__name__",
"=",
"f",
".",
"__name__",
".",
"replace",
"("... | 38.923077 | 20.307692 |
def write_file_list_cache(opts, data, list_cache, w_lock):
    '''
    Serialize ``data`` and write it to the file list cache at ``list_cache``,
    then release the write lock by removing ``w_lock``.

    opts
        The master opts, used to construct the payload serializer.
    data
        The file list data to cache.
    list_cache
        Path of the cache file to (over)write.
    w_lock
        Path of the lock file removed once the write completes.

    NOTE(review): the previous docstring described *reading* and matching the
    cache; this function only writes it and removes the lock file.
    '''
    serial = salt.payload.Serial(opts)
    with salt.utils.files.fopen(list_cache, 'w+b') as fp_:
        fp_.write(serial.dumps(data))
    _unlock_cache(w_lock)
    log.trace('Lockfile %s removed', w_lock)
"def",
"write_file_list_cache",
"(",
"opts",
",",
"data",
",",
"list_cache",
",",
"w_lock",
")",
":",
"serial",
"=",
"salt",
".",
"payload",
".",
"Serial",
"(",
"opts",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"list_cache",
"... | 45.818182 | 20.727273 |
def view_history(name, gitref):
    """Serve an old (committed) version of a page from the git repo.

    .. note:: this is a bottle view

    * this is a GET only method : you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    content = read_committed_file(gitref, name + '.rst')
    if not content:
        return abort(404)
    html_body = publish_parts(content,
                              writer=AttowikiWriter(),
                              settings=None,
                              settings_overrides=None)['html_body']
    history = commit_history(name + '.rst')
    return template('page',
                    type="history",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=history,
                    gitref=gitref,
                    content=html_body)
"def",
"view_history",
"(",
"name",
",",
"gitref",
")",
":",
"response",
".",
"set_header",
"(",
"'Cache-control'",
",",
"'no-cache'",
")",
"response",
".",
"set_header",
"(",
"'Pragma'",
",",
"'no-cache'",
")",
"content",
"=",
"read_committed_file",
"(",
"git... | 36.575758 | 15.515152 |
def arg_scope(list_ops_or_scope, **kwargs):
  """Stores the default arguments for the given set of list_ops.

  For usage, please see examples at top of the file.

  Args:
    list_ops_or_scope: List or tuple of operations to set argument scope for or
      a dictionary containing the current scope. When list_ops_or_scope is a
      dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
      then every op in it need to be decorated with @add_arg_scope to work.
    **kwargs: keyword=value that will define the defaults for each op in
      list_ops. All the ops need to accept the given set of arguments.

  Yields:
    the current_scope, which is a dictionary of {op: {arg: value}}

  Raises:
    TypeError: if list_ops is not a list or a tuple.
    ValueError: if any op in list_ops has not be decorated with @add_arg_scope.
  """
  if isinstance(list_ops_or_scope, dict):
    # Assumes that list_ops_or_scope is a scope that is being reused.
    if kwargs:
      # FIX: the adjacent string literals previously joined into
      # "suppling adictionary" -- add the missing space (and fix the typo).
      raise ValueError("When attempting to re-use a scope by supplying a "
                       "dictionary, kwargs must be empty.")
    current_scope = list_ops_or_scope.copy()
    try:
      _get_arg_stack().append(current_scope)
      yield current_scope
    finally:
      _get_arg_stack().pop()
  else:
    # Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
    if not isinstance(list_ops_or_scope, (list, tuple)):
      # FIX: missing space between concatenated literals ("reusedscope").
      raise TypeError("list_ops_or_scope must either be a list/tuple or reused "
                      "scope (i.e. dict)")
    try:
      current_scope = _current_arg_scope().copy()
      for op in list_ops_or_scope:
        key_op = (op.__module__, op.__name__)
        if not has_arg_scope(op):
          # FIX: ValueError does not printf-format its arguments; the old
          # two-argument form produced an unformatted tuple message.
          raise ValueError("%s is not decorated with @add_arg_scope" % (key_op,))
        if key_op in current_scope:
          # Layer the new defaults on top of the inherited ones for this op.
          current_kwargs = current_scope[key_op].copy()
          current_kwargs.update(kwargs)
          current_scope[key_op] = current_kwargs
        else:
          current_scope[key_op] = kwargs.copy()
      _get_arg_stack().append(current_scope)
      yield current_scope
    finally:
      _get_arg_stack().pop()
"def",
"arg_scope",
"(",
"list_ops_or_scope",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"list_ops_or_scope",
",",
"dict",
")",
":",
"# Assumes that list_ops_or_scope is a scope that is being reused.",
"if",
"kwargs",
":",
"raise",
"ValueError",
"(",
... | 41.254902 | 20.843137 |
def prefix_keys(self, prefix, strip_prefix=False):
        """Yield all keys that begin with ``prefix``.

        :param prefix: Lexical prefix for keys to search.
        :type prefix: bytes
        :param strip_prefix: True to strip the prefix from yielded items.
        :type strip_prefix: bool
        :yields: All keys in the store that begin with ``prefix``.
        """
        offset = len(prefix) if strip_prefix else 0
        for key in self.keys(key_from=prefix):
            if not key.startswith(prefix):
                break
            yield key[offset:]
"def",
"prefix_keys",
"(",
"self",
",",
"prefix",
",",
"strip_prefix",
"=",
"False",
")",
":",
"keys",
"=",
"self",
".",
"keys",
"(",
"key_from",
"=",
"prefix",
")",
"start",
"=",
"0",
"if",
"strip_prefix",
":",
"start",
"=",
"len",
"(",
"prefix",
")... | 27.090909 | 20.181818 |
def main():
    '''Entry point'''
    args = sys.argv
    if len(args) == 1:
        print("Usage: tyler [filename]")
        sys.exit(0)
    path = args[1]
    if not os.path.isfile(path):
        print("Specified file does not exists")
        sys.exit(8)
    tailer = Tyler(filename=path)
    while True:
        try:
            for line in tailer:
                print(line)
            time.sleep(1)
        except KeyboardInterrupt:
            print("Quit signal received")
            sys.exit(0)
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"==",
"1",
":",
"print",
"(",
"\"Usage: tyler [filename]\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"filename",
"=",
"sys",
".",
"argv",
"[",
"1",
"]",
"if",
"not",
"os",
... | 24.65 | 16.15 |
def _get_contents_between(string, opener, closer):
"""
Get the contents of a string between two characters
"""
opener_location = string.index(opener)
closer_location = string.index(closer)
content = string[opener_location + 1:closer_location]
return content | [
"def",
"_get_contents_between",
"(",
"string",
",",
"opener",
",",
"closer",
")",
":",
"opener_location",
"=",
"string",
".",
"index",
"(",
"opener",
")",
"closer_location",
"=",
"string",
".",
"index",
"(",
"closer",
")",
"content",
"=",
"string",
"[",
"o... | 34.75 | 8.5 |
def loadFeatures(self, path_to_fc):
        """
        Load the features of the given feature class into this object's value.
        """
        from ..common.spatial import featureclass_to_json
        feature_json = featureclass_to_json(path_to_fc)
        self.value = json.loads(feature_json)
"def",
"loadFeatures",
"(",
"self",
",",
"path_to_fc",
")",
":",
"from",
".",
".",
"common",
".",
"spatial",
"import",
"featureclass_to_json",
"v",
"=",
"json",
".",
"loads",
"(",
"featureclass_to_json",
"(",
"path_to_fc",
")",
")",
"self",
".",
"value",
"... | 34.857143 | 9.714286 |
def _next_numId(self):
        """
        The first ``numId`` unused by a ``<w:num>`` element, starting at
        1 and filling any gaps in numbering between existing ``<w:num>``
        elements.
        """
        used_ids = {int(numId_str) for numId_str in self.xpath('./w:num/@w:numId')}
        # With m distinct ids, the range 1..m+1 always contains a free slot.
        for candidate in range(1, len(used_ids) + 2):
            if candidate not in used_ids:
                return candidate
"def",
"_next_numId",
"(",
"self",
")",
":",
"numId_strs",
"=",
"self",
".",
"xpath",
"(",
"'./w:num/@w:numId'",
")",
"num_ids",
"=",
"[",
"int",
"(",
"numId_str",
")",
"for",
"numId_str",
"in",
"numId_strs",
"]",
"for",
"num",
"in",
"range",
"(",
"1",
... | 36.25 | 15.75 |
def set_tmp_folder():
    """ Create a temporary folder path using the current time in which
    the zip can be extracted and which should be destroyed afterward.

    Returns:
        str: a path under the system temp dir whose basename is the current
        timestamp with all separators removed (digits only).
    """
    stamp = "%s" % datetime.datetime.now()
    for char in [' ', ':', '.', '-']:
        stamp = stamp.replace(char, '')
    # FIX: the result of strip() was previously discarded (str.strip returns
    # a new string); assign it so the call actually takes effect.
    stamp = stamp.strip()
    tmp_folder = os.path.join(tempfile.gettempdir(), stamp)
    return tmp_folder
"def",
"set_tmp_folder",
"(",
")",
":",
"output",
"=",
"\"%s\"",
"%",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"for",
"char",
"in",
"[",
"' '",
",",
"':'",
",",
"'.'",
",",
"'-'",
"]",
":",
"output",
"=",
"output",
".",
"replace",
"(",
... | 38.2 | 11.6 |
def _parse_info(line):
"""
The output can be:
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 r:17241)]
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 t:10~3)]
"""
re_info = re.compile(
r'\[(?P<name>\w+).(?P<ver>.*) ' +
r'\((?P<rfm1name>\w+) (\w+):(?P<rfm1freq>\d+) ' +
r'(?P<rfm1mode>.*)\)\]')
info = {
'name': None,
'version': None,
'rfm1name': None,
'rfm1frequency': None,
'rfm1datarate': None,
'rfm1toggleinterval': None,
'rfm1togglemask': None,
}
match = re_info.match(line)
if match:
info['name'] = match.group('name')
info['version'] = match.group('ver')
info['rfm1name'] = match.group('rfm1name')
info['rfm1frequency'] = match.group('rfm1freq')
values = match.group('rfm1mode').split(':')
if values[0] == 'r':
info['rfm1datarate'] = values[1]
elif values[0] == 't':
toggle = values[1].split('~')
info['rfm1toggleinterval'] = toggle[0]
info['rfm1togglemask'] = toggle[1]
return info | [
"def",
"_parse_info",
"(",
"line",
")",
":",
"re_info",
"=",
"re",
".",
"compile",
"(",
"r'\\[(?P<name>\\w+).(?P<ver>.*) '",
"+",
"r'\\((?P<rfm1name>\\w+) (\\w+):(?P<rfm1freq>\\d+) '",
"+",
"r'(?P<rfm1mode>.*)\\)\\]'",
")",
"info",
"=",
"{",
"'name'",
":",
"None",
","... | 34.342857 | 13.257143 |
def create(type_dict, *type_parameters):
    """
    EnumFactory.create(*type_parameters) expects:
        enumeration name, (enumeration values)
    """
    name, values = type_parameters
    assert isinstance(values, (list, tuple))
    # Every enumeration value must be a string type.
    assert all(isinstance(value, Compatibility.stringy) for value in values)
    return TypeMetaclass(str(name), (EnumContainer,), {'VALUES': values})
"def",
"create",
"(",
"type_dict",
",",
"*",
"type_parameters",
")",
":",
"name",
",",
"values",
"=",
"type_parameters",
"assert",
"isinstance",
"(",
"values",
",",
"(",
"list",
",",
"tuple",
")",
")",
"for",
"value",
"in",
"values",
":",
"assert",
"isin... | 38.1 | 9.1 |
def finish_and_die(self):
        """
        If there is a request pending, let it finish and be handled, then
        disconnect and die. If not, cancel any pending queue requests and
        just die.
        """
        self.logstate('finish_and_die')
        # Stop accepting new work from the queue in either case.
        self.stop_working_on_queue()
        # Only shut down immediately when no request is in flight; a pending
        # request is allowed to complete before the factory stops.
        if self.jobphase != 'pending_request':
            self.stopFactory()
"def",
"finish_and_die",
"(",
"self",
")",
":",
"self",
".",
"logstate",
"(",
"'finish_and_die'",
")",
"self",
".",
"stop_working_on_queue",
"(",
")",
"if",
"self",
".",
"jobphase",
"!=",
"'pending_request'",
":",
"self",
".",
"stopFactory",
"(",
")"
] | 36.1 | 12.5 |
def available_actions(self, obs):
    """Return the list of action ids that are available given `obs`."""
    available_actions = set()
    hide_specific_actions = self._agent_interface_format.hide_specific_actions
    # Functions that declare their own availability test against the obs.
    for i, func in six.iteritems(actions.FUNCTIONS_AVAILABLE):
      if func.avail_fn(obs):
        available_actions.add(i)
    # Ability-based actions reported directly by the game observation.
    for a in obs.abilities:
      if a.ability_id not in actions.ABILITY_IDS:
        logging.warning("Unknown ability %s seen as available.", a.ability_id)
        continue
      for func in actions.ABILITY_IDS[a.ability_id]:
        # Only consider functions matching the ability's point requirement.
        if func.function_type in actions.POINT_REQUIRED_FUNCS[a.requires_point]:
          if func.general_id == 0 or not hide_specific_actions:
            available_actions.add(func.id)
          if func.general_id != 0:  # Always offer generic actions.
            for general_func in actions.ABILITY_IDS[func.general_id]:
              if general_func.function_type is func.function_type:
                # Only the right type. Don't want to expose the general action
                # to minimap if only the screen version is available.
                available_actions.add(general_func.id)
                break
    return list(available_actions)
"def",
"available_actions",
"(",
"self",
",",
"obs",
")",
":",
"available_actions",
"=",
"set",
"(",
")",
"hide_specific_actions",
"=",
"self",
".",
"_agent_interface_format",
".",
"hide_specific_actions",
"for",
"i",
",",
"func",
"in",
"six",
".",
"iteritems",
... | 51.173913 | 19.434783 |
def render_category(slug):
    """Template tag to render a category with all it's entries."""
    try:
        category = EntryCategory.objects.get(slug=slug)
    except EntryCategory.DoesNotExist:
        # Unknown slug: render nothing rather than raising.
        return {}
    return {'category': category}
"def",
"render_category",
"(",
"slug",
")",
":",
"try",
":",
"category",
"=",
"EntryCategory",
".",
"objects",
".",
"get",
"(",
"slug",
"=",
"slug",
")",
"except",
"EntryCategory",
".",
"DoesNotExist",
":",
"pass",
"else",
":",
"return",
"{",
"'category'",... | 29.333333 | 16.888889 |
def match(self, item):
        """ Return True if filter matches item.
        """
        attr_value = getattr(item, self._name)
        if attr_value is not None:
            return super(DurationFilter, self).match(item)
        # "N/A" items never match, except when "-0" was specified.
        return self._cmp(-1, 0) if not self._value else False
"def",
"match",
"(",
"self",
",",
"item",
")",
":",
"if",
"getattr",
"(",
"item",
",",
"self",
".",
"_name",
")",
"is",
"None",
":",
"# Never match \"N/A\" items, except when \"-0\" was specified",
"return",
"False",
"if",
"self",
".",
"_value",
"else",
"self"... | 40.75 | 14.75 |
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Returns: [(name, success_flag, output), ...]
    """
    try:
        # Python 2: also skip `unicode` instances.
        skip_types = (dict, str, unicode, float, int)
    except NameError:
        # python 3
        skip_types = (dict, str, float, int)
    results = []
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]
    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)
        if obj is None:
            results.append((full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue
        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            except Exception:
                # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt
                # and SystemExit are no longer swallowed, while any parsing
                # failure is still reported as a result entry.
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue
        # Reject control characters (other than \n and \t) in docstrings.
        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue
        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None
        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name
        results.append((full_name,) +
                       validate_rst_syntax(text, file_full_name, dots=dots))
    return results
"def",
"check_rest",
"(",
"module",
",",
"names",
",",
"dots",
"=",
"True",
")",
":",
"try",
":",
"skip_types",
"=",
"(",
"dict",
",",
"str",
",",
"unicode",
",",
"float",
",",
"int",
")",
"except",
"NameError",
":",
"# python 3",
"skip_types",
"=",
... | 29.761905 | 20.460317 |
def new(self, bootstrap_with=None, use_timer=False):
        """
        Actual constructor of the solver.
        """
        if self.minicard:
            return
        self.minicard = pysolvers.minicard_new()
        if bootstrap_with:
            for clause in bootstrap_with:
                self.add_clause(clause)
        self.use_timer = use_timer
        self.call_time = 0.0  # time spent for the last call to oracle
        self.accu_time = 0.0  # accumulated time over all oracle calls
"def",
"new",
"(",
"self",
",",
"bootstrap_with",
"=",
"None",
",",
"use_timer",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"minicard",
":",
"self",
".",
"minicard",
"=",
"pysolvers",
".",
"minicard_new",
"(",
")",
"if",
"bootstrap_with",
":",
"... | 30.8 | 14.8 |
def runUncertainLocations(missingLoc=None, profile=False):
  """
  Runs the same experiment as above, with missing locations at some timesteps
  during inference (if it was not successfully computed by the rest of the
  network for example).

  @param missingLoc (dict)
  A dictionary mapping indices in the object to location index to
  replace with during inference (-1 means no location, a tuple means
  an union of locations).

  @param profile (bool)
  If True, the network will be profiled after learning and inference
  """
  if missingLoc is None:
    missingLoc = {}

  exp = L4L2Experiment(
    "uncertain_location",
    enableLateralSP = True,
    enableFeedForwardSP=True
  )

  pairs = createThreeObjects()
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024
  )
  # FIX: renamed the loop variable from `object`, which shadowed the builtin.
  for obj in pairs:
    objects.addObject(obj)
  exp.learnObjects(objects.provideObjectsToLearn())

  # create pairs with missing locations
  objectA = objects[0]
  for key, val in missingLoc.iteritems():
    objectA[key] = (val, key)

  inferConfig = {
    "numSteps": 10,
    "pairs": {
      0: objectA
    }
  }

  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation",
            "L4 Predictive"],
  )
"def",
"runUncertainLocations",
"(",
"missingLoc",
"=",
"None",
",",
"profile",
"=",
"False",
")",
":",
"if",
"missingLoc",
"is",
"None",
":",
"missingLoc",
"=",
"{",
"}",
"exp",
"=",
"L4L2Experiment",
"(",
"\"uncertain_location\"",
",",
"enableLateralSP",
"="... | 24.741379 | 22.224138 |
def cancel_notification(cls, notification_or_id, tag=None):
        """ Cancel the notification.

        Parameters
        ----------
        notification_or_id: Notification.Builder or int
            The notification or id of a notification to clear
        tag: String
            The tag of the notification to clear
        """
        def on_ready(mgr):
            # A bridged notification object carries its id in __id__;
            # otherwise the argument is assumed to already be the int id.
            if isinstance(notification_or_id, JavaBridgeObject):
                nid = notification_or_id.__id__
            else:
                nid = notification_or_id
            # NOTE(review): `cancel_` (trailing underscore) appears to be the
            # bridged one-argument overload of `cancel` -- confirm against the
            # bridge bindings.
            if tag is None:
                mgr.cancel_(nid)
            else:
                mgr.cancel(tag, nid)
        # Resolve the notification manager asynchronously, then cancel.
        cls.get().then(on_ready)
"def",
"cancel_notification",
"(",
"cls",
",",
"notification_or_id",
",",
"tag",
"=",
"None",
")",
":",
"def",
"on_ready",
"(",
"mgr",
")",
":",
"if",
"isinstance",
"(",
"notification_or_id",
",",
"JavaBridgeObject",
")",
":",
"nid",
"=",
"notification_or_id",... | 32.380952 | 15 |
def wkt_rewind(x, digits = None):
    '''
    reverse WKT winding order

    :param x: [str] WKT string
    :param digits: [int] number of digits after decimal to use for the
        return string. By default, the mean number of digits in your
        string is used.
    :return: a string

    Usage::

        from pygbif import wkt_rewind
        x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
        wkt_rewind(x)
        wkt_rewind(x, digits = 0)
        wkt_rewind(x, digits = 3)
        wkt_rewind(x, digits = 7)
    '''
    geojson = wkt.loads(x)
    if digits is None:
        # Infer precision: average the decimal places over all coordinates.
        nums = __flatten(geojson['coordinates'])
        exponents = [decimal.Decimal(str(num)).as_tuple().exponent for num in nums]
        digits = abs(statistics.mean(exponents))
    elif not isinstance(digits, int):
        raise TypeError("'digits' must be an int")
    rewound = rewind(geojson)
    return wkt.dumps(rewound, decimals=digits)
"def",
"wkt_rewind",
"(",
"x",
",",
"digits",
"=",
"None",
")",
":",
"z",
"=",
"wkt",
".",
"loads",
"(",
"x",
")",
"if",
"digits",
"is",
"None",
":",
"coords",
"=",
"z",
"[",
"'coordinates'",
"]",
"nums",
"=",
"__flatten",
"(",
"coords",
")",
"de... | 30.709677 | 20.709677 |
def marshmallow_loader(schema_class):
    """Marshmallow loader for JSON requests."""
    def json_loader():
        payload = request.get_json()
        context = {}
        pid_data = request.view_args.get('pid_value')
        if pid_data:
            # Expose the resolved PID to the schema through its context.
            pid, _record = pid_data.data
            context['pid'] = pid
        result = schema_class(context=context).load(payload)
        if result.errors:
            raise MarshmallowErrors(result.errors)
        return result.data
    return json_loader
"def",
"marshmallow_loader",
"(",
"schema_class",
")",
":",
"def",
"json_loader",
"(",
")",
":",
"request_json",
"=",
"request",
".",
"get_json",
"(",
")",
"context",
"=",
"{",
"}",
"pid_data",
"=",
"request",
".",
"view_args",
".",
"get",
"(",
"'pid_value... | 29.058824 | 17.117647 |
def get_edge_annotation_layers(docgraph):
    """
    Return the set of all annotation layers used for annotating edges in the
    given document graph.

    Parameters
    ----------
    docgraph : graph exposing ``edges_iter(data=True)``
        the document graph whose edge layers are collected

    Returns
    -------
    all_layers : set
        the set of all annotation layers used for annotating edges in the
        given graph
    """
    all_layers = set()
    # PERF: fold each edge's layer collection in with one set.update() call
    # instead of adding layers one at a time in a nested loop.
    for _source_id, _target_id, edge_attribs in docgraph.edges_iter(data=True):
        all_layers.update(edge_attribs['layers'])
    return all_layers
"def",
"get_edge_annotation_layers",
"(",
"docgraph",
")",
":",
"all_layers",
"=",
"set",
"(",
")",
"for",
"source_id",
",",
"target_id",
",",
"edge_attribs",
"in",
"docgraph",
".",
"edges_iter",
"(",
"data",
"=",
"True",
")",
":",
"for",
"layer",
"in",
"e... | 28.75 | 17.25 |
def transform_future(transformation, future):
    """Returns a new future that will resolve with a transformed value.

    Takes the resolution value of `future` and applies
    transformation(future.result()) to it before setting the result of the
    new future with the transformed value. If `future` resolves with an
    exception, it is passed through to the new future.

    Assumes `future` is a tornado Future.
    """
    new_future = tornado_Future()
    def _transform(f):
        assert f is future
        if f.exc_info() is not None:
            # Propagate the original exception (with traceback) unchanged.
            new_future.set_exc_info(f.exc_info())
        else:
            try:
                new_future.set_result(transformation(f.result()))
            except Exception:
                # An exception here indicates that the transformation was unsuccesful
                new_future.set_exc_info(sys.exc_info())
    future.add_done_callback(_transform)
    return new_future
"def",
"transform_future",
"(",
"transformation",
",",
"future",
")",
":",
"new_future",
"=",
"tornado_Future",
"(",
")",
"def",
"_transform",
"(",
"f",
")",
":",
"assert",
"f",
"is",
"future",
"if",
"f",
".",
"exc_info",
"(",
")",
"is",
"not",
"None",
... | 37.625 | 21.416667 |
def _process_dataset(name, directory, num_shards, synset_to_human,
                     image_to_bboxes):
  """Process a complete data set and save it as a TFRecord.

  Args:
    name: string, unique identifier specifying the data set.
    directory: string, root path to the data set.
    num_shards: integer number of shards for this data set.
    synset_to_human: dict of synset to human labels, e.g.,
      'n02119022' --> 'red fox, Vulpes vulpes'
    image_to_bboxes: dictionary mapping image file names to a list of
      bounding boxes. This list contains 0+ bounding boxes.
  """
  # Enumerate image files with their synsets and integer labels.
  filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
  # Map each image's synset to its human-readable label.
  humans = _find_human_readable_labels(synsets, synset_to_human)
  # Collect the (possibly empty) bounding-box list for each image.
  bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
  _process_image_files(name, filenames, synsets, labels,
                       humans, bboxes, num_shards)
"def",
"_process_dataset",
"(",
"name",
",",
"directory",
",",
"num_shards",
",",
"synset_to_human",
",",
"image_to_bboxes",
")",
":",
"filenames",
",",
"synsets",
",",
"labels",
"=",
"_find_image_files",
"(",
"directory",
",",
"FLAGS",
".",
"labels_file",
")",
... | 49.333333 | 18.555556 |
def write_file(self, *args, **kwargs):
        """Write a file into this directory.

        Accepts the same arguments as :meth:`.FileDataAPI.write_file`,
        except ``path``, which is supplied automatically from this
        directory.
        """
        directory_path = self.get_path()
        return self._fdapi.write_file(directory_path, *args, **kwargs)
"def",
"write_file",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_fdapi",
".",
"write_file",
"(",
"self",
".",
"get_path",
"(",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 39.875 | 23.625 |
def _read_byte(self):
        """Read a single byte from the configured input (serial, TCP or file).

        Returns the raw one-byte string read.  Also appends the byte's
        ordinal to the rolling debug buffer, trimming it to
        ``self._logdatalen`` entries.
        """
        to_return = ""
        if (self._mode == PROP_MODE_SERIAL):
            to_return = self._serial.read(1)
        elif (self._mode == PROP_MODE_TCP):
            to_return = self._socket.recv(1)
        elif (self._mode == PROP_MODE_FILE):
            # File mode stores one integer byte value per line.
            to_return = struct.pack("B", int(self._file.readline()))
        # NOTE(review): if none of the modes above matched, to_return is ""
        # and ord() below raises TypeError -- confirm the mode is validated
        # upstream.
        _LOGGER.debug("READ: " + str(ord(to_return)))
        self._logdata.append(ord(to_return))
        if (len(self._logdata) > self._logdatalen):
            # Keep only the most recent _logdatalen entries.
            self._logdata = self._logdata[len(self._logdata) - self._logdatalen:]
        self._debug(PROP_LOGLEVEL_TRACE, "READ: " + str(ord(to_return)))
        return to_return
"def",
"_read_byte",
"(",
"self",
")",
":",
"to_return",
"=",
"\"\"",
"if",
"(",
"self",
".",
"_mode",
"==",
"PROP_MODE_SERIAL",
")",
":",
"to_return",
"=",
"self",
".",
"_serial",
".",
"read",
"(",
"1",
")",
"elif",
"(",
"self",
".",
"_mode",
"==",
... | 36.421053 | 19 |
def copy(self, dest):
        """ Copy this file to *dest* (a File, Directory, or path string). """
        if isinstance(dest, File):
            # Ensure the destination's directory exists before copying.
            target_dir = dest.get_directory()
            target_dir.create()
            target = dest.filename
        elif isinstance(dest, Directory):
            target = dest.dirname
        else:
            target = dest
        shutil.copy2(self.filename, target)
"def",
"copy",
"(",
"self",
",",
"dest",
")",
":",
"if",
"isinstance",
"(",
"dest",
",",
"File",
")",
":",
"dest_dir",
"=",
"dest",
".",
"get_directory",
"(",
")",
"dest_dir",
".",
"create",
"(",
")",
"dest",
"=",
"dest",
".",
"filename",
"elif",
"... | 31.2 | 9.8 |
def reparse(self):
        '''Reparse all children of this directory.

        This effectively rebuilds the tree below this node.

        This operation takes an unbounded time to complete; if there are a lot
        of objects registered below this directory's context, they will all
        need to be parsed.
        '''
        # Drop the current subtree, then rebuild it from the naming context.
        self._remove_all_children()
        self._parse_context(self._context, self.orb)
"def",
"reparse",
"(",
"self",
")",
":",
"self",
".",
"_remove_all_children",
"(",
")",
"self",
".",
"_parse_context",
"(",
"self",
".",
"_context",
",",
"self",
".",
"orb",
")"
] | 33.666667 | 25.333333 |
def run(self):
        """
        Run the plugin: wrap each Dockerfile build stage with the lines that
        hide the configured files.  Does nothing when no files are configured.
        """
        try:
            self.hide_files = get_hide_files(self.workflow)
        except KeyError:
            # No hide-files configuration present: nothing to do.
            self.log.info("Skipping hide files: no files to hide")
            return

        self._populate_start_file_lines()
        self._populate_end_file_lines()

        self.dfp = df_parser(self.workflow.builder.df_path)
        stages = self._find_stages()

        # For each stage, wrap it with the extra lines we want.
        # Work backwards to preserve line numbers.
        for stage in reversed(stages):
            self._update_dockerfile(**stage)
"def",
"run",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"hide_files",
"=",
"get_hide_files",
"(",
"self",
".",
"workflow",
")",
"except",
"KeyError",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Skipping hide files: no files to hide\"",
")",
"return",... | 30.35 | 16.95 |
def delay(
        self,
        identifier: typing.Any,
        until: typing.Union[int, float]=-1,
    ) -> bool:
        """Delay a deferred function until the given time.

        Args:
            identifier (typing.Any): The identifier returned from a call
                to defer or defer_for.
            until (typing.Union[int, float]): A numeric value that represents
                the clock time when the callback becomes available for
                execution. Values that are less than the current time result in
                the function being called at the next opportunity.

        Returns:
            bool: True if the call is delayed. False if the identifier is
                invalid or if the deferred call is already executed.
        """
        # Abstract hook: concrete schedulers implement the rescheduling.
        raise NotImplementedError()
"def",
"delay",
"(",
"self",
",",
"identifier",
":",
"typing",
".",
"Any",
",",
"until",
":",
"typing",
".",
"Union",
"[",
"int",
",",
"float",
"]",
"=",
"-",
"1",
",",
")",
"->",
"bool",
":",
"raise",
"NotImplementedError",
"(",
")"
] | 39.95 | 22.7 |
def named_value_float_send(self, time_boot_ms, name, value, force_mavlink1=False):
        '''
        Send a key-value pair as float. The use of this message is discouraged
        for normal packets, but a quite efficient way for
        testing new messages and getting experimental debug
        output.

        time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
        name         : Name of the debug variable (char)
        value        : Floating point value (float)
        '''
        # Encode the NAMED_VALUE_FLOAT message and transmit it on this link.
        return self.send(self.named_value_float_encode(time_boot_ms, name, value), force_mavlink1=force_mavlink1)
"def",
"named_value_float_send",
"(",
"self",
",",
"time_boot_ms",
",",
"name",
",",
"value",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"named_value_float_encode",
"(",
"time_boot_ms",
",",
"name",
",",
"... | 56 | 37.384615 |
async def items(self, name=None, *, watch=None):
        """Lists the most recent events an agent has seen.

        Parameters:
            name (str): Filter events by name.
            watch (Blocking): Do a blocking query
        Returns:
            CollectionMeta: where value is a list of events

        It returns a JSON body like this::

            [
                {
                    "ID": "b54fe110-7af5-cafc-d1fb-afc8ba432b1c",
                    "Name": "deploy",
                    "Payload": bytes("abcd"),
                    "NodeFilter": re.compile("node-\\d+"),
                    "ServiceFilter": "",
                    "TagFilter": "",
                    "Version": 1,
                    "LTime": 19
                },
                ...
            ]
        """
        params = {"name": name}
        response = await self._api.get("/v1/event/list",
                                       params=params, watch=watch)
        events = [format_event(item) for item in response.body]
        meta = extract_meta(response.headers)
        return consul(events, meta=meta)
"async",
"def",
"items",
"(",
"self",
",",
"name",
"=",
"None",
",",
"*",
",",
"watch",
"=",
"None",
")",
":",
"path",
"=",
"\"/v1/event/list\"",
"params",
"=",
"{",
"\"name\"",
":",
"name",
"}",
"response",
"=",
"await",
"self",
".",
"_api",
".",
... | 34.533333 | 16.666667 |
def list_(bank):
    '''
    Lists entries stored in the specified bank.
    '''
    redis_server = _get_redis_server()
    bank_key = _get_bank_redis_key(bank)
    try:
        members = redis_server.smembers(bank_key)
    except (RedisConnectionError, RedisResponseError) as rerr:
        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_key,
                                                                       rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
    return list(members) if members else []
"def",
"list_",
"(",
"bank",
")",
":",
"redis_server",
"=",
"_get_redis_server",
"(",
")",
"bank_redis_key",
"=",
"_get_bank_redis_key",
"(",
"bank",
")",
"try",
":",
"banks",
"=",
"redis_server",
".",
"smembers",
"(",
"bank_redis_key",
")",
"except",
"(",
"... | 35.5625 | 21.9375 |
def _signature(self, *parts):
        """
        Creates signature for the session.
        """
        # HMAC-SHA1 over the '|'-joined parts, keyed with the session secret.
        mac = hmac.new(six.b(self.secret), digestmod=hashlib.sha1)
        mac.update(six.b('|'.join(parts)))
        return mac.hexdigest()
"def",
"_signature",
"(",
"self",
",",
"*",
"parts",
")",
":",
"signature",
"=",
"hmac",
".",
"new",
"(",
"six",
".",
"b",
"(",
"self",
".",
"secret",
")",
",",
"digestmod",
"=",
"hashlib",
".",
"sha1",
")",
"signature",
".",
"update",
"(",
"six",
... | 35.571429 | 8.142857 |
def _toolkit_serialize_summary_struct(model, sections, section_titles):
    """
    Serialize model summary into a dict with ordered lists of sections and section titles

    Parameters
    ----------
    model : Model object
    sections : Ordered list of lists (sections) of tuples (field,value)
        [
            [(field1, value1), (field2, value2)],
            [(field3, value3), (field4, value4)],
        ]
    section_titles : Ordered list of section titles

    Returns
    -------
    output_dict : A dict with two entries:
        'sections' : ordered list with tuples of the form ('label',value)
        'section_titles' : ordered list of section labels
    """
    serialized_sections = []
    for section in sections:
        entries = []
        for label, accessor in section:
            entries.append((label, __extract_model_summary_value(model, accessor)))
        serialized_sections.append(entries)
    return {'sections': serialized_sections,
            'section_titles': section_titles}
"def",
"_toolkit_serialize_summary_struct",
"(",
"model",
",",
"sections",
",",
"section_titles",
")",
":",
"output_dict",
"=",
"dict",
"(",
")",
"output_dict",
"[",
"'sections'",
"]",
"=",
"[",
"[",
"(",
"field",
"[",
"0",
"]",
",",
"__extract_model_summary_v... | 37.964286 | 27.892857 |
async def get_input_entity(self, peer):
        """
        Turns the given peer into its input entity version. Most requests
        use this kind of :tl:`InputPeer`, so this is the most suitable call
        to make for those cases. **Generally you should let the library do
        its job** and don't worry about getting the input entity first, but
        if you're going to use an entity often, consider making the call:

        >>> import asyncio
        >>> rc = asyncio.get_event_loop().run_until_complete
        >>>
        >>> from telethon import TelegramClient
        >>> client = TelegramClient(...)
        >>> # If you're going to use "username" often in your code
        >>> # (make a lot of calls), consider getting its input entity
        >>> # once, and then using the "user" everywhere instead.
        >>> user = rc(client.get_input_entity('username'))
        >>> # The same applies to IDs, chats or channels.
        >>> chat = rc(client.get_input_entity(-123456789))

        Args:
            peer (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
                If a username or invite link is given, **the library will
                use the cache**. This means that it's possible to be using
                a username that *changed* or an old invite link (this only
                happens if an invite link for a small group chat is used
                after it was upgraded to a mega-group).

                If the username or ID from the invite link is not found in
                the cache, it will be fetched. The same rules apply to phone
                numbers (``'+34 123456789'``) from people in your contact list.

                If an exact name is given, it must be in the cache too. This
                is not reliable as different people can share the same name
                and which entity is returned is arbitrary, and should be used
                only for quick tests.

                If a positive integer ID is given, the entity will be searched
                in cached users, chats or channels, without making any call.

                If a negative integer ID is given, the entity will be searched
                exactly as either a chat (prefixed with ``-``) or as a channel
                (prefixed with ``-100``).

                If a :tl:`Peer` is given, it will be searched exactly in the
                cache as either a user, chat or channel.

                If the given object can be turned into an input entity directly,
                said operation will be done.

        Returns:
            :tl:`InputPeerUser`, :tl:`InputPeerChat` or :tl:`InputPeerChannel`
            or :tl:`InputPeerSelf` if the parameter is ``'me'`` or ``'self'``.

            If you need to get the ID of yourself, you should use
            `get_me` with ``input_peer=True``) instead.

        Raises:
            TypeError: if the given ``peer`` is of an unsupported type.
            ValueError: if the entity can't be found through any strategy.
        """
        # Short-circuit if the input parameter directly maps to an InputPeer
        try:
            return utils.get_input_peer(peer)
        except TypeError:
            pass
        # Next in priority is having a peer (or its ID) cached in-memory
        try:
            # 0x2d45687 == crc32(b'Peer')
            if isinstance(peer, int) or peer.SUBCLASS_OF_ID == 0x2d45687:
                return self._entity_cache[peer]
        except (AttributeError, KeyError):
            pass
        # Then come known strings that take precedence
        if peer in ('me', 'self'):
            return types.InputPeerSelf()
        # No InputPeer, cached peer, or known string. Fetch from disk cache
        try:
            return self.session.get_input_entity(peer)
        except ValueError:
            pass
        # Only network left to try
        if isinstance(peer, str):
            return utils.get_input_peer(
                await self._get_entity_from_string(peer))
        # If we're a bot and the user has messaged us privately users.getUsers
        # will work with access_hash = 0. Similar for channels.getChannels.
        # If we're not a bot but the user is in our contacts, it seems to work
        # regardless. These are the only two special-cased requests.
        peer = utils.get_peer(peer)
        if isinstance(peer, types.PeerUser):
            users = await self(functions.users.GetUsersRequest([
                types.InputUser(peer.user_id, access_hash=0)]))
            if users and not isinstance(users[0], types.UserEmpty):
                # If the user passed a valid ID they expect to work for
                # channels but would be valid for users, we get UserEmpty.
                # Avoid returning the invalid empty input peer for that.
                #
                # We *could* try to guess if it's a channel first, and if
                # it's not, work as a chat and try to validate it through
                # another request, but that becomes too much work.
                return utils.get_input_peer(users[0])
        elif isinstance(peer, types.PeerChat):
            return types.InputPeerChat(peer.chat_id)
        elif isinstance(peer, types.PeerChannel):
            try:
                channels = await self(functions.channels.GetChannelsRequest([
                    types.InputChannel(peer.channel_id, access_hash=0)]))
                return utils.get_input_peer(channels.chats[0])
            except errors.ChannelInvalidError:
                pass
        raise ValueError(
            'Could not find the input entity for {!r}. Please read https://'
            'telethon.readthedocs.io/en/latest/extra/basic/entities.html to'
            ' find out more details.'
            .format(peer)
        )
"async",
"def",
"get_input_entity",
"(",
"self",
",",
"peer",
")",
":",
"# Short-circuit if the input parameter directly maps to an InputPeer",
"try",
":",
"return",
"utils",
".",
"get_input_peer",
"(",
"peer",
")",
"except",
"TypeError",
":",
"pass",
"# Next in priorit... | 45.467213 | 24.598361 |
def scalar_inc_dec(word, valence, is_cap_diff):
    """
    Check if the preceding words increase, decrease, or negate/nullify the
    valence
    """
    lowered = word.lower()
    # Words outside the booster dictionary contribute nothing.
    if lowered not in BOOSTER_DICT:
        return 0.0
    scalar = BOOSTER_DICT[lowered]
    # Flip the booster's sign to follow a negative valence.
    if valence < 0:
        scalar *= -1
    # check if booster/dampener word is in ALLCAPS (while others aren't)
    if word.isupper() and is_cap_diff:
        scalar = scalar + C_INCR if valence > 0 else scalar - C_INCR
    return scalar
"def",
"scalar_inc_dec",
"(",
"word",
",",
"valence",
",",
"is_cap_diff",
")",
":",
"scalar",
"=",
"0.0",
"word_lower",
"=",
"word",
".",
"lower",
"(",
")",
"if",
"word_lower",
"in",
"BOOSTER_DICT",
":",
"scalar",
"=",
"BOOSTER_DICT",
"[",
"word_lower",
"]... | 30.888889 | 14.333333 |
def calculeToday(self):
        """Calcule the intervals from the last date up to today."""
        self.__logger.debug("Add today")
        # __lastDay is stored as an ISO-style "YYYY-MM-DD" string.
        previous = datetime.datetime.strptime(self.__lastDay, "%Y-%m-%d")
        current_date = datetime.datetime.now().date()
        self.__validInterval(previous, current_date)
"def",
"calculeToday",
"(",
"self",
")",
":",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Add today\"",
")",
"last",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"self",
".",
"__lastDay",
",",
"\"%Y-%m-%d\"",
")",
"today",
"=",
"datetime",
... | 45.666667 | 8.833333 |
def do_dissect_payload(self, s):
        """
        Perform the dissection of the layer's payload

        :param str s: the raw layer
        """
        if not s:
            return
        cls = self.guess_payload_class(s)
        try:
            payload = cls(s, _internal=1, _underlayer=self)
        except KeyboardInterrupt:
            raise
        except Exception:
            if conf.debug_dissector:
                if issubtype(cls, Packet):
                    log_runtime.error("%s dissector failed" % cls.__name__)
                else:
                    log_runtime.error("%s.guess_payload_class() returned [%s]" % (self.__class__.__name__, repr(cls)))  # noqa: E501
                if cls is not None:
                    raise
            # Outside debug mode, fall back to the raw layer.
            payload = conf.raw_layer(s, _internal=1, _underlayer=self)
        self.add_payload(payload)
"def",
"do_dissect_payload",
"(",
"self",
",",
"s",
")",
":",
"if",
"s",
":",
"cls",
"=",
"self",
".",
"guess_payload_class",
"(",
"s",
")",
"try",
":",
"p",
"=",
"cls",
"(",
"s",
",",
"_internal",
"=",
"1",
",",
"_underlayer",
"=",
"self",
")",
... | 38.772727 | 17.136364 |
def lazy_load_modules(*modules):
    """
    Decorator to load module to perform related operation for specific function
    and delete the module from imports once the task is done. GC frees the memory
    related to module during clean-up.
    """
    def decorator(function):
        def wrapper(*args, **kwargs):
            loaded = {}
            for module_name in modules:
                mod = __import__(module_name)
                # Register the module under its package name. After the
                # module has been deleted from `sys.modules`, re-importing
                # it would not refresh this entry on its own.
                sys.modules[mod.__package__] = mod
                reload_module(mod)
                loaded[module_name] = mod
            result = function(*args, **kwargs)
            for module_name, mod in loaded.items():
                delete_module(module_name)
                del mod  # drop the local reference so GC can reclaim it
            return result
        return wrapper
    return decorator
"def",
"lazy_load_modules",
"(",
"*",
"modules",
")",
":",
"def",
"decorator",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"module_dict",
"=",
"{",
"}",
"for",
"module_string",
"in",
"modules",
":",... | 36.933333 | 18.466667 |
def summarize(self, rows):
        """Return summary rows for `rows`.

        Parameters
        ----------
        rows : list of dicts
            Normalized rows to summarize.

        Returns
        -------
        A list of summary rows. Each row is a tuple where the first item is
        the data and the second is a dict of keyword arguments that can be
        passed to StyleFields.render.
        """
        columns = list(rows[0].keys())
        agg_styles = {col: self.style[col]["aggregate"]
                      for col in columns if "aggregate" in self.style[col]}
        summaries = {}
        for col, agg_fn in agg_styles.items():
            lgr.debug("Summarizing column %r with %r", col, agg_fn)
            column_values = [row[col] for row in rows
                             if not isinstance(row[col], Nothing)]
            summaries[col] = agg_fn(column_values)
        # Restructure the summaries into rows compatible with pyout.Content.
        # A summary function may return a single item or a list of items, so
        # lay lists out over several rows and singletons on the first row.
        maxlen = max(len(v) if isinstance(v, list) else 1
                     for v in summaries.values())
        summary_rows = []
        for rowidx in range(maxlen):
            sumrow = {}
            for column, values in summaries.items():
                if isinstance(values, list):
                    if rowidx < len(values):
                        sumrow[column] = values[rowidx]
                elif rowidx == 0:
                    sumrow[column] = values
            # Columns without a summary get an empty placeholder.
            for column in columns:
                sumrow.setdefault(column, "")
            summary_rows.append((sumrow,
                                 {"style": self.style.get("aggregate_"),
                                  "adopt": False}))
        return summary_rows
"def",
"summarize",
"(",
"self",
",",
"rows",
")",
":",
"columns",
"=",
"list",
"(",
"rows",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
"agg_styles",
"=",
"{",
"c",
":",
"self",
".",
"style",
"[",
"c",
"]",
"[",
"\"aggregate\"",
"]",
"for",
"c"... | 38.5 | 17.44 |
def from_raw_message(cls, rawmessage):
        """Create message from raw byte stream.

        The byte slices below are the fixed field layout of the
        Manage All-Link Record message.
        """
        fields = (rawmessage[2:3], rawmessage[3:4], rawmessage[4:7],
                  rawmessage[7:8], rawmessage[8:9], rawmessage[9:10],
                  rawmessage[10:11])
        return ManageAllLinkRecord(*fields)
"def",
"from_raw_message",
"(",
"cls",
",",
"rawmessage",
")",
":",
"return",
"ManageAllLinkRecord",
"(",
"rawmessage",
"[",
"2",
":",
"3",
"]",
",",
"rawmessage",
"[",
"3",
":",
"4",
"]",
",",
"rawmessage",
"[",
"4",
":",
"7",
"]",
",",
"rawmessage",
... | 49.777778 | 9.111111 |
def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which
           will be treated as a regular file.
        """
        offset = tarfile.fileobj.tell()
        self.offset_data = offset
        if self.isreg() or self.type not in SUPPORTED_TYPES:
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset
        # Patch the TarInfo object with saved global header information.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
        return self
"def",
"_proc_builtin",
"(",
"self",
",",
"tarfile",
")",
":",
"self",
".",
"offset_data",
"=",
"tarfile",
".",
"fileobj",
".",
"tell",
"(",
")",
"offset",
"=",
"self",
".",
"offset_data",
"if",
"self",
".",
"isreg",
"(",
")",
"or",
"self",
".",
"typ... | 37 | 14.5625 |
def unique_list_dicts(dlist, key):
    """Return a list of dictionaries de-duplicated on the given key.

    When several dictionaries share the same value for ``key``, the last
    one wins, while the position of the first occurrence is kept.

    :param dlist: list of dictionaries
    :param key: the key used to decide uniqueness
    :return list: the de-duplicated dictionaries
    """
    unique = {}
    for entry in dlist:
        unique[entry[key]] = entry
    return list(unique.values())
"def",
"unique_list_dicts",
"(",
"dlist",
",",
"key",
")",
":",
"return",
"list",
"(",
"dict",
"(",
"(",
"val",
"[",
"key",
"]",
",",
"val",
")",
"for",
"val",
"in",
"dlist",
")",
".",
"values",
"(",
")",
")"
] | 25.777778 | 20.111111 |
def find_by_url(self, space_url, id_only=True):
        """
        Returns a space ID given the URL of the space.

        :param space_url: URL of the Space
        :param id_only: when True return only the space ID, otherwise the
            full response body
        :return: space_id: Space url
        :rtype: str
        """
        query = urlencode({'url': space_url})
        resp = self.transport.GET(url='/space/url?%s' % query)
        return resp['space_id'] if id_only else resp
"def",
"find_by_url",
"(",
"self",
",",
"space_url",
",",
"id_only",
"=",
"True",
")",
":",
"resp",
"=",
"self",
".",
"transport",
".",
"GET",
"(",
"url",
"=",
"'/space/url?%s'",
"%",
"urlencode",
"(",
"{",
"'url'",
":",
"space_url",
"}",
")",
")",
"... | 31.076923 | 15.076923 |
def colored(text, color=None, on_color=None, attrs=None):
    """Colorize text using a reimplementation of the colorizer from
    https://github.com/pavdmyt/yaspin so that it works on windows.

    Available text colors:
        red, green, yellow, blue, magenta, cyan, white.
    Available text highlights:
        on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
    Available attributes:
        bold, dark, underline, blink, reverse, concealed.

    Example:
        colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
        colored('Hello, World!', 'green')
    """
    # Delegate to the cross-platform colorizer, mapping the termcolor-style
    # argument names onto its fg/bg keywords.
    styling = {'fg': color, 'bg': on_color, 'attrs': attrs}
    return colorize(text, **styling)
"def",
"colored",
"(",
"text",
",",
"color",
"=",
"None",
",",
"on_color",
"=",
"None",
",",
"attrs",
"=",
"None",
")",
":",
"return",
"colorize",
"(",
"text",
",",
"fg",
"=",
"color",
",",
"bg",
"=",
"on_color",
",",
"attrs",
"=",
"attrs",
")"
] | 36.055556 | 21.611111 |
def send(self, payloads, logger, num_tries=5):
        """
        Enqueue payloads to the SQS queue, retrying failed messages with
        exponential backoff.

        Raises Exception when messages still fail after num_tries attempts.
        """
        from time import sleep
        backoff_interval = 1
        backoff_factor = 2
        for attempt in xrange(0, num_tries):
            failed = self.send_without_retry(payloads)
            if not failed:
                # Everything was accepted: nothing left to retry.
                payloads = []
                break
            # Failures are expected to be quite rare, so be verbose about
            # them for debugging purposes.
            if logger:
                for msg in failed:
                    logger.warning("Failed to send message on try %d: Id=%r, "
                                   "SenderFault=%r, Code=%r, Message=%r" %
                                   (attempt, msg['Id'],
                                    msg.get('SenderFault'), msg.get('Code'),
                                    msg.get('Message')))
            # Wait a little while, in case the problem is that we're talking
            # too fast.
            sleep(backoff_interval)
            backoff_interval *= backoff_factor
            # Keep only the payloads whose messages failed, for the retry.
            payloads = [payloads[int(msg['Id'])] for msg in failed]
        if payloads:
            raise Exception('Messages failed to send to sqs after %d '
                            'retries: %s' % (num_tries, len(payloads)))
"def",
"send",
"(",
"self",
",",
"payloads",
",",
"logger",
",",
"num_tries",
"=",
"5",
")",
":",
"from",
"time",
"import",
"sleep",
"backoff_interval",
"=",
"1",
"backoff_factor",
"=",
"2",
"for",
"try_counter",
"in",
"xrange",
"(",
"0",
",",
"num_tries... | 37.181818 | 19.909091 |
def register(name):
    """Return a decorator that registers the decorated class as a
    resolver with the given *name*.

    :param name: unique key under which the class is stored in
        ``known_resolvers``.
    :raises ValueError: if a resolver is already registered under *name*.
    """
    def decorator(class_):
        if name in known_resolvers:
            raise ValueError('duplicate resolver name "%s"' % name)
        known_resolvers[name] = class_
        # Return the class so ``@register(...)`` leaves the decorated name
        # bound to the class; previously the decorator returned None, which
        # rebound the decorated class to None.
        return class_
    return decorator
"def",
"register",
"(",
"name",
")",
":",
"def",
"decorator",
"(",
"class_",
")",
":",
"if",
"name",
"in",
"known_resolvers",
":",
"raise",
"ValueError",
"(",
"'duplicate resolver name \"%s\"'",
"%",
"name",
")",
"known_resolvers",
"[",
"name",
"]",
"=",
"cl... | 38.5 | 11.125 |
def report_all_label(self):
        """
        Return the best label of the asked entry.

        Parameters
        ----------

        Returns
        -------
        labels: list of object, shape=(m)
            The best label of all samples.
        """
        result = np.empty(len(self.dataset), dtype=int)
        for pruning in self.prunings:
            winning_label = self._best_label(pruning)
            leaf_indices = self._find_leaves(pruning)
            result[leaf_indices] = winning_label
        return result
"def",
"report_all_label",
"(",
"self",
")",
":",
"labels",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"self",
".",
"dataset",
")",
",",
"dtype",
"=",
"int",
")",
"for",
"pruning",
"in",
"self",
".",
"prunings",
":",
"best_label",
"=",
"self",
".",
... | 26.105263 | 15.473684 |
def is_xpath_selector(selector):
    """
    A basic method to determine if a selector is an xpath selector.

    :param selector: the selector string to classify
    :return: True if the selector starts with "/", "./", or "(" (the
        usual XPath prefixes), False otherwise.
    """
    # str.startswith accepts a tuple of prefixes, replacing the chained
    # boolean tests of the original implementation.
    return selector.startswith(('/', './', '('))
"def",
"is_xpath_selector",
"(",
"selector",
")",
":",
"if",
"(",
"selector",
".",
"startswith",
"(",
"'/'",
")",
"or",
"selector",
".",
"startswith",
"(",
"'./'",
")",
"or",
"(",
"selector",
".",
"startswith",
"(",
"'('",
")",
")",
")",
":",
"return",... | 31.625 | 13.375 |
def _eval_kwargs(self):
"""Evaluates any parameterized methods in the kwargs"""
evaled_kwargs = {}
for k, v in self.p.kwargs.items():
if util.is_param_method(v):
v = v()
evaled_kwargs[k] = v
return evaled_kwargs | [
"def",
"_eval_kwargs",
"(",
"self",
")",
":",
"evaled_kwargs",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"p",
".",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"util",
".",
"is_param_method",
"(",
"v",
")",
":",
"v",
"=",
"v",
"("... | 34.5 | 8.875 |
def logout(self):
        """
        Log out of the current session.

        Sends the account-logout request to the API, then clears the local
        cookies and the cached user alias, and persists the emptied
        session state.
        """
        self.req(API_ACCOUNT_LOGOUT % self.ck())
        self.cookies = {}
        self.user_alias = None
        self.persist()
"def",
"logout",
"(",
"self",
")",
":",
"self",
".",
"req",
"(",
"API_ACCOUNT_LOGOUT",
"%",
"self",
".",
"ck",
"(",
")",
")",
"self",
".",
"cookies",
"=",
"{",
"}",
"self",
".",
"user_alias",
"=",
"None",
"self",
".",
"persist",
"(",
")"
] | 20.5 | 15.3 |
def force_clean(self, remove_rw=False, allow_lazy=False, retries=5, sleep_interval=0.5):
        """Attempts to call the clean method, but will retry automatically if an error is raised. When the attempts
        run out, it will raise the last error.

        Note that the method will only catch :class:`ImageMounterError` exceptions.

        :param bool remove_rw: indicates whether a read-write cache should be removed
        :param bool allow_lazy: indicates whether lazy unmounting is allowed
        :param retries: Maximum amount of retries while unmounting
        :param sleep_interval: The sleep interval between attempts.
        :raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
        :raises CleanupError: when actual cleanup fails. Some are swallowed.
        """
        attempts_left = retries
        while True:
            try:
                self.clean(remove_rw=remove_rw, allow_lazy=allow_lazy)
                return
            except ImageMounterError:
                # Out of retries: propagate the last error.
                if attempts_left == 0:
                    raise
                attempts_left -= 1
                time.sleep(sleep_interval)
"def",
"force_clean",
"(",
"self",
",",
"remove_rw",
"=",
"False",
",",
"allow_lazy",
"=",
"False",
",",
"retries",
"=",
"5",
",",
"sleep_interval",
"=",
"0.5",
")",
":",
"while",
"True",
":",
"try",
":",
"self",
".",
"clean",
"(",
"remove_rw",
"=",
... | 46.458333 | 24.875 |
def regex(self):
        """
        RFC822 Email Address Regex

        Originally written by Cal Henderson
        c.f. http://iamcal.com/publish/articles/php/parsing_email/
        Translated to Python by Tim Fletcher with changes suggested by Dan Kubb
        http://tfletcher.com/lib/rfc822.py
        Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
        http://creativecommons.org/licenses/by-sa/2.5/

        :return: compiled pattern matching a complete RFC822 addr-spec
        """
        # Character classes from the RFC822 grammar.
        qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]'
        dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]'
        atom = ('[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40'
                '\\x5b-\\x5d\\x7f-\\xff]+')
        quoted_pair = '\\x5c[\\x00-\\x7f]'
        # Composite productions built from the classes above.
        domain_literal = "\\x5b(?:%s|%s)*\\x5d" % (dtext, quoted_pair)
        quoted_string = "\\x22(?:%s|%s)*\\x22" % (qtext, quoted_pair)
        sub_domain = "(?:%s|%s)" % (atom, domain_literal)
        word = "(?:%s|%s)" % (atom, quoted_string)
        domain = "%s(?:\\x2e%s)*" % (sub_domain, sub_domain)
        local_part = "%s(?:\\x2e%s)*" % (word, word)
        addr_spec = "%s\\x40%s" % (local_part, domain)
        return re.compile('\A%s\Z' % addr_spec)
"def",
"regex",
"(",
"self",
")",
":",
"qtext",
"=",
"'[^\\\\x0d\\\\x22\\\\x5c\\\\x80-\\\\xff]'",
"dtext",
"=",
"'[^\\\\x0d\\\\x5b-\\\\x5d\\\\x80-\\\\xff]'",
"atom",
"=",
"'[^\\\\x00-\\\\x20\\\\x22\\\\x28\\\\x29\\\\x2c\\\\x2e\\\\x3a-\\\\x3c\\\\x3e\\\\x40'",
"atom",
"+=",
"'\\\\x5b-... | 41 | 19 |
def _edge_group_substitution(
        self, ndid, nsplit, idxs, sr_tab, ndoffset, ed_remove, into_or_from
    ):
        """
        Reconnect edges after a low-resolution node has been split.

        :param ndid: id of the low-resolution node whose edges are rewired
        :param nsplit: number of splits per axis
        :param idxs: candidate edge indexes at low resolution
            (assumed np.ndarray -- see the fancy indexing below)
        :param sr_tab: split-resolution lookup table mapping an edge
            direction to the local ids of the new high-resolution nodes
        :param ndoffset: id offset of the newly created high-resolution nodes
        :param ed_remove: accumulator list of edge indexes to remove;
            appended to and returned
        :param into_or_from: if zero, connection of input edges is done. If
            one, connection of output edges is performed.
        :return: the updated ed_remove list
        """
        # Edges (among idxs) whose far endpoint is the node being split.
        # This fancy indexing is useful for type(idxs) == np.ndarray.
        eidxs = idxs[nm.where(self.edges[idxs, 1 - into_or_from] == ndid)[0]]
        for igrp in self.edges_by_group(eidxs):
            if igrp.shape[0] > 1:
                # high resolution block to high resolution block:
                # all edges in the group share the same direction
                directions = self.edge_dir[igrp[0]]
                edge_indexes = sr_tab[directions, :].T.flatten() + ndoffset
                self.edges[igrp, 1] = edge_indexes
                if self._edge_weight_table is not None:
                    self.edges_weights[igrp] = self._edge_weight_table[1, directions]
            else:
                # low res block to hi res block, if into_or_from is set to 0
                # hig res block to low res block, if into_or_from is set to 1
                ed_remove.append(igrp[0])
                # number of new edges is equal to number of pixels on one side of the box (in 2D and D too)
                nnewed = np.power(nsplit, self.data.ndim - 1)
                muleidxs = nm.tile(igrp, nnewed)
                # copy the low-res edge multiple times
                newed = self.edges[muleidxs, :]
                neweddir = self.edge_dir[muleidxs]
                local_node_ids = sr_tab[
                    self.edge_dir[igrp] + self.data.ndim * into_or_from, :
                ].T.flatten()
                # first or second (the actual) node id is substituted by new node indexes
                newed[:, 1 - into_or_from] = local_node_ids + ndoffset
                if self._edge_weight_table is not None:
                    self.add_edges(
                        newed, neweddir, self.edge_group[igrp], edge_low_or_high=1
                    )
                else:
                    self.add_edges(
                        newed, neweddir, self.edge_group[igrp], edge_low_or_high=None
                    )
        return ed_remove
"def",
"_edge_group_substitution",
"(",
"self",
",",
"ndid",
",",
"nsplit",
",",
"idxs",
",",
"sr_tab",
",",
"ndoffset",
",",
"ed_remove",
",",
"into_or_from",
")",
":",
"# this is useful for type(idxs) == np.ndarray",
"eidxs",
"=",
"idxs",
"[",
"nm",
".",
"wher... | 48.241379 | 19.586207 |
def periodicity(self) -> str:
        """Get a random periodicity string.

        :return: Periodicity.
        """
        choices = self._data['periodicity']
        return self.random.choice(choices)
"def",
"periodicity",
"(",
"self",
")",
"->",
"str",
":",
"periodicity",
"=",
"self",
".",
"_data",
"[",
"'periodicity'",
"]",
"return",
"self",
".",
"random",
".",
"choice",
"(",
"periodicity",
")"
] | 29.285714 | 10.714286 |
def constants_pyx():
    """generate CONST = ZMQ_CONST and __all__ for constants.pxi"""
    all_lines = []
    assignments = []
    for name in all_names:
        if name == "NULL":
            # avoid conflict with NULL in Cython
            assignments.append("globals()['NULL'] = ZMQ_NULL")
        else:
            assignments.append('{0} = ZMQ_{0}'.format(name))
        all_lines.append(' "{0}",'.format(name))
    return dict(ASSIGNMENTS='\n'.join(assignments), ALL='\n'.join(all_lines))
"def",
"constants_pyx",
"(",
")",
":",
"all_lines",
"=",
"[",
"]",
"assign_lines",
"=",
"[",
"]",
"for",
"name",
"in",
"all_names",
":",
"if",
"name",
"==",
"\"NULL\"",
":",
"# avoid conflict with NULL in Cython",
"assign_lines",
".",
"append",
"(",
"\"globals... | 40.75 | 17.916667 |
def _upcoming_datetime_from(self):
        """
        The datetime this event next starts in the local time zone, or None if
        it is finished.
        """
        return self.__localAfter(timezone.localtime(), dt.time.max,
                                 excludeCancellations=True,
                                 excludeExtraInfo=True)
"def",
"_upcoming_datetime_from",
"(",
"self",
")",
":",
"nextDt",
"=",
"self",
".",
"__localAfter",
"(",
"timezone",
".",
"localtime",
"(",
")",
",",
"dt",
".",
"time",
".",
"max",
",",
"excludeCancellations",
"=",
"True",
",",
"excludeExtraInfo",
"=",
"T... | 40.555556 | 16.333333 |
def get_next_slug(self, slug, **kwargs):
        """Gets the next available slug.

        :param slug: the slug to slugify
        :param kwargs: additional filter criteria to check for when looking for
            a unique slug.

        Example:

        if the value "my-slug" is already taken, this method will append "-n"
        to the end of the slug until the next available slug is found.
        """
        base = slugify(slug)
        candidate = base
        suffix = 0
        while not self.is_slug_available(slug=candidate, **kwargs):
            suffix += 1
            candidate = '{0}-{1}'.format(base, suffix)
        return candidate
"def",
"get_next_slug",
"(",
"self",
",",
"slug",
",",
"*",
"*",
"kwargs",
")",
":",
"original_slug",
"=",
"slug",
"=",
"slugify",
"(",
"slug",
")",
"count",
"=",
"0",
"while",
"not",
"self",
".",
"is_slug_available",
"(",
"slug",
"=",
"slug",
",",
"... | 29.52381 | 23.285714 |
def send(self, commands):
        """Ship commands to the daemon

        Arguments:
            commands: e.g., '?WATCH={{'enable':true,'json':true}}'|'?VERSION;'|'?DEVICES;'|'?DEVICE;'|'?POLL;'
        """
        # The exception order matters: the TypeError fallback re-sends the
        # raw command on Python 2, while socket errors are only logged.
        try:
            self.streamSock.send(bytes(commands, encoding='utf-8'))
        except TypeError:
            self.streamSock.send(commands)  # 2.7 chokes on 'bytes' and 'encoding='
        except (OSError, IOError) as error:  # HEY MOE, LEAVE THIS ALONE FOR NOW!
            sys.stderr.write(f'\nAGPS3 send command fail with {error}\n')
sys.stderr.write(f'\nAGPS3 send command fail with {error}\n') | [
"def",
"send",
"(",
"self",
",",
"commands",
")",
":",
"try",
":",
"self",
".",
"streamSock",
".",
"send",
"(",
"bytes",
"(",
"commands",
",",
"encoding",
"=",
"'utf-8'",
")",
")",
"except",
"TypeError",
":",
"self",
".",
"streamSock",
".",
"send",
"... | 49.363636 | 26.727273 |
def assign_license(license_key, license_name, entity, entity_display_name,
                   safety_checks=True, service_instance=None):
    '''
    Assigns a license to an entity

    license_key
        Key of the license to assign
        See ``_get_entity`` docstrings for format.

    license_name
        Display name of license

    entity
        Dictionary representation of an entity

    entity_display_name
        Entity name used in logging

    safety_checks
        Specify whether to perform safety check or to skip the checks and try
        performing the required task. Default is False.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.assign_license license_key=00000:00000
            license name=test entity={type:cluster,datacenter:dc,cluster:cl}
    '''
    log.trace('Assigning license %s to entity %s', license_key, entity)
    _validate_entity(entity)
    if safety_checks:
        # Verify the license actually exists before trying to assign it.
        available = salt.utils.vmware.get_licenses(service_instance)
        if not any(lic.licenseKey == license_key for lic in available):
            raise VMwareObjectRetrievalError('License \'{0}\' wasn\'t found'
                                             ''.format(license_name))
    salt.utils.vmware.assign_license(
        service_instance,
        license_key,
        license_name,
        entity_ref=_get_entity(service_instance, entity),
        entity_name=entity_display_name)
"def",
"assign_license",
"(",
"license_key",
",",
"license_name",
",",
"entity",
",",
"entity_display_name",
",",
"safety_checks",
"=",
"True",
",",
"service_instance",
"=",
"None",
")",
":",
"log",
".",
"trace",
"(",
"'Assigning license %s to entity %s'",
",",
"l... | 33.5 | 23.909091 |
def invalidate(self):
        """Invalidate cached data for this page."""
        cache.delete(self.PAGE_LANGUAGES_KEY % (self.pk))
        cache.delete('PAGE_FIRST_ROOT_ID')
        cache.delete(self.CHILDREN_KEY % self.pk)
        cache.delete(self.PUB_CHILDREN_KEY % self.pk)
        # XXX: Should this have a depth limit?
        if self.parent_id:
            self.parent.invalidate()
        self._languages = None
        self._complete_slug = None
        self._content_dict = dict()
        placeholders = get_placeholders(self.get_template())
        placeholder_names = [p.ctype for p in placeholders]
        # 'slug' and 'title' are always cached, template placeholders or not.
        for required in ('slug', 'title'):
            if required not in placeholder_names:
                placeholder_names.append(required)
        from pages.managers import fake_page
        for shared_placeholder in (p for p in placeholders if p.shared):
            fake_page.invalidate(shared_placeholder.ctype)
        # delete content cache, frozen (1) or not (0)
        for name in placeholder_names:
            for frozen in (1, 0):
                cache.delete(PAGE_CONTENT_DICT_KEY % (self.pk, name, frozen))
        cache.delete(self.PAGE_URL_KEY % (self.pk))
"def",
"invalidate",
"(",
"self",
")",
":",
"cache",
".",
"delete",
"(",
"self",
".",
"PAGE_LANGUAGES_KEY",
"%",
"(",
"self",
".",
"pk",
")",
")",
"cache",
".",
"delete",
"(",
"'PAGE_FIRST_ROOT_ID'",
")",
"cache",
".",
"delete",
"(",
"self",
".",
"CHIL... | 32.358974 | 13.820513 |
def gap_proportion(sequences, gap_chars='-'):
    """
    Generates a list with the proportion of gaps by index in a set of
    sequences.

    :param sequences: iterable of alignment records; each record supports
        ``len()`` and exposes its residues through a ``.seq`` attribute.
    :param gap_chars: characters that count as gaps (default ``'-'``).
    :return: list of per-column gap proportions; empty for no sequences
        (previously an empty input raised NameError because the counter
        was never bound).
    :raises ValueError: if the sequences do not all share the same length.
    """
    aln_len = None
    gaps = []
    sequence_count = 0
    for i, sequence in enumerate(sequences):
        if aln_len is None:
            # First sequence fixes the alignment width.
            aln_len = len(sequence)
            gaps = [0] * aln_len
        elif len(sequence) != aln_len:
            raise ValueError(("Unexpected sequence length {0}. Is this "
                              "an alignment?").format(len(sequence)))
        # Update any gap positions in gap list
        for j, char in enumerate(sequence.seq):
            if char in gap_chars:
                gaps[j] += 1
        sequence_count = i + 1
    if not sequence_count:
        # An empty alignment simply has no columns.
        return []
    return [count / float(sequence_count) for count in gaps]
"def",
"gap_proportion",
"(",
"sequences",
",",
"gap_chars",
"=",
"'-'",
")",
":",
"aln_len",
"=",
"None",
"gaps",
"=",
"[",
"]",
"for",
"i",
",",
"sequence",
"in",
"enumerate",
"(",
"sequences",
")",
":",
"if",
"aln_len",
"is",
"None",
":",
"aln_len",... | 32.25 | 16.166667 |
def getHostsFromFile(filename):
    """Parse a file to return a list of (hostname, workers) tuples."""
    hostname_re = re.compile(r"^[^ /\t=\n]+")
    worker_re = re.compile(r"\d+")
    hosts = []
    with open(filename) as hostfile:
        for line in hostfile:
            stripped = line.strip()
            # A bracketed entry is a SLURM grouping instead of a regular
            # "hostname [workers]" line.
            if re.search('[\[\]]', line):
                hosts.extend(parseSLURM(stripped))
                continue
            host_match = hostname_re.search(stripped)
            if not host_match:
                continue
            hostname = host_match.group()
            worker_match = worker_re.search(line[host_match.end():])
            # 0 workers means: automatically assign based on CPU count.
            workers = int(worker_match.group()) if worker_match else 0
            hosts.append((hostname, workers))
    return hosts
return hosts | [
"def",
"getHostsFromFile",
"(",
"filename",
")",
":",
"valid_hostname",
"=",
"r\"^[^ /\\t=\\n]+\"",
"workers",
"=",
"r\"\\d+\"",
"hostname_re",
"=",
"re",
".",
"compile",
"(",
"valid_hostname",
")",
"worker_re",
"=",
"re",
".",
"compile",
"(",
"workers",
")",
... | 36.96 | 13 |
def train_on_replay_memory(self, batch_info):
        """ Train agent on a memory gotten from replay buffer """
        self.model.train()
        # Algo will aggregate per-round results into this list:
        sub_batches = []
        batch_info['sub_batch_data'] = sub_batches
        for _ in range(self.settings.training_rounds):
            rollout = self.env_roller.sample(batch_info, self.model, self.settings.training_steps)
            round_result = self.algo.optimizer_step(
                batch_info=batch_info,
                device=self.device,
                model=self.model,
                rollout=rollout.to_device(self.device)
            )
            self.env_roller.update(rollout=rollout, batch_info=round_result)
            sub_batches.append(round_result)
        batch_info.aggregate_key('sub_batch_data')
"def",
"train_on_replay_memory",
"(",
"self",
",",
"batch_info",
")",
":",
"self",
".",
"model",
".",
"train",
"(",
")",
"# Algo will aggregate data into this list:",
"batch_info",
"[",
"'sub_batch_data'",
"]",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"s... | 37.045455 | 22.727273 |
def handle_gateway_ready_20(msg):
    """Process an internal gateway ready message."""
    # Trace the incoming message fields.
    _LOGGER.info(
        'n:%s c:%s t:%s s:%s p:%s', msg.node_id, msg.child_id, msg.type,
        msg.sub_type, msg.payload)
    # Let the gateway notify its listeners that it is ready.
    msg.gateway.alert(msg)
    # Reply with a broadcast (node 255) discover request, no ack, empty payload.
    discover = msg.gateway.const.Internal.I_DISCOVER
    return msg.copy(node_id=255, ack=0, sub_type=discover, payload='')
"def",
"handle_gateway_ready_20",
"(",
"msg",
")",
":",
"_LOGGER",
".",
"info",
"(",
"'n:%s c:%s t:%s s:%s p:%s'",
",",
"msg",
".",
"node_id",
",",
"msg",
".",
"child_id",
",",
"msg",
".",
"type",
",",
"msg",
".",
"sub_type",
",",
"msg",
".",
"payload",
... | 38.666667 | 15.777778 |
def supports_heading_type(self, heading_type=None):
    """Tests if the given heading type is supported.

    arg:    heading_type (osid.type.Type): a heading Type
    return: (boolean) - ``true`` if the type is supported, ``false``
            otherwise
    raise:  IllegalState - syntax is not a ``HEADING``
    raise:  NullArgument - ``heading_type`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.Metadata.supports_coordinate_type
    from .osid_errors import IllegalState, NullArgument
    if not heading_type:
        raise NullArgument('no input Type provided')
    if self._kwargs['syntax'] not in ['``HEADING``']:
        # FIX: replaced the placeholder message
        # 'put more meaninful message here' with a real one.
        raise IllegalState(
            'heading types are only supported for HEADING syntax, not ' +
            str(self._kwargs['syntax']))
    # NOTE(review): get_heading_types is referenced without parentheses --
    # presumably a property; confirm it is not meant to be a method call.
    return heading_type in self.get_heading_types
"def",
"supports_heading_type",
"(",
"self",
",",
"heading_type",
"=",
"None",
")",
":",
"# Implemented from template for osid.Metadata.supports_coordinate_type",
"from",
".",
"osid_errors",
"import",
"IllegalState",
",",
"NullArgument",
"if",
"not",
"heading_type",
":",
... | 47.555556 | 20.166667 |
def split_levels(fields):
    """
    Convert dot-notation such as ['a', 'a.b', 'a.d', 'c'] into
    current-level fields ['a', 'c'] and next-level fields
    {'a': ['b', 'd']}.

    Args:
        fields: A list of field names, or a comma-separated string of
            them.  Falsy input yields empty results.

    Returns:
        Tuple ``(first_level_fields, next_level_fields)`` where
        ``first_level_fields`` is de-duplicated in first-seen order and
        ``next_level_fields`` maps each first-level name to its list of
        sub-field paths.
    """
    first_level_fields = []
    next_level_fields = {}
    if not fields:
        return first_level_fields, next_level_fields
    if not isinstance(fields, list):
        # Accept "a, b.c" style comma-separated strings as well.
        fields = [part.strip() for part in fields.split(",") if part.strip()]
    for field in fields:
        if "." in field:
            first_level, remainder = field.split(".", 1)
            first_level_fields.append(first_level)
            next_level_fields.setdefault(first_level, []).append(remainder)
        else:
            first_level_fields.append(field)
    # FIX: deduplicate while preserving first-seen order.  The original
    # list(set(...)) made the returned order nondeterministic across runs.
    first_level_fields = list(dict.fromkeys(first_level_fields))
    return first_level_fields, next_level_fields
"def",
"split_levels",
"(",
"fields",
")",
":",
"first_level_fields",
"=",
"[",
"]",
"next_level_fields",
"=",
"{",
"}",
"if",
"not",
"fields",
":",
"return",
"first_level_fields",
",",
"next_level_fields",
"if",
"not",
"isinstance",
"(",
"fields",
",",
"list"... | 33.041667 | 18.208333 |
def ori(ip, rc=None, r=None, iq=None, ico=None, pl=None, fl=None, fs=None,
        ot=None, coe=None, moc=None):
    # pylint: disable=too-many-arguments, redefined-outer-name, invalid-name
    """
    This function is a wrapper for
    :meth:`~pywbem.WBEMConnection.OpenReferenceInstances`.
    Open an enumeration session to retrieve the association instances that
    reference a source instance.
    Use the :func:`~wbemcli.piwp` function to retrieve the next set of
    instances or the :func:`~wbemcli.ce` function to close the enumeration
    session before it is complete.
    Parameters:
      ip (:class:`~pywbem.CIMInstanceName`):
          Source instance path.
      rc (:term:`string`):
          ResultClass filter: Include only traversals across this association
          (result) class.
          `None` means this filter is not applied.
      r (:term:`string`):
          Role filter: Include only traversals from this role (= reference
          name) in source object.
          `None` means this filter is not applied.
      iq (:class:`py:bool`):
          IncludeQualifiers flag: Include qualifiers.
          `None` will cause the server default of `False` to be used.
          Deprecated in :term:`DSP0200`: Clients cannot rely on qualifiers to
          be returned in this operation.
      ico (:class:`py:bool`):
          IncludeClassOrigin flag: Include class origin information for the
          properties in the retrieved instances.
          `None` will cause the server default of `False` to be used.
          Deprecated in :term:`DSP0200`: WBEM servers may either implement this
          parameter as specified, or may treat any specified value as `False`.
      pl (:term:`string` or :term:`py:iterable` of :term:`string`):
          PropertyList: Names of properties to be included (if not otherwise
          excluded). An empty iterable indicates to include no properties.
          If `None`, all properties will be included.
      fl (:term:`string`):
          Filter query language to be used for the filter defined in the `fs`
          parameter. The DMTF-defined Filter Query Language
          (see :term:`DSP0212`) is specified as "DMTF:FQL".
          `None` means that no such filtering is performed.
      fs (:term:`string`):
          Filter to apply to objects to be returned. Based on filter query
          language defined by `fl` parameter.
          `None` means that no such filtering is performed.
      ot (:class:`~pywbem.Uint32`):
          Operation timeout in seconds. This is the minimum time the WBEM server
          must keep the enumeration session open between requests on that
          session.
          A value of 0 indicates that the server should never time out.
          The server may reject the proposed value.
          `None` will cause the server to use its default timeout.
      coe (:class:`py:bool`):
          Continue on error flag.
          `None` will cause the server to use its default of `False`.
      moc (:class:`~pywbem.Uint32`):
          Maximum number of objects to return for this operation.
          `None` will cause the server to use its default of 0.
    Returns:
        A :func:`~py:collections.namedtuple` object containing the following
        named items:
        * **instances** (list of :class:`~pywbem.CIMInstance`):
          The retrieved instances.
        * **eos** (:class:`py:bool`):
          `True` if the enumeration session is exhausted after this operation.
          Otherwise `eos` is `False` and the `context` item is the context
          object for the next operation on the enumeration session.
        * **context** (:func:`py:tuple` of server_context, namespace):
          A context object identifying the open enumeration session, including
          its current enumeration state, and the namespace. This object must be
          supplied with the next pull or close operation for this enumeration
          session.
    """
    # Thin pass-through: all filtering/paging parameters are forwarded
    # unchanged to the global connection object.
    return CONN.OpenReferenceInstances(ip,
                                       ResultClass=rc,
                                       Role=r,
                                       IncludeQualifiers=iq,
                                       IncludeClassOrigin=ico,
                                       PropertyList=pl,
                                       FilterQueryLanguage=fl,
                                       FilterQuery=fs,
                                       OperationTimeout=ot,
                                       ContinueOnError=coe,
                                       MaxObjectCount=moc)
"def",
"ori",
"(",
"ip",
",",
"rc",
"=",
"None",
",",
"r",
"=",
"None",
",",
"iq",
"=",
"None",
",",
"ico",
"=",
"None",
",",
"pl",
"=",
"None",
",",
"fl",
"=",
"None",
",",
"fs",
"=",
"None",
",",
"ot",
"=",
"None",
",",
"coe",
"=",
"Non... | 38.161017 | 25.991525 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.