text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
async def get_match(self, m_id, force_update=False) -> Match:
""" get a single match by id
|methcoro|
Args:
m_id: match id
force_update (default=False): True to force an update to the Challonge API
Returns:
Match
Raises:
APIException
"""
found_m = self._find_match(m_id)
if force_update or found_m is None:
await self.get_matches()
found_m = self._find_match(m_id)
return found_m | [
"async",
"def",
"get_match",
"(",
"self",
",",
"m_id",
",",
"force_update",
"=",
"False",
")",
"->",
"Match",
":",
"found_m",
"=",
"self",
".",
"_find_match",
"(",
"m_id",
")",
"if",
"force_update",
"or",
"found_m",
"is",
"None",
":",
"await",
"self",
... | 24.190476 | 21.285714 |
def get_policies_by_id(profile_manager, policy_ids):
'''
Returns a list of policies with the specified ids.
profile_manager
Reference to the profile manager.
policy_ids
List of policy ids to retrieve.
'''
try:
return profile_manager.RetrieveContent(policy_ids)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg) | [
"def",
"get_policies_by_id",
"(",
"profile_manager",
",",
"policy_ids",
")",
":",
"try",
":",
"return",
"profile_manager",
".",
"RetrieveContent",
"(",
"policy_ids",
")",
"except",
"vim",
".",
"fault",
".",
"NoPermission",
"as",
"exc",
":",
"log",
".",
"except... | 32 | 17.181818 |
def comm_grid(patch, cols, splits, divs, metric='Sorensen'):
"""
Calculates commonality as a function of distance for a gridded patch
Parameters
----------
{0}
divs : str
Description of how to divide x_col and y_col. Unlike SAR and EAR, only
one division can be given at a time. See notes.
metric : str
One of Sorensen or Jaccard, giving the metric to use for commonality
calculation
Returns
-------
{1} Result has three columns, pair, x, and y, that give the locations of
the pair of patches for which commonality is calculated, the distance
between those cells, and the Sorensen or Jaccard result.
Notes
-----
{2}
For gridded commonality, cols must also contain x_col and y_col, giving the
x and y dimensions along which to grid the patch.
{3}
"""
(spp_col, count_col, x_col, y_col), patch = \
_get_cols(['spp_col', 'count_col', 'x_col', 'y_col'], cols, patch)
# Loop through each split
result_list = []
for substring, subpatch in _yield_subpatches(patch, splits):
# Get spatial table and break out columns
spatial_table = _yield_spatial_table(subpatch, divs, spp_col,
count_col, x_col, y_col)
spp_set = spatial_table['spp_set']
cell_loc = spatial_table['cell_loc']
n_spp = spatial_table['n_spp']
# Get all possible pairwise combinations of cells
pair_list = []
dist_list = []
comm_list = []
for i in range(len(spatial_table)):
for j in range(i+1, len(spatial_table)):
iloc = np.round(cell_loc[i], 6)
jloc = np.round(cell_loc[j], 6)
pair_list.append('('+str(iloc[0])+' '+str(iloc[1])+') - '+
'('+str(jloc[0])+' '+str(jloc[1])+')')
dist_list.append(_distance(cell_loc[i], cell_loc[j]))
ij_intersect = spp_set[i] & spp_set[j]
if metric.lower() == 'sorensen':
comm = 2*len(ij_intersect) / (n_spp[i] + n_spp[j])
elif metric.lower() == 'jaccard':
comm = len(ij_intersect) / len(spp_set[i] | spp_set[j])
else:
raise ValueError, ("Only Sorensen and Jaccard metrics are "
"available for gridded commonality")
comm_list.append(comm)
# Append subset result
subresult = pd.DataFrame({'pair': pair_list, 'x': dist_list,
'y': comm_list})
result_list.append((substring, subresult))
# Return all results
return result_list | [
"def",
"comm_grid",
"(",
"patch",
",",
"cols",
",",
"splits",
",",
"divs",
",",
"metric",
"=",
"'Sorensen'",
")",
":",
"(",
"spp_col",
",",
"count_col",
",",
"x_col",
",",
"y_col",
")",
",",
"patch",
"=",
"_get_cols",
"(",
"[",
"'spp_col'",
",",
"'co... | 34.986842 | 24.644737 |
def _setSmsMemory(self, readDelete=None, write=None):
""" Set the current SMS memory to use for read/delete/write operations """
# Switch to the correct memory type if required
if write != None and write != self._smsMemWrite:
self.write()
readDel = readDelete or self._smsMemReadDelete
self.write('AT+CPMS="{0}","{1}"'.format(readDel, write))
self._smsMemReadDelete = readDel
self._smsMemWrite = write
elif readDelete != None and readDelete != self._smsMemReadDelete:
self.write('AT+CPMS="{0}"'.format(readDelete))
self._smsMemReadDelete = readDelete | [
"def",
"_setSmsMemory",
"(",
"self",
",",
"readDelete",
"=",
"None",
",",
"write",
"=",
"None",
")",
":",
"# Switch to the correct memory type if required",
"if",
"write",
"!=",
"None",
"and",
"write",
"!=",
"self",
".",
"_smsMemWrite",
":",
"self",
".",
"writ... | 54.583333 | 14.25 |
def mpfr_floordiv(rop, x, y, rnd):
"""
Given two MPFR numbers x and y, compute floor(x / y),
rounded if necessary using the given rounding mode.
The result is placed in 'rop'.
"""
# Algorithm notes
# ---------------
# A simple and obvious approach is to compute floor(x / y) exactly, and
# then round to the nearest representable value using the given rounding
# mode. This requires computing x / y to a precision sufficient to ensure
# that floor(x / y) is exactly representable. If abs(x / y) < 2**r, then
# abs(floor(x / y)) <= 2**r, and so r bits of precision is enough.
# However, for large quotients this is impractical, and we need some other
# method. For x / y sufficiently large, it's possible to show that x / y
# and floor(x / y) are indistinguishable, in the sense that both quantities
# round to the same value. More precisely, we have the following theorem:
#
# Theorem. Suppose that x and y are nonzero finite binary floats
# representable with p and q bits of precision, respectively. Let R be any
# of the IEEE 754 standard rounding modes, and choose a target precision r.
# Write rnd for the rounding operation from Q to precision-r binary floats
# with rounding mode R. Write bin(x) for the binade of a nonzero float x.
#
# If R is a round-to-nearest rounding mode, and either
#
# (1) p <= q + r and |x / y| >= 2^(q + r), or
# (2) p > q + r and bin(x) - bin(y) >= p
#
# then
#
# rnd(floor(x / y)) == rnd(x / y)
#
# Conversely, if R is a directed rounding mode, and either
#
# (1) p < q + r and |x / y| >= 2^(q + r - 1), or
# (2) p >= q + r and bin(x) - bin(y) >= p
#
# then again
#
# rnd(floor(x / y)) == rnd(x / y).
#
# Proof. See separate notes and Coq proof in the float-proofs
# repository.
#
# Rather than distinguish between the various cases (R directed
# or not, p large versus p small) above, we use a weaker but
# simpler amalgamation of the above result:
#
# Corollary 1. With x, y, p, q, R, r and rnd as above, if
#
# |x / y| >= 2^max(q + r, p)
#
# then
#
# rnd(floor(x / y)) == rnd(x / y).
#
# Proof. Note that |x / y| >= 2^p implies bin(x) - bin(y) >= p,
# so it's enough that |x / y| >= 2^max(p, q + r) in the case of
# a round-to-nearest mode, and that |x / y| >= 2^max(p, q + r - 1)
# in the case of a directed rounding mode.
# In special cases, it's safe to defer to mpfr_div: the result in
# these cases is always 0, infinity, or nan.
if not mpfr.mpfr_regular_p(x) or not mpfr.mpfr_regular_p(y):
return mpfr.mpfr_div(rop, x, y, rnd)
e = _quotient_exponent(x, y)
p = mpfr.mpfr_get_prec(x)
q = mpfr.mpfr_get_prec(y)
r = mpfr.mpfr_get_prec(rop)
# If e - 1 >= max(p, q+r) then |x / y| >= 2^(e-1) >= 2^max(p, q+r),
# so by the above theorem, round(floordiv(x, y)) == round(div(x, y)).
if e - 1 >= max(p, q + r):
return mpfr.mpfr_div(rop, x, y, rnd)
# Slow version: compute to sufficient bits to get integer precision. Given
# that 2**(e-1) <= x / y < 2**e, need >= e bits of precision.
z_prec = max(e, 2)
z = mpfr.Mpfr_t()
mpfr.mpfr_init2(z, z_prec)
# Compute the floor exactly. The division may set the
# inexact flag, so we save its state first.
old_inexact = mpfr.mpfr_inexflag_p()
mpfr.mpfr_div(z, x, y, mpfr.MPFR_RNDD)
if not old_inexact:
mpfr.mpfr_clear_inexflag()
# Floor result should be exactly representable, so any rounding mode will
# do.
ternary = mpfr.mpfr_rint_floor(z, z, rnd)
assert ternary == 0
# ... and round to the given rounding mode.
return mpfr.mpfr_set(rop, z, rnd) | [
"def",
"mpfr_floordiv",
"(",
"rop",
",",
"x",
",",
"y",
",",
"rnd",
")",
":",
"# Algorithm notes",
"# ---------------",
"# A simple and obvious approach is to compute floor(x / y) exactly, and",
"# then round to the nearest representable value using the given rounding",
"# mode. Thi... | 38.020408 | 23.408163 |
def select(self, domain_or_name, query='', next_token=None,
consistent_read=False):
"""
Returns a set of Attributes for item names within domain_name that
match the query. The query must be expressed in using the SELECT
style syntax rather than the original SimpleDB query language.
Even though the select request does not require a domain object,
a domain object must be passed into this method so the Item objects
returned can point to the appropriate domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
:rtype: ResultSet
:return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'SelectExpression' : query}
if consistent_read:
params['ConsistentRead'] = 'true'
if next_token:
params['NextToken'] = next_token
try:
return self.get_list('Select', params, [('Item', self.item_cls)],
parent=domain)
except SDBResponseError, e:
e.body = "Query: %s\n%s" % (query, e.body)
raise e | [
"def",
"select",
"(",
"self",
",",
"domain_or_name",
",",
"query",
"=",
"''",
",",
"next_token",
"=",
"None",
",",
"consistent_read",
"=",
"False",
")",
":",
"domain",
",",
"domain_name",
"=",
"self",
".",
"get_domain_and_name",
"(",
"domain_or_name",
")",
... | 43.285714 | 20.485714 |
def ad_stat(data):
"""
Calculates the Anderson-Darling statistic for sorted values from U(0, 1).
The statistic is not defined if any of the values is exactly 0 or 1. You
will get infinity as a result and a divide-by-zero warning for such values.
The warning can be silenced or raised using numpy.errstate(divide=...).
"""
samples = len(data)
factors = arange(1, 2 * samples, 2)
return -samples - (factors * log(data * (1 - data[::-1]))).sum() / samples | [
"def",
"ad_stat",
"(",
"data",
")",
":",
"samples",
"=",
"len",
"(",
"data",
")",
"factors",
"=",
"arange",
"(",
"1",
",",
"2",
"*",
"samples",
",",
"2",
")",
"return",
"-",
"samples",
"-",
"(",
"factors",
"*",
"log",
"(",
"data",
"*",
"(",
"1"... | 43.545455 | 24.090909 |
def _is_cow(path):
'''
Check if the subvolume is copy on write
'''
dirname = os.path.dirname(path)
return 'C' not in __salt__['file.lsattr'](dirname)[path] | [
"def",
"_is_cow",
"(",
"path",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"return",
"'C'",
"not",
"in",
"__salt__",
"[",
"'file.lsattr'",
"]",
"(",
"dirname",
")",
"[",
"path",
"]"
] | 28.333333 | 19.333333 |
def stats(request, server_name):
"""
Show server statistics.
"""
server_name = server_name.strip('/')
data = _context_data({
'title': _('Memcache Statistics for %s') % server_name,
'cache_stats': _get_cache_stats(server_name),
},
request)
return render_to_response('memcache_admin/stats.html', data, RequestContext(request)) | [
"def",
"stats",
"(",
"request",
",",
"server_name",
")",
":",
"server_name",
"=",
"server_name",
".",
"strip",
"(",
"'/'",
")",
"data",
"=",
"_context_data",
"(",
"{",
"'title'",
":",
"_",
"(",
"'Memcache Statistics for %s'",
")",
"%",
"server_name",
",",
... | 33.272727 | 16.181818 |
def cleanup_lib(self):
""" unload the previously loaded shared library """
if not self.using_openmp:
#this if statement is necessary because shared libraries that use
#OpenMP will core dump when unloaded, this is a well-known issue with OpenMP
logging.debug('unloading shared library')
_ctypes.dlclose(self.lib._handle) | [
"def",
"cleanup_lib",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"using_openmp",
":",
"#this if statement is necessary because shared libraries that use",
"#OpenMP will core dump when unloaded, this is a well-known issue with OpenMP",
"logging",
".",
"debug",
"(",
"'unloadi... | 53.857143 | 18.285714 |
def use_partial_data(self, sample_pct:float=0.01, seed:int=None)->'ItemList':
"Use only a sample of `sample_pct`of the full dataset and an optional `seed`."
if seed is not None: np.random.seed(seed)
rand_idx = np.random.permutation(range_of(self))
cut = int(sample_pct * len(self))
return self[rand_idx[:cut]] | [
"def",
"use_partial_data",
"(",
"self",
",",
"sample_pct",
":",
"float",
"=",
"0.01",
",",
"seed",
":",
"int",
"=",
"None",
")",
"->",
"'ItemList'",
":",
"if",
"seed",
"is",
"not",
"None",
":",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"r... | 57.333333 | 19 |
def _get_version_info():
"""
Returns the currently-installed awslimitchecker version, and a best-effort
attempt at finding the origin URL and commit/tag if installed from an
editable git clone.
:returns: awslimitchecker version
:rtype: str
"""
if os.environ.get('VERSIONCHECK_DEBUG', '') != 'true':
for lname in ['versionfinder', 'pip', 'git']:
l = logging.getLogger(lname)
l.setLevel(logging.CRITICAL)
l.propagate = True
try:
vinfo = find_version('awslimitchecker')
dirty = ''
if vinfo.git_is_dirty:
dirty = '*'
tag = vinfo.git_tag
if tag is not None:
tag += dirty
commit = vinfo.git_commit
if commit is not None:
if len(commit) > 7:
commit = commit[:8]
commit += dirty
return AWSLimitCheckerVersion(
vinfo.version,
vinfo.url,
tag=tag,
commit=commit
)
except Exception:
logger.exception("Error checking installed version; this installation "
"may not be in compliance with the AGPLv3 license:")
# fall back to returning just the hard-coded release information
return AWSLimitCheckerVersion(_VERSION, _PROJECT_URL) | [
"def",
"_get_version_info",
"(",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'VERSIONCHECK_DEBUG'",
",",
"''",
")",
"!=",
"'true'",
":",
"for",
"lname",
"in",
"[",
"'versionfinder'",
",",
"'pip'",
",",
"'git'",
"]",
":",
"l",
"=",
"logging"... | 33.868421 | 16.5 |
def rewrite_with_operator_pm_cc(expr):
"""Try to rewrite expr using :class:`OperatorPlusMinusCC`
Example:
>>> A = OperatorSymbol('A', hs=1)
>>> sum = A + A.dag()
>>> sum2 = rewrite_with_operator_pm_cc(sum)
>>> print(ascii(sum2))
A^(1) + c.c.
"""
# TODO: move this to the toolbox
from qnet.algebra.toolbox.core import temporary_rules
def _combine_operator_p_cc(A, B):
if B.adjoint() == A:
return OperatorPlusMinusCC(A, sign=+1)
else:
raise CannotSimplify
def _combine_operator_m_cc(A, B):
if B.adjoint() == A:
return OperatorPlusMinusCC(A, sign=-1)
else:
raise CannotSimplify
def _scal_combine_operator_pm_cc(c, A, d, B):
if B.adjoint() == A:
if c == d:
return c * OperatorPlusMinusCC(A, sign=+1)
elif c == -d:
return c * OperatorPlusMinusCC(A, sign=-1)
raise CannotSimplify
A = wc("A", head=Operator)
B = wc("B", head=Operator)
c = wc("c", head=Scalar)
d = wc("d", head=Scalar)
with temporary_rules(OperatorPlus, clear=True):
OperatorPlus.add_rule(
'PM1', pattern_head(A, B), _combine_operator_p_cc)
OperatorPlus.add_rule(
'PM2',
pattern_head(pattern(ScalarTimesOperator, -1, B), A),
_combine_operator_m_cc)
OperatorPlus.add_rule(
'PM3',
pattern_head(
pattern(ScalarTimesOperator, c, A),
pattern(ScalarTimesOperator, d, B)),
_scal_combine_operator_pm_cc)
return expr.rebuild() | [
"def",
"rewrite_with_operator_pm_cc",
"(",
"expr",
")",
":",
"# TODO: move this to the toolbox",
"from",
"qnet",
".",
"algebra",
".",
"toolbox",
".",
"core",
"import",
"temporary_rules",
"def",
"_combine_operator_p_cc",
"(",
"A",
",",
"B",
")",
":",
"if",
"B",
"... | 30.622642 | 15.792453 |
def iter_variants(self):
"""Iterate over marker information."""
for variant in self._bgen.iter_variant_info():
yield Variant(
variant.name,
CHROM_STR_ENCODE.get(variant.chrom, variant.chrom),
variant.pos, [variant.a1, variant.a2],
) | [
"def",
"iter_variants",
"(",
"self",
")",
":",
"for",
"variant",
"in",
"self",
".",
"_bgen",
".",
"iter_variant_info",
"(",
")",
":",
"yield",
"Variant",
"(",
"variant",
".",
"name",
",",
"CHROM_STR_ENCODE",
".",
"get",
"(",
"variant",
".",
"chrom",
",",... | 39.125 | 15.375 |
def block(self, tofile="block.dat"):
'''
获取证券板块信息
:param tofile:
:return: pd.dataFrame or None
'''
with self.client.connect(*self.bestip):
data = self.client.get_and_parse_block_info(tofile)
return self.client.to_df(data) | [
"def",
"block",
"(",
"self",
",",
"tofile",
"=",
"\"block.dat\"",
")",
":",
"with",
"self",
".",
"client",
".",
"connect",
"(",
"*",
"self",
".",
"bestip",
")",
":",
"data",
"=",
"self",
".",
"client",
".",
"get_and_parse_block_info",
"(",
"tofile",
")... | 28.5 | 17.9 |
def add_child(self, u, v):
''' add child to search tree itself.
Arguments:
u {int} -- father id
v {int} -- child id
'''
if u == -1:
self.root = v
self.adj_list[v] = []
return
if v not in self.adj_list[u]:
self.adj_list[u].append(v)
if v not in self.adj_list:
self.adj_list[v] = [] | [
"def",
"add_child",
"(",
"self",
",",
"u",
",",
"v",
")",
":",
"if",
"u",
"==",
"-",
"1",
":",
"self",
".",
"root",
"=",
"v",
"self",
".",
"adj_list",
"[",
"v",
"]",
"=",
"[",
"]",
"return",
"if",
"v",
"not",
"in",
"self",
".",
"adj_list",
... | 26.666667 | 13.866667 |
def get_methods_names(public_properties):
"""
Generates the names of the fields where to inject the getter and setter
methods
:param public_properties: If True, returns the names of public property
accessors, else of hidden property ones
:return: getter and a setter field names
"""
if public_properties:
prefix = ipopo_constants.IPOPO_PROPERTY_PREFIX
else:
prefix = ipopo_constants.IPOPO_HIDDEN_PROPERTY_PREFIX
return (
"{0}{1}".format(prefix, ipopo_constants.IPOPO_GETTER_SUFFIX),
"{0}{1}".format(prefix, ipopo_constants.IPOPO_SETTER_SUFFIX),
) | [
"def",
"get_methods_names",
"(",
"public_properties",
")",
":",
"if",
"public_properties",
":",
"prefix",
"=",
"ipopo_constants",
".",
"IPOPO_PROPERTY_PREFIX",
"else",
":",
"prefix",
"=",
"ipopo_constants",
".",
"IPOPO_HIDDEN_PROPERTY_PREFIX",
"return",
"(",
"\"{0}{1}\"... | 38.5 | 23.722222 |
def alerts(self, alert_level='High'):
"""Get a filtered list of alerts at the given alert level, and sorted by alert level."""
alerts = self.zap.core.alerts()
alert_level_value = self.alert_levels[alert_level]
alerts = sorted((a for a in alerts if self.alert_levels[a['risk']] >= alert_level_value),
key=lambda k: self.alert_levels[k['risk']], reverse=True)
return alerts | [
"def",
"alerts",
"(",
"self",
",",
"alert_level",
"=",
"'High'",
")",
":",
"alerts",
"=",
"self",
".",
"zap",
".",
"core",
".",
"alerts",
"(",
")",
"alert_level_value",
"=",
"self",
".",
"alert_levels",
"[",
"alert_level",
"]",
"alerts",
"=",
"sorted",
... | 47.666667 | 24.333333 |
def SearchFetchable(session=None, **kwargs):
"""Search okcupid.com with the given parameters. Parameters are registered
to this function through :meth:`~okcupyd.filter.Filters.register_filter_builder`
of :data:`~okcupyd.html_search.search_filters`.
:returns: A :class:`~okcupyd.util.fetchable.Fetchable` of
:class:`~okcupyd.profile.Profile` instances.
:param session: A logged in session.
:type session: :class:`~okcupyd.session.Session`
:param location: A location string which will be used to filter results.
:param gender: The gender of the user performing the search.
:param keywords: A list or space delimeted string of words to search for.
:param order_by: The criteria to use for ordering results. expected_values:
'match', 'online', 'special_blend'
"""
session = session or Session.login()
return util.Fetchable.fetch_marshall(
SearchHTMLFetcher(session, **kwargs),
util.SimpleProcessor(
session,
lambda match_card_div: Profile(
session=session,
**MatchCardExtractor(match_card_div).as_dict
),
_match_card_xpb
)
) | [
"def",
"SearchFetchable",
"(",
"session",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"session",
"=",
"session",
"or",
"Session",
".",
"login",
"(",
")",
"return",
"util",
".",
"Fetchable",
".",
"fetch_marshall",
"(",
"SearchHTMLFetcher",
"(",
"session... | 42.535714 | 18.357143 |
def run(*args, **kwargs):
'''Returns True if successful, False if failure'''
kwargs.setdefault('env', os.environ)
kwargs.setdefault('shell', True)
try:
subprocess.check_call(' '.join(args), **kwargs)
return True
except subprocess.CalledProcessError:
logger.debug('Error running: {}'.format(args))
return False | [
"def",
"run",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'env'",
",",
"os",
".",
"environ",
")",
"kwargs",
".",
"setdefault",
"(",
"'shell'",
",",
"True",
")",
"try",
":",
"subprocess",
".",
"check_call",
... | 29.333333 | 18 |
def run(self, *args):
"""Show information about countries."""
params = self.parser.parse_args(args)
ct = params.code_or_term
if ct and len(ct) < 2:
self.error('Code country or term must have 2 or more characters length')
return CODE_INVALID_FORMAT_ERROR
code = ct if ct and len(ct) == 2 else None
term = ct if ct and len(ct) > 2 else None
try:
countries = api.countries(self.db, code=code, term=term)
self.display('countries.tmpl', countries=countries)
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
return e.code
return CMD_SUCCESS | [
"def",
"run",
"(",
"self",
",",
"*",
"args",
")",
":",
"params",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"args",
")",
"ct",
"=",
"params",
".",
"code_or_term",
"if",
"ct",
"and",
"len",
"(",
"ct",
")",
"<",
"2",
":",
"self",
".",
"e... | 30.954545 | 21.909091 |
def die(self):
"""Time to quit"""
log.info('Time to die')
if self.connected:
try:
self.send('Die')
except Exception:
pass
if self._socket:
self._socket.close()
self.pop() | [
"def",
"die",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"'Time to die'",
")",
"if",
"self",
".",
"connected",
":",
"try",
":",
"self",
".",
"send",
"(",
"'Die'",
")",
"except",
"Exception",
":",
"pass",
"if",
"self",
".",
"_socket",
":",
"sel... | 24.363636 | 14.363636 |
def sign_in(self, timeout=60, safe=True, tries=1, channel=None):
'''
Send a sign in request to the master, sets the key information and
returns a dict containing the master publish interface to bind to
and the decrypted aes key for transport decryption.
:param int timeout: Number of seconds to wait before timing out the sign-in request
:param bool safe: If True, do not raise an exception on timeout. Retry instead.
:param int tries: The number of times to try to authenticate before giving up.
:raises SaltReqTimeoutError: If the sign-in request has timed out and :param safe: is not set
:return: Return a string on failure indicating the reason for failure. On success, return a dictionary
with the publication port and the shared AES key.
'''
auth = {}
auth_timeout = self.opts.get('auth_timeout', None)
if auth_timeout is not None:
timeout = auth_timeout
auth_safemode = self.opts.get('auth_safemode', None)
if auth_safemode is not None:
safe = auth_safemode
auth_tries = self.opts.get('auth_tries', None)
if auth_tries is not None:
tries = auth_tries
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
auth['master_uri'] = self.opts['master_uri']
close_channel = False
if not channel:
close_channel = True
channel = salt.transport.client.AsyncReqChannel.factory(self.opts,
crypt='clear',
io_loop=self.io_loop)
sign_in_payload = self.minion_sign_in_payload()
try:
payload = yield channel.send(
sign_in_payload,
tries=tries,
timeout=timeout
)
except SaltReqTimeoutError as e:
if safe:
log.warning('SaltReqTimeoutError: %s', e)
raise tornado.gen.Return('retry')
if self.opts.get('detect_mode') is True:
raise tornado.gen.Return('retry')
else:
raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error')
finally:
if close_channel:
channel.close()
if not isinstance(payload, dict):
log.error('Sign-in attempt failed: %s', payload)
raise tornado.gen.Return(False)
if 'load' in payload:
if 'ret' in payload['load']:
if not payload['load']['ret']:
if self.opts['rejected_retry']:
log.error(
'The Salt Master has rejected this minion\'s public '
'key.\nTo repair this issue, delete the public key '
'for this minion on the Salt Master.\nThe Salt '
'Minion will attempt to to re-authenicate.'
)
raise tornado.gen.Return('retry')
else:
log.critical(
'The Salt Master has rejected this minion\'s public '
'key!\nTo repair this issue, delete the public key '
'for this minion on the Salt Master and restart this '
'minion.\nOr restart the Salt Master in open mode to '
'clean out the keys. The Salt Minion will now exit.'
)
# Add a random sleep here for systems that are using a
# a service manager to immediately restart the service
# to avoid overloading the system
time.sleep(random.randint(10, 20))
sys.exit(salt.defaults.exitcodes.EX_NOPERM)
# has the master returned that its maxed out with minions?
elif payload['load']['ret'] == 'full':
raise tornado.gen.Return('full')
else:
log.error(
'The Salt Master has cached the public key for this '
'node, this salt minion will wait for %s seconds '
'before attempting to re-authenticate',
self.opts['acceptance_wait_time']
)
raise tornado.gen.Return('retry')
auth['aes'] = self.verify_master(payload, master_pub='token' in sign_in_payload)
if not auth['aes']:
log.critical(
'The Salt Master server\'s public key did not authenticate!\n'
'The master may need to be updated if it is a version of Salt '
'lower than %s, or\n'
'If you are confident that you are connecting to a valid Salt '
'Master, then remove the master public key and restart the '
'Salt Minion.\nThe master public key can be found '
'at:\n%s', salt.version.__version__, m_pub_fn
)
raise SaltClientError('Invalid master key')
if self.opts.get('syndic_master', False): # Is syndic
syndic_finger = self.opts.get('syndic_finger', self.opts.get('master_finger', False))
if syndic_finger:
if salt.utils.crypt.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != syndic_finger:
self._finger_fail(syndic_finger, m_pub_fn)
else:
if self.opts.get('master_finger', False):
if salt.utils.crypt.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != self.opts['master_finger']:
self._finger_fail(self.opts['master_finger'], m_pub_fn)
auth['publish_port'] = payload['publish_port']
raise tornado.gen.Return(auth) | [
"def",
"sign_in",
"(",
"self",
",",
"timeout",
"=",
"60",
",",
"safe",
"=",
"True",
",",
"tries",
"=",
"1",
",",
"channel",
"=",
"None",
")",
":",
"auth",
"=",
"{",
"}",
"auth_timeout",
"=",
"self",
".",
"opts",
".",
"get",
"(",
"'auth_timeout'",
... | 49.436975 | 25.168067 |
def update(self, sparql_query_only=False, auto_refresh=None, update_binary=True):
'''
Method to update resources in repository. Firing this method computes the difference in the local modified graph and the original one,
creates an instance of SparqlUpdate and builds a sparql query that represents these differences, and sends this as a PATCH request.
Note: send PATCH request, regardless of RDF or NonRDF, to [uri]/fcr:metadata
If the resource is NonRDF (Binary), this also method also updates the binary data.
Args:
sparql_query_only (bool): If True, returns only the sparql query string and does not perform any actual updates
auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
update_binary (bool): If True, and resource is NonRDF, updates binary data as well
Returns:
(bool)
'''
# run diff on graphs, send as PATCH request
self._diff_graph()
sq = SparqlUpdate(self.rdf.prefixes, self.rdf.diffs)
if sparql_query_only:
return sq.build_query()
response = self.repo.api.http_request(
'PATCH',
'%s/fcr:metadata' % self.uri, # send RDF updates to URI/fcr:metadata
data=sq.build_query(),
headers={'Content-Type':'application/sparql-update'})
# if RDF update not 204, raise Exception
if response.status_code != 204:
logger.debug(response.content)
raise Exception('HTTP %s, expecting 204' % response.status_code)
# if NonRDFSource, and self.binary.data is not a Response object, update binary as well
if type(self) == NonRDFSource and update_binary and type(self.binary.data) != requests.models.Response:
self.binary._prep_binary()
binary_data = self.binary.data
binary_response = self.repo.api.http_request(
'PUT',
self.uri,
data=binary_data,
headers={'Content-Type':self.binary.mimetype})
# if not refreshing RDF, still update binary here
if not auto_refresh and not self.repo.default_auto_refresh:
logger.debug("not refreshing resource RDF, but updated binary, so must refresh binary data")
updated_self = self.repo.get_resource(self.uri)
self.binary.refresh(updated_self)
# fire optional post-update hook
if hasattr(self,'_post_update'):
self._post_update()
# determine refreshing
'''
If not updating binary, pass that bool to refresh as refresh_binary flag to avoid touching binary data
'''
if auto_refresh:
self.refresh(refresh_binary=update_binary)
elif auto_refresh == None:
if self.repo.default_auto_refresh:
self.refresh(refresh_binary=update_binary)
return True | [
"def",
"update",
"(",
"self",
",",
"sparql_query_only",
"=",
"False",
",",
"auto_refresh",
"=",
"None",
",",
"update_binary",
"=",
"True",
")",
":",
"# run diff on graphs, send as PATCH request",
"self",
".",
"_diff_graph",
"(",
")",
"sq",
"=",
"SparqlUpdate",
"... | 38.8 | 29.261538 |
def pixel(self, x, y, size=None):
"""Return color for a pixel."""
if (size is None):
size = self.sz
# Have we go to the smallest element?
if (size <= 3):
if (_middle(x, y)):
return (None)
else:
return (0, 0, 0)
divisor = size // 3
if (_middle(x // divisor, y // divisor)):
return None
return self.pixel(x % divisor, y % divisor, divisor) | [
"def",
"pixel",
"(",
"self",
",",
"x",
",",
"y",
",",
"size",
"=",
"None",
")",
":",
"if",
"(",
"size",
"is",
"None",
")",
":",
"size",
"=",
"self",
".",
"sz",
"# Have we go to the smallest element?",
"if",
"(",
"size",
"<=",
"3",
")",
":",
"if",
... | 32.857143 | 11.928571 |
def create(customer, **data):
"""
Create a card instance.
:param customer: the customer id or object
:type customer: string|Customer
:param data: data required to create the card
:return: The card resource
:rtype resources.Card
"""
if isinstance(customer, resources.Customer):
customer = customer.id
http_client = HttpClient()
response, _ = http_client.post(routes.url(routes.CARD_RESOURCE, customer_id=customer), data)
return resources.Card(**response) | [
"def",
"create",
"(",
"customer",
",",
"*",
"*",
"data",
")",
":",
"if",
"isinstance",
"(",
"customer",
",",
"resources",
".",
"Customer",
")",
":",
"customer",
"=",
"customer",
".",
"id",
"http_client",
"=",
"HttpClient",
"(",
")",
"response",
",",
"_... | 32.235294 | 15.647059 |
def get_inclusion_states(self, transactions, tips):
# type: (Iterable[TransactionHash], Iterable[TransactionHash]) -> dict
"""
Get the inclusion states of a set of transactions. This is for
determining if a transaction was accepted and confirmed by the
network or not. You can search for multiple tips (and thus,
milestones) to get past inclusion states of transactions.
:param transactions:
List of transactions you want to get the inclusion state
for.
:param tips:
List of tips (including milestones) you want to search for
the inclusion state.
References:
- https://iota.readme.io/docs/getinclusionstates
"""
return core.GetInclusionStatesCommand(self.adapter)(
transactions=transactions,
tips=tips,
) | [
"def",
"get_inclusion_states",
"(",
"self",
",",
"transactions",
",",
"tips",
")",
":",
"# type: (Iterable[TransactionHash], Iterable[TransactionHash]) -> dict",
"return",
"core",
".",
"GetInclusionStatesCommand",
"(",
"self",
".",
"adapter",
")",
"(",
"transactions",
"="... | 35.875 | 22.958333 |
def setup(config):
    """Set up the persistence mechanism used by cinderlib.

    The ``storage`` entry of *config* selects the mechanism; all other
    entries are passed along as configuration.  Accepted forms:

    - A string naming a plugin registered under the
      ``cinderlib.persistence.storage`` entrypoint namespace (cinderlib
      ships "memory", "dbms", and "memory_dbms").
    - A class inheriting from PersistenceDriverBase, instantiated here
      with the remaining options.
    - An already-built instance inheriting from PersistenceDriverBase,
      returned as-is.

    When nothing is configured, memory storage is used.
    """
    config = {} if config is None else config.copy()
    # Prevent driver dynamic loading clearing configuration options
    volume_cmd.CONF._ConfigOpts__cache = MyDict()
    # Default configuration is using memory storage
    storage = config.pop('storage', None) or DEFAULT_STORAGE
    # Ready-made persistence instance: nothing else to do.
    if isinstance(storage, base.PersistenceDriverBase):
        return storage
    # Persistence class: instantiate with the remaining options.
    if inspect.isclass(storage) and issubclass(storage,
                                               base.PersistenceDriverBase):
        return storage(**config)
    if not isinstance(storage, six.string_types):
        raise exception.InvalidPersistence(storage)
    # Otherwise treat it as an entrypoint name and load via stevedore.
    manager = driver.DriverManager(
        namespace='cinderlib.persistence.storage',
        name=storage,
        invoke_on_load=True,
        invoke_kwds=config,
    )
    return manager.driver
"def",
"setup",
"(",
"config",
")",
":",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"{",
"}",
"else",
":",
"config",
"=",
"config",
".",
"copy",
"(",
")",
"# Prevent driver dynamic loading clearing configuration options",
"volume_cmd",
".",
"CONF",
".",... | 38.232558 | 22.744186 |
def run(self, root_allowed=False):
    """Start daemon mode.

    :param bool root_allowed: Only used for ExecuteCmd
    :return: loop
    """
    self.root_allowed = root_allowed

    def is_known_device(device):
        # Match against the configured device addresses, case-insensitively.
        return device.src.lower() in self.devices

    scan_devices(self.on_push, is_known_device, self.settings.get('interface'))
"def",
"run",
"(",
"self",
",",
"root_allowed",
"=",
"False",
")",
":",
"self",
".",
"root_allowed",
"=",
"root_allowed",
"scan_devices",
"(",
"self",
".",
"on_push",
",",
"lambda",
"d",
":",
"d",
".",
"src",
".",
"lower",
"(",
")",
"in",
"self",
"."... | 37.375 | 18.75 |
def corner_grid(self):
    """Return a grid containing only this grid's corner points.

    Returns
    -------
    cgrid : `RectGrid`
        Grid with size 2 in non-degenerate dimensions and 1
        in degenerate ones

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.corner_grid()
    uniform_grid([ 0., -1.], [ 1.,  2.], (2, 2))
    """
    extrema = []
    for axis in range(self.ndim):
        vec = self.coord_vectors[axis]
        if self.shape[axis] == 1:
            # Degenerate axis: keep the single coordinate.
            extrema.append(vec[0])
        else:
            # Keep only the first and last coordinate of the axis.
            extrema.append((vec[0], vec[-1]))
    return RectGrid(*extrema)
"def",
"corner_grid",
"(",
"self",
")",
":",
"minmax_vecs",
"=",
"[",
"]",
"for",
"axis",
"in",
"range",
"(",
"self",
".",
"ndim",
")",
":",
"if",
"self",
".",
"shape",
"[",
"axis",
"]",
"==",
"1",
":",
"minmax_vecs",
".",
"append",
"(",
"self",
... | 30.625 | 18.041667 |
def psffunc(self, x, y, z, **kwargs):
    """Calculate a normalized pinhole psf on the given coordinates."""
    #do_pinhole?? FIXME
    if self.polychromatic:
        calc = psfcalc.calculate_polychrome_pinhole_psf
    else:
        calc = psfcalc.calculate_pinhole_psf
    half_x = psfcalc.vec_to_halfvec(x)
    half_y = psfcalc.vec_to_halfvec(y)
    psf = psfcalc.wrap_and_calc_psf(half_x, half_y, z, calc, **kwargs)
    # Normalize so the psf sums to 1.
    return psf / psf.sum()
"def",
"psffunc",
"(",
"self",
",",
"x",
",",
"y",
",",
"z",
",",
"*",
"*",
"kwargs",
")",
":",
"#do_pinhole?? FIXME",
"if",
"self",
".",
"polychromatic",
":",
"func",
"=",
"psfcalc",
".",
"calculate_polychrome_pinhole_psf",
"else",
":",
"func",
"=",
"ps... | 40.7 | 13.5 |
def compare_branches_tags_commits(self, project_id, from_id, to_id):
    """
    Compare branches, tags or commits

    :param project_id: The ID of a project
    :param from_id: the commit sha or branch name
    :param to_id: the commit sha or branch name
    :return: commit list and diff between two branches tags or commits provided by name
    :raise: HttpError: If invalid response returned
    """
    url = '{0}/{1}/repository/compare'.format(self.projects_url, project_id)
    response = requests.get(
        url,
        params={'from': from_id, 'to': to_id},
        verify=self.verify_ssl,
        auth=self.auth,
        timeout=self.timeout,
        headers=self.headers)
    # Any non-200 answer is reported as a plain False to the caller.
    if response.status_code != 200:
        return False
    return response.json()
"def",
"compare_branches_tags_commits",
"(",
"self",
",",
"project_id",
",",
"from_id",
",",
"to_id",
")",
":",
"data",
"=",
"{",
"'from'",
":",
"from_id",
",",
"'to'",
":",
"to_id",
"}",
"request",
"=",
"requests",
".",
"get",
"(",
"'{0}/{1}/repository/comp... | 38.52381 | 19.190476 |
def local_attr_ancestors(self, name, context=None):
    """Iterate over the parents that define the given name.

    :param name: The name to find definitions for.
    :type name: str
    :returns: The parents that define the given name.
    :rtype: iterable(NodeNG)
    """
    # Prefer the MRO so the lookup mirrors Python's own attribute
    # resolution order.
    try:
        parents = self.mro(context)[1:]
    except exceptions.MroError:
        # No sane MRO could be computed; fall back to plain ancestors.
        parents = self.ancestors(context=context)
    for parent in parents:
        if name in parent:
            yield parent
"def",
"local_attr_ancestors",
"(",
"self",
",",
"name",
",",
"context",
"=",
"None",
")",
":",
"# Look up in the mro if we can. This will result in the",
"# attribute being looked up just as Python does it.",
"try",
":",
"ancestors",
"=",
"self",
".",
"mro",
"(",
"contex... | 36.85 | 15.15 |
def do(self, fn, message=None, *args, **kwargs):
    """Append a 'do' step: a function to execute when the chain runs.

    :param fn: A function
    :param message: Message indicating what this function does (used for debugging if assertions fail)
    :return: self, so calls can be chained fluently
    """
    item = ChainItem(fn, self.do, message, *args, **kwargs)
    self.items.put(item)
    return self
"def",
"do",
"(",
"self",
",",
"fn",
",",
"message",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"items",
".",
"put",
"(",
"ChainItem",
"(",
"fn",
",",
"self",
".",
"do",
",",
"message",
",",
"*",
"args",
",... | 44.625 | 22.25 |
def hash(self):
    '''
    Return a hash of the field, mixing the parent hash with this
    field's length/mutation/step/seed settings.

    :rtype: int
    '''
    base_hash = super(RandomBits, self).hash()
    return khash(
        base_hash,
        self._min_length,
        self._max_length,
        self._num_mutations,
        self._step,
        self._seed,
    )
"def",
"hash",
"(",
"self",
")",
":",
"hashed",
"=",
"super",
"(",
"RandomBits",
",",
"self",
")",
".",
"hash",
"(",
")",
"return",
"khash",
"(",
"hashed",
",",
"self",
".",
"_min_length",
",",
"self",
".",
"_max_length",
",",
"self",
".",
"_num_muta... | 35.142857 | 26.571429 |
def partition_version_classifiers(
        classifiers: t.Sequence[str], version_prefix: str = 'Programming Language :: Python :: ',
        only_suffix: str = ' :: Only') -> t.Tuple[t.List[str], t.List[str]]:
    """Split version-number classifiers into minimum-version and only-version groups.

    Classifier entries that do not parse as dotted version numbers are
    silently ignored.
    """
    versions_min = []
    versions_only = []
    for entry in classifiers:
        text = entry.replace(version_prefix, '')
        if text.endswith(only_suffix):
            text = text.replace(only_suffix, '')
            bucket = versions_only
        else:
            bucket = versions_min
        try:
            bucket.append(tuple(int(part) for part in text.split('.')))
        except ValueError:
            # Not a version number (e.g. "Implementation :: CPython").
            pass
    return versions_min, versions_only
"def",
"partition_version_classifiers",
"(",
"classifiers",
":",
"t",
".",
"Sequence",
"[",
"str",
"]",
",",
"version_prefix",
":",
"str",
"=",
"'Programming Language :: Python :: '",
",",
"only_suffix",
":",
"str",
"=",
"' :: Only'",
")",
"->",
"t",
".",
"Tuple... | 47 | 15.5625 |
def get_docker_secret(name, default=None, cast_to=str, autocast_name=True, getenv=True, safe=True,
                      secrets_dir=os.path.join(root, 'var', 'run', 'secrets')):
    """Fetch a docker secret, optionally falling back to an environment variable.

    :param name: the name of the docker secret
    :param default: the default value if no secret found
    :param cast_to: casts the value to the given type
    :param autocast_name: whether the name should be lowercase for secrets and upper case for environment
    :param getenv: if environment variable should be fetched as fallback
    :param safe: Whether the function should raise exceptions
    :param secrets_dir: the directory where the secrets are stored
    :returns: docker secret or environment variable depending on params
    :raises TypeError: if cast fails due to wrong type (None)
    :raises ValueError: if casts fails due to Value
    """
    # Secret files conventionally use lower-case names, environment
    # variables upper-case ones.
    secret_name = name.lower() if autocast_name else name
    env_name = name.upper() if autocast_name else name

    value = None
    try:
        with open(os.path.join(secrets_dir, secret_name), 'r') as secret_file:
            value = secret_file.read()
    except IOError:
        # No secret file; optionally fall back to the environment.
        if getenv:
            value = os.environ.get(env_name)
    if value is None:
        value = default

    try:
        # So None won't be cast to 'None'.
        if value is None:
            raise TypeError('value is None')
        # Special case bool: only accept explicit true/false strings.
        if cast_to == bool:
            if value not in ('True', 'true', 'False', 'false'):
                raise ValueError('value %s not of type bool' % value)
            value = 1 if value in ('True', 'true') else 0
        return cast_to(value)
    except (TypeError, ValueError) as error:
        if safe:
            return default
        raise error
"def",
"get_docker_secret",
"(",
"name",
",",
"default",
"=",
"None",
",",
"cast_to",
"=",
"str",
",",
"autocast_name",
"=",
"True",
",",
"getenv",
"=",
"True",
",",
"safe",
"=",
"True",
",",
"secrets_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"... | 35.589286 | 21.428571 |
def fetch(url, **kwargs):
    """Fetch an URL and return the response.

    Parameters
    ----------
    url : str
        An URL to crawl.
    spider_cls : scrapy.Spider (default: DefaultSpider)
        A spider class to be used in the crawler instance.
    capture_items : bool (default: True)
        If enabled, the scraped items are captured and returned.
    settings : dict, optional
        Custom crawler settings.
    timeout : int, (default: DEFAULT_TIMEOUT)
        Result wait timeout.

    Returns
    -------
    out : Response or None
        Returns a ``Response`` instance if the crawler is able to retrieve a
        response, otherwise it returns ``None``.

    Raises
    ------
    crochet.TimeoutError
    """
    timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
    # We always need the crawler back to dig the response out of the spider.
    kwargs['return_crawler'] = True
    crawler = wait_for(timeout, _fetch_in_reactor, url, **kwargs)
    spider = crawler.spider
    return spider.response if hasattr(spider, 'response') else None
"def",
"fetch",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"timeout",
"=",
"kwargs",
".",
"pop",
"(",
"'timeout'",
",",
"DEFAULT_TIMEOUT",
")",
"kwargs",
"[",
"'return_crawler'",
"]",
"=",
"True",
"crawler",
"=",
"wait_for",
"(",
"timeout",
",",
"_... | 32.342857 | 19.142857 |
def get_inventory_by_name(nme, character):
    """
    Return the index of the inventory entry whose name equals *nme*.

    NOTE(review): falls back to 0 when nothing matches, which is
    indistinguishable from a match at position 0 — kept for
    backward compatibility with existing callers.
    """
    for index, entry in enumerate(character["inventory"]):
        if entry["name"] == nme:
            return index
    return 0
"def",
"get_inventory_by_name",
"(",
"nme",
",",
"character",
")",
":",
"for",
"ndx",
",",
"sk",
"in",
"enumerate",
"(",
"character",
"[",
"\"inventory\"",
"]",
")",
":",
"#print(\"sk = \", sk, \" , nme = \", nme)",
"if",
"sk",
"[",
"\"name\"",
"]",
"==",
"nme... | 25.6 | 11.8 |
def check_dataset(dataset):
    """Confirm shape (3 colors x rows x cols) and values [0 to 255] are OK."""
    single_image = isinstance(dataset, numpy.ndarray) and not len(dataset.shape) == 4
    if single_image:
        check_dataset_shape(dataset)
        check_dataset_range(dataset)
        return
    # Otherwise it must be a list of arrays or a 4D NumPy array;
    # validate every frame and report its position on failure.
    for index, frame in enumerate(dataset):
        if not isinstance(frame, numpy.ndarray):
            raise ValueError(
                'Requires a NumPy array (rgb x rows x cols) '
                'with integer values in the range [0, 255].'
            )
        try:
            check_dataset_shape(frame)
            check_dataset_range(frame)
        except ValueError as err:
            raise ValueError(
                '{}\nAt position {} in the list of arrays.'
                .format(err, index)
            )
"def",
"check_dataset",
"(",
"dataset",
")",
":",
"if",
"isinstance",
"(",
"dataset",
",",
"numpy",
".",
"ndarray",
")",
"and",
"not",
"len",
"(",
"dataset",
".",
"shape",
")",
"==",
"4",
":",
"check_dataset_shape",
"(",
"dataset",
")",
"check_dataset_rang... | 42.55 | 12.45 |
def im_open(self, *, user: str, **kwargs) -> SlackResponse:
    """Opens a direct message channel.

    Args:
        user (str): The user id to open a DM with. e.g. 'W1234567890'
    """
    payload = dict(kwargs, user=user)
    return self.api_call("im.open", json=payload)
"def",
"im_open",
"(",
"self",
",",
"*",
",",
"user",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
"->",
"SlackResponse",
":",
"kwargs",
".",
"update",
"(",
"{",
"\"user\"",
":",
"user",
"}",
")",
"return",
"self",
".",
"api_call",
"(",
"\"im.open\"",
... | 35.875 | 16.75 |
def analyze(self):
    """Populate an enriched index by processing input items in blocks.

    Reads items from ``self._in`` in blocks of ``self._block_size``,
    starting after the latest date already present in ``self._out`` so
    repeated runs are incremental, and writes processed results back.

    :return: total number of out_items written.
    """
    # Resume from the newest item already in the output index, if any.
    from_date = self._out.latest_date()
    if from_date:
        logger.info("Reading items since " + from_date)
    else:
        logger.info("Reading items since the beginning of times")
    cont = 0              # items read so far
    total_processed = 0   # items processed across all blocks
    total_written = 0     # items written across all blocks
    for item_block in self._in.read_block(size=self._block_size, from_date=from_date):
        cont = cont + len(item_block)
        process_results = self.process(item_block)
        total_processed += process_results.processed
        if len(process_results.out_items) > 0:
            self._out.write(process_results.out_items)
            total_written += len(process_results.out_items)
        else:
            logger.info("No new items to be written this time.")
        # Per-block progress report.
        logger.info(
            "Items read/to be written/total read/total processed/total written: "
            "{0}/{1}/{2}/{3}/{4}".format(str(len(item_block)),
                                         str(len(process_results.out_items)),
                                         str(cont),
                                         str(total_processed),
                                         str(total_written)))
    logger.info("SUMMARY: Items total read/total processed/total written: "
                "{0}/{1}/{2}".format(str(cont),
                                     str(total_processed),
                                     str(total_written)))
    logger.info("This is the end.")
    return total_written
"def",
"analyze",
"(",
"self",
")",
":",
"from_date",
"=",
"self",
".",
"_out",
".",
"latest_date",
"(",
")",
"if",
"from_date",
":",
"logger",
".",
"info",
"(",
"\"Reading items since \"",
"+",
"from_date",
")",
"else",
":",
"logger",
".",
"info",
"(",
... | 39.209302 | 23.837209 |
def handle_version_flag():
    """If the --version flag is passed, print version to stdout and exit.

    Within dsub commands, --version should be the highest priority flag.
    This function supplies a repeatable and DRY way of checking for the
    version flag and printing the version. Callers still need to define a
    version flag in the command's flags so that it shows up in help output.
    """
    # Minimal parser: parse_known_args ignores every other flag, so
    # --version wins regardless of what else is on the command line.
    parser = argparse.ArgumentParser(description='Version parser', add_help=False)
    parser.add_argument('--version', '-v', dest='version', action='store_true')
    parser.set_defaults(version=False)
    args, _ = parser.parse_known_args()
    if not args.version:
        return
    print('dsub version: %s' % DSUB_VERSION)
    sys.exit()
"def",
"handle_version_flag",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Version parser'",
",",
"add_help",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"'-v'",
",",
"dest",
"=",
... | 46.066667 | 20.866667 |
def get_axis_variables(ds):
    '''
    Returns a list of names of variables that define an axis of the dataset
    (i.e. variables carrying an ``axis`` attribute).

    :param netCDF4.Dataset ds: An open netCDF4 Dataset
    '''
    matches = ds.get_variables_by_attributes(axis=lambda x: x is not None)
    return [ncvar.name for ncvar in matches]
"def",
"get_axis_variables",
"(",
"ds",
")",
":",
"axis_variables",
"=",
"[",
"]",
"for",
"ncvar",
"in",
"ds",
".",
"get_variables_by_attributes",
"(",
"axis",
"=",
"lambda",
"x",
":",
"x",
"is",
"not",
"None",
")",
":",
"axis_variables",
".",
"append",
... | 32.8 | 23 |
def draw(
        self,
        show_tip_labels=True,
        show_node_support=False,
        use_edge_lengths=False,
        orient="right",
        print_args=False,
        *args,
        **kwargs):
    """
    Plot the tree using toyplot.graph.

    Parameters:
    -----------
    show_tip_labels: bool
        Show tip names from tree.
    use_edge_lengths: bool
        Use edge lengths from newick tree.
    show_node_support: bool
        Show support values at nodes using a set of default
        options.

    Returns the (canvas, axes, panel) triple from the panel plotter.
    """
    # Recompute layout for the requested orientation / edge lengths.
    self._decompose_tree(orient=orient, use_edge_lengths=use_edge_lengths)
    # Merge explicit drawing options with any extra keyword arguments.
    dwargs = {
        "show_tip_labels": show_tip_labels,
        "show_node_support": show_node_support,
    }
    dwargs.update(kwargs)
    # Delegate the actual drawing to the panel plotter.
    return tree_panel_plot(self, print_args, **dwargs)
"def",
"draw",
"(",
"self",
",",
"show_tip_labels",
"=",
"True",
",",
"show_node_support",
"=",
"False",
",",
"use_edge_lengths",
"=",
"False",
",",
"orient",
"=",
"\"right\"",
",",
"print_args",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
"... | 30.361111 | 18.027778 |
def initialize_view(self):
    """Reset the tree widget and the state used for expand/collapse."""
    self.clear()
    # Bookkeeping used when collapsing/expanding one level at a time.
    self.item_depth = 0
    self.item_list = []
    self.items_to_be_shown = {}
    self.current_view_depth = 0
"def",
"initialize_view",
"(",
"self",
")",
":",
"self",
".",
"clear",
"(",
")",
"self",
".",
"item_depth",
"=",
"0",
"# To be use for collapsing/expanding one level\r",
"self",
".",
"item_list",
"=",
"[",
"]",
"# To be use for collapsing/expanding one level\r",
"self... | 45.857143 | 16.285714 |
def infer_modifications(stmts):
    """Return inferred Modification from RegulateActivity + ActiveForm.

    This function looks for combinations of Activation/Inhibition Statements
    and ActiveForm Statements that imply a Modification Statement.
    For example, if we know that A activates B, and phosphorylated B is
    active, then we can infer that A leads to the phosphorylation of B.
    An additional requirement when making this assumption is that the
    activity of B should only be dependent on the modified state and not
    other context - otherwise the inferred Modification is not necessarily
    warranted.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        A list of Statements to infer Modifications from.

    Returns
    -------
    linked_stmts : list[indra.mechlinker.LinkedStatement]
        A list of LinkedStatements representing the inferred Statements.
    """
    linked_stmts = []
    # Pair every activity-regulation statement with every active-form
    # statement about the same entity.
    for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
        for af_stmt in _get_statements_by_type(stmts, ActiveForm):
            if not af_stmt.agent.entity_matches(act_stmt.obj):
                continue
            # NOTE(review): this local is unused below; af_stmt.agent.mods
            # is re-read directly.
            mods = af_stmt.agent.mods
            # Make sure the ActiveForm only involves modified sites -
            # any extra context invalidates the inference.
            if af_stmt.agent.mutations or \
                    af_stmt.agent.bound_conditions or \
                    af_stmt.agent.location:
                continue
            if not af_stmt.agent.mods:
                continue
            for mod in af_stmt.agent.mods:
                # Evidence comes from both sources; the inferred statement
                # is by construction indirect.
                evs = act_stmt.evidence + af_stmt.evidence
                for ev in evs:
                    ev.epistemics['direct'] = False
                # Map the (possibly negated) modification to a Statement
                # class, e.g. phosphorylation -> Phosphorylation.
                if mod.is_modified:
                    mod_type_name = mod.mod_type
                else:
                    mod_type_name = modtype_to_inverse[mod.mod_type]
                mod_class = modtype_to_modclass[mod_type_name]
                if not mod_class:
                    continue
                st = mod_class(act_stmt.subj,
                               act_stmt.obj,
                               mod.residue, mod.position,
                               evidence=evs)
                ls = LinkedStatement([act_stmt, af_stmt], st)
                linked_stmts.append(ls)
                logger.info('inferred: %s' % st)
    return linked_stmts
"def",
"infer_modifications",
"(",
"stmts",
")",
":",
"linked_stmts",
"=",
"[",
"]",
"for",
"act_stmt",
"in",
"_get_statements_by_type",
"(",
"stmts",
",",
"RegulateActivity",
")",
":",
"for",
"af_stmt",
"in",
"_get_statements_by_type",
"(",
"stmts",
",",
"Activ... | 46.37037 | 18.925926 |
def _fake_openassociatorinstancepaths(self, namespace, **params):
    # pylint: disable=invalid-name
    """
    Implements WBEM server responder for
    :meth:`~pywbem.WBEMConnection.OpenAssociatorInstancePaths`
    with data from the instance repository.
    """
    self._validate_namespace(namespace)
    self._validate_open_params(**params)
    # The associatornames implementation expects ObjectName, not
    # InstanceName, so rename the parameter.
    params['ObjectName'] = params.pop('InstanceName')
    result = self._fake_associatornames(namespace, **params)
    if result is None:
        paths = []
    else:
        paths = [entry[2] for entry in result[0][2]]
    return self._open_response(paths, namespace,
                               'PullInstancePaths', **params)
"def",
"_fake_openassociatorinstancepaths",
"(",
"self",
",",
"namespace",
",",
"*",
"*",
"params",
")",
":",
"# pylint: disable=invalid-name",
"self",
".",
"_validate_namespace",
"(",
"namespace",
")",
"self",
".",
"_validate_open_params",
"(",
"*",
"*",
"params",
... | 39.5 | 16.944444 |
def free_sources(self, free=True, pars=None, cuts=None,
                 distance=None, skydir=None, minmax_ts=None, minmax_npred=None,
                 exclude=None, square=False, **kwargs):
    """Free or fix sources in the ROI model satisfying the given
    selection.  When several selection criteria are given, a source must
    satisfy all of them (logical AND) to be affected.

    Parameters
    ----------
    free : bool
        Choose whether to free (free=True) or fix (free=False)
        source parameters.
    pars : list
        Parameters to be freed/fixed for each source; None frees/fixes
        all of them, 'norm' only normalization parameters.
    cuts : dict
        Dictionary of [min,max] selections on source properties.
    distance : float
        Cut on angular distance from ``skydir``; None disables it.
    skydir : `~astropy.coordinates.SkyCoord`
        Reference sky coordinate for ``distance`` selection; defaults to
        the ROI center when None.
    minmax_ts : list
        Select sources with TS in [min,max]; a None bound is open.
    minmax_npred : list
        Select sources with npred in [min,max]; a None bound is open.
    exclude : list
        Names of sources excluded from the selection.
    square : bool
        Apply a square (ROI-like) instead of circular selection on the
        maximum projected distance from the ROI center.

    Returns
    -------
    srcs : list
        A list of `~fermipy.roi_model.Model` objects.
    """
    # Select the sources matching all criteria...
    selection = self.roi.get_sources(
        skydir=skydir, distance=distance, cuts=cuts,
        minmax_ts=minmax_ts, minmax_npred=minmax_npred,
        exclude=exclude, square=square,
        coordsys=self.config['binning']['coordsys'])
    # ...then free/fix each one of them.
    for src in selection:
        self.free_source(src.name, free=free, pars=pars, **kwargs)
    return selection
"def",
"free_sources",
"(",
"self",
",",
"free",
"=",
"True",
",",
"pars",
"=",
"None",
",",
"cuts",
"=",
"None",
",",
"distance",
"=",
"None",
",",
"skydir",
"=",
"None",
",",
"minmax_ts",
"=",
"None",
",",
"minmax_npred",
"=",
"None",
",",
"exclude... | 37.676056 | 25.070423 |
def rmv_normal(mu, tau, size=1):
    """
    Random multivariate normal variates.

    :param mu: mean vector (anything accepted by ``np.shape``).
    :param tau: matrix whose Cholesky factor transforms standard-normal
        draws; presumably the precision matrix - TODO confirm against
        callers.
    :param size: int or tuple - number of variates to draw.
    :return: array of shape ``np.shape(mu)`` when ``size == 1``,
        otherwise ``tuple(size) + np.shape(mu)``.

    Fixes vs. previous version: bare ``except:`` clauses (which also
    swallowed KeyboardInterrupt/SystemExit) narrowed to ``Exception``,
    and Python-2-only ``xrange`` replaced with ``range``.
    """
    sig = np.linalg.cholesky(tau)
    mu_size = np.shape(mu)

    if size == 1:
        out = np.random.normal(size=mu_size)
        try:
            # Fast path: in-place triangular solve from the Fortran helper.
            flib.dtrsm_wrap(sig, out, 'L', 'T', 'L', 1.)
        except Exception:
            # Fallback when flib is unavailable or fails.
            out = np.linalg.solve(sig, out)
        out += mu
        return out

    # Normalize scalar sizes to a 1-tuple so reshape below works.
    if not hasattr(size, '__iter__'):
        size = (size,)
    tot_size = np.prod(size)
    out = np.random.normal(size=(tot_size,) + mu_size)
    for i in range(tot_size):
        try:
            flib.dtrsm_wrap(sig, out[i, :], 'L', 'T', 'L', 1.)
        except Exception:
            out[i, :] = np.linalg.solve(sig, out[i, :])
        out[i, :] += mu
    return out.reshape(size + mu_size)
"def",
"rmv_normal",
"(",
"mu",
",",
"tau",
",",
"size",
"=",
"1",
")",
":",
"sig",
"=",
"np",
".",
"linalg",
".",
"cholesky",
"(",
"tau",
")",
"mu_size",
"=",
"np",
".",
"shape",
"(",
"mu",
")",
"if",
"size",
"==",
"1",
":",
"out",
"=",
"np"... | 28.392857 | 15.535714 |
def make(self, apps):
    """
    Create the report from application results: build every subreport
    first, then compact their tables.
    """
    for sub in self.subreports:
        logger.debug('Make subreport "{0}"'.format(sub.name))
        sub.make(apps)
    for sub in self.subreports:
        sub.compact_tables()
"def",
"make",
"(",
"self",
",",
"apps",
")",
":",
"for",
"subreport",
"in",
"self",
".",
"subreports",
":",
"logger",
".",
"debug",
"(",
"'Make subreport \"{0}\"'",
".",
"format",
"(",
"subreport",
".",
"name",
")",
")",
"subreport",
".",
"make",
"(",
... | 31.6 | 11.2 |
def _download_csv_from_gdocs(self, trans_csv_path, meta_csv_path):
    """
    Download both worksheets of the Google Doc as CSV files.

    :return: returns resource if worksheets are present
    :except: raises PODocsError with info if communication
        with GDocs lead to any errors
    """
    try:
        entry = self.gd_client.GetResourceById(self.key)
        # gid 0 -> trans_csv_path, gid 1 -> meta_csv_path.
        for path, gid in ((trans_csv_path, 0), (meta_csv_path, 1)):
            self.gd_client.DownloadResource(
                entry, path,
                extra_params={'gid': gid, 'exportFormat': 'csv'}
            )
    except (RequestError, IOError) as e:
        raise PODocsError(e)
    return entry
"def",
"_download_csv_from_gdocs",
"(",
"self",
",",
"trans_csv_path",
",",
"meta_csv_path",
")",
":",
"try",
":",
"entry",
"=",
"self",
".",
"gd_client",
".",
"GetResourceById",
"(",
"self",
".",
"key",
")",
"self",
".",
"gd_client",
".",
"DownloadResource",
... | 38.35 | 13.65 |
def sphere_example():
    """A basic example of how to use the sphere agent."""
    env = holodeck.make("MazeWorld")
    # Command 2 makes the agent constantly rotate to the right.
    command = 2
    for _episode in range(10):
        env.reset()
        for _ in range(1000):
            state, reward, terminal, _ = env.step(command)
            # To access specific sensor data:
            pixels = state[Sensors.PIXEL_CAMERA]
            orientation = state[Sensors.ORIENTATION_SENSOR]
"def",
"sphere_example",
"(",
")",
":",
"env",
"=",
"holodeck",
".",
"make",
"(",
"\"MazeWorld\"",
")",
"# This command is to constantly rotate to the right",
"command",
"=",
"2",
"for",
"i",
"in",
"range",
"(",
"10",
")",
":",
"env",
".",
"reset",
"(",
")",... | 33.214286 | 17.285714 |
def prepare_intercept(callback):
    """
    Registers a Windows low level keyboard hook. The provided callback will
    be invoked for each high-level keyboard event, and is expected to return
    True if the key event should be passed to the next program, or False if
    the event is to be blocked.

    No event is processed until the Windows messages are pumped (see
    start_intercept).
    """
    _setup_name_tables()

    def process_key(event_type, vk, scan_code, is_extended):
        # Module-level modifier state shared across hook invocations.
        global shift_is_pressed, altgr_is_pressed, ignore_next_right_alt
        #print(event_type, vk, scan_code, is_extended)
        # Pressing alt-gr also generates an extra "right alt" event
        if vk == 0xA5 and ignore_next_right_alt:
            ignore_next_right_alt = False
            return True
        # Snapshot of active modifiers (GetKeyState bit 0 gives the
        # toggle state for the lock keys).
        modifiers = (
            ('shift',) * shift_is_pressed +
            ('alt gr',) * altgr_is_pressed +
            ('num lock',) * (user32.GetKeyState(0x90) & 1) +
            ('caps lock',) * (user32.GetKeyState(0x14) & 1) +
            ('scroll lock',) * (user32.GetKeyState(0x91) & 1)
        )
        # Cache name lookups per (scan_code, vk, extended, modifiers) key.
        entry = (scan_code, vk, is_extended, modifiers)
        if entry not in to_name:
            to_name[entry] = list(get_event_names(*entry))
        names = to_name[entry]
        name = names[0] if names else None
        # TODO: inaccurate when holding multiple different shifts.
        if vk in shift_vks:
            shift_is_pressed = event_type == KEY_DOWN
        # NOTE(review): 541/162 appears to identify the alt-gr key event -
        # confirm against the Windows scan-code tables.
        if scan_code == 541 and vk == 162:
            ignore_next_right_alt = True
            altgr_is_pressed = event_type == KEY_DOWN
        is_keypad = (scan_code, vk, is_extended) in keypad_keys
        return callback(KeyboardEvent(event_type=event_type, scan_code=scan_code or -vk, name=name, is_keypad=is_keypad))

    def low_level_keyboard_handler(nCode, wParam, lParam):
        try:
            vk = lParam.contents.vk_code
            # Ignore the second `alt` DOWN observed in some cases.
            fake_alt = (LLKHF_INJECTED | 0x20)
            # Ignore events generated by SendInput with Unicode.
            if vk != VK_PACKET and lParam.contents.flags & fake_alt != fake_alt:
                event_type = keyboard_event_types[wParam]
                is_extended = lParam.contents.flags & 1
                scan_code = lParam.contents.scan_code
                should_continue = process_key(event_type, vk, scan_code, is_extended)
                if not should_continue:
                    # Non-zero return from the hook swallows the event.
                    return -1
        except Exception as e:
            # Never let an exception escape into the OS hook chain.
            print('Error in keyboard hook:')
            traceback.print_exc()
        return CallNextHookEx(None, nCode, wParam, lParam)

    # WH_KEYBOARD_LL == 13: low-level keyboard hook id.
    WH_KEYBOARD_LL = c_int(13)
    keyboard_callback = LowLevelKeyboardProc(low_level_keyboard_handler)
    handle = GetModuleHandleW(None)
    thread_id = DWORD(0)
    keyboard_hook = SetWindowsHookEx(WH_KEYBOARD_LL, keyboard_callback, handle, thread_id)
    # Register to remove the hook when the interpreter exits. Unfortunately a
    # try/finally block doesn't seem to work here.
    atexit.register(UnhookWindowsHookEx, keyboard_callback)
"def",
"prepare_intercept",
"(",
"callback",
")",
":",
"_setup_name_tables",
"(",
")",
"def",
"process_key",
"(",
"event_type",
",",
"vk",
",",
"scan_code",
",",
"is_extended",
")",
":",
"global",
"shift_is_pressed",
",",
"altgr_is_pressed",
",",
"ignore_next_righ... | 41.821918 | 21.164384 |
def _stream(self, context, message_factory):
    """write request/response into frames

    Transform request/response into protocol level message objects based on
    types and argstreams.

    Assumption: the chunk data read from stream can fit into memory.

    If arg stream is at init or streaming state, build the message based on
    current chunk. If arg stream is at completed state, put current chunk
    into args[] array, and continue to read next arg stream in order to
    compose a larger message instead of sending multi small messages.

    Note: the message built at this stage is not guaranteed the size is
    less then 64KB.

    Possible messages created sequence:

    Take request as an example::
        CallRequestMessage(flags=fragment)
            --> CallRequestContinueMessage(flags=fragment)
            ....
            --> CallRequestContinueMessage(flags=fragment)
            --> CallRequestMessage(flags=none)

    :param context: Request or Response object
    """
    args = []
    try:
        for argstream in context.argstreams:
            # First read of each arg stream is always appended.
            chunk = yield argstream.read()
            args.append(chunk)
            chunk = yield argstream.read()
            # While more chunks arrive, flush the accumulated args as a
            # fragment and start accumulating again from this chunk.
            while chunk:
                message = (message_factory.
                           build_raw_message(context, args))
                yield self.write(message)
                args = [chunk]
                chunk = yield argstream.read()
        # last piece of request/response.
        message = (message_factory.
                   build_raw_message(context, args, is_completed=True))
        yield self.write(message)
        context.state = StreamState.completed
    # Stop streamming immediately if exception occurs on the handler side
    except TChannelError:
        # raise by tchannel intentionally
        log.info("Stopped outgoing streams because of an error",
                 exc_info=sys.exc_info())
"def",
"_stream",
"(",
"self",
",",
"context",
",",
"message_factory",
")",
":",
"args",
"=",
"[",
"]",
"try",
":",
"for",
"argstream",
"in",
"context",
".",
"argstreams",
":",
"chunk",
"=",
"yield",
"argstream",
".",
"read",
"(",
")",
"args",
".",
"... | 40.3 | 19.42 |
def _hdfs_datanode_metrics(self, beans, tags):
    """
    Process HDFS Datanode metrics from given beans
    """
    # Only the first bean is relevant.
    bean = next(iter(beans))
    bean_name = bean.get('name')
    self.log.debug("Bean name retrieved: {}".format(bean_name))

    # Emit every known metric present in the bean.
    for metric, (metric_name, metric_type) in iteritems(self.HDFS_METRICS):
        value = bean.get(metric)
        if value is not None:
            self._set_metric(metric_name, metric_type, value, tags)
"def",
"_hdfs_datanode_metrics",
"(",
"self",
",",
"beans",
",",
"tags",
")",
":",
"# Only get the first bean",
"bean",
"=",
"next",
"(",
"iter",
"(",
"beans",
")",
")",
"bean_name",
"=",
"bean",
".",
"get",
"(",
"'name'",
")",
"self",
".",
"log",
".",
... | 37.857143 | 16.142857 |
def search(self, searchString, category="", math=False, game=False, searchFiles=False, extension=""):
    """Core function to search the indexes and return data.

    Searches the name index and the file index (order depends on
    ``searchFiles``), prints a formatted result table through
    ``self.repo.printd`` and returns the matches as a list of
    ``[file path, package name]`` pairs.  Exits the process when the
    index files could not be loaded.
    """
    data = []
    nameData = {}
    fileData = {}
    #Search the name index
    if searchFiles:
        fileData = self.searchNamesIndex(self.fileIndex, fileData, searchString, category, math, game, extension, searchFiles)
    else:
        nameData = self.searchNamesIndex(self.nameIndex, nameData, searchString)
    #Now search the file index
    if searchFiles:
        nameData, fileData = self.searchFilesIndex(fileData, nameData, self.nameIndex, searchString)
    else:
        fileData, nameData = self.searchFilesIndex(nameData, fileData, self.fileIndex, searchString, category, math, game, extension)
    # Bail out if we failed to do either of those things.
    if fileData is None or nameData is None:
        self.repo.printd("Error: failed to load one or more of the index files for this repo. Exiting.")
        self.repo.printd("Please run 'calcpkg update' and retry this command.")
        sys.exit(1)
    # Prepare output to parse: compute column widths from the results.
    space = 0
    longestFile = len("File Name:")
    for key, value in nameData.iteritems():
        fileValue = fileData[key]
        data.append([fileValue, value])
        if not fileValue is None:
            folder = fileValue.rpartition("/")[0]
            if space < len(folder):
                space = len(folder)
        if not value is None:
            if longestFile < len(value):
                longestFile = len(value)
    #Print output
    space += 5
    if len(data) != 0:
        self.repo.printd("Results for repo: " + self.repo.name)
        self.repo.printd(structureOutput("File Category:", "File Name:", False, False, space))
        self.repo.printd("-" * (space + longestFile))
    else:
        self.repo.printd("No packages found")
    returnData = []
    for datum in data:
        # Print each row; rows that fail to format are silently skipped.
        try:
            self.repo.printd(structureOutput(datum[0], datum[1], searchFiles, True, space))
            returnData.append([datum[0], datum[1]])
        except:
            pass
    self.repo.printd(" ")
    #Return data
    return returnData
"def",
"search",
"(",
"self",
",",
"searchString",
",",
"category",
"=",
"\"\"",
",",
"math",
"=",
"False",
",",
"game",
"=",
"False",
",",
"searchFiles",
"=",
"False",
",",
"extension",
"=",
"\"\"",
")",
":",
"data",
"=",
"[",
"]",
"nameData",
"=",
... | 33.842105 | 25.894737 |
def get_ui_class(ui_file):
"""Get UI Python class from .ui file.
Can be filename.ui or subdirectory/filename.ui
:param ui_file: The file of the ui in safe.gui.ui
:type ui_file: str
"""
os.path.sep.join(ui_file.split('/'))
ui_file_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.pardir,
'gui',
'ui',
ui_file
)
)
return uic.loadUiType(ui_file_path)[0] | [
"def",
"get_ui_class",
"(",
"ui_file",
")",
":",
"os",
".",
"path",
".",
"sep",
".",
"join",
"(",
"ui_file",
".",
"split",
"(",
"'/'",
")",
")",
"ui_file_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os... | 24.631579 | 16.631579 |
def color_electrodes(self, config_nr, ax):
"""
Color the electrodes used in specific configuration.
Voltage electrodes are yellow, Current electrodes are red ?!
"""
electrodes = np.loadtxt(options.config_file, skiprows=1)
electrodes = self.configs[~np.isnan(self.configs).any(1)]
electrodes = electrodes.astype(int)
conf = []
for dim in range(0, electrodes.shape[1]):
c = electrodes[config_nr, dim]
# c = c.partition('0')
a = np.round(c / 10000) - 1
b = np.mod(c, 10000) - 1
conf.append(a)
conf.append(b)
Ex, Ez = elem.get_electrodes()
color = ['#ffed00', '#ffed00', '#ff0000', '#ff0000']
ax.scatter(Ex[conf], Ez[conf], c=color, marker='s', s=60,
clip_on=False, edgecolors='k') | [
"def",
"color_electrodes",
"(",
"self",
",",
"config_nr",
",",
"ax",
")",
":",
"electrodes",
"=",
"np",
".",
"loadtxt",
"(",
"options",
".",
"config_file",
",",
"skiprows",
"=",
"1",
")",
"electrodes",
"=",
"self",
".",
"configs",
"[",
"~",
"np",
".",
... | 40.238095 | 12.904762 |
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(com.values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i] | [
"def",
"idxmin",
"(",
"self",
",",
"axis",
"=",
"0",
",",
"skipna",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"skipna",
"=",
"nv",
".",
"validate_argmin_with_skipna",
"(",
"skipna",
",",
"args",
",",
"kwargs",
")",
"i",
"=",... | 29.318841 | 22.884058 |
def set_padist_gaussian_loss_cone(self, boundary_rad, expwidth):
"""Set the pitch-angle distribution to a Gaussian loss cone.
**Call signature**
*boundary_rad*
The angle inside which there are no losses, in radians.
*expwidth*
The characteristic width of the Gaussian loss profile
*in direction-cosine units*.
Returns
*self* for convenience in chaining.
See ``OnlineI.pdf`` in the Supplementary Data for a precise
definition. (And note the distinction between α_c and μ_c since not
everything is direction cosines.)
"""
self.in_vals[IN_VAL_PADIST] = PADIST_GLC
self.in_vals[IN_VAL_LCBDY] = boundary_rad * 180 / np.pi # rad => deg
self.in_vals[IN_VAL_DELTAMU] = expwidth
return self | [
"def",
"set_padist_gaussian_loss_cone",
"(",
"self",
",",
"boundary_rad",
",",
"expwidth",
")",
":",
"self",
".",
"in_vals",
"[",
"IN_VAL_PADIST",
"]",
"=",
"PADIST_GLC",
"self",
".",
"in_vals",
"[",
"IN_VAL_LCBDY",
"]",
"=",
"boundary_rad",
"*",
"180",
"/",
... | 36.727273 | 20.590909 |
def iter_entries(self):
"""
Generate an |_IfdEntry| instance corresponding to each entry in the
directory.
"""
for idx in range(self._entry_count):
dir_entry_offset = self._offset + 2 + (idx*12)
ifd_entry = _IfdEntryFactory(self._stream_rdr, dir_entry_offset)
yield ifd_entry | [
"def",
"iter_entries",
"(",
"self",
")",
":",
"for",
"idx",
"in",
"range",
"(",
"self",
".",
"_entry_count",
")",
":",
"dir_entry_offset",
"=",
"self",
".",
"_offset",
"+",
"2",
"+",
"(",
"idx",
"*",
"12",
")",
"ifd_entry",
"=",
"_IfdEntryFactory",
"("... | 38.111111 | 16.111111 |
def bodypart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdefs) defined for the
specified method.
An input I{pdef} is a (I{name}, L{xsd.sxbase.SchemaObject}) tuple,
while an output I{pdef} is a L{xsd.sxbase.SchemaObject}.
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},...]
"""
if input:
parts = method.soap.input.body.parts
else:
parts = method.soap.output.body.parts
return [self.__part_type(p, input) for p in parts] | [
"def",
"bodypart_types",
"(",
"self",
",",
"method",
",",
"input",
"=",
"True",
")",
":",
"if",
"input",
":",
"parts",
"=",
"method",
".",
"soap",
".",
"input",
".",
"body",
".",
"parts",
"else",
":",
"parts",
"=",
"method",
".",
"soap",
".",
"outp... | 34.333333 | 17 |
def native(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a native string."""
if isinstance(s, str):
return s
if str is unicode: # Python 3.x ->
return unicodestr(s, encoding, fallback)
return bytestring(s, encoding, fallback) | [
"def",
"native",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
",",
"fallback",
"=",
"'iso-8859-1'",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"return",
"s",
"if",
"str",
"is",
"unicode",
":",
"# Python 3.x ->",
"return",
"unicodestr",
... | 28.2 | 18.6 |
def from_dict(cls, d):
"""
Returns a COHP object from a dict representation of the COHP.
"""
if "ICOHP" in d:
icohp = {Spin(int(key)): np.array(val)
for key, val in d["ICOHP"].items()}
else:
icohp = None
return Cohp(d["efermi"], d["energies"],
{Spin(int(key)): np.array(val)
for key, val in d["COHP"].items()},
icohp=icohp, are_coops=d["are_coops"]) | [
"def",
"from_dict",
"(",
"cls",
",",
"d",
")",
":",
"if",
"\"ICOHP\"",
"in",
"d",
":",
"icohp",
"=",
"{",
"Spin",
"(",
"int",
"(",
"key",
")",
")",
":",
"np",
".",
"array",
"(",
"val",
")",
"for",
"key",
",",
"val",
"in",
"d",
"[",
"\"ICOHP\"... | 35.071429 | 15.928571 |
def last(self, count=1):
"""
Returns the last record in the query (sorting by id unless modified by
`order_by`, whereupon it reverses the order passed in `order_by`).
Returns None if the query has no records.
"""
if self._order_with:
order = self._order_with.values()[0]
order = "desc" if order == "asc" else "asc"
order_with = self._order_with
self._order_with = {}
result = self.order_by(**{order_with.keys()[0]: order}).first(count)
self._order_with = order_with
return result
else:
return self.order_by(id="desc").first(count) | [
"def",
"last",
"(",
"self",
",",
"count",
"=",
"1",
")",
":",
"if",
"self",
".",
"_order_with",
":",
"order",
"=",
"self",
".",
"_order_with",
".",
"values",
"(",
")",
"[",
"0",
"]",
"order",
"=",
"\"desc\"",
"if",
"order",
"==",
"\"asc\"",
"else",... | 41.6875 | 14.9375 |
def embed_MDS(X, ndim=2, how='metric', distance_metric='euclidean',
n_jobs=1, seed=None, verbose=0):
"""Performs classic, metric, and non-metric MDS
Metric MDS is initialized using classic MDS,
non-metric MDS is initialized using metric MDS.
Parameters
----------
X: ndarray [n_samples, n_samples]
2 dimensional input data array with n_samples
embed_MDS does not check for matrix squareness,
but this is necessary for PHATE
n_dim : int, optional, default: 2
number of dimensions in which the data will be embedded
how : string, optional, default: 'classic'
choose from ['classic', 'metric', 'nonmetric']
which MDS algorithm is used for dimensionality reduction
distance_metric : string, optional, default: 'euclidean'
choose from ['cosine', 'euclidean']
distance metric for MDS
n_jobs : integer, optional, default: 1
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
seed: integer or numpy.RandomState, optional
The generator used to initialize SMACOF (metric, nonmetric) MDS
If an integer is given, it fixes the seed
Defaults to the global numpy random number generator
Returns
-------
Y : ndarray [n_samples, n_dim]
low dimensional embedding of X using MDS
"""
if how not in ['classic', 'metric', 'nonmetric']:
raise ValueError("Allowable 'how' values for MDS: 'classic', "
"'metric', or 'nonmetric'. "
"'{}' was passed.".format(how))
# MDS embeddings, each gives a different output.
X_dist = squareform(pdist(X, distance_metric))
# initialize all by CMDS
Y = cmdscale_fast(X_dist, ndim)
if how in ['metric', 'nonmetric']:
tasklogger.log_debug("Performing metric MDS on "
"{} of shape {}...".format(type(X_dist),
X_dist.shape))
# Metric MDS from sklearn
Y, _ = smacof(X_dist, n_components=ndim, metric=True, max_iter=3000,
eps=1e-6, random_state=seed, n_jobs=n_jobs,
n_init=1, init=Y, verbose=verbose)
if how == 'nonmetric':
tasklogger.log_debug(
"Performing non-metric MDS on "
"{} of shape {}...".format(type(X_dist),
X_dist.shape))
# Nonmetric MDS from sklearn using metric MDS as an initialization
Y, _ = smacof(X_dist, n_components=ndim, metric=True, max_iter=3000,
eps=1e-6, random_state=seed, n_jobs=n_jobs,
n_init=1, init=Y, verbose=verbose)
return Y | [
"def",
"embed_MDS",
"(",
"X",
",",
"ndim",
"=",
"2",
",",
"how",
"=",
"'metric'",
",",
"distance_metric",
"=",
"'euclidean'",
",",
"n_jobs",
"=",
"1",
",",
"seed",
"=",
"None",
",",
"verbose",
"=",
"0",
")",
":",
"if",
"how",
"not",
"in",
"[",
"'... | 41.385714 | 19.157143 |
def clean_delete(self):
"""
Deletes this router & associated files (nvram, disks etc.)
"""
yield from self._hypervisor.send('vm clean_delete "{}"'.format(self._name))
self._hypervisor.devices.remove(self)
try:
yield from wait_run_in_executor(shutil.rmtree, self._working_directory)
except OSError as e:
log.warn("Could not delete file {}".format(e))
log.info('Router "{name}" [{id}] has been deleted (including associated files)'.format(name=self._name, id=self._id)) | [
"def",
"clean_delete",
"(",
"self",
")",
":",
"yield",
"from",
"self",
".",
"_hypervisor",
".",
"send",
"(",
"'vm clean_delete \"{}\"'",
".",
"format",
"(",
"self",
".",
"_name",
")",
")",
"self",
".",
"_hypervisor",
".",
"devices",
".",
"remove",
"(",
"... | 45.416667 | 26.416667 |
def populate_jobset(job, jobset, depth):
""" Creates a set of jobs, containing jobs at difference depths of the
dependency tree, retaining dependencies as strings, not Jobs.
"""
jobset.add(job)
if len(job.dependencies) == 0:
return jobset
for j in job.dependencies:
jobset = populate_jobset(j, jobset, depth+1)
return jobset | [
"def",
"populate_jobset",
"(",
"job",
",",
"jobset",
",",
"depth",
")",
":",
"jobset",
".",
"add",
"(",
"job",
")",
"if",
"len",
"(",
"job",
".",
"dependencies",
")",
"==",
"0",
":",
"return",
"jobset",
"for",
"j",
"in",
"job",
".",
"dependencies",
... | 35.9 | 11.6 |
def read_pmc(self, pmcid):
"""Read a given PMC article.
Parameters
----------
pmcid : str
The PMC ID of the article to read. Note that only
articles in the open-access subset of PMC will work.
"""
msg = KQMLPerformative('REQUEST')
msg.set('receiver', 'READER')
content = KQMLList('run-pmcid')
content.sets('pmcid', pmcid)
content.set('reply-when-done', 'true')
msg.set('content', content)
msg.set('reply-with', 'P-%s' % pmcid)
self.reply_counter += 1
self.send(msg) | [
"def",
"read_pmc",
"(",
"self",
",",
"pmcid",
")",
":",
"msg",
"=",
"KQMLPerformative",
"(",
"'REQUEST'",
")",
"msg",
".",
"set",
"(",
"'receiver'",
",",
"'READER'",
")",
"content",
"=",
"KQMLList",
"(",
"'run-pmcid'",
")",
"content",
".",
"sets",
"(",
... | 32.5 | 12 |
def check(self, file):
"""
Checks a given file against all available yara rules
:param file: Path to file
:type file:str
:returns: Python list with matched rules info
:rtype: list
"""
result = []
all_matches = []
for filerules in os.listdir(self.rulepaths):
try:
rule = yara.compile(os.path.join(self.rulepaths, filerules))
except yara.SyntaxError:
continue
matches = rule.match(file)
if len(matches) > 0:
for rulem in matches:
rule_family = "_".join([x for x in rulem.rule.replace("_", ".", 1).split("_")[:-1]])
if rule_family not in all_matches:
all_matches.append(rule_family)
for rule_family in all_matches:
rules_info_txt = requests.get('{}/family/{}'.format(self.baseurl, rule_family),
auth=HTTPBasicAuth(self.user, self.pwd))
rules_info_json = json.loads(rules_info_txt.text)
result.append({
'family': rule_family,
'common_name': rules_info_json['common_name'],
'description': rules_info_json['description'],
'attribution': rules_info_json['attribution'],
'alt_names': rules_info_json['alt_names'],
'urls': rules_info_json['urls']
})
return result | [
"def",
"check",
"(",
"self",
",",
"file",
")",
":",
"result",
"=",
"[",
"]",
"all_matches",
"=",
"[",
"]",
"for",
"filerules",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"rulepaths",
")",
":",
"try",
":",
"rule",
"=",
"yara",
".",
"compile",
"... | 41.8 | 18.2 |
def reset(self):
"""
Releases all entities held by this Unit Of Work (i.e., removes state
information from all registered entities and clears the entity map).
"""
for ents in self.__entity_set_map.values():
for ent in ents:
EntityState.release(ent, self)
self.__entity_set_map.clear() | [
"def",
"reset",
"(",
"self",
")",
":",
"for",
"ents",
"in",
"self",
".",
"__entity_set_map",
".",
"values",
"(",
")",
":",
"for",
"ent",
"in",
"ents",
":",
"EntityState",
".",
"release",
"(",
"ent",
",",
"self",
")",
"self",
".",
"__entity_set_map",
... | 39.111111 | 14.222222 |
def Action(act, *args, **kw):
"""A factory for action objects."""
# Really simple: the _do_create_* routines do the heavy lifting.
_do_create_keywords(args, kw)
if is_List(act):
return _do_create_list_action(act, kw)
return _do_create_action(act, kw) | [
"def",
"Action",
"(",
"act",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"# Really simple: the _do_create_* routines do the heavy lifting.",
"_do_create_keywords",
"(",
"args",
",",
"kw",
")",
"if",
"is_List",
"(",
"act",
")",
":",
"return",
"_do_create_lis... | 38.857143 | 10.714286 |
def get_data_feed(self, train_mode=True, qname_in='input', qname_out='output', input_mapping=None):
"""Convenience function to access ``TFNode.DataFeed`` directly from this object instance."""
return TFNode.DataFeed(self.mgr, train_mode, qname_in, qname_out, input_mapping) | [
"def",
"get_data_feed",
"(",
"self",
",",
"train_mode",
"=",
"True",
",",
"qname_in",
"=",
"'input'",
",",
"qname_out",
"=",
"'output'",
",",
"input_mapping",
"=",
"None",
")",
":",
"return",
"TFNode",
".",
"DataFeed",
"(",
"self",
".",
"mgr",
",",
"trai... | 93 | 34.333333 |
def make_avsc_object(json_data, names=None):
# type: (Union[Dict[Text, Text], List[Any], Text], Optional[Names]) -> Schema
"""
Build Avro Schema from data parsed out of JSON string.
@arg names: A Name object (tracks seen names and default space)
"""
if names is None:
names = Names()
assert isinstance(names, Names)
# JSON object (non-union)
if hasattr(json_data, 'get') and callable(json_data.get): # type: ignore
assert isinstance(json_data, Dict)
atype = cast(Text, json_data.get('type'))
other_props = get_other_props(json_data, SCHEMA_RESERVED_PROPS)
if atype in PRIMITIVE_TYPES:
return PrimitiveSchema(atype, other_props)
if atype in NAMED_TYPES:
name = cast(Text, json_data.get('name'))
namespace = cast(Text, json_data.get('namespace',
names.default_namespace))
if atype == 'enum':
symbols = cast(List[Text], json_data.get('symbols'))
doc = json_data.get('doc')
return EnumSchema(name, namespace, symbols, names, doc, other_props)
if atype in ['record', 'error']:
fields = cast(List, json_data.get('fields'))
doc = json_data.get('doc')
return RecordSchema(name, namespace, fields, names, atype, doc, other_props)
raise SchemaParseException('Unknown Named Type: %s' % atype)
if atype in VALID_TYPES:
if atype == 'array':
items = cast(List, json_data.get('items'))
return ArraySchema(items, names, other_props)
if atype is None:
raise SchemaParseException('No "type" property: %s' % json_data)
raise SchemaParseException('Undefined type: %s' % atype)
# JSON array (union)
if isinstance(json_data, list):
return UnionSchema(json_data, names)
# JSON string (primitive)
if json_data in PRIMITIVE_TYPES:
return PrimitiveSchema(cast(Text, json_data))
# not for us!
fail_msg = "Could not make an Avro Schema object from %s." % json_data
raise SchemaParseException(fail_msg) | [
"def",
"make_avsc_object",
"(",
"json_data",
",",
"names",
"=",
"None",
")",
":",
"# type: (Union[Dict[Text, Text], List[Any], Text], Optional[Names]) -> Schema",
"if",
"names",
"is",
"None",
":",
"names",
"=",
"Names",
"(",
")",
"assert",
"isinstance",
"(",
"names",
... | 45.851064 | 17.595745 |
def agg_autocorrelation(x, param):
r"""
Calculates the value of an aggregation function :math:`f_{agg}` (e.g. the variance or the mean) over the
autocorrelation :math:`R(l)` for different lags. The autocorrelation :math:`R(l)` for lag :math:`l` is defined as
.. math::
R(l) = \frac{1}{(n-l)\sigma^{2}} \sum_{t=1}^{n-l}(X_{t}-\mu )(X_{t+l}-\mu)
where :math:`X_i` are the values of the time series, :math:`n` its length. Finally, :math:`\sigma^2` and
:math:`\mu` are estimators for its variance and mean
(See `Estimation of the Autocorrelation function <http://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_).
The :math:`R(l)` for different lags :math:`l` form a vector. This feature calculator applies the aggregation
function :math:`f_{agg}` to this vector and returns
.. math::
f_{agg} \left( R(1), \ldots, R(m)\right) \quad \text{for} \quad m = max(n, maxlag).
Here :math:`maxlag` is the second parameter passed to this function.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"f_agg": x, "maxlag", n} with x str, the name of a numpy function
(e.g. "mean", "var", "std", "median"), its the name of the aggregator function that is applied to the
autocorrelations. Further, n is an int and the maximal number of lags to consider.
:type param: list
:return: the value of this feature
:return type: float
"""
# if the time series is longer than the following threshold, we use fft to calculate the acf
THRESHOLD_TO_USE_FFT = 1250
var = np.var(x)
n = len(x)
max_maxlag = max([config["maxlag"] for config in param])
if np.abs(var) < 10**-10 or n == 1:
a = [0] * len(x)
else:
a = acf(x, unbiased=True, fft=n > THRESHOLD_TO_USE_FFT, nlags=max_maxlag)[1:]
return [("f_agg_\"{}\"__maxlag_{}".format(config["f_agg"], config["maxlag"]),
getattr(np, config["f_agg"])(a[:int(config["maxlag"])])) for config in param] | [
"def",
"agg_autocorrelation",
"(",
"x",
",",
"param",
")",
":",
"# if the time series is longer than the following threshold, we use fft to calculate the acf",
"THRESHOLD_TO_USE_FFT",
"=",
"1250",
"var",
"=",
"np",
".",
"var",
"(",
"x",
")",
"n",
"=",
"len",
"(",
"x",... | 47.162791 | 35.465116 |
def boundary_maximum_exponential(graph, xxx_todo_changeme3):
r"""
Boundary term processing adjacent voxels maximum value using an exponential relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_exponential`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image.
"""
(gradient_image, sigma, spacing) = xxx_todo_changeme3
gradient_image = scipy.asarray(gradient_image)
def boundary_term_exponential(intensities):
"""
Implementation of a exponential boundary term computation over an array.
"""
# apply exp-(x**2/sigma**2)
intensities = scipy.power(intensities, 2)
intensities /= math.pow(sigma, 2)
intensities *= -1
intensities = scipy.exp(intensities)
intensities[intensities <= 0] = sys.float_info.min
return intensities
__skeleton_maximum(graph, gradient_image, boundary_term_exponential, spacing) | [
"def",
"boundary_maximum_exponential",
"(",
"graph",
",",
"xxx_todo_changeme3",
")",
":",
"(",
"gradient_image",
",",
"sigma",
",",
"spacing",
")",
"=",
"xxx_todo_changeme3",
"gradient_image",
"=",
"scipy",
".",
"asarray",
"(",
"gradient_image",
")",
"def",
"bound... | 39.355556 | 22.088889 |
def update_grammar_to_be_variable_free(grammar_dictionary: Dict[str, List[str]]):
"""
SQL is a predominately variable free language in terms of simple usage, in the
sense that most queries do not create references to variables which are not
already static tables in a dataset. However, it is possible to do this via
derived tables. If we don't require this functionality, we can tighten the
grammar, because we don't need to support aliased tables.
"""
# Tables in variable free grammars cannot be aliased, so we
# remove this functionality from the grammar.
grammar_dictionary["select_result"] = ['"*"', '(table_name ws ".*")', 'expr']
# Similarly, collapse the definition of a source table
# to not contain aliases and modify references to subqueries.
grammar_dictionary["single_source"] = ['table_name', '("(" ws query ws ")")']
del grammar_dictionary["source_subq"]
del grammar_dictionary["source_table"]
grammar_dictionary["expr"] = ['in_expr',
'(value wsp "LIKE" wsp string)',
'(value ws "BETWEEN" wsp value ws "AND" wsp value)',
'(value ws binaryop wsp expr)',
'(unaryop ws expr)',
'(col_ref ws "IS" ws "NOT" ws "NULL")',
'(col_ref ws "IS" ws "NULL")',
# This used to be source_subq - now
# we don't need aliases, we can colapse it to queries.
'("(" ws query ws ")")',
'value']
# Finally, remove the ability to reference an arbitrary name,
# because now we don't have aliased tables, we don't need
# to recognise new variables.
del grammar_dictionary["name"] | [
"def",
"update_grammar_to_be_variable_free",
"(",
"grammar_dictionary",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
")",
":",
"# Tables in variable free grammars cannot be aliased, so we",
"# remove this functionality from the grammar.",
"grammar_dictionary",
"[... | 53 | 24.771429 |
def clean_cornell_movies(filename='cornell_movie_dialogs_corpus.zip', subdir='cornell movie-dialogs corpus'):
""" Load a dataframe of ~100k raw (uncollated) movie lines from the cornell movies dialog corpus
>>> local_filepath = download_file(BIG_URLS['cornell_movie_dialogs_corpus'][0])
>>> df = clean_cornell_movies(filename='cornell_movie_dialogs_corpus.zip')
>>> df.describe(include='all')
user movie person utterance
count 304713 304713 304713 304446
unique 9035 617 5356 265783
top u4525 m289 JACK What?
freq 537 1530 3032 1684
"""
fullpath_zipfile = find_filepath(filename)
dirname = os.path.basename(filename)
subdir = 'cornell movie-dialogs corpus'
if fullpath_zipfile.lower().endswith('.zip'):
retval = unzip(fullpath_zipfile)
dirname = dirname[:-4]
fullpath_movie_lines = os.path.join(BIGDATA_PATH, dirname, subdir, 'movie_lines.txt')
dialog = pd.read_csv(
fullpath_movie_lines, sep=r'\+\+\+\$\+\+\+', engine='python', header=None, index_col=0)
dialog.columns = 'user movie person utterance'.split()
dialog.index.name = 'line'
dialog.index = [int(s.strip()[1:]) for s in dialog.index.values]
dialog.sort_index(inplace=True)
for col in dialog.columns:
dialog[col] = dialog[col].str.strip()
return dialog | [
"def",
"clean_cornell_movies",
"(",
"filename",
"=",
"'cornell_movie_dialogs_corpus.zip'",
",",
"subdir",
"=",
"'cornell movie-dialogs corpus'",
")",
":",
"fullpath_zipfile",
"=",
"find_filepath",
"(",
"filename",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"basename... | 49.107143 | 16.357143 |
def iter_files(self):
"""Iterate over files."""
# file_iter may be a callable or an iterator
if callable(self.file_iter):
return self.file_iter()
return iter(self.file_iter) | [
"def",
"iter_files",
"(",
"self",
")",
":",
"# file_iter may be a callable or an iterator",
"if",
"callable",
"(",
"self",
".",
"file_iter",
")",
":",
"return",
"self",
".",
"file_iter",
"(",
")",
"return",
"iter",
"(",
"self",
".",
"file_iter",
")"
] | 35.333333 | 7.5 |
def _get_self_bounds(self):
"""
Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
bounds = [[None, None], [None, None]]
for point in self.data:
bounds = [
[
none_min(bounds[0][0], point[0]),
none_min(bounds[0][1], point[1]),
],
[
none_max(bounds[1][0], point[0]),
none_max(bounds[1][1], point[1]),
],
]
return bounds | [
"def",
"_get_self_bounds",
"(",
"self",
")",
":",
"bounds",
"=",
"[",
"[",
"None",
",",
"None",
"]",
",",
"[",
"None",
",",
"None",
"]",
"]",
"for",
"point",
"in",
"self",
".",
"data",
":",
"bounds",
"=",
"[",
"[",
"none_min",
"(",
"bounds",
"[",... | 31.684211 | 17.473684 |
def create_profile():
"""If this is the user's first login, the create_or_login function
will redirect here so that the user can set up his profile.
"""
if g.user is not None or 'openid' not in session:
return redirect(url_for('index'))
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
if not name:
flash(u'Error: you have to provide a name')
elif '@' not in email:
flash(u'Error: you have to enter a valid email address')
else:
flash(u'Profile successfully created')
User.get_collection().insert(User(name, email, session['openid']))
return redirect(oid.get_next_url())
return render_template('create_profile.html', next_url=oid.get_next_url()) | [
"def",
"create_profile",
"(",
")",
":",
"if",
"g",
".",
"user",
"is",
"not",
"None",
"or",
"'openid'",
"not",
"in",
"session",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"'index'",
")",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"... | 44.333333 | 14.722222 |
def change_password(self, usrname, oldpwd, newpwd, callback=None):
'''
Change password.
'''
params = {'usrName': usrname,
'oldPwd' : oldpwd,
'newPwd' : newpwd,
}
return self.execute_command('changePassword',
params, callback=callback) | [
"def",
"change_password",
"(",
"self",
",",
"usrname",
",",
"oldpwd",
",",
"newpwd",
",",
"callback",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'usrName'",
":",
"usrname",
",",
"'oldPwd'",
":",
"oldpwd",
",",
"'newPwd'",
":",
"newpwd",
",",
"}",
"ret... | 35.4 | 16.8 |
def get(self, entry):
"""Gets an entry by key. Will return None if there is no
matching entry."""
try:
list = self.cache[entry.key]
return list[list.index(entry)]
except:
return None | [
"def",
"get",
"(",
"self",
",",
"entry",
")",
":",
"try",
":",
"list",
"=",
"self",
".",
"cache",
"[",
"entry",
".",
"key",
"]",
"return",
"list",
"[",
"list",
".",
"index",
"(",
"entry",
")",
"]",
"except",
":",
"return",
"None"
] | 30.5 | 11.375 |
def calcstats(data, t1, t2, sr):
"""Calculate the mean and standard deviation of some array between
t1 and t2 provided the sample rate sr.
"""
dataseg = data[sr*t1:sr*t2]
meandata = np.mean(dataseg[~np.isnan(dataseg)])
stddata = np.std(dataseg[~np.isnan(dataseg)])
return meandata, stddata | [
"def",
"calcstats",
"(",
"data",
",",
"t1",
",",
"t2",
",",
"sr",
")",
":",
"dataseg",
"=",
"data",
"[",
"sr",
"*",
"t1",
":",
"sr",
"*",
"t2",
"]",
"meandata",
"=",
"np",
".",
"mean",
"(",
"dataseg",
"[",
"~",
"np",
".",
"isnan",
"(",
"datas... | 39.625 | 6.5 |
def _write_config_file(template_file):
"""
Write a config file to the source bundle location to identify the entry point.
:param template_file: path to the task template subclass (executable)
"""
config_filename = '.cloud_harness_config.json'
config_path = os.path.dirname(template_file)
filename = os.path.split(template_file)[1]
if filename.endswith('.pyc'):
filename = filename[:-1]
config_file = os.path.join(config_path, config_filename)
with open(config_file, 'w') as f:
f.write(json.dumps({'task_filename': filename}))
return config_file | [
"def",
"_write_config_file",
"(",
"template_file",
")",
":",
"config_filename",
"=",
"'.cloud_harness_config.json'",
"config_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"template_file",
")",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"templat... | 33.842105 | 20.368421 |
def _handle_subscribed(self, *args, chanId=None, channel=None, **kwargs):
"""
Handles responses to subscribe() commands - registers a channel id with
the client and assigns a data handler to it.
:param chanId: int, represent channel id as assigned by server
:param channel: str, represents channel name
"""
log.debug("_handle_subscribed: %s - %s - %s", chanId, channel, kwargs)
if chanId in self.channels:
raise AlreadyRegisteredError()
self._heartbeats[chanId] = time.time()
try:
channel_key = ('raw_'+channel
if kwargs['prec'].startswith('R') and channel == 'book'
else channel)
except KeyError:
channel_key = channel
try:
self.channels[chanId] = self._data_handlers[channel_key]
except KeyError:
raise UnknownChannelError()
# prep kwargs to be used as secondary value in dict key
try:
kwargs.pop('event')
except KeyError:
pass
try:
kwargs.pop('len')
except KeyError:
pass
try:
kwargs.pop('chanId')
except KeyError:
pass
self.channel_labels[chanId] = (channel_key, kwargs) | [
"def",
"_handle_subscribed",
"(",
"self",
",",
"*",
"args",
",",
"chanId",
"=",
"None",
",",
"channel",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"debug",
"(",
"\"_handle_subscribed: %s - %s - %s\"",
",",
"chanId",
",",
"channel",
",",
... | 30.833333 | 21.404762 |
def deployed(name, jboss_config, salt_source=None):
'''Ensures that the given application is deployed on server.
jboss_config:
Dict with connection properties (see state description)
salt_source:
How to find the artifact to be deployed.
target_file:
Where to look in the minion's file system for the artifact to be deployed (e.g. '/tmp/application-web-0.39.war'). When source is specified, also specifies where to save the retrieved file.
source:
(optional) File on salt master (e.g. salt://application-web-0.39.war). If absent, no files will be retrieved and the artifact in target_file will be used for the deployment.
undeploy:
(optional) Regular expression to match against existing deployments. When present, if there is a deployment that matches the regular expression, it will be undeployed before the new artifact is deployed.
undeploy_force:
(optional) If True, the artifact will be undeployed although it has not changed.
Examples:
Deployment of a file from minion's local file system:
.. code-block:: yaml
application_deployed:
jboss7.deployed:
- salt_source:
target_file: '/tmp/webapp.war'
- jboss_config: {{ pillar['jboss'] }}
It is assumed that /tmp/webapp.war was made available by some
other means. No applications will be undeployed; if an existing
deployment that shares that name exists, then it will be replaced
with the updated version.
Deployment of a file from the Salt master's file system:
.. code-block:: yaml
application_deployed:
jboss7.deployed:
- salt_source:
source: salt://application-web-0.39.war
target_file: '/tmp/application-web-0.39.war'
undeploy: 'application-web-.*'
- jboss_config: {{ pillar['jboss'] }}
Here, application-web-0.39.war file is downloaded from Salt file system to /tmp/application-web-0.39.war file on minion.
Existing deployments are checked if any of them matches 'application-web-.*' regular expression, and if so then it
is undeployed before deploying the application. This is useful to automate deployment of new application versions.
If the source parameter of salt_source is specified, it can use
any protocol that the file states use. This includes not only
downloading from the master but also HTTP, HTTPS, FTP,
Amazon S3, and OpenStack Swift.
'''
log.debug(" ======================== STATE: jboss7.deployed (name: %s) ", name)
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
comment = ''
validate_success, validate_comment = __validate_arguments(jboss_config, salt_source)
if not validate_success:
return _error(ret, validate_comment)
resolved_source, get_artifact_comment, changed = __get_artifact(salt_source)
log.debug('resolved_source=%s', resolved_source)
log.debug('get_artifact_comment=%s', get_artifact_comment)
comment = __append_comment(new_comment=get_artifact_comment, current_comment=comment)
if resolved_source is None:
return _error(ret, get_artifact_comment)
find_success, deployment, find_comment = __find_deployment(jboss_config, salt_source)
if not find_success:
return _error(ret, find_comment)
require_deployment = True
log.debug('deployment=%s', deployment)
if deployment is not None:
if 'undeploy_force' in salt_source:
if salt_source['undeploy_force']:
ret['changes']['undeployed'] = __undeploy(jboss_config, deployment)
else:
if changed:
ret['changes']['undeployed'] = __undeploy(jboss_config, deployment)
else:
require_deployment = False
comment = __append_comment(new_comment='The artifact {} was already deployed'.format(deployment), current_comment=comment)
else:
ret['changes']['undeployed'] = __undeploy(jboss_config, deployment)
if require_deployment:
deploy_result = __salt__['jboss7.deploy'](jboss_config=jboss_config, source_file=resolved_source)
log.debug('deploy_result=%s', str(deploy_result))
if deploy_result['success']:
comment = __append_comment(new_comment='Deployment completed.', current_comment=comment)
ret['changes']['deployed'] = resolved_source
else:
comment = __append_comment(new_comment='''Deployment failed\nreturn code={retcode}\nstdout='{stdout}'\nstderr='{stderr}'''.format(**deploy_result), current_comment=comment)
_error(ret, comment)
ret['comment'] = comment
return ret | [
"def",
"deployed",
"(",
"name",
",",
"jboss_config",
",",
"salt_source",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"\" ======================== STATE: jboss7.deployed (name: %s) \"",
",",
"name",
")",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'... | 44.055556 | 31.796296 |
def _generic_definetext_parser(self, obj, rgb_struct):
"""Generic parser for the DefineTextN tags."""
obj.CharacterID = unpack_ui16(self._src)
obj.TextBounds = self._get_struct_rect()
obj.TextMatrix = self._get_struct_matrix()
obj.GlyphBits = glyph_bits = unpack_ui8(self._src)
obj.AdvanceBits = advance_bits = unpack_ui8(self._src)
# textrecords
obj.TextRecords = records = []
while True:
endofrecords_flag = unpack_ui8(self._src)
if endofrecords_flag == 0:
# all done
obj.EndOfRecordsFlag = 0
break
# we have a TEXTRECORD, let's go back the 8 bits and set the obj
self._src.seek(-1, io.SEEK_CUR)
record = _make_object("TextRecord")
records.append(record)
bc = BitConsumer(self._src)
record.TextRecordType = bc.u_get(1)
record.StyleFlagsReserved = bc.u_get(3)
record.StyleFlagsHasFont = bc.u_get(1)
record.StyleFlagsHasColor = bc.u_get(1)
record.StyleFlagsHasYOffset = bc.u_get(1)
record.StyleFlagsHasXOffset = bc.u_get(1)
if record.StyleFlagsHasFont:
record.FontID = unpack_ui16(self._src)
if record.StyleFlagsHasColor:
record.TextColor = rgb_struct()
if record.StyleFlagsHasXOffset:
record.XOffset = unpack_si16(self._src)
if record.StyleFlagsHasYOffset:
record.YOffset = unpack_si16(self._src)
if record.StyleFlagsHasFont:
record.TextHeight = unpack_ui16(self._src)
record.GlyphCount = unpack_ui8(self._src)
bc = BitConsumer(self._src)
record.GlyphEntries = glyphs = []
for _ in range(record.GlyphCount):
glyph = _make_object("GlyphEntry")
glyphs.append(glyph)
glyph.GlyphIndex = bc.u_get(glyph_bits)
glyph.GlyphAdvance = bc.u_get(advance_bits) | [
"def",
"_generic_definetext_parser",
"(",
"self",
",",
"obj",
",",
"rgb_struct",
")",
":",
"obj",
".",
"CharacterID",
"=",
"unpack_ui16",
"(",
"self",
".",
"_src",
")",
"obj",
".",
"TextBounds",
"=",
"self",
".",
"_get_struct_rect",
"(",
")",
"obj",
".",
... | 41.489796 | 13 |
def validate(defaults, metadata, config):
"""
Validate configuration.
"""
for path, _, default, parent, value in zip_dicts(defaults, config):
if isinstance(default, Requirement):
# validate the current value and assign the output
parent[path[-1]] = default.validate(metadata, path, value) | [
"def",
"validate",
"(",
"defaults",
",",
"metadata",
",",
"config",
")",
":",
"for",
"path",
",",
"_",
",",
"default",
",",
"parent",
",",
"value",
"in",
"zip_dicts",
"(",
"defaults",
",",
"config",
")",
":",
"if",
"isinstance",
"(",
"default",
",",
... | 36.555556 | 15.666667 |
def is_predecessor_of_other(self, predecessor, others):
"""Returns whether the predecessor is a predecessor or a predecessor
of a predecessor...of any of the others.
Args:
predecessor (str): The txn id of the predecessor.
others (list(str)): The txn id of the successor.
Returns:
(bool)
"""
return any(predecessor in self._predecessors_by_id[o] for o in others) | [
"def",
"is_predecessor_of_other",
"(",
"self",
",",
"predecessor",
",",
"others",
")",
":",
"return",
"any",
"(",
"predecessor",
"in",
"self",
".",
"_predecessors_by_id",
"[",
"o",
"]",
"for",
"o",
"in",
"others",
")"
] | 31.142857 | 23.928571 |
def gen_items_from_sql_csv(s: str) -> Generator[str, None, None]:
"""
Splits a comma-separated list of quoted SQL values, with ``'`` as the quote
character. Allows escaping of the quote character by doubling it. Returns
the quotes (and escaped quotes) as part of the result. Allows newlines etc.
within the string passed.
"""
# csv.reader will not both process the quotes and return the quotes;
# we need them to distinguish e.g. NULL from 'NULL'.
# log.warning('gen_items_from_sql_csv: s = {0!r}', s)
if not s:
return
n = len(s)
startpos = 0
pos = 0
in_quotes = False
while pos < n:
if not in_quotes:
if s[pos] == COMMA:
# end of chunk
chunk = s[startpos:pos] # does not include s[pos]
result = chunk.strip()
# log.warning('yielding: {0!r}', result)
yield result
startpos = pos + 1
elif s[pos] == SQUOTE:
# start of quote
in_quotes = True
else:
if pos < n - 1 and s[pos] == SQUOTE and s[pos + 1] == SQUOTE:
# double quote, '', is an escaped quote, not end of quote
pos += 1 # skip one more than we otherwise would
elif s[pos] == SQUOTE:
# end of quote
in_quotes = False
pos += 1
# Last chunk
result = s[startpos:].strip()
# log.warning('yielding last: {0!r}', result)
yield result | [
"def",
"gen_items_from_sql_csv",
"(",
"s",
":",
"str",
")",
"->",
"Generator",
"[",
"str",
",",
"None",
",",
"None",
"]",
":",
"# csv.reader will not both process the quotes and return the quotes;",
"# we need them to distinguish e.g. NULL from 'NULL'.",
"# log.warning('gen_ite... | 37.3 | 18.4 |
async def enqueue_job(
self,
function: str,
*args: Any,
_job_id: Optional[str] = None,
_defer_until: Optional[datetime] = None,
_defer_by: Union[None, int, float, timedelta] = None,
_expires: Union[None, int, float, timedelta] = None,
_job_try: Optional[int] = None,
**kwargs: Any,
) -> Optional[Job]:
"""
Enqueue a job.
:param function: Name of the function to call
:param args: args to pass to the function
:param _job_id: ID of the job, can be used to enforce job uniqueness
:param _defer_until: datetime at which to run the job
:param _defer_by: duration to wait before running the job
:param _expires: if the job still hasn't started after this duration, do not run it
:param _job_try: useful when re-enqueueing jobs within a job
:param kwargs: any keyword arguments to pass to the function
:return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
"""
job_id = _job_id or uuid4().hex
job_key = job_key_prefix + job_id
assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
defer_by_ms = to_ms(_defer_by)
expires_ms = to_ms(_expires)
with await self as conn:
pipe = conn.pipeline()
pipe.unwatch()
pipe.watch(job_key)
job_exists = pipe.exists(job_key)
await pipe.execute()
if await job_exists:
return
enqueue_time_ms = timestamp_ms()
if _defer_until is not None:
score = to_unix_ms(_defer_until)
elif defer_by_ms:
score = enqueue_time_ms + defer_by_ms
else:
score = enqueue_time_ms
expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
tr = conn.multi_exec()
tr.psetex(job_key, expires_ms, job)
tr.zadd(queue_name, score, job_id)
try:
await tr.execute()
except MultiExecError:
# job got enqueued since we checked 'job_exists'
return
return Job(job_id, self) | [
"async",
"def",
"enqueue_job",
"(",
"self",
",",
"function",
":",
"str",
",",
"*",
"args",
":",
"Any",
",",
"_job_id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"_defer_until",
":",
"Optional",
"[",
"datetime",
"]",
"=",
"None",
",",
"_defe... | 38.583333 | 18.25 |
def parse(args):
"""
Define the available arguments
"""
from tzlocal import get_localzone
try:
timezone = get_localzone()
if isinstance(timezone, pytz.BaseTzInfo):
timezone = timezone.zone
except Exception: # pragma: no cover
timezone = 'UTC'
if timezone == 'local':
timezone = 'UTC'
parser = argparse.ArgumentParser(description="""Bootstrap a django CMS project.
Major usage modes:
- wizard: djangocms -w -p /path/whatever project_name: ask for all the options through a
CLI wizard.
- batch: djangocms project_name: runs with the default values plus any
additional option provided (see below) with no question asked.
- config file: djangocms_installer --config-file /path/to/config.ini project_name: reads values
from an ini-style config file.
Check https://djangocms-installer.readthedocs.io/en/latest/usage.html for detailed usage
information.
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--config-file', dest='config_file', action='store',
default=None,
help='Configuration file for djangocms_installer')
parser.add_argument('--config-dump', dest='config_dump', action='store',
default=None,
help='Dump configuration file with current args')
parser.add_argument('--db', '-d', dest='db', action=DbAction,
default='sqlite://localhost/project.db',
help='Database configuration (in URL format). '
'Example: sqlite://localhost/project.db')
parser.add_argument('--i18n', '-i', dest='i18n', action='store',
choices=('yes', 'no'),
default='yes', help='Activate Django I18N / L10N setting; this is '
'automatically activated if more than '
'language is provided')
parser.add_argument('--use-tz', '-z', dest='use_timezone', action='store',
choices=('yes', 'no'),
default='yes', help='Activate Django timezone support')
parser.add_argument('--timezone', '-t', dest='timezone',
required=False, default=timezone,
action='store', help='Optional default time zone. Example: Europe/Rome')
parser.add_argument('--reversion', '-e', dest='reversion', action='store',
choices=('yes', 'no'),
default='yes', help='Install and configure reversion support '
'(only for django CMS 3.2 and 3.3)')
parser.add_argument('--permissions', dest='permissions', action='store',
choices=('yes', 'no'),
default='no', help='Activate CMS permission management')
parser.add_argument('--pip-options', help='pass custom pip options', default='')
parser.add_argument('--languages', '-l', dest='languages', action='append',
help='Languages to enable. Option can be provided multiple times, or as a '
'comma separated list. Only language codes supported by Django can '
'be used here. Example: en, fr-FR, it-IT')
parser.add_argument('--django-version', dest='django_version', action='store',
choices=data.DJANGO_SUPPORTED,
default=data.DJANGO_DEFAULT, help='Django version')
parser.add_argument('--cms-version', '-v', dest='cms_version', action='store',
choices=data.DJANGOCMS_SUPPORTED,
default=data.DJANGOCMS_DEFAULT, help='django CMS version')
parser.add_argument('--parent-dir', '-p', dest='project_directory',
default='',
action='store', help='Optional project parent directory')
parser.add_argument('--bootstrap', dest='bootstrap', action='store',
choices=('yes', 'no'),
default='no', help='Use Twitter Bootstrap Theme')
parser.add_argument('--templates', dest='templates', action='store',
default='no', help='Use custom template set')
parser.add_argument('--starting-page', dest='starting_page', action='store',
choices=('yes', 'no'),
default='no', help='Load a starting page with examples after installation '
'(english language only). Choose "no" if you use a '
'custom template set.')
parser.add_argument(dest='project_name', action='store',
help='Name of the project to be created')
# Command that lists the supported plugins in verbose description
parser.add_argument('--list-plugins', '-P', dest='plugins', action='store_true',
help='List plugins that\'s going to be installed and configured')
# Command that lists the supported plugins in verbose description
parser.add_argument('--dump-requirements', '-R', dest='dump_reqs', action='store_true',
help='It dumps the requirements that would be installed according to '
'parameters given. Together with --requirements argument is useful '
'for customizing the virtualenv')
# Advanced options. These have a predefined default and are not asked
# by config wizard.
parser.add_argument('--no-input', '-q', dest='noinput', action='store_true',
default=True, help='Don\'t run the configuration wizard, just use the '
'provided values')
parser.add_argument('--wizard', '-w', dest='wizard', action='store_true',
default=False, help='Run the configuration wizard')
parser.add_argument('--verbose', dest='verbose', action='store_true',
default=False,
help='Be more verbose and don\'t swallow subcommands output')
parser.add_argument('--filer', '-f', dest='filer', action='store_true',
default=True, help='Install and configure django-filer plugins '
'- Always enabled')
parser.add_argument('--requirements', '-r', dest='requirements_file', action='store',
default=None, help='Externally defined requirements file')
parser.add_argument('--no-deps', '-n', dest='no_deps', action='store_true',
default=False, help='Don\'t install package dependencies')
parser.add_argument('--no-plugins', dest='no_plugins', action='store_true',
default=False, help='Don\'t install plugins')
parser.add_argument('--no-db-driver', dest='no_db_driver', action='store_true',
default=False, help='Don\'t install database package')
parser.add_argument('--no-sync', '-m', dest='no_sync', action='store_true',
default=False, help='Don\'t run syncdb / migrate after bootstrapping')
parser.add_argument('--no-user', '-u', dest='no_user', action='store_true',
default=False, help='Don\'t create the admin user')
parser.add_argument('--template', dest='template', action='store',
default=None, help='The path or URL to load the django project '
'template from.')
parser.add_argument('--extra-settings', dest='extra_settings', action='store',
default=None, help='The path to an file that contains extra settings.')
parser.add_argument('--skip-empty-check', '-s', dest='skip_project_dir_check',
action='store_true',
default=False, help='Skip the check if project dir is empty.')
parser.add_argument('--delete-project-dir', '-c', dest='delete_project_dir',
action='store_true',
default=False, help='Delete project directory on creation failure.')
parser.add_argument('--utc', dest='utc',
action='store_true',
default=False, help='Use UTC timezone.')
if '--utc' in args:
for action in parser._positionals._actions:
if action.dest == 'timezone':
action.default = 'UTC'
# If config_args then pretend that config args came from the stdin and run parser again.
config_args = ini.parse_config_file(parser, args)
args = parser.parse_args(config_args + args)
if not args.wizard:
args.noinput = True
else:
args.noinput = False
if not args.project_directory:
args.project_directory = args.project_name
args.project_directory = os.path.abspath(args.project_directory)
# First of all, check if the project name is valid
if not validate_project(args.project_name):
sys.stderr.write(
'Project name "{0}" is not a valid app name, or it\'s already defined. '
'Please use only numbers, letters and underscores.\n'.format(args.project_name)
)
sys.exit(3)
# Checking the given path
setattr(args, 'project_path', os.path.join(args.project_directory, args.project_name).strip())
if not args.skip_project_dir_check:
if (os.path.exists(args.project_directory) and
[path for path in os.listdir(args.project_directory) if not path.startswith('.')]):
sys.stderr.write(
'Path "{0}" already exists and is not empty, please choose a different one\n'
'If you want to use this path anyway use the -s flag to skip this check.\n'
''.format(args.project_directory)
)
sys.exit(4)
if os.path.exists(args.project_path):
sys.stderr.write(
'Path "{0}" already exists, please choose a different one\n'.format(args.project_path)
)
sys.exit(4)
if args.config_dump and os.path.isfile(args.config_dump):
sys.stdout.write(
'Cannot dump because given configuration file "{0}" exists.\n'.format(args.config_dump)
)
sys.exit(8)
args = _manage_args(parser, args)
# what do we want here?!
# * if languages are given as multiple arguments, let's use it as is
# * if no languages are given, use a default and stop handling it further
# * if languages are given as a comma-separated list, split it and use the
# resulting list.
if not args.languages:
try:
args.languages = [locale.getdefaultlocale()[0].split('_')[0]]
except Exception: # pragma: no cover
args.languages = ['en']
elif isinstance(args.languages, six.string_types):
args.languages = args.languages.split(',')
elif len(args.languages) == 1 and isinstance(args.languages[0], six.string_types):
args.languages = args.languages[0].split(',')
args.languages = [lang.strip().lower() for lang in args.languages]
if len(args.languages) > 1:
args.i18n = 'yes'
args.aldryn = False
args.filer = True
# Convert version to numeric format for easier checking
try:
django_version, cms_version = supported_versions(args.django_version, args.cms_version)
cms_package = data.PACKAGE_MATRIX.get(
cms_version, data.PACKAGE_MATRIX[data.DJANGOCMS_LTS]
)
except RuntimeError as e: # pragma: no cover
sys.stderr.write(compat.unicode(e))
sys.exit(6)
if django_version is None: # pragma: no cover
sys.stderr.write(
'Please provide a Django supported version: {0}. Only Major.Minor '
'version selector is accepted\n'.format(', '.join(data.DJANGO_SUPPORTED))
)
sys.exit(6)
if cms_version is None: # pragma: no cover
sys.stderr.write(
'Please provide a django CMS supported version: {0}. Only Major.Minor '
'version selector is accepted\n'.format(', '.join(data.DJANGOCMS_SUPPORTED))
)
sys.exit(6)
default_settings = '{}.settings'.format(args.project_name)
env_settings = os.environ.get('DJANGO_SETTINGS_MODULE', default_settings)
if env_settings != default_settings:
sys.stderr.write(
'`DJANGO_SETTINGS_MODULE` is currently set to \'{0}\' which is not compatible with '
'djangocms installer.\nPlease unset `DJANGO_SETTINGS_MODULE` and re-run the installer '
'\n'.format(env_settings)
)
sys.exit(10)
if not getattr(args, 'requirements_file'):
requirements = []
# django CMS version check
if args.cms_version == 'develop':
requirements.append(cms_package)
warnings.warn(data.VERSION_WARNING.format('develop', 'django CMS'))
elif args.cms_version == 'rc': # pragma: no cover
requirements.append(cms_package)
elif args.cms_version == 'beta': # pragma: no cover
requirements.append(cms_package)
warnings.warn(data.VERSION_WARNING.format('beta', 'django CMS'))
else:
requirements.append(cms_package)
if args.cms_version in ('rc', 'develop'):
requirements.extend(data.REQUIREMENTS['cms-master'])
elif LooseVersion(cms_version) >= LooseVersion('3.6'):
requirements.extend(data.REQUIREMENTS['cms-3.6'])
elif LooseVersion(cms_version) >= LooseVersion('3.5'):
requirements.extend(data.REQUIREMENTS['cms-3.5'])
elif LooseVersion(cms_version) >= LooseVersion('3.4'):
requirements.extend(data.REQUIREMENTS['cms-3.4'])
if not args.no_db_driver:
requirements.append(args.db_driver)
if not args.no_plugins:
if args.cms_version in ('rc', 'develop'):
requirements.extend(data.REQUIREMENTS['plugins-master'])
elif LooseVersion(cms_version) >= LooseVersion('3.6'):
requirements.extend(data.REQUIREMENTS['plugins-3.6'])
elif LooseVersion(cms_version) >= LooseVersion('3.5'):
requirements.extend(data.REQUIREMENTS['plugins-3.5'])
elif LooseVersion(cms_version) >= LooseVersion('3.4'):
requirements.extend(data.REQUIREMENTS['plugins-3.4'])
requirements.extend(data.REQUIREMENTS['filer'])
if args.aldryn: # pragma: no cover
requirements.extend(data.REQUIREMENTS['aldryn'])
# Django version check
if args.django_version == 'develop': # pragma: no cover
requirements.append(data.DJANGO_DEVELOP)
warnings.warn(data.VERSION_WARNING.format('develop', 'Django'))
elif args.django_version == 'beta': # pragma: no cover
requirements.append(data.DJANGO_BETA)
warnings.warn(data.VERSION_WARNING.format('beta', 'Django'))
else:
requirements.append('Django<{0}'.format(less_than_version(django_version)))
if django_version == '1.8':
requirements.extend(data.REQUIREMENTS['django-1.8'])
elif django_version == '1.9':
requirements.extend(data.REQUIREMENTS['django-1.9'])
elif django_version == '1.10':
requirements.extend(data.REQUIREMENTS['django-1.10'])
elif django_version == '1.11':
requirements.extend(data.REQUIREMENTS['django-1.11'])
elif django_version == '2.0':
requirements.extend(data.REQUIREMENTS['django-2.0'])
elif django_version == '2.1':
requirements.extend(data.REQUIREMENTS['django-2.1'])
requirements.extend(data.REQUIREMENTS['default'])
setattr(args, 'requirements', '\n'.join(requirements).strip())
# Convenient shortcuts
setattr(args, 'cms_version', cms_version)
setattr(args, 'django_version', django_version)
setattr(args, 'settings_path',
os.path.join(args.project_directory, args.project_name, 'settings.py').strip())
setattr(args, 'urlconf_path',
os.path.join(args.project_directory, args.project_name, 'urls.py').strip())
if args.config_dump:
ini.dump_config_file(args.config_dump, args, parser)
return args | [
"def",
"parse",
"(",
"args",
")",
":",
"from",
"tzlocal",
"import",
"get_localzone",
"try",
":",
"timezone",
"=",
"get_localzone",
"(",
")",
"if",
"isinstance",
"(",
"timezone",
",",
"pytz",
".",
"BaseTzInfo",
")",
":",
"timezone",
"=",
"timezone",
".",
... | 50.745283 | 26.613208 |
def get_significant_digits(numeric_value):
"""
Returns the precision for a given floatable value.
If value is None or not floatable, returns None.
Will return positive values if the result is below 1 and will
return 0 values if the result is above or equal to 1.
:param numeric_value: the value to get the precision from
:returns: the numeric_value's precision
Examples:
numeric_value Returns
0 0
0.22 1
1.34 0
0.0021 3
0.013 2
2 0
22 0
"""
try:
numeric_value = float(numeric_value)
except (TypeError, ValueError):
return None
if numeric_value == 0:
return 0
significant_digit = int(math.floor(math.log10(abs(numeric_value))))
return 0 if significant_digit > 0 else abs(significant_digit) | [
"def",
"get_significant_digits",
"(",
"numeric_value",
")",
":",
"try",
":",
"numeric_value",
"=",
"float",
"(",
"numeric_value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"None",
"if",
"numeric_value",
"==",
"0",
":",
"return",
... | 35.884615 | 13.423077 |
def put_file(up_token, key, file_path, params=None,
mime_type='application/octet-stream', check_crc=False,
progress_handler=None, upload_progress_recorder=None, keep_last_modified=False):
"""上传文件到七牛
Args:
up_token: 上传凭证
key: 上传文件名
file_path: 上传文件的路径
params: 自定义变量,规格参考 http://developer.qiniu.com/docs/v6/api/overview/up/response/vars.html#xvar
mime_type: 上传数据的mimeType
check_crc: 是否校验crc32
progress_handler: 上传进度
upload_progress_recorder: 记录上传进度,用于断点续传
Returns:
一个dict变量,类似 {"hash": "<Hash string>", "key": "<Key string>"}
一个ResponseInfo对象
"""
ret = {}
size = os.stat(file_path).st_size
# fname = os.path.basename(file_path)
with open(file_path, 'rb') as input_stream:
file_name = os.path.basename(file_path)
modify_time = int(os.path.getmtime(file_path))
if size > config._BLOCK_SIZE * 2:
ret, info = put_stream(up_token, key, input_stream, file_name, size, params,
mime_type, progress_handler,
upload_progress_recorder=upload_progress_recorder,
modify_time=modify_time, keep_last_modified=keep_last_modified)
else:
crc = file_crc32(file_path)
ret, info = _form_put(up_token, key, input_stream, params, mime_type,
crc, progress_handler, file_name,
modify_time=modify_time, keep_last_modified=keep_last_modified)
return ret, info | [
"def",
"put_file",
"(",
"up_token",
",",
"key",
",",
"file_path",
",",
"params",
"=",
"None",
",",
"mime_type",
"=",
"'application/octet-stream'",
",",
"check_crc",
"=",
"False",
",",
"progress_handler",
"=",
"None",
",",
"upload_progress_recorder",
"=",
"None",... | 45.305556 | 22.277778 |
def validate(self):
"""
Validates the state of this XBlock.
Subclasses should override validate_field_data() to validate fields and override this
only for validation not related to this block's field values.
"""
validation = super(StudioEditableXBlockMixin, self).validate()
self.validate_field_data(validation, self)
return validation | [
"def",
"validate",
"(",
"self",
")",
":",
"validation",
"=",
"super",
"(",
"StudioEditableXBlockMixin",
",",
"self",
")",
".",
"validate",
"(",
")",
"self",
".",
"validate_field_data",
"(",
"validation",
",",
"self",
")",
"return",
"validation"
] | 39.1 | 20.1 |
def addButton(
fnc,
states=("On", "Off"),
c=("w", "w"),
bc=("dg", "dr"),
pos=(20, 40),
size=24,
font="arial",
bold=False,
italic=False,
alpha=1,
angle=0,
):
"""Add a button to the renderer window.
:param list states: a list of possible states ['On', 'Off']
:param c: a list of colors for each state
:param bc: a list of background colors for each state
:param pos: 2D position in pixels from left-bottom corner
:param size: size of button font
:param str font: font type (arial, courier, times)
:param bool bold: bold face (False)
:param bool italic: italic face (False)
:param float alpha: opacity level
:param float angle: anticlockwise rotation in degrees
.. hint:: |buttons| |buttons.py|_
"""
vp = settings.plotter_instance
if not vp.renderer:
colors.printc("~timesError: Use addButton() after rendering the scene.", c=1)
return
import vtkplotter.vtkio as vtkio
bu = vtkio.Button(fnc, states, c, bc, pos, size, font, bold, italic, alpha, angle)
vp.renderer.AddActor2D(bu.actor)
vp.window.Render()
vp.buttons.append(bu)
return bu | [
"def",
"addButton",
"(",
"fnc",
",",
"states",
"=",
"(",
"\"On\"",
",",
"\"Off\"",
")",
",",
"c",
"=",
"(",
"\"w\"",
",",
"\"w\"",
")",
",",
"bc",
"=",
"(",
"\"dg\"",
",",
"\"dr\"",
")",
",",
"pos",
"=",
"(",
"20",
",",
"40",
")",
",",
"size"... | 30.657895 | 19.447368 |
def _make_all_matchers(cls, parameters):
'''
For every parameter, create a matcher if the parameter has an
annotation.
'''
for name, param in parameters:
annotation = param.annotation
if annotation is not Parameter.empty:
yield name, cls._make_param_matcher(annotation, param.kind) | [
"def",
"_make_all_matchers",
"(",
"cls",
",",
"parameters",
")",
":",
"for",
"name",
",",
"param",
"in",
"parameters",
":",
"annotation",
"=",
"param",
".",
"annotation",
"if",
"annotation",
"is",
"not",
"Parameter",
".",
"empty",
":",
"yield",
"name",
","... | 39.222222 | 17.222222 |
def update_intervals(self, back=None):
'''
Return the update intervals for all of the enabled fileserver backends
which support variable update intervals.
'''
back = self.backends(back)
ret = {}
for fsb in back:
fstr = '{0}.update_intervals'.format(fsb)
if fstr in self.servers:
ret[fsb] = self.servers[fstr]()
return ret | [
"def",
"update_intervals",
"(",
"self",
",",
"back",
"=",
"None",
")",
":",
"back",
"=",
"self",
".",
"backends",
"(",
"back",
")",
"ret",
"=",
"{",
"}",
"for",
"fsb",
"in",
"back",
":",
"fstr",
"=",
"'{0}.update_intervals'",
".",
"format",
"(",
"fsb... | 34.5 | 16.5 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.